python_code | repo_name | file_path
---|---|---|
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
| datasets-server-main | libs/libcommon/src/libcommon/__init__.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from typing import Optional
from huggingface_hub.hf_api import DatasetInfo, HfApi
from huggingface_hub.utils._errors import RepositoryNotFoundError, RevisionNotFoundError
from libcommon.exceptions import (
CustomError,
DatasetInfoHubRequestError,
DatasetNotFoundError,
DatasetRevisionEmptyError,
DatasetRevisionNotFoundError,
DisabledViewerError,
)
DOES_NOT_EXIST_OR_PRIVATE_DATASET_ERROR_MESSAGE = (
"The dataset does not exist on the Hub, or is private. Private datasets are not yet supported."
)
def raise_if_not_supported(dataset_info: DatasetInfo) -> None:
"""
Raise an error if the dataset is not supported by the datasets-server.
Args:
dataset_info (`DatasetInfo`):
The dataset info.
Returns:
`None`
Raises the following errors:
- [`~exceptions.DatasetNotFoundError`]
if the dataset id does not exist, or if the dataset is private
- [`~exceptions.DisabledViewerError`]
if the dataset viewer is disabled.
"""
if not dataset_info.id or dataset_info.private:
raise DatasetNotFoundError(DOES_NOT_EXIST_OR_PRIVATE_DATASET_ERROR_MESSAGE)
if dataset_info.cardData and not dataset_info.cardData.get("viewer", True):
raise DisabledViewerError("The dataset viewer has been disabled on this dataset.")
def is_supported(dataset_info: DatasetInfo) -> bool:
"""
Check if the dataset is supported by the datasets-server.
Args:
dataset_info (`DatasetInfo`):
The dataset info.
Returns:
`bool`: True if the dataset is supported, False otherwise.
"""
try:
raise_if_not_supported(dataset_info)
except CustomError:
return False
return True
def get_dataset_info_for_supported_datasets(
dataset: str,
hf_endpoint: str,
hf_token: Optional[str] = None,
hf_timeout_seconds: Optional[float] = None,
revision: Optional[str] = None,
files_metadata: bool = False,
) -> DatasetInfo:
"""
Get the DatasetInfo of the dataset, after checking if it's supported (no private datasets).
Args:
dataset (`str`):
A namespace (user or an organization) and a repo name separated
by a `/`.
hf_endpoint (`str`):
The Hub endpoint (for example: "https://huggingface.co")
hf_token (`str`, *optional*):
An authentication token (See https://huggingface.co/settings/token)
hf_timeout_seconds (`float`, *optional*, defaults to None):
The timeout in seconds for the request to the Hub.
revision (`str`, *optional*, defaults to None):
The revision of the dataset repository from which to get the
information.
files_metadata (`bool`, *optional*, defaults to False):
Whether or not to retrieve metadata for files in the repository
(size, LFS metadata, etc). Defaults to `False`.
Returns:
`DatasetInfo`: the dataset info.
Raises the following errors:
- [`~exceptions.DatasetInfoHubRequestError`]
if the request to the Hub to get the dataset info failed or timed out.
- [`~exceptions.DatasetNotFoundError`]:
if the dataset does not exist, or if the token does not give sufficient access to the dataset,
or if the dataset is private (private datasets are not supported by the datasets server).
- [`~exceptions.DatasetRevisionNotFoundError`]
if the git revision (branch, commit) does not exist in the repository.
- [`~exceptions.DisabledViewerError`]
if the dataset viewer is disabled.
- [`requests.exceptions.HTTPError`](https://requests.readthedocs.io/en/latest/api/#requests.HTTPError)
any other error when requesting access to the dataset
"""
try:
dataset_info = HfApi(endpoint=hf_endpoint).dataset_info(
repo_id=dataset,
token=hf_token,
timeout=hf_timeout_seconds,
revision=revision,
files_metadata=files_metadata,
)
except CustomError as err:
raise err
except RepositoryNotFoundError as err:
raise DatasetNotFoundError(DOES_NOT_EXIST_OR_PRIVATE_DATASET_ERROR_MESSAGE, cause=err) from err
except RevisionNotFoundError as err:
raise DatasetRevisionNotFoundError(
f"The default branch cannot be found in dataset {dataset} on the Hub.", cause=err
) from err
except Exception as err:
raise DatasetInfoHubRequestError(
(
"Request to the Hub to get the dataset info failed or timed out. Please try again later, it's a"
" temporary internal issue."
),
cause=err,
) from err
raise_if_not_supported(dataset_info)
return dataset_info
def get_dataset_git_revision(
dataset: str,
hf_endpoint: str,
hf_token: Optional[str] = None,
hf_timeout_seconds: Optional[float] = None,
) -> str:
"""
Get the git revision of the dataset.
Args:
dataset (`str`):
A namespace (user or an organization) and a repo name separated
by a `/`.
hf_endpoint (`str`):
The Hub endpoint (for example: "https://huggingface.co")
hf_token (`str`, *optional*):
An authentication token (See https://huggingface.co/settings/token)
hf_timeout_seconds (`float`, *optional*, defaults to None):
The timeout in seconds for the request to the Hub.
Returns:
`str`: the dataset git revision (sha).
Raises the following errors:
- [`~exceptions.DatasetInfoHubRequestError`]
if the request to the Hub to get the dataset info failed or timed out.
- [`~exceptions.DatasetNotFoundError`]:
if the dataset does not exist, or if the token does not give sufficient access to the dataset,
or if the dataset is private (private datasets are not supported by the datasets server).
- [`~exceptions.DatasetRevisionEmptyError`]
if the current git revision (branch, commit) could not be obtained.
- [`~exceptions.DatasetRevisionNotFoundError`]
if the git revision (branch, commit) does not exist in the repository.
- [`~exceptions.DisabledViewerError`]
if the dataset viewer is disabled.
- [`requests.exceptions.HTTPError`](https://requests.readthedocs.io/en/latest/api/#requests.HTTPError)
any other error when requesting access to the dataset
"""
sha = get_dataset_info_for_supported_datasets(
dataset=dataset, hf_endpoint=hf_endpoint, hf_token=hf_token, hf_timeout_seconds=hf_timeout_seconds
).sha
if sha is None:
raise DatasetRevisionEmptyError(f"The dataset {dataset} has no git revision.")
return sha # type: ignore
def get_supported_dataset_infos(hf_endpoint: str, hf_token: Optional[str] = None) -> list[DatasetInfo]:
return [d for d in HfApi(endpoint=hf_endpoint, token=hf_token).list_datasets() if is_supported(d)]
| datasets-server-main | libs/libcommon/src/libcommon/dataset.py |
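For context, a minimal usage sketch of the module above (not part of the original file): it assumes `libcommon` is installed, the Hub is reachable, and the dataset name is purely illustrative.

```python
# Illustrative sketch: resolve the current git revision of a public dataset
# with the helpers above, handling the custom errors raised by libcommon.
from libcommon.dataset import get_dataset_git_revision
from libcommon.exceptions import CustomError

try:
    revision = get_dataset_git_revision(
        dataset="squad",  # any public dataset name (illustrative)
        hf_endpoint="https://huggingface.co",
        hf_token=None,  # anonymous access is enough for public datasets
        hf_timeout_seconds=10.0,
    )
    print(f"current revision: {revision}")
except CustomError as err:
    # DatasetNotFoundError, DatasetRevisionNotFoundError, etc. derive from CustomError
    print(f"unsupported or unreachable dataset: {err}")
```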
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import logging
from libcommon.orchestrator import DatasetOrchestrator
from libcommon.processing_graph import ProcessingGraph
from libcommon.simple_cache import delete_dataset_responses
from libcommon.utils import Priority
def backfill_dataset(
dataset: str,
revision: str,
processing_graph: ProcessingGraph,
cache_max_days: int,
priority: Priority = Priority.LOW,
) -> None:
"""
Backfill a dataset: create the jobs needed to (re)compute its cached responses for the given revision.
Args:
dataset (str): the dataset name
revision (str): the git revision of the dataset
processing_graph (ProcessingGraph): the processing graph
cache_max_days (int): the maximum number of days to keep the cache entries
priority (Priority, optional): the priority of the created jobs. Defaults to Priority.LOW.
Returns: None.
"""
logging.debug(f"backfill {dataset=} {revision=} {priority=}")
DatasetOrchestrator(dataset=dataset, processing_graph=processing_graph).set_revision(
revision=revision, priority=priority, error_codes_to_retry=[], cache_max_days=cache_max_days
)
def delete_dataset(dataset: str) -> None:
"""
Delete all the cached responses for a dataset.
Args:
dataset (str): the dataset name
Returns: None.
"""
logging.debug(f"delete cache for dataset='{dataset}'")
delete_dataset_responses(dataset=dataset)
| datasets-server-main | libs/libcommon/src/libcommon/operations.py |
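A hedged sketch of how these two operations might be invoked (not part of the original file). It assumes the MongoDB-backed queue and cache used by libcommon are already initialized elsewhere, and the two step names in the graph specification are invented for the example.

```python
# Illustrative sketch only: backfill, then delete, the cache of one dataset.
# Assumes the queue/cache MongoDB connections required by libcommon are
# already set up by the surrounding application; this snippet does not do it.
from libcommon.operations import backfill_dataset, delete_dataset
from libcommon.processing_graph import ProcessingGraph
from libcommon.utils import Priority

# hypothetical two-step graph: a dataset-level step triggering a config-level step
processing_graph = ProcessingGraph(
    processing_graph_specification={
        "dataset-step": {"input_type": "dataset", "provides_dataset_config_names": True},
        "config-step": {"input_type": "config", "triggered_by": "dataset-step"},
    }
)

backfill_dataset(
    dataset="user/my-dataset",  # illustrative dataset name
    revision="0123456789abcdef",  # illustrative git revision (sha)
    processing_graph=processing_graph,
    cache_max_days=90,
    priority=Priority.LOW,
)

# when the dataset is removed from the Hub, drop its cached responses
delete_dataset(dataset="user/my-dataset")
```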
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from __future__ import annotations
from collections.abc import Mapping
from dataclasses import dataclass, field
from typing import Any, Literal, Optional, TypedDict, Union, get_args
import networkx as nx
from libcommon.constants import (
DEFAULT_DIFFICULTY,
DEFAULT_INPUT_TYPE,
DEFAULT_JOB_RUNNER_VERSION,
)
from libcommon.utils import inputs_to_string
InputType = Literal["dataset", "config", "split"]
# ^ note that for now, the "dataset" input type means: dataset + git revision
def guard_input_type(x: Any) -> InputType:
if x == "dataset":
return "dataset"
elif x == "config":
return "config"
elif x == "split":
return "split"
if x in get_args(InputType):
raise RuntimeError(f"Value {x} should be included in the literal values")
raise ValueError(f"Invalid input type: {x}")
def guard_int(x: Any) -> int:
if isinstance(x, int):
return x
raise ValueError(f"Invalid int: {x}")
class ProcessingStepSpecification(TypedDict, total=False):
input_type: InputType
triggered_by: Union[list[str], str, None]
enables_preview: Literal[True]
enables_viewer: Literal[True]
enables_search: Literal[True]
job_runner_version: int
provides_dataset_config_names: bool
provides_config_split_names: bool
provides_config_parquet: bool
provides_config_parquet_metadata: bool
difficulty: int
ProcessingGraphSpecification = Mapping[str, ProcessingStepSpecification]
class ProcessingStepDoesNotExist(Exception):
pass
@dataclass
class ProcessingStep:
"""A dataset processing step.
Attributes:
name (str): The processing step name.
input_type (InputType): The input type ('dataset', 'config' or 'split').
job_runner_version (int): The version of the job runner to use to compute the response.
Getters:
cache_kind (str): The cache kind (ie. the key in the cache).
job_type (str): The job type (ie. the job to run to compute the response).
"""
name: str
input_type: InputType
job_runner_version: int
difficulty: int
cache_kind: str = field(init=False)
job_type: str = field(init=False)
def __post_init__(self) -> None:
self.cache_kind = self.name
self.job_type = self.name
def copy(self) -> ProcessingStep:
"""Copy the processing step.
Returns:
ProcessingStep: The copy of the processing step.
"""
return ProcessingStep(
name=self.name,
input_type=self.input_type,
job_runner_version=self.job_runner_version,
difficulty=self.difficulty,
)
def get_triggered_by_as_list(triggered_by: Union[list[str], str, None]) -> list[str]:
if triggered_by is None:
return []
return [triggered_by] if isinstance(triggered_by, str) else triggered_by
def copy_processing_steps_list(processing_steps: list[ProcessingStep]) -> list[ProcessingStep]:
return [processing_step.copy() for processing_step in processing_steps]
@dataclass
class ProcessingGraph:
"""A graph of processing steps.
The processing steps can have multiple parents, and multiple children (next processing steps, found automatically
by traversing the graph).
The graph can have multiple roots.
Args:
processing_graph_specification (ProcessingGraphSpecification): The specification of the graph.
Raises:
ValueError: If the graph is not a DAG.
ValueError: If a processing step provides dataset config names but its input type is not 'dataset', or if a
processing step provides config split names but its input type is not 'config'.
ValueError: If a root processing step (ie. a processing step with no parent) is not a dataset processing step.
"""
processing_graph_specification: ProcessingGraphSpecification
_nx_graph: nx.DiGraph = field(init=False)
_processing_steps: Mapping[str, ProcessingStep] = field(init=False)
_processing_step_names_by_input_type: Mapping[InputType, list[str]] = field(init=False)
_first_processing_steps: list[ProcessingStep] = field(init=False)
_processing_steps_enables_preview: list[ProcessingStep] = field(init=False)
_processing_steps_enables_viewer: list[ProcessingStep] = field(init=False)
_processing_steps_enables_search: list[ProcessingStep] = field(init=False)
_config_split_names_processing_steps: list[ProcessingStep] = field(init=False)
_config_parquet_processing_steps: list[ProcessingStep] = field(init=False)
_config_parquet_metadata_processing_steps: list[ProcessingStep] = field(init=False)
_dataset_config_names_processing_steps: list[ProcessingStep] = field(init=False)
_topologically_ordered_processing_steps: list[ProcessingStep] = field(init=False)
_alphabetically_ordered_processing_steps: list[ProcessingStep] = field(init=False)
def __post_init__(self) -> None:
_nx_graph = nx.DiGraph()
_processing_steps: dict[str, ProcessingStep] = {}
_processing_step_names_by_input_type: dict[InputType, list[str]] = {
"dataset": [],
"config": [],
"split": [],
}
for name, specification in self.processing_graph_specification.items():
# check that the step is consistent with its specification
input_type = guard_input_type(specification.get("input_type", DEFAULT_INPUT_TYPE))
provides_dataset_config_names = specification.get("provides_dataset_config_names", False)
if provides_dataset_config_names and input_type != "dataset":
raise ValueError(
f"Processing step {name} provides dataset config names but its input type is {input_type}."
)
provides_config_split_names = specification.get("provides_config_split_names", False)
if provides_config_split_names and input_type != "config":
raise ValueError(
f"Processing step {name} provides config split names but its input type is {input_type}."
)
provides_config_parquet = specification.get("provides_config_parquet", False)
if provides_config_parquet and input_type != "config":
raise ValueError(f"Processing step {name} provides config parquet but its input type is {input_type}.")
provides_config_parquet_metadata = specification.get("provides_config_parquet_metadata", False)
if provides_config_parquet_metadata and input_type != "config":
raise ValueError(
f"Processing step {name} provides config parquet metadata but its input type is {input_type}."
)
if (
_nx_graph.has_node(name)
or name in _processing_steps
or name in _processing_step_names_by_input_type[input_type]
):
raise ValueError(f"Processing step {name} is defined twice.")
_nx_graph.add_node(
name,
enables_preview=specification.get("enables_preview", False),
enables_viewer=specification.get("enables_viewer", False),
enables_search=specification.get("enables_search", False),
provides_dataset_config_names=provides_dataset_config_names,
provides_config_split_names=provides_config_split_names,
provides_config_parquet=provides_config_parquet,
provides_config_parquet_metadata=provides_config_parquet_metadata,
)
_processing_steps[name] = ProcessingStep(
name=name,
input_type=input_type,
job_runner_version=specification.get("job_runner_version", DEFAULT_JOB_RUNNER_VERSION),
difficulty=specification.get("difficulty", DEFAULT_DIFFICULTY),
)
_processing_step_names_by_input_type[input_type].append(name)
for name, specification in self.processing_graph_specification.items():
triggered_by = get_triggered_by_as_list(specification.get("triggered_by"))
for processing_step_name in triggered_by:
if not _nx_graph.has_node(processing_step_name):
raise ValueError(
f"Processing step {name} is triggered by {processing_step_name} but {processing_step_name} is"
" not defined."
)
_nx_graph.add_edge(processing_step_name, name)
if not nx.is_directed_acyclic_graph(_nx_graph):
raise ValueError("The graph is not a directed acyclic graph.")
self._nx_graph = _nx_graph
self._processing_steps = _processing_steps
self._processing_step_names_by_input_type = _processing_step_names_by_input_type
self._first_processing_steps = [
self._processing_steps[processing_step_name]
for processing_step_name, degree in _nx_graph.in_degree()
if degree == 0
]
if any(processing_step.input_type != "dataset" for processing_step in self._first_processing_steps):
raise ValueError("The first processing steps must be dataset-level. The graph state is incoherent.")
self._processing_steps_enables_preview = [
self._processing_steps[processing_step_name]
for (processing_step_name, required) in _nx_graph.nodes(data="enables_preview")
if required
]
self._processing_steps_enables_viewer = [
self._processing_steps[processing_step_name]
for (processing_step_name, required) in _nx_graph.nodes(data="enables_viewer")
if required
]
self._processing_steps_enables_search = [
self._processing_steps[processing_step_name]
for (processing_step_name, required) in _nx_graph.nodes(data="enables_search")
if required
]
self._config_parquet_processing_steps = [
self._processing_steps[processing_step_name]
for (processing_step_name, provides) in _nx_graph.nodes(data="provides_config_parquet")
if provides
]
self._config_parquet_metadata_processing_steps = [
self._processing_steps[processing_step_name]
for (processing_step_name, provides) in _nx_graph.nodes(data="provides_config_parquet_metadata")
if provides
]
self._config_split_names_processing_steps = [
self._processing_steps[processing_step_name]
for (processing_step_name, provides) in _nx_graph.nodes(data="provides_config_split_names")
if provides
]
self._dataset_config_names_processing_steps = [
self.get_processing_step(processing_step_name)
for (processing_step_name, provides) in _nx_graph.nodes(data="provides_dataset_config_names")
if provides
]
self._topologically_ordered_processing_steps = [
self.get_processing_step(processing_step_name) for processing_step_name in nx.topological_sort(_nx_graph)
]
self._alphabetically_ordered_processing_steps = [
self.get_processing_step(processing_step_name) for processing_step_name in sorted(_nx_graph.nodes())
]
def get_processing_step(self, processing_step_name: str) -> ProcessingStep:
"""
Get a processing step by its name.
The returned processing step is a copy of the original one, so that it can be modified without affecting the
original one.
Args:
processing_step_name (str): The name of the processing step
Returns:
ProcessingStep: The processing step
"""
try:
return self._processing_steps[processing_step_name].copy()
except KeyError as e:
# the steps are stored in a dict, so an unknown name raises KeyError
raise ProcessingStepDoesNotExist(f"Unknown job type: {processing_step_name}") from e
def get_processing_step_by_job_type(self, job_type: str) -> ProcessingStep:
"""
Get a processing step by its job type.
The returned processing step is a copy of the original one, so that it can be modified without affecting the
original one.
Args:
job_type (str): The job type of the processing step
Returns:
ProcessingStep: The processing step
"""
# for now: the job_type is just an alias for the processing step name
return self.get_processing_step(job_type)
def get_children(self, processing_step_name: str) -> list[ProcessingStep]:
"""
Get the list of children processing steps
The children processing steps are the ones that will be triggered at the end of the processing step.
The returned processing steps are copies of the original ones, so that they can be modified without affecting
the original ones.
Args:
processing_step_name (str): The name of the processing step
Returns:
list[ProcessingStep]: The list of children processing steps (successors)
Raises:
ProcessingStepDoesNotExist: If the processing step is not in the graph
"""
try:
return [
self.get_processing_step(successor) for successor in self._nx_graph.successors(processing_step_name)
]
except nx.NetworkXError as e:
raise ProcessingStepDoesNotExist(f"Unknown processing step: {processing_step_name}") from e
def get_parents(self, processing_step_name: str) -> list[ProcessingStep]:
"""
Get the list of parents processing steps
The parent processing steps are the ones that trigger the processing step.
The returned processing steps are copies of the original ones, so that they can be modified without affecting
the original ones.
Args:
processing_step_name (str): The name of the processing step
Returns:
list[ProcessingStep]: The list of parent processing steps (predecessors)
Raises:
ProcessingStepDoesNotExist: If the processing step is not in the graph
"""
try:
return [
self.get_processing_step(predecessor)
for predecessor in self._nx_graph.predecessors(processing_step_name)
]
except nx.NetworkXError as e:
raise ProcessingStepDoesNotExist(f"Unknown processing step: {processing_step_name}") from e
def get_ancestors(self, processing_step_name: str) -> list[ProcessingStep]:
"""
Get the list of ancestors processing steps
The ancestor processing steps are the ones that trigger the processing step, directly or not.
The returned processing steps are copies of the original ones, so that they can be modified without affecting
the original ones.
Args:
processing_step_name (str): The name of the processing step
Returns:
list[ProcessingStep]: The list of ancestor processing steps
Raises:
ProcessingStepDoesNotExist: If the processing step is not in the graph
"""
try:
return [
self.get_processing_step(ancestor) for ancestor in nx.ancestors(self._nx_graph, processing_step_name)
]
except nx.NetworkXError as e:
raise ProcessingStepDoesNotExist(f"Unknown processing step: {processing_step_name}") from e
def get_first_processing_steps(self) -> list[ProcessingStep]:
"""
Get the first processing steps.
The first processing steps are the ones that don't have a previous step. This means that they will be computed
first when a dataset is updated. Their input type is always "dataset".
The returned processing steps are copies of the original ones, so that they can be modified without affecting
the original ones.
Returns:
list[ProcessingStep]: The list of first processing steps
"""
return copy_processing_steps_list(self._first_processing_steps)
def get_processing_steps_enables_preview(self) -> list[ProcessingStep]:
"""
Get the processing steps that enable the dataset preview (first rows).
The returned processing steps are copies of the original ones, so that they can be modified without affecting
the original ones.
Returns:
list[ProcessingStep]: The list of processing steps that enable the dataset preview
"""
return copy_processing_steps_list(self._processing_steps_enables_preview)
def get_processing_steps_enables_viewer(self) -> list[ProcessingStep]:
"""
Get the processing steps that enable the dataset viewer (all rows).
The returned processing steps are copies of the original ones, so that they can be modified without affecting
the original ones.
Returns:
list[ProcessingStep]: The list of processing steps that enable the dataset viewer
"""
return copy_processing_steps_list(self._processing_steps_enables_viewer)
def get_processing_steps_enables_search(self) -> list[ProcessingStep]:
"""
Get the processing steps that enable the dataset split search.
The returned processing steps are copies of the original ones, so that they can be modified without affecting
the original ones.
Returns:
list[ProcessingStep]: The list of processing steps that enable the dataset split search
"""
return copy_processing_steps_list(self._processing_steps_enables_search)
def get_config_parquet_processing_steps(self) -> list[ProcessingStep]:
"""
Get the processing steps that provide a config's parquet response.
The returned processing steps are copies of the original ones, so that they can be modified without affecting
the original ones.
Returns:
list[ProcessingStep]: The list of processing steps that provide a config's parquet response
"""
return copy_processing_steps_list(self._config_parquet_processing_steps)
def get_config_parquet_metadata_processing_steps(self) -> list[ProcessingStep]:
"""
Get the processing steps that provide a config's parquet metadata response.
The returned processing steps are copies of the original ones, so that they can be modified without affecting
the original ones.
Returns:
list[ProcessingStep]: The list of processing steps that provide a config's parquet metadata response
"""
return copy_processing_steps_list(self._config_parquet_metadata_processing_steps)
def get_config_split_names_processing_steps(self) -> list[ProcessingStep]:
"""
Get the processing steps that provide a config's split names.
The returned processing steps are copies of the original ones, so that they can be modified without affecting
the original ones.
Returns:
list[ProcessingStep]: The list of processing steps that provide a config's split names
"""
return copy_processing_steps_list(self._config_split_names_processing_steps)
def get_dataset_config_names_processing_steps(self) -> list[ProcessingStep]:
"""
Get the processing steps that provide a dataset's config names.
The returned processing steps are copies of the original ones, so that they can be modified without affecting
the original ones.
Returns:
list[ProcessingStep]: The list of processing steps that provide a dataset's config names
"""
return copy_processing_steps_list(self._dataset_config_names_processing_steps)
def get_topologically_ordered_processing_steps(self) -> list[ProcessingStep]:
"""
Get the processing steps, ordered topologically.
This means that the first processing steps are the ones that don't have a previous step, and that the last
processing steps are the ones that don't have a next step.
The returned processing steps are copies of the original ones, so that they can be modified without affecting
the original ones.
Returns:
list[ProcessingStep]: The list of processing steps
"""
return copy_processing_steps_list(self._topologically_ordered_processing_steps)
def get_alphabetically_ordered_processing_steps(self) -> list[ProcessingStep]:
"""
Get the processing steps, ordered alphabetically by the name of the processing steps.
The returned processing steps are copies of the original ones, so that they can be modified without affecting
the original ones.
Returns:
list[ProcessingStep]: The list of processing steps
"""
return copy_processing_steps_list(self._alphabetically_ordered_processing_steps)
def get_processing_steps(
self, order: Optional[Literal["alphabetical", "topological"]] = None
) -> list[ProcessingStep]:
"""
Get the processing steps.
The returned processing steps are copies of the original ones, so that they can be modified without affecting
the original ones.
Args:
order (Optional[Literal["alphabetical", "topological"]], optional): The order in which to return the
processing steps. If None, the order is alphabetical. Defaults to None.
Returns:
list[ProcessingStep]: The list of processing steps
"""
if order == "topological":
return self.get_topologically_ordered_processing_steps()
# default
return self.get_alphabetically_ordered_processing_steps()
def get_input_type_processing_steps(self, input_type: InputType = "dataset") -> list[ProcessingStep]:
"""
Get the processing steps of input type `input_type`, in an undefined order.
The returned processing steps are copies of the original ones, so that they can be modified without affecting
the original ones.
Args:
input_type (InputType, optional): The input type. Defaults to "dataset".
Returns:
list[ProcessingStep]: The list of processing steps
"""
return [
self.get_processing_step(processing_step_name)
for processing_step_name in self._processing_step_names_by_input_type[input_type]
]
@dataclass
class Artifact:
"""An artifact."""
processing_step: ProcessingStep
dataset: str
revision: str
config: Optional[str]
split: Optional[str]
id: str = field(init=False)
def __post_init__(self) -> None:
if self.processing_step.input_type == "dataset":
if self.config is not None or self.split is not None:
raise ValueError("Step input type is dataset, but config or split is not None")
elif self.processing_step.input_type == "config":
if self.config is None or self.split is not None:
raise ValueError("Step input type is config, but config is None or split is not None")
elif self.processing_step.input_type == "split":
if self.config is None or self.split is None:
raise ValueError("Step input type is split, but config or split is None")
else:
raise ValueError(f"Invalid step input type: {self.processing_step.input_type}")
self.id = Artifact.get_id(
dataset=self.dataset,
revision=self.revision,
config=self.config,
split=self.split,
processing_step_name=self.processing_step.name,
)
@staticmethod
def get_id(
dataset: str,
revision: str,
config: Optional[str],
split: Optional[str],
processing_step_name: str,
) -> str:
return inputs_to_string(
dataset=dataset,
revision=revision,
config=config,
split=split,
prefix=processing_step_name,
)
@staticmethod
def parse_id(id: str) -> tuple[str, str, Optional[str], Optional[str], str]:
parts = id.split(",")
prefix = parts[0]
parts = parts[1:]
dataset = parts[0]
revision = parts[1]
parts = parts[2:]
config = None
split = None
if len(parts) > 1:
config = parts[1]
if len(parts) > 2:
split = parts[2]
return dataset, revision, config, split, prefix
| datasets-server-main | libs/libcommon/src/libcommon/processing_graph.py |
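To make the graph API concrete, here is a small self-contained sketch (the step names are invented, not the production graph): it builds a three-step dataset -> config -> split specification and queries its structure.

```python
# Illustrative sketch: a three-step graph (dataset -> config -> split),
# exercising the ProcessingGraph and Artifact API defined above.
from libcommon.processing_graph import Artifact, ProcessingGraph

specification = {
    "dataset-config-names": {"input_type": "dataset", "provides_dataset_config_names": True},
    "config-split-names": {
        "input_type": "config",
        "triggered_by": "dataset-config-names",
        "provides_config_split_names": True,
    },
    "split-first-rows": {"input_type": "split", "triggered_by": "config-split-names"},
}
graph = ProcessingGraph(processing_graph_specification=specification)

print([step.name for step in graph.get_first_processing_steps()])
# -> ['dataset-config-names']
print([step.name for step in graph.get_children("config-split-names")])
# -> ['split-first-rows']
print([step.name for step in graph.get_ancestors("split-first-rows")])
# -> ['dataset-config-names', 'config-split-names'] (order not guaranteed)

# artifact ids are comma-separated strings, prefixed by the step name
print(Artifact.get_id(
    dataset="user/my-dataset", revision="abc123", config="default", split="train",
    processing_step_name="split-first-rows",
))
# -> 'split-first-rows,user/my-dataset,abc123,default,train'
```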
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import base64
import enum
import mimetypes
from collections.abc import Mapping
from datetime import datetime, timedelta, timezone
from http import HTTPStatus
from typing import Any, Optional, TypedDict
import orjson
class Status(str, enum.Enum):
WAITING = "waiting"
STARTED = "started"
SUCCESS = "success"
ERROR = "error"
CANCELLED = "cancelled"
class Priority(str, enum.Enum):
HIGH = "high"
NORMAL = "normal"
LOW = "low"
class JobParams(TypedDict):
dataset: str
revision: str
config: Optional[str]
split: Optional[str]
class JobInfo(TypedDict):
job_id: str
type: str
params: JobParams
priority: Priority
difficulty: int
class FlatJobInfo(TypedDict):
job_id: str
type: str
dataset: str
revision: str
config: Optional[str]
split: Optional[str]
priority: str
status: str
difficulty: int
created_at: datetime
class JobOutput(TypedDict):
content: Mapping[str, Any]
http_status: HTTPStatus
error_code: Optional[str]
details: Optional[Mapping[str, Any]]
progress: Optional[float]
class JobResult(TypedDict):
job_info: JobInfo
job_runner_version: int
is_success: bool
output: Optional[JobOutput]
class SplitHubFile(TypedDict):
dataset: str
config: str
split: str
url: str
filename: str
size: int
Row = dict[str, Any]
class RowItem(TypedDict):
row_idx: int
row: Row
truncated_cells: list[str]
class FeatureItem(TypedDict):
feature_idx: int
name: str
type: dict[str, Any]
class PaginatedResponse(TypedDict):
features: list[FeatureItem]
rows: list[RowItem]
num_rows_total: int
num_rows_per_page: int
# orjson is used to get rid of errors with datetime (see allenai/c4)
def orjson_default(obj: Any) -> Any:
if isinstance(obj, bytes):
# see https://stackoverflow.com/a/40000564/7351594 for example
# the bytes are encoded with base64, and then decoded as utf-8
# (ascii only, by the way) to get a string
return base64.b64encode(obj).decode("utf-8")
raise TypeError
def orjson_dumps(content: Any) -> bytes:
return orjson.dumps(content, option=orjson.OPT_UTC_Z, default=orjson_default)
def get_datetime(days: Optional[float] = None) -> datetime:
date = datetime.now(timezone.utc)
if days is not None:
date = date - timedelta(days=days)
return date
def inputs_to_string(
dataset: str,
revision: Optional[str] = None,
config: Optional[str] = None,
split: Optional[str] = None,
prefix: Optional[str] = None,
) -> str:
result = dataset
if revision is not None:
result = f"{result},{revision}"
if config is not None:
result = f"{result},{config}"
if split is not None:
result = f"{result},{split}"
if prefix is not None:
result = f"{prefix},{result}"
return result
def is_image_url(text: str) -> bool:
is_url = text.startswith("https://") or text.startswith("http://")
(mime_type, _) = mimetypes.guess_type(text.split("/")[-1].split("?")[0])
return is_url and mime_type is not None and mime_type.startswith("image/")
| datasets-server-main | libs/libcommon/src/libcommon/utils.py |
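A short sketch exercising the helpers above (assuming `orjson` is installed, as the module itself requires); the output comments show the expected values.

```python
# Illustrative sketch of the serialization and string helpers defined above.
from datetime import datetime, timezone

from libcommon.utils import Priority, get_datetime, inputs_to_string, is_image_url, orjson_dumps

# bytes are base64-encoded, tz-aware datetimes are rendered with a Z suffix
payload = {"when": datetime(2022, 1, 1, tzinfo=timezone.utc), "blob": b"\x00\x01"}
print(orjson_dumps(payload))  # b'{"when":"2022-01-01T00:00:00Z","blob":"AAE="}'

# a point in time `days` days ago, in UTC
print(get_datetime(days=7))

# the canonical comma-separated key used across libcommon
print(inputs_to_string(dataset="user/my-dataset", revision="abc123", config="default", prefix="config-parquet"))
# -> 'config-parquet,user/my-dataset,abc123,default'

print(is_image_url("https://example.com/cat.png"))   # True
print(is_image_url("https://example.com/data.csv"))  # False
print(Priority.LOW.value)  # 'low'
```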
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import os
import time
from types import TracebackType
from typing import Any, Optional, TypeVar
from prometheus_client import (
REGISTRY,
CollectorRegistry,
Gauge,
Histogram,
generate_latest,
)
from prometheus_client.multiprocess import MultiProcessCollector
from psutil import disk_usage
from libcommon.queue import JobTotalMetricDocument
from libcommon.simple_cache import CacheTotalMetricDocument
from libcommon.storage import StrPath
class Prometheus:
def getRegistry(self) -> CollectorRegistry:
# taken from https://github.com/perdy/starlette-prometheus/blob/master/starlette_prometheus/view.py
# see https://github.com/prometheus/client_python#multiprocess-mode-eg-gunicorn
if "PROMETHEUS_MULTIPROC_DIR" in os.environ:
registry = CollectorRegistry()
MultiProcessCollector(registry=registry)
else:
registry = REGISTRY
return registry
def getLatestContent(self) -> Any:
# ^ returns Any because we cannot be sure latest are UTF8Bytes
latest = generate_latest(self.getRegistry())
return latest.decode("utf-8")
# the metrics are global to the process
QUEUE_JOBS_TOTAL = Gauge(
name="queue_jobs_total",
documentation="Number of jobs in the queue",
labelnames=["queue", "status"],
multiprocess_mode="liveall",
)
RESPONSES_IN_CACHE_TOTAL = Gauge(
name="responses_in_cache_total",
documentation="Number of cached responses in the cache",
labelnames=["kind", "http_status", "error_code"],
multiprocess_mode="liveall",
)
ASSETS_DISK_USAGE = Gauge(
name="assets_disk_usage",
documentation="Usage of the disk where the assets and cached_assets are stored",
labelnames=["type"],
multiprocess_mode="liveall",
)
DESCRIPTIVE_STATISTICS_DISK_USAGE = Gauge(
name="descriptive_statistics_disk_usage",
documentation="Usage of the disk where the descriptive statistics temporary files are stored (workers)",
labelnames=["type"],
multiprocess_mode="liveall",
)
DUCKDB_DISK_USAGE = Gauge(
name="duckdb_disk_usage",
documentation="Usage of the disk where the temporary duckdb files are stored (/search)",
labelnames=["type"],
multiprocess_mode="liveall",
)
HF_DATASETS_DISK_USAGE = Gauge(
name="hf_datasets_disk_usage",
documentation="Usage of the disk where the HF datasets library stores its cache (workers)",
labelnames=["type"],
multiprocess_mode="liveall",
)
PARQUET_METADATA_DISK_USAGE = Gauge(
name="parquet_metadata_disk_usage",
documentation="Usage of the disk where the parquet metadata are stored (workers, used by /rows)",
labelnames=["type"],
multiprocess_mode="liveall",
)
METHOD_STEPS_PROCESSING_TIME = Histogram(
"method_steps_processing_time_seconds",
"Histogram of the processing time of specific steps in methods for a given context (in seconds)",
["method", "step", "context"],
)
def update_queue_jobs_total() -> None:
for job_metric in JobTotalMetricDocument.objects():
QUEUE_JOBS_TOTAL.labels(queue=job_metric.job_type, status=job_metric.status).set(job_metric.total)
def update_responses_in_cache_total() -> None:
for cache_metric in CacheTotalMetricDocument.objects():
RESPONSES_IN_CACHE_TOTAL.labels(
kind=cache_metric.kind, http_status=cache_metric.http_status, error_code=cache_metric.error_code
).set(cache_metric.total)
def update_disk_gauge(gauge: Gauge, directory: StrPath) -> None:
# TODO: move to metrics, as for the other metrics (queue, cache)
total, used, free, percent = disk_usage(str(directory))
gauge.labels(type="total").set(total)
gauge.labels(type="used").set(used)
gauge.labels(type="free").set(free)
gauge.labels(type="percent").set(percent)
def update_assets_disk_usage(directory: StrPath) -> None:
update_disk_gauge(ASSETS_DISK_USAGE, directory)
def update_descriptive_statistics_disk_usage(directory: StrPath) -> None:
update_disk_gauge(DESCRIPTIVE_STATISTICS_DISK_USAGE, directory)
def update_duckdb_disk_usage(directory: StrPath) -> None:
update_disk_gauge(DUCKDB_DISK_USAGE, directory)
def update_hf_datasets_disk_usage(directory: StrPath) -> None:
update_disk_gauge(HF_DATASETS_DISK_USAGE, directory)
def update_parquet_metadata_disk_usage(directory: StrPath) -> None:
update_disk_gauge(PARQUET_METADATA_DISK_USAGE, directory)
T = TypeVar("T", bound="StepProfiler")
class StepProfiler:
"""
A context manager that measures the time spent in a step of a method and reports it to Prometheus.
Example:
>>> with StepProfiler("method", "step", "context") as profiler:
... pass
Args:
method (str): The name of the method.
step (str): The name of the step.
context (str|None): An optional string that adds context. If None, the label "None" is used.
"""
def __init__(self, method: str, step: str, context: Optional[str] = None):
self.method = method
self.step = step
self.context = str(context)
self.before_time = time.perf_counter()
def __enter__(self: T) -> T:
return self
def __exit__(
self,
exc_type: Optional[type[BaseException]],
exc_value: Optional[BaseException],
traceback: Optional[TracebackType],
) -> None:
after_time = time.perf_counter()
METHOD_STEPS_PROCESSING_TIME.labels(method=self.method, step=self.step, context=self.context).observe(
after_time - self.before_time
)
| datasets-server-main | libs/libcommon/src/libcommon/prometheus.py |
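A sketch of how the profiler and the registry above fit together in single-process mode (i.e. without PROMETHEUS_MULTIPROC_DIR set); it assumes `prometheus_client` is installed and `libcommon` is importable.

```python
# Illustrative sketch: time a step with StepProfiler, then render the exposition text.
import time

from libcommon.prometheus import Prometheus, StepProfiler

with StepProfiler(method="my_method", step="download", context="dataset=user/my-dataset"):
    time.sleep(0.1)  # placeholder for the work being measured

# in single-process mode getRegistry() returns the default REGISTRY, so the
# observation above shows up in the generated metrics
content = Prometheus().getLatestContent()
print([line for line in content.splitlines() if "method_steps_processing_time" in line][:3])
```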
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
import logging
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from typing import Optional, Union
import pandas as pd
from libcommon.constants import ERROR_CODES_TO_RETRY
from libcommon.processing_graph import (
ProcessingGraph,
ProcessingStep,
ProcessingStepDoesNotExist,
)
from libcommon.prometheus import StepProfiler
from libcommon.queue import Queue
from libcommon.simple_cache import (
fetch_names,
get_cache_entries_df,
has_some_cache,
upsert_response_params,
)
from libcommon.state import ArtifactState, DatasetState, FirstStepsDatasetState
from libcommon.utils import JobInfo, JobResult, Priority
# TODO: clean dangling cache entries
@dataclass
class CacheStatus:
cache_has_different_git_revision: dict[str, ArtifactState] = field(default_factory=dict)
cache_is_old: dict[str, ArtifactState] = field(default_factory=dict)
cache_is_outdated_by_parent: dict[str, ArtifactState] = field(default_factory=dict)
cache_is_empty: dict[str, ArtifactState] = field(default_factory=dict)
cache_is_error_to_retry: dict[str, ArtifactState] = field(default_factory=dict)
cache_is_job_runner_obsolete: dict[str, ArtifactState] = field(default_factory=dict)
up_to_date: dict[str, ArtifactState] = field(default_factory=dict)
def as_response(self) -> dict[str, list[str]]:
return {
"cache_has_different_git_revision": sorted(self.cache_has_different_git_revision.keys()),
"cache_is_old": sorted(self.cache_is_old.keys()),
"cache_is_outdated_by_parent": sorted(self.cache_is_outdated_by_parent.keys()),
"cache_is_empty": sorted(self.cache_is_empty.keys()),
"cache_is_error_to_retry": sorted(self.cache_is_error_to_retry.keys()),
"cache_is_job_runner_obsolete": sorted(self.cache_is_job_runner_obsolete.keys()),
"up_to_date": sorted(self.up_to_date.keys()),
}
@dataclass
class QueueStatus:
in_process: set[str] = field(default_factory=set)
def as_response(self) -> dict[str, list[str]]:
return {"in_process": sorted(self.in_process)}
@dataclass
class Task(ABC):
id: str = field(init=False)
long_id: str = field(init=False)
@abstractmethod
def run(self) -> None:
pass
@dataclass
class CreateJobsTask(Task):
job_infos: list[JobInfo] = field(default_factory=list)
def __post_init__(self) -> None:
# for debug and testing
self.id = f"CreateJobs,{len(self.job_infos)}"
types = [job_info["type"] for job_info in self.job_infos]
self.long_id = f"CreateJobs,{types}"
def run(self) -> None:
with StepProfiler(
method="CreateJobsTask.run",
step="all",
context=f"num_jobs_to_create={len(self.job_infos)}",
):
created_jobs_count = Queue().create_jobs(job_infos=self.job_infos)
if created_jobs_count != len(self.job_infos):
raise ValueError(
f"Something went wrong when creating jobs: {len(self.job_infos)} jobs were supposed to be"
f" created, but {created_jobs_count} were created."
)
@dataclass
class DeleteJobsTask(Task):
jobs_df: pd.DataFrame
def __post_init__(self) -> None:
# for debug and testing
self.id = f"DeleteJobs,{len(self.jobs_df)}"
types = [row["type"] for _, row in self.jobs_df.iterrows()]
self.long_id = f"DeleteJobs,{types}"
def run(self) -> None:
with StepProfiler(
method="DeleteJobsTask.run",
step="all",
context=f"num_jobs_to_delete={len(self.jobs_df)}",
):
cancelled_jobs_count = Queue().cancel_jobs_by_job_id(job_ids=self.jobs_df["job_id"].tolist())
if cancelled_jobs_count != len(self.jobs_df):
raise ValueError(
f"Something went wrong when cancelling jobs: {len(self.jobs_df)} jobs were supposed to be"
f" cancelled, but {cancelled_jobs_count} were cancelled."
)
SupportedTask = Union[CreateJobsTask, DeleteJobsTask]
@dataclass
class Plan:
tasks: list[SupportedTask] = field(init=False)
def __post_init__(self) -> None:
self.tasks = []
def add_task(self, task: SupportedTask) -> None:
self.tasks.append(task)
def run(self) -> int:
"""Run all the tasks in the plan.
Returns:
The number of tasks that were run.
"""
for idx, task in enumerate(self.tasks):
logging.debug(f"Running task [{idx}/{len(self.tasks)}]: {task.long_id}")
task.run()
return len(self.tasks)
def as_response(self) -> list[str]:
return sorted(task.id for task in self.tasks)
@dataclass
class AfterJobPlan(Plan):
"""
Plan to create jobs after a processing step has finished.
Args:
job_info (JobInfo): The job info.
processing_graph (ProcessingGraph): The processing graph.
"""
job_info: JobInfo
processing_graph: ProcessingGraph
dataset: str = field(init=False)
config: Optional[str] = field(init=False)
split: Optional[str] = field(init=False)
revision: str = field(init=False)
priority: Priority = field(init=False)
def __post_init__(self) -> None:
super().__post_init__()
self.dataset = self.job_info["params"]["dataset"]
self.revision = self.job_info["params"]["revision"]
self.priority = self.job_info["priority"]
config = self.job_info["params"]["config"]
split = self.job_info["params"]["split"]
job_type = self.job_info["type"]
try:
processing_step = self.processing_graph.get_processing_step_by_job_type(job_type)
next_processing_steps = self.processing_graph.get_children(processing_step.name)
except ProcessingStepDoesNotExist as e:
raise ValueError(f"Processing step with job type: {job_type} does not exist") from e
if len(next_processing_steps) == 0:
# no next processing step, nothing to do
return
# get the list of pending jobs for the children
# note that it can contain a lot of unrelated jobs, we will clean after
self.pending_jobs_df = Queue().get_pending_jobs_df(
dataset=self.dataset,
job_types=[next_processing_step.job_type for next_processing_step in next_processing_steps],
)
self.job_infos_to_create: list[JobInfo] = []
config_names: Optional[list[str]] = None
split_names: Optional[list[str]] = None
# filter to only get the jobs that are not already in the queue
for next_processing_step in next_processing_steps:
if processing_step.input_type == next_processing_step.input_type:
# same level, one job is expected
# D -> D, C -> C, S -> S
self.update(next_processing_step, config, split)
elif processing_step.input_type in ["config", "split"] and next_processing_step.input_type == "dataset":
# going to upper level (fan-in), one job is expected
# S -> D, C -> D
self.update(next_processing_step, None, None)
elif processing_step.input_type == "split" and next_processing_step.input_type == "config":
# going to upper level (fan-in), one job is expected
# S -> C
self.update(next_processing_step, config, None)
elif processing_step.input_type == "dataset" and next_processing_step.input_type == "config":
# going to lower level (fan-out), one job is expected per config, we need the list of configs
# D -> C
if config_names is None:
config_names = fetch_names(
dataset=self.dataset,
config=None,
cache_kinds=[
processing_step.cache_kind
for processing_step in self.processing_graph.get_dataset_config_names_processing_steps()
],
names_field="config_names",
name_field="config",
) # Note that we use the cached content even if the revision is different (i.e. it may be obsolete)
for config_name in config_names:
self.update(next_processing_step, config_name, None)
elif processing_step.input_type == "config" and next_processing_step.input_type == "split":
# going to lower level (fan-out), one job is expected per split, we need the list of splits
# C -> S
if split_names is None:
split_names = fetch_names(
dataset=self.dataset,
config=config,
cache_kinds=[
processing_step.cache_kind
for processing_step in self.processing_graph.get_config_split_names_processing_steps()
],
names_field="splits",
name_field="split",
) # Note that we use the cached content even if the revision is different (i.e. it may be obsolete)
for split_name in split_names:
self.update(next_processing_step, config, split_name)
else:
raise NotImplementedError(
f"Unsupported input types: {processing_step.input_type} -> {next_processing_step.input_type}"
)
# we don't support fan-out dataset-level to split-level (no need for now)
# Better keep this order: delete, then create
# Note that all the pending jobs for other revisions will be deleted
if not self.pending_jobs_df.empty:
self.add_task(DeleteJobsTask(jobs_df=self.pending_jobs_df))
if self.job_infos_to_create:
self.add_task(CreateJobsTask(job_infos=self.job_infos_to_create))
def update(
self,
next_processing_step: ProcessingStep,
config: Optional[str],
split: Optional[str],
) -> None:
# ignore unrelated jobs
config_mask = (
self.pending_jobs_df["config"].isnull() if config is None else self.pending_jobs_df["config"] == config
)
split_mask = (
self.pending_jobs_df["split"].isnull() if split is None else self.pending_jobs_df["split"] == split
)
unrelated_jobs_mask = (self.pending_jobs_df["type"] == next_processing_step.job_type) & (
(self.pending_jobs_df["dataset"] != self.dataset) | (~config_mask) | (~split_mask)
)
self.pending_jobs_df = self.pending_jobs_df[~unrelated_jobs_mask]
jobs_mask = (
(self.pending_jobs_df["type"] == next_processing_step.job_type)
& (self.pending_jobs_df["dataset"] == self.dataset)
& (config_mask)
& (split_mask)
)
ok_jobs_mask = jobs_mask & (self.pending_jobs_df["revision"] == self.revision)
if ok_jobs_mask.any():
# remove the first ok job from the list, and keep the others to delete them later
self.pending_jobs_df.drop(ok_jobs_mask.idxmax(), inplace=True)
else:
# no pending job for the current processing step
self.job_infos_to_create.append(
{
"job_id": "not used", # TODO: remove this field
"type": next_processing_step.job_type,
"params": {
"dataset": self.dataset,
"config": config,
"split": split,
"revision": self.revision,
},
"priority": self.priority,
"difficulty": next_processing_step.difficulty,
}
)
@dataclass
class DatasetBackfillPlan(Plan):
"""
Plan to backfill a dataset for a given revision.
The plan is composed of tasks to delete and create jobs.
Args:
dataset: dataset name
processing_graph: processing graph
revision: revision to backfill
cache_max_days: maximum number of days to keep the cache
error_codes_to_retry: list of error codes to retry
priority: priority of the jobs to create
only_first_processing_steps: if True, only the first processing steps are backfilled
"""
dataset: str
processing_graph: ProcessingGraph
revision: str
cache_max_days: int
error_codes_to_retry: Optional[list[str]] = None
priority: Priority = Priority.LOW
only_first_processing_steps: bool = False
pending_jobs_df: pd.DataFrame = field(init=False)
cache_entries_df: pd.DataFrame = field(init=False)
dataset_state: DatasetState = field(init=False)
cache_status: CacheStatus = field(init=False)
def __post_init__(self) -> None:
super().__post_init__()
if self.error_codes_to_retry is None:
self.error_codes_to_retry = ERROR_CODES_TO_RETRY.split(",")
with StepProfiler(
method="DatasetBackfillPlan.__post_init__",
step="all",
context=f"dataset={self.dataset}",
):
with StepProfiler(
method="DatasetBackfillPlan.__post_init__",
step="get_pending_jobs_df",
context=f"dataset={self.dataset}",
):
job_types = (
[
processing_step.job_type
for processing_step in self.processing_graph.get_first_processing_steps()
]
if self.only_first_processing_steps
else None
)
self.pending_jobs_df = Queue().get_pending_jobs_df(
dataset=self.dataset,
job_types=job_types,
)
with StepProfiler(
method="DatasetBackfillPlan.__post_init__",
step="get_cache_entries_df",
context=f"dataset={self.dataset}",
):
cache_kinds = (
[
processing_step.cache_kind
for processing_step in self.processing_graph.get_first_processing_steps()
]
if self.only_first_processing_steps
else None
)
self.cache_entries_df = get_cache_entries_df(
dataset=self.dataset,
cache_kinds=cache_kinds,
)
with StepProfiler(
method="DatasetBackfillPlan.__post_init__",
step="get_dataset_state",
context=f"dataset={self.dataset}",
):
self.dataset_state = (
FirstStepsDatasetState(
dataset=self.dataset,
processing_graph=self.processing_graph,
revision=self.revision,
pending_jobs_df=self.pending_jobs_df,
cache_entries_df=self.cache_entries_df,
error_codes_to_retry=self.error_codes_to_retry,
)
if self.only_first_processing_steps
else DatasetState(
dataset=self.dataset,
processing_graph=self.processing_graph,
revision=self.revision,
pending_jobs_df=self.pending_jobs_df,
cache_entries_df=self.cache_entries_df,
error_codes_to_retry=self.error_codes_to_retry,
)
)
with StepProfiler(
method="DatasetBackfillPlan.__post_init__",
step="_get_cache_status",
context=f"dataset={self.dataset}",
):
self.cache_status = self._get_cache_status()
with StepProfiler(
method="DatasetBackfillPlan.__post_init__",
step="_create_plan",
context=f"dataset={self.dataset}",
):
self._create_plan()
def _get_artifact_states_for_step(
self, processing_step: ProcessingStep, config: Optional[str] = None, split: Optional[str] = None
) -> list[ArtifactState]:
"""Get the artifact states for a step.
Args:
processing_step (ProcessingStep): the processing step
config (str, optional): if not None, and step input type is config or split, only return the artifact
states for this config
split (str, optional): if not None, and step input type is split, only return the artifact states for
this split (config must be specified)
Returns:
the artifact states for the step
"""
if processing_step.input_type == "dataset":
artifact_states = [self.dataset_state.artifact_state_by_step[processing_step.name]]
elif processing_step.input_type == "config":
if config is None:
artifact_states = [
config_state.artifact_state_by_step[processing_step.name]
for config_state in self.dataset_state.config_states
]
else:
artifact_states = [
config_state.artifact_state_by_step[processing_step.name]
for config_state in self.dataset_state.config_states
if config_state.config == config
]
elif processing_step.input_type == "split":
if config is None:
artifact_states = [
split_state.artifact_state_by_step[processing_step.name]
for config_state in self.dataset_state.config_states
for split_state in config_state.split_states
]
elif split is None:
artifact_states = [
split_state.artifact_state_by_step[processing_step.name]
for config_state in self.dataset_state.config_states
if config_state.config == config
for split_state in config_state.split_states
]
else:
artifact_states = [
split_state.artifact_state_by_step[processing_step.name]
for config_state in self.dataset_state.config_states
if config_state.config == config
for split_state in config_state.split_states
if split_state.split == split
]
else:
raise ValueError(f"Invalid input type: {processing_step.input_type}")
artifact_states_ids = {artifact_state.id for artifact_state in artifact_states}
if len(artifact_states_ids) != len(artifact_states):
raise ValueError(f"Duplicate artifact states for processing_step {processing_step}")
return artifact_states
def _get_cache_status(self) -> CacheStatus:
cache_status = CacheStatus()
processing_steps = (
self.processing_graph.get_first_processing_steps()
if self.only_first_processing_steps
else self.processing_graph.get_topologically_ordered_processing_steps()
)
for processing_step in processing_steps:
# Every step can have one or multiple artifacts, for example config-level steps have one artifact per
# config
artifact_states = self._get_artifact_states_for_step(processing_step)
for artifact_state in artifact_states:
# is an old entry?
if artifact_state.cache_state.is_old(days=self.cache_max_days):
cache_status.cache_is_old[artifact_state.id] = artifact_state
continue
# any of the parents is more recent?
if any(
artifact_state.cache_state.is_older_than(parent_artifact_state.cache_state)
for parent_step in self.processing_graph.get_parents(processing_step.name)
for parent_artifact_state in self._get_artifact_states_for_step(
processing_step=parent_step,
config=artifact_state.config,
split=artifact_state.split,
)
):
cache_status.cache_is_outdated_by_parent[artifact_state.id] = artifact_state
continue
# is empty?
if artifact_state.cache_state.is_empty():
cache_status.cache_is_empty[artifact_state.id] = artifact_state
continue
# is an error that can be retried?
if artifact_state.cache_state.is_error_to_retry():
cache_status.cache_is_error_to_retry[artifact_state.id] = artifact_state
continue
# was created with an obsolete version of the job runner?
if artifact_state.cache_state.is_job_runner_obsolete():
cache_status.cache_is_job_runner_obsolete[artifact_state.id] = artifact_state
continue
# has a different git revision from the dataset current revision?
if artifact_state.cache_state.is_git_revision_different_from(self.revision):
cache_status.cache_has_different_git_revision[artifact_state.id] = artifact_state
continue
# ok
cache_status.up_to_date[artifact_state.id] = artifact_state
return cache_status
def get_queue_status(self) -> QueueStatus:
processing_steps = (
self.processing_graph.get_first_processing_steps()
if self.only_first_processing_steps
else self.processing_graph.get_topologically_ordered_processing_steps()
)
return QueueStatus(
in_process={
artifact_state.id
for processing_step in processing_steps
for artifact_state in self._get_artifact_states_for_step(processing_step)
if artifact_state.job_state.is_in_process
}
)
def _create_plan(self) -> None:
pending_jobs_to_delete_df = self.pending_jobs_df.copy()
job_infos_to_create: list[JobInfo] = []
artifact_states = (
list(self.cache_status.cache_is_empty.values())
+ list(self.cache_status.cache_is_error_to_retry.values())
+ list(self.cache_status.cache_is_old.values())
+ list(self.cache_status.cache_is_outdated_by_parent.values())
+ list(self.cache_status.cache_is_job_runner_obsolete.values())
+ list(self.cache_status.cache_has_different_git_revision.values())
)
for artifact_state in artifact_states:
valid_pending_jobs_df = artifact_state.job_state.valid_pending_jobs_df
if valid_pending_jobs_df.empty:
job_infos_to_create.append(
{
"job_id": "not used",
"type": artifact_state.processing_step.job_type,
"params": {
"dataset": self.dataset,
"revision": self.revision,
"config": artifact_state.config,
"split": artifact_state.split,
},
"priority": self.priority,
"difficulty": artifact_state.processing_step.difficulty,
}
)
else:
pending_jobs_to_delete_df.drop(valid_pending_jobs_df.index, inplace=True)
# Better keep this order: delete, then create
# Note that all the pending jobs for other revisions will be deleted
if not pending_jobs_to_delete_df.empty:
self.add_task(DeleteJobsTask(jobs_df=pending_jobs_to_delete_df))
if job_infos_to_create:
self.add_task(CreateJobsTask(job_infos=job_infos_to_create))
@dataclass
class DatasetOrchestrator:
dataset: str
processing_graph: ProcessingGraph
def set_revision(
self, revision: str, priority: Priority, error_codes_to_retry: list[str], cache_max_days: int
) -> None:
"""
Set the current revision of the dataset.
If the revision is already set to the same value, this is a no-op. Else: one job is created for every first
step.
Args:
revision (str): The new revision of the dataset.
priority (Priority): The priority of the jobs to create.
error_codes_to_retry (list[str]): The error codes for which the jobs should be retried.
cache_max_days (int): The maximum number of days for which the cache is considered valid.
Returns:
None
Raises:
ValueError: If the first processing steps are not dataset steps, or if the processing graph has no first
step.
"""
first_processing_steps = self.processing_graph.get_first_processing_steps()
if len(first_processing_steps) < 1:
raise ValueError("Processing graph has no first step")
if any(first_processing_step.input_type != "dataset" for first_processing_step in first_processing_steps):
raise ValueError("One of the first processing steps is not a dataset step")
with StepProfiler(
method="DatasetOrchestrator.set_revision",
step="all",
context=f"dataset={self.dataset}",
):
logging.info(f"Analyzing {self.dataset}")
with StepProfiler(
method="DatasetOrchestrator.set_revision",
step="plan",
context=f"dataset={self.dataset}",
):
plan = DatasetBackfillPlan(
dataset=self.dataset,
revision=revision,
priority=priority,
processing_graph=self.processing_graph,
error_codes_to_retry=error_codes_to_retry,
only_first_processing_steps=True,
cache_max_days=cache_max_days,
)
logging.info(f"Setting new revision to {self.dataset}")
with StepProfiler(
method="DatasetOrchestrator.set_revision",
step="run",
context=f"dataset={self.dataset}",
):
plan.run()
def finish_job(self, job_result: JobResult) -> None:
"""
Finish a job.
It will finish the job, store the result in the cache, and trigger the next steps.
Args:
job_result (JobResult): The result of the job.
Returns:
None
Raises:
ValueError: If the job is not found, or if the processing step is not found.
"""
# check if the job is still in started status
job_info = job_result["job_info"]
if not Queue().is_job_started(job_id=job_info["job_id"]):
logging.debug("the job was cancelled, don't update the cache")
return
# if the job could not provide an output, finish it and return
if not job_result["output"]:
Queue().finish_job(job_id=job_info["job_id"], is_success=False)
logging.debug("the job raised an exception, don't update the cache")
return
# update the cache
output = job_result["output"]
params = job_info["params"]
try:
processing_step = self.processing_graph.get_processing_step_by_job_type(job_info["type"])
except ProcessingStepDoesNotExist as e:
raise ValueError(f"Processing step for job type {job_info['type']} does not exist") from e
upsert_response_params(
# inputs
kind=processing_step.cache_kind,
job_params=params,
job_runner_version=job_result["job_runner_version"],
# output
content=output["content"],
http_status=output["http_status"],
error_code=output["error_code"],
details=output["details"],
progress=output["progress"],
)
logging.debug("the job output has been written to the cache.")
# finish the job
Queue().finish_job(job_id=job_info["job_id"], is_success=job_result["is_success"])
logging.debug("the job has been finished.")
# trigger the next steps
plan = AfterJobPlan(job_info=job_info, processing_graph=self.processing_graph)
plan.run()
logging.debug("jobs have been created for the next steps.")
def has_some_cache(self) -> bool:
"""
Check if the cache has some entries for the dataset.
Returns:
bool: True if the cache has some entries for the dataset, False otherwise.
"""
return has_some_cache(dataset=self.dataset)
def has_pending_ancestor_jobs(self, processing_step_names: list[str]) -> bool:
"""
        Check if the processing steps, or one of their ancestors, have a pending job, i.e. if artifacts could exist
in the cache in the future. This method is used when a cache entry is missing in the API,
to return a:
- 404 error, saying that the artifact does not exist,
- or a 500 error, saying that the artifact could be available soon (retry).
It is implemented by checking if a job exists for the artifacts or one of their ancestors.
Note that, if dataset-config-names' job is pending, we cannot know if the config is valid or not, so we
consider that the artifact could exist.
Args:
processing_step_names (list[str]): The processing step names (artifacts) to check.
Returns:
bool: True if any of the artifact could exist, False otherwise.
Raises:
ValueError: If any of the processing step does not exist.
"""
job_types: set[str] = set()
for processing_step_name in processing_step_names:
try:
processing_step = self.processing_graph.get_processing_step(processing_step_name)
except ProcessingStepDoesNotExist as e:
raise ValueError(f"Processing step {processing_step_name} does not exist") from e
ancestors = self.processing_graph.get_ancestors(processing_step_name)
job_types.add(processing_step.job_type)
job_types.update(ancestor.job_type for ancestor in ancestors)
# check if a pending job exists for the artifact or one of its ancestors
# note that we cannot know if the ancestor is really for the artifact (ie: ancestor is for config1,
# while we look for config2,split1). Looking in this detail would be too complex, this approximation
# is good enough.
return Queue().has_pending_jobs(dataset=self.dataset, job_types=list(job_types))
def backfill(
self, revision: str, priority: Priority, cache_max_days: int, error_codes_to_retry: Optional[list[str]] = None
) -> int:
"""
Backfill the cache for a given revision.
Args:
revision (str): The revision.
priority (Priority): The priority of the jobs.
cache_max_days (int): The maximum number of days to keep the cache.
error_codes_to_retry (Optional[list[str]]): The error codes for which the jobs should be retried.
Returns:
int: The number of jobs created.
"""
with StepProfiler(
method="DatasetOrchestrator.backfill",
step="all",
context=f"dataset={self.dataset}",
):
logging.info(f"Analyzing {self.dataset}")
with StepProfiler(
method="DatasetOrchestrator.backfill",
step="plan",
context=f"dataset={self.dataset}",
):
plan = DatasetBackfillPlan(
dataset=self.dataset,
revision=revision,
priority=priority,
processing_graph=self.processing_graph,
error_codes_to_retry=error_codes_to_retry,
only_first_processing_steps=False,
cache_max_days=cache_max_days,
)
logging.info(f"Analyzing {self.dataset}")
with StepProfiler(
method="DatasetOrchestrator.backfill",
step="run",
context=f"dataset={self.dataset}",
):
return plan.run()
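# A minimal usage sketch: how a caller typically drives the orchestrator when a dataset is
# updated. It assumes the mongo resources are already allocated and that `processing_graph`
# is the application's configured graph; the dataset name, revision and cache_max_days
# values below are placeholders.
def _example_orchestration(processing_graph: ProcessingGraph) -> None:
    orchestrator = DatasetOrchestrator(dataset="user/dataset", processing_graph=processing_graph)
    # a new commit was pushed: create one job per first step of the graph
    orchestrator.set_revision(
        revision="<commit sha>",
        priority=Priority.NORMAL,
        error_codes_to_retry=[],
        cache_max_days=90,
    )
    # later, backfill the rest of the graph for the same revision
    orchestrator.backfill(revision="<commit sha>", priority=Priority.LOW, cache_max_days=90)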
| datasets-server-main | libs/libcommon/src/libcommon/orchestrator.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import logging
import shutil
from os import PathLike, makedirs
from pathlib import Path
from typing import Optional, Union
from appdirs import user_cache_dir # type:ignore
from libcommon.constants import (
ASSETS_CACHE_APPNAME,
CACHED_ASSETS_CACHE_APPNAME,
DESCRIPTIVE_STATISTICS_CACHE_APPNAME,
DUCKDB_INDEX_CACHE_APPNAME,
HF_DATASETS_CACHE_APPNAME,
PARQUET_METADATA_CACHE_APPNAME,
)
StrPath = Union[str, PathLike[str]]
def init_dir(directory: Optional[StrPath] = None, appname: Optional[str] = None) -> StrPath:
"""Initialize a directory.
If directory is None, it will be set to the default cache location on the machine (using appname as a key, if
not None).
Args:
directory (Optional[Union[str, PathLike[str]]], optional): The directory to initialize. Defaults to None.
        appname (Optional[str], optional): The name of the application. Used if `directory` is None. Defaults to None.
Returns:
Union[str, PathLike[str]]: The directory.
"""
if directory is None:
directory = user_cache_dir(appname=appname)
logging.debug(f"Directory defaulting to user-specific cache: {directory}")
makedirs(directory, exist_ok=True)
logging.debug(f"Directory created at: {directory}")
return directory
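# A minimal usage sketch of init_dir: with an explicit directory the path is created and
# returned as-is, while passing only an appname falls back to the per-user cache location.
# The directory and appname values below are placeholders.
def _example_init_dir() -> StrPath:
    explicit = init_dir("/tmp/datasets_server_example")
    default = init_dir(appname="datasets-server-example")
    logging.debug(f"explicit={explicit} default={default}")
    return explicit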
def init_assets_dir(directory: Optional[StrPath] = None) -> StrPath:
"""Initialize the assets directory.
If directory is None, it will be set to the default cache location on the machine.
Args:
directory (Optional[Union[str, PathLike[str]]], optional): The directory to initialize. Defaults to None.
Returns:
Union[str, PathLike[str]]: The directory.
"""
return init_dir(directory, appname=ASSETS_CACHE_APPNAME)
def init_cached_assets_dir(directory: Optional[StrPath] = None) -> StrPath:
"""Initialize the cached assets directory.
If directory is None, it will be set to the default cache location on the machine.
Args:
directory (Optional[Union[str, PathLike[str]]], optional): The directory to initialize. Defaults to None.
Returns:
Union[str, PathLike[str]]: The directory.
"""
return init_dir(directory, appname=CACHED_ASSETS_CACHE_APPNAME)
def init_parquet_metadata_dir(directory: Optional[StrPath] = None) -> StrPath:
"""Initialize the parquet metadata directory.
If directory is None, it will be set to the default cache location on the machine.
Args:
directory (Optional[Union[str, PathLike[str]]], optional): The directory to initialize. Defaults to None.
Returns:
Union[str, PathLike[str]]: The directory.
"""
return init_dir(directory, appname=PARQUET_METADATA_CACHE_APPNAME)
def init_duckdb_index_cache_dir(directory: Optional[StrPath] = None) -> StrPath:
"""Initialize the duckdb index directory.
If directory is None, it will be set to the default duckdb index location on the machine.
Args:
directory (Optional[Union[str, PathLike[str]]], optional): The directory to initialize. Defaults to None.
Returns:
Union[str, PathLike[str]]: The directory.
"""
return init_dir(directory, appname=DUCKDB_INDEX_CACHE_APPNAME)
def init_hf_datasets_cache_dir(directory: Optional[StrPath] = None) -> StrPath:
"""Initialize the cache directory for the datasets library.
If directory is None, it will be set to the default cache location on the machine.
Args:
directory (Optional[Union[str, PathLike[str]]], optional): The directory to initialize. Defaults to None.
Returns:
Union[str, PathLike[str]]: The directory.
"""
return init_dir(directory, appname=HF_DATASETS_CACHE_APPNAME)
def init_statistics_cache_dir(directory: Optional[StrPath] = None) -> StrPath:
"""Initialize the cache directory for storage of a dataset in parquet format for statistics computations.
If directory is None, it will be set to the default cache location on the machine.
Args:
directory (Optional[Union[str, PathLike[str]]], optional): The directory to initialize. Defaults to None.
Returns:
Union[str, PathLike[str]]: The directory.
"""
return init_dir(directory, appname=DESCRIPTIVE_STATISTICS_CACHE_APPNAME)
def exists(path: StrPath) -> bool:
"""Check if a path exists.
Args:
path (Union[str, PathLike[str]]): The path to check.
Returns:
bool: True if the path exists, False otherwise.
"""
return Path(path).exists()
def remove_dir(directory: StrPath) -> None:
"""Remove a directory.
If the directory does not exist, don't raise.
Args:
directory (Union[str, PathLike[str]]): The directory to remove.
"""
shutil.rmtree(directory, ignore_errors=True)
logging.debug(f"Directory removed: {directory}")
| datasets-server-main | libs/libcommon/src/libcommon/storage.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import logging
import sys
import traceback
from http import HTTPStatus
from typing import Literal, Optional, TypedDict, Union
class ErrorResponseWithoutCause(TypedDict):
error: str
class ErrorResponseWithCause(ErrorResponseWithoutCause, total=False):
cause_exception: str
cause_message: str
cause_traceback: list[str]
ErrorResponse = Union[ErrorResponseWithoutCause, ErrorResponseWithCause]
class LoggedError(Exception):
def __init__(self, message: str):
self.message = message
logging.debug(self.message)
super().__init__(self.message)
class CustomError(LoggedError):
"""Base class for exceptions in this module."""
def __init__(
self,
message: str,
status_code: HTTPStatus,
code: str,
cause: Optional[BaseException] = None,
disclose_cause: Optional[bool] = None,
):
super().__init__(message)
self.exception = type(self).__name__
self.status_code = status_code
self.code = code
self.message = str(self)
self.disclose_cause = disclose_cause if disclose_cause is not None else cause is not None
if cause is not None:
self.cause_exception: Optional[str] = type(cause).__name__
self.cause_message: Optional[str] = str(cause)
(t, v, tb) = sys.exc_info()
self.cause_traceback: Optional[list[str]] = traceback.format_exception(t, v, tb)
else:
self.cause_exception = None
self.cause_message = None
self.cause_traceback = None
def as_response_with_cause(self) -> ErrorResponseWithCause:
error: ErrorResponseWithCause = {"error": self.message}
if self.cause_exception is not None:
error["cause_exception"] = self.cause_exception
if self.cause_message is not None:
error["cause_message"] = self.cause_message
if self.cause_traceback is not None:
error["cause_traceback"] = self.cause_traceback
return error
def as_response_without_cause(self) -> ErrorResponseWithoutCause:
return {"error": self.message}
def as_response(self) -> ErrorResponse:
return self.as_response_with_cause() if self.disclose_cause else self.as_response_without_cause()
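# A minimal sketch of how a cause is captured and serialized; the status code, error code
# and messages are placeholders, not real error codes used by the application.
def _example_error_response() -> ErrorResponse:
    try:
        raise ValueError("underlying problem")
    except ValueError as cause:
        error = CustomError(
            message="Something went wrong.",
            status_code=HTTPStatus.INTERNAL_SERVER_ERROR,
            code="ExampleError",
            cause=cause,
            disclose_cause=True,
        )
    # with disclose_cause=True the response also contains cause_exception, cause_message
    # and cause_traceback; with False it would only be {"error": "Something went wrong."}
    return error.as_response()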
CacheableErrorCode = Literal[
"CacheDirectoryNotInitializedError",
"ComputationError",
"ConfigNamesError",
"ConfigNotFoundError",
"CreateCommitError",
"DatasetInBlockListError",
"DatasetInfoHubRequestError",
"DatasetManualDownloadError",
"DatasetModuleNotInstalledError",
"DatasetNotFoundError",
"DatasetRevisionEmptyError",
"DatasetRevisionNotFoundError",
"DatasetScriptError",
"DatasetWithTooManyConfigsError",
"DatasetWithTooManyParquetFilesError",
"DisabledViewerError",
"DiskError",
"DuckDBIndexFileNotFoundError",
"EmptyDatasetError",
"ExternalFilesSizeRequestConnectionError",
"ExternalFilesSizeRequestError",
"ExternalFilesSizeRequestHTTPError",
"ExternalFilesSizeRequestTimeoutError",
"ExternalServerError",
"FeaturesError",
"FileSystemError",
"InfoError",
"JobManagerCrashedError",
"JobManagerExceededMaximumDurationError",
"LockedDatasetTimeoutError",
"MissingSpawningTokenError",
"NoIndexableColumnsError",
"NoSupportedFeaturesError",
"NormalRowsError",
"ParameterMissingError",
"ParquetResponseEmptyError",
"PreviousStepFormatError",
"PreviousStepStatusError",
"ResponseAlreadyComputedError",
"RowsPostProcessingError",
"SplitsNamesError",
"SplitNamesFromStreamingError",
"SplitNotFoundError",
"SplitWithTooBigParquetError",
"StreamingRowsError",
"TooBigContentError",
"TooManyColumnsError",
"UnexpectedError",
"UnsupportedExternalFilesError",
]
class CacheableError(CustomError):
"""Base class for exceptions that can be cached in the database."""
def __init__(
self,
message: str,
status_code: HTTPStatus,
code: CacheableErrorCode,
cause: Optional[BaseException] = None,
disclose_cause: bool = False,
):
super().__init__(
message=message, status_code=status_code, code=code, cause=cause, disclose_cause=disclose_cause
)
class CacheDirectoryNotInitializedError(CacheableError):
"""The cache directory has not been initialized before job compute."""
def __init__(self, message: str, cause: Optional[BaseException] = None):
super().__init__(message, HTTPStatus.NOT_IMPLEMENTED, "CacheDirectoryNotInitializedError", cause, True)
class ConfigNamesError(CacheableError):
"""The config names could not be fetched."""
def __init__(self, message: str, cause: Optional[BaseException] = None):
super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "ConfigNamesError", cause, True)
class ConfigNotFoundError(CacheableError):
"""The config does not exist."""
def __init__(self, message: str, cause: Optional[BaseException] = None):
super().__init__(
message=message,
status_code=HTTPStatus.NOT_FOUND,
code="ConfigNotFoundError",
cause=cause,
disclose_cause=False,
)
class CreateCommitError(CacheableError):
"""A commit could not be created on the Hub."""
def __init__(self, message: str, cause: Optional[BaseException] = None):
super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "CreateCommitError", cause, False)
class DatasetInBlockListError(CacheableError):
"""The dataset is in the list of blocked datasets."""
def __init__(self, message: str, cause: Optional[BaseException] = None):
super().__init__(message, HTTPStatus.NOT_IMPLEMENTED, "DatasetInBlockListError", cause, False)
class DatasetInfoHubRequestError(CacheableError):
"""The request to the Hub's dataset-info endpoint times out."""
def __init__(self, message: str, cause: Optional[BaseException] = None):
super().__init__(
message=message,
status_code=HTTPStatus.INTERNAL_SERVER_ERROR,
code="DatasetInfoHubRequestError",
cause=cause,
disclose_cause=False,
)
class DatasetManualDownloadError(CacheableError):
"""The dataset requires manual download."""
def __init__(self, message: str, cause: Optional[BaseException] = None):
super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "DatasetManualDownloadError", cause, True)
class DatasetModuleNotInstalledError(CacheableError):
"""The dataset tries to import a module that is not installed."""
def __init__(self, message: str, cause: Optional[BaseException] = None):
super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "DatasetModuleNotInstalledError", cause, True)
class DatasetNotFoundError(CacheableError):
"""The dataset does not exist."""
def __init__(self, message: str, cause: Optional[BaseException] = None):
super().__init__(
message=message,
status_code=HTTPStatus.NOT_FOUND,
code="DatasetNotFoundError",
cause=cause,
disclose_cause=False,
)
class DatasetRevisionEmptyError(CacheableError):
"""The current git revision (branch, commit) could not be obtained."""
def __init__(self, message: str, cause: Optional[BaseException] = None):
super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "DatasetRevisionEmptyError", cause, False)
class DatasetRevisionNotFoundError(CacheableError):
"""The revision of a dataset repository does not exist."""
def __init__(self, message: str, cause: Optional[BaseException] = None):
super().__init__(message, HTTPStatus.NOT_FOUND, "DatasetRevisionNotFoundError", cause, False)
class DatasetScriptError(CacheableError):
"""The dataset script generated an error."""
def __init__(self, message: str, cause: Optional[BaseException] = None):
super().__init__(message, HTTPStatus.NOT_IMPLEMENTED, "DatasetScriptError", cause, False)
class DatasetWithTooManyConfigsError(CacheableError):
"""The number of configs of a dataset exceeded the limit."""
def __init__(self, message: str, cause: Optional[BaseException] = None):
super().__init__(message, HTTPStatus.NOT_IMPLEMENTED, "DatasetWithTooManyConfigsError", cause, True)
class DatasetWithTooManyParquetFilesError(CacheableError):
"""The number of parquet files of a dataset is too big."""
def __init__(self, message: str, cause: Optional[BaseException] = None):
super().__init__(message, HTTPStatus.NOT_IMPLEMENTED, "DatasetWithTooManyParquetFilesError", cause, True)
class DuckDBIndexFileNotFoundError(CacheableError):
"""No duckdb index file was found for split."""
def __init__(self, message: str, cause: Optional[BaseException] = None):
super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "DuckDBIndexFileNotFoundError", cause, False)
class DisabledViewerError(CacheableError):
"""The dataset viewer is disabled."""
def __init__(self, message: str, cause: Optional[BaseException] = None):
super().__init__(
message=message,
status_code=HTTPStatus.NOT_FOUND,
code="DisabledViewerError",
cause=cause,
disclose_cause=False,
)
class DiskError(CacheableError):
"""Disk-related issues, for example, incorrect permissions."""
def __init__(self, message: str, cause: Optional[BaseException] = None):
super().__init__(
message=message,
status_code=HTTPStatus.INTERNAL_SERVER_ERROR,
code="DiskError",
cause=cause,
disclose_cause=False,
)
class EmptyDatasetError(CacheableError):
"""The dataset has no data."""
def __init__(self, message: str, cause: Optional[BaseException] = None):
super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "EmptyDatasetError", cause, True)
class ExternalFilesSizeRequestConnectionError(CacheableError):
"""We failed to get the size of the external files."""
def __init__(self, message: str, cause: Optional[BaseException] = None):
super().__init__(message, HTTPStatus.NOT_IMPLEMENTED, "ExternalFilesSizeRequestConnectionError", cause, True)
class ExternalFilesSizeRequestError(CacheableError):
"""We failed to get the size of the external files."""
def __init__(self, message: str, cause: Optional[BaseException] = None):
super().__init__(message, HTTPStatus.NOT_IMPLEMENTED, "ExternalFilesSizeRequestError", cause, True)
class ExternalFilesSizeRequestHTTPError(CacheableError):
"""We failed to get the size of the external files."""
def __init__(self, message: str, cause: Optional[BaseException] = None):
super().__init__(message, HTTPStatus.NOT_IMPLEMENTED, "ExternalFilesSizeRequestHTTPError", cause, True)
class ExternalFilesSizeRequestTimeoutError(CacheableError):
"""We failed to get the size of the external files."""
def __init__(self, message: str, cause: Optional[BaseException] = None):
super().__init__(message, HTTPStatus.NOT_IMPLEMENTED, "ExternalFilesSizeRequestTimeoutError", cause, True)
class ExternalServerError(CacheableError):
"""The spawning.ai server is not responding."""
def __init__(self, message: str, cause: Optional[BaseException] = None):
super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "ExternalServerError", cause, False)
class FeaturesError(CacheableError):
"""The features could not be fetched."""
def __init__(self, message: str, cause: Optional[BaseException] = None):
super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "FeaturesError", cause, True)
class FileSystemError(CacheableError):
"""An error happen reading from File System."""
def __init__(self, message: str, cause: Optional[BaseException] = None):
super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "FileSystemError", cause, False)
class InfoError(CacheableError):
"""The info could not be fetched."""
def __init__(self, message: str, cause: Optional[BaseException] = None):
super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "InfoError", cause, True)
class JobManagerCrashedError(CacheableError):
"""The job runner crashed and the job became a zombie."""
def __init__(self, message: str, cause: Optional[BaseException] = None):
super().__init__(
message=message,
status_code=HTTPStatus.NOT_IMPLEMENTED,
code="JobManagerCrashedError",
cause=cause,
disclose_cause=False,
)
class JobManagerExceededMaximumDurationError(CacheableError):
"""The job runner was killed because the job exceeded the maximum duration."""
def __init__(self, message: str, cause: Optional[BaseException] = None):
super().__init__(
message=message,
status_code=HTTPStatus.NOT_IMPLEMENTED,
code="JobManagerExceededMaximumDurationError",
cause=cause,
disclose_cause=False,
)
class LockedDatasetTimeoutError(CacheableError):
"""A dataset is locked by another job."""
def __init__(self, message: str, cause: Optional[BaseException] = None):
super().__init__(message, HTTPStatus.NOT_IMPLEMENTED, "LockedDatasetTimeoutError", cause, True)
class MissingSpawningTokenError(CacheableError):
"""The spawning.ai token is not set."""
def __init__(self, message: str, cause: Optional[BaseException] = None):
super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "MissingSpawningTokenError", cause, False)
class NormalRowsError(CacheableError):
"""The rows could not be fetched in normal mode."""
def __init__(self, message: str, cause: Optional[BaseException] = None):
super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "NormalRowsError", cause, True)
class NoIndexableColumnsError(CacheableError):
"""The split does not have string columns to index."""
def __init__(self, message: str, cause: Optional[BaseException] = None):
super().__init__(message, HTTPStatus.NOT_IMPLEMENTED, "NoIndexableColumnsError", cause, True)
class NoSupportedFeaturesError(CacheableError):
"""The dataset does not have any features which types are supported by a worker's processing pipeline."""
def __init__(self, message: str, cause: Optional[BaseException] = None):
super().__init__(message, HTTPStatus.NOT_IMPLEMENTED, "NoSupportedFeaturesError", cause, True)
class ParameterMissingError(CacheableError):
"""The request is missing some parameter."""
def __init__(self, message: str, cause: Optional[BaseException] = None):
super().__init__(
message=message,
status_code=HTTPStatus.INTERNAL_SERVER_ERROR,
code="ParameterMissingError",
cause=cause,
disclose_cause=False,
)
class ParquetResponseEmptyError(CacheableError):
"""No parquet files were found for split."""
def __init__(self, message: str, cause: Optional[BaseException] = None):
super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "ParquetResponseEmptyError", cause, False)
class PreviousStepFormatError(CacheableError):
"""The content of the previous step has not the expected format."""
def __init__(self, message: str, cause: Optional[BaseException] = None):
super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "PreviousStepFormatError", cause, False)
class PreviousStepStatusError(CacheableError):
"""The previous step gave an error. The job should not have been created."""
def __init__(self, message: str, cause: Optional[BaseException] = None):
super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "PreviousStepStatusError", cause, False)
class ResponseAlreadyComputedError(CacheableError):
"""The response has been already computed by another job runner."""
def __init__(self, message: str, cause: Optional[BaseException] = None):
super().__init__(
message=message,
status_code=HTTPStatus.INTERNAL_SERVER_ERROR,
code="ResponseAlreadyComputedError",
cause=cause,
disclose_cause=True,
)
class RowsPostProcessingError(CacheableError):
"""The rows could not be post-processed successfully."""
def __init__(self, message: str, cause: Optional[BaseException] = None):
super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "RowsPostProcessingError", cause, False)
class SplitsNamesError(CacheableError):
"""The split names could not be fetched."""
def __init__(self, message: str, cause: Optional[BaseException] = None):
super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "SplitsNamesError", cause, True)
class SplitNamesFromStreamingError(CacheableError):
"""The split names could not be fetched."""
def __init__(self, message: str, cause: Optional[BaseException] = None):
super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "SplitNamesFromStreamingError", cause, True)
class SplitNotFoundError(CacheableError):
"""The split does not exist."""
def __init__(self, message: str, cause: Optional[BaseException] = None):
super().__init__(
message=message,
status_code=HTTPStatus.NOT_FOUND,
code="SplitNotFoundError",
cause=cause,
disclose_cause=False,
)
class SplitWithTooBigParquetError(CacheableError):
"""The split parquet size (sum of parquet sizes given) is too big."""
def __init__(self, message: str, cause: Optional[BaseException] = None):
super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "SplitWithTooBigParquetError", cause, False)
class StatisticsComputationError(CacheableError):
"""An unexpected behavior or error occurred during statistics computations."""
def __init__(self, message: str, cause: Optional[BaseException] = None):
super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "ComputationError", cause, True)
class StreamingRowsError(CacheableError):
"""The rows could not be fetched in streaming mode."""
def __init__(self, message: str, cause: Optional[BaseException] = None):
super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "StreamingRowsError", cause, True)
class TooBigContentError(CacheableError):
"""The content size in bytes is bigger than the supported value."""
def __init__(self, message: str, cause: Optional[BaseException] = None):
super().__init__(
message=message,
status_code=HTTPStatus.NOT_IMPLEMENTED,
code="TooBigContentError",
cause=cause,
disclose_cause=False,
)
class TooManyColumnsError(CacheableError):
"""The dataset exceeded the max number of columns."""
def __init__(self, message: str, cause: Optional[BaseException] = None):
super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "TooManyColumnsError", cause, True)
class UnexpectedError(CacheableError):
"""The job runner raised an unexpected error."""
def __init__(self, message: str, cause: Optional[BaseException] = None):
super().__init__(
message=message,
status_code=HTTPStatus.INTERNAL_SERVER_ERROR,
code="UnexpectedError",
cause=cause,
disclose_cause=False,
)
logging.error(message, exc_info=cause)
class UnsupportedExternalFilesError(CacheableError):
"""We failed to get the size of the external files."""
def __init__(self, message: str, cause: Optional[BaseException] = None):
super().__init__(message, HTTPStatus.NOT_IMPLEMENTED, "UnsupportedExternalFilesError", cause, True)
| datasets-server-main | libs/libcommon/src/libcommon/exceptions.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from dataclasses import dataclass, field
from types import TracebackType
from typing import Any, Optional, TypeVar
from mongoengine.connection import ConnectionFailure, connect, disconnect
from pymongo import MongoClient
from pymongo.errors import ServerSelectionTimeoutError
from libcommon.constants import (
CACHE_MONGOENGINE_ALIAS,
METRICS_MONGOENGINE_ALIAS,
QUEUE_MONGOENGINE_ALIAS,
)
T = TypeVar("T", bound="Resource")
@dataclass
class Resource:
"""
A resource that can be allocated and released.
The method allocate() is called when the resource is created.
    The method release() frees the resource.
It can be used as a context manager, in which case the resource is released when the context is exited.
Example:
>>> with Resource() as resource:
... pass
Resources should be inherited from this class and must implement the allocate(), check() and release() methods.
"""
def __post_init__(self) -> None:
self.allocate()
def __enter__(self: T) -> T:
return self
def __exit__(
self,
exc_type: Optional[type[BaseException]],
exc_value: Optional[BaseException],
traceback: Optional[TracebackType],
) -> None:
self.release()
def allocate(self) -> None:
pass
def release(self) -> None:
pass
class MongoConnectionFailure(Exception):
pass
@dataclass
class MongoResource(Resource):
"""
A base resource that represents a connection to a database.
    The method is_available() checks whether the resource is available. It is not called automatically.
Args:
database (:obj:`str`): The name of the mongo database.
host (:obj:`str`): The host of the mongo database. It must start with ``mongodb://`` or ``mongodb+srv://``.
mongoengine_alias (:obj:`str`): The alias of the connection in mongoengine.
server_selection_timeout_ms (:obj:`int`, `optional`, defaults to 30_000): The timeout in milliseconds for
server selection.
"""
database: str
host: str
mongoengine_alias: str
server_selection_timeout_ms: int = 30_000
_client: MongoClient = field(init=False)
def allocate(self) -> None:
try:
self._client = connect(
db=self.database,
host=self.host,
alias=self.mongoengine_alias,
serverSelectionTimeoutMS=self.server_selection_timeout_ms,
)
except ConnectionFailure as e:
raise MongoConnectionFailure(f"Failed to connect to MongoDB: {e}") from e
def is_available(self) -> bool:
"""Check if the connection is available."""
try:
self._client.is_mongos
return True
except ServerSelectionTimeoutError:
return False
def create_collection(self, document: Any) -> None:
document.ensure_indexes()
def enable_pre_and_post_images(self, collection_name: str) -> None:
self._client[self.database].command(
"collMod", collection_name, changeStreamPreAndPostImages={"enabled": True}
) # type: ignore
def release(self) -> None:
disconnect(alias=self.mongoengine_alias)
def __reduce__(self) -> tuple[Any, ...]:
# Needed to be able to use the resource in subprocesses in tests (e.g. tests/test_queue.py::test_lock).
        # This is because the _client is not picklable.
return (MongoResource, (self.database, self.host, self.mongoengine_alias, self.server_selection_timeout_ms))
@dataclass
class CacheMongoResource(MongoResource):
"""
A resource that represents a connection to the cache mongo database.
Args:
database (:obj:`str`): The name of the mongo database.
host (:obj:`str`): The host of the mongo database. It must start with ``mongodb://`` or ``mongodb+srv://``.
"""
mongoengine_alias: str = field(default=CACHE_MONGOENGINE_ALIAS, init=False)
@dataclass
class QueueMongoResource(MongoResource):
"""
A resource that represents a connection to the queue mongo database.
Args:
database (:obj:`str`): The name of the mongo database.
host (:obj:`str`): The host of the mongo database. It must start with ``mongodb://`` or ``mongodb+srv://``.
"""
mongoengine_alias: str = field(default=QUEUE_MONGOENGINE_ALIAS, init=False)
@dataclass
class MetricsMongoResource(MongoResource):
"""
A resource that represents a connection to the metrics mongo database.
Args:
database (:obj:`str`): The name of the mongo database.
host (:obj:`str`): The host of the mongo database. It must start with ``mongodb://`` or ``mongodb+srv://``.
"""
mongoengine_alias: str = field(default=METRICS_MONGOENGINE_ALIAS, init=False)
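# A minimal usage sketch: allocating the cache database connection as a context manager
# and checking its availability. The database name and host are placeholders; a local
# mongod instance is assumed.
def _example_check_cache_database() -> bool:
    with CacheMongoResource(database="datasets_server_cache", host="mongodb://localhost:27017") as resource:
        return resource.is_available()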
| datasets-server-main | libs/libcommon/src/libcommon/resources.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import types
from collections.abc import Mapping
from dataclasses import dataclass
from datetime import datetime
from http import HTTPStatus
from typing import Any, Generic, NamedTuple, Optional, TypedDict, TypeVar, overload
import pandas as pd
from bson import ObjectId
from bson.errors import InvalidId
from mongoengine import Document
from mongoengine.errors import DoesNotExist
from mongoengine.fields import (
DateTimeField,
DictField,
EnumField,
FloatField,
IntField,
ObjectIdField,
StringField,
)
from mongoengine.queryset.queryset import QuerySet
from libcommon.constants import (
CACHE_COLLECTION_RESPONSES,
CACHE_METRICS_COLLECTION,
CACHE_MONGOENGINE_ALIAS,
)
from libcommon.utils import JobParams, get_datetime
# START monkey patching ### hack ###
# see https://github.com/sbdchd/mongo-types#install
U = TypeVar("U", bound=Document)
def no_op(self, _): # type: ignore
return self
QuerySet.__class_getitem__ = types.MethodType(no_op, QuerySet)
class QuerySetManager(Generic[U]):
def __get__(self, instance: object, cls: type[U]) -> QuerySet[U]:
return QuerySet(cls, cls._get_collection())
# END monkey patching ### hack ###
class SplitFullName(NamedTuple):
"""A split full name is a tuple of (dataset, config, split)."""
dataset: str
config: Optional[str]
split: Optional[str]
# cache of any job
class CachedResponseDocument(Document):
"""A response computed for a job, cached in the mongoDB database
Args:
kind (`str`): The kind of the cached response, identifies the job type
dataset (`str`): The requested dataset.
config (`str`, optional): The requested config, if any.
split (`str`, optional): The requested split, if any.
http_status (`HTTPStatus`): The HTTP status code.
error_code (`str`, optional): The error code, if any.
content (`dict`): The content of the cached response. Can be an error or a valid content.
details (`dict`, optional): Additional details, eg. a detailed error that we don't want to send as a response.
updated_at (`datetime`): When the cache entry has been last updated.
job_runner_version (`int`): The version of the job runner that cached the response.
dataset_git_revision (`str`): The commit (of the git dataset repo) used to generate the response.
        progress (`float`): Progress, as a float between 0. and 1., if the result is not complete yet.
"""
id = ObjectIdField(db_field="_id", primary_key=True, default=ObjectId)
kind = StringField(required=True, unique_with=["dataset", "config", "split"])
dataset = StringField(required=True)
config = StringField()
split = StringField()
http_status = EnumField(HTTPStatus, required=True)
error_code = StringField()
content = DictField(required=True)
dataset_git_revision = StringField()
progress = FloatField(min_value=0.0, max_value=1.0)
job_runner_version = IntField()
details = DictField()
updated_at = DateTimeField(default=get_datetime)
meta = {
"collection": CACHE_COLLECTION_RESPONSES,
"db_alias": CACHE_MONGOENGINE_ALIAS,
"indexes": [
("kind", "dataset", "config", "split"),
("dataset", "kind", "http_status"),
("kind", "http_status", "error_code"),
("kind", "http_status", "_id"),
],
}
objects = QuerySetManager["CachedResponseDocument"]()
DEFAULT_INCREASE_AMOUNT = 1
DEFAULT_DECREASE_AMOUNT = -1
class CacheTotalMetricDocument(Document):
"""Cache total metric in the mongoDB database, used to compute prometheus metrics.
Args:
kind (`str`): kind name
http_status (`int`): cache http_status
error_code (`str`): error code name
        total (`int`): total number of cached responses
created_at (`datetime`): when the metric has been created.
"""
id = ObjectIdField(db_field="_id", primary_key=True, default=ObjectId)
kind = StringField(required=True)
http_status = IntField(required=True)
error_code = StringField()
total = IntField(required=True, default=0)
created_at = DateTimeField(default=get_datetime)
meta = {
"collection": CACHE_METRICS_COLLECTION,
"db_alias": CACHE_MONGOENGINE_ALIAS,
"indexes": [
{
"fields": ["kind", "http_status", "error_code"],
"unique": True,
}
],
}
objects = QuerySetManager["CacheTotalMetricDocument"]()
# Fix issue with mongoengine: https://github.com/MongoEngine/mongoengine/issues/1242#issuecomment-810501601
# mongoengine automatically sets "config" and "splits" as required fields, because they are listed in the unique_with
# field of the "kind" field. But it's an error, since unique indexes (which are used to enforce unique_with) accept
# null values, see https://www.mongodb.com/docs/v5.0/core/index-unique/#unique-index-and-missing-field.
CachedResponseDocument.config.required = False # type: ignore
CachedResponseDocument.split.required = False # type: ignore
class CacheEntryDoesNotExistError(DoesNotExist):
pass
def _update_metrics(kind: str, http_status: HTTPStatus, increase_by: int, error_code: Optional[str] = None) -> None:
CacheTotalMetricDocument.objects(kind=kind, http_status=http_status, error_code=error_code).upsert_one(
inc__total=increase_by
)
def increase_metric(kind: str, http_status: HTTPStatus, error_code: Optional[str] = None) -> None:
_update_metrics(kind=kind, http_status=http_status, error_code=error_code, increase_by=DEFAULT_INCREASE_AMOUNT)
def decrease_metric(kind: str, http_status: HTTPStatus, error_code: Optional[str] = None) -> None:
_update_metrics(kind=kind, http_status=http_status, error_code=error_code, increase_by=DEFAULT_DECREASE_AMOUNT)
def decrease_metric_for_artifact(kind: str, dataset: str, config: Optional[str], split: Optional[str]) -> None:
try:
existing_cache = CachedResponseDocument.objects(kind=kind, dataset=dataset, config=config, split=split).get()
except DoesNotExist:
return
decrease_metric(kind=kind, http_status=existing_cache.http_status, error_code=existing_cache.error_code)
# Note: we let the exceptions throw (ie DocumentTooLarge): it's the responsibility of the caller to manage them
def upsert_response(
kind: str,
dataset: str,
content: Mapping[str, Any],
http_status: HTTPStatus,
config: Optional[str] = None,
split: Optional[str] = None,
error_code: Optional[str] = None,
details: Optional[Mapping[str, Any]] = None,
job_runner_version: Optional[int] = None,
dataset_git_revision: Optional[str] = None,
progress: Optional[float] = None,
updated_at: Optional[datetime] = None,
) -> None:
decrease_metric_for_artifact(kind=kind, dataset=dataset, config=config, split=split)
CachedResponseDocument.objects(kind=kind, dataset=dataset, config=config, split=split).upsert_one(
content=content,
http_status=http_status,
error_code=error_code,
details=details,
dataset_git_revision=dataset_git_revision,
progress=progress,
updated_at=updated_at or get_datetime(),
job_runner_version=job_runner_version,
)
increase_metric(kind=kind, http_status=http_status, error_code=error_code)
def upsert_response_params(
kind: str,
job_params: JobParams,
content: Mapping[str, Any],
http_status: HTTPStatus,
error_code: Optional[str] = None,
details: Optional[Mapping[str, Any]] = None,
job_runner_version: Optional[int] = None,
progress: Optional[float] = None,
updated_at: Optional[datetime] = None,
) -> None:
upsert_response(
kind=kind,
dataset=job_params["dataset"],
config=job_params["config"],
split=job_params["split"],
content=content,
dataset_git_revision=job_params["revision"],
details=details,
error_code=error_code,
http_status=http_status,
job_runner_version=job_runner_version,
progress=progress,
updated_at=updated_at,
)
def delete_response(
kind: str, dataset: str, config: Optional[str] = None, split: Optional[str] = None
) -> Optional[int]:
decrease_metric_for_artifact(kind=kind, dataset=dataset, config=config, split=split)
return CachedResponseDocument.objects(kind=kind, dataset=dataset, config=config, split=split).delete()
def delete_dataset_responses(dataset: str) -> Optional[int]:
existing_cache = CachedResponseDocument.objects(dataset=dataset)
for cache in existing_cache:
decrease_metric(kind=cache.kind, http_status=cache.http_status, error_code=cache.error_code)
return existing_cache.delete()
T = TypeVar("T")
@overload
def _clean_nested_mongo_object(obj: dict[str, T]) -> dict[str, T]:
...
@overload
def _clean_nested_mongo_object(obj: list[T]) -> list[T]:
...
@overload
def _clean_nested_mongo_object(obj: T) -> T:
...
def _clean_nested_mongo_object(obj: Any) -> Any:
"""get rid of BaseDict and BaseList objects from mongo (Feature.from_dict doesn't support them)"""
if isinstance(obj, dict):
return {k: _clean_nested_mongo_object(v) for k, v in obj.items()}
elif isinstance(obj, list):
return [_clean_nested_mongo_object(v) for v in obj]
elif isinstance(obj, tuple):
return tuple(_clean_nested_mongo_object(v) for v in obj)
else:
return obj
class CacheEntryWithoutContent(TypedDict):
http_status: HTTPStatus
error_code: Optional[str]
dataset_git_revision: Optional[str]
progress: Optional[float]
job_runner_version: Optional[int]
# Note: we let the exceptions throw: it's the responsibility of the caller to manage them
def get_response_without_content(
kind: str, dataset: str, config: Optional[str] = None, split: Optional[str] = None
) -> CacheEntryWithoutContent:
try:
response = (
CachedResponseDocument.objects(kind=kind, dataset=dataset, config=config, split=split)
.only("http_status", "error_code", "job_runner_version", "dataset_git_revision", "progress")
.get()
)
except DoesNotExist as e:
raise CacheEntryDoesNotExistError(f"Cache entry does not exist: {kind=} {dataset=} {config=} {split=}") from e
return {
"http_status": response.http_status,
"error_code": response.error_code,
"dataset_git_revision": response.dataset_git_revision,
"job_runner_version": response.job_runner_version,
"progress": response.progress,
}
def get_response_without_content_params(kind: str, job_params: JobParams) -> CacheEntryWithoutContent:
return get_response_without_content(
kind=kind, dataset=job_params["dataset"], config=job_params["config"], split=job_params["split"]
)
class CacheEntryMetadata(CacheEntryWithoutContent):
updated_at: datetime
# Note: we let the exceptions throw: it's the responsibility of the caller to manage them
def get_response_metadata(
kind: str, dataset: str, config: Optional[str] = None, split: Optional[str] = None
) -> CacheEntryMetadata:
try:
response = (
CachedResponseDocument.objects(kind=kind, dataset=dataset, config=config, split=split)
.only("http_status", "error_code", "job_runner_version", "dataset_git_revision", "progress", "updated_at")
.get()
)
except DoesNotExist as e:
raise CacheEntryDoesNotExistError(f"Cache entry does not exist: {kind=} {dataset=} {config=} {split=}") from e
return {
"http_status": response.http_status,
"error_code": response.error_code,
"dataset_git_revision": response.dataset_git_revision,
"job_runner_version": response.job_runner_version,
"progress": response.progress,
"updated_at": response.updated_at,
}
class CacheEntry(CacheEntryWithoutContent):
content: Mapping[str, Any]
class CacheEntryWithDetails(CacheEntry):
details: Mapping[str, str]
class CachedArtifactNotFoundError(Exception):
kind: str
dataset: str
config: Optional[str]
split: Optional[str]
def __init__(
self,
kind: str,
dataset: str,
config: Optional[str],
split: Optional[str],
):
super().__init__("The cache entry has not been found.")
self.kind = kind
self.dataset = dataset
self.config = config
self.split = split
class CachedArtifactError(Exception):
kind: str
dataset: str
config: Optional[str]
split: Optional[str]
cache_entry_with_details: CacheEntryWithDetails
enhanced_details: dict[str, Any]
def __init__(
self,
message: str,
kind: str,
dataset: str,
config: Optional[str],
split: Optional[str],
cache_entry_with_details: CacheEntryWithDetails,
):
super().__init__(message)
self.kind = kind
self.dataset = dataset
self.config = config
self.split = split
self.cache_entry_with_details = cache_entry_with_details
self.enhanced_details: dict[str, Any] = dict(self.cache_entry_with_details["details"].items())
self.enhanced_details["copied_from_artifact"] = {
"kind": self.kind,
"dataset": self.dataset,
"config": self.config,
"split": self.split,
}
# Note: we let the exceptions throw: it's the responsibility of the caller to manage them
def get_response(kind: str, dataset: str, config: Optional[str] = None, split: Optional[str] = None) -> CacheEntry:
try:
response = (
CachedResponseDocument.objects(kind=kind, dataset=dataset, config=config, split=split)
.only("content", "http_status", "error_code", "job_runner_version", "dataset_git_revision", "progress")
.get()
)
except DoesNotExist as e:
raise CacheEntryDoesNotExistError(f"Cache entry does not exist: {kind=} {dataset=} {config=} {split=}") from e
return {
"content": _clean_nested_mongo_object(response.content),
"http_status": response.http_status,
"error_code": response.error_code,
"job_runner_version": response.job_runner_version,
"dataset_git_revision": response.dataset_git_revision,
"progress": response.progress,
}
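# A minimal round-trip sketch: write a successful entry, then read it back. The kind,
# dataset and content values are placeholders, and a cache mongo connection is assumed
# to be already established (see CacheMongoResource).
def _example_upsert_and_get() -> CacheEntry:
    upsert_response(
        kind="dataset-config-names",
        dataset="user/dataset",
        content={"config_names": [{"dataset": "user/dataset", "config": "default"}]},
        http_status=HTTPStatus.OK,
    )
    return get_response(kind="dataset-config-names", dataset="user/dataset")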
# Note: we let the exceptions throw: it's the responsibility of the caller to manage them
def get_response_with_details(
kind: str, dataset: str, config: Optional[str] = None, split: Optional[str] = None
) -> CacheEntryWithDetails:
try:
response = (
CachedResponseDocument.objects(kind=kind, dataset=dataset, config=config, split=split)
.only(
"content",
"http_status",
"error_code",
"job_runner_version",
"dataset_git_revision",
"progress",
"details",
)
.get()
)
except DoesNotExist as e:
raise CacheEntryDoesNotExistError(f"Cache entry does not exist: {kind=} {dataset=} {config=} {split=}") from e
return {
"content": _clean_nested_mongo_object(response.content),
"http_status": response.http_status,
"error_code": response.error_code,
"job_runner_version": response.job_runner_version,
"dataset_git_revision": response.dataset_git_revision,
"progress": response.progress,
"details": _clean_nested_mongo_object(response.details),
}
CACHED_RESPONSE_NOT_FOUND = "CachedResponseNotFound"
def get_response_or_missing_error(
kind: str, dataset: str, config: Optional[str] = None, split: Optional[str] = None
) -> CacheEntryWithDetails:
try:
response = get_response_with_details(kind=kind, dataset=dataset, config=config, split=split)
except CacheEntryDoesNotExistError:
response = CacheEntryWithDetails(
content={
"error": (
f"Cached response not found for kind {kind}, dataset {dataset}, config {config}, split {split}"
)
},
http_status=HTTPStatus.NOT_FOUND,
error_code=CACHED_RESPONSE_NOT_FOUND,
dataset_git_revision=None,
job_runner_version=None,
progress=None,
details={},
)
return response
@dataclass
class BestResponse:
kind: str
response: CacheEntryWithDetails
def get_best_response(
kinds: list[str], dataset: str, config: Optional[str] = None, split: Optional[str] = None
) -> BestResponse:
"""
Get the best response from a list of cache kinds.
Best means:
- the first success response with the highest progress,
- else: the first error response (including cache miss)
Args:
kinds (`list[str]`):
A non-empty list of cache kinds to look responses for.
dataset (`str`):
A namespace (user or an organization) and a repo name separated by a `/`.
config (`str`, optional):
A config name.
split (`str`, optional):
A split name.
Returns:
BestResponse: The best response (object with fields: kind and response). The response can be an error,
including a cache miss (error code: `CachedResponseNotFound`)
"""
if not kinds:
raise ValueError("kinds must be a non-empty list")
best_response_candidates = [
BestResponse(
kind=kind, response=get_response_or_missing_error(kind=kind, dataset=dataset, config=config, split=split)
)
for kind in kinds
]
max_index = 0
max_value = float("-inf")
for index, candidate in enumerate(best_response_candidates):
if candidate.response["http_status"] >= HTTPStatus.BAD_REQUEST.value:
            # error responses are skipped; if all responses are errors, the first one is returned (max_index stays 0)
continue
value = (
0.0
if candidate.response["progress"] is None or candidate.response["progress"] < 0.0
else candidate.response["progress"]
)
if value > max_value:
max_value = value
max_index = index
return best_response_candidates[max_index]
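# A minimal sketch of the selection rule above: among the given kinds, the successful
# response with the highest progress wins, otherwise the first response (possibly a
# cache miss) is returned. The cache kinds below are placeholders.
def _example_best_split_names(dataset: str, config: str) -> BestResponse:
    return get_best_response(
        kinds=["config-split-names-from-info", "config-split-names-from-streaming"],
        dataset=dataset,
        config=config,
    )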
def get_previous_step_or_raise(
kinds: list[str], dataset: str, config: Optional[str] = None, split: Optional[str] = None
) -> BestResponse:
"""Get the previous step from the cache, or raise an exception if it failed."""
best_response = get_best_response(kinds=kinds, dataset=dataset, config=config, split=split)
if "error_code" in best_response.response and best_response.response["error_code"] == CACHED_RESPONSE_NOT_FOUND:
raise CachedArtifactNotFoundError(kind=best_response.kind, dataset=dataset, config=config, split=split)
if best_response.response["http_status"] != HTTPStatus.OK:
raise CachedArtifactError(
message="The previous step failed.",
kind=best_response.kind,
dataset=dataset,
config=config,
split=split,
cache_entry_with_details=best_response.response,
)
return best_response
def get_all_datasets() -> set[str]:
return set(CachedResponseDocument.objects().distinct("dataset"))
def has_any_successful_response(
kinds: list[str], dataset: str, config: Optional[str] = None, split: Optional[str] = None
) -> bool:
return (
CachedResponseDocument.objects(
dataset=dataset, config=config, split=split, kind__in=kinds, http_status=HTTPStatus.OK
).count()
> 0
)
# admin /metrics endpoint
class CountEntry(TypedDict):
kind: str
http_status: int
error_code: Optional[str]
count: int
def format_group(group: dict[str, Any]) -> CountEntry:
kind = group["kind"]
if not isinstance(kind, str):
raise TypeError("kind must be a str")
http_status = group["http_status"]
if not isinstance(http_status, int):
raise TypeError("http_status must be an int")
error_code = group["error_code"]
if not isinstance(error_code, str) and error_code is not None:
raise TypeError("error_code must be a str or None")
count = group["count"]
if not isinstance(count, int):
raise TypeError("count must be an int")
return {"kind": kind, "http_status": http_status, "error_code": error_code, "count": count}
def get_responses_count_by_kind_status_and_error_code() -> list[CountEntry]:
groups = CachedResponseDocument.objects().aggregate(
[
{"$sort": {"kind": 1, "http_status": 1, "error_code": 1}},
{
"$group": {
"_id": {"kind": "$kind", "http_status": "$http_status", "error_code": "$error_code"},
"count": {"$sum": 1},
}
},
{
"$project": {
"kind": "$_id.kind",
"http_status": "$_id.http_status",
"error_code": "$_id.error_code",
"count": "$count",
}
},
]
)
return [format_group(group) for group in groups]
# /cache-reports/... endpoints
class CacheReport(TypedDict):
kind: str
dataset: str
config: Optional[str]
split: Optional[str]
http_status: int
error_code: Optional[str]
details: Mapping[str, Any]
updated_at: datetime
job_runner_version: Optional[int]
dataset_git_revision: Optional[str]
progress: Optional[float]
class CacheReportsPage(TypedDict):
cache_reports: list[CacheReport]
next_cursor: str
class InvalidCursor(Exception):
pass
class InvalidLimit(Exception):
pass
def get_cache_reports(kind: str, cursor: Optional[str], limit: int) -> CacheReportsPage:
"""
Get a list of reports of the cache entries, along with the next cursor.
See https://solovyov.net/blog/2020/api-pagination-design/.
The "reports" are the cached entries, without the "content", "details" and "updated_at" fields.
Args:
kind (str): the kind of the cache entries
cursor (`str`):
An opaque string value representing a pointer to a specific CachedResponse item in the dataset. The
server returns results after the given pointer.
An empty string means to start from the beginning.
limit (strictly positive `int`):
The maximum number of results.
Returns:
[`CacheReportsPage`]: A dict with the list of reports and the next cursor. The next cursor is
an empty string if there are no more items to be fetched.
Raises the following errors:
- [`~simple_cache.InvalidCursor`]
If the cursor is invalid.
- [`~simple_cache.InvalidLimit`]
If the limit is an invalid number.
"""
if not cursor:
queryset = CachedResponseDocument.objects(kind=kind)
else:
try:
queryset = CachedResponseDocument.objects(kind=kind, id__gt=ObjectId(cursor))
except InvalidId as err:
raise InvalidCursor("Invalid cursor.") from err
if limit <= 0:
raise InvalidLimit("Invalid limit.")
objects = list(queryset.order_by("+id").exclude("content").limit(limit))
return {
"cache_reports": [
{
"kind": kind,
"dataset": object.dataset,
"config": object.config,
"split": object.split,
"http_status": object.http_status.value,
"error_code": object.error_code,
"details": _clean_nested_mongo_object(object.details),
"updated_at": object.updated_at,
"job_runner_version": object.job_runner_version,
"dataset_git_revision": object.dataset_git_revision,
"progress": object.progress,
}
for object in objects
],
"next_cursor": "" if len(objects) < limit else str(objects[-1].id),
}
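# A minimal pagination sketch: walk through all the reports of a kind by following
# next_cursor until it comes back empty. The page size is a placeholder.
def _example_iterate_cache_reports(kind: str, limit: int = 100) -> list[CacheReport]:
    reports: list[CacheReport] = []
    cursor: Optional[str] = ""
    while True:
        page = get_cache_reports(kind=kind, cursor=cursor, limit=limit)
        reports.extend(page["cache_reports"])
        cursor = page["next_cursor"]
        if not cursor:
            return reports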
def get_outdated_split_full_names_for_step(kind: str, current_version: int) -> list[SplitFullName]:
responses = CachedResponseDocument.objects(kind=kind, job_runner_version__lt=current_version).only(
"dataset", "config", "split"
)
return [
SplitFullName(dataset=response.dataset, config=response.config, split=response.split) for response in responses
]
def get_dataset_responses_without_content_for_kind(kind: str, dataset: str) -> list[CacheReport]:
responses = CachedResponseDocument.objects(kind=kind, dataset=dataset).exclude("content")
return [
{
"kind": response.kind,
"dataset": response.dataset,
"config": response.config,
"split": response.split,
"http_status": response.http_status,
"error_code": response.error_code,
"details": _clean_nested_mongo_object(response.details),
"updated_at": response.updated_at,
"job_runner_version": response.job_runner_version,
"dataset_git_revision": response.dataset_git_revision,
"progress": response.progress,
}
for response in responses
]
class CacheReportWithContent(CacheReport):
content: Mapping[str, Any]
class CacheReportsWithContentPage(TypedDict):
cache_reports_with_content: list[CacheReportWithContent]
next_cursor: str
def get_cache_reports_with_content(kind: str, cursor: Optional[str], limit: int) -> CacheReportsWithContentPage:
"""
    Get a list of the cache reports with content, along with the next cursor.
See https://solovyov.net/blog/2020/api-pagination-design/.
The cache reports contain all the fields of the object, including the "content" field.
Args:
kind (str): the kind of the cache entries
cursor (`str`):
An opaque string value representing a pointer to a specific CachedResponse item in the dataset. The
server returns results after the given pointer.
An empty string means to start from the beginning.
limit (strictly positive `int`):
The maximum number of results.
Returns:
[`CacheReportsWithContentPage`]: A dict with the list of reports and the next cursor. The next cursor is
an empty string if there are no more items to be fetched.
Raises the following errors:
- [`~simple_cache.InvalidCursor`]
If the cursor is invalid.
- [`~simple_cache.InvalidLimit`]
If the limit is an invalid number.
"""
if not cursor:
queryset = CachedResponseDocument.objects(kind=kind)
else:
try:
queryset = CachedResponseDocument.objects(kind=kind, id__gt=ObjectId(cursor))
except InvalidId as err:
raise InvalidCursor("Invalid cursor.") from err
if limit <= 0:
raise InvalidLimit("Invalid limit.")
objects = list(queryset.order_by("+id").limit(limit))
return {
"cache_reports_with_content": [
{
"kind": kind,
"dataset": object.dataset,
"config": object.config,
"split": object.split,
"http_status": object.http_status.value,
"error_code": object.error_code,
"content": _clean_nested_mongo_object(object.content),
"job_runner_version": object.job_runner_version,
"dataset_git_revision": object.dataset_git_revision,
"details": _clean_nested_mongo_object(object.details),
"updated_at": object.updated_at,
"progress": object.progress,
}
for object in objects
],
"next_cursor": "" if len(objects) < limit else str(objects[-1].id),
}
class CacheEntryFullMetadata(CacheEntryMetadata):
kind: str
dataset: str
config: Optional[str]
split: Optional[str]
def _get_df(entries: list[CacheEntryFullMetadata]) -> pd.DataFrame:
return pd.DataFrame(
{
"kind": pd.Series([entry["kind"] for entry in entries], dtype="category"),
"dataset": pd.Series([entry["dataset"] for entry in entries], dtype="str"),
"config": pd.Series([entry["config"] for entry in entries], dtype="str"),
"split": pd.Series([entry["split"] for entry in entries], dtype="str"),
"http_status": pd.Series(
[entry["http_status"] for entry in entries], dtype="category"
), # check if it's working as expected
"error_code": pd.Series([entry["error_code"] for entry in entries], dtype="category"),
"dataset_git_revision": pd.Series([entry["dataset_git_revision"] for entry in entries], dtype="str"),
"job_runner_version": pd.Series([entry["job_runner_version"] for entry in entries], dtype=pd.Int16Dtype()),
"progress": pd.Series([entry["progress"] for entry in entries], dtype="float"),
"updated_at": pd.Series(
[entry["updated_at"] for entry in entries], dtype="datetime64[ns]"
), # check if it's working as expected
}
)
# ^ does not seem optimal at all, but I get the types right
def get_cache_entries_df(dataset: str, cache_kinds: Optional[list[str]] = None) -> pd.DataFrame:
filters = {}
if cache_kinds:
filters["kind__in"] = cache_kinds
return _get_df(
[
{
"kind": response.kind,
"dataset": response.dataset,
"config": response.config,
"split": response.split,
"http_status": response.http_status,
"error_code": response.error_code,
"dataset_git_revision": response.dataset_git_revision,
"job_runner_version": response.job_runner_version,
"progress": response.progress,
"updated_at": response.updated_at,
}
for response in CachedResponseDocument.objects(dataset=dataset, **filters).only(
"kind",
"dataset",
"config",
"split",
"http_status",
"error_code",
"job_runner_version",
"dataset_git_revision",
"progress",
"updated_at",
)
]
)
def get_cache_count_for_dataset(dataset: str) -> int:
return CachedResponseDocument.objects(dataset=dataset).count()
def has_some_cache(dataset: str) -> bool:
return get_cache_count_for_dataset(dataset) > 0
def fetch_names(
dataset: str, config: Optional[str], cache_kinds: list[str], names_field: str, name_field: str
) -> list[str]:
"""
Fetch a list of names from the cache database.
If no entry is found in cache, return an empty list. Exceptions are silently caught.
Args:
dataset (str): The dataset name.
config (Optional[str]): The config name. Only needed for split names.
cache_kinds (list[str]): The cache kinds to fetch, eg ["dataset-config-names"],
or ["config-split-names-from-streaming", "config-split-names-from-info"].
names_field (str): The name of the field containing the list of names, eg: "config_names", or "splits".
name_field (str): The name of the field containing the name, eg: "config", or "split".
Returns:
list[str]: The list of names.
"""
try:
names = []
best_response = get_best_response(kinds=cache_kinds, dataset=dataset, config=config)
for name_item in best_response.response["content"][names_field]:
name = name_item[name_field]
if not isinstance(name, str):
raise ValueError(f"Invalid name: {name}, type should be str, got: {type(name)}")
names.append(name)
return names
except Exception:
return []
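# A minimal usage sketch (a hypothetical helper): fetch the config names of a dataset,
# then the split names of its first config, reusing the cache kinds cited in the
# fetch_names docstring above.
def _fetch_config_and_split_names(dataset: str) -> tuple[list[str], list[str]]:
    config_names = fetch_names(
        dataset=dataset,
        config=None,
        cache_kinds=["dataset-config-names"],
        names_field="config_names",
        name_field="config",
    )
    split_names = fetch_names(
        dataset=dataset,
        config=config_names[0] if config_names else None,
        cache_kinds=["config-split-names-from-streaming", "config-split-names-from-info"],
        names_field="splits",
        name_field="split",
    )
    return config_names, split_names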
@dataclass
class DatasetWithRevision:
dataset: str
revision: Optional[str]
def get_datasets_with_last_updated_kind(kind: str, days: int) -> list[DatasetWithRevision]:
"""
Get the list of datasets for which an artifact of some kind has been updated in the last days.
Args:
kind (str): The kind of the cache entries.
days (int): The number of days to look back.
Returns:
list[DatasetWithRevision]: The list of datasets, with the git revision of the last artifact.
"""
pipeline = [
{"$match": {"kind": kind, "http_status": HTTPStatus.OK, "updated_at": {"$gt": get_datetime(days=days)}}},
{"$sort": {"updated_at": 1}},
{"$group": {"_id": "$dataset", "revision": {"$last": "$dataset_git_revision"}}},
{"$project": {"dataset": "$_id", "_id": 0, "revision": 1}},
]
return list(
DatasetWithRevision(dataset=response["dataset"], revision=response["revision"])
for response in CachedResponseDocument.objects(
kind=kind, http_status=HTTPStatus.OK, updated_at__gt=get_datetime(days=days)
).aggregate(pipeline)
)
# only for the tests
def _clean_cache_database() -> None:
CachedResponseDocument.drop_collection() # type: ignore
CacheTotalMetricDocument.drop_collection() # type: ignore
| datasets-server-main | libs/libcommon/src/libcommon/simple_cache.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
import logging
from dataclasses import dataclass, field
from typing import Optional
import pandas as pd
from libcommon.processing_graph import Artifact, ProcessingGraph
from libcommon.prometheus import StepProfiler
from libcommon.simple_cache import CacheEntryMetadata, fetch_names
from libcommon.utils import get_datetime
# TODO: assets, cached_assets, parquet files
@dataclass
class JobState:
"""The state of a job for a given input."""
dataset: str
revision: str
config: Optional[str]
split: Optional[str]
job_type: str
pending_jobs_df: pd.DataFrame
valid_pending_jobs_df: pd.DataFrame = field(
init=False
) # contains at most one row (but the logic does not depend on it)
is_in_process: bool = field(init=False)
def __post_init__(self) -> None:
self.valid_pending_jobs_df = self.pending_jobs_df.sort_values(
["status", "priority", "created_at"], ascending=[False, False, True]
).head(1)
# ^ only keep the first valid job, if any, in order of priority
self.is_in_process = not self.valid_pending_jobs_df.empty
@dataclass
class CacheState:
"""The state of a cache entry for a given input."""
dataset: str
config: Optional[str]
split: Optional[str]
cache_kind: str
cache_entries_df: pd.DataFrame
job_runner_version: int
error_codes_to_retry: Optional[list[str]] = None
cache_entry_metadata: Optional[CacheEntryMetadata] = field(init=False)
exists: bool = field(init=False)
is_success: bool = field(init=False)
def __post_init__(self) -> None:
if len(self.cache_entries_df) > 1:
logging.warning(
f"More than one cache entry found for {self.dataset}, {self.config}, {self.split}, {self.cache_kind}"
)
if len(self.cache_entries_df) == 0:
self.cache_entry_metadata = None
else:
entry = self.cache_entries_df.iloc[0]
self.cache_entry_metadata = CacheEntryMetadata(
http_status=entry["http_status"],
error_code=None if entry["error_code"] is pd.NA else entry["error_code"],
job_runner_version=None if entry["job_runner_version"] is pd.NA else entry["job_runner_version"],
dataset_git_revision=None if entry["dataset_git_revision"] is pd.NA else entry["dataset_git_revision"],
updated_at=entry["updated_at"],
progress=None if entry["progress"] is pd.NA else entry["progress"],
)
"""Whether the cache entry exists."""
self.exists = self.cache_entry_metadata is not None
self.is_success = self.cache_entry_metadata is not None and self.cache_entry_metadata["http_status"] < 400
def is_empty(self) -> bool:
return self.cache_entry_metadata is None
def is_error_to_retry(self) -> bool:
return (
self.error_codes_to_retry is not None
and self.cache_entry_metadata is not None
and (
self.cache_entry_metadata["http_status"] >= 400
and self.cache_entry_metadata["error_code"] in self.error_codes_to_retry
)
)
def is_old(self, days: int) -> bool:
if self.cache_entry_metadata is None or days <= 0:
return False
return self.cache_entry_metadata["updated_at"] < get_datetime(days).replace(tzinfo=None)
# ^ we remove the timezone to avoid comparing timezone-aware and timezone-naive datetimes
# could be done better, but we don't need more precision
def is_older_than(self, other: "CacheState") -> bool:
if self.cache_entry_metadata is None or other.cache_entry_metadata is None:
return False
return self.cache_entry_metadata["updated_at"] < other.cache_entry_metadata["updated_at"]
def is_git_revision_different_from(self, git_revision: Optional[str]) -> bool:
return self.cache_entry_metadata is None or self.cache_entry_metadata["dataset_git_revision"] != git_revision
def is_job_runner_obsolete(self) -> bool:
if self.cache_entry_metadata is None:
return False
if self.cache_entry_metadata["job_runner_version"] is None:
return True
return self.cache_entry_metadata["job_runner_version"] < self.job_runner_version
@dataclass
class ArtifactState(Artifact):
"""The state of an artifact."""
pending_jobs_df: pd.DataFrame
cache_entries_df: pd.DataFrame
error_codes_to_retry: Optional[list[str]] = None
job_state: JobState = field(init=False)
cache_state: CacheState = field(init=False)
def __post_init__(self) -> None:
super().__post_init__()
self.job_state = JobState(
job_type=self.processing_step.job_type,
dataset=self.dataset,
revision=self.revision,
config=self.config,
split=self.split,
pending_jobs_df=self.pending_jobs_df,
)
self.cache_state = CacheState(
cache_kind=self.processing_step.cache_kind,
dataset=self.dataset,
config=self.config,
split=self.split,
job_runner_version=self.processing_step.job_runner_version,
error_codes_to_retry=self.error_codes_to_retry,
cache_entries_df=self.cache_entries_df,
)
@dataclass
class SplitState:
"""The state of a split."""
dataset: str
revision: str
config: str
split: str
processing_graph: ProcessingGraph
pending_jobs_df: pd.DataFrame
cache_entries_df: pd.DataFrame
error_codes_to_retry: Optional[list[str]] = None
artifact_state_by_step: dict[str, ArtifactState] = field(init=False)
def __post_init__(self) -> None:
self.artifact_state_by_step = {
processing_step.name: ArtifactState(
processing_step=processing_step,
dataset=self.dataset,
revision=self.revision,
config=self.config,
split=self.split,
error_codes_to_retry=self.error_codes_to_retry,
pending_jobs_df=self.pending_jobs_df[self.pending_jobs_df["type"] == processing_step.job_type],
cache_entries_df=self.cache_entries_df[self.cache_entries_df["kind"] == processing_step.cache_kind],
)
for processing_step in self.processing_graph.get_input_type_processing_steps(input_type="split")
}
@dataclass
class ConfigState:
"""The state of a config."""
dataset: str
revision: str
config: str
processing_graph: ProcessingGraph
pending_jobs_df: pd.DataFrame
cache_entries_df: pd.DataFrame
error_codes_to_retry: Optional[list[str]] = None
split_names: list[str] = field(init=False)
split_states: list[SplitState] = field(init=False)
artifact_state_by_step: dict[str, ArtifactState] = field(init=False)
def __post_init__(self) -> None:
with StepProfiler(
method="ConfigState.__post_init__",
step="get_config_level_artifact_states",
context=f"dataset={self.dataset},config={self.config}",
):
self.artifact_state_by_step = {
processing_step.name: ArtifactState(
processing_step=processing_step,
dataset=self.dataset,
revision=self.revision,
config=self.config,
split=None,
error_codes_to_retry=self.error_codes_to_retry,
pending_jobs_df=self.pending_jobs_df[
(self.pending_jobs_df["split"].isnull())
& (self.pending_jobs_df["type"] == processing_step.job_type)
],
cache_entries_df=self.cache_entries_df[
self.cache_entries_df["kind"] == processing_step.cache_kind
],
)
for processing_step in self.processing_graph.get_input_type_processing_steps(input_type="config")
}
with StepProfiler(
method="ConfigState.__post_init__",
step="get_split_names",
context=f"dataset={self.dataset},config={self.config}",
):
self.split_names = fetch_names(
dataset=self.dataset,
config=self.config,
cache_kinds=[
processing_step.cache_kind
for processing_step in self.processing_graph.get_config_split_names_processing_steps()
],
names_field="splits",
name_field="split",
            ) # Note that we use the cached content even if the revision is different (i.e. maybe obsolete)
with StepProfiler(
method="ConfigState.__post_init__",
step="get_split_states",
context=f"dataset={self.dataset},config={self.config}",
):
self.split_states = [
SplitState(
self.dataset,
self.revision,
self.config,
split_name,
processing_graph=self.processing_graph,
error_codes_to_retry=self.error_codes_to_retry,
pending_jobs_df=self.pending_jobs_df[self.pending_jobs_df["split"] == split_name],
cache_entries_df=self.cache_entries_df[self.cache_entries_df["split"] == split_name],
)
for split_name in self.split_names
]
@dataclass
class DatasetState:
"""The state of a dataset."""
dataset: str
revision: str
processing_graph: ProcessingGraph
pending_jobs_df: pd.DataFrame
cache_entries_df: pd.DataFrame
error_codes_to_retry: Optional[list[str]] = None
config_names: list[str] = field(init=False)
config_states: list[ConfigState] = field(init=False)
artifact_state_by_step: dict[str, ArtifactState] = field(init=False)
def __post_init__(self) -> None:
with StepProfiler(
method="DatasetState.__post_init__",
step="get_dataset_level_artifact_states",
context=f"dataset={self.dataset}",
):
self.artifact_state_by_step = {
processing_step.name: ArtifactState(
processing_step=processing_step,
dataset=self.dataset,
revision=self.revision,
config=None,
split=None,
error_codes_to_retry=self.error_codes_to_retry,
pending_jobs_df=self.pending_jobs_df[
(self.pending_jobs_df["revision"] == self.revision)
& (self.pending_jobs_df["config"].isnull())
& (self.pending_jobs_df["split"].isnull())
& (self.pending_jobs_df["type"] == processing_step.job_type)
],
cache_entries_df=self.cache_entries_df[
(self.cache_entries_df["kind"] == processing_step.cache_kind)
& (self.cache_entries_df["config"].isnull())
& (self.cache_entries_df["split"].isnull())
],
)
for processing_step in self.processing_graph.get_input_type_processing_steps(input_type="dataset")
}
with StepProfiler(
method="DatasetState.__post_init__",
step="get_config_names",
context=f"dataset={self.dataset}",
):
self.config_names = fetch_names(
dataset=self.dataset,
config=None,
cache_kinds=[
step.cache_kind for step in self.processing_graph.get_dataset_config_names_processing_steps()
],
names_field="config_names",
name_field="config",
            ) # Note that we use the cached content even if the revision is different (i.e. maybe obsolete)
with StepProfiler(
method="DatasetState.__post_init__",
step="get_config_states",
context=f"dataset={self.dataset}",
):
self.config_states = [
ConfigState(
dataset=self.dataset,
revision=self.revision,
config=config_name,
processing_graph=self.processing_graph,
error_codes_to_retry=self.error_codes_to_retry,
pending_jobs_df=self.pending_jobs_df[
(self.pending_jobs_df["revision"] == self.revision)
& (self.pending_jobs_df["config"] == config_name)
],
cache_entries_df=self.cache_entries_df[self.cache_entries_df["config"] == config_name],
)
for config_name in self.config_names
]
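# A minimal wiring sketch (a hypothetical helper): build a DatasetState from the cache
# entries of a dataset; the pending-jobs dataframe is supplied by the caller (it usually
# comes from the queue) and must contain the "type", "revision", "config", "split",
# "status", "priority" and "created_at" columns used above.
def _build_dataset_state(
    dataset: str, revision: str, processing_graph: ProcessingGraph, pending_jobs_df: pd.DataFrame
) -> DatasetState:
    from libcommon.simple_cache import get_cache_entries_df

    return DatasetState(
        dataset=dataset,
        revision=revision,
        processing_graph=processing_graph,
        pending_jobs_df=pending_jobs_df,
        cache_entries_df=get_cache_entries_df(dataset=dataset),
    )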
@dataclass
class FirstStepsDatasetState(DatasetState):
"""The state of the first dataset steps."""
def __post_init__(self) -> None:
with StepProfiler(
method="FirstStepsDatasetState.__post_init__",
step="get_dataset_level_artifact_states",
context=f"dataset={self.dataset}",
):
self.artifact_state_by_step = {
processing_step.name: ArtifactState(
processing_step=processing_step,
dataset=self.dataset,
revision=self.revision,
config=None,
split=None,
error_codes_to_retry=self.error_codes_to_retry,
pending_jobs_df=self.pending_jobs_df[
(self.pending_jobs_df["revision"] == self.revision)
& (self.pending_jobs_df["config"].isnull())
& (self.pending_jobs_df["split"].isnull())
& (self.pending_jobs_df["type"] == processing_step.job_type)
],
cache_entries_df=self.cache_entries_df[
(self.cache_entries_df["kind"] == processing_step.cache_kind)
& (self.cache_entries_df["config"].isnull())
& (self.cache_entries_df["split"].isnull())
],
)
for processing_step in self.processing_graph.get_first_processing_steps()
}
self.config_names = []
self.config_states = []
| datasets-server-main | libs/libcommon/src/libcommon/state.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
from functools import partial
from typing import Optional
from datasets import Features
from tqdm.contrib.concurrent import thread_map
from libcommon.storage import StrPath
from libcommon.utils import Row
from libcommon.viewer_utils.features import get_cell_value
def _transform_row(
row_idx_and_row: tuple[int, Row],
dataset: str,
config: str,
split: str,
features: Features,
assets_base_url: str,
assets_directory: StrPath,
offset: int,
row_idx_column: Optional[str],
) -> Row:
row_idx, row = row_idx_and_row
return {
featureName: get_cell_value(
dataset=dataset,
config=config,
split=split,
row_idx=offset + row_idx if row_idx_column is None else row[row_idx_column],
cell=row[featureName] if featureName in row else None,
featureName=featureName,
fieldType=fieldType,
assets_base_url=assets_base_url,
assets_directory=assets_directory,
)
for (featureName, fieldType) in features.items()
}
def transform_rows(
dataset: str,
config: str,
split: str,
rows: list[Row],
features: Features,
cached_assets_base_url: str,
cached_assets_directory: StrPath,
offset: int,
row_idx_column: Optional[str],
) -> list[Row]:
fn = partial(
_transform_row,
dataset=dataset,
config=config,
split=split,
features=features,
assets_base_url=cached_assets_base_url,
assets_directory=cached_assets_directory,
offset=offset,
row_idx_column=row_idx_column,
)
if "Audio(" in str(features):
# use multithreading to parallelize audio files processing
# (we use pydub which might spawn one ffmpeg process per conversion, which releases the GIL)
desc = f"transform_rows(audio) for {dataset}"
return thread_map(fn, enumerate(rows), desc=desc, total=len(rows)) # type: ignore
else:
return [fn((row_idx, row)) for row_idx, row in enumerate(rows)]
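# A minimal usage sketch (the dataset coordinates and the assets base URL are
# hypothetical; the rows, features and directory are supplied by the caller):
def _transform_first_rows_page(rows: list[Row], features: Features, cached_assets_directory: StrPath) -> list[Row]:
    return transform_rows(
        dataset="user/my_dataset",
        config="default",
        split="train",
        rows=rows,
        features=features,
        cached_assets_base_url="https://example.com/cached-assets",
        cached_assets_directory=cached_assets_directory,
        offset=0,
        row_idx_column=None,
    )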
| datasets-server-main | libs/libcommon/src/libcommon/rows_utils.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import contextlib
import os
from collections.abc import Generator
from os import makedirs
from pathlib import Path
from tempfile import NamedTemporaryFile
from typing import TypedDict
from PIL import Image # type: ignore
from pydub import AudioSegment # type:ignore
from libcommon.storage import StrPath, remove_dir
DATASET_SEPARATOR = "--"
ASSET_DIR_MODE = 0o755
DATASETS_SERVER_MDATE_FILENAME = ".dss"
SUPPORTED_AUDIO_EXTENSION_TO_MEDIA_TYPE = {".wav": "audio/wav", ".mp3": "audio/mpeg"}
def create_asset_dir(
dataset: str, config: str, split: str, row_idx: int, column: str, assets_directory: StrPath
) -> tuple[Path, str]:
dir_path = Path(assets_directory).resolve() / dataset / DATASET_SEPARATOR / config / split / str(row_idx) / column
url_dir_path = f"{dataset}/{DATASET_SEPARATOR}/{config}/{split}/{row_idx}/{column}"
makedirs(dir_path, ASSET_DIR_MODE, exist_ok=True)
return dir_path, url_dir_path
def delete_asset_dir(dataset: str, directory: StrPath) -> None:
dir_path = Path(directory).resolve() / dataset
remove_dir(dir_path)
def glob_rows_in_assets_dir(
dataset: str,
assets_directory: StrPath,
) -> Generator[Path, None, None]:
return Path(assets_directory).resolve().glob(os.path.join(dataset, DATASET_SEPARATOR, "*", "*", "*"))
def update_directory_modification_date(path: Path) -> None:
if path.is_dir():
# update the directory's last modified date
temporary_file = path / DATASETS_SERVER_MDATE_FILENAME
if temporary_file.is_dir():
raise ValueError(f"Cannot create temporary file {temporary_file} in {path}")
temporary_file.touch(exist_ok=True)
if temporary_file.is_file():
with contextlib.suppress(FileNotFoundError):
temporary_file.unlink()
def update_last_modified_date_of_rows_in_assets_dir(
dataset: str,
config: str,
split: str,
offset: int,
length: int,
assets_directory: StrPath,
) -> None:
update_directory_modification_date(Path(assets_directory).resolve() / dataset.split("/")[0])
row_dirs_path = Path(assets_directory).resolve() / dataset / DATASET_SEPARATOR / config / split
for row_idx in range(offset, offset + length):
update_directory_modification_date(row_dirs_path / str(row_idx))
class ImageSource(TypedDict):
src: str
height: int
width: int
def create_image_file(
dataset: str,
config: str,
split: str,
row_idx: int,
column: str,
filename: str,
image: Image.Image,
assets_base_url: str,
assets_directory: StrPath,
overwrite: bool = True,
) -> ImageSource:
dir_path, url_dir_path = create_asset_dir(
dataset=dataset,
config=config,
split=split,
row_idx=row_idx,
column=column,
assets_directory=assets_directory,
)
makedirs(dir_path, ASSET_DIR_MODE, exist_ok=True)
file_path = dir_path / filename
if overwrite or not file_path.exists():
image.save(file_path)
return {
"src": f"{assets_base_url}/{url_dir_path}/{filename}",
"height": image.height,
"width": image.width,
}
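# A minimal usage sketch (the dataset coordinates are hypothetical): store a PIL image
# for row 0 of an "image" column and get back its public URL and dimensions.
def _save_example_image(image: Image.Image, assets_base_url: str, assets_directory: StrPath) -> ImageSource:
    return create_image_file(
        dataset="user/my_dataset",
        config="default",
        split="train",
        row_idx=0,
        column="image",
        filename="image.jpg",
        image=image,
        assets_base_url=assets_base_url,
        assets_directory=assets_directory,
    )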
class AudioSource(TypedDict):
src: str
type: str
def create_audio_file(
dataset: str,
config: str,
split: str,
row_idx: int,
column: str,
audio_file_bytes: bytes,
audio_file_extension: str,
assets_base_url: str,
filename: str,
assets_directory: StrPath,
overwrite: bool = True,
) -> list[AudioSource]:
dir_path, url_dir_path = create_asset_dir(
dataset=dataset,
config=config,
split=split,
row_idx=row_idx,
column=column,
assets_directory=assets_directory,
)
makedirs(dir_path, ASSET_DIR_MODE, exist_ok=True)
file_path = dir_path / filename
if file_path.suffix not in SUPPORTED_AUDIO_EXTENSION_TO_MEDIA_TYPE:
raise ValueError(
f"Audio format {file_path.suffix} is not supported. Supported formats are"
f" {','.join(SUPPORTED_AUDIO_EXTENSION_TO_MEDIA_TYPE)}."
)
media_type = SUPPORTED_AUDIO_EXTENSION_TO_MEDIA_TYPE[file_path.suffix]
if overwrite or not file_path.exists():
if audio_file_extension == file_path.suffix:
with open(file_path, "wb") as f:
f.write(audio_file_bytes)
else: # we need to convert
# might spawn a process to convert the audio file using ffmpeg
with NamedTemporaryFile("wb", suffix=audio_file_extension) as tmpfile:
tmpfile.write(audio_file_bytes)
segment: AudioSegment = AudioSegment.from_file(tmpfile.name)
segment.export(file_path, format=file_path.suffix[1:])
return [
{"src": f"{assets_base_url}/{url_dir_path}/{filename}", "type": media_type},
]
| datasets-server-main | libs/libcommon/src/libcommon/viewer_utils/asset.py |
 | datasets-server-main | libs/libcommon/src/libcommon/viewer_utils/__init__.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import json
import os
from io import BytesIO
from typing import Any, Optional, Union
from zlib import adler32
import numpy as np
import soundfile # type: ignore
from datasets import (
Array2D,
Array3D,
Array4D,
Array5D,
Audio,
ClassLabel,
Features,
Image,
Sequence,
Translation,
TranslationVariableLanguages,
Value,
)
from datasets.features.features import FeatureType, _visit
from PIL import Image as PILImage # type: ignore
from libcommon.storage import StrPath
from libcommon.utils import FeatureItem
from libcommon.viewer_utils.asset import create_audio_file, create_image_file
def append_hash_suffix(string: str, json_path: Optional[list[Union[str, int]]] = None) -> str:
"""
    Append a hash of the json path to the string.
Args:
string (``str``): The string to append the hash to.
json_path (``list(str|int)``): the json path, which is a list of keys and indices
Returns:
the string suffixed with the hash of the json path
Details:
- no suffix if the list is empty
- converted to hexadecimal to make the hash shorter
- the 0x prefix is removed
"""
return f"{string}-{hex(adler32(json.dumps(json_path).encode()))[2:]}" if json_path else string
def image(
dataset: str,
config: str,
split: str,
row_idx: int,
value: Any,
featureName: str,
assets_base_url: str,
assets_directory: StrPath,
json_path: Optional[list[Union[str, int]]] = None,
overwrite: bool = True,
) -> Any:
if value is None:
return None
if isinstance(value, dict) and value.get("bytes"):
value = PILImage.open(BytesIO(value["bytes"]))
elif (
isinstance(value, dict)
and "path" in value
and isinstance(value["path"], str)
and os.path.exists(value["path"])
):
value = PILImage.open(value["path"])
if not isinstance(value, PILImage.Image):
raise TypeError(
"Image cell must be a PIL image or an encoded dict of an image, "
f"but got {str(value)[:300]}{'...' if len(str(value)) > 300 else ''}"
)
# attempt to generate one of the supported formats; if unsuccessful, throw an error
for ext in [".jpg", ".png"]:
try:
return create_image_file(
dataset=dataset,
config=config,
split=split,
row_idx=row_idx,
column=featureName,
filename=f"{append_hash_suffix('image', json_path)}{ext}",
image=value,
assets_base_url=assets_base_url,
assets_directory=assets_directory,
overwrite=overwrite,
)
except OSError:
# if wrong format, try the next one, see https://github.com/huggingface/datasets-server/issues/191
# OSError: cannot write mode P as JPEG
# OSError: cannot write mode RGBA as JPEG
continue
raise ValueError("Image cannot be written as JPEG or PNG")
def audio(
dataset: str,
config: str,
split: str,
row_idx: int,
value: Any,
featureName: str,
assets_base_url: str,
assets_directory: StrPath,
json_path: Optional[list[Union[str, int]]] = None,
overwrite: bool = True,
) -> Any:
if value is None:
return None
if not isinstance(value, dict):
raise TypeError(
"Audio cell must be an encoded dict of an audio sample, "
f"but got {str(value)[:300]}{'...' if len(str(value)) > 300 else ''}"
)
if "path" in value and isinstance(value["path"], str):
audio_file_extension = os.path.splitext(value["path"])[1]
if not audio_file_extension:
raise ValueError(
f"An audio sample should have a 'path' with a valid extension but got '{audio_file_extension}'."
)
elif "array" in value:
audio_file_extension = ".wav"
else:
raise ValueError(
"An audio sample should have 'path' and 'bytes' (or 'array' and 'sampling_rate') but got"
f" {','.join(value)}."
)
if "bytes" in value and isinstance(value["bytes"], bytes):
audio_file_bytes = value["bytes"]
elif "path" in value and isinstance(value["path"], str) and os.path.exists(value["path"]):
with open(value["path"], "rb") as f:
audio_file_bytes = f.read()
elif (
"array" in value
and isinstance(value["array"], np.ndarray)
and "sampling_rate" in value
and isinstance(value["sampling_rate"], int)
):
buffer = BytesIO()
soundfile.write(buffer, value["array"], value["sampling_rate"], format="wav")
audio_file_bytes = buffer.read()
else:
raise ValueError(
"An audio sample should have 'path' and 'bytes' (or 'array' and 'sampling_rate') but got"
f" {','.join(value)}."
)
# convert to wav if the file is not wav or mp3 already
ext = audio_file_extension if audio_file_extension in [".wav", ".mp3"] else ".wav"
# this function can raise, we don't catch it
return create_audio_file(
dataset=dataset,
config=config,
split=split,
row_idx=row_idx,
column=featureName,
audio_file_bytes=audio_file_bytes,
audio_file_extension=audio_file_extension,
assets_base_url=assets_base_url,
filename=f"{append_hash_suffix('audio', json_path)}{ext}",
assets_directory=assets_directory,
overwrite=overwrite,
)
def get_cell_value(
dataset: str,
config: str,
split: str,
row_idx: int,
cell: Any,
featureName: str,
fieldType: Any,
assets_base_url: str,
assets_directory: StrPath,
json_path: Optional[list[Union[str, int]]] = None,
overwrite: bool = True,
) -> Any:
# always allow None values in the cells
if cell is None:
return cell
if isinstance(fieldType, Image):
return image(
dataset=dataset,
config=config,
split=split,
row_idx=row_idx,
value=cell,
featureName=featureName,
assets_base_url=assets_base_url,
assets_directory=assets_directory,
json_path=json_path,
overwrite=overwrite,
)
elif isinstance(fieldType, Audio):
return audio(
dataset=dataset,
config=config,
split=split,
row_idx=row_idx,
value=cell,
featureName=featureName,
assets_base_url=assets_base_url,
assets_directory=assets_directory,
json_path=json_path,
overwrite=overwrite,
)
elif isinstance(fieldType, list):
if type(cell) != list:
raise TypeError("list cell must be a list.")
if len(fieldType) != 1:
raise TypeError("the feature type should be a 1-element list.")
subFieldType = fieldType[0]
return [
get_cell_value(
dataset=dataset,
config=config,
split=split,
row_idx=row_idx,
cell=subCell,
featureName=featureName,
fieldType=subFieldType,
assets_base_url=assets_base_url,
assets_directory=assets_directory,
json_path=json_path + [idx] if json_path else [idx],
overwrite=overwrite,
)
for (idx, subCell) in enumerate(cell)
]
elif isinstance(fieldType, Sequence):
if type(cell) == list:
if fieldType.length >= 0 and len(cell) != fieldType.length:
raise TypeError("the cell length should be the same as the Sequence length.")
return [
get_cell_value(
dataset=dataset,
config=config,
split=split,
row_idx=row_idx,
cell=subCell,
featureName=featureName,
fieldType=fieldType.feature,
assets_base_url=assets_base_url,
assets_directory=assets_directory,
json_path=json_path + [idx] if json_path else [idx],
overwrite=overwrite,
)
for (idx, subCell) in enumerate(cell)
]
# if the internal feature of the Sequence is a dict, then the value will automatically
# be converted into a dictionary of lists. See
# https://huggingface.co/docs/datasets/v2.5.1/en/package_reference/main_classes#datasets.Features
if type(cell) == dict:
if any((type(v) != list) or (k not in fieldType.feature) for k, v in cell.items()):
raise TypeError("The value of a Sequence of dicts should be a dictionary of lists.")
return {
key: [
get_cell_value(
dataset=dataset,
config=config,
split=split,
row_idx=row_idx,
cell=subCellItem,
featureName=featureName,
fieldType=fieldType.feature[key],
assets_base_url=assets_base_url,
assets_directory=assets_directory,
json_path=json_path + [key, idx] if json_path else [key, idx],
overwrite=overwrite,
)
for (idx, subCellItem) in enumerate(subCell)
]
for (key, subCell) in cell.items()
}
raise TypeError("Sequence cell must be a list or a dict.")
elif isinstance(fieldType, dict):
if type(cell) != dict:
raise TypeError("dict cell must be a dict.")
return {
key: get_cell_value(
dataset=dataset,
config=config,
split=split,
row_idx=row_idx,
cell=subCell,
featureName=featureName,
fieldType=fieldType[key],
assets_base_url=assets_base_url,
assets_directory=assets_directory,
json_path=json_path + [key] if json_path else [key],
overwrite=overwrite,
)
for (key, subCell) in cell.items()
}
elif isinstance(
fieldType,
(
Value,
ClassLabel,
Array2D,
Array3D,
Array4D,
Array5D,
Translation,
TranslationVariableLanguages,
),
):
return cell
else:
raise TypeError("could not determine the type of the data cell.")
# in JSON, dicts do not carry any order, so we need to return a list
#
# > An object is an *unordered* collection of zero or more name/value pairs, where a name is a string and a value
# is a string, number, boolean, null, object, or array.
# > An array is an *ordered* sequence of zero or more values.
# > The terms "object" and "array" come from the conventions of JavaScript.
# from https://stackoverflow.com/a/7214312/7351594 / https://www.rfc-editor.org/rfc/rfc7159.html
def to_features_list(features: Features) -> list[FeatureItem]:
features_dict = features.to_dict()
return [
{
"feature_idx": idx,
"name": name,
"type": features_dict[name],
}
for idx, name in enumerate(features)
]
def get_supported_unsupported_columns(
features: Features,
unsupported_features: list[FeatureType] = [],
) -> tuple[list[str], list[str]]:
supported_columns, unsupported_columns = [], []
for column, feature in features.items():
str_column = str(column)
supported = True
def classify(feature: FeatureType) -> None:
nonlocal supported
for unsupported_feature in unsupported_features:
if type(unsupported_feature) == type(feature) == Value:
if unsupported_feature.dtype == feature.dtype:
supported = False
elif type(unsupported_feature) == type(feature):
supported = False
_visit(feature, classify)
if supported:
supported_columns.append(str_column)
else:
unsupported_columns.append(str_column)
return supported_columns, unsupported_columns
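# A minimal usage sketch: keep only the supported columns of a Features object, treating
# binary values as unsupported (the choice of unsupported feature types is illustrative):
def _drop_binary_columns(features: Features) -> list[str]:
    supported_columns, _unsupported_columns = get_supported_unsupported_columns(
        features,
        unsupported_features=[Value("binary")],
    )
    return supported_columns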
| datasets-server-main | libs/libcommon/src/libcommon/viewer_utils/features.py |
from os import makedirs
from pathlib import Path
import pyarrow.parquet as pq
from libcommon.storage import StrPath
DATASET_SEPARATOR = "--"
PARQUET_METADATA_DIR_MODE = 0o755
def create_parquet_metadata_dir(
dataset: str, config: str, split: str, parquet_metadata_directory: StrPath
) -> tuple[Path, str]:
dir_path = Path(parquet_metadata_directory).resolve() / dataset / DATASET_SEPARATOR / config / split
parquet_metadata_dir_subpath = f"{dataset}/{DATASET_SEPARATOR}/{config}/{split}"
makedirs(dir_path, PARQUET_METADATA_DIR_MODE, exist_ok=True)
return dir_path, parquet_metadata_dir_subpath
def create_parquet_metadata_file(
dataset: str,
config: str,
split: str,
parquet_file_metadata: pq.FileMetaData,
filename: str,
parquet_metadata_directory: StrPath,
overwrite: bool = True,
) -> str:
dir_path, parquet_metadata_dir_subpath = create_parquet_metadata_dir(
dataset=dataset,
config=config,
split=split,
parquet_metadata_directory=parquet_metadata_directory,
)
parquet_metadata_file_path = dir_path / filename
if overwrite or not parquet_metadata_file_path.exists():
parquet_file_metadata.write_metadata_file(parquet_metadata_file_path)
parquet_metadata_subpath = f"{parquet_metadata_dir_subpath}/{filename}"
return parquet_metadata_subpath
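# A minimal usage sketch (the dataset coordinates and the parquet filename are
# hypothetical): read the metadata of a local parquet file and store it under the
# parquet metadata directory.
def _store_metadata_for_local_parquet_file(parquet_path: str, parquet_metadata_directory: StrPath) -> str:
    parquet_file_metadata = pq.read_metadata(parquet_path)
    return create_parquet_metadata_file(
        dataset="user/my_dataset",
        config="default",
        split="train",
        parquet_file_metadata=parquet_file_metadata,
        filename="0000.parquet",
        parquet_metadata_directory=parquet_metadata_directory,
    )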
| datasets-server-main | libs/libcommon/src/libcommon/viewer_utils/parquet_metadata.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
import os
import time
from pathlib import Path
from unittest.mock import patch
import pytest
from libcommon.storage import StrPath
from libapi.utils import clean_cached_assets
@pytest.mark.parametrize(
"n_rows,keep_most_recent_rows_number,keep_first_rows_number,max_cleaned_rows_number,expected_remaining_rows",
[
(8, 1, 1, 100, [0, 7]),
(8, 2, 2, 100, [0, 1, 6, 7]),
(8, 1, 1, 3, [0, 4, 5, 6, 7]),
],
)
def test_clean_cached_assets(
tmp_path: Path,
n_rows: int,
keep_most_recent_rows_number: int,
keep_first_rows_number: int,
max_cleaned_rows_number: int,
expected_remaining_rows: list[int],
) -> None:
cached_assets_directory = tmp_path / "cached-assets"
split_dir = cached_assets_directory / "ds/--/plain_text/train"
split_dir.mkdir(parents=True)
for i in range(n_rows):
(split_dir / str(i)).mkdir()
time.sleep(0.01)
def deterministic_glob_rows_in_assets_dir(
dataset: str,
assets_directory: StrPath,
) -> list[Path]:
return sorted(
list(Path(assets_directory).resolve().glob(os.path.join(dataset, "--", "*", "*", "*"))),
key=lambda p: int(p.name),
)
with patch("libapi.utils.glob_rows_in_assets_dir", deterministic_glob_rows_in_assets_dir):
clean_cached_assets(
"ds",
cached_assets_directory,
keep_most_recent_rows_number=keep_most_recent_rows_number,
keep_first_rows_number=keep_first_rows_number,
max_cleaned_rows_number=max_cleaned_rows_number,
)
remaining_rows = sorted(int(row_dir.name) for row_dir in split_dir.glob("*"))
assert remaining_rows == expected_remaining_rows
| datasets-server-main | libs/libapi/tests/test_utils.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
from pytest import fixture
from libapi.config import ApiConfig
@fixture(scope="session")
def hostname() -> str:
return "localhost"
@fixture(scope="session")
def port() -> int:
    return 8888
@fixture(scope="session")
def httpserver_listen_address(hostname: str, port: int) -> tuple[str, int]:
return (hostname, port)
@fixture(scope="session")
def hf_endpoint(hostname: str, port: int) -> str:
return f"http://{hostname}:{port}"
@fixture(scope="session")
def api_config(hf_endpoint: str) -> ApiConfig:
return ApiConfig.from_env(hf_endpoint=hf_endpoint)
@fixture(scope="session")
def hf_auth_path(api_config: ApiConfig) -> str:
return api_config.hf_auth_path
| datasets-server-main | libs/libapi/tests/conftest.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
| datasets-server-main | libs/libapi/tests/__init__.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import datetime
import time
from collections.abc import Mapping
from contextlib import nullcontext as does_not_raise
from typing import Any, Optional
import jwt
import pytest
import werkzeug.wrappers
from pytest_httpserver import HTTPServer
from starlette.datastructures import Headers
from starlette.requests import Request
from werkzeug.wrappers import Request as WerkzeugRequest
from werkzeug.wrappers import Response as WerkzeugResponse
from libapi.authentication import auth_check
from libapi.exceptions import (
AuthCheckHubRequestError,
ExternalAuthenticatedError,
ExternalUnauthenticatedError,
)
private_key = """-----BEGIN RSA PRIVATE KEY-----
MIIBOQIBAAJAZTmplhS/Jd73ycVut7TglMObheQqXM7RZYlwazLU4wpfIVIwOh9I
sCZGSgLyFq42KWIikKLEs/yqx3pRGfq+rwIDAQABAkAMyF9WCICq86Eu5bO5lynV
H26AVfPTjHp87AI6R00C7p9n8hO/DhHaHpc3InOSsXsw9d2hmz37jwwBFiwMHMMh
AiEAtbttHlIO+yO29oXw4P6+yO11lMy1UpT1sPVTnR9TXbUCIQCOl7Zuyy2ZY9ZW
pDhW91x/14uXjnLXPypgY9bcfggJUwIhAJQG1LzrzjQWRUPMmgZKuhBkC3BmxhM8
LlwzmCXVjEw5AiA7JnAFEb9+q82T71d3q/DxD0bWvb6hz5ASoBfXK2jGBQIgbaQp
h4Tk6UJuj1xgKNs75Pk3pG2tj8AQiuBk3l62vRU=
-----END RSA PRIVATE KEY-----"""
public_key = """-----BEGIN PUBLIC KEY-----
MFswDQYJKoZIhvcNAQEBBQADSgAwRwJAZTmplhS/Jd73ycVut7TglMObheQqXM7R
ZYlwazLU4wpfIVIwOh9IsCZGSgLyFq42KWIikKLEs/yqx3pRGfq+rwIDAQAB
-----END PUBLIC KEY-----"""
exp_ok = datetime.datetime.now().timestamp() + 1000
read_ok = True
algorithm_rs256 = "RS256"
dataset_public = "dataset_public"
dataset_protected_with_access = "dataset_protected_with_access"
dataset_protected_without_access = "dataset_protected_without_access"
dataset_inexistent = "dataset_inexistent"
dataset_throttled = "dataset_throttled"
cookie_ok = "cookie ok"
cookie_wrong = "cookie wrong"
api_token_ok = "api token ok"
api_token_wrong = "api token wrong"
def auth_callback(request: WerkzeugRequest) -> WerkzeugResponse:
"""Simulates the https://huggingface.co/api/datasets/%s/auth-check Hub API endpoint.
It returns:
- 200: if the user can access the dataset
- 401: if the user is not authenticated
- 403: if the user is authenticated but can't access the dataset
- 404: if the user is authenticated but the dataset doesn't exist
- 429: if the user is authenticated but the request is throttled
Args:
request (WerkzeugRequest): the request sent to the endpoint
Returns:
WerkzeugResponse: the response sent by the endpoint
"""
dataset = request.path.split("/")[-2]
if dataset == dataset_public:
# a public dataset always has read access
return WerkzeugResponse(status=200)
if request.headers.get("cookie") != cookie_ok and request.headers.get("authorization") != f"Bearer {api_token_ok}":
# the user is not authenticated
return WerkzeugResponse(status=401)
if dataset == dataset_protected_with_access:
# the user is authenticated and has access to the dataset
return WerkzeugResponse(status=200)
if dataset == dataset_protected_without_access:
# the user is authenticated but doesn't have access to the dataset
return WerkzeugResponse(status=403)
if dataset == dataset_inexistent:
# the user is authenticated but the dataset doesn't exist
return WerkzeugResponse(status=404)
if dataset == dataset_throttled:
# the user is authenticated but the request is throttled (too many requests)
return WerkzeugResponse(status=429)
raise RuntimeError(f"Unexpected dataset: {dataset}")
def test_no_external_auth_check() -> None:
assert auth_check(dataset_public)
def test_invalid_external_auth_check_url() -> None:
with pytest.raises(ValueError):
auth_check(dataset_public, external_auth_url="https://doesnotexist/")
def test_unreachable_external_auth_check_service() -> None:
with pytest.raises(AuthCheckHubRequestError):
auth_check(dataset_public, external_auth_url="https://doesnotexist/%s")
@pytest.mark.parametrize(
"status_code,expectation",
[
(200, does_not_raise()),
(401, pytest.raises(ExternalUnauthenticatedError)),
(403, pytest.raises(ExternalAuthenticatedError)),
(404, pytest.raises(ExternalAuthenticatedError)),
(429, pytest.raises(ValueError)),
],
)
def test_external_auth_responses_without_request(
httpserver: HTTPServer,
hf_endpoint: str,
hf_auth_path: str,
status_code: int,
expectation: Any,
) -> None:
dataset = "dataset"
external_auth_url = hf_endpoint + hf_auth_path
httpserver.expect_request(hf_auth_path % dataset).respond_with_data(status=status_code)
with expectation:
auth_check(dataset, external_auth_url=external_auth_url)
TIMEOUT_TIME = 0.2
def sleeping(_: werkzeug.wrappers.Request) -> werkzeug.wrappers.Response:
time.sleep(TIMEOUT_TIME)
return werkzeug.wrappers.Response(status=200)
@pytest.mark.parametrize(
"hf_timeout_seconds,expectation",
[
(TIMEOUT_TIME * 2, does_not_raise()),
(None, does_not_raise()),
(TIMEOUT_TIME / 2, pytest.raises(AuthCheckHubRequestError)),
],
)
def test_hf_timeout_seconds(
httpserver: HTTPServer,
hf_endpoint: str,
hf_auth_path: str,
hf_timeout_seconds: Optional[float],
expectation: Any,
) -> None:
dataset = "dataset"
external_auth_url = hf_endpoint + hf_auth_path
httpserver.expect_request(hf_auth_path % dataset).respond_with_handler(func=sleeping)
with expectation:
auth_check(dataset, external_auth_url=external_auth_url, hf_timeout_seconds=hf_timeout_seconds)
def create_request(headers: Mapping[str, str]) -> Request:
return Request(
{
"type": "http",
"path": "/some-path",
"headers": Headers(headers).raw,
"http_version": "1.1",
"method": "GET",
"scheme": "https",
"client": ("127.0.0.1", 8080),
"server": ("some.server", 443),
}
)
def get_jwt(dataset: str) -> str:
return jwt.encode(
{"sub": f"datasets/{dataset}", "read": read_ok, "exp": exp_ok}, private_key, algorithm=algorithm_rs256
)
def assert_auth_headers(
httpserver: HTTPServer,
hf_endpoint: str,
hf_auth_path: str,
dataset: str,
headers: Mapping[str, str],
expectation: Any,
) -> None:
external_auth_url = hf_endpoint + hf_auth_path
httpserver.expect_request(hf_auth_path % dataset).respond_with_handler(auth_callback)
with expectation:
auth_check(
dataset,
external_auth_url=external_auth_url,
request=create_request(headers=headers),
hf_jwt_public_keys=[public_key],
hf_jwt_algorithm=algorithm_rs256,
)
@pytest.mark.parametrize(
"headers,expectation",
[
({}, does_not_raise()),
({"Authorization": f"Bearer {api_token_wrong}"}, does_not_raise()),
({"Authorization": api_token_ok}, does_not_raise()),
({"Cookie": cookie_wrong}, does_not_raise()),
({"Authorization": f"Bearer {api_token_ok}"}, does_not_raise()),
({"Cookie": cookie_ok}, does_not_raise()),
({"X-Api-Key": get_jwt(dataset_public)}, does_not_raise()),
({"Authorization": f"Bearer {get_jwt(dataset_public)}"}, does_not_raise()),
],
)
def test_external_auth_service_dataset_public(
httpserver: HTTPServer,
hf_endpoint: str,
hf_auth_path: str,
headers: Mapping[str, str],
expectation: Any,
) -> None:
assert_auth_headers(httpserver, hf_endpoint, hf_auth_path, dataset_public, headers, expectation)
@pytest.mark.parametrize(
"headers,expectation",
[
({}, pytest.raises(ExternalUnauthenticatedError)),
({"Authorization": f"Bearer {api_token_wrong}"}, pytest.raises(ExternalUnauthenticatedError)),
({"Authorization": api_token_ok}, pytest.raises(ExternalUnauthenticatedError)),
({"Cookie": cookie_wrong}, pytest.raises(ExternalUnauthenticatedError)),
({"Authorization": f"Bearer {api_token_ok}"}, does_not_raise()),
({"Cookie": cookie_ok}, does_not_raise()),
({"X-Api-Key": get_jwt(dataset_protected_with_access)}, does_not_raise()),
({"Authorization": f"Bearer jwt:{get_jwt(dataset_protected_with_access)}"}, does_not_raise()),
],
)
def test_external_auth_service_dataset_protected_with_access(
httpserver: HTTPServer,
hf_endpoint: str,
hf_auth_path: str,
headers: Mapping[str, str],
expectation: Any,
) -> None:
assert_auth_headers(httpserver, hf_endpoint, hf_auth_path, dataset_protected_with_access, headers, expectation)
@pytest.mark.parametrize(
"headers,expectation",
[
({}, pytest.raises(ExternalUnauthenticatedError)),
({"Authorization": f"Bearer {api_token_wrong}"}, pytest.raises(ExternalUnauthenticatedError)),
({"Authorization": api_token_ok}, pytest.raises(ExternalUnauthenticatedError)),
({"Cookie": cookie_wrong}, pytest.raises(ExternalUnauthenticatedError)),
({"Authorization": f"Bearer {api_token_ok}"}, pytest.raises(ExternalAuthenticatedError)),
({"Cookie": cookie_ok}, pytest.raises(ExternalAuthenticatedError)),
({"X-Api-Key": get_jwt(dataset_protected_without_access)}, does_not_raise()),
({"Authorization": f"Bearer jwt:{get_jwt(dataset_protected_without_access)}"}, does_not_raise()),
],
)
def test_external_auth_service_dataset_protected_without_access(
httpserver: HTTPServer,
hf_endpoint: str,
hf_auth_path: str,
headers: Mapping[str, str],
expectation: Any,
) -> None:
assert_auth_headers(httpserver, hf_endpoint, hf_auth_path, dataset_protected_without_access, headers, expectation)
@pytest.mark.parametrize(
"headers,expectation",
[
({}, pytest.raises(ExternalUnauthenticatedError)),
({"Authorization": f"Bearer {api_token_wrong}"}, pytest.raises(ExternalUnauthenticatedError)),
({"Authorization": api_token_ok}, pytest.raises(ExternalUnauthenticatedError)),
({"Cookie": cookie_wrong}, pytest.raises(ExternalUnauthenticatedError)),
({"Authorization": f"Bearer {api_token_ok}"}, pytest.raises(ExternalAuthenticatedError)),
({"Cookie": cookie_ok}, pytest.raises(ExternalAuthenticatedError)),
({"X-Api-Key": get_jwt(dataset_inexistent)}, does_not_raise()),
({"Authorization": f"Bearer jwt:{get_jwt(dataset_inexistent)}"}, does_not_raise()),
],
)
def test_external_auth_service_dataset_inexistent(
httpserver: HTTPServer,
hf_endpoint: str,
hf_auth_path: str,
headers: Mapping[str, str],
expectation: Any,
) -> None:
assert_auth_headers(httpserver, hf_endpoint, hf_auth_path, dataset_inexistent, headers, expectation)
@pytest.mark.parametrize(
"headers,expectation",
[
({}, pytest.raises(ExternalUnauthenticatedError)),
({"Authorization": f"Bearer {api_token_wrong}"}, pytest.raises(ExternalUnauthenticatedError)),
({"Authorization": api_token_ok}, pytest.raises(ExternalUnauthenticatedError)),
({"Cookie": cookie_wrong}, pytest.raises(ExternalUnauthenticatedError)),
({"Authorization": f"Bearer {api_token_ok}"}, pytest.raises(ValueError)),
({"Cookie": cookie_ok}, pytest.raises(ValueError)),
({"X-Api-Key": get_jwt(dataset_throttled)}, does_not_raise()),
({"Authorization": f"Bearer jwt:{get_jwt(dataset_throttled)}"}, does_not_raise()),
],
)
def test_external_auth_service_dataset_throttled(
httpserver: HTTPServer,
hf_endpoint: str,
hf_auth_path: str,
headers: Mapping[str, str],
expectation: Any,
) -> None:
assert_auth_headers(httpserver, hf_endpoint, hf_auth_path, dataset_throttled, headers, expectation)
| datasets-server-main | libs/libapi/tests/test_authentication.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import datetime
from contextlib import nullcontext as does_not_raise
from typing import Any, Optional
from unittest.mock import patch
import jwt
import pytest
from ecdsa import Ed25519, SigningKey
from libapi.config import ApiConfig
from libapi.exceptions import (
JWTExpiredSignature,
JWTInvalidClaimRead,
JWTInvalidClaimSub,
JWTInvalidKeyOrAlgorithm,
JWTInvalidSignature,
JWTKeysError,
JWTMissingRequiredClaim,
)
from libapi.jwt_token import (
create_algorithm,
get_jwt_public_keys,
parse_jwt_public_key_json,
parse_jwt_public_key_pem,
validate_jwt,
)
algorithm_name_eddsa = "EdDSA"
algorithm_name_rs256 = "RS256"
algorithm_name_hs256 = "HS256"
algorithm_name_unknown = "unknown"
@pytest.mark.parametrize(
"algorithm_name,expectation",
[
(algorithm_name_eddsa, does_not_raise()),
(algorithm_name_rs256, does_not_raise()),
(algorithm_name_hs256, does_not_raise()),
(algorithm_name_unknown, pytest.raises(RuntimeError)),
],
)
def test_create_algorithm(algorithm_name: str, expectation: Any) -> None:
with expectation:
create_algorithm(algorithm_name)
algorithm_eddsa = create_algorithm(algorithm_name_eddsa)
eddsa_public_key_json_payload = {"crv": "Ed25519", "x": "-RBhgyNluwaIL5KFJb6ZOL2H1nmyI8mW4Z2EHGDGCXM", "kty": "OKP"}
# ^ given by https://huggingface.co/api/keys/jwt (as of 2023/08/18)
eddsa_public_key_pem = """-----BEGIN PUBLIC KEY-----
MCowBQYDK2VwAyEA+RBhgyNluwaIL5KFJb6ZOL2H1nmyI8mW4Z2EHGDGCXM=
-----END PUBLIC KEY-----
"""
another_algorithm_public_key_json_payload = {
"alg": "EC",
"crv": "P-256",
"x": "MKBCTNIcKUSDii11ySs3526iDZ8AiTo7Tu6KPAqv7D4",
"y": "4Etl6SRW2YiLUrN5vfvVHuhp7x8PxltmWWlbbM4IFyM",
"use": "enc",
"kid": "1",
}
@pytest.mark.parametrize(
"payload,expected_pem,expectation",
[
([], None, pytest.raises(ValueError)),
(eddsa_public_key_json_payload, None, pytest.raises(ValueError)),
([another_algorithm_public_key_json_payload], None, pytest.raises(RuntimeError)),
([eddsa_public_key_json_payload], eddsa_public_key_pem, does_not_raise()),
],
)
def test_parse_jwt_public_key_json(payload: Any, expected_pem: str, expectation: Any) -> None:
with expectation:
pem = parse_jwt_public_key_json(algorithm=algorithm_eddsa, payload=payload)
if expected_pem:
assert pem == expected_pem
eddsa_public_key_pem_with_bad_linebreaks = (
"-----BEGIN PUBLIC KEY-----\\nMCowBQYDK2VwAyEA+RBhgyNluwaIL5KFJb6ZOL2H1nmyI8mW4Z2EHGDGCXM=\\n-----END PUBLIC"
" KEY-----"
)
@pytest.mark.parametrize(
"payload,expected_pem,expectation",
[
(eddsa_public_key_pem_with_bad_linebreaks, None, pytest.raises(Exception)),
(eddsa_public_key_pem, eddsa_public_key_pem, does_not_raise()),
],
)
def test_parse_jwt_public_key_pem(payload: Any, expected_pem: str, expectation: Any) -> None:
with expectation:
pem = parse_jwt_public_key_pem(algorithm=algorithm_eddsa, payload=payload)
if expected_pem:
assert pem == expected_pem
private_key_ok = SigningKey.generate(curve=Ed25519)
private_key_pem_ok = private_key_ok.to_pem(format="pkcs8")
public_key_pem_ok = private_key_ok.get_verifying_key().to_pem().decode("utf-8")
other_private_key = SigningKey.generate(curve=Ed25519)
other_private_key_pem = other_private_key.to_pem(format="pkcs8")
other_public_key_pem = other_private_key.get_verifying_key().to_pem().decode("utf-8")
@pytest.mark.parametrize(
"keys_env_var,expected_keys",
[
("", []),
(public_key_pem_ok, [public_key_pem_ok]),
(f"{public_key_pem_ok},{public_key_pem_ok}", [public_key_pem_ok, public_key_pem_ok]),
(f"{public_key_pem_ok},{other_public_key_pem}", [public_key_pem_ok, other_public_key_pem]),
(
f"{public_key_pem_ok},{other_public_key_pem},{eddsa_public_key_pem}",
[public_key_pem_ok, other_public_key_pem, eddsa_public_key_pem],
),
],
)
def test_get_jwt_public_keys_from_env(keys_env_var: str, expected_keys: list[str]) -> None:
monkeypatch = pytest.MonkeyPatch()
monkeypatch.setenv("API_HF_JWT_ADDITIONAL_PUBLIC_KEYS", keys_env_var)
api_config = ApiConfig.from_env(hf_endpoint="")
assert (
get_jwt_public_keys(
algorithm_name=algorithm_name_eddsa,
additional_public_keys=api_config.hf_jwt_additional_public_keys,
)
== expected_keys
)
monkeypatch.undo()
@pytest.mark.parametrize(
"remote_payload,keys_payload,expected_keys,expectation",
[
([], [], None, pytest.raises(JWTKeysError)),
([another_algorithm_public_key_json_payload], [], None, pytest.raises(JWTKeysError)),
(None, [eddsa_public_key_pem_with_bad_linebreaks], None, pytest.raises(JWTKeysError)),
([eddsa_public_key_json_payload], [], [eddsa_public_key_pem], does_not_raise()),
(
None,
[public_key_pem_ok, other_public_key_pem, eddsa_public_key_pem],
[public_key_pem_ok, other_public_key_pem, eddsa_public_key_pem],
does_not_raise(),
),
(
[eddsa_public_key_json_payload],
[public_key_pem_ok, other_public_key_pem, eddsa_public_key_pem],
[eddsa_public_key_pem, public_key_pem_ok, other_public_key_pem, eddsa_public_key_pem],
does_not_raise(),
),
],
)
def test_get_jwt_public_keys(
remote_payload: Any, keys_payload: list[str], expected_keys: list[str], expectation: Any
) -> None:
def fake_fetch(
url: str,
hf_timeout_seconds: Optional[float] = None,
) -> Any:
return remote_payload
with patch("libapi.jwt_token.fetch_jwt_public_key_json", wraps=fake_fetch):
with expectation:
keys = get_jwt_public_keys(
algorithm_name=algorithm_name_eddsa,
public_key_url=None if remote_payload is None else "mock",
additional_public_keys=keys_payload,
)
if expected_keys:
assert keys == expected_keys
token_for_severo_glue = (
"eyJhbGciOiJFZERTQSJ9.eyJyZWFkIjp0cnVlLCJzdWIiOiJkYXRhc2V0cy9zZXZlcm8vZ2x1ZSIsImV4cCI6MTY3ODgwMjk0NH0"
".nIi1ZKinMBpYi4kKtirW-cQEt1cGnAziTGmJsZeN5UpE62jz4DcPaIPlSI5P5ciGOlTxy4SEhD1WITkQzpo3Aw"
)
dataset_severo_glue = "severo/glue"
def test_is_jwt_valid_with_ec() -> None:
validate_jwt(
dataset=dataset_severo_glue,
token=token_for_severo_glue,
public_keys=[eddsa_public_key_pem],
algorithm=algorithm_name_eddsa,
verify_exp=False,
# This is a test token generated on 2023/03/14, so we don't want to verify the exp.
)
dataset_ok = "dataset"
wrong_dataset = "wrong_dataset"
exp_ok = datetime.datetime.now().timestamp() + 1000
wrong_exp_1 = datetime.datetime.now().timestamp() - 1000
wrong_exp_2 = 1
sub_ok = f"datasets/{dataset_ok}"
sub_wrong_1 = dataset_ok
sub_wrong_2 = f"dataset/{dataset_ok}"
sub_wrong_3 = f"models/{dataset_ok}"
sub_wrong_4 = f"datasets/{wrong_dataset}"
read_ok = True
read_wrong_1 = False
read_wrong_2 = "True"
payload_ok = {"sub": sub_ok, "read": read_ok, "exp": exp_ok}
algorithm_ok = algorithm_name_eddsa
algorithm_wrong = algorithm_name_rs256
def encode_jwt(payload: dict[str, Any]) -> str:
return jwt.encode(payload, private_key_pem_ok, algorithm=algorithm_ok)
def assert_jwt(
token: str, expectation: Any, public_keys: Optional[list[str]] = None, algorithm: str = algorithm_ok
) -> None:
if public_keys is None:
public_keys = [public_key_pem_ok]
with expectation:
validate_jwt(dataset=dataset_ok, token=token, public_keys=public_keys, algorithm=algorithm)
@pytest.mark.parametrize(
"public_keys,expectation",
[
([other_public_key_pem], pytest.raises(JWTInvalidSignature)),
([public_key_pem_ok], does_not_raise()),
([public_key_pem_ok, other_public_key_pem], does_not_raise()),
([other_public_key_pem, public_key_pem_ok], does_not_raise()),
],
)
def test_validate_jwt_public_keys(public_keys: list[str], expectation: Any) -> None:
assert_jwt(encode_jwt(payload_ok), expectation, public_keys=public_keys)
@pytest.mark.parametrize(
"algorithm,expectation",
[
(algorithm_wrong, pytest.raises(JWTInvalidKeyOrAlgorithm)),
(algorithm_ok, does_not_raise()),
],
)
def test_validate_jwt_algorithm(algorithm: str, expectation: Any) -> None:
assert_jwt(encode_jwt(payload_ok), expectation, algorithm=algorithm)
@pytest.mark.parametrize(
"payload,expectation",
[
({}, pytest.raises(JWTMissingRequiredClaim)),
({"sub": sub_ok}, pytest.raises(JWTMissingRequiredClaim)),
({"read": read_ok}, pytest.raises(JWTMissingRequiredClaim)),
({"exp": exp_ok}, pytest.raises(JWTMissingRequiredClaim)),
({"read": read_ok, "exp": exp_ok}, pytest.raises(JWTMissingRequiredClaim)),
({"sub": sub_ok, "exp": exp_ok}, pytest.raises(JWTMissingRequiredClaim)),
({"sub": sub_ok, "read": read_ok}, pytest.raises(JWTMissingRequiredClaim)),
({"sub": sub_ok, "read": read_ok, "exp": exp_ok}, does_not_raise()),
],
)
def test_validate_jwt_content_format(payload: dict[str, str], expectation: Any) -> None:
assert_jwt(encode_jwt(payload), expectation)
@pytest.mark.parametrize(
"read,expectation",
[
(read_wrong_1, pytest.raises(JWTInvalidClaimRead)),
(read_wrong_2, pytest.raises(JWTInvalidClaimRead)),
(read_ok, does_not_raise()),
],
)
def test_validate_jwt_read(read: str, expectation: Any) -> None:
assert_jwt(encode_jwt({"sub": sub_ok, "read": read, "exp": exp_ok}), expectation)
@pytest.mark.parametrize(
"sub,expectation",
[
(sub_wrong_1, pytest.raises(JWTInvalidClaimSub)),
(sub_wrong_2, pytest.raises(JWTInvalidClaimSub)),
(sub_wrong_3, pytest.raises(JWTInvalidClaimSub)),
(sub_wrong_4, pytest.raises(JWTInvalidClaimSub)),
(sub_ok, does_not_raise()),
],
)
def test_validate_jwt_subject(sub: str, expectation: Any) -> None:
assert_jwt(encode_jwt({"sub": sub, "read": read_ok, "exp": exp_ok}), expectation)
@pytest.mark.parametrize(
"expiration,expectation",
[
(wrong_exp_1, pytest.raises(JWTExpiredSignature)),
(wrong_exp_2, pytest.raises(JWTExpiredSignature)),
(exp_ok, does_not_raise()),
],
)
def test_validate_jwt_expiration(expiration: str, expectation: Any) -> None:
assert_jwt(
encode_jwt({"sub": sub_ok, "read": read_ok, "exp": expiration}),
expectation,
)
| datasets-server-main | libs/libapi/tests/test_jwt_token.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from dataclasses import dataclass, field
from typing import Optional
from environs import Env
API_UVICORN_HOSTNAME = "localhost"
API_UVICORN_NUM_WORKERS = 2
API_UVICORN_PORT = 8000
@dataclass(frozen=True)
class UvicornConfig:
hostname: str = API_UVICORN_HOSTNAME
num_workers: int = API_UVICORN_NUM_WORKERS
port: int = API_UVICORN_PORT
@classmethod
def from_env(cls) -> "UvicornConfig":
env = Env(expand_vars=True)
with env.prefixed("API_UVICORN_"):
return cls(
hostname=env.str(name="HOSTNAME", default=API_UVICORN_HOSTNAME),
num_workers=env.int(name="NUM_WORKERS", default=API_UVICORN_NUM_WORKERS),
port=env.int(name="PORT", default=API_UVICORN_PORT),
)
API_EXTERNAL_AUTH_URL = None
API_HF_AUTH_PATH = "/api/datasets/%s/auth-check"
API_HF_JWT_PUBLIC_KEY_URL = None
API_HF_JWT_ADDITIONAL_PUBLIC_KEYS: list[str] = []
API_HF_JWT_ALGORITHM = "EdDSA"
API_HF_TIMEOUT_SECONDS = 0.2
API_HF_WEBHOOK_SECRET = None
API_MAX_AGE_LONG = 120 # 2 minutes
API_MAX_AGE_SHORT = 10 # 10 seconds
@dataclass(frozen=True)
class ApiConfig:
external_auth_url: Optional[str] = API_EXTERNAL_AUTH_URL # not documented
hf_auth_path: str = API_HF_AUTH_PATH
hf_jwt_public_key_url: Optional[str] = API_HF_JWT_PUBLIC_KEY_URL
hf_jwt_additional_public_keys: list[str] = field(default_factory=API_HF_JWT_ADDITIONAL_PUBLIC_KEYS.copy)
hf_jwt_algorithm: Optional[str] = API_HF_JWT_ALGORITHM
hf_timeout_seconds: Optional[float] = API_HF_TIMEOUT_SECONDS
hf_webhook_secret: Optional[str] = API_HF_WEBHOOK_SECRET
max_age_long: int = API_MAX_AGE_LONG
max_age_short: int = API_MAX_AGE_SHORT
@classmethod
def from_env(cls, hf_endpoint: str) -> "ApiConfig":
env = Env(expand_vars=True)
with env.prefixed("API_"):
hf_auth_path = env.str(name="HF_AUTH_PATH", default=API_HF_AUTH_PATH)
external_auth_url = None if hf_auth_path is None else f"{hf_endpoint}{hf_auth_path}"
return cls(
external_auth_url=external_auth_url,
hf_auth_path=hf_auth_path,
hf_jwt_public_key_url=env.str(name="HF_JWT_PUBLIC_KEY_URL", default=API_HF_JWT_PUBLIC_KEY_URL),
hf_jwt_additional_public_keys=env.list(
name="HF_JWT_ADDITIONAL_PUBLIC_KEYS", default=API_HF_JWT_ADDITIONAL_PUBLIC_KEYS.copy()
),
hf_jwt_algorithm=env.str(name="HF_JWT_ALGORITHM", default=API_HF_JWT_ALGORITHM),
hf_timeout_seconds=env.float(name="HF_TIMEOUT_SECONDS", default=API_HF_TIMEOUT_SECONDS),
hf_webhook_secret=env.str(name="HF_WEBHOOK_SECRET", default=API_HF_WEBHOOK_SECRET),
max_age_long=env.int(name="MAX_AGE_LONG", default=API_MAX_AGE_LONG),
max_age_short=env.int(name="MAX_AGE_SHORT", default=API_MAX_AGE_SHORT),
)
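# A minimal usage sketch (the endpoint is hypothetical): the values come from the
# defaults above unless the corresponding API_* environment variables are set.
def _load_api_config() -> ApiConfig:
    return ApiConfig.from_env(hf_endpoint="https://huggingface.co")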
| datasets-server-main | libs/libapi/src/libapi/config.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
| datasets-server-main | libs/libapi/src/libapi/__init__.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import logging
from typing import Any, Optional, Union
import jwt
import requests
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric.ec import (
EllipticCurvePrivateKey,
EllipticCurvePublicKey,
)
from cryptography.hazmat.primitives.asymmetric.ed448 import (
Ed448PrivateKey,
Ed448PublicKey,
)
from cryptography.hazmat.primitives.asymmetric.ed25519 import (
Ed25519PrivateKey,
Ed25519PublicKey,
)
from cryptography.hazmat.primitives.asymmetric.rsa import RSAPrivateKey, RSAPublicKey
from jwt.algorithms import (
ECAlgorithm,
HMACAlgorithm,
OKPAlgorithm,
RSAAlgorithm,
RSAPSSAlgorithm,
)
from libapi.exceptions import (
JWTExpiredSignature,
JWTInvalidClaimRead,
JWTInvalidClaimSub,
JWTInvalidKeyOrAlgorithm,
JWTInvalidSignature,
JWTKeysError,
JWTMissingRequiredClaim,
UnexpectedApiError,
)
ASYMMETRIC_ALGORITHMS = (ECAlgorithm, OKPAlgorithm, RSAAlgorithm, RSAPSSAlgorithm)
SYMMETRIC_ALGORITHMS = (HMACAlgorithm,)
SupportedAlgorithm = Union[ECAlgorithm, OKPAlgorithm, RSAAlgorithm, RSAPSSAlgorithm, HMACAlgorithm]
SupportedKey = Union[
Ed448PrivateKey,
Ed448PublicKey,
Ed25519PrivateKey,
Ed25519PublicKey,
EllipticCurvePrivateKey,
EllipticCurvePublicKey,
RSAPrivateKey,
RSAPublicKey,
bytes,
]
def is_public_key(key: SupportedKey) -> bool:
return hasattr(key, "public_bytes")
def create_algorithm(algorithm_name: str) -> SupportedAlgorithm:
"""
Create an algorithm object from the algorithm name.
Args:
algorithm_name (str): the algorithm name
Returns:
SupportedAlgorithm: the algorithm object
Raises:
RuntimeError: if the algorithm is not supported
"""
try:
algorithm = jwt.get_algorithm_by_name(algorithm_name)
if not isinstance(algorithm, (*ASYMMETRIC_ALGORITHMS, *SYMMETRIC_ALGORITHMS)):
raise NotImplementedError()
except NotImplementedError as err:
raise RuntimeError(f"Invalid algorithm for JWT verification: {algorithm_name} is not supported") from err
return algorithm
def _key_to_pem(key: SupportedKey, algorithm: SupportedAlgorithm) -> str:
"""
Convert the key to PEM format.
Args:
key (SupportedKey): the key to convert
algorithm (SupportedAlgorithm): the algorithm the key must be compatible with
Returns:
str: the key in PEM format (SubjectPublicKeyInfo) for asymmetric keys, or decoded as UTF-8 for symmetric keys
Raises:
RuntimeError: if the key is not a public key
"""
if isinstance(algorithm, SYMMETRIC_ALGORITHMS) or isinstance(key, bytes):
return key.decode("utf-8") # type: ignore
if not is_public_key(key):
raise RuntimeError("Failed to parse JWT key: the provided key is a private key")
return key.public_bytes( # type: ignore
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo,
).decode("utf-8")
# ^ we assume that the key contains UTF-8 encoded bytes, which is why we use type ignore for mypy
def parse_jwt_public_key_json(payload: Any, algorithm: SupportedAlgorithm) -> str:
"""
Parse the payload (JSON format) to extract the public key, validating that it's a public key, and that it is
compatible with the algorithm
Args:
payload (Any): the JSON to parse. It must be a list of keys in JWK format
algorithm (SupportedAlgorithm): the algorithm the key should implement
Returns:
str: the public key in PEM format
Raises:
RuntimeError: if the payload is not compatible with the algorithm, or if the key is not public
ValueError: if the input is not a list
"""
if not isinstance(payload, list) or not payload:
raise ValueError("Payload must be a list of JWK formatted keys.")
try:
key = algorithm.from_jwk(payload[0])
except (jwt.InvalidKeyError, KeyError) as err:
raise RuntimeError(f"Failed to parse JWT key: {err.args[0]}") from err
return _key_to_pem(key, algorithm)
def parse_jwt_public_key_pem(payload: str, algorithm: SupportedAlgorithm) -> str:
"""
Parse the input string to validate it's a public key in PEM format, and that it is compatible
with the algorithm
Args:
payload (str): the key to parse. It should be a public key in PEM format
algorithm (SupportedAlgorithm): the algorithm the key should implement
Returns:
str: the public key in PEM format
Raises:
RuntimeError: if the payload is not compatible with the algorithm, or if the key is not public
"""
try:
key = algorithm.prepare_key(payload)
except (jwt.InvalidKeyError, KeyError) as err:
raise RuntimeError(f"Failed to parse JWT key: {err.args[0]}") from err
return _key_to_pem(key, algorithm)
def fetch_jwt_public_key_json(
url: str,
hf_timeout_seconds: Optional[float] = None,
) -> Any:
"""
Fetch the public key from the input URL
See https://huggingface.co/api/keys/jwt
Args:
url (str): the URL to fetch the public key from
hf_timeout_seconds (float|None): the timeout in seconds for the external authentication service. It
is used both for the connection timeout and the read timeout. If None, the request never times out.
Returns:
Any: the response JSON payload
Raises:
RuntimeError: if the request fails
"""
try:
response = requests.get(url, timeout=hf_timeout_seconds)
response.raise_for_status()
return response.json()
except Exception as err:
raise RuntimeError(f"Failed to fetch the JWT public key from {url}. ") from err
def get_jwt_public_keys(
algorithm_name: Optional[str] = None,
public_key_url: Optional[str] = None,
additional_public_keys: Optional[list[str]] = None,
timeout_seconds: Optional[float] = None,
) -> list[str]:
"""
Get the public keys to use to decode the JWT token.
The keys can be created by two mechanisms:
- one key can be fetched from the public_key_url (must be in JWK format, i.e. JSON)
- additional keys can be provided as a list of PEM formatted keys
All of these keys are then converted to PEM format (SubjectPublicKeyInfo) and returned as a list. The remote key is first.
The keys must be compatible with the algorithm.
Args:
algorithm_name (str|None): the algorithm to use to decode the JWT token. If not provided, no keys will be
returned
public_key_url (str|None): the URL to fetch the public key from
additional_public_keys (list[str]|None): additional public keys to use to decode the JWT token
timeout_seconds (float|None): the timeout in seconds for fetching the remote key
Returns:
list[str]: the list of public keys in PEM format
Raises:
JWTKeysError: if some exception occurred while creating the public keys. Some reasons: if the algorithm
is not supported, if a payload could not be parsed, if a key is not compatible with the algorithm,
if a key is not public, or if the remote key could not be fetched or parsed
"""
try:
keys: list[str] = []
if not algorithm_name:
return keys
algorithm = create_algorithm(algorithm_name)
if public_key_url:
payload = fetch_jwt_public_key_json(
url=public_key_url,
hf_timeout_seconds=timeout_seconds,
)
keys.append(parse_jwt_public_key_json(payload=payload, algorithm=algorithm))
if additional_public_keys:
keys.extend(
parse_jwt_public_key_pem(payload=payload, algorithm=algorithm) for payload in additional_public_keys
)
logging.debug(f"JWT public keys are: {', '.join(keys)}.")
return keys
except Exception as err:
raise JWTKeysError("Failed to create the JWT public keys.") from err
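# Minimal sketch of building the key list without any network call: the remote
# public_key_url is omitted on purpose, and the Ed25519 key generated below is an
# assumption used only for illustration (it matches the "EdDSA" algorithm).
def _example_get_jwt_public_keys() -> list[str]:
    private_key = Ed25519PrivateKey.generate()
    public_pem = (
        private_key.public_key()
        .public_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PublicFormat.SubjectPublicKeyInfo,
        )
        .decode("utf-8")
    )
    # returns a one-element list: the additional key, converted back to PEM
    return get_jwt_public_keys(algorithm_name="EdDSA", additional_public_keys=[public_pem])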
def validate_jwt(
dataset: str, token: Any, public_keys: list[str], algorithm: str, verify_exp: Optional[bool] = True
) -> None:
"""
Check if the JWT is valid for the dataset.
The JWT is decoded with the public key, and the payload must be:
{"sub": "datasets/<...dataset identifier...>", "read": true, "exp": <...date...>}
An exception is raised if any of these conditions is not met.
Args:
dataset (str): the dataset identifier
token (Any): the JWT token to decode
public_keys (list[str]): the public keys to use to decode the JWT token. They are tried in order.
algorithm (str): the algorithm to use to decode the JWT token
verify_exp (bool|None): whether to verify the expiration of the JWT token. Defaults to True.
Raises:
JWTInvalidSignature: if the signature verification failed
JWTMissingRequiredClaim: if a claim is missing in the JWT payload
JWTExpiredSignature: if the JWT signature has expired
JWTInvalidKeyOrAlgorithm: if the key used to verify the signature is not compatible with the algorithm
JWTInvalidClaimSub: if the 'sub' claim in JWT payload is invalid
JWTInvalidClaimRead: if the 'read' claim in JWT payload is invalid
UnexpectedApiError: if another error occurred while decoding the JWT
"""
for public_key in public_keys:
logging.debug(f"Trying to decode the JWT with key #{public_keys.index(public_key)}: {public_key}.")
try:
decoded = jwt.decode(
jwt=token,
key=public_key,
algorithms=[algorithm],
options={"require": ["exp", "sub", "read"], "verify_exp": verify_exp},
)
logging.debug(f"Decoded JWT is: '{public_key}'.")
break
except jwt.exceptions.InvalidSignatureError as e:
if public_key == public_keys[-1]:
raise JWTInvalidSignature(
"The JWT signature verification failed. Check the signing key and the algorithm.", e
) from e
logging.debug(f"JWT signature verification failed with key: '{public_key}'. Trying next key.")
except jwt.exceptions.MissingRequiredClaimError as e:
raise JWTMissingRequiredClaim("A claim is missing in the JWT payload.", e) from e
except jwt.exceptions.ExpiredSignatureError as e:
raise JWTExpiredSignature("The JWT signature has expired. Try to refresh the token.", e) from e
except (jwt.exceptions.InvalidKeyError, jwt.exceptions.InvalidAlgorithmError) as e:
raise JWTInvalidKeyOrAlgorithm(
(
"The key used to verify the signature is not compatible with the algorithm. Check the signing key"
" and the algorithm."
),
e,
) from e
except Exception as e:
raise UnexpectedApiError("An error has occurred while decoding the JWT.", e) from e
sub = decoded.get("sub")
if not isinstance(sub, str) or not sub.startswith("datasets/") or sub.removeprefix("datasets/") != dataset:
raise JWTInvalidClaimSub(
"The 'sub' claim in JWT payload is invalid. It should be in the form 'datasets/<...dataset"
" identifier...>'."
)
read = decoded.get("read")
if read is not True:
raise JWTInvalidClaimRead("The 'read' claim in JWT payload is invalid. It should be set to 'true'.")
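# End-to-end sketch of the flow above, under two assumptions: a locally generated
# Ed25519 key pair, and the hypothetical dataset name "user/dataset". The token is
# signed with the private key and then checked against the public key in PEM format.
def _example_validate_jwt() -> None:
    from datetime import datetime, timedelta, timezone

    private_key = Ed25519PrivateKey.generate()
    private_pem = private_key.private_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PrivateFormat.PKCS8,
        encryption_algorithm=serialization.NoEncryption(),
    ).decode("utf-8")
    public_pem = (
        private_key.public_key()
        .public_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PublicFormat.SubjectPublicKeyInfo,
        )
        .decode("utf-8")
    )
    token = jwt.encode(
        {"sub": "datasets/user/dataset", "read": True, "exp": datetime.now(timezone.utc) + timedelta(minutes=5)},
        private_pem,
        algorithm="EdDSA",
    )
    # raises one of the JWT* exceptions above if the token is not valid for this dataset
    validate_jwt(dataset="user/dataset", token=token, public_keys=[public_pem], algorithm="EdDSA")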
| datasets-server-main | libs/libapi/src/libapi/jwt_token.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import logging
import os
import shutil
from collections.abc import Callable, Coroutine
from http import HTTPStatus
from itertools import islice
from typing import Any, Optional
import pyarrow as pa
from datasets import Features
from libcommon.dataset import get_dataset_git_revision
from libcommon.exceptions import CustomError
from libcommon.orchestrator import DatasetOrchestrator
from libcommon.processing_graph import ProcessingGraph, ProcessingStep
from libcommon.rows_utils import transform_rows
from libcommon.simple_cache import (
CACHED_RESPONSE_NOT_FOUND,
CacheEntry,
get_best_response,
)
from libcommon.storage import StrPath
from libcommon.utils import Priority, RowItem, orjson_dumps
from libcommon.viewer_utils.asset import glob_rows_in_assets_dir
from starlette.requests import Request
from starlette.responses import JSONResponse, Response
from libapi.exceptions import (
ResponseNotFoundError,
ResponseNotReadyError,
TransformRowsProcessingError,
)
class OrjsonResponse(JSONResponse):
def render(self, content: Any) -> bytes:
return orjson_dumps(content=content)
def get_response(content: Any, status_code: int = 200, max_age: int = 0) -> Response:
headers = {"Cache-Control": f"max-age={max_age}"} if max_age > 0 else {"Cache-Control": "no-store"}
return OrjsonResponse(content=content, status_code=status_code, headers=headers)
def get_json_response(
content: Any,
status_code: HTTPStatus = HTTPStatus.OK,
max_age: int = 0,
error_code: Optional[str] = None,
revision: Optional[str] = None,
headers: Optional[dict[str, str]] = None,
) -> Response:
if not headers:
headers = {}
headers["Cache-Control"] = f"max-age={max_age}" if max_age > 0 else "no-store"
if error_code is not None:
headers["X-Error-Code"] = error_code
if revision is not None:
headers["X-Revision"] = revision
return OrjsonResponse(content=content, status_code=status_code.value, headers=headers)
# these headers are exposed to the client (browser)
EXPOSED_HEADERS = [
"X-Error-Code",
"X-Revision",
]
def get_json_ok_response(
content: Any, max_age: int = 0, revision: Optional[str] = None, headers: Optional[dict[str, str]] = None
) -> Response:
return get_json_response(content=content, max_age=max_age, revision=revision, headers=headers)
def get_json_error_response(
content: Any,
status_code: HTTPStatus = HTTPStatus.OK,
max_age: int = 0,
error_code: Optional[str] = None,
revision: Optional[str] = None,
) -> Response:
return get_json_response(
content=content, status_code=status_code, max_age=max_age, error_code=error_code, revision=revision
)
def get_json_api_error_response(error: CustomError, max_age: int = 0, revision: Optional[str] = None) -> Response:
return get_json_error_response(
content=error.as_response(),
status_code=error.status_code,
max_age=max_age,
error_code=error.code,
revision=revision,
)
def is_non_empty_string(string: Any) -> bool:
return isinstance(string, str) and bool(string and string.strip())
def are_valid_parameters(parameters: list[Any]) -> bool:
return all(is_non_empty_string(s) for s in parameters)
def try_backfill_dataset_then_raise(
processing_steps: list[ProcessingStep],
dataset: str,
processing_graph: ProcessingGraph,
cache_max_days: int,
hf_endpoint: str,
hf_token: Optional[str] = None,
hf_timeout_seconds: Optional[float] = None,
) -> None:
dataset_orchestrator = DatasetOrchestrator(dataset=dataset, processing_graph=processing_graph)
if not dataset_orchestrator.has_some_cache():
# We have to check if the dataset exists and is supported
try:
revision = get_dataset_git_revision(
dataset=dataset,
hf_endpoint=hf_endpoint,
hf_token=hf_token,
hf_timeout_seconds=hf_timeout_seconds,
)
except Exception as e:
# The dataset is not supported
raise ResponseNotFoundError("Not found.") from e
# The dataset is supported, and the revision is known. We set the revision (it will create the jobs)
# and tell the user to retry.
logging.info(f"Set orchestrator revision for dataset={dataset}, revision={revision}")
dataset_orchestrator.set_revision(
revision=revision, priority=Priority.NORMAL, error_codes_to_retry=[], cache_max_days=cache_max_days
)
raise ResponseNotReadyError(
"The server is busier than usual and the response is not ready yet. Please retry later."
)
elif dataset_orchestrator.has_pending_ancestor_jobs(
processing_step_names=[processing_step.name for processing_step in processing_steps]
):
# some jobs are still in progress, the cache entries could exist in the future
raise ResponseNotReadyError(
"The server is busier than usual and the response is not ready yet. Please retry later."
)
else:
# no pending job: the cache entry will not be created
raise ResponseNotFoundError("Not found.")
def get_cache_entry_from_steps(
processing_steps: list[ProcessingStep],
dataset: str,
config: Optional[str],
split: Optional[str],
processing_graph: ProcessingGraph,
cache_max_days: int,
hf_endpoint: str,
hf_token: Optional[str] = None,
hf_timeout_seconds: Optional[float] = None,
) -> CacheEntry:
"""Gets the cache from the first successful step in the processing steps list.
If no successful result is found, it returns the last one, even if it's an error.
If no entry is found at all, it checks whether a job is still in progress for each processing step.
Raises:
- [`~utils.ResponseNotFoundError`]
if no result is found.
- [`~utils.ResponseNotReadyError`]
if the response is not ready yet.
Returns: the cached record
"""
kinds = [processing_step.cache_kind for processing_step in processing_steps]
best_response = get_best_response(kinds=kinds, dataset=dataset, config=config, split=split)
if "error_code" in best_response.response and best_response.response["error_code"] == CACHED_RESPONSE_NOT_FOUND:
try_backfill_dataset_then_raise(
processing_steps=processing_steps,
processing_graph=processing_graph,
dataset=dataset,
hf_endpoint=hf_endpoint,
hf_timeout_seconds=hf_timeout_seconds,
hf_token=hf_token,
cache_max_days=cache_max_days,
)
return best_response.response
Endpoint = Callable[[Request], Coroutine[Any, Any, Response]]
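# Hypothetical endpoint sketch (the query parameter handling and the response content
# are assumptions): it shows how the helpers above are typically combined into an
# `Endpoint`, returning a JSON payload on success and a structured error otherwise.
async def _example_endpoint(request: Request) -> Response:
    dataset = request.query_params.get("dataset")
    if not are_valid_parameters([dataset]):
        return get_json_error_response(
            content={"error": "Parameter 'dataset' is required"},
            status_code=HTTPStatus.UNPROCESSABLE_ENTITY,
            error_code="MissingRequiredParameter",
        )
    try:
        # ... look up the cache entry for `dataset` here (e.g. with get_cache_entry_from_steps) ...
        return get_json_ok_response(content={"dataset": dataset, "valid": True}, max_age=120)
    except CustomError as err:
        return get_json_api_error_response(error=err, max_age=10)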
def to_rows_list(
pa_table: pa.Table,
dataset: str,
config: str,
split: str,
cached_assets_base_url: str,
cached_assets_directory: StrPath,
offset: int,
features: Features,
unsupported_columns: list[str],
row_idx_column: Optional[str] = None,
) -> list[RowItem]:
num_rows = pa_table.num_rows
for idx, (column, feature) in enumerate(features.items()):
if column in unsupported_columns:
pa_table = pa_table.add_column(idx, column, pa.array([None] * num_rows))
# transform the rows, if needed (e.g. save the images or audio to the assets, and return their URL)
try:
transformed_rows = transform_rows(
dataset=dataset,
config=config,
split=split,
rows=pa_table.to_pylist(),
features=features,
cached_assets_base_url=cached_assets_base_url,
cached_assets_directory=cached_assets_directory,
offset=offset,
row_idx_column=row_idx_column,
)
except Exception as err:
raise TransformRowsProcessingError(
"Server error while post-processing the split rows. Please report the issue."
) from err
return [
{
"row_idx": idx + offset if row_idx_column is None else row.pop(row_idx_column),
"row": row,
"truncated_cells": [],
}
for idx, row in enumerate(transformed_rows)
]
def _greater_or_equal(row_dir_name: str, row_idx: int, on_error: bool) -> bool:
try:
return int(row_dir_name) >= row_idx
except ValueError:
return on_error
def clean_cached_assets(
dataset: str,
cached_assets_directory: StrPath,
keep_first_rows_number: int,
keep_most_recent_rows_number: int,
max_cleaned_rows_number: int,
) -> None:
"""
The cached assets directory is cleaned to save disk space using this simple (?) heuristic:
1. it takes a big sample of rows from the cache using glob (max `max_cleaned_rows_number`)
2. it keeps the most recent ones (max `keep_most_recent_rows_number`)
3. it keeps the rows below a certain index (max `keep_first_rows_number`)
4. it discards the rest
To check for the most recent rows, it looks at the "last modified time" of rows directories.
This time is updated every time a row is accessed using `update_last_modified_date_of_rows_in_assets_dir()`.
Args:
dataset (`str`):
Dataset name e.g `squad` or `lhoestq/demo1`.
Rows are cleaned in any dataset configuration or split of this dataset.
cached_assets_directory (`StrPath`):
Directory containing the cached image and audio files
keep_first_rows_number (`int`):
Keep the rows with an index below a certain number
keep_most_recent_rows_number (`int`):
Keep the most recently accessed rows.
max_cleaned_rows_number (`int`):
Maximum number of rows to discard.
"""
if keep_first_rows_number < 0 or keep_most_recent_rows_number < 0 or max_cleaned_rows_number < 0:
raise ValueError(
"Failed to run cached assets cleaning. Make sure all of keep_first_rows_number,"
f" keep_most_recent_rows_number and max_cleaned_rows_number are set (got {keep_first_rows_number},"
f" {keep_most_recent_rows_number} and {max_cleaned_rows_number})"
)
row_directories = glob_rows_in_assets_dir(dataset, cached_assets_directory)
row_directories_sample = list(
islice(
(
row_dir
for row_dir in row_directories
if _greater_or_equal(row_dir.name, keep_first_rows_number, on_error=True)
),
max_cleaned_rows_number + keep_most_recent_rows_number,
)
)
if len(row_directories_sample) > keep_most_recent_rows_number:
row_dirs_to_delete = sorted(row_directories_sample, key=os.path.getmtime, reverse=True)[
keep_most_recent_rows_number:
]
for row_dir_to_delete in row_dirs_to_delete:
shutil.rmtree(row_dir_to_delete, ignore_errors=True)
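# Parameter sketch for the heuristic documented above; the dataset name, the directory
# and the thresholds are assumptions used only for illustration.
def _example_clean_cached_assets() -> None:
    clean_cached_assets(
        dataset="user/dataset",
        cached_assets_directory="/storage/cached-assets",
        keep_first_rows_number=100,  # rows with an index below 100 are never deleted
        keep_most_recent_rows_number=200,  # among the sampled rows, keep the 200 most recently accessed
        max_cleaned_rows_number=10_000,  # discard at most 10_000 rows per call
    )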
| datasets-server-main | libs/libapi/src/libapi/utils.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import logging
from typing import Literal, Optional
import requests
from libcommon.prometheus import StepProfiler
from requests import PreparedRequest
from requests.auth import AuthBase
from starlette.requests import Request
from libapi.exceptions import (
AuthCheckHubRequestError,
ExternalAuthenticatedError,
ExternalUnauthenticatedError,
)
from libapi.jwt_token import validate_jwt
class RequestAuth(AuthBase):
"""Attaches input Request authentication headers to the given Request object."""
def __init__(self, request: Optional[Request]) -> None:
if request is not None:
self.cookie = request.headers.get("cookie")
self.authorization = request.headers.get("authorization")
else:
self.cookie = None
self.authorization = None
def __call__(self, r: PreparedRequest) -> PreparedRequest:
# modify and return the request
if self.cookie:
r.headers["cookie"] = self.cookie
if self.authorization:
r.headers["authorization"] = self.authorization
return r
def get_jwt_token(request: Optional[Request] = None) -> Optional[str]:
if not request:
return None
# the "x-api-key" header is deprecated and will be removed in the future
if token := request.headers.get("x-api-key"):
return token
authorization = request.headers.get("authorization")
if not authorization:
return None
token = authorization.removeprefix("Bearer jwt:")
return None if token == authorization else token
def auth_check(
dataset: str,
external_auth_url: Optional[str] = None,
request: Optional[Request] = None,
hf_jwt_public_keys: Optional[list[str]] = None,
hf_jwt_algorithm: Optional[str] = None,
hf_timeout_seconds: Optional[float] = None,
) -> Literal[True]:
"""check if the dataset is authorized for the request
It sends a request to the Hugging Face API to check if the dataset is authorized for the input request. The request
to the Hugging Face API is authenticated with the same authentication headers as the input request. It timeouts
after 200ms.
Args:
dataset (str): the dataset name
external_auth_url (str|None): the URL of an external authentication service. The URL must contain `%s`,
which will be replaced with the dataset name, for example: https://huggingface.co/api/datasets/%s/auth-check
The authentication service must return 200, 401, 403 or 404.
If None, the dataset is always authorized.
request (Request | None): the request which optionally bears authentication headers: "cookie",
"authorization" or "X-Api-Key"
hf_jwt_public_keys (list[str]|None): the public keys to use to decode the JWT token
hf_jwt_algorithm (str): the algorithm to use to decode the JWT token
hf_timeout_seconds (float|None): the timeout in seconds for the external authentication service. It
is used both for the connection timeout and the read timeout. If None, the request never times out.
Returns:
`Literal[True]`: the dataset is authorized for the request
"""
with StepProfiler(method="auth_check", step="all"):
with StepProfiler(method="auth_check", step="check JWT"):
if (jwt_token := get_jwt_token(request)) and hf_jwt_public_keys and hf_jwt_algorithm:
validate_jwt(
dataset=dataset, token=jwt_token, public_keys=hf_jwt_public_keys, algorithm=hf_jwt_algorithm
)
logging.debug(
"By-passing the authentication step, because a valid JWT was passed in headers"
f" for dataset {dataset}. JWT was: {jwt_token}"
)
return True
with StepProfiler(method="auth_check", step="prepare parameters"):
if external_auth_url is None:
return True
try:
url = external_auth_url % dataset
except TypeError as e:
raise ValueError("external_auth_url must contain %s") from e
with StepProfiler(method="auth_check", step="create auth parameter"):
auth = RequestAuth(request)
with StepProfiler(
method="auth_check",
step="requests.get",
context=f"external_auth_url={external_auth_url} timeout={hf_timeout_seconds}",
):
try:
logging.debug(
f"Checking authentication on the Hugging Face Hub for dataset {dataset}, url: {url}, timeout:"
f" {hf_timeout_seconds}, authorization: {auth.authorization}"
)
response = requests.get(url, auth=auth, timeout=hf_timeout_seconds)
except Exception as err:
raise AuthCheckHubRequestError(
(
"Authentication check on the Hugging Face Hub failed or timed out. Please try again later,"
" it's a temporary internal issue."
),
err,
) from err
with StepProfiler(method="auth_check", step="return or raise"):
if response.status_code == 200:
return True
elif response.status_code == 401:
raise ExternalUnauthenticatedError(
"The dataset does not exist, or is not accessible without authentication (private or gated). Please"
" check the spelling of the dataset name or retry with authentication."
)
elif response.status_code in {403, 404}:
raise ExternalAuthenticatedError(
"The dataset does not exist, or is not accessible with the current credentials (private or gated)."
" Please check the spelling of the dataset name or retry with other authentication credentials."
)
else:
raise ValueError(f"Unexpected status code {response.status_code}")
| datasets-server-main | libs/libapi/src/libapi/authentication.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import logging
from http import HTTPStatus
from typing import Literal, Optional
from libcommon.exceptions import CustomError
ApiErrorCode = Literal[
"AuthCheckHubRequestError",
"ExternalAuthenticatedError",
"ExternalUnauthenticatedError",
"InvalidParameter",
"JWTExpiredSignature",
"JWTInvalidClaimRead",
"JWTInvalidClaimSub",
"JWTInvalidKeyOrAlgorithm",
"JWTInvalidSignature",
"JWTKeysError",
"JWTMissingRequiredClaim",
"MissingProcessingStepsError",
"MissingRequiredParameter",
"ResponseNotFound",
"ResponseNotReady",
"TransformRowsProcessingError",
"UnexpectedApiError",
]
class ApiError(CustomError):
"""Base class for exceptions raised by an API service."""
def __init__(
self,
message: str,
status_code: HTTPStatus,
code: ApiErrorCode,
cause: Optional[BaseException] = None,
disclose_cause: bool = False,
):
super().__init__(
message=message, status_code=status_code, code=code, cause=cause, disclose_cause=disclose_cause
)
class AuthCheckHubRequestError(ApiError):
"""The external authentication check failed or timed out."""
def __init__(self, message: str, cause: Optional[BaseException] = None):
super().__init__(
message, HTTPStatus.INTERNAL_SERVER_ERROR, "AuthCheckHubRequestError", cause=cause, disclose_cause=False
)
class ExternalAuthenticatedError(ApiError):
"""The external authentication check failed while the user was authenticated.
Even if the external authentication server returns 403 in that case, we return 404 because
we don't know if the dataset exists or not. It's also consistent with how the Hugging Face Hub works.
TODO: should we return DatasetNotFoundError instead? maybe the error code is leaking existence of private datasets.
"""
def __init__(self, message: str):
super().__init__(message, HTTPStatus.NOT_FOUND, "ExternalAuthenticatedError")
class ExternalUnauthenticatedError(ApiError):
"""The external authentication check failed while the user was unauthenticated."""
def __init__(self, message: str):
super().__init__(message, HTTPStatus.UNAUTHORIZED, "ExternalUnauthenticatedError")
class InvalidParameterError(ApiError):
"""A parameter has an invalid value."""
def __init__(self, message: str):
super().__init__(message, HTTPStatus.UNPROCESSABLE_ENTITY, "InvalidParameter")
class JWTKeysError(ApiError):
"""The public keys for decoding JWT could not be created."""
def __init__(self, message: str, cause: Optional[BaseException] = None):
super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "JWTKeysError", cause=cause, disclose_cause=False)
class MissingRequiredParameterError(ApiError):
"""A required parameter is missing."""
def __init__(self, message: str):
super().__init__(message, HTTPStatus.UNPROCESSABLE_ENTITY, "MissingRequiredParameter")
class ResponseNotFoundError(ApiError):
"""The response has not been found."""
def __init__(self, message: str):
super().__init__(message, HTTPStatus.NOT_FOUND, "ResponseNotFound")
class ResponseNotReadyError(ApiError):
"""The response has not been processed yet."""
def __init__(self, message: str):
super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "ResponseNotReady")
class TransformRowsProcessingError(ApiError):
"""There was an error when transforming rows to list."""
def __init__(self, message: str, cause: Optional[BaseException] = None):
super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "TransformRowsProcessingError", cause, True)
class JWTExpiredSignature(ApiError):
"""The JWT signature has expired."""
def __init__(self, message: str, cause: Optional[BaseException] = None):
super().__init__(message, HTTPStatus.UNAUTHORIZED, "JWTExpiredSignature", cause, True)
class JWTInvalidClaimRead(ApiError):
"""The 'read' claim in the JWT payload is invalid."""
def __init__(self, message: str, cause: Optional[BaseException] = None):
super().__init__(message, HTTPStatus.UNAUTHORIZED, "JWTInvalidClaimRead", cause, True)
class JWTInvalidClaimSub(ApiError):
"""The 'sub' claim in the JWT payload is invalid."""
def __init__(self, message: str, cause: Optional[BaseException] = None):
super().__init__(message, HTTPStatus.UNAUTHORIZED, "JWTInvalidClaimSub", cause, True)
class JWTInvalidKeyOrAlgorithm(ApiError):
"""The key and the algorithm used to verify the JWT signature are not compatible."""
def __init__(self, message: str, cause: Optional[BaseException] = None):
super().__init__(message, HTTPStatus.UNAUTHORIZED, "JWTInvalidKeyOrAlgorithm", cause, True)
class JWTInvalidSignature(ApiError):
"""The JWT signature verification failed."""
def __init__(self, message: str, cause: Optional[BaseException] = None):
super().__init__(message, HTTPStatus.UNAUTHORIZED, "JWTInvalidSignature", cause, True)
class JWTMissingRequiredClaim(ApiError):
"""A claim is missing in the JWT payload."""
def __init__(self, message: str, cause: Optional[BaseException] = None):
super().__init__(message, HTTPStatus.UNAUTHORIZED, "JWTMissingRequiredClaim", cause, True)
class UnexpectedApiError(ApiError):
"""The server raised an unexpected error."""
def __init__(self, message: str, cause: Optional[BaseException] = None):
logging.error(message, exc_info=cause)
super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "UnexpectedApiError", cause)
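# Small sketch of how these errors surface to clients: each ApiError carries an HTTP
# status and a machine-readable code (exposed as the "X-Error-Code" response header).
def _example_api_error() -> None:
    try:
        raise ResponseNotFoundError("Not found.")
    except ApiError as err:
        print(err.status_code, err.code)  # e.g. 404 / "ResponseNotFound"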
| datasets-server-main | libs/libapi/src/libapi/exceptions.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
import logging
from libcommon.prometheus import Prometheus
from prometheus_client import CONTENT_TYPE_LATEST
from starlette.requests import Request
from starlette.responses import Response
from libapi.utils import Endpoint
def create_metrics_endpoint() -> Endpoint:
prometheus = Prometheus()
async def metrics_endpoint(_: Request) -> Response:
logging.info("/metrics")
return Response(prometheus.getLatestContent(), headers={"Content-Type": CONTENT_TYPE_LATEST})
return metrics_endpoint
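# Sketch of wiring the endpoint into a Starlette application; the application itself is
# an assumption, as the real services register this route alongside their other routes.
def _example_app() -> None:
    from starlette.applications import Starlette
    from starlette.routing import Route

    app = Starlette(routes=[Route("/metrics", endpoint=create_metrics_endpoint())])
    print([route.path for route in app.routes])  # ["/metrics"]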
| datasets-server-main | libs/libapi/src/libapi/routes/metrics.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
| datasets-server-main | libs/libapi/src/libapi/routes/__init__.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import logging
from libcommon.prometheus import StepProfiler
from starlette.requests import Request
from starlette.responses import PlainTextResponse, Response
async def healthcheck_endpoint(_: Request) -> Response:
logging.info("/healthcheck")
with StepProfiler(method="healthcheck_endpoint", step="all"):
return PlainTextResponse("ok", headers={"Cache-Control": "no-store"})
| datasets-server-main | libs/libapi/src/libapi/routes/healthcheck.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from mongoengine import Document
from mongoengine.fields import StringField
from mongodb_migration.resources import MigrationsMongoResource
def test_migrations_database(mongo_host: str) -> None:
resource = MigrationsMongoResource(database="test_migrations_database", host=mongo_host)
class User(Document):
name = StringField()
meta = {"db_alias": resource.mongoengine_alias}
assert len(User.objects()) == 0 # type: ignore
# clean
User.drop_collection() # type: ignore
assert resource.is_available()
resource.release()
| datasets-server-main | jobs/mongodb_migration/tests/test_resources.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from environs import Env
from pytest import fixture
@fixture(scope="session")
def env() -> Env:
return Env(expand_vars=True)
@fixture(scope="session")
def mongo_host(env: Env) -> str:
try:
url = env.str(name="DATABASE_MIGRATIONS_MONGO_URL")
if not isinstance(url, str):
raise ValueError("DATABASE_MIGRATIONS_MONGO_URL is not set")
return url
except Exception as e:
raise ValueError("DATABASE_MIGRATIONS_MONGO_URL is not set") from e
| datasets-server-main | jobs/mongodb_migration/tests/conftest.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from collections.abc import Iterator
from typing import Optional
import pytest
from mongodb_migration.database_migrations import (
DatabaseMigration,
_clean_maintenance_database,
)
from mongodb_migration.migration import IrreversibleMigrationError, Migration
from mongodb_migration.plan import Plan, SavedMigrationsError
from mongodb_migration.resources import MigrationsMongoResource
@pytest.fixture(autouse=True)
def migrations_mongo_resource(mongo_host: str) -> Iterator[MigrationsMongoResource]:
database = "datasets_server_migrations_test"
if "test" not in database:
raise ValueError("Test must be launched on a test mongo database")
with MigrationsMongoResource(database=database, host=mongo_host) as resource:
yield resource
_clean_maintenance_database()
class MigrationOK(Migration):
def up(self) -> None:
pass
def down(self) -> None:
pass
def validate(self) -> None:
pass
class MigrationErrorInUp(Migration):
def up(self) -> None:
raise RuntimeError("Error in up")
def down(self) -> None:
pass
def validate(self) -> None:
pass
class MigrationErrorInValidate(Migration):
def up(self) -> None:
pass
def down(self) -> None:
pass
def validate(self) -> None:
raise RuntimeError("Error in validation")
class MigrationErrorInUpAndDown(Migration):
def up(self) -> None:
raise RuntimeError("Error in up")
def down(self) -> None:
raise RuntimeError("Error in down")
def validate(self) -> None:
pass
class MigrationErrorIrreversible(Migration):
def up(self) -> None:
raise RuntimeError("Error in up")
def down(self) -> None:
raise IrreversibleMigrationError("Error in down")
def validate(self) -> None:
pass
def test_empty_plan() -> None:
plan = Plan(collected_migrations=[])
assert plan.collected_migrations == []
plan.execute()
assert plan.executed_migrations == []
migration_ok_a = MigrationOK(version="20221110230400", description="ok a")
migration_ok_b = MigrationOK(version="20221110230401", description="ok b")
migration_error_in_up = MigrationErrorInUp(version="20221110230402", description="error in up")
migration_error_in_validate = MigrationErrorInValidate(version="20221110230403", description="error in validate")
migration_error_in_up_and_down = MigrationErrorInUpAndDown(
version="20221110230404", description="error in up and down"
)
migration_error_irreversible = MigrationErrorIrreversible(
version="20221110230405", description="error because migration is irreversible"
)
@pytest.mark.parametrize(
"collected_migrations",
(
[migration_ok_a, migration_ok_b],
[migration_ok_b, migration_ok_a],
),
)
def test_collected_migrations_order_dont_matter(collected_migrations: list[Migration]) -> None:
assert DatabaseMigration.objects.distinct("version") == []
plan = Plan(collected_migrations=collected_migrations)
assert plan.executed_migrations == []
plan.execute()
sorted_migrations = sorted(collected_migrations, key=lambda migration: migration.version)
assert plan.executed_migrations == sorted_migrations
assert DatabaseMigration.objects.distinct("version") == [migration.version for migration in sorted_migrations]
@pytest.mark.parametrize(
"collected_migrations,executed_migrations,exception",
[
([migration_error_in_up], [], RuntimeError),
([migration_error_in_validate], [], RuntimeError),
([migration_error_in_up_and_down], [migration_error_in_up_and_down], RuntimeError),
([migration_error_irreversible], [migration_error_irreversible], IrreversibleMigrationError),
([migration_ok_a, migration_error_in_up], [], RuntimeError),
(
[migration_ok_a, migration_error_in_up_and_down],
[migration_ok_a, migration_error_in_up_and_down],
RuntimeError,
),
],
)
def test_errors_in_migration_steps(
collected_migrations: list[Migration], executed_migrations: list[Migration], exception: Optional[type[Exception]]
) -> None:
assert DatabaseMigration.objects.distinct("version") == []
plan = Plan(collected_migrations=collected_migrations)
assert plan.executed_migrations == []
if exception is None:
# rollback worked
plan.execute()
else:
# rollback failed
with pytest.raises(exception):
plan.execute()
assert plan.executed_migrations == executed_migrations
assert DatabaseMigration.objects.distinct("version") == [migration.version for migration in executed_migrations]
@pytest.mark.parametrize(
"previous_migrations,collected_migrations,executed_migrations,exception",
[
([], [], [], None),
([], [migration_ok_a], [migration_ok_a], None),
([migration_ok_a], [migration_ok_a, migration_ok_b], [migration_ok_b], None),
# the previous migrations must be in the collected migrations
([migration_ok_a], [], [], SavedMigrationsError),
([migration_ok_a], [migration_ok_b], [], SavedMigrationsError),
# error with the versions order
([migration_ok_b], [migration_ok_a, migration_ok_b], [], SavedMigrationsError),
],
)
def test_get_planned_migrations(
previous_migrations: list[Migration],
collected_migrations: list[Migration],
executed_migrations: list[Migration],
exception: Optional[type[Exception]],
) -> None:
for migration in previous_migrations:
DatabaseMigration(version=migration.version, description=migration.description).save()
assert DatabaseMigration.objects.distinct("version") == [migration.version for migration in previous_migrations]
plan = Plan(collected_migrations=collected_migrations)
assert plan.executed_migrations == []
if exception is None:
# up worked
plan.apply()
else:
# up failed
with pytest.raises(exception):
plan.apply()
assert plan.executed_migrations == executed_migrations
assert DatabaseMigration.objects.distinct("version") == [
migration.version for migration in (previous_migrations + executed_migrations)
]
def test_internal_operations_are_idempotent() -> None:
plan = Plan(collected_migrations=[migration_ok_a, migration_ok_b])
plan.rollback()
plan.rollback()
plan.rollback()
plan.apply()
plan.apply()
plan.apply()
plan.apply()
plan.rollback()
plan.apply()
plan.rollback()
def test_execute_is_idempotent() -> None:
plan = Plan(collected_migrations=[migration_ok_a, migration_ok_b])
plan.execute()
plan.execute()
Plan(collected_migrations=[migration_ok_a, migration_ok_b]).execute()
| datasets-server-main | jobs/mongodb_migration/tests/test_plan.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
from libcommon.constants import (
CACHE_COLLECTION_RESPONSES,
CACHE_MONGOENGINE_ALIAS,
QUEUE_COLLECTION_JOBS,
QUEUE_MONGOENGINE_ALIAS,
)
from libcommon.resources import MongoResource
from mongoengine.connection import get_db
from mongodb_migration.renaming_migrations import (
CacheRenamingMigration,
QueueRenamingMigration,
)
def test_cache_renaming_migration(mongo_host: str) -> None:
old_kind, new_kind = "/kind-name", "kind-name"
with MongoResource(database="test_cache_rename_kind", host=mongo_host, mongoengine_alias=CACHE_MONGOENGINE_ALIAS):
db = get_db(CACHE_MONGOENGINE_ALIAS)
db[CACHE_COLLECTION_RESPONSES].insert_many([{"kind": old_kind, "dataset": "dataset", "http_status": 200}])
assert db[CACHE_COLLECTION_RESPONSES].find_one(
{"kind": old_kind}
) # Ensure there is at least one record to update
migration = CacheRenamingMigration(
cache_kind=old_kind,
new_cache_kind=new_kind,
version="20230516165100",
description=f"update 'kind' field in cache from {old_kind} to {new_kind}",
)
migration.up()
assert not db[CACHE_COLLECTION_RESPONSES].find_one({"kind": old_kind}) # Ensure 0 records with old kind
assert db[CACHE_COLLECTION_RESPONSES].find_one({"kind": new_kind})
db[CACHE_COLLECTION_RESPONSES].drop()
def test_queue_renaming_migration(mongo_host: str) -> None:
old_job, new_job = "/job-name", "job-name"
with MongoResource(
database="test_test_queue_renaming_migration", host=mongo_host, mongoengine_alias=QUEUE_MONGOENGINE_ALIAS
):
db = get_db(QUEUE_MONGOENGINE_ALIAS)
db[QUEUE_COLLECTION_JOBS].insert_many(
[
{
"type": old_job,
"unicity_id": f"{old_job},dataset,config,split",
"dataset": "dataset",
"http_status": 200,
}
]
)
assert db[QUEUE_COLLECTION_JOBS].find_one({"type": old_job}) # Ensure there is at least one record to update
migration = QueueRenamingMigration(
job_type=old_job,
new_job_type=new_job,
version="20230516170300",
description=f"update 'type' and 'unicity_id' fields in job from {old_job} to {new_job}",
)
migration.up()
assert not db[QUEUE_COLLECTION_JOBS].find_one({"type": old_job}) # Ensure 0 records with old type
result = db[QUEUE_COLLECTION_JOBS].find_one({"type": new_job})
assert result
assert result["unicity_id"] == f"{new_job},dataset,config,split"
db[QUEUE_COLLECTION_JOBS].drop()
| datasets-server-main | jobs/mongodb_migration/tests/test_renaming_migration.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
| datasets-server-main | jobs/mongodb_migration/tests/__init__.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from typing import Optional
import pytest
from mongodb_migration.migration import Migration
class MigrationOK(Migration):
def up(self) -> None:
pass
def down(self) -> None:
pass
def validate(self) -> None:
pass
version_ok = "20221110230400"
description = "description a"
version_date_error = "20225510230400"
version_format_error = "wrong format"
version_too_short = "20221110"
@pytest.mark.parametrize(
"version,description,exception",
[
(version_ok, None, ValueError),
(None, description, ValueError),
(version_date_error, description, ValueError),
(version_format_error, description, ValueError),
(version_too_short, description, ValueError),
(version_ok, description, None),
],
)
def test_migration(version: str, description: str, exception: Optional[type[Exception]]) -> None:
if exception is None:
MigrationOK(version=version, description=description)
else:
with pytest.raises(exception):
MigrationOK(version=version, description=description)
| datasets-server-main | jobs/mongodb_migration/tests/test_migration.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
from libcommon.constants import (
CACHE_COLLECTION_RESPONSES,
CACHE_METRICS_COLLECTION,
CACHE_MONGOENGINE_ALIAS,
METRICS_MONGOENGINE_ALIAS,
QUEUE_COLLECTION_JOBS,
QUEUE_METRICS_COLLECTION,
QUEUE_MONGOENGINE_ALIAS,
)
from libcommon.queue import JobDocument
from libcommon.resources import MongoResource
from libcommon.utils import get_datetime
from mongoengine.connection import get_db
from mongodb_migration.deletion_migrations import (
CacheDeletionMigration,
MetricsDeletionMigration,
MigrationQueueDeleteTTLIndex,
QueueDeletionMigration,
get_index_names,
)
def test_cache_deletion_migration(mongo_host: str) -> None:
kind = "cache_kind"
with MongoResource(
database="test_cache_delete_migration",
host=mongo_host,
mongoengine_alias=CACHE_MONGOENGINE_ALIAS,
):
db = get_db(CACHE_MONGOENGINE_ALIAS)
db[CACHE_COLLECTION_RESPONSES].insert_many([{"kind": kind, "dataset": "dataset", "http_status": 200}])
assert db[CACHE_COLLECTION_RESPONSES].find_one({"kind": kind}) # Ensure there is at least one record to delete
migration = CacheDeletionMigration(
cache_kind=kind,
version="20230505180100",
description=f"remove cache for kind {kind}",
)
migration.up()
assert not db[CACHE_COLLECTION_RESPONSES].find_one({"kind": kind}) # Ensure 0 records with old kind
db[CACHE_COLLECTION_RESPONSES].drop()
def test_queue_deletion_migration(mongo_host: str) -> None:
job_type = "job_type"
with MongoResource(
database="test_queue_delete_migration",
host=mongo_host,
mongoengine_alias=QUEUE_MONGOENGINE_ALIAS,
):
db = get_db(QUEUE_MONGOENGINE_ALIAS)
db[QUEUE_COLLECTION_JOBS].insert_many(
[
{
"type": job_type,
"unicity_id": f"{job_type},dataset,config,split",
"dataset": "dataset",
"revision": "revision",
"http_status": 200,
}
]
)
assert db[QUEUE_COLLECTION_JOBS].find_one({"type": job_type}) # Ensure there is at least one record to delete
migration = QueueDeletionMigration(
job_type=job_type,
version="20230505180200",
description=f"remove jobs of type '{job_type}'",
)
migration.up()
assert not db[QUEUE_COLLECTION_JOBS].find_one({"type": job_type}) # Ensure 0 records with old type
db[QUEUE_COLLECTION_JOBS].drop()
def test_metrics_deletion_migration(mongo_host: str) -> None:
step_name = job_type = cache_kind = "step_name"
with MongoResource(
database="test_metrics_delete_migration",
host=mongo_host,
mongoengine_alias=METRICS_MONGOENGINE_ALIAS,
):
db = get_db(METRICS_MONGOENGINE_ALIAS)
db[QUEUE_METRICS_COLLECTION].insert_many([{"queue": job_type, "status": "waiting", "total": 0}])
db[CACHE_METRICS_COLLECTION].insert_many([{"kind": cache_kind, "http_status": 400, "total": 0}])
assert db[QUEUE_METRICS_COLLECTION].find_one(
{"queue": job_type}
) # Ensure there is at least one record to delete
assert db[CACHE_METRICS_COLLECTION].find_one(
{"kind": cache_kind}
) # Ensure there is at least one record to delete
migration = MetricsDeletionMigration(
job_type=job_type,
cache_kind=cache_kind,
version="20230505180300",
description=f"delete the queue and cache metrics for step '{step_name}'",
)
migration.up()
assert not db[QUEUE_METRICS_COLLECTION].find_one({"queue": job_type}) # Ensure 0 records after deletion
assert not db[CACHE_METRICS_COLLECTION].find_one({"kind": cache_kind}) # Ensure 0 records after deletion
db[QUEUE_METRICS_COLLECTION].drop()
db[CACHE_METRICS_COLLECTION].drop()
def test_queue_delete_ttl_index(mongo_host: str) -> None:
with MongoResource(database="test_queue_delete_ttl_index", host=mongo_host, mongoengine_alias="queue"):
JobDocument(
type="test",
dataset="test",
revision="test",
unicity_id="test",
namespace="test",
created_at=get_datetime(),
difficulty=50,
).save()
db = get_db(QUEUE_MONGOENGINE_ALIAS)
assert (
len(get_index_names(db[QUEUE_COLLECTION_JOBS].index_information(), "finished_at")) == 1
) # Ensure the TTL index exists
migration = MigrationQueueDeleteTTLIndex(
version="20230428145000",
description="remove ttl index on field 'finished_at'",
field_name="finished_at",
)
migration.up()
assert (
len(get_index_names(db[QUEUE_COLLECTION_JOBS].index_information(), "finished_at")) == 0
) # Ensure the TTL index does not exist anymore
db[QUEUE_COLLECTION_JOBS].drop()
| datasets-server-main | jobs/mongodb_migration/tests/test_deletion_migrations.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
from libcommon.constants import CACHE_METRICS_COLLECTION, METRICS_MONGOENGINE_ALIAS
from libcommon.resources import MongoResource
from mongoengine.connection import get_db
from pytest import raises
from mongodb_migration.drop_migrations import MigrationDropCollection
from mongodb_migration.migration import IrreversibleMigrationError
def test_drop_collection(mongo_host: str) -> None:
with MongoResource(database="test_drop_collection", host=mongo_host, mongoengine_alias=METRICS_MONGOENGINE_ALIAS):
db = get_db(METRICS_MONGOENGINE_ALIAS)
db[CACHE_METRICS_COLLECTION].insert_many(
[
{
"kind": "kind",
"error_code": "UnexpectedError",
"http_status": 500,
"total": 1,
}
]
)
assert db[CACHE_METRICS_COLLECTION].find_one({"kind": "kind"}) is not None
assert CACHE_METRICS_COLLECTION in db.list_collection_names() # type: ignore
migration = MigrationDropCollection(
version="20230811063600",
description="drop cache metrics collection",
alias=METRICS_MONGOENGINE_ALIAS,
collection_name=CACHE_METRICS_COLLECTION,
)
migration.up()
assert db[CACHE_METRICS_COLLECTION].find_one({"kind": "kind"}) is None
assert CACHE_METRICS_COLLECTION not in db.list_collection_names() # type: ignore
with raises(IrreversibleMigrationError):
migration.down()
| datasets-server-main | jobs/mongodb_migration/tests/test_drop_migrations.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from mongodb_migration.collector import MigrationsCollector
def test_collector() -> None:
collector = MigrationsCollector()
migrations = collector.get_migrations()
assert len(migrations) >= 1
assert migrations[0].version == "20221110230400"
assert migrations[0].description == "example"
| datasets-server-main | jobs/mongodb_migration/tests/test_collector.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from typing import Optional
from libcommon.constants import QUEUE_COLLECTION_LOCKS, QUEUE_MONGOENGINE_ALIAS
from libcommon.resources import MongoResource
from mongoengine.connection import get_db
from mongodb_migration.migrations._20230622131500_lock_add_owner import (
MigrationAddOwnerToQueueLock,
)
def assert_owner(key: str, owner: Optional[str]) -> None:
db = get_db(QUEUE_MONGOENGINE_ALIAS)
entry = db[QUEUE_COLLECTION_LOCKS].find_one({"key": key})
assert entry is not None
if owner is None:
assert "owner" not in entry or entry["owner"] is None
else:
assert entry["owner"] == owner
def test_lock_add_owner(mongo_host: str) -> None:
with MongoResource(database="test_lock_add_owner", host=mongo_host, mongoengine_alias="queue"):
db = get_db(QUEUE_MONGOENGINE_ALIAS)
db[QUEUE_COLLECTION_LOCKS].insert_many(
[
{
"key": "key1",
"job_id": "job_id1",
"created_at": "2022-01-01T00:00:00.000000Z",
},
{
"key": "key2",
"job_id": None,
"created_at": "2022-01-01T00:00:00.000000Z",
},
{
"key": "key3",
"job_id": "job_id3",
"owner": "owner3",
"created_at": "2022-01-01T00:00:00.000000Z",
},
]
)
migration = MigrationAddOwnerToQueueLock(
version="20230622131500",
description="add owner field to locks",
)
migration.up()
assert_owner("key1", "job_id1")
assert_owner("key2", None)
assert_owner("key3", "owner3")
migration.down()
assert_owner("key1", None)
assert_owner("key2", None)
assert_owner("key3", None)
db[QUEUE_COLLECTION_LOCKS].drop()
| datasets-server-main | jobs/mongodb_migration/tests/migrations/test_20230622131500_lock_add_owner.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from libcommon.constants import QUEUE_COLLECTION_JOBS, QUEUE_MONGOENGINE_ALIAS
from libcommon.resources import MongoResource
from mongoengine.connection import get_db
from mongodb_migration.migrations._20230705160600_queue_job_add_difficulty import (
MigrationQueueAddDifficultyToJob,
)
def test_queue_add_difficulty_to_jobs(mongo_host: str) -> None:
with MongoResource(database="test_queue_add_difficulty_to_jobs", host=mongo_host, mongoengine_alias="queue"):
db = get_db(QUEUE_MONGOENGINE_ALIAS)
db[QUEUE_COLLECTION_JOBS].insert_many(
[
{
"type": "test",
"dataset": "test",
"revision": "123",
"unicity_id": "test",
"namespace": "test",
"created_at": "2022-01-01T00:00:00.000000Z",
},
{
"type": "dataset-is-valid",
"dataset": "test",
"revision": "123",
"unicity_id": "test",
"namespace": "test",
"created_at": "2022-01-01T00:00:00.000000Z",
},
]
)
migration = MigrationQueueAddDifficultyToJob(
version="20230705160600",
description="add difficulty field to jobs",
)
migration.up()
results = list(db[QUEUE_COLLECTION_JOBS].find({"type": "test"}))
assert len(results) == 1
assert results[0]["difficulty"] == 50
results = list(db[QUEUE_COLLECTION_JOBS].find({"type": "dataset-is-valid"}))
assert len(results) == 1
assert results[0]["difficulty"] == 20
migration.down()
results = list(db[QUEUE_COLLECTION_JOBS].find({"dataset": "test"}))
assert len(results) == 2
assert all("difficulty" not in result for result in results)
db[QUEUE_COLLECTION_JOBS].drop()
| datasets-server-main | jobs/mongodb_migration/tests/migrations/test_20230705160600_queue_job_add_difficulty.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
from libcommon.constants import QUEUE_COLLECTION_JOBS, QUEUE_MONGOENGINE_ALIAS
from libcommon.queue import JobDocument
from libcommon.resources import MongoResource
from libcommon.utils import get_datetime
from mongoengine.connection import get_db
from pytest import raises
from mongodb_migration.migration import IrreversibleMigrationError
from mongodb_migration.migrations._20230511100700_queue_delete_indexes_with_force import (
MigrationQueueDeleteIndexesWithForce,
field_name,
get_index_names,
)
def test_queue_delete_indexes_with_force(mongo_host: str) -> None:
with MongoResource(database="test_queue_delete_indexes_with_force", host=mongo_host, mongoengine_alias="queue"):
JobDocument(
type="test",
dataset="test",
revision="revision",
unicity_id="test",
namespace="test",
created_at=get_datetime(),
difficulty=50,
).save()
db = get_db(QUEUE_MONGOENGINE_ALIAS)
db[QUEUE_COLLECTION_JOBS].create_index(field_name)
db[QUEUE_COLLECTION_JOBS].create_index([(field_name, 1), ("type", 1)])
db[QUEUE_COLLECTION_JOBS].create_index([("type", 1), (field_name, 1)])
assert (
len(get_index_names(db[QUEUE_COLLECTION_JOBS].index_information(), "force")) == 3
) # Ensure the indexes exists
migration = MigrationQueueDeleteIndexesWithForce(
version="20230511100700",
description="remove indexes with field 'force'",
)
migration.up()
assert (
len(get_index_names(db[QUEUE_COLLECTION_JOBS].index_information(), "force")) == 0
) # Ensure the indexes do not exist anymore
with raises(IrreversibleMigrationError):
migration.down()
db[QUEUE_COLLECTION_JOBS].drop()
| datasets-server-main | jobs/mongodb_migration/tests/migrations/test_20230511100700_queue_delete_indexes_with_force.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from typing import Optional
import pytest
from libcommon.constants import CACHE_COLLECTION_RESPONSES, CACHE_MONGOENGINE_ALIAS
from libcommon.resources import MongoResource
from mongoengine.connection import get_db
from mongodb_migration.migrations._20230309141600_cache_add_job_runner_version import (
MigrationAddJobRunnerVersionToCacheResponse,
)
def test_cache_add_job_runner_version_without_worker_version(mongo_host: str) -> None:
with MongoResource(database="test_cache_add_job_runner_version", host=mongo_host, mongoengine_alias="cache"):
db = get_db(CACHE_MONGOENGINE_ALIAS)
db[CACHE_COLLECTION_RESPONSES].insert_many(
[{"kind": "/splits", "dataset": "dataset_without_worker_version", "http_status": 200}]
)
migration = MigrationAddJobRunnerVersionToCacheResponse(
version="20230309141600", description="add 'job_runner_version' field based on 'worker_version' value"
)
migration.up()
result = db[CACHE_COLLECTION_RESPONSES].find_one({"dataset": "dataset_without_worker_version"})
assert result
assert not result["job_runner_version"]
db[CACHE_COLLECTION_RESPONSES].drop()
@pytest.mark.parametrize(
"worker_version,expected",
[
("2.0.0", 2),
("1.5.0", 1),
("WrongFormat", None),
(None, None),
],
)
def test_cache_add_job_runner_version(mongo_host: str, worker_version: str, expected: Optional[int]) -> None:
with MongoResource(database="test_cache_add_job_runner_version", host=mongo_host, mongoengine_alias="cache"):
db = get_db(CACHE_MONGOENGINE_ALIAS)
db[CACHE_COLLECTION_RESPONSES].insert_many(
[{"kind": "/splits", "dataset": "dataset", "http_status": 200, "worker_version": worker_version}]
)
migration = MigrationAddJobRunnerVersionToCacheResponse(
version="20230309141600", description="add 'job_runner_version' field based on 'worker_version' value"
)
migration.up()
result = db[CACHE_COLLECTION_RESPONSES].find_one({"dataset": "dataset"})
assert result
assert result["job_runner_version"] == expected
db[CACHE_COLLECTION_RESPONSES].drop()
| datasets-server-main | jobs/mongodb_migration/tests/migrations/test_20230309141600_cache_add_job_runner_version.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from libcommon.constants import QUEUE_COLLECTION_JOBS, QUEUE_MONGOENGINE_ALIAS
from libcommon.resources import MongoResource
from mongoengine.connection import get_db
from pytest import raises
from mongodb_migration.migration import IrreversibleMigrationError
from mongodb_migration.migrations._20230511100600_queue_remove_force import (
MigrationRemoveForceFromJob,
)
def test_queue_remove_force(mongo_host: str) -> None:
with MongoResource(database="test_queue_remove_force", host=mongo_host, mongoengine_alias="queue"):
db = get_db(QUEUE_MONGOENGINE_ALIAS)
db[QUEUE_COLLECTION_JOBS].delete_many({})
db[QUEUE_COLLECTION_JOBS].insert_many(
[
{
"type": "test",
"dataset": "dataset_without_force",
"force": True,
}
]
)
migration = MigrationRemoveForceFromJob(
version="20230511100600", description="remove 'force' field from queue"
)
migration.up()
result = db[QUEUE_COLLECTION_JOBS].find_one({"dataset": "dataset_without_force"})
assert result
assert "force" not in result
with raises(IrreversibleMigrationError):
migration.down()
db[QUEUE_COLLECTION_JOBS].drop()
| datasets-server-main | jobs/mongodb_migration/tests/migrations/test_20230511100600_queue_remove_force.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from libcommon.constants import QUEUE_COLLECTION_JOBS, QUEUE_MONGOENGINE_ALIAS
from libcommon.resources import MongoResource
from mongoengine.connection import get_db
from pytest import raises
from mongodb_migration.migration import IrreversibleMigrationError
from mongodb_migration.migrations._20230511110700_queue_delete_skipped_jobs import (
MigrationDeleteSkippedJobs,
status,
)
def test_queue_delete_skipped_jobs(mongo_host: str) -> None:
with MongoResource(database="test_delete_skipped_jobs", host=mongo_host, mongoengine_alias="queue"):
db = get_db(QUEUE_MONGOENGINE_ALIAS)
db[QUEUE_COLLECTION_JOBS].delete_many({})
db[QUEUE_COLLECTION_JOBS].insert_many(
[
{
"type": "test",
"dataset": "dataset",
"status": status,
},
{
"type": "test",
"dataset": "dataset",
"status": "waiting",
},
{
"type": "test",
"dataset": "dataset",
"status": status,
},
{
"type": "test",
"dataset": "dataset",
"status": "started",
},
]
)
migration = MigrationDeleteSkippedJobs(
version="20230511110700", description=f"delete jobs with {status} status"
)
migration.up()
result = list(db[QUEUE_COLLECTION_JOBS].find({"dataset": "dataset"}))
assert len(result) == 2
assert all(doc["status"] != status for doc in result)
with raises(IrreversibleMigrationError):
migration.down()
db[QUEUE_COLLECTION_JOBS].drop()
| datasets-server-main | jobs/mongodb_migration/tests/migrations/test_20230511110700_queue_delete_skipped_jobs.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from libcommon.constants import QUEUE_COLLECTION_JOBS, QUEUE_MONGOENGINE_ALIAS
from libcommon.resources import MongoResource
from mongoengine.connection import get_db
from mongodb_migration.migrations._20230516101500_queue_job_add_revision import (
MigrationQueueAddRevisionToJob,
)
def test_queue_add_revision_to_jobs(mongo_host: str) -> None:
with MongoResource(database="test_queue_add_revision_to_jobs", host=mongo_host, mongoengine_alias="queue"):
db = get_db(QUEUE_MONGOENGINE_ALIAS)
db[QUEUE_COLLECTION_JOBS].insert_one(
{
"type": "test",
"dataset": "test",
"unicity_id": "test",
"namespace": "test",
"created_at": "2022-01-01T00:00:00.000000Z",
}
)
migration = MigrationQueueAddRevisionToJob(
version="20230516101500",
description="add revision field to jobs",
)
migration.up()
result = list(db[QUEUE_COLLECTION_JOBS].find({"dataset": "test"}))
assert len(result) == 1
assert result[0]["revision"] == "main"
migration.down()
result = list(db[QUEUE_COLLECTION_JOBS].find({"dataset": "test"}))
assert len(result) == 1
assert "revision" not in result[0]
db[QUEUE_COLLECTION_JOBS].drop()
| datasets-server-main | jobs/mongodb_migration/tests/migrations/test_20230516101500_queue_job_add_revision.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from typing import Any
from libcommon.constants import CACHE_COLLECTION_RESPONSES, CACHE_MONGOENGINE_ALIAS
from libcommon.resources import MongoResource
from mongoengine.connection import get_db
from mongodb_migration.migrations._20230703110100_cache_add_partial_field_in_config_parquet_and_info import (
MigrationAddPartialToCacheResponse,
)
def assert_partial(dataset: str, kind: str) -> None:
db = get_db(CACHE_MONGOENGINE_ALIAS)
entry = db[CACHE_COLLECTION_RESPONSES].find_one({"dataset": dataset, "kind": kind})
assert entry is not None
assert entry["content"]["partial"] is False
def assert_unchanged(dataset: str, kind: str) -> None:
db = get_db(CACHE_MONGOENGINE_ALIAS)
entry = db[CACHE_COLLECTION_RESPONSES].find_one({"dataset": dataset, "kind": kind})
assert entry is not None
assert "partial" not in entry["content"]
cache: list[dict[str, Any]] = [
{
"config": "lhoestq--demo1",
"dataset": "lhoestq/demo1",
"kind": "config-parquet-and-info",
"split": None,
"content": {
"parquet_files": [
{
"dataset": "lhoestq/demo1",
"config": "lhoestq--demo1",
"split": "test",
"url": "https://huggingface.co/.../csv-test.parquet",
"filename": "csv-test.parquet",
"size": 4415,
},
{
"dataset": "lhoestq/demo1",
"config": "lhoestq--demo1",
"split": "train",
"url": "https://huggingface.co/.../csv-train.parquet",
"filename": "csv-train.parquet",
"size": 5038,
},
],
"dataset_info": {
"description": "",
"citation": "",
"homepage": "",
"license": "",
"features": {},
"builder_name": "csv",
"config_name": "lhoestq--demo1",
"version": {},
"splits": {},
"download_checksums": {},
"download_size": 2340,
"dataset_size": 2464,
"size_in_bytes": 4804,
},
},
"dataset_git_revision": "87ecf163bedca9d80598b528940a9c4f99e14c11",
"details": None,
"error_code": None,
"http_status": 200,
"job_runner_version": 3,
"progress": 1.0,
},
{
"config": "lhoestq--error",
"dataset": "lhoestq/error",
"kind": "config-parquet-and-info",
"split": None,
"content": {"error": "Streaming is not supported for lhoestq/error"},
"dataset_git_revision": "ec3c8d414af3dfe600399f5e6ef2c682938676f3",
"details": {
"error": "Streaming is not supported for lhoestq/error",
"cause_exception": "TypeError",
"cause_message": "Streaming is not supported for lhoestq/error",
"cause_traceback": [
"Traceback (most recent call last):\n",
(
' File "/src/services/worker/src/worker/job_manager.py", line 163, in process\n '
" job_result = self.job_runner.compute()\n"
),
(
' File "/src/services/worker/src/worker/job_runners/config/parquet_and_info.py", line'
" 932, in compute\n compute_config_parquet_and_info_response(\n"
),
(
' File "/src/services/worker/src/worker/job_runners/config/parquet_and_info.py", line'
" 825, in compute_config_parquet_and_info_response\n raise_if_not_supported(\n"
),
(
' File "/src/services/worker/src/worker/job_runners/config/parquet_and_info.py", line'
" 367, in raise_if_not_supported\n raise_if_too_big_from_external_data_files(\n"
),
(
' File "/src/services/worker/src/worker/job_runners/config/parquet_and_info.py", line'
" 447, in raise_if_too_big_from_external_data_files\n "
" builder._split_generators(mock_dl_manager)\n"
),
(
" File"
' "/tmp/modules-cache/datasets_modules/.../error.py",'
' line 190, in _split_generators\n raise TypeError("Streaming is not supported for'
' lhoestq/error")\n'
),
"TypeError: Streaming is not supported for lhoestq/error\n",
],
},
"error_code": "UnexpectedError",
"http_status": 500,
"job_runner_version": 3,
"progress": None,
},
]
cache2: list[dict[str, Any]] = [
{
"config": "lhoestq--demo2",
"dataset": "lhoestq/demo2",
"kind": kind,
"split": None,
"content": {},
"http_status": 200,
}
for kind in [
"config-parquet-and-info",
"config-parquet",
"dataset-parquet",
"config-parquet-metadata",
"config-info",
"dataset-info",
"config-size",
"dataset-size",
]
]
def test_cache_add_partial(mongo_host: str) -> None:
with MongoResource(database="test_cache_add_partial", host=mongo_host, mongoengine_alias="cache"):
db = get_db(CACHE_MONGOENGINE_ALIAS)
db[CACHE_COLLECTION_RESPONSES].insert_many(cache + cache2)
migration = MigrationAddPartialToCacheResponse(
version="20230703110100",
description="add partial field to config-parquet-and-info",
)
migration.up()
assert_partial("lhoestq/demo1", kind="config-parquet-and-info")
assert_unchanged("lhoestq/error", kind="config-parquet-and-info")
for kind in [
"config-parquet-and-info",
"config-parquet",
"dataset-parquet",
"config-parquet-metadata",
"config-info",
"dataset-info",
"config-size",
"dataset-size",
]:
assert_partial("lhoestq/demo2", kind=kind)
migration.down()
assert_unchanged("lhoestq/demo1", kind="config-parquet-and-info")
assert_unchanged("lhoestq/error", kind="config-parquet-and-info")
for kind in [
"config-parquet-and-info",
"config-parquet",
"dataset-parquet",
"config-parquet-metadata",
"config-info",
"dataset-info",
"config-size",
"dataset-size",
]:
assert_unchanged("lhoestq/demo2", kind=kind)
db[CACHE_COLLECTION_RESPONSES].drop()
| datasets-server-main | jobs/mongodb_migration/tests/migrations/test_20230703110100_cache_add_partial_field_in_config_parquet_and_info.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
| datasets-server-main | jobs/mongodb_migration/tests/migrations/__init__.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from libcommon.constants import CACHE_COLLECTION_RESPONSES, CACHE_MONGOENGINE_ALIAS
from libcommon.resources import MongoResource
from mongoengine.connection import get_db
from pytest import raises
from mongodb_migration.migration import IrreversibleMigrationError
from mongodb_migration.migrations._20230313164200_cache_remove_worker_version import (
MigrationRemoveWorkerVersionFromCachedResponse,
)
def test_cache_remove_worker_version(mongo_host: str) -> None:
with MongoResource(database="test_cache_remove_worker_version", host=mongo_host, mongoengine_alias="cache"):
db = get_db(CACHE_MONGOENGINE_ALIAS)
db[CACHE_COLLECTION_RESPONSES].delete_many({})
db[CACHE_COLLECTION_RESPONSES].insert_many(
[
{
"kind": "/splits",
"dataset": "dataset_without_worker_version",
"http_status": 200,
"worker_version": "1.0.0",
}
]
)
migration = MigrationRemoveWorkerVersionFromCachedResponse(
version="20230313164200", description="remove 'worker_version' field from cache"
)
migration.up()
result = db[CACHE_COLLECTION_RESPONSES].find_one({"dataset": "dataset_without_worker_version"})
assert result
assert "worker_version" not in result
with raises(IrreversibleMigrationError):
migration.down()
db[CACHE_COLLECTION_RESPONSES].drop()
| datasets-server-main | jobs/mongodb_migration/tests/migrations/test_20230313164200_cache_remove_worker_version.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from typing import Optional
from libcommon.constants import QUEUE_COLLECTION_LOCKS, QUEUE_MONGOENGINE_ALIAS
from libcommon.resources import MongoResource
from mongoengine.connection import get_db
from mongodb_migration.migrations._20230825170200_lock_add_ttl import (
MigrationAddTtlToQueueLock,
)
def assert_ttl(key: str, ttl: Optional[int]) -> None:
db = get_db(QUEUE_MONGOENGINE_ALIAS)
entry = db[QUEUE_COLLECTION_LOCKS].find_one({"key": key})
assert entry is not None
if ttl is None:
assert "ttl" not in entry or entry["ttl"] is None
else:
assert entry["ttl"] == ttl
def test_lock_add_ttl(mongo_host: str) -> None:
with MongoResource(database="test_lock_add_ttl", host=mongo_host, mongoengine_alias="queue"):
db = get_db(QUEUE_MONGOENGINE_ALIAS)
db[QUEUE_COLLECTION_LOCKS].insert_many(
[
{
"key": "key1",
"owner": "job_id1",
"created_at": "2022-01-01T00:00:00.000000Z",
},
{
"key": "key2",
"owner": None,
"created_at": "2022-01-01T00:00:00.000000Z",
},
{
"key": "key3",
"owner": "job_id3",
"ttl": 600,
"created_at": "2022-01-01T00:00:00.000000Z",
},
]
)
migration = MigrationAddTtlToQueueLock(
version="20230825170200",
description="add ttl field to locks",
)
migration.up()
assert_ttl("key1", None)
assert_ttl("key2", None)
assert_ttl("key3", 600)
migration.down()
assert_ttl("key1", None)
assert_ttl("key2", None)
assert_ttl("key3", None)
db[QUEUE_COLLECTION_LOCKS].drop()
| datasets-server-main | jobs/mongodb_migration/tests/migrations/test_20230825170200_lock_add_ttl.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from libcommon.constants import QUEUE_COLLECTION_JOBS, QUEUE_MONGOENGINE_ALIAS
from libcommon.resources import MongoResource
from mongoengine.connection import get_db
from pytest import raises
from mongodb_migration.migration import IrreversibleMigrationError
from mongodb_migration.migrations._20230516101600_queue_delete_index_without_revision import (
INDEX_DEFINITION,
MigrationQueueDeleteIndexWithoutRevision,
get_index_names,
)
def test_queue_delete_index_without_revision(mongo_host: str) -> None:
with MongoResource(
database="test_queue_delete_index_without_revision", host=mongo_host, mongoengine_alias="queue"
):
db = get_db(QUEUE_MONGOENGINE_ALIAS)
db[QUEUE_COLLECTION_JOBS].create_index(INDEX_DEFINITION)
        assert len(get_index_names(db[QUEUE_COLLECTION_JOBS].index_information())) == 1  # Ensure the index exists
migration = MigrationQueueDeleteIndexWithoutRevision(
version="20230516101600",
description="remove index without revision",
)
migration.up()
assert (
len(get_index_names(db[QUEUE_COLLECTION_JOBS].index_information())) == 0
) # Ensure the indexes do not exist anymore
with raises(IrreversibleMigrationError):
migration.down()
db[QUEUE_COLLECTION_JOBS].drop()
| datasets-server-main | jobs/mongodb_migration/tests/migrations/test_20230516101600_queue_delete_index_without_revision.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from typing import Any
from libcommon.constants import CACHE_COLLECTION_RESPONSES, CACHE_MONGOENGINE_ALIAS
from libcommon.resources import MongoResource
from mongoengine.connection import get_db
from mongodb_migration.migrations._20230824154900_cache_add_features_field_in_split_duckdb_index import (
MigrationAddFeaturesToSplitDuckdbIndexCacheResponse,
)
def assert_features(dataset: str, kind: str) -> None:
db = get_db(CACHE_MONGOENGINE_ALIAS)
entry = db[CACHE_COLLECTION_RESPONSES].find_one({"dataset": dataset, "kind": kind})
assert entry is not None
assert entry["content"]["features"] is None
def assert_unchanged(dataset: str, kind: str) -> None:
db = get_db(CACHE_MONGOENGINE_ALIAS)
entry = db[CACHE_COLLECTION_RESPONSES].find_one({"dataset": dataset, "kind": kind})
assert entry is not None
assert "features" not in entry["content"]
cache: list[dict[str, Any]] = [
{
"config": "lhoestq--demo1",
"dataset": "lhoestq/demo1",
"kind": "split-duckdb-index",
"split": "train",
"content": {
"dataset": "lhoestq/demo1",
"config": "default",
"split": "train",
"url": "https://huggingface.co/.../index.duckdb",
"filename": "index.duckdb",
"size": 5038,
},
"dataset_git_revision": "87ecf163bedca9d80598b528940a9c4f99e14c11",
"details": None,
"error_code": None,
"http_status": 200,
"job_runner_version": 3,
"progress": 1.0,
},
{
"config": "lhoestq--error",
"dataset": "lhoestq/error",
"kind": "split-duckdb-index",
"split": "train",
"content": {"error": "Streaming is not supported for lhoestq/error"},
"dataset_git_revision": "ec3c8d414af3dfe600399f5e6ef2c682938676f3",
"details": {
"error": "Streaming is not supported for lhoestq/error",
"cause_exception": "TypeError",
"cause_message": "Streaming is not supported for lhoestq/error",
"cause_traceback": [
"Traceback (most recent call last):\n",
(
' File "/src/services/worker/src/worker/job_manager.py", line 163, in process\n '
" job_result = self.job_runner.compute()\n"
),
(
' File "/src/services/worker/src/worker/job_runners/config/parquet_and_info.py", line'
" 932, in compute\n compute_config_parquet_and_info_response(\n"
),
(
' File "/src/services/worker/src/worker/job_runners/config/parquet_and_info.py", line'
" 825, in compute_config_parquet_and_info_response\n raise_if_not_supported(\n"
),
(
' File "/src/services/worker/src/worker/job_runners/config/parquet_and_info.py", line'
" 367, in raise_if_not_supported\n raise_if_too_big_from_external_data_files(\n"
),
(
' File "/src/services/worker/src/worker/job_runners/config/parquet_and_info.py", line'
" 447, in raise_if_too_big_from_external_data_files\n "
" builder._split_generators(mock_dl_manager)\n"
),
(
" File"
' "/tmp/modules-cache/datasets_modules/.../error.py",'
' line 190, in _split_generators\n raise TypeError("Streaming is not supported for'
' lhoestq/error")\n'
),
"TypeError: Streaming is not supported for lhoestq/error\n",
],
},
"error_code": "UnexpectedError",
"http_status": 500,
"job_runner_version": 3,
"progress": None,
},
]
def test_cache_add_features(mongo_host: str) -> None:
with MongoResource(database="test_cache_add_features", host=mongo_host, mongoengine_alias="cache"):
db = get_db(CACHE_MONGOENGINE_ALIAS)
db[CACHE_COLLECTION_RESPONSES].insert_many(cache)
migration = MigrationAddFeaturesToSplitDuckdbIndexCacheResponse(
version="20230824154900",
description="add features field to split-duckdb-index",
)
migration.up()
assert_features("lhoestq/demo1", kind="split-duckdb-index")
assert_unchanged("lhoestq/error", kind="split-duckdb-index")
migration.down()
assert_unchanged("lhoestq/demo1", kind="split-duckdb-index")
assert_unchanged("lhoestq/error", kind="split-duckdb-index")
db[CACHE_COLLECTION_RESPONSES].drop()
| datasets-server-main | jobs/mongodb_migration/tests/migrations/test_20230824154900_cache_add_features_field_in_split_duckdb_index.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import logging
from mongodb_migration.database_migrations import DatabaseMigration
from mongodb_migration.migration import Migration
class SavedMigrationsError(Exception):
pass
class Plan:
collected_migrations: list[Migration]
executed_migrations: list[Migration]
def __init__(self, collected_migrations: list[Migration]):
self.collected_migrations = collected_migrations
self.executed_migrations = []
def get_saved_migrations_versions(self) -> list[str]:
return DatabaseMigration.objects().distinct("version")
def get_planned_migrations(self) -> list[Migration]:
saved_migrations_versions = sorted(self.get_saved_migrations_versions())
collected_migrations = sorted(self.collected_migrations, key=lambda m: m.version)
first_collected_migrations_versions = [
migration.version for migration in collected_migrations[: len(saved_migrations_versions)]
]
if saved_migrations_versions != first_collected_migrations_versions:
logging.error(
"Database migrations are not in sync with collected migrations. Database:"
f" {saved_migrations_versions}, Collected: {first_collected_migrations_versions}"
)
raise SavedMigrationsError(
"The saved migrations in the database should be the first collected migrations."
)
num_saved_migrations = len(saved_migrations_versions)
num_collected_migrations = len(collected_migrations)
if not num_collected_migrations:
logging.error("No collected migrations")
if num_saved_migrations:
logging.info(f"{num_saved_migrations} migrations have already been applied. They will be skipped.")
if num_saved_migrations == len(collected_migrations):
logging.info("All migrations have already been applied.")
return collected_migrations[num_saved_migrations:]
def execute(self) -> None:
try:
self.apply()
except Exception as e:
logging.error(f"Migration failed: {e}")
self.rollback()
raise e
# ^ the script must stop with an error code
def apply(self) -> None:
logging.info("Start migrations")
self.executed_migrations = []
for migration in self.get_planned_migrations():
self.executed_migrations.append(migration)
logging.info(f"Migrate {migration.version}: add to the migrations collection")
self.save(migration)
logging.info(f"Migrate {migration.version}: apply")
migration.up()
logging.info(f"Migrate {migration.version}: validate")
migration.validate()
logging.info(f"Migrate {migration.version}: done")
logging.info("All migrations have been applied")
def rollback(self) -> None:
logging.info("Start rollback")
try:
while self.executed_migrations:
migration = self.executed_migrations[-1]
logging.info(f"Rollback {migration.version}: roll back")
migration.down()
logging.info(f"Rollback {migration.version}: removed from the migrations collection")
self.remove(migration)
logging.info(f"Rollback {migration.version}: done")
self.executed_migrations.pop()
logging.info("All executed migrations have been rolled back")
except Exception as e:
logging.error(
f"Rollback failed: {e}. The database is in an inconsistent state. Try to restore the backup manually."
)
raise e
def save(self, migration: Migration) -> None:
DatabaseMigration(version=migration.version, description=migration.description).save()
def remove(self, migration: Migration) -> None:
DatabaseMigration.objects(version=migration.version).delete()
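# --- Hedged usage sketch (editor's addition, not part of the original module) ---
# How a Plan is driven end to end. It assumes the mongoengine connections are already
# registered for the cache/queue/maintenance aliases (main.py does this through the
# *MongoResource classes) and reuses the package's no-op MigrationExample.
if __name__ == "__main__":
    from mongodb_migration.migrations._20221110230400_example import MigrationExample
    plan = Plan(collected_migrations=[MigrationExample(version="20221110230400", description="example")])
    print([migration.version for migration in plan.get_planned_migrations()])  # versions still to apply
    plan.execute()  # applies them in order, rolling back the executed ones on failure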
| datasets-server-main | jobs/mongodb_migration/src/mongodb_migration/plan.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from dataclasses import dataclass, field
from environs import Env
from libcommon.config import CacheConfig, LogConfig, QueueConfig
# TODO: Remove this config once all the collections in metrics db have been removed
METRICS_MONGO_DATABASE = "datasets_server_metrics"
METRICS_MONGO_URL = "mongodb://localhost:27017"
@dataclass(frozen=True)
class MetricsConfig:
mongo_database: str = METRICS_MONGO_DATABASE
mongo_url: str = METRICS_MONGO_URL
@classmethod
def from_env(cls) -> "MetricsConfig":
env = Env(expand_vars=True)
with env.prefixed("METRICS_"):
return cls(
mongo_database=env.str(name="MONGO_DATABASE", default=METRICS_MONGO_DATABASE),
mongo_url=env.str(name="MONGO_URL", default=METRICS_MONGO_URL),
)
DATABASE_MIGRATIONS_MONGO_DATABASE = "datasets_server_maintenance"
DATABASE_MIGRATIONS_MONGO_URL = "mongodb://localhost:27017"
@dataclass(frozen=True)
class DatabaseMigrationsConfig:
mongo_database: str = DATABASE_MIGRATIONS_MONGO_DATABASE
mongo_url: str = DATABASE_MIGRATIONS_MONGO_URL
@classmethod
def from_env(cls) -> "DatabaseMigrationsConfig":
env = Env(expand_vars=True)
with env.prefixed("DATABASE_MIGRATIONS_"):
return cls(
mongo_database=env.str(name="MONGO_DATABASE", default=DATABASE_MIGRATIONS_MONGO_DATABASE),
mongo_url=env.str(name="MONGO_URL", default=DATABASE_MIGRATIONS_MONGO_URL),
)
@dataclass(frozen=True)
class JobConfig:
cache: CacheConfig = field(default_factory=CacheConfig)
log: LogConfig = field(default_factory=LogConfig)
database_migrations: DatabaseMigrationsConfig = field(default_factory=DatabaseMigrationsConfig)
metrics: MetricsConfig = field(default_factory=MetricsConfig)
queue: QueueConfig = field(default_factory=QueueConfig)
@classmethod
def from_env(cls) -> "JobConfig":
return cls(
log=LogConfig.from_env(),
cache=CacheConfig.from_env(),
database_migrations=DatabaseMigrationsConfig.from_env(),
metrics=MetricsConfig.from_env(),
queue=QueueConfig.from_env(),
)
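# --- Hedged usage sketch (editor's addition, not part of the original module) ---
# Shows how the env.prefixed(...) blocks above map environment variables onto the
# config dataclasses; the variable values below are examples, not required settings.
if __name__ == "__main__":
    import os
    os.environ["DATABASE_MIGRATIONS_MONGO_URL"] = "mongodb://localhost:27017"
    os.environ["DATABASE_MIGRATIONS_MONGO_DATABASE"] = "datasets_server_maintenance"
    job_config = JobConfig.from_env()
    print(job_config.database_migrations.mongo_database)  # datasets_server_maintenance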
| datasets-server-main | jobs/mongodb_migration/src/mongodb_migration/config.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
# adapted from https://docs.mongoengine.org/guide/migration.html#post-processing-checks
import logging
from collections.abc import Callable, Iterator
from typing import Any, Optional, TypeVar
from mongoengine import Document
from pymongo.collection import Collection
# --- some typing subtleties, see https://github.com/sbdchd/mongo-types
U = TypeVar("U", bound=Document)
DocumentClass = type[U]
CustomValidation = Callable[[U], None]
# --- end
def get_random_oids(collection: Collection, sample_size: int) -> list[Any]:
pipeline = [{"$project": {"_id": 1}}, {"$sample": {"size": sample_size}}]
return [s["_id"] for s in collection.aggregate(pipeline)]
def get_random_documents(DocCls: DocumentClass[Document], sample_size: int) -> Iterator[Document]:
doc_collection = DocCls._get_collection()
random_oids = get_random_oids(doc_collection, sample_size)
return DocCls.objects(pk__in=random_oids) # type: ignore
def check_documents(
DocCls: DocumentClass[Document],
sample_size: int,
custom_validation: Optional[CustomValidation[Document]] = None,
) -> None:
for doc in get_random_documents(DocCls, sample_size):
try:
# general validation (types and values)
doc.validate()
# load all subfields,
# this may trigger additional queries if you have ReferenceFields
# so it may be slow
for field in doc._fields:
try:
getattr(doc, field)
except Exception:
logging.error(f"Could not load field {field} in Document {doc.pk}. Document: {doc.to_json()}")
raise
# custom validation
if custom_validation is not None:
custom_validation(doc)
except Exception as e:
logging.error(f"Validation error on document {doc.pk}: {e}. Document: {doc.to_json()}")
raise e
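# --- Hedged usage sketch (editor's addition, not part of the original module) ---
# A minimal call of check_documents() with a custom validation, as the migrations in
# this package do after updating documents. It assumes a mongoengine connection is
# already registered for the cache alias; the validation rule itself is hypothetical.
if __name__ == "__main__":
    from libcommon.simple_cache import CachedResponseDocument
    def _example_validation(doc: Document) -> None:
        # hypothetical rule: every sampled cache entry must expose an http_status
        if getattr(doc, "http_status", None) is None:
            raise ValueError(f"Document {doc.pk} has no http_status")
    check_documents(DocCls=CachedResponseDocument, sample_size=10, custom_validation=_example_validation)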
| datasets-server-main | jobs/mongodb_migration/src/mongodb_migration/check.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
import logging
from typing import Optional
from libcommon.queue import JobDocument
from libcommon.simple_cache import CachedResponseDocument
from mongoengine.connection import get_db
from mongodb_migration.check import check_documents
from mongodb_migration.migration import CacheMigration, QueueMigration
class CacheRenamingMigration(CacheMigration):
def __init__(self, cache_kind: str, new_cache_kind: str, version: str, description: Optional[str] = None):
self.new_cache_kind: str = new_cache_kind
if not description:
description = f"update 'kind' field in cache from '{cache_kind}' to '{new_cache_kind}'"
super().__init__(cache_kind=cache_kind, version=version, description=description)
def up(self) -> None:
logging.info(f"Rename cache_kind field from '{self.cache_kind}' to '{self.new_cache_kind}'")
db = get_db(self.MONGOENGINE_ALIAS)
# update existing documents with the old kind
result = db[self.COLLECTION_RESPONSES].update_many(
{"kind": self.cache_kind}, {"$set": {"kind": self.new_cache_kind}}
)
logging.info(
f"{result.matched_count} cache entries to be renamed - {result.modified_count} cache entries renamed"
)
def down(self) -> None:
logging.info(f"Rollback cache_kind field from '{self.new_cache_kind}' to '{self.cache_kind}'")
db = get_db(self.MONGOENGINE_ALIAS)
result = db[self.COLLECTION_RESPONSES].update_many(
{"kind": self.new_cache_kind}, {"$set": {"kind": self.cache_kind}}
)
logging.info(
f"{result.matched_count} cache entries to be renamed - {result.modified_count} cache entries renamed"
)
def validate(self) -> None:
logging.info("Validate modified documents")
check_documents(DocCls=CachedResponseDocument, sample_size=10)
class QueueRenamingMigration(QueueMigration):
def __init__(self, job_type: str, new_job_type: str, version: str, description: Optional[str] = None):
self.new_job_type: str = new_job_type
if not description:
description = f"update 'type' and 'unicity_id' fields in job from '{job_type}' to '{new_job_type}'"
super().__init__(job_type=job_type, version=version, description=description)
def up(self) -> None:
logging.info(
f"Rename unicity_id field from '{self.job_type}' to "
f"'{self.new_job_type}' and change type from '{self.job_type}' to "
f"'{self.new_job_type}'"
)
db = get_db(self.MONGOENGINE_ALIAS)
result = db[self.COLLECTION_JOBS].update_many(
{"type": self.job_type},
[
{
"$set": {
"unicity_id": {
"$replaceOne": {
"input": "$unicity_id",
"find": f"{self.job_type}",
"replacement": f"{self.new_job_type}",
}
},
"type": self.new_job_type,
}
},
], # type: ignore
)
logging.info(f"{result.matched_count} jobs to be renamed - {result.modified_count} jobs renamed")
def down(self) -> None:
logging.info(
f"Rename unicity_id field from '{self.new_job_type}' to "
f"'{self.job_type}' and change type from '{self.new_job_type}' to "
f"'{self.job_type}'"
)
db = get_db(self.MONGOENGINE_ALIAS)
result = db[self.COLLECTION_JOBS].update_many(
{"type": self.new_job_type},
[
{
"$set": {
"unicity_id": {
"$replaceOne": {
"input": "$unicity_id",
"find": f"{self.new_job_type}",
"replacement": f"{self.job_type}",
}
},
"type": self.new_job_type,
}
},
], # type: ignore
)
logging.info(f"{result.matched_count} jobs to be renamed - {result.modified_count} jobs renamed")
def validate(self) -> None:
logging.info("Validate modified documents")
check_documents(DocCls=JobDocument, sample_size=10)
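# --- Hedged sketch (editor's addition, not part of the original module) ---
# The update_many calls above pass an aggregation pipeline ($set with $replaceOne,
# MongoDB >= 4.4) instead of a plain update document. The toy example below shows the
# same pattern against a throwaway collection on a local mongod; all names are illustrative.
if __name__ == "__main__":
    from pymongo import MongoClient
    jobs = MongoClient("mongodb://localhost:27017")["example_db"]["example_jobs"]
    jobs.insert_one({"type": "/split-names", "unicity_id": "/split-names,squad,plain_text"})
    jobs.update_many(
        {"type": "/split-names"},
        [
            {
                "$set": {
                    "unicity_id": {
                        "$replaceOne": {
                            "input": "$unicity_id",
                            "find": "/split-names",
                            "replacement": "config-split-names",
                        }
                    },
                    "type": "config-split-names",
                }
            }
        ],
    )
    print(jobs.find_one({"type": "config-split-names"})["unicity_id"])  # config-split-names,squad,plain_text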
| datasets-server-main | jobs/mongodb_migration/src/mongodb_migration/renaming_migrations.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
DATABASE_MIGRATIONS_COLLECTION_MIGRATIONS = "databaseMigrations"
DATABASE_MIGRATIONS_MONGOENGINE_ALIAS = "maintenance"
| datasets-server-main | jobs/mongodb_migration/src/mongodb_migration/constants.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
from libcommon.constants import (
CACHE_METRICS_COLLECTION,
METRICS_MONGOENGINE_ALIAS,
QUEUE_METRICS_COLLECTION,
)
from mongodb_migration.deletion_migrations import (
CacheDeletionMigration,
MetricsDeletionMigration,
MigrationQueueDeleteTTLIndex,
QueueDeletionMigration,
)
from mongodb_migration.drop_migrations import MigrationDropCollection
from mongodb_migration.migration import Migration
from mongodb_migration.migrations._20221110230400_example import MigrationExample
from mongodb_migration.migrations._20221116133500_queue_job_add_force import (
MigrationAddForceToJob,
)
from mongodb_migration.migrations._20221117223000_cache_generic_response import (
MigrationMoveToGenericCachedResponse,
)
from mongodb_migration.migrations._20230126164900_queue_job_add_priority import (
MigrationAddPriorityToJob,
)
from mongodb_migration.migrations._20230309123100_cache_add_progress import (
MigrationAddProgressToCacheResponse,
)
from mongodb_migration.migrations._20230309141600_cache_add_job_runner_version import (
MigrationAddJobRunnerVersionToCacheResponse,
)
from mongodb_migration.migrations._20230313164200_cache_remove_worker_version import (
MigrationRemoveWorkerVersionFromCachedResponse,
)
from mongodb_migration.migrations._20230511100600_queue_remove_force import (
MigrationRemoveForceFromJob,
)
from mongodb_migration.migrations._20230511100700_queue_delete_indexes_with_force import (
MigrationQueueDeleteIndexesWithForce,
)
from mongodb_migration.migrations._20230511110700_queue_delete_skipped_jobs import (
MigrationDeleteSkippedJobs,
)
from mongodb_migration.migrations._20230516101500_queue_job_add_revision import (
MigrationQueueAddRevisionToJob,
)
from mongodb_migration.migrations._20230516101600_queue_delete_index_without_revision import (
MigrationQueueDeleteIndexWithoutRevision,
)
from mongodb_migration.migrations._20230622131500_lock_add_owner import (
MigrationAddOwnerToQueueLock,
)
from mongodb_migration.migrations._20230703110100_cache_add_partial_field_in_config_parquet_and_info import (
MigrationAddPartialToCacheResponse,
)
from mongodb_migration.migrations._20230705160600_queue_job_add_difficulty import (
MigrationQueueAddDifficultyToJob,
)
from mongodb_migration.renaming_migrations import (
CacheRenamingMigration,
QueueRenamingMigration,
)
# TODO: add a way to automatically collect migrations from the migrations/ folder
class MigrationsCollector:
def get_migrations(self) -> list[Migration]:
return [
MigrationExample(version="20221110230400", description="example"),
MigrationAddForceToJob(
version="20221116133500", description="add 'force' field to jobs in queue database"
),
MigrationMoveToGenericCachedResponse(
version="20221117223000",
description="replace SplitsResponse and FirstRowsResponse with a generic CachedResponse",
),
MigrationAddPriorityToJob(
version="20230126164900",
description="add 'priority' field to jobs in queue database",
),
CacheRenamingMigration(
cache_kind="/split-names",
new_cache_kind="/split-names-from-streaming",
version="20230216112500",
),
QueueRenamingMigration(
job_type="/split-names",
new_job_type="/split-names-from-streaming",
version="20230216141000",
),
MigrationAddProgressToCacheResponse(
version="20230309123100",
description="add the 'progress' field with the default value (1.0) to the cached results",
),
MigrationAddJobRunnerVersionToCacheResponse(
version="20230309141600", description="add 'job_runner_version' field based on 'worker_version' value"
),
MigrationRemoveWorkerVersionFromCachedResponse(
version="20230313164200", description="remove 'worker_version' field from cache"
),
CacheRenamingMigration(
cache_kind="/first-rows",
new_cache_kind="split-first-rows-from-streaming",
version="20230320163700",
),
QueueRenamingMigration(
job_type="/first-rows",
new_job_type="split-first-rows-from-streaming",
version="20230320165700",
),
CacheRenamingMigration(
cache_kind="/dataset-info",
new_cache_kind="dataset-info",
version="20230323155000",
),
QueueRenamingMigration(
job_type="/dataset-info",
new_job_type="dataset-info",
version="20230323160000",
),
QueueDeletionMigration(
job_type="/splits",
version="20230407091400",
),
CacheDeletionMigration(
cache_kind="/splits",
version="20230407091500",
),
QueueDeletionMigration(
job_type="/parquet-and-dataset-info",
version="20230424173000",
),
CacheDeletionMigration(
cache_kind="/parquet-and-dataset-info",
version="20230424174000",
),
MetricsDeletionMigration(
job_type="/parquet-and-dataset-info",
cache_kind="/parquet-and-dataset-info",
version="20230427121500",
),
MigrationQueueDeleteTTLIndex(
version="20230428145000",
description="delete the TTL index on the 'finished_at' field in the queue database",
field_name="finished_at",
),
CacheDeletionMigration(
cache_kind="dataset-split-names-from-streaming",
version="20230428175100",
),
QueueDeletionMigration(
job_type="dataset-split-names-from-streaming",
version="20230428181800",
),
MetricsDeletionMigration(
job_type="dataset-split-names-from-streaming",
cache_kind="dataset-split-names-from-streaming",
version="20230428193100",
),
CacheDeletionMigration(
cache_kind="dataset-split-names-from-dataset-info",
version="20230504185100",
),
QueueDeletionMigration(
job_type="dataset-split-names-from-dataset-info",
version="20230504192200",
),
MetricsDeletionMigration(
job_type="dataset-split-names-from-dataset-info",
cache_kind="dataset-split-names-from-dataset-info",
version="20230504194600",
),
MigrationRemoveForceFromJob(version="20230511100600", description="remove 'force' field from queue"),
MigrationQueueDeleteIndexesWithForce(
version="20230511100700", description="remove indexes with field 'force'"
),
MigrationDeleteSkippedJobs(version="20230511110700", description="delete jobs with skipped status"),
MigrationQueueAddRevisionToJob(
version="20230516101500", description="add 'revision' field to jobs in queue database"
),
MigrationQueueDeleteIndexWithoutRevision(
version="20230516101600", description="remove index without revision"
),
CacheRenamingMigration(
cache_kind="/split-names-from-streaming",
new_cache_kind="config-split-names-from-streaming",
version="20230516164500",
),
QueueRenamingMigration(
job_type="/split-names-from-streaming",
new_job_type="config-split-names-from-streaming",
version="20230516164700",
),
MetricsDeletionMigration(
job_type="/split-names-from-streaming",
cache_kind="/split-names-from-streaming",
version="20230522094400",
),
MigrationQueueDeleteTTLIndex(
version="20230523171700",
description=(
"delete the TTL index on the 'finished_at' field in the queue database to update its TTL value"
),
field_name="finished_at",
),
CacheRenamingMigration(
cache_kind="/split-names-from-dataset-info",
new_cache_kind="config-split-names-from-info",
version="20230524095900",
),
QueueRenamingMigration(
job_type="/split-names-from-dataset-info",
new_job_type="config-split-names-from-info",
version="20230524095901",
),
MetricsDeletionMigration(
job_type="/split-names-from-dataset-info",
cache_kind="/split-names-from-dataset-info",
version="20230524095902",
),
CacheRenamingMigration(
cache_kind="/config-names", new_cache_kind="dataset-config-names", version="20230524192200"
),
QueueRenamingMigration(
job_type="/config-names",
new_job_type="dataset-config-names",
version="20230524192300",
),
MetricsDeletionMigration(job_type="/config-names", cache_kind="/config-names", version="20230524192400"),
MigrationQueueDeleteTTLIndex(
version="20230607154800",
description=(
"delete the TTL index on the 'finished_at' field in the queue database to update its TTL condition"
),
field_name="finished_at",
),
MigrationQueueDeleteTTLIndex(
version="202306201100",
description=(
"delete the TTL index on the 'finished_at' field in the queue database to update its TTL condition"
),
field_name="finished_at",
),
MigrationAddOwnerToQueueLock(
version="20230622131800", description="add 'owner' field copying the job_id value"
),
MigrationAddPartialToCacheResponse(
version="20230703110100", description="add 'partial' field to config-parquet-and-info"
),
MigrationQueueAddDifficultyToJob(version="20230705160600", description="add 'difficulty' field to jobs"),
MigrationDropCollection(
version="20230811063600",
description="drop cache metrics collection",
alias=METRICS_MONGOENGINE_ALIAS,
collection_name=CACHE_METRICS_COLLECTION,
),
MigrationDropCollection(
version="20230814121400",
description="drop queue metrics collection",
alias=METRICS_MONGOENGINE_ALIAS,
collection_name=QUEUE_METRICS_COLLECTION,
),
]
| datasets-server-main | jobs/mongodb_migration/src/mongodb_migration/collector.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
import logging
from mongoengine.connection import get_db
from mongodb_migration.migration import IrreversibleMigrationError, Migration
class MigrationDropCollection(Migration):
def __init__(self, version: str, description: str, collection_name: str, alias: str):
super().__init__(version=version, description=description)
self.collection_name = collection_name
self.alias = alias
def up(self) -> None:
logging.info(f"drop {self.collection_name} collection from {self.alias}")
db = get_db(self.alias)
db[self.collection_name].drop()
def down(self) -> None:
raise IrreversibleMigrationError("This migration does not support rollback")
def validate(self) -> None:
logging.info("check that collection does not exist")
db = get_db(self.alias)
collections = db.list_collection_names() # type: ignore
if self.collection_name in collections:
raise ValueError(f"found collection with name {self.collection_name}")
| datasets-server-main | jobs/mongodb_migration/src/mongodb_migration/drop_migrations.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
| datasets-server-main | jobs/mongodb_migration/src/mongodb_migration/__init__.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from dataclasses import dataclass, field
from libcommon.resources import MongoResource
from mongodb_migration.constants import DATABASE_MIGRATIONS_MONGOENGINE_ALIAS
@dataclass
class MigrationsMongoResource(MongoResource):
"""
A resource that represents a connection to the migrations mongo database.
Args:
database (:obj:`str`): The name of the mongo database.
host (:obj:`str`): The host of the mongo database. It must start with ``mongodb://`` or ``mongodb+srv://``.
"""
mongoengine_alias: str = field(default=DATABASE_MIGRATIONS_MONGOENGINE_ALIAS, init=False)
| datasets-server-main | jobs/mongodb_migration/src/mongodb_migration/resources.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import logging
import sys
from libcommon.log import init_logging
from libcommon.resources import (
CacheMongoResource,
MetricsMongoResource,
QueueMongoResource,
)
from mongodb_migration.collector import MigrationsCollector
from mongodb_migration.config import JobConfig
from mongodb_migration.plan import Plan
from mongodb_migration.resources import MigrationsMongoResource
def run_job() -> None:
job_config = JobConfig.from_env()
init_logging(level=job_config.log.level)
# ^ set first to have logs as soon as possible
with (
CacheMongoResource(
database=job_config.cache.mongo_database, host=job_config.cache.mongo_url
) as cache_resource,
MetricsMongoResource(
database=job_config.metrics.mongo_database, host=job_config.metrics.mongo_url
) as metrics_resource,
QueueMongoResource(
database=job_config.queue.mongo_database, host=job_config.queue.mongo_url
) as queue_resource,
MigrationsMongoResource(
database=job_config.database_migrations.mongo_database, host=job_config.database_migrations.mongo_url
) as migrations_database_resource,
):
if not cache_resource.is_available():
logging.warning(
"The connection to the cache database could not be established. The migration job is skipped."
)
return
if not metrics_resource.is_available():
logging.warning(
"The connection to the metrics database could not be established. The migration job is skipped."
)
return
if not queue_resource.is_available():
logging.warning(
"The connection to the queue database could not be established. The migration job is skipped."
)
return
if not migrations_database_resource.is_available():
logging.warning(
"The connection to the migrations database could not be established. The migration job is skipped."
)
return
collected_migrations = MigrationsCollector().get_migrations()
Plan(collected_migrations=collected_migrations).execute()
if __name__ == "__main__":
try:
run_job()
sys.exit(0)
except Exception as e:
logging.error(e)
sys.exit(1)
# See:
# https://blog.appsignal.com/2020/04/14/dissecting-rails-migrationsl.html
# https://edgeguides.rubyonrails.org/active_record_migrations.html
# https://docs.mongoengine.org/guide/migration.html
# https://andrewlock.net/deploying-asp-net-core-applications-to-kubernetes-part-7-running-database-migrations/
# https://helm.sh/docs/topics/charts_hooks/
| datasets-server-main | jobs/mongodb_migration/src/mongodb_migration/main.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
import datetime
from abc import ABC, abstractmethod
from libcommon.constants import (
CACHE_COLLECTION_RESPONSES,
CACHE_METRICS_COLLECTION,
CACHE_MONGOENGINE_ALIAS,
METRICS_MONGOENGINE_ALIAS,
QUEUE_COLLECTION_JOBS,
QUEUE_METRICS_COLLECTION,
QUEUE_MONGOENGINE_ALIAS,
)
class IrreversibleMigrationError(Exception):
pass
class Migration(ABC):
def __init__(self, version: str, description: str):
if version is None or description is None:
raise ValueError("The version and the description are required.")
try:
datetime.datetime.strptime(version, "%Y%m%d%H%M%S")
except Exception as e:
raise ValueError("The version should be a string representing a date in the format YYYYMMDDHHMMSS") from e
self.version = version
self.description = description
@abstractmethod
def up(self) -> None:
raise NotImplementedError()
@abstractmethod
def validate(self) -> None:
raise NotImplementedError()
@abstractmethod
def down(self) -> None:
raise IrreversibleMigrationError()
class BaseQueueMigration(Migration):
MONGOENGINE_ALIAS: str = QUEUE_MONGOENGINE_ALIAS
COLLECTION_JOBS: str = QUEUE_COLLECTION_JOBS
def __init__(self, version: str, description: str):
super().__init__(version=version, description=description)
class QueueMigration(BaseQueueMigration):
def __init__(self, job_type: str, version: str, description: str):
self.job_type = job_type
super().__init__(version=version, description=description)
class CacheMigration(Migration):
MONGOENGINE_ALIAS: str = CACHE_MONGOENGINE_ALIAS
COLLECTION_RESPONSES: str = CACHE_COLLECTION_RESPONSES
def __init__(self, cache_kind: str, version: str, description: str):
self.cache_kind = cache_kind
super().__init__(version=version, description=description)
class MetricsMigration(Migration):
MONGOENGINE_ALIAS: str = METRICS_MONGOENGINE_ALIAS
COLLECTION_JOB_TOTAL_METRIC: str = QUEUE_METRICS_COLLECTION
COLLECTION_CACHE_TOTAL_METRIC: str = CACHE_METRICS_COLLECTION
def __init__(self, job_type: str, cache_kind: str, version: str, description: str):
self.job_type = job_type
self.cache_kind = cache_kind
super().__init__(version=version, description=description)
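# --- Hedged sketch (editor's addition, not part of the original module) ---
# Illustrates the contract enforced by Migration.__init__: the version must be a
# YYYYMMDDHHMMSS string. The no-op subclass below exists only for this demonstration.
if __name__ == "__main__":
    class _NoOpMigration(Migration):
        def up(self) -> None:
            pass  # nothing to apply
        def validate(self) -> None:
            pass  # nothing to check
        def down(self) -> None:
            pass  # nothing to roll back
    _NoOpMigration(version="20230101000000", description="no-op example")  # accepted
    try:
        _NoOpMigration(version="2023-01-01", description="no-op example")
    except ValueError as error:
        print(error)  # the version should be a string representing a date in the format YYYYMMDDHHMMSS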
| datasets-server-main | jobs/mongodb_migration/src/mongodb_migration/migration.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import types
from typing import Generic, TypeVar
from mongoengine import Document
from mongoengine.fields import StringField
from mongoengine.queryset.queryset import QuerySet
from mongodb_migration.constants import (
DATABASE_MIGRATIONS_COLLECTION_MIGRATIONS,
DATABASE_MIGRATIONS_MONGOENGINE_ALIAS,
)
# START monkey patching ### hack ###
# see https://github.com/sbdchd/mongo-types#install
U = TypeVar("U", bound=Document)
def no_op(self, _): # type: ignore
return self
QuerySet.__class_getitem__ = types.MethodType(no_op, QuerySet)
class QuerySetManager(Generic[U]):
def __get__(self, instance: object, cls: type[U]) -> QuerySet[U]:
return QuerySet(cls, cls._get_collection())
# END monkey patching ### hack ###
class DatabaseMigration(Document):
"""A database migration that has already been executed.
Args:
version (`str`): The version of the migration, with the format YYYYMMDDHHMMSS
description (`str`): A description of the migration
"""
meta = {
"collection": DATABASE_MIGRATIONS_COLLECTION_MIGRATIONS,
"db_alias": DATABASE_MIGRATIONS_MONGOENGINE_ALIAS,
}
version = StringField(required=True)
description = StringField(required=True)
objects = QuerySetManager["DatabaseMigration"]()
# only for the tests
def _clean_maintenance_database() -> None:
"""Delete all the jobs in the database"""
DatabaseMigration.drop_collection() # type: ignore
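# --- Hedged usage sketch (editor's addition, not part of the original module) ---
# What Plan.save()/remove() boil down to on this collection. It assumes a mongoengine
# connection has been registered for the "maintenance" alias (e.g. through
# MigrationsMongoResource); the version below is an example value.
if __name__ == "__main__":
    DatabaseMigration(version="20230101000000", description="example entry").save()
    print(DatabaseMigration.objects().distinct("version"))  # includes "20230101000000"
    DatabaseMigration.objects(version="20230101000000").delete()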
| datasets-server-main | jobs/mongodb_migration/src/mongodb_migration/database_migrations.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
import logging
from collections.abc import Mapping
from typing import Any, Optional
from mongoengine.connection import get_db
from mongodb_migration.migration import (
BaseQueueMigration,
CacheMigration,
IrreversibleMigrationError,
MetricsMigration,
QueueMigration,
)
class MetricsDeletionMigration(MetricsMigration):
def __init__(self, job_type: str, cache_kind: str, version: str, description: Optional[str] = None):
if not description:
description = f"delete the queue and cache metrics for step '{job_type}'"
super().__init__(job_type=job_type, cache_kind=cache_kind, version=version, description=description)
def up(self) -> None:
logging.info(f"Delete job metrics of type {self.job_type}")
db = get_db(self.MONGOENGINE_ALIAS)
result = db[self.COLLECTION_JOB_TOTAL_METRIC].delete_many({"queue": self.job_type})
logging.info(f"{result.deleted_count} deleted job metrics")
result = db[self.COLLECTION_CACHE_TOTAL_METRIC].delete_many({"kind": self.cache_kind})
logging.info(f"{result.deleted_count} deleted cache metrics")
def down(self) -> None:
raise IrreversibleMigrationError("This migration does not support rollback")
def validate(self) -> None:
logging.info(f"Check that none of the documents has the {self.job_type} type or {self.cache_kind} kind")
db = get_db(self.MONGOENGINE_ALIAS)
if db[self.COLLECTION_JOB_TOTAL_METRIC].count_documents({"queue": self.job_type}):
raise ValueError(f"Found documents with type {self.job_type}")
if db[self.COLLECTION_CACHE_TOTAL_METRIC].count_documents({"kind": self.cache_kind}):
raise ValueError(f"Found documents with kind {self.cache_kind}")
class CacheDeletionMigration(CacheMigration):
def __init__(self, cache_kind: str, version: str, description: Optional[str] = None):
if not description:
description = f"delete the cache entries of kind '{cache_kind}'"
super().__init__(cache_kind=cache_kind, version=version, description=description)
def up(self) -> None:
logging.info(f"Delete cache entries of kind {self.cache_kind}")
db = get_db(self.MONGOENGINE_ALIAS)
# delete existing documents
result = db[self.COLLECTION_RESPONSES].delete_many({"kind": self.cache_kind})
logging.info(f"{result.deleted_count} deleted cache entries")
def down(self) -> None:
raise IrreversibleMigrationError("This migration does not support rollback")
def validate(self) -> None:
logging.info(f"Check that none of the documents has the {self.cache_kind} kind")
db = get_db(self.MONGOENGINE_ALIAS)
if db[self.COLLECTION_RESPONSES].count_documents({"kind": self.cache_kind}):
raise ValueError(f"Found documents with kind {self.cache_kind}")
class QueueDeletionMigration(QueueMigration):
def __init__(self, job_type: str, version: str, description: Optional[str] = None):
if not description:
description = f"delete the jobs of type '{job_type}'"
super().__init__(job_type=job_type, version=version, description=description)
def up(self) -> None:
logging.info(f"Delete jobs of type {self.job_type}")
db = get_db(self.MONGOENGINE_ALIAS)
result = db[self.COLLECTION_JOBS].delete_many({"type": self.job_type})
logging.info(f"{result.deleted_count} deleted jobs")
def down(self) -> None:
raise IrreversibleMigrationError("This migration does not support rollback")
def validate(self) -> None:
logging.info(f"Check that none of the documents has the {self.job_type} type")
db = get_db(self.MONGOENGINE_ALIAS)
if db[self.COLLECTION_JOBS].count_documents({"type": self.job_type}):
raise ValueError(f"Found documents with type {self.job_type}")
def get_index_names(index_information: Mapping[str, Any], field_name: str) -> list[str]:
return [
name
for name, value in index_information.items()
if isinstance(value, dict)
and "expireAfterSeconds" in value
and "key" in value
and value["key"] == [(field_name, 1)]
]
class MigrationQueueDeleteTTLIndex(BaseQueueMigration):
def __init__(self, version: str, description: str, field_name: str):
super().__init__(version=version, description=description)
self.field_name = field_name
def up(self) -> None:
logging.info(
f"Delete ttl index on field {self.field_name}. Mongoengine will create it again with a different TTL"
" parameter"
)
db = get_db(self.MONGOENGINE_ALIAS)
collection = db[self.COLLECTION_JOBS]
ttl_index_names = get_index_names(index_information=collection.index_information(), field_name=self.field_name)
if len(ttl_index_names) != 1:
raise ValueError(f"Expected 1 ttl index on field {self.field_name}, found {len(ttl_index_names)}")
collection.drop_index(ttl_index_names[0])
def down(self) -> None:
raise IrreversibleMigrationError("This migration does not support rollback")
def validate(self) -> None:
logging.info("Check that the index does not exists anymore")
db = get_db(self.MONGOENGINE_ALIAS)
collection = db[self.COLLECTION_JOBS]
ttl_index_names = get_index_names(index_information=collection.index_information(), field_name=self.field_name)
if len(ttl_index_names) > 0:
raise ValueError(f"Found TTL index for field {self.field_name}")
| datasets-server-main | jobs/mongodb_migration/src/mongodb_migration/deletion_migrations.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import logging
from mongodb_migration.migration import Migration
class MigrationExample(Migration):
def up(self) -> None:
logging.info("Example migration, upgrade step")
def down(self) -> None:
logging.info("Example migration, downgrade step")
def validate(self) -> None:
logging.info("Example migration, validation is OK")
| datasets-server-main | jobs/mongodb_migration/src/mongodb_migration/migrations/_20221110230400_example.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
import logging
from libcommon.constants import CACHE_COLLECTION_RESPONSES, CACHE_MONGOENGINE_ALIAS
from libcommon.simple_cache import CachedResponseDocument
from mongoengine.connection import get_db
from mongodb_migration.check import check_documents
from mongodb_migration.migration import Migration
# connection already occurred in the main.py (caveat: we use globals)
class MigrationAddFeaturesToSplitDuckdbIndexCacheResponse(Migration):
def up(self) -> None:
# See https://docs.mongoengine.org/guide/migration.html#example-1-addition-of-a-field
logging.info(
"If missing, add the features field with the default value (None) to the cached results of"
" split-duckdb-index"
)
db = get_db(CACHE_MONGOENGINE_ALIAS)
db[CACHE_COLLECTION_RESPONSES].update_many(
{
"kind": "split-duckdb-index",
"http_status": 200,
"content.features": {"$exists": False},
},
{"$set": {"content.features": None}},
)
def down(self) -> None:
logging.info("Remove the features field from all the cached results")
db = get_db(CACHE_MONGOENGINE_ALIAS)
db[CACHE_COLLECTION_RESPONSES].update_many(
{
"kind": "split-duckdb-index",
"http_status": 200,
},
{"$unset": {"content.features": ""}},
)
def validate(self) -> None:
logging.info("Ensure that a random selection of cached results have the 'features' field")
check_documents(DocCls=CachedResponseDocument, sample_size=10)
| datasets-server-main | jobs/mongodb_migration/src/mongodb_migration/migrations/_20230824154900_cache_add_features_field_in_split_duckdb_index.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
import logging
from collections.abc import Mapping
from typing import Any
from libcommon.constants import QUEUE_COLLECTION_JOBS, QUEUE_MONGOENGINE_ALIAS
from mongoengine.connection import get_db
from mongodb_migration.migration import IrreversibleMigrationError, Migration
INDEX_DEFINITION = [("type", 1), ("dataset", 1), ("config", 1), ("split", 1), ("status", 1), ("priority", 1)]
def get_index_names(index_information: Mapping[str, Any]) -> list[str]:
return [
name
for name, value in index_information.items()
if isinstance(value, dict) and "key" in value and value["key"] == INDEX_DEFINITION
]
class MigrationQueueDeleteIndexWithoutRevision(Migration):
def up(self) -> None:
logging.info("Delete index.")
db = get_db(QUEUE_MONGOENGINE_ALIAS)
collection = db[QUEUE_COLLECTION_JOBS]
index_names = get_index_names(index_information=collection.index_information())
if len(index_names) != 1:
raise ValueError(f"Found {len(index_names)} indexes (should be 1): {index_names}.")
collection.drop_index(index_names[0])
def down(self) -> None:
raise IrreversibleMigrationError("This migration does not support rollback")
def validate(self) -> None:
logging.info("Check that the indexes do not exist anymore")
db = get_db(QUEUE_MONGOENGINE_ALIAS)
collection = db[QUEUE_COLLECTION_JOBS]
index_names = get_index_names(index_information=collection.index_information())
if len(index_names) > 0:
raise ValueError(f"Found indexes: {index_names}")
| datasets-server-main | jobs/mongodb_migration/src/mongodb_migration/migrations/_20230516101600_queue_delete_index_without_revision.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import logging
from libcommon.constants import QUEUE_COLLECTION_JOBS, QUEUE_MONGOENGINE_ALIAS
from libcommon.queue import JobDocument
from mongoengine.connection import get_db
from mongodb_migration.check import check_documents
from mongodb_migration.migration import IrreversibleMigrationError, Migration
# connection already occurred in the main.py (caveat: we use globals)
class MigrationRemoveForceFromJob(Migration):
def up(self) -> None:
logging.info("Removing 'force' field.")
db = get_db(QUEUE_MONGOENGINE_ALIAS)
db[QUEUE_COLLECTION_JOBS].update_many({}, {"$unset": {"force": ""}})
def down(self) -> None:
raise IrreversibleMigrationError("This migration does not support rollback")
def validate(self) -> None:
logging.info("Ensure that a random selection of cached results don't have 'force' field")
check_documents(DocCls=JobDocument, sample_size=10)
| datasets-server-main | jobs/mongodb_migration/src/mongodb_migration/migrations/_20230511100600_queue_remove_force.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import logging
from libcommon.constants import QUEUE_COLLECTION_JOBS, QUEUE_MONGOENGINE_ALIAS
from libcommon.queue import JobDocument
from mongoengine.connection import get_db
from mongodb_migration.check import check_documents
from mongodb_migration.migration import Migration
# connection already occurred in the main.py (caveat: we use globals)
class MigrationAddPriorityToJob(Migration):
def up(self) -> None:
# See https://docs.mongoengine.org/guide/migration.html#example-1-addition-of-a-field
logging.info("If missing, add the priority field with the default value ('normal') to the jobs")
db = get_db(QUEUE_MONGOENGINE_ALIAS)
db[QUEUE_COLLECTION_JOBS].update_many({"priority": {"$exists": False}}, {"$set": {"priority": "normal"}})
def down(self) -> None:
logging.info("Remove the priority field from all the jobs")
db = get_db(QUEUE_MONGOENGINE_ALIAS)
db[QUEUE_COLLECTION_JOBS].update_many({}, {"$unset": {"priority": ""}})
def validate(self) -> None:
logging.info("Ensure that a random selection of jobs have the 'priority' field set")
check_documents(DocCls=JobDocument, sample_size=10)
| datasets-server-main | jobs/mongodb_migration/src/mongodb_migration/migrations/_20230126164900_queue_job_add_priority.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import logging
from libcommon.constants import QUEUE_COLLECTION_JOBS, QUEUE_MONGOENGINE_ALIAS
from libcommon.queue import JobDocument
from mongoengine import Document
from mongoengine.connection import get_db
from mongodb_migration.check import check_documents
from mongodb_migration.migration import IrreversibleMigrationError, Migration
status = "skipped"
# connection already occurred in the main.py (caveat: we use globals)
class MigrationDeleteSkippedJobs(Migration):
def up(self) -> None:
logging.info(f"Delete jobs with status {status}.")
db = get_db(QUEUE_MONGOENGINE_ALIAS)
db[QUEUE_COLLECTION_JOBS].delete_many({"status": status})
def down(self) -> None:
raise IrreversibleMigrationError("This migration does not support rollback")
def validate(self) -> None:
logging.info("Ensure that a random selection of jobs don't have the status {status}")
def custom_validation(doc: Document) -> None:
if not isinstance(doc, JobDocument):
raise ValueError("Document is not a Job")
if doc.status == status:
raise ValueError(f"Document has the status {status}")
check_documents(DocCls=JobDocument, sample_size=10, custom_validation=custom_validation)
| datasets-server-main | jobs/mongodb_migration/src/mongodb_migration/migrations/_20230511110700_queue_delete_skipped_jobs.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import logging
from libcommon.config import ProcessingGraphConfig
from libcommon.constants import (
DEFAULT_DIFFICULTY,
QUEUE_COLLECTION_JOBS,
QUEUE_MONGOENGINE_ALIAS,
)
from libcommon.queue import JobDocument
from mongoengine.connection import get_db
from mongodb_migration.check import check_documents
from mongodb_migration.migration import Migration
# connection already occurred in the main.py (caveat: we use globals)
class MigrationQueueAddDifficultyToJob(Migration):
def up(self) -> None:
logging.info("If missing, add the difficulty with a value that depends on the job type, else 50")
db = get_db(QUEUE_MONGOENGINE_ALIAS)
processing_graph_config = ProcessingGraphConfig()
for job_type, spec in processing_graph_config.specification.items():
difficulty = spec.get("difficulty", DEFAULT_DIFFICULTY)
db[QUEUE_COLLECTION_JOBS].update_many(
{"type": job_type, "difficulty": {"$exists": False}},
{"$set": {"difficulty": difficulty}},
)
db[QUEUE_COLLECTION_JOBS].update_many(
{"difficulty": {"$exists": False}},
{"$set": {"difficulty": DEFAULT_DIFFICULTY}},
)
def down(self) -> None:
logging.info("Remove the difficulty field from all the jobs")
db = get_db(QUEUE_MONGOENGINE_ALIAS)
db[QUEUE_COLLECTION_JOBS].update_many({}, {"$unset": {"difficulty": ""}})
def validate(self) -> None:
logging.info("Ensure that a random selection of jobs have the 'difficulty' field set")
check_documents(DocCls=JobDocument, sample_size=10)
| datasets-server-main | jobs/mongodb_migration/src/mongodb_migration/migrations/_20230705160600_queue_job_add_difficulty.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import logging
from libcommon.constants import CACHE_COLLECTION_RESPONSES, CACHE_MONGOENGINE_ALIAS
from libcommon.simple_cache import CachedResponseDocument
from mongoengine.connection import get_db
from mongodb_migration.check import check_documents
from mongodb_migration.migration import IrreversibleMigrationError, Migration
# connection already occurred in the main.py (caveat: we use globals)
class MigrationRemoveWorkerVersionFromCachedResponse(Migration):
def up(self) -> None:
logging.info("Removing 'worker_version' field.")
db = get_db(CACHE_MONGOENGINE_ALIAS)
db[CACHE_COLLECTION_RESPONSES].update_many({}, {"$unset": {"worker_version": ""}})
def down(self) -> None:
raise IrreversibleMigrationError("This migration does not support rollback")
def validate(self) -> None:
logging.info("Ensure that a random selection of cached results don't have 'worker_version' field")
check_documents(DocCls=CachedResponseDocument, sample_size=10)
| datasets-server-main | jobs/mongodb_migration/src/mongodb_migration/migrations/_20230313164200_cache_remove_worker_version.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
| datasets-server-main | jobs/mongodb_migration/src/mongodb_migration/migrations/__init__.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import contextlib
import logging
from libcommon.simple_cache import CachedResponseDocument
from mongoengine.connection import get_db
from pymongo.errors import InvalidName
from mongodb_migration.check import check_documents
from mongodb_migration.migration import Migration
db_name = "cache"
splitsResponseCollection = "splitsResponse"
firstRowsResponseCollection = "firstRowsResponse"
cachedResponseCollection = "cachedResponsesBlue"
SPLITS_KIND = "/splits"
FIRST_ROWS_KIND = "/first-rows"
# connection already occurred in the main.py (caveat: we use globals)
class MigrationMoveToGenericCachedResponse(Migration):
def up(self) -> None:
# See https://docs.mongoengine.org/guide/migration.html#example-1-addition-of-a-field
logging.info(
f"Create the {cachedResponseCollection} collection, and fill it with the data from splits and first-rows"
)
db = get_db(db_name)
# Copy the data from the previous collections (splitsResponse, firstRowsResponse) to
# the new generic collection (cachedResponse)
with contextlib.suppress(InvalidName):
for splits_response in db[splitsResponseCollection].find():
if not isinstance(splits_response, dict):
# for mypy
raise ValueError("splits_response should be a dict")
db[cachedResponseCollection].insert_one(
{
"_id": splits_response.get("_id"),
"kind": SPLITS_KIND,
# ^ "kind" is a new field
"dataset": splits_response.get("dataset_name"),
"config": None,
"split": None,
# ^ "config" and "split" are None for kind=/splits
"http_status": splits_response.get("http_status"),
"error_code": splits_response.get("error_code"),
"content": splits_response.get("response"),
# ^ "response" field has been renamed to "content"
"worker_version": splits_response.get("worker_version"),
"dataset_git_revision": splits_response.get("dataset_git_revision"),
"details": splits_response.get("details"),
"updated_at": splits_response.get("updated_at"),
# "stale" field is not used anymore
}
)
with contextlib.suppress(InvalidName):
for first_rows_response in db[firstRowsResponseCollection].find():
if not isinstance(first_rows_response, dict):
# for mypy
raise ValueError("first_rows_response should be a dict")
db[cachedResponseCollection].insert_one(
{
"_id": first_rows_response.get("_id"),
"kind": FIRST_ROWS_KIND,
# ^ "kind" is a new field
"dataset": first_rows_response.get("dataset_name"),
"config": first_rows_response.get("config_name"),
"split": first_rows_response.get("split_name"),
# ^ "config" and "split" are None for kind=/splits
"http_status": first_rows_response.get("http_status"),
"error_code": first_rows_response.get("error_code"),
"content": first_rows_response.get("response"),
# ^ "response" field has been renamed to "content"
"worker_version": first_rows_response.get("worker_version"),
"dataset_git_revision": first_rows_response.get("dataset_git_revision"),
"details": first_rows_response.get("details"),
"updated_at": first_rows_response.get("updated_at"),
# "stale" field is not used anymore
}
)
# We will not delete the old collections for now. It will be made in a later migration.
# Also: no need to create indexes on the new collection, mongoengine will do it automatically on the next
# request.
def down(self) -> None:
logging.info(f"Delete the {cachedResponseCollection} collection")
db = get_db(db_name)
with contextlib.suppress(InvalidName):
db[cachedResponseCollection].drop()
def validate(self) -> None:
logging.info("Validate the migrated documents")
check_documents(DocCls=CachedResponseDocument, sample_size=10)
db = get_db(db_name)
try:
splits_responses_count = db[splitsResponseCollection].count_documents({})
except InvalidName:
splits_responses_count = 0
try:
first_rows_responses_count = db[firstRowsResponseCollection].count_documents({})
except InvalidName:
first_rows_responses_count = 0
cached_responses_count = CachedResponseDocument.objects.count()
if splits_responses_count + first_rows_responses_count > cached_responses_count:
raise ValueError(
f"Some documents are missing in the new collection: splitsResponse ({splits_responses_count}),"
f" firstRowsResponse ({first_rows_responses_count}), cachedResponseBlue ({cached_responses_count})"
)
| datasets-server-main | jobs/mongodb_migration/src/mongodb_migration/migrations/_20221117223000_cache_generic_response.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import logging
from libcommon.constants import QUEUE_COLLECTION_JOBS, QUEUE_MONGOENGINE_ALIAS
from mongoengine.connection import get_db
from mongodb_migration.migration import Migration
# connection already occurred in the main.py (caveat: we use globals)
class MigrationAddForceToJob(Migration):
def up(self) -> None:
# See https://docs.mongoengine.org/guide/migration.html#example-1-addition-of-a-field
logging.info("If missing, add the force field with the default value (False) to the jobs")
db = get_db(QUEUE_MONGOENGINE_ALIAS)
db[QUEUE_COLLECTION_JOBS].update_many({"force": {"$exists": False}}, {"$set": {"force": False}})
def down(self) -> None:
logging.info("Remove the force field from all the jobs")
db = get_db(QUEUE_MONGOENGINE_ALIAS)
db[QUEUE_COLLECTION_JOBS].update_many({}, {"$unset": {"force": ""}})
def validate(self) -> None:
logging.info("Ensure that a random selection of jobs have the 'force' field")
# The Job object does not contain the force field anymore. See _20230511100600_queue_remove_force.py
# check_documents(DocCls=Job, sample_size=10)
| datasets-server-main | jobs/mongodb_migration/src/mongodb_migration/migrations/_20221116133500_queue_job_add_force.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
import logging
from collections.abc import Mapping
from typing import Any
from libcommon.constants import QUEUE_COLLECTION_JOBS, QUEUE_MONGOENGINE_ALIAS
from mongoengine.connection import get_db
from mongodb_migration.migration import IrreversibleMigrationError, Migration
field_name = "force"
def get_index_names(index_information: Mapping[str, Any], field_name: str) -> list[str]:
return [
name
for name, value in index_information.items()
if isinstance(value, dict)
and "key" in value
and any(t[0] == field_name for t in value["key"] if isinstance(t, tuple) and len(t))
]
class MigrationQueueDeleteIndexesWithForce(Migration):
def up(self) -> None:
logging.info(f"Delete indexes that contain the {field_name} field.")
db = get_db(QUEUE_MONGOENGINE_ALIAS)
collection = db[QUEUE_COLLECTION_JOBS]
index_names = get_index_names(index_information=collection.index_information(), field_name=field_name)
for index_name in index_names:
collection.drop_index(index_name)
def down(self) -> None:
raise IrreversibleMigrationError("This migration does not support rollback")
def validate(self) -> None:
logging.info("Check that the indexes do not exist anymore")
db = get_db(QUEUE_MONGOENGINE_ALIAS)
collection = db[QUEUE_COLLECTION_JOBS]
index_names = get_index_names(index_information=collection.index_information(), field_name=field_name)
if len(index_names) > 0:
raise ValueError(f"Found indexes for field {field_name}: {index_names}")
| datasets-server-main | jobs/mongodb_migration/src/mongodb_migration/migrations/_20230511100700_queue_delete_indexes_with_force.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import logging
from libcommon.constants import CACHE_COLLECTION_RESPONSES, CACHE_MONGOENGINE_ALIAS
from libcommon.simple_cache import CachedResponseDocument
from mongoengine.connection import get_db
from mongodb_migration.check import check_documents
from mongodb_migration.migration import Migration
# connection already occurred in the main.py (caveat: we use globals)
class MigrationAddProgressToCacheResponse(Migration):
def up(self) -> None:
# See https://docs.mongoengine.org/guide/migration.html#example-1-addition-of-a-field
logging.info("If missing, add the progress field with the default value (1.0) to the cached results")
db = get_db(CACHE_MONGOENGINE_ALIAS)
db[CACHE_COLLECTION_RESPONSES].update_many({"progress": {"$exists": False}}, {"$set": {"progress": 1.0}})
def down(self) -> None:
logging.info("Remove the progress field from all the cached results")
db = get_db(CACHE_MONGOENGINE_ALIAS)
db[CACHE_COLLECTION_RESPONSES].update_many({}, {"$unset": {"progress": ""}})
def validate(self) -> None:
logging.info("Ensure that a random selection of cached results have the 'progress' field")
check_documents(DocCls=CachedResponseDocument, sample_size=10)
| datasets-server-main | jobs/mongodb_migration/src/mongodb_migration/migrations/_20230309123100_cache_add_progress.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
import logging
from libcommon.constants import QUEUE_COLLECTION_LOCKS, QUEUE_MONGOENGINE_ALIAS
from libcommon.queue import Lock
from mongoengine.connection import get_db
from mongodb_migration.check import check_documents
from mongodb_migration.migration import Migration
# connection already occurred in the main.py (caveat: we use globals)
class MigrationAddTtlToQueueLock(Migration):
def up(self) -> None:
# See https://docs.mongoengine.org/guide/migration.html#example-1-addition-of-a-field
logging.info("If missing, add the ttl field to the locks")
db = get_db(QUEUE_MONGOENGINE_ALIAS)
db[QUEUE_COLLECTION_LOCKS].update_many({"ttl": {"$exists": False}}, [{"$set": {"ttl": None}}]) # type: ignore
def down(self) -> None:
logging.info("Remove the ttl field from all the locks")
db = get_db(QUEUE_MONGOENGINE_ALIAS)
db[QUEUE_COLLECTION_LOCKS].update_many({}, {"$unset": {"ttl": ""}})
def validate(self) -> None:
logging.info("Ensure that a random selection of locks have the 'ttl' field")
check_documents(DocCls=Lock, sample_size=10)
| datasets-server-main | jobs/mongodb_migration/src/mongodb_migration/migrations/_20230825170200_lock_add_ttl.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import logging
from libcommon.constants import CACHE_COLLECTION_RESPONSES, CACHE_MONGOENGINE_ALIAS
from libcommon.simple_cache import CachedResponseDocument
from mongoengine.connection import get_db
from mongodb_migration.check import check_documents
from mongodb_migration.migration import Migration
# connection already occurred in the main.py (caveat: we use globals)
class MigrationAddPartialToCacheResponse(Migration):
def up(self) -> None:
# See https://docs.mongoengine.org/guide/migration.html#example-1-addition-of-a-field
logging.info(
"If missing, add the partial field with the default value (false) to the cached results of"
" config-parquet-and-info and subsequent steps"
)
db = get_db(CACHE_MONGOENGINE_ALIAS)
db[CACHE_COLLECTION_RESPONSES].update_many(
{
"kind": {
"$in": [
"config-parquet-and-info",
"config-parquet",
"dataset-parquet",
"config-parquet-metadata",
"config-info",
"dataset-info",
"config-size",
"dataset-size",
]
},
"http_status": 200,
"content.partial": {"$exists": False},
},
{"$set": {"content.partial": False}},
)
def down(self) -> None:
logging.info("Remove the partial field from all the cached results")
db = get_db(CACHE_MONGOENGINE_ALIAS)
db[CACHE_COLLECTION_RESPONSES].update_many(
{
"kind": {
"$in": [
"config-parquet-and-info",
"config-parquet",
"dataset-parquet",
"config-parquet-metadata",
"config-info",
"dataset-info",
"config-size",
"dataset-size",
]
},
"http_status": 200,
},
{"$unset": {"content.partial": ""}},
)
def validate(self) -> None:
logging.info("Ensure that a random selection of cached results have the 'partial' field")
check_documents(DocCls=CachedResponseDocument, sample_size=10)
| datasets-server-main | jobs/mongodb_migration/src/mongodb_migration/migrations/_20230703110100_cache_add_partial_field_in_config_parquet_and_info.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import logging
from libcommon.constants import QUEUE_COLLECTION_JOBS, QUEUE_MONGOENGINE_ALIAS
from libcommon.queue import JobDocument
from mongoengine.connection import get_db
from mongodb_migration.check import check_documents
from mongodb_migration.migration import Migration
# connection already occurred in the main.py (caveat: we use globals)
class MigrationQueueAddRevisionToJob(Migration):
def up(self) -> None:
logging.info("If missing, add the revision field with the value ('main') to the jobs")
# Note that setting the value to "main" is a trick, that should avoid deleting the jobs,
# since we don't know the git revision when the jobs were created.
# The functions that create jobs in the code will set revision to the commit hash, not to "main" anymore.
db = get_db(QUEUE_MONGOENGINE_ALIAS)
db[QUEUE_COLLECTION_JOBS].update_many({"revision": {"$exists": False}}, {"$set": {"revision": "main"}})
def down(self) -> None:
logging.info("Remove the revision field from all the jobs")
db = get_db(QUEUE_MONGOENGINE_ALIAS)
db[QUEUE_COLLECTION_JOBS].update_many({}, {"$unset": {"revision": ""}})
def validate(self) -> None:
logging.info("Ensure that a random selection of jobs have the 'revision' field set")
check_documents(DocCls=JobDocument, sample_size=10)
| datasets-server-main | jobs/mongodb_migration/src/mongodb_migration/migrations/_20230516101500_queue_job_add_revision.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import logging
from libcommon.constants import CACHE_COLLECTION_RESPONSES, CACHE_MONGOENGINE_ALIAS
from libcommon.simple_cache import CachedResponseDocument
from mongoengine.connection import get_db
from mongodb_migration.check import check_documents
from mongodb_migration.migration import Migration
# connection already occurred in the main.py (caveat: we use globals)
class MigrationAddJobRunnerVersionToCacheResponse(Migration):
def up(self) -> None:
# See https://docs.mongoengine.org/guide/migration.html#example-1-addition-of-a-field
logging.info("If missing, add 'job_runner_version' field based on 'worker_version' value")
db = get_db(CACHE_MONGOENGINE_ALIAS)
db[CACHE_COLLECTION_RESPONSES].update_many(
{"job_runner_version": {"$exists": False}},
[
{
"$set": {
"job_runner_version": {
"$convert": {
"input": {"$first": {"$split": ["$worker_version", "."]}},
"to": "int",
"onError": None,
"onNull": None,
}
}
}
}
], # type: ignore
)
def down(self) -> None:
logging.info("Remove 'job_runner_version' field from all the cached results")
db = get_db(CACHE_MONGOENGINE_ALIAS)
db[CACHE_COLLECTION_RESPONSES].update_many({}, {"$unset": {"job_runner_version": ""}})
def validate(self) -> None:
logging.info("Ensure that a random selection of cached results have the 'job_runner_version' field")
check_documents(DocCls=CachedResponseDocument, sample_size=10)
| datasets-server-main | jobs/mongodb_migration/src/mongodb_migration/migrations/_20230309141600_cache_add_job_runner_version.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import logging
from libcommon.constants import QUEUE_COLLECTION_LOCKS, QUEUE_MONGOENGINE_ALIAS
from libcommon.queue import Lock
from mongoengine.connection import get_db
from mongodb_migration.check import check_documents
from mongodb_migration.migration import Migration
# connection already occurred in the main.py (caveat: we use globals)
class MigrationAddOwnerToQueueLock(Migration):
def up(self) -> None:
# See https://docs.mongoengine.org/guide/migration.html#example-1-addition-of-a-field
logging.info("If missing, add the owner field with the same value as the field job_id to the locks")
db = get_db(QUEUE_MONGOENGINE_ALIAS)
db[QUEUE_COLLECTION_LOCKS].update_many(
{"owner": {"$exists": False}}, [{"$set": {"owner": "$job_id"}}] # type: ignore
)
def down(self) -> None:
logging.info("Remove the owner field from all the locks")
db = get_db(QUEUE_MONGOENGINE_ALIAS)
db[QUEUE_COLLECTION_LOCKS].update_many({}, {"$unset": {"owner": ""}})
def validate(self) -> None:
logging.info("Ensure that a random selection of locks have the 'owner' field")
check_documents(DocCls=Lock, sample_size=10)
| datasets-server-main | jobs/mongodb_migration/src/mongodb_migration/migrations/_20230622131500_lock_add_owner.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from collections.abc import Iterator
from libcommon.queue import _clean_queue_database
from libcommon.resources import CacheMongoResource, QueueMongoResource
from libcommon.simple_cache import _clean_cache_database
from pytest import MonkeyPatch, fixture
from cache_maintenance.config import JobConfig
from .constants import (
CI_APP_TOKEN,
CI_HUB_ENDPOINT,
CI_PARQUET_CONVERTER_APP_TOKEN,
CI_PARQUET_CONVERTER_USER,
)
# see https://github.com/pytest-dev/pytest/issues/363#issuecomment-406536200
@fixture(scope="session")
def monkeypatch_session() -> Iterator[MonkeyPatch]:
monkeypatch_session = MonkeyPatch()
monkeypatch_session.setenv("CACHE_MONGO_DATABASE", "datasets_server_cache_test")
monkeypatch_session.setenv("QUEUE_MONGO_DATABASE", "datasets_server_queue_test")
monkeypatch_session.setenv("COMMON_HF_ENDPOINT", CI_HUB_ENDPOINT)
monkeypatch_session.setenv("COMMON_HF_TOKEN", CI_APP_TOKEN)
monkeypatch_session.setenv("DISCUSSIONS_BOT_ASSOCIATED_USER_NAME", CI_PARQUET_CONVERTER_USER)
monkeypatch_session.setenv("DISCUSSIONS_BOT_TOKEN", CI_PARQUET_CONVERTER_APP_TOKEN)
yield monkeypatch_session
monkeypatch_session.undo()
@fixture(scope="session")
def job_config(monkeypatch_session: MonkeyPatch) -> JobConfig:
job_config = JobConfig.from_env()
if "test" not in job_config.cache.mongo_database or "test" not in job_config.queue.mongo_database:
raise ValueError("Test must be launched on a test mongo database")
return job_config
@fixture(autouse=True)
def cache_mongo_resource(job_config: JobConfig) -> Iterator[CacheMongoResource]:
with CacheMongoResource(database=job_config.cache.mongo_database, host=job_config.cache.mongo_url) as resource:
yield resource
_clean_cache_database()
@fixture(autouse=True)
def queue_mongo_resource(job_config: JobConfig) -> Iterator[QueueMongoResource]:
with QueueMongoResource(database=job_config.queue.mongo_database, host=job_config.queue.mongo_url) as resource:
yield resource
_clean_queue_database()
| datasets-server-main | jobs/cache_maintenance/tests/conftest.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
from http import HTTPStatus
from libcommon.simple_cache import CacheTotalMetricDocument, upsert_response
from cache_maintenance.cache_metrics import collect_cache_metrics
def test_collect_cache_metrics() -> None:
dataset = "test_dataset"
config = None
split = None
content = {"some": "content"}
kind = "kind"
upsert_response(
kind=kind,
dataset=dataset,
config=config,
split=split,
content=content,
http_status=HTTPStatus.OK,
)
collect_cache_metrics()
cache_metrics = CacheTotalMetricDocument.objects()
assert cache_metrics
assert len(cache_metrics) == 1
metric = cache_metrics.first()
assert metric is not None
assert metric.kind == kind
assert metric.error_code is None
assert metric.http_status == HTTPStatus.OK
assert metric.total == 1
| datasets-server-main | jobs/cache_maintenance/tests/test_collect_cache_metrics.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
# see https://github.com/huggingface/moon-landing/blob/main/server/scripts/staging-seed-db.ts
CI_APP_TOKEN = "hf_app_datasets-server_token"
CI_PARQUET_CONVERTER_USER = "__PARQUET_CONVERTER_USER__"
CI_PARQUET_CONVERTER_APP_TOKEN = "hf_app_datasets-server-parquet-converter_token"
CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_URL_TEMPLATE = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
CI_USER = "__DUMMY_DATASETS_SERVER_USER__"
CI_USER_TOKEN = "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"
| datasets-server-main | jobs/cache_maintenance/tests/constants.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
import os
import time
from pathlib import Path
from cache_maintenance.delete_indexes import delete_indexes
def test_delete_indexes() -> None:
duckdb_index_cache_directory = "/tmp"
subdirectory = "download"
file_extension = ".duckdb"
expired_time_interval_seconds = 2
os.mkdir(f"{duckdb_index_cache_directory}/{subdirectory}")
index_file = Path(f"{duckdb_index_cache_directory}/{subdirectory}/index{file_extension}")
index_file.touch()
# ensure file exists
assert index_file.is_file()
# try to delete it immediately after creation, it should remain
delete_indexes(duckdb_index_cache_directory, subdirectory, file_extension, expired_time_interval_seconds)
assert index_file.is_file()
# try to delete it after more than the time interval, it should be deleted
index_file.touch()
time.sleep(expired_time_interval_seconds + 2)
delete_indexes(duckdb_index_cache_directory, subdirectory, file_extension, expired_time_interval_seconds)
assert not index_file.is_file()
os.rmdir(f"{duckdb_index_cache_directory}/{subdirectory}")
| datasets-server-main | jobs/cache_maintenance/tests/test_delete_indexes.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
from http import HTTPStatus
import pytest
from huggingface_hub.community import DiscussionComment
from libcommon.simple_cache import DatasetWithRevision, upsert_response
from libcommon.utils import get_datetime
from cache_maintenance.config import JobConfig
from cache_maintenance.discussions import (
DAYS,
PARQUET_CACHE_KIND,
create_discussion_description,
create_link,
create_parquet_comment,
limit_to_one_dataset_per_namespace,
post_messages,
)
from .utils import (
TemporaryDataset,
close_discussion,
count_comments,
fetch_bot_discussion,
)
@pytest.mark.parametrize(
"datasets, valid_expected_datasets",
[
(set(), [set()]),
({"a/b"}, [{"a/b"}]),
({"a"}, [set()]),
({"a/b/c"}, [set()]),
({"a/b", "a/b"}, [{"a/b"}]),
({"a/b", "a/c"}, [{"a/b"}, {"a/c"}]),
({"a/b", "b/b"}, [{"a/b", "b/b"}]),
({"a/b", "b"}, [{"a/b"}]),
],
)
def test_limit_to_one_dataset_per_namespace(datasets: set[str], valid_expected_datasets: list[set[str]]) -> None:
assert any(
{
d.dataset
for d in limit_to_one_dataset_per_namespace(
datasets_with_revision=[DatasetWithRevision(dataset=dataset, revision=None) for dataset in datasets]
)
}
== expected_datasets
for expected_datasets in valid_expected_datasets
)
def test_create_link() -> None:
assert (
create_link(
text="sometext",
dataset="a/b",
hf_endpoint="https://huggingface.co",
revision_type="commit",
revision="c/d",
)
== "[`sometext`](https://huggingface.co/datasets/a/b/commit/c%2Fd)"
)
def test_post_messages_in_one_dataset(job_config: JobConfig) -> None:
with TemporaryDataset(prefix="dataset") as dataset:
assert fetch_bot_discussion(dataset=dataset.repo_id) is None
# set "config-parquet" entry for the dataset
first_revision = "3bb24dcad2b45b45e20fc0accc93058dcbe8087d"
upsert_response(
kind=PARQUET_CACHE_KIND,
dataset=dataset.repo_id,
content={},
http_status=HTTPStatus.OK,
dataset_git_revision=first_revision,
)
# call post_messages
counters = post_messages(
hf_endpoint=job_config.common.hf_endpoint,
bot_associated_user_name=job_config.discussions.bot_associated_user_name,
bot_token=job_config.discussions.bot_token,
parquet_revision=job_config.discussions.parquet_revision,
)
# ensure one message has been posted in a dataset discussion
assert counters["parquet"] == {
"datasets": 1,
"messages": 1,
"new_discussions": 1,
"errors": 0,
"dismissed_messages": 0,
}
first_discussion = fetch_bot_discussion(dataset=dataset.repo_id)
assert first_discussion is not None
assert count_comments(first_discussion) == 2
first_comment = first_discussion.events[0]
assert isinstance(first_comment, DiscussionComment)
assert first_comment.content == create_discussion_description()
second_comment = first_discussion.events[1]
assert isinstance(second_comment, DiscussionComment)
assert second_comment.content == create_parquet_comment(
dataset=dataset.repo_id,
hf_endpoint=job_config.common.hf_endpoint,
parquet_revision=job_config.discussions.parquet_revision,
dataset_revision=first_revision,
)
# set a new "config-parquet" entry for the dataset
second_revision = "9a0bd9fe2a87bbb82702ed170a53cf4e86535070"
upsert_response(
kind=PARQUET_CACHE_KIND,
dataset=dataset.repo_id,
content={},
http_status=HTTPStatus.OK,
dataset_git_revision=second_revision,
)
# call post_messages again
counters = post_messages(
hf_endpoint=job_config.common.hf_endpoint,
bot_associated_user_name=job_config.discussions.bot_associated_user_name,
bot_token=job_config.discussions.bot_token,
parquet_revision=job_config.discussions.parquet_revision,
)
# ensure the message has been posted in the same discussion
assert counters["parquet"] == {
"datasets": 1,
"messages": 1,
"new_discussions": 0,
"errors": 0,
"dismissed_messages": 0,
}
second_discussion = fetch_bot_discussion(dataset=dataset.repo_id)
assert second_discussion is not None
assert first_discussion.num == second_discussion.num
assert count_comments(second_discussion) == 3
third_comment = second_discussion.events[2]
assert isinstance(third_comment, DiscussionComment)
assert third_comment.content == create_parquet_comment(
dataset=dataset.repo_id,
hf_endpoint=job_config.common.hf_endpoint,
parquet_revision=job_config.discussions.parquet_revision,
dataset_revision=second_revision,
)
# close the discussion
close_discussion(dataset=dataset.repo_id, discussion_num=first_discussion.num)
# call post_messages again
counters = post_messages(
hf_endpoint=job_config.common.hf_endpoint,
bot_associated_user_name=job_config.discussions.bot_associated_user_name,
bot_token=job_config.discussions.bot_token,
parquet_revision=job_config.discussions.parquet_revision,
)
# ensure the message has not been posted
assert counters["parquet"] == {
"datasets": 1,
"messages": 0,
"new_discussions": 0,
"errors": 0,
"dismissed_messages": 1,
}
third_discussion = fetch_bot_discussion(dataset=dataset.repo_id)
assert third_discussion is not None
assert first_discussion.num == third_discussion.num
assert count_comments(third_discussion) == 3
def test_post_messages_with_two_datasets_in_one_namespace(job_config: JobConfig) -> None:
with TemporaryDataset(prefix="dataset1") as dataset1, TemporaryDataset(prefix="dataset2") as dataset2:
assert fetch_bot_discussion(dataset=dataset1.repo_id) is None
assert fetch_bot_discussion(dataset=dataset2.repo_id) is None
# set "config-parquet" entry for the two datasets
upsert_response(
kind=PARQUET_CACHE_KIND,
dataset=dataset1.repo_id,
content={},
http_status=HTTPStatus.OK,
)
upsert_response(
kind=PARQUET_CACHE_KIND,
dataset=dataset2.repo_id,
content={},
http_status=HTTPStatus.OK,
)
# call post_messages
counters = post_messages(
hf_endpoint=job_config.common.hf_endpoint,
bot_associated_user_name=job_config.discussions.bot_associated_user_name,
bot_token=job_config.discussions.bot_token,
parquet_revision=job_config.discussions.parquet_revision,
)
# ensure one message has been posted in only one dataset discussion
assert counters["parquet"] == {
"datasets": 1,
"messages": 1,
"new_discussions": 1,
"errors": 0,
"dismissed_messages": 0,
}
discussion1 = fetch_bot_discussion(dataset=dataset1.repo_id)
discussion2 = fetch_bot_discussion(dataset=dataset2.repo_id)
discussion = discussion1 or discussion2
assert discussion is not None
assert discussion1 is None or discussion2 is None
assert count_comments(discussion) == 2
comment = discussion.events[1]
assert isinstance(comment, DiscussionComment)
assert comment.content == create_parquet_comment(
dataset=dataset1.repo_id,
hf_endpoint=job_config.common.hf_endpoint,
parquet_revision=job_config.discussions.parquet_revision,
dataset_revision=None,
) or create_parquet_comment(
dataset=dataset2.repo_id,
hf_endpoint=job_config.common.hf_endpoint,
parquet_revision=job_config.discussions.parquet_revision,
dataset_revision=None,
)
@pytest.mark.parametrize(
"gated,private",
[
(True, False),
(False, True),
(True, True),
],
)
def test_post_messages_in_private_or_gated_dataset(job_config: JobConfig, gated: bool, private: bool) -> None:
with TemporaryDataset(prefix="dataset", gated=gated, private=private) as dataset:
assert fetch_bot_discussion(dataset=dataset.repo_id) is None
# set "config-parquet" entry for the dataset
upsert_response(
kind=PARQUET_CACHE_KIND,
dataset=dataset.repo_id,
content={},
http_status=HTTPStatus.OK,
)
# call post_messages
counters = post_messages(
hf_endpoint=job_config.common.hf_endpoint,
bot_associated_user_name=job_config.discussions.bot_associated_user_name,
bot_token=job_config.discussions.bot_token,
parquet_revision=job_config.discussions.parquet_revision,
)
# ensure one message has been posted in a dataset discussion
# YES: even if it's private. Should we forbid this?
# Normally: the cache should not contain private datasets, but a public
# dataset can be switched to private, and for some reason, or during some
# time, the cache can contain private datasets.
assert counters["parquet"] == {
"datasets": 1,
"messages": 1,
"new_discussions": 1,
"errors": 0,
"dismissed_messages": 0,
}
first_discussion = fetch_bot_discussion(dataset=dataset.repo_id)
assert first_discussion is not None
assert count_comments(first_discussion) == 2
comment = first_discussion.events[1]
assert isinstance(comment, DiscussionComment)
assert comment.content == create_parquet_comment(
dataset=dataset.repo_id,
hf_endpoint=job_config.common.hf_endpoint,
parquet_revision=job_config.discussions.parquet_revision,
dataset_revision=None,
)
def test_post_messages_for_outdated_response(job_config: JobConfig) -> None:
with TemporaryDataset(prefix="dataset") as dataset:
assert fetch_bot_discussion(dataset=dataset.repo_id) is None
# set "config-parquet" entry for the dataset
upsert_response(
kind=PARQUET_CACHE_KIND,
dataset=dataset.repo_id,
content={},
http_status=HTTPStatus.OK,
updated_at=get_datetime(days=DAYS + 10),
)
# call post_messages
counters = post_messages(
hf_endpoint=job_config.common.hf_endpoint,
bot_associated_user_name=job_config.discussions.bot_associated_user_name,
bot_token=job_config.discussions.bot_token,
parquet_revision=job_config.discussions.parquet_revision,
)
# ensure no discussion has been created, because the content was too old
assert counters["parquet"] == {
"datasets": 0,
"messages": 0,
"new_discussions": 0,
"errors": 0,
"dismissed_messages": 0,
}
assert fetch_bot_discussion(dataset=dataset.repo_id) is None
# update the content
upsert_response(
kind=PARQUET_CACHE_KIND,
dataset=dataset.repo_id,
content={},
http_status=HTTPStatus.OK,
)
# call post_messages
counters = post_messages(
hf_endpoint=job_config.common.hf_endpoint,
bot_associated_user_name=job_config.discussions.bot_associated_user_name,
bot_token=job_config.discussions.bot_token,
parquet_revision=job_config.discussions.parquet_revision,
)
# ensure one message has been posted in a dataset discussion
assert counters["parquet"] == {
"datasets": 1,
"messages": 1,
"new_discussions": 1,
"errors": 0,
"dismissed_messages": 0,
}
first_discussion = fetch_bot_discussion(dataset=dataset.repo_id)
assert first_discussion is not None
assert count_comments(first_discussion) == 2
comment = first_discussion.events[1]
assert isinstance(comment, DiscussionComment)
assert comment.content == create_parquet_comment(
dataset=dataset.repo_id,
hf_endpoint=job_config.common.hf_endpoint,
parquet_revision=job_config.discussions.parquet_revision,
dataset_revision=None,
)
| datasets-server-main | jobs/cache_maintenance/tests/test_discussions.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
| datasets-server-main | jobs/cache_maintenance/tests/__init__.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
import time
from contextlib import suppress
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any, Optional, Union
import requests
from huggingface_hub.community import DiscussionComment, DiscussionWithDetails
from huggingface_hub.constants import (
REPO_TYPE_DATASET,
REPO_TYPES,
REPO_TYPES_URL_PREFIXES,
)
from huggingface_hub.hf_api import HfApi
from huggingface_hub.utils._errors import hf_raise_for_status
from libcommon.resources import Resource
from .constants import (
CI_HUB_ENDPOINT,
CI_PARQUET_CONVERTER_USER,
CI_USER,
CI_USER_TOKEN,
)
DATASET = "dataset"
hf_api = HfApi(endpoint=CI_HUB_ENDPOINT)
def get_default_config_split() -> tuple[str, str]:
config = "default"
split = "train"
return config, split
def update_repo_settings(
*,
repo_id: str,
private: Optional[bool] = None,
gated: Optional[str] = None,
token: Optional[str] = None,
organization: Optional[str] = None,
repo_type: Optional[str] = None,
name: Optional[str] = None,
) -> Any:
"""Update the settings of a repository.
Args:
repo_id (`str`):
A namespace (user or an organization) and a repo name separated
by a `/`.
<Tip>
Version added: 0.5
</Tip>
private (`bool`, *optional*, defaults to `None`):
Whether the repo should be private.
gated (`str`, *optional*, defaults to `None`):
Whether the repo should request user access.
Possible values are 'auto' and 'manual'
token (`str`, *optional*):
An authentication token (See https://huggingface.co/settings/token)
repo_type (`str`, *optional*):
Set to `"dataset"` or `"space"` if uploading to a dataset or
space, `None` or `"model"` if uploading to a model. Default is
`None`.
Returns:
The HTTP response in json.
<Tip>
Raises the following errors:
- [`~huggingface_hub.utils.RepositoryNotFoundError`]
If the repository to download from cannot be found. This may be because it doesn't exist,
or because it is set to `private` and you do not have access.
</Tip>
"""
if repo_type not in REPO_TYPES:
raise ValueError("Invalid repo type")
organization, name = repo_id.split("/") if "/" in repo_id else (None, repo_id)
if organization is None:
namespace = hf_api.whoami(token)["name"]
else:
namespace = organization
path_prefix = f"{hf_api.endpoint}/api/"
if repo_type in REPO_TYPES_URL_PREFIXES:
path_prefix += REPO_TYPES_URL_PREFIXES[repo_type]
path = f"{path_prefix}{namespace}/{name}/settings"
json: dict[str, Union[bool, str]] = {}
if private is not None:
json["private"] = private
if gated is not None:
json["gated"] = gated
r = requests.put(
path,
headers={"authorization": f"Bearer {token}"},
json=json,
)
hf_raise_for_status(r)
return r.json()
def create_empty_hub_dataset_repo(
*,
prefix: str,
file_paths: Optional[list[str]] = None,
private: bool = False,
gated: Optional[str] = None,
) -> str:
dataset_name = f"{prefix}-{int(time.time() * 10e3)}"
repo_id = f"{CI_USER}/{dataset_name}"
hf_api.create_repo(repo_id=repo_id, token=CI_USER_TOKEN, repo_type=DATASET, private=private)
if gated:
update_repo_settings(repo_id=repo_id, token=CI_USER_TOKEN, gated=gated, repo_type=DATASET)
if file_paths is not None:
for file_path in file_paths:
hf_api.upload_file(
token=CI_USER_TOKEN,
path_or_fileobj=file_path,
path_in_repo=Path(file_path).name.replace("{dataset_name}", dataset_name),
repo_id=repo_id,
repo_type=DATASET,
)
return repo_id
def delete_hub_dataset_repo(repo_id: str) -> None:
with suppress(requests.exceptions.HTTPError, ValueError):
hf_api.delete_repo(repo_id=repo_id, token=CI_USER_TOKEN, repo_type=DATASET)
@dataclass
class TemporaryDataset(Resource):
"""A temporary dataset."""
prefix: str
gated: bool = False
private: bool = False
repo_id: str = field(init=False)
def allocate(self) -> None:
self.repo_id = create_empty_hub_dataset_repo(
prefix=self.prefix, gated="auto" if self.gated else None, private=self.private
)
def release(self) -> None:
delete_hub_dataset_repo(repo_id=self.repo_id)
def fetch_bot_discussion(dataset: str) -> Optional[DiscussionWithDetails]:
"""
Fetch the discussion for a dataset and a user.
"""
hf_api = HfApi(endpoint=CI_HUB_ENDPOINT, token=CI_USER_TOKEN)
discussions = hf_api.get_repo_discussions(repo_id=dataset, repo_type=REPO_TYPE_DATASET)
discussion = next(
(discussion for discussion in discussions if discussion.author == CI_PARQUET_CONVERTER_USER), None
)
if not discussion:
return None
return hf_api.get_discussion_details(repo_id=dataset, repo_type=REPO_TYPE_DATASET, discussion_num=discussion.num)
def close_discussion(dataset: str, discussion_num: int) -> None:
"""
Let the dataset owner close a discussion.
"""
hf_api = HfApi(endpoint=CI_HUB_ENDPOINT, token=CI_USER_TOKEN)
hf_api.change_discussion_status(
repo_id=dataset, repo_type=REPO_TYPE_DATASET, discussion_num=discussion_num, new_status="closed"
)
def count_comments(discussion: DiscussionWithDetails) -> int:
return len(list(event for event in discussion.events if isinstance(event, DiscussionComment)))
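# Illustrative sketch (assumed flow, requires the CI Hub credentials defined above):
# create a throwaway dataset, then look for a discussion opened by the parquet-converter
# bot and count its comments.
if __name__ == "__main__":
    with TemporaryDataset(prefix="demo") as dataset:
        discussion = fetch_bot_discussion(dataset=dataset.repo_id)
        print(count_comments(discussion) if discussion is not None else "no bot discussion yet")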
| datasets-server-main | jobs/cache_maintenance/tests/utils.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
from libcommon.processing_graph import ProcessingGraph
from libcommon.queue import JobTotalMetricDocument, Queue
from libcommon.utils import Status
from cache_maintenance.queue_metrics import collect_queue_metrics
def test_collect_queue_metrics() -> None:
processing_step_name = "test_type"
processing_graph = ProcessingGraph(
processing_graph_specification={processing_step_name: {"input_type": "dataset", "job_runner_version": 1}}
)
processing_step = processing_graph.get_processing_step(processing_step_name)
queue = Queue()
queue.add_job(
job_type=processing_step.job_type,
dataset="dataset",
revision="revision",
config="config",
split="split",
difficulty=50,
)
assert JobTotalMetricDocument.objects().count() == 1
collect_queue_metrics(processing_graph=processing_graph)
job_metrics = JobTotalMetricDocument.objects().all()
assert job_metrics
assert len(job_metrics) == len(Status)  # one per job status, see libcommon.queue.get_jobs_count_by_status
waiting_job = next((job for job in job_metrics if job.status == "waiting"), None)
assert waiting_job
assert waiting_job.total == 1
remaining_status = [job for job in job_metrics if job.status != "waiting"]
assert remaining_status
assert all(job.total == 0 for job in remaining_status)
| datasets-server-main | jobs/cache_maintenance/tests/test_collect_queue_metrics.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
import logging
from libcommon.processing_graph import ProcessingGraph
from libcommon.queue import JobTotalMetricDocument, Queue
def collect_queue_metrics(processing_graph: ProcessingGraph) -> None:
logging.info("collecting queue metrics")
queue = Queue()
for processing_step in processing_graph.get_processing_steps():
for status, new_total in queue.get_jobs_count_by_status(job_type=processing_step.job_type).items():
job_type = processing_step.job_type
query_set = JobTotalMetricDocument.objects(job_type=job_type, status=status)
current_metric = query_set.first()
if current_metric is not None:
current_total = current_metric.total
logging.info(
f"{job_type=} {status=} current_total={current_total} new_total="
f"{new_total} difference={int(new_total)-current_total}" # type: ignore
)
query_set.upsert_one(total=new_total)
logging.info("queue metrics have been collected")
| datasets-server-main | jobs/cache_maintenance/src/cache_maintenance/queue_metrics.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
import logging
from typing import Literal, Optional, TypedDict
from urllib import parse
from huggingface_hub import HfApi
from huggingface_hub.constants import REPO_TYPE_DATASET
from libcommon.simple_cache import (
DatasetWithRevision,
get_datasets_with_last_updated_kind,
)
PARQUET_CACHE_KIND = "config-parquet"
DAYS = 1
CLOSED_STATUS = "closed"
class ParquetCounters(TypedDict):
datasets: int
messages: int
dismissed_messages: int
new_discussions: int
errors: int
class Counters(TypedDict):
parquet: ParquetCounters
def post_messages(
hf_endpoint: str, bot_associated_user_name: Optional[str], bot_token: Optional[str], parquet_revision: str
) -> Counters:
"""
Post messages in Hub discussions to notify users.
"""
if not (bot_associated_user_name and bot_token):
raise Exception("No bot token or user name provided, skipping posting messages.")
return Counters(
parquet=post_messages_on_parquet_conversion(
hf_endpoint=hf_endpoint,
bot_associated_user_name=bot_associated_user_name,
bot_token=bot_token,
parquet_revision=parquet_revision,
)
)
def post_messages_on_parquet_conversion(
hf_endpoint: str,
bot_associated_user_name: str,
bot_token: str,
parquet_revision: str,
) -> ParquetCounters:
logging.info("Post messages in Hub discussion to notify about parquet conversion")
datasets_with_revision = limit_to_one_dataset_per_namespace(
get_datasets_with_last_updated_kind(kind=PARQUET_CACHE_KIND, days=DAYS)
)
logging.info(f"Posting messages for {len(datasets_with_revision)} datasets")
log_batch = 100
counters: ParquetCounters = {
"datasets": 0,
"messages": 0,
"dismissed_messages": 0,
"new_discussions": 0,
"errors": 0,
}
def get_log() -> str:
return (
f"{counters['messages'] } messages posted (total:"
f" {len(datasets_with_revision)} datasets): {counters['new_discussions']} discussions have been opened."
f" {counters['dismissed_messages']} messages have been dismissed because the discussion had been closed."
f" {counters['errors']} errors."
)
hf_api = HfApi(endpoint=hf_endpoint, token=bot_token)
for dataset_with_revision in datasets_with_revision:
dataset = dataset_with_revision.dataset
revision = dataset_with_revision.revision
counters["datasets"] += 1
try:
bot_discussions = [
discussion
for discussion in hf_api.get_repo_discussions(
repo_id=dataset, repo_type=REPO_TYPE_DATASET, token=bot_token
)
if discussion.author == bot_associated_user_name
]
if bot_discussions:
if len(bot_discussions) > 1:
logging.warning(
f"Found {len(bot_discussions)} discussions for {dataset} with bot {bot_associated_user_name},"
" only the first one will be used."
)
discussion = bot_discussions[0]
else:
discussion = hf_api.create_discussion(
repo_id=dataset,
repo_type=REPO_TYPE_DATASET,
title="Notifications from Datasets Server",
description=create_discussion_description(),
token=bot_token,
)
counters["new_discussions"] += 1
if discussion.status == CLOSED_STATUS:
counters["dismissed_messages"] += 1
continue
hf_api.comment_discussion(
repo_id=dataset,
repo_type=REPO_TYPE_DATASET,
discussion_num=discussion.num,
comment=create_parquet_comment(
dataset=dataset,
hf_endpoint=hf_endpoint,
parquet_revision=parquet_revision,
dataset_revision=revision,
),
token=bot_token,
)
counters["messages"] += 1
except Exception as e:
logging.warning(f"Failed to post a message for {dataset}: {e}")
counters["errors"] += 1
logging.debug(get_log())
if (counters["datasets"]) % log_batch == 0:
logging.info(get_log())
logging.info(get_log())
logging.info("All the messages about parquet conversion have been posted.")
return counters
def temporary_call_to_action_for_feedback() -> str:
return "Please comment below if you have any questions or feedback about this new notifications channel. "
def create_discussion_description() -> str:
return (
"The Datasets Server bot will post messages here about operations such as conversion to"
" Parquet. There are some advantages associated with having a version of your dataset available in the "
"[Parquet format](https://parquet.apache.org/). You can learn more about these in the"
f""" [documentation](https://huggingface.co/docs/datasets-server/parquet).
_{temporary_call_to_action_for_feedback()}Close the discussion if you want to stop receiving notifications._"""
)
def create_parquet_comment(
dataset: str, hf_endpoint: str, parquet_revision: str, dataset_revision: Optional[str]
) -> str:
link_dataset = (
create_link(
text=dataset_revision[:7],
dataset=dataset,
hf_endpoint=hf_endpoint,
revision_type="commit",
revision=dataset_revision,
)
if dataset_revision
else None
)
link_dataset = f" revision {link_dataset}" if link_dataset else ""
link_parquet = create_link(
text=parquet_revision,
dataset=dataset,
hf_endpoint=hf_endpoint,
revision_type="tree",
revision=parquet_revision,
)
return f"""Datasets Server has converted the dataset{link_dataset} to Parquet.
The Parquet files are published to the Hub in the {link_parquet} branch."""
def create_link(
text: str, dataset: str, hf_endpoint: str, revision_type: Literal["commit", "tree"], revision: str
) -> str:
return f"[`{text}`]({hf_endpoint}/datasets/{dataset}/{revision_type}/{parse.quote(revision, safe='')})"
def limit_to_one_dataset_per_namespace(datasets_with_revision: list[DatasetWithRevision]) -> list[DatasetWithRevision]:
"""
Limit the number of datasets to one per namespace.
For instance, if we have `a/b` and `a/c`, we will only keep one of them.
The choice is arbitrary. The filtered list has no particular order.
Args:
datasets_with_revision (list[DatasetWithRevision]): The list of datasets (with revision) to filter.
Returns:
list[DatasetWithRevision]: The filtered list of datasets (with revision).
"""
namespaces: set[str] = set()
selected_datasets_with_revision: list[DatasetWithRevision] = []
for dataset_with_revision in datasets_with_revision:
namespace = get_namespace(dataset_with_revision.dataset)
if (namespace is None) or (namespace in namespaces):
continue
namespaces.add(namespace)
selected_datasets_with_revision.append(dataset_with_revision)
return selected_datasets_with_revision
def get_namespace(dataset: str) -> Optional[str]:
splits = dataset.split("/")
return splits[0] if len(splits) == 2 else None
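# Illustrative sketch (assumed dataset names, not executed by the job): `get_namespace`
# returns None for datasets without a namespace, and `limit_to_one_dataset_per_namespace`
# keeps at most one dataset per namespace, so only one of "org-a/first" and "org-a/second"
# is selected below.
if __name__ == "__main__":
    candidates = [
        DatasetWithRevision(dataset="org-a/first", revision=None),
        DatasetWithRevision(dataset="org-a/second", revision=None),
        DatasetWithRevision(dataset="org-b/other", revision="main"),
        DatasetWithRevision(dataset="no-namespace", revision=None),
    ]
    # prints two entries: one from "org-a" and one from "org-b"
    print([d.dataset for d in limit_to_one_dataset_per_namespace(candidates)])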
| datasets-server-main | jobs/cache_maintenance/src/cache_maintenance/discussions.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from dataclasses import dataclass, field
from typing import Optional
from environs import Env
from libcommon.config import (
CacheConfig,
CommonConfig,
LogConfig,
ProcessingGraphConfig,
QueueConfig,
)
CACHE_MAINTENANCE_BACKFILL_ERROR_CODES_TO_RETRY = None
@dataclass(frozen=True)
class BackfillConfig:
error_codes_to_retry: Optional[list[str]] = CACHE_MAINTENANCE_BACKFILL_ERROR_CODES_TO_RETRY
@classmethod
def from_env(cls) -> "BackfillConfig":
env = Env(expand_vars=True)
return cls(
error_codes_to_retry=env.list(name="CACHE_MAINTENANCE_BACKFILL_ERROR_CODES_TO_RETRY", default=""),
)
DUCKDB_INDEX_CACHE_DIRECTORY = None
DUCKDB_INDEX_SUBDIRECTORY = "downloads"
DUCKDB_INDEX_EXPIRED_TIME_INTERVAL_SECONDS = 10 * 60 # 10 minutes
DUCKDB_INDEX_FILE_EXTENSION = ".duckdb"
@dataclass(frozen=True)
class DuckDbConfig:
cache_directory: Optional[str] = DUCKDB_INDEX_CACHE_DIRECTORY
subdirectory: str = DUCKDB_INDEX_SUBDIRECTORY
expired_time_interval_seconds: int = DUCKDB_INDEX_EXPIRED_TIME_INTERVAL_SECONDS
file_extension: str = DUCKDB_INDEX_FILE_EXTENSION
@classmethod
def from_env(cls) -> "DuckDbConfig":
env = Env(expand_vars=True)
with env.prefixed("DUCKDB_INDEX_"):
return cls(
cache_directory=env.str(name="CACHE_DIRECTORY", default=DUCKDB_INDEX_CACHE_DIRECTORY),
subdirectory=env.str(name="SUBDIRECTORY", default=DUCKDB_INDEX_SUBDIRECTORY),
expired_time_interval_seconds=env.int(
name="EXPIRED_TIME_INTERVAL_SECONDS", default=DUCKDB_INDEX_EXPIRED_TIME_INTERVAL_SECONDS
),
file_extension=env.str(name="FILE_EXTENSION", default=DUCKDB_INDEX_FILE_EXTENSION),
)
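# Illustrative (assumed) environment for `DuckDbConfig.from_env()`; unset variables keep
# the defaults defined above:
#   DUCKDB_INDEX_CACHE_DIRECTORY=/storage/duckdb-index
#   DUCKDB_INDEX_SUBDIRECTORY=downloads
#   DUCKDB_INDEX_EXPIRED_TIME_INTERVAL_SECONDS=1200
#   DUCKDB_INDEX_FILE_EXTENSION=.duckdb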
DISCUSSIONS_BOT_ASSOCIATED_USER_NAME = None
DISCUSSIONS_BOT_TOKEN = None
DISCUSSIONS_PARQUET_REVISION = "refs/convert/parquet"
@dataclass(frozen=True)
class DiscussionsConfig:
bot_associated_user_name: Optional[str] = DISCUSSIONS_BOT_ASSOCIATED_USER_NAME
bot_token: Optional[str] = DISCUSSIONS_BOT_TOKEN
parquet_revision: str = DISCUSSIONS_PARQUET_REVISION
@classmethod
def from_env(cls) -> "DiscussionsConfig":
env = Env(expand_vars=True)
with env.prefixed("DISCUSSIONS_"):
return cls(
bot_associated_user_name=env.str(
name="BOT_ASSOCIATED_USER_NAME", default=DISCUSSIONS_BOT_ASSOCIATED_USER_NAME
),
bot_token=env.str(name="BOT_TOKEN", default=DISCUSSIONS_BOT_TOKEN),
parquet_revision=env.str(name="PARQUET_REVISION", default=DISCUSSIONS_PARQUET_REVISION),
)
CACHE_MAINTENANCE_ACTION = None
@dataclass(frozen=True)
class JobConfig:
log: LogConfig = field(default_factory=LogConfig)
cache: CacheConfig = field(default_factory=CacheConfig)
queue: QueueConfig = field(default_factory=QueueConfig)
common: CommonConfig = field(default_factory=CommonConfig)
graph: ProcessingGraphConfig = field(default_factory=ProcessingGraphConfig)
backfill: BackfillConfig = field(default_factory=BackfillConfig)
duckdb: DuckDbConfig = field(default_factory=DuckDbConfig)
discussions: DiscussionsConfig = field(default_factory=DiscussionsConfig)
action: Optional[str] = CACHE_MAINTENANCE_ACTION
@classmethod
def from_env(cls) -> "JobConfig":
env = Env(expand_vars=True)
return cls(
log=LogConfig.from_env(),
cache=CacheConfig.from_env(),
queue=QueueConfig.from_env(),
common=CommonConfig.from_env(),
graph=ProcessingGraphConfig.from_env(),
backfill=BackfillConfig.from_env(),
duckdb=DuckDbConfig.from_env(),
discussions=DiscussionsConfig.from_env(),
action=env.str(name="CACHE_MAINTENANCE_ACTION", default=CACHE_MAINTENANCE_ACTION),
)
| datasets-server-main | jobs/cache_maintenance/src/cache_maintenance/config.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
| datasets-server-main | jobs/cache_maintenance/src/cache_maintenance/__init__.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
import glob
import logging
import os
from datetime import datetime, timedelta
from libcommon.storage import StrPath
def delete_indexes(
duckdb_index_cache_directory: StrPath, subdirectory: str, file_extension: str, expired_time_interval_seconds: int
) -> None:
"""
Delete temporary DuckDB index files downloaded to handle /search requests
"""
logging.info("delete indexes")
indexes_folder = f"{duckdb_index_cache_directory}/{subdirectory}/**/*{file_extension}"
logging.info(f"looking for all files with pattern {indexes_folder}")
now = datetime.now().replace(tzinfo=None)
for path in glob.glob(indexes_folder, recursive=True):
last_access_time_value = os.path.getatime(path)
last_access_datetime = datetime.fromtimestamp(last_access_time_value).replace(tzinfo=None)
if last_access_datetime + timedelta(seconds=expired_time_interval_seconds) <= now:
logging.info(f"deleting file {path=} {last_access_datetime=}")
os.remove(path)
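# Illustrative usage (assumed paths and values, not part of the scheduled job): remove
# .duckdb files under /tmp/duckdb-index/downloads that have not been accessed for an hour.
if __name__ == "__main__":
    delete_indexes(
        duckdb_index_cache_directory="/tmp/duckdb-index",
        subdirectory="downloads",
        file_extension=".duckdb",
        expired_time_interval_seconds=3600,
    )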
| datasets-server-main | jobs/cache_maintenance/src/cache_maintenance/delete_indexes.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import logging
from typing import Optional
from libcommon.dataset import get_supported_dataset_infos
from libcommon.orchestrator import DatasetOrchestrator
from libcommon.processing_graph import ProcessingGraph
from libcommon.utils import Priority
def backfill_cache(
processing_graph: ProcessingGraph,
hf_endpoint: str,
cache_max_days: int,
hf_token: Optional[str] = None,
error_codes_to_retry: Optional[list[str]] = None,
) -> None:
logging.info("backfill supported datasets")
supported_dataset_infos = get_supported_dataset_infos(hf_endpoint=hf_endpoint, hf_token=hf_token)
logging.info(f"analyzing {len(supported_dataset_infos)} supported datasets")
analyzed_datasets = 0
backfilled_datasets = 0
total_created_jobs = 0
log_batch = 100
def get_log() -> str:
return (
f"{analyzed_datasets} analyzed datasets (total: {len(supported_dataset_infos)} datasets):"
f" {backfilled_datasets} backfilled datasets ({100 * backfilled_datasets / analyzed_datasets:.2f}%), with"
f" {total_created_jobs} created jobs."
)
for dataset_info in supported_dataset_infos:
analyzed_datasets += 1
dataset = dataset_info.id
if not dataset:
logging.warning(f"dataset id not found for {dataset_info}")
# should not occur
continue
if dataset_info.sha is None:
logging.warning(f"dataset revision not found for {dataset_info}")
# should not occur
continue
try:
dataset_orchestrator = DatasetOrchestrator(dataset=dataset, processing_graph=processing_graph)
except Exception as e:
logging.warning(f"failed to create DatasetOrchestrator for {dataset_info}: {e}")
continue
try:
created_jobs = dataset_orchestrator.backfill(
revision=str(dataset_info.sha),
priority=Priority.LOW,
error_codes_to_retry=error_codes_to_retry,
cache_max_days=cache_max_days,
)
if created_jobs > 0:
backfilled_datasets += 1
total_created_jobs += created_jobs
except Exception as e:
logging.warning(f"failed to backfill {dataset_info}: {e}")
continue
logging.debug(get_log())
if analyzed_datasets % log_batch == 0:
logging.info(get_log())
logging.info(get_log())
logging.info("backfill completed")
| datasets-server-main | jobs/cache_maintenance/src/cache_maintenance/backfill.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
import logging
from libcommon.simple_cache import (
CacheTotalMetricDocument,
get_responses_count_by_kind_status_and_error_code,
)
def collect_cache_metrics() -> None:
logging.info("collecting cache metrics")
for metric in get_responses_count_by_kind_status_and_error_code():
kind = metric["kind"]
http_status = metric["http_status"]
error_code = metric["error_code"]
new_total = metric["count"]
query_set = CacheTotalMetricDocument.objects(kind=kind, http_status=http_status, error_code=error_code)
current_metric = query_set.first()
if current_metric is not None:
current_total = current_metric.total
logging.info(
f"{kind=} {http_status=} {error_code=} current_total={current_total} new_total="
f"{new_total} difference={new_total-current_total}"
)
query_set.upsert_one(total=metric["count"])
logging.info("cache metrics have been collected")
| datasets-server-main | jobs/cache_maintenance/src/cache_maintenance/cache_metrics.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import logging
import sys
from datetime import datetime
from libcommon.log import init_logging
from libcommon.processing_graph import ProcessingGraph
from libcommon.resources import CacheMongoResource, QueueMongoResource
from libcommon.storage import init_duckdb_index_cache_dir
from cache_maintenance.backfill import backfill_cache
from cache_maintenance.cache_metrics import collect_cache_metrics
from cache_maintenance.config import JobConfig
from cache_maintenance.delete_indexes import delete_indexes
from cache_maintenance.discussions import post_messages
from cache_maintenance.queue_metrics import collect_queue_metrics
def run_job() -> None:
job_config = JobConfig.from_env()
action = job_config.action
supported_actions = ["backfill", "collect-cache-metrics", "collect-queue-metrics", "delete-indexes", "skip"]
# In the future we will support other kind of actions
if not action:
logging.warning("No action mode was selected, skipping tasks.")
return
if action not in supported_actions:
logging.warning(f"Wrong action mode selected, supported actions are {supported_actions}.")
return
init_logging(level=job_config.log.level)
with (
CacheMongoResource(
database=job_config.cache.mongo_database, host=job_config.cache.mongo_url
) as cache_resource,
QueueMongoResource(
database=job_config.queue.mongo_database, host=job_config.queue.mongo_url
) as queue_resource,
):
if not cache_resource.is_available():
logging.warning("The connection to the cache database could not be established. The action is skipped.")
return
if not queue_resource.is_available():
logging.warning("The connection to the queue database could not be established. The action is skipped.")
return
processing_graph = ProcessingGraph(job_config.graph.specification)
start_time = datetime.now()
if action == "backfill":
backfill_cache(
processing_graph=processing_graph,
hf_endpoint=job_config.common.hf_endpoint,
hf_token=job_config.common.hf_token,
error_codes_to_retry=job_config.backfill.error_codes_to_retry,
cache_max_days=job_config.cache.max_days,
)
elif action == "collect-queue-metrics":
collect_queue_metrics(processing_graph=processing_graph)
elif action == "collect-cache-metrics":
collect_cache_metrics()
elif action == "delete-indexes":
duckdb_index_cache_directory = init_duckdb_index_cache_dir(directory=job_config.duckdb.cache_directory)
delete_indexes(
duckdb_index_cache_directory=duckdb_index_cache_directory,
subdirectory=job_config.duckdb.subdirectory,
expired_time_interval_seconds=job_config.duckdb.expired_time_interval_seconds,
file_extension=job_config.duckdb.file_extension,
)
elif action == "post-messages":
post_messages(
hf_endpoint=job_config.common.hf_endpoint,
bot_associated_user_name=job_config.discussions.bot_associated_user_name,
bot_token=job_config.discussions.bot_token,
parquet_revision=job_config.discussions.parquet_revision,
)
end_time = datetime.now()
logging.info(f"Duration: {end_time - start_time}")
if __name__ == "__main__":
try:
run_job()
sys.exit(0)
except Exception as e:
logging.exception(e)
sys.exit(1)
| datasets-server-main | jobs/cache_maintenance/src/cache_maintenance/main.py |