python_code | repo_name | file_path
---|---|---|
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import pytest
from .utils import get, get_openapi_body_example, poll, poll_splits, post_refresh
@pytest.mark.parametrize(
"status,name,dataset,config,error_code",
[
# (200, "all splits in a dataset", "duorc", None, None),
# (200, "splits for a single config", "emotion", "unsplit", None)
(
401,
"inexistent dataset, and not authenticated",
"severo/inexistent-dataset",
None,
"ExternalUnauthenticatedError",
),
# (
# 401,
# "gated-dataset",
# "severo/dummy_gated", None,
# "ExternalUnauthenticatedError",
# ),
# (
# 401,
# "private-dataset",
# "severo/dummy_private", None,
# "ExternalUnauthenticatedError",
# ),
(422, "missing dataset parameter", "", None, "MissingRequiredParameter"),
(422, "empty dataset parameter", None, None, "MissingRequiredParameter"),
# (500, "SplitsNotFoundError", "natural_questions", None, "SplitsNamesError"),
# (500, "FileNotFoundError", "akhaliq/test", None, "SplitsNamesError"),
# (500, "not-ready", "severo/fix-401", None, "SplitsResponseNotReady"),
# not tested: 'internal_error'
],
)
def test_splits_using_openapi(status: int, name: str, dataset: str, config: str, error_code: str) -> None:
body = get_openapi_body_example("/splits", status, name)
config_query = f"&config={config}" if config else ""
if name == "empty dataset parameter":
r_splits = poll("/splits?dataset=", error_field="error")
elif name == "missing dataset parameter":
r_splits = poll("/splits", error_field="error")
else:
post_refresh(dataset)
# poll the endpoint before the worker had the chance to process it
r_splits = (
get(f"/splits?dataset={dataset}{config_query}") if name == "not-ready" else poll_splits(dataset, config)
)
assert r_splits.status_code == status, f"{r_splits.status_code} - {r_splits.text}"
assert r_splits.json() == body, r_splits.text
if error_code is not None:
assert r_splits.headers["X-Error-Code"] == error_code, r_splits.headers["X-Error-Code"]
else:
assert "X-Error-Code" not in r_splits.headers, r_splits.headers["X-Error-Code"]
@pytest.mark.parametrize(
"status,dataset,config,error_code",
[
# (200, "duorc", "SelfRC", None),
(401, "missing-parameter", None, "ExternalUnauthenticatedError")
# an empty config falls back to asking for the whole dataset, which does not exist
],
)
def test_splits_with_config_using_openapi(status: int, dataset: str, config: str, error_code: str) -> None:
r_splits = (
poll(f"/splits?dataset={dataset}&config=", error_field="error")
if error_code
else poll(f"/splits?dataset={dataset}&config={config}")
)
assert r_splits.status_code == status, f"{r_splits.status_code} - {r_splits.text}"
if error_code is None:
assert all(split["config"] == config for split in r_splits.json()["splits"])
# all splits must belong to the provided config
assert "X-Error-Code" not in r_splits.headers, r_splits.headers["X-Error-Code"]
else:
assert r_splits.headers["X-Error-Code"] == error_code, r_splits.headers["X-Error-Code"]
| datasets-server-main | e2e/tests/test_12_splits.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import pytest
from .utils import poll
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub"]
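# the root endpoint is hidden behind the reverse proxy (it returns 404, see test_10_healthcheck.py),
# so getting a 404 on "/" is enough to know that the stack is reachable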
@pytest.fixture(autouse=True, scope="session")
def ensure_services_are_up() -> None:
assert poll("/", expected_code=404).status_code == 404
| datasets-server-main | e2e/tests/conftest.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from .utils import ADMIN_URL, poll
def test_healthcheck() -> None:
# this test ensures the /healthcheck endpoint of the admin service is up
response = poll("/healthcheck", expected_code=200, url=ADMIN_URL)
assert response.status_code == 200, f"{response.status_code} - {response.text}"
assert "ok" in response.text, response.text
| datasets-server-main | e2e/tests/test_30_admin_healthcheck.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
import os
import re
from collections.abc import Mapping
from .utils import SEARCH_URL, get
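# helper: check whether a metric with the given name and labels was reported;
# label values are treated as regular expressions (e.g. pid="[0-9]*"), so we build a pattern
# like 'name{key="value",...}' and match it against the reported metric names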
def has_metric(name: str, labels: Mapping[str, str], metric_names: set[str]) -> bool:
label_str = ",".join([f'{k}="{v}"' for k, v in labels.items()])
s = name + "{" + label_str + "}"
return any(re.match(s, metric_name) is not None for metric_name in metric_names)
def test_metrics() -> None:
assert "PROMETHEUS_MULTIPROC_DIR" in os.environ
response = get("/metrics", url=SEARCH_URL)
assert response.status_code == 200, f"{response.status_code} - {response.text}"
content = response.text
lines = content.split("\n")
# examples:
# starlette_requests_total{method="GET",path_template="/metrics"} 1.0
# method_steps_processing_time_seconds_sum{method="healthcheck_endpoint",step="all"} 1.6772013623267412e-05
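# each exposed line is "<name>{<labels>} <value>": split on the last space to separate the
# name+labels part from the numeric value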
metrics = {
parts[0]: float(parts[1]) for line in lines if line and line[0] != "#" and (parts := line.rsplit(" ", 1))
}
# see https://github.com/prometheus/client_python#multiprocess-mode-eg-gunicorn
assert "process_start_time_seconds" not in metrics
# the middleware should have recorded the request
name = 'starlette_requests_total{method="GET",path_template="/metrics"}'
assert name in metrics, metrics
assert metrics[name] > 0, metrics
metric_names = set(metrics.keys())
for endpoint in ["/search"]:
# these metrics are only available in the admin API
assert not has_metric(
name="queue_jobs_total",
labels={"pid": "[0-9]*", "queue": endpoint, "status": "started"},
metric_names=metric_names,
), f"queue_jobs_total - endpoint={endpoint} found in {metrics}"
assert not has_metric(
name="responses_in_cache_total",
labels={"error_code": "None", "http_status": "200", "path": endpoint, "pid": "[0-9]*"},
metric_names=metric_names,
), f"responses_in_cache_total - endpoint {endpoint} found in {metrics}"
| datasets-server-main | e2e/tests/test_51_search_metrics.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
| datasets-server-main | e2e/tests/__init__.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
from .fixtures.hub import AuthHeaders, AuthType
from .utils import get_default_config_split, poll_until_ready_and_assert
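# the expected values below come from the 4-row CSV fixture (see fixtures/files.py):
# col_2 is [0, 1, 2, 3] and col_3 is [0.0, 1.0, 2.0, 3.0], hence mean 1.5, median 1.5
# and sample std ≈ 1.29099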
def test_statistics_endpoint(
auth_headers: AuthHeaders,
hf_public_dataset_repo_csv_data: str,
) -> None:
auth: AuthType = "none"
expected_status_code: int = 200
expected_error_code = None
# TODO: add dataset with various splits, or various configs
dataset = hf_public_dataset_repo_csv_data
config, split = get_default_config_split()
headers = auth_headers[auth]
statistics_response = poll_until_ready_and_assert(
relative_url=f"/statistics?dataset={dataset}&config={config}&split={split}",
expected_status_code=expected_status_code,
expected_error_code=expected_error_code,
headers=headers,
check_x_revision=True,
)
content = statistics_response.json()
assert "num_examples" in content, statistics_response
assert "statistics" in content, statistics_response
statistics = content["statistics"]
num_examples = content["num_examples"]
assert isinstance(statistics, list), statistics
assert len(statistics) == 2
assert num_examples == 4
first_column = statistics[0]
assert "column_name" in first_column
assert "column_statistics" in first_column
assert "column_type" in first_column
assert first_column["column_name"] == "col_2"
assert first_column["column_type"] == "int"
assert isinstance(first_column["column_statistics"], dict)
assert first_column["column_statistics"] == {
"histogram": {"bin_edges": [0, 1, 2, 3, 3], "hist": [1, 1, 1, 1]},
"max": 3,
"mean": 1.5,
"median": 1.5,
"min": 0,
"nan_count": 0,
"nan_proportion": 0.0,
"std": 1.29099,
}
second_column = statistics[1]
assert "column_name" in second_column
assert "column_statistics" in second_column
assert "column_type" in second_column
assert second_column["column_name"] == "col_3"
assert second_column["column_type"] == "float"
assert isinstance(second_column["column_statistics"], dict)
assert second_column["column_statistics"] == {
"nan_count": 0,
"nan_proportion": 0.0,
"min": 0.0,
"max": 3.0,
"mean": 1.5,
"median": 1.5,
"std": 1.29099,
"histogram": {
"hist": [1, 0, 0, 1, 0, 0, 1, 0, 0, 1],
"bin_edges": [0.0, 0.3, 0.6, 0.9, 1.2, 1.5, 1.8, 2.1, 2.4, 2.7, 3.0],
},
}
| datasets-server-main | e2e/tests/test_14_statistics.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
from .utils import SEARCH_URL, poll
def test_healthcheck() -> None:
# this test ensures the /healthcheck endpoint of the search service is up
response = poll("/healthcheck", expected_code=200, url=SEARCH_URL)
assert response.status_code == 200, f"{response.status_code} - {response.text}"
assert "ok" in response.text, response.text
| datasets-server-main | e2e/tests/test_50_search_healthcheck.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import os
import re
from collections.abc import Mapping
from .utils import API_URL, get
def has_metric(name: str, labels: Mapping[str, str], metric_names: set[str]) -> bool:
label_str = ",".join([f'{k}="{v}"' for k, v in labels.items()])
s = name + "{" + label_str + "}"
return any(re.match(s, metric_name) is not None for metric_name in metric_names)
def test_metrics() -> None:
assert "PROMETHEUS_MULTIPROC_DIR" in os.environ
response = get("/metrics", url=API_URL)
assert response.status_code == 200, f"{response.status_code} - {response.text}"
content = response.text
lines = content.split("\n")
# examples:
# starlette_requests_total{method="GET",path_template="/metrics"} 1.0
# method_steps_processing_time_seconds_sum{method="healthcheck_endpoint",step="all"} 1.6772013623267412e-05
metrics = {
parts[0]: float(parts[1]) for line in lines if line and line[0] != "#" and (parts := line.rsplit(" ", 1))
}
# see https://github.com/prometheus/client_python#multiprocess-mode-eg-gunicorn
assert "process_start_time_seconds" not in metrics
# the middleware should have recorded the request
name = 'starlette_requests_total{method="GET",path_template="/metrics"}'
assert name in metrics, metrics
assert metrics[name] > 0, metrics
metric_names = set(metrics.keys())
for endpoint in ["/splits", "/first-rows", "/parquet"]:
# these metrics are only available in the admin API
assert not has_metric(
name="queue_jobs_total",
labels={"pid": "[0-9]*", "queue": endpoint, "status": "started"},
metric_names=metric_names,
), f"queue_jobs_total - endpoint={endpoint} found in {metrics}"
assert not has_metric(
name="responses_in_cache_total",
labels={"error_code": "None", "http_status": "200", "path": endpoint, "pid": "[0-9]*"},
metric_names=metric_names,
), f"responses_in_cache_total - endpoint {endpoint} found in {metrics}"
| datasets-server-main | e2e/tests/test_21_api_metrics.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from .utils import ROWS_URL, poll
def test_healthcheck() -> None:
# this test ensures the /healthcheck endpoint of the rows service is up
response = poll("/healthcheck", expected_code=200, url=ROWS_URL)
assert response.status_code == 200, f"{response.status_code} - {response.text}"
assert "ok" in response.text, response.text
| datasets-server-main | e2e/tests/test_40_rows_healthcheck.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import pytest
from .utils import poll
@pytest.mark.parametrize("endpoint", ["/healthcheck", "/admin/healthcheck"])
def test_healthcheck(endpoint: str) -> None:
# this test ensures the /healthcheck endpoints are accessible
response = poll(endpoint, expected_code=200)
assert response.status_code == 200, f"{response.status_code} - {response.text}"
assert "ok" in response.text, response.text
@pytest.mark.parametrize("endpoint", ["/", "/metrics", "/admin/metrics"])
def test_hidden(endpoint: str) -> None:
# this test ensures the root / and the /metrics endpoints are hidden
response = poll(endpoint, expected_code=404)
assert response.status_code == 404, f"{response.status_code} - {response.text}"
assert "Not Found" in response.text, response.text
| datasets-server-main | e2e/tests/test_10_healthcheck.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
from .fixtures.hub import AuthHeaders, AuthType
from .utils import get_default_config_split, poll_until_ready_and_assert
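# the fixture CSV has 4 rows and the query matches three of them (see num_rows_total below);
# with offset=1 and length=2, the last two matching rows (row_idx 2 and 3) are returned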
def test_search_endpoint(
auth_headers: AuthHeaders,
hf_public_dataset_repo_csv_data: str,
) -> None:
auth: AuthType = "none"
expected_status_code: int = 200
expected_error_code = None
# TODO: add dataset with various splits, or various configs
dataset = hf_public_dataset_repo_csv_data
config, split = get_default_config_split()
headers = auth_headers[auth]
# ensure the /search endpoint works as well
offset = 1
length = 2
query = "Lord Vader"
search_response = poll_until_ready_and_assert(
relative_url=(
f"/search?dataset={dataset}&config={config}&split={split}&offset={offset}&length={length}&query={query}"
),
expected_status_code=expected_status_code,
expected_error_code=expected_error_code,
headers=headers,
check_x_revision=True,
)
if not expected_error_code:
content = search_response.json()
assert "rows" in content, search_response
assert "features" in content, search_response
assert "num_rows_total" in content, search_response
assert "num_rows_per_page" in content, search_response
rows = content["rows"]
features = content["features"]
num_rows_total = content["num_rows_total"]
num_rows_per_page = content["num_rows_per_page"]
assert isinstance(rows, list), rows
assert isinstance(features, list), features
assert num_rows_total == 3
assert num_rows_per_page == 100
assert rows[0] == {
"row_idx": 2,
"row": {"col_1": "We count thirty Rebel ships, Lord Vader.", "col_2": 2, "col_3": 2.0},
"truncated_cells": [],
}, rows[0]
assert rows[1] == {
"row_idx": 3,
"row": {
"col_1": "The wingman spots the pirateship coming at him and warns the Dark Lord",
"col_2": 3,
"col_3": 3.0,
},
"truncated_cells": [],
}, rows[1]
assert features == [
{"feature_idx": 0, "name": "col_1", "type": {"dtype": "string", "_type": "Value"}},
{"feature_idx": 1, "name": "col_2", "type": {"dtype": "int64", "_type": "Value"}},
{"feature_idx": 2, "name": "col_3", "type": {"dtype": "float64", "_type": "Value"}},
], features
| datasets-server-main | e2e/tests/test_52_search.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import json
import os
import time
from collections.abc import Mapping
from pathlib import Path
from typing import Any, Optional
import requests
from requests import Response
PORT_REVERSE_PROXY = os.environ.get("PORT_REVERSE_PROXY", "8000")
API_UVICORN_PORT = os.environ.get("API_UVICORN_PORT", "8080")
ADMIN_UVICORN_PORT = os.environ.get("ADMIN_UVICORN_PORT", "8081")
ROWS_UVICORN_PORT = os.environ.get("ROWS_UVICORN_PORT", "8082")
SEARCH_UVICORN_PORT = os.environ.get("SEARCH_UVICORN_PORT", "8083")
ADMIN_TOKEN = os.environ.get("PARQUET_AND_INFO_COMMITTER_HF_TOKEN", "")
INTERVAL = 1
MAX_DURATION = 10 * 60
URL = f"http://localhost:{PORT_REVERSE_PROXY}"
ADMIN_URL = f"http://localhost:{ADMIN_UVICORN_PORT}"
API_URL = f"http://localhost:{API_UVICORN_PORT}"
ROWS_URL = f"http://localhost:{ROWS_UVICORN_PORT}"
SEARCH_URL = f"http://localhost:{SEARCH_UVICORN_PORT}"
Headers = Mapping[str, str]
def get(relative_url: str, headers: Optional[Headers] = None, url: str = URL) -> Response:
if headers is None:
headers = {}
return requests.get(f"{url}{relative_url}", headers=headers)
def post(relative_url: str, json: Optional[Any] = None, headers: Optional[Headers] = None, url: str = URL) -> Response:
if headers is None:
headers = {}
return requests.post(f"{url}{relative_url}", json=json, headers=headers)
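# poll a relative URL until the expected status code is returned, or, when error_field is set,
# until the error message no longer asks to "retry later"; gives up after MAX_DURATION seconds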
def poll(
relative_url: str,
error_field: Optional[str] = None,
expected_code: Optional[int] = 200,
headers: Optional[Headers] = None,
url: str = URL,
) -> Response:
if headers is None:
headers = {}
interval = INTERVAL
timeout = MAX_DURATION
retries = timeout // interval
should_retry = True
response = None
while retries > 0 and should_retry:
retries -= 1
time.sleep(interval)
response = get(relative_url=relative_url, headers=headers, url=url)
if error_field is not None:
# currently, when the dataset is being processed, the error message contains "Retry later"
try:
should_retry = "retry later" in response.json()[error_field].lower()
except Exception:
should_retry = False
else:
# just retry if the response is not the expected code
should_retry = response.status_code != expected_code
if response is None:
raise RuntimeError("no request has been done")
return response
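# simulate the Hub webhook call that notifies datasets-server that a dataset has been updated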
def post_refresh(dataset: str) -> Response:
return post("/webhook", json={"event": "update", "repo": {"type": "dataset", "name": dataset}})
def poll_parquet(dataset: str, headers: Optional[Headers] = None) -> Response:
return poll(f"/parquet?dataset={dataset}", error_field="error", headers=headers)
def poll_splits(dataset: str, config: Optional[str], headers: Optional[Headers] = None) -> Response:
config_query = f"&config={config}" if config else ""
return poll(f"/splits?dataset={dataset}{config_query}", error_field="error", headers=headers)
def poll_first_rows(dataset: str, config: str, split: str, headers: Optional[Headers] = None) -> Response:
return poll(f"/first-rows?dataset={dataset}&config={config}&split={split}", error_field="error", headers=headers)
def get_openapi_body_example(path: str, status: int, example_name: str) -> Any:
root = Path(__file__).resolve().parent.parent.parent
openapi_filename = root / "docs" / "source" / "openapi.json"
with open(openapi_filename) as json_file:
openapi = json.load(json_file)
steps = [
"paths",
path,
"get",
"responses",
str(status),
"content",
"application/json",
"examples",
example_name,
"value",
]
result = openapi
for step in steps:
if "$ref" in result:
new_steps = result["$ref"].split("/")[1:]
result = openapi
for new_step in new_steps:
result = result[new_step]
result = result[step]
return result
def get_default_config_split() -> tuple[str, str]:
config = "default"
split = "train"
return config, split
def log(response: Response, url: str = URL, relative_url: Optional[str] = None, dataset: Optional[str] = None) -> str:
if relative_url is not None:
try:
extra_response = get(
f"/admin/cache-reports{relative_url}", headers={"Authorization": f"Bearer {ADMIN_TOKEN}"}, url=url
)
if extra_response.status_code == 200:
extra = f"content of cache_reports: {extra_response.text}"
else:
extra = f"cannot get content of cache_reports: {extra_response.status_code} - {extra_response.text}"
except Exception as e:
extra = f"cannot get content of cache_reports - {e}"
extra = f"\n{extra}"
elif dataset is not None:
try:
extra_response = get(
f"/admin/dataset-state?dataset={dataset}", headers={"Authorization": f"Bearer {ADMIN_TOKEN}"}, url=url
)
if extra_response.status_code == 200:
extra = f"content of dataset-state: {extra_response.text}"
else:
extra = f"cannot get content of dataset-state: {extra_response.status_code} - {extra_response.text}"
except Exception as e:
extra = f"cannot get content of dataset-state - {e}"
extra = f"\n{extra}"
return (
f"{dataset=} - {relative_url=} - {response.status_code} - {response.headers} - {response.text} - {url}{extra}"
)
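# poll the endpoint until the response is no longer pending (X-Error-Code is neither
# "ResponseNotReady" nor "ResponseAlreadyComputedError"), then assert the status code and error code;
# optionally check that the X-Revision header looks like a git commit sha (40 characters)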
def poll_until_ready_and_assert(
relative_url: str,
expected_status_code: int,
expected_error_code: Optional[str],
headers: Optional[Headers] = None,
url: str = URL,
check_x_revision: bool = False,
) -> Any:
if headers is None:
headers = {}
interval = INTERVAL
timeout = MAX_DURATION
retries = timeout // interval
should_retry = True
response = None
while retries > 0 and should_retry:
retries -= 1
time.sleep(interval)
response = get(relative_url=relative_url, headers=headers, url=url)
print(response.headers.get("X-Error-Code"))
should_retry = response.headers.get("X-Error-Code") in ["ResponseNotReady", "ResponseAlreadyComputedError"]
if retries == 0 or response is None:
raise RuntimeError("Poll timeout")
assert response.status_code == expected_status_code, log(response, url, relative_url)
assert response.headers.get("X-Error-Code") == expected_error_code, log(response, url, relative_url)
if check_x_revision:
assert response.headers.get("X-Revision") is not None, log(response, url, relative_url)
assert len(str(response.headers.get("X-Revision"))) == 40, log(response, url, relative_url)
return response
# explicit re-export
__all__ = ["Response"]
| datasets-server-main | e2e/tests/utils.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import json
from typing import Any
import pytest
import requests
from .utils import (
URL,
get_openapi_body_example,
poll,
poll_first_rows,
poll_splits,
post_refresh,
)
def prepare_json(response: requests.Response) -> Any:
return json.loads(response.text.replace(URL, "https://datasets-server.huggingface.co"))
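# the openapi examples reference the production URL, so the local URL is replaced
# before comparing responses with the examples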
@pytest.mark.parametrize(
"status,name,dataset,config,split,error_code",
[
(
401,
"inexistent dataset, and not authenticated",
"severo/inexistent-dataset",
"plain_text",
"train",
"ExternalUnauthenticatedError",
),
(422, "missing required parameter", None, "plain_text", "train", "MissingRequiredParameter"),
(422, "missing required parameter", "imdb", None, "train", "MissingRequiredParameter"),
(422, "missing required parameter", "imdb", "plain_text", None, "MissingRequiredParameter"),
(422, "empty required parameter", "", "plain_text", "train", "MissingRequiredParameter"),
(422, "empty required parameter", "imdb", "", "train", "MissingRequiredParameter"),
(422, "empty required parameter", "imdb", "plain_text", "", "MissingRequiredParameter"),
],
)
def test_first_rows(status: int, name: str, dataset: str, config: str, split: str, error_code: str) -> None:
body = get_openapi_body_example("/first-rows", status, name)
# the logic here is a bit convoluted: since there is no way to refresh a single split, we have to refresh the
# whole dataset and rely on the result of /splits
if name == "empty required parameter":
r_rows = poll(f"/first-rows?dataset={dataset}&config={config}&split={split}", error_field="error")
elif name == "missing required parameter":
d = f"dataset={dataset}" if dataset is not None else ""
c = f"config={config}" if config is not None else ""
s = f"split={split}" if split is not None else ""
params = "&".join([d, c, s])
r_rows = poll(f"/first-rows?{params}", error_field="error")
else:
post_refresh(dataset)
poll_splits(dataset, config)
r_rows = poll_first_rows(dataset, config, split)
assert r_rows.status_code == status, f"{r_rows.status_code} - {r_rows.text}"
assert prepare_json(r_rows) == body, r_rows.text
if error_code is not None:
assert r_rows.headers["X-Error-Code"] == error_code, r_rows.headers["X-Error-Code"]
else:
assert "X-Error-Code" not in r_rows.headers, r_rows.headers["X-Error-Code"]
| datasets-server-main | e2e/tests/test_13_first_rows.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import os
import re
from collections.abc import Mapping
from .utils import ROWS_URL, get
def has_metric(name: str, labels: Mapping[str, str], metric_names: set[str]) -> bool:
label_str = ",".join([f'{k}="{v}"' for k, v in labels.items()])
s = name + "{" + label_str + "}"
return any(re.match(s, metric_name) is not None for metric_name in metric_names)
def test_metrics() -> None:
assert "PROMETHEUS_MULTIPROC_DIR" in os.environ
response = get("/metrics", url=ROWS_URL)
assert response.status_code == 200, f"{response.status_code} - {response.text}"
content = response.text
lines = content.split("\n")
# examples:
# starlette_requests_total{method="GET",path_template="/metrics"} 1.0
# method_steps_processing_time_seconds_sum{method="healthcheck_endpoint",step="all"} 1.6772013623267412e-05
metrics = {
parts[0]: float(parts[1]) for line in lines if line and line[0] != "#" and (parts := line.rsplit(" ", 1))
}
# see https://github.com/prometheus/client_python#multiprocess-mode-eg-gunicorn
assert "process_start_time_seconds" not in metrics
# the middleware should have recorded the request
name = 'starlette_requests_total{method="GET",path_template="/metrics"}'
assert name in metrics, metrics
assert metrics[name] > 0, metrics
metric_names = set(metrics.keys())
for endpoint in ["/rows"]:
# these metrics are only available in the admin API
assert not has_metric(
name="queue_jobs_total",
labels={"pid": "[0-9]*", "queue": endpoint, "status": "started"},
metric_names=metric_names,
), f"queue_jobs_total - endpoint={endpoint} found in {metrics}"
assert not has_metric(
name="responses_in_cache_total",
labels={"error_code": "None", "http_status": "200", "path": endpoint, "pid": "[0-9]*"},
metric_names=metric_names,
), f"responses_in_cache_total - endpoint {endpoint} found in {metrics}"
| datasets-server-main | e2e/tests/test_41_rows_metrics.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from typing import Literal
import pytest
from .fixtures.hub import AuthHeaders, AuthType, DatasetRepos, DatasetReposType
from .utils import get_default_config_split, poll_until_ready_and_assert
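# note: gated datasets become accessible once authenticated, while private datasets
# return 404 / ResponseNotFound even with valid credentials (see the expectations below)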
@pytest.mark.parametrize(
"type,auth,expected_status_code,expected_error_code",
[
("public", "none", 200, None),
("public", "token", 200, None),
("public", "cookie", 200, None),
("gated", "none", 401, "ExternalUnauthenticatedError"),
("gated", "token", 200, None),
("gated", "cookie", 200, None),
("private", "none", 401, "ExternalUnauthenticatedError"),
("private", "token", 404, "ResponseNotFound"),
("private", "cookie", 404, "ResponseNotFound"),
],
)
def test_auth_e2e(
auth_headers: AuthHeaders,
hf_dataset_repos_csv_data: DatasetRepos,
type: DatasetReposType,
auth: AuthType,
expected_status_code: int,
expected_error_code: str,
) -> None:
# TODO: add dataset with various splits, or various configs
dataset = hf_dataset_repos_csv_data[type]
headers = auth_headers[auth]
# asking for the dataset will launch the jobs, without the need of a webhook
poll_until_ready_and_assert(
relative_url=f"/splits?dataset={dataset}",
expected_status_code=expected_status_code,
expected_error_code=expected_error_code,
headers=headers,
check_x_revision=False,
)
@pytest.mark.parametrize(
"endpoint,input_type",
[
("/splits", "dataset"),
("/splits", "config"),
("/first-rows", "split"),
("/parquet", "dataset"),
("/parquet", "config"),
("/info", "dataset"),
("/info", "config"),
("/size", "dataset"),
("/size", "config"),
("/is-valid", "dataset"),
("/statistics", "split"),
],
)
def test_endpoint(
auth_headers: AuthHeaders,
hf_public_dataset_repo_csv_data: str,
endpoint: str,
input_type: Literal["all", "dataset", "config", "split"],
) -> None:
auth: AuthType = "none"
expected_status_code: int = 200
expected_error_code = None
# TODO: add dataset with various splits, or various configs
dataset = hf_public_dataset_repo_csv_data
config, split = get_default_config_split()
headers = auth_headers[auth]
# asking for the dataset will launch the jobs, without the need of a webhook
relative_url = endpoint
if input_type != "all":
relative_url += f"?dataset={dataset}"
if input_type != "dataset":
relative_url += f"&config={config}"
if input_type != "config":
relative_url += f"&split={split}"
poll_until_ready_and_assert(
relative_url=relative_url,
expected_status_code=expected_status_code,
expected_error_code=expected_error_code,
headers=headers,
check_x_revision=input_type != "all",
)
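# the fixture CSV has 4 rows, so requesting offset=1 with length=10 returns the last 3 rows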
def test_rows_endpoint(
auth_headers: AuthHeaders,
hf_public_dataset_repo_csv_data: str,
) -> None:
auth: AuthType = "none"
expected_status_code: int = 200
expected_error_code = None
# TODO: add dataset with various splits, or various configs
dataset = hf_public_dataset_repo_csv_data
config, split = get_default_config_split()
headers = auth_headers[auth]
# ensure the /rows endpoint works as well
offset = 1
length = 10
rows_response = poll_until_ready_and_assert(
relative_url=f"/rows?dataset={dataset}&config={config}&split={split}&offset={offset}&length={length}",
expected_status_code=expected_status_code,
expected_error_code=expected_error_code,
headers=headers,
check_x_revision=True,
)
if not expected_error_code:
content = rows_response.json()
assert "rows" in content, rows_response
assert "features" in content, rows_response
rows = content["rows"]
features = content["features"]
assert isinstance(rows, list), rows
assert isinstance(features, list), features
assert len(rows) == 3, rows
assert rows[0] == {
"row_idx": 1,
"row": {
"col_1": "Vader turns round and round in circles as his ship spins into space.",
"col_2": 1,
"col_3": 1.0,
},
"truncated_cells": [],
}, rows[0]
assert features == [
{"feature_idx": 0, "name": "col_1", "type": {"dtype": "string", "_type": "Value"}},
{"feature_idx": 1, "name": "col_2", "type": {"dtype": "int64", "_type": "Value"}},
{"feature_idx": 2, "name": "col_3", "type": {"dtype": "float64", "_type": "Value"}},
], features
| datasets-server-main | e2e/tests/test_11_api.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from .utils import API_URL, poll
def test_healthcheck() -> None:
# this test ensures the /healthcheck endpoint of the API service is up
response = poll("/healthcheck", expected_code=200, url=API_URL)
assert response.status_code == 200, f"{response.status_code} - {response.text}"
assert "ok" in response.text, response.text
| datasets-server-main | e2e/tests/test_20_api_healthcheck.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import os
import re
from collections.abc import Mapping
from .utils import ADMIN_URL, get
def has_metric(name: str, labels: Mapping[str, str], metric_names: set[str]) -> bool:
label_str = ",".join([f'{k}="{v}"' for k, v in sorted(labels.items())])
s = name + "{" + label_str + "}"
return any(re.match(s, metric_name) is not None for metric_name in metric_names)
def test_metrics() -> None:
assert "PROMETHEUS_MULTIPROC_DIR" in os.environ
response = get("/metrics", url=ADMIN_URL)
assert response.status_code == 200, f"{response.status_code} - {response.text}"
content = response.text
lines = content.split("\n")
metrics = {line.split(" ")[0]: float(line.split(" ")[1]) for line in lines if line and line[0] != "#"}
# see https://github.com/prometheus/client_python#multiprocess-mode-eg-gunicorn
assert "process_start_time_seconds" not in metrics
# the middleware should have recorded the request
name = 'starlette_requests_total{method="GET",path_template="/metrics"}'
assert name in metrics, metrics
assert metrics[name] > 0, metrics
metric_names = set(metrics.keys())
# the queue metrics are computed each time a job is created and processed
# they should exist for at least some of the job types
for queue in ["dataset-config-names", "split-first-rows-from-streaming", "dataset-parquet"]:
# eg. 'queue_jobs_total{pid="10",queue="split-first-rows-from-streaming",status="started"}'
assert has_metric(
name="queue_jobs_total",
labels={"pid": "[0-9]*", "queue": queue, "status": "started"},
metric_names=metric_names,
), f"queue_jobs_total - queue={queue} found in {metrics}"
# the cache metrics are computed each time a job is processed
# they should exist for at least some of the cache kinds
for cache_kind in ["dataset-config-names", "split-first-rows-from-streaming", "dataset-parquet"]:
# cache should have been filled by the previous tests
# eg. 'responses_in_cache_total{error_code="None",http_status="200",path="dataset-config-names",pid="10"}'
assert has_metric(
name="responses_in_cache_total",
labels={"error_code": "None", "http_status": "200", "kind": cache_kind, "pid": "[0-9]*"},
metric_names=metric_names,
), f"responses_in_cache_total - cache kind {cache_kind} found in {metrics}"
# the disk usage metrics, on the other hand, are computed at runtime, so we should see them
assert has_metric(
name="assets_disk_usage",
labels={"type": "total", "pid": "[0-9]*"},
metric_names=metric_names,
), "assets_disk_usage"
assert has_metric(
name="descriptive_statistics_disk_usage",
labels={"type": "total", "pid": "[0-9]*"},
metric_names=metric_names,
), "descriptive_statistics_disk_usage"
assert has_metric(
name="duckdb_disk_usage",
labels={"type": "total", "pid": "[0-9]*"},
metric_names=metric_names,
), "duckdb_disk_usage"
assert has_metric(
name="hf_datasets_disk_usage",
labels={"type": "total", "pid": "[0-9]*"},
metric_names=metric_names,
), "hf_datasets_disk_usage"
assert has_metric(
name="parquet_metadata_disk_usage",
labels={"type": "total", "pid": "[0-9]*"},
metric_names=metric_names,
), "parquet_metadata_disk_usage"
| datasets-server-main | e2e/tests/test_31_admin_metrics.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import csv
import pytest
from pytest import TempPathFactory
DATA = [
{"col_1": "There goes another one.", "col_2": 0, "col_3": 0.0},
{"col_1": "Vader turns round and round in circles as his ship spins into space.", "col_2": 1, "col_3": 1.0},
{"col_1": "We count thirty Rebel ships, Lord Vader.", "col_2": 2, "col_3": 2.0},
{"col_1": "The wingman spots the pirateship coming at him and warns the Dark Lord", "col_2": 3, "col_3": 3.0},
]
@pytest.fixture(scope="session")
def csv_path(tmp_path_factory: TempPathFactory) -> str:
path = str(tmp_path_factory.mktemp("data") / "dataset.csv")
with open(path, "w", newline="") as f:
writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
writer.writeheader()
for item in DATA:
writer.writerow(item)
return path
| datasets-server-main | e2e/tests/fixtures/files.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
| datasets-server-main | e2e/tests/fixtures/__init__.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
# Adapted from https://github.com/huggingface/datasets/blob/main/tests/fixtures/hub.py
import time
from collections.abc import Callable, Iterator, Mapping
from contextlib import contextmanager, suppress
from typing import Any, Literal, Optional, TypedDict, Union
import pytest
import requests
from huggingface_hub.constants import REPO_TYPES, REPO_TYPES_URL_PREFIXES
from huggingface_hub.hf_api import HfApi
from huggingface_hub.utils._errors import hf_raise_for_status
# see https://github.com/huggingface/moon-landing/blob/main/server/scripts/staging-seed-db.ts
CI_HUB_USER = "__DUMMY_DATASETS_SERVER_USER__"
CI_HUB_USER_API_TOKEN = "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"
CI_HUB_USER_SESSION_TOKEN = (
"ePTgRpKDHjwQZylcViCgFywZqECKWIutEsEercqQPAeADPKGt"
"CLuLEvMpNIGhruoHbnIbDhGLlpTpTlZhOygCpHzbfTHboxEwuPdlHjNlfcxFuYHvfSivfoBXQxWUUek"
)
CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
def update_repo_settings(
hf_api: HfApi,
repo_id: str,
*,
private: Optional[bool] = None,
gated: Optional[str] = None,
token: Optional[str] = None,
organization: Optional[str] = None,
repo_type: Optional[str] = None,
name: Optional[str] = None,
) -> Any:
"""Update the settings of a repository.
Args:
repo_id (`str`, *optional*):
A namespace (user or an organization) and a repo name separated
by a `/`.
<Tip>
Version added: 0.5
</Tip>
private (`bool`, *optional*, defaults to `None`):
Whether the repo should be private.
gated (`str`, *optional*, defaults to `None`):
Whether the repo should request user access.
Possible values are 'auto' and 'manual'
token (`str`, *optional*):
An authentication token (See https://huggingface.co/settings/token)
repo_type (`str`, *optional*):
Set to `"dataset"` or `"space"` if uploading to a dataset or
space, `None` or `"model"` if uploading to a model. Default is
`None`.
Returns:
The HTTP response in json.
<Tip>
Raises the following errors:
- [`~huggingface_hub.utils.RepositoryNotFoundError`]
If the repository to download from cannot be found. This may be because it doesn't exist,
or because it is set to `private` and you do not have access.
</Tip>
"""
if repo_type not in REPO_TYPES:
raise ValueError("Invalid repo type")
organization, name = repo_id.split("/") if "/" in repo_id else (None, repo_id)
if organization is None:
namespace = hf_api.whoami(token=token)["name"]
else:
namespace = organization
path_prefix = f"{hf_api.endpoint}/api/"
if repo_type in REPO_TYPES_URL_PREFIXES:
path_prefix += REPO_TYPES_URL_PREFIXES[repo_type]
path = f"{path_prefix}{namespace}/{name}/settings"
json: dict[str, Union[bool, str]] = {}
if private is not None:
json["private"] = private
if gated is not None:
json["gated"] = gated
r = requests.put(
path,
headers={"authorization": f"Bearer {token}"},
json=json,
)
hf_raise_for_status(r)
return r.json()
@pytest.fixture(scope="session")
def hf_api() -> HfApi:
return HfApi(endpoint=CI_HUB_ENDPOINT)
@pytest.fixture(scope="session")
def hf_token() -> str:
return CI_HUB_USER_API_TOKEN
@pytest.fixture
def cleanup_repo(hf_api: HfApi) -> Callable[[str], None]:
def _cleanup_repo(repo_id: str) -> None:
hf_api.delete_repo(repo_id=repo_id, token=CI_HUB_USER_API_TOKEN, repo_type="dataset")
return _cleanup_repo
@pytest.fixture
def temporary_repo(cleanup_repo: Callable[[str], None]) -> Callable[[str], Iterator[str]]:
@contextmanager
def _temporary_repo(repo_id: str) -> Iterator[str]:
try:
yield repo_id
finally:
cleanup_repo(repo_id)
return _temporary_repo # type: ignore
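# a timestamp-based suffix keeps repo names unique across test sessions on the CI Hub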
def create_unique_repo_name(prefix: str, user: str) -> str:
repo_name = f"{prefix}-{int(time.time() * 10e3)}"
return f"{user}/{repo_name}"
def create_hf_dataset_repo_csv_data(
hf_api: HfApi,
hf_token: str,
csv_path: str,
*,
private: bool = False,
gated: Optional[str] = None,
user: str = CI_HUB_USER,
) -> str:
repo_id = create_unique_repo_name("repo_csv_data", user)
hf_api.create_repo(repo_id=repo_id, token=hf_token, repo_type="dataset", private=private)
hf_api.upload_file(
token=hf_token,
path_or_fileobj=csv_path,
path_in_repo="data/csv_data.csv",
repo_id=repo_id,
repo_type="dataset",
)
if gated:
update_repo_settings(hf_api, repo_id, token=hf_token, gated=gated, repo_type="dataset")
return repo_id
# https://docs.pytest.org/en/6.2.x/fixture.html#yield-fixtures-recommended
@pytest.fixture(scope="session", autouse=True)
def hf_public_dataset_repo_csv_data(hf_api: HfApi, hf_token: str, csv_path: str) -> Iterator[str]:
repo_id = create_hf_dataset_repo_csv_data(hf_api=hf_api, hf_token=hf_token, csv_path=csv_path)
yield repo_id
with suppress(requests.exceptions.HTTPError, ValueError):
hf_api.delete_repo(repo_id=repo_id, token=hf_token, repo_type="dataset")
@pytest.fixture(scope="session", autouse=True)
def hf_public_2_dataset_repo_csv_data(hf_api: HfApi, hf_token: str, csv_path: str) -> Iterator[str]:
repo_id = create_hf_dataset_repo_csv_data(hf_api=hf_api, hf_token=hf_token, csv_path=csv_path)
yield repo_id
with suppress(requests.exceptions.HTTPError, ValueError):
hf_api.delete_repo(repo_id=repo_id, token=hf_token, repo_type="dataset")
@pytest.fixture(scope="session", autouse=True)
def hf_private_dataset_repo_csv_data(hf_api: HfApi, hf_token: str, csv_path: str) -> Iterator[str]:
repo_id = create_hf_dataset_repo_csv_data(hf_api=hf_api, hf_token=hf_token, csv_path=csv_path, private=True)
yield repo_id
with suppress(requests.exceptions.HTTPError, ValueError):
hf_api.delete_repo(repo_id=repo_id, token=hf_token, repo_type="dataset")
@pytest.fixture(scope="session", autouse=True)
def hf_gated_dataset_repo_csv_data(hf_api: HfApi, hf_token: str, csv_path: str) -> Iterator[str]:
repo_id = create_hf_dataset_repo_csv_data(hf_api=hf_api, hf_token=hf_token, csv_path=csv_path, gated="auto")
yield repo_id
with suppress(requests.exceptions.HTTPError, ValueError):
hf_api.delete_repo(repo_id=repo_id, token=hf_token, repo_type="dataset")
class DatasetRepos(TypedDict):
public: str
public2: str
private: str
gated: str
DatasetReposType = Literal["public", "public2", "private", "gated"]
@pytest.fixture(scope="session", autouse=True)
def hf_dataset_repos_csv_data(
hf_public_dataset_repo_csv_data: str,
hf_public_2_dataset_repo_csv_data: str,
hf_private_dataset_repo_csv_data: str,
hf_gated_dataset_repo_csv_data: str,
) -> DatasetRepos:
return {
"public": hf_public_dataset_repo_csv_data,
"public2": hf_public_2_dataset_repo_csv_data,
"private": hf_private_dataset_repo_csv_data,
"gated": hf_gated_dataset_repo_csv_data,
}
AuthType = Literal["cookie", "token", "none"]
AuthHeaders = Mapping[AuthType, Mapping[str, str]]
@pytest.fixture(autouse=True, scope="session")
def auth_headers() -> AuthHeaders:
return {
"none": {},
"token": {"Authorization": f"Bearer {CI_HUB_USER_API_TOKEN}"},
"cookie": {"Cookie": f"token={CI_HUB_USER_SESSION_TOKEN}"},
}
| datasets-server-main | e2e/tests/fixtures/hub.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
| datasets-server-main | e2e/src/__init__.py |
import os
import urllib.parse
from itertools import product
import pandas as pd
import requests
import gradio as gr
from libcommon.processing_graph import ProcessingGraph
from libcommon.config import ProcessingGraphConfig
import matplotlib
import matplotlib.pyplot as plt
import networkx as nx
import huggingface_hub as hfh
import duckdb
import json
matplotlib.use('SVG')
DEV = os.environ.get("DEV", False)
HF_ENDPOINT = os.environ.get("HF_ENDPOINT", "https://huggingface.co")
PROD_DSS_ENDPOINT = os.environ.get("PROD_DSS_ENDPOINT", "https://datasets-server.huggingface.co")
DEV_DSS_ENDPOINT = os.environ.get("DEV_DSS_ENDPOINT", "http://localhost:8100")
ADMIN_HF_ORGANIZATION = os.environ.get("ADMIN_HF_ORGANIZATION", "huggingface")
HF_TOKEN = os.environ.get("HF_TOKEN")
DSS_ENDPOINT = DEV_DSS_ENDPOINT if DEV else PROD_DSS_ENDPOINT
PROCESSING_GRAPH = ProcessingGraph(ProcessingGraphConfig().specification)
pending_jobs_df = None
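# the admin endpoints require authentication, so an unauthenticated HEAD request that gets
# a 401 is taken as proof that the datasets-server instance is reachable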
def healthcheck():
try:
response = requests.head(f"{DSS_ENDPOINT}/admin/pending-jobs", timeout=10)
except requests.ConnectionError as error:
return f"❌ Failed to connect to {DSS_ENDPOINT} (error {error})"
if response.status_code == 401:
return f"*Connected to {DSS_ENDPOINT}*"
else:
return f"❌ Failed to connect to {DSS_ENDPOINT} (error {response.status_code})"
def draw_graph(width, height):
graph = PROCESSING_GRAPH._nx_graph
pos = nx.nx_agraph.graphviz_layout(graph, prog="dot")
fig = plt.figure(figsize=(width, height))
nx.draw_networkx(graph, pos=pos, node_color="#d1b2f8", node_size=500)
return fig
with gr.Blocks() as demo:
gr.Markdown(" ## Datasets-server admin page")
gr.Markdown(healthcheck)
with gr.Row(visible=HF_TOKEN is None) as auth_page:
with gr.Column():
auth_title = gr.Markdown("Enter your token ([settings](https://huggingface.co/settings/tokens)):")
token_box = gr.Textbox(HF_TOKEN or "", label="token", placeholder="hf_xxx", type="password")
auth_error = gr.Markdown("", visible=False)
with gr.Row(visible=HF_TOKEN is not None) as main_page:
with gr.Column():
welcome_title = gr.Markdown("### Welcome")
with gr.Tab("View pending jobs"):
fetch_pending_jobs_button = gr.Button("Fetch pending jobs")
gr.Markdown("### Pending jobs summary")
pending_jobs_summary_table = gr.DataFrame(pd.DataFrame({"Jobs": [], "Waiting": [], "Started": []}))
gr.Markdown("### Most recent")
recent_pending_jobs_table = gr.DataFrame()
gr.Markdown("### Query the pending jobs table")
pending_jobs_query = gr.Textbox(
label="Query pending_jobs_df",
placeholder="SELECT * FROM pending_jobs_df WHERE dataset LIKE 'allenai/c4",
value="SELECT * FROM pending_jobs_df WHERE dataset LIKE 'allenai/c4'",
lines=3,
)
query_pending_jobs_button = gr.Button("Run")
pending_jobs_query_result_df = gr.DataFrame()
with gr.Tab("Refresh dataset"):
job_types = [processing_step.job_type for processing_step in PROCESSING_GRAPH.get_topologically_ordered_processing_steps()]
refresh_type = gr.Dropdown(job_types, multiselect=False, label="job type", value=job_types[0])
refresh_dataset_name = gr.Textbox(label="dataset", placeholder="c4")
refresh_config_name = gr.Textbox(label="config (optional)", placeholder="en")
refresh_split_name = gr.Textbox(label="split (optional)", placeholder="train, test")
gr.Markdown("*you can select multiple values by separating them with commas, e.g. split='train, test'*")
refresh_priority = gr.Dropdown(["low", "normal", "high"], multiselect=False, label="priority", value="high")
refresh_dataset_button = gr.Button("Force refresh dataset")
refresh_dataset_output = gr.Markdown("")
with gr.Tab("Obsolete cache"):
fetch_obsolete_cache_button = gr.Button("Fetch obsolete cache")
delete_obsolete_cache_button = gr.Button("Delete obsolete cache")
datasets_to_delete = gr.Markdown("", visible=False)
cache_records_to_delete = gr.Markdown("", visible=False)
obsolete_cache_table = gr.DataFrame(
pd.DataFrame({"Dataset": [], "Cache records": []})
)
with gr.Tab("Dataset status"):
dataset_name = gr.Textbox(label="dataset", placeholder="c4")
dataset_status_button = gr.Button("Get dataset status")
gr.Markdown("### Cached responses")
cached_responses_table = gr.DataFrame()
gr.Markdown("### Pending jobs")
jobs_table = gr.DataFrame()
backfill_message = gr.Markdown("", visible=False)
backfill_plan_table = gr.DataFrame(visible=False)
backfill_execute_button = gr.Button("Execute backfill plan", visible=False)
backfill_execute_error = gr.Markdown("", visible=False)
with gr.Tab("Processing graph"):
gr.Markdown("## 💫 Please, don't forget to rebuild (factory reboot) this space immediately after each deploy 💫")
gr.Markdown("### so that we get the 🚀 production 🚀 version of the graph here ")
with gr.Row():
width = gr.Slider(1, 30, 19, step=1, label="Width")
height = gr.Slider(1, 30, 15, step=1, label="Height")
output = gr.Plot()
draw_button = gr.Button("Plot processing graph")
draw_button.click(draw_graph, inputs=[width, height], outputs=output)
def auth(token):
if not token:
return {auth_error: gr.update(value="", visible=False)}
try:
user = hfh.whoami(token=token)
except requests.HTTPError as err:
return {auth_error: gr.update(value=f"❌ Error ({err})", visible=True)}
orgs = [org["name"] for org in user["orgs"]]
if ADMIN_HF_ORGANIZATION in orgs:
return {
auth_page: gr.update(visible=False),
welcome_title: gr.update(value=f"### Welcome {user['name']}"),
main_page: gr.update(visible=True)
}
else:
return {
auth_error: gr.update(value=f"❌ Unauthorized (user '{user['name']} is not a member of '{ADMIN_HF_ORGANIZATION}')")
}
def call_obsolete_cache(token, delete):
headers = {"Authorization": f"Bearer {token}"}
obsolete_cache_endpoint = f"{DSS_ENDPOINT}/admin/obsolete-cache"
response = (
requests.delete(obsolete_cache_endpoint, headers=headers, timeout=240)
if delete
else requests.get(obsolete_cache_endpoint, headers=headers, timeout=120)
)
action = "delete" if delete else "get"
if response.status_code == 200:
obsolete_cache = response.json()
obsolete_cache_df = pd.DataFrame(obsolete_cache)
datasets_to_delete_count = len(obsolete_cache_df)
cache_records_to_delete_count = 0 if datasets_to_delete_count == 0 else obsolete_cache_df["cache_records"].sum()
return {
obsolete_cache_table: gr.update(visible=True, value=obsolete_cache_df),
datasets_to_delete: gr.update(
visible=True, value=f"### Datasets: {datasets_to_delete_count}"
),
cache_records_to_delete: gr.update(
visible=True,
value=f"### Cached records: {cache_records_to_delete_count}",
),
}
else:
return {
obsolete_cache_table: gr.update(
visible=True,
value=pd.DataFrame(
{
"Error": [
f"❌ Failed to {action} obsolete cache (error {response.status_code})"
]
}
),
),
datasets_to_delete: gr.update(visible=False),
cache_records_to_delete: gr.update(visible=False),
}
def delete_obsolete_cache(token):
return call_obsolete_cache(token, True)
def get_obsolete_cache(token):
return call_obsolete_cache(token, False)
def view_jobs(token):
global pending_jobs_df
headers = {"Authorization": f"Bearer {token}"}
response = requests.get(f"{DSS_ENDPOINT}/admin/pending-jobs", headers=headers, timeout=60)
if response.status_code == 200:
pending_jobs = response.json()
pending_jobs_df = pd.DataFrame([
job
for job_type in pending_jobs
for job_state in pending_jobs[job_type]
for job in pending_jobs[job_type][job_state]
])
if "started_at" in pending_jobs_df.columns:
pending_jobs_df["started_at"] = pd.to_datetime(pending_jobs_df["started_at"], errors="coerce")
if "finished_at" in pending_jobs_df.columns:
pending_jobs_df["finished_at"] = pd.to_datetime(pending_jobs_df["finished_at"], errors="coerce")
if "last_heartbeat" in pending_jobs_df.columns:
pending_jobs_df["last_heartbeat"] = pd.to_datetime(pending_jobs_df["last_heartbeat"], errors="coerce")
if "created_at" in pending_jobs_df.columns:
pending_jobs_df["created_at"] = pd.to_datetime(pending_jobs_df["created_at"], errors="coerce")
most_recent = pending_jobs_df.nlargest(5, "created_at")
else:
most_recent = pd.DataFrame()
return {
pending_jobs_summary_table: gr.update(visible=True, value=pd.DataFrame({
"Jobs": list(pending_jobs),
"Waiting": [len(pending_jobs[job_type]["waiting"]) for job_type in pending_jobs],
"Started": [len(pending_jobs[job_type]["started"]) for job_type in pending_jobs],
})),
recent_pending_jobs_table: gr.update(value=most_recent)
}
else:
return {
pending_jobs_summary_table: gr.update(visible=True, value=pd.DataFrame({"Error": [f"❌ Failed to view pending jobs to {DSS_ENDPOINT} (error {response.status_code})"]})),
recent_pending_jobs_table: gr.update(value=None)
}
def get_dataset_status(token, dataset):
headers = {"Authorization": f"Bearer {token}"}
response = requests.get(f"{DSS_ENDPOINT}/admin/dataset-status?dataset={dataset}", headers=headers, timeout=60)
if response.status_code == 200:
dataset_status = response.json()
cached_responses_df = pd.DataFrame([{
"kind": cached_response["kind"],
"dataset": cached_response["dataset"],
"config": cached_response["config"],
"split": cached_response["split"],
"http_status": cached_response["http_status"],
"error_code": cached_response["error_code"],
"job_runner_version": cached_response["job_runner_version"],
"dataset_git_revision": cached_response["dataset_git_revision"],
"progress": cached_response["progress"],
"updated_at": cached_response["updated_at"],
"details": json.dumps(cached_response["details"]),
}
for job_type, content in dataset_status.items()
for cached_response in content["cached_responses"]
])
jobs_df = pd.DataFrame([{
"type": job["type"],
"dataset": job["dataset"],
"revision": job["revision"],
"config": job["config"],
"split": job["split"],
"namespace": job["namespace"],
"priority": job["priority"],
"status": job["status"],
"created_at": job["created_at"],
"started_at": job["started_at"],
"finished_at": job["finished_at"],
"last_heartbeat": job["last_heartbeat"]
}
for job_type, content in dataset_status.items()
for job in content["jobs"]
])
return {
cached_responses_table: gr.update(value=cached_responses_df),
jobs_table: gr.update(value=jobs_df)
}
else:
return {
cached_responses_table: gr.update(value=pd.DataFrame([{"error": f"❌ Failed to get status for {dataset} (error {response.status_code})"}])),
jobs_table: gr.update(value=pd.DataFrame([{"content": str(response.content)}]))
}
def get_backfill_plan(token, dataset):
headers = {"Authorization": f"Bearer {token}"}
response = requests.get(f"{DSS_ENDPOINT}/admin/dataset-backfill-plan?dataset={dataset}", headers=headers, timeout=60)
if response.status_code != 200:
return {
backfill_message: gr.update(value=f"❌ Failed to get backfill plan for {dataset} (error {response.status_code})", visible=True),
backfill_plan_table: gr.update(value=None, visible=False),
backfill_execute_button: gr.update(visible=False),
backfill_execute_error: gr.update(visible=False)
}
tasks = response.json()
tasks_df = pd.DataFrame(tasks)
has_tasks = len(tasks_df) > 0
return {
backfill_message: gr.update(
value="""### Backfill plan
The cache is outdated or in an incoherent state. Here is the plan to backfill the cache."""
, visible=has_tasks),
backfill_plan_table: gr.update(value=tasks_df, visible=has_tasks),
backfill_execute_button: gr.update(visible=has_tasks),
backfill_execute_error: gr.update(visible=False),
}
def get_dataset_status_and_backfill_plan(token, dataset):
return {**get_dataset_status(token, dataset), **get_backfill_plan(token, dataset)}
def execute_backfill_plan(token, dataset):
headers = {"Authorization": f"Bearer {token}"}
response = requests.post(f"{DSS_ENDPOINT}/admin/dataset-backfill?dataset={dataset}", headers=headers, timeout=60)
state = get_dataset_status_and_backfill_plan(token, dataset)
message = (
"✅ Backfill plan executed"
if response.status_code == 200
else f"❌ Failed to execute backfill plan (error {response.status_code})<pre>{response.text}</pre>"
)
state[backfill_execute_error] = gr.update(value=message, visible=True)
return state
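# duckdb resolves table names against pandas DataFrames that are in scope, which is why the SQL
# text can reference pending_jobs_df directly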
def query_jobs(pending_jobs_query):
global pending_jobs_df
try:
result = duckdb.query(pending_jobs_query).to_df()
except (duckdb.ParserException, duckdb.CatalogException, duckdb.BinderException) as error:
return {pending_jobs_query_result_df: gr.update(value=pd.DataFrame({"Error": [f"❌ {str(error)}"]}))}
return {pending_jobs_query_result_df: gr.update(value=result)}
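# a force-refresh job of the selected type is posted to /admin/force-refresh/<job_type> for every
# combination (cartesian product) of the comma-separated dataset, config and split values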
def refresh_dataset(token, refresh_type, refresh_dataset_names, refresh_config_names, refresh_split_names, refresh_priority):
headers = {"Authorization": f"Bearer {token}"}
all_results = ""
for refresh_dataset_name, refresh_config_name, refresh_split_name in product(
refresh_dataset_names.split(","), refresh_config_names.split(","), refresh_split_names.split(",")
):
refresh_dataset_name = refresh_dataset_name.strip()
params = {"dataset": refresh_dataset_name, "priority": refresh_priority}
if refresh_config_name:
refresh_config_name = refresh_config_name.strip()
params["config"] = refresh_config_name
if refresh_split_name:
refresh_split_name = refresh_split_name.strip()
params["split"] = refresh_split_name
params = urllib.parse.urlencode(params)
response = requests.post(f"{DSS_ENDPOINT}/admin/force-refresh/{refresh_type}?{params}", headers=headers, timeout=60)
if response.status_code == 200:
result = f"[{refresh_dataset_name}] ✅ Added processing step to the queue: '{refresh_type}'"
if refresh_config_name:
result += f", for config '{refresh_config_name}'"
if refresh_split_name:
result += f", for split '{refresh_split_name}'"
else:
result = f"[{refresh_dataset_name}] ❌ Failed to add processing step to the queue. Error {response.status_code}"
try:
if response.json().get("error"):
result += f": {response.json()['error']}"
except requests.JSONDecodeError:
result += f": {response.content}"
all_results += result.strip("\n") + "\n"
return "```\n" + all_results + "\n```"
token_box.change(auth, inputs=token_box, outputs=[auth_error, welcome_title, auth_page, main_page])
fetch_pending_jobs_button.click(view_jobs, inputs=token_box, outputs=[recent_pending_jobs_table, pending_jobs_summary_table])
query_pending_jobs_button.click(query_jobs, inputs=pending_jobs_query, outputs=[pending_jobs_query_result_df])
refresh_dataset_button.click(refresh_dataset, inputs=[token_box, refresh_type, refresh_dataset_name, refresh_config_name, refresh_split_name, refresh_priority], outputs=refresh_dataset_output)
dataset_status_button.click(get_dataset_status_and_backfill_plan, inputs=[token_box, dataset_name], outputs=[cached_responses_table, jobs_table, backfill_message, backfill_plan_table, backfill_execute_button, backfill_execute_error])
backfill_execute_button.click(execute_backfill_plan, inputs=[token_box, dataset_name], outputs=[cached_responses_table, jobs_table, backfill_message, backfill_plan_table, backfill_execute_button, backfill_execute_error])
fetch_obsolete_cache_button.click(get_obsolete_cache, inputs=[token_box], outputs=[obsolete_cache_table, datasets_to_delete, cache_records_to_delete])
delete_obsolete_cache_button.click(delete_obsolete_cache, inputs=[token_box], outputs=[obsolete_cache_table, datasets_to_delete, cache_records_to_delete])
if __name__ == "__main__":
demo.launch()
| datasets-server-main | front/admin_ui/app.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
import os
import shutil
from http import HTTPStatus
from pathlib import Path
from unittest.mock import patch
import pytest
from libcommon.simple_cache import has_some_cache, upsert_response
from pytest import raises
from admin.routes.obsolete_cache import (
DatasetCacheReport,
delete_obsolete_cache,
get_obsolete_cache,
)
from admin.utils import UnexpectedError
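# in these tests, get_supported_dataset_names is patched: a dataset whose cache entries exist
# but which is no longer in the list of supported datasets is considered obsolete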
@pytest.mark.parametrize(
"dataset_names,expected_report",
[(["dataset"], []), ([], [DatasetCacheReport(dataset="dataset", cache_records=2)])],
)
def test_get_obsolete_cache(dataset_names: list[str], expected_report: list[DatasetCacheReport]) -> None:
dataset = "dataset"
upsert_response(
kind="dataset-config-names",
dataset=dataset,
content={"config_names": [{"dataset": dataset, "config": "config"}]},
http_status=HTTPStatus.OK,
)
upsert_response(
kind="config-split-names-from-streaming",
dataset=dataset,
config="config",
content={"splits": [{"dataset": dataset, "config": "config", "split": "split"}]},
http_status=HTTPStatus.OK,
)
assert has_some_cache(dataset=dataset)
with patch("admin.routes.obsolete_cache.get_supported_dataset_names", return_value=dataset_names):
assert get_obsolete_cache(hf_endpoint="hf_endpoint", hf_token="hf_token") == expected_report
@pytest.mark.parametrize(
"dataset_names,minimun_supported_datasets,should_keep,should_raise",
[
(["dataset"], 1, True, False), # do not delete, dataset is still supported
([], 1000, True, True), # do not delete, number of supported datasets is less than threshold
([], 0, False, False), # delete dataset
],
)
def test_delete_obsolete_cache(
    dataset_names: list[str], minimum_supported_datasets: int, should_keep: bool, should_raise: bool
) -> None:
assets_directory = "/tmp/assets"
cached_assets_directory = "/tmp/cached-assets"
dataset = "dataset"
os.makedirs(f"{assets_directory}/{dataset}", exist_ok=True)
os.makedirs(f"{cached_assets_directory}/{dataset}", exist_ok=True)
asset_file = Path(f"{assets_directory}/{dataset}/image.jpg")
asset_file.touch()
assert asset_file.is_file()
cached_asset_file = Path(f"{cached_assets_directory}/{dataset}/image.jpg")
cached_asset_file.touch()
assert cached_asset_file.is_file()
upsert_response(
kind="kind_1",
dataset=dataset,
content={"config_names": [{"dataset": dataset, "config": "config"}]},
http_status=HTTPStatus.OK,
)
upsert_response(
kind="kind_2",
dataset=dataset,
config="config",
content={"splits": [{"dataset": dataset, "config": "config", "split": "split"}]},
http_status=HTTPStatus.OK,
)
assert has_some_cache(dataset=dataset)
with patch("admin.routes.obsolete_cache.get_supported_dataset_names", return_value=dataset_names):
with patch("admin.routes.obsolete_cache.MINIMUM_SUPPORTED_DATASETS", minimun_supported_datasets):
if should_raise:
with raises(UnexpectedError):
delete_obsolete_cache(
hf_endpoint="hf_endpoint",
hf_token="hf_token",
assets_directory=assets_directory,
cached_assets_directory=cached_assets_directory,
)
else:
deletion_report = delete_obsolete_cache(
hf_endpoint="hf_endpoint",
hf_token="hf_token",
assets_directory=assets_directory,
cached_assets_directory=cached_assets_directory,
)
                assert len(deletion_report) == (0 if should_keep else 1)
if len(deletion_report) > 0:
assert deletion_report[0]["dataset"] == "dataset"
assert deletion_report[0]["cache_records"] == 2 # for kind_1 and kind_2
assert asset_file.is_file() == should_keep
assert cached_asset_file.is_file() == should_keep
assert has_some_cache(dataset=dataset) == should_keep
shutil.rmtree(assets_directory, ignore_errors=True)
shutil.rmtree(cached_assets_directory, ignore_errors=True)
| datasets-server-main | services/admin/tests/test_obsolete_cache.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from collections.abc import Iterator
from libcommon.processing_graph import ProcessingGraph
from libcommon.queue import _clean_queue_database
from libcommon.resources import CacheMongoResource, QueueMongoResource
from libcommon.simple_cache import _clean_cache_database
from libcommon.storage import StrPath, init_assets_dir
from pytest import MonkeyPatch, fixture
from admin.config import AppConfig
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.hub"]
# see https://github.com/pytest-dev/pytest/issues/363#issuecomment-406536200
@fixture(scope="session")
def monkeypatch_session(hf_endpoint: str, hf_token: str) -> Iterator[MonkeyPatch]:
monkeypatch_session = MonkeyPatch()
monkeypatch_session.setenv("CACHE_MONGO_DATABASE", "datasets_server_cache_test")
monkeypatch_session.setenv("QUEUE_MONGO_DATABASE", "datasets_server_queue_test")
monkeypatch_session.setenv("COMMON_HF_ENDPOINT", hf_endpoint)
monkeypatch_session.setenv("COMMON_HF_TOKEN", hf_token)
monkeypatch_session.setenv("ADMIN_HF_TIMEOUT_SECONDS", "10")
yield monkeypatch_session
monkeypatch_session.undo()
@fixture(scope="session")
def app_config(monkeypatch_session: MonkeyPatch) -> AppConfig:
app_config = AppConfig.from_env()
if "test" not in app_config.cache.mongo_database or "test" not in app_config.queue.mongo_database:
raise ValueError("Test must be launched on a test mongo database")
return app_config
@fixture(scope="session")
def processing_graph(app_config: AppConfig) -> ProcessingGraph:
return ProcessingGraph(app_config.processing_graph.specification)
@fixture(scope="session")
def assets_directory(app_config: AppConfig) -> StrPath:
return init_assets_dir(directory=app_config.assets.storage_directory)
@fixture(autouse=True)
def cache_mongo_resource(app_config: AppConfig) -> Iterator[CacheMongoResource]:
with CacheMongoResource(database=app_config.cache.mongo_database, host=app_config.cache.mongo_url) as resource:
yield resource
_clean_cache_database()
@fixture(autouse=True)
def queue_mongo_resource(app_config: AppConfig) -> Iterator[QueueMongoResource]:
with QueueMongoResource(database=app_config.queue.mongo_database, host=app_config.queue.mongo_url) as resource:
yield resource
_clean_queue_database()
| datasets-server-main | services/admin/tests/conftest.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from collections.abc import Iterator
from libcommon.processing_graph import ProcessingGraph
from pytest import MonkeyPatch, fixture, mark
from starlette.testclient import TestClient
from admin.app import create_app
from admin.config import AppConfig
# see https://github.com/pytest-dev/pytest/issues/363#issuecomment-406536200
@fixture(scope="module")
def real_monkeypatch() -> Iterator[MonkeyPatch]:
monkeypatch = MonkeyPatch()
monkeypatch.setenv("CACHE_MONGO_DATABASE", "datasets_server_cache_test")
monkeypatch.setenv("QUEUE_MONGO_DATABASE", "datasets_server_queue_test")
monkeypatch.setenv("COMMON_HF_ENDPOINT", "https://huggingface.co")
monkeypatch.setenv("COMMON_HF_TOKEN", "")
yield monkeypatch
monkeypatch.undo()
@fixture(scope="module")
def real_client(real_monkeypatch: MonkeyPatch) -> TestClient:
return TestClient(create_app())
@fixture(scope="module")
def real_app_config(real_monkeypatch: MonkeyPatch) -> AppConfig:
app_config = AppConfig.from_env()
if "test" not in app_config.cache.mongo_database or "test" not in app_config.queue.mongo_database:
raise ValueError("Test must be launched on a test mongo database")
if app_config.common.hf_endpoint != "https://huggingface.co":
raise ValueError("Test must be launched on the production hub")
return app_config
@mark.real_dataset
def test_force_refresh(
real_app_config: AppConfig,
real_client: TestClient,
) -> None:
dataset = "glue"
processing_graph = ProcessingGraph(real_app_config.processing_graph.specification)
first_step = processing_graph.get_processing_steps(order="topological")[0]
path = first_step.job_type
response = real_client.request("post", f"/force-refresh/{path}?dataset={dataset}")
assert response.status_code == 200, response.text
| datasets-server-main | services/admin/tests/test_app_real.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
| datasets-server-main | services/admin/tests/__init__.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from collections.abc import Mapping
from typing import Optional
import pytest
import responses
from starlette.datastructures import Headers
from starlette.requests import Request
from admin.authentication import auth_check
from admin.utils import ExternalAuthenticatedError, ExternalUnauthenticatedError
from .utils import request_callback
def test_no_auth_check() -> None:
assert auth_check()
@responses.activate
def test_unreachable_external_auth_check_service() -> None:
with pytest.raises(RuntimeError):
auth_check(external_auth_url="https://auth.check", organization="org")
@responses.activate
@pytest.mark.parametrize(
"status,error",
[
(200, None),
(401, ExternalUnauthenticatedError),
(403, ExternalAuthenticatedError),
(404, ExternalAuthenticatedError),
(429, ValueError),
],
)
def test_external_auth_responses_without_request(status: int, error: Optional[type[Exception]]) -> None:
url = "https://auth.check"
body = '{"orgs": [{"name": "org1"}]}'
responses.add(responses.GET, url, status=status, body=body)
if error is None:
assert auth_check(external_auth_url=url, organization="org1")
else:
with pytest.raises(error):
auth_check(external_auth_url=url, organization="org1")
@responses.activate
@pytest.mark.parametrize(
"org,status,error",
[("org1", 200, None), ("org2", 403, ExternalAuthenticatedError)],
)
def test_org(org: str, status: int, error: Optional[type[Exception]]) -> None:
url = "https://auth.check"
body = '{"orgs": [{"name": "org1"}]}'
responses.add(responses.GET, url, status=status, body=body)
if error is None:
assert auth_check(external_auth_url=url, organization=org)
else:
with pytest.raises(error):
auth_check(external_auth_url=url, organization=org)
def create_request(headers: Mapping[str, str]) -> Request:
return Request(
{
"type": "http",
"path": "/some-path",
"headers": Headers(headers).raw,
"http_version": "1.1",
"method": "GET",
"scheme": "https",
"client": ("127.0.0.1", 8080),
"server": ("some.server", 443),
}
)
@responses.activate
def test_valid_responses_with_request() -> None:
url = "https://auth.check"
organization = "org1"
responses.add_callback(responses.GET, url, callback=request_callback)
with pytest.raises(ExternalAuthenticatedError):
auth_check(
external_auth_url=url,
request=create_request(headers={"authorization": "Bearer token"}),
organization=organization,
)
assert auth_check(
external_auth_url=url,
request=create_request(headers={}),
organization=organization,
)
| datasets-server-main | services/admin/tests/test_authentication.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from collections.abc import Mapping
from io import BufferedReader
from typing import Union
from requests import PreparedRequest
from responses import Response
_Body = Union[str, BaseException, Response, BufferedReader, bytes]
def request_callback(request: PreparedRequest) -> Union[Exception, tuple[int, Mapping[str, str], _Body]]:
    # Return 404 if a token has been provided, and 200 if none has been provided.
    # There is no logic behind this behavior; it's just to test that the token is
    # correctly passed to the auth_check service.
body = '{"orgs": [{"name": "org1"}]}'
if request.headers.get("authorization"):
return (404, {"Content-Type": "text/plain"}, body)
return (200, {"Content-Type": "text/plain"}, body)
| datasets-server-main | services/admin/tests/utils.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from typing import Optional
import pytest
from libcommon.processing_graph import ProcessingGraph
from starlette.testclient import TestClient
from admin.app import create_app
@pytest.fixture(scope="module")
def client(monkeypatch_session: pytest.MonkeyPatch) -> TestClient:
return TestClient(create_app())
def test_cors(client: TestClient) -> None:
origin = "http://localhost:3000"
method = "GET"
header = "X-Requested-With"
response = client.request(
"options",
"/pending-jobs",
headers={
"Origin": origin,
"Access-Control-Request-Method": method,
"Access-Control-Request-Headers": header,
},
)
assert response.status_code == 200
assert (
origin in [o.strip() for o in response.headers["Access-Control-Allow-Origin"].split(",")]
or response.headers["Access-Control-Allow-Origin"] == "*"
)
assert (
header in [o.strip() for o in response.headers["Access-Control-Allow-Headers"].split(",")]
or response.headers["Access-Control-Expose-Headers"] == "*"
)
assert (
method in [o.strip() for o in response.headers["Access-Control-Allow-Methods"].split(",")]
or response.headers["Access-Control-Expose-Headers"] == "*"
)
assert response.headers["Access-Control-Allow-Credentials"] == "true"
def test_get_healthcheck(client: TestClient) -> None:
response = client.request("get", "/healthcheck")
assert response.status_code == 200
assert response.text == "ok"
def test_metrics(client: TestClient) -> None:
response = client.request("get", "/metrics")
assert response.status_code == 200
text = response.text
lines = text.split("\n")
metrics = {line.split(" ")[0]: float(line.split(" ")[1]) for line in lines if line and line[0] != "#"}
# the middleware should have recorded the request
name = 'starlette_requests_total{method="GET",path_template="/metrics"}'
assert name in metrics, metrics
assert metrics[name] > 0, metrics
def test_pending_jobs(client: TestClient, processing_graph: ProcessingGraph) -> None:
response = client.request("get", "/pending-jobs")
assert response.status_code == 200
json = response.json()
for processing_step in processing_graph.get_processing_steps():
assert json[processing_step.job_type] == {"waiting": [], "started": []}
def test_dataset_status(client: TestClient, processing_graph: ProcessingGraph) -> None:
response = client.request("get", "/dataset-status")
assert response.status_code == 422
response = client.request("get", "/dataset-status", params={"dataset": "test-dataset"})
assert response.status_code == 200
json = response.json()
for processing_step in processing_graph.get_processing_steps():
assert not json[processing_step.job_type]["cached_responses"]
assert not json[processing_step.job_type]["jobs"]
@pytest.mark.parametrize(
"cursor,http_status,error_code",
[
(None, 200, None),
("", 200, None),
("invalid cursor", 422, "InvalidParameter"),
],
)
def test_cache_reports(
client: TestClient,
processing_graph: ProcessingGraph,
cursor: Optional[str],
http_status: int,
error_code: Optional[str],
) -> None:
first_step = processing_graph.get_processing_steps()[0]
path = first_step.cache_kind
cursor_str = f"?cursor={cursor}" if cursor else ""
response = client.request("get", f"/cache-reports/{path}{cursor_str}")
assert response.status_code == http_status
if error_code:
assert isinstance(response.json()["error"], str)
assert response.headers["X-Error-Code"] == error_code
else:
assert response.json() == {"cache_reports": [], "next_cursor": ""}
assert "X-Error-Code" not in response.headers
@pytest.mark.parametrize(
"cursor,http_status,error_code",
[
(None, 200, None),
("", 200, None),
("invalid cursor", 422, "InvalidParameter"),
],
)
def test_cache_reports_with_content(
client: TestClient,
processing_graph: ProcessingGraph,
cursor: Optional[str],
http_status: int,
error_code: Optional[str],
) -> None:
first_step = processing_graph.get_processing_steps()[0]
path = first_step.cache_kind
cursor_str = f"?cursor={cursor}" if cursor else ""
response = client.request("get", f"/cache-reports-with-content/{path}{cursor_str}")
assert response.status_code == http_status
if error_code:
assert isinstance(response.json()["error"], str)
assert response.headers["X-Error-Code"] == error_code
else:
assert response.json() == {"cache_reports_with_content": [], "next_cursor": ""}
assert "X-Error-Code" not in response.headers
def test_get_obsolete_cache(client: TestClient) -> None:
response = client.request("get", "/obsolete-cache")
assert response.status_code == 200
json = response.json()
assert json == []
def test_delete_obsolete_cache(client: TestClient) -> None:
response = client.request("delete", "/obsolete-cache")
assert response.status_code == 500
| datasets-server-main | services/admin/tests/test_app.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
| datasets-server-main | services/admin/tests/fixtures/__init__.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
# Adapted from https://github.com/huggingface/datasets/blob/main/tests/fixtures/hub.py
import time
from collections.abc import Callable, Iterator
from contextlib import contextmanager, suppress
from typing import Any, Literal, Optional, TypedDict, Union
import pytest
import requests
from huggingface_hub.constants import REPO_TYPES, REPO_TYPES_URL_PREFIXES
from huggingface_hub.hf_api import HfApi
from huggingface_hub.utils._errors import hf_raise_for_status
# see https://github.com/huggingface/moon-landing/blob/main/server/scripts/staging-seed-db.ts
CI_HUB_USER = "__DUMMY_DATASETS_SERVER_USER__"
CI_HUB_USER_API_TOKEN = "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"
CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
def update_repo_settings(
hf_api: HfApi,
repo_id: str,
*,
private: Optional[bool] = None,
gated: Optional[str] = None,
token: Optional[str] = None,
organization: Optional[str] = None,
repo_type: Optional[str] = None,
name: Optional[str] = None,
) -> Any:
"""Update the settings of a repository.
Args:
        repo_id (`str`):
A namespace (user or an organization) and a repo name separated
by a `/`.
<Tip>
Version added: 0.5
</Tip>
private (`bool`, *optional*, defaults to `None`):
Whether the repo should be private.
gated (`str`, *optional*, defaults to `None`):
Whether the repo should request user access.
Possible values are 'auto' and 'manual'
token (`str`, *optional*):
An authentication token (See https://huggingface.co/settings/token)
repo_type (`str`, *optional*):
Set to `"dataset"` or `"space"` if uploading to a dataset or
space, `None` or `"model"` if uploading to a model. Default is
`None`.
Returns:
The HTTP response in json.
<Tip>
Raises the following errors:
- [`~huggingface_hub.utils.RepositoryNotFoundError`]
If the repository to download from cannot be found. This may be because it doesn't exist,
or because it is set to `private` and you do not have access.
</Tip>
"""
if repo_type not in REPO_TYPES:
raise ValueError("Invalid repo type")
organization, name = repo_id.split("/") if "/" in repo_id else (None, repo_id)
if organization is None:
namespace = hf_api.whoami(token=token)["name"]
else:
namespace = organization
path_prefix = f"{hf_api.endpoint}/api/"
if repo_type in REPO_TYPES_URL_PREFIXES:
path_prefix += REPO_TYPES_URL_PREFIXES[repo_type]
path = f"{path_prefix}{namespace}/{name}/settings"
json: dict[str, Union[bool, str]] = {}
if private is not None:
json["private"] = private
if gated is not None:
json["gated"] = gated
r = requests.put(
path,
headers={"authorization": f"Bearer {token}"},
json=json,
)
hf_raise_for_status(r)
return r.json()
@pytest.fixture(scope="session")
def hf_api() -> HfApi:
return HfApi(endpoint=CI_HUB_ENDPOINT)
@pytest.fixture(scope="session")
def hf_endpoint() -> str:
return CI_HUB_ENDPOINT
@pytest.fixture(scope="session")
def hf_token() -> str:
return CI_HUB_USER_API_TOKEN
@pytest.fixture
def cleanup_repo(hf_api: HfApi) -> Callable[[str], None]:
def _cleanup_repo(repo_id: str) -> None:
hf_api.delete_repo(repo_id=repo_id, token=CI_HUB_USER_API_TOKEN, repo_type="dataset")
return _cleanup_repo
@pytest.fixture
def temporary_repo(cleanup_repo: Callable[[str], None]) -> Callable[[str], Iterator[str]]:
@contextmanager
def _temporary_repo(repo_id: str) -> Iterator[str]:
try:
yield repo_id
finally:
cleanup_repo(repo_id)
return _temporary_repo # type: ignore
def create_unique_repo_name(prefix: str, user: str) -> str:
repo_name = f"{prefix}-{int(time.time() * 10e3)}"
return f"{user}/{repo_name}"
def create_hf_dataset_repo(
hf_api: HfApi,
hf_token: str,
prefix: str,
*,
private: bool = False,
gated: Optional[str] = None,
user: str = CI_HUB_USER,
) -> str:
repo_id = create_unique_repo_name(prefix, user)
hf_api.create_repo(repo_id=repo_id, token=hf_token, repo_type="dataset", private=private)
if gated:
update_repo_settings(hf_api, repo_id, token=hf_token, gated=gated, repo_type="dataset")
return repo_id
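# Illustrative call (mirrors the fixtures below): create_hf_dataset_repo(hf_api=hf_api, hf_token=hf_token,
# prefix="repo_empty", gated="auto") creates a uniquely named gated dataset repo on the CI Hub and
# returns its repo_id.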
# https://docs.pytest.org/en/6.2.x/fixture.html#yield-fixtures-recommended
@pytest.fixture(scope="session", autouse=True)
def hf_public_dataset_repo_empty(hf_api: HfApi, hf_token: str) -> Iterator[str]:
repo_id = create_hf_dataset_repo(hf_api=hf_api, hf_token=hf_token, prefix="repo_empty")
yield repo_id
with suppress(requests.exceptions.HTTPError, ValueError):
hf_api.delete_repo(repo_id=repo_id, token=hf_token, repo_type="dataset")
@pytest.fixture(scope="session", autouse=True)
def hf_gated_dataset_repo_empty(hf_api: HfApi, hf_token: str) -> Iterator[str]:
repo_id = create_hf_dataset_repo(hf_api=hf_api, hf_token=hf_token, prefix="repo_empty", gated="auto")
yield repo_id
with suppress(requests.exceptions.HTTPError, ValueError):
hf_api.delete_repo(repo_id=repo_id, token=hf_token, repo_type="dataset")
@pytest.fixture(scope="session", autouse=True)
def hf_private_dataset_repo_empty(hf_api: HfApi, hf_token: str) -> Iterator[str]:
repo_id = create_hf_dataset_repo(hf_api=hf_api, hf_token=hf_token, prefix="repo_empty", private=True)
yield repo_id
with suppress(requests.exceptions.HTTPError, ValueError):
hf_api.delete_repo(repo_id=repo_id, token=hf_token, repo_type="dataset")
class DatasetRepos(TypedDict):
public: str
private: str
gated: str
DatasetReposType = Literal["public", "private", "gated"]
@pytest.fixture(scope="session", autouse=True)
def hf_dataset_repos_csv_data(
hf_public_dataset_repo_empty: str,
hf_gated_dataset_repo_empty: str,
hf_private_dataset_repo_empty: str,
) -> DatasetRepos:
return {
"public": hf_public_dataset_repo_empty,
"private": hf_private_dataset_repo_empty,
"gated": hf_gated_dataset_repo_empty,
}
| datasets-server-main | services/admin/tests/fixtures/hub.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from dataclasses import dataclass, field
from typing import Optional
from environs import Env
from libcommon.config import (
AssetsConfig,
CacheConfig,
CachedAssetsConfig,
CommonConfig,
LogConfig,
ParquetMetadataConfig,
ProcessingGraphConfig,
QueueConfig,
)
ADMIN_UVICORN_HOSTNAME = "localhost"
ADMIN_UVICORN_NUM_WORKERS = 2
ADMIN_UVICORN_PORT = 8000
@dataclass(frozen=True)
class UvicornConfig:
hostname: str = ADMIN_UVICORN_HOSTNAME
num_workers: int = ADMIN_UVICORN_NUM_WORKERS
port: int = ADMIN_UVICORN_PORT
@classmethod
def from_env(cls) -> "UvicornConfig":
env = Env(expand_vars=True)
with env.prefixed("ADMIN_UVICORN_"):
return cls(
hostname=env.str(name="HOSTNAME", default=ADMIN_UVICORN_HOSTNAME),
num_workers=env.int(name="NUM_WORKERS", default=ADMIN_UVICORN_NUM_WORKERS),
port=env.int(name="PORT", default=ADMIN_UVICORN_PORT),
)
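# Illustrative environment overrides for the block above (values are examples only): setting
# ADMIN_UVICORN_PORT=8081 and ADMIN_UVICORN_NUM_WORKERS=4 yields
# UvicornConfig(hostname="localhost", num_workers=4, port=8081).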
ADMIN_CACHE_REPORTS_NUM_RESULTS = 100
ADMIN_CACHE_REPORTS_WITH_CONTENT_NUM_RESULTS = 100
ADMIN_EXTERNAL_AUTH_URL = None
ADMIN_HF_ORGANIZATION = None
ADMIN_HF_TIMEOUT_SECONDS = 0.2
ADMIN_HF_WHOAMI_PATH = "/api/whoami-v2"
ADMIN_MAX_AGE = 10
@dataclass(frozen=True)
class AdminConfig:
cache_reports_num_results: int = ADMIN_CACHE_REPORTS_NUM_RESULTS
cache_reports_with_content_num_results: int = ADMIN_CACHE_REPORTS_WITH_CONTENT_NUM_RESULTS
external_auth_url: Optional[str] = ADMIN_EXTERNAL_AUTH_URL # not documented
hf_organization: Optional[str] = ADMIN_HF_ORGANIZATION
hf_timeout_seconds: Optional[float] = ADMIN_HF_TIMEOUT_SECONDS
hf_whoami_path: str = ADMIN_HF_WHOAMI_PATH
max_age: int = ADMIN_MAX_AGE
@classmethod
def from_env(cls, common_config: CommonConfig) -> "AdminConfig":
env = Env(expand_vars=True)
with env.prefixed("ADMIN_"):
hf_whoami_path = env.str(name="HF_WHOAMI_PATH", default=ADMIN_HF_WHOAMI_PATH)
external_auth_url = None if hf_whoami_path is None else f"{common_config.hf_endpoint}{hf_whoami_path}"
return cls(
cache_reports_num_results=env.int(
name="CACHE_REPORTS_NUM_RESULTS", default=ADMIN_CACHE_REPORTS_NUM_RESULTS
),
cache_reports_with_content_num_results=env.int(
name="CACHE_REPORTS_WITH_CONTENT_NUM_RESULTS", default=ADMIN_CACHE_REPORTS_WITH_CONTENT_NUM_RESULTS
),
external_auth_url=external_auth_url,
hf_organization=env.str(name="HF_ORGANIZATION", default=ADMIN_HF_ORGANIZATION),
hf_timeout_seconds=env.float(name="HF_TIMEOUT_SECONDS", default=ADMIN_HF_TIMEOUT_SECONDS),
hf_whoami_path=hf_whoami_path,
max_age=env.int(name="MAX_AGE", default=ADMIN_MAX_AGE),
)
DATASETS_BASED_HF_DATASETS_CACHE = None
@dataclass(frozen=True)
class DatasetsBasedConfig:
hf_datasets_cache: Optional[str] = DATASETS_BASED_HF_DATASETS_CACHE
@classmethod
def from_env(cls) -> "DatasetsBasedConfig":
env = Env(expand_vars=True)
with env.prefixed("DATASETS_BASED_"):
return cls(
hf_datasets_cache=env.str(name="HF_DATASETS_CACHE", default=DATASETS_BASED_HF_DATASETS_CACHE),
)
DESCRIPTIVE_STATISTICS_CACHE_DIRECTORY = None
@dataclass(frozen=True)
class DescriptiveStatisticsConfig:
cache_directory: Optional[str] = DESCRIPTIVE_STATISTICS_CACHE_DIRECTORY
@classmethod
def from_env(cls) -> "DescriptiveStatisticsConfig":
env = Env(expand_vars=True)
with env.prefixed("DESCRIPTIVE_STATISTICS_"):
return cls(
cache_directory=env.str(name="CACHE_DIRECTORY", default=DESCRIPTIVE_STATISTICS_CACHE_DIRECTORY),
)
DUCKDB_INDEX_CACHE_DIRECTORY = None
@dataclass(frozen=True)
class DuckDBIndexConfig:
cache_directory: Optional[str] = DUCKDB_INDEX_CACHE_DIRECTORY
@classmethod
def from_env(cls) -> "DuckDBIndexConfig":
env = Env(expand_vars=True)
with env.prefixed("DUCKDB_INDEX_"):
return cls(
cache_directory=env.str(name="CACHE_DIRECTORY", default=DUCKDB_INDEX_CACHE_DIRECTORY),
)
@dataclass(frozen=True)
class AppConfig:
admin: AdminConfig = field(default_factory=AdminConfig)
assets: AssetsConfig = field(default_factory=AssetsConfig)
cache: CacheConfig = field(default_factory=CacheConfig)
cached_assets: CachedAssetsConfig = field(default_factory=CachedAssetsConfig)
common: CommonConfig = field(default_factory=CommonConfig)
datasets_based: DatasetsBasedConfig = field(default_factory=DatasetsBasedConfig)
descriptive_statistics: DescriptiveStatisticsConfig = field(default_factory=DescriptiveStatisticsConfig)
duckdb_index: DuckDBIndexConfig = field(default_factory=DuckDBIndexConfig)
log: LogConfig = field(default_factory=LogConfig)
parquet_metadata: ParquetMetadataConfig = field(default_factory=ParquetMetadataConfig)
processing_graph: ProcessingGraphConfig = field(default_factory=ProcessingGraphConfig)
queue: QueueConfig = field(default_factory=QueueConfig)
@classmethod
def from_env(cls) -> "AppConfig":
common_config = CommonConfig.from_env()
return cls(
common=common_config,
assets=AssetsConfig.from_env(),
cache=CacheConfig.from_env(),
cached_assets=CachedAssetsConfig.from_env(),
datasets_based=DatasetsBasedConfig.from_env(),
descriptive_statistics=DescriptiveStatisticsConfig.from_env(),
duckdb_index=DuckDBIndexConfig.from_env(),
log=LogConfig.from_env(),
parquet_metadata=ParquetMetadataConfig.from_env(),
processing_graph=ProcessingGraphConfig.from_env(),
queue=QueueConfig.from_env(),
admin=AdminConfig.from_env(common_config),
)
| datasets-server-main | services/admin/src/admin/config.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
| datasets-server-main | services/admin/src/admin/__init__.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import logging
from collections.abc import Callable, Coroutine
from http import HTTPStatus
from typing import Any, Literal, Optional
from libcommon.exceptions import CustomError
from libcommon.utils import orjson_dumps
from starlette.requests import Request
from starlette.responses import JSONResponse, Response
AdminErrorCode = Literal[
"ExternalAuthenticatedError",
"ExternalUnauthenticatedError",
"InvalidParameter",
"MissingRequiredParameter",
"UnsupportedDatasetError",
"UnexpectedError", # also in libcommon.exceptions
]
class AdminCustomError(CustomError):
"""Base class for exceptions in this module."""
def __init__(
self,
message: str,
status_code: HTTPStatus,
code: AdminErrorCode,
cause: Optional[BaseException] = None,
disclose_cause: bool = False,
):
super().__init__(message, status_code, str(code), cause, disclose_cause)
class MissingRequiredParameterError(AdminCustomError):
"""Raised when a required parameter is missing."""
def __init__(self, message: str):
super().__init__(message, HTTPStatus.UNPROCESSABLE_ENTITY, "MissingRequiredParameter")
class InvalidParameterError(AdminCustomError):
"""Raised when a parameter is invalid."""
def __init__(self, message: str):
super().__init__(message, HTTPStatus.UNPROCESSABLE_ENTITY, "InvalidParameter")
class UnsupportedDatasetError(AdminCustomError):
"""Raised when a dataset is not supported (private dataset, for example)."""
def __init__(self, message: str):
super().__init__(message, HTTPStatus.NOT_IMPLEMENTED, "UnsupportedDatasetError")
class UnexpectedError(AdminCustomError):
"""Raised when an unexpected error occurred."""
def __init__(self, message: str, cause: Optional[BaseException] = None):
super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "UnexpectedError", cause)
if cause:
logging.exception(message, exc_info=cause)
else:
logging.exception(message)
class ExternalUnauthenticatedError(AdminCustomError):
"""Raised when the external authentication check failed while the user was unauthenticated."""
def __init__(self, message: str):
super().__init__(message, HTTPStatus.UNAUTHORIZED, "ExternalUnauthenticatedError")
class ExternalAuthenticatedError(AdminCustomError):
"""Raised when the external authentication check failed while the user was authenticated."""
def __init__(self, message: str):
super().__init__(message, HTTPStatus.NOT_FOUND, "ExternalAuthenticatedError")
class OrjsonResponse(JSONResponse):
def render(self, content: Any) -> bytes:
return orjson_dumps(content)
def get_response(content: Any, status_code: int = 200, max_age: int = 0) -> Response:
headers = {"Cache-Control": f"max-age={max_age}"} if max_age > 0 else {"Cache-Control": "no-store"}
return OrjsonResponse(content, status_code=status_code, headers=headers)
def get_json_response(
content: Any, status_code: HTTPStatus = HTTPStatus.OK, max_age: int = 0, error_code: Optional[str] = None
) -> Response:
headers = {"Cache-Control": f"max-age={max_age}" if max_age > 0 else "no-store"}
if error_code is not None:
headers["X-Error-Code"] = error_code
return OrjsonResponse(content, status_code=status_code.value, headers=headers)
EXPOSED_HEADERS = [
"X-Error-Code",
]
def get_json_ok_response(content: Any, max_age: int) -> Response:
return get_json_response(content=content, max_age=max_age)
def get_json_error_response(
content: Any, max_age: int, status_code: HTTPStatus = HTTPStatus.OK, error_code: Optional[str] = None
) -> Response:
return get_json_response(content=content, status_code=status_code, max_age=max_age, error_code=error_code)
def get_json_admin_error_response(error: CustomError, max_age: int) -> Response:
return get_json_error_response(
content=error.as_response(), status_code=error.status_code, max_age=max_age, error_code=error.code
)
def is_non_empty_string(string: Any) -> bool:
return isinstance(string, str) and bool(string and string.strip())
def are_valid_parameters(parameters: list[Any]) -> bool:
return all(is_non_empty_string(s) for s in parameters)
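# Illustrative behavior of the validation helpers above (not executed here):
#   is_non_empty_string("glue")            -> True
#   is_non_empty_string("   ")             -> False
#   are_valid_parameters(["ds", "config"]) -> True
#   are_valid_parameters(["ds", None])     -> False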
Endpoint = Callable[[Request], Coroutine[Any, Any, Response]]
| datasets-server-main | services/admin/src/admin/utils.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from typing import Literal, Optional
import requests
from requests import PreparedRequest
from requests.auth import AuthBase
from starlette.requests import Request
from admin.utils import ExternalAuthenticatedError, ExternalUnauthenticatedError
class RequestAuth(AuthBase):
"""Attaches input Request authentication headers to the given Request object."""
def __init__(self, request: Optional[Request]) -> None:
if request is not None:
self.authorization = request.headers.get("authorization")
else:
self.authorization = None
def __call__(self, r: PreparedRequest) -> PreparedRequest:
# modify and return the request
if self.authorization:
r.headers["authorization"] = self.authorization
return r
def auth_check(
external_auth_url: Optional[str] = None,
request: Optional[Request] = None,
organization: Optional[str] = None,
hf_timeout_seconds: Optional[float] = None,
) -> Literal[True]:
"""check if the user is member of the organization
Args:
external_auth_url (str | None): the URL of an external authentication service. If None, the dataset is always
authorized.
request (Request | None): the request which optionally bears authentication headers: "cookie" or
"authorization"
organization (str | None): the organization name. If None, the dataset is always
authorized.
hf_timeout_seconds (float | None): the timeout in seconds for the HTTP request to the external authentication
service.
Returns:
None: the user is authorized
"""
if organization is None or external_auth_url is None:
return True
try:
response = requests.get(external_auth_url, auth=RequestAuth(request), timeout=hf_timeout_seconds)
except Exception as err:
raise RuntimeError("External authentication check failed", err) from err
if response.status_code == 200:
try:
json = response.json()
if organization is None or organization in {org["name"] for org in json["orgs"]}:
return True
else:
raise ExternalAuthenticatedError("You are not member of the organization")
except Exception as err:
raise ExternalAuthenticatedError(
"Cannot access the route with the current credentials. Please retry with other authentication"
" credentials."
) from err
elif response.status_code == 401:
raise ExternalUnauthenticatedError("Cannot access the route. Please retry with authentication.")
elif response.status_code in {403, 404}:
raise ExternalAuthenticatedError(
"Cannot access the route with the current credentials. Please retry with other authentication credentials."
)
else:
raise ValueError(f"Unexpected status code {response.status_code}")
| datasets-server-main | services/admin/src/admin/authentication.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import uvicorn
from libcommon.log import init_logging
from libcommon.processing_graph import ProcessingGraph
from libcommon.resources import CacheMongoResource, QueueMongoResource, Resource
from libcommon.storage import (
exists,
init_assets_dir,
init_cached_assets_dir,
init_duckdb_index_cache_dir,
init_hf_datasets_cache_dir,
init_parquet_metadata_dir,
init_statistics_cache_dir,
)
from starlette.applications import Starlette
from starlette.middleware import Middleware
from starlette.middleware.cors import CORSMiddleware
from starlette.middleware.gzip import GZipMiddleware
from starlette.routing import Route
from starlette_prometheus import PrometheusMiddleware
from admin.config import AppConfig, UvicornConfig
from admin.routes.cache_reports import create_cache_reports_endpoint
from admin.routes.cache_reports_with_content import (
create_cache_reports_with_content_endpoint,
)
from admin.routes.dataset_backfill import create_dataset_backfill_endpoint
from admin.routes.dataset_backfill_plan import create_dataset_backfill_plan_endpoint
from admin.routes.dataset_status import create_dataset_status_endpoint
from admin.routes.force_refresh import create_force_refresh_endpoint
from admin.routes.healthcheck import healthcheck_endpoint
from admin.routes.metrics import create_metrics_endpoint
from admin.routes.obsolete_cache import (
create_delete_obsolete_cache_endpoint,
create_get_obsolete_cache_endpoint,
)
from admin.routes.pending_jobs import create_pending_jobs_endpoint
from admin.utils import EXPOSED_HEADERS
def create_app() -> Starlette:
app_config = AppConfig.from_env()
init_logging(level=app_config.log.level)
# ^ set first to have logs as soon as possible
assets_directory = init_assets_dir(directory=app_config.assets.storage_directory)
cached_assets_directory = init_cached_assets_dir(directory=app_config.cached_assets.storage_directory)
duckdb_index_cache_directory = init_duckdb_index_cache_dir(directory=app_config.duckdb_index.cache_directory)
hf_datasets_cache_directory = init_hf_datasets_cache_dir(app_config.datasets_based.hf_datasets_cache)
parquet_metadata_directory = init_parquet_metadata_dir(directory=app_config.parquet_metadata.storage_directory)
statistics_cache_directory = init_statistics_cache_dir(app_config.descriptive_statistics.cache_directory)
if not exists(assets_directory):
raise RuntimeError("The assets storage directory could not be accessed. Exiting.")
if not exists(cached_assets_directory):
raise RuntimeError("The cached-assets storage directory could not be accessed. Exiting.")
processing_graph = ProcessingGraph(app_config.processing_graph.specification)
cache_resource = CacheMongoResource(database=app_config.cache.mongo_database, host=app_config.cache.mongo_url)
queue_resource = QueueMongoResource(database=app_config.queue.mongo_database, host=app_config.queue.mongo_url)
resources: list[Resource] = [cache_resource, queue_resource]
if not cache_resource.is_available():
raise RuntimeError("The connection to the cache database could not be established. Exiting.")
if not queue_resource.is_available():
raise RuntimeError("The connection to the queue database could not be established. Exiting.")
middleware = [
Middleware(
CORSMiddleware,
allow_origins=["*"],
allow_methods=["*"],
allow_headers=["*"],
allow_credentials=True,
expose_headers=EXPOSED_HEADERS,
),
Middleware(GZipMiddleware),
Middleware(PrometheusMiddleware, filter_unhandled_paths=True),
]
routes = [
Route("/healthcheck", endpoint=healthcheck_endpoint),
Route(
"/metrics",
endpoint=create_metrics_endpoint(
assets_directory=assets_directory,
descriptive_statistics_directory=statistics_cache_directory,
duckdb_directory=duckdb_index_cache_directory,
hf_datasets_directory=hf_datasets_cache_directory,
parquet_metadata_directory=parquet_metadata_directory,
),
),
# used in a browser tab to monitor the queue
Route(
"/pending-jobs",
endpoint=create_pending_jobs_endpoint(
processing_graph=processing_graph,
max_age=app_config.admin.max_age,
external_auth_url=app_config.admin.external_auth_url,
organization=app_config.admin.hf_organization,
hf_timeout_seconds=app_config.admin.hf_timeout_seconds,
),
),
Route(
"/dataset-backfill",
endpoint=create_dataset_backfill_endpoint(
processing_graph=processing_graph,
hf_endpoint=app_config.common.hf_endpoint,
hf_token=app_config.common.hf_token,
cache_max_days=app_config.cache.max_days,
external_auth_url=app_config.admin.external_auth_url,
organization=app_config.admin.hf_organization,
hf_timeout_seconds=app_config.admin.hf_timeout_seconds,
),
methods=["POST"],
),
Route(
"/dataset-backfill-plan",
endpoint=create_dataset_backfill_plan_endpoint(
processing_graph=processing_graph,
hf_endpoint=app_config.common.hf_endpoint,
hf_token=app_config.common.hf_token,
cache_max_days=app_config.cache.max_days,
max_age=app_config.admin.max_age,
external_auth_url=app_config.admin.external_auth_url,
organization=app_config.admin.hf_organization,
hf_timeout_seconds=app_config.admin.hf_timeout_seconds,
),
),
Route(
"/dataset-status",
endpoint=create_dataset_status_endpoint(
processing_graph=processing_graph,
max_age=app_config.admin.max_age,
external_auth_url=app_config.admin.external_auth_url,
organization=app_config.admin.hf_organization,
hf_timeout_seconds=app_config.admin.hf_timeout_seconds,
),
),
Route(
"/obsolete-cache",
endpoint=create_get_obsolete_cache_endpoint(
hf_endpoint=app_config.common.hf_endpoint,
max_age=app_config.admin.max_age,
external_auth_url=app_config.admin.external_auth_url,
organization=app_config.admin.hf_organization,
hf_timeout_seconds=app_config.admin.hf_timeout_seconds,
hf_token=app_config.common.hf_token,
),
),
Route(
"/obsolete-cache",
endpoint=create_delete_obsolete_cache_endpoint(
hf_endpoint=app_config.common.hf_endpoint,
max_age=app_config.admin.max_age,
assets_directory=assets_directory,
cached_assets_directory=cached_assets_directory,
external_auth_url=app_config.admin.external_auth_url,
organization=app_config.admin.hf_organization,
hf_timeout_seconds=app_config.admin.hf_timeout_seconds,
hf_token=app_config.common.hf_token,
),
methods=["DELETE"],
),
]
for processing_step in processing_graph.get_processing_steps():
# beware: here we assume 1-1 mapping between processing steps and cache kinds (and job types)
# which is currently the case
cache_kind = processing_step.cache_kind
job_type = processing_step.job_type
input_type = processing_step.input_type
routes.extend(
[
Route(
f"/force-refresh/{job_type}",
endpoint=create_force_refresh_endpoint(
input_type=input_type,
job_type=job_type,
difficulty=processing_step.difficulty,
hf_endpoint=app_config.common.hf_endpoint,
hf_token=app_config.common.hf_token,
external_auth_url=app_config.admin.external_auth_url,
organization=app_config.admin.hf_organization,
hf_timeout_seconds=app_config.admin.hf_timeout_seconds,
),
methods=["POST"],
),
Route(
f"/cache-reports/{cache_kind}",
endpoint=create_cache_reports_endpoint(
cache_kind=cache_kind,
cache_reports_num_results=app_config.admin.cache_reports_num_results,
max_age=app_config.admin.max_age,
external_auth_url=app_config.admin.external_auth_url,
organization=app_config.admin.hf_organization,
hf_timeout_seconds=app_config.admin.hf_timeout_seconds,
),
),
Route(
f"/cache-reports-with-content/{cache_kind}",
endpoint=create_cache_reports_with_content_endpoint(
cache_kind=cache_kind,
cache_reports_with_content_num_results=app_config.admin.cache_reports_with_content_num_results,
max_age=app_config.admin.max_age,
external_auth_url=app_config.admin.external_auth_url,
organization=app_config.admin.hf_organization,
hf_timeout_seconds=app_config.admin.hf_timeout_seconds,
),
),
]
)
return Starlette(routes=routes, middleware=middleware, on_shutdown=[resource.release for resource in resources])
def start() -> None:
uvicorn_config = UvicornConfig.from_env()
uvicorn.run(
"app:create_app",
host=uvicorn_config.hostname,
port=uvicorn_config.port,
factory=True,
workers=uvicorn_config.num_workers,
)
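# Note: start() is the uvicorn entry point; it is invoked from admin.main when the service is run as a
# script (see main.py), with host, port and number of workers read from the ADMIN_UVICORN_* variables.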
| datasets-server-main | services/admin/src/admin/app.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from admin.app import start
if __name__ == "__main__":
start()
| datasets-server-main | services/admin/src/admin/main.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
import logging
from libcommon.prometheus import (
Prometheus,
update_assets_disk_usage,
update_descriptive_statistics_disk_usage,
update_duckdb_disk_usage,
update_hf_datasets_disk_usage,
update_parquet_metadata_disk_usage,
update_queue_jobs_total,
update_responses_in_cache_total,
)
from libcommon.storage import StrPath
from prometheus_client import CONTENT_TYPE_LATEST
from starlette.requests import Request
from starlette.responses import Response
from admin.utils import Endpoint
def create_metrics_endpoint(
assets_directory: StrPath,
descriptive_statistics_directory: StrPath,
duckdb_directory: StrPath,
hf_datasets_directory: StrPath,
parquet_metadata_directory: StrPath,
) -> Endpoint:
prometheus = Prometheus()
async def metrics_endpoint(_: Request) -> Response:
logging.info("/metrics")
update_queue_jobs_total()
update_responses_in_cache_total()
update_assets_disk_usage(directory=assets_directory)
update_descriptive_statistics_disk_usage(directory=descriptive_statistics_directory)
update_duckdb_disk_usage(directory=duckdb_directory)
update_hf_datasets_disk_usage(directory=hf_datasets_directory)
update_parquet_metadata_disk_usage(directory=parquet_metadata_directory)
return Response(prometheus.getLatestContent(), headers={"Content-Type": CONTENT_TYPE_LATEST})
return metrics_endpoint
| datasets-server-main | services/admin/src/admin/routes/metrics.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import logging
from typing import Optional
from libcommon.dataset import get_dataset_git_revision
from libcommon.exceptions import CustomError
from libcommon.processing_graph import InputType
from libcommon.queue import Queue
from libcommon.utils import Priority
from starlette.requests import Request
from starlette.responses import Response
from admin.authentication import auth_check
from admin.utils import (
Endpoint,
InvalidParameterError,
MissingRequiredParameterError,
UnexpectedError,
are_valid_parameters,
get_json_admin_error_response,
get_json_ok_response,
)
def create_force_refresh_endpoint(
input_type: InputType,
job_type: str,
difficulty: int,
hf_endpoint: str,
hf_token: Optional[str] = None,
external_auth_url: Optional[str] = None,
organization: Optional[str] = None,
hf_timeout_seconds: Optional[float] = None,
) -> Endpoint:
async def force_refresh_endpoint(request: Request) -> Response:
try:
dataset = request.query_params.get("dataset")
if not are_valid_parameters([dataset]) or not dataset:
raise MissingRequiredParameterError("Parameter 'dataset' is required")
if input_type == "dataset":
config = None
split = None
elif input_type == "config":
config = request.query_params.get("config")
split = None
if not are_valid_parameters([config]):
raise MissingRequiredParameterError("Parameter 'config' is required")
else:
config = request.query_params.get("config")
split = request.query_params.get("split")
if not are_valid_parameters([config, split]):
raise MissingRequiredParameterError("Parameters 'config' and 'split' are required")
try:
priority = Priority(request.query_params.get("priority", "low"))
except ValueError:
raise InvalidParameterError(
f"Parameter 'priority' should be one of {', '.join(prio.value for prio in Priority)}."
)
logging.info(
f"/force-refresh/{job_type}, dataset={dataset}, config={config}, split={split}, priority={priority}"
)
# if auth_check fails, it will raise an exception that will be caught below
auth_check(
external_auth_url=external_auth_url,
request=request,
organization=organization,
hf_timeout_seconds=hf_timeout_seconds,
)
revision = get_dataset_git_revision(dataset=dataset, hf_endpoint=hf_endpoint, hf_token=hf_token)
Queue().add_job(
job_type=job_type,
difficulty=difficulty,
dataset=dataset,
revision=revision,
config=config,
split=split,
priority=priority,
)
return get_json_ok_response(
{"status": "ok"},
max_age=0,
)
except CustomError as e:
return get_json_admin_error_response(e, max_age=0)
except Exception as e:
return get_json_admin_error_response(UnexpectedError("Unexpected error.", e), max_age=0)
return force_refresh_endpoint
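# Illustrative request (the job type and dataset below are placeholders): a POST to
# /force-refresh/dataset-config-names?dataset=user/dataset&priority=low queues one job for that
# dataset at the given priority; 'config' and 'split' are required only when the processing step's
# input type is "config" or "split".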
| datasets-server-main | services/admin/src/admin/routes/force_refresh.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import logging
from typing import Optional
from libcommon.dataset import get_dataset_git_revision
from libcommon.orchestrator import DatasetBackfillPlan
from libcommon.processing_graph import ProcessingGraph
from starlette.requests import Request
from starlette.responses import Response
from admin.authentication import auth_check
from admin.utils import (
AdminCustomError,
Endpoint,
MissingRequiredParameterError,
UnexpectedError,
are_valid_parameters,
get_json_admin_error_response,
get_json_ok_response,
)
def create_dataset_backfill_plan_endpoint(
processing_graph: ProcessingGraph,
max_age: int,
hf_endpoint: str,
cache_max_days: int,
external_auth_url: Optional[str] = None,
organization: Optional[str] = None,
hf_token: Optional[str] = None,
hf_timeout_seconds: Optional[float] = None,
) -> Endpoint:
async def dataset_state_endpoint(request: Request) -> Response:
try:
dataset = request.query_params.get("dataset")
if not are_valid_parameters([dataset]) or not dataset:
raise MissingRequiredParameterError("Parameter 'dataset' is required")
logging.info(f"/dataset-state, dataset={dataset}")
# if auth_check fails, it will raise an exception that will be caught below
auth_check(
external_auth_url=external_auth_url,
request=request,
organization=organization,
hf_timeout_seconds=hf_timeout_seconds,
)
dataset_git_revision = get_dataset_git_revision(
dataset=dataset, hf_endpoint=hf_endpoint, hf_token=hf_token, hf_timeout_seconds=hf_timeout_seconds
)
dataset_backfill_plan = DatasetBackfillPlan(
dataset=dataset,
processing_graph=processing_graph,
revision=dataset_git_revision,
cache_max_days=cache_max_days,
)
return get_json_ok_response(dataset_backfill_plan.as_response(), max_age=max_age)
except AdminCustomError as e:
return get_json_admin_error_response(e, max_age=max_age)
except Exception as e:
return get_json_admin_error_response(UnexpectedError("Unexpected error.", e), max_age=max_age)
return dataset_state_endpoint
| datasets-server-main | services/admin/src/admin/routes/dataset_backfill_plan.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import logging
from typing import Optional
from libcommon.processing_graph import ProcessingGraph
from libcommon.queue import Queue
from starlette.requests import Request
from starlette.responses import Response
from admin.authentication import auth_check
from admin.utils import (
AdminCustomError,
Endpoint,
UnexpectedError,
get_json_admin_error_response,
get_json_ok_response,
)
def create_pending_jobs_endpoint(
processing_graph: ProcessingGraph,
max_age: int,
external_auth_url: Optional[str] = None,
organization: Optional[str] = None,
hf_timeout_seconds: Optional[float] = None,
) -> Endpoint:
async def pending_jobs_endpoint(request: Request) -> Response:
logging.info("/pending-jobs")
try:
# if auth_check fails, it will raise an exception that will be caught below
auth_check(
external_auth_url=external_auth_url,
request=request,
organization=organization,
hf_timeout_seconds=hf_timeout_seconds,
)
queue = Queue()
return get_json_ok_response(
{
processing_step.job_type: queue.get_dump_by_pending_status(job_type=processing_step.job_type)
for processing_step in processing_graph.get_alphabetically_ordered_processing_steps()
},
max_age=max_age,
)
except AdminCustomError as e:
return get_json_admin_error_response(e, max_age=max_age)
except Exception as e:
return get_json_admin_error_response(UnexpectedError("Unexpected error.", e), max_age=max_age)
return pending_jobs_endpoint
| datasets-server-main | services/admin/src/admin/routes/pending_jobs.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import logging
from typing import Optional
from libcommon.simple_cache import InvalidCursor, InvalidLimit, get_cache_reports
from starlette.requests import Request
from starlette.responses import Response
from admin.authentication import auth_check
from admin.utils import (
AdminCustomError,
Endpoint,
InvalidParameterError,
UnexpectedError,
get_json_admin_error_response,
get_json_ok_response,
)
def create_cache_reports_endpoint(
cache_kind: str,
cache_reports_num_results: int,
max_age: int,
external_auth_url: Optional[str] = None,
organization: Optional[str] = None,
hf_timeout_seconds: Optional[float] = None,
) -> Endpoint:
async def cache_reports_endpoint(request: Request) -> Response:
try:
cursor = request.query_params.get("cursor") or ""
logging.info(f"Cache reports for {cache_kind}, cursor={cursor}")
# if auth_check fails, it will raise an exception that will be caught below
auth_check(
external_auth_url=external_auth_url,
request=request,
organization=organization,
hf_timeout_seconds=hf_timeout_seconds,
)
try:
return get_json_ok_response(
get_cache_reports(
kind=cache_kind,
cursor=cursor,
limit=cache_reports_num_results,
),
max_age=max_age,
)
except InvalidCursor as e:
raise InvalidParameterError("Invalid cursor.") from e
except InvalidLimit as e:
raise UnexpectedError(
"Invalid limit. CACHE_REPORTS_NUM_RESULTS must be a strictly positive integer."
) from e
except AdminCustomError as e:
return get_json_admin_error_response(e, max_age=max_age)
except Exception as e:
return get_json_admin_error_response(UnexpectedError("Unexpected error.", e), max_age=max_age)
return cache_reports_endpoint
| datasets-server-main | services/admin/src/admin/routes/cache_reports.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
| datasets-server-main | services/admin/src/admin/routes/__init__.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import logging
from starlette.requests import Request
from starlette.responses import PlainTextResponse, Response
async def healthcheck_endpoint(_: Request) -> Response:
logging.info("/healthcheck")
return PlainTextResponse("ok", headers={"Cache-Control": "no-store"})
| datasets-server-main | services/admin/src/admin/routes/healthcheck.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import logging
from typing import Optional
from libcommon.simple_cache import (
InvalidCursor,
InvalidLimit,
get_cache_reports_with_content,
)
from starlette.requests import Request
from starlette.responses import Response
from admin.authentication import auth_check
from admin.utils import (
AdminCustomError,
Endpoint,
InvalidParameterError,
UnexpectedError,
get_json_admin_error_response,
get_json_ok_response,
)
def create_cache_reports_with_content_endpoint(
cache_kind: str,
cache_reports_with_content_num_results: int,
max_age: int,
external_auth_url: Optional[str] = None,
organization: Optional[str] = None,
hf_timeout_seconds: Optional[float] = None,
) -> Endpoint:
async def cache_reports_with_content_endpoint(request: Request) -> Response:
try:
cursor = request.query_params.get("cursor") or ""
logging.info(f"Cache reports with content for {cache_kind}, cursor={cursor}")
# if auth_check fails, it will raise an exception that will be caught below
auth_check(
external_auth_url=external_auth_url,
request=request,
organization=organization,
hf_timeout_seconds=hf_timeout_seconds,
)
try:
return get_json_ok_response(
get_cache_reports_with_content(
kind=cache_kind,
cursor=cursor,
limit=cache_reports_with_content_num_results,
),
max_age=max_age,
)
except InvalidCursor as e:
raise InvalidParameterError("Invalid cursor.") from e
except InvalidLimit as e:
raise UnexpectedError(
"Invalid limit. CACHE_REPORTS_WITH_CONTENT_NUM_RESULTS must be a strictly positive integer."
) from e
except AdminCustomError as e:
return get_json_admin_error_response(e, max_age=max_age)
except Exception as e:
return get_json_admin_error_response(UnexpectedError("Unexpected error.", e), max_age=max_age)
return cache_reports_with_content_endpoint
| datasets-server-main | services/admin/src/admin/routes/cache_reports_with_content.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import logging
from typing import Optional
from libcommon.processing_graph import ProcessingGraph
from libcommon.queue import Queue
from libcommon.simple_cache import get_dataset_responses_without_content_for_kind
from starlette.requests import Request
from starlette.responses import Response
from admin.authentication import auth_check
from admin.utils import (
AdminCustomError,
Endpoint,
MissingRequiredParameterError,
UnexpectedError,
are_valid_parameters,
get_json_admin_error_response,
get_json_ok_response,
)
def create_dataset_status_endpoint(
processing_graph: ProcessingGraph,
max_age: int,
external_auth_url: Optional[str] = None,
organization: Optional[str] = None,
hf_timeout_seconds: Optional[float] = None,
) -> Endpoint:
async def dataset_status_endpoint(request: Request) -> Response:
try:
dataset = request.query_params.get("dataset")
if not are_valid_parameters([dataset]) or not dataset:
raise MissingRequiredParameterError("Parameter 'dataset' is required")
logging.info(f"/dataset-status, dataset={dataset}")
# if auth_check fails, it will raise an exception that will be caught below
auth_check(
external_auth_url=external_auth_url,
request=request,
organization=organization,
hf_timeout_seconds=hf_timeout_seconds,
)
queue = Queue()
return get_json_ok_response(
{
processing_step.name: {
"cached_responses": get_dataset_responses_without_content_for_kind(
kind=processing_step.cache_kind, dataset=dataset
),
"jobs": queue.get_dataset_pending_jobs_for_type(
dataset=dataset, job_type=processing_step.job_type
),
}
for processing_step in processing_graph.get_alphabetically_ordered_processing_steps()
},
max_age=max_age,
)
except AdminCustomError as e:
return get_json_admin_error_response(e, max_age=max_age)
except Exception as e:
return get_json_admin_error_response(UnexpectedError("Unexpected error.", e), max_age=max_age)
return dataset_status_endpoint
| datasets-server-main | services/admin/src/admin/routes/dataset_status.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import logging
from typing import Optional
from libcommon.dataset import get_dataset_git_revision
from libcommon.exceptions import CustomError
from libcommon.orchestrator import DatasetOrchestrator
from libcommon.processing_graph import ProcessingGraph
from libcommon.utils import Priority
from starlette.requests import Request
from starlette.responses import Response
from admin.authentication import auth_check
from admin.utils import (
Endpoint,
MissingRequiredParameterError,
UnexpectedError,
are_valid_parameters,
get_json_admin_error_response,
get_json_ok_response,
)
def create_dataset_backfill_endpoint(
processing_graph: ProcessingGraph,
hf_endpoint: str,
cache_max_days: int,
external_auth_url: Optional[str] = None,
organization: Optional[str] = None,
hf_token: Optional[str] = None,
hf_timeout_seconds: Optional[float] = None,
) -> Endpoint:
async def dataset_backfill_endpoint(request: Request) -> Response:
try:
dataset = request.query_params.get("dataset")
if not are_valid_parameters([dataset]) or not dataset:
raise MissingRequiredParameterError("Parameter 'dataset' is required")
logging.info(f"/dataset-backfill, dataset={dataset}")
# if auth_check fails, it will raise an exception that will be caught below
auth_check(
external_auth_url=external_auth_url,
request=request,
organization=organization,
hf_timeout_seconds=hf_timeout_seconds,
)
dataset_git_revision = get_dataset_git_revision(
dataset=dataset, hf_endpoint=hf_endpoint, hf_token=hf_token, hf_timeout_seconds=hf_timeout_seconds
)
dataset_orchestrator = DatasetOrchestrator(dataset=dataset, processing_graph=processing_graph)
dataset_orchestrator.backfill(
revision=dataset_git_revision, priority=Priority.LOW, cache_max_days=cache_max_days
)
return get_json_ok_response(
{"status": "ok", "message": "Backfilling dataset."},
max_age=0,
)
except CustomError as e:
return get_json_admin_error_response(e, max_age=0)
except Exception as e:
return get_json_admin_error_response(UnexpectedError("Unexpected error.", e), max_age=0)
return dataset_backfill_endpoint
| datasets-server-main | services/admin/src/admin/routes/dataset_backfill.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
import logging
from typing import Optional, TypedDict
from libcommon.dataset import get_supported_dataset_infos
from libcommon.simple_cache import (
delete_dataset_responses,
get_all_datasets,
get_cache_count_for_dataset,
)
from libcommon.storage import StrPath
from libcommon.viewer_utils.asset import delete_asset_dir
from starlette.requests import Request
from starlette.responses import Response
from admin.authentication import auth_check
from admin.utils import (
Endpoint,
UnexpectedError,
get_json_admin_error_response,
get_json_ok_response,
)
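# Safety threshold: delete_obsolete_cache refuses to run when the Hub returns fewer supported
# datasets than this, to avoid wiping the cache because of a truncated or failed listing.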
MINIMUM_SUPPORTED_DATASETS = 20_000
class DatasetCacheReport(TypedDict):
dataset: str
cache_records: Optional[int]
def get_supported_dataset_names(
hf_endpoint: str,
hf_token: Optional[str] = None,
) -> set[str]:
supported_dataset_infos = get_supported_dataset_infos(hf_endpoint=hf_endpoint, hf_token=hf_token)
return {dataset_info.id for dataset_info in supported_dataset_infos}
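# A cache entry is considered obsolete when its dataset no longer belongs to the set of
# supported datasets on the Hub.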
def get_obsolete_cache(
hf_endpoint: str,
hf_token: Optional[str] = None,
) -> list[DatasetCacheReport]:
supported_dataset_names = get_supported_dataset_names(hf_endpoint=hf_endpoint, hf_token=hf_token)
existing_datasets = get_all_datasets()
datasets_to_delete = existing_datasets.difference(supported_dataset_names)
return [
DatasetCacheReport(dataset=dataset, cache_records=get_cache_count_for_dataset(dataset=dataset))
for dataset in datasets_to_delete
]
def create_get_obsolete_cache_endpoint(
hf_endpoint: str,
max_age: int,
hf_token: Optional[str] = None,
external_auth_url: Optional[str] = None,
organization: Optional[str] = None,
hf_timeout_seconds: Optional[float] = None,
) -> Endpoint:
async def get_obsolete_cache_endpoint(request: Request) -> Response:
try:
logging.info("/obsolete-cache")
auth_check(
external_auth_url=external_auth_url,
request=request,
organization=organization,
hf_timeout_seconds=hf_timeout_seconds,
)
return get_json_ok_response(
get_obsolete_cache(hf_endpoint=hf_endpoint, hf_token=hf_token), max_age=max_age
)
except Exception as e:
return get_json_admin_error_response(UnexpectedError("Unexpected error.", e), max_age=max_age)
return get_obsolete_cache_endpoint
def delete_obsolete_cache(
hf_endpoint: str,
assets_directory: StrPath,
cached_assets_directory: StrPath,
hf_token: Optional[str] = None,
) -> list[DatasetCacheReport]:
supported_dataset_names = get_supported_dataset_names(hf_endpoint=hf_endpoint, hf_token=hf_token)
if len(supported_dataset_names) < MINIMUM_SUPPORTED_DATASETS:
raise UnexpectedError(f"only {len(supported_dataset_names)} datasets were found")
existing_datasets = get_all_datasets()
datasets_to_delete = existing_datasets.difference(supported_dataset_names)
deletion_report = []
for dataset in datasets_to_delete:
# delete cache records
datasets_cache_records = delete_dataset_responses(dataset=dataset)
if datasets_cache_records is not None and datasets_cache_records > 0:
# delete assets
delete_asset_dir(dataset=dataset, directory=assets_directory)
delete_asset_dir(dataset=dataset, directory=cached_assets_directory)
logging.debug(f"{dataset} has been delete with {datasets_cache_records} cache records")
else:
logging.debug(f"unable to delete {dataset}")
deletion_report.append(DatasetCacheReport(dataset=dataset, cache_records=datasets_cache_records))
return deletion_report
def create_delete_obsolete_cache_endpoint(
hf_endpoint: str,
max_age: int,
assets_directory: StrPath,
cached_assets_directory: StrPath,
hf_token: Optional[str] = None,
external_auth_url: Optional[str] = None,
organization: Optional[str] = None,
hf_timeout_seconds: Optional[float] = None,
) -> Endpoint:
async def delete_obsolete_cache_endpoint(request: Request) -> Response:
try:
logging.info("/obsolete-cache")
auth_check(
external_auth_url=external_auth_url,
request=request,
organization=organization,
hf_timeout_seconds=hf_timeout_seconds,
)
return get_json_ok_response(
delete_obsolete_cache(
hf_endpoint=hf_endpoint,
hf_token=hf_token,
assets_directory=assets_directory,
cached_assets_directory=cached_assets_directory,
),
max_age=max_age,
)
except Exception as e:
return get_json_admin_error_response(UnexpectedError("Unexpected error.", e), max_age=max_age)
return delete_obsolete_cache_endpoint
| datasets-server-main | services/admin/src/admin/routes/obsolete_cache.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
import asyncio
from collections.abc import Iterator
import pytest
from environs import Env
from libcommon.constants import CACHE_COLLECTION_RESPONSES
from libcommon.resources import CacheMongoResource
from libcommon.simple_cache import CachedResponseDocument, _clean_cache_database
from sse_api.config import AppConfig
# see https://github.com/pytest-dev/pytest/issues/363#issuecomment-406536200
@pytest.fixture(scope="session")
def monkeypatch_session() -> Iterator[pytest.MonkeyPatch]:
monkeypatch_session = pytest.MonkeyPatch()
monkeypatch_session.setenv("CACHE_MONGO_DATABASE", "datasets_server_cache_test")
monkeypatch_session.setenv("QUEUE_MONGO_DATABASE", "datasets_server_queue_test")
hostname = "localhost"
port = "8888"
monkeypatch_session.setenv("API_HF_TIMEOUT_SECONDS", "10")
monkeypatch_session.setenv("API_UVICORN_HOSTNAME", hostname)
monkeypatch_session.setenv("API_UVICORN_PORT", port)
monkeypatch_session.setenv("COMMON_HF_ENDPOINT", f"http://{hostname}:{port}")
yield monkeypatch_session
monkeypatch_session.undo()
@pytest.fixture(scope="session")
def app_config(monkeypatch_session: pytest.MonkeyPatch) -> AppConfig:
app_config = AppConfig.from_env()
if "test" not in app_config.cache.mongo_database or "test" not in app_config.queue.mongo_database:
raise ValueError("Test must be launched on a test mongo database")
return app_config
@pytest.fixture(scope="session")
def env() -> Env:
return Env(expand_vars=True)
@pytest.fixture(scope="session")
def cache_mongo_host(env: Env) -> str:
try:
url = env.str(name="CACHE_MONGO_URL")
if type(url) is not str:
raise ValueError("CACHE_MONGO_URL is not set")
return url
except Exception as e:
raise ValueError("CACHE_MONGO_URL is not set") from e
@pytest.fixture(scope="function")
def cache_mongo_resource(cache_mongo_host: str) -> Iterator[CacheMongoResource]:
database = "datasets_server_cache_test"
host = cache_mongo_host
if "test" not in database:
raise ValueError("Test must be launched on a test mongo database")
with CacheMongoResource(database=database, host=host) as cache_mongo_resource:
_clean_cache_database()
cache_mongo_resource.create_collection(CachedResponseDocument)
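        # changeStreamPreAndPostImages must be enabled so that delete events expose
        # fullDocumentBeforeChange, which the watcher relies on to detect deletions.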
cache_mongo_resource.enable_pre_and_post_images(CACHE_COLLECTION_RESPONSES)
yield cache_mongo_resource
_clean_cache_database()
cache_mongo_resource.release()
@pytest.fixture(scope="session")
def event_loop() -> Iterator[asyncio.AbstractEventLoop]:
"""
Create an instance of the default event loop for each test case.
See https://github.com/pytest-dev/pytest-asyncio/issues/38#issuecomment-264418154
"""
loop = asyncio.get_event_loop_policy().new_event_loop()
yield loop
loop.close()
| datasets-server-main | services/sse-api/tests/conftest.py |
import asyncio
import socket
from typing import Optional
import uvicorn
from starlette.applications import Starlette
from uvicorn.config import Config
class UvicornServer(uvicorn.Server):
"""
Wrapper around uvicorn.Server to be able to run it in async tests
See https://github.com/encode/uvicorn/discussions/1103#discussioncomment-4240770
"""
serve_task: asyncio.Task[Starlette]
did_start: asyncio.Event
did_close: asyncio.Event
def __init__(self, config: Config) -> None:
super().__init__(config=config)
self.did_start = asyncio.Event()
self.did_close = asyncio.Event()
async def start(self, sockets: Optional[list[socket.socket]] = None) -> None:
self.serve_task = asyncio.create_task(self.serve(sockets=sockets)) # type: ignore
self.serve_task.add_done_callback(lambda _: self.did_close.set())
await self.did_start.wait()
async def startup(self, sockets: Optional[list[socket.socket]] = None) -> None:
await super().startup(sockets=sockets) # type: ignore
self.did_start.set()
async def shutdown(self, sockets: Optional[list[socket.socket]] = None) -> None:
await super().shutdown()
self.serve_task.cancel()
await self.did_close.wait()
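# Illustrative usage in an async test (a sketch mirroring the app_test fixture, not part of the
# original module; `my_starlette_app` is a placeholder):
#
#   config = Config(app=my_starlette_app, port=5555, log_level="warning", loop="asyncio")
#   server = UvicornServer(config)
#   await server.start()     # returns once the server has started (did_start is set)
#   ...                      # exercise the app over HTTP
#   await server.shutdown()  # cancels the serve task and waits for did_close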
| datasets-server-main | services/sse-api/tests/uvicorn_server.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
| datasets-server-main | services/sse-api/tests/__init__.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
import asyncio
import json
from collections.abc import AsyncGenerator
from http import HTTPStatus
from typing import Any
import httpx
import pytest
import pytest_asyncio
import uvicorn
from httpx_sse import aconnect_sse
from libcommon.resources import CacheMongoResource
from libcommon.simple_cache import delete_response, upsert_response
from starlette.applications import Starlette
from sse_api.app import create_app_with_config
from sse_api.config import AppConfig
from sse_api.constants import HUB_CACHE_KIND
from .uvicorn_server import UvicornServer
@pytest_asyncio.fixture(scope="function")
async def app_test(
app_config: AppConfig, event_loop: asyncio.events.AbstractEventLoop
) -> AsyncGenerator[Starlette, None]:
app = create_app_with_config(app_config)
config = uvicorn.Config(app=app, port=5555, log_level="warning", loop="asyncio") # event_loop)
server = UvicornServer(config)
await server.start()
try:
yield app
finally:
await server.shutdown()
TIMEOUT = 0.5
@pytest_asyncio.fixture(scope="function")
async def client(app_test: Starlette) -> AsyncGenerator[httpx.AsyncClient, None]:
async with httpx.AsyncClient(base_url=APP_HOST, timeout=TIMEOUT) as client:
yield client
async def sleep() -> None:
await asyncio.sleep(TIMEOUT / 10)
APP_HOST = "http://localhost:5555"
@pytest.mark.asyncio
async def test_provided_loop_is_running_loop(event_loop: asyncio.events.AbstractEventLoop) -> None:
assert event_loop is asyncio.get_running_loop()
@pytest.mark.asyncio
async def test_get_healthcheck(client: httpx.AsyncClient) -> None:
response = await client.get("/healthcheck")
assert response.status_code == 200
assert response.text == "ok"
@pytest.mark.asyncio
async def test_metrics(client: httpx.AsyncClient) -> None:
response = await client.get("/metrics")
assert response.status_code == 200
text = response.text
lines = text.split("\n")
# examples:
# starlette_requests_total{method="GET",path_template="/metrics"} 1.0
# method_steps_processing_time_seconds_sum{method="healthcheck_endpoint",step="all"} 1.6772013623267412e-05
metrics = {
parts[0]: float(parts[1]) for line in lines if line and line[0] != "#" and (parts := line.rsplit(" ", 1))
}
# the metrics should contain at least the following
for name in [
'starlette_requests_total{method="GET",path_template="/metrics"}',
'method_steps_processing_time_seconds_sum{context="None",method="healthcheck_endpoint",step="all"}',
]:
assert name in metrics, metrics
assert metrics[name] > 0, metrics
def init_hub_cache() -> None:
# prepare the content of the cache
upsert_response(
kind=HUB_CACHE_KIND,
dataset="dataset1",
content={
"preview": True,
"viewer": True,
"partial": False,
"num_rows": 100,
},
http_status=HTTPStatus.OK,
)
upsert_response(
kind=HUB_CACHE_KIND,
dataset="dataset2",
content={
"preview": True,
"viewer": True,
"partial": False,
"num_rows": 100,
},
http_status=HTTPStatus.OK,
)
upsert_response(
kind=HUB_CACHE_KIND + "-NOT",
dataset="dataset1",
content={
"preview": True,
"viewer": True,
"partial": False,
"num_rows": 100,
},
http_status=HTTPStatus.OK,
)
upsert_response(
kind=HUB_CACHE_KIND,
dataset="dataset3",
content={
"preview": False,
"viewer": True,
"partial": False,
"num_rows": 100,
},
http_status=HTTPStatus.INTERNAL_SERVER_ERROR,
)
async def update_hub_cache() -> None:
await sleep()
upsert_response(
kind=HUB_CACHE_KIND,
dataset="dataset1",
content={
"preview": True,
"viewer": True,
"partial": False,
"num_rows": 100,
},
http_status=HTTPStatus.OK,
)
await sleep()
upsert_response(
kind=HUB_CACHE_KIND,
dataset="dataset1",
content={
"preview": True,
"viewer": True,
"partial": False,
"num_rows": 100,
},
http_status=HTTPStatus.OK,
)
await sleep()
upsert_response(
kind=HUB_CACHE_KIND,
dataset="dataset2",
content={
"preview": True,
"viewer": True,
"partial": False,
"num_rows": 100,
},
http_status=HTTPStatus.OK,
)
await sleep()
upsert_response(
kind=HUB_CACHE_KIND + "-NOT",
dataset="dataset2",
content={
"preview": True,
"viewer": True,
"partial": False,
"num_rows": 100,
},
http_status=HTTPStatus.OK,
)
await sleep()
upsert_response(
kind=HUB_CACHE_KIND,
dataset="dataset1",
content={
"preview": False,
"viewer": True,
"partial": False,
"num_rows": 100,
},
http_status=HTTPStatus.OK,
)
await sleep()
upsert_response(
kind=HUB_CACHE_KIND,
dataset="dataset1",
content={
"preview": False,
"viewer": True,
"partial": False,
"num_rows": 100,
}, # ^ not important
http_status=HTTPStatus.INTERNAL_SERVER_ERROR,
)
await sleep()
delete_response(
kind=HUB_CACHE_KIND,
dataset="dataset1",
)
await sleep()
upsert_response(
kind=HUB_CACHE_KIND,
dataset="dataset1",
content={
"preview": False,
"viewer": True,
"partial": False,
"num_rows": 100,
}, # ^ not important
http_status=HTTPStatus.INTERNAL_SERVER_ERROR,
)
await sleep()
upsert_response(
kind=HUB_CACHE_KIND,
dataset="dataset1",
content={
"preview": False,
"viewer": True,
"partial": False,
"num_rows": 100,
}, # ^ not important
http_status=HTTPStatus.OK,
)
EventsList = list[dict[str, Any]]
INIT_ONLY_EVENTS: EventsList = [
{
"dataset": "dataset1",
"hub_cache": {
"preview": True,
"viewer": True,
"partial": False,
"num_rows": 100,
},
},
{
"dataset": "dataset2",
"hub_cache": {
"preview": True,
"viewer": True,
"partial": False,
"num_rows": 100,
},
},
{"dataset": "dataset3", "hub_cache": None},
]
UPDATE_ONLY_EVENTS: EventsList = [
{
"dataset": "dataset1",
"hub_cache": {
"preview": True,
"viewer": True,
"partial": False,
"num_rows": 100,
},
},
{
"dataset": "dataset2",
"hub_cache": {
"preview": True,
"viewer": True,
"partial": False,
"num_rows": 100,
},
},
{
"dataset": "dataset1",
"hub_cache": {
"preview": False,
"viewer": True,
"partial": False,
"num_rows": 100,
},
},
{"dataset": "dataset1", "hub_cache": None},
{"dataset": "dataset1", "hub_cache": None},
{"dataset": "dataset1", "hub_cache": None},
{
"dataset": "dataset1",
"hub_cache": {
"preview": False,
"viewer": True,
"partial": False,
"num_rows": 100,
},
},
]
UPDATE_ONLY_AFTER_INIT_EVENTS: EventsList = [
{
"dataset": "dataset1",
"hub_cache": {
"preview": False,
"viewer": True,
"partial": False,
"num_rows": 100,
},
},
{"dataset": "dataset1", "hub_cache": None},
{"dataset": "dataset1", "hub_cache": None},
{"dataset": "dataset1", "hub_cache": None},
{
"dataset": "dataset1",
"hub_cache": {
"preview": False,
"viewer": True,
"partial": False,
"num_rows": 100,
},
},
]
INIT_AND_UPDATE_EVENTS: EventsList = INIT_ONLY_EVENTS + UPDATE_ONLY_AFTER_INIT_EVENTS
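# check() consumes the SSE stream until the client read times out (TIMEOUT), then asserts that
# exactly the expected events were received, in order.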
async def check(client: httpx.AsyncClient, url: str, expected_events: EventsList) -> None:
async with aconnect_sse(client, url) as event_source:
event_iter = event_source.aiter_sse()
i = 0
while True:
try:
event = await event_iter.__anext__()
# event = await anext(event_iter)
# ^ only available in 3.10
assert event.event == "message", event.data
assert event.data == json.dumps(expected_events[i])
i += 1
except httpx.ReadTimeout:
break
assert i == len(expected_events)
@pytest.mark.asyncio
async def test_hub_cache_only_updates(
client: httpx.AsyncClient,
cache_mongo_resource: CacheMongoResource,
event_loop: asyncio.AbstractEventLoop,
) -> None:
update_task = event_loop.create_task(update_hub_cache())
try:
await check(client, f"{APP_HOST}/hub-cache", UPDATE_ONLY_EVENTS)
except Exception as err:
update_task.cancel()
raise err
else:
await update_task
@pytest.mark.parametrize(
("all", "expected_events"),
[
("?all=true", INIT_ONLY_EVENTS),
("", []),
("?all=false", []),
],
)
@pytest.mark.asyncio
async def test_hub_cache_only_initialization(
client: httpx.AsyncClient,
cache_mongo_resource: CacheMongoResource,
event_loop: asyncio.AbstractEventLoop,
all: str,
expected_events: EventsList,
) -> None:
init_hub_cache()
await check(client, f"{APP_HOST}/hub-cache{all}", expected_events)
@pytest.mark.parametrize(
("all", "expected_events"),
[
("?all=true", INIT_AND_UPDATE_EVENTS),
("?all=false", UPDATE_ONLY_AFTER_INIT_EVENTS),
("", UPDATE_ONLY_AFTER_INIT_EVENTS),
],
)
@pytest.mark.asyncio
async def test_hub_cache_initialization_and_updates(
client: httpx.AsyncClient,
cache_mongo_resource: CacheMongoResource,
event_loop: asyncio.AbstractEventLoop,
all: str,
expected_events: EventsList,
) -> None:
init_hub_cache()
update_task = event_loop.create_task(update_hub_cache())
# ^ We are not testing concurrency between the loop on the initial content and the loop on the updates
try:
await check(client, f"{APP_HOST}/hub-cache{all}", expected_events)
except Exception as err:
update_task.cancel()
raise err
else:
await update_task
| datasets-server-main | services/sse-api/tests/test_app.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
from dataclasses import dataclass, field
from libapi.config import ApiConfig
from libcommon.config import (
CacheConfig,
CommonConfig,
LogConfig,
ProcessingGraphConfig,
QueueConfig,
)
@dataclass(frozen=True)
class AppConfig:
api: ApiConfig = field(default_factory=ApiConfig)
cache: CacheConfig = field(default_factory=CacheConfig)
common: CommonConfig = field(default_factory=CommonConfig)
log: LogConfig = field(default_factory=LogConfig)
queue: QueueConfig = field(default_factory=QueueConfig)
processing_graph: ProcessingGraphConfig = field(default_factory=ProcessingGraphConfig)
@classmethod
def from_env(cls) -> "AppConfig":
common_config = CommonConfig.from_env()
return cls(
common=common_config,
cache=CacheConfig.from_env(),
log=LogConfig.from_env(),
processing_graph=ProcessingGraphConfig.from_env(),
queue=QueueConfig.from_env(),
api=ApiConfig.from_env(hf_endpoint=common_config.hf_endpoint),
)
| datasets-server-main | services/sse-api/src/sse_api/config.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
HUB_CACHE_KIND = "dataset-hub-cache"
| datasets-server-main | services/sse-api/src/sse_api/constants.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
| datasets-server-main | services/sse-api/src/sse_api/__init__.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
import asyncio
import uvicorn
from libapi.config import UvicornConfig
from libapi.routes.healthcheck import healthcheck_endpoint
from libapi.routes.metrics import create_metrics_endpoint
from libapi.utils import EXPOSED_HEADERS
from libcommon.constants import CACHE_COLLECTION_RESPONSES
from libcommon.log import init_logging
from libcommon.resources import CacheMongoResource
from libcommon.simple_cache import CachedResponseDocument
from motor.motor_asyncio import AsyncIOMotorClient
from starlette.applications import Starlette
from starlette.middleware import Middleware
from starlette.middleware.cors import CORSMiddleware
from starlette.routing import Route
from starlette_prometheus import PrometheusMiddleware
from sse_api.config import AppConfig
from sse_api.routes.hub_cache import create_hub_cache_endpoint
from sse_api.watcher import HubCacheWatcher
def create_app() -> Starlette:
app_config = AppConfig.from_env()
return create_app_with_config(app_config=app_config)
def create_app_with_config(app_config: AppConfig) -> Starlette:
init_logging(level=app_config.log.level)
# ^ set first to have logs as soon as possible
# ensure the collection has changeStreamPreAndPostImages enabled (required to report the delete events)
with CacheMongoResource(database=app_config.cache.mongo_database, host=app_config.cache.mongo_url) as resource:
if not resource.is_available():
raise Exception("MongoDB is not available")
resource.create_collection(CachedResponseDocument)
resource.enable_pre_and_post_images(CACHE_COLLECTION_RESPONSES)
hub_cache_watcher = HubCacheWatcher(
client=AsyncIOMotorClient(host=app_config.cache.mongo_url, io_loop=asyncio.get_running_loop()),
db_name=app_config.cache.mongo_database,
collection_name=CACHE_COLLECTION_RESPONSES,
)
middleware = [
Middleware(
CORSMiddleware,
allow_origins=["*"],
allow_methods=["*"],
allow_headers=["*"],
allow_credentials=True,
expose_headers=EXPOSED_HEADERS,
),
# https://github.com/sysid/sse-starlette
# > Caveat: SSE streaming does not work in combination with GZipMiddleware.
Middleware(PrometheusMiddleware, filter_unhandled_paths=True),
]
routes = [
Route("/hub-cache", endpoint=create_hub_cache_endpoint(hub_cache_watcher=hub_cache_watcher)),
Route("/healthcheck", endpoint=healthcheck_endpoint),
Route("/metrics", endpoint=create_metrics_endpoint()),
# ^ called by Prometheus
]
return Starlette(
routes=routes,
middleware=middleware,
on_startup=[hub_cache_watcher.start_watching],
on_shutdown=[hub_cache_watcher.stop_watching],
)
def start() -> None:
uvicorn_config = UvicornConfig.from_env()
uvicorn.run(
"app:create_app",
host=uvicorn_config.hostname,
port=uvicorn_config.port,
factory=True,
workers=uvicorn_config.num_workers,
)
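# Illustrative consumption sketch (host and port depend on the uvicorn configuration and are
# assumed here):
#   curl -N "http://localhost:8080/hub-cache?all=true"
# Each notification is an SSE "message" event whose data is a JSON object with the fields
# "dataset" and "hub_cache" (null when the entry has been deleted or is an error response).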
| datasets-server-main | services/sse-api/src/sse_api/app.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
import asyncio
import contextlib
from collections.abc import Mapping, Sequence
from dataclasses import dataclass
from http import HTTPStatus
from typing import Any, Optional
from uuid import uuid4
from motor.motor_asyncio import AsyncIOMotorClient
from pymongo.errors import PyMongoError
from sse_api.constants import HUB_CACHE_KIND
DatasetHubCacheResponse = Mapping[str, Any]
class ChangeStreamInitError(Exception):
pass
@dataclass
class HubCacheChangedEventValue:
dataset: str
hub_cache: Optional[DatasetHubCacheResponse]
# ^ None if the dataset has been deleted, or the value is an error response
class HubCacheChangedEvent(asyncio.Event):
"""Subclass of asyncio.Event which is able to send a value to the waiter"""
_hub_cache_value: Optional[HubCacheChangedEventValue]
def __init__(self, *, hub_cache_value: Optional[HubCacheChangedEventValue] = None):
super().__init__()
self._hub_cache_value = hub_cache_value
super().set()
def set_value(self, *, hub_cache_value: Optional[HubCacheChangedEventValue] = None) -> None:
self._hub_cache_value = hub_cache_value
return super().set()
async def wait_value(self) -> Optional[HubCacheChangedEventValue]:
"""The caller is responsible to call self.clear() when the event has been handled"""
await super().wait()
return self._hub_cache_value
@dataclass
class HubCachePublisher:
_watchers: dict[str, HubCacheChangedEvent]
def _notify_change(
self,
*,
dataset: str,
hub_cache: Optional[DatasetHubCacheResponse],
suscriber: Optional[str] = None,
) -> None:
hub_cache_value = HubCacheChangedEventValue(dataset=dataset, hub_cache=hub_cache)
for watcher, event in self._watchers.items():
if suscriber is None or suscriber == watcher:
event.set_value(hub_cache_value=hub_cache_value)
def _unsubscribe(self, uuid: str) -> None:
self._watchers.pop(uuid)
def _subscribe(self) -> tuple[str, HubCacheChangedEvent]:
event = HubCacheChangedEvent()
uuid = uuid4().hex
self._watchers[uuid] = event
return (uuid, event)
class HubCacheWatcher:
"""
Utility to watch the value of the cache entries with kind 'dataset-hub-cache'.
"""
_watch_task: asyncio.Task[None] # <- not sure about the type
def __init__(self, client: AsyncIOMotorClient, db_name: str, collection_name: str) -> None:
self._client = client
self._collection = self._client[db_name][collection_name]
self._publisher = HubCachePublisher(_watchers={})
def run_initialization(self, suscriber: str) -> asyncio.Task[Any]:
return asyncio.create_task(self._init_loop(suscriber=suscriber))
def start_watching(self) -> None:
self._watch_task = asyncio.create_task(self._watch_loop())
async def stop_watching(self) -> None:
self._watch_task.cancel()
with contextlib.suppress(asyncio.CancelledError):
await self._watch_task
def subscribe(self) -> tuple[str, HubCacheChangedEvent]:
"""
        Subscribe to changes in the 'dataset-hub-cache' cache entries.
        The caller is responsible for calling `self.unsubscribe` to release resources.
        Returns:
            (str, HubCacheChangedEvent):
                A 2-tuple containing a UUID and an instance of HubCacheChangedEvent.
                The HubCacheChangedEvent can be `await`ed to be notified of updates to the hub cache entries.
                The UUID must be passed when unsubscribing to release the associated resources.
"""
return self._publisher._subscribe()
def unsubscribe(self, uuid: str) -> None:
"""
        Release the resources allocated to subscribe to the hub cache changes.
"""
pub = self._publisher
pub._unsubscribe(uuid)
async def _init_loop(self, suscriber: str) -> None:
"""
publish an event for each initial dataset-hub-cache cache entry.
        TODO: we don't want to send to all the subscribers
"""
async for document in self._collection.find(
filter={"kind": HUB_CACHE_KIND},
projection={"dataset": 1, "content": 1, "http_status": 1},
sort=[("_id", 1)],
batch_size=1,
):
# ^ should we use batch_size=100 instead, and send a list of contents?
dataset = document["dataset"]
self._publisher._notify_change(
suscriber=suscriber,
dataset=dataset,
hub_cache=(document["content"] if document["http_status"] == HTTPStatus.OK else None),
)
async def _watch_loop(self) -> None:
"""
publish a new event, on every change in a dataset-hub-cache cache entry.
"""
pipeline: Sequence[Mapping[str, Any]] = [
{
"$match": {
"$or": [
{"fullDocument.kind": HUB_CACHE_KIND},
{"fullDocumentBeforeChange.kind": HUB_CACHE_KIND},
],
"operationType": {"$in": ["insert", "update", "replace", "delete"]},
},
},
{
"$project": {
"fullDocument": 1,
"fullDocumentBeforeChange": 1,
"updateDescription": 1,
"operationType": 1,
},
},
]
resume_token = None
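        # The resume token lets the change stream continue from the last seen event after a
        # transient PyMongoError; if the very first watch attempt fails (token still None),
        # a ChangeStreamInitError is raised.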
while True:
try:
async with self._collection.watch(
pipeline,
resume_after=resume_token,
full_document="updateLookup",
full_document_before_change="whenAvailable",
) as stream:
async for change in stream:
resume_token = stream.resume_token
operation = change["operationType"]
if (
operation == "delete"
and "fullDocumentBeforeChange" in change
and change["fullDocumentBeforeChange"]["kind"] == HUB_CACHE_KIND
):
dataset = change["fullDocumentBeforeChange"]["dataset"]
self._publisher._notify_change(dataset=dataset, hub_cache=None)
continue
if change["fullDocument"]["kind"] != HUB_CACHE_KIND:
continue
if operation == "update" and not any(
field in change["updateDescription"]["updatedFields"]
for field in ["content", "http_status"]
):
# ^ no change, skip
continue
self._publisher._notify_change(
dataset=change["fullDocument"]["dataset"],
hub_cache=(
change["fullDocument"]["content"]
if change["fullDocument"]["http_status"] == HTTPStatus.OK
else None
),
)
except PyMongoError:
if resume_token is None:
raise ChangeStreamInitError()
| datasets-server-main | services/sse-api/src/sse_api/watcher.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
from sse_api.app import start
if __name__ == "__main__":
start()
| datasets-server-main | services/sse-api/src/sse_api/main.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
import dataclasses
import json
import logging
from asyncio import CancelledError
from collections.abc import AsyncGenerator, AsyncIterable
from libapi.utils import Endpoint
from sse_starlette import EventSourceResponse, ServerSentEvent
from starlette.requests import Request
from starlette.responses import Response
from sse_api.watcher import HubCacheWatcher
# https://sysid.github.io/server-sent-events/
# > Reliability: Maintaining a unique Id with messages the server can see that the client missed a number of messages
# > and send the backlog of missed messages on reconnect.
# ^ how do we manage errors and re-connections?
# Also: how do we manage multiple SSE servers / uvicorn workers / load-balancing?
def create_hub_cache_endpoint(hub_cache_watcher: HubCacheWatcher) -> Endpoint:
async def hub_cache_endpoint(request: Request) -> Response:
logging.info("/hub-cache")
all = request.query_params.get("all", "false").lower() == "true"
# ^ the values that trigger the initialization are "true", "True" and any other case-insensitive variant
uuid, event = hub_cache_watcher.subscribe()
if all:
init_task = hub_cache_watcher.run_initialization(uuid)
async def event_generator() -> AsyncGenerator[ServerSentEvent, None]:
try:
while True:
new_value = await event.wait_value()
event.clear()
if new_value is not None:
yield ServerSentEvent(data=json.dumps(dataclasses.asdict(new_value)), event="message")
finally:
hub_cache_watcher.unsubscribe(uuid)
if all:
await init_task
return EventSourceResponse(error_handling(event_generator()), media_type="text/event-stream")
return hub_cache_endpoint
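# error_handling wraps the SSE generator so that a cancelled connection or an unexpected
# exception is surfaced to the client as an "error" event before being re-raised.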
async def error_handling(
sse_generator: AsyncGenerator[ServerSentEvent, None],
) -> AsyncIterable[ServerSentEvent]:
try:
async for event in sse_generator:
yield event
except CancelledError:
yield ServerSentEvent("Connection closed", event="error")
raise
except Exception:
yield ServerSentEvent("Internal server error", event="error")
raise
| datasets-server-main | services/sse-api/src/sse_api/routes/hub_cache.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from collections.abc import Iterator
from pathlib import Path
from libapi.config import UvicornConfig
from libcommon.processing_graph import ProcessingGraph
from libcommon.queue import _clean_queue_database
from libcommon.resources import CacheMongoResource, QueueMongoResource
from libcommon.simple_cache import _clean_cache_database
from libcommon.storage import StrPath, init_cached_assets_dir, init_parquet_metadata_dir
from pytest import MonkeyPatch, fixture
from rows.config import AppConfig
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.fsspec"]
# see https://github.com/pytest-dev/pytest/issues/363#issuecomment-406536200
@fixture(scope="session")
def monkeypatch_session() -> Iterator[MonkeyPatch]:
monkeypatch_session = MonkeyPatch()
monkeypatch_session.setenv("CACHE_MONGO_DATABASE", "datasets_server_cache_test")
monkeypatch_session.setenv("QUEUE_MONGO_DATABASE", "datasets_server_queue_test")
monkeypatch_session.setenv("CACHED_ASSETS_BASE_URL", "http://localhost/cached-assets")
hostname = "localhost"
port = "8888"
monkeypatch_session.setenv("API_HF_TIMEOUT_SECONDS", "10")
monkeypatch_session.setenv("API_UVICORN_HOSTNAME", hostname)
monkeypatch_session.setenv("API_UVICORN_PORT", port)
monkeypatch_session.setenv("COMMON_HF_ENDPOINT", f"http://{hostname}:{port}")
yield monkeypatch_session
monkeypatch_session.undo()
@fixture(scope="session")
def app_config(monkeypatch_session: MonkeyPatch) -> AppConfig:
app_config = AppConfig.from_env()
if "test" not in app_config.cache.mongo_database or "test" not in app_config.queue.mongo_database:
raise ValueError("Test must be launched on a test mongo database")
return app_config
@fixture(scope="session")
def processing_graph(app_config: AppConfig) -> ProcessingGraph:
return ProcessingGraph(app_config.processing_graph.specification)
@fixture(scope="session")
def rows_endpoint() -> str:
return "/rows"
@fixture(autouse=True)
def cache_mongo_resource(app_config: AppConfig) -> Iterator[CacheMongoResource]:
with CacheMongoResource(database=app_config.cache.mongo_database, host=app_config.cache.mongo_url) as resource:
yield resource
_clean_cache_database()
@fixture(autouse=True)
def queue_mongo_resource(app_config: AppConfig) -> Iterator[QueueMongoResource]:
with QueueMongoResource(database=app_config.queue.mongo_database, host=app_config.queue.mongo_url) as resource:
yield resource
_clean_queue_database()
@fixture(scope="session")
def uvicorn_config(monkeypatch_session: MonkeyPatch) -> UvicornConfig:
return UvicornConfig.from_env()
@fixture(scope="session")
def httpserver_listen_address(uvicorn_config: UvicornConfig) -> tuple[str, int]:
return (uvicorn_config.hostname, uvicorn_config.port)
@fixture(scope="session")
def hf_endpoint(app_config: AppConfig) -> str:
return app_config.common.hf_endpoint
@fixture(scope="session")
def hf_auth_path(app_config: AppConfig) -> str:
return app_config.api.hf_auth_path
@fixture
def cached_assets_directory(app_config: AppConfig) -> StrPath:
return init_cached_assets_dir(app_config.cached_assets.storage_directory)
@fixture
def parquet_metadata_directory(app_config: AppConfig) -> StrPath:
return init_parquet_metadata_dir(app_config.parquet_metadata.storage_directory)
@fixture
def image_path() -> str:
image_path = Path(__file__).resolve().parent / "data" / "test_image_rgb.jpg"
assert image_path.is_file()
return str(image_path)
| datasets-server-main | services/rows/tests/conftest.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
| datasets-server-main | services/rows/tests/__init__.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from typing import Optional
import pytest
from starlette.testclient import TestClient
from rows.app import create_app_with_config
from rows.config import AppConfig
@pytest.fixture(scope="module")
def client(monkeypatch_session: pytest.MonkeyPatch, app_config: AppConfig) -> TestClient:
return TestClient(create_app_with_config(app_config=app_config))
def test_cors(client: TestClient) -> None:
origin = "http://localhost:3000"
method = "GET"
header = "X-Requested-With"
response = client.options(
"/rows?dataset=dataset1&config=config1&split=train",
headers={
"Origin": origin,
"Access-Control-Request-Method": method,
"Access-Control-Request-Headers": header,
},
)
assert response.status_code == 200
assert (
origin in [o.strip() for o in response.headers["Access-Control-Allow-Origin"].split(",")]
or response.headers["Access-Control-Allow-Origin"] == "*"
)
assert (
header in [o.strip() for o in response.headers["Access-Control-Allow-Headers"].split(",")]
or response.headers["Access-Control-Expose-Headers"] == "*"
)
assert (
method in [o.strip() for o in response.headers["Access-Control-Allow-Methods"].split(",")]
or response.headers["Access-Control-Expose-Headers"] == "*"
)
assert response.headers["Access-Control-Allow-Credentials"] == "true"
def test_get_healthcheck(client: TestClient) -> None:
response = client.get("/healthcheck")
assert response.status_code == 200
assert response.text == "ok"
def test_get_rows(client: TestClient) -> None:
# missing parameter
response = client.get("/rows")
assert response.status_code == 422
@pytest.mark.parametrize(
"dataset,config,split",
[
(None, None, None),
("a", None, None),
("a", "b", None),
("a", "b", ""),
],
)
def test_get_split_missing_parameter(
client: TestClient,
dataset: Optional[str],
config: Optional[str],
split: Optional[str],
) -> None:
response = client.get("/rows", params={"dataset": dataset, "config": config, "split": split})
assert response.status_code == 422
def test_metrics(client: TestClient) -> None:
response = client.get("/healthcheck")
response = client.get("/metrics")
assert response.status_code == 200
text = response.text
lines = text.split("\n")
# examples:
# starlette_requests_total{method="GET",path_template="/metrics"} 1.0
# method_steps_processing_time_seconds_sum{method="healthcheck_endpoint",step="all"} 1.6772013623267412e-05
metrics = {
parts[0]: float(parts[1]) for line in lines if line and line[0] != "#" and (parts := line.rsplit(" ", 1))
}
# the metrics should contain at least the following
for name in [
'starlette_requests_total{method="GET",path_template="/metrics"}',
'method_steps_processing_time_seconds_sum{context="None",method="healthcheck_endpoint",step="all"}',
]:
assert name in metrics, metrics
assert metrics[name] > 0, metrics
| datasets-server-main | services/rows/tests/test_app.py |
# type: ignore
import posixpath
import shutil
from pathlib import Path
from unittest.mock import patch
import fsspec
import pytest
from fsspec.implementations.local import (
AbstractFileSystem,
LocalFileSystem,
stringify_path,
)
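# MockFileSystem exposes a "mock://" protocol backed by a local root directory; TmpDirFileSystem
# does the same under "tmp://" with a per-test temporary root. Both are registered through the
# mock_fsspec fixture below.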
class MockFileSystem(AbstractFileSystem):
protocol = "mock"
def __init__(self, *args, local_root_dir, **kwargs):
super().__init__()
self._fs = LocalFileSystem(*args, **kwargs)
self.local_root_dir = Path(local_root_dir).resolve().as_posix() + "/"
def mkdir(self, path, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.mkdir(path, *args, **kwargs)
def makedirs(self, path, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.makedirs(path, *args, **kwargs)
def rmdir(self, path):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.rmdir(path)
def ls(self, path, detail=True, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
out = self._fs.ls(path, detail=detail, *args, **kwargs)
if detail:
return [{**info, "name": info["name"][len(self.local_root_dir) :]} for info in out] # noqa: E203
else:
return [name[len(self.local_root_dir) :] for name in out] # noqa: E203
def info(self, path, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
out = dict(self._fs.info(path, *args, **kwargs))
out["name"] = out["name"][len(self.local_root_dir) :] # noqa: E203
return out
def cp_file(self, path1, path2, *args, **kwargs):
path1 = posixpath.join(self.local_root_dir, self._strip_protocol(path1))
path2 = posixpath.join(self.local_root_dir, self._strip_protocol(path2))
return self._fs.cp_file(path1, path2, *args, **kwargs)
def rm_file(self, path, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.rm_file(path, *args, **kwargs)
def rm(self, path, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.rm(path, *args, **kwargs)
def _open(self, path, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs._open(path, *args, **kwargs)
def created(self, path):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.created(path)
def modified(self, path):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.modified(path)
@classmethod
def _strip_protocol(cls, path):
path = stringify_path(path)
if path.startswith("mock://"):
path = path[7:]
return path
class TmpDirFileSystem(MockFileSystem):
protocol = "tmp"
tmp_dir = None
def __init__(self, *args, **kwargs):
assert self.tmp_dir is not None, "TmpDirFileSystem.tmp_dir is not set"
super().__init__(*args, **kwargs, local_root_dir=self.tmp_dir, auto_mkdir=True)
@classmethod
def _strip_protocol(cls, path):
path = stringify_path(path)
if path.startswith("tmp://"):
path = path[6:]
return path
@pytest.fixture
def mock_fsspec():
original_registry = fsspec.registry.copy()
fsspec.register_implementation("mock", MockFileSystem)
fsspec.register_implementation("tmp", TmpDirFileSystem)
yield
fsspec.registry = original_registry
@pytest.fixture
def mockfs(tmp_path_factory, mock_fsspec):
local_fs_dir = tmp_path_factory.mktemp("mockfs")
return MockFileSystem(local_root_dir=local_fs_dir, auto_mkdir=True)
@pytest.fixture
def tmpfs(tmp_path_factory, mock_fsspec):
tmp_fs_dir = tmp_path_factory.mktemp("tmpfs")
with patch.object(TmpDirFileSystem, "tmp_dir", tmp_fs_dir):
yield TmpDirFileSystem()
shutil.rmtree(tmp_fs_dir)
| datasets-server-main | services/rows/tests/fixtures/fsspec.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
| datasets-server-main | services/rows/tests/routes/__init__.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
import os
import shutil
import time
from collections.abc import Generator
from http import HTTPStatus
from pathlib import Path
from typing import Any
from unittest.mock import patch
import pyarrow.parquet as pq
import pytest
from datasets import Dataset, Image, concatenate_datasets
from datasets.table import embed_table_storage
from fsspec import AbstractFileSystem
from fsspec.implementations.http import HTTPFileSystem
from libcommon.parquet_utils import (
Indexer,
ParquetIndexWithMetadata,
RowsIndex,
TooBigRows,
)
from libcommon.processing_graph import ProcessingGraph
from libcommon.simple_cache import _clean_cache_database, upsert_response
from libcommon.storage import StrPath
from libcommon.viewer_utils.asset import update_last_modified_date_of_rows_in_assets_dir
from rows.config import AppConfig
from rows.routes.rows import create_response
@pytest.fixture(autouse=True)
def clean_mongo_databases(app_config: AppConfig) -> None:
_clean_cache_database()
@pytest.fixture(autouse=True)
def enable_parquet_metadata_on_all_datasets() -> Generator[None, None, None]:
with patch("rows.routes.rows.ALL_COLUMNS_SUPPORTED_DATASETS_ALLOW_LIST", "all"):
yield
@pytest.fixture
def ds() -> Dataset:
return Dataset.from_dict({"text": ["Hello there", "General Kenobi"]})
@pytest.fixture
def ds_empty() -> Dataset:
return Dataset.from_dict({"text": ["Hello there", "General Kenobi"]}).select([])
@pytest.fixture
def ds_fs(ds: Dataset, tmpfs: AbstractFileSystem) -> Generator[AbstractFileSystem, None, None]:
with tmpfs.open("default/train/0000.parquet", "wb") as f:
ds.to_parquet(f)
yield tmpfs
@pytest.fixture
def ds_empty_fs(ds_empty: Dataset, tmpfs: AbstractFileSystem) -> Generator[AbstractFileSystem, None, None]:
with tmpfs.open("default/train/0000.parquet", "wb") as f:
ds_empty.to_parquet(f)
yield tmpfs
@pytest.fixture
def ds_sharded(ds: Dataset) -> Dataset:
return concatenate_datasets([ds] * 4)
@pytest.fixture
def ds_sharded_fs(ds: Dataset, tmpfs: AbstractFileSystem) -> Generator[AbstractFileSystem, None, None]:
num_shards = 4
for shard_idx in range(num_shards):
with tmpfs.open(f"default/train/{shard_idx:04d}.parquet", "wb") as f:
ds.to_parquet(f)
yield tmpfs
@pytest.fixture
def ds_image(image_path: str) -> Dataset:
ds = Dataset.from_dict({"image": [image_path]}).cast_column("image", Image())
return Dataset(embed_table_storage(ds.data))
@pytest.fixture
def ds_image_fs(ds_image: Dataset, tmpfs: AbstractFileSystem) -> Generator[AbstractFileSystem, None, None]:
with tmpfs.open("default/train/0000.parquet", "wb") as f:
ds_image.to_parquet(f)
yield tmpfs
@pytest.fixture
def ds_parquet_metadata_dir(
ds_fs: AbstractFileSystem, parquet_metadata_directory: StrPath
) -> Generator[StrPath, None, None]:
parquet_shard_paths = ds_fs.glob("**.parquet")
for parquet_shard_path in parquet_shard_paths:
parquet_file_metadata_path = Path(parquet_metadata_directory) / "ds" / "--" / parquet_shard_path
parquet_file_metadata_path.parent.mkdir(parents=True, exist_ok=True)
with ds_fs.open(parquet_shard_path) as parquet_shard_f:
with open(parquet_file_metadata_path, "wb") as parquet_file_metadata_f:
pq.read_metadata(parquet_shard_f).write_metadata_file(parquet_file_metadata_f)
yield parquet_metadata_directory
shutil.rmtree(Path(parquet_metadata_directory) / "ds")
@pytest.fixture
def dataset_with_config_parquet() -> dict[str, Any]:
config_parquet_content = {
"parquet_files": [
{
"dataset": "ds",
"config": "default",
"split": "train",
"url": "https://fake.huggingface.co/datasets/ds/resolve/refs%2Fconvert%2Fparquet/default/train/0000.parquet", # noqa: E501
"filename": "0000.parquet",
"size": 128,
}
]
}
upsert_response(
kind="config-parquet",
dataset="ds",
config="default",
content=config_parquet_content,
http_status=HTTPStatus.OK,
progress=1.0,
)
return config_parquet_content
@pytest.fixture
def dataset_with_config_parquet_metadata(
ds_fs: AbstractFileSystem, ds_parquet_metadata_dir: StrPath
) -> dict[str, Any]:
config_parquet_content = {
"parquet_files_metadata": [
{
"dataset": "ds",
"config": "default",
"split": "train",
"url": "https://fake.huggingface.co/datasets/ds/resolve/refs%2Fconvert%2Fparquet/default/train/0000.parquet", # noqa: E501
"filename": "0000.parquet",
"size": ds_fs.info("default/train/0000.parquet")["size"],
"num_rows": pq.read_metadata(ds_fs.open("default/train/0000.parquet")).num_rows,
"parquet_metadata_subpath": "ds/--/default/train/0000.parquet",
}
]
}
upsert_response(
kind="config-parquet-metadata",
dataset="ds",
config="default",
content=config_parquet_content,
http_status=HTTPStatus.OK,
progress=1.0,
)
return config_parquet_content
@pytest.fixture
def ds_empty_parquet_metadata_dir(
ds_empty_fs: AbstractFileSystem, parquet_metadata_directory: StrPath
) -> Generator[StrPath, None, None]:
parquet_shard_paths = ds_empty_fs.glob("**.parquet")
for parquet_shard_path in parquet_shard_paths:
parquet_file_metadata_path = Path(parquet_metadata_directory) / "ds_empty" / "--" / parquet_shard_path
parquet_file_metadata_path.parent.mkdir(parents=True, exist_ok=True)
with ds_empty_fs.open(parquet_shard_path) as parquet_shard_f:
with open(parquet_file_metadata_path, "wb") as parquet_file_metadata_f:
pq.read_metadata(parquet_shard_f).write_metadata_file(parquet_file_metadata_f)
yield parquet_metadata_directory
shutil.rmtree(Path(parquet_metadata_directory) / "ds_empty")
@pytest.fixture
def dataset_empty_with_config_parquet() -> dict[str, Any]:
config_parquet_content = {
"parquet_files": [
{
"dataset": "ds_empty",
"config": "default",
"split": "train",
"url": "https://fake.huggingface.co/datasets/ds_empty/resolve/refs%2Fconvert%2Fparquet/default/train/0000.parquet", # noqa: E501
"filename": "0000.parquet",
"size": 128,
}
]
}
upsert_response(
kind="config-parquet",
dataset="ds_empty",
config="default",
content=config_parquet_content,
http_status=HTTPStatus.OK,
progress=1.0,
)
return config_parquet_content
@pytest.fixture
def dataset_empty_with_config_parquet_metadata(
ds_empty_fs: AbstractFileSystem, ds_empty_parquet_metadata_dir: StrPath
) -> dict[str, Any]:
config_parquet_content = {
"parquet_files_metadata": [
{
"dataset": "ds_empty",
"config": "default",
"split": "train",
"url": "https://fake.huggingface.co/datasets/ds/resolve/refs%2Fconvert%2Fparquet/default/train/0000.parquet", # noqa: E501
"filename": "0000.parquet",
"size": ds_empty_fs.info("default/train/0000.parquet")["size"],
"num_rows": pq.read_metadata(ds_empty_fs.open("default/train/0000.parquet")).num_rows,
"parquet_metadata_subpath": "ds_empty/--/default/train/0000.parquet",
}
]
}
upsert_response(
kind="config-parquet-metadata",
dataset="ds_empty",
config="default",
content=config_parquet_content,
http_status=HTTPStatus.OK,
progress=1.0,
)
return config_parquet_content
@pytest.fixture
def ds_sharded_parquet_metadata_dir(
ds_sharded_fs: AbstractFileSystem, parquet_metadata_directory: StrPath
) -> Generator[StrPath, None, None]:
parquet_shard_paths = ds_sharded_fs.glob("**.parquet")
for parquet_shard_path in parquet_shard_paths:
parquet_file_metadata_path = Path(parquet_metadata_directory) / "ds_sharded" / "--" / parquet_shard_path
parquet_file_metadata_path.parent.mkdir(parents=True, exist_ok=True)
with ds_sharded_fs.open(parquet_shard_path) as parquet_shard_f:
with open(parquet_file_metadata_path, "wb") as parquet_file_metadata_f:
pq.read_metadata(parquet_shard_f).write_metadata_file(parquet_file_metadata_f)
yield parquet_metadata_directory
shutil.rmtree(Path(parquet_metadata_directory) / "ds_sharded")
@pytest.fixture
def dataset_sharded_with_config_parquet() -> dict[str, Any]:
num_shards = 4
config_parquet_content = {
"parquet_files": [
{
"dataset": "ds_sharded",
"config": "default",
"split": "train",
"url": f"https://fake.huggingface.co/datasets/ds/resolve/refs%2Fconvert%2Fparquet/default/train{shard_idx:04d}.parquet", # noqa: E501
"filename": f"{shard_idx:04d}.parquet",
"size": 128,
}
for shard_idx in range(num_shards)
]
}
upsert_response(
kind="config-parquet",
dataset="ds_sharded",
config="default",
content=config_parquet_content,
http_status=HTTPStatus.OK,
progress=1.0,
)
return config_parquet_content
@pytest.fixture
def dataset_sharded_with_config_parquet_metadata(
ds_sharded_fs: AbstractFileSystem, ds_sharded_parquet_metadata_dir: StrPath
) -> dict[str, Any]:
config_parquet_metadata_content = {
"parquet_files_metadata": [
{
"dataset": "ds_sharded",
"config": "default",
"split": "train",
"url": f"https://fake.huggingface.co/datasets/ds/resolve/refs%2Fconvert%2Fparquet/{parquet_file_path}", # noqa: E501
"filename": os.path.basename(parquet_file_path),
"size": ds_sharded_fs.info(parquet_file_path)["size"],
"num_rows": pq.read_metadata(ds_sharded_fs.open(parquet_file_path)).num_rows,
"parquet_metadata_subpath": f"ds_sharded/--/{parquet_file_path}",
}
for parquet_file_path in ds_sharded_fs.glob("default/**.parquet")
]
}
upsert_response(
kind="config-parquet-metadata",
dataset="ds_sharded",
config="default",
content=config_parquet_metadata_content,
http_status=HTTPStatus.OK,
progress=1.0,
)
return config_parquet_metadata_content
@pytest.fixture
def dataset_image_with_config_parquet() -> dict[str, Any]:
config_parquet_content = {
"parquet_files": [
{
"dataset": "ds_image",
"config": "default",
"split": "train",
"url": "https://fake.huggingface.co/datasets/ds/resolve/refs%2Fconvert%2Fparquet/default/train/0000.parquet", # noqa: E501
"filename": "0000.parquet",
"size": 11128,
}
]
}
upsert_response(
kind="config-parquet",
dataset="ds_image",
config="default",
content=config_parquet_content,
http_status=HTTPStatus.OK,
progress=1.0,
)
return config_parquet_content
@pytest.fixture
def indexer(
app_config: AppConfig,
processing_graph: ProcessingGraph,
parquet_metadata_directory: StrPath,
) -> Indexer:
return Indexer(
processing_graph=processing_graph,
hf_token=app_config.common.hf_token,
parquet_metadata_directory=parquet_metadata_directory,
httpfs=HTTPFileSystem(),
max_arrow_data_in_memory=9999999999,
)
@pytest.fixture
def rows_index_with_parquet_metadata(
indexer: Indexer,
ds_sharded: Dataset,
ds_sharded_fs: AbstractFileSystem,
dataset_sharded_with_config_parquet_metadata: dict[str, Any],
) -> Generator[RowsIndex, None, None]:
with ds_sharded_fs.open("default/train/0003.parquet") as f:
with patch("libcommon.parquet_utils.HTTPFile", return_value=f):
yield indexer.get_rows_index("ds_sharded", "default", "train")
@pytest.fixture
def rows_index_with_empty_dataset(
indexer: Indexer,
ds_empty: Dataset,
ds_empty_fs: AbstractFileSystem,
dataset_empty_with_config_parquet_metadata: dict[str, Any],
) -> Generator[RowsIndex, None, None]:
with ds_empty_fs.open("default/train/0000.parquet") as f:
with patch("libcommon.parquet_utils.HTTPFile", return_value=f):
yield indexer.get_rows_index("ds_empty", "default", "train")
@pytest.fixture
def rows_index_with_too_big_rows(
app_config: AppConfig,
processing_graph: ProcessingGraph,
parquet_metadata_directory: StrPath,
ds_sharded: Dataset,
ds_sharded_fs: AbstractFileSystem,
dataset_sharded_with_config_parquet_metadata: dict[str, Any],
) -> Generator[RowsIndex, None, None]:
indexer = Indexer(
processing_graph=processing_graph,
hf_token=app_config.common.hf_token,
parquet_metadata_directory=parquet_metadata_directory,
httpfs=HTTPFileSystem(),
max_arrow_data_in_memory=1,
)
with ds_sharded_fs.open("default/train/0003.parquet") as f:
with patch("libcommon.parquet_utils.HTTPFile", return_value=f):
yield indexer.get_rows_index("ds_sharded", "default", "train")
def test_indexer_get_rows_index_with_parquet_metadata(
indexer: Indexer, ds: Dataset, ds_fs: AbstractFileSystem, dataset_with_config_parquet_metadata: dict[str, Any]
) -> None:
with ds_fs.open("default/train/0000.parquet") as f:
with patch("libcommon.parquet_utils.HTTPFile", return_value=f):
index = indexer.get_rows_index("ds", "default", "train")
assert isinstance(index.parquet_index, ParquetIndexWithMetadata)
assert index.parquet_index.features == ds.features
assert index.parquet_index.num_rows == [len(ds)]
assert index.parquet_index.num_rows_total == 2
assert index.parquet_index.parquet_files_urls == [
parquet_file_metadata_item["url"]
for parquet_file_metadata_item in dataset_with_config_parquet_metadata["parquet_files_metadata"]
]
assert len(index.parquet_index.metadata_paths) == 1
assert os.path.exists(index.parquet_index.metadata_paths[0])
def test_indexer_get_rows_index_sharded_with_parquet_metadata(
indexer: Indexer,
ds: Dataset,
ds_sharded: Dataset,
ds_sharded_fs: AbstractFileSystem,
dataset_sharded_with_config_parquet_metadata: dict[str, Any],
) -> None:
with ds_sharded_fs.open("default/train/0003.parquet") as f:
with patch("libcommon.parquet_utils.HTTPFile", return_value=f):
index = indexer.get_rows_index("ds_sharded", "default", "train")
assert isinstance(index.parquet_index, ParquetIndexWithMetadata)
assert index.parquet_index.features == ds_sharded.features
assert index.parquet_index.num_rows == [len(ds)] * 4
assert index.parquet_index.num_rows_total == 8
assert index.parquet_index.parquet_files_urls == [
parquet_file_metadata_item["url"]
for parquet_file_metadata_item in dataset_sharded_with_config_parquet_metadata["parquet_files_metadata"]
]
assert len(index.parquet_index.metadata_paths) == 4
assert all(os.path.exists(index.parquet_index.metadata_paths[i]) for i in range(4))
def test_rows_index_query_with_parquet_metadata(
rows_index_with_parquet_metadata: RowsIndex, ds_sharded: Dataset
) -> None:
assert isinstance(rows_index_with_parquet_metadata.parquet_index, ParquetIndexWithMetadata)
assert rows_index_with_parquet_metadata.query(offset=1, length=3).to_pydict() == ds_sharded[1:4]
assert rows_index_with_parquet_metadata.query(offset=1, length=-1).to_pydict() == ds_sharded[:0]
assert rows_index_with_parquet_metadata.query(offset=1, length=0).to_pydict() == ds_sharded[:0]
assert rows_index_with_parquet_metadata.query(offset=999999, length=1).to_pydict() == ds_sharded[:0]
assert rows_index_with_parquet_metadata.query(offset=1, length=99999999).to_pydict() == ds_sharded[1:]
with pytest.raises(IndexError):
rows_index_with_parquet_metadata.query(offset=-1, length=2)
def test_rows_index_query_with_too_big_rows(rows_index_with_too_big_rows: RowsIndex, ds_sharded: Dataset) -> None:
with pytest.raises(TooBigRows):
rows_index_with_too_big_rows.query(offset=0, length=3)
def test_rows_index_query_with_empty_dataset(rows_index_with_empty_dataset: RowsIndex, ds_sharded: Dataset) -> None:
assert isinstance(rows_index_with_empty_dataset.parquet_index, ParquetIndexWithMetadata)
assert rows_index_with_empty_dataset.query(offset=0, length=1).to_pydict() == ds_sharded[:0]
with pytest.raises(IndexError):
rows_index_with_empty_dataset.query(offset=-1, length=2)
def test_create_response(ds: Dataset, app_config: AppConfig, cached_assets_directory: StrPath) -> None:
response = create_response(
dataset="ds",
config="default",
split="train",
cached_assets_base_url=app_config.cached_assets.base_url,
cached_assets_directory=cached_assets_directory,
pa_table=ds.data,
offset=0,
features=ds.features,
unsupported_columns=[],
num_rows_total=10,
)
assert response["features"] == [{"feature_idx": 0, "name": "text", "type": {"dtype": "string", "_type": "Value"}}]
assert response["rows"] == [
{"row_idx": 0, "row": {"text": "Hello there"}, "truncated_cells": []},
{"row_idx": 1, "row": {"text": "General Kenobi"}, "truncated_cells": []},
]
assert response["num_rows_total"] == 10
assert response["num_rows_per_page"] == 100
def test_create_response_with_image(
ds_image: Dataset, app_config: AppConfig, cached_assets_directory: StrPath
) -> None:
response = create_response(
dataset="ds_image",
config="default",
split="train",
cached_assets_base_url=app_config.cached_assets.base_url,
cached_assets_directory=cached_assets_directory,
pa_table=ds_image.data,
offset=0,
features=ds_image.features,
unsupported_columns=[],
num_rows_total=10,
)
assert response["features"] == [{"feature_idx": 0, "name": "image", "type": {"_type": "Image"}}]
assert response["rows"] == [
{
"row_idx": 0,
"row": {
"image": {
"src": "http://localhost/cached-assets/ds_image/--/default/train/0/image/image.jpg",
"height": 480,
"width": 640,
}
},
"truncated_cells": [],
}
]
cached_image_path = Path(cached_assets_directory) / "ds_image/--/default/train/0/image/image.jpg"
assert cached_image_path.is_file()
def test_update_last_modified_date_of_rows_in_assets_dir(tmp_path: Path) -> None:
cached_assets_directory = tmp_path / "cached-assets"
split_dir = cached_assets_directory / "ds/--/default/train"
split_dir.mkdir(parents=True)
n_rows = 8
for i in range(n_rows):
(split_dir / str(i)).mkdir()
time.sleep(0.01)
update_last_modified_date_of_rows_in_assets_dir(
dataset="ds",
config="default",
split="train",
offset=2,
length=3,
assets_directory=cached_assets_directory,
)
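    # rows 2..4 (offset=2, length=3) should now have the most recent modification times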
most_recent_rows_dirs = sorted(list(split_dir.glob("*")), key=os.path.getmtime, reverse=True)
most_recent_rows = [int(row_dir.name) for row_dir in most_recent_rows_dirs]
assert sorted(most_recent_rows[:3]) == [2, 3, 4]
assert most_recent_rows[3:] == [7, 6, 5, 1, 0]
| datasets-server-main | services/rows/tests/routes/test_rows.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
from dataclasses import dataclass, field
from libapi.config import ApiConfig
from libcommon.config import (
CacheConfig,
CachedAssetsConfig,
CommonConfig,
LogConfig,
ParquetMetadataConfig,
ProcessingGraphConfig,
QueueConfig,
RowsIndexConfig,
)
@dataclass(frozen=True)
class AppConfig:
api: ApiConfig = field(default_factory=ApiConfig)
cached_assets: CachedAssetsConfig = field(default_factory=CachedAssetsConfig)
cache: CacheConfig = field(default_factory=CacheConfig)
common: CommonConfig = field(default_factory=CommonConfig)
log: LogConfig = field(default_factory=LogConfig)
queue: QueueConfig = field(default_factory=QueueConfig)
rows_index: RowsIndexConfig = field(default_factory=RowsIndexConfig)
processing_graph: ProcessingGraphConfig = field(default_factory=ProcessingGraphConfig)
parquet_metadata: ParquetMetadataConfig = field(default_factory=ParquetMetadataConfig)
@classmethod
def from_env(cls) -> "AppConfig":
common_config = CommonConfig.from_env()
return cls(
common=common_config,
cached_assets=CachedAssetsConfig.from_env(),
cache=CacheConfig.from_env(),
log=LogConfig.from_env(),
processing_graph=ProcessingGraphConfig.from_env(),
queue=QueueConfig.from_env(),
api=ApiConfig.from_env(hf_endpoint=common_config.hf_endpoint),
parquet_metadata=ParquetMetadataConfig.from_env(),
rows_index=RowsIndexConfig.from_env(),
)
| datasets-server-main | services/rows/src/rows/config.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
| datasets-server-main | services/rows/src/rows/__init__.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
import uvicorn
from libapi.config import UvicornConfig
from libapi.jwt_token import get_jwt_public_keys
from libapi.routes.healthcheck import healthcheck_endpoint
from libapi.routes.metrics import create_metrics_endpoint
from libapi.utils import EXPOSED_HEADERS
from libcommon.log import init_logging
from libcommon.processing_graph import ProcessingGraph
from libcommon.resources import CacheMongoResource, QueueMongoResource, Resource
from libcommon.storage import exists, init_cached_assets_dir, init_parquet_metadata_dir
from starlette.applications import Starlette
from starlette.middleware import Middleware
from starlette.middleware.cors import CORSMiddleware
from starlette.middleware.gzip import GZipMiddleware
from starlette.routing import Route
from starlette_prometheus import PrometheusMiddleware
from rows.config import AppConfig
from rows.routes.rows import create_rows_endpoint
def create_app() -> Starlette:
app_config = AppConfig.from_env()
return create_app_with_config(app_config=app_config)
def create_app_with_config(app_config: AppConfig) -> Starlette:
init_logging(level=app_config.log.level)
# ^ set first to have logs as soon as possible
cached_assets_directory = init_cached_assets_dir(directory=app_config.cached_assets.storage_directory)
if not exists(cached_assets_directory):
raise RuntimeError("The assets storage directory could not be accessed. Exiting.")
parquet_metadata_directory = init_parquet_metadata_dir(directory=app_config.parquet_metadata.storage_directory)
if not exists(parquet_metadata_directory):
raise RuntimeError("The parquet metadata storage directory could not be accessed. Exiting.")
processing_graph = ProcessingGraph(app_config.processing_graph.specification)
hf_jwt_public_keys = get_jwt_public_keys(
algorithm_name=app_config.api.hf_jwt_algorithm,
public_key_url=app_config.api.hf_jwt_public_key_url,
additional_public_keys=app_config.api.hf_jwt_additional_public_keys,
timeout_seconds=app_config.api.hf_timeout_seconds,
)
middleware = [
Middleware(
CORSMiddleware,
allow_origins=["*"],
allow_methods=["*"],
allow_headers=["*"],
allow_credentials=True,
expose_headers=EXPOSED_HEADERS,
),
Middleware(GZipMiddleware),
Middleware(PrometheusMiddleware, filter_unhandled_paths=True),
]
cache_resource = CacheMongoResource(database=app_config.cache.mongo_database, host=app_config.cache.mongo_url)
queue_resource = QueueMongoResource(database=app_config.queue.mongo_database, host=app_config.queue.mongo_url)
resources: list[Resource] = [cache_resource, queue_resource]
if not cache_resource.is_available():
raise RuntimeError("The connection to the cache database could not be established. Exiting.")
if not queue_resource.is_available():
raise RuntimeError("The connection to the queue database could not be established. Exiting.")
routes = [
Route("/healthcheck", endpoint=healthcheck_endpoint),
Route("/metrics", endpoint=create_metrics_endpoint()),
# ^ called by Prometheus
Route(
"/rows",
endpoint=create_rows_endpoint(
processing_graph=processing_graph,
cached_assets_base_url=app_config.cached_assets.base_url,
cached_assets_directory=cached_assets_directory,
parquet_metadata_directory=parquet_metadata_directory,
max_arrow_data_in_memory=app_config.rows_index.max_arrow_data_in_memory,
hf_endpoint=app_config.common.hf_endpoint,
hf_token=app_config.common.hf_token,
hf_jwt_public_keys=hf_jwt_public_keys,
hf_jwt_algorithm=app_config.api.hf_jwt_algorithm,
external_auth_url=app_config.api.external_auth_url,
hf_timeout_seconds=app_config.api.hf_timeout_seconds,
max_age_long=app_config.api.max_age_long,
max_age_short=app_config.api.max_age_short,
cache_max_days=app_config.cache.max_days,
),
),
]
return Starlette(routes=routes, middleware=middleware, on_shutdown=[resource.release for resource in resources])
def start() -> None:
uvicorn_config = UvicornConfig.from_env()
uvicorn.run(
"app:create_app",
host=uvicorn_config.hostname,
port=uvicorn_config.port,
factory=True,
workers=uvicorn_config.num_workers,
)
| datasets-server-main | services/rows/src/rows/app.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from rows.app import start
if __name__ == "__main__":
start()
| datasets-server-main | services/rows/src/rows/main.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
| datasets-server-main | services/rows/src/rows/routes/__init__.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import logging
import random
from typing import Literal, Optional, Union
import pyarrow as pa
from datasets import Features, Value
from fsspec.implementations.http import HTTPFileSystem
from libapi.authentication import auth_check
from libapi.exceptions import (
ApiError,
InvalidParameterError,
MissingRequiredParameterError,
UnexpectedApiError,
)
from libapi.utils import (
Endpoint,
are_valid_parameters,
clean_cached_assets,
get_json_api_error_response,
get_json_error_response,
get_json_ok_response,
to_rows_list,
try_backfill_dataset_then_raise,
)
from libcommon.parquet_utils import Indexer
from libcommon.processing_graph import ProcessingGraph
from libcommon.prometheus import StepProfiler
from libcommon.simple_cache import CachedArtifactError, CachedArtifactNotFoundError
from libcommon.storage import StrPath
from libcommon.utils import PaginatedResponse
from libcommon.viewer_utils.asset import update_last_modified_date_of_rows_in_assets_dir
from libcommon.viewer_utils.features import to_features_list
from starlette.requests import Request
from starlette.responses import Response
logger = logging.getLogger(__name__)
MAX_ROWS = 100
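# maximum number of rows returned by a single /rows request (also reported as num_rows_per_page)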
ALL_COLUMNS_SUPPORTED_DATASETS_ALLOW_LIST: Union[Literal["all"], list[str]] = ["arabic_speech_corpus"] # for testing
# audio still has some errors when librosa is imported
UNSUPPORTED_FEATURES = [Value("binary")]
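# columns with these feature types are listed in `unsupported_columns` and omitted from the response rows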
def create_response(
dataset: str,
config: str,
split: str,
cached_assets_base_url: str,
cached_assets_directory: StrPath,
pa_table: pa.Table,
offset: int,
features: Features,
unsupported_columns: list[str],
num_rows_total: int,
) -> PaginatedResponse:
if set(pa_table.column_names).intersection(set(unsupported_columns)):
raise RuntimeError(
"The pyarrow table contains unsupported columns. They should have been ignored in the row group reader."
)
return PaginatedResponse(
features=to_features_list(features),
rows=to_rows_list(
pa_table,
dataset,
config,
split,
cached_assets_base_url,
cached_assets_directory,
offset,
features,
unsupported_columns,
),
num_rows_total=num_rows_total,
num_rows_per_page=MAX_ROWS,
)
def create_rows_endpoint(
processing_graph: ProcessingGraph,
cached_assets_base_url: str,
cached_assets_directory: StrPath,
parquet_metadata_directory: StrPath,
cache_max_days: int,
max_arrow_data_in_memory: int,
hf_endpoint: str,
hf_token: Optional[str] = None,
hf_jwt_public_keys: Optional[list[str]] = None,
hf_jwt_algorithm: Optional[str] = None,
external_auth_url: Optional[str] = None,
hf_timeout_seconds: Optional[float] = None,
max_age_long: int = 0,
max_age_short: int = 0,
clean_cache_proba: float = 0.0,
keep_first_rows_number: int = -1,
keep_most_recent_rows_number: int = -1,
max_cleaned_rows_number: int = -1,
) -> Endpoint:
indexer = Indexer(
processing_graph=processing_graph,
hf_token=hf_token,
parquet_metadata_directory=parquet_metadata_directory,
httpfs=HTTPFileSystem(headers={"authorization": f"Bearer {hf_token}"}),
max_arrow_data_in_memory=max_arrow_data_in_memory,
unsupported_features=UNSUPPORTED_FEATURES,
all_columns_supported_datasets_allow_list=ALL_COLUMNS_SUPPORTED_DATASETS_ALLOW_LIST,
)
async def rows_endpoint(request: Request) -> Response:
await indexer.httpfs.set_session()
revision: Optional[str] = None
with StepProfiler(method="rows_endpoint", step="all"):
try:
with StepProfiler(method="rows_endpoint", step="validate parameters"):
dataset = request.query_params.get("dataset")
config = request.query_params.get("config")
split = request.query_params.get("split")
if not dataset or not config or not split or not are_valid_parameters([dataset, config, split]):
                        raise MissingRequiredParameterError("Parameters 'dataset', 'config' and 'split' are required")
offset = int(request.query_params.get("offset", 0))
if offset < 0:
                        raise InvalidParameterError(message="Offset must be non-negative")
length = int(request.query_params.get("length", MAX_ROWS))
if length < 0:
raise InvalidParameterError("Length must be positive")
if length > MAX_ROWS:
raise InvalidParameterError(f"Length must be less than or equal to {MAX_ROWS}")
logging.info(
f"/rows, dataset={dataset}, config={config}, split={split}, offset={offset}, length={length}"
)
with StepProfiler(method="rows_endpoint", step="check authentication"):
# if auth_check fails, it will raise an exception that will be caught below
auth_check(
dataset=dataset,
external_auth_url=external_auth_url,
request=request,
hf_jwt_public_keys=hf_jwt_public_keys,
hf_jwt_algorithm=hf_jwt_algorithm,
hf_timeout_seconds=hf_timeout_seconds,
)
try:
with StepProfiler(method="rows_endpoint", step="get row groups index"):
rows_index = indexer.get_rows_index(
dataset=dataset,
config=config,
split=split,
)
revision = rows_index.revision
except CachedArtifactNotFoundError:
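                    # the parquet metadata cache entry is missing: try to backfill the dataset and raise, so the client can retry later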
config_parquet_processing_steps = processing_graph.get_config_parquet_processing_steps()
config_parquet_metadata_processing_steps = (
processing_graph.get_config_parquet_metadata_processing_steps()
)
with StepProfiler(method="rows_endpoint", step="try backfill dataset"):
try_backfill_dataset_then_raise(
processing_steps=config_parquet_metadata_processing_steps
+ config_parquet_processing_steps,
processing_graph=processing_graph,
dataset=dataset,
hf_endpoint=hf_endpoint,
hf_timeout_seconds=hf_timeout_seconds,
hf_token=hf_token,
cache_max_days=cache_max_days,
)
with StepProfiler(method="rows_endpoint", step="query the rows"):
pa_table = rows_index.query(offset=offset, length=length)
with StepProfiler(method="rows_endpoint", step="clean cache"):
# no need to do it every time
if random.random() < clean_cache_proba: # nosec
if (
keep_first_rows_number < 0
and keep_most_recent_rows_number < 0
and max_cleaned_rows_number < 0
):
logger.debug(
"Params keep_first_rows_number, keep_most_recent_rows_number and"
" max_cleaned_rows_number are not set. Skipping cached assets cleaning."
)
else:
clean_cached_assets(
dataset=dataset,
cached_assets_directory=cached_assets_directory,
keep_first_rows_number=keep_first_rows_number,
keep_most_recent_rows_number=keep_most_recent_rows_number,
max_cleaned_rows_number=max_cleaned_rows_number,
)
with StepProfiler(method="rows_endpoint", step="transform to a list"):
response = create_response(
dataset=dataset,
config=config,
split=split,
cached_assets_base_url=cached_assets_base_url,
cached_assets_directory=cached_assets_directory,
pa_table=pa_table,
offset=offset,
features=rows_index.parquet_index.features,
unsupported_columns=rows_index.parquet_index.unsupported_columns,
num_rows_total=rows_index.parquet_index.num_rows_total,
)
with StepProfiler(method="rows_endpoint", step="update last modified time of rows in asset dir"):
update_last_modified_date_of_rows_in_assets_dir(
dataset=dataset,
config=config,
split=split,
offset=offset,
length=length,
assets_directory=cached_assets_directory,
)
with StepProfiler(method="rows_endpoint", step="generate the OK response"):
return get_json_ok_response(content=response, max_age=max_age_long, revision=revision)
except CachedArtifactError as e:
content = e.cache_entry_with_details["content"]
http_status = e.cache_entry_with_details["http_status"]
error_code = e.cache_entry_with_details["error_code"]
return get_json_error_response(
content=content,
status_code=http_status,
max_age=max_age_short,
error_code=error_code,
revision=revision,
)
except Exception as e:
error = e if isinstance(e, ApiError) else UnexpectedApiError("Unexpected error.", e)
with StepProfiler(method="rows_endpoint", step="generate API error response"):
return get_json_api_error_response(error=error, max_age=max_age_short, revision=revision)
return rows_endpoint
| datasets-server-main | services/rows/src/rows/routes/rows.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
from collections.abc import Iterator
from libapi.config import UvicornConfig
from libcommon.processing_graph import ProcessingGraph
from libcommon.queue import _clean_queue_database
from libcommon.resources import CacheMongoResource, QueueMongoResource
from libcommon.simple_cache import _clean_cache_database
from libcommon.storage import (
StrPath,
init_cached_assets_dir,
init_duckdb_index_cache_dir,
)
from pytest import MonkeyPatch, fixture
from search.config import AppConfig
# see https://github.com/pytest-dev/pytest/issues/363#issuecomment-406536200
@fixture(scope="session")
def monkeypatch_session() -> Iterator[MonkeyPatch]:
monkeypatch_session = MonkeyPatch()
monkeypatch_session.setenv("CACHE_MONGO_DATABASE", "datasets_server_cache_test")
monkeypatch_session.setenv("QUEUE_MONGO_DATABASE", "datasets_server_queue_test")
monkeypatch_session.setenv("CACHED_ASSETS_BASE_URL", "http://localhost/cached-assets")
hostname = "localhost"
port = "8888"
monkeypatch_session.setenv("API_HF_TIMEOUT_SECONDS", "10")
monkeypatch_session.setenv("API_UVICORN_HOSTNAME", hostname)
monkeypatch_session.setenv("API_UVICORN_PORT", port)
monkeypatch_session.setenv("COMMON_HF_ENDPOINT", f"http://{hostname}:{port}")
yield monkeypatch_session
monkeypatch_session.undo()
@fixture(scope="session")
def app_config(monkeypatch_session: MonkeyPatch) -> AppConfig:
app_config = AppConfig.from_env()
if "test" not in app_config.cache.mongo_database or "test" not in app_config.queue.mongo_database:
raise ValueError("Test must be launched on a test mongo database")
return app_config
@fixture(scope="session")
def processing_graph(app_config: AppConfig) -> ProcessingGraph:
return ProcessingGraph(app_config.processing_graph.specification)
@fixture(scope="session")
def search_endpoint() -> str:
return "/search"
@fixture(autouse=True)
def cache_mongo_resource(app_config: AppConfig) -> Iterator[CacheMongoResource]:
with CacheMongoResource(database=app_config.cache.mongo_database, host=app_config.cache.mongo_url) as resource:
yield resource
_clean_cache_database()
@fixture(autouse=True)
def queue_mongo_resource(app_config: AppConfig) -> Iterator[QueueMongoResource]:
with QueueMongoResource(database=app_config.queue.mongo_database, host=app_config.queue.mongo_url) as resource:
yield resource
_clean_queue_database()
@fixture(scope="session")
def uvicorn_config(monkeypatch_session: MonkeyPatch) -> UvicornConfig:
return UvicornConfig.from_env()
@fixture(scope="session")
def httpserver_listen_address(uvicorn_config: UvicornConfig) -> tuple[str, int]:
return (uvicorn_config.hostname, uvicorn_config.port)
@fixture(scope="session")
def hf_endpoint(app_config: AppConfig) -> str:
return app_config.common.hf_endpoint
@fixture(scope="session")
def hf_auth_path(app_config: AppConfig) -> str:
return app_config.api.hf_auth_path
@fixture
def cached_assets_directory(app_config: AppConfig) -> StrPath:
return init_cached_assets_dir(app_config.cached_assets.storage_directory)
@fixture
def duckdb_index_cache_directory(app_config: AppConfig) -> StrPath:
return init_duckdb_index_cache_dir(app_config.duckdb_index.cache_directory)
| datasets-server-main | services/search/tests/conftest.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
| datasets-server-main | services/search/tests/__init__.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
from typing import Optional
import pytest
from starlette.testclient import TestClient
from search.app import create_app_with_config
from search.config import AppConfig
@pytest.fixture(scope="module")
def client(monkeypatch_session: pytest.MonkeyPatch, app_config: AppConfig) -> TestClient:
return TestClient(create_app_with_config(app_config=app_config))
def test_cors(client: TestClient) -> None:
origin = "http://localhost:3000"
method = "GET"
header = "X-Requested-With"
response = client.options(
"/search?dataset=dataset1&config=config1&split=train&query=query1",
headers={
"Origin": origin,
"Access-Control-Request-Method": method,
"Access-Control-Request-Headers": header,
},
)
assert response.status_code == 200
assert (
origin in [o.strip() for o in response.headers["Access-Control-Allow-Origin"].split(",")]
or response.headers["Access-Control-Allow-Origin"] == "*"
)
assert (
header in [o.strip() for o in response.headers["Access-Control-Allow-Headers"].split(",")]
or response.headers["Access-Control-Expose-Headers"] == "*"
)
assert (
method in [o.strip() for o in response.headers["Access-Control-Allow-Methods"].split(",")]
or response.headers["Access-Control-Expose-Headers"] == "*"
)
assert response.headers["Access-Control-Allow-Credentials"] == "true"
def test_get_healthcheck(client: TestClient) -> None:
response = client.get("/healthcheck")
assert response.status_code == 200
assert response.text == "ok"
def test_get_search(client: TestClient) -> None:
# missing parameter
response = client.get("/search")
assert response.status_code == 422
@pytest.mark.parametrize(
"dataset,config,split,query",
[
(None, None, None, None),
("a", None, None, None),
("a", "b", None, None),
("a", "b", "c", None),
("a", "b", "c", ""),
],
)
def test_get_split_missing_parameter(
client: TestClient,
dataset: Optional[str],
config: Optional[str],
split: Optional[str],
query: Optional[str],
) -> None:
response = client.get("/search", params={"dataset": dataset, "config": config, "split": split, "query": query})
assert response.status_code == 422
def test_metrics(client: TestClient) -> None:
response = client.get("/healthcheck")
response = client.get("/metrics")
assert response.status_code == 200
text = response.text
lines = text.split("\n")
# examples:
# starlette_requests_total{method="GET",path_template="/metrics"} 1.0
# method_steps_processing_time_seconds_sum{method="healthcheck_endpoint",step="all"} 1.6772013623267412e-05
metrics = {
parts[0]: float(parts[1]) for line in lines if line and line[0] != "#" and (parts := line.rsplit(" ", 1))
}
# the metrics should contain at least the following
for name in [
'starlette_requests_total{method="GET",path_template="/metrics"}',
'method_steps_processing_time_seconds_sum{context="None",method="healthcheck_endpoint",step="all"}',
]:
assert name in metrics, metrics
assert metrics[name] > 0, metrics
| datasets-server-main | services/search/tests/test_app.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
import os
from typing import Any
import duckdb
import pandas as pd
import pyarrow as pa
import pytest
from libcommon.storage import StrPath
from search.routes.search import full_text_search, get_download_folder
def test_get_download_folder(duckdb_index_cache_directory: StrPath) -> None:
dataset, config, split, revision = "dataset", "config", "split", "revision"
index_folder = get_download_folder(duckdb_index_cache_directory, dataset, config, split, revision)
assert index_folder is not None
assert str(duckdb_index_cache_directory) in index_folder
@pytest.mark.parametrize(
"query,offset,length,expected_result,expected_num_rows_total",
[
(
"Lord Vader",
0,
100,
{
"__hf_index_id": [0, 4, 2],
"text": [
"Grand Moff Tarkin and Lord Vader are interrupted in their discussion by the buzz of the comlink",
"The wingman spots the pirateship coming at him and warns the Dark Lord",
"Vader turns round and round in circles as his ship spins into space.",
],
},
3,
),
(
"Lord Vader",
1,
2,
{
"__hf_index_id": [4, 2],
"text": [
"The wingman spots the pirateship coming at him and warns the Dark Lord",
"Vader turns round and round in circles as his ship spins into space.",
],
},
3,
),
("non existing text", 0, 100, {"__hf_index_id": [], "text": []}, 0),
(";DROP TABLE data;", 0, 100, {"__hf_index_id": [], "text": []}, 0),
("some text'); DROP TABLE data; --", 0, 100, {"__hf_index_id": [], "text": []}, 0),
],
)
def test_full_text_search(
query: str, offset: int, length: int, expected_result: Any, expected_num_rows_total: int
) -> None:
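    # the last two parameter sets are SQL-injection attempts: they must return no rows and leave the table intact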
# simulate index file
index_file_location = "index.duckdb"
con = duckdb.connect(index_file_location)
con.execute("INSTALL 'httpfs';")
con.execute("LOAD 'httpfs';")
con.execute("INSTALL 'fts';")
con.execute("LOAD 'fts';")
con.sql("CREATE OR REPLACE SEQUENCE serial START 0 MINVALUE 0;")
sample_df = pd.DataFrame(
{
"text": [
"Grand Moff Tarkin and Lord Vader are interrupted in their discussion by the buzz of the comlink",
"There goes another one.",
"Vader turns round and round in circles as his ship spins into space.",
"We count thirty Rebel ships.",
"The wingman spots the pirateship coming at him and warns the Dark Lord",
]
},
dtype=pd.StringDtype(storage="python"),
)
create_command_sql = "CREATE OR REPLACE TABLE data AS SELECT nextval('serial') AS __hf_index_id, * FROM sample_df"
con.sql(create_command_sql)
con.execute(query="SELECT COUNT(*) FROM data;").fetchall()
assert sample_df.size == con.execute(query="SELECT COUNT(*) FROM data;").fetchall()[0][0]
con.sql("PRAGMA create_fts_index('data', '__hf_index_id', '*', overwrite=1);")
con.close()
# assert search results
(num_rows_total, pa_table) = full_text_search(index_file_location, query, offset, length)
assert num_rows_total is not None
assert pa_table is not None
assert num_rows_total == expected_num_rows_total
fields = [pa.field("__hf_index_id", pa.int64()), pa.field("text", pa.string())]
filtered_df = pd.DataFrame(expected_result)
expected_table = pa.Table.from_pandas(filtered_df, schema=pa.schema(fields), preserve_index=False)
assert pa_table == expected_table
# ensure that database has not been modified
con = duckdb.connect(index_file_location)
assert sample_df.size == con.execute(query="SELECT COUNT(*) FROM data;").fetchall()[0][0]
con.close()
os.remove(index_file_location)
| datasets-server-main | services/search/tests/routes/test_search.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
| datasets-server-main | services/search/tests/routes/__init__.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
from dataclasses import dataclass, field
from typing import Optional
from environs import Env
from libapi.config import ApiConfig
from libcommon.config import (
CacheConfig,
CachedAssetsConfig,
CommonConfig,
LogConfig,
ProcessingGraphConfig,
QueueConfig,
)
DUCKDB_INDEX_CACHE_DIRECTORY = None
DUCKDB_INDEX_TARGET_REVISION = "refs/convert/parquet"
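# revision (branch) from which the search service downloads the duckdb index files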
@dataclass(frozen=True)
class DuckDbIndexConfig:
cache_directory: Optional[str] = DUCKDB_INDEX_CACHE_DIRECTORY
target_revision: str = DUCKDB_INDEX_TARGET_REVISION
@classmethod
def from_env(cls) -> "DuckDbIndexConfig":
env = Env(expand_vars=True)
with env.prefixed("DUCKDB_INDEX_"):
return cls(
cache_directory=env.str(name="CACHE_DIRECTORY", default=DUCKDB_INDEX_CACHE_DIRECTORY),
target_revision=env.str(name="TARGET_REVISION", default=DUCKDB_INDEX_TARGET_REVISION),
)
@dataclass(frozen=True)
class AppConfig:
api: ApiConfig = field(default_factory=ApiConfig)
cached_assets: CachedAssetsConfig = field(default_factory=CachedAssetsConfig)
cache: CacheConfig = field(default_factory=CacheConfig)
common: CommonConfig = field(default_factory=CommonConfig)
log: LogConfig = field(default_factory=LogConfig)
queue: QueueConfig = field(default_factory=QueueConfig)
processing_graph: ProcessingGraphConfig = field(default_factory=ProcessingGraphConfig)
duckdb_index: DuckDbIndexConfig = field(default_factory=DuckDbIndexConfig)
@classmethod
def from_env(cls) -> "AppConfig":
common_config = CommonConfig.from_env()
return cls(
common=common_config,
cached_assets=CachedAssetsConfig.from_env(),
cache=CacheConfig.from_env(),
log=LogConfig.from_env(),
processing_graph=ProcessingGraphConfig.from_env(),
queue=QueueConfig.from_env(),
api=ApiConfig.from_env(hf_endpoint=common_config.hf_endpoint),
duckdb_index=DuckDbIndexConfig.from_env(),
)
| datasets-server-main | services/search/src/search/config.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
| datasets-server-main | services/search/src/search/__init__.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
import uvicorn
from libapi.config import UvicornConfig
from libapi.jwt_token import get_jwt_public_keys
from libapi.routes.healthcheck import healthcheck_endpoint
from libapi.routes.metrics import create_metrics_endpoint
from libapi.utils import EXPOSED_HEADERS
from libcommon.log import init_logging
from libcommon.processing_graph import ProcessingGraph
from libcommon.resources import CacheMongoResource, QueueMongoResource, Resource
from libcommon.storage import (
exists,
init_cached_assets_dir,
init_duckdb_index_cache_dir,
)
from starlette.applications import Starlette
from starlette.middleware import Middleware
from starlette.middleware.cors import CORSMiddleware
from starlette.middleware.gzip import GZipMiddleware
from starlette.routing import Route
from starlette_prometheus import PrometheusMiddleware
from search.config import AppConfig
from search.routes.search import create_search_endpoint
def create_app() -> Starlette:
app_config = AppConfig.from_env()
return create_app_with_config(app_config=app_config)
def create_app_with_config(app_config: AppConfig) -> Starlette:
init_logging(level=app_config.log.level)
# ^ set first to have logs as soon as possible
cached_assets_directory = init_cached_assets_dir(directory=app_config.cached_assets.storage_directory)
if not exists(cached_assets_directory):
raise RuntimeError("The cached assets storage directory could not be accessed. Exiting.")
duckdb_index_cache_directory = init_duckdb_index_cache_dir(directory=app_config.duckdb_index.cache_directory)
if not exists(duckdb_index_cache_directory):
raise RuntimeError("The duckdb_index cache directory could not be accessed. Exiting.")
processing_graph = ProcessingGraph(app_config.processing_graph.specification)
hf_jwt_public_keys = get_jwt_public_keys(
algorithm_name=app_config.api.hf_jwt_algorithm,
public_key_url=app_config.api.hf_jwt_public_key_url,
additional_public_keys=app_config.api.hf_jwt_additional_public_keys,
timeout_seconds=app_config.api.hf_timeout_seconds,
)
middleware = [
Middleware(
CORSMiddleware,
allow_origins=["*"],
allow_methods=["*"],
allow_headers=["*"],
allow_credentials=True,
expose_headers=EXPOSED_HEADERS,
),
Middleware(GZipMiddleware),
Middleware(PrometheusMiddleware, filter_unhandled_paths=True),
]
cache_resource = CacheMongoResource(database=app_config.cache.mongo_database, host=app_config.cache.mongo_url)
queue_resource = QueueMongoResource(database=app_config.queue.mongo_database, host=app_config.queue.mongo_url)
resources: list[Resource] = [cache_resource, queue_resource]
if not cache_resource.is_available():
raise RuntimeError("The connection to the cache database could not be established. Exiting.")
if not queue_resource.is_available():
raise RuntimeError("The connection to the queue database could not be established. Exiting.")
routes = [
Route("/healthcheck", endpoint=healthcheck_endpoint),
Route("/metrics", endpoint=create_metrics_endpoint()),
# ^ called by Prometheus
Route(
"/search",
endpoint=create_search_endpoint(
duckdb_index_file_directory=duckdb_index_cache_directory,
cached_assets_base_url=app_config.cached_assets.base_url,
cached_assets_directory=cached_assets_directory,
cache_max_days=app_config.cache.max_days,
target_revision=app_config.duckdb_index.target_revision,
hf_endpoint=app_config.common.hf_endpoint,
hf_token=app_config.common.hf_token,
hf_jwt_public_keys=hf_jwt_public_keys,
hf_jwt_algorithm=app_config.api.hf_jwt_algorithm,
external_auth_url=app_config.api.external_auth_url,
hf_timeout_seconds=app_config.api.hf_timeout_seconds,
processing_graph=processing_graph,
max_age_long=app_config.api.max_age_long,
max_age_short=app_config.api.max_age_short,
),
),
]
return Starlette(routes=routes, middleware=middleware, on_shutdown=[resource.release for resource in resources])
def start() -> None:
uvicorn_config = UvicornConfig.from_env()
uvicorn.run(
"app:create_app",
host=uvicorn_config.hostname,
port=uvicorn_config.port,
factory=True,
workers=uvicorn_config.num_workers,
)
| datasets-server-main | services/search/src/search/app.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
from search.app import start
if __name__ == "__main__":
start()
| datasets-server-main | services/search/src/search/main.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
| datasets-server-main | services/search/src/search/routes/__init__.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
import json
import logging
import os
import random
import re
from hashlib import sha1
from http import HTTPStatus
from pathlib import Path
from typing import Optional
import duckdb
import pyarrow as pa
from datasets import Features, Value
from huggingface_hub import hf_hub_download
from libapi.authentication import auth_check
from libapi.exceptions import (
ApiError,
InvalidParameterError,
MissingRequiredParameterError,
UnexpectedApiError,
)
from libapi.utils import (
Endpoint,
are_valid_parameters,
clean_cached_assets,
get_cache_entry_from_steps,
get_json_api_error_response,
get_json_error_response,
get_json_ok_response,
to_rows_list,
)
from libcommon.processing_graph import ProcessingGraph
from libcommon.prometheus import StepProfiler
from libcommon.storage import StrPath, init_dir
from libcommon.utils import PaginatedResponse
from libcommon.viewer_utils.features import (
get_supported_unsupported_columns,
to_features_list,
)
from starlette.requests import Request
from starlette.responses import Response
logger = logging.getLogger(__name__)
ROW_IDX_COLUMN = "__hf_index_id"
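# name of the hidden row-id column created at indexing time; it identifies documents for the FTS index and provides the row index in responses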
MAX_ROWS = 100
UNSUPPORTED_FEATURES = [Value("binary")]
FTS_COMMAND_COUNT = (
"SELECT COUNT(*) FROM (SELECT __hf_index_id, fts_main_data.match_bm25(__hf_index_id, ?) AS __hf_fts_score FROM"
" data) A WHERE __hf_fts_score IS NOT NULL;"
)
FTS_COMMAND = (
"SELECT * EXCLUDE (__hf_fts_score) FROM (SELECT *, fts_main_data.match_bm25(__hf_index_id, ?) AS __hf_fts_score"
" FROM data) A WHERE __hf_fts_score IS NOT NULL ORDER BY __hf_fts_score DESC OFFSET {offset} LIMIT {length};"
)
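# FTS_COMMAND_COUNT counts the matching rows; FTS_COMMAND returns one page of matches ordered by BM25 score.
# The query string is passed as a bound parameter; only the (integer) offset and length are formatted into the SQL.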
REPO_TYPE = "dataset"
HUB_DOWNLOAD_CACHE_FOLDER = "cache"
def get_download_folder(
root_directory: StrPath, dataset: str, config: str, split: str, revision: Optional[str]
) -> str:
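    # illustrative example (hypothetical values):
    #   get_download_folder("/cache", "user/squad", "plain_text", "train", "abc")
    #   -> "/cache/downloads/user-squad-<8-char-sha1-prefix>"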
payload = (dataset, config, split, revision)
hash_suffix = sha1(json.dumps(payload, sort_keys=True).encode(), usedforsecurity=False).hexdigest()[:8]
subdirectory = "".join([c if re.match(r"[\w-]", c) else "-" for c in f"{dataset}-{hash_suffix}"])
return f"{root_directory}/downloads/{subdirectory}"
def download_index_file(
cache_folder: str,
index_folder: str,
target_revision: str,
dataset: str,
repo_file_location: str,
hf_token: Optional[str] = None,
) -> None:
logging.info(f"init_dir {index_folder}")
init_dir(index_folder)
# see https://pypi.org/project/hf-transfer/ for more details about how to enable hf_transfer
os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"
hf_hub_download(
repo_type=REPO_TYPE,
revision=target_revision,
repo_id=dataset,
filename=repo_file_location,
local_dir=index_folder,
local_dir_use_symlinks=False,
token=hf_token,
cache_dir=cache_folder,
)
def full_text_search(index_file_location: str, query: str, offset: int, length: int) -> tuple[int, pa.Table]:
con = duckdb.connect(index_file_location, read_only=True)
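    # read_only=True: a search must never modify the downloaded index file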
count_result = con.execute(query=FTS_COMMAND_COUNT, parameters=[query]).fetchall()
    num_rows_total = count_result[0][0]  # always returns a non-empty list containing a single one-element tuple
logging.debug(f"got {num_rows_total=} results for {query=}")
query_result = con.execute(
query=FTS_COMMAND.format(offset=offset, length=length),
parameters=[query],
)
pa_table = query_result.arrow()
con.close()
return (num_rows_total, pa_table)
def create_response(
pa_table: pa.Table,
dataset: str,
config: str,
split: str,
cached_assets_base_url: str,
cached_assets_directory: StrPath,
offset: int,
features: Features,
num_rows_total: int,
) -> PaginatedResponse:
features_without_key = features.copy()
features_without_key.pop(ROW_IDX_COLUMN, None)
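    # hide the internal row-id column from the reported features (it is still passed to to_rows_list as row_idx_column)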
_, unsupported_columns = get_supported_unsupported_columns(
features,
unsupported_features=UNSUPPORTED_FEATURES,
)
pa_table = pa_table.drop(unsupported_columns)
return PaginatedResponse(
features=to_features_list(features_without_key),
rows=to_rows_list(
pa_table,
dataset,
config,
split,
cached_assets_base_url,
cached_assets_directory,
offset=offset,
features=features,
unsupported_columns=unsupported_columns,
row_idx_column=ROW_IDX_COLUMN,
),
num_rows_total=num_rows_total,
num_rows_per_page=MAX_ROWS,
)
def create_search_endpoint(
processing_graph: ProcessingGraph,
duckdb_index_file_directory: StrPath,
cached_assets_base_url: str,
cached_assets_directory: StrPath,
target_revision: str,
cache_max_days: int,
hf_endpoint: str,
external_auth_url: Optional[str] = None,
hf_token: Optional[str] = None,
hf_jwt_public_keys: Optional[list[str]] = None,
hf_jwt_algorithm: Optional[str] = None,
hf_timeout_seconds: Optional[float] = None,
max_age_long: int = 0,
max_age_short: int = 0,
clean_cache_proba: float = 0.0,
keep_first_rows_number: int = -1,
keep_most_recent_rows_number: int = -1,
max_cleaned_rows_number: int = -1,
) -> Endpoint:
async def search_endpoint(request: Request) -> Response:
revision: Optional[str] = None
with StepProfiler(method="search_endpoint", step="all"):
try:
with StepProfiler(method="search_endpoint", step="validate parameters"):
dataset = request.query_params.get("dataset")
config = request.query_params.get("config")
split = request.query_params.get("split")
query = request.query_params.get("query")
if (
not dataset
or not config
or not split
or not query
or not are_valid_parameters([dataset, config, split, query])
):
raise MissingRequiredParameterError(
"Parameter 'dataset', 'config', 'split' and 'query' are required"
)
offset = int(request.query_params.get("offset", 0))
if offset < 0:
                        raise InvalidParameterError(message="Offset must be non-negative")
length = int(request.query_params.get("length", MAX_ROWS))
if length < 0:
raise InvalidParameterError("Length must be positive")
if length > MAX_ROWS:
raise InvalidParameterError(f"Length must be less than or equal to {MAX_ROWS}")
with StepProfiler(method="search_endpoint", step="check authentication"):
# if auth_check fails, it will raise an exception that will be caught below
auth_check(
dataset=dataset,
external_auth_url=external_auth_url,
request=request,
hf_jwt_public_keys=hf_jwt_public_keys,
hf_jwt_algorithm=hf_jwt_algorithm,
hf_timeout_seconds=hf_timeout_seconds,
)
logging.info(f"/search {dataset=} {config=} {split=} {query=} {offset=} {length=}")
with StepProfiler(method="search_endpoint", step="validate indexing was done"):
                        # no cache data is needed to download the index file,
                        # but it lets us check that indexing has been done
processing_steps = processing_graph.get_processing_step_by_job_type("split-duckdb-index")
result = get_cache_entry_from_steps(
processing_steps=[processing_steps],
dataset=dataset,
config=config,
split=split,
processing_graph=processing_graph,
hf_endpoint=hf_endpoint,
hf_token=hf_token,
hf_timeout_seconds=hf_timeout_seconds,
cache_max_days=cache_max_days,
)
content = result["content"]
http_status = result["http_status"]
error_code = result["error_code"]
revision = result["dataset_git_revision"]
if http_status != HTTPStatus.OK:
return get_json_error_response(
content=content,
status_code=http_status,
max_age=max_age_short,
error_code=error_code,
revision=revision,
)
with StepProfiler(method="search_endpoint", step="download index file if missing"):
file_name = content["filename"]
index_folder = get_download_folder(duckdb_index_file_directory, dataset, config, split, revision)
# For directories like "partial-train" for the file
# at "en/partial-train/0000.parquet" in the C4 dataset.
# Note that "-" is forbidden for split names so it doesn't create directory names collisions.
split_directory = content["url"].rsplit("/", 2)[1]
repo_file_location = f"{config}/{split_directory}/{file_name}"
index_file_location = f"{index_folder}/{repo_file_location}"
index_path = Path(index_file_location)
if not index_path.is_file():
with StepProfiler(method="search_endpoint", step="download index file"):
download_index_file(
cache_folder=f"{duckdb_index_file_directory}/{HUB_DOWNLOAD_CACHE_FOLDER}",
index_folder=index_folder,
target_revision=target_revision,
dataset=dataset,
repo_file_location=repo_file_location,
hf_token=hf_token,
)
with StepProfiler(method="search_endpoint", step="perform FTS command"):
logging.debug(f"connect to index file {index_file_location}")
(num_rows_total, pa_table) = full_text_search(index_file_location, query, offset, length)
index_path.touch()
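                    # refresh the index file's modification time now that it has been used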
with StepProfiler(method="search_endpoint", step="clean cache"):
# no need to do it every time
if random.random() < clean_cache_proba: # nosec
if (
keep_first_rows_number < 0
and keep_most_recent_rows_number < 0
and max_cleaned_rows_number < 0
):
logger.debug(
"Params keep_first_rows_number, keep_most_recent_rows_number and"
" max_cleaned_rows_number are not set. Skipping cached assets cleaning."
)
else:
clean_cached_assets(
dataset=dataset,
cached_assets_directory=cached_assets_directory,
keep_first_rows_number=keep_first_rows_number,
keep_most_recent_rows_number=keep_most_recent_rows_number,
max_cleaned_rows_number=max_cleaned_rows_number,
)
with StepProfiler(method="search_endpoint", step="create response"):
if "features" in content and isinstance(content["features"], dict):
features = Features.from_dict(content["features"])
else:
features = Features.from_arrow_schema(pa_table.schema)
response = create_response(
pa_table,
dataset,
config,
split,
cached_assets_base_url,
cached_assets_directory,
offset,
features,
num_rows_total,
)
with StepProfiler(method="search_endpoint", step="generate the OK response"):
return get_json_ok_response(response, max_age=max_age_long, revision=revision)
except Exception as e:
error = e if isinstance(e, ApiError) else UnexpectedApiError("Unexpected error.", e)
with StepProfiler(method="search_endpoint", step="generate API error response"):
return get_json_api_error_response(error=error, max_age=max_age_short, revision=revision)
return search_endpoint
| datasets-server-main | services/search/src/search/routes/search.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from collections.abc import Iterator
from libapi.config import UvicornConfig
from libcommon.processing_graph import ProcessingGraph
from libcommon.queue import _clean_queue_database
from libcommon.resources import CacheMongoResource, QueueMongoResource
from libcommon.simple_cache import _clean_cache_database
from pytest import MonkeyPatch, fixture
from api.config import AppConfig, EndpointConfig
from api.routes.endpoint import EndpointsDefinition, StepsByInputTypeAndEndpoint
# see https://github.com/pytest-dev/pytest/issues/363#issuecomment-406536200
@fixture(scope="session")
def monkeypatch_session() -> Iterator[MonkeyPatch]:
monkeypatch_session = MonkeyPatch()
monkeypatch_session.setenv("CACHE_MONGO_DATABASE", "datasets_server_cache_test")
monkeypatch_session.setenv("QUEUE_MONGO_DATABASE", "datasets_server_queue_test")
hostname = "localhost"
port = "8888"
monkeypatch_session.setenv("API_HF_TIMEOUT_SECONDS", "10")
monkeypatch_session.setenv("API_UVICORN_HOSTNAME", hostname)
monkeypatch_session.setenv("API_UVICORN_PORT", port)
monkeypatch_session.setenv("COMMON_HF_ENDPOINT", f"http://{hostname}:{port}")
yield monkeypatch_session
monkeypatch_session.undo()
@fixture(scope="session")
def app_config(monkeypatch_session: MonkeyPatch) -> AppConfig:
app_config = AppConfig.from_env()
if "test" not in app_config.cache.mongo_database or "test" not in app_config.queue.mongo_database:
raise ValueError("Test must be launched on a test mongo database")
return app_config
@fixture(scope="session")
def endpoint_config(monkeypatch_session: MonkeyPatch) -> EndpointConfig:
return EndpointConfig(
processing_step_names_by_input_type_and_endpoint={
"/splits": {
"dataset": ["dataset-split-names"],
"config": ["config-split-names-from-streaming"],
},
"/first-rows": {"split": ["split-first-rows-from-streaming"]},
"/parquet": {"config": ["config-parquet"]},
}
)
@fixture(scope="session")
def processing_graph(app_config: AppConfig) -> ProcessingGraph:
return ProcessingGraph(app_config.processing_graph.specification)
@fixture(scope="session")
def endpoint_definition(
endpoint_config: EndpointConfig, processing_graph: ProcessingGraph
) -> StepsByInputTypeAndEndpoint:
return EndpointsDefinition(processing_graph, endpoint_config=endpoint_config).steps_by_input_type_and_endpoint
@fixture(scope="session")
def first_dataset_endpoint(endpoint_definition: StepsByInputTypeAndEndpoint) -> str:
return next(
endpoint
for endpoint, input_types in endpoint_definition.items()
if next((endpoint for input_type, _ in input_types.items() if input_type == "dataset"), None)
)
@fixture(scope="session")
def first_config_endpoint(endpoint_definition: StepsByInputTypeAndEndpoint) -> str:
return next(
endpoint
for endpoint, input_types in endpoint_definition.items()
if next((endpoint for input_type, _ in input_types.items() if input_type == "config"), None)
)
@fixture(scope="session")
def first_split_endpoint(endpoint_definition: StepsByInputTypeAndEndpoint) -> str:
return next(
endpoint
for endpoint, input_types in endpoint_definition.items()
if next((endpoint for input_type, _ in input_types.items() if input_type == "split"), None)
)
@fixture(autouse=True)
def cache_mongo_resource(app_config: AppConfig) -> Iterator[CacheMongoResource]:
with CacheMongoResource(database=app_config.cache.mongo_database, host=app_config.cache.mongo_url) as resource:
yield resource
_clean_cache_database()
@fixture(autouse=True)
def queue_mongo_resource(app_config: AppConfig) -> Iterator[QueueMongoResource]:
with QueueMongoResource(database=app_config.queue.mongo_database, host=app_config.queue.mongo_url) as resource:
yield resource
_clean_queue_database()
@fixture(scope="session")
def uvicorn_config(monkeypatch_session: MonkeyPatch) -> UvicornConfig:
return UvicornConfig.from_env()
@fixture(scope="session")
def httpserver_listen_address(uvicorn_config: UvicornConfig) -> tuple[str, int]:
return (uvicorn_config.hostname, uvicorn_config.port)
@fixture(scope="session")
def hf_endpoint(app_config: AppConfig) -> str:
return app_config.common.hf_endpoint
@fixture(scope="session")
def hf_auth_path(app_config: AppConfig) -> str:
return app_config.api.hf_auth_path
| datasets-server-main | services/api/tests/conftest.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from collections.abc import Iterator
from pytest import MonkeyPatch, fixture, mark
from starlette.testclient import TestClient
from api.app import create_app
from api.config import AppConfig
# see https://github.com/pytest-dev/pytest/issues/363#issuecomment-406536200
@fixture(scope="module")
def real_monkeypatch() -> Iterator[MonkeyPatch]:
monkeypatch = MonkeyPatch()
monkeypatch.setenv("CACHE_MONGO_DATABASE", "datasets_server_cache_test")
monkeypatch.setenv("QUEUE_MONGO_DATABASE", "datasets_server_queue_test")
monkeypatch.setenv("COMMON_HF_ENDPOINT", "https://huggingface.co")
monkeypatch.setenv("COMMON_HF_TOKEN", "")
yield monkeypatch
monkeypatch.undo()
@fixture(scope="module")
def real_client(real_monkeypatch: MonkeyPatch) -> TestClient:
return TestClient(create_app())
@fixture(scope="module")
def real_app_config(real_monkeypatch: MonkeyPatch) -> AppConfig:
app_config = AppConfig.from_env()
if "test" not in app_config.cache.mongo_database or "test" not in app_config.queue.mongo_database:
raise ValueError("Test must be launched on a test mongo database")
if app_config.common.hf_endpoint != "https://huggingface.co":
raise ValueError("Test must be launched on the production hub")
return app_config
@mark.real_dataset
def test_webhook(
real_client: TestClient,
) -> None:
dataset = "glue"
payload = {"event": "add", "repo": {"type": "dataset", "name": dataset, "gitalyUid": "123", "headSha": "revision"}}
response = real_client.post("/webhook", json=payload)
assert response.status_code == 200, response.text
| datasets-server-main | services/api/tests/test_app_real.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
| datasets-server-main | services/api/tests/__init__.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from typing import Optional
import pytest
from starlette.testclient import TestClient
from api.app import create_app_with_config
from api.config import AppConfig, EndpointConfig
@pytest.fixture(scope="module")
def client(
monkeypatch_session: pytest.MonkeyPatch, app_config: AppConfig, endpoint_config: EndpointConfig
) -> TestClient:
return TestClient(create_app_with_config(app_config=app_config, endpoint_config=endpoint_config))
def test_cors(client: TestClient, first_dataset_endpoint: str) -> None:
origin = "http://localhost:3000"
method = "GET"
header = "X-Requested-With"
response = client.options(
f"{first_dataset_endpoint}?dataset=dataset1",
headers={
"Origin": origin,
"Access-Control-Request-Method": method,
"Access-Control-Request-Headers": header,
},
)
assert response.status_code == 200
assert (
origin in [o.strip() for o in response.headers["Access-Control-Allow-Origin"].split(",")]
or response.headers["Access-Control-Allow-Origin"] == "*"
)
assert (
header in [o.strip() for o in response.headers["Access-Control-Allow-Headers"].split(",")]
or response.headers["Access-Control-Expose-Headers"] == "*"
)
assert (
method in [o.strip() for o in response.headers["Access-Control-Allow-Methods"].split(",")]
or response.headers["Access-Control-Expose-Headers"] == "*"
)
assert response.headers["Access-Control-Allow-Credentials"] == "true"
def test_get_healthcheck(client: TestClient) -> None:
response = client.get("/healthcheck")
assert response.status_code == 200
assert response.text == "ok"
def test_get_endpoint(client: TestClient, first_dataset_endpoint: str) -> None:
# missing parameter
response = client.get(first_dataset_endpoint)
assert response.status_code == 422
# empty parameter
response = client.get(f"{first_dataset_endpoint}?dataset=")
assert response.status_code == 422
@pytest.mark.parametrize("dataset", (None, ""))
def test_get_dataset_missing_parameter(
client: TestClient,
dataset: Optional[str],
first_dataset_endpoint: str,
) -> None:
response = client.get(first_dataset_endpoint, params={"dataset": dataset, "config": None, "split": None})
assert response.status_code == 422
# this test might fail someday, if the `first_split_endpoint` fixture turns out not to be a split-only endpoint
@pytest.mark.parametrize(
"dataset,config,split",
[
(None, None, None),
("a", None, None),
("a", "b", None),
("a", "b", ""),
],
)
def test_get_split_missing_parameter(
client: TestClient,
dataset: Optional[str],
config: Optional[str],
split: Optional[str],
first_split_endpoint: str,
) -> None:
response = client.get(first_split_endpoint, params={"dataset": dataset, "config": config, "split": split})
assert response.status_code == 422
def test_metrics(client: TestClient) -> None:
response = client.get("/healthcheck")
response = client.get("/metrics")
assert response.status_code == 200
text = response.text
lines = text.split("\n")
# examples:
# starlette_requests_total{method="GET",path_template="/metrics"} 1.0
# method_steps_processing_time_seconds_sum{method="healthcheck_endpoint",step="all"} 1.6772013623267412e-05
metrics = {
parts[0]: float(parts[1]) for line in lines if line and line[0] != "#" and (parts := line.rsplit(" ", 1))
}
# the metrics should contain at least the following
for name in [
'starlette_requests_total{method="GET",path_template="/metrics"}',
'method_steps_processing_time_seconds_sum{context="None",method="healthcheck_endpoint",step="all"}',
]:
assert name in metrics, metrics
assert metrics[name] > 0, metrics
| datasets-server-main | services/api/tests/test_app.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from collections.abc import Mapping
from typing import Any
import pytest
from api.routes.webhook import parse_payload
@pytest.mark.parametrize(
"payload,raises",
[
({"event": "add", "repo": {"type": "dataset", "name": "webhook-test", "gitalyUid": "123"}}, False),
(
{
"event": "move",
"movedTo": "webhook-test",
"repo": {"type": "dataset", "name": "previous-name", "gitalyUid": "123"},
},
False,
),
({"event": "add", "repo": {"type": "dataset", "name": "webhook-test"}}, False),
({"event": "doesnotexist", "repo": {"type": "dataset", "name": "webhook-test", "gitalyUid": "123"}}, True),
(
{
"event": "update",
"scope": "repo.content",
"repo": {
"type": "dataset",
"name": "AresEkb/prof_standards_sbert_large_mt_nlu_ru",
"id": "63bab13ae0f4fee16cebf084",
"private": False,
"url": {
"web": "https://huggingface.co/datasets/AresEkb/prof_standards_sbert_large_mt_nlu_ru",
"api": "https://huggingface.co/api/datasets/AresEkb/prof_standards_sbert_large_mt_nlu_ru",
},
"headSha": "c926e6ce93cbd5a6eaf0895abd48776cc5bae638",
"gitalyUid": "c5afeca93171cfa1f6c138ef683df4a53acffd8c86283ab8e7e338df369d2fff",
"authorId": "6394b8740b746ac6a969bd51",
"tags": [],
},
"webhook": {"id": "632c22b3df82fca9e3b46154", "version": 2},
},
False,
),
(
{"event": "update", "repo": {"type": "dataset", "name": "AresEkb/prof_standards_sbert_large_mt_nlu_ru"}},
False,
),
],
)
def test_parse_payload(
payload: Mapping[str, Any],
raises: bool,
) -> None:
if raises:
with pytest.raises(Exception):
parse_payload(payload)
else:
parse_payload(payload)
| datasets-server-main | services/api/tests/routes/test_webhook.py |
| datasets-server-main | services/api/tests/routes/__init__.py
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from http import HTTPStatus
from unittest.mock import patch
from libapi.exceptions import ResponseNotReadyError
from libapi.utils import get_cache_entry_from_steps
from libcommon.config import ProcessingGraphConfig
from libcommon.processing_graph import ProcessingGraph
from libcommon.queue import Queue
from libcommon.simple_cache import upsert_response
from pytest import raises
from api.config import AppConfig, EndpointConfig
from api.routes.endpoint import EndpointsDefinition
CACHE_MAX_DAYS = 90
def test_endpoints_definition() -> None:
config = ProcessingGraphConfig()
graph = ProcessingGraph(config.specification)
endpoint_config = EndpointConfig.from_env()
endpoints_definition = EndpointsDefinition(graph, endpoint_config)
assert endpoints_definition
definition = endpoints_definition.steps_by_input_type_and_endpoint
assert definition
splits = definition["/splits"]
assert splits is not None
assert sorted(list(splits)) == ["config", "dataset"]
assert splits["dataset"] is not None
assert splits["config"] is not None
assert len(splits["dataset"]) == 1 # Has one processing step
assert len(splits["config"]) == 2 # Has two processing steps
first_rows = definition["/first-rows"]
assert first_rows is not None
assert sorted(list(first_rows)) == ["split"]
assert first_rows["split"] is not None
assert len(first_rows["split"]) == 2 # Has two processing steps
parquet = definition["/parquet"]
assert parquet is not None
assert sorted(list(parquet)) == ["config", "dataset"]
assert parquet["dataset"] is not None
assert parquet["config"] is not None
assert len(parquet["dataset"]) == 1 # Only has one processing step
assert len(parquet["config"]) == 1 # Only has one processing step
dataset_info = definition["/info"]
assert dataset_info is not None
assert sorted(list(dataset_info)) == ["config", "dataset"]
assert dataset_info["dataset"] is not None
assert dataset_info["config"] is not None
assert len(dataset_info["dataset"]) == 1 # Only has one processing step
assert len(dataset_info["config"]) == 1 # Only has one processing step
size = definition["/size"]
assert size is not None
assert sorted(list(size)) == ["config", "dataset"]
assert size["dataset"] is not None
assert size["config"] is not None
assert len(size["dataset"]) == 1 # Only has one processing step
assert len(size["config"]) == 1 # Only has one processing step
opt_in_out_urls = definition["/opt-in-out-urls"]
assert opt_in_out_urls is not None
assert sorted(list(opt_in_out_urls)) == ["config", "dataset", "split"]
assert opt_in_out_urls["split"] is not None
assert opt_in_out_urls["config"] is not None
assert opt_in_out_urls["dataset"] is not None
assert len(opt_in_out_urls["split"]) == 1 # Only has one processing step
assert len(opt_in_out_urls["config"]) == 1 # Only has one processing step
assert len(opt_in_out_urls["dataset"]) == 1 # Only has one processing step
is_valid = definition["/is-valid"]
assert is_valid is not None
assert sorted(list(is_valid)) == ["config", "dataset", "split"]
assert is_valid["dataset"] is not None
assert is_valid["config"] is not None
assert is_valid["split"] is not None
assert len(is_valid["dataset"]) == 1 # Only has one processing step
assert len(is_valid["config"]) == 1 # Only has one processing step
assert len(is_valid["split"]) == 1 # Only has one processing step
# assert old deleted endpoints don't exist
with raises(KeyError):
_ = definition["/dataset-info"]
with raises(KeyError):
_ = definition["/parquet-and-dataset-info"]
with raises(KeyError):
_ = definition["/config-names"]
def test_get_cache_entry_from_steps() -> None:
dataset = "dataset"
revision = "revision"
config = "config"
app_config = AppConfig.from_env()
graph_config = ProcessingGraphConfig()
processing_graph = ProcessingGraph(graph_config.specification)
cache_with_error = "config-split-names-from-streaming"
cache_without_error = "config-split-names-from-info"
step_with_error = processing_graph.get_processing_step(cache_with_error)
step_without_error = processing_graph.get_processing_step(cache_without_error)
upsert_response(
kind=cache_without_error,
dataset=dataset,
config=config,
content={},
http_status=HTTPStatus.OK,
)
upsert_response(
kind=cache_with_error,
dataset=dataset,
config=config,
content={},
http_status=HTTPStatus.INTERNAL_SERVER_ERROR,
)
# succeeded result is returned
result = get_cache_entry_from_steps(
processing_steps=[step_without_error, step_with_error],
dataset=dataset,
config=config,
split=None,
processing_graph=processing_graph,
hf_endpoint=app_config.common.hf_endpoint,
cache_max_days=CACHE_MAX_DAYS,
)
assert result
assert result["http_status"] == HTTPStatus.OK
# succeeded result is returned even if first step failed
result = get_cache_entry_from_steps(
processing_steps=[step_with_error, step_without_error],
dataset=dataset,
config=config,
split=None,
processing_graph=processing_graph,
hf_endpoint=app_config.common.hf_endpoint,
cache_max_days=CACHE_MAX_DAYS,
)
assert result
assert result["http_status"] == HTTPStatus.OK
# error result is returned if all steps failed
result = get_cache_entry_from_steps(
processing_steps=[step_with_error, step_with_error],
dataset=dataset,
config=config,
split=None,
processing_graph=processing_graph,
hf_endpoint=app_config.common.hf_endpoint,
cache_max_days=CACHE_MAX_DAYS,
)
assert result
assert result["http_status"] == HTTPStatus.INTERNAL_SERVER_ERROR
# pending job throws exception
queue = Queue()
queue.add_job(job_type="dataset-split-names", dataset=dataset, revision=revision, config=config, difficulty=50)
non_existent_step = processing_graph.get_processing_step("dataset-split-names")
with patch("libcommon.dataset.get_dataset_git_revision", return_value=revision):
# ^ the dataset does not exist on the Hub, we don't want to raise an issue here
with raises(ResponseNotReadyError):
get_cache_entry_from_steps(
processing_steps=[non_existent_step],
dataset=dataset,
config=None,
split=None,
processing_graph=processing_graph,
hf_endpoint=app_config.common.hf_endpoint,
cache_max_days=CACHE_MAX_DAYS,
)
| datasets-server-main | services/api/tests/routes/test_endpoint.py |
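The tests above pin down the fallback behaviour of get_cache_entry_from_steps: the first step with a successful cache entry wins, and an error entry is only returned when every step failed (a ResponseNotReadyError is raised when nothing is cached but a job is pending). Below is a minimal, dependency-free sketch of that selection rule, assuming cache entries are plain dicts carrying an http_status field; the real helper lives in libapi.utils and also handles pending jobs and backfilling.
from http import HTTPStatus
from typing import Optional
def pick_cache_entry(entries: list[dict]) -> Optional[dict]:
    # return the first successful entry; otherwise fall back to the last entry seen (an error)
    last_entry: Optional[dict] = None
    for entry in entries:
        if entry["http_status"] == HTTPStatus.OK:
            return entry
        last_entry = entry
    return last_entry
ok, error = {"http_status": HTTPStatus.OK}, {"http_status": HTTPStatus.INTERNAL_SERVER_ERROR}
assert pick_cache_entry([error, ok]) == ok  # a success is preferred even if a failing step comes first
assert pick_cache_entry([error, error]) == error  # only errors: the error entry is returned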
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from collections.abc import Mapping
from dataclasses import dataclass, field
from libapi.config import ApiConfig
from libcommon.config import (
CacheConfig,
CommonConfig,
LogConfig,
ProcessingGraphConfig,
QueueConfig,
)
from libcommon.processing_graph import InputType
@dataclass(frozen=True)
class AppConfig:
api: ApiConfig = field(default_factory=ApiConfig)
cache: CacheConfig = field(default_factory=CacheConfig)
common: CommonConfig = field(default_factory=CommonConfig)
log: LogConfig = field(default_factory=LogConfig)
queue: QueueConfig = field(default_factory=QueueConfig)
processing_graph: ProcessingGraphConfig = field(default_factory=ProcessingGraphConfig)
@classmethod
def from_env(cls) -> "AppConfig":
common_config = CommonConfig.from_env()
return cls(
common=common_config,
cache=CacheConfig.from_env(),
log=LogConfig.from_env(),
processing_graph=ProcessingGraphConfig.from_env(),
queue=QueueConfig.from_env(),
api=ApiConfig.from_env(hf_endpoint=common_config.hf_endpoint),
)
ProcessingStepNamesByInputType = Mapping[InputType, list[str]]
ProcessingStepNamesByInputTypeAndEndpoint = Mapping[str, ProcessingStepNamesByInputType]
@dataclass(frozen=True)
class EndpointConfig:
"""Contains the endpoint config specification to relate with step names.
The list of processing steps corresponds to the priority in which the response
has to be reached. The cache from the first step in the list will be used first
then, if it's an error or missing, the second one, etc.
The related steps depend on the query parameters passed in the request
(dataset, config, split)
"""
processing_step_names_by_input_type_and_endpoint: ProcessingStepNamesByInputTypeAndEndpoint = field(
default_factory=lambda: {
"/splits": {
"dataset": [
"dataset-split-names",
],
"config": ["config-split-names-from-streaming", "config-split-names-from-info"],
},
"/first-rows": {"split": ["split-first-rows-from-streaming", "split-first-rows-from-parquet"]},
"/parquet": {
"dataset": ["dataset-parquet"],
"config": ["config-parquet"],
},
"/info": {"dataset": ["dataset-info"], "config": ["config-info"]},
"/size": {
"dataset": ["dataset-size"],
"config": ["config-size"],
},
"/opt-in-out-urls": {
"dataset": ["dataset-opt-in-out-urls-count"],
"config": ["config-opt-in-out-urls-count"],
"split": ["split-opt-in-out-urls-count"],
},
"/is-valid": {
"dataset": ["dataset-is-valid"],
"config": ["config-is-valid"],
"split": ["split-is-valid"],
},
"/statistics": {"split": ["split-descriptive-statistics"]},
}
)
@classmethod
def from_env(cls) -> "EndpointConfig":
# TODO: allow passing the mapping between endpoint and processing steps via env vars
return cls()
| datasets-server-main | services/api/src/api/config.py |
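A minimal sketch of how this mapping is meant to be read, assuming the api package above is importable; it only inspects the default EndpointConfig and does not build a processing graph.
from api.config import EndpointConfig
endpoint_config = EndpointConfig()
# For /splits, a request carrying only `dataset` uses the "dataset" steps, while a request
# that also carries `config` uses the "config" steps, each list being tried in priority order.
for input_type, step_names in endpoint_config.processing_step_names_by_input_type_and_endpoint["/splits"].items():
    print(input_type, step_names)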
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
| datasets-server-main | services/api/src/api/__init__.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import uvicorn
from libapi.config import UvicornConfig
from libapi.jwt_token import get_jwt_public_keys
from libapi.routes.healthcheck import healthcheck_endpoint
from libapi.routes.metrics import create_metrics_endpoint
from libapi.utils import EXPOSED_HEADERS
from libcommon.log import init_logging
from libcommon.processing_graph import ProcessingGraph
from libcommon.resources import CacheMongoResource, QueueMongoResource, Resource
from starlette.applications import Starlette
from starlette.middleware import Middleware
from starlette.middleware.cors import CORSMiddleware
from starlette.middleware.gzip import GZipMiddleware
from starlette.routing import Route
from starlette_prometheus import PrometheusMiddleware
from api.config import AppConfig, EndpointConfig
from api.routes.endpoint import EndpointsDefinition, create_endpoint
from api.routes.webhook import create_webhook_endpoint
def create_app() -> Starlette:
app_config = AppConfig.from_env()
endpoint_config = EndpointConfig.from_env()
return create_app_with_config(app_config=app_config, endpoint_config=endpoint_config)
def create_app_with_config(app_config: AppConfig, endpoint_config: EndpointConfig) -> Starlette:
init_logging(level=app_config.log.level)
# ^ set first to have logs as soon as possible
processing_graph = ProcessingGraph(app_config.processing_graph.specification)
endpoints_definition = EndpointsDefinition(processing_graph, endpoint_config)
hf_jwt_public_keys = get_jwt_public_keys(
algorithm_name=app_config.api.hf_jwt_algorithm,
public_key_url=app_config.api.hf_jwt_public_key_url,
additional_public_keys=app_config.api.hf_jwt_additional_public_keys,
timeout_seconds=app_config.api.hf_timeout_seconds,
)
middleware = [
Middleware(
CORSMiddleware,
allow_origins=["*"],
allow_methods=["*"],
allow_headers=["*"],
allow_credentials=True,
expose_headers=EXPOSED_HEADERS,
),
Middleware(GZipMiddleware),
Middleware(PrometheusMiddleware, filter_unhandled_paths=True),
]
cache_resource = CacheMongoResource(database=app_config.cache.mongo_database, host=app_config.cache.mongo_url)
queue_resource = QueueMongoResource(database=app_config.queue.mongo_database, host=app_config.queue.mongo_url)
resources: list[Resource] = [cache_resource, queue_resource]
if not cache_resource.is_available():
raise RuntimeError("The connection to the cache database could not be established. Exiting.")
if not queue_resource.is_available():
raise RuntimeError("The connection to the queue database could not be established. Exiting.")
routes = [
Route(
endpoint_name,
endpoint=create_endpoint(
endpoint_name=endpoint_name,
steps_by_input_type=steps_by_input_type,
processing_graph=processing_graph,
hf_endpoint=app_config.common.hf_endpoint,
hf_token=app_config.common.hf_token,
hf_jwt_public_keys=hf_jwt_public_keys,
hf_jwt_algorithm=app_config.api.hf_jwt_algorithm,
external_auth_url=app_config.api.external_auth_url,
hf_timeout_seconds=app_config.api.hf_timeout_seconds,
max_age_long=app_config.api.max_age_long,
max_age_short=app_config.api.max_age_short,
cache_max_days=app_config.cache.max_days,
),
)
for endpoint_name, steps_by_input_type in endpoints_definition.steps_by_input_type_and_endpoint.items()
] + [
Route("/healthcheck", endpoint=healthcheck_endpoint),
Route("/metrics", endpoint=create_metrics_endpoint()),
# ^ called by Prometheus
Route(
"/webhook",
endpoint=create_webhook_endpoint(
processing_graph=processing_graph,
hf_webhook_secret=app_config.api.hf_webhook_secret,
cache_max_days=app_config.cache.max_days,
),
methods=["POST"],
),
# ^ called by the Hub webhooks
]
return Starlette(routes=routes, middleware=middleware, on_shutdown=[resource.release for resource in resources])
def start() -> None:
uvicorn_config = UvicornConfig.from_env()
uvicorn.run(
"app:create_app",
host=uvicorn_config.hostname,
port=uvicorn_config.port,
factory=True,
workers=uvicorn_config.num_workers,
)
| datasets-server-main | services/api/src/api/app.py |
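A hedged sketch of exercising the application factory above in a test-like setting; it assumes the cache and queue MongoDB instances are reachable (otherwise create_app_with_config raises RuntimeError) and that httpx is installed for Starlette's test client.
from starlette.testclient import TestClient
from api.app import create_app_with_config
from api.config import AppConfig, EndpointConfig
app = create_app_with_config(app_config=AppConfig.from_env(), endpoint_config=EndpointConfig())
with TestClient(app) as client:
    # the healthcheck route is served by libapi's healthcheck_endpoint
    print(client.get("/healthcheck").status_code)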
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from api.app import start
if __name__ == "__main__":
start()
| datasets-server-main | services/api/src/api/main.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import logging
from typing import Any, Literal, Optional, TypedDict
from jsonschema import ValidationError, validate
from libapi.utils import Endpoint, get_response
from libcommon.exceptions import CustomError, DatasetRevisionEmptyError
from libcommon.operations import backfill_dataset, delete_dataset
from libcommon.processing_graph import ProcessingGraph
from libcommon.prometheus import StepProfiler
from libcommon.utils import Priority
from starlette.requests import Request
from starlette.responses import Response
schema = {
"$schema": "https://json-schema.org/draft/2020-12/schema",
"type": "object",
"properties": {
"event": {"type": "string", "enum": ["add", "remove", "update", "move"]},
"movedTo": {"type": "string"},
"repo": {
"type": "object",
"properties": {
"headSha": {"type": "string"},
"name": {"type": "string"},
"type": {"type": "string", "enum": ["dataset", "model", "space"]},
},
"required": ["type", "name"],
},
},
"required": ["event", "repo"],
}
class _MoonWebhookV2PayloadRepo(TypedDict):
type: Literal["model", "dataset", "space"]
name: str
class MoonWebhookV2PayloadRepo(_MoonWebhookV2PayloadRepo, total=False):
headSha: Optional[str]
class MoonWebhookV2Payload(TypedDict):
"""
Payload from a moon-landing webhook call, v2.
"""
event: Literal["add", "remove", "update", "move"]
movedTo: Optional[str]
repo: MoonWebhookV2PayloadRepo
def parse_payload(json: Any) -> MoonWebhookV2Payload:
validate(instance=json, schema=schema)
return json # type: ignore
# ^ validate() ensures the content is correct, but does not give the type
def process_payload(
processing_graph: ProcessingGraph,
payload: MoonWebhookV2Payload,
cache_max_days: int,
trust_sender: bool = False,
) -> None:
if payload["repo"]["type"] != "dataset":
return
dataset = payload["repo"]["name"]
if dataset is None:
return
event = payload["event"]
if event == "remove":
# destructive actions (delete, move) require a trusted sender
if trust_sender:
delete_dataset(dataset=dataset)
return
revision = payload["repo"]["headSha"] if "headSha" in payload["repo"] else None
if revision is None:
raise DatasetRevisionEmptyError(message=f"Dataset {dataset} has no revision")
if event in ["add", "update"]:
backfill_dataset(
dataset=dataset,
revision=revision,
processing_graph=processing_graph,
priority=Priority.NORMAL,
cache_max_days=cache_max_days,
)
elif event == "move" and (moved_to := payload["movedTo"]):
# destructive actions (delete, move) require a trusted sender
if trust_sender:
backfill_dataset(
dataset=moved_to,
revision=revision,
processing_graph=processing_graph,
priority=Priority.NORMAL,
cache_max_days=cache_max_days,
)
delete_dataset(dataset=dataset)
def create_webhook_endpoint(
processing_graph: ProcessingGraph, cache_max_days: int, hf_webhook_secret: Optional[str] = None
) -> Endpoint:
async def webhook_endpoint(request: Request) -> Response:
with StepProfiler(method="webhook_endpoint", step="all"):
with StepProfiler(method="webhook_endpoint", step="get JSON"):
try:
json = await request.json()
except Exception:
content = {"status": "error", "error": "the body could not be parsed as a JSON"}
logging.info("/webhook: the body could not be parsed as a JSON.")
return get_response(content, 400)
logging.info(f"/webhook: {json}")
with StepProfiler(method="webhook_endpoint", step="parse payload and headers"):
try:
payload = parse_payload(json)
except ValidationError as e:
content = {"status": "error", "error": "the JSON payload is invalid"}
logging.info(f"/webhook: the JSON body is invalid. JSON: {json}. Error: {e}")
return get_response(content, 400)
except Exception as e:
logging.exception("Unexpected error", exc_info=e)
content = {"status": "error", "error": "unexpected error"}
logging.warning(f"/webhook: unexpected error while parsing the JSON body is invalid. Error: {e}")
return get_response(content, 500)
HEADER = "x-webhook-secret"
trust_sender = (
hf_webhook_secret is not None
and (secret := request.headers.get(HEADER)) is not None
and secret == hf_webhook_secret
)
if not trust_sender:
logging.info(f"/webhook: the sender is not trusted. JSON: {json}")
with StepProfiler(method="webhook_endpoint", step="process payload"):
try:
process_payload(
processing_graph=processing_graph,
payload=payload,
trust_sender=trust_sender,
cache_max_days=cache_max_days,
)
except CustomError as e:
content = {"status": "error", "error": "the dataset is not supported"}
dataset = payload["repo"]["name"]
logging.debug(f"/webhook: the dataset {dataset} is not supported. JSON: {json}. Error: {e}")
return get_response(content, 400)
content = {"status": "ok"}
return get_response(content, 200)
return webhook_endpoint
| datasets-server-main | services/api/src/api/routes/webhook.py |
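An illustrative call of parse_payload with a payload that satisfies the JSON schema above; the repository name and sha are placeholders, and only the required fields plus headSha are set.
from api.routes.webhook import parse_payload
payload = parse_payload(
    {
        "event": "update",
        "repo": {"type": "dataset", "name": "user/placeholder-dataset", "headSha": "0123abcd"},
    }
)
print(payload["event"], payload["repo"]["name"])  # a malformed payload would raise jsonschema.ValidationError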
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
| datasets-server-main | services/api/src/api/routes/__init__.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import logging
from abc import ABC, abstractmethod
from collections.abc import Mapping
from http import HTTPStatus
from typing import Optional, TypedDict
from libapi.authentication import auth_check
from libapi.exceptions import (
ApiError,
MissingRequiredParameterError,
UnexpectedApiError,
)
from libapi.utils import (
Endpoint,
are_valid_parameters,
get_cache_entry_from_steps,
get_json_api_error_response,
get_json_error_response,
get_json_ok_response,
)
from libcommon.processing_graph import InputType, ProcessingGraph, ProcessingStep
from libcommon.prometheus import StepProfiler
from starlette.requests import Request
from starlette.responses import Response
from typing_extensions import override
from api.config import EndpointConfig
StepsByInputType = Mapping[InputType, list[ProcessingStep]]
StepsByInputTypeAndEndpoint = Mapping[str, StepsByInputType]
class EndpointsDefinition:
"""Definition of supported endpoints and its relation with processing steps."""
steps_by_input_type_and_endpoint: StepsByInputTypeAndEndpoint
def __init__(self, graph: ProcessingGraph, endpoint_config: EndpointConfig):
processing_step_names_by_input_type_and_endpoint = (
endpoint_config.processing_step_names_by_input_type_and_endpoint.items()
)
self.steps_by_input_type_and_endpoint = {
endpoint: {
input_type: [
graph.get_processing_step(processing_step_name) for processing_step_name in processing_step_names
]
for input_type, processing_step_names in processing_step_names_by_input_type.items()
}
for endpoint, processing_step_names_by_input_type in processing_step_names_by_input_type_and_endpoint
}
# TODO: remove once full scan is implemented for spawning urls scan
class OptInOutUrlsCountResponse(TypedDict):
urls_columns: list[str]
num_opt_in_urls: int
num_opt_out_urls: int
num_urls: int
num_scanned_rows: int
has_urls_columns: bool
full_scan: Optional[bool]
# TODO: remove once full scan is implemented for spawning urls scan
HARD_CODED_OPT_IN_OUT_URLS = {
"laion/laion2B-en": OptInOutUrlsCountResponse(
urls_columns=["URL"],
num_opt_in_urls=5,
num_opt_out_urls=42785281,
num_urls=2322161807,
        num_scanned_rows=0,  # unknown; kept at 0 for now because the UI expects a non-null value
has_urls_columns=True,
full_scan=True,
),
"kakaobrain/coyo-700m": OptInOutUrlsCountResponse(
urls_columns=["url"],
num_opt_in_urls=2,
num_opt_out_urls=4691511,
num_urls=746972269,
        num_scanned_rows=0,  # unknown; kept at 0 for now because the UI expects a non-null value
has_urls_columns=True,
full_scan=True,
),
}
class InputTypeValidator(ABC):
input_type: InputType = NotImplemented
@abstractmethod
def are_parameters_sufficient(self, dataset: Optional[str], config: Optional[str], split: Optional[str]) -> bool:
pass
@abstractmethod
def get_error_message(self) -> str:
pass
@abstractmethod
def get_useful_parameters(
self, dataset: Optional[str], config: Optional[str], split: Optional[str]
) -> tuple[Optional[str], Optional[str], Optional[str]]:
pass
@staticmethod
def from_input_type(input_type: InputType) -> "InputTypeValidator":
return (
DatasetInputTypeValidator()
if input_type == "dataset"
else ConfigInputTypeValidator()
if input_type == "config"
else SplitInputTypeValidator()
)
class DatasetInputTypeValidator(InputTypeValidator):
input_type: InputType = "dataset"
@override
def are_parameters_sufficient(self, dataset: Optional[str], config: Optional[str], split: Optional[str]) -> bool:
return are_valid_parameters([dataset])
@override
def get_error_message(self) -> str:
return "Parameter 'dataset' is required"
@override
def get_useful_parameters(
self, dataset: Optional[str], config: Optional[str], split: Optional[str]
) -> tuple[Optional[str], Optional[str], Optional[str]]:
return (dataset, None, None)
class ConfigInputTypeValidator(InputTypeValidator):
input_type: InputType = "config"
@override
def are_parameters_sufficient(self, dataset: Optional[str], config: Optional[str], split: Optional[str]) -> bool:
return are_valid_parameters([dataset, config])
@override
def get_error_message(self) -> str:
return "Parameters 'config' and 'dataset' are required"
@override
def get_useful_parameters(
self, dataset: Optional[str], config: Optional[str], split: Optional[str]
) -> tuple[Optional[str], Optional[str], Optional[str]]:
return (dataset, config, None)
class SplitInputTypeValidator(InputTypeValidator):
input_type: InputType = "split"
@override
def are_parameters_sufficient(self, dataset: Optional[str], config: Optional[str], split: Optional[str]) -> bool:
return are_valid_parameters([dataset, config, split])
@override
def get_error_message(self) -> str:
return "Parameters 'split', 'config' and 'dataset' are required"
@override
def get_useful_parameters(
self, dataset: Optional[str], config: Optional[str], split: Optional[str]
) -> tuple[Optional[str], Optional[str], Optional[str]]:
return (dataset, config, split)
def get_input_type_validators_by_priority(steps_by_input_type: StepsByInputType) -> list[InputTypeValidator]:
input_type_order: list[InputType] = ["split", "config", "dataset"]
return [
InputTypeValidator.from_input_type(input_type)
for input_type in input_type_order
if input_type in steps_by_input_type
]
def get_input_type_validator_by_parameters(
validators: list[InputTypeValidator], dataset: Optional[str], config: Optional[str], split: Optional[str]
) -> InputTypeValidator:
error_message = "No processing steps supported for parameters"
for validator in validators:
error_message = validator.get_error_message()
if validator.are_parameters_sufficient(dataset=dataset, config=config, split=split):
return validator
raise MissingRequiredParameterError(error_message)
def create_endpoint(
endpoint_name: str,
steps_by_input_type: StepsByInputType,
processing_graph: ProcessingGraph,
cache_max_days: int,
hf_endpoint: str,
hf_token: Optional[str] = None,
hf_jwt_public_keys: Optional[list[str]] = None,
hf_jwt_algorithm: Optional[str] = None,
external_auth_url: Optional[str] = None,
hf_timeout_seconds: Optional[float] = None,
max_age_long: int = 0,
max_age_short: int = 0,
) -> Endpoint:
async def processing_step_endpoint(request: Request) -> Response:
context = f"endpoint: {endpoint_name}"
revision: Optional[str] = None
with StepProfiler(method="processing_step_endpoint", step="all", context=context):
try:
with StepProfiler(
method="processing_step_endpoint",
step="validate parameters and get processing steps",
context=context,
):
# validating request parameters
dataset_parameter = request.query_params.get("dataset")
config_parameter = request.query_params.get("config")
split_parameter = request.query_params.get("split")
validators = get_input_type_validators_by_priority(steps_by_input_type=steps_by_input_type)
logging.debug(
f"endpoint={endpoint_name} dataset={dataset_parameter} config={config_parameter}"
+ f" split={split_parameter}"
)
validator = get_input_type_validator_by_parameters(
validators, dataset_parameter, config_parameter, split_parameter
)
processing_steps = steps_by_input_type[validator.input_type]
dataset, config, split = validator.get_useful_parameters(
dataset_parameter, config_parameter, split_parameter
)
# for now, dataset is always required in the endpoints.
if not dataset:
raise MissingRequiredParameterError("Parameter 'dataset' is required")
# if auth_check fails, it will raise an exception that will be caught below
with StepProfiler(method="processing_step_endpoint", step="check authentication", context=context):
auth_check(
dataset,
external_auth_url=external_auth_url,
request=request,
hf_jwt_public_keys=hf_jwt_public_keys,
hf_jwt_algorithm=hf_jwt_algorithm,
hf_timeout_seconds=hf_timeout_seconds,
)
# getting result based on processing steps
with StepProfiler(method="processing_step_endpoint", step="get cache entry", context=context):
# TODO: remove once full scan is implemented for spawning urls scan
if (
endpoint_name == "/opt-in-out-urls"
and validator.input_type == "dataset"
and dataset in HARD_CODED_OPT_IN_OUT_URLS
):
return get_json_ok_response(
content=HARD_CODED_OPT_IN_OUT_URLS[dataset], max_age=max_age_long, revision=revision
)
result = get_cache_entry_from_steps(
processing_steps=processing_steps,
dataset=dataset,
config=config,
split=split,
processing_graph=processing_graph,
hf_endpoint=hf_endpoint,
hf_token=hf_token,
hf_timeout_seconds=hf_timeout_seconds,
cache_max_days=cache_max_days,
)
content = result["content"]
http_status = result["http_status"]
error_code = result["error_code"]
revision = result["dataset_git_revision"]
if http_status == HTTPStatus.OK:
with StepProfiler(method="processing_step_endpoint", step="generate OK response", context=context):
return get_json_ok_response(content=content, max_age=max_age_long, revision=revision)
with StepProfiler(method="processing_step_endpoint", step="generate error response", context=context):
return get_json_error_response(
content=content,
status_code=http_status,
max_age=max_age_short,
error_code=error_code,
revision=revision,
)
except Exception as e:
error = e if isinstance(e, ApiError) else UnexpectedApiError("Unexpected error.", e)
with StepProfiler(
method="processing_step_endpoint", step="generate API error response", context=context
):
return get_json_api_error_response(error=error, max_age=max_age_short, revision=revision)
return processing_step_endpoint
| datasets-server-main | services/api/src/api/routes/endpoint.py |
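A minimal sketch of the validator selection implemented above, using only helpers from this module; the steps_by_input_type mapping is a stand-in with empty step lists, which is enough to drive the selection.
from api.routes.endpoint import (
    get_input_type_validator_by_parameters,
    get_input_type_validators_by_priority,
)
validators = get_input_type_validators_by_priority(steps_by_input_type={"dataset": [], "config": []})
validator = get_input_type_validator_by_parameters(validators, dataset="user/ds", config="default", split=None)
print(validator.input_type)  # "config": the most specific input type whose parameters are all present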
from trec_car.read_data import *
import sys
if len(sys.argv) not in (2, 4):
    print("usage:", sys.argv[0], "articlefile [outlinefile paragraphfile]")
    sys.exit(1)
articles=sys.argv[1]
# to open pages use iter_pages; to open outlines use iter_annotations (or iter_outlines)
# See docstrings of respective objects for more documentation.
with open(articles, 'rb') as f:
for p in iter_pages(f):
print('\npagename:', p.page_name)
print('\npageid:', p.page_id)
print('\nmeta:', p.page_meta)
# get infoboxes
print('\ninfoboxes:')
for box in p.get_infoboxes():
print(box)
print()
# get one data structure with nested (heading, [children]) pairs
headings = p.nested_headings()
# print("headings", [section.heading for (section, content) in headings])
print("sections with content: ")
for (section, _) in headings:
if section:
print (section.get_text())
print("sections with content: ")
for section in p.child_sections:
if section:
print ('== ', section.heading ,' ==')
print (section.get_text_with_headings(False))
if len(p.outline())>0:
print( p.outline()[0].__str__())
print('deep headings= ', [ (str(section.heading), len(children)) for (section, children) in p.deep_headings_list()])
print('flat headings= ' ,["/".join([str(section.heading) for section in sectionpath]) for sectionpath in p.flat_headings_list()])
if len(sys.argv) == 2:
    # only the articles file was given: skip the outlines and paragraphs parts
    sys.exit()
outlines=sys.argv[2]
paragraphs=sys.argv[3]
with open(outlines, 'rb') as f:
for p in iter_annotations(f):
print('\npagename:', p.page_name)
# get one data structure with nested (heading, [children]) pairs
headings = p.nested_headings()
print('headings= ', [ (str(section.heading), len(children)) for (section, children) in headings])
if len(p.outline())>2:
print('heading 1=', p.outline()[0])
print('deep headings= ', [ (str(section.heading), len(children)) for (section, children) in p.deep_headings_list()])
print('flat headings= ' ,["/".join([str(section.heading) for section in sectionpath]) for sectionpath in p.flat_headings_list()])
# exit(0)
with open(paragraphs, 'rb') as f:
for p in iter_paragraphs(f):
print('\n', p.para_id, ':')
# Print just the text
texts = [elem.text if isinstance(elem, ParaText)
else elem.anchor_text
for elem in p.bodies]
print(' '.join(texts))
# Print just the linked entities
entities = [elem.page
for elem in p.bodies
if isinstance(elem, ParaLink)]
print(entities)
# Print text interspersed with links as pairs (text, link)
mixed = [(elem.anchor_text, elem.page) if isinstance(elem, ParaLink)
else (elem.text, None)
for elem in p.bodies]
print(mixed)
| datasets-server-main | services/worker/vendors/trec-car-tools/python3/read_data_test.py |
# -*- coding: utf-8 -*-
#
# trec-car-tools documentation build configuration file, created by
# sphinx-quickstart on Fri Nov 10 09:43:28 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
#
import os
import sys
sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'trec-car-tools'
copyright = u'2017, Ben Gamari, Laura Dietz'
author = u'Ben Gamari, Laura Dietz'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1.0'
# The full version, including alpha/beta/rc tags.
release = u'1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'trec-car-tools v1.0'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
| datasets-server-main | services/worker/vendors/trec-car-tools/python3/conf.py |
#!/usr/bin/env python3
from trec_car.read_data import *
import argparse
def dump_pages(args):
for p in iter_pages(args.file):
print(p.page_meta)
print(p)
print("\n".join([("%s %s"% (heading,content)) for (heading,content) in p.deep_headings_list()]))
def dump_outlines(args):
for p in iter_outlines(args.file):
print(p.page_meta)
print(p)
print("\n".join([("%s"% heading ) for (heading,empty_content) in p.deep_headings_list()]))
def dump_paragraphs(args):
for p in iter_paragraphs(args.file):
print(p)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
subparser = parser.add_subparsers()
p = subparser.add_parser('pages', help='Dump pages')
p.add_argument('file', type=argparse.FileType('rb'), help='A pages file')
p.set_defaults(func=dump_pages)
p = subparser.add_parser('outlines', help='Dump outlines')
p.add_argument('file', type=argparse.FileType('rb'), help='An outlines file')
p.set_defaults(func=dump_outlines)
p = subparser.add_parser('paragraphs', help='Dump paragraphs')
p.add_argument('file', type=argparse.FileType('rb'), help='A paragraphs file')
p.set_defaults(func=dump_paragraphs)
args = parser.parse_args()
if 'func' not in args:
parser.print_usage()
else:
args.func(args)
| datasets-server-main | services/worker/vendors/trec-car-tools/python3/test.py |
#!/usr/bin/env python3
from setuptools import setup
setup(
name='trec-car-tools',
version='2.5.4',
packages=['trec_car'],
url='https://github.com/TREMA-UNH/trec-car-tools/python3',
# download_url='https://github.com/TREMA-UNH/trec-car-tools/archive/2.0.tar.gz',
keywords=['wikipedia','complex answer retrieval','trec car'],
license='BSD 3-Clause',
author='laura-dietz',
author_email='Laura.Dietz@unh.edu',
description='Support tools for TREC CAR participants. Also see trec-car.cs.unh.edu',
install_requires=['cbor>=1.0.0', 'numpy>=1.11.2'],
python_requires='>=3.6',
classifiers=[
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8'
]
)
| datasets-server-main | services/worker/vendors/trec-car-tools/python3/setup.py |