diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000000000000000000000000000000000000..1b8a19eae19f9a80e349d9196b26f82256d12aac --- /dev/null +++ b/.dockerignore @@ -0,0 +1,4 @@ +.git +.github +data +output diff --git a/.flake8 b/.flake8 new file mode 100644 index 0000000000000000000000000000000000000000..f94b74043017432c96676c5dd4d7e21bd41c8b12 --- /dev/null +++ b/.flake8 @@ -0,0 +1,15 @@ +[flake8] +exclude=.cache, .local, server.wave, output, data, reports +max-line-length = 88 +# E203, W503 - black-compatible config +extend-ignore = E203, W503 +per-file-ignores = + */__init__.py: F401 + train.py: E402 + prompt.py: E402 + train_wave.py: E402, I001, I003 + app.py: E402 + tests/src/datasets/test_text_dpo_modeling_ds.py: E501 + tests/src/models/test_dpo_modeling_model.py: E501 + +inline-quotes = " diff --git a/.github/ISSUE_TEMPLATE/bug-report.md b/.github/ISSUE_TEMPLATE/bug-report.md new file mode 100644 index 0000000000000000000000000000000000000000..7f0f8e80e995129aa86971453ef1ee9c43ad520a --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug-report.md @@ -0,0 +1,18 @@ +--- +name: "\U0001F41B Bug Report" +about: Create a bug report +title: "[BUG]" +labels: type/bug +assignees: '' +--- + +### 🐛 Bug + + + +### To Reproduce + + + +### LLM Studio version + diff --git a/.github/ISSUE_TEMPLATE/code-improvement.md b/.github/ISSUE_TEMPLATE/code-improvement.md new file mode 100644 index 0000000000000000000000000000000000000000..69a8d64f66f083cf9e6ba9f1d3154af15967152e --- /dev/null +++ b/.github/ISSUE_TEMPLATE/code-improvement.md @@ -0,0 +1,15 @@ +--- +name: "\U0001F527 Code improvement" +about: Suggest a code improvement, e.g. refactoring, deprecation, etc. 
+title: "[CODE IMPROVEMENT]" +labels: area/core +assignees: '' +--- + +### 🔧 Proposed code refactoring + + + +### Motivation + + diff --git a/.github/ISSUE_TEMPLATE/doc-request.md b/.github/ISSUE_TEMPLATE/doc-request.md new file mode 100644 index 0000000000000000000000000000000000000000..b4cfae658dbdbfc10ac58bc0bf657db7d5e131d8 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/doc-request.md @@ -0,0 +1,15 @@ +--- +name: "\U0001F41B Documentation request" +about: Create a doc request +title: "[DOC]" +labels: type/doc +assignees: '' +--- + +### 📃 Documentation + + + +### Motivation + + \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/feature-request.md b/.github/ISSUE_TEMPLATE/feature-request.md new file mode 100644 index 0000000000000000000000000000000000000000..3f8bd3fea4e61f7d249d8b2216f54aa7ce774a13 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature-request.md @@ -0,0 +1,15 @@ +--- +name: "\U0001F680 Feature Request" +about: Submit a proposal/request for a new H2O LLM Studio feature +title: "[FEATURE]" +labels: type/feature +assignees: '' +--- + +### 🚀 Feature + + + +### Motivation + + diff --git a/.github/workflows/build-and-push-nightly.yml b/.github/workflows/build-and-push-nightly.yml new file mode 100644 index 0000000000000000000000000000000000000000..cdd0247d0aa5f1f6575ff55d94f1f59f70e55551 --- /dev/null +++ b/.github/workflows/build-and-push-nightly.yml @@ -0,0 +1,39 @@ +name: Build and Push to Vorvan - Nightly + +on: + schedule: + - cron: "0 4 * * *" + workflow_dispatch: + +jobs: + build: + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v3 + - id: 'auth' + uses: google-github-actions/auth@v1 + with: + credentials_json: '${{ secrets.GCP_CRED_JSON }}' + - name: Configure Google Cloud SDK + uses: google-github-actions/setup-gcloud@v1 + - name: Configure Docker Client + run: |- + gcloud auth configure-docker --quiet #authenticate to gcr + - name: Clean Docker images + run: |- + echo "Available storage before 
cleaning:" + df -h + docker system prune --all --force + echo "Available storage:" + df -h + echo "Removing dotnet" + sudo rm -rf /usr/share/dotnet + echo "Available storage:" + df -h + - name: Docker Build Image + run: |- + docker build -t gcr.io/$GCLOUD_PROJECT/h2oai/h2o-llmstudio:nightly . + - name: Push to Vorvan + run: |- + docker push gcr.io/$GCLOUD_PROJECT/h2oai/h2o-llmstudio:nightly diff --git a/.github/workflows/build-and-push-release.yml b/.github/workflows/build-and-push-release.yml new file mode 100644 index 0000000000000000000000000000000000000000..859e1e13feade4b4d3c3f0ceab55a86d52a50fe3 --- /dev/null +++ b/.github/workflows/build-and-push-release.yml @@ -0,0 +1,40 @@ +name: Build and Push to Vorvan - Release + +on: + push: + tags: + - '**' + workflow_dispatch: + +jobs: + build: + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v3 + - id: 'auth' + uses: google-github-actions/auth@v1 + with: + credentials_json: '${{ secrets.GCP_CRED_JSON }}' + - name: Configure Google Cloud SDK + uses: google-github-actions/setup-gcloud@v1 + - name: Configure Docker Client + run: |- + gcloud auth configure-docker --quiet #authenticate to gcr + - name: Clean Docker images + run: |- + echo "Available storage before cleaning:" + df -h + docker system prune --all --force + echo "Available storage:" + df -h + echo "Removing dotnet" + sudo rm -rf /usr/share/dotnet + echo "Available storage:" + df -h + - name: Docker Build Image + run: |- + docker build -t gcr.io/$GCLOUD_PROJECT/h2oai/h2o-llmstudio:${{ github.ref_name }} . 
+ - name: Push to Vorvan + run: |- + docker push gcr.io/$GCLOUD_PROJECT/h2oai/h2o-llmstudio:${{ github.ref_name }} diff --git a/.github/workflows/deploy-to-github-pages.yml b/.github/workflows/deploy-to-github-pages.yml new file mode 100644 index 0000000000000000000000000000000000000000..3f929f81935e339f27f27cb8705b7dcc7fe3fa6f --- /dev/null +++ b/.github/workflows/deploy-to-github-pages.yml @@ -0,0 +1,32 @@ +name: Deploy documentation to GitHub pages + +on: + workflow_dispatch: + +jobs: + deploy: + name: Deploy to GitHub Pages + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-node@v3 + with: + always-auth: true + registry-url: https://npm.pkg.github.com/ + node-version: 18 + cache: npm + cache-dependency-path: documentation/package-lock.json + + - name: Install dependencies + run: cd documentation && npm install --frozen-lockfile + env: + NODE_AUTH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + - name: Build docs + run: cd documentation && npm run build + - name: Deploy to GitHub Pages + uses: peaceiris/actions-gh-pages@v3 + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + publish_dir: ./documentation/tmp/build + user_name: sherenem ##swap username out with the username of someone with admin access to the repo + user_email: sherene.mahanama@h2o.ai ##swap email out with the email of someone with admin access to the repo \ No newline at end of file diff --git a/.github/workflows/requirements.yml b/.github/workflows/requirements.yml new file mode 100644 index 0000000000000000000000000000000000000000..f4e4069ab83451eae69f734bfe280a463a082efb --- /dev/null +++ b/.github/workflows/requirements.yml @@ -0,0 +1,25 @@ +name: Requirements + +on: + pull_request: + +jobs: + requirements: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - name: Setup Python + uses: actions/setup-python@v4 + with: + python-version: 3.10.11 + - run: make setup + + - name: Generate requirements.txt + run: make export-requirements + + - name: Commit 
changes + uses: stefanzweifel/git-auto-commit-action@v4 + with: + commit_message: "Update requirements.txt" + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/style.yml b/.github/workflows/style.yml new file mode 100644 index 0000000000000000000000000000000000000000..67440974d5830f35da3237ff1103e2e735344130 --- /dev/null +++ b/.github/workflows/style.yml @@ -0,0 +1,18 @@ +name: Style + +on: + push: + branches: [ main ] + pull_request: + +jobs: + style: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - name: Setup Python + uses: actions/setup-python@v4 + with: + python-version: 3.10.11 + - run: make setup-dev + - run: make style diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml new file mode 100644 index 0000000000000000000000000000000000000000..bf69d6b33682b2cde98cbbdc5b870f2b52991eec --- /dev/null +++ b/.github/workflows/test.yml @@ -0,0 +1,19 @@ +name: Test + +on: + push: + branches: [ main ] + pull_request: + +jobs: + test: + runs-on: self-hosted + steps: + - uses: actions/checkout@v3 + - name: Setup Python + uses: actions/setup-python@v4 + with: + python-version: 3.10.11 + - run: nvidia-smi + - run: make setup-dev + - run: make test diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..e0b8bae8f4f230315fdfc5ec7eea8b983bcd331f --- /dev/null +++ b/.gitignore @@ -0,0 +1,163 @@ +# Folder +input/ +notebooks/ +demo_data/ +output/ +output_old/ +tmp/ +data/ +examples/data_oasst2 +examples/output_oasst2 +data_old/ +tests_tmp/ +subs/ +/datasets/ +.idea/ +.local/ + +output + +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +.neptune/* +.vscode/* + +# C extensions +*.so +*.c + +# Distribution / packaging +.Python +build/ +develop-eggs/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +pip-wheel-metadata/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually 
these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +reports/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Documentation +node_modules +tmp +.docusaurus +.cach-loader + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints +*.ipynb + +# IPython +profile_default/ +ipython_config.py + +# pyenv +.python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# PEP 582; used by e.g. 
github.com/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + + + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ +h2o_wave.state +.DS_Store + +# IDE +.vscode + +# playwright +test-results/ diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000000000000000000000000000000000..d5157e9258f294ee539d72f4fb97ee76fd8b4adf --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1,133 @@ + +# Contributor Covenant Code of Conduct + +## Our Pledge + +We as members, contributors, and leaders pledge to make participation in our +community a harassment-free experience for everyone, regardless of age, body +size, visible or invisible disability, ethnicity, sex characteristics, gender +identity and expression, level of experience, education, socio-economic status, +nationality, personal appearance, race, caste, color, religion, or sexual identity +and orientation. + +We pledge to act and interact in ways that contribute to an open, welcoming, +diverse, inclusive, and healthy community. 
+ +## Our Standards + +Examples of behavior that contributes to a positive environment for our +community include: + +* Demonstrating empathy and kindness toward other people +* Being respectful of differing opinions, viewpoints, and experiences +* Giving and gracefully accepting constructive feedback +* Accepting responsibility and apologizing to those affected by our mistakes, + and learning from the experience +* Focusing on what is best not just for us as individuals, but for the + overall community + +Examples of unacceptable behavior include: + +* The use of sexualized language or imagery, and sexual attention or + advances of any kind +* Trolling, insulting or derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or email + address, without their explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Enforcement Responsibilities + +Community leaders are responsible for clarifying and enforcing our standards of +acceptable behavior and will take appropriate and fair corrective action in +response to any behavior that they deem inappropriate, threatening, offensive, +or harmful. + +Community leaders have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions that are +not aligned to this Code of Conduct, and will communicate reasons for moderation +decisions when appropriate. + +## Scope + +This Code of Conduct applies within all community spaces, and also applies when +an individual is officially representing the community in public spaces. +Examples of representing our community include using an official e-mail address, +posting via an official social media account, or acting as an appointed +representative at an online or offline event. 
+ +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported to the community leaders responsible for enforcement at +this repository. +All complaints will be reviewed and investigated promptly and fairly. + +All community leaders are obligated to respect the privacy and security of the +reporter of any incident. + +## Enforcement Guidelines + +Community leaders will follow these Community Impact Guidelines in determining +the consequences for any action they deem in violation of this Code of Conduct: + +### 1. Correction + +**Community Impact**: Use of inappropriate language or other behavior deemed +unprofessional or unwelcome in the community. + +**Consequence**: A private, written warning from community leaders, providing +clarity around the nature of the violation and an explanation of why the +behavior was inappropriate. A public apology may be requested. + +### 2. Warning + +**Community Impact**: A violation through a single incident or series +of actions. + +**Consequence**: A warning with consequences for continued behavior. No +interaction with the people involved, including unsolicited interaction with +those enforcing the Code of Conduct, for a specified period of time. This +includes avoiding interactions in community spaces as well as external channels +like social media. Violating these terms may lead to a temporary or +permanent ban. + +### 3. Temporary Ban + +**Community Impact**: A serious violation of community standards, including +sustained inappropriate behavior. + +**Consequence**: A temporary ban from any sort of interaction or public +communication with the community for a specified period of time. No public or +private interaction with the people involved, including unsolicited interaction +with those enforcing the Code of Conduct, is allowed during this period. +Violating these terms may lead to a permanent ban. + +### 4. 
Permanent Ban + +**Community Impact**: Demonstrating a pattern of violation of community +standards, including sustained inappropriate behavior, harassment of an +individual, or aggression toward or disparagement of classes of individuals. + +**Consequence**: A permanent ban from any sort of public interaction within +the community. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 2.0, available at +[https://www.contributor-covenant.org/version/2/0/code_of_conduct.html][v2.0]. + +Community Impact Guidelines were inspired by +[Mozilla's code of conduct enforcement ladder][Mozilla CoC]. + +For answers to common questions about this code of conduct, see the FAQ at +[https://www.contributor-covenant.org/faq][FAQ]. Translations are available +at [https://www.contributor-covenant.org/translations][translations]. + +[homepage]: https://www.contributor-covenant.org +[v2.0]: https://www.contributor-covenant.org/version/2/0/code_of_conduct.html +[Mozilla CoC]: https://github.com/mozilla/diversity +[FAQ]: https://www.contributor-covenant.org/faq +[translations]: https://www.contributor-covenant.org/translations \ No newline at end of file diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000000000000000000000000000000000000..b2108ac6bc89539d70695ead6441ce771bbe98b9 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,67 @@ +# Contributing to H2O LLM STUDIO + +H2O LLM Studio is an open source project released under the Apache Software Licence v2. Open Source projects live by +their user and developer communities. We welcome and encourage your contributions of any kind! + +## Bug Reports and Feature Requests + +Found a bug or have an idea for a new feature? Your feedback is invaluable! To ensure a smooth and collaborative +process, please follow these steps: + +1. Provide the full error message and stack trace, if applicable. +2. 
Attach the model configuration yaml file if the error is related to model training. +3. Specify the commit hash of the version you are using (running `git rev-parse HEAD`) in your report. If you are + pasting the UI error message, the commit hash will also be included in the error message. +4. If the error is reproducible, kindly include the steps to reproduce it. +5. If possible, attempt to reproduce the error using the default dataset. +6. Please mention any other details that might be useful, e.g. if you are using LLM Studio in a Docker container, etc. + +## Pull Requests + +You can contribute to the project by fixing bugs, adding new features, refactoring code, or enhancing documentation. +To ensure a smooth and collaborative process for everyone, please follow these guidelines: + +1. Check if the issue you plan to address is already [reported](https://github.com/h2oai/h2o-llmstudio/issues). If not, + please open a new issue + to discuss your proposed changes. +2. Avoid duplicating work by commenting on the issue you're working on and feel free to seek assistance or ask + questions; our team is happy to help. +3. Fork the repository and create a new branch from `main`. To develop, please follow the setup instructions below. +4. Implement your changes and commit them to your branch. +5. When you feel ready, open a pull request with your changes. You can also open the PR as a draft to receive early + feedback. To facilitate the review process, we have provided a PR checklist below. +6. Our team will review your pull request and provide feedback. Once everything looks good, we will proceed to merge + your contribution. + +## Setting up your development environment + +Follow the instructions in [README](https://github.com/h2oai/h2o-llmstudio/blob/main/README.md) to set up your +environment. Run `make setup-dev` instead of `make setup` to install the development dependencies. 
+ +## Running linters and tests + +Before submitting your pull request, ensure that your code passes the linters and tests. +To format your code, run `make format`. You can check for any style issues by running `make style`. To run the tests, +run `make test`. + +## PR checklist + +Please make sure your pull request fulfills the following checklist: + +☐ The PR title should provide a clear summary of your contribution. +☐ Link the related issue (e.g., closes #123) in your PR description. +☐ If your contribution is still a work in progress, change the PR to draft mode. +☐ Ensure that the existing tests pass by running `make test`. +☐ Make sure `make style` passes to maintain consistent code style. + +## Installing custom packages + +If you need to install additional Python packages into the environment, you can do so using pip after activating your virtual environment via ```make shell```. For example, to install flash-attention, you would use the following commands: + +```bash +make shell +pip install flash-attn --no-build-isolation +pip install git+https://github.com/HazyResearch/flash-attention.git#subdirectory=csrc/rotary +``` + +For a PR, update the Pipfile and the Pipfile.lock via ```pipenv install package_name```. diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..26bd8058b93a957735082a4394e74929d6c2fc39 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,48 @@ +FROM nvidia/cuda:11.8.0-devel-ubuntu20.04 + +ARG DEBIAN_FRONTEND=noninteractive + +RUN apt-get update && apt-get upgrade -y + +RUN apt-get update && apt-get install -y \ + git \ + curl \ + software-properties-common \ + && add-apt-repository ppa:deadsnakes/ppa \ + && apt install -y python3.10 \ + && apt install -y python3.10-distutils \ + && rm -rf /var/lib/apt/lists/* + +# Pick an unusual UID for the llmstudio user. +# In particular, don't pick 1000, which is the default ubuntu user number. +# Force ourselves to test with UID mismatches in the common case. 
+RUN adduser --uid 1999 llmstudio +USER llmstudio + +# Python virtualenv is installed in /home/llmstudio/.local +# Application code and data lives in /workspace +# +# Make all of the files in the llmstudio directory writable so that the +# application can install other (non-persisted) new packages and other things +# if it wants to. This is really not advisable, though, since it's lost when +# the container exits. +WORKDIR /workspace +RUN \ + curl -sS https://bootstrap.pypa.io/get-pip.py | python3.10 && \ + chmod -R a+w /home/llmstudio +COPY Makefile . +COPY Pipfile . +COPY Pipfile.lock . +RUN \ + make setup && \ + mkdir -p /home/llmstudio/mount && \ + chmod -R a+w /home/llmstudio +COPY . . + +ENV HOME=/home/llmstudio +ENV H2O_WAVE_APP_ADDRESS=http://127.0.0.1:8756 +ENV H2O_WAVE_MAX_REQUEST_SIZE=25MB +ENV H2O_WAVE_NO_LOG=true +ENV H2O_WAVE_PRIVATE_DIR="/download/@/workspace/output/download" +EXPOSE 10101 +ENTRYPOINT [ "python3.10", "-m", "pipenv", "run", "wave", "run", "--no-reload", "app" ] diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..eea17ea9b810275cbd4c3f3bb0edb52d72149f0b --- /dev/null +++ b/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2023 H2O.ai, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this repository except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. diff --git a/Makefile b/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..1c61e597170e9f3b171f11b4224c634d2b0a1310 --- /dev/null +++ b/Makefile @@ -0,0 +1,188 @@ +PYTHON_VERSION ?= 3.10 +PYTHON ?= python$(PYTHON_VERSION) +PIP ?= $(PYTHON) -m pip +PIPENV ?= $(PYTHON) -m pipenv +PIPENV_PYTHON = $(PIPENV) run python +PIPENV_PIP = $(PIPENV_PYTHON) -m pip +PWD = $(shell pwd) +DOCKER_IMAGE ?= gcr.io/vorvan/h2oai/h2o-llmstudio:nightly + +ifeq ($(origin H2O_LLM_STUDIO_WORKDIR), environment) + WORKDIR := $(H2O_LLM_STUDIO_WORKDIR) +else + WORKDIR := $(shell pwd) +endif + +ifeq ($(LOG_LEVEL), $(filter $(LOG_LEVEL), debug trace)) + PW_DEBUG = DEBUG=pw:api +else + PW_DEBUG = +endif + +.PHONY: pipenv +pipenv: + $(PIP) install pip==24.0 + $(PIP) install pipenv==2023.12.1 + +.PHONY: setup +setup: pipenv + $(PIPENV) install --verbose --python $(PYTHON_VERSION) + -$(PIPENV_PIP) install flash-attn==2.5.5 --no-build-isolation --upgrade --no-cache-dir + +.PHONY: setup-dev +setup-dev: pipenv + $(PIPENV) install --verbose --dev --python $(PYTHON_VERSION) + - $(PIPENV_PIP) install flash-attn==2.5.5 --no-build-isolation --upgrade --no-cache-dir + $(PIPENV) run playwright install + +.PHONY: setup-no-flash +setup-no-flash: pipenv + $(PIPENV) install --verbose --python $(PYTHON_VERSION) + +setup-ui: pipenv + $(PIPENV) install --verbose --categories=dev-packages --python $(PYTHON_VERSION) + $(PIPENV) run playwright install + +.PHONY: export-requirements +export-requirements: pipenv + $(PIPENV) requirements > requirements.txt + +clean-env: + $(PIPENV) --rm + +clean-data: + rm -rf data + +clean-output: + rm -rf output + +reports: + mkdir -p reports + +.PHONY: style +style: reports pipenv + @echo -n > reports/flake8_errors.log + @echo -n > reports/mypy_errors.log + @echo -n > reports/mypy.log + @echo + + -$(PIPENV) run flake8 | tee -a reports/flake8_errors.log + @if [ 
-s reports/flake8_errors.log ]; then exit 1; fi + + -$(PIPENV) run mypy . --check-untyped-defs | tee -a reports/mypy.log + @if ! grep -Eq "Success: no issues found in [0-9]+ source files" reports/mypy.log ; then exit 1; fi + +.PHONY: format +format: pipenv + $(PIPENV) run isort . + $(PIPENV) run black . + +.PHONY: isort +isort: pipenv + $(PIPENV) run isort . + +.PHONY: black +black: pipenv + $(PIPENV) run black . + +.PHONY: test +test: reports + @bash -c 'set -o pipefail; export PYTHONPATH=$(PWD); \ + $(PIPENV) run pytest -v --junitxml=reports/junit.xml \ + --import-mode importlib \ + --html=./reports/pytest.html \ + --cov=llm_studio \ + --cov-report term \ + --cov-report html:./reports/coverage.html \ + -o log_cli=true -o log_level=INFO -o log_file=reports/tests.log \ + tests/* 2>&1 | tee reports/tests.log' + +.PHONY: test-ui +test-ui: reports setup-ui + $(PW_DEBUG) $(PIPENV) run pytest \ + -v \ + --junitxml=reports/junit_ui.xml \ + --html=./reports/pytest_ui.html \ + -o log_cli=true \ + -o log_level=$(LOG_LEVEL) \ + -o log_file=reports/tests_ui.log \ + tests/ui/test.py 2>&1 | tee reports/tests_ui.log + +.PHONY: test-ui-headed +test-ui-headed: setup-ui + $(PW_DEBUG) $(PIPENV) run pytest \ + -vvs \ + --headed \ + --video=on \ + --screenshot=on \ + --slowmo=100 \ + tests/ui/test.py 2>&1 | tee reports/tests.log + +.PHONY: wave +wave: + HF_HUB_ENABLE_HF_TRANSFER=True \ + H2O_WAVE_APP_ADDRESS=http://127.0.0.1:8756 \ + H2O_WAVE_MAX_REQUEST_SIZE=25MB \ + H2O_WAVE_NO_LOG=true \ + H2O_WAVE_PRIVATE_DIR="/download/@$(WORKDIR)/output/download" \ + $(PIPENV) run wave run app + +.PHONY: llmstudio +llmstudio: + H2O_WAVE_APP_ADDRESS=http://127.0.0.1:8756 \ + H2O_WAVE_MAX_REQUEST_SIZE=25MB \ + H2O_WAVE_NO_LOG=true \ + H2O_WAVE_PRIVATE_DIR="/download/@$(WORKDIR)/output/download" \ + $(PIPENV) run wave run --no-reload app + +.PHONY: docker-build-nightly +docker-build-nightly: + docker build -t $(DOCKER_IMAGE) . 
+ +.PHONY: docker-run-nightly +docker-run-nightly: +ifeq (,$(wildcard ./data)) + mkdir data +endif +ifeq (,$(wildcard ./output)) + mkdir output +endif + docker run \ + --runtime=nvidia \ + --shm-size=64g \ + --init \ + --rm \ + -u `id -u`:`id -g` \ + -p 10101:10101 \ + -v `pwd`/data:/workspace/data \ + -v `pwd`/output:/workspace/output \ + $(DOCKER_IMAGE) + +.PHONY: docker-clean-all +docker-clean-all: + @CONTAINERS=$$(docker ps -a -q --filter ancestor=$(DOCKER_IMAGE)); \ + if [ -n "$$CONTAINERS" ]; then \ + docker stop $$CONTAINERS; \ + docker rm $$CONTAINERS; \ + fi + docker rmi $(DOCKER_IMAGE) + +.PHONY: shell +shell: + $(PIPENV) shell + +setup-doc: # Install documentation dependencies + cd documentation && npm install + +run-doc: # Run the doc locally + cd documentation && npm start + +update-documentation-infrastructure: + cd documentation && npm update @h2oai/makersaurus + cd documentation && npm ls + +build-doc-locally: # Bundles your website into static files for production + cd documentation && npm run build + +serve-doc-locally: # Serves the built website locally + cd documentation && npm run serve diff --git a/Pipfile b/Pipfile new file mode 100644 index 0000000000000000000000000000000000000000..a7f312456f5cfd354bbe76737acdf70206a614b2 --- /dev/null +++ b/Pipfile @@ -0,0 +1,71 @@ +[[source]] +name = "pypi" +url = "https://pypi.org/simple" +verify_ssl = true + +[[source]] +name = "pytorch" +url = "https://download.pytorch.org/whl/cu118" +verify_ssl = false + +[requires] +python_version = "3.10" + +[packages] +torch = {index = "pytorch", version = "==2.2.0+cu118"} +tqdm = ">=4.65.0, <5.0.0" +transformers = "==4.40.1" +numpy = ">=1.26.0, <2.0.0" +pandas = ">=2.2.0, <3.0.0" +scikit-learn = ">=1.4.0, <2.0.0" +boto3 = ">=1.20.24, <2.0.0" +SQLAlchemy = ">=2.0.25, <3.0.0" +dill = ">=0.3.4, <0.4.0" +pyarrow = ">=14.0.1" +kaggle = ">=1.5.12, <2.0.0" +coolname = ">=2.2.0, <3.0.0" +bokeh = ">=3.2.1, <4.0.0" +beautifulsoup4 = ">=4.11.1, <5.0.0" +sqlitedict = "==1.7.0" 
+sentencepiece = ">=0.1.96, <0.2.0" +sacrebleu = "==2.0.0" +toml = ">=0.10.2, <0.11.0" +pyyaml = ">=6.0.0, <7.0.0" +protobuf = "==3.20.3" +fastparquet = ">=2023.7.0" +gputil = ">=1.4.0, <2.0.0" +huggingface-hub = "==0.21.1" +bitsandbytes = "==0.42.0" +accelerate = "==0.27.2" +openai = ">=1.12.0, <2.0.0" +einops = "==0.7.0" +datasets = ">=2.11.0, <3.0.0" +neptune = "==1.9.1" +Jinja2 = ">=3.1.3, <4.0.0" +h2o-wave = "==1.1.2" +tiktoken = "==0.6.0" +hf-transfer = "==0.1.5" +peft = "==0.9.0" +azure-storage-file-datalake = ">=12.12.0" +deepspeed = "==0.13.2" +keyring = "==24.3.1" + +[dev-packages] +black = "==24.3.0" +coverage = "==7.4.3" +flake8 = "==7.0.0" +flake8-black = "==0.3.6" +flake8-isort = "==6.1.1" +isort = "==5.13.2" +mypy = "==1.8.0" +pytest = "==8.0.0" # >=8.0.1 is not supported by transformers https://github.com/huggingface/transformers/issues/29155 +pytest-cov = "==4.1.0" +pytest-dependency = "==0.6.0" +pytest-html = "4.1.1" +types-pyyaml = ">=6.0" +types-requests = ">=2.31" +types-toml = ">=0.10" +wheel = "==0.42.0" +pytest-bdd = "==7.0.1" +hac-playwright = { file = "http://h2o-public-test-data.s3.amazonaws.com/e2e-testing/hac_playwright-1.38.0-py3-none-any.whl" } +pytest-base-url = "==2.1.0" diff --git a/Pipfile.lock b/Pipfile.lock new file mode 100644 index 0000000000000000000000000000000000000000..e1ec94b0824c88d79069442ca6c29887f50bd85f --- /dev/null +++ b/Pipfile.lock @@ -0,0 +1,4143 @@ +{ + "_meta": { + "hash": { + "sha256": "8289e3bdc5eecf3071a5aa6658355ec56833ddcc894ecc1473844f7626a34084" + }, + "pipfile-spec": 6, + "requires": { + "python_version": "3.10" + }, + "sources": [ + { + "name": "pypi", + "url": "https://pypi.org/simple", + "verify_ssl": true + }, + { + "name": "pytorch", + "url": "https://download.pytorch.org/whl/cu118", + "verify_ssl": false + } + ] + }, + "default": { + "accelerate": { + "hashes": [ + "sha256:a818dd27b9ba24e9eb5030d1b285cf4cdd1b41bbfa675fb4eb2477ddfc097074", + 
"sha256:cc715fe9a8bc7a286259bfb6d65fb78363badd3371e7cbda4e4a4ef34a0010aa" + ], + "index": "pypi", + "markers": "python_full_version >= '3.8.0'", + "version": "==0.27.2" + }, + "aiohttp": { + "hashes": [ + "sha256:0605cc2c0088fcaae79f01c913a38611ad09ba68ff482402d3410bf59039bfb8", + "sha256:0a158704edf0abcac8ac371fbb54044f3270bdbc93e254a82b6c82be1ef08f3c", + "sha256:0cbf56238f4bbf49dab8c2dc2e6b1b68502b1e88d335bea59b3f5b9f4c001475", + "sha256:1732102949ff6087589408d76cd6dea656b93c896b011ecafff418c9661dc4ed", + "sha256:18f634d540dd099c262e9f887c8bbacc959847cfe5da7a0e2e1cf3f14dbf2daf", + "sha256:239f975589a944eeb1bad26b8b140a59a3a320067fb3cd10b75c3092405a1372", + "sha256:2faa61a904b83142747fc6a6d7ad8fccff898c849123030f8e75d5d967fd4a81", + "sha256:320e8618eda64e19d11bdb3bd04ccc0a816c17eaecb7e4945d01deee2a22f95f", + "sha256:38d80498e2e169bc61418ff36170e0aad0cd268da8b38a17c4cf29d254a8b3f1", + "sha256:3916c8692dbd9d55c523374a3b8213e628424d19116ac4308e434dbf6d95bbdd", + "sha256:393c7aba2b55559ef7ab791c94b44f7482a07bf7640d17b341b79081f5e5cd1a", + "sha256:3b7b30258348082826d274504fbc7c849959f1989d86c29bc355107accec6cfb", + "sha256:3fcb4046d2904378e3aeea1df51f697b0467f2aac55d232c87ba162709478c46", + "sha256:4109adee842b90671f1b689901b948f347325045c15f46b39797ae1bf17019de", + "sha256:4558e5012ee03d2638c681e156461d37b7a113fe13970d438d95d10173d25f78", + "sha256:45731330e754f5811c314901cebdf19dd776a44b31927fa4b4dbecab9e457b0c", + "sha256:4715a9b778f4293b9f8ae7a0a7cef9829f02ff8d6277a39d7f40565c737d3771", + "sha256:471f0ef53ccedec9995287f02caf0c068732f026455f07db3f01a46e49d76bbb", + "sha256:4d3ebb9e1316ec74277d19c5f482f98cc65a73ccd5430540d6d11682cd857430", + "sha256:4ff550491f5492ab5ed3533e76b8567f4b37bd2995e780a1f46bca2024223233", + "sha256:52c27110f3862a1afbcb2af4281fc9fdc40327fa286c4625dfee247c3ba90156", + "sha256:55b39c8684a46e56ef8c8d24faf02de4a2b2ac60d26cee93bc595651ff545de9", + "sha256:5a7ee16aab26e76add4afc45e8f8206c95d1d75540f1039b84a03c3b3800dd59", + 
"sha256:5ca51eadbd67045396bc92a4345d1790b7301c14d1848feaac1d6a6c9289e888", + "sha256:5d6b3f1fabe465e819aed2c421a6743d8debbde79b6a8600739300630a01bf2c", + "sha256:60cdbd56f4cad9f69c35eaac0fbbdf1f77b0ff9456cebd4902f3dd1cf096464c", + "sha256:6380c039ec52866c06d69b5c7aad5478b24ed11696f0e72f6b807cfb261453da", + "sha256:639d0042b7670222f33b0028de6b4e2fad6451462ce7df2af8aee37dcac55424", + "sha256:66331d00fb28dc90aa606d9a54304af76b335ae204d1836f65797d6fe27f1ca2", + "sha256:67c3119f5ddc7261d47163ed86d760ddf0e625cd6246b4ed852e82159617b5fb", + "sha256:694d828b5c41255e54bc2dddb51a9f5150b4eefa9886e38b52605a05d96566e8", + "sha256:6ae79c1bc12c34082d92bf9422764f799aee4746fd7a392db46b7fd357d4a17a", + "sha256:702e2c7c187c1a498a4e2b03155d52658fdd6fda882d3d7fbb891a5cf108bb10", + "sha256:714d4e5231fed4ba2762ed489b4aec07b2b9953cf4ee31e9871caac895a839c0", + "sha256:7b179eea70833c8dee51ec42f3b4097bd6370892fa93f510f76762105568cf09", + "sha256:7f64cbd44443e80094309875d4f9c71d0401e966d191c3d469cde4642bc2e031", + "sha256:82a6a97d9771cb48ae16979c3a3a9a18b600a8505b1115cfe354dfb2054468b4", + "sha256:84dabd95154f43a2ea80deffec9cb44d2e301e38a0c9d331cc4aa0166fe28ae3", + "sha256:8676e8fd73141ded15ea586de0b7cda1542960a7b9ad89b2b06428e97125d4fa", + "sha256:88e311d98cc0bf45b62fc46c66753a83445f5ab20038bcc1b8a1cc05666f428a", + "sha256:8b4f72fbb66279624bfe83fd5eb6aea0022dad8eec62b71e7bf63ee1caadeafe", + "sha256:8c64a6dc3fe5db7b1b4d2b5cb84c4f677768bdc340611eca673afb7cf416ef5a", + "sha256:8cf142aa6c1a751fcb364158fd710b8a9be874b81889c2bd13aa8893197455e2", + "sha256:8d1964eb7617907c792ca00b341b5ec3e01ae8c280825deadbbd678447b127e1", + "sha256:93e22add827447d2e26d67c9ac0161756007f152fdc5210277d00a85f6c92323", + "sha256:9c69e77370cce2d6df5d12b4e12bdcca60c47ba13d1cbbc8645dd005a20b738b", + "sha256:9dbc053ac75ccc63dc3a3cc547b98c7258ec35a215a92bd9f983e0aac95d3d5b", + "sha256:9e3a1ae66e3d0c17cf65c08968a5ee3180c5a95920ec2731f53343fac9bad106", + "sha256:a6ea1a5b409a85477fd8e5ee6ad8f0e40bf2844c270955e09360418cfd09abac", 
+ "sha256:a81b1143d42b66ffc40a441379387076243ef7b51019204fd3ec36b9f69e77d6", + "sha256:ad7f2919d7dac062f24d6f5fe95d401597fbb015a25771f85e692d043c9d7832", + "sha256:afc52b8d969eff14e069a710057d15ab9ac17cd4b6753042c407dcea0e40bf75", + "sha256:b3df71da99c98534be076196791adca8819761f0bf6e08e07fd7da25127150d6", + "sha256:c088c4d70d21f8ca5c0b8b5403fe84a7bc8e024161febdd4ef04575ef35d474d", + "sha256:c26959ca7b75ff768e2776d8055bf9582a6267e24556bb7f7bd29e677932be72", + "sha256:c413016880e03e69d166efb5a1a95d40f83d5a3a648d16486592c49ffb76d0db", + "sha256:c6021d296318cb6f9414b48e6a439a7f5d1f665464da507e8ff640848ee2a58a", + "sha256:c671dc117c2c21a1ca10c116cfcd6e3e44da7fcde37bf83b2be485ab377b25da", + "sha256:c7a4b7a6cf5b6eb11e109a9755fd4fda7d57395f8c575e166d363b9fc3ec4678", + "sha256:c8a02fbeca6f63cb1f0475c799679057fc9268b77075ab7cf3f1c600e81dd46b", + "sha256:cd2adf5c87ff6d8b277814a28a535b59e20bfea40a101db6b3bdca7e9926bc24", + "sha256:d1469f228cd9ffddd396d9948b8c9cd8022b6d1bf1e40c6f25b0fb90b4f893ed", + "sha256:d153f652a687a8e95ad367a86a61e8d53d528b0530ef382ec5aaf533140ed00f", + "sha256:d5ab8e1f6bee051a4bf6195e38a5c13e5e161cb7bad83d8854524798bd9fcd6e", + "sha256:da00da442a0e31f1c69d26d224e1efd3a1ca5bcbf210978a2ca7426dfcae9f58", + "sha256:da22dab31d7180f8c3ac7c7635f3bcd53808f374f6aa333fe0b0b9e14b01f91a", + "sha256:e0ae53e33ee7476dd3d1132f932eeb39bf6125083820049d06edcdca4381f342", + "sha256:e7a6a8354f1b62e15d48e04350f13e726fa08b62c3d7b8401c0a1314f02e3558", + "sha256:e9a3d838441bebcf5cf442700e3963f58b5c33f015341f9ea86dcd7d503c07e2", + "sha256:edea7d15772ceeb29db4aff55e482d4bcfb6ae160ce144f2682de02f6d693551", + "sha256:f22eb3a6c1080d862befa0a89c380b4dafce29dc6cd56083f630073d102eb595", + "sha256:f26383adb94da5e7fb388d441bf09c61e5e35f455a3217bfd790c6b6bc64b2ee", + "sha256:f3c2890ca8c59ee683fd09adf32321a40fe1cf164e3387799efb2acebf090c11", + "sha256:f64fd07515dad67f24b6ea4a66ae2876c01031de91c93075b8093f07c0a2d93d", + 
"sha256:fcde4c397f673fdec23e6b05ebf8d4751314fa7c24f93334bf1f1364c1c69ac7", + "sha256:ff84aeb864e0fac81f676be9f4685f0527b660f1efdc40dcede3c251ef1e867f" + ], + "markers": "python_version >= '3.8'", + "version": "==3.9.5" + }, + "aiosignal": { + "hashes": [ + "sha256:54cd96e15e1649b75d6c87526a6ff0b6c1b0dd3459f43d9ca11d48c339b68cfc", + "sha256:f8376fb07dd1e86a584e4fcdec80b36b7f81aac666ebc724e2c090300dd83b17" + ], + "markers": "python_version >= '3.7'", + "version": "==1.3.1" + }, + "annotated-types": { + "hashes": [ + "sha256:0641064de18ba7a25dee8f96403ebc39113d0cb953a01429249d5c7564666a43", + "sha256:563339e807e53ffd9c267e99fc6d9ea23eb8443c08f112651963e24e22f84a5d" + ], + "markers": "python_version >= '3.8'", + "version": "==0.6.0" + }, + "anyio": { + "hashes": [ + "sha256:048e05d0f6caeed70d731f3db756d35dcc1f35747c8c403364a8332c630441b8", + "sha256:f75253795a87df48568485fd18cdd2a3fa5c4f7c5be8e5e36637733fce06fed6" + ], + "markers": "python_version >= '3.8'", + "version": "==4.3.0" + }, + "arrow": { + "hashes": [ + "sha256:c728b120ebc00eb84e01882a6f5e7927a53960aa990ce7dd2b10f39005a67f80", + "sha256:d4540617648cb5f895730f1ad8c82a65f2dad0166f57b75f3ca54759c4d67a85" + ], + "markers": "python_version >= '3.8'", + "version": "==1.3.0" + }, + "async-timeout": { + "hashes": [ + "sha256:4640d96be84d82d02ed59ea2b7105a0f7b33abe8703703cd0ab0bf87c427522f", + "sha256:7405140ff1230c310e51dc27b3145b9092d659ce68ff733fb0cefe3ee42be028" + ], + "markers": "python_version < '3.11'", + "version": "==4.0.3" + }, + "attrs": { + "hashes": [ + "sha256:935dc3b529c262f6cf76e50877d35a4bd3c1de194fd41f47a2b7ae8f19971f30", + "sha256:99b87a485a5820b23b879f04c2305b44b951b502fd64be915879d77a7e8fc6f1" + ], + "markers": "python_version >= '3.7'", + "version": "==23.2.0" + }, + "azure-core": { + "hashes": [ + "sha256:26273a254131f84269e8ea4464f3560c731f29c0c1f69ac99010845f239c1a8f", + "sha256:7c5ee397e48f281ec4dd773d67a0a47a0962ed6fa833036057f9ea067f688e74" + ], + "markers": "python_version >= '3.7'", + 
"version": "==1.30.1" + }, + "azure-storage-blob": { + "hashes": [ + "sha256:13e16ba42fc54ac2c7e8f976062173a5c82b9ec0594728e134aac372965a11b0", + "sha256:c5530dc51c21c9564e4eb706cd499befca8819b10dd89716d3fc90d747556243" + ], + "markers": "python_version >= '3.7'", + "version": "==12.19.1" + }, + "azure-storage-file-datalake": { + "hashes": [ + "sha256:358f1616db479b8272b74fb7d5f10f93dfc695a264137dd1959b50b50dcd6346", + "sha256:736b565e8c46fc0a36f64102d5fa63e1367b698496d845b7572fc7cad4925cec" + ], + "index": "pypi", + "markers": "python_version >= '3.7'", + "version": "==12.14.0" + }, + "beautifulsoup4": { + "hashes": [ + "sha256:74e3d1928edc070d21748185c46e3fb33490f22f52a3addee9aee0f4f7781051", + "sha256:b80878c9f40111313e55da8ba20bdba06d8fa3969fc68304167741bbf9e082ed" + ], + "index": "pypi", + "markers": "python_full_version >= '3.6.0'", + "version": "==4.12.3" + }, + "bitsandbytes": { + "hashes": [ + "sha256:63798680912cc63bb77b535a2d0860af024e290a52e157f777ad2a52e2585967", + "sha256:fc1505f184f0d275766f2a6c663f1a43b734c1409b5c5a406f3a6073d9f329fd" + ], + "index": "pypi", + "version": "==0.42.0" + }, + "bleach": { + "hashes": [ + "sha256:0a31f1837963c41d46bbf1331b8778e1308ea0791db03cc4e7357b97cf42a8fe", + "sha256:3225f354cfc436b9789c66c4ee030194bee0568fbf9cbdad3bc8b5c26c5f12b6" + ], + "markers": "python_version >= '3.8'", + "version": "==6.1.0" + }, + "blessed": { + "hashes": [ + "sha256:0c542922586a265e699188e52d5f5ac5ec0dd517e5a1041d90d2bbf23f906058", + "sha256:2cdd67f8746e048f00df47a2880f4d6acbcdb399031b604e34ba8f71d5787680" + ], + "markers": "python_version >= '2.7'", + "version": "==1.20.0" + }, + "bokeh": { + "hashes": [ + "sha256:1e3c502a0a8205338fc74dadbfa321f8a0965441b39501e36796a47b4017b642", + "sha256:d824961e4265367b0750ce58b07e564ad0b83ca64b335521cd3421e9b9f10d89" + ], + "index": "pypi", + "markers": "python_version >= '3.9'", + "version": "==3.4.1" + }, + "boto3": { + "hashes": [ + 
"sha256:2824e3dd18743ca50e5b10439d20e74647b1416e8a94509cb30beac92d27a18d", + "sha256:b2e5cb5b95efcc881e25a3bc872d7a24e75ff4e76f368138e4baf7b9d6ee3422" + ], + "index": "pypi", + "markers": "python_version >= '3.8'", + "version": "==1.34.90" + }, + "botocore": { + "hashes": [ + "sha256:113cd4c0cb63e13163ccbc2bb13d551be314ba7f8ba5bfab1c51a19ca01aa133", + "sha256:d48f152498e2c60b43ce25b579d26642346a327b6fb2c632d57219e0a4f63392" + ], + "markers": "python_version >= '3.8'", + "version": "==1.34.90" + }, + "bravado": { + "hashes": [ + "sha256:1bb6ef75d84140c851fffe6420baaee5037d840070cfe11d60913be6ab8e0530", + "sha256:8ac8bbb645e49607917a5c07808116c708521f51e80d9c29bc4a168ff4dd22c6" + ], + "markers": "python_version not in '3.0, 3.1, 3.2, 3.3, 3.4' and python_full_version != '3.5.0'", + "version": "==11.0.3" + }, + "bravado-core": { + "hashes": [ + "sha256:8cf1f7bbac2f7c696d37e970253938b5be4ddec92c8d5e64400b17469c3714b4" + ], + "markers": "python_version >= '3.7'", + "version": "==6.1.1" + }, + "certifi": { + "hashes": [ + "sha256:0569859f95fc761b18b45ef421b1290a0f65f147e92a1e5eb3e635f9a5e4e66f", + "sha256:dc383c07b76109f368f6106eee2b593b04a011ea4d55f652c6ca24a754d1cdd1" + ], + "markers": "python_version >= '3.6'", + "version": "==2024.2.2" + }, + "cffi": { + "hashes": [ + "sha256:0c9ef6ff37e974b73c25eecc13952c55bceed9112be2d9d938ded8e856138bcc", + "sha256:131fd094d1065b19540c3d72594260f118b231090295d8c34e19a7bbcf2e860a", + "sha256:1b8ebc27c014c59692bb2664c7d13ce7a6e9a629be20e54e7271fa696ff2b417", + "sha256:2c56b361916f390cd758a57f2e16233eb4f64bcbeee88a4881ea90fca14dc6ab", + "sha256:2d92b25dbf6cae33f65005baf472d2c245c050b1ce709cc4588cdcdd5495b520", + "sha256:31d13b0f99e0836b7ff893d37af07366ebc90b678b6664c955b54561fc36ef36", + "sha256:32c68ef735dbe5857c810328cb2481e24722a59a2003018885514d4c09af9743", + "sha256:3686dffb02459559c74dd3d81748269ffb0eb027c39a6fc99502de37d501faa8", + "sha256:582215a0e9adbe0e379761260553ba11c58943e4bbe9c36430c4ca6ac74b15ed", + 
"sha256:5b50bf3f55561dac5438f8e70bfcdfd74543fd60df5fa5f62d94e5867deca684", + "sha256:5bf44d66cdf9e893637896c7faa22298baebcd18d1ddb6d2626a6e39793a1d56", + "sha256:6602bc8dc6f3a9e02b6c22c4fc1e47aa50f8f8e6d3f78a5e16ac33ef5fefa324", + "sha256:673739cb539f8cdaa07d92d02efa93c9ccf87e345b9a0b556e3ecc666718468d", + "sha256:68678abf380b42ce21a5f2abde8efee05c114c2fdb2e9eef2efdb0257fba1235", + "sha256:68e7c44931cc171c54ccb702482e9fc723192e88d25a0e133edd7aff8fcd1f6e", + "sha256:6b3d6606d369fc1da4fd8c357d026317fbb9c9b75d36dc16e90e84c26854b088", + "sha256:748dcd1e3d3d7cd5443ef03ce8685043294ad6bd7c02a38d1bd367cfd968e000", + "sha256:7651c50c8c5ef7bdb41108b7b8c5a83013bfaa8a935590c5d74627c047a583c7", + "sha256:7b78010e7b97fef4bee1e896df8a4bbb6712b7f05b7ef630f9d1da00f6444d2e", + "sha256:7e61e3e4fa664a8588aa25c883eab612a188c725755afff6289454d6362b9673", + "sha256:80876338e19c951fdfed6198e70bc88f1c9758b94578d5a7c4c91a87af3cf31c", + "sha256:8895613bcc094d4a1b2dbe179d88d7fb4a15cee43c052e8885783fac397d91fe", + "sha256:88e2b3c14bdb32e440be531ade29d3c50a1a59cd4e51b1dd8b0865c54ea5d2e2", + "sha256:8f8e709127c6c77446a8c0a8c8bf3c8ee706a06cd44b1e827c3e6a2ee6b8c098", + "sha256:9cb4a35b3642fc5c005a6755a5d17c6c8b6bcb6981baf81cea8bfbc8903e8ba8", + "sha256:9f90389693731ff1f659e55c7d1640e2ec43ff725cc61b04b2f9c6d8d017df6a", + "sha256:a09582f178759ee8128d9270cd1344154fd473bb77d94ce0aeb2a93ebf0feaf0", + "sha256:a6a14b17d7e17fa0d207ac08642c8820f84f25ce17a442fd15e27ea18d67c59b", + "sha256:a72e8961a86d19bdb45851d8f1f08b041ea37d2bd8d4fd19903bc3083d80c896", + "sha256:abd808f9c129ba2beda4cfc53bde801e5bcf9d6e0f22f095e45327c038bfe68e", + "sha256:ac0f5edd2360eea2f1daa9e26a41db02dd4b0451b48f7c318e217ee092a213e9", + "sha256:b29ebffcf550f9da55bec9e02ad430c992a87e5f512cd63388abb76f1036d8d2", + "sha256:b2ca4e77f9f47c55c194982e10f058db063937845bb2b7a86c84a6cfe0aefa8b", + "sha256:b7be2d771cdba2942e13215c4e340bfd76398e9227ad10402a8767ab1865d2e6", + "sha256:b84834d0cf97e7d27dd5b7f3aca7b6e9263c56308ab9dc8aae9784abb774d404", 
+ "sha256:b86851a328eedc692acf81fb05444bdf1891747c25af7529e39ddafaf68a4f3f", + "sha256:bcb3ef43e58665bbda2fb198698fcae6776483e0c4a631aa5647806c25e02cc0", + "sha256:c0f31130ebc2d37cdd8e44605fb5fa7ad59049298b3f745c74fa74c62fbfcfc4", + "sha256:c6a164aa47843fb1b01e941d385aab7215563bb8816d80ff3a363a9f8448a8dc", + "sha256:d8a9d3ebe49f084ad71f9269834ceccbf398253c9fac910c4fd7053ff1386936", + "sha256:db8e577c19c0fda0beb7e0d4e09e0ba74b1e4c092e0e40bfa12fe05b6f6d75ba", + "sha256:dc9b18bf40cc75f66f40a7379f6a9513244fe33c0e8aa72e2d56b0196a7ef872", + "sha256:e09f3ff613345df5e8c3667da1d918f9149bd623cd9070c983c013792a9a62eb", + "sha256:e4108df7fe9b707191e55f33efbcb2d81928e10cea45527879a4749cbe472614", + "sha256:e6024675e67af929088fda399b2094574609396b1decb609c55fa58b028a32a1", + "sha256:e70f54f1796669ef691ca07d046cd81a29cb4deb1e5f942003f401c0c4a2695d", + "sha256:e715596e683d2ce000574bae5d07bd522c781a822866c20495e52520564f0969", + "sha256:e760191dd42581e023a68b758769e2da259b5d52e3103c6060ddc02c9edb8d7b", + "sha256:ed86a35631f7bfbb28e108dd96773b9d5a6ce4811cf6ea468bb6a359b256b1e4", + "sha256:ee07e47c12890ef248766a6e55bd38ebfb2bb8edd4142d56db91b21ea68b7627", + "sha256:fa3a0128b152627161ce47201262d3140edb5a5c3da88d73a1b790a959126956", + "sha256:fcc8eb6d5902bb1cf6dc4f187ee3ea80a1eba0a89aba40a5cb20a5087d961357" + ], + "markers": "platform_python_implementation != 'PyPy'", + "version": "==1.16.0" + }, + "charset-normalizer": { + "hashes": [ + "sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027", + "sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087", + "sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786", + "sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8", + "sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09", + "sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185", + "sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574", + 
"sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e", + "sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519", + "sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898", + "sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269", + "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3", + "sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f", + "sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6", + "sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8", + "sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a", + "sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73", + "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc", + "sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714", + "sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2", + "sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc", + "sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce", + "sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d", + "sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e", + "sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6", + "sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269", + "sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96", + "sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d", + "sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a", + "sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4", + "sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77", + "sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d", + "sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0", 
+ "sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed", + "sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068", + "sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac", + "sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25", + "sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8", + "sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab", + "sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26", + "sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2", + "sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db", + "sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f", + "sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5", + "sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99", + "sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c", + "sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d", + "sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811", + "sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa", + "sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a", + "sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03", + "sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b", + "sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04", + "sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c", + "sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001", + "sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458", + "sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389", + "sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99", + 
"sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985", + "sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537", + "sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238", + "sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f", + "sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d", + "sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796", + "sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a", + "sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143", + "sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8", + "sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c", + "sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5", + "sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5", + "sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711", + "sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4", + "sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6", + "sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c", + "sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7", + "sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4", + "sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b", + "sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae", + "sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12", + "sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c", + "sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae", + "sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8", + "sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887", + "sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b", 
+ "sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4", + "sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f", + "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5", + "sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33", + "sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519", + "sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561" + ], + "markers": "python_full_version >= '3.7.0'", + "version": "==3.3.2" + }, + "click": { + "hashes": [ + "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28", + "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de" + ], + "markers": "python_version >= '3.7'", + "version": "==8.1.7" + }, + "colorama": { + "hashes": [ + "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", + "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6" + ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 3.5, 3.6'", + "version": "==0.4.6" + }, + "contourpy": { + "hashes": [ + "sha256:00e5388f71c1a0610e6fe56b5c44ab7ba14165cdd6d695429c5cd94021e390b2", + "sha256:10a37ae557aabf2509c79715cd20b62e4c7c28b8cd62dd7d99e5ed3ce28c3fd9", + "sha256:11959f0ce4a6f7b76ec578576a0b61a28bdc0696194b6347ba3f1c53827178b9", + "sha256:187fa1d4c6acc06adb0fae5544c59898ad781409e61a926ac7e84b8f276dcef4", + "sha256:1a07fc092a4088ee952ddae19a2b2a85757b923217b7eed584fdf25f53a6e7ce", + "sha256:1cac0a8f71a041aa587410424ad46dfa6a11f6149ceb219ce7dd48f6b02b87a7", + "sha256:1d59e739ab0e3520e62a26c60707cc3ab0365d2f8fecea74bfe4de72dc56388f", + "sha256:2855c8b0b55958265e8b5888d6a615ba02883b225f2227461aa9127c578a4922", + "sha256:2e785e0f2ef0d567099b9ff92cbfb958d71c2d5b9259981cd9bee81bd194c9a4", + "sha256:309be79c0a354afff9ff7da4aaed7c3257e77edf6c1b448a779329431ee79d7e", + "sha256:39f3ecaf76cd98e802f094e0d4fbc6dc9c45a8d0c4d185f0f6c2234e14e5f75b", + 
"sha256:457499c79fa84593f22454bbd27670227874cd2ff5d6c84e60575c8b50a69619", + "sha256:49e70d111fee47284d9dd867c9bb9a7058a3c617274900780c43e38d90fe1205", + "sha256:4c75507d0a55378240f781599c30e7776674dbaf883a46d1c90f37e563453480", + "sha256:4c863140fafc615c14a4bf4efd0f4425c02230eb8ef02784c9a156461e62c965", + "sha256:4d8908b3bee1c889e547867ca4cdc54e5ab6be6d3e078556814a22457f49423c", + "sha256:5b9eb0ca724a241683c9685a484da9d35c872fd42756574a7cfbf58af26677fd", + "sha256:6022cecf8f44e36af10bd9118ca71f371078b4c168b6e0fab43d4a889985dbb5", + "sha256:6150ffa5c767bc6332df27157d95442c379b7dce3a38dff89c0f39b63275696f", + "sha256:62828cada4a2b850dbef89c81f5a33741898b305db244904de418cc957ff05dc", + "sha256:7b4182299f251060996af5249c286bae9361fa8c6a9cda5efc29fe8bfd6062ec", + "sha256:94b34f32646ca0414237168d68a9157cb3889f06b096612afdd296003fdd32fd", + "sha256:9ce6889abac9a42afd07a562c2d6d4b2b7134f83f18571d859b25624a331c90b", + "sha256:9cffe0f850e89d7c0012a1fb8730f75edd4320a0a731ed0c183904fe6ecfc3a9", + "sha256:a12a813949e5066148712a0626895c26b2578874e4cc63160bb007e6df3436fe", + "sha256:a1eea9aecf761c661d096d39ed9026574de8adb2ae1c5bd7b33558af884fb2ce", + "sha256:a31f94983fecbac95e58388210427d68cd30fe8a36927980fab9c20062645609", + "sha256:ac58bdee53cbeba2ecad824fa8159493f0bf3b8ea4e93feb06c9a465d6c87da8", + "sha256:af3f4485884750dddd9c25cb7e3915d83c2db92488b38ccb77dd594eac84c4a0", + "sha256:b33d2bc4f69caedcd0a275329eb2198f560b325605810895627be5d4b876bf7f", + "sha256:b59c0ffceff8d4d3996a45f2bb6f4c207f94684a96bf3d9728dbb77428dd8cb8", + "sha256:bb6834cbd983b19f06908b45bfc2dad6ac9479ae04abe923a275b5f48f1a186b", + "sha256:bd3db01f59fdcbce5b22afad19e390260d6d0222f35a1023d9adc5690a889364", + "sha256:bd7c23df857d488f418439686d3b10ae2fbf9bc256cd045b37a8c16575ea1040", + "sha256:c2528d60e398c7c4c799d56f907664673a807635b857df18f7ae64d3e6ce2d9f", + "sha256:d31a63bc6e6d87f77d71e1abbd7387ab817a66733734883d1fc0021ed9bfa083", + "sha256:d4492d82b3bc7fbb7e3610747b159869468079fe149ec5c4d771fa1f614a14df", 
+ "sha256:ddcb8581510311e13421b1f544403c16e901c4e8f09083c881fab2be80ee31ba", + "sha256:e1d59258c3c67c865435d8fbeb35f8c59b8bef3d6f46c1f29f6123556af28445", + "sha256:eb3315a8a236ee19b6df481fc5f997436e8ade24a9f03dfdc6bd490fea20c6da", + "sha256:ef2b055471c0eb466033760a521efb9d8a32b99ab907fc8358481a1dd29e3bd3", + "sha256:ef5adb9a3b1d0c645ff694f9bca7702ec2c70f4d734f9922ea34de02294fdf72", + "sha256:f32c38afb74bd98ce26de7cc74a67b40afb7b05aae7b42924ea990d51e4dac02", + "sha256:fe0ccca550bb8e5abc22f530ec0466136379c01321fd94f30a22231e8a48d985" + ], + "markers": "python_version >= '3.9'", + "version": "==1.2.1" + }, + "coolname": { + "hashes": [ + "sha256:4d1563186cfaf71b394d5df4c744f8c41303b6846413645e31d31915cdeb13e8", + "sha256:6c5d5731759104479e7ca195a9b64f7900ac5bead40183c09323c7d0be9e75c7" + ], + "index": "pypi", + "version": "==2.2.0" + }, + "cramjam": { + "hashes": [ + "sha256:00524bb23f4abb3a3bfff08aa32b9274843170c5b43855807e0f59670e2ac98c", + "sha256:007aa9444cb27b8691baae73ca907133cd939987438f874774011b4c740732dd", + "sha256:04b31d427a8902e5c2eec4b8f29873de7a3ade202e3d68e7f2354b9f0aa00bc7", + "sha256:07af94191f6a245226dc8a8bc6c94808e382ce9dfcca4bab0e8015fbc7fc3322", + "sha256:080f3eb7b648f5ba9d35084d8dddc68246a8f365df239792f6712908f0aa568e", + "sha256:11c9d30bc53892c57a3b296756c23659323ab1419a2b4bf22bbafc07b247bb67", + "sha256:11d2e9eebc7d202eda0ae09fb56a2cdbeb5a1563e89d2118bf18cf0030f35f77", + "sha256:1374fe9a4431e546bb4501a16b84875d0bf80fc4e6c8942f0d5608ae48474267", + "sha256:19b8c97350c8d65daea26267dd1becb59073569aac2ae5743952d7f48da5d37a", + "sha256:1ba1a8ff855b30b4069a9b45ea9e7f2b5d882c7953bdfccda8d4b275fa7057ce", + "sha256:1fd25201f1278dc6faa2ae35e67b7a5bb352b7fc6ed1ee939637414ca8115863", + "sha256:246f1f7d32cac2b64617d2dddba11a82851e73cdcf9d1abb799b08dcd9d2ea49", + "sha256:2476828dea4089aa3cb9160391f8b36f793ca651afdcba80de1e341373928397", + "sha256:24990be4010b2185dcecc67133cd727657036e7b132d7de598148f5b1eb8e452", + 
"sha256:24c2b426dd8fafb894f93a88f42e2827e14199d66836cb100582037e5371c724", + "sha256:269f94d2efe6b6a97624782cd3b541e60535dd5874f4a8d5d0ba66ef59424ae3", + "sha256:274878883e7fadf95a6b5bc58f9c1dd39fef2c31d68e18a0fb8594226457fba7", + "sha256:28c30078effc100739d3f9b227276a8360c1b32aac65efb4f641630552213548", + "sha256:29987b54e31efed66738e8f236c597c4c9a91ec9d57bcb74307712e07505b4bb", + "sha256:2be92c6f0bcffaf8ea6a8164fe0388a188fec2fa9eff1828e8b64dc3a83740f9", + "sha256:2cb64a97e625ca029b55e37769b8c354e64cbea042c75471915dc385935d30ed", + "sha256:30e2d745cd4d244b7973d15aaebeedb537b980f9d3da80e6dea75ee1a872f9fa", + "sha256:3277fd42399755d6d3730edec4a192174ee64d219e0ffbc90613f15cbabf711f", + "sha256:345a952c5d4b922830efaa67dc0b42d21e18c182c1a1bda6d20bb78235f31d6f", + "sha256:35647a0e37a4dfec85a44c7966ae476b7db0e6cd65d91c08f1fb3007ed774d92", + "sha256:3850dac9a2f6dcb3249d23f9d505117643b967bdc1c572ed0cc492a48fd69daf", + "sha256:3f6303c8cc583dfe5054cf84717674f75b18bca4ae8e576dc863958d5494dc4b", + "sha256:440a18fd4ae42e06dbbd7aee91d8248b61da9fef7610ffbd553d1ba93931394b", + "sha256:476890974229713fc7b4c16fb050b756ba926c67e4d1200b3e03c5c051e9b552", + "sha256:4822eb5fe6839cd3d0439e5431e766ad010b2a388ca9617aa6372b6030897782", + "sha256:4a554bcfd068e831affd64a4f067c7c9b00b359742597c4fdadd18ff673baf30", + "sha256:4c1d2d39c2193a77c5e5b327944f90e6ecf2caa1b55e7176cc83d80706ea15de", + "sha256:4f7c16d358df366e308137411125a2bb50d1b19924fced3a390898fa8c9a074d", + "sha256:5023a737d8d9cf5d123e6d87d088929c3cfb2aae90e0f584204427f74882150a", + "sha256:51e847dcfe74fba379fed2bc2b45f5c2f11c3ece5e9eebcf63f39a9594184588", + "sha256:572cb9a8dc5a189691d6e03a9bf9b4305fd9a9f36bb0f9fde55fc36837c2e6b3", + "sha256:594477faff7f4380fa123cfbcf10ab8ee5af1a28b95750b66931ffafcb11ab5c", + "sha256:5d1ac94e00c64258330105473c641441db02b4dc3e9e9f2963d204e53ed93025", + "sha256:6379b92912f7569e126bd48d10e7087ddd20ea88a939532e3c4a85c2fa05d600", + "sha256:65bfd41aa92c0025f32ba09214b48e9367a81122586b2617439b4327c4bd179c", 
+ "sha256:6653c262ad71e6c0ae08eeca3af2ee89ad47483b6312f2c6094518cb77872406", + "sha256:6721edd8f911ad84db83ee4902b7579fc01c55849062f3f1f4171b58fccf98eb", + "sha256:67e09b42e744efd08b93ac56f6100a859a31617d7146725516f3f2c744149d97", + "sha256:6b1fa0a6ea8183831d04572597c182bd6cece62d583a36cde1e6a86e72ce2389", + "sha256:6c04f363cb4b316719421724521432b6e7f6490e5baaaf7692af961c28d0279b", + "sha256:6eec7e985f35708c234542721863d82781d0f7f6a71b45e14ce6d2625d4b131d", + "sha256:6f838d06d06709b9ce8b1ceae36aea4e1c7e613365185a91edcbeb5884f5e606", + "sha256:7337bd8218bd8508f35904274a38cce843a237fe6e23104238bbeb2f337107ed", + "sha256:771b44e549f90b5532508782e25d1c40b8054dd83d52253d05945fc05836b252", + "sha256:77346ac669f5445d14b74476a4e8f3a259fd22681bd73790e92b8956d7e225fc", + "sha256:7871e1fd3ee8ca16799ba22d49fc1e52e78976fa8c659be41630eeb2914475a7", + "sha256:80b088d15866b37851fd53e2b471becc9ec487257dceca1878621072a18e833e", + "sha256:832224f52fa1e601e0ab678dba9bdfde3686fc4cd1a9f2ed4748f29eaf1cb553", + "sha256:86e238b6de79e045f5197df2c9dfaf8d10b37a6517ff4ffc4775fe5a3cf4d4a4", + "sha256:87e26e3e1d5fed1cac5b41be648d0daf0793f94cf4a7aebefce1f4f6656e2d21", + "sha256:8c8aa6d08c135ae7f0da01e6559a332c5d8fe4989a594db401040e385d04dffd", + "sha256:8ea1dc11538842ff20d9872a17214994f5913cbf3be5594b54aad2422becdf19", + "sha256:8ed658f36a2bf667d5b8c7c6690103ad99f81cc62a1b64891b69298447329d4b", + "sha256:962b7106287bcc463150766b5b8c69f32dcc69713a8dbce00e0ca6936f95c55b", + "sha256:9728861bc0390681824961778b36f7f0b95039e8b90d46f1b67f51232f1ee159", + "sha256:97c706c520c3f8b0184278cc86187528458350216c6e4fa85d3f16bcad0d365d", + "sha256:990e65c2bf1c155a9ddec5ecabf431cf77596432f697d3c6e0831b5174c51c40", + "sha256:9a1943f2cc0deee037ddcf92beff6049e12d4e6d557f568ddf59fb3b848f2152", + "sha256:9d08f1bab949ffd6dd6f25a89e4f7062d147aeea9c067e4dd155bdb190e5a519", + "sha256:9efe6915aa7ef176f3a7f42a4e46504573215953331b139abefd20d07d8aba82", + 
"sha256:a2fededed05a042f093dbf1b11d69afb1874a2c9197fcf1d58c142ba9111db5a", + "sha256:ab67f29094165f0771acad8dd16e840259cfedcc94067af229530496dbf1a24c", + "sha256:ab8146faa5d8c52edf23724843c36469fc32ff2c4a174eba72f4da6de5016688", + "sha256:afa065bab70e27565695441f69f493af3d379b8723030f2c3d2547d2e312a4be", + "sha256:b188e750b95172c01defcfcfbba629cad797718b34402ec61b3bc9ff99403599", + "sha256:ba1e45074757ab0482ac544e60613b6b8658100ac9985c91868a4598cdfb63ba", + "sha256:ba542f07fe3f41475d78626973533539e6cf2d5b6af37923fe6c7e7f0f74b9b2", + "sha256:bc8f24c32124bb47536882c6b941cdb88cc16e4fa64d5bf347cb8dd72a193fc3", + "sha256:bd8c601fe8717e52517a2f2eef78217086acf449627bfdda97e3f53fd79c92af", + "sha256:be6fb5dd5bf1c89c717a73a1057505959f35c08e0e97a76d4cc6391b90d2263b", + "sha256:bec9ca5431c32ba94996b7c1c56695b37d48713b97ee1d2a456f4046f009e82f", + "sha256:c14728e3360cd212d5b606ca703c3bd1c8912efcdbc1aa032c81c2882509ebd5", + "sha256:c28830ecf76501356d678dac4f37563554ec1c651a53a990cdf595f7ed75c651", + "sha256:c7e8329cde48740df8d332dade2f52b74612b8ea86005341c99bb192c82a5ce7", + "sha256:c9d54a4aa475d5e902f2ee518bdaa02f26c089e9f72950d00d1643c090f0deb3", + "sha256:cb5f4d061e9abdc6663551446c332a58c101efb31fd1746229872600274c2b20", + "sha256:d5a0a2fe240c97587df07f3d5e1027673d599b3a6a7a0ab540aea69f09e9ff7a", + "sha256:d824fd98364bc946c38ed324a3ec7befba055285aaf2c1ca61894bb7616226e8", + "sha256:d93b42d22bf3e17290c5e4cf58e715a419330bb5255c35933c14db82ecf3872c", + "sha256:d9b244d04cef82872d12c227a2f202f080a454d664c05db351626e6ad4aaa307", + "sha256:dac42b2b4c3950e7eda9b5551e0e904784ed0c0428accc29171c230fb919ec72", + "sha256:dcf7791e1cedb982ccc873ec9392c6cfb9c714a64ebf1ed4e8310b9cb44655f2", + "sha256:e23e323ad28ed3e4e3a24ceffdab0ff235954109a88b536ea7b3b7886bd0a536", + "sha256:e30f1f00de913b440baa36647817b9b7120a69b04eca05f3354aaf5b40f95ee5", + "sha256:e8ec1d4f27eb9d0412f0c567e7ffd14fbeb2b318a1ac394d5de4047c431fe94c", + "sha256:e954599c6369f429a868852eff453b894d88866acba439b65131ea93f5400b47", 
+ "sha256:eea606b01b43b91626e3aafd463bd19b6ed739bdb8b2b309e5d7ff72afc0e89d", + "sha256:ef0173fb457f73cf9c2553092419db0eba4d582890db95e542a4d93e11340421", + "sha256:ef29fb916fe74be65d0ab8871ab8d964b0f5eb8028bb84b325be43675a59d6e7", + "sha256:f667843e7a8fca208eecfe44e04088242f8ca60d74d4950fac3722043538d700", + "sha256:fb5d7739e2bc573ade12327ef7717b1ac5876c62938fab20eb54d762da23cae2", + "sha256:fc0c6eb8185c68f79a25bb298825e345cc09b826f5828bd8146e3600ca6e9981", + "sha256:fc9c45469914099897c47bfc501616fb377f28a865adebf90ea6f3c8ae6dd4e6", + "sha256:fe6434d3ee0899bc9396801d1abbc5d1fe77662bd3d1f1c1573fac6708459138", + "sha256:fe84440100e7045190da7f80219be9989b0b6db6acadb3ae9cfe0935d93ebf8c" + ], + "markers": "python_version >= '3.7'", + "version": "==2.8.3" + }, + "cryptography": { + "hashes": [ + "sha256:0270572b8bd2c833c3981724b8ee9747b3ec96f699a9665470018594301439ee", + "sha256:111a0d8553afcf8eb02a4fea6ca4f59d48ddb34497aa8706a6cf536f1a5ec576", + "sha256:16a48c23a62a2f4a285699dba2e4ff2d1cff3115b9df052cdd976a18856d8e3d", + "sha256:1b95b98b0d2af784078fa69f637135e3c317091b615cd0905f8b8a087e86fa30", + "sha256:1f71c10d1e88467126f0efd484bd44bca5e14c664ec2ede64c32f20875c0d413", + "sha256:2424ff4c4ac7f6b8177b53c17ed5d8fa74ae5955656867f5a8affaca36a27abb", + "sha256:2bce03af1ce5a5567ab89bd90d11e7bbdff56b8af3acbbec1faded8f44cb06da", + "sha256:329906dcc7b20ff3cad13c069a78124ed8247adcac44b10bea1130e36caae0b4", + "sha256:37dd623507659e08be98eec89323469e8c7b4c1407c85112634ae3dbdb926fdd", + "sha256:3eaafe47ec0d0ffcc9349e1708be2aaea4c6dd4978d76bf6eb0cb2c13636c6fc", + "sha256:5e6275c09d2badf57aea3afa80d975444f4be8d3bc58f7f80d2a484c6f9485c8", + "sha256:6fe07eec95dfd477eb9530aef5bead34fec819b3aaf6c5bd6d20565da607bfe1", + "sha256:7367d7b2eca6513681127ebad53b2582911d1736dc2ffc19f2c3ae49997496bc", + "sha256:7cde5f38e614f55e28d831754e8a3bacf9ace5d1566235e39d91b35502d6936e", + "sha256:9481ffe3cf013b71b2428b905c4f7a9a4f76ec03065b05ff499bb5682a8d9ad8", + 
"sha256:98d8dc6d012b82287f2c3d26ce1d2dd130ec200c8679b6213b3c73c08b2b7940", + "sha256:a011a644f6d7d03736214d38832e030d8268bcff4a41f728e6030325fea3e400", + "sha256:a2913c5375154b6ef2e91c10b5720ea6e21007412f6437504ffea2109b5a33d7", + "sha256:a30596bae9403a342c978fb47d9b0ee277699fa53bbafad14706af51fe543d16", + "sha256:b03c2ae5d2f0fc05f9a2c0c997e1bc18c8229f392234e8a0194f202169ccd278", + "sha256:b6cd2203306b63e41acdf39aa93b86fb566049aeb6dc489b70e34bcd07adca74", + "sha256:b7ffe927ee6531c78f81aa17e684e2ff617daeba7f189f911065b2ea2d526dec", + "sha256:b8cac287fafc4ad485b8a9b67d0ee80c66bf3574f655d3b97ef2e1082360faf1", + "sha256:ba334e6e4b1d92442b75ddacc615c5476d4ad55cc29b15d590cc6b86efa487e2", + "sha256:ba3e4a42397c25b7ff88cdec6e2a16c2be18720f317506ee25210f6d31925f9c", + "sha256:c41fb5e6a5fe9ebcd58ca3abfeb51dffb5d83d6775405305bfa8715b76521922", + "sha256:cd2030f6650c089aeb304cf093f3244d34745ce0cfcc39f20c6fbfe030102e2a", + "sha256:cd65d75953847815962c84a4654a84850b2bb4aed3f26fadcc1c13892e1e29f6", + "sha256:e4985a790f921508f36f81831817cbc03b102d643b5fcb81cd33df3fa291a1a1", + "sha256:e807b3188f9eb0eaa7bbb579b462c5ace579f1cedb28107ce8b48a9f7ad3679e", + "sha256:f12764b8fffc7a123f641d7d049d382b73f96a34117e0b637b80643169cec8ac", + "sha256:f8837fe1d6ac4a8052a9a8ddab256bc006242696f03368a4009be7ee3075cdb7" + ], + "markers": "python_version >= '3.7'", + "version": "==42.0.5" + }, + "datasets": { + "hashes": [ + "sha256:cdf8b8c6abf7316377ba4f49f9589a4c74556d6b481afd0abd2284f3d69185cb", + "sha256:f1bbf0e2896917a914de01cbd37075b14deea3837af87ad0d9f697388ccaeb50" + ], + "index": "pypi", + "markers": "python_full_version >= '3.8.0'", + "version": "==2.18.0" + }, + "deepspeed": { + "hashes": [ + "sha256:78a8a7ae66b0d3f661224c9d48ed153be0f1a2d5b9be8d0cbfeee1fa6a791995" + ], + "index": "pypi", + "version": "==0.13.2" + }, + "dill": { + "hashes": [ + "sha256:3ebe3c479ad625c4553aca177444d89b486b1d84982eeacded644afc0cf797ca", + 
"sha256:c36ca9ffb54365bdd2f8eb3eff7d2a21237f8452b57ace88b1ac615b7e815bd7" + ], + "index": "pypi", + "markers": "python_version >= '3.8'", + "version": "==0.3.8" + }, + "distro": { + "hashes": [ + "sha256:2fa77c6fd8940f116ee1d6b94a2f90b13b5ea8d019b98bc8bafdcabcdd9bdbed", + "sha256:7bffd925d65168f85027d8da9af6bddab658135b840670a223589bc0c8ef02b2" + ], + "markers": "python_version >= '3.6'", + "version": "==1.9.0" + }, + "editor": { + "hashes": [ + "sha256:bb6989e872638cd119db9a4fce284cd8e13c553886a1c044c6b8d8a160c871f8", + "sha256:e818e6913f26c2a81eadef503a2741d7cca7f235d20e217274a009ecd5a74abf" + ], + "markers": "python_version >= '3.8'", + "version": "==1.6.6" + }, + "einops": { + "hashes": [ + "sha256:0f3096f26b914f465f6ff3c66f5478f9a5e380bb367ffc6493a68143fbbf1fd1", + "sha256:b2b04ad6081a3b227080c9bf5e3ace7160357ff03043cd66cc5b2319eb7031d1" + ], + "index": "pypi", + "markers": "python_version >= '3.8'", + "version": "==0.7.0" + }, + "exceptiongroup": { + "hashes": [ + "sha256:5258b9ed329c5bbdd31a309f53cbfb0b155341807f6ff7606a1e801a891b29ad", + "sha256:a4785e48b045528f5bfe627b6ad554ff32def154f42372786903b7abcfe1aa16" + ], + "markers": "python_version < '3.11'", + "version": "==1.2.1" + }, + "fastparquet": { + "hashes": [ + "sha256:06736e5bb0827f861ac0901310baedf7e7b5f52dfcd89d435963ae328203597c", + "sha256:0c1edc578f7a9919d1062bc3184c0c64d5c4e986ab3fa9c75f53561bb7364d7f", + "sha256:0d04901828f54ec118e7e5dfb438518ffe9b75ef3b7ebcdbaf33af130fcee9b7", + "sha256:1a7314e654a06cfc68a50bfc61bbacc548257d8742fbecfe0418c3b0d4295c04", + "sha256:33121c1596bb4d672579969a4901730f555447204c7c2573621803f7990cd309", + "sha256:3b7c39661c918686fdbf21695547d2e7b0cd0226a2f2dd6fa5c2ad7b37da2540", + "sha256:3e3c5cdf2af0fc1b76f07daabd37b132c0f0086106b2fc801ea046739ddabee0", + "sha256:42babeafac01ab24ea1edc7f626c0744c312d60ba6a7189b08c8e7d1c374bfd3", + "sha256:42def5e682eb426e6f7062d0bee370dec9424181f3c61eb24d6bdc67482a0ace", + 
"sha256:52603d24d19522753e21b1794d99bb295688e33d1a04b61a5c0e9eb4884ba342", + "sha256:6509837887e35bdcb08ba252eeb930b1056e129b6d31c14901443339567ee95a", + "sha256:6a14579bbe2fab4f5f43685503b4142d8b0eb7965ee176704ae1697590143cd1", + "sha256:76fadf2399a778daf49772c644a3a7b27e41492a43e2bea4107a715981c1dc2f", + "sha256:81a8f60c51793eb2436b4fdbbf115ff8578a4a457a179240bc08f9d9573d57a4", + "sha256:83f1abb155d8a8b6f1f31318174507d8a8ddf4bff00a2ef7065b609577deb6ae", + "sha256:94aaa752d79660f2d88983bd7336109f4b61da6940d759786c02144195d6c635", + "sha256:abb08c61ab0f8a29a118dabe0a9105686fa5580648cfca252a74153c8c32444f", + "sha256:b5131d77a6c4cdfe3b00baa7eb95602c7f09d955c5490dd3bc0ec0e290ee4010", + "sha256:b76febb17f2261e1aa8bdf11b3459ee9cca19ced25744b940c3922b7d93862f9", + "sha256:b7a620b87e83c098a46611b901c456403c9a04ba526e4a615750d6704092e1eb", + "sha256:bd1b310e7d9934f61236b793d1e11336d457e7664829bf76d53bff5614dcc338", + "sha256:bf6df4a9c781e32dc10432e78ee82c3c8750e9975a4e2d29aecffc1f2323a418", + "sha256:c26266910e42190f3ba043647b4c1e37e8626981a0366432a498bdf1e10c0bd1", + "sha256:c6affd18ed2608976739b47befce9f80f7848209c892ccb1001d494296af33af", + "sha256:cd4b8133f5fa43c497d151d4d00337f9b0614993116a61c61e563a003eb0811e", + "sha256:cebc1adc7c3a1aed70c752f3fde5e4df094dafba24e60d6501d7963e77047e7e", + "sha256:d2711f30720c4f80654c191ecb21d2b1b7351be1f6763c70936bdbab095f0b54", + "sha256:d87f24ae76e65f94af9e62a648b5479f0bd2e8935e0011c9390ebc1299f3785d", + "sha256:dace50138c81c6f70acfff91a7a15acc85e3d45be0edbcf164f26fd86cf3c7a5", + "sha256:dd45a7973afe651d7fdb6b836fa1f9177d318de20211a28f4580d9af5c2aacbb", + "sha256:dedeb4ad28f68313c2504ef005f4b2d52c3d108bd5323204300dbaeec6fb1b04", + "sha256:e27b5d21fecdc07f071f5343a350b88c859b324834fd19b78d636480fe341999", + "sha256:e6f544d65b9f826a149010e3fd5121510e0a1a44c62f1b274aea4a41a8f3dbcd", + "sha256:ea1503bac0b1457c016a748064823d312806e506f3a8b9226935def4be3fffdc", + "sha256:ee36f1ea8f08cb9b8710161eee4e752e74f34ef3e7aebc58db4e5468d29ff34c", 
+ "sha256:ee37d9273e383811f10bd379990851b53df606cfaa046cae53826b6b14f0a33d", + "sha256:f369dcc860b176739826ed67ea230f243334df5c5b3047ac10b0a365ec469082", + "sha256:fba0fcba4ffd60ab23d24486f85733a5cc1fcf46d1286c9dc3eed329809e9ee3", + "sha256:fe1b88f51687566eac9fa94f7ce4f17b8df9e4b7ba8f7d37f383e7140414fe98" + ], + "index": "pypi", + "markers": "python_version >= '3.8'", + "version": "==2024.2.0" + }, + "filelock": { + "hashes": [ + "sha256:404e5e9253aa60ad457cae1be07c0f0ca90a63931200a47d9b6a6af84fd7b45f", + "sha256:d13f466618bfde72bd2c18255e269f72542c6e70e7bac83a0232d6b1cc5c8cf4" + ], + "markers": "python_version >= '3.8'", + "version": "==3.13.4" + }, + "fqdn": { + "hashes": [ + "sha256:105ed3677e767fb5ca086a0c1f4bb66ebc3c100be518f0e0d755d9eae164d89f", + "sha256:3a179af3761e4df6eb2e026ff9e1a3033d3587bf980a0b1b2e1e5d08d7358014" + ], + "version": "==1.5.1" + }, + "frozenlist": { + "hashes": [ + "sha256:04ced3e6a46b4cfffe20f9ae482818e34eba9b5fb0ce4056e4cc9b6e212d09b7", + "sha256:0633c8d5337cb5c77acbccc6357ac49a1770b8c487e5b3505c57b949b4b82e98", + "sha256:068b63f23b17df8569b7fdca5517edef76171cf3897eb68beb01341131fbd2ad", + "sha256:0c250a29735d4f15321007fb02865f0e6b6a41a6b88f1f523ca1596ab5f50bd5", + "sha256:1979bc0aeb89b33b588c51c54ab0161791149f2461ea7c7c946d95d5f93b56ae", + "sha256:1a4471094e146b6790f61b98616ab8e44f72661879cc63fa1049d13ef711e71e", + "sha256:1b280e6507ea8a4fa0c0a7150b4e526a8d113989e28eaaef946cc77ffd7efc0a", + "sha256:1d0ce09d36d53bbbe566fe296965b23b961764c0bcf3ce2fa45f463745c04701", + "sha256:20b51fa3f588ff2fe658663db52a41a4f7aa6c04f6201449c6c7c476bd255c0d", + "sha256:23b2d7679b73fe0e5a4560b672a39f98dfc6f60df63823b0a9970525325b95f6", + "sha256:23b701e65c7b36e4bf15546a89279bd4d8675faabc287d06bbcfac7d3c33e1e6", + "sha256:2471c201b70d58a0f0c1f91261542a03d9a5e088ed3dc6c160d614c01649c106", + "sha256:27657df69e8801be6c3638054e202a135c7f299267f1a55ed3a598934f6c0d75", + "sha256:29acab3f66f0f24674b7dc4736477bcd4bc3ad4b896f5f45379a67bce8b96868", + 
"sha256:32453c1de775c889eb4e22f1197fe3bdfe457d16476ea407472b9442e6295f7a", + "sha256:3a670dc61eb0d0eb7080890c13de3066790f9049b47b0de04007090807c776b0", + "sha256:3e0153a805a98f5ada7e09826255ba99fb4f7524bb81bf6b47fb702666484ae1", + "sha256:410478a0c562d1a5bcc2f7ea448359fcb050ed48b3c6f6f4f18c313a9bdb1826", + "sha256:442acde1e068288a4ba7acfe05f5f343e19fac87bfc96d89eb886b0363e977ec", + "sha256:48f6a4533887e189dae092f1cf981f2e3885175f7a0f33c91fb5b7b682b6bab6", + "sha256:4f57dab5fe3407b6c0c1cc907ac98e8a189f9e418f3b6e54d65a718aaafe3950", + "sha256:4f9c515e7914626b2a2e1e311794b4c35720a0be87af52b79ff8e1429fc25f19", + "sha256:55fdc093b5a3cb41d420884cdaf37a1e74c3c37a31f46e66286d9145d2063bd0", + "sha256:5667ed53d68d91920defdf4035d1cdaa3c3121dc0b113255124bcfada1cfa1b8", + "sha256:590344787a90ae57d62511dd7c736ed56b428f04cd8c161fcc5e7232c130c69a", + "sha256:5a7d70357e7cee13f470c7883a063aae5fe209a493c57d86eb7f5a6f910fae09", + "sha256:5c3894db91f5a489fc8fa6a9991820f368f0b3cbdb9cd8849547ccfab3392d86", + "sha256:5c849d495bf5154cd8da18a9eb15db127d4dba2968d88831aff6f0331ea9bd4c", + "sha256:64536573d0a2cb6e625cf309984e2d873979709f2cf22839bf2d61790b448ad5", + "sha256:693945278a31f2086d9bf3df0fe8254bbeaef1fe71e1351c3bd730aa7d31c41b", + "sha256:6db4667b187a6742b33afbbaf05a7bc551ffcf1ced0000a571aedbb4aa42fc7b", + "sha256:6eb73fa5426ea69ee0e012fb59cdc76a15b1283d6e32e4f8dc4482ec67d1194d", + "sha256:722e1124aec435320ae01ee3ac7bec11a5d47f25d0ed6328f2273d287bc3abb0", + "sha256:7268252af60904bf52c26173cbadc3a071cece75f873705419c8681f24d3edea", + "sha256:74fb4bee6880b529a0c6560885fce4dc95936920f9f20f53d99a213f7bf66776", + "sha256:780d3a35680ced9ce682fbcf4cb9c2bad3136eeff760ab33707b71db84664e3a", + "sha256:82e8211d69a4f4bc360ea22cd6555f8e61a1bd211d1d5d39d3d228b48c83a897", + "sha256:89aa2c2eeb20957be2d950b85974b30a01a762f3308cd02bb15e1ad632e22dc7", + "sha256:8aefbba5f69d42246543407ed2461db31006b0f76c4e32dfd6f42215a2c41d09", + "sha256:96ec70beabbd3b10e8bfe52616a13561e58fe84c0101dd031dc78f250d5128b9", 
+ "sha256:9750cc7fe1ae3b1611bb8cfc3f9ec11d532244235d75901fb6b8e42ce9229dfe", + "sha256:9acbb16f06fe7f52f441bb6f413ebae6c37baa6ef9edd49cdd567216da8600cd", + "sha256:9d3e0c25a2350080e9319724dede4f31f43a6c9779be48021a7f4ebde8b2d742", + "sha256:a06339f38e9ed3a64e4c4e43aec7f59084033647f908e4259d279a52d3757d09", + "sha256:a0cb6f11204443f27a1628b0e460f37fb30f624be6051d490fa7d7e26d4af3d0", + "sha256:a7496bfe1da7fb1a4e1cc23bb67c58fab69311cc7d32b5a99c2007b4b2a0e932", + "sha256:a828c57f00f729620a442881cc60e57cfcec6842ba38e1b19fd3e47ac0ff8dc1", + "sha256:a9b2de4cf0cdd5bd2dee4c4f63a653c61d2408055ab77b151c1957f221cabf2a", + "sha256:b46c8ae3a8f1f41a0d2ef350c0b6e65822d80772fe46b653ab6b6274f61d4a49", + "sha256:b7e3ed87d4138356775346e6845cccbe66cd9e207f3cd11d2f0b9fd13681359d", + "sha256:b7f2f9f912dca3934c1baec2e4585a674ef16fe00218d833856408c48d5beee7", + "sha256:ba60bb19387e13597fb059f32cd4d59445d7b18b69a745b8f8e5db0346f33480", + "sha256:beee944ae828747fd7cb216a70f120767fc9f4f00bacae8543c14a6831673f89", + "sha256:bfa4a17e17ce9abf47a74ae02f32d014c5e9404b6d9ac7f729e01562bbee601e", + "sha256:c037a86e8513059a2613aaba4d817bb90b9d9b6b69aace3ce9c877e8c8ed402b", + "sha256:c302220494f5c1ebeb0912ea782bcd5e2f8308037b3c7553fad0e48ebad6ad82", + "sha256:c6321c9efe29975232da3bd0af0ad216800a47e93d763ce64f291917a381b8eb", + "sha256:c757a9dd70d72b076d6f68efdbb9bc943665ae954dad2801b874c8c69e185068", + "sha256:c99169d4ff810155ca50b4da3b075cbde79752443117d89429595c2e8e37fed8", + "sha256:c9c92be9fd329ac801cc420e08452b70e7aeab94ea4233a4804f0915c14eba9b", + "sha256:cc7b01b3754ea68a62bd77ce6020afaffb44a590c2289089289363472d13aedb", + "sha256:db9e724bebd621d9beca794f2a4ff1d26eed5965b004a97f1f1685a173b869c2", + "sha256:dca69045298ce5c11fd539682cff879cc1e664c245d1c64da929813e54241d11", + "sha256:dd9b1baec094d91bf36ec729445f7769d0d0cf6b64d04d86e45baf89e2b9059b", + "sha256:e02a0e11cf6597299b9f3bbd3f93d79217cb90cfd1411aec33848b13f5c656cc", + 
"sha256:e6a20a581f9ce92d389a8c7d7c3dd47c81fd5d6e655c8dddf341e14aa48659d0", + "sha256:e7004be74cbb7d9f34553a5ce5fb08be14fb33bc86f332fb71cbe5216362a497", + "sha256:e774d53b1a477a67838a904131c4b0eef6b3d8a651f8b138b04f748fccfefe17", + "sha256:edb678da49d9f72c9f6c609fbe41a5dfb9a9282f9e6a2253d5a91e0fc382d7c0", + "sha256:f146e0911cb2f1da549fc58fc7bcd2b836a44b79ef871980d605ec392ff6b0d2", + "sha256:f56e2333dda1fe0f909e7cc59f021eba0d2307bc6f012a1ccf2beca6ba362439", + "sha256:f9a3ea26252bd92f570600098783d1371354d89d5f6b7dfd87359d669f2109b5", + "sha256:f9aa1878d1083b276b0196f2dfbe00c9b7e752475ed3b682025ff20c1c1f51ac", + "sha256:fb3c2db03683b5767dedb5769b8a40ebb47d6f7f45b1b3e3b4b51ec8ad9d9825", + "sha256:fbeb989b5cc29e8daf7f976b421c220f1b8c731cbf22b9130d8815418ea45887", + "sha256:fde5bd59ab5357e3853313127f4d3565fc7dad314a74d7b5d43c22c6a5ed2ced", + "sha256:fe1a06da377e3a1062ae5fe0926e12b84eceb8a50b350ddca72dc85015873f74" + ], + "markers": "python_version >= '3.8'", + "version": "==1.4.1" + }, + "fsspec": { + "extras": [ + "http" + ], + "hashes": [ + "sha256:817f969556fa5916bc682e02ca2045f96ff7f586d45110fcb76022063ad2c7d8", + "sha256:b6ad1a679f760dda52b1168c859d01b7b80648ea6f7f7c7f5a8a91dc3f3ecb84" + ], + "markers": "python_version >= '3.8'", + "version": "==2024.2.0" + }, + "future": { + "hashes": [ + "sha256:929292d34f5872e70396626ef385ec22355a1fae8ad29e1a734c3e43f9fbc216", + "sha256:bd2968309307861edae1458a4f8a4f3598c03be43b97521076aebf5d94c07b05" + ], + "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'", + "version": "==1.0.0" + }, + "gitdb": { + "hashes": [ + "sha256:81a3407ddd2ee8df444cbacea00e2d038e40150acfa3001696fe0dcf1d3adfa4", + "sha256:bf5421126136d6d0af55bc1e7c1af1c397a34f5b7bd79e776cd3e89785c2b04b" + ], + "markers": "python_version >= '3.7'", + "version": "==4.0.11" + }, + "gitpython": { + "hashes": [ + "sha256:35f314a9f878467f5453cc1fee295c3e18e52f1b99f10f6cf5b1682e968a9e7c", + 
"sha256:eec7ec56b92aad751f9912a73404bc02ba212a23adb2c7098ee668417051a1ff" + ], + "markers": "python_version >= '3.7'", + "version": "==3.1.43" + }, + "gputil": { + "hashes": [ + "sha256:099e52c65e512cdfa8c8763fca67f5a5c2afb63469602d5dcb4d296b3661efb9" + ], + "index": "pypi", + "version": "==1.4.0" + }, + "greenlet": { + "hashes": [ + "sha256:01bc7ea167cf943b4c802068e178bbf70ae2e8c080467070d01bfa02f337ee67", + "sha256:0448abc479fab28b00cb472d278828b3ccca164531daab4e970a0458786055d6", + "sha256:086152f8fbc5955df88382e8a75984e2bb1c892ad2e3c80a2508954e52295257", + "sha256:098d86f528c855ead3479afe84b49242e174ed262456c342d70fc7f972bc13c4", + "sha256:149e94a2dd82d19838fe4b2259f1b6b9957d5ba1b25640d2380bea9c5df37676", + "sha256:1551a8195c0d4a68fac7a4325efac0d541b48def35feb49d803674ac32582f61", + "sha256:15d79dd26056573940fcb8c7413d84118086f2ec1a8acdfa854631084393efcc", + "sha256:1996cb9306c8595335bb157d133daf5cf9f693ef413e7673cb07e3e5871379ca", + "sha256:1a7191e42732df52cb5f39d3527217e7ab73cae2cb3694d241e18f53d84ea9a7", + "sha256:1ea188d4f49089fc6fb283845ab18a2518d279c7cd9da1065d7a84e991748728", + "sha256:1f672519db1796ca0d8753f9e78ec02355e862d0998193038c7073045899f305", + "sha256:2516a9957eed41dd8f1ec0c604f1cdc86758b587d964668b5b196a9db5bfcde6", + "sha256:2797aa5aedac23af156bbb5a6aa2cd3427ada2972c828244eb7d1b9255846379", + "sha256:2dd6e660effd852586b6a8478a1d244b8dc90ab5b1321751d2ea15deb49ed414", + "sha256:3ddc0f794e6ad661e321caa8d2f0a55ce01213c74722587256fb6566049a8b04", + "sha256:3ed7fb269f15dc662787f4119ec300ad0702fa1b19d2135a37c2c4de6fadfd4a", + "sha256:419b386f84949bf0e7c73e6032e3457b82a787c1ab4a0e43732898a761cc9dbf", + "sha256:43374442353259554ce33599da8b692d5aa96f8976d567d4badf263371fbe491", + "sha256:52f59dd9c96ad2fc0d5724107444f76eb20aaccb675bf825df6435acb7703559", + "sha256:57e8974f23e47dac22b83436bdcf23080ade568ce77df33159e019d161ce1d1e", + "sha256:5b51e85cb5ceda94e79d019ed36b35386e8c37d22f07d6a751cb659b180d5274", + 
"sha256:649dde7de1a5eceb258f9cb00bdf50e978c9db1b996964cd80703614c86495eb", + "sha256:64d7675ad83578e3fc149b617a444fab8efdafc9385471f868eb5ff83e446b8b", + "sha256:68834da854554926fbedd38c76e60c4a2e3198c6fbed520b106a8986445caaf9", + "sha256:6b66c9c1e7ccabad3a7d037b2bcb740122a7b17a53734b7d72a344ce39882a1b", + "sha256:70fb482fdf2c707765ab5f0b6655e9cfcf3780d8d87355a063547b41177599be", + "sha256:7170375bcc99f1a2fbd9c306f5be8764eaf3ac6b5cb968862cad4c7057756506", + "sha256:73a411ef564e0e097dbe7e866bb2dda0f027e072b04da387282b02c308807405", + "sha256:77457465d89b8263bca14759d7c1684df840b6811b2499838cc5b040a8b5b113", + "sha256:7f362975f2d179f9e26928c5b517524e89dd48530a0202570d55ad6ca5d8a56f", + "sha256:81bb9c6d52e8321f09c3d165b2a78c680506d9af285bfccbad9fb7ad5a5da3e5", + "sha256:881b7db1ebff4ba09aaaeae6aa491daeb226c8150fc20e836ad00041bcb11230", + "sha256:894393ce10ceac937e56ec00bb71c4c2f8209ad516e96033e4b3b1de270e200d", + "sha256:99bf650dc5d69546e076f413a87481ee1d2d09aaaaaca058c9251b6d8c14783f", + "sha256:9da2bd29ed9e4f15955dd1595ad7bc9320308a3b766ef7f837e23ad4b4aac31a", + "sha256:afaff6cf5200befd5cec055b07d1c0a5a06c040fe5ad148abcd11ba6ab9b114e", + "sha256:b1b5667cced97081bf57b8fa1d6bfca67814b0afd38208d52538316e9422fc61", + "sha256:b37eef18ea55f2ffd8f00ff8fe7c8d3818abd3e25fb73fae2ca3b672e333a7a6", + "sha256:b542be2440edc2d48547b5923c408cbe0fc94afb9f18741faa6ae970dbcb9b6d", + "sha256:b7dcbe92cc99f08c8dd11f930de4d99ef756c3591a5377d1d9cd7dd5e896da71", + "sha256:b7f009caad047246ed379e1c4dbcb8b020f0a390667ea74d2387be2998f58a22", + "sha256:bba5387a6975598857d86de9eac14210a49d554a77eb8261cc68b7d082f78ce2", + "sha256:c5e1536de2aad7bf62e27baf79225d0d64360d4168cf2e6becb91baf1ed074f3", + "sha256:c5ee858cfe08f34712f548c3c363e807e7186f03ad7a5039ebadb29e8c6be067", + "sha256:c9db1c18f0eaad2f804728c67d6c610778456e3e1cc4ab4bbd5eeb8e6053c6fc", + "sha256:d353cadd6083fdb056bb46ed07e4340b0869c305c8ca54ef9da3421acbdf6881", + "sha256:d46677c85c5ba00a9cb6f7a00b2bfa6f812192d2c9f7d9c4f6a55b60216712f3", 
+ "sha256:d4d1ac74f5c0c0524e4a24335350edad7e5f03b9532da7ea4d3c54d527784f2e", + "sha256:d73a9fe764d77f87f8ec26a0c85144d6a951a6c438dfe50487df5595c6373eac", + "sha256:da70d4d51c8b306bb7a031d5cff6cc25ad253affe89b70352af5f1cb68e74b53", + "sha256:daf3cb43b7cf2ba96d614252ce1684c1bccee6b2183a01328c98d36fcd7d5cb0", + "sha256:dca1e2f3ca00b84a396bc1bce13dd21f680f035314d2379c4160c98153b2059b", + "sha256:dd4f49ae60e10adbc94b45c0b5e6a179acc1736cf7a90160b404076ee283cf83", + "sha256:e1f145462f1fa6e4a4ae3c0f782e580ce44d57c8f2c7aae1b6fa88c0b2efdb41", + "sha256:e3391d1e16e2a5a1507d83e4a8b100f4ee626e8eca43cf2cadb543de69827c4c", + "sha256:fcd2469d6a2cf298f198f0487e0a5b1a47a42ca0fa4dfd1b6862c999f018ebbf", + "sha256:fd096eb7ffef17c456cfa587523c5f92321ae02427ff955bebe9e3c63bc9f0da", + "sha256:fe754d231288e1e64323cfad462fcee8f0288654c10bdf4f603a39ed923bef33" + ], + "markers": "platform_machine == 'aarch64' or (platform_machine == 'ppc64le' or (platform_machine == 'x86_64' or (platform_machine == 'amd64' or (platform_machine == 'AMD64' or (platform_machine == 'win32' or platform_machine == 'WIN32')))))", + "version": "==3.0.3" + }, + "h11": { + "hashes": [ + "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d", + "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761" + ], + "markers": "python_version >= '3.7'", + "version": "==0.14.0" + }, + "h2o-wave": { + "hashes": [ + "sha256:02e25fd0a40d1f733b181007fb683a0f575041ee9f86cc9daf4dcaba2f1232eb", + "sha256:07381fd53c6aea7579c5b83ff2f72ef84d4e67b869b7af826b8e00a735ebb78a", + "sha256:122f5183482502ae4d13acb7fbf601967e14f760741aed86c8b3886d645fd9c3", + "sha256:162ac35a0df20bbb23bfe417ee285c4008fca6896ef5047b293b144a4520b2bb", + "sha256:8510904dbd1b38755a70fbb512bcf9a41a27a407d3efdd41894cdb6488945165", + "sha256:ef4c38a878ea958364a352c3870ba0f475b933c928307031497d5d082019aa86" + ], + "index": "pypi", + "markers": "python_version >= '3.8'", + "version": "==1.1.2" + }, + "hf-transfer": { + "hashes": [ + 
"sha256:069026e38fc8786a91dac0de5e48a1acd6ac8bb59b9a02049fa73ce88303d468", + "sha256:08491bcbd5eefbc0f33cf958671e24fb5d5b72e6e054448cac3b01dfc373dc76", + "sha256:08ce2e01f4057f8e23c2665f1cfb90c3d1f4c93097e99b35907cb1ddadbe4184", + "sha256:0e724c4e74763380143526e98b323aeb489fd0b076e70e994454519f35b502b1", + "sha256:0f3ef2163503d51d7b0199c5ae95934ebfbae00bc7e1ca2e5fef0230f13ab90d", + "sha256:0ffe2399e3ac162c91d408fcb8be5b38893539ddaaecc334faebfd54a98cdd63", + "sha256:1088fcca432486e50142dec893c5ddcc5a32ef7e71b53c5d25b321744b4cd6a4", + "sha256:130966ca4f1266bfb9e13a4b6c40170115a2b450255b7c08ef0de85f04f778ef", + "sha256:14c9094353e9f9ed4b834b0f581bd531875fccfac5fd951e49b2ab16f1a267c0", + "sha256:1ccb2b43e1441273aedc83ef9b2419e02028686c9ffcdf0a2bd195657917e24a", + "sha256:1d173dc9fbbff38a3e67e3a21568f67d05427c4208ce77106e1822574a68ee27", + "sha256:1d7f850c6f5937402c91c0b999792dad66e476165478031e450797063a8cce5c", + "sha256:1e9988b785fd9bc8cd77819675a2744eb3b6a02edfb820c6b5012b861e4c20da", + "sha256:1f5533bc5d061595b3f5ce66509b34da3ba51aa0014b02356ca28fecc1251c2f", + "sha256:2bd88ebe18d87aaf7acf1641127efffb6d082e566d4f21f0bcbe67e4192c2964", + "sha256:2e8584fdd0435d75f8e340605ef3c3d6a5f18f09b75da9bd22fcf0106942b178", + "sha256:3332cbae4128639f2985be2197125e5f7e9577bf82c7fdad773e5740bb17b296", + "sha256:34b0b043baf351d087b39ceae299fdc25aa2c5945be843b1878ec6f563a99a51", + "sha256:3a7d71529550eeba0525cec2155f12e04aab9d57eb3e15015768d222ac80743f", + "sha256:40842f3b35ceaa99bb6029ab3d1c2cc4b948a19d0b5a2884f8516b744f52a934", + "sha256:4825a78fd9775b51e089f2d071abf7f3e6877be10d1fc2a0c245862bdc94f89a", + "sha256:4a9b86d0d432c9a1c76e29d5a5f233f248ddf9912e1219a3c3b2bc43809980db", + "sha256:572655ece4259d5d46102bf56276fa02a0df5912dedbd13e816e4f3f104db733", + "sha256:5758faa7be41636ac78199fda37b0b4cbd2d9a1dc79c777de3524429fc573f65", + "sha256:59888b7bf7a56b94af1755f47a4415c3e32df1c88f55261ff62df0657bd6483a", + "sha256:5c5efdbefcc35425933d320b5188485b3db080508c290748ca1fa5864da1347f", 
+ "sha256:606bba2d616123b89b749fef1e138118cdf3d84380a6a4fcfe91e1890731ea44", + "sha256:6492086933e8c4d62e4910af952423fb4fff86c18afff8ece81f228c063f9556", + "sha256:6a29736f0d2e843db59898ce1e26e22b477a3f5f60a508e897daf0cfc49fe307", + "sha256:6d721c3354092797531056757cdbe94e24ec898f5c935dd8f3a50b47083e6ea6", + "sha256:72719931d9f02c1aba13526f431f69cd62a4fc0f7634126c2d1e64d8734760aa", + "sha256:72fddff6f76f51145635adde4ba59a3c9e4fe97479f750712770286689acece4", + "sha256:761559f1cb743da773ef831a8c19fc3485e1ceb3cbc9a41135a14d0f4ec53a6d", + "sha256:762202a02627bf9eac438e1f0e12ab13b46af06ba88c2c317042597b4fbbbf73", + "sha256:770e58593d15573c5ff47e7dff22ccf7b33ca6580e1358af83dab43078b835bc", + "sha256:7c82a9e058d77ac31cbc2d0f9be8011c8e0a2de787c1752225687c54eec00226", + "sha256:7e4d25e66695f39d731c4129ce030b24456727df4ddd34febcef559109e4907b", + "sha256:8054e534ec7668fe7d6f9ca0764f1f92e16a40fdd9dd54f4154c5ee6200a00ec", + "sha256:8182241523493dbc6b108e265c5551b98d8f75c7e3a5bd84f5bf9c1db9729cbf", + "sha256:8747d2b7ae6e8dcf44070ab44494d9d0f4d6a71d10888dce0a72e62a029e65eb", + "sha256:885d89c59dd54b687c74a88dad76006c62be4ad11ee1fecea25582d854434b6e", + "sha256:9153d589ced01668d7bd9a6a5ead3306d91ded5ebef5cd97185dcd51884d23a2", + "sha256:930ca177ce417190283a07738449d08c413a9991338115e8619a1597b813d981", + "sha256:95244c6c57360b9750cf6603522a8e1b4c42c8348e962efa62aa11d83e4aa6a6", + "sha256:95c094afee2cde827081d1b543879e64bb5066a12aba0288d8c479102cfa7a7f", + "sha256:961f2936f21ea8513b89d3025a83349317284a37748dccc0beca62be2e94472c", + "sha256:9688be93d9aab0951cedde7ae374e2e77483d13d2f259512a1f94f30723e5060", + "sha256:980319ef96fda5abbb7c03ec3803a251f95ed3a9b50f84db25d948994ff6dc34", + "sha256:9bce65b7cd31ef883d67c8ab733c303231bd8b4c4d3370524405f6b416a9bc2a", + "sha256:9e7524c4137646ed3471d5b6fdf7e1c6b7d3d43394eeeb595018e32f233019ed", + "sha256:a236db066bd017d9a2a543b7414dbcc3fc0df064c3aafd4831ab6b8dcbf1cec2", + 
"sha256:a2aebcee180cf2731404bdf497da3a4683f5cac5f0b71aced8af5936c7d8283c", + "sha256:ad913daff9f602e0ae13cfa79ba265b1db01255e5784c2469619c70231f49088", + "sha256:af0d23d84fe2326d309b94d7c9ee5a6987fc8005839dd4efff2e4468df1a9c28", + "sha256:b0192ff102c5b74eef7eb11e425fe0e4a3445dcb82b5ab1fab5b8d619c7caa45", + "sha256:b2203860a8affb3bcbcbbb009f592f8591543cf3de8b85b5dccf3e65749d8724", + "sha256:b2536d5420d1e7e7751aed592c6e59af59c4ceccb8d5e36f2f7a5707f7218efc", + "sha256:b79ad597b1571b162938bbc41d0d01a8788f087f848283723bf42532ac44163f", + "sha256:b883ea8289e1d734d55d873f0e34c8d5304a4f24f18a5cc1b4d3d9b6df608b58", + "sha256:b9fc4b5634a3a61635a8f308eba5df1336bf996b5adc12dc74283743b5bf8fcc", + "sha256:bc404b4dc9b5a873bd29d2e95774d17f3b6ff38d5a19bfe34b549c3c99819cec", + "sha256:bc48a561a7e42a5ebfb638fb9c154c4c10fa39e878ce8fb9f9db12f98f24665d", + "sha256:bc60ef0efff59b9a65365bc356f5c34a497d0b84df5887c2348855a15911a12d", + "sha256:bdf4149980b75cfb6c129eef142df6247d49c6820b088661985944c617abc1ff", + "sha256:bf2f873d0d386a15f79e9730e27a5dbf7e3a52b9de120a7166a254d983eeb4da", + "sha256:bf9e6c5282cf45847c1594d6736b3dfe0c42ec97fc624a70f8c2404c050e0a00", + "sha256:c03105c8854302aa0b29f6ae5c180ce07f63e6895c46efde7eea2aeb4429229d", + "sha256:c15a8fec25e93284d4ffb73000c47f909bb261eb0f8d32886db5f1e5ab5f07de", + "sha256:c2953fe35e8a9074507ef77d3c29ec511eead8030d25f5a228a3c03dcda723df", + "sha256:c2cbec42ed94b02d6c21f5fe78c6a65f82703d375dae9448a5efda5c386d2330", + "sha256:c5b870e1c2d6a87d1e5db890747a2d69712f1cbbc91e64f144e066a9fda16b38", + "sha256:cac2abda46b1aac20e75142b84c170af1f8f387ed35ce53a3856148d362c1a26", + "sha256:d2c39584bed7a880d2d5abd0396615d9a3f382009a047d6f70646c57feb27209", + "sha256:d3eb58ac583ccf03954bef830ea70a4e02271195f24884723b499a6577ffaf64", + "sha256:d89d9bae3f5124519efea5b66b33bca68312d7e0a001313b703d710bddc3b317", + "sha256:d96f414a0fd7b7fb57e359a79ad70a5ba2357bb91375dccc1a285edcc296d35d", + "sha256:e10812129996981ee100f943c74963d801187c6048269a81879532baf1b32931", 
+ "sha256:e58926e22651924510109aa9b37baeaf0a6ae2014774746bc43e7d92e0aaf3f0", + "sha256:ea5f975016cca9bf36a76de273f0e477633b0d77dcbbb793260e7b625fb3dc86", + "sha256:f3318bf2436afb24b50a58f668eaaacbee89e23fc00f19e9d2714a9155160098", + "sha256:f5a48f0a606e5278117c130bc85849008f00d50a8efcc5a5e9c9b106a96341f5", + "sha256:fb8a7b55da901a246a2607ccda7dd056e2e594e05e0dde91206f5abae0a4ce3b", + "sha256:ffc4ea668aa8f35895d1373fc4b1f9544723aa6470b7c21619ed4011d51dc462" + ], + "index": "pypi", + "markers": "python_version >= '3.7'", + "version": "==0.1.5" + }, + "hjson": { + "hashes": [ + "sha256:55af475a27cf83a7969c808399d7bccdec8fb836a07ddbd574587593b9cdcf75", + "sha256:65713cdcf13214fb554eb8b4ef803419733f4f5e551047c9b711098ab7186b89" + ], + "version": "==3.1.0" + }, + "httpcore": { + "hashes": [ + "sha256:34a38e2f9291467ee3b44e89dd52615370e152954ba21721378a87b2960f7a61", + "sha256:421f18bac248b25d310f3cacd198d55b8e6125c107797b609ff9b7a6ba7991b5" + ], + "markers": "python_version >= '3.8'", + "version": "==1.0.5" + }, + "httpx": { + "hashes": [ + "sha256:71d5465162c13681bff01ad59b2cc68dd838ea1f10e51574bac27103f00c91a5", + "sha256:a0cb88a46f32dc874e04ee956e4c2764aba2aa228f650b06788ba6bda2962ab5" + ], + "markers": "python_version >= '3.8'", + "version": "==0.27.0" + }, + "huggingface-hub": { + "hashes": [ + "sha256:b40dd1dc5c589b7c73178f5f17996bac516524dce83f16d5219a83e33a565712", + "sha256:c458ae6b3e8e197472c4ef01d8cc5f8b3ddb70e9288afcd494753d832dac3a70" + ], + "index": "pypi", + "markers": "python_full_version >= '3.8.0'", + "version": "==0.21.1" + }, + "idna": { + "hashes": [ + "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc", + "sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0" + ], + "markers": "python_version >= '3.5'", + "version": "==3.7" + }, + "importlib-metadata": { + "hashes": [ + "sha256:30962b96c0c223483ed6cc7280e7f0199feb01a0e40cfae4d4450fc6fab1f570", + 
"sha256:b78938b926ee8d5f020fc4772d487045805a55ddbad2ecf21c6d60938dc7fcd2" + ], + "markers": "python_version < '3.12'", + "version": "==7.1.0" + }, + "inquirer": { + "hashes": [ + "sha256:273a4e4a4345ac1afdb17408d40fc8dccf3485db68203357919468561035a763", + "sha256:33b09efc1b742b9d687b540296a8b6a3f773399673321fcc2ab0eb4c109bf9b5" + ], + "markers": "python_full_version >= '3.8.1'", + "version": "==3.2.4" + }, + "isodate": { + "hashes": [ + "sha256:0751eece944162659049d35f4f549ed815792b38793f07cf73381c1c87cbed96", + "sha256:48c5881de7e8b0a0d648cb024c8062dc84e7b840ed81e864c7614fd3c127bde9" + ], + "version": "==0.6.1" + }, + "isoduration": { + "hashes": [ + "sha256:ac2f9015137935279eac671f94f89eb00584f940f5dc49462a0c4ee692ba1bd9", + "sha256:b2904c2a4228c3d44f409c8ae8e2370eb21a26f7ac2ec5446df141dde3452042" + ], + "version": "==20.11.0" + }, + "jaraco.classes": { + "hashes": [ + "sha256:47a024b51d0239c0dd8c8540c6c7f484be3b8fcf0b2d85c13825780d3b3f3acd", + "sha256:f662826b6bed8cace05e7ff873ce0f9283b5c924470fe664fff1c2f00f581790" + ], + "markers": "python_version >= '3.8'", + "version": "==3.4.0" + }, + "jeepney": { + "hashes": [ + "sha256:5efe48d255973902f6badc3ce55e2aa6c5c3b3bc642059ef3a91247bcfcc5806", + "sha256:c0a454ad016ca575060802ee4d590dd912e35c122fa04e70306de3d076cce755" + ], + "markers": "sys_platform == 'linux'", + "version": "==0.8.0" + }, + "jinja2": { + "hashes": [ + "sha256:7d6d50dd97d52cbc355597bd845fabfbac3f551e1f99619e39a35ce8c370b5fa", + "sha256:ac8bd6544d4bb2c9792bf3a159e80bba8fda7f07e81bc3aed565432d5925ba90" + ], + "index": "pypi", + "markers": "python_version >= '3.7'", + "version": "==3.1.3" + }, + "jmespath": { + "hashes": [ + "sha256:02e2e4cc71b5bcab88332eebf907519190dd9e6e82107fa7f83b1003a6252980", + "sha256:90261b206d6defd58fdd5e85f478bf633a2901798906be2ad389150c5c60edbe" + ], + "markers": "python_version >= '3.7'", + "version": "==1.0.1" + }, + "joblib": { + "hashes": [ + "sha256:1eb0dc091919cd384490de890cb5dfd538410a6d4b3b54eef09fb8c50b409b1c", + 
"sha256:42942470d4062537be4d54c83511186da1fc14ba354961a2114da91efa9a4ed7" + ], + "markers": "python_version >= '3.8'", + "version": "==1.4.0" + }, + "jsonpointer": { + "hashes": [ + "sha256:15d51bba20eea3165644553647711d150376234112651b4f1811022aecad7d7a", + "sha256:585cee82b70211fa9e6043b7bb89db6e1aa49524340dde8ad6b63206ea689d88" + ], + "version": "==2.4" + }, + "jsonref": { + "hashes": [ + "sha256:32fe8e1d85af0fdefbebce950af85590b22b60f9e95443176adbde4e1ecea552", + "sha256:590dc7773df6c21cbf948b5dac07a72a251db28b0238ceecce0a2abfa8ec30a9" + ], + "markers": "python_version >= '3.7'", + "version": "==1.1.0" + }, + "jsonschema": { + "extras": [ + "format-nongpl" + ], + "hashes": [ + "sha256:7996507afae316306f9e2290407761157c6f78002dcf7419acb99822143d1c6f", + "sha256:85727c00279f5fa6bedbe6238d2aa6403bedd8b4864ab11207d07df3cc1b2ee5" + ], + "markers": "python_version >= '3.8'", + "version": "==4.21.1" + }, + "jsonschema-specifications": { + "hashes": [ + "sha256:48a76787b3e70f5ed53f1160d2b81f586e4ca6d1548c5de7085d1682674764cc", + "sha256:87e4fdf3a94858b8a2ba2778d9ba57d8a9cafca7c7489c46ba0d30a8bc6a9c3c" + ], + "markers": "python_version >= '3.8'", + "version": "==2023.12.1" + }, + "kaggle": { + "hashes": [ + "sha256:f12a05b82d2afb10a74adc69a5f8a736c64bd9934d0b5acfb4fe4f97357530b4" + ], + "index": "pypi", + "version": "==1.6.12" + }, + "keyring": { + "hashes": [ + "sha256:c3327b6ffafc0e8befbdb597cacdb4928ffe5c1212f7645f186e6d9957a898db", + "sha256:df38a4d7419a6a60fea5cef1e45a948a3e8430dd12ad88b0f423c5c143906218" + ], + "index": "pypi", + "markers": "python_version >= '3.8'", + "version": "==24.3.1" + }, + "markupsafe": { + "hashes": [ + "sha256:00e046b6dd71aa03a41079792f8473dc494d564611a8f89bbbd7cb93295ebdcf", + "sha256:075202fa5b72c86ad32dc7d0b56024ebdbcf2048c0ba09f1cde31bfdd57bcfff", + "sha256:0e397ac966fdf721b2c528cf028494e86172b4feba51d65f81ffd65c63798f3f", + "sha256:17b950fccb810b3293638215058e432159d2b71005c74371d784862b7e4683f3", + 
"sha256:1f3fbcb7ef1f16e48246f704ab79d79da8a46891e2da03f8783a5b6fa41a9532", + "sha256:2174c595a0d73a3080ca3257b40096db99799265e1c27cc5a610743acd86d62f", + "sha256:2b7c57a4dfc4f16f7142221afe5ba4e093e09e728ca65c51f5620c9aaeb9a617", + "sha256:2d2d793e36e230fd32babe143b04cec8a8b3eb8a3122d2aceb4a371e6b09b8df", + "sha256:30b600cf0a7ac9234b2638fbc0fb6158ba5bdcdf46aeb631ead21248b9affbc4", + "sha256:397081c1a0bfb5124355710fe79478cdbeb39626492b15d399526ae53422b906", + "sha256:3a57fdd7ce31c7ff06cdfbf31dafa96cc533c21e443d57f5b1ecc6cdc668ec7f", + "sha256:3c6b973f22eb18a789b1460b4b91bf04ae3f0c4234a0a6aa6b0a92f6f7b951d4", + "sha256:3e53af139f8579a6d5f7b76549125f0d94d7e630761a2111bc431fd820e163b8", + "sha256:4096e9de5c6fdf43fb4f04c26fb114f61ef0bf2e5604b6ee3019d51b69e8c371", + "sha256:4275d846e41ecefa46e2015117a9f491e57a71ddd59bbead77e904dc02b1bed2", + "sha256:4c31f53cdae6ecfa91a77820e8b151dba54ab528ba65dfd235c80b086d68a465", + "sha256:4f11aa001c540f62c6166c7726f71f7573b52c68c31f014c25cc7901deea0b52", + "sha256:5049256f536511ee3f7e1b3f87d1d1209d327e818e6ae1365e8653d7e3abb6a6", + "sha256:58c98fee265677f63a4385256a6d7683ab1832f3ddd1e66fe948d5880c21a169", + "sha256:598e3276b64aff0e7b3451b72e94fa3c238d452e7ddcd893c3ab324717456bad", + "sha256:5b7b716f97b52c5a14bffdf688f971b2d5ef4029127f1ad7a513973cfd818df2", + "sha256:5dedb4db619ba5a2787a94d877bc8ffc0566f92a01c0ef214865e54ecc9ee5e0", + "sha256:619bc166c4f2de5caa5a633b8b7326fbe98e0ccbfacabd87268a2b15ff73a029", + "sha256:629ddd2ca402ae6dbedfceeba9c46d5f7b2a61d9749597d4307f943ef198fc1f", + "sha256:656f7526c69fac7f600bd1f400991cc282b417d17539a1b228617081106feb4a", + "sha256:6ec585f69cec0aa07d945b20805be741395e28ac1627333b1c5b0105962ffced", + "sha256:72b6be590cc35924b02c78ef34b467da4ba07e4e0f0454a2c5907f473fc50ce5", + "sha256:7502934a33b54030eaf1194c21c692a534196063db72176b0c4028e140f8f32c", + "sha256:7a68b554d356a91cce1236aa7682dc01df0edba8d043fd1ce607c49dd3c1edcf", + "sha256:7b2e5a267c855eea6b4283940daa6e88a285f5f2a67f2220203786dfa59b37e9", 
+ "sha256:823b65d8706e32ad2df51ed89496147a42a2a6e01c13cfb6ffb8b1e92bc910bb", + "sha256:8590b4ae07a35970728874632fed7bd57b26b0102df2d2b233b6d9d82f6c62ad", + "sha256:8dd717634f5a044f860435c1d8c16a270ddf0ef8588d4887037c5028b859b0c3", + "sha256:8dec4936e9c3100156f8a2dc89c4b88d5c435175ff03413b443469c7c8c5f4d1", + "sha256:97cafb1f3cbcd3fd2b6fbfb99ae11cdb14deea0736fc2b0952ee177f2b813a46", + "sha256:a17a92de5231666cfbe003f0e4b9b3a7ae3afb1ec2845aadc2bacc93ff85febc", + "sha256:a549b9c31bec33820e885335b451286e2969a2d9e24879f83fe904a5ce59d70a", + "sha256:ac07bad82163452a6884fe8fa0963fb98c2346ba78d779ec06bd7a6262132aee", + "sha256:ae2ad8ae6ebee9d2d94b17fb62763125f3f374c25618198f40cbb8b525411900", + "sha256:b91c037585eba9095565a3556f611e3cbfaa42ca1e865f7b8015fe5c7336d5a5", + "sha256:bc1667f8b83f48511b94671e0e441401371dfd0f0a795c7daa4a3cd1dde55bea", + "sha256:bec0a414d016ac1a18862a519e54b2fd0fc8bbfd6890376898a6c0891dd82e9f", + "sha256:bf50cd79a75d181c9181df03572cdce0fbb75cc353bc350712073108cba98de5", + "sha256:bff1b4290a66b490a2f4719358c0cdcd9bafb6b8f061e45c7a2460866bf50c2e", + "sha256:c061bb86a71b42465156a3ee7bd58c8c2ceacdbeb95d05a99893e08b8467359a", + "sha256:c8b29db45f8fe46ad280a7294f5c3ec36dbac9491f2d1c17345be8e69cc5928f", + "sha256:ce409136744f6521e39fd8e2a24c53fa18ad67aa5bc7c2cf83645cce5b5c4e50", + "sha256:d050b3361367a06d752db6ead6e7edeb0009be66bc3bae0ee9d97fb326badc2a", + "sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b", + "sha256:d9fad5155d72433c921b782e58892377c44bd6252b5af2f67f16b194987338a4", + "sha256:daa4ee5a243f0f20d528d939d06670a298dd39b1ad5f8a72a4275124a7819eff", + "sha256:db0b55e0f3cc0be60c1f19efdde9a637c32740486004f20d1cff53c3c0ece4d2", + "sha256:e61659ba32cf2cf1481e575d0462554625196a1f2fc06a1c777d3f48e8865d46", + "sha256:ea3d8a3d18833cf4304cd2fc9cbb1efe188ca9b5efef2bdac7adc20594a0e46b", + "sha256:ec6a563cff360b50eed26f13adc43e61bc0c04d94b8be985e6fb24b81f6dcfdf", + 
"sha256:f5dfb42c4604dddc8e4305050aa6deb084540643ed5804d7455b5df8fe16f5e5", + "sha256:fa173ec60341d6bb97a89f5ea19c85c5643c1e7dedebc22f5181eb73573142c5", + "sha256:fa9db3f79de01457b03d4f01b34cf91bc0048eb2c3846ff26f66687c2f6d16ab", + "sha256:fce659a462a1be54d2ffcacea5e3ba2d74daa74f30f5f143fe0c58636e355fdd", + "sha256:ffee1f21e5ef0d712f9033568f8344d5da8cc2869dbd08d87c84656e6a2d2f68" + ], + "markers": "python_version >= '3.7'", + "version": "==2.1.5" + }, + "monotonic": { + "hashes": [ + "sha256:3a55207bcfed53ddd5c5bae174524062935efed17792e9de2ad0205ce9ad63f7", + "sha256:68687e19a14f11f26d140dd5c86f3dba4bf5df58003000ed467e0e2a69bca96c" + ], + "version": "==1.6" + }, + "more-itertools": { + "hashes": [ + "sha256:686b06abe565edfab151cb8fd385a05651e1fdf8f0a14191e4439283421f8684", + "sha256:8fccb480c43d3e99a00087634c06dd02b0d50fbf088b380de5a41a015ec239e1" + ], + "markers": "python_version >= '3.8'", + "version": "==10.2.0" + }, + "mpmath": { + "hashes": [ + "sha256:7a28eb2a9774d00c7bc92411c19a89209d5da7c4c9a9e227be8330a23a25b91f", + "sha256:a0b2b9fe80bbcd81a6647ff13108738cfb482d481d826cc0e02f5b35e5c88d2c" + ], + "version": "==1.3.0" + }, + "msgpack": { + "hashes": [ + "sha256:00e073efcba9ea99db5acef3959efa45b52bc67b61b00823d2a1a6944bf45982", + "sha256:0726c282d188e204281ebd8de31724b7d749adebc086873a59efb8cf7ae27df3", + "sha256:0ceea77719d45c839fd73abcb190b8390412a890df2f83fb8cf49b2a4b5c2f40", + "sha256:114be227f5213ef8b215c22dde19532f5da9652e56e8ce969bf0a26d7c419fee", + "sha256:13577ec9e247f8741c84d06b9ece5f654920d8365a4b636ce0e44f15e07ec693", + "sha256:1876b0b653a808fcd50123b953af170c535027bf1d053b59790eebb0aeb38950", + "sha256:1ab0bbcd4d1f7b6991ee7c753655b481c50084294218de69365f8f1970d4c151", + "sha256:1cce488457370ffd1f953846f82323cb6b2ad2190987cd4d70b2713e17268d24", + "sha256:26ee97a8261e6e35885c2ecd2fd4a6d38252246f94a2aec23665a4e66d066305", + "sha256:3528807cbbb7f315bb81959d5961855e7ba52aa60a3097151cb21956fbc7502b", + 
"sha256:374a8e88ddab84b9ada695d255679fb99c53513c0a51778796fcf0944d6c789c", + "sha256:376081f471a2ef24828b83a641a02c575d6103a3ad7fd7dade5486cad10ea659", + "sha256:3923a1778f7e5ef31865893fdca12a8d7dc03a44b33e2a5f3295416314c09f5d", + "sha256:4916727e31c28be8beaf11cf117d6f6f188dcc36daae4e851fee88646f5b6b18", + "sha256:493c5c5e44b06d6c9268ce21b302c9ca055c1fd3484c25ba41d34476c76ee746", + "sha256:505fe3d03856ac7d215dbe005414bc28505d26f0c128906037e66d98c4e95868", + "sha256:5845fdf5e5d5b78a49b826fcdc0eb2e2aa7191980e3d2cfd2a30303a74f212e2", + "sha256:5c330eace3dd100bdb54b5653b966de7f51c26ec4a7d4e87132d9b4f738220ba", + "sha256:5dbf059fb4b7c240c873c1245ee112505be27497e90f7c6591261c7d3c3a8228", + "sha256:5e390971d082dba073c05dbd56322427d3280b7cc8b53484c9377adfbae67dc2", + "sha256:5fbb160554e319f7b22ecf530a80a3ff496d38e8e07ae763b9e82fadfe96f273", + "sha256:64d0fcd436c5683fdd7c907eeae5e2cbb5eb872fafbc03a43609d7941840995c", + "sha256:69284049d07fce531c17404fcba2bb1df472bc2dcdac642ae71a2d079d950653", + "sha256:6a0e76621f6e1f908ae52860bdcb58e1ca85231a9b0545e64509c931dd34275a", + "sha256:73ee792784d48aa338bba28063e19a27e8d989344f34aad14ea6e1b9bd83f596", + "sha256:74398a4cf19de42e1498368c36eed45d9528f5fd0155241e82c4082b7e16cffd", + "sha256:7938111ed1358f536daf311be244f34df7bf3cdedb3ed883787aca97778b28d8", + "sha256:82d92c773fbc6942a7a8b520d22c11cfc8fd83bba86116bfcf962c2f5c2ecdaa", + "sha256:83b5c044f3eff2a6534768ccfd50425939e7a8b5cf9a7261c385de1e20dcfc85", + "sha256:8db8e423192303ed77cff4dce3a4b88dbfaf43979d280181558af5e2c3c71afc", + "sha256:9517004e21664f2b5a5fd6333b0731b9cf0817403a941b393d89a2f1dc2bd836", + "sha256:95c02b0e27e706e48d0e5426d1710ca78e0f0628d6e89d5b5a5b91a5f12274f3", + "sha256:99881222f4a8c2f641f25703963a5cefb076adffd959e0558dc9f803a52d6a58", + "sha256:9ee32dcb8e531adae1f1ca568822e9b3a738369b3b686d1477cbc643c4a9c128", + "sha256:a22e47578b30a3e199ab067a4d43d790249b3c0587d9a771921f86250c8435db", + "sha256:b5505774ea2a73a86ea176e8a9a4a7c8bf5d521050f0f6f8426afe798689243f", 
+ "sha256:bd739c9251d01e0279ce729e37b39d49a08c0420d3fee7f2a4968c0576678f77", + "sha256:d16a786905034e7e34098634b184a7d81f91d4c3d246edc6bd7aefb2fd8ea6ad", + "sha256:d3420522057ebab1728b21ad473aa950026d07cb09da41103f8e597dfbfaeb13", + "sha256:d56fd9f1f1cdc8227d7b7918f55091349741904d9520c65f0139a9755952c9e8", + "sha256:d661dc4785affa9d0edfdd1e59ec056a58b3dbb9f196fa43587f3ddac654ac7b", + "sha256:dfe1f0f0ed5785c187144c46a292b8c34c1295c01da12e10ccddfc16def4448a", + "sha256:e1dd7839443592d00e96db831eddb4111a2a81a46b028f0facd60a09ebbdd543", + "sha256:e2872993e209f7ed04d963e4b4fbae72d034844ec66bc4ca403329db2074377b", + "sha256:e2f879ab92ce502a1e65fce390eab619774dda6a6ff719718069ac94084098ce", + "sha256:e3aa7e51d738e0ec0afbed661261513b38b3014754c9459508399baf14ae0c9d", + "sha256:e532dbd6ddfe13946de050d7474e3f5fb6ec774fbb1a188aaf469b08cf04189a", + "sha256:e6b7842518a63a9f17107eb176320960ec095a8ee3b4420b5f688e24bf50c53c", + "sha256:e75753aeda0ddc4c28dce4c32ba2f6ec30b1b02f6c0b14e547841ba5b24f753f", + "sha256:eadb9f826c138e6cf3c49d6f8de88225a3c0ab181a9b4ba792e006e5292d150e", + "sha256:ed59dd52075f8fc91da6053b12e8c89e37aa043f8986efd89e61fae69dc1b011", + "sha256:ef254a06bcea461e65ff0373d8a0dd1ed3aa004af48839f002a0c994a6f72d04", + "sha256:f3709997b228685fe53e8c433e2df9f0cdb5f4542bd5114ed17ac3c0129b0480", + "sha256:f51bab98d52739c50c56658cc303f190785f9a2cd97b823357e7aeae54c8f68a", + "sha256:f9904e24646570539a8950400602d66d2b2c492b9010ea7e965025cb71d0c86d", + "sha256:f9af38a89b6a5c04b7d18c492c8ccf2aee7048aff1ce8437c4683bb5a1df893d" + ], + "markers": "python_version >= '3.8'", + "version": "==1.0.8" + }, + "multidict": { + "hashes": [ + "sha256:01265f5e40f5a17f8241d52656ed27192be03bfa8764d88e8220141d1e4b3556", + "sha256:0275e35209c27a3f7951e1ce7aaf93ce0d163b28948444bec61dd7badc6d3f8c", + "sha256:04bde7a7b3de05732a4eb39c94574db1ec99abb56162d6c520ad26f83267de29", + "sha256:04da1bb8c8dbadf2a18a452639771951c662c5ad03aefe4884775454be322c9b", + 
"sha256:09a892e4a9fb47331da06948690ae38eaa2426de97b4ccbfafbdcbe5c8f37ff8", + "sha256:0d63c74e3d7ab26de115c49bffc92cc77ed23395303d496eae515d4204a625e7", + "sha256:107c0cdefe028703fb5dafe640a409cb146d44a6ae201e55b35a4af8e95457dd", + "sha256:141b43360bfd3bdd75f15ed811850763555a251e38b2405967f8e25fb43f7d40", + "sha256:14c2976aa9038c2629efa2c148022ed5eb4cb939e15ec7aace7ca932f48f9ba6", + "sha256:19fe01cea168585ba0f678cad6f58133db2aa14eccaf22f88e4a6dccadfad8b3", + "sha256:1d147090048129ce3c453f0292e7697d333db95e52616b3793922945804a433c", + "sha256:1d9ea7a7e779d7a3561aade7d596649fbecfa5c08a7674b11b423783217933f9", + "sha256:215ed703caf15f578dca76ee6f6b21b7603791ae090fbf1ef9d865571039ade5", + "sha256:21fd81c4ebdb4f214161be351eb5bcf385426bf023041da2fd9e60681f3cebae", + "sha256:220dd781e3f7af2c2c1053da9fa96d9cf3072ca58f057f4c5adaaa1cab8fc442", + "sha256:228b644ae063c10e7f324ab1ab6b548bdf6f8b47f3ec234fef1093bc2735e5f9", + "sha256:29bfeb0dff5cb5fdab2023a7a9947b3b4af63e9c47cae2a10ad58394b517fddc", + "sha256:2f4848aa3baa109e6ab81fe2006c77ed4d3cd1e0ac2c1fbddb7b1277c168788c", + "sha256:2faa5ae9376faba05f630d7e5e6be05be22913782b927b19d12b8145968a85ea", + "sha256:2ffc42c922dbfddb4a4c3b438eb056828719f07608af27d163191cb3e3aa6cc5", + "sha256:37b15024f864916b4951adb95d3a80c9431299080341ab9544ed148091b53f50", + "sha256:3cc2ad10255f903656017363cd59436f2111443a76f996584d1077e43ee51182", + "sha256:3d25f19500588cbc47dc19081d78131c32637c25804df8414463ec908631e453", + "sha256:403c0911cd5d5791605808b942c88a8155c2592e05332d2bf78f18697a5fa15e", + "sha256:411bf8515f3be9813d06004cac41ccf7d1cd46dfe233705933dd163b60e37600", + "sha256:425bf820055005bfc8aa9a0b99ccb52cc2f4070153e34b701acc98d201693733", + "sha256:435a0984199d81ca178b9ae2c26ec3d49692d20ee29bc4c11a2a8d4514c67eda", + "sha256:4a6a4f196f08c58c59e0b8ef8ec441d12aee4125a7d4f4fef000ccb22f8d7241", + "sha256:4cc0ef8b962ac7a5e62b9e826bd0cd5040e7d401bc45a6835910ed699037a461", + "sha256:51d035609b86722963404f711db441cf7134f1889107fb171a970c9701f92e1e", 
+ "sha256:53689bb4e102200a4fafa9de9c7c3c212ab40a7ab2c8e474491914d2305f187e", + "sha256:55205d03e8a598cfc688c71ca8ea5f66447164efff8869517f175ea632c7cb7b", + "sha256:5c0631926c4f58e9a5ccce555ad7747d9a9f8b10619621f22f9635f069f6233e", + "sha256:5cb241881eefd96b46f89b1a056187ea8e9ba14ab88ba632e68d7a2ecb7aadf7", + "sha256:60d698e8179a42ec85172d12f50b1668254628425a6bd611aba022257cac1386", + "sha256:612d1156111ae11d14afaf3a0669ebf6c170dbb735e510a7438ffe2369a847fd", + "sha256:6214c5a5571802c33f80e6c84713b2c79e024995b9c5897f794b43e714daeec9", + "sha256:6939c95381e003f54cd4c5516740faba40cf5ad3eeff460c3ad1d3e0ea2549bf", + "sha256:69db76c09796b313331bb7048229e3bee7928eb62bab5e071e9f7fcc4879caee", + "sha256:6bf7a982604375a8d49b6cc1b781c1747f243d91b81035a9b43a2126c04766f5", + "sha256:766c8f7511df26d9f11cd3a8be623e59cca73d44643abab3f8c8c07620524e4a", + "sha256:76c0de87358b192de7ea9649beb392f107dcad9ad27276324c24c91774ca5271", + "sha256:76f067f5121dcecf0d63a67f29080b26c43c71a98b10c701b0677e4a065fbd54", + "sha256:7901c05ead4b3fb75113fb1dd33eb1253c6d3ee37ce93305acd9d38e0b5f21a4", + "sha256:79660376075cfd4b2c80f295528aa6beb2058fd289f4c9252f986751a4cd0496", + "sha256:79a6d2ba910adb2cbafc95dad936f8b9386e77c84c35bc0add315b856d7c3abb", + "sha256:7afcdd1fc07befad18ec4523a782cde4e93e0a2bf71239894b8d61ee578c1319", + "sha256:7be7047bd08accdb7487737631d25735c9a04327911de89ff1b26b81745bd4e3", + "sha256:7c6390cf87ff6234643428991b7359b5f59cc15155695deb4eda5c777d2b880f", + "sha256:7df704ca8cf4a073334e0427ae2345323613e4df18cc224f647f251e5e75a527", + "sha256:85f67aed7bb647f93e7520633d8f51d3cbc6ab96957c71272b286b2f30dc70ed", + "sha256:896ebdcf62683551312c30e20614305f53125750803b614e9e6ce74a96232604", + "sha256:92d16a3e275e38293623ebf639c471d3e03bb20b8ebb845237e0d3664914caef", + "sha256:99f60d34c048c5c2fabc766108c103612344c46e35d4ed9ae0673d33c8fb26e8", + "sha256:9fe7b0653ba3d9d65cbe7698cca585bf0f8c83dbbcc710db9c90f478e175f2d5", + 
"sha256:a3145cb08d8625b2d3fee1b2d596a8766352979c9bffe5d7833e0503d0f0b5e5", + "sha256:aeaf541ddbad8311a87dd695ed9642401131ea39ad7bc8cf3ef3967fd093b626", + "sha256:b55358304d7a73d7bdf5de62494aaf70bd33015831ffd98bc498b433dfe5b10c", + "sha256:b82cc8ace10ab5bd93235dfaab2021c70637005e1ac787031f4d1da63d493c1d", + "sha256:c0868d64af83169e4d4152ec612637a543f7a336e4a307b119e98042e852ad9c", + "sha256:c1c1496e73051918fcd4f58ff2e0f2f3066d1c76a0c6aeffd9b45d53243702cc", + "sha256:c9bf56195c6bbd293340ea82eafd0071cb3d450c703d2c93afb89f93b8386ccc", + "sha256:cbebcd5bcaf1eaf302617c114aa67569dd3f090dd0ce8ba9e35e9985b41ac35b", + "sha256:cd6c8fca38178e12c00418de737aef1261576bd1b6e8c6134d3e729a4e858b38", + "sha256:ceb3b7e6a0135e092de86110c5a74e46bda4bd4fbfeeb3a3bcec79c0f861e450", + "sha256:cf590b134eb70629e350691ecca88eac3e3b8b3c86992042fb82e3cb1830d5e1", + "sha256:d3eb1ceec286eba8220c26f3b0096cf189aea7057b6e7b7a2e60ed36b373b77f", + "sha256:d65f25da8e248202bd47445cec78e0025c0fe7582b23ec69c3b27a640dd7a8e3", + "sha256:d6f6d4f185481c9669b9447bf9d9cf3b95a0e9df9d169bbc17e363b7d5487755", + "sha256:d84a5c3a5f7ce6db1f999fb9438f686bc2e09d38143f2d93d8406ed2dd6b9226", + "sha256:d946b0a9eb8aaa590df1fe082cee553ceab173e6cb5b03239716338629c50c7a", + "sha256:dce1c6912ab9ff5f179eaf6efe7365c1f425ed690b03341911bf4939ef2f3046", + "sha256:de170c7b4fe6859beb8926e84f7d7d6c693dfe8e27372ce3b76f01c46e489fcf", + "sha256:e02021f87a5b6932fa6ce916ca004c4d441509d33bbdbeca70d05dff5e9d2479", + "sha256:e030047e85cbcedbfc073f71836d62dd5dadfbe7531cae27789ff66bc551bd5e", + "sha256:e0e79d91e71b9867c73323a3444724d496c037e578a0e1755ae159ba14f4f3d1", + "sha256:e4428b29611e989719874670fd152b6625500ad6c686d464e99f5aaeeaca175a", + "sha256:e4972624066095e52b569e02b5ca97dbd7a7ddd4294bf4e7247d52635630dd83", + "sha256:e7be68734bd8c9a513f2b0cfd508802d6609da068f40dc57d4e3494cefc92929", + "sha256:e8e94e6912639a02ce173341ff62cc1201232ab86b8a8fcc05572741a5dc7d93", + "sha256:ea1456df2a27c73ce51120fa2f519f1bea2f4a03a917f4a43c8707cf4cbbae1a", 
+ "sha256:ebd8d160f91a764652d3e51ce0d2956b38efe37c9231cd82cfc0bed2e40b581c", + "sha256:eca2e9d0cc5a889850e9bbd68e98314ada174ff6ccd1129500103df7a94a7a44", + "sha256:edd08e6f2f1a390bf137080507e44ccc086353c8e98c657e666c017718561b89", + "sha256:f285e862d2f153a70586579c15c44656f888806ed0e5b56b64489afe4a2dbfba", + "sha256:f2a1dee728b52b33eebff5072817176c172050d44d67befd681609b4746e1c2e", + "sha256:f7e301075edaf50500f0b341543c41194d8df3ae5caf4702f2095f3ca73dd8da", + "sha256:fb616be3538599e797a2017cccca78e354c767165e8858ab5116813146041a24", + "sha256:fce28b3c8a81b6b36dfac9feb1de115bab619b3c13905b419ec71d03a3fc1423", + "sha256:fe5d7785250541f7f5019ab9cba2c71169dc7d74d0f45253f8313f436458a4ef" + ], + "markers": "python_version >= '3.7'", + "version": "==6.0.5" + }, + "multiprocess": { + "hashes": [ + "sha256:0dfd078c306e08d46d7a8d06fb120313d87aa43af60d66da43ffff40b44d2f41", + "sha256:161af703d4652a0e1410be6abccecde4a7ddffd19341be0a7011b94aeb171ac1", + "sha256:37b55f71c07e2d741374998c043b9520b626a8dddc8b3129222ca4f1a06ef67a", + "sha256:476887be10e2f59ff183c006af746cb6f1fd0eadcfd4ef49e605cbe2659920ee", + "sha256:a0bafd3ae1b732eac64be2e72038231c1ba97724b60b09400d68f229fcc2fbf3", + "sha256:a71d82033454891091a226dfc319d0cfa8019a4e888ef9ca910372a446de4435", + "sha256:af4cabb0dac72abfb1e794fa7855c325fd2b55a10a44628a3c1ad3311c04127a", + "sha256:ba8c31889abf4511c7308a8c52bb4a30b9d590e7f58523302ba00237702ca054", + "sha256:c4a9944c67bd49f823687463660a2d6daae94c289adff97e0f9d696ba6371d02", + "sha256:d951bed82c8f73929ac82c61f01a7b5ce8f3e5ef40f5b52553b4f547ce2b08ec", + "sha256:e7b9d0f307cd9bd50851afaac0dba2cb6c44449efff697df7c7645f7d3f2be3a", + "sha256:fc0544c531920dde3b00c29863377f87e1632601092ea2daca74e4beb40faa2e" + ], + "markers": "python_version >= '3.8'", + "version": "==0.70.16" + }, + "neptune": { + "hashes": [ + "sha256:31684dcd0132b084671151e79e5b91277dfbd08b87ea3d1da8ca4e058852b139", + "sha256:e2e6dab32d30b3f6788c459f4a98f1a74c8927451653625c682f87fbd412494a" + ], + "index": 
"pypi", + "markers": "python_version >= '3.7' and python_version < '4.0'", + "version": "==1.9.1" + }, + "networkx": { + "hashes": [ + "sha256:0c127d8b2f4865f59ae9cb8aafcd60b5c70f3241ebd66f7defad7c4ab90126c9", + "sha256:28575580c6ebdaf4505b22c6256a2b9de86b316dc63ba9e93abde3d78dfdbcf2" + ], + "markers": "python_version >= '3.10'", + "version": "==3.3" + }, + "ninja": { + "hashes": [ + "sha256:18302d96a5467ea98b68e1cae1ae4b4fb2b2a56a82b955193c637557c7273dbd", + "sha256:185e0641bde601e53841525c4196278e9aaf4463758da6dd1e752c0a0f54136a", + "sha256:376889c76d87b95b5719fdd61dd7db193aa7fd4432e5d52d2e44e4c497bdbbee", + "sha256:3e0f9be5bb20d74d58c66cc1c414c3e6aeb45c35b0d0e41e8d739c2c0d57784f", + "sha256:73b93c14046447c7c5cc892433d4fae65d6364bec6685411cb97a8bcf815f93a", + "sha256:7563ce1d9fe6ed5af0b8dd9ab4a214bf4ff1f2f6fd6dc29f480981f0f8b8b249", + "sha256:76482ba746a2618eecf89d5253c0d1e4f1da1270d41e9f54dfbd91831b0f6885", + "sha256:84502ec98f02a037a169c4b0d5d86075eaf6afc55e1879003d6cab51ced2ea4b", + "sha256:95da904130bfa02ea74ff9c0116b4ad266174fafb1c707aa50212bc7859aebf1", + "sha256:9d793b08dd857e38d0b6ffe9e6b7145d7c485a42dcfea04905ca0cdb6017cc3c", + "sha256:9df724344202b83018abb45cb1efc22efd337a1496514e7e6b3b59655be85205", + "sha256:aad34a70ef15b12519946c5633344bc775a7656d789d9ed5fdb0d456383716ef", + "sha256:d491fc8d89cdcb416107c349ad1e3a735d4c4af5e1cb8f5f727baca6350fdaea", + "sha256:ecf80cf5afd09f14dcceff28cb3f11dc90fb97c999c89307aea435889cb66877", + "sha256:fa2ba9d74acfdfbfbcf06fad1b8282de8a7a8c481d9dee45c859a8c93fcc1082" + ], + "version": "==1.11.1.1" + }, + "numpy": { + "hashes": [ + "sha256:03a8c78d01d9781b28a6989f6fa1bb2c4f2d51201cf99d3dd875df6fbd96b23b", + "sha256:08beddf13648eb95f8d867350f6a018a4be2e5ad54c8d8caed89ebca558b2818", + "sha256:1af303d6b2210eb850fcf03064d364652b7120803a0b872f5211f5234b399f20", + "sha256:1dda2e7b4ec9dd512f84935c5f126c8bd8b9f2fc001e9f54af255e8c5f16b0e0", + "sha256:2a02aba9ed12e4ac4eb3ea9421c420301a0c6460d9830d74a9df87efa4912010", + 
"sha256:2e4ee3380d6de9c9ec04745830fd9e2eccb3e6cf790d39d7b98ffd19b0dd754a", + "sha256:3373d5d70a5fe74a2c1bb6d2cfd9609ecf686d47a2d7b1d37a8f3b6bf6003aea", + "sha256:47711010ad8555514b434df65f7d7b076bb8261df1ca9bb78f53d3b2db02e95c", + "sha256:4c66707fabe114439db9068ee468c26bbdf909cac0fb58686a42a24de1760c71", + "sha256:50193e430acfc1346175fcbdaa28ffec49947a06918b7b92130744e81e640110", + "sha256:52b8b60467cd7dd1e9ed082188b4e6bb35aa5cdd01777621a1658910745b90be", + "sha256:60dedbb91afcbfdc9bc0b1f3f402804070deed7392c23eb7a7f07fa857868e8a", + "sha256:62b8e4b1e28009ef2846b4c7852046736bab361f7aeadeb6a5b89ebec3c7055a", + "sha256:666dbfb6ec68962c033a450943ded891bed2d54e6755e35e5835d63f4f6931d5", + "sha256:675d61ffbfa78604709862923189bad94014bef562cc35cf61d3a07bba02a7ed", + "sha256:679b0076f67ecc0138fd2ede3a8fd196dddc2ad3254069bcb9faf9a79b1cebcd", + "sha256:7349ab0fa0c429c82442a27a9673fc802ffdb7c7775fad780226cb234965e53c", + "sha256:7ab55401287bfec946ced39700c053796e7cc0e3acbef09993a9ad2adba6ca6e", + "sha256:7e50d0a0cc3189f9cb0aeb3a6a6af18c16f59f004b866cd2be1c14b36134a4a0", + "sha256:95a7476c59002f2f6c590b9b7b998306fba6a5aa646b1e22ddfeaf8f78c3a29c", + "sha256:96ff0b2ad353d8f990b63294c8986f1ec3cb19d749234014f4e7eb0112ceba5a", + "sha256:9fad7dcb1aac3c7f0584a5a8133e3a43eeb2fe127f47e3632d43d677c66c102b", + "sha256:9ff0f4f29c51e2803569d7a51c2304de5554655a60c5d776e35b4a41413830d0", + "sha256:a354325ee03388678242a4d7ebcd08b5c727033fcff3b2f536aea978e15ee9e6", + "sha256:a4abb4f9001ad2858e7ac189089c42178fcce737e4169dc61321660f1a96c7d2", + "sha256:ab47dbe5cc8210f55aa58e4805fe224dac469cde56b9f731a4c098b91917159a", + "sha256:afedb719a9dcfc7eaf2287b839d8198e06dcd4cb5d276a3df279231138e83d30", + "sha256:b3ce300f3644fb06443ee2222c2201dd3a89ea6040541412b8fa189341847218", + "sha256:b97fe8060236edf3662adfc2c633f56a08ae30560c56310562cb4f95500022d5", + "sha256:bfe25acf8b437eb2a8b2d49d443800a5f18508cd811fea3181723922a8a82b07", + "sha256:cd25bcecc4974d09257ffcd1f098ee778f7834c3ad767fe5db785be9a4aa9cb2", 
+ "sha256:d209d8969599b27ad20994c8e41936ee0964e6da07478d6c35016bc386b66ad4", + "sha256:d5241e0a80d808d70546c697135da2c613f30e28251ff8307eb72ba696945764", + "sha256:edd8b5fe47dab091176d21bb6de568acdd906d1887a4584a15a9a96a1dca06ef", + "sha256:f870204a840a60da0b12273ef34f7051e98c3b5961b61b0c2c1be6dfd64fbcd3", + "sha256:ffa75af20b44f8dba823498024771d5ac50620e6915abac414251bd971b4529f" + ], + "index": "pypi", + "markers": "python_version >= '3.9'", + "version": "==1.26.4" + }, + "nvidia-cublas-cu11": { + "hashes": [ + "sha256:39fb40e8f486dd8a2ddb8fdeefe1d5b28f5b99df01c87ab3676f057a74a5a6f3", + "sha256:6ab12b1302bef8ac1ff4414edd1c059e57f4833abef9151683fb8f4de25900be" + ], + "markers": "platform_system == 'Linux' and platform_machine == 'x86_64'", + "version": "==11.11.3.6" + }, + "nvidia-cuda-cupti-cu11": { + "hashes": [ + "sha256:0e50c707df56c75a2c0703dc6b886f3c97a22f37d6f63839f75b7418ba672a8d", + "sha256:4332d8550ad5f5b673f98d08e4e4f82030cb604c66d8d5ee919399ea01312e58" + ], + "markers": "platform_system == 'Linux' and platform_machine == 'x86_64'", + "version": "==11.8.87" + }, + "nvidia-cuda-nvrtc-cu11": { + "hashes": [ + "sha256:1f27d67b0f72902e9065ae568b4f6268dfe49ba3ed269c9a3da99bb86d1d2008", + "sha256:e18a23a8f4064664a6f1c4a64f38c581cbebfb5935280e94a4943ea8ae3791b1" + ], + "markers": "platform_system == 'Linux' and platform_machine == 'x86_64'", + "version": "==11.8.89" + }, + "nvidia-cuda-runtime-cu11": { + "hashes": [ + "sha256:f587bd726eb2f7612cf77ce38a2c1e65cf23251ff49437f6161ce0d647f64f7c", + "sha256:f60c9fdaed8065b38de8097867240efc5556a8a710007146daeb9082334a6e63" + ], + "markers": "platform_system == 'Linux' and platform_machine == 'x86_64'", + "version": "==11.8.89" + }, + "nvidia-cudnn-cu11": { + "hashes": [ + "sha256:b3e062498fbbb1c1930435a6a454c1b41c903e1e65b7063bd2b4021e8285408e" + ], + "markers": "platform_system == 'Linux' and platform_machine == 'x86_64'", + "version": "==8.7.0.84" + }, + "nvidia-cufft-cu11": { + "hashes": [ + 
"sha256:222f9da70c80384632fd6035e4c3f16762d64ea7a843829cb278f98b3cb7dd81", + "sha256:c4d316f17c745ec9c728e30409612eaf77a8404c3733cdf6c9c1569634d1ca03" + ], + "markers": "platform_system == 'Linux' and platform_machine == 'x86_64'", + "version": "==10.9.0.58" + }, + "nvidia-curand-cu11": { + "hashes": [ + "sha256:8fa8365065fc3e3760d7437b08f164a6bcf8f7124f3b544d2463ded01e6bdc70", + "sha256:ac439548c88580269a1eb6aeb602a5aed32f0dbb20809a31d9ed7d01d77f6bf5" + ], + "markers": "platform_system == 'Linux' and platform_machine == 'x86_64'", + "version": "==10.3.0.86" + }, + "nvidia-cusolver-cu11": { + "hashes": [ + "sha256:7efe43b113495a64e2cf9a0b4365bd53b0a82afb2e2cf91e9f993c9ef5e69ee8", + "sha256:ca538f545645b7e6629140786d3127fe067b3d5a085bd794cde5bfe877c8926f" + ], + "markers": "platform_system == 'Linux' and platform_machine == 'x86_64'", + "version": "==11.4.1.48" + }, + "nvidia-cusparse-cu11": { + "hashes": [ + "sha256:4ae709fe78d3f23f60acaba8c54b8ad556cf16ca486e0cc1aa92dca7555d2d2b", + "sha256:a0f6ee81cd91be606fc2f55992d06b09cd4e86d74b6ae5e8dd1631cf7f5a8706" + ], + "markers": "platform_system == 'Linux' and platform_machine == 'x86_64'", + "version": "==11.7.5.86" + }, + "nvidia-nccl-cu11": { + "hashes": [ + "sha256:7c58afbeddf7f7c6b7dd7d84a7f4e85462610ee0c656287388b96d89dcf046d5" + ], + "markers": "platform_system == 'Linux' and platform_machine == 'x86_64'", + "version": "==2.19.3" + }, + "nvidia-nvtx-cu11": { + "hashes": [ + "sha256:54031010ee38d774b2991004d88f90bbd7bbc1458a96bbc4b42662756508c252", + "sha256:890656d8bd9b4e280231c832e1f0d03459200ba4824ddda3dcb59b1e1989b9f5" + ], + "markers": "platform_system == 'Linux' and platform_machine == 'x86_64'", + "version": "==11.8.86" + }, + "oauthlib": { + "hashes": [ + "sha256:8139f29aac13e25d502680e9e19963e83f16838d48a0d71c287fe40e7067fbca", + "sha256:9859c40929662bec5d64f34d01c99e093149682a3f38915dc0655d5a633dd918" + ], + "markers": "python_version >= '3.6'", + "version": "==3.2.2" + }, + "openai": { + "hashes": [ + 
"sha256:6730b8468a0235e5f289dfdfacaa374001645099c4ad1740b58eab378bcf7723", + "sha256:6eef764a8870095d256d59e6be243acf560a21227e9e3588b508972818929ef7" + ], + "index": "pypi", + "markers": "python_full_version >= '3.7.1'", + "version": "==1.23.3" + }, + "packaging": { + "hashes": [ + "sha256:2ddfb553fdf02fb784c234c7ba6ccc288296ceabec964ad2eae3777778130bc5", + "sha256:eb82c5e3e56209074766e6885bb04b8c38a0c015d0a30036ebe7ece34c9989e9" + ], + "markers": "python_version >= '3.7'", + "version": "==24.0" + }, + "pandas": { + "hashes": [ + "sha256:001910ad31abc7bf06f49dcc903755d2f7f3a9186c0c040b827e522e9cef0863", + "sha256:0ca6377b8fca51815f382bd0b697a0814c8bda55115678cbc94c30aacbb6eff2", + "sha256:0cace394b6ea70c01ca1595f839cf193df35d1575986e484ad35c4aeae7266c1", + "sha256:1cb51fe389360f3b5a4d57dbd2848a5f033350336ca3b340d1c53a1fad33bcad", + "sha256:2925720037f06e89af896c70bca73459d7e6a4be96f9de79e2d440bd499fe0db", + "sha256:3e374f59e440d4ab45ca2fffde54b81ac3834cf5ae2cdfa69c90bc03bde04d76", + "sha256:40ae1dffb3967a52203105a077415a86044a2bea011b5f321c6aa64b379a3f51", + "sha256:43498c0bdb43d55cb162cdc8c06fac328ccb5d2eabe3cadeb3529ae6f0517c32", + "sha256:4abfe0be0d7221be4f12552995e58723c7422c80a659da13ca382697de830c08", + "sha256:58b84b91b0b9f4bafac2a0ac55002280c094dfc6402402332c0913a59654ab2b", + "sha256:640cef9aa381b60e296db324337a554aeeb883ead99dc8f6c18e81a93942f5f4", + "sha256:66b479b0bd07204e37583c191535505410daa8df638fd8e75ae1b383851fe921", + "sha256:696039430f7a562b74fa45f540aca068ea85fa34c244d0deee539cb6d70aa288", + "sha256:6d2123dc9ad6a814bcdea0f099885276b31b24f7edf40f6cdbc0912672e22eee", + "sha256:8635c16bf3d99040fdf3ca3db669a7250ddf49c55dc4aa8fe0ae0fa8d6dcc1f0", + "sha256:873d13d177501a28b2756375d59816c365e42ed8417b41665f346289adc68d24", + "sha256:8e5a0b00e1e56a842f922e7fae8ae4077aee4af0acb5ae3622bd4b4c30aedf99", + "sha256:8e90497254aacacbc4ea6ae5e7a8cd75629d6ad2b30025a4a8b09aa4faf55151", + "sha256:9057e6aa78a584bc93a13f0a9bf7e753a5e9770a30b4d758b8d5f2a62a9433cd", + 
"sha256:90c6fca2acf139569e74e8781709dccb6fe25940488755716d1d354d6bc58bce", + "sha256:92fd6b027924a7e178ac202cfbe25e53368db90d56872d20ffae94b96c7acc57", + "sha256:9dfde2a0ddef507a631dc9dc4af6a9489d5e2e740e226ad426a05cabfbd7c8ef", + "sha256:9e79019aba43cb4fda9e4d983f8e88ca0373adbb697ae9c6c43093218de28b54", + "sha256:a77e9d1c386196879aa5eb712e77461aaee433e54c68cf253053a73b7e49c33a", + "sha256:c7adfc142dac335d8c1e0dcbd37eb8617eac386596eb9e1a1b77791cf2498238", + "sha256:d187d355ecec3629624fccb01d104da7d7f391db0311145817525281e2804d23", + "sha256:ddf818e4e6c7c6f4f7c8a12709696d193976b591cc7dc50588d3d1a6b5dc8772", + "sha256:e9b79011ff7a0f4b1d6da6a61aa1aa604fb312d6647de5bad20013682d1429ce", + "sha256:eee3a87076c0756de40b05c5e9a6069c035ba43e8dd71c379e68cab2c20f16ad" + ], + "index": "pypi", + "markers": "python_version >= '3.9'", + "version": "==2.2.2" + }, + "peft": { + "hashes": [ + "sha256:3b8d09dff94d1bfa72e064cb26af5952fd82428e2bcce432cfaf091f5035b04b", + "sha256:d14223fee6050c53593733e8f763d94c13577e1220987f59ae473d988f2ccd91" + ], + "index": "pypi", + "markers": "python_full_version >= '3.8.0'", + "version": "==0.9.0" + }, + "pillow": { + "hashes": [ + "sha256:048ad577748b9fa4a99a0548c64f2cb8d672d5bf2e643a739ac8faff1164238c", + "sha256:048eeade4c33fdf7e08da40ef402e748df113fd0b4584e32c4af74fe78baaeb2", + "sha256:0ba26351b137ca4e0db0342d5d00d2e355eb29372c05afd544ebf47c0956ffeb", + "sha256:0ea2a783a2bdf2a561808fe4a7a12e9aa3799b701ba305de596bc48b8bdfce9d", + "sha256:1530e8f3a4b965eb6a7785cf17a426c779333eb62c9a7d1bbcf3ffd5bf77a4aa", + "sha256:16563993329b79513f59142a6b02055e10514c1a8e86dca8b48a893e33cf91e3", + "sha256:19aeb96d43902f0a783946a0a87dbdad5c84c936025b8419da0a0cd7724356b1", + "sha256:1a1d1915db1a4fdb2754b9de292642a39a7fb28f1736699527bb649484fb966a", + "sha256:1b87bd9d81d179bd8ab871603bd80d8645729939f90b71e62914e816a76fc6bd", + "sha256:1dfc94946bc60ea375cc39cff0b8da6c7e5f8fcdc1d946beb8da5c216156ddd8", + 
"sha256:2034f6759a722da3a3dbd91a81148cf884e91d1b747992ca288ab88c1de15999", + "sha256:261ddb7ca91fcf71757979534fb4c128448b5b4c55cb6152d280312062f69599", + "sha256:2ed854e716a89b1afcedea551cd85f2eb2a807613752ab997b9974aaa0d56936", + "sha256:3102045a10945173d38336f6e71a8dc71bcaeed55c3123ad4af82c52807b9375", + "sha256:339894035d0ede518b16073bdc2feef4c991ee991a29774b33e515f1d308e08d", + "sha256:412444afb8c4c7a6cc11a47dade32982439925537e483be7c0ae0cf96c4f6a0b", + "sha256:4203efca580f0dd6f882ca211f923168548f7ba334c189e9eab1178ab840bf60", + "sha256:45ebc7b45406febf07fef35d856f0293a92e7417ae7933207e90bf9090b70572", + "sha256:4b5ec25d8b17217d635f8935dbc1b9aa5907962fae29dff220f2659487891cd3", + "sha256:4c8e73e99da7db1b4cad7f8d682cf6abad7844da39834c288fbfa394a47bbced", + "sha256:4e6f7d1c414191c1199f8996d3f2282b9ebea0945693fb67392c75a3a320941f", + "sha256:4eaa22f0d22b1a7e93ff0a596d57fdede2e550aecffb5a1ef1106aaece48e96b", + "sha256:50b8eae8f7334ec826d6eeffaeeb00e36b5e24aa0b9df322c247539714c6df19", + "sha256:50fd3f6b26e3441ae07b7c979309638b72abc1a25da31a81a7fbd9495713ef4f", + "sha256:51243f1ed5161b9945011a7360e997729776f6e5d7005ba0c6879267d4c5139d", + "sha256:5d512aafa1d32efa014fa041d38868fda85028e3f930a96f85d49c7d8ddc0383", + "sha256:5f77cf66e96ae734717d341c145c5949c63180842a545c47a0ce7ae52ca83795", + "sha256:6b02471b72526ab8a18c39cb7967b72d194ec53c1fd0a70b050565a0f366d355", + "sha256:6fb1b30043271ec92dc65f6d9f0b7a830c210b8a96423074b15c7bc999975f57", + "sha256:7161ec49ef0800947dc5570f86568a7bb36fa97dd09e9827dc02b718c5643f09", + "sha256:72d622d262e463dfb7595202d229f5f3ab4b852289a1cd09650362db23b9eb0b", + "sha256:74d28c17412d9caa1066f7a31df8403ec23d5268ba46cd0ad2c50fb82ae40462", + "sha256:78618cdbccaa74d3f88d0ad6cb8ac3007f1a6fa5c6f19af64b55ca170bfa1edf", + "sha256:793b4e24db2e8742ca6423d3fde8396db336698c55cd34b660663ee9e45ed37f", + "sha256:798232c92e7665fe82ac085f9d8e8ca98826f8e27859d9a96b41d519ecd2e49a", + "sha256:81d09caa7b27ef4e61cb7d8fbf1714f5aec1c6b6c5270ee53504981e6e9121ad", 
+ "sha256:8ab74c06ffdab957d7670c2a5a6e1a70181cd10b727cd788c4dd9005b6a8acd9", + "sha256:8eb0908e954d093b02a543dc963984d6e99ad2b5e36503d8a0aaf040505f747d", + "sha256:90b9e29824800e90c84e4022dd5cc16eb2d9605ee13f05d47641eb183cd73d45", + "sha256:9797a6c8fe16f25749b371c02e2ade0efb51155e767a971c61734b1bf6293994", + "sha256:9d2455fbf44c914840c793e89aa82d0e1763a14253a000743719ae5946814b2d", + "sha256:9d3bea1c75f8c53ee4d505c3e67d8c158ad4df0d83170605b50b64025917f338", + "sha256:9e2ec1e921fd07c7cda7962bad283acc2f2a9ccc1b971ee4b216b75fad6f0463", + "sha256:9e91179a242bbc99be65e139e30690e081fe6cb91a8e77faf4c409653de39451", + "sha256:a0eaa93d054751ee9964afa21c06247779b90440ca41d184aeb5d410f20ff591", + "sha256:a2c405445c79c3f5a124573a051062300936b0281fee57637e706453e452746c", + "sha256:aa7e402ce11f0885305bfb6afb3434b3cd8f53b563ac065452d9d5654c7b86fd", + "sha256:aff76a55a8aa8364d25400a210a65ff59d0168e0b4285ba6bf2bd83cf675ba32", + "sha256:b09b86b27a064c9624d0a6c54da01c1beaf5b6cadfa609cf63789b1d08a797b9", + "sha256:b14f16f94cbc61215115b9b1236f9c18403c15dd3c52cf629072afa9d54c1cbf", + "sha256:b50811d664d392f02f7761621303eba9d1b056fb1868c8cdf4231279645c25f5", + "sha256:b7bc2176354defba3edc2b9a777744462da2f8e921fbaf61e52acb95bafa9828", + "sha256:c78e1b00a87ce43bb37642c0812315b411e856a905d58d597750eb79802aaaa3", + "sha256:c83341b89884e2b2e55886e8fbbf37c3fa5efd6c8907124aeb72f285ae5696e5", + "sha256:ca2870d5d10d8726a27396d3ca4cf7976cec0f3cb706debe88e3a5bd4610f7d2", + "sha256:ccce24b7ad89adb5a1e34a6ba96ac2530046763912806ad4c247356a8f33a67b", + "sha256:cd5e14fbf22a87321b24c88669aad3a51ec052eb145315b3da3b7e3cc105b9a2", + "sha256:ce49c67f4ea0609933d01c0731b34b8695a7a748d6c8d186f95e7d085d2fe475", + "sha256:d33891be6df59d93df4d846640f0e46f1a807339f09e79a8040bc887bdcd7ed3", + "sha256:d3b2348a78bc939b4fed6552abfd2e7988e0f81443ef3911a4b8498ca084f6eb", + "sha256:d886f5d353333b4771d21267c7ecc75b710f1a73d72d03ca06df49b09015a9ef", + 
"sha256:d93480005693d247f8346bc8ee28c72a2191bdf1f6b5db469c096c0c867ac015", + "sha256:dc1a390a82755a8c26c9964d457d4c9cbec5405896cba94cf51f36ea0d855002", + "sha256:dd78700f5788ae180b5ee8902c6aea5a5726bac7c364b202b4b3e3ba2d293170", + "sha256:e46f38133e5a060d46bd630faa4d9fa0202377495df1f068a8299fd78c84de84", + "sha256:e4b878386c4bf293578b48fc570b84ecfe477d3b77ba39a6e87150af77f40c57", + "sha256:f0d0591a0aeaefdaf9a5e545e7485f89910c977087e7de2b6c388aec32011e9f", + "sha256:fdcbb4068117dfd9ce0138d068ac512843c52295ed996ae6dd1faf537b6dbc27", + "sha256:ff61bfd9253c3915e6d41c651d5f962da23eda633cf02262990094a18a55371a" + ], + "markers": "python_version >= '3.8'", + "version": "==10.3.0" + }, + "portalocker": { + "hashes": [ + "sha256:2b035aa7828e46c58e9b31390ee1f169b98e1066ab10b9a6a861fe7e25ee4f33", + "sha256:cfb86acc09b9aa7c3b43594e19be1345b9d16af3feb08bf92f23d4dce513a28e" + ], + "markers": "python_version >= '3.8'", + "version": "==2.8.2" + }, + "protobuf": { + "hashes": [ + "sha256:03038ac1cfbc41aa21f6afcbcd357281d7521b4157926f30ebecc8d4ea59dcb7", + "sha256:28545383d61f55b57cf4df63eebd9827754fd2dc25f80c5253f9184235db242c", + "sha256:2e3427429c9cffebf259491be0af70189607f365c2f41c7c3764af6f337105f2", + "sha256:398a9e0c3eaceb34ec1aee71894ca3299605fa8e761544934378bbc6c97de23b", + "sha256:44246bab5dd4b7fbd3c0c80b6f16686808fab0e4aca819ade6e8d294a29c7050", + "sha256:447d43819997825d4e71bf5769d869b968ce96848b6479397e29fc24c4a5dfe9", + "sha256:67a3598f0a2dcbc58d02dd1928544e7d88f764b47d4a286202913f0b2801c2e7", + "sha256:74480f79a023f90dc6e18febbf7b8bac7508420f2006fabd512013c0c238f454", + "sha256:819559cafa1a373b7096a482b504ae8a857c89593cf3a25af743ac9ecbd23480", + "sha256:899dc660cd599d7352d6f10d83c95df430a38b410c1b66b407a6b29265d66469", + "sha256:8c0c984a1b8fef4086329ff8dd19ac77576b384079247c770f29cc8ce3afa06c", + "sha256:9aae4406ea63d825636cc11ffb34ad3379335803216ee3a856787bcf5ccc751e", + "sha256:a7ca6d488aa8ff7f329d4c545b2dbad8ac31464f1d8b1c87ad1346717731e4db", + 
"sha256:b6cc7ba72a8850621bfec987cb72623e703b7fe2b9127a161ce61e61558ad905", + "sha256:bf01b5720be110540be4286e791db73f84a2b721072a3711efff6c324cdf074b", + "sha256:c02ce36ec760252242a33967d51c289fd0e1c0e6e5cc9397e2279177716add86", + "sha256:d9e4432ff660d67d775c66ac42a67cf2453c27cb4d738fc22cb53b5d84c135d4", + "sha256:daa564862dd0d39c00f8086f88700fdbe8bc717e993a21e90711acfed02f2402", + "sha256:de78575669dddf6099a8a0f46a27e82a1783c557ccc38ee620ed8cc96d3be7d7", + "sha256:e64857f395505ebf3d2569935506ae0dfc4a15cb80dc25261176c784662cdcc4", + "sha256:f4bd856d702e5b0d96a00ec6b307b0f51c1982c2bf9c0052cf9019e9a544ba99", + "sha256:f4c42102bc82a51108e449cbb32b19b180022941c727bac0cfd50170341f16ee" + ], + "index": "pypi", + "markers": "python_version >= '3.7'", + "version": "==3.20.3" + }, + "psutil": { + "hashes": [ + "sha256:02615ed8c5ea222323408ceba16c60e99c3f91639b07da6373fb7e6539abc56d", + "sha256:05806de88103b25903dff19bb6692bd2e714ccf9e668d050d144012055cbca73", + "sha256:26bd09967ae00920df88e0352a91cff1a78f8d69b3ecabbfe733610c0af486c8", + "sha256:27cc40c3493bb10de1be4b3f07cae4c010ce715290a5be22b98493509c6299e2", + "sha256:36f435891adb138ed3c9e58c6af3e2e6ca9ac2f365efe1f9cfef2794e6c93b4e", + "sha256:50187900d73c1381ba1454cf40308c2bf6f34268518b3f36a9b663ca87e65e36", + "sha256:611052c4bc70432ec770d5d54f64206aa7203a101ec273a0cd82418c86503bb7", + "sha256:6be126e3225486dff286a8fb9a06246a5253f4c7c53b475ea5f5ac934e64194c", + "sha256:7d79560ad97af658a0f6adfef8b834b53f64746d45b403f225b85c5c2c140eee", + "sha256:8cb6403ce6d8e047495a701dc7c5bd788add903f8986d523e3e20b98b733e421", + "sha256:8db4c1b57507eef143a15a6884ca10f7c73876cdf5d51e713151c1236a0e68cf", + "sha256:aee678c8720623dc456fa20659af736241f575d79429a0e5e9cf88ae0605cc81", + "sha256:bc56c2a1b0d15aa3eaa5a60c9f3f8e3e565303b465dbf57a1b730e7a2b9844e0", + "sha256:bd1184ceb3f87651a67b2708d4c3338e9b10c5df903f2e3776b62303b26cb631", + "sha256:d06016f7f8625a1825ba3732081d77c94589dca78b7a3fc072194851e88461a4", + 
"sha256:d16bbddf0693323b8c6123dd804100241da461e41d6e332fb0ba6058f630f8c8" + ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 3.5'", + "version": "==5.9.8" + }, + "py-cpuinfo": { + "hashes": [ + "sha256:3cdbbf3fac90dc6f118bfd64384f309edeadd902d7c8fb17f02ffa1fc3f49690", + "sha256:859625bc251f64e21f077d099d4162689c762b5d6a4c3c97553d56241c9674d5" + ], + "version": "==9.0.0" + }, + "pyarrow": { + "hashes": [ + "sha256:00a1dcb22ad4ceb8af87f7bd30cc3354788776c417f493089e0a0af981bc8d80", + "sha256:1ab8b9050752b16a8b53fcd9853bf07d8daf19093533e990085168f40c64d978", + "sha256:20ce707d9aa390593ea93218b19d0eadab56390311cb87aad32c9a869b0e958c", + "sha256:22a1fdb1254e5095d629e29cd1ea98ed04b4bbfd8e42cc670a6b639ccc208b60", + "sha256:266ddb7e823f03733c15adc8b5078db2df6980f9aa93d6bb57ece615df4e0ba7", + "sha256:2a7abdee4a4a7cfa239e2e8d721224c4b34ffe69a0ca7981354fe03c1328789b", + "sha256:35692ce8ad0b8c666aa60f83950957096d92f2a9d8d7deda93fb835e6053307e", + "sha256:3c2f5e239db7ed43e0ad2baf46a6465f89c824cc703f38ef0fde927d8e0955f7", + "sha256:42e56557bc7c5c10d3e42c3b32f6cff649a29d637e8f4e8b311d334cc4326730", + "sha256:5448564754c154997bc09e95a44b81b9e31ae918a86c0fcb35c4aa4922756f55", + "sha256:56850a0afe9ef37249d5387355449c0f94d12ff7994af88f16803a26d38f2016", + "sha256:574a00260a4ed9d118a14770edbd440b848fcae5a3024128be9d0274dbcaf858", + "sha256:5823275c8addbbb50cd4e6a6839952682a33255b447277e37a6f518d6972f4e1", + "sha256:59bb1f1edbbf4114c72415f039f1359f1a57d166a331c3229788ccbfbb31689a", + "sha256:5cc23090224b6594f5a92d26ad47465af47c1d9c079dd4a0061ae39551889efe", + "sha256:705db70d3e2293c2f6f8e84874b5b775f690465798f66e94bb2c07bab0a6bb55", + "sha256:71d52561cd7aefd22cf52538f262850b0cc9e4ec50af2aaa601da3a16ef48877", + "sha256:729f7b262aa620c9df8b9967db96c1575e4cfc8c25d078a06968e527b8d6ec05", + "sha256:91d28f9a40f1264eab2af7905a4d95320ac2f287891e9c8b0035f264fe3c3a4b", + "sha256:99af421ee451a78884d7faea23816c429e263bd3618b22d38e7992c9ce2a7ad9", + 
"sha256:9dd3151d098e56f16a8389c1247137f9e4c22720b01c6f3aa6dec29a99b74d80", + "sha256:b93c9a50b965ee0bf4fef65e53b758a7e8dcc0c2d86cebcc037aaaf1b306ecc0", + "sha256:bd40467bdb3cbaf2044ed7a6f7f251c8f941c8b31275aaaf88e746c4f3ca4a7a", + "sha256:c0815d0ddb733b8c1b53a05827a91f1b8bde6240f3b20bf9ba5d650eb9b89cdf", + "sha256:cc8814310486f2a73c661ba8354540f17eef51e1b6dd090b93e3419d3a097b3a", + "sha256:d22d0941e6c7bafddf5f4c0662e46f2075850f1c044bf1a03150dd9e189427ce", + "sha256:d831690844706e374c455fba2fb8cfcb7b797bfe53ceda4b54334316e1ac4fa4", + "sha256:d91073d1e2fef2c121154680e2ba7e35ecf8d4969cc0af1fa6f14a8675858159", + "sha256:dd9334a07b6dc21afe0857aa31842365a62eca664e415a3f9536e3a8bb832c07", + "sha256:df0080339387b5d30de31e0a149c0c11a827a10c82f0c67d9afae3981d1aabb7", + "sha256:ed66e5217b4526fa3585b5e39b0b82f501b88a10d36bd0d2a4d8aa7b5a48e2df", + "sha256:edf38cce0bf0dcf726e074159c60516447e4474904c0033f018c1f33d7dac6c5", + "sha256:ef2f309b68396bcc5a354106741d333494d6a0d3e1951271849787109f0229a6", + "sha256:f293e92d1db251447cb028ae12f7bc47526e4649c3a9924c8376cab4ad6b98bd", + "sha256:fb8065dbc0d051bf2ae2453af0484d99a43135cadabacf0af588a3be81fbbb9b", + "sha256:fda9a7cebd1b1d46c97b511f60f73a5b766a6de4c5236f144f41a5d5afec1f35" + ], + "index": "pypi", + "markers": "python_version >= '3.8'", + "version": "==16.0.0" + }, + "pyarrow-hotfix": { + "hashes": [ + "sha256:79d3e030f7ff890d408a100ac16d6f00b14d44a502d7897cd9fc3e3a534e9945", + "sha256:dcc9ae2d220dff0083be6a9aa8e0cdee5182ad358d4931fce825c545e5c89178" + ], + "markers": "python_version >= '3.5'", + "version": "==0.6" + }, + "pycparser": { + "hashes": [ + "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6", + "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc" + ], + "markers": "python_version >= '3.8'", + "version": "==2.22" + }, + "pydantic": { + "hashes": [ + "sha256:e029badca45266732a9a79898a15ae2e8b14840b1eabbb25844be28f0b33f3d5", + 
"sha256:e9dbb5eada8abe4d9ae5f46b9939aead650cd2b68f249bb3a8139dbe125803cc" + ], + "markers": "python_version >= '3.8'", + "version": "==2.7.1" + }, + "pydantic-core": { + "hashes": [ + "sha256:0098300eebb1c837271d3d1a2cd2911e7c11b396eac9661655ee524a7f10587b", + "sha256:042473b6280246b1dbf530559246f6842b56119c2926d1e52b631bdc46075f2a", + "sha256:05b7133a6e6aeb8df37d6f413f7705a37ab4031597f64ab56384c94d98fa0e90", + "sha256:0680b1f1f11fda801397de52c36ce38ef1c1dc841a0927a94f226dea29c3ae3d", + "sha256:0d69b4c2f6bb3e130dba60d34c0845ba31b69babdd3f78f7c0c8fae5021a253e", + "sha256:1404c69d6a676245199767ba4f633cce5f4ad4181f9d0ccb0577e1f66cf4c46d", + "sha256:182245ff6b0039e82b6bb585ed55a64d7c81c560715d1bad0cbad6dfa07b4027", + "sha256:1a388a77e629b9ec814c1b1e6b3b595fe521d2cdc625fcca26fbc2d44c816804", + "sha256:1d90c3265ae107f91a4f279f4d6f6f1d4907ac76c6868b27dc7fb33688cfb347", + "sha256:20aca1e2298c56ececfd8ed159ae4dde2df0781988c97ef77d5c16ff4bd5b400", + "sha256:219da3f096d50a157f33645a1cf31c0ad1fe829a92181dd1311022f986e5fbe3", + "sha256:22057013c8c1e272eb8d0eebc796701167d8377441ec894a8fed1af64a0bf399", + "sha256:223ee893d77a310a0391dca6df00f70bbc2f36a71a895cecd9a0e762dc37b349", + "sha256:224c421235f6102e8737032483f43c1a8cfb1d2f45740c44166219599358c2cd", + "sha256:2334ce8c673ee93a1d6a65bd90327588387ba073c17e61bf19b4fd97d688d63c", + "sha256:269322dcc3d8bdb69f054681edff86276b2ff972447863cf34c8b860f5188e2e", + "sha256:2728b01246a3bba6de144f9e3115b532ee44bd6cf39795194fb75491824a1413", + "sha256:2b8ed04b3582771764538f7ee7001b02e1170223cf9b75dff0bc698fadb00cf3", + "sha256:2e29d20810dfc3043ee13ac7d9e25105799817683348823f305ab3f349b9386e", + "sha256:36789b70d613fbac0a25bb07ab3d9dba4d2e38af609c020cf4d888d165ee0bf3", + "sha256:390193c770399861d8df9670fb0d1874f330c79caaca4642332df7c682bf6b91", + "sha256:3a6515ebc6e69d85502b4951d89131ca4e036078ea35533bb76327f8424531ce", + "sha256:3f9a801e7c8f1ef8718da265bba008fa121243dfe37c1cea17840b0944dfd72c", + 
"sha256:43f0f463cf89ace478de71a318b1b4f05ebc456a9b9300d027b4b57c1a2064fb", + "sha256:4456f2dca97c425231d7315737d45239b2b51a50dc2b6f0c2bb181fce6207664", + "sha256:470b94480bb5ee929f5acba6995251ada5e059a5ef3e0dfc63cca287283ebfa6", + "sha256:4774f3184d2ef3e14e8693194f661dea5a4d6ca4e3dc8e39786d33a94865cefd", + "sha256:4b4356d3538c3649337df4074e81b85f0616b79731fe22dd11b99499b2ebbdf3", + "sha256:553ef617b6836fc7e4df130bb851e32fe357ce36336d897fd6646d6058d980af", + "sha256:6132dd3bd52838acddca05a72aafb6eab6536aa145e923bb50f45e78b7251043", + "sha256:6a46e22a707e7ad4484ac9ee9f290f9d501df45954184e23fc29408dfad61350", + "sha256:6e5c584d357c4e2baf0ff7baf44f4994be121e16a2c88918a5817331fc7599d7", + "sha256:75250dbc5290e3f1a0f4618db35e51a165186f9034eff158f3d490b3fed9f8a0", + "sha256:75f7e9488238e920ab6204399ded280dc4c307d034f3924cd7f90a38b1829563", + "sha256:78363590ef93d5d226ba21a90a03ea89a20738ee5b7da83d771d283fd8a56761", + "sha256:7ca4ae5a27ad7a4ee5170aebce1574b375de390bc01284f87b18d43a3984df72", + "sha256:800d60565aec896f25bc3cfa56d2277d52d5182af08162f7954f938c06dc4ee3", + "sha256:82d5d4d78e4448683cb467897fe24e2b74bb7b973a541ea1dcfec1d3cbce39fb", + "sha256:852e966fbd035a6468fc0a3496589b45e2208ec7ca95c26470a54daed82a0788", + "sha256:868649da93e5a3d5eacc2b5b3b9235c98ccdbfd443832f31e075f54419e1b96b", + "sha256:886eec03591b7cf058467a70a87733b35f44707bd86cf64a615584fd72488b7c", + "sha256:8b172601454f2d7701121bbec3425dd71efcb787a027edf49724c9cefc14c038", + "sha256:95b9d5e72481d3780ba3442eac863eae92ae43a5f3adb5b4d0a1de89d42bb250", + "sha256:98758d627ff397e752bc339272c14c98199c613f922d4a384ddc07526c86a2ec", + "sha256:997abc4df705d1295a42f95b4eec4950a37ad8ae46d913caeee117b6b198811c", + "sha256:9b5155ff768083cb1d62f3e143b49a8a3432e6789a3abee8acd005c3c7af1c74", + "sha256:9e08e867b306f525802df7cd16c44ff5ebbe747ff0ca6cf3fde7f36c05a59a81", + "sha256:9fdad8e35f278b2c3eb77cbdc5c0a49dada440657bf738d6905ce106dc1de439", + "sha256:a1874c6dd4113308bd0eb568418e6114b252afe44319ead2b4081e9b9521fe75", 
+ "sha256:a8309f67285bdfe65c372ea3722b7a5642680f3dba538566340a9d36e920b5f0", + "sha256:ae0a8a797a5e56c053610fa7be147993fe50960fa43609ff2a9552b0e07013e8", + "sha256:b14d82cdb934e99dda6d9d60dc84a24379820176cc4a0d123f88df319ae9c150", + "sha256:b1bd7e47b1558ea872bd16c8502c414f9e90dcf12f1395129d7bb42a09a95438", + "sha256:b3ef08e20ec49e02d5c6717a91bb5af9b20f1805583cb0adfe9ba2c6b505b5ae", + "sha256:b89ed9eb7d616ef5714e5590e6cf7f23b02d0d539767d33561e3675d6f9e3857", + "sha256:c4fcf5cd9c4b655ad666ca332b9a081112cd7a58a8b5a6ca7a3104bc950f2038", + "sha256:c6fdc8627910eed0c01aed6a390a252fe3ea6d472ee70fdde56273f198938374", + "sha256:c9bd70772c720142be1020eac55f8143a34ec9f82d75a8e7a07852023e46617f", + "sha256:ca7b0c1f1c983e064caa85f3792dd2fe3526b3505378874afa84baf662e12241", + "sha256:cbca948f2d14b09d20268cda7b0367723d79063f26c4ffc523af9042cad95592", + "sha256:cc1cfd88a64e012b74e94cd00bbe0f9c6df57049c97f02bb07d39e9c852e19a4", + "sha256:ccdd111c03bfd3666bd2472b674c6899550e09e9f298954cfc896ab92b5b0e6d", + "sha256:cfeecd1ac6cc1fb2692c3d5110781c965aabd4ec5d32799773ca7b1456ac636b", + "sha256:d4d938ec0adf5167cb335acb25a4ee69a8107e4984f8fbd2e897021d9e4ca21b", + "sha256:d7d904828195733c183d20a54230c0df0eb46ec746ea1a666730787353e87182", + "sha256:d91cb5ea8b11607cc757675051f61b3d93f15eca3cefb3e6c704a5d6e8440f4e", + "sha256:d9319e499827271b09b4e411905b24a426b8fb69464dfa1696258f53a3334641", + "sha256:e0e8b1be28239fc64a88a8189d1df7fad8be8c1ae47fcc33e43d4be15f99cc70", + "sha256:e18609ceaa6eed63753037fc06ebb16041d17d28199ae5aba0052c51449650a9", + "sha256:e1b395e58b10b73b07b7cf740d728dd4ff9365ac46c18751bf8b3d8cca8f625a", + "sha256:e23ec367a948b6d812301afc1b13f8094ab7b2c280af66ef450efc357d2ae543", + "sha256:e25add29b8f3b233ae90ccef2d902d0ae0432eb0d45370fe315d1a5cf231004b", + "sha256:e6dac87ddb34aaec85f873d737e9d06a3555a1cc1a8e0c44b7f8d5daeb89d86f", + "sha256:ef26c9e94a8c04a1b2924149a9cb081836913818e55681722d7f29af88fe7b38", + 
"sha256:eff2de745698eb46eeb51193a9f41d67d834d50e424aef27df2fcdee1b153845", + "sha256:f0a21cbaa69900cbe1a2e7cad2aa74ac3cf21b10c3efb0fa0b80305274c0e8a2", + "sha256:f459a5ce8434614dfd39bbebf1041952ae01da6bed9855008cb33b875cb024c0", + "sha256:f93a8a2e3938ff656a7c1bc57193b1319960ac015b6e87d76c76bf14fe0244b4", + "sha256:fb2bd7be70c0fe4dfd32c951bc813d9fe6ebcbfdd15a07527796c8204bd36242" + ], + "markers": "python_version >= '3.8'", + "version": "==2.18.2" + }, + "pyjwt": { + "hashes": [ + "sha256:57e28d156e3d5c10088e0c68abb90bfac3df82b40a71bd0daa20c65ccd5c23de", + "sha256:59127c392cc44c2da5bb3192169a91f429924e17aff6534d70fdc02ab3e04320" + ], + "markers": "python_version >= '3.7'", + "version": "==2.8.0" + }, + "pynvml": { + "hashes": [ + "sha256:5cce014ac01b098d08f06178f86c37be409b80b2e903a5a03ce15eed60f55e25", + "sha256:d027b21b95b1088b9fc278117f9f61b7c67f8e33a787e9f83f735f0f71ac32d0" + ], + "markers": "python_version >= '3.6'", + "version": "==11.5.0" + }, + "python-dateutil": { + "hashes": [ + "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3", + "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427" + ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", + "version": "==2.9.0.post0" + }, + "python-slugify": { + "hashes": [ + "sha256:276540b79961052b66b7d116620b36518847f52d5fd9e3a70164fc8c50faa6b8", + "sha256:59202371d1d05b54a9e7720c5e038f928f45daaffe41dd10822f3907b937c856" + ], + "markers": "python_version >= '3.7'", + "version": "==8.0.4" + }, + "pytz": { + "hashes": [ + "sha256:2a29735ea9c18baf14b448846bde5a48030ed267578472d8955cd0e7443a9812", + "sha256:328171f4e3623139da4983451950b28e95ac706e13f3f2630a879749e7a8b319" + ], + "version": "==2024.1" + }, + "pyyaml": { + "hashes": [ + "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5", + "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc", + 
"sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df", + "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741", + "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206", + "sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27", + "sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595", + "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62", + "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98", + "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696", + "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290", + "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9", + "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d", + "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6", + "sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867", + "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47", + "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486", + "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6", + "sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3", + "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007", + "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938", + "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0", + "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c", + "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735", + "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d", + "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28", + "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4", + "sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba", 
+ "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8", + "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef", + "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5", + "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd", + "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3", + "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0", + "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515", + "sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c", + "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c", + "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924", + "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34", + "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43", + "sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859", + "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673", + "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54", + "sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a", + "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b", + "sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab", + "sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa", + "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c", + "sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585", + "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d", + "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f" + ], + "index": "pypi", + "markers": "python_version >= '3.6'", + "version": "==6.0.1" + }, + "readchar": { + "hashes": [ + "sha256:b4b31dd35de4897be738f27e8f9f62426b5fedb54b648364987e30ae534b71bc", + 
"sha256:e0dae942d3a746f8d5423f83dbad67efe704004baafe31b626477929faaee472" + ], + "markers": "python_version >= '3.8'", + "version": "==4.0.6" + }, + "referencing": { + "hashes": [ + "sha256:191e936b0c696d0af17ad7430a3dc68e88bc11be6514f4757dc890f04ab05889", + "sha256:8080727b30e364e5783152903672df9b6b091c926a146a759080b62ca3126cd6" + ], + "markers": "python_version >= '3.8'", + "version": "==0.35.0" + }, + "regex": { + "hashes": [ + "sha256:00169caa125f35d1bca6045d65a662af0202704489fada95346cfa092ec23f39", + "sha256:03576e3a423d19dda13e55598f0fd507b5d660d42c51b02df4e0d97824fdcae3", + "sha256:03e68f44340528111067cecf12721c3df4811c67268b897fbe695c95f860ac42", + "sha256:0534b034fba6101611968fae8e856c1698da97ce2efb5c2b895fc8b9e23a5834", + "sha256:08dea89f859c3df48a440dbdcd7b7155bc675f2fa2ec8c521d02dc69e877db70", + "sha256:0a38d151e2cdd66d16dab550c22f9521ba79761423b87c01dae0a6e9add79c0d", + "sha256:0c8290b44d8b0af4e77048646c10c6e3aa583c1ca67f3b5ffb6e06cf0c6f0f89", + "sha256:10188fe732dec829c7acca7422cdd1bf57d853c7199d5a9e96bb4d40db239c73", + "sha256:1210365faba7c2150451eb78ec5687871c796b0f1fa701bfd2a4a25420482d26", + "sha256:12f6a3f2f58bb7344751919a1876ee1b976fe08b9ffccb4bbea66f26af6017b9", + "sha256:159dc4e59a159cb8e4e8f8961eb1fa5d58f93cb1acd1701d8aff38d45e1a84a6", + "sha256:20b7a68444f536365af42a75ccecb7ab41a896a04acf58432db9e206f4e525d6", + "sha256:23cff1b267038501b179ccbbd74a821ac4a7192a1852d1d558e562b507d46013", + "sha256:2c72608e70f053643437bd2be0608f7f1c46d4022e4104d76826f0839199347a", + "sha256:3399dd8a7495bbb2bacd59b84840eef9057826c664472e86c91d675d007137f5", + "sha256:34422d5a69a60b7e9a07a690094e824b66f5ddc662a5fc600d65b7c174a05f04", + "sha256:370c68dc5570b394cbaadff50e64d705f64debed30573e5c313c360689b6aadc", + "sha256:3a1018e97aeb24e4f939afcd88211ace472ba566efc5bdf53fd8fd7f41fa7170", + "sha256:3d5ac5234fb5053850d79dd8eb1015cb0d7d9ed951fa37aa9e6249a19aa4f336", + "sha256:4313ab9bf6a81206c8ac28fdfcddc0435299dc88cad12cc6305fd0e78b81f9e4", + 
"sha256:445ca8d3c5a01309633a0c9db57150312a181146315693273e35d936472df912", + "sha256:479595a4fbe9ed8f8f72c59717e8cf222da2e4c07b6ae5b65411e6302af9708e", + "sha256:4918fd5f8b43aa7ec031e0fef1ee02deb80b6afd49c85f0790be1dc4ce34cb50", + "sha256:4aba818dcc7263852aabb172ec27b71d2abca02a593b95fa79351b2774eb1d2b", + "sha256:4e819a806420bc010489f4e741b3036071aba209f2e0989d4750b08b12a9343f", + "sha256:4facc913e10bdba42ec0aee76d029aedda628161a7ce4116b16680a0413f658a", + "sha256:549c3584993772e25f02d0656ac48abdda73169fe347263948cf2b1cead622f3", + "sha256:5c02fcd2bf45162280613d2e4a1ca3ac558ff921ae4e308ecb307650d3a6ee51", + "sha256:5f580c651a72b75c39e311343fe6875d6f58cf51c471a97f15a938d9fe4e0d37", + "sha256:62120ed0de69b3649cc68e2965376048793f466c5a6c4370fb27c16c1beac22d", + "sha256:6295004b2dd37b0835ea5c14a33e00e8cfa3c4add4d587b77287825f3418d310", + "sha256:65436dce9fdc0aeeb0a0effe0839cb3d6a05f45aa45a4d9f9c60989beca78b9c", + "sha256:684008ec44ad275832a5a152f6e764bbe1914bea10968017b6feaecdad5736e0", + "sha256:684e52023aec43bdf0250e843e1fdd6febbe831bd9d52da72333fa201aaa2335", + "sha256:6cc38067209354e16c5609b66285af17a2863a47585bcf75285cab33d4c3b8df", + "sha256:6f2f017c5be19984fbbf55f8af6caba25e62c71293213f044da3ada7091a4455", + "sha256:743deffdf3b3481da32e8a96887e2aa945ec6685af1cfe2bcc292638c9ba2f48", + "sha256:7571f19f4a3fd00af9341c7801d1ad1967fc9c3f5e62402683047e7166b9f2b4", + "sha256:7731728b6568fc286d86745f27f07266de49603a6fdc4d19c87e8c247be452af", + "sha256:785c071c982dce54d44ea0b79cd6dfafddeccdd98cfa5f7b86ef69b381b457d9", + "sha256:78fddb22b9ef810b63ef341c9fcf6455232d97cfe03938cbc29e2672c436670e", + "sha256:7bb966fdd9217e53abf824f437a5a2d643a38d4fd5fd0ca711b9da683d452969", + "sha256:7cbc5d9e8a1781e7be17da67b92580d6ce4dcef5819c1b1b89f49d9678cc278c", + "sha256:803b8905b52de78b173d3c1e83df0efb929621e7b7c5766c0843704d5332682f", + "sha256:80b696e8972b81edf0af2a259e1b2a4a661f818fae22e5fa4fa1a995fb4a40fd", + "sha256:81500ed5af2090b4a9157a59dbc89873a25c33db1bb9a8cf123837dcc9765047", 
+ "sha256:89ec7f2c08937421bbbb8b48c54096fa4f88347946d4747021ad85f1b3021b3c", + "sha256:8ba6745440b9a27336443b0c285d705ce73adb9ec90e2f2004c64d95ab5a7598", + "sha256:8c91e1763696c0eb66340c4df98623c2d4e77d0746b8f8f2bee2c6883fd1fe18", + "sha256:8d015604ee6204e76569d2f44e5a210728fa917115bef0d102f4107e622b08d5", + "sha256:8d1f86f3f4e2388aa3310b50694ac44daefbd1681def26b4519bd050a398dc5a", + "sha256:8f83b6fd3dc3ba94d2b22717f9c8b8512354fd95221ac661784df2769ea9bba9", + "sha256:8fc6976a3395fe4d1fbeb984adaa8ec652a1e12f36b56ec8c236e5117b585427", + "sha256:904c883cf10a975b02ab3478bce652f0f5346a2c28d0a8521d97bb23c323cc8b", + "sha256:911742856ce98d879acbea33fcc03c1d8dc1106234c5e7d068932c945db209c0", + "sha256:91797b98f5e34b6a49f54be33f72e2fb658018ae532be2f79f7c63b4ae225145", + "sha256:95399831a206211d6bc40224af1c635cb8790ddd5c7493e0bd03b85711076a53", + "sha256:956b58d692f235cfbf5b4f3abd6d99bf102f161ccfe20d2fd0904f51c72c4c66", + "sha256:98c1165f3809ce7774f05cb74e5408cd3aa93ee8573ae959a97a53db3ca3180d", + "sha256:9ab40412f8cd6f615bfedea40c8bf0407d41bf83b96f6fc9ff34976d6b7037fd", + "sha256:9df1bfef97db938469ef0a7354b2d591a2d438bc497b2c489471bec0e6baf7c4", + "sha256:a01fe2305e6232ef3e8f40bfc0f0f3a04def9aab514910fa4203bafbc0bb4682", + "sha256:a70b51f55fd954d1f194271695821dd62054d949efd6368d8be64edd37f55c86", + "sha256:a7ccdd1c4a3472a7533b0a7aa9ee34c9a2bef859ba86deec07aff2ad7e0c3b94", + "sha256:b340cccad138ecb363324aa26893963dcabb02bb25e440ebdf42e30963f1a4e0", + "sha256:b74586dd0b039c62416034f811d7ee62810174bb70dffcca6439f5236249eb09", + "sha256:b9d320b3bf82a39f248769fc7f188e00f93526cc0fe739cfa197868633d44701", + "sha256:ba2336d6548dee3117520545cfe44dc28a250aa091f8281d28804aa8d707d93d", + "sha256:ba8122e3bb94ecda29a8de4cf889f600171424ea586847aa92c334772d200331", + "sha256:bd727ad276bb91928879f3aa6396c9a1d34e5e180dce40578421a691eeb77f47", + "sha256:c21fc21a4c7480479d12fd8e679b699f744f76bb05f53a1d14182b31f55aac76", + 
"sha256:c2d0e7cbb6341e830adcbfa2479fdeebbfbb328f11edd6b5675674e7a1e37730", + "sha256:c2ef6f7990b6e8758fe48ad08f7e2f66c8f11dc66e24093304b87cae9037bb4a", + "sha256:c4ed75ea6892a56896d78f11006161eea52c45a14994794bcfa1654430984b22", + "sha256:cccc79a9be9b64c881f18305a7c715ba199e471a3973faeb7ba84172abb3f317", + "sha256:d0800631e565c47520aaa04ae38b96abc5196fe8b4aa9bd864445bd2b5848a7a", + "sha256:d2da13568eff02b30fd54fccd1e042a70fe920d816616fda4bf54ec705668d81", + "sha256:d61ae114d2a2311f61d90c2ef1358518e8f05eafda76eaf9c772a077e0b465ec", + "sha256:d83c2bc678453646f1a18f8db1e927a2d3f4935031b9ad8a76e56760461105dd", + "sha256:dd5acc0a7d38fdc7a3a6fd3ad14c880819008ecb3379626e56b163165162cc46", + "sha256:df79012ebf6f4efb8d307b1328226aef24ca446b3ff8d0e30202d7ebcb977a8c", + "sha256:e0a2df336d1135a0b3a67f3bbf78a75f69562c1199ed9935372b82215cddd6e2", + "sha256:e2f142b45c6fed48166faeb4303b4b58c9fcd827da63f4cf0a123c3480ae11fb", + "sha256:e697e1c0238133589e00c244a8b676bc2cfc3ab4961318d902040d099fec7483", + "sha256:e757d475953269fbf4b441207bb7dbdd1c43180711b6208e129b637792ac0b93", + "sha256:e87ab229332ceb127a165612d839ab87795972102cb9830e5f12b8c9a5c1b508", + "sha256:ea355eb43b11764cf799dda62c658c4d2fdb16af41f59bb1ccfec517b60bcb07", + "sha256:ec7e0043b91115f427998febaa2beb82c82df708168b35ece3accb610b91fac1", + "sha256:eeaa0b5328b785abc344acc6241cffde50dc394a0644a968add75fcefe15b9d4", + "sha256:f2d80a6749724b37853ece57988b39c4e79d2b5fe2869a86e8aeae3bbeef9eb0", + "sha256:fa454d26f2e87ad661c4f0c5a5fe4cf6aab1e307d1b94f16ffdfcb089ba685c0", + "sha256:fb83cc090eac63c006871fd24db5e30a1f282faa46328572661c0a24a2323a08", + "sha256:fd80d1280d473500d8086d104962a82d77bfbf2b118053824b7be28cd5a79ea5" + ], + "markers": "python_version >= '3.7'", + "version": "==2024.4.16" + }, + "requests": { + "hashes": [ + "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f", + "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1" + ], + "markers": "python_version >= '3.7'", + 
"version": "==2.31.0" + }, + "requests-oauthlib": { + "hashes": [ + "sha256:7dd8a5c40426b779b0868c404bdef9768deccf22749cde15852df527e6269b36", + "sha256:b3dffaebd884d8cd778494369603a9e7b58d29111bf6b41bdc2dcd87203af4e9" + ], + "markers": "python_version >= '3.4'", + "version": "==2.0.0" + }, + "rfc3339-validator": { + "hashes": [ + "sha256:138a2abdf93304ad60530167e51d2dfb9549521a836871b88d7f4695d0022f6b", + "sha256:24f6ec1eda14ef823da9e36ec7113124b39c04d50a4d3d3a3c2859577e7791fa" + ], + "version": "==0.1.4" + }, + "rfc3986-validator": { + "hashes": [ + "sha256:2f235c432ef459970b4306369336b9d5dbdda31b510ca1e327636e01f528bfa9", + "sha256:3d44bde7921b3b9ec3ae4e3adca370438eccebc676456449b145d533b240d055" + ], + "version": "==0.1.1" + }, + "rpds-py": { + "hashes": [ + "sha256:01e36a39af54a30f28b73096dd39b6802eddd04c90dbe161c1b8dbe22353189f", + "sha256:044a3e61a7c2dafacae99d1e722cc2d4c05280790ec5a05031b3876809d89a5c", + "sha256:08231ac30a842bd04daabc4d71fddd7e6d26189406d5a69535638e4dcb88fe76", + "sha256:08f9ad53c3f31dfb4baa00da22f1e862900f45908383c062c27628754af2e88e", + "sha256:0ab39c1ba9023914297dd88ec3b3b3c3f33671baeb6acf82ad7ce883f6e8e157", + "sha256:0af039631b6de0397ab2ba16eaf2872e9f8fca391b44d3d8cac317860a700a3f", + "sha256:0b8612cd233543a3781bc659c731b9d607de65890085098986dfd573fc2befe5", + "sha256:11a8c85ef4a07a7638180bf04fe189d12757c696eb41f310d2426895356dcf05", + "sha256:1374f4129f9bcca53a1bba0bb86bf78325a0374577cf7e9e4cd046b1e6f20e24", + "sha256:1d4acf42190d449d5e89654d5c1ed3a4f17925eec71f05e2a41414689cda02d1", + "sha256:1d9a5be316c15ffb2b3c405c4ff14448c36b4435be062a7f578ccd8b01f0c4d8", + "sha256:1df3659d26f539ac74fb3b0c481cdf9d725386e3552c6fa2974f4d33d78e544b", + "sha256:22806714311a69fd0af9b35b7be97c18a0fc2826e6827dbb3a8c94eac6cf7eeb", + "sha256:2644e47de560eb7bd55c20fc59f6daa04682655c58d08185a9b95c1970fa1e07", + "sha256:2e6d75ab12b0bbab7215e5d40f1e5b738aa539598db27ef83b2ec46747df90e1", + 
"sha256:30f43887bbae0d49113cbaab729a112251a940e9b274536613097ab8b4899cf6", + "sha256:34b18ba135c687f4dac449aa5157d36e2cbb7c03cbea4ddbd88604e076aa836e", + "sha256:36b3ee798c58ace201289024b52788161e1ea133e4ac93fba7d49da5fec0ef9e", + "sha256:39514da80f971362f9267c600b6d459bfbbc549cffc2cef8e47474fddc9b45b1", + "sha256:39f5441553f1c2aed4de4377178ad8ff8f9d733723d6c66d983d75341de265ab", + "sha256:3a96e0c6a41dcdba3a0a581bbf6c44bb863f27c541547fb4b9711fd8cf0ffad4", + "sha256:3f26b5bd1079acdb0c7a5645e350fe54d16b17bfc5e71f371c449383d3342e17", + "sha256:41ef53e7c58aa4ef281da975f62c258950f54b76ec8e45941e93a3d1d8580594", + "sha256:42821446ee7a76f5d9f71f9e33a4fb2ffd724bb3e7f93386150b61a43115788d", + "sha256:43fbac5f22e25bee1d482c97474f930a353542855f05c1161fd804c9dc74a09d", + "sha256:4457a94da0d5c53dc4b3e4de1158bdab077db23c53232f37a3cb7afdb053a4e3", + "sha256:465a3eb5659338cf2a9243e50ad9b2296fa15061736d6e26240e713522b6235c", + "sha256:482103aed1dfe2f3b71a58eff35ba105289b8d862551ea576bd15479aba01f66", + "sha256:4832d7d380477521a8c1644bbab6588dfedea5e30a7d967b5fb75977c45fd77f", + "sha256:4901165d170a5fde6f589acb90a6b33629ad1ec976d4529e769c6f3d885e3e80", + "sha256:5307def11a35f5ae4581a0b658b0af8178c65c530e94893345bebf41cc139d33", + "sha256:5417558f6887e9b6b65b4527232553c139b57ec42c64570569b155262ac0754f", + "sha256:56a737287efecafc16f6d067c2ea0117abadcd078d58721f967952db329a3e5c", + "sha256:586f8204935b9ec884500498ccc91aa869fc652c40c093bd9e1471fbcc25c022", + "sha256:5b4e7d8d6c9b2e8ee2d55c90b59c707ca59bc30058269b3db7b1f8df5763557e", + "sha256:5ddcba87675b6d509139d1b521e0c8250e967e63b5909a7e8f8944d0f90ff36f", + "sha256:618a3d6cae6ef8ec88bb76dd80b83cfe415ad4f1d942ca2a903bf6b6ff97a2da", + "sha256:635dc434ff724b178cb192c70016cc0ad25a275228f749ee0daf0eddbc8183b1", + "sha256:661d25cbffaf8cc42e971dd570d87cb29a665f49f4abe1f9e76be9a5182c4688", + "sha256:66e6a3af5a75363d2c9a48b07cb27c4ea542938b1a2e93b15a503cdfa8490795", + "sha256:67071a6171e92b6da534b8ae326505f7c18022c6f19072a81dcf40db2638767c", 
+ "sha256:685537e07897f173abcf67258bee3c05c374fa6fff89d4c7e42fb391b0605e98", + "sha256:69e64831e22a6b377772e7fb337533c365085b31619005802a79242fee620bc1", + "sha256:6b0817e34942b2ca527b0e9298373e7cc75f429e8da2055607f4931fded23e20", + "sha256:6c81e5f372cd0dc5dc4809553d34f832f60a46034a5f187756d9b90586c2c307", + "sha256:6d7faa6f14017c0b1e69f5e2c357b998731ea75a442ab3841c0dbbbfe902d2c4", + "sha256:6ef0befbb5d79cf32d0266f5cff01545602344eda89480e1dd88aca964260b18", + "sha256:6ef687afab047554a2d366e112dd187b62d261d49eb79b77e386f94644363294", + "sha256:7223a2a5fe0d217e60a60cdae28d6949140dde9c3bcc714063c5b463065e3d66", + "sha256:77f195baa60a54ef9d2de16fbbfd3ff8b04edc0c0140a761b56c267ac11aa467", + "sha256:793968759cd0d96cac1e367afd70c235867831983f876a53389ad869b043c948", + "sha256:7bd339195d84439cbe5771546fe8a4e8a7a045417d8f9de9a368c434e42a721e", + "sha256:7cd863afe7336c62ec78d7d1349a2f34c007a3cc6c2369d667c65aeec412a5b1", + "sha256:7f2facbd386dd60cbbf1a794181e6aa0bd429bd78bfdf775436020172e2a23f0", + "sha256:84ffab12db93b5f6bad84c712c92060a2d321b35c3c9960b43d08d0f639d60d7", + "sha256:8c8370641f1a7f0e0669ddccca22f1da893cef7628396431eb445d46d893e5cd", + "sha256:8db715ebe3bb7d86d77ac1826f7d67ec11a70dbd2376b7cc214199360517b641", + "sha256:8e8916ae4c720529e18afa0b879473049e95949bf97042e938530e072fde061d", + "sha256:8f03bccbd8586e9dd37219bce4d4e0d3ab492e6b3b533e973fa08a112cb2ffc9", + "sha256:8f2fc11e8fe034ee3c34d316d0ad8808f45bc3b9ce5857ff29d513f3ff2923a1", + "sha256:923d39efa3cfb7279a0327e337a7958bff00cc447fd07a25cddb0a1cc9a6d2da", + "sha256:93df1de2f7f7239dc9cc5a4a12408ee1598725036bd2dedadc14d94525192fc3", + "sha256:998e33ad22dc7ec7e030b3df701c43630b5bc0d8fbc2267653577e3fec279afa", + "sha256:99f70b740dc04d09e6b2699b675874367885217a2e9f782bdf5395632ac663b7", + "sha256:9a00312dea9310d4cb7dbd7787e722d2e86a95c2db92fbd7d0155f97127bcb40", + "sha256:9d54553c1136b50fd12cc17e5b11ad07374c316df307e4cfd6441bea5fb68496", + 
"sha256:9dbbeb27f4e70bfd9eec1be5477517365afe05a9b2c441a0b21929ee61048124", + "sha256:a1ce3ba137ed54f83e56fb983a5859a27d43a40188ba798993812fed73c70836", + "sha256:a34d557a42aa28bd5c48a023c570219ba2593bcbbb8dc1b98d8cf5d529ab1434", + "sha256:a5f446dd5055667aabaee78487f2b5ab72e244f9bc0b2ffebfeec79051679984", + "sha256:ad36cfb355e24f1bd37cac88c112cd7730873f20fb0bdaf8ba59eedf8216079f", + "sha256:aec493917dd45e3c69d00a8874e7cbed844efd935595ef78a0f25f14312e33c6", + "sha256:b316144e85316da2723f9d8dc75bada12fa58489a527091fa1d5a612643d1a0e", + "sha256:b34ae4636dfc4e76a438ab826a0d1eed2589ca7d9a1b2d5bb546978ac6485461", + "sha256:b34b7aa8b261c1dbf7720b5d6f01f38243e9b9daf7e6b8bc1fd4657000062f2c", + "sha256:bc362ee4e314870a70f4ae88772d72d877246537d9f8cb8f7eacf10884862432", + "sha256:bed88b9a458e354014d662d47e7a5baafd7ff81c780fd91584a10d6ec842cb73", + "sha256:c0013fe6b46aa496a6749c77e00a3eb07952832ad6166bd481c74bda0dcb6d58", + "sha256:c0b5dcf9193625afd8ecc92312d6ed78781c46ecbf39af9ad4681fc9f464af88", + "sha256:c4325ff0442a12113a6379af66978c3fe562f846763287ef66bdc1d57925d337", + "sha256:c463ed05f9dfb9baebef68048aed8dcdc94411e4bf3d33a39ba97e271624f8f7", + "sha256:c8362467a0fdeccd47935f22c256bec5e6abe543bf0d66e3d3d57a8fb5731863", + "sha256:cd5bf1af8efe569654bbef5a3e0a56eca45f87cfcffab31dd8dde70da5982475", + "sha256:cf1ea2e34868f6fbf070e1af291c8180480310173de0b0c43fc38a02929fc0e3", + "sha256:d62dec4976954a23d7f91f2f4530852b0c7608116c257833922a896101336c51", + "sha256:d68c93e381010662ab873fea609bf6c0f428b6d0bb00f2c6939782e0818d37bf", + "sha256:d7c36232a90d4755b720fbd76739d8891732b18cf240a9c645d75f00639a9024", + "sha256:dd18772815d5f008fa03d2b9a681ae38d5ae9f0e599f7dda233c439fcaa00d40", + "sha256:ddc2f4dfd396c7bfa18e6ce371cba60e4cf9d2e5cdb71376aa2da264605b60b9", + "sha256:e003b002ec72c8d5a3e3da2989c7d6065b47d9eaa70cd8808b5384fbb970f4ec", + "sha256:e32a92116d4f2a80b629778280103d2a510a5b3f6314ceccd6e38006b5e92dcb", + "sha256:e4461d0f003a0aa9be2bdd1b798a041f177189c1a0f7619fe8c95ad08d9a45d7", 
+ "sha256:e541ec6f2ec456934fd279a3120f856cd0aedd209fc3852eca563f81738f6861", + "sha256:e546e768d08ad55b20b11dbb78a745151acbd938f8f00d0cfbabe8b0199b9880", + "sha256:ea7d4a99f3b38c37eac212dbd6ec42b7a5ec51e2c74b5d3223e43c811609e65f", + "sha256:ed4eb745efbff0a8e9587d22a84be94a5eb7d2d99c02dacf7bd0911713ed14dd", + "sha256:f8a2f084546cc59ea99fda8e070be2fd140c3092dc11524a71aa8f0f3d5a55ca", + "sha256:fcb25daa9219b4cf3a0ab24b0eb9a5cc8949ed4dc72acb8fa16b7e1681aa3c58", + "sha256:fdea4952db2793c4ad0bdccd27c1d8fdd1423a92f04598bc39425bcc2b8ee46e" + ], + "markers": "python_version >= '3.8'", + "version": "==0.18.0" + }, + "runs": { + "hashes": [ + "sha256:0980dcbc25aba1505f307ac4f0e9e92cbd0be2a15a1e983ee86c24c87b839dfd", + "sha256:9dc1815e2895cfb3a48317b173b9f1eac9ba5549b36a847b5cc60c3bf82ecef1" + ], + "markers": "python_version >= '3.8'", + "version": "==1.2.2" + }, + "s3transfer": { + "hashes": [ + "sha256:5683916b4c724f799e600f41dd9e10a9ff19871bf87623cc8f491cb4f5fa0a19", + "sha256:ceb252b11bcf87080fb7850a224fb6e05c8a776bab8f2b64b7f25b969464839d" + ], + "markers": "python_version >= '3.8'", + "version": "==0.10.1" + }, + "sacrebleu": { + "hashes": [ + "sha256:1acae0221e27c23c4987834fd17b284b4addc6556941c2097c4d618baa2d67af", + "sha256:51fb69b6683f1b9999cd180143bb6b21d7841744537c9aab235cfe676550f0cf" + ], + "index": "pypi", + "markers": "python_version >= '3.6'", + "version": "==2.0.0" + }, + "safetensors": { + "hashes": [ + "sha256:018b691383026a2436a22b648873ed11444a364324e7088b99cd2503dd828400", + "sha256:01e4b22e3284cd866edeabe4f4d896229495da457229408d2e1e4810c5187121", + "sha256:01feb3089e5932d7e662eda77c3ecc389f97c0883c4a12b5cfdc32b589a811c3", + "sha256:02318f01e332cc23ffb4f6716e05a492c5f18b1d13e343c49265149396284a44", + "sha256:02ef3a24face643456020536591fbd3c717c5abaa2737ec428ccbbc86dffa7a4", + "sha256:03a4447c784917c9bf01d8f2ac5080bc15c41692202cd5f406afba16629e84d6", + "sha256:084fc436e317f83f7071fc6a62ca1c513b2103db325cd09952914b50f51cf78f", + 
"sha256:0bf4f9d6323d9f86eef5567eabd88f070691cf031d4c0df27a40d3b4aaee755b", + "sha256:0d52c958dc210265157573f81d34adf54e255bc2b59ded6218500c9b15a750eb", + "sha256:0d5ffc6a80f715c30af253e0e288ad1cd97a3d0086c9c87995e5093ebc075e50", + "sha256:0d9cd8e1560dfc514b6d7859247dc6a86ad2f83151a62c577428d5102d872721", + "sha256:0dd37306546b58d3043eb044c8103a02792cc024b51d1dd16bd3dd1f334cb3ed", + "sha256:1139eb436fd201c133d03c81209d39ac57e129f5e74e34bb9ab60f8d9b726270", + "sha256:19bbdf95de2cf64f25cd614c5236c8b06eb2cfa47cbf64311f4b5d80224623a3", + "sha256:1ab6527a20586d94291c96e00a668fa03f86189b8a9defa2cdd34a1a01acc7d5", + "sha256:1b89381517891a7bb7d1405d828b2bf5d75528299f8231e9346b8eba092227f9", + "sha256:1f598b713cc1a4eb31d3b3203557ac308acf21c8f41104cdd74bf640c6e538e3", + "sha256:22d21760dc6ebae42e9c058d75aa9907d9f35e38f896e3c69ba0e7b213033856", + "sha256:22f3b5d65e440cec0de8edaa672efa888030802e11c09b3d6203bff60ebff05a", + "sha256:2a0deb16a1d3ea90c244ceb42d2c6c276059616be21a19ac7101aa97da448faf", + "sha256:2a1f4430cc0c9d6afa01214a4b3919d0a029637df8e09675ceef1ca3f0dfa0df", + "sha256:2d603846a8585b9432a0fd415db1d4c57c0f860eb4aea21f92559ff9902bae4d", + "sha256:2f85fc50c4e07a21e95c24e07460fe6f7e2859d0ce88092838352b798ce711c2", + "sha256:309b10dbcab63269ecbf0e2ca10ce59223bb756ca5d431ce9c9eeabd446569da", + "sha256:3615a96dd2dcc30eb66d82bc76cda2565f4f7bfa89fcb0e31ba3cea8a1a9ecbb", + "sha256:38e2a8666178224a51cca61d3cb4c88704f696eac8f72a49a598a93bbd8a4af9", + "sha256:393e6e391467d1b2b829c77e47d726f3b9b93630e6a045b1d1fca67dc78bf632", + "sha256:3f9cdca09052f585e62328c1c2923c70f46814715c795be65f0b93f57ec98a02", + "sha256:41a727a7f5e6ad9f1db6951adee21bbdadc632363d79dc434876369a17de6ad6", + "sha256:420a98f593ff9930f5822560d14c395ccbc57342ddff3b463bc0b3d6b1951550", + "sha256:446e9fe52c051aeab12aac63d1017e0f68a02a92a027b901c4f8e931b24e5397", + "sha256:455d538aa1aae4a8b279344a08136d3f16334247907b18a5c3c7fa88ef0d3c46", + "sha256:4f9bac020faba7f5dc481e881b14b6425265feabb5bfc552551d21189c0eddc3", 
+ "sha256:53c4879b9c6bd7cd25d114ee0ef95420e2812e676314300624594940a8d6a91f", + "sha256:5757e4688f20df083e233b47de43845d1adb7e17b6cf7da5f8444416fc53828d", + "sha256:585c9ae13a205807b63bef8a37994f30c917ff800ab8a1ca9c9b5d73024f97ee", + "sha256:5d07cbca5b99babb692d76d8151bec46f461f8ad8daafbfd96b2fca40cadae65", + "sha256:5fc6775529fb9f0ce2266edd3e5d3f10aab068e49f765e11f6f2a63b5367021d", + "sha256:622afd28968ef3e9786562d352659a37de4481a4070f4ebac883f98c5836563e", + "sha256:6f9568f380f513a60139971169c4a358b8731509cc19112369902eddb33faa4d", + "sha256:70a5319ef409e7f88686a46607cbc3c428271069d8b770076feaf913664a07ac", + "sha256:74707624b81f1b7f2b93f5619d4a9f00934d5948005a03f2c1845ffbfff42212", + "sha256:7c4fa560ebd4522adddb71dcd25d09bf211b5634003f015a4b815b7647d62ebe", + "sha256:7de32d0d34b6623bb56ca278f90db081f85fb9c5d327e3c18fd23ac64f465768", + "sha256:840b7ac0eff5633e1d053cc9db12fdf56b566e9403b4950b2dc85393d9b88d67", + "sha256:840caf38d86aa7014fe37ade5d0d84e23dcfbc798b8078015831996ecbc206a3", + "sha256:8651c7299cbd8b4161a36cd6a322fa07d39cd23535b144d02f1c1972d0c62f3c", + "sha256:868ad1b6fc41209ab6bd12f63923e8baeb1a086814cb2e81a65ed3d497e0cf8f", + "sha256:88887f69f7a00cf02b954cdc3034ffb383b2303bc0ab481d4716e2da51ddc10e", + "sha256:89f9f17b0dacb913ed87d57afbc8aad85ea42c1085bd5de2f20d83d13e9fc4b2", + "sha256:8c496c5401c1b9c46d41a7688e8ff5b0310a3b9bae31ce0f0ae870e1ea2b8caf", + "sha256:8cf18888606dad030455d18f6c381720e57fc6a4170ee1966adb7ebc98d4d6a3", + "sha256:8d22c1a10dff3f64d0d68abb8298a3fd88ccff79f408a3e15b3e7f637ef5c980", + "sha256:90964917f5b0fa0fa07e9a051fbef100250c04d150b7026ccbf87a34a54012e0", + "sha256:9bfb92f82574d9e58401d79c70c716985dc049b635fef6eecbb024c79b2c46ad", + "sha256:9c6ad011c1b4e3acff058d6b090f1da8e55a332fbf84695cf3100c649cc452d1", + "sha256:a11c374eb63a9c16c5ed146457241182f310902bd2a9c18255781bb832b6748b", + "sha256:a7cef55929dcbef24af3eb40bedec35d82c3c2fa46338bb13ecf3c5720af8a61", + 
"sha256:a844cdb5d7cbc22f5f16c7e2a0271170750763c4db08381b7f696dbd2c78a361", + "sha256:ae7613a119a71a497d012ccc83775c308b9c1dab454806291427f84397d852fd", + "sha256:b1648568667f820b8c48317c7006221dc40aced1869908c187f493838a1362bc", + "sha256:b1e31be7945f66be23f4ec1682bb47faa3df34cb89fc68527de6554d3c4258a4", + "sha256:b277482120df46e27a58082df06a15aebda4481e30a1c21eefd0921ae7e03f65", + "sha256:b7ffba80aa49bd09195145a7fd233a7781173b422eeb995096f2b30591639517", + "sha256:b852e47eb08475c2c1bd8131207b405793bfc20d6f45aff893d3baaad449ed14", + "sha256:bb4f8c5d0358a31e9a08daeebb68f5e161cdd4018855426d3f0c23bb51087055", + "sha256:bbae3b4b9d997971431c346edbfe6e41e98424a097860ee872721e176040a893", + "sha256:befdf0167ad626f22f6aac6163477fcefa342224a22f11fdd05abb3995c1783c", + "sha256:c0acbe31340ab150423347e5b9cc595867d814244ac14218932a5cf1dd38eb39", + "sha256:c41e1893d1206aa7054029681778d9a58b3529d4c807002c156d58426c225173", + "sha256:c59d51f182c729f47e841510b70b967b0752039f79f1de23bcdd86462a9b09ee", + "sha256:cd6fff9e56df398abc5866b19a32124815b656613c1c5ec0f9350906fd798aac", + "sha256:cdd0a3b5da66e7f377474599814dbf5cbf135ff059cc73694de129b58a5e8a2c", + "sha256:cf476bca34e1340ee3294ef13e2c625833f83d096cfdf69a5342475602004f95", + "sha256:d0dd4a1db09db2dba0f94d15addc7e7cd3a7b0d393aa4c7518c39ae7374623c3", + "sha256:d1456f814655b224d4bf6e7915c51ce74e389b413be791203092b7ff78c936dd", + "sha256:d14d30c25897b2bf19b6fb5ff7e26cc40006ad53fd4a88244fdf26517d852dd7", + "sha256:d244bcafeb1bc06d47cfee71727e775bca88a8efda77a13e7306aae3813fa7e4", + "sha256:d8815b5e1dac85fc534a97fd339e12404db557878c090f90442247e87c8aeaea", + "sha256:d88b33980222085dd6001ae2cad87c6068e0991d4f5ccf44975d216db3b57376", + "sha256:d8c5093206ef4b198600ae484230402af6713dab1bd5b8e231905d754022bec7", + "sha256:d9c289f140a9ae4853fc2236a2ffc9a9f2d5eae0cb673167e0f1b8c18c0961ac", + "sha256:dcf5705cab159ce0130cd56057f5f3425023c407e170bca60b4868048bae64fd", + "sha256:e011cc162503c19f4b1fd63dfcddf73739c7a243a17dac09b78e57a00983ab35", 
+ "sha256:e066e8861eef6387b7c772344d1fe1f9a72800e04ee9a54239d460c400c72aab", + "sha256:e0b2104df1579d6ba9052c0ae0e3137c9698b2d85b0645507e6fd1813b70931a", + "sha256:e375d975159ac534c7161269de24ddcd490df2157b55c1a6eeace6cbb56903f0", + "sha256:e4119532cd10dba04b423e0f86aecb96cfa5a602238c0aa012f70c3a40c44b50", + "sha256:e7dbbde64b6c534548696808a0e01276d28ea5773bc9a2dfb97a88cd3dffe3df", + "sha256:e9afd5358719f1b2cf425fad638fc3c887997d6782da317096877e5b15b2ce93", + "sha256:ec4b52ce9a396260eb9731eb6aea41a7320de22ed73a1042c2230af0212758ce", + "sha256:edb5698a7bc282089f64c96c477846950358a46ede85a1c040e0230344fdde10", + "sha256:ee463219d9ec6c2be1d331ab13a8e0cd50d2f32240a81d498266d77d07b7e71e", + "sha256:efcc860be094b8d19ac61b452ec635c7acb9afa77beb218b1d7784c6d41fe8ad", + "sha256:f5e6883af9a68c0028f70a4c19d5a6ab6238a379be36ad300a22318316c00cb0", + "sha256:f9650713b2cfa9537a2baf7dd9fee458b24a0aaaa6cafcea8bdd5fb2b8efdc34", + "sha256:faefeb3b81bdfb4e5a55b9bbdf3d8d8753f65506e1d67d03f5c851a6c87150e9", + "sha256:fb9c65bd82f9ef3ce4970dc19ee86be5f6f93d032159acf35e663c6bea02b237", + "sha256:fe746d03ed8d193674a26105e4f0fe6c726f5bb602ffc695b409eaf02f04763d", + "sha256:fef5d70683643618244a4f5221053567ca3e77c2531e42ad48ae05fae909f542" + ], + "markers": "python_version >= '3.7'", + "version": "==0.4.3" + }, + "scikit-learn": { + "hashes": [ + "sha256:1d0b25d9c651fd050555aadd57431b53d4cf664e749069da77f3d52c5ad14b3b", + "sha256:36f0ea5d0f693cb247a073d21a4123bdf4172e470e6d163c12b74cbb1536cf38", + "sha256:426d258fddac674fdf33f3cb2d54d26f49406e2599dbf9a32b4d1696091d4256", + "sha256:44c62f2b124848a28fd695db5bc4da019287abf390bfce602ddc8aa1ec186aae", + "sha256:45dee87ac5309bb82e3ea633955030df9bbcb8d2cdb30383c6cd483691c546cc", + "sha256:49d64ef6cb8c093d883e5a36c4766548d974898d378e395ba41a806d0e824db8", + "sha256:5460a1a5b043ae5ae4596b3126a4ec33ccba1b51e7ca2c5d36dac2169f62ab1d", + "sha256:5cd7b524115499b18b63f0c96f4224eb885564937a0b3477531b2b63ce331904", + 
"sha256:671e2f0c3f2c15409dae4f282a3a619601fa824d2c820e5b608d9d775f91780c", + "sha256:68b8404841f944a4a1459b07198fa2edd41a82f189b44f3e1d55c104dbc2e40c", + "sha256:81bf5d8bbe87643103334032dd82f7419bc8c8d02a763643a6b9a5c7288c5054", + "sha256:8539a41b3d6d1af82eb629f9c57f37428ff1481c1e34dddb3b9d7af8ede67ac5", + "sha256:87440e2e188c87db80ea4023440923dccbd56fbc2d557b18ced00fef79da0727", + "sha256:90378e1747949f90c8f385898fff35d73193dfcaec3dd75d6b542f90c4e89755", + "sha256:b0203c368058ab92efc6168a1507d388d41469c873e96ec220ca8e74079bf62e", + "sha256:c97a50b05c194be9146d61fe87dbf8eac62b203d9e87a3ccc6ae9aed2dfaf361", + "sha256:d36d0bc983336bbc1be22f9b686b50c964f593c8a9a913a792442af9bf4f5e68", + "sha256:d762070980c17ba3e9a4a1e043ba0518ce4c55152032f1af0ca6f39b376b5928", + "sha256:d9993d5e78a8148b1d0fdf5b15ed92452af5581734129998c26f481c46586d68", + "sha256:daa1c471d95bad080c6e44b4946c9390a4842adc3082572c20e4f8884e39e959", + "sha256:ff4effe5a1d4e8fed260a83a163f7dbf4f6087b54528d8880bab1d1377bd78be" + ], + "index": "pypi", + "markers": "python_version >= '3.9'", + "version": "==1.4.2" + }, + "scipy": { + "hashes": [ + "sha256:05f1432ba070e90d42d7fd836462c50bf98bd08bed0aa616c359eed8a04e3922", + "sha256:09c74543c4fbeb67af6ce457f6a6a28e5d3739a87f62412e4a16e46f164f0ae5", + "sha256:0fbcf8abaf5aa2dc8d6400566c1a727aed338b5fe880cde64907596a89d576fa", + "sha256:109d391d720fcebf2fbe008621952b08e52907cf4c8c7efc7376822151820820", + "sha256:1d2f7bb14c178f8b13ebae93f67e42b0a6b0fc50eba1cd8021c9b6e08e8fb1cd", + "sha256:1e7626dfd91cdea5714f343ce1176b6c4745155d234f1033584154f60ef1ff42", + "sha256:22789b56a999265431c417d462e5b7f2b487e831ca7bef5edeb56efe4c93f86e", + "sha256:28e286bf9ac422d6beb559bc61312c348ca9b0f0dae0d7c5afde7f722d6ea13d", + "sha256:33fde20efc380bd23a78a4d26d59fc8704e9b5fd9b08841693eb46716ba13d86", + "sha256:45c08bec71d3546d606989ba6e7daa6f0992918171e2a6f7fbedfa7361c2de1e", + "sha256:4dca18c3ffee287ddd3bc8f1dabaf45f5305c5afc9f8ab9cbfab855e70b2df5c", + 
"sha256:5407708195cb38d70fd2d6bb04b1b9dd5c92297d86e9f9daae1576bd9e06f602", + "sha256:58569af537ea29d3f78e5abd18398459f195546bb3be23d16677fb26616cc11e", + "sha256:5e4a756355522eb60fcd61f8372ac2549073c8788f6114449b37e9e8104f15a5", + "sha256:6bf9fe63e7a4bf01d3645b13ff2aa6dea023d38993f42aaac81a18b1bda7a82a", + "sha256:8930ae3ea371d6b91c203b1032b9600d69c568e537b7988a3073dfe4d4774f21", + "sha256:9ff7dad5d24a8045d836671e082a490848e8639cabb3dbdacb29f943a678683d", + "sha256:a2f471de4d01200718b2b8927f7d76b5d9bde18047ea0fa8bd15c5ba3f26a1d6", + "sha256:ac38c4c92951ac0f729c4c48c9e13eb3675d9986cc0c83943784d7390d540c78", + "sha256:b2a3ff461ec4756b7e8e42e1c681077349a038f0686132d623fa404c0bee2551", + "sha256:b5acd8e1dbd8dbe38d0004b1497019b2dbbc3d70691e65d69615f8a7292865d7", + "sha256:b8434f6f3fa49f631fae84afee424e2483289dfc30a47755b4b4e6b07b2633a4", + "sha256:ba419578ab343a4e0a77c0ef82f088238a93eef141b2b8017e46149776dfad4d", + "sha256:d0de696f589681c2802f9090fff730c218f7c51ff49bf252b6a97ec4a5d19e8b", + "sha256:dcbb9ea49b0167de4167c40eeee6e167caeef11effb0670b554d10b1e693a8b9" + ], + "markers": "python_version >= '3.9'", + "version": "==1.13.0" + }, + "secretstorage": { + "hashes": [ + "sha256:2403533ef369eca6d2ba81718576c5e0f564d5cca1b58f73a8b23e7d4eeebd77", + "sha256:f356e6628222568e3af06f2eba8df495efa13b3b63081dafd4f7d9a7b7bc9f99" + ], + "markers": "sys_platform == 'linux'", + "version": "==3.3.3" + }, + "sentencepiece": { + "hashes": [ + "sha256:004e6a621d4bc88978eecb6ea7959264239a17b70f2cbc348033d8195c9808ec", + "sha256:019e7535108e309dae2b253a75834fc3128240aa87c00eb80732078cdc182588", + "sha256:0b0f55d0a0ee1719b4b04221fe0c9f0c3461dc3dabd77a035fa2f4788eb3ef9a", + "sha256:0eaf3591dd0690a87f44f4df129cf8d05d8a4029b5b6709b489b8e27f9a9bcff", + "sha256:0eb528e70571b7c02723e5804322469b82fe7ea418c96051d0286c0fa028db73", + "sha256:14b0eccb7b641d4591c3e12ae44cab537d68352e4d3b6424944f0c447d2348d5", + "sha256:189c48f5cb2949288f97ccdb97f0473098d9c3dcf5a3d99d4eabe719ec27297f", + 
"sha256:18e800f206cd235dc27dc749299e05853a4e4332e8d3dfd81bf13d0e5b9007d9", + "sha256:27b866b5bd3ddd54166bbcbf5c8d7dd2e0b397fac8537991c7f544220b1f67bc", + "sha256:2ae1c40cda8f9d5b0423cfa98542735c0235e7597d79caf318855cdf971b2280", + "sha256:2d95e19168875b70df62916eb55428a0cbcb834ac51d5a7e664eda74def9e1e0", + "sha256:33e6f690a1caebb4867a2e367afa1918ad35be257ecdb3455d2bbd787936f155", + "sha256:350e5c74d739973f1c9643edb80f7cc904dc948578bcb1d43c6f2b173e5d18dd", + "sha256:38efeda9bbfb55052d482a009c6a37e52f42ebffcea9d3a98a61de7aee356a28", + "sha256:445b0ec381af1cd4eef95243e7180c63d9c384443c16c4c47a28196bd1cda937", + "sha256:47c378146928690d1bc106fdf0da768cebd03b65dd8405aa3dd88f9c81e35dba", + "sha256:57efcc2d51caff20d9573567d9fd3f854d9efe613ed58a439c78c9f93101384a", + "sha256:62e24c81e74bd87a6e0d63c51beb6527e4c0add67e1a17bac18bcd2076afcfeb", + "sha256:6a904c46197993bd1e95b93a6e373dca2f170379d64441041e2e628ad4afb16f", + "sha256:6c030b081dc1e1bcc9fadc314b19b740715d3d566ad73a482da20d7d46fd444c", + "sha256:6d3c56f24183a1e8bd61043ff2c58dfecdc68a5dd8955dc13bab83afd5f76b81", + "sha256:77d7fafb2c4e4659cbdf303929503f37a26eabc4ff31d3a79bf1c5a1b338caa7", + "sha256:84dbe53e02e4f8a2e45d2ac3e430d5c83182142658e25edd76539b7648928727", + "sha256:85b476406da69c70586f0bb682fcca4c9b40e5059814f2db92303ea4585c650c", + "sha256:8a1abff4d1ff81c77cac3cc6fefa34fa4b8b371e5ee51cb7e8d1ebc996d05983", + "sha256:8a321866c2f85da7beac74a824b4ad6ddc2a4c9bccd9382529506d48f744a12c", + "sha256:9832f08bb372d4c8b567612f8eab9e36e268dff645f1c28f9f8e851be705f6d1", + "sha256:9ba142e7a90dd6d823c44f9870abdad45e6c63958eb60fe44cca6828d3b69da2", + "sha256:a2a0260cd1fb7bd8b4d4f39dc2444a8d5fd4e0a0c4d5c899810ef1abf99b2d45", + "sha256:b133e8a499eac49c581c3c76e9bdd08c338cc1939e441fee6f92c0ccb5f1f8be", + "sha256:b7b1a9ae4d7c6f1f867e63370cca25cc17b6f4886729595b885ee07a58d3cec3", + "sha256:baed1a26464998f9710d20e52607c29ffd4293e7c71c6a1f83f51ad0911ec12c", + "sha256:be9cf5b9e404c245aeb3d3723c737ba7a8f5d4ba262ef233a431fa6c45f732a0", 
+ "sha256:c42f753bcfb7661c122a15b20be7f684b61fc8592c89c870adf52382ea72262d", + "sha256:c6890ea0f2b4703f62d0bf27932e35808b1f679bdb05c7eeb3812b935ba02001", + "sha256:c84ce33af12ca222d14a1cdd37bd76a69401e32bc68fe61c67ef6b59402f4ab8", + "sha256:c8843d23a0f686d85e569bd6dcd0dd0e0cbc03731e63497ca6d5bacd18df8b85", + "sha256:cfbcfe13c69d3f87b7fcd5da168df7290a6d006329be71f90ba4f56bc77f8561", + "sha256:d0f644c9d4d35c096a538507b2163e6191512460035bf51358794a78515b74f7", + "sha256:d89adf59854741c0d465f0e1525b388c0d174f611cc04af54153c5c4f36088c4", + "sha256:db361e03342c41680afae5807590bc88aa0e17cfd1a42696a160e4005fcda03b", + "sha256:ed6ea1819fd612c989999e44a51bf556d0ef6abfb553080b9be3d347e18bcfb7", + "sha256:f90d73a6f81248a909f55d8e6ef56fec32d559e1e9af045f0b0322637cb8e5c7", + "sha256:fa16a830416bb823fa2a52cbdd474d1f7f3bba527fd2304fb4b140dad31bb9bc", + "sha256:fb71af492b0eefbf9f2501bec97bcd043b6812ab000d119eaf4bd33f9e283d03" + ], + "index": "pypi", + "version": "==0.1.99" + }, + "setuptools": { + "hashes": [ + "sha256:6c1fccdac05a97e598fb0ae3bbed5904ccb317337a51139dcd51453611bbb987", + "sha256:c636ac361bc47580504644275c9ad802c50415c7522212252c033bd15f301f32" + ], + "markers": "python_version >= '3.8'", + "version": "==69.5.1" + }, + "simplejson": { + "hashes": [ + "sha256:0405984f3ec1d3f8777c4adc33eac7ab7a3e629f3b1c05fdded63acc7cf01137", + "sha256:0436a70d8eb42bea4fe1a1c32d371d9bb3b62c637969cb33970ad624d5a3336a", + "sha256:061e81ea2d62671fa9dea2c2bfbc1eec2617ae7651e366c7b4a2baf0a8c72cae", + "sha256:064300a4ea17d1cd9ea1706aa0590dcb3be81112aac30233823ee494f02cb78a", + "sha256:08889f2f597ae965284d7b52a5c3928653a9406d88c93e3161180f0abc2433ba", + "sha256:0a48679310e1dd5c9f03481799311a65d343748fe86850b7fb41df4e2c00c087", + "sha256:0b0a3eb6dd39cce23801a50c01a0976971498da49bc8a0590ce311492b82c44b", + "sha256:0d2d5119b1d7a1ed286b8af37357116072fc96700bce3bec5bb81b2e7057ab41", + "sha256:0d551dc931638e2102b8549836a1632e6e7cf620af3d093a7456aa642bff601d", + 
"sha256:1018bd0d70ce85f165185d2227c71e3b1e446186f9fa9f971b69eee223e1e3cd", + "sha256:11c39fbc4280d7420684494373b7c5904fa72a2b48ef543a56c2d412999c9e5d", + "sha256:11cc3afd8160d44582543838b7e4f9aa5e97865322844b75d51bf4e0e413bb3e", + "sha256:1537b3dd62d8aae644f3518c407aa8469e3fd0f179cdf86c5992792713ed717a", + "sha256:16ca9c90da4b1f50f089e14485db8c20cbfff2d55424062791a7392b5a9b3ff9", + "sha256:176a1b524a3bd3314ed47029a86d02d5a95cc0bee15bd3063a1e1ec62b947de6", + "sha256:18955c1da6fc39d957adfa346f75226246b6569e096ac9e40f67d102278c3bcb", + "sha256:1bb5b50dc6dd671eb46a605a3e2eb98deb4a9af787a08fcdddabe5d824bb9664", + "sha256:1c768e7584c45094dca4b334af361e43b0aaa4844c04945ac7d43379eeda9bc2", + "sha256:1dd4f692304854352c3e396e9b5f0a9c9e666868dd0bdc784e2ac4c93092d87b", + "sha256:25785d038281cd106c0d91a68b9930049b6464288cea59ba95b35ee37c2d23a5", + "sha256:287e39ba24e141b046812c880f4619d0ca9e617235d74abc27267194fc0c7835", + "sha256:2c1467d939932901a97ba4f979e8f2642415fcf02ea12f53a4e3206c9c03bc17", + "sha256:2c433a412e96afb9a3ce36fa96c8e61a757af53e9c9192c97392f72871e18e69", + "sha256:2d022b14d7758bfb98405672953fe5c202ea8a9ccf9f6713c5bd0718eba286fd", + "sha256:2f98d918f7f3aaf4b91f2b08c0c92b1774aea113334f7cde4fe40e777114dbe6", + "sha256:2fc697be37585eded0c8581c4788fcfac0e3f84ca635b73a5bf360e28c8ea1a2", + "sha256:3194cd0d2c959062b94094c0a9f8780ffd38417a5322450a0db0ca1a23e7fbd2", + "sha256:332c848f02d71a649272b3f1feccacb7e4f7e6de4a2e6dc70a32645326f3d428", + "sha256:346820ae96aa90c7d52653539a57766f10f33dd4be609206c001432b59ddf89f", + "sha256:3471e95110dcaf901db16063b2e40fb394f8a9e99b3fe9ee3acc6f6ef72183a2", + "sha256:3848427b65e31bea2c11f521b6fc7a3145d6e501a1038529da2391aff5970f2f", + "sha256:39b6d79f5cbfa3eb63a869639cfacf7c41d753c64f7801efc72692c1b2637ac7", + "sha256:3e74355cb47e0cd399ead3477e29e2f50e1540952c22fb3504dda0184fc9819f", + "sha256:3f39bb1f6e620f3e158c8b2eaf1b3e3e54408baca96a02fe891794705e788637", + "sha256:40847f617287a38623507d08cbcb75d51cf9d4f9551dd6321df40215128325a3", 
+ "sha256:4280e460e51f86ad76dc456acdbfa9513bdf329556ffc8c49e0200878ca57816", + "sha256:445a96543948c011a3a47c8e0f9d61e9785df2544ea5be5ab3bc2be4bd8a2565", + "sha256:4969d974d9db826a2c07671273e6b27bc48e940738d768fa8f33b577f0978378", + "sha256:49aaf4546f6023c44d7e7136be84a03a4237f0b2b5fb2b17c3e3770a758fc1a0", + "sha256:49e0e3faf3070abdf71a5c80a97c1afc059b4f45a5aa62de0c2ca0444b51669b", + "sha256:49f9da0d6cd17b600a178439d7d2d57c5ef01f816b1e0e875e8e8b3b42db2693", + "sha256:4a8c3cc4f9dfc33220246760358c8265dad6e1104f25f0077bbca692d616d358", + "sha256:4d36081c0b1c12ea0ed62c202046dca11438bee48dd5240b7c8de8da62c620e9", + "sha256:4edcd0bf70087b244ba77038db23cd98a1ace2f91b4a3ecef22036314d77ac23", + "sha256:554313db34d63eac3b3f42986aa9efddd1a481169c12b7be1e7512edebff8eaf", + "sha256:5675e9d8eeef0aa06093c1ff898413ade042d73dc920a03e8cea2fb68f62445a", + "sha256:60848ab779195b72382841fc3fa4f71698a98d9589b0a081a9399904487b5832", + "sha256:66e5dc13bfb17cd6ee764fc96ccafd6e405daa846a42baab81f4c60e15650414", + "sha256:6779105d2fcb7fcf794a6a2a233787f6bbd4731227333a072d8513b252ed374f", + "sha256:6ad331349b0b9ca6da86064a3599c425c7a21cd41616e175ddba0866da32df48", + "sha256:6f0a0b41dd05eefab547576bed0cf066595f3b20b083956b1405a6f17d1be6ad", + "sha256:73a8a4653f2e809049999d63530180d7b5a344b23a793502413ad1ecea9a0290", + "sha256:778331444917108fa8441f59af45886270d33ce8a23bfc4f9b192c0b2ecef1b3", + "sha256:7cb98be113911cb0ad09e5523d0e2a926c09a465c9abb0784c9269efe4f95917", + "sha256:7d74beca677623481810c7052926365d5f07393c72cbf62d6cce29991b676402", + "sha256:7f2398361508c560d0bf1773af19e9fe644e218f2a814a02210ac2c97ad70db0", + "sha256:8434dcdd347459f9fd9c526117c01fe7ca7b016b6008dddc3c13471098f4f0dc", + "sha256:8a390e56a7963e3946ff2049ee1eb218380e87c8a0e7608f7f8790ba19390867", + "sha256:92c4a4a2b1f4846cd4364855cbac83efc48ff5a7d7c06ba014c792dd96483f6f", + "sha256:9300aee2a8b5992d0f4293d88deb59c218989833e3396c824b69ba330d04a589", + 
"sha256:9453419ea2ab9b21d925d0fd7e3a132a178a191881fab4169b6f96e118cc25bb", + "sha256:9652e59c022e62a5b58a6f9948b104e5bb96d3b06940c6482588176f40f4914b", + "sha256:972a7833d4a1fcf7a711c939e315721a88b988553fc770a5b6a5a64bd6ebeba3", + "sha256:9c1a4393242e321e344213a90a1e3bf35d2f624aa8b8f6174d43e3c6b0e8f6eb", + "sha256:9e038c615b3906df4c3be8db16b3e24821d26c55177638ea47b3f8f73615111c", + "sha256:9e4c166f743bb42c5fcc60760fb1c3623e8fda94f6619534217b083e08644b46", + "sha256:9eb117db8d7ed733a7317c4215c35993b815bf6aeab67523f1f11e108c040672", + "sha256:9eb442a2442ce417801c912df68e1f6ccfcd41577ae7274953ab3ad24ef7d82c", + "sha256:a3cd18e03b0ee54ea4319cdcce48357719ea487b53f92a469ba8ca8e39df285e", + "sha256:a8617625369d2d03766413bff9e64310feafc9fc4f0ad2b902136f1a5cd8c6b0", + "sha256:a970a2e6d5281d56cacf3dc82081c95c1f4da5a559e52469287457811db6a79b", + "sha256:aad7405c033d32c751d98d3a65801e2797ae77fac284a539f6c3a3e13005edc4", + "sha256:adcb3332979cbc941b8fff07181f06d2b608625edc0a4d8bc3ffc0be414ad0c4", + "sha256:af9c7e6669c4d0ad7362f79cb2ab6784d71147503e62b57e3d95c4a0f222c01c", + "sha256:b01fda3e95d07a6148702a641e5e293b6da7863f8bc9b967f62db9461330562c", + "sha256:b8d940fd28eb34a7084877747a60873956893e377f15a32ad445fe66c972c3b8", + "sha256:bccb3e88ec26ffa90f72229f983d3a5d1155e41a1171190fa723d4135523585b", + "sha256:bcedf4cae0d47839fee7de344f96b5694ca53c786f28b5f773d4f0b265a159eb", + "sha256:be893258d5b68dd3a8cba8deb35dc6411db844a9d35268a8d3793b9d9a256f80", + "sha256:c0521e0f07cb56415fdb3aae0bbd8701eb31a9dfef47bb57206075a0584ab2a2", + "sha256:c594642d6b13d225e10df5c16ee15b3398e21a35ecd6aee824f107a625690374", + "sha256:c87c22bd6a987aca976e3d3e23806d17f65426191db36d40da4ae16a6a494cbc", + "sha256:c9ac1c2678abf9270e7228133e5b77c6c3c930ad33a3c1dfbdd76ff2c33b7b50", + "sha256:d0e5ffc763678d48ecc8da836f2ae2dd1b6eb2d27a48671066f91694e575173c", + "sha256:d0f402e787e6e7ee7876c8b05e2fe6464820d9f35ba3f172e95b5f8b699f6c7f", + "sha256:d222a9ed082cd9f38b58923775152003765016342a12f08f8c123bf893461f28", 
+ "sha256:d94245caa3c61f760c4ce4953cfa76e7739b6f2cbfc94cc46fff6c050c2390c5", + "sha256:de9a2792612ec6def556d1dc621fd6b2073aff015d64fba9f3e53349ad292734", + "sha256:e2f5a398b5e77bb01b23d92872255e1bcb3c0c719a3be40b8df146570fe7781a", + "sha256:e8dd53a8706b15bc0e34f00e6150fbefb35d2fd9235d095b4f83b3c5ed4fa11d", + "sha256:e9eb3cff1b7d71aa50c89a0536f469cb8d6dcdd585d8f14fb8500d822f3bdee4", + "sha256:ed628c1431100b0b65387419551e822987396bee3c088a15d68446d92f554e0c", + "sha256:ef7938a78447174e2616be223f496ddccdbf7854f7bf2ce716dbccd958cc7d13", + "sha256:f1c70249b15e4ce1a7d5340c97670a95f305ca79f376887759b43bb33288c973", + "sha256:f3c7363a8cb8c5238878ec96c5eb0fc5ca2cb11fc0c7d2379863d342c6ee367a", + "sha256:fbbcc6b0639aa09b9649f36f1bcb347b19403fe44109948392fbb5ea69e48c3e", + "sha256:febffa5b1eda6622d44b245b0685aff6fb555ce0ed734e2d7b1c3acd018a2cff", + "sha256:ff836cd4041e16003549449cc0a5e372f6b6f871eb89007ab0ee18fb2800fded" + ], + "markers": "python_version >= '2.5' and python_version not in '3.0, 3.1, 3.2, 3.3'", + "version": "==3.19.2" + }, + "six": { + "hashes": [ + "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926", + "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254" + ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", + "version": "==1.16.0" + }, + "smmap": { + "hashes": [ + "sha256:dceeb6c0028fdb6734471eb07c0cd2aae706ccaecab45965ee83f11c8d3b1f62", + "sha256:e6d8668fa5f93e706934a62d7b4db19c8d9eb8cf2adbb75ef1b675aa332b69da" + ], + "markers": "python_version >= '3.7'", + "version": "==5.0.1" + }, + "sniffio": { + "hashes": [ + "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2", + "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc" + ], + "markers": "python_version >= '3.7'", + "version": "==1.3.1" + }, + "soupsieve": { + "hashes": [ + "sha256:5663d5a7b3bfaeee0bc4372e7fc48f9cff4940b3eec54a6451cc5299f1097690", + 
"sha256:eaa337ff55a1579b6549dc679565eac1e3d000563bcb1c8ab0d0fefbc0c2cdc7" + ], + "markers": "python_version >= '3.8'", + "version": "==2.5" + }, + "sqlalchemy": { + "hashes": [ + "sha256:01d10638a37460616708062a40c7b55f73e4d35eaa146781c683e0fa7f6c43fb", + "sha256:04c487305ab035a9548f573763915189fc0fe0824d9ba28433196f8436f1449c", + "sha256:0dfefdb3e54cd15f5d56fd5ae32f1da2d95d78319c1f6dfb9bcd0eb15d603d5d", + "sha256:0f3ca96af060a5250a8ad5a63699180bc780c2edf8abf96c58af175921df847a", + "sha256:205f5a2b39d7c380cbc3b5dcc8f2762fb5bcb716838e2d26ccbc54330775b003", + "sha256:25664e18bef6dc45015b08f99c63952a53a0a61f61f2e48a9e70cec27e55f699", + "sha256:296195df68326a48385e7a96e877bc19aa210e485fa381c5246bc0234c36c78e", + "sha256:2a0732dffe32333211801b28339d2a0babc1971bc90a983e3035e7b0d6f06b93", + "sha256:3071ad498896907a5ef756206b9dc750f8e57352113c19272bdfdc429c7bd7de", + "sha256:308ef9cb41d099099fffc9d35781638986870b29f744382904bf9c7dadd08513", + "sha256:334184d1ab8f4c87f9652b048af3f7abea1c809dfe526fb0435348a6fef3d380", + "sha256:38b624e5cf02a69b113c8047cf7f66b5dfe4a2ca07ff8b8716da4f1b3ae81567", + "sha256:471fcb39c6adf37f820350c28aac4a7df9d3940c6548b624a642852e727ea586", + "sha256:4c142852ae192e9fe5aad5c350ea6befe9db14370b34047e1f0f7cf99e63c63b", + "sha256:4f6d971255d9ddbd3189e2e79d743ff4845c07f0633adfd1de3f63d930dbe673", + "sha256:52c8011088305476691b8750c60e03b87910a123cfd9ad48576d6414b6ec2a1d", + "sha256:52de4736404e53c5c6a91ef2698c01e52333988ebdc218f14c833237a0804f1b", + "sha256:5c7b02525ede2a164c5fa5014915ba3591730f2cc831f5be9ff3b7fd3e30958e", + "sha256:5ef3fbccb4058355053c51b82fd3501a6e13dd808c8d8cd2561e610c5456013c", + "sha256:5f20cb0a63a3e0ec4e169aa8890e32b949c8145983afa13a708bc4b0a1f30e03", + "sha256:61405ea2d563407d316c63a7b5271ae5d274a2a9fbcd01b0aa5503635699fa1e", + "sha256:77d29cb6c34b14af8a484e831ab530c0f7188f8efed1c6a833a2c674bf3c26ec", + "sha256:7b184e3de58009cc0bf32e20f137f1ec75a32470f5fede06c58f6c355ed42a72", + 
"sha256:7e614d7a25a43a9f54fcce4675c12761b248547f3d41b195e8010ca7297c369c", + "sha256:8197d6f7a3d2b468861ebb4c9f998b9df9e358d6e1cf9c2a01061cb9b6cf4e41", + "sha256:87a1d53a5382cdbbf4b7619f107cc862c1b0a4feb29000922db72e5a66a5ffc0", + "sha256:8c37f1050feb91f3d6c32f864d8e114ff5545a4a7afe56778d76a9aec62638ba", + "sha256:90453597a753322d6aa770c5935887ab1fc49cc4c4fdd436901308383d698b4b", + "sha256:988569c8732f54ad3234cf9c561364221a9e943b78dc7a4aaf35ccc2265f1930", + "sha256:99a1e69d4e26f71e750e9ad6fdc8614fbddb67cfe2173a3628a2566034e223c7", + "sha256:9b19836ccca0d321e237560e475fd99c3d8655d03da80c845c4da20dda31b6e1", + "sha256:9d6753305936eddc8ed190e006b7bb33a8f50b9854823485eed3a886857ab8d1", + "sha256:a13b917b4ffe5a0a31b83d051d60477819ddf18276852ea68037a144a506efb9", + "sha256:a88913000da9205b13f6f195f0813b6ffd8a0c0c2bd58d499e00a30eb508870c", + "sha256:b2a0e3cf0caac2085ff172c3faacd1e00c376e6884b5bc4dd5b6b84623e29e4f", + "sha256:b5d7ed79df55a731749ce65ec20d666d82b185fa4898430b17cb90c892741520", + "sha256:bab41acf151cd68bc2b466deae5deeb9e8ae9c50ad113444151ad965d5bf685b", + "sha256:bd9566b8e58cabd700bc367b60e90d9349cd16f0984973f98a9a09f9c64e86f0", + "sha256:bda7ce59b06d0f09afe22c56714c65c957b1068dee3d5e74d743edec7daba552", + "sha256:c2f9c762a2735600654c654bf48dad388b888f8ce387b095806480e6e4ff6907", + "sha256:c4520047006b1d3f0d89e0532978c0688219857eb2fee7c48052560ae76aca1e", + "sha256:d96710d834a6fb31e21381c6d7b76ec729bd08c75a25a5184b1089141356171f", + "sha256:dba622396a3170974f81bad49aacebd243455ec3cc70615aeaef9e9613b5bca5", + "sha256:dc4ee2d4ee43251905f88637d5281a8d52e916a021384ec10758826f5cbae305", + "sha256:dddaae9b81c88083e6437de95c41e86823d150f4ee94bf24e158a4526cbead01", + "sha256:de7202ffe4d4a8c1e3cde1c03e01c1a3772c92858837e8f3879b497158e4cb44", + "sha256:e5bbe55e8552019c6463709b39634a5fc55e080d0827e2a3a11e18eb73f5cdbd", + "sha256:ea311d4ee9a8fa67f139c088ae9f905fcf0277d6cd75c310a21a88bf85e130f5", + "sha256:fecd5089c4be1bcc37c35e9aa678938d2888845a134dd016de457b942cf5a758" 
+ ], + "index": "pypi", + "markers": "python_version >= '3.7'", + "version": "==2.0.29" + }, + "sqlitedict": { + "hashes": [ + "sha256:2affcc301aacd4da7511692601ecbde392294205af418498f7d6d3ec0dbcad56" + ], + "index": "pypi", + "version": "==1.7.0" + }, + "starlette": { + "hashes": [ + "sha256:6fe59f29268538e5d0d182f2791a479a0c64638e6935d1c6989e63fb2699c6ee", + "sha256:9af890290133b79fc3db55474ade20f6220a364a0402e0b556e7cd5e1e093823" + ], + "markers": "python_version >= '3.8'", + "version": "==0.37.2" + }, + "swagger-spec-validator": { + "hashes": [ + "sha256:16a5ce08c772824a77b1a4a05efc047d72eef1ed53fb969dfe0a18f437ac30a8", + "sha256:174b5de4ab0899df9a57d35c880aaa515511c4b8b578d9d519b09a9596537055" + ], + "markers": "python_version >= '3.7'", + "version": "==3.0.3" + }, + "sympy": { + "hashes": [ + "sha256:c3588cd4295d0c0f603d0f2ae780587e64e2efeedb3521e46b9bb1d08d184fa5", + "sha256:ebf595c8dac3e0fdc4152c51878b498396ec7f30e7a914d6071e674d49420fb8" + ], + "markers": "python_version >= '3.8'", + "version": "==1.12" + }, + "tabulate": { + "hashes": [ + "sha256:0095b12bf5966de529c0feb1fa08671671b3368eec77d7ef7ab114be2c068b3c", + "sha256:024ca478df22e9340661486f85298cff5f6dcdba14f3813e8830015b9ed1948f" + ], + "markers": "python_version >= '3.7'", + "version": "==0.9.0" + }, + "text-unidecode": { + "hashes": [ + "sha256:1311f10e8b895935241623731c2ba64f4c455287888b18189350b67134a822e8", + "sha256:bad6603bb14d279193107714b288be206cac565dfa49aa5b105294dd5c4aab93" + ], + "version": "==1.3" + }, + "threadpoolctl": { + "hashes": [ + "sha256:8f4c689a65b23e5ed825c8436a92b818aac005e0f3715f6a1664d7c7ee29d262", + "sha256:f11b491a03661d6dd7ef692dd422ab34185d982466c49c8f98c8f716b5c93196" + ], + "markers": "python_version >= '3.8'", + "version": "==3.4.0" + }, + "tiktoken": { + "hashes": [ + "sha256:05b344c61779f815038292a19a0c6eb7098b63c8f865ff205abb9ea1b656030e", + "sha256:07f229a5eb250b6403a61200199cecf0aac4aa23c3ecc1c11c1ca002cbb8f159", + 
"sha256:0c1a3a5d33846f8cd9dd3b7897c1d45722f48625a587f8e6f3d3e85080559be8", + "sha256:0ef917fad0bccda07bfbad835525bbed5f3ab97a8a3e66526e48cdc3e7beacf7", + "sha256:168d718f07a39b013032741867e789971346df8e89983fe3c0ef3fbd5a0b1cb9", + "sha256:17cc8a4a3245ab7d935c83a2db6bb71619099d7284b884f4b2aea4c74f2f83e3", + "sha256:1ccb7a111ee76af5d876a729a347f8747d5ad548e1487eeea90eaf58894b3138", + "sha256:1f5f0f2ed67ba16373f9a6013b68da298096b27cd4e1cf276d2d3868b5c7efd1", + "sha256:277de84ccd8fa12730a6b4067456e5cf72fef6300bea61d506c09e45658d41ac", + "sha256:284aebcccffe1bba0d6571651317df6a5b376ff6cfed5aeb800c55df44c78177", + "sha256:293cb8669757301a3019a12d6770bd55bec38a4d3ee9978ddbe599d68976aca7", + "sha256:430bc4e650a2d23a789dc2cdca3b9e5e7eb3cd3935168d97d43518cbb1f9a911", + "sha256:432aa3be8436177b0db5a2b3e7cc28fd6c693f783b2f8722539ba16a867d0c6a", + "sha256:45577faf9a9d383b8fd683e313cf6df88b6076c034f0a16da243bb1c139340c3", + "sha256:47fdcfe11bd55376785a6aea8ad1db967db7f66ea81aed5c43fad497521819a4", + "sha256:5f1495450a54e564d236769d25bfefbf77727e232d7a8a378f97acddee08c1ae", + "sha256:6318b2bb2337f38ee954fd5efa82632c6e5ced1d52a671370fa4b2eff1355e91", + "sha256:6c4e4857d99f6fb4670e928250835b21b68c59250520a1941618b5b4194e20c3", + "sha256:702950d33d8cabc039845674107d2e6dcabbbb0990ef350f640661368df481bb", + "sha256:75af4c0b16609c2ad02581f3cdcd1fb698c7565091370bf6c0cf8624ffaba6dc", + "sha256:7bd1a288b7903aadc054b0e16ea78e3171f70b670e7372432298c686ebf9dd47", + "sha256:7c1492ab90c21ca4d11cef3a236ee31a3e279bb21b3fc5b0e2210588c4209e68", + "sha256:8bfe8a19c8b5c40d121ee7938cd9c6a278e5b97dc035fd61714b4f0399d2f7a1", + "sha256:9c44433f658064463650d61387623735641dcc4b6c999ca30bc0f8ba3fccaf5c", + "sha256:ac76e000183e3b749634968a45c7169b351e99936ef46f0d2353cd0d46c3118d", + "sha256:ace62a4ede83c75b0374a2ddfa4b76903cf483e9cb06247f566be3bf14e6beed", + "sha256:afb9a2a866ae6eef1995ab656744287a5ac95acc7e0491c33fad54d053288ad3", + "sha256:b2048e1086b48e3c8c6e2ceeac866561374cd57a84622fa49a6b245ffecb7744", 
+ "sha256:c62c05b3109fefca26fedb2820452a050074ad8e5ad9803f4652977778177d9f", + "sha256:c9f497598b9f58c99cbc0eb764b4a92272c14d5203fc713dd650b896a03a50ad", + "sha256:cefb9870fb55dca9e450e54dbf61f904aab9180ff6fe568b61f4db9564e78871", + "sha256:e095131ab6092d0769a2fda85aa260c7c383072daec599ba9d8b149d2a3f4d8b", + "sha256:e2b380c5b7751272015400b26144a2bab4066ebb8daae9c3cd2a92c3b508fe5a", + "sha256:e65e8bd6f3f279d80f1e1fbd5f588f036b9a5fa27690b7f0cc07021f1dfa0839", + "sha256:e8d49d076058f23254f2aff9af603863c5c5f9ab095bc896bceed04f8f0b013a", + "sha256:fb7d2ccbf1a7784810aff6b80b4012fb42c6fc37eaa68cb3b553801a5cc2d1fc" + ], + "index": "pypi", + "markers": "python_version >= '3.8'", + "version": "==0.6.0" + }, + "tokenizers": { + "hashes": [ + "sha256:01d62812454c188306755c94755465505836fd616f75067abcae529c35edeb57", + "sha256:02e81bf089ebf0e7f4df34fa0207519f07e66d8491d963618252f2e0729e0b46", + "sha256:04ce49e82d100594715ac1b2ce87d1a36e61891a91de774755f743babcd0dd52", + "sha256:07f9295349bbbcedae8cefdbcfa7f686aa420be8aca5d4f7d1ae6016c128c0c5", + "sha256:08a44864e42fa6d7d76d7be4bec62c9982f6f6248b4aa42f7302aa01e0abfd26", + "sha256:0b5ca92bfa717759c052e345770792d02d1f43b06f9e790ca0a1db62838816f3", + "sha256:0b9394bd204842a2a1fd37fe29935353742be4a3460b6ccbaefa93f58a8df43d", + "sha256:0bcce02bf1ad9882345b34d5bd25ed4949a480cf0e656bbd468f4d8986f7a3f1", + "sha256:0e64bfde9a723274e9a71630c3e9494ed7b4c0f76a1faacf7fe294cd26f7ae7c", + "sha256:10a707cc6c4b6b183ec5dbfc5c34f3064e18cf62b4a938cb41699e33a99e03c1", + "sha256:16baac68651701364b0289979ecec728546133e8e8fe38f66fe48ad07996b88b", + "sha256:1de5bc8652252d9357a666e609cb1453d4f8e160eb1fb2830ee369dd658e8975", + "sha256:1f0360cbea28ea99944ac089c00de7b2e3e1c58f479fb8613b6d8d511ce98267", + "sha256:2e8a3dd055e515df7054378dc9d6fa8c8c34e1f32777fb9a01fea81496b3f9d3", + "sha256:3174c76efd9d08f836bfccaca7cfec3f4d1c0a4cf3acbc7236ad577cc423c840", + "sha256:35583cd46d16f07c054efd18b5d46af4a2f070a2dd0a47914e66f3ff5efb2b1e", + 
"sha256:39c1ec76ea1027438fafe16ecb0fb84795e62e9d643444c1090179e63808c69d", + "sha256:3b11853f17b54c2fe47742c56d8a33bf49ce31caf531e87ac0d7d13d327c9334", + "sha256:427c4f0f3df9109314d4f75b8d1f65d9477033e67ffaec4bca53293d3aca286d", + "sha256:43350270bfc16b06ad3f6f07eab21f089adb835544417afda0f83256a8bf8b75", + "sha256:453e4422efdfc9c6b6bf2eae00d5e323f263fff62b29a8c9cd526c5003f3f642", + "sha256:4692ab92f91b87769d950ca14dbb61f8a9ef36a62f94bad6c82cc84a51f76f6a", + "sha256:4ad23d37d68cf00d54af184586d79b84075ada495e7c5c0f601f051b162112dc", + "sha256:4f3fefdc0446b1a1e6d81cd4c07088ac015665d2e812f6dbba4a06267d1a2c95", + "sha256:56ae39d4036b753994476a1b935584071093b55c7a72e3b8288e68c313ca26e7", + "sha256:5c88d1481f1882c2e53e6bb06491e474e420d9ac7bdff172610c4f9ad3898059", + "sha256:61b7fe8886f2e104d4caf9218b157b106207e0f2a4905c9c7ac98890688aabeb", + "sha256:621d670e1b1c281a1c9698ed89451395d318802ff88d1fc1accff0867a06f153", + "sha256:6258c2ef6f06259f70a682491c78561d492e885adeaf9f64f5389f78aa49a051", + "sha256:6309271f57b397aa0aff0cbbe632ca9d70430839ca3178bf0f06f825924eca22", + "sha256:638e43936cc8b2cbb9f9d8dde0fe5e7e30766a3318d2342999ae27f68fdc9bd6", + "sha256:63c38f45d8f2a2ec0f3a20073cccb335b9f99f73b3c69483cd52ebc75369d8a1", + "sha256:670b802d4d82bbbb832ddb0d41df7015b3e549714c0e77f9bed3e74d42400fbe", + "sha256:6852c5b2a853b8b0ddc5993cd4f33bfffdca4fcc5d52f89dd4b8eada99379285", + "sha256:6b2da5c32ed869bebd990c9420df49813709e953674c0722ff471a116d97b22d", + "sha256:6c330c0eb815d212893c67a032e9dc1b38a803eccb32f3e8172c19cc69fbb439", + "sha256:6f8a20266e695ec9d7a946a019c1d5ca4eddb6613d4f466888eee04f16eedb85", + "sha256:706a37cc5332f85f26efbe2bdc9ef8a9b372b77e4645331a405073e4b3a8c1c6", + "sha256:71e3ec71f0e78780851fef28c2a9babe20270404c921b756d7c532d280349214", + "sha256:72791f9bb1ca78e3ae525d4782e85272c63faaef9940d92142aa3eb79f3407a3", + "sha256:76951121890fea8330d3a0df9a954b3f2a37e3ec20e5b0530e9a0044ca2e11fe", + "sha256:78e769eb3b2c79687d9cb0f89ef77223e8e279b75c0a968e637ca7043a84463f", 
+ "sha256:7c9d5b6c0e7a1e979bec10ff960fae925e947aab95619a6fdb4c1d8ff3708ce3", + "sha256:7fb297edec6c6841ab2e4e8f357209519188e4a59b557ea4fafcf4691d1b4c98", + "sha256:7ff898780a155ea053f5d934925f3902be2ed1f4d916461e1a93019cc7250837", + "sha256:82c8b8063de6c0468f08e82c4e198763e7b97aabfe573fd4cf7b33930ca4df77", + "sha256:85aa3ab4b03d5e99fdd31660872249df5e855334b6c333e0bc13032ff4469c4a", + "sha256:89183e55fb86e61d848ff83753f64cded119f5d6e1f553d14ffee3700d0a4a49", + "sha256:8a6298bde623725ca31c9035a04bf2ef63208d266acd2bed8c2cb7d2b7d53ce6", + "sha256:8b01afb7193d47439f091cd8f070a1ced347ad0f9144952a30a41836902fe09e", + "sha256:952078130b3d101e05ecfc7fc3640282d74ed26bcf691400f872563fca15ac97", + "sha256:952b80dac1a6492170f8c2429bd11fcaa14377e097d12a1dbe0ef2fb2241e16c", + "sha256:9620b78e0b2d52ef07b0d428323fb34e8ea1219c5eac98c2596311f20f1f9266", + "sha256:9ed240c56b4403e22b9584ee37d87b8bfa14865134e3e1c3fb4b2c42fafd3256", + "sha256:a179856d1caee06577220ebcfa332af046d576fb73454b8f4d4b0ba8324423ea", + "sha256:a2b718f316b596f36e1dae097a7d5b91fc5b85e90bf08b01ff139bd8953b25af", + "sha256:ac11016d0a04aa6487b1513a3a36e7bee7eec0e5d30057c9c0408067345c48d2", + "sha256:ad57d59341710b94a7d9dbea13f5c1e7d76fd8d9bcd944a7a6ab0b0da6e0cc66", + "sha256:b07c538ba956843833fee1190cf769c60dc62e1cf934ed50d77d5502194d63b1", + "sha256:b279ab506ec4445166ac476fb4d3cc383accde1ea152998509a94d82547c8e2a", + "sha256:b2edbc75744235eea94d595a8b70fe279dd42f3296f76d5a86dde1d46e35f574", + "sha256:b342d2ce8fc8d00f376af068e3274e2e8649562e3bc6ae4a67784ded6b99428d", + "sha256:b4399b59d1af5645bcee2072a463318114c39b8547437a7c2d6a186a1b5a0e2d", + "sha256:b4c89aa46c269e4e70c4d4f9d6bc644fcc39bb409cb2a81227923404dd6f5227", + "sha256:b70bfbe3a82d3e3fb2a5e9b22a39f8d1740c96c68b6ace0086b39074f08ab89a", + "sha256:b82931fa619dbad979c0ee8e54dd5278acc418209cc897e42fac041f5366d626", + "sha256:bac0b0eb952412b0b196ca7a40e7dce4ed6f6926489313414010f2e6b9ec2adf", + 
"sha256:bb9dfe7dae85bc6119d705a76dc068c062b8b575abe3595e3c6276480e67e3f1", + "sha256:bcd266ae85c3d39df2f7e7d0e07f6c41a55e9a3123bb11f854412952deacd828", + "sha256:bea6f9947e9419c2fda21ae6c32871e3d398cba549b93f4a65a2d369662d9403", + "sha256:c27b99889bd58b7e301468c0838c5ed75e60c66df0d4db80c08f43462f82e0d3", + "sha256:c2a0d47a89b48d7daa241e004e71fb5a50533718897a4cd6235cb846d511a478", + "sha256:c5c2ff13d157afe413bf7e25789879dd463e5a4abfb529a2d8f8473d8042e28f", + "sha256:c85cf76561fbd01e0d9ea2d1cbe711a65400092bc52b5242b16cfd22e51f0c58", + "sha256:ca407133536f19bdec44b3da117ef0d12e43f6d4b56ac4c765f37eca501c7bda", + "sha256:cbf001afbbed111a79ca47d75941e9e5361297a87d186cbfc11ed45e30b5daba", + "sha256:ce05fde79d2bc2e46ac08aacbc142bead21614d937aac950be88dc79f9db9022", + "sha256:d16ff18907f4909dca9b076b9c2d899114dd6abceeb074eca0c93e2353f943aa", + "sha256:d26194ef6c13302f446d39972aaa36a1dda6450bc8949f5eb4c27f51191375bd", + "sha256:d8c5d59d7b59885eab559d5bc082b2985555a54cda04dda4c65528d90ad252ad", + "sha256:d924204a3dbe50b75630bd16f821ebda6a5f729928df30f582fb5aade90c818a", + "sha256:dadc509cc8a9fe460bd274c0e16ac4184d0958117cf026e0ea8b32b438171594", + "sha256:dd26e3afe8a7b61422df3176e06664503d3f5973b94f45d5c45987e1cb711876", + "sha256:ddf672ed719b4ed82b51499100f5417d7d9f6fb05a65e232249268f35de5ed14", + "sha256:dfedf31824ca4915b511b03441784ff640378191918264268e6923da48104acc", + "sha256:e28cab1582e0eec38b1f38c1c1fb2e56bce5dc180acb1724574fc5f47da2a4fe", + "sha256:e742d76ad84acbdb1a8e4694f915fe59ff6edc381c97d6dfdd054954e3478ad4", + "sha256:e83a31c9cf181a0a3ef0abad2b5f6b43399faf5da7e696196ddd110d332519ee", + "sha256:e8d1ed93beda54bbd6131a2cb363a576eac746d5c26ba5b7556bc6f964425594", + "sha256:e8ff5b90eabdcdaa19af697885f70fe0b714ce16709cf43d4952f1f85299e73a", + "sha256:ec11802450a2487cdf0e634b750a04cbdc1c4d066b97d94ce7dd2cb51ebb325b", + "sha256:ecb2651956eea2aa0a2d099434134b1b68f1c31f9a5084d6d53f08ed43d45ff2", + "sha256:ed69af290c2b65169f0ba9034d1dc39a5db9459b32f1dd8b5f3f32a3fcf06eab", 
+ "sha256:eddd5783a4a6309ce23432353cdb36220e25cbb779bfa9122320666508b44b88", + "sha256:ee59e6680ed0fdbe6b724cf38bd70400a0c1dd623b07ac729087270caeac88e3", + "sha256:f03727225feaf340ceeb7e00604825addef622d551cbd46b7b775ac834c1e1c4", + "sha256:f3bbb7a0c5fcb692950b041ae11067ac54826204318922da754f908d95619fbc", + "sha256:f8a9c828277133af13f3859d1b6bf1c3cb6e9e1637df0e45312e6b7c2e622b1f", + "sha256:f97660f6c43efd3e0bfd3f2e3e5615bf215680bad6ee3d469df6454b8c6e8256", + "sha256:f9939ca7e58c2758c01b40324a59c034ce0cebad18e0d4563a9b1beab3018243" + ], + "markers": "python_version >= '3.7'", + "version": "==0.19.1" + }, + "toml": { + "hashes": [ + "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b", + "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f" + ], + "index": "pypi", + "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'", + "version": "==0.10.2" + }, + "torch": { + "hashes": [ + "sha256:01f947d4dbae6631f33040521c2a7c32fd835d67d190083db154c54e53d6e34f", + "sha256:183b17fced6d344cd93a385a0c5f98e3f31abd254b0aed4741e921115d8de7a8", + "sha256:4377e0a7fe8ff8ffc4f7c9c6130c1dcd3874050ae4fc28b7ff1d35234fbca423", + "sha256:565c16fb26b035f3845a019732d292d7a167ef15b9732dc8e26ba32dc163436d", + "sha256:71801c8c77d2d42f81b220fa15769d4ba48f8f977ca89e7ba928af0c33bcdce3", + "sha256:796ce23ee21da57157f10baee7ee5244c6cddb13186408b7bdd9b5f8dea2ae19", + "sha256:a8eda58ee69e6b0eeab2c25674da6a99d8bbf1e6bf4fda96761f953031097b08", + "sha256:d586b01aaa4ba15ffc3892b87d44803a1138d6372a6eea4d0290ef68f9c809cb", + "sha256:e46a40d6a1055a4a4ee8c8ac5a8dfb2c70b7382a00c411b0e9f2c86029b6efc4", + "sha256:fa305dc0e5a20d450cd4d288c7f5045ce788f9c65e0f7a159477f9d835f18c9b" + ], + "index": "pytorch", + "version": "==2.2.0+cu118" + }, + "tornado": { + "hashes": [ + "sha256:02ccefc7d8211e5a7f9e8bc3f9e5b0ad6262ba2fbb683a6443ecc804e5224ce0", + "sha256:10aeaa8006333433da48dec9fe417877f8bcc21f48dda8d661ae79da357b2a63", + 
"sha256:27787de946a9cffd63ce5814c33f734c627a87072ec7eed71f7fc4417bb16263", + "sha256:6f8a6c77900f5ae93d8b4ae1196472d0ccc2775cc1dfdc9e7727889145c45052", + "sha256:71ddfc23a0e03ef2df1c1397d859868d158c8276a0603b96cf86892bff58149f", + "sha256:72291fa6e6bc84e626589f1c29d90a5a6d593ef5ae68052ee2ef000dfd273dee", + "sha256:88b84956273fbd73420e6d4b8d5ccbe913c65d31351b4c004ae362eba06e1f78", + "sha256:e43bc2e5370a6a8e413e1e1cd0c91bedc5bd62a74a532371042a18ef19e10579", + "sha256:f0251554cdd50b4b44362f73ad5ba7126fc5b2c2895cc62b14a1c2d7ea32f212", + "sha256:f7894c581ecdcf91666a0912f18ce5e757213999e183ebfc2c3fdbf4d5bd764e", + "sha256:fd03192e287fbd0899dd8f81c6fb9cbbc69194d2074b38f384cb6fa72b80e9c2" + ], + "markers": "python_version >= '3.8'", + "version": "==6.4" + }, + "tqdm": { + "hashes": [ + "sha256:1ee4f8a893eb9bef51c6e35730cebf234d5d0b6bd112b0271e10ed7c24a02bd9", + "sha256:6cd52cdf0fef0e0f543299cfc96fec90d7b8a7e88745f411ec33eb44d5ed3531" + ], + "index": "pypi", + "markers": "python_version >= '3.7'", + "version": "==4.66.2" + }, + "transformers": { + "hashes": [ + "sha256:55e1697e6f18b58273e7117bb469cdffc11be28995462d8d5e422fef38d2de36", + "sha256:9d5ee0c8142a60501faf9e49a0b42f8e9cb8611823bce4f195a9325a6816337e" + ], + "index": "pypi", + "markers": "python_full_version >= '3.8.0'", + "version": "==4.40.1" + }, + "triton": { + "hashes": [ + "sha256:0af58716e721460a61886668b205963dc4d1e4ac20508cc3f623aef0d70283d5", + "sha256:227cc6f357c5efcb357f3867ac2a8e7ecea2298cd4606a8ba1e931d1d5a947df", + "sha256:a2294514340cfe4e8f4f9e5c66c702744c4a117d25e618bd08469d0bfed1e2e5", + "sha256:b8ce26093e539d727e7cf6f6f0d932b1ab0574dc02567e684377630d86723ace", + "sha256:da58a152bddb62cafa9a857dd2bc1f886dbf9f9c90a2b5da82157cd2b34392b0", + "sha256:e8fe46d3ab94a8103e291bd44c741cc294b91d1d81c1a2888254cbf7ff846dab" + ], + "markers": "platform_system == 'Linux' and platform_machine == 'x86_64'", + "version": "==2.2.0" + }, + "types-python-dateutil": { + "hashes": [ + 
"sha256:5d2f2e240b86905e40944dd787db6da9263f0deabef1076ddaed797351ec0202", + "sha256:6b8cb66d960771ce5ff974e9dd45e38facb81718cc1e208b10b1baccbfdbee3b" + ], + "markers": "python_version >= '3.8'", + "version": "==2.9.0.20240316" + }, + "typing-extensions": { + "hashes": [ + "sha256:83f085bd5ca59c80295fc2a82ab5dac679cbe02b9f33f7d83af68e241bea51b0", + "sha256:c1f94d72897edaf4ce775bb7558d5b79d8126906a14ea5ed1635921406c0387a" + ], + "markers": "python_version >= '3.8'", + "version": "==4.11.0" + }, + "tzdata": { + "hashes": [ + "sha256:2674120f8d891909751c38abcdfd386ac0a5a1127954fbc332af6b5ceae07efd", + "sha256:9068bc196136463f5245e51efda838afa15aaeca9903f49050dfa2679db4d252" + ], + "markers": "python_version >= '2'", + "version": "==2024.1" + }, + "uri-template": { + "hashes": [ + "sha256:0e00f8eb65e18c7de20d595a14336e9f337ead580c70934141624b6d1ffdacc7", + "sha256:a44a133ea12d44a0c0f06d7d42a52d71282e77e2f937d8abd5655b8d56fc1363" + ], + "version": "==1.3.0" + }, + "urllib3": { + "hashes": [ + "sha256:450b20ec296a467077128bff42b73080516e71b56ff59a60a02bef2232c4fa9d", + "sha256:d0570876c61ab9e520d776c38acbbb5b05a776d3f9ff98a5c8fd5162a444cf19" + ], + "markers": "python_version >= '3.8'", + "version": "==2.2.1" + }, + "uvicorn": { + "hashes": [ + "sha256:2c2aac7ff4f4365c206fd773a39bf4ebd1047c238f8b8268ad996829323473de", + "sha256:6a69214c0b6a087462412670b3ef21224fa48cae0e452b5883e8e8bdfdd11dd0" + ], + "markers": "python_version >= '3.8'", + "version": "==0.29.0" + }, + "wcwidth": { + "hashes": [ + "sha256:3da69048e4540d84af32131829ff948f1e022c1c6bdb8d6102117aac784f6859", + "sha256:72ea0c06399eb286d978fdedb6923a9eb47e1c486ce63e9b4e64fc18303972b5" + ], + "version": "==0.2.13" + }, + "webcolors": { + "hashes": [ + "sha256:29bc7e8752c0a1bd4a1f03c14d6e6a72e93d82193738fa860cbff59d0fcc11bf", + "sha256:c225b674c83fa923be93d235330ce0300373d02885cef23238813b0d5668304a" + ], + "version": "==1.13" + }, + "webencodings": { + "hashes": [ + 
"sha256:a0af1213f3c2226497a97e2b3aa01a7e4bee4f403f95be16fc9acd2947514a78", + "sha256:b36a1c245f2d304965eb4e0a82848379241dc04b865afcc4aab16748587e1923" + ], + "version": "==0.5.1" + }, + "websocket-client": { + "hashes": [ + "sha256:17b44cc997f5c498e809b22cdf2d9c7a9e71c02c8cc2b6c56e7c2d1239bfa526", + "sha256:3239df9f44da632f96012472805d40a23281a991027ce11d2f45a6f24ac4c3da" + ], + "markers": "python_version >= '3.8'", + "version": "==1.8.0" + }, + "xmod": { + "hashes": [ + "sha256:38c76486b9d672c546d57d8035df0beb7f4a9b088bc3fb2de5431ae821444377", + "sha256:a24e9458a4853489042522bdca9e50ee2eac5ab75c809a91150a8a7f40670d48" + ], + "markers": "python_version >= '3.8'", + "version": "==1.8.1" + }, + "xxhash": { + "hashes": [ + "sha256:00f2fdef6b41c9db3d2fc0e7f94cb3db86693e5c45d6de09625caad9a469635b", + "sha256:0379d6cf1ff987cd421609a264ce025e74f346e3e145dd106c0cc2e3ec3f99a9", + "sha256:0aac5010869240e95f740de43cd6a05eae180c59edd182ad93bf12ee289484fa", + "sha256:0c786a6cd74e8765c6809892a0d45886e7c3dc54de4985b4a5eb8b630f3b8e3b", + "sha256:0e041ce5714f95251a88670c114b748bca3bf80cc72400e9f23e6d0d59cf2681", + "sha256:10e0a619cdd1c0980e25eb04e30fe96cf8f4324758fa497080af9c21a6de573f", + "sha256:11f11357c86d83e53719c592021fd524efa9cf024dc7cb1dfb57bbbd0d8713f2", + "sha256:1d03f1c0d16d24ea032e99f61c552cb2b77d502e545187338bea461fde253583", + "sha256:1d0ae4c2e7698adef58710d6e7a32ff518b66b98854b1c68e70eee504ad061d8", + "sha256:200a5a3ad9c7c0c02ed1484a1d838b63edcf92ff538770ea07456a3732c577f4", + "sha256:2070b6d5bbef5ee031666cf21d4953c16e92c2f8a24a94b5c240f8995ba3b1d0", + "sha256:21287bcdd299fdc3328cc0fbbdeaa46838a1c05391264e51ddb38a3f5b09611f", + "sha256:23cfd9ca09acaf07a43e5a695143d9a21bf00f5b49b15c07d5388cadf1f9ce11", + "sha256:248d3e83d119770f96003271fe41e049dd4ae52da2feb8f832b7a20e791d2920", + "sha256:25dc66be3db54f8a2d136f695b00cfe88018e59ccff0f3b8f545869f376a8a46", + "sha256:2a8ba6181514681c2591840d5632fcf7356ab287d4aff1c8dea20f3c78097088", + 
"sha256:2be491723405e15cc099ade1280133ccfbf6322d2ef568494fb7d07d280e7eee", + "sha256:312eba88ffe0a05e332e3a6f9788b73883752be63f8588a6dc1261a3eaaaf2b2", + "sha256:36ad4457644c91a966f6fe137d7467636bdc51a6ce10a1d04f365c70d6a16d7e", + "sha256:3b685fab18876b14a8f94813fa2ca80cfb5ab6a85d31d5539b7cd749ce9e3624", + "sha256:4178f78d70e88f1c4a89ff1ffe9f43147185930bb962ee3979dba15f2b1cc799", + "sha256:419ffe34c17ae2df019a4685e8d3934d46b2e0bbe46221ab40b7e04ed9f11137", + "sha256:41ddeae47cf2828335d8d991f2d2b03b0bdc89289dc64349d712ff8ce59d0647", + "sha256:431625fad7ab5649368c4849d2b49a83dc711b1f20e1f7f04955aab86cd307bc", + "sha256:43984c0a92f06cac434ad181f329a1445017c33807b7ae4f033878d860a4b0f2", + "sha256:450401f42bbd274b519d3d8dcf3c57166913381a3d2664d6609004685039f9d3", + "sha256:4603a0f642a1e8d7f3ba5c4c25509aca6a9c1cc16f85091004a7028607ead663", + "sha256:4c76a77dbd169450b61c06fd2d5d436189fc8ab7c1571d39265d4822da16df22", + "sha256:4cb11d8debab1626181633d184b2372aaa09825bde709bf927704ed72765bed1", + "sha256:543c7fcbc02bbb4840ea9915134e14dc3dc15cbd5a30873a7a5bf66039db97ec", + "sha256:562d8b8f783c6af969806aaacf95b6c7b776929ae26c0cd941d54644ea7ef51e", + "sha256:58c49083801885273e262c0f5bbeac23e520564b8357fbb18fb94ff09d3d3ea5", + "sha256:595b252943b3552de491ff51e5bb79660f84f033977f88f6ca1605846637b7c6", + "sha256:5bef2a7dc7b4f4beb45a1edbba9b9194c60a43a89598a87f1a0226d183764189", + "sha256:5dab508ac39e0ab988039bc7f962c6ad021acd81fd29145962b068df4148c476", + "sha256:6066d88c9329ab230e18998daec53d819daeee99d003955c8db6fc4971b45ca3", + "sha256:6127813abc1477f3a83529b6bbcfeddc23162cece76fa69aee8f6a8a97720562", + "sha256:64da57d5ed586ebb2ecdde1e997fa37c27fe32fe61a656b77fabbc58e6fbff6e", + "sha256:665a65c2a48a72068fcc4d21721510df5f51f1142541c890491afc80451636d2", + "sha256:672b273040d5d5a6864a36287f3514efcd1d4b1b6a7480f294c4b1d1ee1b8de0", + "sha256:696b4e18b7023527d5c50ed0626ac0520edac45a50ec7cf3fc265cd08b1f4c03", + "sha256:6a9ff50a3cf88355ca4731682c168049af1ca222d1d2925ef7119c1a78e95b3b", 
+ "sha256:6d3472fd4afef2a567d5f14411d94060099901cd8ce9788b22b8c6f13c606a93", + "sha256:6d42b24d1496deb05dee5a24ed510b16de1d6c866c626c2beb11aebf3be278b9", + "sha256:6e66df260fed01ed8ea790c2913271641c58481e807790d9fca8bfd5a3c13844", + "sha256:6fa45e8cbfbadb40a920fe9ca40c34b393e0b067082d94006f7f64e70c7490a6", + "sha256:719a378930504ab159f7b8e20fa2aa1896cde050011af838af7e7e3518dd82de", + "sha256:71be94265b6c6590f0018bbf73759d21a41c6bda20409782d8117e76cd0dfa8b", + "sha256:743612da4071ff9aa4d055f3f111ae5247342931dedb955268954ef7201a71ff", + "sha256:74fb5cb9406ccd7c4dd917f16630d2e5e8cbbb02fc2fca4e559b2a47a64f4940", + "sha256:7688d7c02149a90a3d46d55b341ab7ad1b4a3f767be2357e211b4e893efbaaf6", + "sha256:7a97322e9a7440bf3c9805cbaac090358b43f650516486746f7fa482672593df", + "sha256:8106d88da330f6535a58a8195aa463ef5281a9aa23b04af1848ff715c4398fb4", + "sha256:8c59f3e46e7daf4c589e8e853d700ef6607afa037bfad32c390175da28127e8c", + "sha256:8cc07256eff0795e0f642df74ad096f8c5d23fe66bc138b83970b50fc7f7f6c5", + "sha256:911035345932a153c427107397c1518f8ce456f93c618dd1c5b54ebb22e73747", + "sha256:91dbfa55346ad3e18e738742236554531a621042e419b70ad8f3c1d9c7a16e7f", + "sha256:92693c487e39523a80474b0394645b393f0ae781d8db3474ccdcead0559ccf45", + "sha256:93805bc3233ad89abf51772f2ed3355097a5dc74e6080de19706fc447da99cd3", + "sha256:961d948b7b1c1b6c08484bbce3d489cdf153e4122c3dfb07c2039621243d8795", + "sha256:9804b9eb254d4b8cc83ab5a2002128f7d631dd427aa873c8727dba7f1f0d1c2b", + "sha256:9c0f7b2d547d72c7eda7aa817acf8791f0146b12b9eba1d4432c531fb0352228", + "sha256:9ecb6c987b62437c2f99c01e97caf8d25660bf541fe79a481d05732e5236719c", + "sha256:9f3025a0d5d8cf406a9313cd0d5789c77433ba2004b1c75439b67678e5136537", + "sha256:9fd28a9da300e64e434cfc96567a8387d9a96e824a9be1452a1e7248b7763b78", + "sha256:a15cbf3a9c40672523bdb6ea97ff74b443406ba0ab9bca10ceccd9546414bd84", + "sha256:a162840cf4de8a7cd8720ff3b4417fbc10001eefdd2d21541a8226bb5556e3bb", + 
"sha256:a55e0506fdb09640a82ec4f44171273eeabf6f371a4ec605633adb2837b5d9d5", + "sha256:a8b4977963926f60b0d4f830941c864bed16aa151206c01ad5c531636da5708e", + "sha256:a90356ead70d715fe64c30cd0969072de1860e56b78adf7c69d954b43e29d9fa", + "sha256:aabf37fb8fa27430d50507deeab2ee7b1bcce89910dd10657c38e71fee835594", + "sha256:ac56eebb364e44c85e1d9e9cc5f6031d78a34f0092fea7fc80478139369a8b4a", + "sha256:b2746035f518f0410915e247877f7df43ef3372bf36cfa52cc4bc33e85242641", + "sha256:b29728cff2c12f3d9f1d940528ee83918d803c0567866e062683f300d1d2eff3", + "sha256:b41edaf05734092f24f48c0958b3c6cbaaa5b7e024880692078c6b1f8247e2fc", + "sha256:b526015a973bfbe81e804a586b703f163861da36d186627e27524f5427b0d520", + "sha256:b5beb1c6a72fdc7584102f42c4d9df232ee018ddf806e8c90906547dfb43b2da", + "sha256:b736a2a2728ba45017cb67785e03125a79d246462dfa892d023b827007412c52", + "sha256:b9097af00ebf429cc7c0e7d2fdf28384e4e2e91008130ccda8d5ae653db71e54", + "sha256:bb11628470a6004dc71a09fe90c2f459ff03d611376c1debeec2d648f44cb693", + "sha256:bbe750d512982ee7d831838a5dee9e9848f3fb440e4734cca3f298228cc957a6", + "sha256:c09c49473212d9c87261d22c74370457cfff5db2ddfc7fd1e35c80c31a8c14ce", + "sha256:c44d584afdf3c4dbb3277e32321d1a7b01d6071c1992524b6543025fb8f4206f", + "sha256:c4bbba9b182697a52bc0c9f8ec0ba1acb914b4937cd4a877ad78a3b3eeabefb3", + "sha256:c9e1b646af61f1fc7083bb7b40536be944f1ac67ef5e360bca2d73430186971a", + "sha256:ca7783b20e3e4f3f52f093538895863f21d18598f9a48211ad757680c3bd006f", + "sha256:d6322c4291c3ff174dcd104fae41500e75dad12be6f3085d119c2c8a80956c51", + "sha256:d699b921af0dcde50ab18be76c0d832f803034d80470703700cb7df0fbec2832", + "sha256:d77d09a1113899fad5f354a1eb4f0a9afcf58cefff51082c8ad643ff890e30cf", + "sha256:dd59ed668801c3fae282f8f4edadf6dc7784db6d18139b584b6d9677ddde1b6b", + "sha256:dfd7a6cc483e20b4ad90224aeb589e64ec0f31e5610ab9957ff4314270b2bf31", + "sha256:e01226b6b6a1ffe4e6bd6d08cfcb3ca708b16f02eb06dd44f3c6e53285f03e4f", + "sha256:e17032f5a4fea0a074717fe33477cb5ee723a5f428de7563e75af64bfc1b1e10", 
+ "sha256:e867f68a8f381ea12858e6d67378c05359d3a53a888913b5f7d35fbf68939d5f", + "sha256:e9f749999ed80f3955a4af0eb18bb43993f04939350b07b8dd2f44edc98ffee9", + "sha256:ebbb1616435b4a194ce3466d7247df23499475c7ed4eb2681a1fa42ff766aff6", + "sha256:ef2e194262f5db16075caea7b3f7f49392242c688412f386d3c7b07c7733a70a", + "sha256:ef73a53fe90558a4096e3256752268a8bdc0322f4692ed928b6cd7ce06ad4fe3", + "sha256:f1d7c69a1e9ca5faa75546fdd267f214f63f52f12692f9b3a2f6467c9e67d5e7", + "sha256:f31ce76489f8601cc7b8713201ce94b4bd7b7ce90ba3353dccce7e9e1fee71fa", + "sha256:f3ff8dbd0ec97aec842476cb8ccc3e17dd288cd6ce3c8ef38bff83d6eb927817", + "sha256:fa122124d2e3bd36581dd78c0efa5f429f5220313479fb1072858188bc2d5ff1", + "sha256:faec30437919555b039a8bdbaba49c013043e8f76c999670aef146d33e05b3a0", + "sha256:fc6dbd5fc3c9886a9e041848508b7fb65fd82f94cc793253990f81617b61fe49", + "sha256:fc860d887c5cb2f524899fb8338e1bb3d5789f75fac179101920d9afddef284b", + "sha256:fd79d4087727daf4d5b8afe594b37d611ab95dc8e29fe1a7517320794837eb7d", + "sha256:fd7bddb3a5b86213cc3f2c61500c16945a1b80ecd572f3078ddbbe68f9dabdfb", + "sha256:fe0a98d990e433013f41827b62be9ab43e3cf18e08b1483fcc343bda0d691182" + ], + "markers": "python_version >= '3.7'", + "version": "==3.4.1" + }, + "xyzservices": { + "hashes": [ + "sha256:6a04f11487a6fb77d92a98984cd107fbd9157fd5e65f929add9c3d6e604ee88c", + "sha256:b83e48c5b776c9969fffcfff57b03d02b1b1cd6607a9d9c4e7f568b01ef47f4c" + ], + "markers": "python_version >= '3.8'", + "version": "==2024.4.0" + }, + "yarl": { + "hashes": [ + "sha256:008d3e808d03ef28542372d01057fd09168419cdc8f848efe2804f894ae03e51", + "sha256:03caa9507d3d3c83bca08650678e25364e1843b484f19986a527630ca376ecce", + "sha256:07574b007ee20e5c375a8fe4a0789fad26db905f9813be0f9fef5a68080de559", + "sha256:09efe4615ada057ba2d30df871d2f668af661e971dfeedf0c159927d48bbeff0", + "sha256:0d2454f0aef65ea81037759be5ca9947539667eecebca092733b2eb43c965a81", + "sha256:0e9d124c191d5b881060a9e5060627694c3bdd1fe24c5eecc8d5d7d0eb6faabc", + 
"sha256:18580f672e44ce1238b82f7fb87d727c4a131f3a9d33a5e0e82b793362bf18b4", + "sha256:1f23e4fe1e8794f74b6027d7cf19dc25f8b63af1483d91d595d4a07eca1fb26c", + "sha256:206a55215e6d05dbc6c98ce598a59e6fbd0c493e2de4ea6cc2f4934d5a18d130", + "sha256:23d32a2594cb5d565d358a92e151315d1b2268bc10f4610d098f96b147370136", + "sha256:26a1dc6285e03f3cc9e839a2da83bcbf31dcb0d004c72d0730e755b33466c30e", + "sha256:29e0f83f37610f173eb7e7b5562dd71467993495e568e708d99e9d1944f561ec", + "sha256:2b134fd795e2322b7684155b7855cc99409d10b2e408056db2b93b51a52accc7", + "sha256:2d47552b6e52c3319fede1b60b3de120fe83bde9b7bddad11a69fb0af7db32f1", + "sha256:357495293086c5b6d34ca9616a43d329317feab7917518bc97a08f9e55648455", + "sha256:35a2b9396879ce32754bd457d31a51ff0a9d426fd9e0e3c33394bf4b9036b099", + "sha256:3777ce5536d17989c91696db1d459574e9a9bd37660ea7ee4d3344579bb6f129", + "sha256:3986b6f41ad22988e53d5778f91855dc0399b043fc8946d4f2e68af22ee9ff10", + "sha256:44d8ffbb9c06e5a7f529f38f53eda23e50d1ed33c6c869e01481d3fafa6b8142", + "sha256:49a180c2e0743d5d6e0b4d1a9e5f633c62eca3f8a86ba5dd3c471060e352ca98", + "sha256:4aa9741085f635934f3a2583e16fcf62ba835719a8b2b28fb2917bb0537c1dfa", + "sha256:4b21516d181cd77ebd06ce160ef8cc2a5e9ad35fb1c5930882baff5ac865eee7", + "sha256:4b3c1ffe10069f655ea2d731808e76e0f452fc6c749bea04781daf18e6039525", + "sha256:4c7d56b293cc071e82532f70adcbd8b61909eec973ae9d2d1f9b233f3d943f2c", + "sha256:4e9035df8d0880b2f1c7f5031f33f69e071dfe72ee9310cfc76f7b605958ceb9", + "sha256:54525ae423d7b7a8ee81ba189f131054defdb122cde31ff17477951464c1691c", + "sha256:549d19c84c55d11687ddbd47eeb348a89df9cb30e1993f1b128f4685cd0ebbf8", + "sha256:54beabb809ffcacbd9d28ac57b0db46e42a6e341a030293fb3185c409e626b8b", + "sha256:566db86717cf8080b99b58b083b773a908ae40f06681e87e589a976faf8246bf", + "sha256:5a2e2433eb9344a163aced6a5f6c9222c0786e5a9e9cac2c89f0b28433f56e23", + "sha256:5aef935237d60a51a62b86249839b51345f47564208c6ee615ed2a40878dccdd", + "sha256:604f31d97fa493083ea21bd9b92c419012531c4e17ea6da0f65cacdcf5d0bd27", 
+ "sha256:63b20738b5aac74e239622d2fe30df4fca4942a86e31bf47a81a0e94c14df94f", + "sha256:686a0c2f85f83463272ddffd4deb5e591c98aac1897d65e92319f729c320eece", + "sha256:6a962e04b8f91f8c4e5917e518d17958e3bdee71fd1d8b88cdce74dd0ebbf434", + "sha256:6ad6d10ed9b67a382b45f29ea028f92d25bc0bc1daf6c5b801b90b5aa70fb9ec", + "sha256:6f5cb257bc2ec58f437da2b37a8cd48f666db96d47b8a3115c29f316313654ff", + "sha256:6fe79f998a4052d79e1c30eeb7d6c1c1056ad33300f682465e1b4e9b5a188b78", + "sha256:7855426dfbddac81896b6e533ebefc0af2f132d4a47340cee6d22cac7190022d", + "sha256:7d5aaac37d19b2904bb9dfe12cdb08c8443e7ba7d2852894ad448d4b8f442863", + "sha256:801e9264d19643548651b9db361ce3287176671fb0117f96b5ac0ee1c3530d53", + "sha256:81eb57278deb6098a5b62e88ad8281b2ba09f2f1147c4767522353eaa6260b31", + "sha256:824d6c50492add5da9374875ce72db7a0733b29c2394890aef23d533106e2b15", + "sha256:8397a3817d7dcdd14bb266283cd1d6fc7264a48c186b986f32e86d86d35fbac5", + "sha256:848cd2a1df56ddbffeb375535fb62c9d1645dde33ca4d51341378b3f5954429b", + "sha256:84fc30f71689d7fc9168b92788abc977dc8cefa806909565fc2951d02f6b7d57", + "sha256:8619d6915b3b0b34420cf9b2bb6d81ef59d984cb0fde7544e9ece32b4b3043c3", + "sha256:8a854227cf581330ffa2c4824d96e52ee621dd571078a252c25e3a3b3d94a1b1", + "sha256:8be9e837ea9113676e5754b43b940b50cce76d9ed7d2461df1af39a8ee674d9f", + "sha256:928cecb0ef9d5a7946eb6ff58417ad2fe9375762382f1bf5c55e61645f2c43ad", + "sha256:957b4774373cf6f709359e5c8c4a0af9f6d7875db657adb0feaf8d6cb3c3964c", + "sha256:992f18e0ea248ee03b5a6e8b3b4738850ae7dbb172cc41c966462801cbf62cf7", + "sha256:9fc5fc1eeb029757349ad26bbc5880557389a03fa6ada41703db5e068881e5f2", + "sha256:a00862fb23195b6b8322f7d781b0dc1d82cb3bcac346d1e38689370cc1cc398b", + "sha256:a3a6ed1d525bfb91b3fc9b690c5a21bb52de28c018530ad85093cc488bee2dd2", + "sha256:a6327976c7c2f4ee6816eff196e25385ccc02cb81427952414a64811037bbc8b", + "sha256:a7409f968456111140c1c95301cadf071bd30a81cbd7ab829169fb9e3d72eae9", + 
"sha256:a825ec844298c791fd28ed14ed1bffc56a98d15b8c58a20e0e08c1f5f2bea1be", + "sha256:a8c1df72eb746f4136fe9a2e72b0c9dc1da1cbd23b5372f94b5820ff8ae30e0e", + "sha256:a9bd00dc3bc395a662900f33f74feb3e757429e545d831eef5bb280252631984", + "sha256:aa102d6d280a5455ad6a0f9e6d769989638718e938a6a0a2ff3f4a7ff8c62cc4", + "sha256:aaaea1e536f98754a6e5c56091baa1b6ce2f2700cc4a00b0d49eca8dea471074", + "sha256:ad4d7a90a92e528aadf4965d685c17dacff3df282db1121136c382dc0b6014d2", + "sha256:b8477c1ee4bd47c57d49621a062121c3023609f7a13b8a46953eb6c9716ca392", + "sha256:ba6f52cbc7809cd8d74604cce9c14868306ae4aa0282016b641c661f981a6e91", + "sha256:bac8d525a8dbc2a1507ec731d2867025d11ceadcb4dd421423a5d42c56818541", + "sha256:bef596fdaa8f26e3d66af846bbe77057237cb6e8efff8cd7cc8dff9a62278bbf", + "sha256:c0ec0ed476f77db9fb29bca17f0a8fcc7bc97ad4c6c1d8959c507decb22e8572", + "sha256:c38c9ddb6103ceae4e4498f9c08fac9b590c5c71b0370f98714768e22ac6fa66", + "sha256:c7224cab95645c7ab53791022ae77a4509472613e839dab722a72abe5a684575", + "sha256:c74018551e31269d56fab81a728f683667e7c28c04e807ba08f8c9e3bba32f14", + "sha256:ca06675212f94e7a610e85ca36948bb8fc023e458dd6c63ef71abfd482481aa5", + "sha256:d1d2532b340b692880261c15aee4dc94dd22ca5d61b9db9a8a361953d36410b1", + "sha256:d25039a474c4c72a5ad4b52495056f843a7ff07b632c1b92ea9043a3d9950f6e", + "sha256:d5ff2c858f5f6a42c2a8e751100f237c5e869cbde669a724f2062d4c4ef93551", + "sha256:d7d7f7de27b8944f1fee2c26a88b4dabc2409d2fea7a9ed3df79b67277644e17", + "sha256:d7eeb6d22331e2fd42fce928a81c697c9ee2d51400bd1a28803965883e13cead", + "sha256:d8a1c6c0be645c745a081c192e747c5de06e944a0d21245f4cf7c05e457c36e0", + "sha256:d8b889777de69897406c9fb0b76cdf2fd0f31267861ae7501d93003d55f54fbe", + "sha256:d9e09c9d74f4566e905a0b8fa668c58109f7624db96a2171f21747abc7524234", + "sha256:db8e58b9d79200c76956cefd14d5c90af54416ff5353c5bfd7cbe58818e26ef0", + "sha256:ddb2a5c08a4eaaba605340fdee8fc08e406c56617566d9643ad8bf6852778fc7", + "sha256:e0381b4ce23ff92f8170080c97678040fc5b08da85e9e292292aba67fdac6c34", 
+ "sha256:e23a6d84d9d1738dbc6e38167776107e63307dfc8ad108e580548d1f2c587f42", + "sha256:e516dc8baf7b380e6c1c26792610230f37147bb754d6426462ab115a02944385", + "sha256:ea65804b5dc88dacd4a40279af0cdadcfe74b3e5b4c897aa0d81cf86927fee78", + "sha256:ec61d826d80fc293ed46c9dd26995921e3a82146feacd952ef0757236fc137be", + "sha256:ee04010f26d5102399bd17f8df8bc38dc7ccd7701dc77f4a68c5b8d733406958", + "sha256:f3bc6af6e2b8f92eced34ef6a96ffb248e863af20ef4fde9448cc8c9b858b749", + "sha256:f7d6b36dd2e029b6bcb8a13cf19664c7b8e19ab3a58e0fefbb5b8461447ed5ec" + ], + "markers": "python_version >= '3.7'", + "version": "==1.9.4" + }, + "zipp": { + "hashes": [ + "sha256:206f5a15f2af3dbaee80769fb7dc6f249695e940acca08dfb2a4769fe61e538b", + "sha256:2884ed22e7d8961de1c9a05142eb69a247f120291bc0206a00a7642f09b5b715" + ], + "markers": "python_version >= '3.8'", + "version": "==3.18.1" + } + }, + "develop": { + "black": { + "hashes": [ + "sha256:2818cf72dfd5d289e48f37ccfa08b460bf469e67fb7c4abb07edc2e9f16fb63f", + "sha256:41622020d7120e01d377f74249e677039d20e6344ff5851de8a10f11f513bf93", + "sha256:4acf672def7eb1725f41f38bf6bf425c8237248bb0804faa3965c036f7672d11", + "sha256:4be5bb28e090456adfc1255e03967fb67ca846a03be7aadf6249096100ee32d0", + "sha256:4f1373a7808a8f135b774039f61d59e4be7eb56b2513d3d2f02a8b9365b8a8a9", + "sha256:56f52cfbd3dabe2798d76dbdd299faa046a901041faf2cf33288bc4e6dae57b5", + "sha256:65b76c275e4c1c5ce6e9870911384bff5ca31ab63d19c76811cb1fb162678213", + "sha256:65c02e4ea2ae09d16314d30912a58ada9a5c4fdfedf9512d23326128ac08ac3d", + "sha256:6905238a754ceb7788a73f02b45637d820b2f5478b20fec82ea865e4f5d4d9f7", + "sha256:79dcf34b33e38ed1b17434693763301d7ccbd1c5860674a8f871bd15139e7837", + "sha256:7bb041dca0d784697af4646d3b62ba4a6b028276ae878e53f6b4f74ddd6db99f", + "sha256:7d5e026f8da0322b5662fa7a8e752b3fa2dac1c1cbc213c3d7ff9bdd0ab12395", + "sha256:9f50ea1132e2189d8dff0115ab75b65590a3e97de1e143795adb4ce317934995", + "sha256:a0c9c4a0771afc6919578cec71ce82a3e31e054904e7197deacbc9382671c41f", + 
"sha256:aadf7a02d947936ee418777e0247ea114f78aff0d0959461057cae8a04f20597", + "sha256:b5991d523eee14756f3c8d5df5231550ae8993e2286b8014e2fdea7156ed0959", + "sha256:bf21b7b230718a5f08bd32d5e4f1db7fc8788345c8aea1d155fc17852b3410f5", + "sha256:c45f8dff244b3c431b36e3224b6be4a127c6aca780853574c00faf99258041eb", + "sha256:c7ed6668cbbfcd231fa0dc1b137d3e40c04c7f786e626b405c62bcd5db5857e4", + "sha256:d7de8d330763c66663661a1ffd432274a2f92f07feeddd89ffd085b5744f85e7", + "sha256:e19cb1c6365fd6dc38a6eae2dcb691d7d83935c10215aef8e6c38edee3f77abd", + "sha256:e2af80566f43c85f5797365077fb64a393861a3730bd110971ab7a0c94e873e7" + ], + "index": "pypi", + "markers": "python_version >= '3.8'", + "version": "==24.3.0" + }, + "certifi": { + "hashes": [ + "sha256:0569859f95fc761b18b45ef421b1290a0f65f147e92a1e5eb3e635f9a5e4e66f", + "sha256:dc383c07b76109f368f6106eee2b593b04a011ea4d55f652c6ca24a754d1cdd1" + ], + "markers": "python_version >= '3.6'", + "version": "==2024.2.2" + }, + "charset-normalizer": { + "hashes": [ + "sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027", + "sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087", + "sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786", + "sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8", + "sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09", + "sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185", + "sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574", + "sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e", + "sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519", + "sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898", + "sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269", + "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3", + 
"sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f", + "sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6", + "sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8", + "sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a", + "sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73", + "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc", + "sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714", + "sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2", + "sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc", + "sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce", + "sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d", + "sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e", + "sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6", + "sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269", + "sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96", + "sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d", + "sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a", + "sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4", + "sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77", + "sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d", + "sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0", + "sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed", + "sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068", + "sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac", + "sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25", + "sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8", 
+ "sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab", + "sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26", + "sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2", + "sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db", + "sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f", + "sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5", + "sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99", + "sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c", + "sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d", + "sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811", + "sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa", + "sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a", + "sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03", + "sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b", + "sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04", + "sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c", + "sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001", + "sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458", + "sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389", + "sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99", + "sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985", + "sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537", + "sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238", + "sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f", + "sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d", + 
"sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796", + "sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a", + "sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143", + "sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8", + "sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c", + "sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5", + "sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5", + "sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711", + "sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4", + "sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6", + "sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c", + "sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7", + "sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4", + "sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b", + "sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae", + "sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12", + "sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c", + "sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae", + "sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8", + "sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887", + "sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b", + "sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4", + "sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f", + "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5", + "sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33", + "sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519", 
+ "sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561" + ], + "markers": "python_full_version >= '3.7.0'", + "version": "==3.3.2" + }, + "click": { + "hashes": [ + "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28", + "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de" + ], + "markers": "python_version >= '3.7'", + "version": "==8.1.7" + }, + "coverage": { + "extras": [ + "toml" + ], + "hashes": [ + "sha256:0209a6369ccce576b43bb227dc8322d8ef9e323d089c6f3f26a597b09cb4d2aa", + "sha256:062b0a75d9261e2f9c6d071753f7eef0fc9caf3a2c82d36d76667ba7b6470003", + "sha256:0842571634f39016a6c03e9d4aba502be652a6e4455fadb73cd3a3a49173e38f", + "sha256:16bae383a9cc5abab9bb05c10a3e5a52e0a788325dc9ba8499e821885928968c", + "sha256:18c7320695c949de11a351742ee001849912fd57e62a706d83dfc1581897fa2e", + "sha256:18d90523ce7553dd0b7e23cbb28865db23cddfd683a38fb224115f7826de78d0", + "sha256:1bf25fbca0c8d121a3e92a2a0555c7e5bc981aee5c3fdaf4bb7809f410f696b9", + "sha256:276f6077a5c61447a48d133ed13e759c09e62aff0dc84274a68dc18660104d52", + "sha256:280459f0a03cecbe8800786cdc23067a8fc64c0bd51dc614008d9c36e1659d7e", + "sha256:28ca2098939eabab044ad68850aac8f8db6bf0b29bc7f2887d05889b17346454", + "sha256:2c854ce44e1ee31bda4e318af1dbcfc929026d12c5ed030095ad98197eeeaed0", + "sha256:35eb581efdacf7b7422af677b92170da4ef34500467381e805944a3201df2079", + "sha256:37389611ba54fd6d278fde86eb2c013c8e50232e38f5c68235d09d0a3f8aa352", + "sha256:3b253094dbe1b431d3a4ac2f053b6d7ede2664ac559705a704f621742e034f1f", + "sha256:3b2eccb883368f9e972e216c7b4c7c06cabda925b5f06dde0650281cb7666a30", + "sha256:451f433ad901b3bb00184d83fd83d135fb682d780b38af7944c9faeecb1e0bfe", + "sha256:489763b2d037b164846ebac0cbd368b8a4ca56385c4090807ff9fad817de4113", + "sha256:4af154d617c875b52651dd8dd17a31270c495082f3d55f6128e7629658d63765", + "sha256:506edb1dd49e13a2d4cac6a5173317b82a23c9d6e8df63efb4f0380de0fbccbc", + 
"sha256:6679060424faa9c11808598504c3ab472de4531c571ab2befa32f4971835788e", + "sha256:69b9f6f66c0af29642e73a520b6fed25ff9fd69a25975ebe6acb297234eda501", + "sha256:6c00cdc8fa4e50e1cc1f941a7f2e3e0f26cb2a1233c9696f26963ff58445bac7", + "sha256:6c0cdedd3500e0511eac1517bf560149764b7d8e65cb800d8bf1c63ebf39edd2", + "sha256:708a3369dcf055c00ddeeaa2b20f0dd1ce664eeabde6623e516c5228b753654f", + "sha256:718187eeb9849fc6cc23e0d9b092bc2348821c5e1a901c9f8975df0bc785bfd4", + "sha256:767b35c3a246bcb55b8044fd3a43b8cd553dd1f9f2c1eeb87a302b1f8daa0524", + "sha256:77fbfc5720cceac9c200054b9fab50cb2a7d79660609200ab83f5db96162d20c", + "sha256:7cbde573904625509a3f37b6fecea974e363460b556a627c60dc2f47e2fffa51", + "sha256:8249b1c7334be8f8c3abcaaa996e1e4927b0e5a23b65f5bf6cfe3180d8ca7840", + "sha256:8580b827d4746d47294c0e0b92854c85a92c2227927433998f0d3320ae8a71b6", + "sha256:8640f1fde5e1b8e3439fe482cdc2b0bb6c329f4bb161927c28d2e8879c6029ee", + "sha256:9a9babb9466fe1da12417a4aed923e90124a534736de6201794a3aea9d98484e", + "sha256:a78ed23b08e8ab524551f52953a8a05d61c3a760781762aac49f8de6eede8c45", + "sha256:abbbd8093c5229c72d4c2926afaee0e6e3140de69d5dcd918b2921f2f0c8baba", + "sha256:ae7f19afe0cce50039e2c782bff379c7e347cba335429678450b8fe81c4ef96d", + "sha256:b3ec74cfef2d985e145baae90d9b1b32f85e1741b04cd967aaf9cfa84c1334f3", + "sha256:b51bfc348925e92a9bd9b2e48dad13431b57011fd1038f08316e6bf1df107d10", + "sha256:b9a4a8dd3dcf4cbd3165737358e4d7dfbd9d59902ad11e3b15eebb6393b0446e", + "sha256:ba3a8aaed13770e970b3df46980cb068d1c24af1a1968b7818b69af8c4347efb", + "sha256:c0524de3ff096e15fcbfe8f056fdb4ea0bf497d584454f344d59fce069d3e6e9", + "sha256:c0a120238dd71c68484f02562f6d446d736adcc6ca0993712289b102705a9a3a", + "sha256:cbbe5e739d45a52f3200a771c6d2c7acf89eb2524890a4a3aa1a7fa0695d2a47", + "sha256:ce8c50520f57ec57aa21a63ea4f325c7b657386b3f02ccaedeccf9ebe27686e1", + "sha256:cf30900aa1ba595312ae41978b95e256e419d8a823af79ce670835409fc02ad3", + "sha256:d25b937a5d9ffa857d41be042b4238dd61db888533b53bc76dc082cb5a15e914", 
+ "sha256:d6cdecaedea1ea9e033d8adf6a0ab11107b49571bbb9737175444cea6eb72328", + "sha256:dec9de46a33cf2dd87a5254af095a409ea3bf952d85ad339751e7de6d962cde6", + "sha256:ebe7c9e67a2d15fa97b77ea6571ce5e1e1f6b0db71d1d5e96f8d2bf134303c1d", + "sha256:ee866acc0861caebb4f2ab79f0b94dbfbdbfadc19f82e6e9c93930f74e11d7a0", + "sha256:f6a09b360d67e589236a44f0c39218a8efba2593b6abdccc300a8862cffc2f94", + "sha256:fcc66e222cf4c719fe7722a403888b1f5e1682d1679bd780e2b26c18bb648cdc", + "sha256:fd6545d97c98a192c5ac995d21c894b581f1fd14cf389be90724d21808b657e2" + ], + "index": "pypi", + "markers": "python_version >= '3.8'", + "version": "==7.4.3" + }, + "exceptiongroup": { + "hashes": [ + "sha256:5258b9ed329c5bbdd31a309f53cbfb0b155341807f6ff7606a1e801a891b29ad", + "sha256:a4785e48b045528f5bfe627b6ad554ff32def154f42372786903b7abcfe1aa16" + ], + "markers": "python_version < '3.11'", + "version": "==1.2.1" + }, + "fire": { + "hashes": [ + "sha256:54ec5b996ecdd3c0309c800324a0703d6da512241bc73b553db959d98de0aa66" + ], + "version": "==0.6.0" + }, + "flake8": { + "hashes": [ + "sha256:33f96621059e65eec474169085dc92bf26e7b2d47366b70be2f67ab80dc25132", + "sha256:a6dfbb75e03252917f2473ea9653f7cd799c3064e54d4c8140044c5c065f53c3" + ], + "index": "pypi", + "markers": "python_full_version >= '3.8.1'", + "version": "==7.0.0" + }, + "flake8-black": { + "hashes": [ + "sha256:0dfbca3274777792a5bcb2af887a4cad72c72d0e86c94e08e3a3de151bb41c34", + "sha256:fe8ea2eca98d8a504f22040d9117347f6b367458366952862ac3586e7d4eeaca" + ], + "index": "pypi", + "markers": "python_version >= '3.7'", + "version": "==0.3.6" + }, + "flake8-isort": { + "hashes": [ + "sha256:0fec4dc3a15aefbdbe4012e51d5531a2eb5fa8b981cdfbc882296a59b54ede12", + "sha256:c1f82f3cf06a80c13e1d09bfae460e9666255d5c780b859f19f8318d420370b3" + ], + "index": "pypi", + "markers": "python_version >= '3.8'", + "version": "==6.1.1" + }, + "greenlet": { + "hashes": [ + "sha256:01bc7ea167cf943b4c802068e178bbf70ae2e8c080467070d01bfa02f337ee67", + 
"sha256:0448abc479fab28b00cb472d278828b3ccca164531daab4e970a0458786055d6", + "sha256:086152f8fbc5955df88382e8a75984e2bb1c892ad2e3c80a2508954e52295257", + "sha256:098d86f528c855ead3479afe84b49242e174ed262456c342d70fc7f972bc13c4", + "sha256:149e94a2dd82d19838fe4b2259f1b6b9957d5ba1b25640d2380bea9c5df37676", + "sha256:1551a8195c0d4a68fac7a4325efac0d541b48def35feb49d803674ac32582f61", + "sha256:15d79dd26056573940fcb8c7413d84118086f2ec1a8acdfa854631084393efcc", + "sha256:1996cb9306c8595335bb157d133daf5cf9f693ef413e7673cb07e3e5871379ca", + "sha256:1a7191e42732df52cb5f39d3527217e7ab73cae2cb3694d241e18f53d84ea9a7", + "sha256:1ea188d4f49089fc6fb283845ab18a2518d279c7cd9da1065d7a84e991748728", + "sha256:1f672519db1796ca0d8753f9e78ec02355e862d0998193038c7073045899f305", + "sha256:2516a9957eed41dd8f1ec0c604f1cdc86758b587d964668b5b196a9db5bfcde6", + "sha256:2797aa5aedac23af156bbb5a6aa2cd3427ada2972c828244eb7d1b9255846379", + "sha256:2dd6e660effd852586b6a8478a1d244b8dc90ab5b1321751d2ea15deb49ed414", + "sha256:3ddc0f794e6ad661e321caa8d2f0a55ce01213c74722587256fb6566049a8b04", + "sha256:3ed7fb269f15dc662787f4119ec300ad0702fa1b19d2135a37c2c4de6fadfd4a", + "sha256:419b386f84949bf0e7c73e6032e3457b82a787c1ab4a0e43732898a761cc9dbf", + "sha256:43374442353259554ce33599da8b692d5aa96f8976d567d4badf263371fbe491", + "sha256:52f59dd9c96ad2fc0d5724107444f76eb20aaccb675bf825df6435acb7703559", + "sha256:57e8974f23e47dac22b83436bdcf23080ade568ce77df33159e019d161ce1d1e", + "sha256:5b51e85cb5ceda94e79d019ed36b35386e8c37d22f07d6a751cb659b180d5274", + "sha256:649dde7de1a5eceb258f9cb00bdf50e978c9db1b996964cd80703614c86495eb", + "sha256:64d7675ad83578e3fc149b617a444fab8efdafc9385471f868eb5ff83e446b8b", + "sha256:68834da854554926fbedd38c76e60c4a2e3198c6fbed520b106a8986445caaf9", + "sha256:6b66c9c1e7ccabad3a7d037b2bcb740122a7b17a53734b7d72a344ce39882a1b", + "sha256:70fb482fdf2c707765ab5f0b6655e9cfcf3780d8d87355a063547b41177599be", + "sha256:7170375bcc99f1a2fbd9c306f5be8764eaf3ac6b5cb968862cad4c7057756506", 
+ "sha256:73a411ef564e0e097dbe7e866bb2dda0f027e072b04da387282b02c308807405", + "sha256:77457465d89b8263bca14759d7c1684df840b6811b2499838cc5b040a8b5b113", + "sha256:7f362975f2d179f9e26928c5b517524e89dd48530a0202570d55ad6ca5d8a56f", + "sha256:81bb9c6d52e8321f09c3d165b2a78c680506d9af285bfccbad9fb7ad5a5da3e5", + "sha256:881b7db1ebff4ba09aaaeae6aa491daeb226c8150fc20e836ad00041bcb11230", + "sha256:894393ce10ceac937e56ec00bb71c4c2f8209ad516e96033e4b3b1de270e200d", + "sha256:99bf650dc5d69546e076f413a87481ee1d2d09aaaaaca058c9251b6d8c14783f", + "sha256:9da2bd29ed9e4f15955dd1595ad7bc9320308a3b766ef7f837e23ad4b4aac31a", + "sha256:afaff6cf5200befd5cec055b07d1c0a5a06c040fe5ad148abcd11ba6ab9b114e", + "sha256:b1b5667cced97081bf57b8fa1d6bfca67814b0afd38208d52538316e9422fc61", + "sha256:b37eef18ea55f2ffd8f00ff8fe7c8d3818abd3e25fb73fae2ca3b672e333a7a6", + "sha256:b542be2440edc2d48547b5923c408cbe0fc94afb9f18741faa6ae970dbcb9b6d", + "sha256:b7dcbe92cc99f08c8dd11f930de4d99ef756c3591a5377d1d9cd7dd5e896da71", + "sha256:b7f009caad047246ed379e1c4dbcb8b020f0a390667ea74d2387be2998f58a22", + "sha256:bba5387a6975598857d86de9eac14210a49d554a77eb8261cc68b7d082f78ce2", + "sha256:c5e1536de2aad7bf62e27baf79225d0d64360d4168cf2e6becb91baf1ed074f3", + "sha256:c5ee858cfe08f34712f548c3c363e807e7186f03ad7a5039ebadb29e8c6be067", + "sha256:c9db1c18f0eaad2f804728c67d6c610778456e3e1cc4ab4bbd5eeb8e6053c6fc", + "sha256:d353cadd6083fdb056bb46ed07e4340b0869c305c8ca54ef9da3421acbdf6881", + "sha256:d46677c85c5ba00a9cb6f7a00b2bfa6f812192d2c9f7d9c4f6a55b60216712f3", + "sha256:d4d1ac74f5c0c0524e4a24335350edad7e5f03b9532da7ea4d3c54d527784f2e", + "sha256:d73a9fe764d77f87f8ec26a0c85144d6a951a6c438dfe50487df5595c6373eac", + "sha256:da70d4d51c8b306bb7a031d5cff6cc25ad253affe89b70352af5f1cb68e74b53", + "sha256:daf3cb43b7cf2ba96d614252ce1684c1bccee6b2183a01328c98d36fcd7d5cb0", + "sha256:dca1e2f3ca00b84a396bc1bce13dd21f680f035314d2379c4160c98153b2059b", + 
"sha256:dd4f49ae60e10adbc94b45c0b5e6a179acc1736cf7a90160b404076ee283cf83", + "sha256:e1f145462f1fa6e4a4ae3c0f782e580ce44d57c8f2c7aae1b6fa88c0b2efdb41", + "sha256:e3391d1e16e2a5a1507d83e4a8b100f4ee626e8eca43cf2cadb543de69827c4c", + "sha256:fcd2469d6a2cf298f198f0487e0a5b1a47a42ca0fa4dfd1b6862c999f018ebbf", + "sha256:fd096eb7ffef17c456cfa587523c5f92321ae02427ff955bebe9e3c63bc9f0da", + "sha256:fe754d231288e1e64323cfad462fcee8f0288654c10bdf4f603a39ed923bef33" + ], + "markers": "platform_machine == 'aarch64' or (platform_machine == 'ppc64le' or (platform_machine == 'x86_64' or (platform_machine == 'amd64' or (platform_machine == 'AMD64' or (platform_machine == 'win32' or platform_machine == 'WIN32')))))", + "version": "==3.0.3" + }, + "hac-playwright": { + "file": "http://h2o-public-test-data.s3.amazonaws.com/e2e-testing/hac_playwright-1.38.0-py3-none-any.whl" + }, + "idna": { + "hashes": [ + "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc", + "sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0" + ], + "markers": "python_version >= '3.5'", + "version": "==3.7" + }, + "iniconfig": { + "hashes": [ + "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3", + "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374" + ], + "markers": "python_version >= '3.7'", + "version": "==2.0.0" + }, + "isort": { + "hashes": [ + "sha256:48fdfcb9face5d58a4f6dde2e72a1fb8dcaf8ab26f95ab49fab84c2ddefb0109", + "sha256:8ca5e72a8d85860d5a3fa69b8745237f2939afe12dbf656afbcb47fe72d947a6" + ], + "index": "pypi", + "markers": "python_full_version >= '3.8.0'", + "version": "==5.13.2" + }, + "jinja2": { + "hashes": [ + "sha256:7d6d50dd97d52cbc355597bd845fabfbac3f551e1f99619e39a35ce8c370b5fa", + "sha256:ac8bd6544d4bb2c9792bf3a159e80bba8fda7f07e81bc3aed565432d5925ba90" + ], + "index": "pypi", + "markers": "python_version >= '3.7'", + "version": "==3.1.3" + }, + "mako": { + "hashes": [ + 
"sha256:5324b88089a8978bf76d1629774fcc2f1c07b82acdf00f4c5dd8ceadfffc4b40", + "sha256:e16c01d9ab9c11f7290eef1cfefc093fb5a45ee4a3da09e2fec2e4d1bae54e73" + ], + "markers": "python_version >= '3.8'", + "version": "==1.3.3" + }, + "markupsafe": { + "hashes": [ + "sha256:00e046b6dd71aa03a41079792f8473dc494d564611a8f89bbbd7cb93295ebdcf", + "sha256:075202fa5b72c86ad32dc7d0b56024ebdbcf2048c0ba09f1cde31bfdd57bcfff", + "sha256:0e397ac966fdf721b2c528cf028494e86172b4feba51d65f81ffd65c63798f3f", + "sha256:17b950fccb810b3293638215058e432159d2b71005c74371d784862b7e4683f3", + "sha256:1f3fbcb7ef1f16e48246f704ab79d79da8a46891e2da03f8783a5b6fa41a9532", + "sha256:2174c595a0d73a3080ca3257b40096db99799265e1c27cc5a610743acd86d62f", + "sha256:2b7c57a4dfc4f16f7142221afe5ba4e093e09e728ca65c51f5620c9aaeb9a617", + "sha256:2d2d793e36e230fd32babe143b04cec8a8b3eb8a3122d2aceb4a371e6b09b8df", + "sha256:30b600cf0a7ac9234b2638fbc0fb6158ba5bdcdf46aeb631ead21248b9affbc4", + "sha256:397081c1a0bfb5124355710fe79478cdbeb39626492b15d399526ae53422b906", + "sha256:3a57fdd7ce31c7ff06cdfbf31dafa96cc533c21e443d57f5b1ecc6cdc668ec7f", + "sha256:3c6b973f22eb18a789b1460b4b91bf04ae3f0c4234a0a6aa6b0a92f6f7b951d4", + "sha256:3e53af139f8579a6d5f7b76549125f0d94d7e630761a2111bc431fd820e163b8", + "sha256:4096e9de5c6fdf43fb4f04c26fb114f61ef0bf2e5604b6ee3019d51b69e8c371", + "sha256:4275d846e41ecefa46e2015117a9f491e57a71ddd59bbead77e904dc02b1bed2", + "sha256:4c31f53cdae6ecfa91a77820e8b151dba54ab528ba65dfd235c80b086d68a465", + "sha256:4f11aa001c540f62c6166c7726f71f7573b52c68c31f014c25cc7901deea0b52", + "sha256:5049256f536511ee3f7e1b3f87d1d1209d327e818e6ae1365e8653d7e3abb6a6", + "sha256:58c98fee265677f63a4385256a6d7683ab1832f3ddd1e66fe948d5880c21a169", + "sha256:598e3276b64aff0e7b3451b72e94fa3c238d452e7ddcd893c3ab324717456bad", + "sha256:5b7b716f97b52c5a14bffdf688f971b2d5ef4029127f1ad7a513973cfd818df2", + "sha256:5dedb4db619ba5a2787a94d877bc8ffc0566f92a01c0ef214865e54ecc9ee5e0", + 
"sha256:619bc166c4f2de5caa5a633b8b7326fbe98e0ccbfacabd87268a2b15ff73a029", + "sha256:629ddd2ca402ae6dbedfceeba9c46d5f7b2a61d9749597d4307f943ef198fc1f", + "sha256:656f7526c69fac7f600bd1f400991cc282b417d17539a1b228617081106feb4a", + "sha256:6ec585f69cec0aa07d945b20805be741395e28ac1627333b1c5b0105962ffced", + "sha256:72b6be590cc35924b02c78ef34b467da4ba07e4e0f0454a2c5907f473fc50ce5", + "sha256:7502934a33b54030eaf1194c21c692a534196063db72176b0c4028e140f8f32c", + "sha256:7a68b554d356a91cce1236aa7682dc01df0edba8d043fd1ce607c49dd3c1edcf", + "sha256:7b2e5a267c855eea6b4283940daa6e88a285f5f2a67f2220203786dfa59b37e9", + "sha256:823b65d8706e32ad2df51ed89496147a42a2a6e01c13cfb6ffb8b1e92bc910bb", + "sha256:8590b4ae07a35970728874632fed7bd57b26b0102df2d2b233b6d9d82f6c62ad", + "sha256:8dd717634f5a044f860435c1d8c16a270ddf0ef8588d4887037c5028b859b0c3", + "sha256:8dec4936e9c3100156f8a2dc89c4b88d5c435175ff03413b443469c7c8c5f4d1", + "sha256:97cafb1f3cbcd3fd2b6fbfb99ae11cdb14deea0736fc2b0952ee177f2b813a46", + "sha256:a17a92de5231666cfbe003f0e4b9b3a7ae3afb1ec2845aadc2bacc93ff85febc", + "sha256:a549b9c31bec33820e885335b451286e2969a2d9e24879f83fe904a5ce59d70a", + "sha256:ac07bad82163452a6884fe8fa0963fb98c2346ba78d779ec06bd7a6262132aee", + "sha256:ae2ad8ae6ebee9d2d94b17fb62763125f3f374c25618198f40cbb8b525411900", + "sha256:b91c037585eba9095565a3556f611e3cbfaa42ca1e865f7b8015fe5c7336d5a5", + "sha256:bc1667f8b83f48511b94671e0e441401371dfd0f0a795c7daa4a3cd1dde55bea", + "sha256:bec0a414d016ac1a18862a519e54b2fd0fc8bbfd6890376898a6c0891dd82e9f", + "sha256:bf50cd79a75d181c9181df03572cdce0fbb75cc353bc350712073108cba98de5", + "sha256:bff1b4290a66b490a2f4719358c0cdcd9bafb6b8f061e45c7a2460866bf50c2e", + "sha256:c061bb86a71b42465156a3ee7bd58c8c2ceacdbeb95d05a99893e08b8467359a", + "sha256:c8b29db45f8fe46ad280a7294f5c3ec36dbac9491f2d1c17345be8e69cc5928f", + "sha256:ce409136744f6521e39fd8e2a24c53fa18ad67aa5bc7c2cf83645cce5b5c4e50", + "sha256:d050b3361367a06d752db6ead6e7edeb0009be66bc3bae0ee9d97fb326badc2a", 
+ "sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b", + "sha256:d9fad5155d72433c921b782e58892377c44bd6252b5af2f67f16b194987338a4", + "sha256:daa4ee5a243f0f20d528d939d06670a298dd39b1ad5f8a72a4275124a7819eff", + "sha256:db0b55e0f3cc0be60c1f19efdde9a637c32740486004f20d1cff53c3c0ece4d2", + "sha256:e61659ba32cf2cf1481e575d0462554625196a1f2fc06a1c777d3f48e8865d46", + "sha256:ea3d8a3d18833cf4304cd2fc9cbb1efe188ca9b5efef2bdac7adc20594a0e46b", + "sha256:ec6a563cff360b50eed26f13adc43e61bc0c04d94b8be985e6fb24b81f6dcfdf", + "sha256:f5dfb42c4604dddc8e4305050aa6deb084540643ed5804d7455b5df8fe16f5e5", + "sha256:fa173ec60341d6bb97a89f5ea19c85c5643c1e7dedebc22f5181eb73573142c5", + "sha256:fa9db3f79de01457b03d4f01b34cf91bc0048eb2c3846ff26f66687c2f6d16ab", + "sha256:fce659a462a1be54d2ffcacea5e3ba2d74daa74f30f5f143fe0c58636e355fdd", + "sha256:ffee1f21e5ef0d712f9033568f8344d5da8cc2869dbd08d87c84656e6a2d2f68" + ], + "markers": "python_version >= '3.7'", + "version": "==2.1.5" + }, + "mccabe": { + "hashes": [ + "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325", + "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e" + ], + "markers": "python_version >= '3.6'", + "version": "==0.7.0" + }, + "mypy": { + "hashes": [ + "sha256:028cf9f2cae89e202d7b6593cd98db6759379f17a319b5faf4f9978d7084cdc6", + "sha256:2afecd6354bbfb6e0160f4e4ad9ba6e4e003b767dd80d85516e71f2e955ab50d", + "sha256:2b5b6c721bd4aabaadead3a5e6fa85c11c6c795e0c81a7215776ef8afc66de02", + "sha256:42419861b43e6962a649068a61f4a4839205a3ef525b858377a960b9e2de6e0d", + "sha256:42c6680d256ab35637ef88891c6bd02514ccb7e1122133ac96055ff458f93fc3", + "sha256:485a8942f671120f76afffff70f259e1cd0f0cfe08f81c05d8816d958d4577d3", + "sha256:4c886c6cce2d070bd7df4ec4a05a13ee20c0aa60cb587e8d1265b6c03cf91da3", + "sha256:4e6d97288757e1ddba10dd9549ac27982e3e74a49d8d0179fc14d4365c7add66", + "sha256:4ef4be7baf08a203170f29e89d79064463b7fc7a0908b9d0d5114e8009c3a259", + 
"sha256:51720c776d148bad2372ca21ca29256ed483aa9a4cdefefcef49006dff2a6835", + "sha256:52825b01f5c4c1c4eb0db253ec09c7aa17e1a7304d247c48b6f3599ef40db8bd", + "sha256:538fd81bb5e430cc1381a443971c0475582ff9f434c16cd46d2c66763ce85d9d", + "sha256:5c1538c38584029352878a0466f03a8ee7547d7bd9f641f57a0f3017a7c905b8", + "sha256:6ff8b244d7085a0b425b56d327b480c3b29cafbd2eff27316a004f9a7391ae07", + "sha256:7178def594014aa6c35a8ff411cf37d682f428b3b5617ca79029d8ae72f5402b", + "sha256:720a5ca70e136b675af3af63db533c1c8c9181314d207568bbe79051f122669e", + "sha256:7f1478736fcebb90f97e40aff11a5f253af890c845ee0c850fe80aa060a267c6", + "sha256:855fe27b80375e5c5878492f0729540db47b186509c98dae341254c8f45f42ae", + "sha256:8963b83d53ee733a6e4196954502b33567ad07dfd74851f32be18eb932fb1cb9", + "sha256:9261ed810972061388918c83c3f5cd46079d875026ba97380f3e3978a72f503d", + "sha256:99b00bc72855812a60d253420d8a2eae839b0afa4938f09f4d2aa9bb4654263a", + "sha256:ab3c84fa13c04aeeeabb2a7f67a25ef5d77ac9d6486ff33ded762ef353aa5592", + "sha256:afe3fe972c645b4632c563d3f3eff1cdca2fa058f730df2b93a35e3b0c538218", + "sha256:d19c413b3c07cbecf1f991e2221746b0d2a9410b59cb3f4fb9557f0365a1a817", + "sha256:df9824ac11deaf007443e7ed2a4a26bebff98d2bc43c6da21b2b64185da011c4", + "sha256:e46f44b54ebddbeedbd3d5b289a893219065ef805d95094d16a0af6630f5d410", + "sha256:f5ac9a4eeb1ec0f1ccdc6f326bcdb464de5f80eb07fb38b5ddd7b0de6bc61e55" + ], + "index": "pypi", + "markers": "python_version >= '3.8'", + "version": "==1.8.0" + }, + "mypy-extensions": { + "hashes": [ + "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d", + "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782" + ], + "markers": "python_version >= '3.5'", + "version": "==1.0.0" + }, + "packaging": { + "hashes": [ + "sha256:2ddfb553fdf02fb784c234c7ba6ccc288296ceabec964ad2eae3777778130bc5", + "sha256:eb82c5e3e56209074766e6885bb04b8c38a0c015d0a30036ebe7ece34c9989e9" + ], + "markers": "python_version >= '3.7'", + "version": "==24.0" + }, + 
"parse": { + "hashes": [ + "sha256:09002ca350ad42e76629995f71f7b518670bcf93548bdde3684fd55d2be51975", + "sha256:76ddd5214255ae711db4c512be636151fbabaa948c6f30115aecc440422ca82c" + ], + "version": "==1.20.1" + }, + "parse-type": { + "hashes": [ + "sha256:06d39a8b70fde873eb2a131141a0e79bb34a432941fb3d66fad247abafc9766c", + "sha256:79b1f2497060d0928bc46016793f1fca1057c4aacdf15ef876aa48d75a73a355" + ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", + "version": "==0.6.2" + }, + "pathspec": { + "hashes": [ + "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08", + "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712" + ], + "markers": "python_version >= '3.8'", + "version": "==0.12.1" + }, + "platformdirs": { + "hashes": [ + "sha256:031cd18d4ec63ec53e82dceaac0417d218a6863f7745dfcc9efe7793b7039bdf", + "sha256:17d5a1161b3fd67b390023cb2d3b026bbd40abde6fdb052dfbd3a29c3ba22ee1" + ], + "markers": "python_version >= '3.8'", + "version": "==4.2.1" + }, + "playwright": { + "hashes": [ + "sha256:1c46a7ed7702b9f97b57737132f25e2052ef2e9541c3613d896e92739d2ea4ee", + "sha256:22e4a49d61a20a21d6a4a90891d4d08df5091f3719272d7a31c4c7f0ff436683", + "sha256:324e317c6ddc919a01e98ed182a54c88c0b6e775e91aea2996ed320b436c0f27", + "sha256:33d6500d94c5e4608d3a74372d6f50ecbebca55dc55eaee3f70b21eaf02b17aa", + "sha256:801029161725bd9a8c1ea2d29125074f7e54bfa7b0ef85c6dfb667023a0702c8", + "sha256:ce5c2d2c49c97ea856129ac895dc7277df3c877db4a998340bd08efc3696e7fb", + "sha256:d0288c8932d7f14bc231e4a6761ecf76fff879d1601cfa3b6f6aefd544468911" + ], + "markers": "python_version >= '3.8'", + "version": "==1.38.0" + }, + "pluggy": { + "hashes": [ + "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1", + "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669" + ], + "markers": "python_version >= '3.8'", + "version": "==1.5.0" + }, + "pycodestyle": { + "hashes": [ + 
"sha256:41ba0e7afc9752dfb53ced5489e89f8186be00e599e712660695b7a75ff2663f", + "sha256:44fe31000b2d866f2e41841b18528a505fbd7fef9017b04eff4e2648a0fadc67" + ], + "markers": "python_version >= '3.8'", + "version": "==2.11.1" + }, + "pyee": { + "hashes": [ + "sha256:2770c4928abc721f46b705e6a72b0c59480c4a69c9a83ca0b00bb994f1ea4b32", + "sha256:9f066570130c554e9cc12de5a9d86f57c7ee47fece163bbdaa3e9c933cfbdfa5" + ], + "version": "==9.0.4" + }, + "pyflakes": { + "hashes": [ + "sha256:1c61603ff154621fb2a9172037d84dca3500def8c8b630657d1701f026f8af3f", + "sha256:84b5be138a2dfbb40689ca07e2152deb896a65c3a3e24c251c5c62489568074a" + ], + "markers": "python_version >= '3.8'", + "version": "==3.2.0" + }, + "pyotp": { + "hashes": [ + "sha256:346b6642e0dbdde3b4ff5a930b664ca82abfa116356ed48cc42c7d6590d36f63", + "sha256:81c2e5865b8ac55e825b0358e496e1d9387c811e85bb40e71a3b29b288963612" + ], + "markers": "python_version >= '3.7'", + "version": "==2.9.0" + }, + "pytest": { + "hashes": [ + "sha256:249b1b0864530ba251b7438274c4d251c58d868edaaec8762893ad4a0d71c36c", + "sha256:50fb9cbe836c3f20f0dfa99c565201fb75dc54c8d76373cd1bde06b06657bdb6" + ], + "index": "pypi", + "markers": "python_version >= '3.8'", + "version": "==8.0.0" + }, + "pytest-base-url": { + "hashes": [ + "sha256:02748589a54f9e63fcbe62301d6b0496da0d10231b753e950c63e03aee745d45", + "sha256:3ad15611778764d451927b2a53240c1a7a591b521ea44cebfe45849d2d2812e6" + ], + "index": "pypi", + "markers": "python_version >= '3.8'", + "version": "==2.1.0" + }, + "pytest-bdd": { + "hashes": [ + "sha256:652d9c5324076ed9348f1c69b6512c00c581708ff17f063771ea703b62d3b956", + "sha256:faf115b9de793dc2341e898347f936c3766179a54a018c132796302b120918e5" + ], + "index": "pypi", + "markers": "python_version >= '3.8'", + "version": "==7.0.1" + }, + "pytest-cov": { + "hashes": [ + "sha256:3904b13dfbfec47f003b8e77fd5b589cd11904a21ddf1ab38a64f204d6a10ef6", + "sha256:6ba70b9e97e69fcc3fb45bfeab2d0a138fb65c4d0d6a41ef33983ad114be8c3a" + ], + "index": "pypi", + "markers": 
"python_version >= '3.7'", + "version": "==4.1.0" + }, + "pytest-dependency": { + "hashes": [ + "sha256:934b0e6a39d95995062c193f7eaeed8a8ffa06ff1bcef4b62b0dc74a708bacc1" + ], + "index": "pypi", + "markers": "python_version >= '3.4'", + "version": "==0.6.0" + }, + "pytest-html": { + "hashes": [ + "sha256:70a01e8ae5800f4a074b56a4cb1025c8f4f9b038bba5fe31e3c98eb996686f07", + "sha256:c8152cea03bd4e9bee6d525573b67bbc6622967b72b9628dda0ea3e2a0b5dd71" + ], + "index": "pypi", + "markers": "python_version >= '3.8'", + "version": "==4.1.1" + }, + "pytest-metadata": { + "hashes": [ + "sha256:c8e0844db684ee1c798cfa38908d20d67d0463ecb6137c72e91f418558dd5f4b", + "sha256:d2a29b0355fbc03f168aa96d41ff88b1a3b44a3b02acbe491801c98a048017c8" + ], + "markers": "python_version >= '3.8'", + "version": "==3.1.1" + }, + "pytest-playwright": { + "hashes": [ + "sha256:5488db4cc49028491c5130af0a2bb6b1d0b222a202217f6d14491d4c9aa67ff9", + "sha256:df306f3a60a8631a3cfde1b95a2ed5a89203a3408dfa1154de049ca7de87c90b" + ], + "markers": "python_version >= '3.8'", + "version": "==0.4.4" + }, + "python-slugify": { + "hashes": [ + "sha256:276540b79961052b66b7d116620b36518847f52d5fd9e3a70164fc8c50faa6b8", + "sha256:59202371d1d05b54a9e7720c5e038f928f45daaffe41dd10822f3907b937c856" + ], + "markers": "python_version >= '3.7'", + "version": "==8.0.4" + }, + "requests": { + "hashes": [ + "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f", + "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1" + ], + "markers": "python_version >= '3.7'", + "version": "==2.31.0" + }, + "setuptools": { + "hashes": [ + "sha256:6c1fccdac05a97e598fb0ae3bbed5904ccb317337a51139dcd51453611bbb987", + "sha256:c636ac361bc47580504644275c9ad802c50415c7522212252c033bd15f301f32" + ], + "markers": "python_version >= '3.8'", + "version": "==69.5.1" + }, + "six": { + "hashes": [ + "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926", + 
"sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254" + ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", + "version": "==1.16.0" + }, + "termcolor": { + "hashes": [ + "sha256:3afb05607b89aed0ffe25202399ee0867ad4d3cb4180d98aaf8eefa6a5f7d475", + "sha256:b5b08f68937f138fe92f6c089b99f1e2da0ae56c52b78bf7075fd95420fd9a5a" + ], + "markers": "python_version >= '3.7'", + "version": "==2.3.0" + }, + "text-unidecode": { + "hashes": [ + "sha256:1311f10e8b895935241623731c2ba64f4c455287888b18189350b67134a822e8", + "sha256:bad6603bb14d279193107714b288be206cac565dfa49aa5b105294dd5c4aab93" + ], + "version": "==1.3" + }, + "tomli": { + "hashes": [ + "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc", + "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f" + ], + "markers": "python_version < '3.11'", + "version": "==2.0.1" + }, + "tomli-w": { + "hashes": [ + "sha256:9f2a07e8be30a0729e533ec968016807069991ae2fd921a78d42f429ae5f4463", + "sha256:f463434305e0336248cac9c2dc8076b707d8a12d019dd349f5c1e382dd1ae1b9" + ], + "markers": "python_version >= '3.7'", + "version": "==1.0.0" + }, + "types-pyyaml": { + "hashes": [ + "sha256:a9e0f0f88dc835739b0c1ca51ee90d04ca2a897a71af79de9aec5f38cb0a5342", + "sha256:b845b06a1c7e54b8e5b4c683043de0d9caf205e7434b3edc678ff2411979b8f6" + ], + "index": "pypi", + "markers": "python_version >= '3.8'", + "version": "==6.0.12.20240311" + }, + "types-requests": { + "hashes": [ + "sha256:4428df33c5503945c74b3f42e82b181e86ec7b724620419a2966e2de604ce1a1", + "sha256:6216cdac377c6b9a040ac1c0404f7284bd13199c0e1bb235f4324627e8898cf5" + ], + "index": "pypi", + "markers": "python_version >= '3.8'", + "version": "==2.31.0.20240406" + }, + "types-toml": { + "hashes": [ + "sha256:3d41501302972436a6b8b239c850b26689657e25281b48ff0ec06345b8830331", + "sha256:627b47775d25fa29977d9c70dc0cbab3f314f32c8d8d0c012f2ef5de7aaec05d" + ], + "index": "pypi", + "markers": "python_version 
>= '3.8'", + "version": "==0.10.8.20240310" + }, + "typing-extensions": { + "hashes": [ + "sha256:83f085bd5ca59c80295fc2a82ab5dac679cbe02b9f33f7d83af68e241bea51b0", + "sha256:c1f94d72897edaf4ce775bb7558d5b79d8126906a14ea5ed1635921406c0387a" + ], + "markers": "python_version >= '3.8'", + "version": "==4.11.0" + }, + "urllib3": { + "hashes": [ + "sha256:450b20ec296a467077128bff42b73080516e71b56ff59a60a02bef2232c4fa9d", + "sha256:d0570876c61ab9e520d776c38acbbb5b05a776d3f9ff98a5c8fd5162a444cf19" + ], + "markers": "python_version >= '3.8'", + "version": "==2.2.1" + }, + "wheel": { + "hashes": [ + "sha256:177f9c9b0d45c47873b619f5b650346d632cdc35fb5e4d25058e09c9e581433d", + "sha256:c45be39f7882c9d34243236f2d63cbd58039e360f85d0913425fbd7ceea617a8" + ], + "index": "pypi", + "markers": "python_version >= '3.7'", + "version": "==0.42.0" + } + } +} diff --git a/README.md b/README.md index 8976315282b3b8774f40d5cd86895a5ee6bf6647..62c8176f987442a14255b68b2d42eeb65798b7f4 100644 --- a/README.md +++ b/README.md @@ -1,13 +1,274 @@ ---- -title: H2OTest -emoji: 🐢 -colorFrom: purple -colorTo: blue -sdk: gradio -sdk_version: 4.28.3 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference +

+

+ +

+

Welcome to H2O LLM Studio, a framework and no-code GUI designed for
+ fine-tuning state-of-the-art large language models (LLMs). +

+

+ +homelogs + +## Jump to + +- [With H2O LLM Studio, you can](#with-h2o-llm-studio-you-can) +- [Quickstart](#quickstart) +- [What's New](#whats-new) +- [Setup](#setup) + - [Recommended Install](#recommended-install) + - [Using requirements.txt](#using-requirementstxt) +- [Run H2O LLM Studio GUI](#run-h2o-llm-studio-gui) +- [Run H2O LLM Studio GUI using Docker from a nightly build](#run-h2o-llm-studio-gui-using-docker-from-a-nightly-build) +- [Run H2O LLM Studio GUI by building your own Docker image](#run-h2o-llm-studio-gui-by-building-your-own-docker-image) +- [Run H2O LLM Studio with command line interface (CLI)](#run-h2o-llm-studio-with-command-line-interface-cli) +- [Data format and example data](#data-format-and-example-data) +- [Training your model](#training-your-model) +- [Example: Run on OASST data via CLI](#example-run-on-oasst-data-via-cli) +- [Model checkpoints](#model-checkpoints) +- [Documentation](#documentation) +- [Contributing](#contributing) +- [License](#license) + +## With H2O LLM Studio, you can + +- easily and effectively fine-tune LLMs **without the need for any coding experience**. +- use a **graphic user interface (GUI)** specially designed for large language models. +- finetune any LLM using a large variety of hyperparameters. +- use recent finetuning techniques such as [Low-Rank Adaptation (LoRA)](https://arxiv.org/abs/2106.09685) and 8-bit model training with a low memory footprint. +- use Reinforcement Learning (RL) to finetune your model (experimental) +- use advanced evaluation metrics to judge generated answers by the model. +- track and compare your model performance visually. In addition, [Neptune](https://neptune.ai/) integration can be used. +- chat with your model and get instant feedback on your model performance. +- easily export your model to the [Hugging Face Hub](https://huggingface.co/) and share it with the community. 
+ +## Quickstart + +For questions, discussions, or just hanging out, come and join our [Discord](https://discord.gg/WKhYMWcVbq)! + +We offer several ways of getting started quickly. + +Using CLI for fine-tuning LLMs: + +[![Kaggle](https://kaggle.com/static/images/open-in-kaggle.svg)](https://www.kaggle.com/code/ilu000/h2o-llm-studio-cli/) [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1soqfJjwDJwjjH-VzZYO_pUeLx5xY4N1K?usp=sharing) + +## What's New + +- [PR 599](https://github.com/h2oai/h2o-llmstudio/pull/599) Added `KTOPairLoss` for DPO modeling allowing you to train models with simple preference data. Data currently needs to be manually prepared by randomly matching positive and negative examples as pairs. +- [PR 592](https://github.com/h2oai/h2o-llmstudio/pull/592) Starting to deprecate RLHF in favor of DPO/IPO optimization. Training is disabled, but old experiments are still viewable. RLHF will be fully removed in a future release. +- [PR 530](https://github.com/h2oai/h2o-llmstudio/pull/530) Introduced a new problem type for DPO/IPO optimization. This optimization technique can be used as an alternative to RLHF. +- [PR 288](https://github.com/h2oai/h2o-llmstudio/pull/288) Introduced Deepspeed for sharded training allowing you to train larger models on machines with multiple GPUs. Requires NVLink. This feature replaces FSDP and offers more flexibility. Deepspeed requires a system installation of cudatoolkit and we recommend using version 11.8. See [Recommended Install](#recommended-install). +- [PR 449](https://github.com/h2oai/h2o-llmstudio/pull/449) New problem type for Causal Classification Modeling allows training binary and multiclass models using LLMs. +- [PR 364](https://github.com/h2oai/h2o-llmstudio/pull/364) User secrets are now handled more securely and flexibly. Support for handling secrets using the 'keyring' library was added. User settings are migrated automatically where possible.
+ +Please note that due to current rapid development we cannot guarantee full backwards compatibility of new functionality. We thus recommend to pin the version of the framework to the one you used for your experiments. For resetting, please delete/backup your `data` and `output` folders. + +## Setup + +H2O LLM Studio requires a machine with Ubuntu 16.04+ and at least one recent Nvidia GPU with Nvidia drivers version >= 470.57.02. For larger models, we recommend at least 24GB of GPU memory. + +For more information about installation prerequisites, see the [Set up H2O LLM Studio](https://docs.h2o.ai/h2o-llmstudio/get-started/set-up-llm-studio#prerequisites) guide in the documentation. + +For a performance comparison of different GPUs, see the [H2O LLM Studio performance](https://h2oai.github.io/h2o-llmstudio/get-started/llm-studio-performance) guide in the documentation. + +### Recommended Install + +The recommended way to install H2O LLM Studio is using pipenv with Python 3.10. To install Python 3.10 on Ubuntu 16.04+, execute the following commands: + +#### System installs (Python 3.10) + +```bash +sudo add-apt-repository ppa:deadsnakes/ppa +sudo apt install python3.10 +sudo apt-get install python3.10-distutils +curl -sS https://bootstrap.pypa.io/get-pip.py | python3.10 +``` + +#### Installing NVIDIA Drivers (if required) + +If deploying on a 'bare metal' machine running Ubuntu, one may need to install the required Nvidia drivers and CUDA. The following commands show how to retrieve the latest drivers for a machine running Ubuntu 20.04 as an example. One can update the following based on their OS. 
+ +```bash +wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64/cuda-ubuntu2004.pin +sudo mv cuda-ubuntu2004.pin /etc/apt/preferences.d/cuda-repository-pin-600 +wget https://developer.download.nvidia.com/compute/cuda/11.8.0/local_installers/cuda-repo-ubuntu2004-11-8-local_11.8.0-520.61.05-1_amd64.deb +sudo dpkg -i cuda-repo-ubuntu2004-11-8-local_11.8.0-520.61.05-1_amd64.deb +sudo cp /var/cuda-repo-ubuntu2004-11-8-local/cuda-*-keyring.gpg /usr/share/keyrings/ +sudo apt-get update +sudo apt-get -y install cuda +``` + +Alternatively, one can install the cudatoolkit in a conda environment: + +```bash +conda create -n llmstudio python=3.10 +conda activate llmstudio +conda install -c "nvidia/label/cuda-11.8.0" cuda-toolkit +``` + +#### Create virtual environment (pipenv) + +The following command will create a virtual environment using pipenv and will install the dependencies using pipenv: + +```bash +make setup +``` + +If you are having trouble installing the flash_attn package, consider running + +```bash +make setup-no-flash +``` + +instead. This will install the dependencies without the flash_attn package. Note that this will disable the use of Flash Attention 2 and model training will be slower and consume more memory. + +### Using requirements.txt + +If you wish to use conda or another virtual environment, you can also install the dependencies using the requirements.txt file: + +```bash +pip install -r requirements.txt +pip install flash-attn==2.5.5 --no-build-isolation # optional for Flash Attention 2 +``` + +## Run H2O LLM Studio GUI + +You can start H2O LLM Studio using the following command: + +```bash +make llmstudio +``` + +This command will start the [H2O wave](https://github.com/h2oai/wave) server and app. +Navigate to <http://localhost:10101> (we recommend using Chrome) to access H2O LLM Studio and start fine-tuning your models!
+ +If you are running H2O LLM Studio with a custom environment other than Pipenv, you need to start the app as follows: + +```bash +H2O_WAVE_APP_ADDRESS=http://127.0.0.1:8756 \ +H2O_WAVE_MAX_REQUEST_SIZE=25MB \ +H2O_WAVE_NO_LOG=true \ +H2O_WAVE_PRIVATE_DIR="/download/@output/download" \ +wave run app +``` + +## Run H2O LLM Studio GUI using Docker from a nightly build + +Install Docker first by following instructions from [NVIDIA Containers](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#docker). Make sure to have `nvidia-container-toolkit` installed on your machine as outlined in the instructions. + +H2O LLM Studio images are stored in the h2oai GCR vorvan container repository. + +```bash +mkdir -p `pwd`/data +mkdir -p `pwd`/output + +# make sure to pull latest image if you still have a prior version cached +docker pull gcr.io/vorvan/h2oai/h2o-llmstudio:nightly + +# run the container +docker run \ + --runtime=nvidia \ + --shm-size=64g \ + --init \ + --rm \ + -u `id -u`:`id -g` \ + -p 10101:10101 \ + -v `pwd`/data:/workspace/data \ + -v `pwd`/output:/workspace/output \ + -v ~/.cache:/home/llmstudio/.cache \ + gcr.io/vorvan/h2oai/h2o-llmstudio:nightly +``` + +Navigate to <http://localhost:10101> (we recommend using Chrome) to access H2O LLM Studio and start fine-tuning your models! + +(Note other helpful docker commands are `docker ps` and `docker kill`.) + +## Run H2O LLM Studio GUI by building your own Docker image + +```bash +docker build -t h2o-llmstudio . + +mkdir -p `pwd`/data +mkdir -p `pwd`/output + +docker run \ + --runtime=nvidia \ + --shm-size=64g \ + --init \ + --rm \ + -u `id -u`:`id -g` \ + -p 10101:10101 \ + -v `pwd`/data:/workspace/data \ + -v `pwd`/output:/workspace/output \ + -v ~/.cache:/home/llmstudio/.cache \ + h2o-llmstudio +``` + +Alternatively, you can run H2O LLM Studio GUI by using our self-hosted Docker image available [here](https://console.cloud.google.com/gcr/images/vorvan/global/h2oai/h2o-llmstudio).
+ +## Run H2O LLM Studio with command line interface (CLI) + +You can also use H2O LLM Studio with the command line interface (CLI) and specify the configuration .yaml file that contains all the experiment parameters. To finetune using H2O LLM Studio with CLI, activate the pipenv environment by running `make shell`, and then use the following command: + +```bash +python train.py -Y {path_to_config_yaml_file} +``` + +To run on multiple GPUs in DDP mode, run the following command: + +```bash +bash distributed_train.sh {NR_OF_GPUS} -Y {path_to_config_yaml_file} +``` + +By default, the framework will run on the first `k` GPUs. If you want to specify specific GPUs to run on, use the `CUDA_VISIBLE_DEVICES` environment variable before the command. + +To start an interactive chat with your trained model, use the following command: + +```bash +python prompt.py -e {experiment_name} +``` + +where `experiment_name` is the output folder of the experiment you want to chat with (see configuration). +The interactive chat will also work with models that were finetuned using the UI. + +To publish the model to Hugging Face, use the following command: + +```bash +make shell + +python publish_to_hugging_face.py -p {path_to_experiment} -d {device} -a {api_key} -u {user_id} -m {model_name} -s {safe_serialization} +``` + +`path_to_experiment` is the output folder of the experiment. +`device` is the target device for running the model, either 'cpu' or 'cuda:0'. Default is 'cuda:0'. +`api_key` is the Hugging Face API Key. If the user is logged in, it can be omitted. +`user_id` is the Hugging Face user ID. If the user is logged in, it can be omitted. +`model_name` is the name of the model to be published on Hugging Face. It can be omitted. +`safe_serialization` is a flag indicating whether safe serialization should be used. Default is True.
+ +## Data format and example data + +For details on the data format required when importing your data or example data that you can use to try out H2O LLM Studio, see [Data format](https://docs.h2o.ai/h2o-llmstudio/guide/datasets/data-connectors-format#data-format) in the H2O LLM Studio documentation. + +## Training your model + +With H2O LLM Studio, training your large language model is easy and intuitive. First, upload your dataset and then start training your model. Start by [creating an experiment](https://docs.h2o.ai/h2o-llmstudio/guide/experiments/create-an-experiment). You can then [monitor and manage your experiment](https://docs.h2o.ai/h2o-llmstudio/guide/experiments/view-an-experiment), [compare experiments](https://docs.h2o.ai/h2o-llmstudio/guide/experiments/compare-experiments), or [push the model to Hugging Face](https://docs.h2o.ai/h2o-llmstudio/guide/experiments/export-trained-model) to share it with the community. + +## Example: Run on OASST data via CLI + +As an example, you can run an experiment on the OASST data via CLI. For instructions, see the [Run an experiment on the OASST data](https://docs.h2o.ai/h2o-llmstudio/guide/experiments/create-an-experiment#run-an-experiment-on-the-oasst-data-via-cli) guide in the H2O LLM Studio documentation. + +## Model checkpoints + +All open-source datasets and models are posted on [H2O.ai's Hugging Face page](https://huggingface.co/h2oai/) and our [H2OGPT](https://github.com/h2oai/h2ogpt) repository. + +## Documentation + +Detailed documentation and frequently asked questions (FAQs) for H2O LLM Studio can be found at <https://docs.h2o.ai/h2o-llmstudio/>. If you wish to contribute to the docs, navigate to the `/documentation` folder of this repo and refer to the [README.md](documentation/README.md) for more information. + +## Contributing + +We are happy to accept contributions to the H2O LLM Studio project. Please refer to the [CONTRIBUTING.md](CONTRIBUTING.md) file for more information.
+ +## License + +H2O LLM Studio is licensed under the Apache 2.0 license. Please see the [LICENSE](LICENSE) file for more information. diff --git a/app.py b/app.py new file mode 100644 index 0000000000000000000000000000000000000000..c26a4f3d940bcdad8310039a470f1c853b67a465 --- /dev/null +++ b/app.py @@ -0,0 +1,44 @@ +import logging +import os + +from llm_studio.app_utils.sections.chat_update import is_app_blocked_while_streaming +from llm_studio.src.utils.logging_utils import initialize_logging + +os.environ["MKL_THREADING_LAYER"] = "GNU" + +from h2o_wave import Q, app, copy_expando, main, ui # noqa: F401 + +from llm_studio.app_utils.handlers import handle +from llm_studio.app_utils.initializers import initialize_app, initialize_client +from llm_studio.app_utils.sections.common import heap_redact, interface + +logger = logging.getLogger(__name__) + + +def on_startup(): + initialize_logging() + logger.info("STARTING APP") + + +@app("/", on_startup=on_startup) +async def serve(q: Q): + """Serving function.""" + + # Chat is still being streamed but user clicks on another button. 
+ # Wait until streaming has been completed + if await is_app_blocked_while_streaming(q): + return + + if not q.app.initialized: + await initialize_app(q) + + copy_expando(q.args, q.client) + + await initialize_client(q) + await handle(q) + + if not q.args["experiment/display/chat/chatbot"]: + await interface(q) + + await heap_redact(q) + await q.page.save() diff --git a/distributed_train.sh b/distributed_train.sh new file mode 100644 index 0000000000000000000000000000000000000000..c15a703c6845b00b1e4c4199dc3f1a6a14f5a889 --- /dev/null +++ b/distributed_train.sh @@ -0,0 +1,4 @@ +#!/bin/bash +NUM_PROC=$1 +shift +torchrun --nproc_per_node=$NUM_PROC train.py "$@" diff --git a/documentation/.gitignore b/documentation/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..a0023f54f27e067e063e5ab23dc6f160b5a7a4d5 --- /dev/null +++ b/documentation/.gitignore @@ -0,0 +1,17 @@ +node_modules +tmp + +# Generated files +.docusaurus +.cach-loader + +# Misc +.DS_Store +.env.local +.env.development.local +.env.test.local +.env.production.local + +npm-debug.log* +yarn-debug.log* +yarn-error.log* diff --git a/documentation/README.md b/documentation/README.md new file mode 100644 index 0000000000000000000000000000000000000000..cd23f993847d2620a1f901654dc54a3ca31809ce --- /dev/null +++ b/documentation/README.md @@ -0,0 +1,98 @@ +# H2O LLM Studio Documentation + +- The LLM Studio documentation is built using [Makersaurus](https://github.com/h2oai/makersaurus/pkgs/npm/makersaurus) which is a very thin wrapper around Facebook's Docusaurus. +- The documentation is displayed at {{ https://docs.h2o.ai/h2o-llm-studio/ }} + +To view, edit, and cut a version of the documentation, the following is required: + +- Node.js version 16.14+ (you can check your version by running `node -v`). Use nvm to manage multiple Node versions installed on a single machine. 
+ +- To install Node.js and npm with nvm in Mac or Ubuntu, run: `curl -o- +https://raw.githubusercontent.com/creationix/nvm/v0.33.0/install.sh | bash` and `nvm install node` + +- Makersaurus (the H2O themed documentation site) is hosted on H2O's Github npm registry. npm must authenticate to the registry before you can download Makersaurus. Follow the 3 steps below to authenticate the npm package. + + If you have already installed `@h2oai/ui-kit` or any other private `@h2oai`-prefixed npm package you can skip this step. + + **Step 1:** Create a "classic" [personal access token](https://github.com/settings/tokens) (PAT) on Github. Note that you only need to enable the `read:packages` scope for this token. + + **Step 2:** Add the PAT to your `~/.npmrc` file. Create this file if it doesn't exist yet. + ``` + @h2oai:registry=https://npm.pkg.github.com/ + //npm.pkg.github.com/:_authToken=YOUR-GENERATED-TOKEN + ``` + **Step 3:** Verify that it worked by running the following command: + ``` + npm whoami --registry=https://npm.pkg.github.com + ``` + If this command returns your username, you can proceed to the next step. If you get an error, you are not yet authenticated. You might find the [Github registry docs](https://docs.github.com/en/packages/working-with-a-github-packages-registry/working-with-the-npm-registry#authenticating-with-a-personal-access-token) helpful for debugging. + +### Documentation structure + + +``` +├── documentation +│ ├── docs +│ ├── tmp +│ ├── makersaurus.config.js +│ ├── sidebars.js +│ ├── package.json +│ ├── package-lock.json +``` + +- `documentation/docs`: Contains Markdown documentation files to edit the next documentation version. +Customize the order of the docs sidebar in `sidebars.js` +- `documentation/tmp`: Temporary files generated by Makersaurus. Do not edit these files. 
+- `documentation/makersaurus.config.js`: Makersaurus [config file](https://h2oai.github.io/makersaurus/api/config) +- `documentation/sidebars.js`: Sidebar configuration file +- `documentation/package.json`: npm configuration file +- `documentation/package-lock.json`: Generated by npm. Do not edit this file. + + +### Edit locally + +To setup the local `env` to view and edit the next or past documentation versions ([first, ensure you install +Node.js](#requirements)): + +1. Enter the documentation folder + +`cd documentation` + +2. Install dependencies + +`npm install` + +3. Start Makersaurus + +`npm start` + +- **Next documentation version**: To view your edits for the next documentation version, navigate to the provided URL. +Then, select **Next** on the **Versions** dropdown menu. +- **Debug** +- If you don't see anything after clicking **Next**, run the following command and try again: +`make setup-doc` +- Ensure that the following variable is set to `true` in the `makersaurus.config.js` file (located at `docs`): +`includeCurrentVersion` +- **Past documentation versions**: To view your edits for past documentation versions (located at +`docs/versioned_docs/`), navigate to the provided URL (for example, `http://localhost:3000/h2o-llm-studio/`). +Then, select a *version* (for example, v0.2.0) on the **Versions** dropdown menu. + +### Cut a version + +To cut a new version after making specific changes at `documentation/docs` to align with the next version of the application, consider the following instructions: + +1. Before a new version of the documentation is released, and right before we cut a version (`make version-doc`), change the following variable located in the `makersaurus.config.js` file to `false`: `includeCurrentVersion` +2. Run: `make version-doc` (for example, `make version-doc DOC_VERSION=v0.3.0`) +3. 
After the previous steps are executed and all generated files are pushed to the main branch, trigger the following +script in GitHub actions: `deploy-to-github-pages.yml` +4. After publishing the new documentation version, change the following variable located in the +`makersaurus.config.js` file to `true`: `includeCurrentVersion` +- This ensures the next doc version to edit will be visible while editing locally + + +## More information + +Use the [Makersaurus docs](https://h2oai.github.io/makersaurus/) to learn more about how to edit docs, deploy the site, set up versioning and more. + + + diff --git a/documentation/app_banner.png b/documentation/app_banner.png new file mode 100644 index 0000000000000000000000000000000000000000..e3190594401999cb1ced9ede4a093033b94e48f3 Binary files /dev/null and b/documentation/app_banner.png differ diff --git a/documentation/docs/concepts.md b/documentation/docs/concepts.md new file mode 100644 index 0000000000000000000000000000000000000000..c1e1436b52753aae1b277f0c4f8b68c07d760353 --- /dev/null +++ b/documentation/docs/concepts.md @@ -0,0 +1,58 @@ +--- +description: Learn about concepts around H2O LLM Studio. +--- +# Concepts + +H2O LLM Studio is based on a few key concepts and uses several key terms across its documentation. Each, in turn, is explained within the sections below. + +## LLM + +A Large Language Model (LLM) is a type of AI model that uses deep learning techniques and uses massive datasets to analyze and generate human-like language. For example, many AI chatbots or AI search engines are powered by LLMs. 
+ +Generally speaking, LLMs can be characterized by the following parameters: +- size of the training dataset +- cost of training (computational power) +- size of the model (parameters) +- performance after training (or how well the model is able to respond to a particular question) + +## Parameters and hyperparameters + +In the context of an LLM, parameters and hyperparameters are a crucial part of determinining the model's performance and overall behaviour. + +- **Parameters:** The internal variables of the model that are learned during the training process. In the case of an LLM, parameters typically include the weights and biases associated with the neural network layers. The values of parameters directly influence the model's predictions and the quality of generated text. + +- **Hyperparameters:** The configuration choices that are set before training the model and are not learned directly from the data (e.g., number of epochs, batch size etc.). These choices impact the learning process and influence the model's overall behavior. Hyperparameters need to be tuned and optimized to achieve the best performance. H2O LLM Studio GUI shows tooltips next to each hyperparameter to explain what each hyperparameter is for. You can also see the following references for more details about hyperparameters in H2O LLM Studio. + - Dataset settings + - [Experiment settings](./guide/experiments/experiment-settings) + + +## LLM Backbone + +LLM Backbone is a key hyperparamter that determines the model's architecture. This option is the most important setting when it comes to experiment creation, as it sets the pretrained model weights. For more information about LLM Backbone, see [Experiment settings](guide/experiments/experiment-settings.md#llm-backbone). + + +## Generative AI + +Generative AI refers to AI models that can generate new content, such as images, videos, or text, that did not exist before. 
These models learn from large datasets and use this knowledge to create new content that is similar in style or content to the original dataset. + + +## Foundation model + +A particular adaptive model that has been trained on a large amount of data and starts to derive relationships between words and concepts. Foundation models are fine-tuned to become more specific and adapt to the related domain more efficiently. + +## Fine-tuning + +Fine-tuning refers to the process of taking a pre-trained language model and further training it on a specific task or domain to improve its performance on that task. It is an important technique used to adapt LLMs to specific tasks and domains. + +## LoRA (Low-Rank Adaptation) + +Low-Rank Adapation (LoRa) involves modifying the pre-trained model by adjusting its weights and biases to better fit the new task. This adaptation is done in a way that preserves the pre-trained weights from the original dataset while also adjusting for the new task's specific requirements. This method of training or fine-turning models consumes less memory. By using low rank adaptation, the pre-trained model can be quickly adapted to new tasks, without requiring a large amount of new training data. + +## Quantization + +Quantization is a technique used to reduce the size and memory requirements of a large language model without sacrificing its accuracy. This is done by converting the floating-point numbers used to represent the model's parameters to lower-precision numbers, such as half-floats or bfloat16. Quantization can be used to make language models more accessible to users with limited computing resources. + +## 8-bit model training with a low memory footprint + +8-bit model training with a low memory footprint refers to a fine-tuning technique that reduces the memory requirements for training neural networks by using 8-bit integers instead of 32-bit floating-point numbers. 
This approach can significantly reduce the amount of memory needed to store the model's parameters and can make it possible to train larger models on hardware with limited memory capacity. + diff --git a/documentation/docs/faqs.md b/documentation/docs/faqs.md new file mode 100644 index 0000000000000000000000000000000000000000..e797b16788c89eae8aa874676180413ceebfe66d --- /dev/null +++ b/documentation/docs/faqs.md @@ -0,0 +1,120 @@ +--- +description: Learn about frequently asked questions. +--- +import Icon from "@material-ui/core/Icon"; + +# FAQs + +The sections below provide answers to frequently asked questions. If you have additional questions, please send them to [cloud-feedback@h2o.ai](mailto:cloud-feedback@h2o.ai). + +--- + +### How much data is generally required to fine-tune a model? + +There is no clear answer. As a rule of thumb, 1000 to 50000 samples of conversational data should be enough. Quality and diversity is very important. Make sure to try training on a subsample of data using the "sample" parameter to see how big the impact of the dataset size is. Recent studies suggest that less data is needed for larger foundation models. + +--- + +### Are there any recommendations for which backbone to use? Are some backbones better for certain types of tasks? + +The majority of the LLM backbones are trained on a very similar corpus of data. The main difference is the size of the model and the number of parameters. Usually, the larger the model, the better they are. The larger models also take longer to train. It is recommended to start with the smallest model and then increase the size if the performance is not satisfactory. If you are looking to train for tasks that are not directly question answering in English, it is also a good idea to look for specialized LLM backbones. + +--- + +### What if my data is not in question-and-answer form and I just have documents? How can I fine-tune the LLM model? 
+ +To train a chatbot style model, you need to convert your data into a question and answer format. + +If you really want to continue pretraining on your own data without teaching a question-answering style, prepare a dataset with all your data in a single column Dataframe. Make sure that the length of the text in each row is not too long. In the experiment setup, remove all additional tokens (e.g. `<|prompt|>`, `<|answer|>`, for Text Prompt Start and Text Answer Start respectively) and disable **Add Eos Token To Prompt** and **Add Eos Token To Answer**. Deselect everything in the Prompt Column. + +There are also other enterprise solutions from H2O.ai that may help you convert your data into a Q&A format. For more information, see [H2O.ai's Generative AI page](https://h2o.ai/) and this blogpost about [H2O LLM DataStudio: Streamlining Data Curation and Data Preparation for LLMs related tasks](https://blog.h2o.ai/blog/streamlining-data-preparation-for-fine-tuning-of-large-language-models/). + +--- + +### I encounter GPU out-of-memory issues. What can I change to be able to train large models? + +There are various parameters that can be tuned while keeping a specific LLM backbone fixed. It is advised to choose 4bit/8bit precision as a backbone dtype to be able to train models >=7B on a consumer type GPU. [LORA](concepts#lora-low-rank-adaptation) should be enabled. Besides that there are the usual parameters such as batch size and maximum sequence length that can be decreased to save GPU memory (please ensure that your prompt+answer text is not truncated too much by checking the train data insights). + +--- + +### When does the model stop the fine-tuning process? + +The number of epochs are set by the user. + +--- + +### How many records are recommended for fine-tuning? + +An order of 100K records is recommended for fine-tuning. + +--- + +### Where does H2O LLM Studio store its data? 
+ +By default, H2O LLM Studio stores its data in two folders located in the root directory in the app. The folders are named `data` and `output`. Here is the breakdown of the data storage structure: +- `data/dbs`: This folder contains the user database used within the app. +- `data/user`: This folder is where uploaded datasets from the user are stored. +- `output/user`: All experiments conducted in H2O LLM Studio are stored in this folder. For each experiment, a separate folder is created within the `output/user` directory, which contains all the relevant data associated with that particular experiment. +- `output/download`: Utility folder that is used to store data the user downloads within the app. + +It is possible to change the default working directory of H2O LLM Studio by setting the `H2O_LLM_STUDIO_WORKDIR` environment variable. By default, the working directory is set to the root directory of the app. + +---- + +### How can I update H2O LLM Studio? + +To update H2O LLM Studio, you have two options: + +1. Using the latest main branch: Execute the commands `git checkout main` and `git pull` to obtain the latest updates from the main branch. +2. Using the latest release tag: Execute the commands `git pull` and `git checkout v0.0.3` (replace 'v0.0.3' with the desired version number) to switch to the latest release branch. + +The update process does not remove or erase any existing data folders or experiment records. This means that all your old data, including the user database, uploaded datasets, and experiment results, will still be available to you within the updated version of H2O LLM Studio. + +Before updating, it is recommended to run the `git rev-parse --short HEAD` command and save the commit hash. +This will allow you to revert to your existing version if needed. + +--- + +### Once I have the [LoRA](guide/experiments/experiment-settings.md#lora), what is the recommended way of utilizing it with the base model? + +You can also export the LoRA weights. 
You may add them to the files to be exported [here](https://github.com/h2oai/h2o-llmstudio/blob/main/llm_studio/app_utils/sections/experiment.py#L1552). Before exporting, the LoRA weights are merged back into the original LLM backbone weights to make downstream tasks easier. You do not need to have PEFT, or anything else for your deployment. + +--- + +### How to use H2O LLM Studio in Windows? + +Use WSL 2 on Windows + +--- + +### How can I easily fine-tune a large language model (LLM) using the command-line interface (CLI) of H2O LLM Studio when I have limited GPU memory? + +If you have limited GPU memory but still want to fine-tune a large language model using H2O LLM Studio's CLI, there are alternative methods you can use to get started quickly. + +- [Using Kaggle kernels](https://www.kaggle.com/code/ilu000/h2o-llm-studio-cli/) +- [Using Google Colab](https://colab.research.google.com/drive/1soqfJjwDJwjjH-VzZYO_pUeLx5xY4N1K?usp=sharing) + +--- + +### Can I run a validation metric on a model post-training, optionally on a different validation dataset? + +Yes. + +1. After you have finished creating an experiment, click on the more_vert Kebab menu of the relevant experiment and select **New Experiment**. + +2. Enable the **Use previous experiments weight** setting found at the top of the screen. + This will now load the previous weights, and you can now change eval dataset, metric, and anything else as you see fit. To only do evaluation without any retraining, set the **Epochs** to 0. + +---- + +### What are the hardware/infrastructure sizing recommendations for H2O LLM Studio? + +When it comes to hardware requirements, it is important to note that the primary demand centers around the GPU and its associated VRAM. In terms of CPUs, most modern choices should suffice as NLP tasks typically do not heavily stress CPU performance. 
As for RAM, it's advisable to have a minimum of 128GB, with a stronger recommendation of 256GB or more, particularly when dealing with substantial model weights that must be accommodated in the CPU RAM. + +---- + + + + + + diff --git a/documentation/docs/get-started/core-features.md b/documentation/docs/get-started/core-features.md new file mode 100644 index 0000000000000000000000000000000000000000..45256d3a6036b64585358ee37fd201ac4a43959a --- /dev/null +++ b/documentation/docs/get-started/core-features.md @@ -0,0 +1,34 @@ +--- +description: Learn about the core features of LLM Studio. +--- +# Core features + +## No-code fine-tuning + +NLP practioners can easily fine-tune models without the need for code expertise. The user interface, which is specifically designed for LLMs, allows users to upload large datasets easily and configure [hyperparameters](../concepts#parameters-and-hyperparameters) to fine-tune the model. + +## Highly customizable (wide range of hyperparameters) + +H2O LLM Studio supports a wide variety of hyperparameters that can be used to fine-tune the model and supports the following fine-tuning techniques to enable advanced customization: + +- [Low-Rank Adaptation (LoRA)](../concepts#lora-low-rank-adaptation) +- [8-bit model training with a low memory footprint](../concepts#8-bit-model-training-with-a-low-memory-footprint) + +## Advanced evaluation metrics and experiment comparison + +Advanced evaluation metrics in H2O LLM Studio can be used to validate the answers generated by the LLM. This helps to make data-driven decisions about the model. It also offers visual tracking and comparison of experiment performance, making it easy to analyze and compare different fine-tuned models.You can also visualize how different parameters affect the model performance, and optionally use the [Neptune](https://neptune.ai/) integraton to track and log your experiments. 
+ +## Instant publishing models + +H2O LLM Studio enables easy model sharing with the community by allowing you to export the model to the [Hugging Face Hub](https://huggingface.co/h2oai) with a single click. + +## Instant feedback on model performance + +Additionally, H2O LLM Studio lets you chat with the fine-tuned model and recieve instant feedback about model performance. + + + + + + + diff --git a/documentation/docs/get-started/llm-studio-flow.md b/documentation/docs/get-started/llm-studio-flow.md new file mode 100644 index 0000000000000000000000000000000000000000..5b7c2336e30733369a3e93366b89f0d72fc777d2 --- /dev/null +++ b/documentation/docs/get-started/llm-studio-flow.md @@ -0,0 +1,48 @@ +--- +description: The flow of creating and fine-tuning large language models using H2O LLM Studio. +--- +# Model flow + +The flow of creating and fine-tuning large language models using H2O LLM Studio can be summarized in the following sequential steps: + +- [Step 1: Import a dataset](#step-1-import-a-dataset) +- [Step 2: Create an experiment](#step-2-create-an-experiment) +- [Step 3: Monitor an experiment](#step-3-monitor-an-experiment) +- [Step 4: Compare experiments](#step-4-compare-experiments) +- [Step 5: Export a model to Hugging Face Hub](#step-5-export-a-model-to-hugging-face-hub) + +In the below sections, each step above, in turn, is summarized. + +## Step 1: Import a dataset + +As the first step in the experiment flow, prep your data and import your dataset to H2O LLM Studio. + +- To learn about supported data connectors and data format, see [Supported data connectors and format](../guide/datasets/data-connectors-format). +- To learn about how to import a dataset to H2O LLM Studio, see [Import a dataset](../guide/datasets/import-dataset). +- To learn about reviewing and editing a dataset, see [View and manage dataset](../guide/datasets/view-dataset.md). 
+ +## Step 2: Create an experiment + +As the second step in the experiment flow, create an experiment using the imported dataset. H2O LLM Studio offers several hyperparameter settings that you can adjust for your experiment model. To ensure that your training process is effective, you may need to specify the [hyperparameters](../concepts#parameters-and-hyperparameters) like learning rate, batch size, and the number of epochs. H2O LLM Studio provides an overview of all the parameters you’ll need to specify for your experiment. + +- To learn about creating a new experiment, see [Create an experiment](../guide/experiments/create-an-experiment.md). +- To learn about the settings available for creating an experiment, see [Experiment settings](../guide/experiments/experiment-settings.md). + +## Step 3: Monitor an experiment + +As the third step in the experiment flow, monitor the launched experiment. H2O LLM Studio allows you to inspect your experiment (model) during and after model training. Simple interactive graphs in H2O LLM Studio allow you to understand the impact of selected hyperparameter values during and after model training. You can then adjust the [hyperparameters](../concepts#parameters-and-hyperparameters) to further optimize model performance. + +To learn about viewing and monitoring an experiment, see [View and manage experiments](../guide/experiments/view-an-experiment.md). + +## Step 4: Compare experiments + +The H2O LLM studio provides a useful feature that allows comparing various experiments and analyzing how different model parameters affect model performance. This feature is a powerful tool for fine-tuning your machine-learning models and ensuring they meet your desired performance metrics. + +To learn about comparing multiple experiments, see [Compare experiments](../guide/experiments/compare-experiments.md). 
+ +## Step 5: Export a model to Hugging Face Hub + +As the final step in the experiment flow, you can export the fine-tuned model to Hugging Face with a single click. + +To learn about exporting a trained model to Hugging Face Hub, see, [Export trained model to Hugging Face](../guide/experiments/export-trained-model.md). + diff --git a/documentation/docs/get-started/llm-studio-home-screen.png b/documentation/docs/get-started/llm-studio-home-screen.png new file mode 100644 index 0000000000000000000000000000000000000000..bad3807fb2fdc7ab54b447cc221133f2b4b44458 Binary files /dev/null and b/documentation/docs/get-started/llm-studio-home-screen.png differ diff --git a/documentation/docs/get-started/llm-studio-performance.md b/documentation/docs/get-started/llm-studio-performance.md new file mode 100644 index 0000000000000000000000000000000000000000..c586b5ce72d0008f05bc6b58e42d40202a2466fb --- /dev/null +++ b/documentation/docs/get-started/llm-studio-performance.md @@ -0,0 +1,170 @@ +--- +description: Setting up and runnning H2O LLM Studio requires the following minimal prerequisites. This page lists out the speed and performance metrics of H2O LLM Studio based on different hardware setups. +--- +# H2O LLM Studio performance + +Setting up and runnning H2O LLM Studio requires the following minimal [prerequisites](set-up-llm-studio.md#prerequisites). This page lists out the speed and performance metrics of H2O LLM Studio based on different hardware setups. + +The following metrics were measured. + +- **Hardware setup:** The type and number of computing devices used to train the model. +- **LLM backbone:** The underlying architecture of the language model. For more information, see [LLM backbone](concepts.md#llm-backbone). +- **Quantization:** A technique used to reduce the size and memory requirements of the model. For more information, see [Quantization](concepts.md#quantization). +- **Train**: The amount of time it took to train the model in hours and minutes. 
+- **Validation:** The amount of time it took to validate the model in hours and minutes.
h2oai/h2ogpt-4096-llama2-13b | bfloat16 | 11:46 | 5:56 | +| 2xA100 80GB | h2oai/h2ogpt-4096-llama2-13b | bfloat16 | 21:54 | 11:17 | +| 1xA100 80GB | h2oai/h2ogpt-4096-llama2-13b | bfloat16 | 39:10 | 18:55 | +| 4xA100 80GB | h2oai/h2ogpt-4096-llama2-13b | nf4 | 16:51 | 10:35 | +| 2xA100 80GB | h2oai/h2ogpt-4096-llama2-13b | nf4 | 32:05 | 21:00 | +| 1xA100 80GB | h2oai/h2ogpt-4096-llama2-13b | nf4 | 59:11 | 36:53 | +| 4xA100 80GB | h2oai/h2ogpt-4096-llama2-70b | nf4 | 1:13:33 | 46:02 | +| 2xA100 80GB | h2oai/h2ogpt-4096-llama2-70b | nf4 | 2:20:44 | 1:33:42 | +| 1xA100 80GB | h2oai/h2ogpt-4096-llama2-70b | nf4 | 4:23:57 | 2:44:51 | + +:::info +The runtimes were gathered using the default parameters. + +
+Expand to see the default parameters + +``` +architecture: + backbone_dtype: int4 + force_embedding_gradients: false + gradient_checkpointing: true + intermediate_dropout: 0.0 + pretrained: true + pretrained_weights: '' +augmentation: + random_parent_probability: 0.0 + skip_parent_probability: 0.0 + token_mask_probability: 0.0 +dataset: + add_eos_token_to_answer: true + add_eos_token_to_prompt: true + add_eos_token_to_system: true + answer_column: output + chatbot_author: H2O.ai + chatbot_name: h2oGPT + data_sample: 1.0 + data_sample_choice: + - Train + - Validation + limit_chained_samples: false + mask_prompt_labels: true + parent_id_column: None + personalize: false + prompt_column: + - instruction + system_column: None + text_answer_separator: <|answer|> + text_prompt_start: <|prompt|> + text_system_start: <|system|> + train_dataframe: /data/user/oasst/train_full.pq + validation_dataframe: None + validation_size: 0.01 + validation_strategy: automatic +environment: + compile_model: false + find_unused_parameters: false + gpus: + - '0' + - '1' + - '2' + - '3' + - '4' + - '5' + - '6' + - '7' + huggingface_branch: main + mixed_precision: true + number_of_workers: 8 + seed: -1 + trust_remote_code: true + use_fsdp: false +experiment_name: default-8-a10g +llm_backbone: h2oai/h2ogpt-4096-llama2-7b +logging: + logger: None + neptune_project: '' +output_directory: /output/... 
+prediction: + batch_size_inference: 0 + do_sample: false + max_length_inference: 256 + metric: BLEU + metric_gpt_model: gpt-3.5-turbo-0301 + metric_gpt_template: general + min_length_inference: 2 + num_beams: 1 + num_history: 4 + repetition_penalty: 1.2 + stop_tokens: '' + temperature: 0.3 + top_k: 0 + top_p: 1.0 +problem_type: text_causal_language_modeling +tokenizer: + add_prompt_answer_tokens: false + max_length: 512 + max_length_answer: 256 + max_length_prompt: 256 + padding_quantile: 1.0 + use_fast: true +training: + batch_size: 2 + differential_learning_rate: 1.0e-05 + differential_learning_rate_layers: [] + drop_last_batch: true + epochs: 1 + evaluate_before_training: false + evaluation_epochs: 1.0 + grad_accumulation: 1 + gradient_clip: 0.0 + learning_rate: 0.0001 + lora: true + lora_alpha: 16 + lora_dropout: 0.05 + lora_r: 4 + lora_target_modules: '' + loss_function: TokenAveragedCrossEntropy + optimizer: AdamW + save_best_checkpoint: false + schedule: Cosine + train_validation_data: false + warmup_epochs: 0.0 + weight_decay: 0.0 +``` +
+::: \ No newline at end of file diff --git a/documentation/docs/get-started/set-up-llm-studio.md b/documentation/docs/get-started/set-up-llm-studio.md new file mode 100644 index 0000000000000000000000000000000000000000..2513b266b447efdc794ed27060d7d782fb909eac --- /dev/null +++ b/documentation/docs/get-started/set-up-llm-studio.md @@ -0,0 +1,326 @@ +--- +description: Learn how to set up LLM Studio. +--- +import Tabs from "@theme/Tabs"; +import TabItem from "@theme/TabItem"; + +# Set up H2O LLM Studio + +## Prerequisites + +H2O LLM Studio requires the following minimum requirements: + +- A machine with Ubuntu 16.04+ with atleast one recent Nvidia GPU +- Have at least 128GB+ of system RAM. Larger models and complex tasks may require 256GB+ or more. +- Nvidia drivers v470.57.02 or a later version +- Access to the following URLs: + - developer.download.nvidia.com + - pypi.org + - huggingface.co + - download.pytorch.org + - cdn-lfs.huggingface.co + +:::info Notes + +- Atleast 24GB of GPU memory is recommended for larger models. +- For more information on performance benchmarks based on the hardware setup, see [H2O LLM Studio performance](llm-studio-performance.md). +- The required URLs are accessible by default when you start a GCP instance, however, if you have network rules or custom firewalls in place, it is recommended to confirm that the URLs are accessible before running `make setup`. + ::: + +## Installation + +:::note Installation methods + + + +

+ The recommended way to install H2O LLM Studio is using pipenv with Python + 3.10. To install Python 3.10 on Ubuntu 16.04+, execute the following + commands. +

+

+ System installs (Python 3.10) +

+
+      
+        sudo add-apt-repository ppa:deadsnakes/ppa 

+ sudo apt install python3.10

+ sudo apt-get install python3.10-distutils

+ curl -sS https://bootstrap.pypa.io/get-pip.py | python3.10 +
+
+

+ Install NVIDIA drivers (if required) +

+ If you are deploying on a 'bare metal' machine running Ubuntu, you may need + to install the required Nvidia drivers and CUDA. The following commands show + how to retrieve the latest drivers for a machine running Ubuntu 20.04 as an + example. You can update the following based on your respective operating system. +

+
+      
+        wget
+        https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64/cuda-ubuntu2004.pin{" "}
+        

+ sudo mv cuda-ubuntu2004.pin + /etc/apt/preferences.d/cuda-repository-pin-600

+ wget + https://developer.download.nvidia.com/compute/cuda/11.4.3/local_installers/cuda-repo-ubuntu2004-11-4-local_11.4.3-470.82.01-1_amd64.deb{" "} +

+ sudo dpkg -i + cuda-repo-ubuntu2004-11-4-local_11.4.3-470.82.01-1_amd64.deb

+ sudo apt-key add /var/cuda-repo-ubuntu2004-11-4-local/7fa2af80.pub

+ sudo apt-get -y update

+ sudo apt-get -y install cuda +
+
+

+ Create virtual environment (pipenv) +

+ The following command creates a virtual environment using pipenv and will install + the dependencies using pipenv. +

+        make setup
+      
+

+
+ +

+ If you wish to use conda or another virtual environment, you can also + install the dependencies using the requirements.txt{" "} + file.{" "} +

+
+      pip install -r requirements.txt
+    
+
+ +

+ Follow the steps below to install H2O LLM Studio on a Windows machine + using Windows Subsystem for Linux{" "} + WSL2 +

+

+ 1. Download the{" "} + + latest nvidia driver + {" "} + for Windows.{" "} +

+

+ 2. Open PowerShell or a Windows Command Prompt window in administrator + mode.{" "} +

+

+ 3. Run the following command to confirm that the driver is installed + properly and see the driver version. +

+        nvidia-smi
+      
+

+

+ 4. Run the following command to install WSL2. +

+        wsl --install
+      
+

+

5. Launch the WSL2 Ubuntu installation.

+

+ 6. Install the{" "} + + WSL2 Nvidia Cuda Drivers + + . +

+        
+          wget
+          https://developer.download.nvidia.com/compute/cuda/repos/wsl-ubuntu/x86_64/cuda-wsl-ubuntu.pin{" "}
+          

+ sudo mv cuda-ubuntu2004.pin + /etc/apt/preferences.d/cuda-repository-pin-600

+ wget + https://developer.download.nvidia.com/compute/cuda/12.2.0/local_installers/cuda-repo-wsl-ubuntu-12-2-local_12.2.0-1_amd64.deb{" "} +

+ sudo dpkg -i cuda-repo-wsl-ubuntu-12-2-local_12.2.0-1_amd64.deb

+ sudo cp /var/cuda-repo-wsl-ubuntu-12-2-local/cuda-*-keyring.gpg + /usr/share/keyrings/

+ sudo apt-get update

+ sudo apt-get -y install cuda +
+
+

+

+ 7. Set up the required python system installs (Python 3.10). +

+        
+          sudo add-apt-repository ppa:deadsnakes/ppa 

+ sudo apt install python3.10

+ sudo apt-get install python3.10-distutils

+ curl -sS https://bootstrap.pypa.io/get-pip.py | python3.10 +
+
+

+

+ 8. Create the virtual environment. +

+        
+          sudo apt install -y python3.10-venv

+ python3 -m venv llmstudio

+ source llmstudio/bin/activate

+
+
+

+

9. Clone the H2O LLM Studio repository locally.

+        
+          git clone https://github.com/h2oai/h2o-llmstudio.git

+ cd h2o-llmstudio +
+
+

+

+ 10. Install H2O LLM Studio using the `requirements.txt`. +

+        pip install -r requirements.txt
+      
+

+

+ 11. Run the H2O LLM Studio application. +

+        
+          H2O_WAVE_MAX_REQUEST_SIZE=25MB \ 

+ H2O_WAVE_NO_LOG=True \

+ H2O_WAVE_PRIVATE_DIR="/download/@output/download" \

+ wave run app +
+
+

+

+ This will start the H2O Wave server and the H2O LLM Studio app. Navigate + to http://localhost:10101/ (we recommend using Chrome) to access + H2O LLM Studio and start fine-tuning your models. +

+
+
+::: + +## Install custom package + +If required, you can install additional Python packages into your environment. This can be done using pip after activating your virtual environment via `make shell`. For example, to install flash-attention, you would use the following commands: + +```bash +make shell +pip install flash-attn --no-build-isolation +pip install git+https://github.com/HazyResearch/flash-attention.git#subdirectory=csrc/rotary +``` + +Alternatively, you can also directly install the custom package by running the following command. + +```bash +pipenv install package_name +``` + +## Run H2O LLM Studio + +There are several ways to run H2O LLM Studio depending on your requirements. + +1. [Run H2O LLM Studio GUI](#run-h2o-llm-studio-gui) +2. [Run using Docker from a nightly build](#run-using-docker-from-a-nightly-build) +3. [Run by building your own Docker image](#run-by-building-your-own-docker-image) +4. [Run with the CLI (command-line interface)](#run-with-command-line-interface-cli) + +### Run H2O LLM Studio GUI + +Run the following command to start the H2O LLM Studio. + +```sh +make llmstudio +``` + +This will start the H2O Wave server and the H2O LLM Studio app. Navigate to [http://localhost:10101/](http://localhost:10101/) (we recommend using Chrome) to access H2O LLM Studio and start fine-tuning your models. + +![home-screen](llm-studio-home-screen.png) + +If you are running H2O LLM Studio with a custom environment other than Pipenv, start the app as follows: + +```sh +H2O_WAVE_APP_ADDRESS=http://127.0.0.1:8756 \ +H2O_WAVE_MAX_REQUEST_SIZE=25MB \ +H2O_WAVE_NO_LOG=True \ +H2O_WAVE_PRIVATE_DIR="/download/@output/download" \ +wave run app +``` + +### Run using Docker from a nightly build + +First, install Docker by following the instructions from the [NVIDIA Container Installation Guide](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#docker). 
H2O LLM Studio images are stored in the `h2oai GCR vorvan` container repository. + +```sh +mkdir -p `pwd`/data +mkdir -p `pwd`/output +docker run \ + --runtime=nvidia \ + --shm-size=64g \ + --init \ + --rm \ + -p 10101:10101 \ + -v `pwd`/data:/workspace/data \ + -v `pwd`/output:/workspace/output \ + -v ~/.cache:/home/llmstudio/.cache \ + gcr.io/vorvan/h2oai/h2o-llmstudio:nightly +``` + +Navigate to [http://localhost:10101/](http://localhost:10101/) (we recommend using Chrome) to access H2O LLM Studio and start fine-tuning your models. + +:::info +Other helpful docker commands are `docker ps` and `docker kill`. +::: + +### Run by building your own Docker image + +```sh +docker build -t h2o-llmstudio . +docker run \ + --runtime=nvidia \ + --shm-size=64g \ + --init \ + --rm \ + -p 10101:10101 \ + -v `pwd`/data:/workspace/data \ + -v `pwd`/output:/workspace/output \ + -v ~/.cache:/home/llmstudio/.cache \ + h2o-llmstudio +``` + +### Run with command line interface (CLI) + +You can also use H2O LLM Studio with the command line interface (CLI) and specify the configuration .yaml file that contains all the experiment parameters. To finetune using H2O LLM Studio with CLI, activate the pipenv environment by running `make shell`. + +To specify the path to the configuration file that contains the experiment parameters, run: + +```sh +python train.py -Y {path_to_config_yaml_file} +``` + +To run on multiple GPUs in DDP mode, run: + +```sh +bash distributed_train.sh {NR_OF_GPUS} -Y {path_to_config_yaml_file} +``` + +:::info +By default, the framework will run on the first `k` GPUs. If you want to specify specific GPUs to run on, use the `CUDA_VISIBLE_DEVICES` environment variable before the command. +::: + +To start an interactive chat with your trained model, run: + +```sh +python prompt.py -e {experiment_name} +``` + +`experiment_name` is the output folder of the experiment you want to chat with. 
The interactive chat will also work with models that were fine-tuned using the GUI. diff --git a/documentation/docs/get-started/videos.md b/documentation/docs/get-started/videos.md new file mode 100644 index 0000000000000000000000000000000000000000..7a97e74d3864c37d5bf1f2dc5e22db252c8743ab --- /dev/null +++ b/documentation/docs/get-started/videos.md @@ -0,0 +1,49 @@ +--- +description: Learn from a collection of videos about LLM Studio. +--- +import ReactPlayer from 'react-player' + + +# Videos + +## Discovering the Potential of LLMs + + + + +:::info Note + In this video, Andreea Turcu delves in-depth into the world of language models, showcasing how users can use H2O.ai's LLM Studio to their full advantage. +::: + +--- + +## The Fine Art of Fine-Tuning Large Language Models + + + + +:::info Note + In this video, Pascal Pfeiffer, Principal Data Scientist at H2O.ai and Kaggle Grandmaster, announces the release of H2O LLM Studio and talks about fine-tuning LLMs using H2O LLM Studio at H2O World India 2023. +::: + +--- + +## Basic introduction to H2O LLM Studio + + + + +:::info Note + In this video, Avkash Chauhan, founder of Prodramp Inc, gives a basic introduction about H2O LLM Studio. +::: + +---- + +## LLM Fine-Tuning, Falcon 40b, and the State of Open-Source + + + + +:::info Note + In this video, Pascal Pfeiffer, the Principal Data Scientist at h2o.ai is interviewed about LLM fine-tuning, being a Kaggle Grandmaster, H2O.ai, Falcon 40b, the state of open-source, and more. 
+::: \ No newline at end of file diff --git a/documentation/docs/get-started/what-is-h2o-llm-studio.md b/documentation/docs/get-started/what-is-h2o-llm-studio.md new file mode 100644 index 0000000000000000000000000000000000000000..d3423b8cf5825da9ca005ec0a10f6d2247fc8eba --- /dev/null +++ b/documentation/docs/get-started/what-is-h2o-llm-studio.md @@ -0,0 +1,16 @@ +--- +description: H2O LLM Studio is an open-source, no-code LLM graphical user interface (GUI) designed for fine-tuning state-of-the-art large language models. +--- +# What is H2O LLM Studio? + +H2O LLM Studio is an open-source, no-code [LLM](../concepts#llm) graphical user interface (GUI) designed for fine-tuning state-of-the-art large language models. + +[Fine-tuning](../concepts#fine-tuning) a pretrained language model requires coding expertise and extensive knowledge about the model and its [hyperparameters](../concepts#parameters-and-hyperparameters), however H2O LLM Studio enables NLP practitioners to fine-tune their LLMs easily with no need for coding and better flexibility over customization. + +H2O LLM Studio also lets you chat with the fine-tuned model and receive instant feedback about model performance. + +## Who is H2O LLM Studio for? + +H2O LLM Studio is a free and open-source tool that is designed for anyone who wants to fine-tune their own language models. It is designed to be easy to use and accessible to everyone regardless of technical expertise. + +NLP practitioners and data scientists in particular may find it useful to easily and effectively create and fine-tune large language models.
\ No newline at end of file diff --git a/documentation/docs/guide/datasets/configure-dataset.png b/documentation/docs/guide/datasets/configure-dataset.png new file mode 100644 index 0000000000000000000000000000000000000000..1e1a4d9d8b622d08a8646be9f92f045ed870568b Binary files /dev/null and b/documentation/docs/guide/datasets/configure-dataset.png differ diff --git a/documentation/docs/guide/datasets/data-connectors-format.md b/documentation/docs/guide/datasets/data-connectors-format.md new file mode 100644 index 0000000000000000000000000000000000000000..094c2662461024384d44c2a9a7eafd189bc3993c --- /dev/null +++ b/documentation/docs/guide/datasets/data-connectors-format.md @@ -0,0 +1,31 @@ +# Supported data connectors and format + +## Data connectors + +H2O LLM Studio supports the following data connectors to access or upload external data sources. + +- **Upload**: Upload a local dataset from your machine. +- **Local**: Specify the file location of the dataset on your machine. +- **AWS S3 (Amazon AWS S3)**: Connect to an Amazon AWS S3 data bucket. +- **Kaggle**: Connect to a Kaggle dataset. + +## Data format + +- Each data connector requires either a single `.csv` or `.pq` file, or the data to be in a `.zip` file for a successful import. + +- H2O LLM studio requires a `.csv` file with a minimum of two columns, where one contains the instructions and the other has the model’s expected output. You can also include an additional validation dataframe in the same format or allow for an automatic train/validation split to assess the model’s performance. + +- Optionally, a **Parent Id** can be used for training nested data prompts that are linked to a parent question. + +- During an experiment you can adapt the data representation with the following settings: + - **Prompt Column:** The column in the dataset containing the user prompt. + - **Answer Column:** The column in the dataset containing the expected output. 
+ - **Parent Id Column:** An optional column specifying the parent id to be used for chained conversations. The value of this column needs to match an additional column with the name `id`. If provided, the prompt will be concatenated after preceding parent rows. + +:::info +To train a chatbot style model, you need to convert your data into a question and answer format. There are other enterprise solutions by H2O.ai that may help you prep your data. For more information, see [H2O.ai's Generative AI page](https://h2o.ai/) and this blogpost about [H2O LLM DataStudio: Streamlining Data Curation and Data Preparation for LLMs related tasks](https://blog.h2o.ai/blog/streamlining-data-preparation-for-fine-tuning-of-large-language-models/). +::: + +## Example data + +H2O LLM Studio provides a sample dataset (converted dataset from [OpenAssistant/oasst2](https://huggingface.co/datasets/OpenAssistant/oasst2)) +that can be downloaded [here](https://www.kaggle.com/code/philippsinger/openassistant-conversations-dataset-oasst2?scriptVersionId=160485459). It is recommended to use `train_full.csv` for training. This dataset is also downloaded and prepared by default when first starting the GUI. Multiple dataframes can be uploaded into a single dataset by uploading a `.zip` archive. \ No newline at end of file diff --git a/documentation/docs/guide/datasets/import-dataset.md b/documentation/docs/guide/datasets/import-dataset.md new file mode 100644 index 0000000000000000000000000000000000000000..2e6b039c6b886d7b9d8175728effc81b3bb9dde7 --- /dev/null +++ b/documentation/docs/guide/datasets/import-dataset.md @@ -0,0 +1,148 @@ +--- +description: H2O LLM Studio provides a number of data connectors to support importing data from local or external sources and requires your data to be in a certain format for successful importing of data.
+--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import Admonition from '@theme/Admonition'; +import upload_dataset from './upload-dataset.png'; +import upload_local_file from './upload-local-file.png'; +import import_s3_bucket from './import-s3-bucket.png'; +import import_kaggle_dataset from './import-kaggle-dataset.png'; +import TrainDataframeTooltip from '../../tooltips/experiments/_train-dataframe.mdx'; +import ValidationDataframeTooltip from '../../tooltips/experiments/_validation-dataframe.mdx'; +import PromptColumnTooltip from '../../tooltips/experiments/_prompt-column.mdx'; +import AnswerColumnTooltip from '../../tooltips/experiments/_answer-column.mdx'; +import ParentIdColumnTooltip from '../../tooltips/experiments/_parent-id-column.mdx'; + +# Import a dataset + +H2O LLM Studio provides a number of data connectors to support importing data from local or external sources and requires your data to be in a certain format for successful importing of data. + +For more information, see [Supported data connectors and format](data-connectors-format). + +## Import data + +Follow the relevant steps below to import a dataset to H2O LLM Studio. + +1. On the H2O LLM Studio left-navigation pane, click **Import dataset**. +2. Select the relevant **Source** (data connector) that you want to use from the dropdown list . + :::note Data sources + + +
    +
  1. + Drag and drop the file, or click Browse and select the file you want to upload. +
  2. +
  3. + Click Upload. + upload-dataset +
  4. +
+
+ +
    +
  1. + Enter the file path as the File Location or select the relevant local directory that the dataset is located in. +
  2. +
  3. + Click Continue. + upload-local-file +
  4. +
+
+ +
    +
  1. + Enter values for the following fields: +
      +
    • + S3 bucket name:

      + The name of the S3 bucket including the relative file paths. +
    • +
    • + AWS access key:

      + The access key associated with your S3 bucket. This field is optional. If the S3 bucket is public, you can leave this empty for anonymous access. +
    • +
    • + AWS access secret:

      + The access secret associated with your S3 bucket. This field is optional. If the S3 bucket is public, you can leave this empty for anonymous access. +
    • +
    • + File name:

      + Enter the file name of the dataset that you want to import. +
    • +
    +
    + +

    For more information, see AWS credentials and Methods for accessing a bucket in the AWS Documentation.

    +
    +
    +
  2. +
  3. + Click Continue. + import-s3-bucket +
  4. +
+
+ +
    +
  1. + Enter values for the following fields: +
      +
    • + Kaggle API command:

      + Enter the Kaggle API command that you want to execute. +
    • +
    • + Kaggle username:

      + Your Kaggle username for API authentication. +
    • +
    • + Kaggle secret key:

      + Your Kaggle secret key for API authentication. +
    • +
    +
  2. +
  3. + Click Continue. + import-kaggle-dataset +
  4. +
+
+
+ ::: + +## Configure dataset + +Once you have successfully uploaded or imported your dataset, you can configure the dataset settings. + +:::info Tip +You can upload a `.zip` file with both training and validation sets to avoid having to separately upload files. +::: + +- **Dataset name:**
+ A suitable name for the whole dataset which includes both the train dataframe and validation dataframe. + +- **Train Dataframe:** + +- **Validation Dataframe:** + +- **Prompt Column:** + +- **Answer Column:** + +- **Parent Id Column:** + +![configure-dataset](configure-dataset.png) + +## Data validity check + +H2O LLM Studio will provide a preview of the dataset input (sample questions) and output (sample answers) according to the content of the imported dataset. Review the text to ensure that the input and output is as intended, and then click **Continue**. + +## View dataset + +You will now be redirected to the **View datasets** screen. You should be able to see the dataset you just imported listed on the screen. + +![view-dataset](view-imported-dataset.png) + +For more information about viewing dataset summary and statistics, see [View and manage datasets](view-dataset) \ No newline at end of file diff --git a/documentation/docs/guide/datasets/import-kaggle-dataset.png b/documentation/docs/guide/datasets/import-kaggle-dataset.png new file mode 100644 index 0000000000000000000000000000000000000000..4e0f588f9479c38e932ce6b228e7ae25408800e3 Binary files /dev/null and b/documentation/docs/guide/datasets/import-kaggle-dataset.png differ diff --git a/documentation/docs/guide/datasets/import-s3-bucket.png b/documentation/docs/guide/datasets/import-s3-bucket.png new file mode 100644 index 0000000000000000000000000000000000000000..cfe66340fc9d6ad0801255d2984445784aa09a3e Binary files /dev/null and b/documentation/docs/guide/datasets/import-s3-bucket.png differ diff --git a/documentation/docs/guide/datasets/merge-datasets.md b/documentation/docs/guide/datasets/merge-datasets.md new file mode 100644 index 0000000000000000000000000000000000000000..39b99ea7edb528e1ece17820e60407f7eadabc07 --- /dev/null +++ b/documentation/docs/guide/datasets/merge-datasets.md @@ -0,0 +1,34 @@ +--- +description: H2O LLM Studio enables you to merge imported datasets into one main dataset. 
This functionality can be used to merge training and validation data together into one dataset or extend your existing dataset with more data and increase your dataset size. +--- +import Icon from "@material-ui/core/Icon"; + +# Merge datasets + +H2O LLM Studio enables you to merge imported datasets into one main dataset. This functionality can be used to merge training and validation data together into one dataset or extend your existing dataset with more data and increase your dataset size. + +:::info +H2O LLM Studio does not merge dataset files in the sense that rows are combined, and duplicate rows are removed. "Merge", in this case, refers to bringing the files of one dataset into another dataset that may already contain other dataset files. +::: + +Generally, you might want to merge datasets in H2O LLM Studio to have both the training data .csv and validation data .csv in one final dataset. + +1. On the H2O LLM Studio left-navigation pane, click **View datasets**. +2. Click the more_vert Kebab menu of the dataset you want to merge with. +3. Click **Edit dataset**. +4. Click **Merge with existing dataset**. +5. Select the dataset that you want to merge with. + ![merge-datasets](merge-datasets.png) +6. Click **Merge**. +7. Adjust the dataset configuration if needed. For more information about the configurations, see [Configure dataset](./import-dataset#configure-dataset). +8. Click **Continue**. +9. Review the text to ensure that the input and output is as intended, and then click **Continue**. + +Your datasets are now merged. + +:::info +Alternatively, you can also merge datasets at the point of [importing a dataset](./import-dataset) or combine both datasets (.csv files) into a `.zip` file before uploading it as a whole dataset.
+::: + + + diff --git a/documentation/docs/guide/datasets/merge-datasets.png b/documentation/docs/guide/datasets/merge-datasets.png new file mode 100644 index 0000000000000000000000000000000000000000..0af553fecee3b4578b6985b311a715bb8bcaa8a0 Binary files /dev/null and b/documentation/docs/guide/datasets/merge-datasets.png differ diff --git a/documentation/docs/guide/datasets/upload-dataset.png b/documentation/docs/guide/datasets/upload-dataset.png new file mode 100644 index 0000000000000000000000000000000000000000..e089ddb2b6da7df6589e4227b340b72c4adc0cc8 Binary files /dev/null and b/documentation/docs/guide/datasets/upload-dataset.png differ diff --git a/documentation/docs/guide/datasets/upload-local-file.png b/documentation/docs/guide/datasets/upload-local-file.png new file mode 100644 index 0000000000000000000000000000000000000000..cbe9e42e0d3b00b9664547a9a2723fd07bb4e467 Binary files /dev/null and b/documentation/docs/guide/datasets/upload-local-file.png differ diff --git a/documentation/docs/guide/datasets/view-dataset.md b/documentation/docs/guide/datasets/view-dataset.md new file mode 100644 index 0000000000000000000000000000000000000000..e0814ebae5510e252cb028098796892c74602b08 --- /dev/null +++ b/documentation/docs/guide/datasets/view-dataset.md @@ -0,0 +1,74 @@ +--- +description: You can view, review, edit, or delete your datasets once you have imported them. You can also start a new experiment using a dataset you have imported. +--- +import Icon from "@material-ui/core/Icon"; + +# View and manage dataset + +You can view, review, edit, or delete your datasets once you have imported them. You can also start a new experiment using a dataset you have imported. + +## View a dataset + +To view an imported dataset: + +1. On the H2O LLM Studio left-navigation pane, click **View datasets**. + +2. You will see the datasets table with a list of all the datasets you have imported so far. Click the name of the dataset that you want to view. 
+ + ![view-datasets](view-imported-dataset.png) + + :::info + For more information about the dataset details you see on the table above, see [dataset configurations](import-dataset.md#configure-a-dataset). + ::: + +## Dataset tabs + +You will see the following tabs that provide details and different aspects of your dataset. + +- **Sample train data** : This tab contains sample training data from the imported dataset. + +- **Sample train visualization:** This tab visualizes a few sample training data from the imported dataset in a question-answer format; simulating the way the chatbot would answer questions based on the training data. + +- **Train data statistics:** This tab contains metrics about the training data (e.g., unique values) from the imported dataset. + +- **Summary:** This tab contains the following details about the dataset. + + | Name | Description | + | ----------- | ------------------------------------ | + | **Name** | Name of the dataset. | + | **Problem type** | Problem type of the dataset. | + | **Train dataframe** | Name of the training dataframe in the imported dataset. An imported dataset can contain train, test, and validation dataframes. | + | **Train rows** | The number of rows the train dataframe contains. | + | **Validation dataframe** | Name of the validation dataframe in the imported dataset. An imported dataset can contain train, test, and validation dataframes. | + | **Validation rows** | The number of rows the validation dataframe contains. | + | **Labels** | The labels the imported dataset contains. | + + +## Edit a dataset + +To edit an imported dataset, + +1. On the H2O LLM Studio left-navigation pane, click **View datasets**. You will see the datasets table with a list of all the datasets you have imported so far. +2. Locate the row of the dataset you want to edit and click the more_vert Kebab menu. +3. Select **Edit dataset**. +4. Make the desired changes to the dataset configuration. 
You can also [merge the dataset with an existing dataset](merge-datasets) at this point. +5. Click **Continue** and review the dataset with your changes. + + + +## Delete a dataset + +When a dataset is no longer needed, you can delete it. Deleted datasets are permanently removed from the H2O LLM Studio instance. + +:::caution +You can only delete datasets that are not linked to any experiments. If you wish to delete a dataset that is linked to an experiment, first [delete the experiment](../experiments/view-an-experiment#delete-an-experiment), and then delete the dataset. +::: + +1. On the H2O LLM Studio left-navigation pane, click **View datasets**. +2. Click **Delete datasets**. +3. Select the dataset(s) that you want to delete. +4. Click **Delete** to confirm deletion. \ No newline at end of file diff --git a/documentation/docs/guide/datasets/view-imported-dataset.png b/documentation/docs/guide/datasets/view-imported-dataset.png new file mode 100644 index 0000000000000000000000000000000000000000..0673ca74dbdb0a221127e0991fb0da0d891a63ec Binary files /dev/null and b/documentation/docs/guide/datasets/view-imported-dataset.png differ diff --git a/documentation/docs/guide/experiments/best-validation-sample.png b/documentation/docs/guide/experiments/best-validation-sample.png new file mode 100644 index 0000000000000000000000000000000000000000..cdf3521752bb3f9acfb16b9e7c80f665076a2df8 Binary files /dev/null and b/documentation/docs/guide/experiments/best-validation-sample.png differ diff --git a/documentation/docs/guide/experiments/charts-tab.png b/documentation/docs/guide/experiments/charts-tab.png new file mode 100644 index 0000000000000000000000000000000000000000..8b56e2f0fc05612f7cbbb4482020158a7d56bf4f Binary files /dev/null and b/documentation/docs/guide/experiments/charts-tab.png differ diff --git a/documentation/docs/guide/experiments/chat-tab.png b/documentation/docs/guide/experiments/chat-tab.png new file mode 100644 index 
0000000000000000000000000000000000000000..fdabfbc3a95b8f8aa2e1f6bbe5cef977c70b7124 Binary files /dev/null and b/documentation/docs/guide/experiments/chat-tab.png differ diff --git a/documentation/docs/guide/experiments/compare-experiments.md b/documentation/docs/guide/experiments/compare-experiments.md new file mode 100644 index 0000000000000000000000000000000000000000..e01458476d575c9a0c855656d5cffce39236bed7 --- /dev/null +++ b/documentation/docs/guide/experiments/compare-experiments.md @@ -0,0 +1,21 @@ +--- +description: The H2O LLM studio provides a useful feature to compare experiments which allow comparing multiple experiments and analyzing how different model parameters affect model performance. +--- +# Compare experiments + +The H2O LLM studio provides a useful feature to compare experiments which allow comparing multiple experiments and analyzing how different model parameters affect model performance. + +Follow the relevant steps below to compare experiments in H2O LLM Studio. + +1. On the H2O LLM Studio left-navigation pane, click **View experiments**. +2. Click **Compare experiments**. +3. Select the experiments you want to compare. +4. Click **Compare experiments**. + + ![compare experiments](compare-experiments.png) + + The **Charts** tab visually represents the comparison of train/validation loss, metrics, and learning rate of selected experiments. The **Config** tab compares the configuration settings of selected experiments. + +:::info note +In addition, H2O LLM Studio also integrates with [Neptune](https://neptune.ai/), a powerful experiment tracking platform. By enabling Neptune logging when starting an experiment, you can easily track and visualize all aspects of your experiment in real time. This includes model performance, hyperparameter tuning, and other relevant metrics. 
+::: \ No newline at end of file diff --git a/documentation/docs/guide/experiments/compare-experiments.png b/documentation/docs/guide/experiments/compare-experiments.png new file mode 100644 index 0000000000000000000000000000000000000000..4ec00338068273e9bf530dd57d7e59573e17a56c Binary files /dev/null and b/documentation/docs/guide/experiments/compare-experiments.png differ diff --git a/documentation/docs/guide/experiments/create-an-experiment.md b/documentation/docs/guide/experiments/create-an-experiment.md new file mode 100644 index 0000000000000000000000000000000000000000..06fc3dc0b8cc19b40fce2dc8b59326629c610cfd --- /dev/null +++ b/documentation/docs/guide/experiments/create-an-experiment.md @@ -0,0 +1,62 @@ +--- +description: This page highlights the steps needed to create an experiment in H2O LLM Studio. +--- +import Icon from "@material-ui/core/Icon"; + +# Create an experiment + +Follow the relevant steps below to create an experiment in H2O LLM Studio. + +1. On the H2O LLM Studio left-navigation pane, click **Create experiment**. Alternatively, you can click **New experiment** on the more_vert Kebab menu of the [View datasets](../datasets/view-dataset.md) page. + +2. Select the **Dataset** you want to use to fine-tune an LLM model. + +3. Select the **Problem type**. + +4. Provide a meaningful **Experiment name**. + +5. Define the parameters. The most important parameters are: + - **LLM Backbone**: This parameter determines the LLM architecture to use. It is the foundation model that you continue training. H2O LLM Studio has a predefined list of recommended types of foundation models, but you can also use [Hugging Face models](https://huggingface.co/models). + - **Mask Prompt Labels**: This option controls whether to mask the prompt labels during training and only train on the loss of the answer. + - Hyperparameters such as **Learning rate**, **Batch size**, and number of epochs determine the training process. 
You can refer to the tooltips that are shown next to each hyperparameter in the GUI to learn more about them. + - **Evaluate Before Training**: This option lets you evaluate the model before training, which can help you judge the quality of the LLM backbone before fine-tuning. + + H2O LLM Studio provides several metric options for evaluating the performance of your model. In addition to the BLEU score, H2O LLM Studio also offers the GPT3.5 and GPT4 metrics that utilize the OpenAI API to determine whether the predicted answer is more favorable than the ground truth answer. To use these metrics, you can either export your OpenAI API key as an environment variable before starting LLM Studio, or you can specify it in the **Settings** menu within the UI. + + :::info note + H2O LLM Studio provides an overview of all the parameters you need to specify for your experiment. The default settings are suitable when you first start an experiment. To learn more about the parameters, see [Experiment settings](experiment-settings.md). + ::: + +6. Click **Run experiment**. + + ![run-experiment](run-experiment.png) + +## Run an experiment on the OASST data via CLI + +The steps below provide an example of how to run an experiment on [OASST](https://huggingface.co/OpenAssistant) data via the command line interface (CLI). + +1. Get the training dataset (`train_full.csv`), [OpenAssistant Conversations Dataset OASST2](https://www.kaggle.com/code/philippsinger/openassistant-conversations-dataset-oasst2?scriptVersionId=160485459) and place it into the `examples/data_oasst2` folder; or download it directly using the [Kaggle API](https://www.kaggle.com/docs/api) command given below. + + ```bash + kaggle kernels output philippsinger/openassistant-conversations-dataset-oasst2 -p examples/data_oasst2/ + ``` + +2. Go into the interactive shell or open a new terminal window. Install the dependencies first, if you have not installed them already.
+ + ```bash + make setup # installs all dependencies + make shell + ``` + +3. Run the following command to run the experiment. + + ```bash + python train.py -Y examples/example_oasst2.yaml + ``` + +After the experiment is completed, you can find all output artifacts in the `examples/output_oasst2` folder. +You can then use the `prompt.py` script to chat with your model. + +```bash +python prompt.py -e examples/output_oasst2 +``` \ No newline at end of file diff --git a/documentation/docs/guide/experiments/delete-experiment.png b/documentation/docs/guide/experiments/delete-experiment.png new file mode 100644 index 0000000000000000000000000000000000000000..a80f078c03091a56829d5220855f324a9c818e3a Binary files /dev/null and b/documentation/docs/guide/experiments/delete-experiment.png differ diff --git a/documentation/docs/guide/experiments/experiment-settings.md b/documentation/docs/guide/experiments/experiment-settings.md new file mode 100644 index 0000000000000000000000000000000000000000..438a11e145d3e8e64198fc4a7933b142e22a4244 --- /dev/null +++ b/documentation/docs/guide/experiments/experiment-settings.md @@ -0,0 +1,496 @@ +--- +description: All the settings needed for creating an experiment are explored in this page. 
+--- +import GeneralSettingsDataset from '../../tooltips/experiments/_dataset.mdx'; +import GeneralSettingsProblemType from '../../tooltips/experiments/_problem-type.mdx'; +import GSImportConfigFromYaml from '../../tooltips/experiments/_import-config-from-yaml.mdx'; +import GSExperimentName from '../../tooltips/experiments/_experiment-name.mdx'; +import GSLLMBackbone from '../../tooltips/experiments/_llm-backbone.mdx'; +import DSTrainDataframe from '../../tooltips/experiments/_train-dataframe.mdx'; +import DSvalidationStrategy from '../../tooltips/experiments/_validation-strategy.mdx'; +import DSvalidationSize from '../../tooltips/experiments/_validation-size.mdx'; +import DSdataSample from '../../tooltips/experiments/_data-sample.mdx'; +import DSpromptColumn from '../../tooltips/experiments/_prompt-column.mdx'; +import DSsystemColumn from '../../tooltips/experiments/_system-column.mdx'; +import DSanswerColumn from '../../tooltips/experiments/_answer-column.mdx'; +import DSparentIdColumn from '../../tooltips/experiments/_parent-id-column.mdx'; +import DStextPromptStart from '../../tooltips/experiments/_text-prompt-start.mdx'; +import DStextAnswerSeparator from '../../tooltips/experiments/_text-answer-separator.mdx'; +import DSadaptiveKlControl from '../../tooltips/experiments/_adaptive-kl-control.mdx'; +import DSaddEosTokentoprompt from '../../tooltips/experiments/_add-eos-token-to-prompt.mdx'; +import DSaddEosTokentoanswer from '../../tooltips/experiments/_add-eos-token-to-answer.mdx'; +import DSmaskPromptlabels from '../../tooltips/experiments/_mask-prompt-labels.mdx'; +import TSmaxLengthPrompt from '../../tooltips/experiments/_max-length-prompt.mdx'; +import TSmaxLengthAnswer from '../../tooltips/experiments/_max-length-answer.mdx'; +import TSmaxLength from '../../tooltips/experiments/_max-length.mdx'; +import TSaddpromptanswertokens from '../../tooltips/experiments/_add-prompt-answer-tokens.mdx'; +import TSpaddingQuantile from 
'../../tooltips/experiments/_padding-quantile.mdx'; +import TSuseFast from '../../tooltips/experiments/_use-fast.mdx'; +import ASBackboneDtype from '../../tooltips/experiments/_backbone-dtype.mdx'; +import ASGradientcheckpointing from '../../tooltips/experiments/_gradient-checkpointing.mdx'; +import ASforceEmbeddingGradients from '../../tooltips/experiments/_force-embedding-gradients.mdx'; +import ASintermediateDropout from '../../tooltips/experiments/_intermediate-dropout.mdx'; +import ASpretrainedWeights from '../../tooltips/experiments/_pretrained-weights.mdx'; +import TSoptimizer from '../../tooltips/experiments/_optimizer.mdx'; +import TSlossfunction from '../../tooltips/experiments/_loss-function.mdx'; +import TSlearningRate from '../../tooltips/experiments/_learning-rate.mdx'; +import TSuseflashattention2 from '../../tooltips/experiments/_use-flash-attention-2.mdx'; +import TSbatchSize from '../../tooltips/experiments/_batch-size.mdx'; +import TSepochs from '../../tooltips/experiments/_epochs.mdx'; +import TSschedule from '../../tooltips/experiments/_schedule.mdx'; +import TSwarmupEpochs from '../../tooltips/experiments/_warmup-epochs.mdx'; +import TSweightDecay from '../../tooltips/experiments/_weight-decay.mdx'; +import TSGradientclip from '../../tooltips/experiments/_gradient-clip.mdx'; +import TSgradAccumulation from '../../tooltips/experiments/_grad-accumulation.mdx'; +import TSlora from '../../tooltips/experiments/_lora.mdx'; +import TSloraR from '../../tooltips/experiments/_lora-r.mdx'; +import TSloraAlpha from '../../tooltips/experiments/_lora-alpha.mdx'; +import TSloraDropout from '../../tooltips/experiments/_lora-dropout.mdx'; +import TSloraTargetModules from '../../tooltips/experiments/_lora-target-modules.mdx'; +import TSsavebestcheckpoint from '../../tooltips/experiments/_save-best-checkpoint.mdx'; +import TSevaluationepochs from '../../tooltips/experiments/_evaluation-epochs.mdx'; +import TSevaluationbeforetraining from 
'../../tooltips/experiments/_evaluate-before-training.mdx'; +import TStrainvalidationdata from '../../tooltips/experiments/_train-validation-data.mdx'; +import TSuseRHLF from '../../tooltips/experiments/_use-rlhf.mdx'; +import TSrewardModel from '../../tooltips/experiments/_reward-model.mdx'; +import TSinitialKlCoefficient from '../../tooltips/experiments/_initial-kl-coefficient.mdx'; +import TSklTarget from '../../tooltips/experiments/_kl-target.mdx'; +import TSklHorizon from '../../tooltips/experiments/_kl-horizon.mdx'; +import TSadvantagesGamma from '../../tooltips/experiments/_advantages-gamma.mdx'; +import TSadvantagesLambda from '../../tooltips/experiments/_advantages-lambda.mdx'; +import TSppoClipPolicy from '../../tooltips/experiments/_ppo-clip-policy.mdx'; +import TSppoClipValue from '../../tooltips/experiments/_ppo-clip-value.mdx'; +import TSscalingFactorValueLoss from '../../tooltips/experiments/_scaling-factor-value-loss.mdx'; +import TSppoEpochs from '../../tooltips/experiments/_ppo-epochs.mdx'; +import TSppoBatchSize from '../../tooltips/experiments/_ppo-batch-size.mdx'; +import TSppoGenerateTemp from '../../tooltips/experiments/_ppo-generate-temperature.mdx'; +import TSoffloadRewardModel from '../../tooltips/experiments/_offload-reward-model.mdx'; +import AStokenmaskprobability from '../../tooltips/experiments/_token-mask-probability.mdx'; +import ASskipParentprobability from '../../tooltips/experiments/_skip-parent-probability.mdx'; +import ASrandomparentprobability from '../../tooltips/experiments/_random-parent-probability.mdx'; +import ASneftunenoisealpha from '../../tooltips/experiments/_neftune_noise_alpha.mdx'; +import PSmetric from '../../tooltips/experiments/_metric.mdx'; +import PSmetricgptmodel from '../../tooltips/experiments/_metric-gpt-model.mdx'; +import PSmetricgpttemplate from '../../tooltips/experiments/_metric-gpt-template.mdx'; +import PSminlengthinference from '../../tooltips/experiments/_min-length-inference.mdx'; +import 
PSmaxlengthinference from '../../tooltips/experiments/_max-length-inference.mdx'; +import PSbatchsizeinference from '../../tooltips/experiments/_batch-size-inference.mdx'; +import PSdosample from '../../tooltips/experiments/_do-sample.mdx'; +import PSnumbeams from '../../tooltips/experiments/_num-beams.mdx'; +import PStemperature from '../../tooltips/experiments/_temperature.mdx'; +import PSrepetitionpenalty from '../../tooltips/experiments/_repetition-penalty.mdx'; +import PSstoptokens from '../../tooltips/experiments/_stop-tokens.mdx'; +import PStopk from '../../tooltips/experiments/_top-k.mdx'; +import PStopp from '../../tooltips/experiments/_top-p.mdx'; +import ESgpus from '../../tooltips/experiments/_gpus.mdx'; +import ESmixedprecision from '../../tooltips/experiments/_mixed-precision.mdx'; +import EScompilemodel from '../../tooltips/experiments/_compile-model.mdx'; +import ESfindunusedparameters from '../../tooltips/experiments/_find-unused-parameters.mdx'; +import EStrustremotecode from '../../tooltips/experiments/_trust-remote-code.mdx'; +import EShuggingfacebranch from '../../tooltips/experiments/_huggingface-branch.mdx'; +import ESnumofworkers from '../../tooltips/experiments/_number-of-workers.mdx'; +import ESseed from '../../tooltips/experiments/_seed.mdx'; +import LSlogger from '../../tooltips/experiments/_logger.mdx'; +import LSneptuneproject from '../../tooltips/experiments/_neptune-project.mdx'; + +# Experiment settings + +The settings for creating an experiment are grouped into the following sections: + - [General settings](#general-settings) + - [Dataset settings](#dataset-settings) + - [Tokenizer settings](#tokenizer-settings) + - [Architecture settings](#architecture-settings) + - [Training settings](#training-settings) + - [Augmentation settings](#augmentation-settings) + - [Prediction settings](#prediction-settings) + - [Environment settings](#environment-settings) + - [Logging settings](#logging-settings) + +The settings under each category 
are listed and described below. + +## General settings + +### Dataset + + + +### Problem type + + + +### Import config from YAML + + + +### Experiment name + + + +### LLM backbone + + + +## Dataset settings + +### Train dataframe + + + +### Validation strategy + + + +### Validation size + + + +### Data sample + + + +### System column + + + +### Prompt column + + + +### Answer column + + + +### Parent ID column + + + +### Text prompt start + + + +### Text answer separator + + + +## Adaptive Kl control + + + +### Add EOS token to prompt + + + +### Add EOS token to answer + + + +### Mask prompt labels + + + +## Tokenizer settings + +### Max length prompt + + + +### Max length answer + + + +### Max length + + + +### Add prompt answer tokens + + + +### Padding quantile + + + +### Use fast + + + +## Architecture settings + +### Backbone Dtype + + + +### Gradient Checkpointing + + + +### Force Embedding Gradients + + + +### Intermediate dropout + + + +### Pretrained weights + + + +## Training settings + +### Loss function + + + +### Optimizer + + + +### Learning rate + + + +### Use Flash Attention 2 + + + +### Batch size + + + +### Epochs + + + +### Schedule + + + +### Warmup epochs + + + +### Weight decay + + + +### Gradient clip + + + +### Grad accumulation + + + +### Lora + + + +### Lora R + + + +### Lora Alpha + + + +### Lora dropout + + + +### Lora target modules + + + +### Save best checkpoint + + + +### Evaluation epochs + + + +### Evaluate before training + + + +### Train validation data + + + +### Use RLHF + + + +### Reward model + + + +### Adaptive KL control + + + +### Initial KL coefficient + + + +### KL target + + + +### KL Horizon + + + +### Advantages gamma + + + +### Advantages Lambda + + + +### PPO clip policy + + + +### PPO clip value + + + +### Scaling factor value loss + + + +### PPO epochs + + + +### PPO Batch Size + + + +### PPO generate temperature + + + +### Offload reward model + + + +## Augmentation settings + +### Token mask probability + + + 
+### Skip parent probability + + + +### Random parent probability + + + +### Neftune noise alpha + + + +## Prediction settings + +### Metric + + + +### Metric GPT model + + + +### Metric GPT template + + + +### Min length inference + + + +### Max length inference + + + +### Batch size inference + + + +### Do sample + + + +### Num beams + + + +### Temperature + + + +### Repetition penalty + + + +### Stop tokens + + + +### Top K + + + +### Top P + + + +## Environment settings + +### GPUs + + + +### Mixed precision + + + +### Compile model + + + +### Find unused parameters + + + +### Trust remote code + + + +### Huggingface branch + + + +### Number of workers + + + +### Seed + + + +## Logging settings + +### Logger + + + +### Neptune project + + + diff --git a/documentation/docs/guide/experiments/export-model-to-huggingface.png b/documentation/docs/guide/experiments/export-model-to-huggingface.png new file mode 100644 index 0000000000000000000000000000000000000000..f9b2130c2f4b3dff5c3348eaf64356c011c5f5e4 Binary files /dev/null and b/documentation/docs/guide/experiments/export-model-to-huggingface.png differ diff --git a/documentation/docs/guide/experiments/export-trained-model.md b/documentation/docs/guide/experiments/export-trained-model.md new file mode 100644 index 0000000000000000000000000000000000000000..6f9438780cf2fbed8c712b221c1f0ffbcd103585 --- /dev/null +++ b/documentation/docs/guide/experiments/export-trained-model.md @@ -0,0 +1,61 @@ +--- +description: If you’re ready to share your trained model with a broader community, H2O LLM Studio allows you to export the fine-tuned model to Hugging Face with a single click. +--- +# Publish model to HuggingFace + +If you’re ready to share your trained model with a broader community, H2O LLM Studio allows you to export the fine-tuned model to [Hugging Face](https://huggingface.co/) with a single click. + +:::info note +Before publishing your model to the Hugging Face Hub, you need to have an API key with write access. 
To obtain an API token with write access, follow the [instructions provided by Hugging Face](https://huggingface.co/docs/hub/security-tokens), which involve creating an account, logging in, and generating an access token with the appropriate permission. +::: + +To publish a trained model to Hugging Face Hub: + +1. On the H2O LLM Studio left-navigation pane, click **View experiments**. You will see the experiments table with a list of all the experiments you have launched so far. + +2. Click the name of the experiment that you want to export as a model. + +3. Click **Push checkpoint to huggingface**. + +4. Enter the **Account name** on Hugging Face to push the model to a particular account. Leaving it empty will push it to the default user account. + +5. Enter the **Huggingface API** Key with write access. + +6. Click **Export**. + + ![export model to hugging face](export-model-to-huggingface.png) + +## Download a model + +1. Click **Download model** on the **View experiments** page to download the model locally. + +Use the following code snippet to utilize the converted model in Jupyter Notebook or Google Colab. + +```python +from transformers import AutoModelForCausalLM, AutoTokenizer + +model_name = "path_to_downloaded_model" # either local folder or huggingface model name + +# Important: The prompt needs to be in the same format the model was trained with. +# You can find an example prompt in the experiment logs. +prompt = "<|prompt|>How are you?<|endoftext|><|answer|>" + +tokenizer = AutoTokenizer.from_pretrained(model_name) +model = AutoModelForCausalLM.from_pretrained(model_name) +model.cuda().eval() + +inputs = tokenizer(prompt, return_tensors="pt", add_special_tokens=False).to("cuda") +# generate configuration can be modified to your needs +tokens = model.generate( + **inputs, # Input any question for the model. Ex: "What is the capital of USA?" 
+ max_new_tokens=256, + temperature=0.3, + repetition_penalty=1.2, + num_beams=1 +)[0] +tokens = tokens[inputs["input_ids"].shape[1]:] +answer = tokenizer.decode(tokens, skip_special_tokens=True) +print(answer) +``` + +You can enter any question for the model and change the parameters to get different outputs. \ No newline at end of file diff --git a/documentation/docs/guide/experiments/import-to-h2ogpt.md b/documentation/docs/guide/experiments/import-to-h2ogpt.md new file mode 100644 index 0000000000000000000000000000000000000000..d2b92249d36e17905dc37d4abedd31a5f260d29c --- /dev/null +++ b/documentation/docs/guide/experiments/import-to-h2ogpt.md @@ -0,0 +1,48 @@ +--- +description: Once the model has been fine-tuned using H2O LLM Studio, you can then use h2oGPT to query, summarize, and chat with your model. +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Import a model to h2oGPT + +Once the model has been fine-tuned using H2O LLM Studio, you can then use [h2oGPT](https://github.com/h2oai/h2ogpt/blob/main/README.md) to query, summarize, and chat with your model. + +The most common method to get the model from H2O LLM Studio over to h2oGPT, is to import it into h2oGPT via HuggingFace. However, if your data is sensitive, you can also choose to download the model locally to your machine, and then import it directly into h2oGPT. + +You can use any of the following methods: + +- Publish the model to HuggingFace and import the model from HuggingFace +- Download the model and import it to h2oGPT by specifying the local folder path +- Download the model and upload it to h2oGPT using the file upload option on the UI +- Pull a model from a Github repository or a resolved web link + +## Steps + +1. [Publish the model to HuggingFace](export-trained-model.md) or [download the model locally](export-trained-model.md#download-a-model). + +2. If you opt to download the model, make sure you extract the downloaded .zip file. + +3. 
Use the following command to import it into h2oGPT. + ``` + python generate.py --base_model=[link_or_path_to_folder] + ``` + + :::note Examples + + +
python generate.py --base_model=HuggingFaceH4/zephyr-7b-beta
+
+ +
python generate.py --base_model=zephyr-7b-beta.Q5_K_M.gguf
+
+ +
python generate.py --base_model=TheBloke/zephyr-7B-beta-AWQ
+
+
+ ::: + +:::info +For more information, see the [h2oGPT documentation](https://github.com/h2oai/h2ogpt/blob/main/docs/FAQ.md#adding-models). +::: + diff --git a/documentation/docs/guide/experiments/run-experiment.png b/documentation/docs/guide/experiments/run-experiment.png new file mode 100644 index 0000000000000000000000000000000000000000..1a1d6b4af1767137d8b457a9aa06d2f5699854b8 Binary files /dev/null and b/documentation/docs/guide/experiments/run-experiment.png differ diff --git a/documentation/docs/guide/experiments/stop-experiment.png b/documentation/docs/guide/experiments/stop-experiment.png new file mode 100644 index 0000000000000000000000000000000000000000..b45037dd340b82bf5a444734a3979aa5b56ac60d Binary files /dev/null and b/documentation/docs/guide/experiments/stop-experiment.png differ diff --git a/documentation/docs/guide/experiments/view-an-experiment.md b/documentation/docs/guide/experiments/view-an-experiment.md new file mode 100644 index 0000000000000000000000000000000000000000..ec4b6d7fe3e4ff3551221b38162029771eac8ac4 --- /dev/null +++ b/documentation/docs/guide/experiments/view-an-experiment.md @@ -0,0 +1,85 @@ +--- +description: You can view, rename, stop, or delete your experiments once you launch them. You can also create a new experiment based on an experiment you have already launched. +--- +import Icon from "@material-ui/core/Icon"; + +# View and manage experiments + +You can view, rename, stop, or delete your experiments once you launch them. You can also create a new experiment based on an experiment you have already launched. + +## View an experiment + +To view an experiment: + +1. On the H2O LLM Studio left-navigation pane, click **View experiments**. + +2. You will see the experiments table with a list of all the experiments you have launched so far. Click the name of the experiment that you want to view. 
+ + ![view-experiments](view-experiments.png) + +## Experiment tabs + +Once you click the name of the experiment, you will see the following tabs that provide details and different aspects of your experiment. + +- **Charts** : This tab visually represents the train/validation loss, metrics, and learning rate. These charts allow you to easily track your model’s performance as it trains. + + ![charts tab](charts-tab.png) + +- **Summary** : This tab contains the following details about an experiment. + + | Name | Description | + | ----------- | ------------------------------------ | + | **Name** | Name of the experiment. | + | **Dataset** | Name of the dataset. | + | **Problem type** | The problem type of the experiment. | + | **Seed** | The random seed value that H2O LLM Studio uses during model training. | + | **GPU list** | The list of GPUs H2O LLM Studio can use for the experiment. | + | **Loss** | The loss function. | + | **Metric** | The metric to evaluate the model’s performance. | + | **Val metric** | The measure of how well the experiment was performed. | + +- **Train data insights** : This tab displays the model’s first batch, so you can verify that the input data representation is correct. Also, it provides insight into how your data is being processed and can help identify potential issues early on in the experiment. + +- **Validation prediction insights** : This tab displays model predictions for random, best, and worst validation samples. This tab becomes available after the first validation run and allows you to evaluate how well your model generalizes to new data. + + ![best validation sample](best-validation-sample.png) + + ![worst validation sample](worst-validation-sample.png) + + The **Worst validation samples** give you an idea of where the model is having issues, and the model can be used to fine-tune further. + +- **Logs and Config tabs** : These two tabs show you the logs and configuration of the experiment. 
You can keep track of any changes made and quickly troubleshoot the issues that arise. + +- **Chat** : This tab provides a unique opportunity to interact with your trained model and get instant feedback on its performance. The **Chat** tab becomes available after the training is completed and can be used to evaluate how well your model performs in a conversational setting. + + :::info note + You can use the **Chat** feature only when there are no other experiments running. The chatbot is unavailable if the GPU is occupied by another experiment. + ::: + + ![chat tab](chat-tab.png) + +## Stop an experiment + +You can stop a running experiment if you no longer need it to be completed. + +1. On the H2O LLM Studio left-navigation pane, click **View experiments**. +2. Click **Stop experiments**. +3. Select the experiment(s) that you want to stop. +4. Click **Stop experiments**. + +You can also click **Stop experiment** on the more_vert Kebab menu of the relevant experiment row to stop an experiment from running. + + ![stop experiment](stop-experiment.png) + +## Delete an experiment + +When an experiment is no longer needed, you can delete it. Deleted experiments are permanently removed from the H2O LLM Studio instance. + +1. On the H2O LLM Studio left-navigation pane, click **View experiments**. +2. Click **Delete experiments**. +3. Select the experiment(s) that you want to delete and click **Delete experiments**. +4. Click **Delete** to confirm deletion. + +You can also click **Delete experiment** in the kebab menu of the relevant experiment row to delete an experiment. 
+ + ![delete experiment](delete-experiment.png) \ No newline at end of file diff --git a/documentation/docs/guide/experiments/view-experiments.png b/documentation/docs/guide/experiments/view-experiments.png new file mode 100644 index 0000000000000000000000000000000000000000..42f4225e447f9ba176225a0338b78d59e2f18f09 Binary files /dev/null and b/documentation/docs/guide/experiments/view-experiments.png differ diff --git a/documentation/docs/guide/experiments/worst-validation-sample.png b/documentation/docs/guide/experiments/worst-validation-sample.png new file mode 100644 index 0000000000000000000000000000000000000000..f06a256d1010f897e29cfbf71a4ed86f8e14b3b5 Binary files /dev/null and b/documentation/docs/guide/experiments/worst-validation-sample.png differ diff --git a/documentation/docs/index.md b/documentation/docs/index.md new file mode 100644 index 0000000000000000000000000000000000000000..7823eb6f7e0b0156f99bed7b963503a7312ff000 --- /dev/null +++ b/documentation/docs/index.md @@ -0,0 +1,110 @@ +--- +slug: / +displayed_sidebar: defaultSidebar +title: H2O LLM Studio | Docs +hide_table_of_contents: true +hide_title: true +description: 'A framework and no-code GUI designed for fine-tuning state-of-the-art large language models (LLMs)' +--- + +import H2OHome from '@site/src/components/H2OHome'; + + diff --git a/documentation/docs/key-terms.md b/documentation/docs/key-terms.md new file mode 100644 index 0000000000000000000000000000000000000000..e934e3113b6152c13fff8b5c53126a383561d03c --- /dev/null +++ b/documentation/docs/key-terms.md @@ -0,0 +1,78 @@ +--- +description: H2O LLM Studio uses several key terms across its documentation. +--- +# Key terms + +H2O LLM Studio uses several key terms across its documentation, and each, in turn, is explained in the sections below. + +## Prompt Engineering + +Prompt engineering involves crafting precise and effective input queries to guide language models in generating desired outputs or responses. 
+ +## Agents + +Software entities or components that interact with data or perform tasks within a system. + +## ELO + +An algorithm or method used to assess and rank the performance or accuracy of language models based on their proficiency in understanding and processing textual data. + +## Vector Store + +A Vector Store stores numerical representations of text for fast access in language models. + +## Pre-training + +The initial phase of training a machine learning model on a large dataset to learn general features before fine-tuning on a specific task. + +## Attention + +A mechanism that enables models to focus on specific parts of input data relevant to the task at hand, enhancing their understanding and performance. + +## Embedding + +Embedding refers to a mathematical representation of words or tokens in a numerical vector space, enabling machine learning models to understand and process language based on their context and relationships. + +## Language Model + +A language model is an AI system that understands and generates human language, predicting and generating text based on patterns and context within a given sequence of words. + +## Transformer + +A Transformer refers to a neural network architecture specifically designed for processing sequential data like text, using attention mechanisms to learn contextual relationships between words or tokens. + +## Encoders and Decoders + +Encoders and decoders are vital parts of sequence-to-sequence models used in natural language processing. Encoders process input data into a fixed-size representation, while decoders generate an output sequence based on that representation. + +## Text generation + +Text generation is the process of creating written content, such as sentences or paragraphs, using machine learning or AI algorithms based on patterns learned from existing text data. 
+ +## In-context learning + +In-context learning refers to the process where a machine learning model continuously improves and adapts by considering the context of new information within its existing knowledge, enhancing its accuracy and understanding over time. + +## Few-shot learning + +Few-shot learning refers to a machine learning technique where a model can learn from a very small amount of labeled data to generalize and make predictions accurately on new, unseen data. + +## Summarization + +Summarization is the process of condensing a larger piece of text into a shorter, coherent version while retaining its essential information. + +## Fine-tuning + +Fine-tuning refers to adjusting and optimizing a pre-trained machine learning model using specific data to enhance its performance for a particular task. + +## GPT + +GPT stands for "Generative Pre-trained Transformer," a type of language model that uses transformers to understand and generate human-like text based on vast amounts of training data. + +## GPU deployment + +GPU deployment is the utilization of graphics processing units (GPUs) to execute and accelerate the computations involved in deploying machine learning models, improving speed and efficiency in model inference or training. + +## Tokenization + +Tokenization is the process of breaking text into smaller units, typically words or phrases, to analyze or process them individually within a natural language processing system. \ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_adaptive-kl-control.mdx b/documentation/docs/tooltips/experiments/_adaptive-kl-control.mdx new file mode 100644 index 0000000000000000000000000000000000000000..1bd0a3554a97d60a0142f1297f4573c22e662a01 --- /dev/null +++ b/documentation/docs/tooltips/experiments/_adaptive-kl-control.mdx @@ -0,0 +1 @@ +Use adaptive KL control, otherwise linear. 
\ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_add-eos-token-to-answer.mdx b/documentation/docs/tooltips/experiments/_add-eos-token-to-answer.mdx new file mode 100644 index 0000000000000000000000000000000000000000..d884c3564e7f1fc58bf6afe56fc64b0244f8b87f --- /dev/null +++ b/documentation/docs/tooltips/experiments/_add-eos-token-to-answer.mdx @@ -0,0 +1 @@ +Adds EOS token at end of answer. \ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_add-eos-token-to-prompt.mdx b/documentation/docs/tooltips/experiments/_add-eos-token-to-prompt.mdx new file mode 100644 index 0000000000000000000000000000000000000000..2b78f90d04f38c07dcf54bb185e7047e28d97384 --- /dev/null +++ b/documentation/docs/tooltips/experiments/_add-eos-token-to-prompt.mdx @@ -0,0 +1 @@ +Adds EOS token at end of prompt. \ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_add-eos-token-to-system.mdx b/documentation/docs/tooltips/experiments/_add-eos-token-to-system.mdx new file mode 100644 index 0000000000000000000000000000000000000000..a1904cd07e18666c89c759f637e9401fd3505ba0 --- /dev/null +++ b/documentation/docs/tooltips/experiments/_add-eos-token-to-system.mdx @@ -0,0 +1 @@ +Adds EOS token at end of system input. \ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_add-prompt-answer-tokens.mdx b/documentation/docs/tooltips/experiments/_add-prompt-answer-tokens.mdx new file mode 100644 index 0000000000000000000000000000000000000000..b136c51bf39789a688899a8699f2042ab11652c5 --- /dev/null +++ b/documentation/docs/tooltips/experiments/_add-prompt-answer-tokens.mdx @@ -0,0 +1 @@ +Adds system, prompt and answer tokens as new tokens to the tokenizer. It is recommended to also set `Force Embedding Gradients` in this case. 
\ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_advantages-gamma.mdx b/documentation/docs/tooltips/experiments/_advantages-gamma.mdx new file mode 100644 index 0000000000000000000000000000000000000000..e96491d3fc4c577fed6df26d74e3edbab8dc20f2 --- /dev/null +++ b/documentation/docs/tooltips/experiments/_advantages-gamma.mdx @@ -0,0 +1 @@ +Gamma parameter for advantage calculation. \ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_advantages-lambda.mdx b/documentation/docs/tooltips/experiments/_advantages-lambda.mdx new file mode 100644 index 0000000000000000000000000000000000000000..9ce92edc18466455878d3024057409686400c0d4 --- /dev/null +++ b/documentation/docs/tooltips/experiments/_advantages-lambda.mdx @@ -0,0 +1 @@ +Lambda parameter for advantage calculation. \ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_answer-column.mdx b/documentation/docs/tooltips/experiments/_answer-column.mdx new file mode 100644 index 0000000000000000000000000000000000000000..abd997c0c3e8846d7fe43f1e3461233ca3b76e88 --- /dev/null +++ b/documentation/docs/tooltips/experiments/_answer-column.mdx @@ -0,0 +1,3 @@ +The column in the dataset containing the expected output. + +For classification, this needs to be an integer column containing the class label. \ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_backbone-dtype.mdx b/documentation/docs/tooltips/experiments/_backbone-dtype.mdx new file mode 100644 index 0000000000000000000000000000000000000000..1114950ef7be496b1e57eed0ad7ce273dc064939 --- /dev/null +++ b/documentation/docs/tooltips/experiments/_backbone-dtype.mdx @@ -0,0 +1 @@ +The datatype of the weights in the LLM backbone. 
\ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_batch-size-inference.mdx b/documentation/docs/tooltips/experiments/_batch-size-inference.mdx new file mode 100644 index 0000000000000000000000000000000000000000..e425403b073336be02f1ec978dee8d987b7bd4c5 --- /dev/null +++ b/documentation/docs/tooltips/experiments/_batch-size-inference.mdx @@ -0,0 +1 @@ +Defines the size of a mini-batch used during an iteration of inference. **Batch size** defines the batch size used per GPU. \ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_batch-size.mdx b/documentation/docs/tooltips/experiments/_batch-size.mdx new file mode 100644 index 0000000000000000000000000000000000000000..e321bd718f95363b93a795475d688020da295a37 --- /dev/null +++ b/documentation/docs/tooltips/experiments/_batch-size.mdx @@ -0,0 +1,3 @@ +Defines the number of training examples a mini-batch uses during an iteration of the training model to estimate the error gradient before updating the model weights. **Batch size** defines the batch size used per single GPU. + +During model training, the training data is packed into mini-batches of a fixed size. \ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_beta.mdx b/documentation/docs/tooltips/experiments/_beta.mdx new file mode 100644 index 0000000000000000000000000000000000000000..575762b09f5fd76b2d76f386eb9bca984c474f4d --- /dev/null +++ b/documentation/docs/tooltips/experiments/_beta.mdx @@ -0,0 +1,3 @@ +Beta is a temperature parameter utilized in measuring DPO losses, ordinarily within the scope of 0.1 to 0.5. +This parameter regulates the deviation from the reference model, where the reference model becomes disregarded as beta approaches zero. +For more detailed information, please refer to section (3) of the given research paper: [https://arxiv.org/pdf/2305.18290.pdf](https://arxiv.org/pdf/2305.18290.pdf). 
\ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_chatbot-name.mdx b/documentation/docs/tooltips/experiments/_chatbot-name.mdx new file mode 100644 index 0000000000000000000000000000000000000000..1e4ac7271d28ff2f2517fa092a2ea7897704d2da --- /dev/null +++ b/documentation/docs/tooltips/experiments/_chatbot-name.mdx @@ -0,0 +1 @@ +Set the name for your chatbot. This is the name that will be used by the chatbot to identify itself in the chat. \ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_chatbot_author.mdx b/documentation/docs/tooltips/experiments/_chatbot_author.mdx new file mode 100644 index 0000000000000000000000000000000000000000..a17c7125f3debd89855b294a07497d574811e0d7 --- /dev/null +++ b/documentation/docs/tooltips/experiments/_chatbot_author.mdx @@ -0,0 +1 @@ +Set the author for your chatbot. This is the name that will be taught to the model as the author of the chatbot. \ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_compile-model.mdx b/documentation/docs/tooltips/experiments/_compile-model.mdx new file mode 100644 index 0000000000000000000000000000000000000000..19ce6dc5ed3e22d68dd5d0a1ee9e387514927bf3 --- /dev/null +++ b/documentation/docs/tooltips/experiments/_compile-model.mdx @@ -0,0 +1 @@ +Compiles the model with Torch. Experimental! \ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_data-sample-choice.mdx b/documentation/docs/tooltips/experiments/_data-sample-choice.mdx new file mode 100644 index 0000000000000000000000000000000000000000..c24d44a79160c0c175089774c22f116be1a6c26c --- /dev/null +++ b/documentation/docs/tooltips/experiments/_data-sample-choice.mdx @@ -0,0 +1,3 @@ +Specifies the data H2O LLM Studio samples according to the percentage set in the **Data sample** setting. H2O LLM Studio does not sample the unselected data. 
+ +The **Data sample choice** setting is only available if the value in the *Data sample* setting is less than **1.0**. \ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_data-sample.mdx b/documentation/docs/tooltips/experiments/_data-sample.mdx new file mode 100644 index 0000000000000000000000000000000000000000..3f746d7d439e233ce9efc7f238e67d13a42e004b --- /dev/null +++ b/documentation/docs/tooltips/experiments/_data-sample.mdx @@ -0,0 +1,3 @@ +Defines the percentage of the data to use for the experiment. The default percentage is 100% (1). + +Changing the default value can significantly increase the training speed. Still, it might lead to a substantially poor accuracy value. Using 100% (1) of the data for final models is highly recommended. \ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_dataset.mdx b/documentation/docs/tooltips/experiments/_dataset.mdx new file mode 100644 index 0000000000000000000000000000000000000000..1e187f80528cbd4b66aebddd77cb8a3f24672c60 --- /dev/null +++ b/documentation/docs/tooltips/experiments/_dataset.mdx @@ -0,0 +1,5 @@ +It defines the dataset for the experiment. + + + + diff --git a/documentation/docs/tooltips/experiments/_deepspeed-allgather-bucket-size.mdx b/documentation/docs/tooltips/experiments/_deepspeed-allgather-bucket-size.mdx new file mode 100644 index 0000000000000000000000000000000000000000..e482551e8718796b9797451800b50616c31dd993 --- /dev/null +++ b/documentation/docs/tooltips/experiments/_deepspeed-allgather-bucket-size.mdx @@ -0,0 +1 @@ +Number of elements allgather at a time. Limits the memory required for the allgather for large model sizes. Smaller values use less GPU memory, but slow down training and validating. 
\ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_deepspeed-method.mdx b/documentation/docs/tooltips/experiments/_deepspeed-method.mdx new file mode 100644 index 0000000000000000000000000000000000000000..fa2e7c97293de30229ed72314193d4d2a343e30c --- /dev/null +++ b/documentation/docs/tooltips/experiments/_deepspeed-method.mdx @@ -0,0 +1,2 @@ +- ZeRO-2, gradient partitioning across GPUs. +- ZeRO-3, parameter partitioning across GPUs. \ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_deepspeed-offload-optimizer.mdx b/documentation/docs/tooltips/experiments/_deepspeed-offload-optimizer.mdx new file mode 100644 index 0000000000000000000000000000000000000000..d83fa269a019ca29942adbd9c06078ffd3b6d83d --- /dev/null +++ b/documentation/docs/tooltips/experiments/_deepspeed-offload-optimizer.mdx @@ -0,0 +1 @@ +Whether to offload the optimizer to CPU to save more GPU RAM during training. Note that turning on offload_optimizer will further slow down training. \ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_deepspeed-reduce-bucket-size.mdx b/documentation/docs/tooltips/experiments/_deepspeed-reduce-bucket-size.mdx new file mode 100644 index 0000000000000000000000000000000000000000..afe5ef0cb1de40639f4d5e372b55ae46c0df8d9d --- /dev/null +++ b/documentation/docs/tooltips/experiments/_deepspeed-reduce-bucket-size.mdx @@ -0,0 +1 @@ +Number of elements reduced/allreduced at a time. Limits the memory required for the allgather for large model sizes. Smaller values use less memory, but slow down training and validating. 
\ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_deepspeed-stage3-max-live-parameters.mdx b/documentation/docs/tooltips/experiments/_deepspeed-stage3-max-live-parameters.mdx new file mode 100644 index 0000000000000000000000000000000000000000..70814112197fd57d3a147a27e7580a6e4986c42a --- /dev/null +++ b/documentation/docs/tooltips/experiments/_deepspeed-stage3-max-live-parameters.mdx @@ -0,0 +1 @@ +The maximum number of parameters resident per GPU before releasing. Smaller values use less memory, but slow down training. \ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_deepspeed-stage3-max-reuse-distance.mdx b/documentation/docs/tooltips/experiments/_deepspeed-stage3-max-reuse-distance.mdx new file mode 100644 index 0000000000000000000000000000000000000000..0e5beb51a6fc8861d29a49d40d12ce6bfe8cca51 --- /dev/null +++ b/documentation/docs/tooltips/experiments/_deepspeed-stage3-max-reuse-distance.mdx @@ -0,0 +1 @@ +Do not release a parameter if it will be reused within this threshold of parameters. Smaller values use less memory, but slow down training. \ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_deepspeed-stage3-param-persistence-threshold.mdx b/documentation/docs/tooltips/experiments/_deepspeed-stage3-param-persistence-threshold.mdx new file mode 100644 index 0000000000000000000000000000000000000000..7d6181b98d7c5f5c34b947765f3369b5bb828b37 --- /dev/null +++ b/documentation/docs/tooltips/experiments/_deepspeed-stage3-param-persistence-threshold.mdx @@ -0,0 +1 @@ +Do not partition parameters smaller than this threshold. Smaller values use less memory, but can greatly increase communication and slow down training and validating. (especially latency-bound messages). 
diff --git a/documentation/docs/tooltips/experiments/_deepspeed-stage3-prefetch-bucket-size.mdx b/documentation/docs/tooltips/experiments/_deepspeed-stage3-prefetch-bucket-size.mdx new file mode 100644 index 0000000000000000000000000000000000000000..7d2ee2c6256c7362392fc76869fcfd140e5e7084 --- /dev/null +++ b/documentation/docs/tooltips/experiments/_deepspeed-stage3-prefetch-bucket-size.mdx @@ -0,0 +1 @@ +Maximum number of parameter elements to fetch ahead of use. Smaller values use less memory, but slow down training and validating. \ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_differential-learning-rate-layers.mdx b/documentation/docs/tooltips/experiments/_differential-learning-rate-layers.mdx new file mode 100644 index 0000000000000000000000000000000000000000..8fa1c083a700b617d252ee851e1514a1e9d99a07 --- /dev/null +++ b/documentation/docs/tooltips/experiments/_differential-learning-rate-layers.mdx @@ -0,0 +1,8 @@ +Defines the learning rate to apply to certain layers of a model. H2O LLM Studio applies the regular learning rate to layers without a specified learning rate. + +- **Backbone** + - H2O LLM Studio applies a different learning rate to a body of the neural network architecture. +- **Value Head** + - H2O LLM Studio applies a different learning rate to a value head of the neural network architecture. + +A common strategy is to apply a lower learning rate to the backbone of a model for better convergence and training stability. diff --git a/documentation/docs/tooltips/experiments/_differential_learning_rate.mdx b/documentation/docs/tooltips/experiments/_differential_learning_rate.mdx new file mode 100644 index 0000000000000000000000000000000000000000..2c81230745462154ce78b287bb701300b788a903 --- /dev/null +++ b/documentation/docs/tooltips/experiments/_differential_learning_rate.mdx @@ -0,0 +1,3 @@ +Defines the differential learning rate that is applied to all selected differential learning rate layers. 
+ +The **Differential learning rate** setting is available only if the following setting has at least one option selected: **Differential learning rate layers**. diff --git a/documentation/docs/tooltips/experiments/_do-sample.mdx b/documentation/docs/tooltips/experiments/_do-sample.mdx new file mode 100644 index 0000000000000000000000000000000000000000..31c7c3dbbdf84453bbb09ea5c2dd32ffae5c6a2a --- /dev/null +++ b/documentation/docs/tooltips/experiments/_do-sample.mdx @@ -0,0 +1 @@ +Determines whether to sample from the next token distribution instead of choosing the token with the highest probability. If turned **On**, the next token in a predicted sequence is sampled based on the probabilities. If turned **Off**, the highest probability is always chosen. \ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_epochs.mdx b/documentation/docs/tooltips/experiments/_epochs.mdx new file mode 100644 index 0000000000000000000000000000000000000000..1101bfd8040482b9fb84738215b02689aed0d9d6 --- /dev/null +++ b/documentation/docs/tooltips/experiments/_epochs.mdx @@ -0,0 +1,5 @@ +Defines the number of epochs to train the model. In other words, it specifies the number of times the learning algorithm goes through the entire training dataset. + +- The **Epochs** setting is an important setting to tune because it balances under- and overfitting. +- The learning rate highly impacts the optimal value of the epochs. 
+- H2O LLM Studio enables you to utilize a pre-trained model trained on zero epochs (where H2O LLM Studio does not train the model and the pretrained model (experiment) can be evaluated as-is): \ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_evaluate-before-training.mdx b/documentation/docs/tooltips/experiments/_evaluate-before-training.mdx new file mode 100644 index 0000000000000000000000000000000000000000..8682612658ffb30cc08e739e124e7e5aa2f8044c --- /dev/null +++ b/documentation/docs/tooltips/experiments/_evaluate-before-training.mdx @@ -0,0 +1 @@ +This option lets you evaluate the model before training, which can help you judge the quality of the LLM backbone before fine-tuning. \ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_evaluation-epochs.mdx b/documentation/docs/tooltips/experiments/_evaluation-epochs.mdx new file mode 100644 index 0000000000000000000000000000000000000000..7f173b947a2c13ac497f9d5dcfa1d09b06df0b00 --- /dev/null +++ b/documentation/docs/tooltips/experiments/_evaluation-epochs.mdx @@ -0,0 +1,5 @@ +Defines the number of epochs H2O LLM Studio uses before each validation loop for model training. In other words, it determines the frequency (in a number of epochs) to run the model evaluation on the validation data. + +- Increasing the number of *Evaluation Epochs* can speed up an experiment. +- The **Evaluation epochs** setting is available only if the following setting is turned **Off**: **Save Best Checkpoint**. +- Can be a fraction of an epoch \ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_experiment-name.mdx b/documentation/docs/tooltips/experiments/_experiment-name.mdx new file mode 100644 index 0000000000000000000000000000000000000000..eabccc41d36e368a1cd15fd1a83996ffa8473542 --- /dev/null +++ b/documentation/docs/tooltips/experiments/_experiment-name.mdx @@ -0,0 +1 @@ +It defines the name of the experiment. 
\ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_find-unused-parameters.mdx b/documentation/docs/tooltips/experiments/_find-unused-parameters.mdx new file mode 100644 index 0000000000000000000000000000000000000000..2d57bc8c0daf562c27b3665edc7105a93a433733 --- /dev/null +++ b/documentation/docs/tooltips/experiments/_find-unused-parameters.mdx @@ -0,0 +1,3 @@ +In Distributed Data Parallel (DDP) mode, `prepare_for_backward()` is called at the end of DDP forward pass. It traverses the autograd graph to find unused parameters when `find_unused_parameters` is set to True in DDP constructor. + +Note that traversing the autograd graph introduces extra overheads, so applications should only set to True when necessary. \ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_force-embedding-gradients.mdx b/documentation/docs/tooltips/experiments/_force-embedding-gradients.mdx new file mode 100644 index 0000000000000000000000000000000000000000..92721000d462c4c25356db067628a85fa3088d5b --- /dev/null +++ b/documentation/docs/tooltips/experiments/_force-embedding-gradients.mdx @@ -0,0 +1 @@ +Whether to force the computation of gradients for the input embeddings during training. Useful for LORA. \ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_full_kl_penalty.mdx b/documentation/docs/tooltips/experiments/_full_kl_penalty.mdx new file mode 100644 index 0000000000000000000000000000000000000000..1f2c4dc21b1d3f1ffcaf572ee666096b282fb98d --- /dev/null +++ b/documentation/docs/tooltips/experiments/_full_kl_penalty.mdx @@ -0,0 +1 @@ +Calculates the actual KL for all tokens in the distribution. 
\ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_gpus.mdx b/documentation/docs/tooltips/experiments/_gpus.mdx new file mode 100644 index 0000000000000000000000000000000000000000..888bfb7b14dfd9462102004f95484b4091d50a9d --- /dev/null +++ b/documentation/docs/tooltips/experiments/_gpus.mdx @@ -0,0 +1 @@ +Determines the list of GPUs H2O LLM Studio can use for the experiment. GPUs are listed by name, referring to their system ID (starting from 1). \ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_grad-accumulation.mdx b/documentation/docs/tooltips/experiments/_grad-accumulation.mdx new file mode 100644 index 0000000000000000000000000000000000000000..f5a140f5a060e715e517d9ab0a76e8b5f648dfed --- /dev/null +++ b/documentation/docs/tooltips/experiments/_grad-accumulation.mdx @@ -0,0 +1,4 @@ +Defines the number of gradient accumulations before H2O LLM Studio updates the neural network weights during model training. + +- Grad accumulation can be beneficial if only small batches are selected for training. With gradient accumulation, the loss and gradients are calculated after each batch, but it waits for the selected accumulations before updating the model weights. You can control the batch size through the **Batch size** setting. +- Changing the default value of *Grad Accumulation* might require adjusting the learning rate and batch size. \ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_gradient-checkpointing.mdx b/documentation/docs/tooltips/experiments/_gradient-checkpointing.mdx new file mode 100644 index 0000000000000000000000000000000000000000..49ea0c587693b4264c2a9bffb84d55b0806fe325 --- /dev/null +++ b/documentation/docs/tooltips/experiments/_gradient-checkpointing.mdx @@ -0,0 +1,6 @@ +Determines whether H2O LLM Studio activates gradient checkpointing (GC) when training the model. 
Starting GC reduces the video random access memory (VRAM) footprint at the cost of a longer runtime (an additional forward pass). Turning **On** GC enables it during the training process. + +**Caution** +Gradient checkpointing is an experimental setting that is not compatible with all backbones or all other settings. + +Activating *GC* comes at the cost of a longer training time; for that reason, try training without *GC* first and only activate when experiencing *GPU out-of-memory (OOM)* errors. \ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_gradient-clip.mdx b/documentation/docs/tooltips/experiments/_gradient-clip.mdx new file mode 100644 index 0000000000000000000000000000000000000000..3ef2cc4e54b02840562836e56f220906c05c8c35 --- /dev/null +++ b/documentation/docs/tooltips/experiments/_gradient-clip.mdx @@ -0,0 +1,3 @@ +Defines the maximum norm of the gradients H2O LLM Studio specifies during model training. Defaults to **0**, no clipping. When a value greater than 0 is specified, H2O LLM Studio modifies the gradients during model training. H2O LLM Studio uses the specified value as an upper limit for the norm of the gradients, calculated using the Euclidean norm over all gradients per batch. + +This setting can help model convergence when extreme gradient values cause high volatility of weight updates. \ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_huggingface-branch.mdx b/documentation/docs/tooltips/experiments/_huggingface-branch.mdx new file mode 100644 index 0000000000000000000000000000000000000000..60d331aadf92a74dce2c174ffb02e1069c722051 --- /dev/null +++ b/documentation/docs/tooltips/experiments/_huggingface-branch.mdx @@ -0,0 +1 @@ +The **Huggingface Branch** defines which branch to use in a Huggingface repository. The default value is "main". 
diff --git a/documentation/docs/tooltips/experiments/_import-config-from-yaml.mdx b/documentation/docs/tooltips/experiments/_import-config-from-yaml.mdx new file mode 100644 index 0000000000000000000000000000000000000000..86b78d4a4bfc60eae5ce09b88cdf6477a1259ed1 --- /dev/null +++ b/documentation/docs/tooltips/experiments/_import-config-from-yaml.mdx @@ -0,0 +1,3 @@ +Defines the `.yml` file that defines the experiment settings. + +- H2O LLM Studio supports a `.yml` file import and export functionality. You can download the config settings of finished experiments, make changes, and re-upload them when starting a new experiment in any instance of H2O LLM Studio. \ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_initial-kl-coefficient.mdx b/documentation/docs/tooltips/experiments/_initial-kl-coefficient.mdx new file mode 100644 index 0000000000000000000000000000000000000000..6b195434a6da461cd1b2be191d79867c30421c8f --- /dev/null +++ b/documentation/docs/tooltips/experiments/_initial-kl-coefficient.mdx @@ -0,0 +1 @@ +Initial KL penalty coefficient (used for adaptive and linear control). \ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_intermediate-dropout.mdx b/documentation/docs/tooltips/experiments/_intermediate-dropout.mdx new file mode 100644 index 0000000000000000000000000000000000000000..b0647ceda763a3424265a20ee23a7284bcf4764a --- /dev/null +++ b/documentation/docs/tooltips/experiments/_intermediate-dropout.mdx @@ -0,0 +1 @@ +Defines the custom dropout rate H2O LLM Studio uses for intermediate layers in the transformer model. 
\ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_kl-horizon.mdx b/documentation/docs/tooltips/experiments/_kl-horizon.mdx new file mode 100644 index 0000000000000000000000000000000000000000..00e28e1a523c767ffa7d45d6332f1cd81b78fd42 --- /dev/null +++ b/documentation/docs/tooltips/experiments/_kl-horizon.mdx @@ -0,0 +1 @@ +Horizon for adaptive KL control. \ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_kl-target.mdx b/documentation/docs/tooltips/experiments/_kl-target.mdx new file mode 100644 index 0000000000000000000000000000000000000000..ab8a040625268125877c9be8cf2f53d7fe07b758 --- /dev/null +++ b/documentation/docs/tooltips/experiments/_kl-target.mdx @@ -0,0 +1 @@ +Target KL value for adaptive KL control. \ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_learning-rate.mdx b/documentation/docs/tooltips/experiments/_learning-rate.mdx new file mode 100644 index 0000000000000000000000000000000000000000..6cee2f23c9c07a98fabe335ac05d36ae5fedccc4 --- /dev/null +++ b/documentation/docs/tooltips/experiments/_learning-rate.mdx @@ -0,0 +1,4 @@ +Defines the learning rate H2O LLM Studio uses when training the model, specifically when updating the neural network's weights. The learning rate is the speed at which the model updates its weights after processing each mini-batch of data. + +- Learning rate is an important setting to tune as it balances under- and overfitting. +- The number of epochs highly impacts the optimal value of the learning rate. 
\ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_limit_chained_samples.mdx b/documentation/docs/tooltips/experiments/_limit_chained_samples.mdx new file mode 100644 index 0000000000000000000000000000000000000000..0f2f31437f23f86abb2298b336f8e00d8d49a71e --- /dev/null +++ b/documentation/docs/tooltips/experiments/_limit_chained_samples.mdx @@ -0,0 +1 @@ +If set to True, model will be only trained on the full chained sample in case of nested conversations. If set to False, each separate sample is used for training. \ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_llm-backbone.mdx b/documentation/docs/tooltips/experiments/_llm-backbone.mdx new file mode 100644 index 0000000000000000000000000000000000000000..401cc1c6993d6430ce6e303c04b6a56b687259a1 --- /dev/null +++ b/documentation/docs/tooltips/experiments/_llm-backbone.mdx @@ -0,0 +1,5 @@ +The **LLM Backbone** option is the most important setting as it sets the pretrained model weights. + +- Usually, it is good to use smaller architectures for quicker experiments and larger models when aiming for the highest accuracy +- If possible, leverage backbones pre-trained closely to your use case +- Any huggingface model can be used here (not limited to the ones in the dropdown list) \ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_logger.mdx b/documentation/docs/tooltips/experiments/_logger.mdx new file mode 100644 index 0000000000000000000000000000000000000000..476701f0d3769f29989abced01e3588b3f12aec0 --- /dev/null +++ b/documentation/docs/tooltips/experiments/_logger.mdx @@ -0,0 +1,7 @@ +Defines the logger type that H2O LLM Studio uses for model training + +Options +- **None** + - H2O LLM Studio does not use any logger. +- **Neptune** + - H2O LLM Studio uses Neptune as a logger to track the experiment. To use Neptune, you must specify a **Neptune API token** and a **Neptune project**. 
\ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_lora-alpha.mdx b/documentation/docs/tooltips/experiments/_lora-alpha.mdx new file mode 100644 index 0000000000000000000000000000000000000000..933e2ce1d6fc11176b7ba0ed74591e8c7ee7eb1a --- /dev/null +++ b/documentation/docs/tooltips/experiments/_lora-alpha.mdx @@ -0,0 +1 @@ +The scaling factor for the lora weights. \ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_lora-dropout.mdx b/documentation/docs/tooltips/experiments/_lora-dropout.mdx new file mode 100644 index 0000000000000000000000000000000000000000..980a7e3ad0ec19cc47e42f3118daaf640a05c3c2 --- /dev/null +++ b/documentation/docs/tooltips/experiments/_lora-dropout.mdx @@ -0,0 +1 @@ +The probability of applying dropout to the LoRA weights during training. \ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_lora-r.mdx b/documentation/docs/tooltips/experiments/_lora-r.mdx new file mode 100644 index 0000000000000000000000000000000000000000..48ea770a4ce912dca52f171a3ea43a45b76df1bd --- /dev/null +++ b/documentation/docs/tooltips/experiments/_lora-r.mdx @@ -0,0 +1 @@ +The dimension of the matrix decomposition used in LoRA. \ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_lora-target-modules.mdx b/documentation/docs/tooltips/experiments/_lora-target-modules.mdx new file mode 100644 index 0000000000000000000000000000000000000000..4d1f900c1128ad7827cf44d2946d5c200eea0676 --- /dev/null +++ b/documentation/docs/tooltips/experiments/_lora-target-modules.mdx @@ -0,0 +1 @@ +The modules in the model to apply the LoRA approximation to. Defaults to all linear layers. 
\ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_lora.mdx b/documentation/docs/tooltips/experiments/_lora.mdx new file mode 100644 index 0000000000000000000000000000000000000000..db92a793b6260787f0e7de87f786cb7583e1e8a8 --- /dev/null +++ b/documentation/docs/tooltips/experiments/_lora.mdx @@ -0,0 +1 @@ +Whether to use low rank approximations (LoRA) during training. \ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_loss-function.mdx b/documentation/docs/tooltips/experiments/_loss-function.mdx new file mode 100644 index 0000000000000000000000000000000000000000..ebf6ca195f6ad02001068c25542a9088be6b47a4 --- /dev/null +++ b/documentation/docs/tooltips/experiments/_loss-function.mdx @@ -0,0 +1 @@ +Defines the loss function H2O LLM Studio utilizes during model training. The loss function is a differentiable function measuring the prediction error. The model utilizes gradients of the loss function to update the model weights during training. The options depend on the selected Problem Type. diff --git a/documentation/docs/tooltips/experiments/_mask-prompt-labels.mdx b/documentation/docs/tooltips/experiments/_mask-prompt-labels.mdx new file mode 100644 index 0000000000000000000000000000000000000000..14bf2a85884f6539b4121609127e94a5c267e73f --- /dev/null +++ b/documentation/docs/tooltips/experiments/_mask-prompt-labels.mdx @@ -0,0 +1 @@ +Whether to mask the prompt labels during training and only train on the loss of the answer. \ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_max-length-answer.mdx b/documentation/docs/tooltips/experiments/_max-length-answer.mdx new file mode 100644 index 0000000000000000000000000000000000000000..468f363be2f8672128be92c209fd18456a952d5e --- /dev/null +++ b/documentation/docs/tooltips/experiments/_max-length-answer.mdx @@ -0,0 +1 @@ +The maximum sequence length of the answer to use during training. 
In case of chained samples, this max length refers to a single answer length in the chain. \ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_max-length-inference.mdx b/documentation/docs/tooltips/experiments/_max-length-inference.mdx new file mode 100644 index 0000000000000000000000000000000000000000..2dbdc65fae8ba252ae3bac0c1eb45b6f99d5b87c --- /dev/null +++ b/documentation/docs/tooltips/experiments/_max-length-inference.mdx @@ -0,0 +1,4 @@ +Defines the max length value H2O LLM Studio uses for the generated text. + +- Similar to the **Max Length** setting in the *tokenizer settings* section, this setting specifies the maximum number of tokens to predict for a given prediction sample. +- This setting impacts the evaluation metrics and should depend on the dataset and average output sequence length that is expected to be predicted. \ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_max-length-prompt.mdx b/documentation/docs/tooltips/experiments/_max-length-prompt.mdx new file mode 100644 index 0000000000000000000000000000000000000000..c71223a4d351943dfebda0e3399aafc7e8162f2c --- /dev/null +++ b/documentation/docs/tooltips/experiments/_max-length-prompt.mdx @@ -0,0 +1 @@ +The maximum sequence length of the prompt to use during training. In case of chained samples, this max length refers to a single prompt length in the chain. \ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_max-length.mdx b/documentation/docs/tooltips/experiments/_max-length.mdx new file mode 100644 index 0000000000000000000000000000000000000000..2d675c2134c80a843e4d6361d8917018556d0f2c --- /dev/null +++ b/documentation/docs/tooltips/experiments/_max-length.mdx @@ -0,0 +1,7 @@ +Defines the maximum length of the input sequence H2O LLM Studio uses during model training. In other words, this setting specifies the maximum number of tokens an input text is transformed for model training. 
+ +A higher token count leads to higher memory usage that slows down training while increasing the probability of obtaining a higher accuracy value. + +In case of Causal Language Modeling, this includes both prompt and answer, or all prompts and answers in case of chained samples. + +In Sequence to Sequence Modeling, this refers to the length of the prompt, or the length of a full chained sample. \ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_max-time.mdx b/documentation/docs/tooltips/experiments/_max-time.mdx new file mode 100644 index 0000000000000000000000000000000000000000..259d1365b88ddad65fa2dbad20f75c160cfb80ee --- /dev/null +++ b/documentation/docs/tooltips/experiments/_max-time.mdx @@ -0,0 +1 @@ +The maximum amount of time you allow the computation to run for in seconds, with zero disabling this setting. Generation will still finish the current pass after the allocated time has passed. \ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_metric-gpt-model.mdx b/documentation/docs/tooltips/experiments/_metric-gpt-model.mdx new file mode 100644 index 0000000000000000000000000000000000000000..c17cab68dd0a89a66de2629f028583c3a772d5b1 --- /dev/null +++ b/documentation/docs/tooltips/experiments/_metric-gpt-model.mdx @@ -0,0 +1 @@ +Defines the OpenAI model endpoint for the GPT metric. \ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_metric-gpt-template.mdx b/documentation/docs/tooltips/experiments/_metric-gpt-template.mdx new file mode 100644 index 0000000000000000000000000000000000000000..c741f5369ee3041f378f3549ebf3d85c0c24e4de --- /dev/null +++ b/documentation/docs/tooltips/experiments/_metric-gpt-template.mdx @@ -0,0 +1 @@ +The template to use for GPT-based evaluation. 
Note that for mt-bench, the validation dataset will be replaced accordingly; to approximate the original implementation as closely as possible, we suggest using gpt-4-0613 as the gpt judge model and use 1024 for the max length inference. \ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_metric.mdx b/documentation/docs/tooltips/experiments/_metric.mdx new file mode 100644 index 0000000000000000000000000000000000000000..7e3ff80d1358de4e2df416eef0d93dcb4dd98854 --- /dev/null +++ b/documentation/docs/tooltips/experiments/_metric.mdx @@ -0,0 +1,7 @@ +Defines the metric to evaluate the model's performance. + +We provide several metric options for evaluating the performance of your model. +In addition to the BLEU and the Perplexity score, we offer GPT metrics that utilize the OpenAI API to determine whether +the predicted answer is more favorable than the ground truth answer. +To use these metrics, you can either export your OpenAI API key as an environment variable before starting LLM Studio, +or you can specify it in the Settings Menu within the UI. \ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_min-length-inference.mdx b/documentation/docs/tooltips/experiments/_min-length-inference.mdx new file mode 100644 index 0000000000000000000000000000000000000000..57ad37b6e96ef78fa7fd79dee6289fdfc24aad89 --- /dev/null +++ b/documentation/docs/tooltips/experiments/_min-length-inference.mdx @@ -0,0 +1,3 @@ +Defines the min length value H2O LLM Studio uses for the generated text. + +- This setting impacts the evaluation metrics and should depend on the dataset and average output sequence length that is expected to be predicted. 
\ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_mixed-precision-dtype.mdx b/documentation/docs/tooltips/experiments/_mixed-precision-dtype.mdx new file mode 100644 index 0000000000000000000000000000000000000000..a9c986a5e6187867643caffe467391f23b6b079c --- /dev/null +++ b/documentation/docs/tooltips/experiments/_mixed-precision-dtype.mdx @@ -0,0 +1 @@ +The datatype used for mixed precision. \ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_mixed-precision.mdx b/documentation/docs/tooltips/experiments/_mixed-precision.mdx new file mode 100644 index 0000000000000000000000000000000000000000..8bc4a002f7ca251733587230b82f7824c551d0e1 --- /dev/null +++ b/documentation/docs/tooltips/experiments/_mixed-precision.mdx @@ -0,0 +1,3 @@ +Determines whether to use mixed-precision. When turned **Off**, H2O LLM Studio does not use mixed-precision. + +Mixed-precision is a technique that helps decrease memory consumption and increases training speed. \ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_neftune_noise_alpha.mdx b/documentation/docs/tooltips/experiments/_neftune_noise_alpha.mdx new file mode 100644 index 0000000000000000000000000000000000000000..9f9923cf0d7144c015f43381a8acef60eff53af2 --- /dev/null +++ b/documentation/docs/tooltips/experiments/_neftune_noise_alpha.mdx @@ -0,0 +1,2 @@ +Will add noise to the input embeddings as proposed by https://arxiv.org/abs/2310.05914 (NEFTune: Noisy Embeddings Improve Instruction Finetuning) + diff --git a/documentation/docs/tooltips/experiments/_neptune-api-token.mdx b/documentation/docs/tooltips/experiments/_neptune-api-token.mdx new file mode 100644 index 0000000000000000000000000000000000000000..47944d71e50bb92663b80b48bb78717b027c691c --- /dev/null +++ b/documentation/docs/tooltips/experiments/_neptune-api-token.mdx @@ -0,0 +1 @@ +Defines the Neptune API token to validate all subsequent Neptune API calls. 
diff --git a/documentation/docs/tooltips/experiments/_neptune-project.mdx b/documentation/docs/tooltips/experiments/_neptune-project.mdx new file mode 100644 index 0000000000000000000000000000000000000000..aefbdac0e130745a0181105adb3fa1ad2e9bf50b --- /dev/null +++ b/documentation/docs/tooltips/experiments/_neptune-project.mdx @@ -0,0 +1 @@ +Defines the Neptune project to access if you selected Neptune in the **Logger** setting. diff --git a/documentation/docs/tooltips/experiments/_num-beams.mdx b/documentation/docs/tooltips/experiments/_num-beams.mdx new file mode 100644 index 0000000000000000000000000000000000000000..d396204998ae6ff45d01992183873765ca877614 --- /dev/null +++ b/documentation/docs/tooltips/experiments/_num-beams.mdx @@ -0,0 +1,3 @@ +Defines the number of beams to use for beam search. *Num Beams* default value is 1 (a single beam); no beam search. + +A higher *Num Beams* value can increase prediction runtime while potentially improving accuracy. \ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_num-classes.mdx b/documentation/docs/tooltips/experiments/_num-classes.mdx new file mode 100644 index 0000000000000000000000000000000000000000..544720dcc1129e7204d465cb8d39e19a1eefe025 --- /dev/null +++ b/documentation/docs/tooltips/experiments/_num-classes.mdx @@ -0,0 +1 @@ +The number of possible classes for the classification task. For binary classification, a single class should be selected. \ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_num-history.mdx b/documentation/docs/tooltips/experiments/_num-history.mdx new file mode 100644 index 0000000000000000000000000000000000000000..2d449124daf13c51e114f406f3511e7d47376336 --- /dev/null +++ b/documentation/docs/tooltips/experiments/_num-history.mdx @@ -0,0 +1,2 @@ +Number of previous prompts and answers the model should use as history and prepend to the current prompt. 
+As an example, if Num History is 2, the model will use the previous prompt-answer pair. \ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_number-of-workers.mdx b/documentation/docs/tooltips/experiments/_number-of-workers.mdx new file mode 100644 index 0000000000000000000000000000000000000000..03da84d522933d0becd89d97c663796e22195705 --- /dev/null +++ b/documentation/docs/tooltips/experiments/_number-of-workers.mdx @@ -0,0 +1 @@ +Defines the number of workers H2O LLM Studio uses for the *DataLoader*. In other words, it defines the number of CPU processes to use when reading and loading data to GPUs during model training. \ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_offload-reward-model.mdx b/documentation/docs/tooltips/experiments/_offload-reward-model.mdx new file mode 100644 index 0000000000000000000000000000000000000000..aaeeb40b8a06f3e02903ee49628ff80f226c33f8 --- /dev/null +++ b/documentation/docs/tooltips/experiments/_offload-reward-model.mdx @@ -0,0 +1 @@ +When enabled, this will offload the reward model weights to CPU when not in use. This can be useful when training on a GPU with limited memory. The weights will be moved back to the GPU when needed. \ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_openai-api-token.mdx b/documentation/docs/tooltips/experiments/_openai-api-token.mdx new file mode 100644 index 0000000000000000000000000000000000000000..9dfce5fa162afff45f3859d5113d3a56bfe0a434 --- /dev/null +++ b/documentation/docs/tooltips/experiments/_openai-api-token.mdx @@ -0,0 +1 @@ +Your OpenAI API token. Needed for evaluation with GPT. 
\ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_optimizer.mdx b/documentation/docs/tooltips/experiments/_optimizer.mdx new file mode 100644 index 0000000000000000000000000000000000000000..e25cd5549613d36ae4e5754e4a7c60423e1814c8 --- /dev/null +++ b/documentation/docs/tooltips/experiments/_optimizer.mdx @@ -0,0 +1,17 @@ +Defines the algorithm or method (optimizer) to use for model training. The selected algorithm or method defines how the model should change the attributes of the neural network, such as weights and learning rate. Optimizers solve optimization problems and make more accurate updates to attributes to reduce learning losses. + + +Options: + +- **Adadelta** + - To learn about Adadelta, see ADADELTA: An Adaptive Learning Rate Method. +- **Adam** + - To learn about Adam, see Adam: A Method for Stochastic Optimization. +- **AdamW** + - To learn about AdamW, see Decoupled Weight Decay Regularization. +- **AdamW8bit** + - To learn about AdamW, see Decoupled Weight Decay Regularization. +- **RMSprop** + - To learn about RMSprop, see Neural Networks for Machine Learning. +- **SGD** + - H2O LLM Studio uses a stochastic gradient descent optimizer. \ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_padding-quantile.mdx b/documentation/docs/tooltips/experiments/_padding-quantile.mdx new file mode 100644 index 0000000000000000000000000000000000000000..1c37076a118042bb07d7e9a919e79d3f189ff2af --- /dev/null +++ b/documentation/docs/tooltips/experiments/_padding-quantile.mdx @@ -0,0 +1,7 @@ +Defines the padding quantile H2O LLM Studio uses to select the maximum token length per batch. H2O LLM Studio performs padding of shorter sequences up to the specified padding quantile instead of the selected **Max length**. H2O LLM Studio truncates longer sequences. 
+ +- Lowering the quantile can significantly increase training runtime and reduce memory usage in unevenly distributed sequence lengths but can hurt performance +- The setting depends on the batch size and should be adjusted accordingly +- No padding is done in inference, and the selected **Max Length** is guaranteed +- Setting to 0 disables padding +- In case of distributed training, the quantile will be calculated across all GPUs \ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_parent-id-column.mdx b/documentation/docs/tooltips/experiments/_parent-id-column.mdx new file mode 100644 index 0000000000000000000000000000000000000000..3ba0ef6b0af3aed7ef577af4093514f9a9ea1318 --- /dev/null +++ b/documentation/docs/tooltips/experiments/_parent-id-column.mdx @@ -0,0 +1 @@ +An optional column specifying the parent id to be used for chained conversations. The value of this column needs to match an additional column with the name `id`. If provided, the prompt will be concatenated after preceding parent rows. \ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_personalize.mdx b/documentation/docs/tooltips/experiments/_personalize.mdx new file mode 100644 index 0000000000000000000000000000000000000000..3e73ff4b880bee452675978583333dbec6f06891 --- /dev/null +++ b/documentation/docs/tooltips/experiments/_personalize.mdx @@ -0,0 +1 @@ +When active, this will replace certain words from the dataset to personalize the chatbot. Allows you to set a "Chatbot Name" and a "Chatbot Author". This setting is only useful in conjunction with the default dataset. 
\ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_ppo-batch-size.mdx b/documentation/docs/tooltips/experiments/_ppo-batch-size.mdx new file mode 100644 index 0000000000000000000000000000000000000000..2fe732c20ace43c1afa9a4ca4db7b81a13a7992f --- /dev/null +++ b/documentation/docs/tooltips/experiments/_ppo-batch-size.mdx @@ -0,0 +1 @@ +Number of samples optimized inside PPO together. \ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_ppo-clip-policy.mdx b/documentation/docs/tooltips/experiments/_ppo-clip-policy.mdx new file mode 100644 index 0000000000000000000000000000000000000000..2ed4a8731cb7dcdc01d56ca59bf91b27e47da22f --- /dev/null +++ b/documentation/docs/tooltips/experiments/_ppo-clip-policy.mdx @@ -0,0 +1 @@ +Range for clipping in PPO policy gradient loss. \ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_ppo-clip-value.mdx b/documentation/docs/tooltips/experiments/_ppo-clip-value.mdx new file mode 100644 index 0000000000000000000000000000000000000000..088a6875ec819baa5c012314077489f75b265a83 --- /dev/null +++ b/documentation/docs/tooltips/experiments/_ppo-clip-value.mdx @@ -0,0 +1 @@ +Range for clipping values in loss calculation. \ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_ppo-epochs.mdx b/documentation/docs/tooltips/experiments/_ppo-epochs.mdx new file mode 100644 index 0000000000000000000000000000000000000000..940af78aabb29e4de50a73470a3d75e9fba064f1 --- /dev/null +++ b/documentation/docs/tooltips/experiments/_ppo-epochs.mdx @@ -0,0 +1 @@ +Number of optimisation epochs per batch of samples. 
\ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_ppo-generate-temperature.mdx b/documentation/docs/tooltips/experiments/_ppo-generate-temperature.mdx new file mode 100644 index 0000000000000000000000000000000000000000..384cb0a6ebb3312e7329f380a8a872c5219b263c --- /dev/null +++ b/documentation/docs/tooltips/experiments/_ppo-generate-temperature.mdx @@ -0,0 +1 @@ +This is the temperature that is used in the generate function during the PPO Rollout. \ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_pretrained-weights.mdx b/documentation/docs/tooltips/experiments/_pretrained-weights.mdx new file mode 100644 index 0000000000000000000000000000000000000000..06eb508069bc822e9decbeb6a256641c79f0f755 --- /dev/null +++ b/documentation/docs/tooltips/experiments/_pretrained-weights.mdx @@ -0,0 +1 @@ +Allows you to specify a local path to the pretrained weights. \ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_problem-type.mdx b/documentation/docs/tooltips/experiments/_problem-type.mdx new file mode 100644 index 0000000000000000000000000000000000000000..2d1e74512ff5831402af65fb2fa362ebcec576b2 --- /dev/null +++ b/documentation/docs/tooltips/experiments/_problem-type.mdx @@ -0,0 +1,9 @@ +Defines the problem type of the experiment, which also defines the settings H2O LLM Studio displays for the experiment. 
+ +- Causal Language Modeling: Used to fine-tune large language models + +- DPO Modeling: Used to fine-tune large language models using Direct Preference Optimization + +- Sequence To Sequence Modeling: Used to fine-tune large sequence to sequence models + +- Causal Classification Modeling: Used to fine-tune causal classification models \ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_prompt-column.mdx b/documentation/docs/tooltips/experiments/_prompt-column.mdx new file mode 100644 index 0000000000000000000000000000000000000000..d51c06e970ab961f0085525c65e0970a784f2e5d --- /dev/null +++ b/documentation/docs/tooltips/experiments/_prompt-column.mdx @@ -0,0 +1 @@ +The column in the dataset containing the user prompt. \ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_random-parent-probability.mdx b/documentation/docs/tooltips/experiments/_random-parent-probability.mdx new file mode 100644 index 0000000000000000000000000000000000000000..6b182feb90f8a077768afaebc9ac6057aa9cf6ca --- /dev/null +++ b/documentation/docs/tooltips/experiments/_random-parent-probability.mdx @@ -0,0 +1 @@ +While training, each sample will be concatenated to a random other sample simulating unrelated chained conversations. Can be specified without using a `Parent Column`. \ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_rejected-answer-column.mdx b/documentation/docs/tooltips/experiments/_rejected-answer-column.mdx new file mode 100644 index 0000000000000000000000000000000000000000..608d5225950e96ee7d5fcc0e03b3f5350cab5922 --- /dev/null +++ b/documentation/docs/tooltips/experiments/_rejected-answer-column.mdx @@ -0,0 +1,3 @@ +The column in the dataset containing the rejected response, i.e. an answer that is not preferred by the user. + +See https://arxiv.org/abs/2305.18290 for more details. 
\ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_rejected_prompt_column.mdx b/documentation/docs/tooltips/experiments/_rejected_prompt_column.mdx new file mode 100644 index 0000000000000000000000000000000000000000..74a211d0e24056d1fc9da540b1b0bb2beaf5ba12 --- /dev/null +++ b/documentation/docs/tooltips/experiments/_rejected_prompt_column.mdx @@ -0,0 +1 @@ +The column in the dataset containing the user prompt for the rejected answer. By default this can be set to None to take the same prompt as for the accepted answer and should only be changed if the accepted and rejected answers exhibit different prompts, such as when using KTOPairLoss. \ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_repetition-penalty.mdx b/documentation/docs/tooltips/experiments/_repetition-penalty.mdx new file mode 100644 index 0000000000000000000000000000000000000000..e552202e88437b15584a8c287791cd11787f141f --- /dev/null +++ b/documentation/docs/tooltips/experiments/_repetition-penalty.mdx @@ -0,0 +1 @@ +The parameter for repetition penalty. 1.0 means no penalty. See https://arxiv.org/pdf/1909.05858.pdf for more details. \ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_reward-model.mdx b/documentation/docs/tooltips/experiments/_reward-model.mdx new file mode 100644 index 0000000000000000000000000000000000000000..fdda8304cf8c4b9789bbff437fe755ee2ec06ff1 --- /dev/null +++ b/documentation/docs/tooltips/experiments/_reward-model.mdx @@ -0,0 +1,3 @@ +The **Reward Model** option gives control over the model weights that shall be used to score the active LLM during RLHF training. 
+ +- Any suitable Hugging Face model can be used here (not limited to the ones in the dropdown list) \ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_rollout_steps.mdx b/documentation/docs/tooltips/experiments/_rollout_steps.mdx new file mode 100644 index 0000000000000000000000000000000000000000..740bd61a31f3f3ecf57712c6ba97dde360b71153 --- /dev/null +++ b/documentation/docs/tooltips/experiments/_rollout_steps.mdx @@ -0,0 +1 @@ +Defines the number of steps (minibatches) for the rollout until the PPO update is applied. A higher value may help with training stability, but will need more iterations to converge. Must be smaller than or equal to the total number of steps in the training. \ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_save-best-checkpoint.mdx b/documentation/docs/tooltips/experiments/_save-best-checkpoint.mdx new file mode 100644 index 0000000000000000000000000000000000000000..ea190e5bee249a5c571cab9bf6cb1dbbe99af0d1 --- /dev/null +++ b/documentation/docs/tooltips/experiments/_save-best-checkpoint.mdx @@ -0,0 +1,5 @@ +Determines if H2O LLM Studio should save the model weights of the epoch exhibiting the best validation metric. When turned **On**, H2O LLM Studio saves the model weights for the epoch exhibiting the best validation metric. When turned **Off**, H2O LLM Studio saves the model weights after the last epoch is executed. + +- This setting should be turned **On** with care as it has the potential to lead to overfitting of the validation data. +- The default goal should be to attempt to tune models so that the last or very last epoch is the best epoch. +- Suppose an evident decline for later epochs is observed in logging. In that case, it is usually better to adjust hyperparameters, such as reducing the number of epochs or increasing regularization, instead of turning this setting **On**. 
\ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_scaling-factor-value-loss.mdx b/documentation/docs/tooltips/experiments/_scaling-factor-value-loss.mdx new file mode 100644 index 0000000000000000000000000000000000000000..ee573f9381d03f461a0280877976e41bbb96745f --- /dev/null +++ b/documentation/docs/tooltips/experiments/_scaling-factor-value-loss.mdx @@ -0,0 +1 @@ +Scaling factor for value loss. \ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_schedule.mdx b/documentation/docs/tooltips/experiments/_schedule.mdx new file mode 100644 index 0000000000000000000000000000000000000000..fac04d46c358b8a14bb8d082b0d5290718135970 --- /dev/null +++ b/documentation/docs/tooltips/experiments/_schedule.mdx @@ -0,0 +1,10 @@ +Defines the learning rate schedule H2O LLM Studio utilizes during model training. Specifying a learning rate schedule prevents the learning rate from staying the same. Instead, a learning rate schedule causes the learning rate to change over iterations, typically decreasing the learning rate to achieve a better model performance and training convergence. + + +Options +- **Constant** + - H2O LLM Studio applies a constant learning rate during the training process. +- **Cosine** + - H2O LLM Studio applies a cosine learning rate that follows the values of the cosine function. +- **Linear** + - H2O LLM Studio applies a linear learning rate that decreases the learning rate linearly. \ No newline at end of file diff --git a/documentation/docs/tooltips/experiments/_seed.mdx b/documentation/docs/tooltips/experiments/_seed.mdx new file mode 100644 index 0000000000000000000000000000000000000000..a4bbf1ac81f24abc670753796e4998b4b8cd8483 --- /dev/null +++ b/documentation/docs/tooltips/experiments/_seed.mdx @@ -0,0 +1 @@ +Defines the random seed value that H2O LLM Studio uses during model training. It defaults to -1, an arbitrary value. 
When the value is modified (not -1), the random seed allows results to be reproducible—defining a seed aids in obtaining predictable and repeatable results every time. Otherwise, not modifying the default seed value (-1) leads to random numbers at every invocation. diff --git a/documentation/docs/tooltips/experiments/_skip-parent-probability.mdx b/documentation/docs/tooltips/experiments/_skip-parent-probability.mdx new file mode 100644 index 0000000000000000000000000000000000000000..d74818aa8300ee8dfdf1a5563ac617825738cccb --- /dev/null +++ b/documentation/docs/tooltips/experiments/_skip-parent-probability.mdx @@ -0,0 +1 @@ +If `Parent Column` is set, this random augmentation will skip parent concatenation during training at each parent with this specified probability. \ No newline at end of file diff --git a/documentation/makersaurus.config.js b/documentation/makersaurus.config.js new file mode 100644 index 0000000000000000000000000000000000000000..b7525b34d7d02e51ef7e98d5cef184ab378040ec --- /dev/null +++ b/documentation/makersaurus.config.js @@ -0,0 +1,27 @@ +module.exports = { + title: "H2O LLM Studio | Docs", + tagline: "This code for this site was generated by Makersaurus", + url: "https://docs.h2o.ai", + baseUrl: "/h2o-llmstudio/", + projectName: "h2o-llmstudio", // Usually your repo name + feedbackAssignee: "sherenem", // Should be a github username + feedbackLabels: ["llmstudio"], + searchFilter: "h2o-llmstudio", + showLastUpdateAuthor: false, + includeCurrentVersion: true, + versions: { + current: { + label: " ", + path: "/", + }, + }, + dependencies: { + "@emotion/react": "^11.10.5", + "@emotion/styled": "^11.10.5", + "@material-ui/core": "^4.12.4", + "@material/card": "^14.0.0", + "@mui/icons-material": "^5.10.16", + "@mui/material": "^5.10.16", + "react-player": "^2.11.0", + }, +}; diff --git a/documentation/package-lock.json b/documentation/package-lock.json new file mode 100644 index 
0000000000000000000000000000000000000000..24cd9270d74697fcc7dec145344457b99594d715 --- /dev/null +++ b/documentation/package-lock.json @@ -0,0 +1,20236 @@ +{ + "name": "h2o-llmstudio-docs", + "version": "0.0.0", + "lockfileVersion": 2, + "requires": true, + "packages": { + "": { + "name": "h2o-llmstudio-docs", + "version": "0.0.0", + "dependencies": { + "@h2oai/makersaurus": "^0.8.3", + "docusaurus-plugin-includes": "^1.1.4" + } + }, + "node_modules/@ampproject/remapping": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.2.1.tgz", + "integrity": "sha512-lFMjJTrFL3j7L9yBxwYfCq2k6qqwHyzuUl/XBnif78PWTJYyL/dfowQHWE3sp6U6ZzqWiiIZnpTMO96zhkjwtg==", + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.0", + "@jridgewell/trace-mapping": "^0.3.9" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@babel/code-frame": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.22.5.tgz", + "integrity": "sha512-Xmwn266vad+6DAqEB2A6V/CcZVp62BbwVmcOJc2RPuwih1kw02TjQvWVWlcKGbBPd+8/0V5DEkOcizRGYsspYQ==", + "dependencies": { + "@babel/highlight": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/compat-data": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.22.5.tgz", + "integrity": "sha512-4Jc/YuIaYqKnDDz892kPIledykKg12Aw1PYX5i/TY28anJtacvM1Rrr8wbieB9GfEJwlzqT0hUEao0CxEebiDA==", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/core": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.22.5.tgz", + "integrity": "sha512-SBuTAjg91A3eKOvD+bPEz3LlhHZRNu1nFOVts9lzDJTXshHTjII0BAtDS3Y2DAkdZdDKWVZGVwkDfc4Clxn1dg==", + "dependencies": { + "@ampproject/remapping": "^2.2.0", + "@babel/code-frame": "^7.22.5", + "@babel/generator": "^7.22.5", + "@babel/helper-compilation-targets": "^7.22.5", + "@babel/helper-module-transforms": "^7.22.5", + 
"@babel/helpers": "^7.22.5", + "@babel/parser": "^7.22.5", + "@babel/template": "^7.22.5", + "@babel/traverse": "^7.22.5", + "@babel/types": "^7.22.5", + "convert-source-map": "^1.7.0", + "debug": "^4.1.0", + "gensync": "^1.0.0-beta.2", + "json5": "^2.2.2", + "semver": "^6.3.0" + }, + "engines": { + "node": ">=6.9.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/babel" + } + }, + "node_modules/@babel/core/node_modules/semver": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", + "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/generator": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.22.5.tgz", + "integrity": "sha512-+lcUbnTRhd0jOewtFSedLyiPsD5tswKkbgcezOqqWFUVNEwoUTlpPOBmvhG7OXWLR4jMdv0czPGH5XbflnD1EA==", + "dependencies": { + "@babel/types": "^7.22.5", + "@jridgewell/gen-mapping": "^0.3.2", + "@jridgewell/trace-mapping": "^0.3.17", + "jsesc": "^2.5.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-annotate-as-pure": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.22.5.tgz", + "integrity": "sha512-LvBTxu8bQSQkcyKOU+a1btnNFQ1dMAd0R6PyW3arXes06F6QLWLIrd681bxRPIXlrMGR3XYnW9JyML7dP3qgxg==", + "dependencies": { + "@babel/types": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-builder-binary-assignment-operator-visitor": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-builder-binary-assignment-operator-visitor/-/helper-builder-binary-assignment-operator-visitor-7.22.5.tgz", + "integrity": "sha512-m1EP3lVOPptR+2DwD125gziZNcmoNSHGmJROKoy87loWUQyJaVXDgpmruWqDARZSmtYQ+Dl25okU8+qhVzuykw==", + "dependencies": { + "@babel/types": 
"^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-compilation-targets": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.22.5.tgz", + "integrity": "sha512-Ji+ywpHeuqxB8WDxraCiqR0xfhYjiDE/e6k7FuIaANnoOFxAHskHChz4vA1mJC9Lbm01s1PVAGhQY4FUKSkGZw==", + "dependencies": { + "@babel/compat-data": "^7.22.5", + "@babel/helper-validator-option": "^7.22.5", + "browserslist": "^4.21.3", + "lru-cache": "^5.1.1", + "semver": "^6.3.0" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-compilation-targets/node_modules/semver": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", + "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/helper-create-class-features-plugin": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.22.5.tgz", + "integrity": "sha512-xkb58MyOYIslxu3gKmVXmjTtUPvBU4odYzbiIQbWwLKIHCsx6UGZGX6F1IznMFVnDdirseUZopzN+ZRt8Xb33Q==", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.22.5", + "@babel/helper-environment-visitor": "^7.22.5", + "@babel/helper-function-name": "^7.22.5", + "@babel/helper-member-expression-to-functions": "^7.22.5", + "@babel/helper-optimise-call-expression": "^7.22.5", + "@babel/helper-replace-supers": "^7.22.5", + "@babel/helper-skip-transparent-expression-wrappers": "^7.22.5", + "@babel/helper-split-export-declaration": "^7.22.5", + "semver": "^6.3.0" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-create-class-features-plugin/node_modules/semver": { + "version": "6.3.0", + 
"resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", + "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/helper-create-regexp-features-plugin": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-create-regexp-features-plugin/-/helper-create-regexp-features-plugin-7.22.5.tgz", + "integrity": "sha512-1VpEFOIbMRaXyDeUwUfmTIxExLwQ+zkW+Bh5zXpApA3oQedBx9v/updixWxnx/bZpKw7u8VxWjb/qWpIcmPq8A==", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.22.5", + "regexpu-core": "^5.3.1", + "semver": "^6.3.0" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-create-regexp-features-plugin/node_modules/semver": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", + "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/helper-define-polyfill-provider": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/@babel/helper-define-polyfill-provider/-/helper-define-polyfill-provider-0.4.0.tgz", + "integrity": "sha512-RnanLx5ETe6aybRi1cO/edaRH+bNYWaryCEmjDDYyNr4wnSzyOp8T0dWipmqVHKEY3AbVKUom50AKSlj1zmKbg==", + "dependencies": { + "@babel/helper-compilation-targets": "^7.17.7", + "@babel/helper-plugin-utils": "^7.16.7", + "debug": "^4.1.1", + "lodash.debounce": "^4.0.8", + "resolve": "^1.14.2", + "semver": "^6.1.2" + }, + "peerDependencies": { + "@babel/core": "^7.4.0-0" + } + }, + "node_modules/@babel/helper-define-polyfill-provider/node_modules/semver": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", + "integrity": 
"sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/helper-environment-visitor": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.22.5.tgz", + "integrity": "sha512-XGmhECfVA/5sAt+H+xpSg0mfrHq6FzNr9Oxh7PSEBBRUb/mL7Kz3NICXb194rCqAEdxkhPT1a88teizAFyvk8Q==", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-function-name": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.22.5.tgz", + "integrity": "sha512-wtHSq6jMRE3uF2otvfuD3DIvVhOsSNshQl0Qrd7qC9oQJzHvOL4qQXlQn2916+CXGywIjpGuIkoyZRRxHPiNQQ==", + "dependencies": { + "@babel/template": "^7.22.5", + "@babel/types": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-hoist-variables": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.22.5.tgz", + "integrity": "sha512-wGjk9QZVzvknA6yKIUURb8zY3grXCcOZt+/7Wcy8O2uctxhplmUPkOdlgoNhmdVee2c92JXbf1xpMtVNbfoxRw==", + "dependencies": { + "@babel/types": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-member-expression-to-functions": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.22.5.tgz", + "integrity": "sha512-aBiH1NKMG0H2cGZqspNvsaBe6wNGjbJjuLy29aU+eDZjSbbN53BaxlpB02xm9v34pLTZ1nIQPFYn2qMZoa5BQQ==", + "dependencies": { + "@babel/types": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-imports": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.22.5.tgz", + "integrity": 
"sha512-8Dl6+HD/cKifutF5qGd/8ZJi84QeAKh+CEe1sBzz8UayBBGg1dAIJrdHOcOM5b2MpzWL2yuotJTtGjETq0qjXg==", + "dependencies": { + "@babel/types": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-transforms": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.22.5.tgz", + "integrity": "sha512-+hGKDt/Ze8GFExiVHno/2dvG5IdstpzCq0y4Qc9OJ25D4q3pKfiIP/4Vp3/JvhDkLKsDK2api3q3fpIgiIF5bw==", + "dependencies": { + "@babel/helper-environment-visitor": "^7.22.5", + "@babel/helper-module-imports": "^7.22.5", + "@babel/helper-simple-access": "^7.22.5", + "@babel/helper-split-export-declaration": "^7.22.5", + "@babel/helper-validator-identifier": "^7.22.5", + "@babel/template": "^7.22.5", + "@babel/traverse": "^7.22.5", + "@babel/types": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-optimise-call-expression": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.22.5.tgz", + "integrity": "sha512-HBwaojN0xFRx4yIvpwGqxiV2tUfl7401jlok564NgB9EHS1y6QT17FmKWm4ztqjeVdXLuC4fSvHc5ePpQjoTbw==", + "dependencies": { + "@babel/types": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-plugin-utils": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.22.5.tgz", + "integrity": "sha512-uLls06UVKgFG9QD4OeFYLEGteMIAa5kpTPcFL28yuCIIzsf6ZyKZMllKVOCZFhiZ5ptnwX4mtKdWCBE/uT4amg==", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-remap-async-to-generator": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-remap-async-to-generator/-/helper-remap-async-to-generator-7.22.5.tgz", + "integrity": "sha512-cU0Sq1Rf4Z55fgz7haOakIyM7+x/uCFwXpLPaeRzfoUtAEAuUZjZvFPjL/rk5rW693dIgn2hng1W7xbT7lWT4g==", + "dependencies": 
{ + "@babel/helper-annotate-as-pure": "^7.22.5", + "@babel/helper-environment-visitor": "^7.22.5", + "@babel/helper-wrap-function": "^7.22.5", + "@babel/types": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-replace-supers": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-replace-supers/-/helper-replace-supers-7.22.5.tgz", + "integrity": "sha512-aLdNM5I3kdI/V9xGNyKSF3X/gTyMUBohTZ+/3QdQKAA9vxIiy12E+8E2HoOP1/DjeqU+g6as35QHJNMDDYpuCg==", + "dependencies": { + "@babel/helper-environment-visitor": "^7.22.5", + "@babel/helper-member-expression-to-functions": "^7.22.5", + "@babel/helper-optimise-call-expression": "^7.22.5", + "@babel/template": "^7.22.5", + "@babel/traverse": "^7.22.5", + "@babel/types": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-simple-access": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-simple-access/-/helper-simple-access-7.22.5.tgz", + "integrity": "sha512-n0H99E/K+Bika3++WNL17POvo4rKWZ7lZEp1Q+fStVbUi8nxPQEBOlTmCOxW/0JsS56SKKQ+ojAe2pHKJHN35w==", + "dependencies": { + "@babel/types": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-skip-transparent-expression-wrappers": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-skip-transparent-expression-wrappers/-/helper-skip-transparent-expression-wrappers-7.22.5.tgz", + "integrity": "sha512-tK14r66JZKiC43p8Ki33yLBVJKlQDFoA8GYN67lWCDCqoL6EMMSuM9b+Iff2jHaM/RRFYl7K+iiru7hbRqNx8Q==", + "dependencies": { + "@babel/types": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-split-export-declaration": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.22.5.tgz", + "integrity": 
"sha512-thqK5QFghPKWLhAV321lxF95yCg2K3Ob5yw+M3VHWfdia0IkPXUtoLH8x/6Fh486QUvzhb8YOWHChTVen2/PoQ==", + "dependencies": { + "@babel/types": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-string-parser": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.22.5.tgz", + "integrity": "sha512-mM4COjgZox8U+JcXQwPijIZLElkgEpO5rsERVDJTc2qfCDfERyob6k5WegS14SX18IIjv+XD+GrqNumY5JRCDw==", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-identifier": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.22.5.tgz", + "integrity": "sha512-aJXu+6lErq8ltp+JhkJUfk1MTGyuA4v7f3pA+BJ5HLfNC6nAQ0Cpi9uOquUj8Hehg0aUiHzWQbOVJGao6ztBAQ==", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-option": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.22.5.tgz", + "integrity": "sha512-R3oB6xlIVKUnxNUxbmgq7pKjxpru24zlimpE8WK47fACIlM0II/Hm1RS8IaOI7NgCr6LNS+jl5l75m20npAziw==", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-wrap-function": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-wrap-function/-/helper-wrap-function-7.22.5.tgz", + "integrity": "sha512-bYqLIBSEshYcYQyfks8ewYA8S30yaGSeRslcvKMvoUk6HHPySbxHq9YRi6ghhzEU+yhQv9bP/jXnygkStOcqZw==", + "dependencies": { + "@babel/helper-function-name": "^7.22.5", + "@babel/template": "^7.22.5", + "@babel/traverse": "^7.22.5", + "@babel/types": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helpers": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.22.5.tgz", + "integrity": "sha512-pSXRmfE1vzcUIDFQcSGA5Mr+GxBV9oiRKDuDxXvWQQBCh8HoIjs/2DlDB7H8smac1IVrB9/xdXj2N3Wol9Cr+Q==", + "dependencies": { 
+ "@babel/template": "^7.22.5", + "@babel/traverse": "^7.22.5", + "@babel/types": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/highlight": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.22.5.tgz", + "integrity": "sha512-BSKlD1hgnedS5XRnGOljZawtag7H1yPfQp0tdNJCHoH6AZ+Pcm9VvkrK59/Yy593Ypg0zMxH2BxD1VPYUQ7UIw==", + "dependencies": { + "@babel/helper-validator-identifier": "^7.22.5", + "chalk": "^2.0.0", + "js-tokens": "^4.0.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/highlight/node_modules/ansi-styles": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", + "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "dependencies": { + "color-convert": "^1.9.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/highlight/node_modules/chalk": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", + "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", + "dependencies": { + "ansi-styles": "^3.2.1", + "escape-string-regexp": "^1.0.5", + "supports-color": "^5.3.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/highlight/node_modules/color-convert": { + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", + "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", + "dependencies": { + "color-name": "1.1.3" + } + }, + "node_modules/@babel/highlight/node_modules/color-name": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", + "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==" + }, + 
"node_modules/@babel/highlight/node_modules/escape-string-regexp": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", + "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==", + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/@babel/highlight/node_modules/has-flag": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", + "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==", + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/highlight/node_modules/supports-color": { + "version": "5.5.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", + "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", + "dependencies": { + "has-flag": "^3.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/parser": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.22.5.tgz", + "integrity": "sha512-DFZMC9LJUG9PLOclRC32G63UXwzqS2koQC8dkx+PLdmt1xSePYpbT/NbsrJy8Q/muXz7o/h/d4A7Fuyixm559Q==", + "bin": { + "parser": "bin/babel-parser.js" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression/-/plugin-bugfix-safari-id-destructuring-collision-in-function-expression-7.22.5.tgz", + "integrity": "sha512-NP1M5Rf+u2Gw9qfSO4ihjcTGW5zXTi36ITLd4/EoAcEhIZ0yjMqmftDNl3QC19CX7olhrjpyU454g/2W7X0jvQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + 
"node_modules/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining/-/plugin-bugfix-v8-spread-parameters-in-optional-chaining-7.22.5.tgz", + "integrity": "sha512-31Bb65aZaUwqCbWMnZPduIZxCBngHFlzyN6Dq6KAJjtx+lx6ohKHubc61OomYi7XwVD4Ol0XCVz4h+pYFR048g==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/helper-skip-transparent-expression-wrappers": "^7.22.5", + "@babel/plugin-transform-optional-chaining": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.13.0" + } + }, + "node_modules/@babel/plugin-proposal-object-rest-spread": { + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-object-rest-spread/-/plugin-proposal-object-rest-spread-7.12.1.tgz", + "integrity": "sha512-s6SowJIjzlhx8o7lsFx5zmY4At6CTtDvgNQDdPzkBQucle58A6b/TTeEBYtyDgmcXjUTM+vE8YOGHZzzbc/ioA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.10.4", + "@babel/plugin-syntax-object-rest-spread": "^7.8.0", + "@babel/plugin-transform-parameters": "^7.12.1" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-proposal-private-property-in-object": { + "version": "7.21.0-placeholder-for-preset-env.2", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-private-property-in-object/-/plugin-proposal-private-property-in-object-7.21.0-placeholder-for-preset-env.2.tgz", + "integrity": "sha512-SOSkfJDddaM7mak6cPEpswyTRnuRltl429hMraQEglW+OkovnCzsiszTmsrlY//qLFjCpQDFRvjdm2wA5pPm9w==", + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-proposal-unicode-property-regex": { + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-unicode-property-regex/-/plugin-proposal-unicode-property-regex-7.18.6.tgz", 
+ "integrity": "sha512-2BShG/d5yoZyXZfVePH91urL5wTG6ASZU9M4o03lKK8u8UW1y08OMttBSOADTcJrnPMpvDXRG3G8fyLh4ovs8w==", + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.18.6", + "@babel/helper-plugin-utils": "^7.18.6" + }, + "engines": { + "node": ">=4" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-async-generators": { + "version": "7.8.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-async-generators/-/plugin-syntax-async-generators-7.8.4.tgz", + "integrity": "sha512-tycmZxkGfZaxhMRbXlPXuVFpdWlXpir2W4AMhSJgRKzk/eDlIXOhb2LHWoLpDF7TEHylV5zNhykX6KAgHJmTNw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-class-properties": { + "version": "7.12.13", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-properties/-/plugin-syntax-class-properties-7.12.13.tgz", + "integrity": "sha512-fm4idjKla0YahUNgFNLCB0qySdsoPiZP3iQE3rky0mBUtMZ23yDJ9SJdg6dXTSDnulOVqiF3Hgr9nbXvXTQZYA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.12.13" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-class-static-block": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-static-block/-/plugin-syntax-class-static-block-7.14.5.tgz", + "integrity": "sha512-b+YyPmr6ldyNnM6sqYeMWE+bgJcJpO6yS4QD7ymxgH34GBPNDM/THBh8iunyvKIZztiwLH4CJZ0RxTk9emgpjw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-dynamic-import": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-dynamic-import/-/plugin-syntax-dynamic-import-7.8.3.tgz", + "integrity": 
"sha512-5gdGbFon+PszYzqs83S3E5mpi7/y/8M9eC90MRTZfduQOYW76ig6SOSPNe41IG5LoP3FGBn2N0RjVDSQiS94kQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-export-namespace-from": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-export-namespace-from/-/plugin-syntax-export-namespace-from-7.8.3.tgz", + "integrity": "sha512-MXf5laXo6c1IbEbegDmzGPwGNTsHZmEy6QGznu5Sh2UCWvueywb2ee+CCE4zQiZstxU9BMoQO9i6zUFSY0Kj0Q==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.3" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-import-assertions": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-assertions/-/plugin-syntax-import-assertions-7.22.5.tgz", + "integrity": "sha512-rdV97N7KqsRzeNGoWUOK6yUsWarLjE5Su/Snk9IYPU9CwkWHs4t+rTGOvffTR8XGkJMTAdLfO0xVnXm8wugIJg==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-import-attributes": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-attributes/-/plugin-syntax-import-attributes-7.22.5.tgz", + "integrity": "sha512-KwvoWDeNKPETmozyFE0P2rOLqh39EoQHNjqizrI5B8Vt0ZNS7M56s7dAiAqbYfiAYOuIzIh96z3iR2ktgu3tEg==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-import-meta": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-meta/-/plugin-syntax-import-meta-7.10.4.tgz", + "integrity": "sha512-Yqfm+XDx0+Prh3VSeEQCPU81yC+JWZ2pDPFSS4ZdpfZhp4MkFMaDC1UqseovEKwSUpnIL7+vK+Clp7bfh0iD7g==", + "dependencies": { + 
"@babel/helper-plugin-utils": "^7.10.4" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-json-strings": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-json-strings/-/plugin-syntax-json-strings-7.8.3.tgz", + "integrity": "sha512-lY6kdGpWHvjoe2vk4WrAapEuBR69EMxZl+RoGRhrFGNYVK8mOPAW8VfbT/ZgrFbXlDNiiaxQnAtgVCZ6jv30EA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-jsx": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.22.5.tgz", + "integrity": "sha512-gvyP4hZrgrs/wWMaocvxZ44Hw0b3W8Pe+cMxc8V1ULQ07oh8VNbIRaoD1LRZVTvD+0nieDKjfgKg89sD7rrKrg==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-logical-assignment-operators": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-logical-assignment-operators/-/plugin-syntax-logical-assignment-operators-7.10.4.tgz", + "integrity": "sha512-d8waShlpFDinQ5MtvGU9xDAOzKH47+FFoney2baFIoMr952hKOLp1HR7VszoZvOsV/4+RRszNY7D17ba0te0ig==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.10.4" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-nullish-coalescing-operator": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-nullish-coalescing-operator/-/plugin-syntax-nullish-coalescing-operator-7.8.3.tgz", + "integrity": "sha512-aSff4zPII1u2QD7y+F8oDsz19ew4IGEJg9SVW+bqwpwtfFleiQDMdzA/R+UlWDzfnHFCxxleFT0PMIrR36XLNQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + 
"node_modules/@babel/plugin-syntax-numeric-separator": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-numeric-separator/-/plugin-syntax-numeric-separator-7.10.4.tgz", + "integrity": "sha512-9H6YdfkcK/uOnY/K7/aA2xpzaAgkQn37yzWUMRK7OaPOqOpGS1+n0H5hxT9AUw9EsSjPW8SVyMJwYRtWs3X3ug==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.10.4" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-object-rest-spread": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-object-rest-spread/-/plugin-syntax-object-rest-spread-7.8.3.tgz", + "integrity": "sha512-XoqMijGZb9y3y2XskN+P1wUGiVwWZ5JmoDRwx5+3GmEplNyVM2s2Dg8ILFQm8rWM48orGy5YpI5Bl8U1y7ydlA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-optional-catch-binding": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-catch-binding/-/plugin-syntax-optional-catch-binding-7.8.3.tgz", + "integrity": "sha512-6VPD0Pc1lpTqw0aKoeRTMiB+kWhAoT24PA+ksWSBrFtl5SIRVpZlwN3NNPQjehA2E/91FV3RjLWoVTglWcSV3Q==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-optional-chaining": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-chaining/-/plugin-syntax-optional-chaining-7.8.3.tgz", + "integrity": "sha512-KoK9ErH1MBlCPxV0VANkXW2/dw4vlbGDrFgz8bmUsBGYkFRcbRwMh6cIJubdPrkxRwuGdtCk0v/wPTKbQgBjkg==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-private-property-in-object": { + "version": "7.14.5", + "resolved": 
"https://registry.npmjs.org/@babel/plugin-syntax-private-property-in-object/-/plugin-syntax-private-property-in-object-7.14.5.tgz", + "integrity": "sha512-0wVnp9dxJ72ZUJDV27ZfbSj6iHLoytYZmh3rFcxNnvsJF3ktkzLDZPy/mA17HGsaQT3/DQsWYX1f1QGWkCoVUg==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-top-level-await": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-top-level-await/-/plugin-syntax-top-level-await-7.14.5.tgz", + "integrity": "sha512-hx++upLv5U1rgYfwe1xBQUhRmU41NEvpUvrp8jkrSCdvGSnM5/qdRMtylJ6PG5OFkBaHkbTAKTnd3/YyESRHFw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-typescript": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.22.5.tgz", + "integrity": "sha512-1mS2o03i7t1c6VzH6fdQ3OA8tcEIxwG18zIPRp+UY1Ihv6W+XZzBCVxExF9upussPXJ0xE9XRHwMoNs1ep/nRQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-unicode-sets-regex": { + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-unicode-sets-regex/-/plugin-syntax-unicode-sets-regex-7.18.6.tgz", + "integrity": "sha512-727YkEAPwSIQTv5im8QHz3upqp92JTWhidIC81Tdx4VJYIte/VndKf1qKrfnnhPLiPghStWfvC/iFaMCQu7Nqg==", + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.18.6", + "@babel/helper-plugin-utils": "^7.18.6" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/plugin-transform-arrow-functions": { + "version": 
"7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.22.5.tgz", + "integrity": "sha512-26lTNXoVRdAnsaDXPpvCNUq+OVWEVC6bx7Vvz9rC53F2bagUWW4u4ii2+h8Fejfh7RYqPxn+libeFBBck9muEw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-async-generator-functions": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-async-generator-functions/-/plugin-transform-async-generator-functions-7.22.5.tgz", + "integrity": "sha512-gGOEvFzm3fWoyD5uZq7vVTD57pPJ3PczPUD/xCFGjzBpUosnklmXyKnGQbbbGs1NPNPskFex0j93yKbHt0cHyg==", + "dependencies": { + "@babel/helper-environment-visitor": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/helper-remap-async-to-generator": "^7.22.5", + "@babel/plugin-syntax-async-generators": "^7.8.4" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-async-to-generator": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-async-to-generator/-/plugin-transform-async-to-generator-7.22.5.tgz", + "integrity": "sha512-b1A8D8ZzE/VhNDoV1MSJTnpKkCG5bJo+19R4o4oy03zM7ws8yEMK755j61Dc3EyvdysbqH5BOOTquJ7ZX9C6vQ==", + "dependencies": { + "@babel/helper-module-imports": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/helper-remap-async-to-generator": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-block-scoped-functions": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.22.5.tgz", + "integrity": 
"sha512-tdXZ2UdknEKQWKJP1KMNmuF5Lx3MymtMN/pvA+p/VEkhK8jVcQ1fzSy8KM9qRYhAf2/lV33hoMPKI/xaI9sADA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-block-scoping": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.22.5.tgz", + "integrity": "sha512-EcACl1i5fSQ6bt+YGuU/XGCeZKStLmyVGytWkpyhCLeQVA0eu6Wtiw92V+I1T/hnezUv7j74dA/Ro69gWcU+hg==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-class-properties": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-class-properties/-/plugin-transform-class-properties-7.22.5.tgz", + "integrity": "sha512-nDkQ0NfkOhPTq8YCLiWNxp1+f9fCobEjCb0n8WdbNUBc4IB5V7P1QnX9IjpSoquKrXF5SKojHleVNs2vGeHCHQ==", + "dependencies": { + "@babel/helper-create-class-features-plugin": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-class-static-block": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-class-static-block/-/plugin-transform-class-static-block-7.22.5.tgz", + "integrity": "sha512-SPToJ5eYZLxlnp1UzdARpOGeC2GbHvr9d/UV0EukuVx8atktg194oe+C5BqQ8jRTkgLRVOPYeXRSBg1IlMoVRA==", + "dependencies": { + "@babel/helper-create-class-features-plugin": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/plugin-syntax-class-static-block": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.12.0" + } + }, + "node_modules/@babel/plugin-transform-classes": { + "version": 
"7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-classes/-/plugin-transform-classes-7.22.5.tgz", + "integrity": "sha512-2edQhLfibpWpsVBx2n/GKOz6JdGQvLruZQfGr9l1qes2KQaWswjBzhQF7UDUZMNaMMQeYnQzxwOMPsbYF7wqPQ==", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.22.5", + "@babel/helper-compilation-targets": "^7.22.5", + "@babel/helper-environment-visitor": "^7.22.5", + "@babel/helper-function-name": "^7.22.5", + "@babel/helper-optimise-call-expression": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/helper-replace-supers": "^7.22.5", + "@babel/helper-split-export-declaration": "^7.22.5", + "globals": "^11.1.0" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-computed-properties": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.22.5.tgz", + "integrity": "sha512-4GHWBgRf0krxPX+AaPtgBAlTgTeZmqDynokHOX7aqqAB4tHs3U2Y02zH6ETFdLZGcg9UQSD1WCmkVrE9ErHeOg==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/template": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-destructuring": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.22.5.tgz", + "integrity": "sha512-GfqcFuGW8vnEqTUBM7UtPd5A4q797LTvvwKxXTgRsFjoqaJiEg9deBG6kWeQYkVEL569NpnmpC0Pkr/8BLKGnQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-dotall-regex": { + "version": "7.22.5", + "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-dotall-regex/-/plugin-transform-dotall-regex-7.22.5.tgz", + "integrity": "sha512-5/Yk9QxCQCl+sOIB1WelKnVRxTJDSAIxtJLL2/pqL14ZVlbH0fUQUZa/T5/UnQtBNgghR7mfB8ERBKyKPCi7Vw==", + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-duplicate-keys": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-duplicate-keys/-/plugin-transform-duplicate-keys-7.22.5.tgz", + "integrity": "sha512-dEnYD+9BBgld5VBXHnF/DbYGp3fqGMsyxKbtD1mDyIA7AkTSpKXFhCVuj/oQVOoALfBs77DudA0BE4d5mcpmqw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-dynamic-import": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-dynamic-import/-/plugin-transform-dynamic-import-7.22.5.tgz", + "integrity": "sha512-0MC3ppTB1AMxd8fXjSrbPa7LT9hrImt+/fcj+Pg5YMD7UQyWp/02+JWpdnCymmsXwIx5Z+sYn1bwCn4ZJNvhqQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/plugin-syntax-dynamic-import": "^7.8.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-exponentiation-operator": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-exponentiation-operator/-/plugin-transform-exponentiation-operator-7.22.5.tgz", + "integrity": "sha512-vIpJFNM/FjZ4rh1myqIya9jXwrwwgFRHPjT3DkUA9ZLHuzox8jiXkOLvwm1H+PQIP3CqfC++WPKeuDi0Sjdj1g==", + "dependencies": { + "@babel/helper-builder-binary-assignment-operator-visitor": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + 
"node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-export-namespace-from": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-export-namespace-from/-/plugin-transform-export-namespace-from-7.22.5.tgz", + "integrity": "sha512-X4hhm7FRnPgd4nDA4b/5V280xCx6oL7Oob5+9qVS5C13Zq4bh1qq7LU0GgRU6b5dBWBvhGaXYVB4AcN6+ol6vg==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/plugin-syntax-export-namespace-from": "^7.8.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-for-of": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.22.5.tgz", + "integrity": "sha512-3kxQjX1dU9uudwSshyLeEipvrLjBCVthCgeTp6CzE/9JYrlAIaeekVxRpCWsDDfYTfRZRoCeZatCQvwo+wvK8A==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-function-name": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-function-name/-/plugin-transform-function-name-7.22.5.tgz", + "integrity": "sha512-UIzQNMS0p0HHiQm3oelztj+ECwFnj+ZRV4KnguvlsD2of1whUeM6o7wGNj6oLwcDoAXQ8gEqfgC24D+VdIcevg==", + "dependencies": { + "@babel/helper-compilation-targets": "^7.22.5", + "@babel/helper-function-name": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-json-strings": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-json-strings/-/plugin-transform-json-strings-7.22.5.tgz", + "integrity": 
"sha512-DuCRB7fu8MyTLbEQd1ew3R85nx/88yMoqo2uPSjevMj3yoN7CDM8jkgrY0wmVxfJZyJ/B9fE1iq7EQppWQmR5A==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/plugin-syntax-json-strings": "^7.8.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-literals": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-literals/-/plugin-transform-literals-7.22.5.tgz", + "integrity": "sha512-fTLj4D79M+mepcw3dgFBTIDYpbcB9Sm0bpm4ppXPaO+U+PKFFyV9MGRvS0gvGw62sd10kT5lRMKXAADb9pWy8g==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-logical-assignment-operators": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-logical-assignment-operators/-/plugin-transform-logical-assignment-operators-7.22.5.tgz", + "integrity": "sha512-MQQOUW1KL8X0cDWfbwYP+TbVbZm16QmQXJQ+vndPtH/BoO0lOKpVoEDMI7+PskYxH+IiE0tS8xZye0qr1lGzSA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-member-expression-literals": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-member-expression-literals/-/plugin-transform-member-expression-literals-7.22.5.tgz", + "integrity": "sha512-RZEdkNtzzYCFl9SE9ATaUMTj2hqMb4StarOJLrZRbqqU4HSBE7UlBw9WBWQiDzrJZJdUWiMTVDI6Gv/8DPvfew==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-modules-amd": { + "version": "7.22.5", + 
"resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.22.5.tgz", + "integrity": "sha512-R+PTfLTcYEmb1+kK7FNkhQ1gP4KgjpSO6HfH9+f8/yfp2Nt3ggBjiVpRwmwTlfqZLafYKJACy36yDXlEmI9HjQ==", + "dependencies": { + "@babel/helper-module-transforms": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-modules-commonjs": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.22.5.tgz", + "integrity": "sha512-B4pzOXj+ONRmuaQTg05b3y/4DuFz3WcCNAXPLb2Q0GT0TrGKGxNKV4jwsXts+StaM0LQczZbOpj8o1DLPDJIiA==", + "dependencies": { + "@babel/helper-module-transforms": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/helper-simple-access": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-modules-systemjs": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.22.5.tgz", + "integrity": "sha512-emtEpoaTMsOs6Tzz+nbmcePl6AKVtS1yC4YNAeMun9U8YCsgadPNxnOPQ8GhHFB2qdx+LZu9LgoC0Lthuu05DQ==", + "dependencies": { + "@babel/helper-hoist-variables": "^7.22.5", + "@babel/helper-module-transforms": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/helper-validator-identifier": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-modules-umd": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.22.5.tgz", + "integrity": "sha512-+S6kzefN/E1vkSsKx8kmQuqeQsvCKCd1fraCM7zXm4SFoggI099Tr4G8U81+5gtMdUeMQ4ipdQffbKLX0/7dBQ==", + 
"dependencies": { + "@babel/helper-module-transforms": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-named-capturing-groups-regex": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-named-capturing-groups-regex/-/plugin-transform-named-capturing-groups-regex-7.22.5.tgz", + "integrity": "sha512-YgLLKmS3aUBhHaxp5hi1WJTgOUb/NCuDHzGT9z9WTt3YG+CPRhJs6nprbStx6DnWM4dh6gt7SU3sZodbZ08adQ==", + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/plugin-transform-new-target": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-new-target/-/plugin-transform-new-target-7.22.5.tgz", + "integrity": "sha512-AsF7K0Fx/cNKVyk3a+DW0JLo+Ua598/NxMRvxDnkpCIGFh43+h/v2xyhRUYf6oD8gE4QtL83C7zZVghMjHd+iw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-nullish-coalescing-operator": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-nullish-coalescing-operator/-/plugin-transform-nullish-coalescing-operator-7.22.5.tgz", + "integrity": "sha512-6CF8g6z1dNYZ/VXok5uYkkBBICHZPiGEl7oDnAx2Mt1hlHVHOSIKWJaXHjQJA5VB43KZnXZDIexMchY4y2PGdA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-numeric-separator": { + "version": "7.22.5", + "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-numeric-separator/-/plugin-transform-numeric-separator-7.22.5.tgz", + "integrity": "sha512-NbslED1/6M+sXiwwtcAB/nieypGw02Ejf4KtDeMkCEpP6gWFMX1wI9WKYua+4oBneCCEmulOkRpwywypVZzs/g==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/plugin-syntax-numeric-separator": "^7.10.4" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-object-rest-spread": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-object-rest-spread/-/plugin-transform-object-rest-spread-7.22.5.tgz", + "integrity": "sha512-Kk3lyDmEslH9DnvCDA1s1kkd3YWQITiBOHngOtDL9Pt6BZjzqb6hiOlb8VfjiiQJ2unmegBqZu0rx5RxJb5vmQ==", + "dependencies": { + "@babel/compat-data": "^7.22.5", + "@babel/helper-compilation-targets": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/plugin-syntax-object-rest-spread": "^7.8.3", + "@babel/plugin-transform-parameters": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-object-super": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-object-super/-/plugin-transform-object-super-7.22.5.tgz", + "integrity": "sha512-klXqyaT9trSjIUrcsYIfETAzmOEZL3cBYqOYLJxBHfMFFggmXOv+NYSX/Jbs9mzMVESw/WycLFPRx8ba/b2Ipw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/helper-replace-supers": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-optional-catch-binding": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-optional-catch-binding/-/plugin-transform-optional-catch-binding-7.22.5.tgz", + "integrity": 
"sha512-pH8orJahy+hzZje5b8e2QIlBWQvGpelS76C63Z+jhZKsmzfNaPQ+LaW6dcJ9bxTpo1mtXbgHwy765Ro3jftmUg==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/plugin-syntax-optional-catch-binding": "^7.8.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-optional-chaining": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-optional-chaining/-/plugin-transform-optional-chaining-7.22.5.tgz", + "integrity": "sha512-AconbMKOMkyG+xCng2JogMCDcqW8wedQAqpVIL4cOSescZ7+iW8utC6YDZLMCSUIReEA733gzRSaOSXMAt/4WQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/helper-skip-transparent-expression-wrappers": "^7.22.5", + "@babel/plugin-syntax-optional-chaining": "^7.8.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-parameters": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.22.5.tgz", + "integrity": "sha512-AVkFUBurORBREOmHRKo06FjHYgjrabpdqRSwq6+C7R5iTCZOsM4QbcB27St0a4U6fffyAOqh3s/qEfybAhfivg==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-private-methods": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-private-methods/-/plugin-transform-private-methods-7.22.5.tgz", + "integrity": "sha512-PPjh4gyrQnGe97JTalgRGMuU4icsZFnWkzicB/fUtzlKUqvsWBKEpPPfr5a2JiyirZkHxnAqkQMO5Z5B2kK3fA==", + "dependencies": { + "@babel/helper-create-class-features-plugin": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + 
"node_modules/@babel/plugin-transform-private-property-in-object": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-private-property-in-object/-/plugin-transform-private-property-in-object-7.22.5.tgz", + "integrity": "sha512-/9xnaTTJcVoBtSSmrVyhtSvO3kbqS2ODoh2juEU72c3aYonNF0OMGiaz2gjukyKM2wBBYJP38S4JiE0Wfb5VMQ==", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.22.5", + "@babel/helper-create-class-features-plugin": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/plugin-syntax-private-property-in-object": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-property-literals": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-property-literals/-/plugin-transform-property-literals-7.22.5.tgz", + "integrity": "sha512-TiOArgddK3mK/x1Qwf5hay2pxI6wCZnvQqrFSqbtg1GLl2JcNMitVH/YnqjP+M31pLUeTfzY1HAXFDnUBV30rQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-react-constant-elements": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-constant-elements/-/plugin-transform-react-constant-elements-7.22.5.tgz", + "integrity": "sha512-BF5SXoO+nX3h5OhlN78XbbDrBOffv+AxPP2ENaJOVqjWCgBDeOY3WcaUcddutGSfoap+5NEQ/q/4I3WZIvgkXA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-react-display-name": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-display-name/-/plugin-transform-react-display-name-7.22.5.tgz", + "integrity": 
"sha512-PVk3WPYudRF5z4GKMEYUrLjPl38fJSKNaEOkFuoprioowGuWN6w2RKznuFNSlJx7pzzXXStPUnNSOEO0jL5EVw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-react-jsx": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx/-/plugin-transform-react-jsx-7.22.5.tgz", + "integrity": "sha512-rog5gZaVbUip5iWDMTYbVM15XQq+RkUKhET/IHR6oizR+JEoN6CAfTTuHcK4vwUyzca30qqHqEpzBOnaRMWYMA==", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.22.5", + "@babel/helper-module-imports": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/plugin-syntax-jsx": "^7.22.5", + "@babel/types": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-react-jsx-development": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-development/-/plugin-transform-react-jsx-development-7.22.5.tgz", + "integrity": "sha512-bDhuzwWMuInwCYeDeMzyi7TaBgRQei6DqxhbyniL7/VG4RSS7HtSL2QbY4eESy1KJqlWt8g3xeEBGPuo+XqC8A==", + "dependencies": { + "@babel/plugin-transform-react-jsx": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-react-pure-annotations": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-pure-annotations/-/plugin-transform-react-pure-annotations-7.22.5.tgz", + "integrity": "sha512-gP4k85wx09q+brArVinTXhWiyzLl9UpmGva0+mWyKxk6JZequ05x3eUcIUE+FyttPKJFRRVtAvQaJ6YF9h1ZpA==", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + 
"node_modules/@babel/plugin-transform-regenerator": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.22.5.tgz", + "integrity": "sha512-rR7KePOE7gfEtNTh9Qw+iO3Q/e4DEsoQ+hdvM6QUDH7JRJ5qxq5AA52ZzBWbI5i9lfNuvySgOGP8ZN7LAmaiPw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5", + "regenerator-transform": "^0.15.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-reserved-words": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-reserved-words/-/plugin-transform-reserved-words-7.22.5.tgz", + "integrity": "sha512-DTtGKFRQUDm8svigJzZHzb/2xatPc6TzNvAIJ5GqOKDsGFYgAskjRulbR/vGsPKq3OPqtexnz327qYpP57RFyA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-runtime": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-runtime/-/plugin-transform-runtime-7.22.5.tgz", + "integrity": "sha512-bg4Wxd1FWeFx3daHFTWk1pkSWK/AyQuiyAoeZAOkAOUBjnZPH6KT7eMxouV47tQ6hl6ax2zyAWBdWZXbrvXlaw==", + "dependencies": { + "@babel/helper-module-imports": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5", + "babel-plugin-polyfill-corejs2": "^0.4.3", + "babel-plugin-polyfill-corejs3": "^0.8.1", + "babel-plugin-polyfill-regenerator": "^0.5.0", + "semver": "^6.3.0" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-runtime/node_modules/semver": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", + "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", + "bin": { + "semver": 
"bin/semver.js" + } + }, + "node_modules/@babel/plugin-transform-shorthand-properties": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-shorthand-properties/-/plugin-transform-shorthand-properties-7.22.5.tgz", + "integrity": "sha512-vM4fq9IXHscXVKzDv5itkO1X52SmdFBFcMIBZ2FRn2nqVYqw6dBexUgMvAjHW+KXpPPViD/Yo3GrDEBaRC0QYA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-spread": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-spread/-/plugin-transform-spread-7.22.5.tgz", + "integrity": "sha512-5ZzDQIGyvN4w8+dMmpohL6MBo+l2G7tfC/O2Dg7/hjpgeWvUx8FzfeOKxGog9IimPa4YekaQ9PlDqTLOljkcxg==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/helper-skip-transparent-expression-wrappers": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-sticky-regex": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-sticky-regex/-/plugin-transform-sticky-regex-7.22.5.tgz", + "integrity": "sha512-zf7LuNpHG0iEeiyCNwX4j3gDg1jgt1k3ZdXBKbZSoA3BbGQGvMiSvfbZRR3Dr3aeJe3ooWFZxOOG3IRStYp2Bw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-template-literals": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.22.5.tgz", + "integrity": "sha512-5ciOehRNf+EyUeewo8NkbQiUs4d6ZxiHo6BcBcnFlgiJfu16q0bQUw9Jvo0b0gBKFG1SMhDSjeKXSYuJLeFSMA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + 
"peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-typeof-symbol": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.22.5.tgz", + "integrity": "sha512-bYkI5lMzL4kPii4HHEEChkD0rkc+nvnlR6+o/qdqR6zrm0Sv/nodmyLhlq2DO0YKLUNd2VePmPRjJXSBh9OIdA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-typescript": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-typescript/-/plugin-transform-typescript-7.22.5.tgz", + "integrity": "sha512-SMubA9S7Cb5sGSFFUlqxyClTA9zWJ8qGQrppNUm05LtFuN1ELRFNndkix4zUJrC9F+YivWwa1dHMSyo0e0N9dA==", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.22.5", + "@babel/helper-create-class-features-plugin": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/plugin-syntax-typescript": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-unicode-escapes": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-escapes/-/plugin-transform-unicode-escapes-7.22.5.tgz", + "integrity": "sha512-biEmVg1IYB/raUO5wT1tgfacCef15Fbzhkx493D3urBI++6hpJ+RFG4SrWMn0NEZLfvilqKf3QDrRVZHo08FYg==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-unicode-property-regex": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-property-regex/-/plugin-transform-unicode-property-regex-7.22.5.tgz", + "integrity": 
"sha512-HCCIb+CbJIAE6sXn5CjFQXMwkCClcOfPCzTlilJ8cUatfzwHlWQkbtV0zD338u9dZskwvuOYTuuaMaA8J5EI5A==", + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-unicode-regex": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-regex/-/plugin-transform-unicode-regex-7.22.5.tgz", + "integrity": "sha512-028laaOKptN5vHJf9/Arr/HiJekMd41hOEZYvNsrsXqJ7YPYuX2bQxh31fkZzGmq3YqHRJzYFFAVYvKfMPKqyg==", + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-unicode-sets-regex": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-sets-regex/-/plugin-transform-unicode-sets-regex-7.22.5.tgz", + "integrity": "sha512-lhMfi4FC15j13eKrh3DnYHjpGj6UKQHtNKTbtc1igvAhRy4+kLhV07OpLcsN0VgDEw/MjAvJO4BdMJsHwMhzCg==", + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/preset-env": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/preset-env/-/preset-env-7.22.5.tgz", + "integrity": "sha512-fj06hw89dpiZzGZtxn+QybifF07nNiZjZ7sazs2aVDcysAZVGjW7+7iFYxg6GLNM47R/thYfLdrXc+2f11Vi9A==", + "dependencies": { + "@babel/compat-data": "^7.22.5", + "@babel/helper-compilation-targets": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/helper-validator-option": "^7.22.5", + "@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": "^7.22.5", + 
"@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": "^7.22.5", + "@babel/plugin-proposal-private-property-in-object": "7.21.0-placeholder-for-preset-env.2", + "@babel/plugin-syntax-async-generators": "^7.8.4", + "@babel/plugin-syntax-class-properties": "^7.12.13", + "@babel/plugin-syntax-class-static-block": "^7.14.5", + "@babel/plugin-syntax-dynamic-import": "^7.8.3", + "@babel/plugin-syntax-export-namespace-from": "^7.8.3", + "@babel/plugin-syntax-import-assertions": "^7.22.5", + "@babel/plugin-syntax-import-attributes": "^7.22.5", + "@babel/plugin-syntax-import-meta": "^7.10.4", + "@babel/plugin-syntax-json-strings": "^7.8.3", + "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4", + "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3", + "@babel/plugin-syntax-numeric-separator": "^7.10.4", + "@babel/plugin-syntax-object-rest-spread": "^7.8.3", + "@babel/plugin-syntax-optional-catch-binding": "^7.8.3", + "@babel/plugin-syntax-optional-chaining": "^7.8.3", + "@babel/plugin-syntax-private-property-in-object": "^7.14.5", + "@babel/plugin-syntax-top-level-await": "^7.14.5", + "@babel/plugin-syntax-unicode-sets-regex": "^7.18.6", + "@babel/plugin-transform-arrow-functions": "^7.22.5", + "@babel/plugin-transform-async-generator-functions": "^7.22.5", + "@babel/plugin-transform-async-to-generator": "^7.22.5", + "@babel/plugin-transform-block-scoped-functions": "^7.22.5", + "@babel/plugin-transform-block-scoping": "^7.22.5", + "@babel/plugin-transform-class-properties": "^7.22.5", + "@babel/plugin-transform-class-static-block": "^7.22.5", + "@babel/plugin-transform-classes": "^7.22.5", + "@babel/plugin-transform-computed-properties": "^7.22.5", + "@babel/plugin-transform-destructuring": "^7.22.5", + "@babel/plugin-transform-dotall-regex": "^7.22.5", + "@babel/plugin-transform-duplicate-keys": "^7.22.5", + "@babel/plugin-transform-dynamic-import": "^7.22.5", + "@babel/plugin-transform-exponentiation-operator": "^7.22.5", + 
"@babel/plugin-transform-export-namespace-from": "^7.22.5", + "@babel/plugin-transform-for-of": "^7.22.5", + "@babel/plugin-transform-function-name": "^7.22.5", + "@babel/plugin-transform-json-strings": "^7.22.5", + "@babel/plugin-transform-literals": "^7.22.5", + "@babel/plugin-transform-logical-assignment-operators": "^7.22.5", + "@babel/plugin-transform-member-expression-literals": "^7.22.5", + "@babel/plugin-transform-modules-amd": "^7.22.5", + "@babel/plugin-transform-modules-commonjs": "^7.22.5", + "@babel/plugin-transform-modules-systemjs": "^7.22.5", + "@babel/plugin-transform-modules-umd": "^7.22.5", + "@babel/plugin-transform-named-capturing-groups-regex": "^7.22.5", + "@babel/plugin-transform-new-target": "^7.22.5", + "@babel/plugin-transform-nullish-coalescing-operator": "^7.22.5", + "@babel/plugin-transform-numeric-separator": "^7.22.5", + "@babel/plugin-transform-object-rest-spread": "^7.22.5", + "@babel/plugin-transform-object-super": "^7.22.5", + "@babel/plugin-transform-optional-catch-binding": "^7.22.5", + "@babel/plugin-transform-optional-chaining": "^7.22.5", + "@babel/plugin-transform-parameters": "^7.22.5", + "@babel/plugin-transform-private-methods": "^7.22.5", + "@babel/plugin-transform-private-property-in-object": "^7.22.5", + "@babel/plugin-transform-property-literals": "^7.22.5", + "@babel/plugin-transform-regenerator": "^7.22.5", + "@babel/plugin-transform-reserved-words": "^7.22.5", + "@babel/plugin-transform-shorthand-properties": "^7.22.5", + "@babel/plugin-transform-spread": "^7.22.5", + "@babel/plugin-transform-sticky-regex": "^7.22.5", + "@babel/plugin-transform-template-literals": "^7.22.5", + "@babel/plugin-transform-typeof-symbol": "^7.22.5", + "@babel/plugin-transform-unicode-escapes": "^7.22.5", + "@babel/plugin-transform-unicode-property-regex": "^7.22.5", + "@babel/plugin-transform-unicode-regex": "^7.22.5", + "@babel/plugin-transform-unicode-sets-regex": "^7.22.5", + "@babel/preset-modules": "^0.1.5", + "@babel/types": 
"^7.22.5", + "babel-plugin-polyfill-corejs2": "^0.4.3", + "babel-plugin-polyfill-corejs3": "^0.8.1", + "babel-plugin-polyfill-regenerator": "^0.5.0", + "core-js-compat": "^3.30.2", + "semver": "^6.3.0" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/preset-env/node_modules/semver": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", + "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/preset-modules": { + "version": "0.1.5", + "resolved": "https://registry.npmjs.org/@babel/preset-modules/-/preset-modules-0.1.5.tgz", + "integrity": "sha512-A57th6YRG7oR3cq/yt/Y84MvGgE0eJG2F1JLhKuyG+jFxEgrd/HAMJatiFtmOiZurz+0DkrvbheCLaV5f2JfjA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.0.0", + "@babel/plugin-proposal-unicode-property-regex": "^7.4.4", + "@babel/plugin-transform-dotall-regex": "^7.4.4", + "@babel/types": "^7.4.4", + "esutils": "^2.0.2" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/preset-react": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/preset-react/-/preset-react-7.22.5.tgz", + "integrity": "sha512-M+Is3WikOpEJHgR385HbuCITPTaPRaNkibTEa9oiofmJvIsrceb4yp9RL9Kb+TE8LznmeyZqpP+Lopwcx59xPQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/helper-validator-option": "^7.22.5", + "@babel/plugin-transform-react-display-name": "^7.22.5", + "@babel/plugin-transform-react-jsx": "^7.22.5", + "@babel/plugin-transform-react-jsx-development": "^7.22.5", + "@babel/plugin-transform-react-pure-annotations": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/preset-typescript": { + "version": "7.22.5", + "resolved": 
"https://registry.npmjs.org/@babel/preset-typescript/-/preset-typescript-7.22.5.tgz", + "integrity": "sha512-YbPaal9LxztSGhmndR46FmAbkJ/1fAsw293tSU+I5E5h+cnJ3d4GTwyUgGYmOXJYdGA+uNePle4qbaRzj2NISQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/helper-validator-option": "^7.22.5", + "@babel/plugin-syntax-jsx": "^7.22.5", + "@babel/plugin-transform-modules-commonjs": "^7.22.5", + "@babel/plugin-transform-typescript": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/regjsgen": { + "version": "0.8.0", + "resolved": "https://registry.npmjs.org/@babel/regjsgen/-/regjsgen-0.8.0.tgz", + "integrity": "sha512-x/rqGMdzj+fWZvCOYForTghzbtqPDZ5gPwaoNGHdgDfF2QA/XZbCBp4Moo5scrkAMPhB7z26XM/AaHuIJdgauA==" + }, + "node_modules/@babel/runtime": { + "version": "7.21.5", + "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.21.5.tgz", + "integrity": "sha512-8jI69toZqqcsnqGGqwGS4Qb1VwLOEp4hz+CXPywcvjs60u3B4Pom/U/7rm4W8tMOYEB+E9wgD0mW1l3r8qlI9Q==", + "dependencies": { + "regenerator-runtime": "^0.13.11" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/runtime-corejs3": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/runtime-corejs3/-/runtime-corejs3-7.22.5.tgz", + "integrity": "sha512-TNPDN6aBFaUox2Lu+H/Y1dKKQgr4ucz/FGyCz67RVYLsBpVpUFf1dDngzg+Od8aqbrqwyztkaZjtWCZEUOT8zA==", + "dependencies": { + "core-js-pure": "^3.30.2", + "regenerator-runtime": "^0.13.11" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/template": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.22.5.tgz", + "integrity": "sha512-X7yV7eiwAxdj9k94NEylvbVHLiVG1nvzCV2EAowhxLTwODV1jl9UzZ48leOC0sH7OnuHrIkllaBgneUykIcZaw==", + "dependencies": { + "@babel/code-frame": "^7.22.5", + "@babel/parser": "^7.22.5", + "@babel/types": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + } 
+ }, + "node_modules/@babel/traverse": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.22.5.tgz", + "integrity": "sha512-7DuIjPgERaNo6r+PZwItpjCZEa5vyw4eJGufeLxrPdBXBoLcCJCIasvK6pK/9DVNrLZTLFhUGqaC6X/PA007TQ==", + "dependencies": { + "@babel/code-frame": "^7.22.5", + "@babel/generator": "^7.22.5", + "@babel/helper-environment-visitor": "^7.22.5", + "@babel/helper-function-name": "^7.22.5", + "@babel/helper-hoist-variables": "^7.22.5", + "@babel/helper-split-export-declaration": "^7.22.5", + "@babel/parser": "^7.22.5", + "@babel/types": "^7.22.5", + "debug": "^4.1.0", + "globals": "^11.1.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/types": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.22.5.tgz", + "integrity": "sha512-zo3MIHGOkPOfoRXitsgHLjEXmlDaD/5KU1Uzuc9GNiZPhSqVxVRtxuPaSBZDsYZ9qV88AjtMtWW7ww98loJ9KA==", + "dependencies": { + "@babel/helper-string-parser": "^7.22.5", + "@babel/helper-validator-identifier": "^7.22.5", + "to-fast-properties": "^2.0.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@colors/colors": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/@colors/colors/-/colors-1.5.0.tgz", + "integrity": "sha512-ooWCrlZP11i8GImSjTHYHLkvFDP48nS4+204nGb1RiX/WXYHmJA2III9/e2DWVabCESdW7hBAEzHRqUn9OUVvQ==", + "optional": true, + "engines": { + "node": ">=0.1.90" + } + }, + "node_modules/@discoveryjs/json-ext": { + "version": "0.5.7", + "resolved": "https://registry.npmjs.org/@discoveryjs/json-ext/-/json-ext-0.5.7.tgz", + "integrity": "sha512-dBVuXR082gk3jsFp7Rd/JI4kytwGHecnCoTtXFb7DB6CNHp4rg5k1bhg0nWdLGLnOV71lmDzGQaLMy8iPLY0pw==", + "engines": { + "node": ">=10.0.0" + } + }, + "node_modules/@docusaurus/core": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@docusaurus/core/-/core-2.4.1.tgz", + "integrity": 
"sha512-SNsY7PshK3Ri7vtsLXVeAJGS50nJN3RgF836zkyUfAD01Fq+sAk5EwWgLw+nnm5KVNGDu7PRR2kRGDsWvqpo0g==", + "dependencies": { + "@babel/core": "^7.18.6", + "@babel/generator": "^7.18.7", + "@babel/plugin-syntax-dynamic-import": "^7.8.3", + "@babel/plugin-transform-runtime": "^7.18.6", + "@babel/preset-env": "^7.18.6", + "@babel/preset-react": "^7.18.6", + "@babel/preset-typescript": "^7.18.6", + "@babel/runtime": "^7.18.6", + "@babel/runtime-corejs3": "^7.18.6", + "@babel/traverse": "^7.18.8", + "@docusaurus/cssnano-preset": "2.4.1", + "@docusaurus/logger": "2.4.1", + "@docusaurus/mdx-loader": "2.4.1", + "@docusaurus/react-loadable": "5.5.2", + "@docusaurus/utils": "2.4.1", + "@docusaurus/utils-common": "2.4.1", + "@docusaurus/utils-validation": "2.4.1", + "@slorber/static-site-generator-webpack-plugin": "^4.0.7", + "@svgr/webpack": "^6.2.1", + "autoprefixer": "^10.4.7", + "babel-loader": "^8.2.5", + "babel-plugin-dynamic-import-node": "^2.3.3", + "boxen": "^6.2.1", + "chalk": "^4.1.2", + "chokidar": "^3.5.3", + "clean-css": "^5.3.0", + "cli-table3": "^0.6.2", + "combine-promises": "^1.1.0", + "commander": "^5.1.0", + "copy-webpack-plugin": "^11.0.0", + "core-js": "^3.23.3", + "css-loader": "^6.7.1", + "css-minimizer-webpack-plugin": "^4.0.0", + "cssnano": "^5.1.12", + "del": "^6.1.1", + "detect-port": "^1.3.0", + "escape-html": "^1.0.3", + "eta": "^2.0.0", + "file-loader": "^6.2.0", + "fs-extra": "^10.1.0", + "html-minifier-terser": "^6.1.0", + "html-tags": "^3.2.0", + "html-webpack-plugin": "^5.5.0", + "import-fresh": "^3.3.0", + "leven": "^3.1.0", + "lodash": "^4.17.21", + "mini-css-extract-plugin": "^2.6.1", + "postcss": "^8.4.14", + "postcss-loader": "^7.0.0", + "prompts": "^2.4.2", + "react-dev-utils": "^12.0.1", + "react-helmet-async": "^1.3.0", + "react-loadable": "npm:@docusaurus/react-loadable@5.5.2", + "react-loadable-ssr-addon-v5-slorber": "^1.0.1", + "react-router": "^5.3.3", + "react-router-config": "^5.1.1", + "react-router-dom": "^5.3.3", + "rtl-detect": 
"^1.0.4", + "semver": "^7.3.7", + "serve-handler": "^6.1.3", + "shelljs": "^0.8.5", + "terser-webpack-plugin": "^5.3.3", + "tslib": "^2.4.0", + "update-notifier": "^5.1.0", + "url-loader": "^4.1.1", + "wait-on": "^6.0.1", + "webpack": "^5.73.0", + "webpack-bundle-analyzer": "^4.5.0", + "webpack-dev-server": "^4.9.3", + "webpack-merge": "^5.8.0", + "webpackbar": "^5.0.2" + }, + "bin": { + "docusaurus": "bin/docusaurus.mjs" + }, + "engines": { + "node": ">=16.14" + }, + "peerDependencies": { + "react": "^16.8.4 || ^17.0.0", + "react-dom": "^16.8.4 || ^17.0.0" + } + }, + "node_modules/@docusaurus/core/node_modules/commander": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-5.1.0.tgz", + "integrity": "sha512-P0CysNDQ7rtVw4QIQtm+MRxV66vKFSvlsQvGYXZWR3qFU0jlMKHZZZgw8e+8DSah4UDKMqnknRDQz+xuQXQ/Zg==", + "engines": { + "node": ">= 6" + } + }, + "node_modules/@docusaurus/core/node_modules/fs-extra": { + "version": "10.1.0", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz", + "integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==", + "dependencies": { + "graceful-fs": "^4.2.0", + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/@docusaurus/core/node_modules/jsonfile": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz", + "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==", + "dependencies": { + "universalify": "^2.0.0" + }, + "optionalDependencies": { + "graceful-fs": "^4.1.6" + } + }, + "node_modules/@docusaurus/core/node_modules/universalify": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz", + "integrity": "sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ==", + "engines": { + "node": ">= 
10.0.0" + } + }, + "node_modules/@docusaurus/cssnano-preset": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@docusaurus/cssnano-preset/-/cssnano-preset-2.4.1.tgz", + "integrity": "sha512-ka+vqXwtcW1NbXxWsh6yA1Ckii1klY9E53cJ4O9J09nkMBgrNX3iEFED1fWdv8wf4mJjvGi5RLZ2p9hJNjsLyQ==", + "dependencies": { + "cssnano-preset-advanced": "^5.3.8", + "postcss": "^8.4.14", + "postcss-sort-media-queries": "^4.2.1", + "tslib": "^2.4.0" + }, + "engines": { + "node": ">=16.14" + } + }, + "node_modules/@docusaurus/logger": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@docusaurus/logger/-/logger-2.4.1.tgz", + "integrity": "sha512-5h5ysIIWYIDHyTVd8BjheZmQZmEgWDR54aQ1BX9pjFfpyzFo5puKXKYrYJXbjEHGyVhEzmB9UXwbxGfaZhOjcg==", + "dependencies": { + "chalk": "^4.1.2", + "tslib": "^2.4.0" + }, + "engines": { + "node": ">=16.14" + } + }, + "node_modules/@docusaurus/mdx-loader": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@docusaurus/mdx-loader/-/mdx-loader-2.4.1.tgz", + "integrity": "sha512-4KhUhEavteIAmbBj7LVFnrVYDiU51H5YWW1zY6SmBSte/YLhDutztLTBE0PQl1Grux1jzUJeaSvAzHpTn6JJDQ==", + "dependencies": { + "@babel/parser": "^7.18.8", + "@babel/traverse": "^7.18.8", + "@docusaurus/logger": "2.4.1", + "@docusaurus/utils": "2.4.1", + "@mdx-js/mdx": "^1.6.22", + "escape-html": "^1.0.3", + "file-loader": "^6.2.0", + "fs-extra": "^10.1.0", + "image-size": "^1.0.1", + "mdast-util-to-string": "^2.0.0", + "remark-emoji": "^2.2.0", + "stringify-object": "^3.3.0", + "tslib": "^2.4.0", + "unified": "^9.2.2", + "unist-util-visit": "^2.0.3", + "url-loader": "^4.1.1", + "webpack": "^5.73.0" + }, + "engines": { + "node": ">=16.14" + }, + "peerDependencies": { + "react": "^16.8.4 || ^17.0.0", + "react-dom": "^16.8.4 || ^17.0.0" + } + }, + "node_modules/@docusaurus/mdx-loader/node_modules/fs-extra": { + "version": "10.1.0", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz", + "integrity": 
"sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==", + "dependencies": { + "graceful-fs": "^4.2.0", + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/@docusaurus/mdx-loader/node_modules/jsonfile": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz", + "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==", + "dependencies": { + "universalify": "^2.0.0" + }, + "optionalDependencies": { + "graceful-fs": "^4.1.6" + } + }, + "node_modules/@docusaurus/mdx-loader/node_modules/universalify": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz", + "integrity": "sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ==", + "engines": { + "node": ">= 10.0.0" + } + }, + "node_modules/@docusaurus/react-loadable": { + "version": "5.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/react-loadable/-/react-loadable-5.5.2.tgz", + "integrity": "sha512-A3dYjdBGuy0IGT+wyLIGIKLRE+sAk1iNk0f1HjNDysO7u8lhL4N3VEm+FAubmJbAztn94F7MxBTPmnixbiyFdQ==", + "dependencies": { + "@types/react": "*", + "prop-types": "^15.6.2" + }, + "peerDependencies": { + "react": "*" + } + }, + "node_modules/@docusaurus/types": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@docusaurus/types/-/types-2.4.1.tgz", + "integrity": "sha512-0R+cbhpMkhbRXX138UOc/2XZFF8hiZa6ooZAEEJFp5scytzCw4tC1gChMFXrpa3d2tYE6AX8IrOEpSonLmfQuQ==", + "dependencies": { + "@types/history": "^4.7.11", + "@types/react": "*", + "commander": "^5.1.0", + "joi": "^17.6.0", + "react-helmet-async": "^1.3.0", + "utility-types": "^3.10.0", + "webpack": "^5.73.0", + "webpack-merge": "^5.8.0" + }, + "peerDependencies": { + "react": "^16.8.4 || ^17.0.0", + "react-dom": "^16.8.4 || ^17.0.0" + } + }, + 
"node_modules/@docusaurus/types/node_modules/commander": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-5.1.0.tgz", + "integrity": "sha512-P0CysNDQ7rtVw4QIQtm+MRxV66vKFSvlsQvGYXZWR3qFU0jlMKHZZZgw8e+8DSah4UDKMqnknRDQz+xuQXQ/Zg==", + "engines": { + "node": ">= 6" + } + }, + "node_modules/@docusaurus/utils": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@docusaurus/utils/-/utils-2.4.1.tgz", + "integrity": "sha512-1lvEZdAQhKNht9aPXPoh69eeKnV0/62ROhQeFKKxmzd0zkcuE/Oc5Gpnt00y/f5bIsmOsYMY7Pqfm/5rteT5GA==", + "dependencies": { + "@docusaurus/logger": "2.4.1", + "@svgr/webpack": "^6.2.1", + "escape-string-regexp": "^4.0.0", + "file-loader": "^6.2.0", + "fs-extra": "^10.1.0", + "github-slugger": "^1.4.0", + "globby": "^11.1.0", + "gray-matter": "^4.0.3", + "js-yaml": "^4.1.0", + "lodash": "^4.17.21", + "micromatch": "^4.0.5", + "resolve-pathname": "^3.0.0", + "shelljs": "^0.8.5", + "tslib": "^2.4.0", + "url-loader": "^4.1.1", + "webpack": "^5.73.0" + }, + "engines": { + "node": ">=16.14" + }, + "peerDependencies": { + "@docusaurus/types": "*" + }, + "peerDependenciesMeta": { + "@docusaurus/types": { + "optional": true + } + } + }, + "node_modules/@docusaurus/utils-common": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@docusaurus/utils-common/-/utils-common-2.4.1.tgz", + "integrity": "sha512-bCVGdZU+z/qVcIiEQdyx0K13OC5mYwxhSuDUR95oFbKVuXYRrTVrwZIqQljuo1fyJvFTKHiL9L9skQOPokuFNQ==", + "dependencies": { + "tslib": "^2.4.0" + }, + "engines": { + "node": ">=16.14" + }, + "peerDependencies": { + "@docusaurus/types": "*" + }, + "peerDependenciesMeta": { + "@docusaurus/types": { + "optional": true + } + } + }, + "node_modules/@docusaurus/utils-validation": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@docusaurus/utils-validation/-/utils-validation-2.4.1.tgz", + "integrity": "sha512-unII3hlJlDwZ3w8U+pMO3Lx3RhI4YEbY3YNsQj4yzrkZzlpqZOLuAiZK2JyULnD+TKbceKU0WyWkQXtYbLNDFA==", + 
"dependencies": { + "@docusaurus/logger": "2.4.1", + "@docusaurus/utils": "2.4.1", + "joi": "^17.6.0", + "js-yaml": "^4.1.0", + "tslib": "^2.4.0" + }, + "engines": { + "node": ">=16.14" + } + }, + "node_modules/@docusaurus/utils/node_modules/fs-extra": { + "version": "10.1.0", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz", + "integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==", + "dependencies": { + "graceful-fs": "^4.2.0", + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/@docusaurus/utils/node_modules/jsonfile": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz", + "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==", + "dependencies": { + "universalify": "^2.0.0" + }, + "optionalDependencies": { + "graceful-fs": "^4.1.6" + } + }, + "node_modules/@docusaurus/utils/node_modules/universalify": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz", + "integrity": "sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ==", + "engines": { + "node": ">= 10.0.0" + } + }, + "node_modules/@h2oai/makersaurus": { + "version": "0.8.3", + "resolved": "https://npm.pkg.github.com/download/@h2oai/makersaurus/0.8.3/07b00e2f07fcf2d4798df2c1d305b6e5b3ff9006", + "integrity": "sha512-nvujC9gepvRxEgeFnpu9YQ3s+wmdp4+3BN9s7GuqSYweFCT5rdZSfdmQrg8HVc0x0ijhEuHggEYVKkLAVIWYiQ==", + "dependencies": { + "commander": "^9.4.1", + "handlebars": "^4.7.7", + "sync-directory": "^5.1.9", + "yup": "^0.32.11" + }, + "bin": { + "makersaurus": "src/bin.js" + } + }, + "node_modules/@hapi/hoek": { + "version": "9.3.0", + "resolved": "https://registry.npmjs.org/@hapi/hoek/-/hoek-9.3.0.tgz", + "integrity": 
"sha512-/c6rf4UJlmHlC9b5BaNvzAcFv7HZ2QHaV0D4/HNlBdvFnvQq8RI4kYdhyPCl7Xj+oWvTWQ8ujhqS53LIgAe6KQ==" + }, + "node_modules/@hapi/topo": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/@hapi/topo/-/topo-5.1.0.tgz", + "integrity": "sha512-foQZKJig7Ob0BMAYBfcJk8d77QtOe7Wo4ox7ff1lQYoNNAb6jwcY1ncdoy2e9wQZzvNy7ODZCYJkK8kzmcAnAg==", + "dependencies": { + "@hapi/hoek": "^9.0.0" + } + }, + "node_modules/@jest/schemas": { + "version": "29.4.3", + "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.4.3.tgz", + "integrity": "sha512-VLYKXQmtmuEz6IxJsrZwzG9NvtkQsWNnWMsKxqWNu3+CnfzJQhp0WDDKWLVV9hLKr0l3SLLFRqcYHjhtyuDVxg==", + "dependencies": { + "@sinclair/typebox": "^0.25.16" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/types": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.5.0.tgz", + "integrity": "sha512-qbu7kN6czmVRc3xWFQcAN03RAUamgppVUdXrvl1Wr3jlNF93o9mJbGcDWrwGB6ht44u7efB1qCFgVQmca24Uog==", + "dependencies": { + "@jest/schemas": "^29.4.3", + "@types/istanbul-lib-coverage": "^2.0.0", + "@types/istanbul-reports": "^3.0.0", + "@types/node": "*", + "@types/yargs": "^17.0.8", + "chalk": "^4.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jridgewell/gen-mapping": { + "version": "0.3.3", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.3.tgz", + "integrity": "sha512-HLhSWOLRi875zjjMG/r+Nv0oCW8umGb0BgEhyX3dDX3egwZtB8PqLnjz3yedt8R5StBrzcg4aBpnh8UA9D1BoQ==", + "dependencies": { + "@jridgewell/set-array": "^1.0.1", + "@jridgewell/sourcemap-codec": "^1.4.10", + "@jridgewell/trace-mapping": "^0.3.9" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/resolve-uri": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.0.tgz", + "integrity": 
"sha512-F2msla3tad+Mfht5cJq7LSXcdudKTWCVYUgw6pLFOOHSTtZlj6SWNYAp+AhuqLmWdBO2X5hPrLcu8cVP8fy28w==", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/set-array": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.1.2.tgz", + "integrity": "sha512-xnkseuNADM0gt2bs+BvhO0p78Mk762YnZdsuzFV018NoG1Sj1SCQvpSqa7XUaTam5vAGasABV9qXASMKnFMwMw==", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/source-map": { + "version": "0.3.3", + "resolved": "https://registry.npmjs.org/@jridgewell/source-map/-/source-map-0.3.3.tgz", + "integrity": "sha512-b+fsZXeLYi9fEULmfBrhxn4IrPlINf8fiNarzTof004v3lFdntdwa9PF7vFJqm3mg7s+ScJMxXaE3Acp1irZcg==", + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.0", + "@jridgewell/trace-mapping": "^0.3.9" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.4.15", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.15.tgz", + "integrity": "sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==" + }, + "node_modules/@jridgewell/trace-mapping": { + "version": "0.3.18", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.18.tgz", + "integrity": "sha512-w+niJYzMHdd7USdiH2U6869nqhD2nbfZXND5Yp93qIbEmnDNk7PD48o+YchRVpzMU7M6jVCbenTR7PA1FLQ9pA==", + "dependencies": { + "@jridgewell/resolve-uri": "3.1.0", + "@jridgewell/sourcemap-codec": "1.4.14" + } + }, + "node_modules/@jridgewell/trace-mapping/node_modules/@jridgewell/sourcemap-codec": { + "version": "1.4.14", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.14.tgz", + "integrity": "sha512-XPSJHWmi394fuUuzDnGz1wiKqWfo1yXecHQMRf2l6hztTO+nPru658AyDngaBe7isIxEkRsPR3FZh+s7iVa4Uw==" + }, + "node_modules/@leichtgewicht/ip-codec": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/@leichtgewicht/ip-codec/-/ip-codec-2.0.4.tgz", + 
"integrity": "sha512-Hcv+nVC0kZnQ3tD9GVu5xSMR4VVYOteQIr/hwFPVEvPdlXqgGEuRjiheChHgdM+JyqdgNcmzZOX/tnl0JOiI7A==" + }, + "node_modules/@mdx-js/mdx": { + "version": "1.6.22", + "resolved": "https://registry.npmjs.org/@mdx-js/mdx/-/mdx-1.6.22.tgz", + "integrity": "sha512-AMxuLxPz2j5/6TpF/XSdKpQP1NlG0z11dFOlq+2IP/lSgl11GY8ji6S/rgsViN/L0BDvHvUMruRb7ub+24LUYA==", + "dependencies": { + "@babel/core": "7.12.9", + "@babel/plugin-syntax-jsx": "7.12.1", + "@babel/plugin-syntax-object-rest-spread": "7.8.3", + "@mdx-js/util": "1.6.22", + "babel-plugin-apply-mdx-type-prop": "1.6.22", + "babel-plugin-extract-import-names": "1.6.22", + "camelcase-css": "2.0.1", + "detab": "2.0.4", + "hast-util-raw": "6.0.1", + "lodash.uniq": "4.5.0", + "mdast-util-to-hast": "10.0.1", + "remark-footnotes": "2.0.0", + "remark-mdx": "1.6.22", + "remark-parse": "8.0.3", + "remark-squeeze-paragraphs": "4.0.0", + "style-to-object": "0.3.0", + "unified": "9.2.0", + "unist-builder": "2.0.3", + "unist-util-visit": "2.0.3" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/@mdx-js/mdx/node_modules/@babel/core": { + "version": "7.12.9", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.12.9.tgz", + "integrity": "sha512-gTXYh3M5wb7FRXQy+FErKFAv90BnlOuNn1QkCK2lREoPAjrQCO49+HVSrFoe5uakFAF5eenS75KbO2vQiLrTMQ==", + "dependencies": { + "@babel/code-frame": "^7.10.4", + "@babel/generator": "^7.12.5", + "@babel/helper-module-transforms": "^7.12.1", + "@babel/helpers": "^7.12.5", + "@babel/parser": "^7.12.7", + "@babel/template": "^7.12.7", + "@babel/traverse": "^7.12.9", + "@babel/types": "^7.12.7", + "convert-source-map": "^1.7.0", + "debug": "^4.1.0", + "gensync": "^1.0.0-beta.1", + "json5": "^2.1.2", + "lodash": "^4.17.19", + "resolve": "^1.3.2", + "semver": "^5.4.1", + "source-map": "^0.5.0" + }, + "engines": { + "node": ">=6.9.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/babel" + } + }, + 
"node_modules/@mdx-js/mdx/node_modules/@babel/plugin-syntax-jsx": { + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.12.1.tgz", + "integrity": "sha512-1yRi7yAtB0ETgxdY9ti/p2TivUxJkTdhu/ZbF9MshVGqOx1TdB3b7xCXs49Fupgg50N45KcAsRP/ZqWjs9SRjg==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.10.4" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@mdx-js/mdx/node_modules/semver": { + "version": "5.7.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", + "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==", + "bin": { + "semver": "bin/semver" + } + }, + "node_modules/@mdx-js/mdx/node_modules/source-map": { + "version": "0.5.7", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz", + "integrity": "sha512-LbrmJOMUSdEVxIKvdcJzQC+nQhe8FUZQTXQy6+I75skNgn3OoQ0DZA8YnFa7gp8tqtL3KPf1kmo0R5DoApeSGQ==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/@mdx-js/mdx/node_modules/unified": { + "version": "9.2.0", + "resolved": "https://registry.npmjs.org/unified/-/unified-9.2.0.tgz", + "integrity": "sha512-vx2Z0vY+a3YoTj8+pttM3tiJHCwY5UFbYdiWrwBEbHmK8pvsPj2rtAX2BFfgXen8T39CJWblWRDT4L5WGXtDdg==", + "dependencies": { + "bail": "^1.0.0", + "extend": "^3.0.0", + "is-buffer": "^2.0.0", + "is-plain-obj": "^2.0.0", + "trough": "^1.0.0", + "vfile": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/@mdx-js/util": { + "version": "1.6.22", + "resolved": "https://registry.npmjs.org/@mdx-js/util/-/util-1.6.22.tgz", + "integrity": "sha512-H1rQc1ZOHANWBvPcW+JpGwr+juXSxM8Q8YCkm3GhZd8REu1fHR3z99CErO1p9pkcfcxZnMdIZdIsXkOHY0NilA==", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/@nodelib/fs.scandir": { + "version": "2.1.5", + "resolved": 
"https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", + "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", + "dependencies": { + "@nodelib/fs.stat": "2.0.5", + "run-parallel": "^1.1.9" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.stat": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz", + "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==", + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.walk": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz", + "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==", + "dependencies": { + "@nodelib/fs.scandir": "2.1.5", + "fastq": "^1.6.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@polka/url": { + "version": "1.0.0-next.21", + "resolved": "https://registry.npmjs.org/@polka/url/-/url-1.0.0-next.21.tgz", + "integrity": "sha512-a5Sab1C4/icpTZVzZc5Ghpz88yQtGOyNqYXcZgOssB2uuAr+wF/MvN6bgtW32q7HHrvBki+BsZ0OuNv6EV3K9g==" + }, + "node_modules/@sideway/address": { + "version": "4.1.4", + "resolved": "https://registry.npmjs.org/@sideway/address/-/address-4.1.4.tgz", + "integrity": "sha512-7vwq+rOHVWjyXxVlR76Agnvhy8I9rpzjosTESvmhNeXOXdZZB15Fl+TI9x1SiHZH5Jv2wTGduSxFDIaq0m3DUw==", + "dependencies": { + "@hapi/hoek": "^9.0.0" + } + }, + "node_modules/@sideway/formula": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@sideway/formula/-/formula-3.0.1.tgz", + "integrity": "sha512-/poHZJJVjx3L+zVD6g9KgHfYnb443oi7wLu/XKojDviHy6HOEOA6z1Trk5aR1dGcmPenJEgb2sK2I80LeS3MIg==" + }, + "node_modules/@sideway/pinpoint": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@sideway/pinpoint/-/pinpoint-2.0.0.tgz", + "integrity": 
"sha512-RNiOoTPkptFtSVzQevY/yWtZwf/RxyVnPy/OcA9HBM3MlGDnBEYL5B41H0MTn0Uec8Hi+2qUtTfG2WWZBmMejQ==" + }, + "node_modules/@sinclair/typebox": { + "version": "0.25.24", + "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.25.24.tgz", + "integrity": "sha512-XJfwUVUKDHF5ugKwIcxEgc9k8b7HbznCp6eUfWgu710hMPNIO4aw4/zB5RogDQz8nd6gyCDpU9O/m6qYEWY6yQ==" + }, + "node_modules/@sindresorhus/is": { + "version": "0.14.0", + "resolved": "https://registry.npmjs.org/@sindresorhus/is/-/is-0.14.0.tgz", + "integrity": "sha512-9NET910DNaIPngYnLLPeg+Ogzqsi9uM4mSboU5y6p8S5DzMTVEsJZrawi+BoDNUVBa2DhJqQYUFvMDfgU062LQ==", + "engines": { + "node": ">=6" + } + }, + "node_modules/@slorber/static-site-generator-webpack-plugin": { + "version": "4.0.7", + "resolved": "https://registry.npmjs.org/@slorber/static-site-generator-webpack-plugin/-/static-site-generator-webpack-plugin-4.0.7.tgz", + "integrity": "sha512-Ug7x6z5lwrz0WqdnNFOMYrDQNTPAprvHLSh6+/fmml3qUiz6l5eq+2MzLKWtn/q5K5NpSiFsZTP/fck/3vjSxA==", + "dependencies": { + "eval": "^0.1.8", + "p-map": "^4.0.0", + "webpack-sources": "^3.2.2" + }, + "engines": { + "node": ">=14" + } + }, + "node_modules/@svgr/babel-plugin-add-jsx-attribute": { + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-add-jsx-attribute/-/babel-plugin-add-jsx-attribute-6.5.1.tgz", + "integrity": "sha512-9PYGcXrAxitycIjRmZB+Q0JaN07GZIWaTBIGQzfaZv+qr1n8X1XUEJ5rZ/vx6OVD9RRYlrNnXWExQXcmZeD/BQ==", + "engines": { + "node": ">=10" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-plugin-remove-jsx-attribute": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-remove-jsx-attribute/-/babel-plugin-remove-jsx-attribute-8.0.0.tgz", + "integrity": "sha512-BcCkm/STipKvbCl6b7QFrMh/vx00vIP63k2eM66MfHJzPr6O2U0jYEViXkHJWqXqQYjdeA9cuCl5KWmlwjDvbA==", + "engines": { + "node": ">=14" + 
}, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-plugin-remove-jsx-empty-expression": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-remove-jsx-empty-expression/-/babel-plugin-remove-jsx-empty-expression-8.0.0.tgz", + "integrity": "sha512-5BcGCBfBxB5+XSDSWnhTThfI9jcO5f0Ai2V24gZpG+wXF14BzwxxdDb4g6trdOux0rhibGs385BeFMSmxtS3uA==", + "engines": { + "node": ">=14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-plugin-replace-jsx-attribute-value": { + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-replace-jsx-attribute-value/-/babel-plugin-replace-jsx-attribute-value-6.5.1.tgz", + "integrity": "sha512-8DPaVVE3fd5JKuIC29dqyMB54sA6mfgki2H2+swh+zNJoynC8pMPzOkidqHOSc6Wj032fhl8Z0TVn1GiPpAiJg==", + "engines": { + "node": ">=10" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-plugin-svg-dynamic-title": { + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-svg-dynamic-title/-/babel-plugin-svg-dynamic-title-6.5.1.tgz", + "integrity": "sha512-FwOEi0Il72iAzlkaHrlemVurgSQRDFbk0OC8dSvD5fSBPHltNh7JtLsxmZUhjYBZo2PpcU/RJvvi6Q0l7O7ogw==", + "engines": { + "node": ">=10" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-plugin-svg-em-dimensions": { + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-svg-em-dimensions/-/babel-plugin-svg-em-dimensions-6.5.1.tgz", + "integrity": 
"sha512-gWGsiwjb4tw+ITOJ86ndY/DZZ6cuXMNE/SjcDRg+HLuCmwpcjOktwRF9WgAiycTqJD/QXqL2f8IzE2Rzh7aVXA==", + "engines": { + "node": ">=10" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-plugin-transform-react-native-svg": { + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-transform-react-native-svg/-/babel-plugin-transform-react-native-svg-6.5.1.tgz", + "integrity": "sha512-2jT3nTayyYP7kI6aGutkyfJ7UMGtuguD72OjeGLwVNyfPRBD8zQthlvL+fAbAKk5n9ZNcvFkp/b1lZ7VsYqVJg==", + "engines": { + "node": ">=10" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-plugin-transform-svg-component": { + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-transform-svg-component/-/babel-plugin-transform-svg-component-6.5.1.tgz", + "integrity": "sha512-a1p6LF5Jt33O3rZoVRBqdxL350oge54iZWHNI6LJB5tQ7EelvD/Mb1mfBiZNAan0dt4i3VArkFRjA4iObuNykQ==", + "engines": { + "node": ">=12" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-preset": { + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/@svgr/babel-preset/-/babel-preset-6.5.1.tgz", + "integrity": "sha512-6127fvO/FF2oi5EzSQOAjo1LE3OtNVh11R+/8FXa+mHx1ptAaS4cknIjnUA7e6j6fwGGJ17NzaTJFUwOV2zwCw==", + "dependencies": { + "@svgr/babel-plugin-add-jsx-attribute": "^6.5.1", + "@svgr/babel-plugin-remove-jsx-attribute": "*", + "@svgr/babel-plugin-remove-jsx-empty-expression": "*", + "@svgr/babel-plugin-replace-jsx-attribute-value": "^6.5.1", + "@svgr/babel-plugin-svg-dynamic-title": "^6.5.1", + "@svgr/babel-plugin-svg-em-dimensions": "^6.5.1", + "@svgr/babel-plugin-transform-react-native-svg": 
"^6.5.1", + "@svgr/babel-plugin-transform-svg-component": "^6.5.1" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/core": { + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/@svgr/core/-/core-6.5.1.tgz", + "integrity": "sha512-/xdLSWxK5QkqG524ONSjvg3V/FkNyCv538OIBdQqPNaAta3AsXj/Bd2FbvR87yMbXO2hFSWiAe/Q6IkVPDw+mw==", + "dependencies": { + "@babel/core": "^7.19.6", + "@svgr/babel-preset": "^6.5.1", + "@svgr/plugin-jsx": "^6.5.1", + "camelcase": "^6.2.0", + "cosmiconfig": "^7.0.1" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + } + }, + "node_modules/@svgr/hast-util-to-babel-ast": { + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/@svgr/hast-util-to-babel-ast/-/hast-util-to-babel-ast-6.5.1.tgz", + "integrity": "sha512-1hnUxxjd83EAxbL4a0JDJoD3Dao3hmjvyvyEV8PzWmLK3B9m9NPlW7GKjFyoWE8nM7HnXzPcmmSyOW8yOddSXw==", + "dependencies": { + "@babel/types": "^7.20.0", + "entities": "^4.4.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + } + }, + "node_modules/@svgr/plugin-jsx": { + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/@svgr/plugin-jsx/-/plugin-jsx-6.5.1.tgz", + "integrity": "sha512-+UdQxI3jgtSjCykNSlEMuy1jSRQlGC7pqBCPvkG/2dATdWo082zHTTK3uhnAju2/6XpE6B5mZ3z4Z8Ns01S8Gw==", + "dependencies": { + "@babel/core": "^7.19.6", + "@svgr/babel-preset": "^6.5.1", + "@svgr/hast-util-to-babel-ast": "^6.5.1", + "svg-parser": "^2.0.4" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@svgr/core": "^6.0.0" + } + }, + "node_modules/@svgr/plugin-svgo": { + "version": "6.5.1", + "resolved": 
"https://registry.npmjs.org/@svgr/plugin-svgo/-/plugin-svgo-6.5.1.tgz", + "integrity": "sha512-omvZKf8ixP9z6GWgwbtmP9qQMPX4ODXi+wzbVZgomNFsUIlHA1sf4fThdwTWSsZGgvGAG6yE+b/F5gWUkcZ/iQ==", + "dependencies": { + "cosmiconfig": "^7.0.1", + "deepmerge": "^4.2.2", + "svgo": "^2.8.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@svgr/core": "*" + } + }, + "node_modules/@svgr/webpack": { + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/@svgr/webpack/-/webpack-6.5.1.tgz", + "integrity": "sha512-cQ/AsnBkXPkEK8cLbv4Dm7JGXq2XrumKnL1dRpJD9rIO2fTIlJI9a1uCciYG1F2aUsox/hJQyNGbt3soDxSRkA==", + "dependencies": { + "@babel/core": "^7.19.6", + "@babel/plugin-transform-react-constant-elements": "^7.18.12", + "@babel/preset-env": "^7.19.4", + "@babel/preset-react": "^7.18.6", + "@babel/preset-typescript": "^7.18.6", + "@svgr/core": "^6.5.1", + "@svgr/plugin-jsx": "^6.5.1", + "@svgr/plugin-svgo": "^6.5.1" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + } + }, + "node_modules/@szmarczak/http-timer": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@szmarczak/http-timer/-/http-timer-1.1.2.tgz", + "integrity": "sha512-XIB2XbzHTN6ieIjfIMV9hlVcfPU26s2vafYWQcZHWXHOxiaRZYEDKEwdl129Zyg50+foYV2jCgtrqSA6qNuNSA==", + "dependencies": { + "defer-to-connect": "^1.0.1" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/@trysound/sax": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/@trysound/sax/-/sax-0.2.0.tgz", + "integrity": "sha512-L7z9BgrNEcYyUYtF+HaEfiS5ebkh9jXqbszz7pC0hRBPaatV0XjSD3+eHrpqFemQfgwiFF0QPIarnIihIDn7OA==", + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/@types/body-parser": { + "version": "1.19.2", + "resolved": "https://registry.npmjs.org/@types/body-parser/-/body-parser-1.19.2.tgz", + "integrity": 
"sha512-ALYone6pm6QmwZoAgeyNksccT9Q4AWZQ6PvfwR37GT6r6FWUPguq6sUmNGSMV2Wr761oQoBxwGGa6DR5o1DC9g==", + "dependencies": { + "@types/connect": "*", + "@types/node": "*" + } + }, + "node_modules/@types/bonjour": { + "version": "3.5.10", + "resolved": "https://registry.npmjs.org/@types/bonjour/-/bonjour-3.5.10.tgz", + "integrity": "sha512-p7ienRMiS41Nu2/igbJxxLDWrSZ0WxM8UQgCeO9KhoVF7cOVFkrKsiDr1EsJIla8vV3oEEjGcz11jc5yimhzZw==", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/connect": { + "version": "3.4.35", + "resolved": "https://registry.npmjs.org/@types/connect/-/connect-3.4.35.tgz", + "integrity": "sha512-cdeYyv4KWoEgpBISTxWvqYsVy444DOqehiF3fM3ne10AmJ62RSyNkUnxMJXHQWRQQX2eR94m5y1IZyDwBjV9FQ==", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/connect-history-api-fallback": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/@types/connect-history-api-fallback/-/connect-history-api-fallback-1.5.0.tgz", + "integrity": "sha512-4x5FkPpLipqwthjPsF7ZRbOv3uoLUFkTA9G9v583qi4pACvq0uTELrB8OLUzPWUI4IJIyvM85vzkV1nyiI2Lig==", + "dependencies": { + "@types/express-serve-static-core": "*", + "@types/node": "*" + } + }, + "node_modules/@types/eslint": { + "version": "8.40.1", + "resolved": "https://registry.npmjs.org/@types/eslint/-/eslint-8.40.1.tgz", + "integrity": "sha512-vRb792M4mF1FBT+eoLecmkpLXwxsBHvWWRGJjzbYANBM6DtiJc6yETyv4rqDA6QNjF1pkj1U7LMA6dGb3VYlHw==", + "dependencies": { + "@types/estree": "*", + "@types/json-schema": "*" + } + }, + "node_modules/@types/eslint-scope": { + "version": "3.7.4", + "resolved": "https://registry.npmjs.org/@types/eslint-scope/-/eslint-scope-3.7.4.tgz", + "integrity": "sha512-9K4zoImiZc3HlIp6AVUDE4CWYx22a+lhSZMYNpbjW04+YF0KWj4pJXnEMjdnFTiQibFFmElcsasJXDbdI/EPhA==", + "dependencies": { + "@types/eslint": "*", + "@types/estree": "*" + } + }, + "node_modules/@types/estree": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.1.tgz", + 
"integrity": "sha512-LG4opVs2ANWZ1TJoKc937iMmNstM/d0ae1vNbnBvBhqCSezgVUOzcLCqbI5elV8Vy6WKwKjaqR+zO9VKirBBCA==" + }, + "node_modules/@types/express": { + "version": "4.17.17", + "resolved": "https://registry.npmjs.org/@types/express/-/express-4.17.17.tgz", + "integrity": "sha512-Q4FmmuLGBG58btUnfS1c1r/NQdlp3DMfGDGig8WhfpA2YRUtEkxAjkZb0yvplJGYdF1fsQ81iMDcH24sSCNC/Q==", + "dependencies": { + "@types/body-parser": "*", + "@types/express-serve-static-core": "^4.17.33", + "@types/qs": "*", + "@types/serve-static": "*" + } + }, + "node_modules/@types/express-serve-static-core": { + "version": "4.17.35", + "resolved": "https://registry.npmjs.org/@types/express-serve-static-core/-/express-serve-static-core-4.17.35.tgz", + "integrity": "sha512-wALWQwrgiB2AWTT91CB62b6Yt0sNHpznUXeZEcnPU3DRdlDIz74x8Qg1UUYKSVFi+va5vKOLYRBI1bRKiLLKIg==", + "dependencies": { + "@types/node": "*", + "@types/qs": "*", + "@types/range-parser": "*", + "@types/send": "*" + } + }, + "node_modules/@types/hast": { + "version": "2.3.4", + "resolved": "https://registry.npmjs.org/@types/hast/-/hast-2.3.4.tgz", + "integrity": "sha512-wLEm0QvaoawEDoTRwzTXp4b4jpwiJDvR5KMnFnVodm3scufTlBOWRD6N1OBf9TZMhjlNsSfcO5V+7AF4+Vy+9g==", + "dependencies": { + "@types/unist": "*" + } + }, + "node_modules/@types/history": { + "version": "4.7.11", + "resolved": "https://registry.npmjs.org/@types/history/-/history-4.7.11.tgz", + "integrity": "sha512-qjDJRrmvBMiTx+jyLxvLfJU7UznFuokDv4f3WRuriHKERccVpFU+8XMQUAbDzoiJCsmexxRExQeMwwCdamSKDA==" + }, + "node_modules/@types/html-minifier-terser": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/@types/html-minifier-terser/-/html-minifier-terser-6.1.0.tgz", + "integrity": "sha512-oh/6byDPnL1zeNXFrDXFLyZjkr1MsBG667IM792caf1L2UPOOMf65NFzjUH/ltyfwjAGfs1rsX1eftK0jC/KIg==" + }, + "node_modules/@types/http-proxy": { + "version": "1.17.11", + "resolved": "https://registry.npmjs.org/@types/http-proxy/-/http-proxy-1.17.11.tgz", + "integrity": 
"sha512-HC8G7c1WmaF2ekqpnFq626xd3Zz0uvaqFmBJNRZCGEZCXkvSdJoNFn/8Ygbd9fKNQj8UzLdCETaI0UWPAjK7IA==", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/istanbul-lib-coverage": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/@types/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.4.tgz", + "integrity": "sha512-z/QT1XN4K4KYuslS23k62yDIDLwLFkzxOuMplDtObz0+y7VqJCaO2o+SPwHCvLFZh7xazvvoor2tA/hPz9ee7g==" + }, + "node_modules/@types/istanbul-lib-report": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@types/istanbul-lib-report/-/istanbul-lib-report-3.0.0.tgz", + "integrity": "sha512-plGgXAPfVKFoYfa9NpYDAkseG+g6Jr294RqeqcqDixSbU34MZVJRi/P+7Y8GDpzkEwLaGZZOpKIEmeVZNtKsrg==", + "dependencies": { + "@types/istanbul-lib-coverage": "*" + } + }, + "node_modules/@types/istanbul-reports": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@types/istanbul-reports/-/istanbul-reports-3.0.1.tgz", + "integrity": "sha512-c3mAZEuK0lvBp8tmuL74XRKn1+y2dcwOUpH7x4WrF6gk1GIgiluDRgMYQtw2OFcBvAJWlt6ASU3tSqxp0Uu0Aw==", + "dependencies": { + "@types/istanbul-lib-report": "*" + } + }, + "node_modules/@types/json-schema": { + "version": "7.0.12", + "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.12.tgz", + "integrity": "sha512-Hr5Jfhc9eYOQNPYO5WLDq/n4jqijdHNlDXjuAQkkt+mWdQR+XJToOHrsD4cPaMXpn6KO7y2+wM8AZEs8VpBLVA==" + }, + "node_modules/@types/lodash": { + "version": "4.14.194", + "resolved": "https://registry.npmjs.org/@types/lodash/-/lodash-4.14.194.tgz", + "integrity": "sha512-r22s9tAS7imvBt2lyHC9B8AGwWnXaYb1tY09oyLkXDs4vArpYJzw09nj8MLx5VfciBPGIb+ZwG0ssYnEPJxn/g==" + }, + "node_modules/@types/mdast": { + "version": "3.0.11", + "resolved": "https://registry.npmjs.org/@types/mdast/-/mdast-3.0.11.tgz", + "integrity": "sha512-Y/uImid8aAwrEA24/1tcRZwpxX3pIFTSilcNDKSPn+Y2iDywSEachzRuvgAYYLR3wpGXAsMbv5lvKLDZLeYPAw==", + "dependencies": { + "@types/unist": "*" + } + }, + "node_modules/@types/mime": { + 
"version": "1.3.2", + "resolved": "https://registry.npmjs.org/@types/mime/-/mime-1.3.2.tgz", + "integrity": "sha512-YATxVxgRqNH6nHEIsvg6k2Boc1JHI9ZbH5iWFFv/MTkchz3b1ieGDa5T0a9RznNdI0KhVbdbWSN+KWWrQZRxTw==" + }, + "node_modules/@types/node": { + "version": "20.2.5", + "resolved": "https://registry.npmjs.org/@types/node/-/node-20.2.5.tgz", + "integrity": "sha512-JJulVEQXmiY9Px5axXHeYGLSjhkZEnD+MDPDGbCbIAbMslkKwmygtZFy1X6s/075Yo94sf8GuSlFfPzysQrWZQ==" + }, + "node_modules/@types/parse-json": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/@types/parse-json/-/parse-json-4.0.0.tgz", + "integrity": "sha512-//oorEZjL6sbPcKUaCdIGlIUeH26mgzimjBB77G6XRgnDl/L5wOnpyBGRe/Mmf5CVW3PwEBE1NjiMZ/ssFh4wA==" + }, + "node_modules/@types/parse5": { + "version": "5.0.3", + "resolved": "https://registry.npmjs.org/@types/parse5/-/parse5-5.0.3.tgz", + "integrity": "sha512-kUNnecmtkunAoQ3CnjmMkzNU/gtxG8guhi+Fk2U/kOpIKjIMKnXGp4IJCgQJrXSgMsWYimYG4TGjz/UzbGEBTw==" + }, + "node_modules/@types/prop-types": { + "version": "15.7.5", + "resolved": "https://registry.npmjs.org/@types/prop-types/-/prop-types-15.7.5.tgz", + "integrity": "sha512-JCB8C6SnDoQf0cNycqd/35A7MjcnK+ZTqE7judS6o7utxUCg6imJg3QK2qzHKszlTjcj2cn+NwMB2i96ubpj7w==" + }, + "node_modules/@types/qs": { + "version": "6.9.7", + "resolved": "https://registry.npmjs.org/@types/qs/-/qs-6.9.7.tgz", + "integrity": "sha512-FGa1F62FT09qcrueBA6qYTrJPVDzah9a+493+o2PCXsesWHIn27G98TsSMs3WPNbZIEj4+VJf6saSFpvD+3Zsw==" + }, + "node_modules/@types/range-parser": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/@types/range-parser/-/range-parser-1.2.4.tgz", + "integrity": "sha512-EEhsLsD6UsDM1yFhAvy0Cjr6VwmpMWqFBCb9w07wVugF7w9nfajxLuVmngTIpgS6svCnm6Vaw+MZhoDCKnOfsw==" + }, + "node_modules/@types/react": { + "version": "18.2.9", + "resolved": "https://registry.npmjs.org/@types/react/-/react-18.2.9.tgz", + "integrity": "sha512-pL3JAesUkF7PEQGxh5XOwdXGV907te6m1/Qe1ERJLgomojS6Ne790QiA7GUl434JEkFA2aAaB6qJ5z4e1zJn/w==", + 
"dependencies": { + "@types/prop-types": "*", + "@types/scheduler": "*", + "csstype": "^3.0.2" + } + }, + "node_modules/@types/retry": { + "version": "0.12.0", + "resolved": "https://registry.npmjs.org/@types/retry/-/retry-0.12.0.tgz", + "integrity": "sha512-wWKOClTTiizcZhXnPY4wikVAwmdYHp8q6DmC+EJUzAMsycb7HB32Kh9RN4+0gExjmPmZSAQjgURXIGATPegAvA==" + }, + "node_modules/@types/scheduler": { + "version": "0.16.3", + "resolved": "https://registry.npmjs.org/@types/scheduler/-/scheduler-0.16.3.tgz", + "integrity": "sha512-5cJ8CB4yAx7BH1oMvdU0Jh9lrEXyPkar6F9G/ERswkCuvP4KQZfZkSjcMbAICCpQTN4OuZn8tz0HiKv9TGZgrQ==" + }, + "node_modules/@types/send": { + "version": "0.17.1", + "resolved": "https://registry.npmjs.org/@types/send/-/send-0.17.1.tgz", + "integrity": "sha512-Cwo8LE/0rnvX7kIIa3QHCkcuF21c05Ayb0ZfxPiv0W8VRiZiNW/WuRupHKpqqGVGf7SUA44QSOUKaEd9lIrd/Q==", + "dependencies": { + "@types/mime": "^1", + "@types/node": "*" + } + }, + "node_modules/@types/serve-index": { + "version": "1.9.1", + "resolved": "https://registry.npmjs.org/@types/serve-index/-/serve-index-1.9.1.tgz", + "integrity": "sha512-d/Hs3nWDxNL2xAczmOVZNj92YZCS6RGxfBPjKzuu/XirCgXdpKEb88dYNbrYGint6IVWLNP+yonwVAuRC0T2Dg==", + "dependencies": { + "@types/express": "*" + } + }, + "node_modules/@types/serve-static": { + "version": "1.15.1", + "resolved": "https://registry.npmjs.org/@types/serve-static/-/serve-static-1.15.1.tgz", + "integrity": "sha512-NUo5XNiAdULrJENtJXZZ3fHtfMolzZwczzBbnAeBbqBwG+LaG6YaJtuwzwGSQZ2wsCrxjEhNNjAkKigy3n8teQ==", + "dependencies": { + "@types/mime": "*", + "@types/node": "*" + } + }, + "node_modules/@types/sockjs": { + "version": "0.3.33", + "resolved": "https://registry.npmjs.org/@types/sockjs/-/sockjs-0.3.33.tgz", + "integrity": "sha512-f0KEEe05NvUnat+boPTZ0dgaLZ4SfSouXUgv5noUiefG2ajgKjmETo9ZJyuqsl7dfl2aHlLJUiki6B4ZYldiiw==", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/unist": { + "version": "2.0.6", + "resolved": 
"https://registry.npmjs.org/@types/unist/-/unist-2.0.6.tgz", + "integrity": "sha512-PBjIUxZHOuj0R15/xuwJYjFi+KZdNFrehocChv4g5hu6aFroHue8m0lBP0POdK2nKzbw0cgV1mws8+V/JAcEkQ==" + }, + "node_modules/@types/ws": { + "version": "8.5.5", + "resolved": "https://registry.npmjs.org/@types/ws/-/ws-8.5.5.tgz", + "integrity": "sha512-lwhs8hktwxSjf9UaZ9tG5M03PGogvFaH8gUgLNbN9HKIg0dvv6q+gkSuJ8HN4/VbyxkuLzCjlN7GquQ0gUJfIg==", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/yargs": { + "version": "17.0.24", + "resolved": "https://registry.npmjs.org/@types/yargs/-/yargs-17.0.24.tgz", + "integrity": "sha512-6i0aC7jV6QzQB8ne1joVZ0eSFIstHsCrobmOtghM11yGlH0j43FKL2UhWdELkyps0zuf7qVTUVCCR+tgSlyLLw==", + "dependencies": { + "@types/yargs-parser": "*" + } + }, + "node_modules/@types/yargs-parser": { + "version": "21.0.0", + "resolved": "https://registry.npmjs.org/@types/yargs-parser/-/yargs-parser-21.0.0.tgz", + "integrity": "sha512-iO9ZQHkZxHn4mSakYV0vFHAVDyEOIJQrV2uZ06HxEPcx+mt8swXoZHIbaaJ2crJYFfErySgktuTZ3BeLz+XmFA==" + }, + "node_modules/@webassemblyjs/ast": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/ast/-/ast-1.11.6.tgz", + "integrity": "sha512-IN1xI7PwOvLPgjcf180gC1bqn3q/QaOCwYUahIOhbYUu8KA/3tw2RT/T0Gidi1l7Hhj5D/INhJxiICObqpMu4Q==", + "dependencies": { + "@webassemblyjs/helper-numbers": "1.11.6", + "@webassemblyjs/helper-wasm-bytecode": "1.11.6" + } + }, + "node_modules/@webassemblyjs/floating-point-hex-parser": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/floating-point-hex-parser/-/floating-point-hex-parser-1.11.6.tgz", + "integrity": "sha512-ejAj9hfRJ2XMsNHk/v6Fu2dGS+i4UaXBXGemOfQ/JfQ6mdQg/WXtwleQRLLS4OvfDhv8rYnVwH27YJLMyYsxhw==" + }, + "node_modules/@webassemblyjs/helper-api-error": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-api-error/-/helper-api-error-1.11.6.tgz", + "integrity": 
"sha512-o0YkoP4pVu4rN8aTJgAyj9hC2Sv5UlkzCHhxqWj8butaLvnpdc2jOwh4ewE6CX0txSfLn/UYaV/pheS2Txg//Q==" + }, + "node_modules/@webassemblyjs/helper-buffer": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-buffer/-/helper-buffer-1.11.6.tgz", + "integrity": "sha512-z3nFzdcp1mb8nEOFFk8DrYLpHvhKC3grJD2ardfKOzmbmJvEf/tPIqCY+sNcwZIY8ZD7IkB2l7/pqhUhqm7hLA==" + }, + "node_modules/@webassemblyjs/helper-numbers": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-numbers/-/helper-numbers-1.11.6.tgz", + "integrity": "sha512-vUIhZ8LZoIWHBohiEObxVm6hwP034jwmc9kuq5GdHZH0wiLVLIPcMCdpJzG4C11cHoQ25TFIQj9kaVADVX7N3g==", + "dependencies": { + "@webassemblyjs/floating-point-hex-parser": "1.11.6", + "@webassemblyjs/helper-api-error": "1.11.6", + "@xtuc/long": "4.2.2" + } + }, + "node_modules/@webassemblyjs/helper-wasm-bytecode": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-bytecode/-/helper-wasm-bytecode-1.11.6.tgz", + "integrity": "sha512-sFFHKwcmBprO9e7Icf0+gddyWYDViL8bpPjJJl0WHxCdETktXdmtWLGVzoHbqUcY4Be1LkNfwTmXOJUFZYSJdA==" + }, + "node_modules/@webassemblyjs/helper-wasm-section": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-section/-/helper-wasm-section-1.11.6.tgz", + "integrity": "sha512-LPpZbSOwTpEC2cgn4hTydySy1Ke+XEu+ETXuoyvuyezHO3Kjdu90KK95Sh9xTbmjrCsUwvWwCOQQNta37VrS9g==", + "dependencies": { + "@webassemblyjs/ast": "1.11.6", + "@webassemblyjs/helper-buffer": "1.11.6", + "@webassemblyjs/helper-wasm-bytecode": "1.11.6", + "@webassemblyjs/wasm-gen": "1.11.6" + } + }, + "node_modules/@webassemblyjs/ieee754": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/ieee754/-/ieee754-1.11.6.tgz", + "integrity": "sha512-LM4p2csPNvbij6U1f19v6WR56QZ8JcHg3QIJTlSwzFcmx6WSORicYj6I63f9yU1kEUtrpG+kjkiIAkevHpDXrg==", + "dependencies": { + "@xtuc/ieee754": "^1.2.0" + } + }, + 
"node_modules/@webassemblyjs/leb128": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/leb128/-/leb128-1.11.6.tgz", + "integrity": "sha512-m7a0FhE67DQXgouf1tbN5XQcdWoNgaAuoULHIfGFIEVKA6tu/edls6XnIlkmS6FrXAquJRPni3ZZKjw6FSPjPQ==", + "dependencies": { + "@xtuc/long": "4.2.2" + } + }, + "node_modules/@webassemblyjs/utf8": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/utf8/-/utf8-1.11.6.tgz", + "integrity": "sha512-vtXf2wTQ3+up9Zsg8sa2yWiQpzSsMyXj0qViVP6xKGCUT8p8YJ6HqI7l5eCnWx1T/FYdsv07HQs2wTFbbof/RA==" + }, + "node_modules/@webassemblyjs/wasm-edit": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-edit/-/wasm-edit-1.11.6.tgz", + "integrity": "sha512-Ybn2I6fnfIGuCR+Faaz7YcvtBKxvoLV3Lebn1tM4o/IAJzmi9AWYIPWpyBfU8cC+JxAO57bk4+zdsTjJR+VTOw==", + "dependencies": { + "@webassemblyjs/ast": "1.11.6", + "@webassemblyjs/helper-buffer": "1.11.6", + "@webassemblyjs/helper-wasm-bytecode": "1.11.6", + "@webassemblyjs/helper-wasm-section": "1.11.6", + "@webassemblyjs/wasm-gen": "1.11.6", + "@webassemblyjs/wasm-opt": "1.11.6", + "@webassemblyjs/wasm-parser": "1.11.6", + "@webassemblyjs/wast-printer": "1.11.6" + } + }, + "node_modules/@webassemblyjs/wasm-gen": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-gen/-/wasm-gen-1.11.6.tgz", + "integrity": "sha512-3XOqkZP/y6B4F0PBAXvI1/bky7GryoogUtfwExeP/v7Nzwo1QLcq5oQmpKlftZLbT+ERUOAZVQjuNVak6UXjPA==", + "dependencies": { + "@webassemblyjs/ast": "1.11.6", + "@webassemblyjs/helper-wasm-bytecode": "1.11.6", + "@webassemblyjs/ieee754": "1.11.6", + "@webassemblyjs/leb128": "1.11.6", + "@webassemblyjs/utf8": "1.11.6" + } + }, + "node_modules/@webassemblyjs/wasm-opt": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-opt/-/wasm-opt-1.11.6.tgz", + "integrity": "sha512-cOrKuLRE7PCe6AsOVl7WasYf3wbSo4CeOk6PkrjS7g57MFfVUF9u6ysQBBODX0LdgSvQqRiGz3CXvIDKcPNy4g==", + 
"dependencies": { + "@webassemblyjs/ast": "1.11.6", + "@webassemblyjs/helper-buffer": "1.11.6", + "@webassemblyjs/wasm-gen": "1.11.6", + "@webassemblyjs/wasm-parser": "1.11.6" + } + }, + "node_modules/@webassemblyjs/wasm-parser": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-parser/-/wasm-parser-1.11.6.tgz", + "integrity": "sha512-6ZwPeGzMJM3Dqp3hCsLgESxBGtT/OeCvCZ4TA1JUPYgmhAx38tTPR9JaKy0S5H3evQpO/h2uWs2j6Yc/fjkpTQ==", + "dependencies": { + "@webassemblyjs/ast": "1.11.6", + "@webassemblyjs/helper-api-error": "1.11.6", + "@webassemblyjs/helper-wasm-bytecode": "1.11.6", + "@webassemblyjs/ieee754": "1.11.6", + "@webassemblyjs/leb128": "1.11.6", + "@webassemblyjs/utf8": "1.11.6" + } + }, + "node_modules/@webassemblyjs/wast-printer": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wast-printer/-/wast-printer-1.11.6.tgz", + "integrity": "sha512-JM7AhRcE+yW2GWYaKeHL5vt4xqee5N2WcezptmgyhNS+ScggqcT1OtXykhAb13Sn5Yas0j2uv9tHgrjwvzAP4A==", + "dependencies": { + "@webassemblyjs/ast": "1.11.6", + "@xtuc/long": "4.2.2" + } + }, + "node_modules/@xtuc/ieee754": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@xtuc/ieee754/-/ieee754-1.2.0.tgz", + "integrity": "sha512-DX8nKgqcGwsc0eJSqYt5lwP4DH5FlHnmuWWBRy7X0NcaGR0ZtuyeESgMwTYVEtxmsNGY+qit4QYT/MIYTOTPeA==" + }, + "node_modules/@xtuc/long": { + "version": "4.2.2", + "resolved": "https://registry.npmjs.org/@xtuc/long/-/long-4.2.2.tgz", + "integrity": "sha512-NuHqBY1PB/D8xU6s/thBgOAiAP7HOYDQ32+BFZILJ8ivkUkAHQnWfn6WhL79Owj1qmUnoN/YPhktdIoucipkAQ==" + }, + "node_modules/accepts": { + "version": "1.3.8", + "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.8.tgz", + "integrity": "sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw==", + "dependencies": { + "mime-types": "~2.1.34", + "negotiator": "0.6.3" + }, + "engines": { + "node": ">= 0.6" + } + }, + 
"node_modules/accepts/node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/accepts/node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/acorn": { + "version": "8.8.2", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.8.2.tgz", + "integrity": "sha512-xjIYgE8HBrkpd/sJqOGNspf8uHG+NOHGOw6a/Urj8taM2EXfdNAH2oFcPeIFfsv3+kz/mJrS5VuMqbNLjCa2vw==", + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/acorn-import-assertions": { + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/acorn-import-assertions/-/acorn-import-assertions-1.9.0.tgz", + "integrity": "sha512-cmMwop9x+8KFhxvKrKfPYmN6/pKTYYHBqLa0DfvVZcKMJWNyWLnaqND7dx/qn66R7ewM1UX5XMaDVP5wlVTaVA==", + "peerDependencies": { + "acorn": "^8" + } + }, + "node_modules/acorn-walk": { + "version": "8.2.0", + "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.2.0.tgz", + "integrity": "sha512-k+iyHEuPgSw6SbuDpGQM+06HQUa04DZ3o+F6CSzXMvvI5KMvnaEqXe+YVe555R9nn6GPt404fos4wcgpw12SDA==", + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/address": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/address/-/address-1.2.2.tgz", + "integrity": "sha512-4B/qKCfeE/ODUaAUpSwfzazo5x29WD4r3vXiWsB7I2mSDAihwEqKO+g8GELZUQSSAo5e1XTYh3ZVfLyxBc12nA==", + "engines": { + "node": ">= 10.0.0" + } + }, + "node_modules/aggregate-error": { + "version": "3.1.0", + "resolved": 
"https://registry.npmjs.org/aggregate-error/-/aggregate-error-3.1.0.tgz", + "integrity": "sha512-4I7Td01quW/RpocfNayFdFVk1qSuoh0E7JrbRJ16nH01HhKFQ88INq9Sd+nd72zqRySlr9BmDA8xlEJ6vJMrYA==", + "dependencies": { + "clean-stack": "^2.0.0", + "indent-string": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/ajv-formats": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/ajv-formats/-/ajv-formats-2.1.1.tgz", + "integrity": "sha512-Wx0Kx52hxE7C18hkMEggYlEifqWZtYaRgouJor+WMdPnQyEK13vgEWyVNup7SoeeoLMsr4kf5h6dOW11I15MUA==", + "dependencies": { + "ajv": "^8.0.0" + }, + "peerDependencies": { + "ajv": "^8.0.0" + }, + "peerDependenciesMeta": { + "ajv": { + "optional": true + } + } + }, + "node_modules/ajv-formats/node_modules/ajv": { + "version": "8.12.0", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.12.0.tgz", + "integrity": "sha512-sRu1kpcO9yLtYxBKvqfTeh9KzZEwO3STyX1HT+4CaDzC6HpTGYhIhPIzj9XuKU7KYDwnaeh5hcOwjy1QuJzBPA==", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "json-schema-traverse": "^1.0.0", + "require-from-string": "^2.0.2", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/ajv-formats/node_modules/json-schema-traverse": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", + "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==" + }, + 
"node_modules/ajv-keywords": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", + "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==", + "peerDependencies": { + "ajv": "^6.9.1" + } + }, + "node_modules/ansi-align": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/ansi-align/-/ansi-align-3.0.1.tgz", + "integrity": "sha512-IOfwwBF5iczOjp/WeY4YxyjqAFMQoZufdQWDd19SEExbVLNXqvpzSJ/M7Za4/sCPmQ0+GRquoA7bGcINcxew6w==", + "dependencies": { + "string-width": "^4.1.0" + } + }, + "node_modules/ansi-align/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" + }, + "node_modules/ansi-align/node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-html-community": { + "version": "0.0.8", + "resolved": "https://registry.npmjs.org/ansi-html-community/-/ansi-html-community-0.0.8.tgz", + "integrity": "sha512-1APHAyr3+PCamwNw3bXCPp4HFLONZt/yIH0sZp0/469KWNTEy+qN5jQ3GVX6DMZ1UXAi34yVwtTeaG/HpBuuzw==", + "engines": [ + "node >= 0.8.0" + ], + "bin": { + "ansi-html": "bin/ansi-html" + } + }, + "node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-styles": { + "version": 
"4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/anymatch": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", + "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", + "dependencies": { + "normalize-path": "^3.0.0", + "picomatch": "^2.0.4" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/argparse": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==" + }, + "node_modules/array-flatten": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-2.1.2.tgz", + "integrity": "sha512-hNfzcOV8W4NdualtqBFPyVO+54DSJuZGY9qT4pRroB6S9e3iiido2ISIC5h9R2sPJ8H3FHCIiEnsv1lPXO3KtQ==" + }, + "node_modules/array-union": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz", + "integrity": "sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==", + "engines": { + "node": ">=8" + } + }, + "node_modules/at-least-node": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/at-least-node/-/at-least-node-1.0.0.tgz", + "integrity": "sha512-+q/t7Ekv1EDY2l6Gda6LLiX14rU9TV20Wa3ofeQmwPFZbOMo9DXrLbOjFaaclkXKWidIaopwAObQDqwWtGUjqg==", + "engines": { + "node": ">= 4.0.0" + } + }, + "node_modules/autoprefixer": { + "version": "10.4.14", + "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.4.14.tgz", + "integrity": 
"sha512-FQzyfOsTlwVzjHxKEqRIAdJx9niO6VCBCoEwax/VLSoQF29ggECcPuBqUMZ+u8jCZOPSy8b8/8KnuFbp0SaFZQ==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/autoprefixer" + } + ], + "dependencies": { + "browserslist": "^4.21.5", + "caniuse-lite": "^1.0.30001464", + "fraction.js": "^4.2.0", + "normalize-range": "^0.1.2", + "picocolors": "^1.0.0", + "postcss-value-parser": "^4.2.0" + }, + "bin": { + "autoprefixer": "bin/autoprefixer" + }, + "engines": { + "node": "^10 || ^12 || >=14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/axios": { + "version": "0.25.0", + "resolved": "https://registry.npmjs.org/axios/-/axios-0.25.0.tgz", + "integrity": "sha512-cD8FOb0tRH3uuEe6+evtAbgJtfxr7ly3fQjYcMcuPlgkwVS9xboaVIpcDV+cYQe+yGykgwZCs1pzjntcGa6l5g==", + "dependencies": { + "follow-redirects": "^1.14.7" + } + }, + "node_modules/babel-loader": { + "version": "8.3.0", + "resolved": "https://registry.npmjs.org/babel-loader/-/babel-loader-8.3.0.tgz", + "integrity": "sha512-H8SvsMF+m9t15HNLMipppzkC+Y2Yq+v3SonZyU70RBL/h1gxPkH08Ot8pEE9Z4Kd+czyWJClmFS8qzIP9OZ04Q==", + "dependencies": { + "find-cache-dir": "^3.3.1", + "loader-utils": "^2.0.0", + "make-dir": "^3.1.0", + "schema-utils": "^2.6.5" + }, + "engines": { + "node": ">= 8.9" + }, + "peerDependencies": { + "@babel/core": "^7.0.0", + "webpack": ">=2" + } + }, + "node_modules/babel-plugin-apply-mdx-type-prop": { + "version": "1.6.22", + "resolved": "https://registry.npmjs.org/babel-plugin-apply-mdx-type-prop/-/babel-plugin-apply-mdx-type-prop-1.6.22.tgz", + "integrity": "sha512-VefL+8o+F/DfK24lPZMtJctrCVOfgbqLAGZSkxwhazQv4VxPg3Za/i40fu22KR2m8eEda+IfSOlPLUSIiLcnCQ==", + "dependencies": { + "@babel/helper-plugin-utils": "7.10.4", + "@mdx-js/util": "1.6.22" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + }, + "peerDependencies": { + 
"@babel/core": "^7.11.6" + } + }, + "node_modules/babel-plugin-apply-mdx-type-prop/node_modules/@babel/helper-plugin-utils": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.10.4.tgz", + "integrity": "sha512-O4KCvQA6lLiMU9l2eawBPMf1xPP8xPfB3iEQw150hOVTqj/rfXz0ThTb4HEzqQfs2Bmo5Ay8BzxfzVtBrr9dVg==" + }, + "node_modules/babel-plugin-dynamic-import-node": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/babel-plugin-dynamic-import-node/-/babel-plugin-dynamic-import-node-2.3.3.tgz", + "integrity": "sha512-jZVI+s9Zg3IqA/kdi0i6UDCybUI3aSBLnglhYbSSjKlV7yF1F/5LWv8MakQmvYpnbJDS6fcBL2KzHSxNCMtWSQ==", + "dependencies": { + "object.assign": "^4.1.0" + } + }, + "node_modules/babel-plugin-extract-import-names": { + "version": "1.6.22", + "resolved": "https://registry.npmjs.org/babel-plugin-extract-import-names/-/babel-plugin-extract-import-names-1.6.22.tgz", + "integrity": "sha512-yJ9BsJaISua7d8zNT7oRG1ZLBJCIdZ4PZqmH8qa9N5AK01ifk3fnkc98AXhtzE7UkfCsEumvoQWgoYLhOnJ7jQ==", + "dependencies": { + "@babel/helper-plugin-utils": "7.10.4" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/babel-plugin-extract-import-names/node_modules/@babel/helper-plugin-utils": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.10.4.tgz", + "integrity": "sha512-O4KCvQA6lLiMU9l2eawBPMf1xPP8xPfB3iEQw150hOVTqj/rfXz0ThTb4HEzqQfs2Bmo5Ay8BzxfzVtBrr9dVg==" + }, + "node_modules/babel-plugin-polyfill-corejs2": { + "version": "0.4.3", + "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs2/-/babel-plugin-polyfill-corejs2-0.4.3.tgz", + "integrity": "sha512-bM3gHc337Dta490gg+/AseNB9L4YLHxq1nGKZZSHbhXv4aTYU2MD2cjza1Ru4S6975YLTaL1K8uJf6ukJhhmtw==", + "dependencies": { + "@babel/compat-data": "^7.17.7", + "@babel/helper-define-polyfill-provider": "^0.4.0", + "semver": "^6.1.1" + }, + 
"peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/babel-plugin-polyfill-corejs2/node_modules/semver": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", + "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/babel-plugin-polyfill-corejs3": { + "version": "0.8.1", + "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs3/-/babel-plugin-polyfill-corejs3-0.8.1.tgz", + "integrity": "sha512-ikFrZITKg1xH6pLND8zT14UPgjKHiGLqex7rGEZCH2EvhsneJaJPemmpQaIZV5AL03II+lXylw3UmddDK8RU5Q==", + "dependencies": { + "@babel/helper-define-polyfill-provider": "^0.4.0", + "core-js-compat": "^3.30.1" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/babel-plugin-polyfill-regenerator": { + "version": "0.5.0", + "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-regenerator/-/babel-plugin-polyfill-regenerator-0.5.0.tgz", + "integrity": "sha512-hDJtKjMLVa7Z+LwnTCxoDLQj6wdc+B8dun7ayF2fYieI6OzfuvcLMB32ihJZ4UhCBwNYGl5bg/x/P9cMdnkc2g==", + "dependencies": { + "@babel/helper-define-polyfill-provider": "^0.4.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/bail": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/bail/-/bail-1.0.5.tgz", + "integrity": "sha512-xFbRxM1tahm08yHBP16MMjVUAvDaBMD38zsM9EMAUN61omwLmKlOpB/Zku5QkjZ8TZ4vn53pj+t518cH0S03RQ==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==" + }, + "node_modules/batch": { + "version": "0.6.1", + "resolved": 
"https://registry.npmjs.org/batch/-/batch-0.6.1.tgz", + "integrity": "sha512-x+VAiMRL6UPkx+kudNvxTl6hB2XNNCG2r+7wixVfIYwu/2HKRXimwQyaumLjMveWvT2Hkd/cAJw+QBMfJ/EKVw==" + }, + "node_modules/big.js": { + "version": "5.2.2", + "resolved": "https://registry.npmjs.org/big.js/-/big.js-5.2.2.tgz", + "integrity": "sha512-vyL2OymJxmarO8gxMr0mhChsO9QGwhynfuu4+MHTAW6czfq9humCB7rKpUjDd9YUiDPU4mzpyupFSvOClAwbmQ==", + "engines": { + "node": "*" + } + }, + "node_modules/binary-extensions": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.2.0.tgz", + "integrity": "sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA==", + "engines": { + "node": ">=8" + } + }, + "node_modules/body-parser": { + "version": "1.20.1", + "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.1.tgz", + "integrity": "sha512-jWi7abTbYwajOytWCQc37VulmWiRae5RyTpaCyDcS5/lMdtwSz5lOpDE67srw/HYe35f1z3fDQw+3txg7gNtWw==", + "dependencies": { + "bytes": "3.1.2", + "content-type": "~1.0.4", + "debug": "2.6.9", + "depd": "2.0.0", + "destroy": "1.2.0", + "http-errors": "2.0.0", + "iconv-lite": "0.4.24", + "on-finished": "2.4.1", + "qs": "6.11.0", + "raw-body": "2.5.1", + "type-is": "~1.6.18", + "unpipe": "1.0.0" + }, + "engines": { + "node": ">= 0.8", + "npm": "1.2.8000 || >= 1.4.16" + } + }, + "node_modules/body-parser/node_modules/bytes": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz", + "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/body-parser/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + 
"node_modules/body-parser/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + }, + "node_modules/bonjour-service": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/bonjour-service/-/bonjour-service-1.1.1.tgz", + "integrity": "sha512-Z/5lQRMOG9k7W+FkeGTNjh7htqn/2LMnfOvBZ8pynNZCM9MwkQkI3zeI4oz09uWdcgmgHugVvBqxGg4VQJ5PCg==", + "dependencies": { + "array-flatten": "^2.1.2", + "dns-equal": "^1.0.0", + "fast-deep-equal": "^3.1.3", + "multicast-dns": "^7.2.5" + } + }, + "node_modules/boolbase": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/boolbase/-/boolbase-1.0.0.tgz", + "integrity": "sha512-JZOSA7Mo9sNGB8+UjSgzdLtokWAky1zbztM3WRLCbZ70/3cTANmQmOdR7y2g+J0e2WXywy1yS468tY+IruqEww==" + }, + "node_modules/boxen": { + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/boxen/-/boxen-6.2.1.tgz", + "integrity": "sha512-H4PEsJXfFI/Pt8sjDWbHlQPx4zL/bvSQjcilJmaulGt5mLDorHOHpmdXAJcBcmru7PhYSp/cDMWRko4ZUMFkSw==", + "dependencies": { + "ansi-align": "^3.0.1", + "camelcase": "^6.2.0", + "chalk": "^4.1.2", + "cli-boxes": "^3.0.0", + "string-width": "^5.0.1", + "type-fest": "^2.5.0", + "widest-line": "^4.0.1", + "wrap-ansi": "^8.0.1" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/braces": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", + "integrity": 
"sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", + "dependencies": { + "fill-range": "^7.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/browserslist": { + "version": "4.21.7", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.21.7.tgz", + "integrity": "sha512-BauCXrQ7I2ftSqd2mvKHGo85XR0u7Ru3C/Hxsy/0TkfCtjrmAbPdzLGasmoiBxplpDXlPvdjX9u7srIMfgasNA==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "dependencies": { + "caniuse-lite": "^1.0.30001489", + "electron-to-chromium": "^1.4.411", + "node-releases": "^2.0.12", + "update-browserslist-db": "^1.0.11" + }, + "bin": { + "browserslist": "cli.js" + }, + "engines": { + "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" + } + }, + "node_modules/buffer-from": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz", + "integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==" + }, + "node_modules/bytes": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.0.0.tgz", + "integrity": "sha512-pMhOfFDPiv9t5jjIXkHosWmkSyQbvsgEVNkz0ERHbuLh2T/7j4Mqqpz523Fe8MVY89KC6Sh/QfS2sM+SjgFDcw==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/cacheable-request": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/cacheable-request/-/cacheable-request-6.1.0.tgz", + "integrity": "sha512-Oj3cAGPCqOZX7Rz64Uny2GYAZNliQSqfbePrgAQ1wKAihYmCUnraBtJtKcGR4xz7wF+LoJC+ssFZvv5BgF9Igg==", + "dependencies": { + "clone-response": "^1.0.2", + "get-stream": "^5.1.0", + "http-cache-semantics": "^4.0.0", + "keyv": "^3.0.0", + "lowercase-keys": "^2.0.0", + "normalize-url": "^4.1.0", + 
"responselike": "^1.0.2" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/cacheable-request/node_modules/get-stream": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-5.2.0.tgz", + "integrity": "sha512-nBF+F1rAZVCu/p7rjzgA+Yb4lfYXrpl7a6VmJrU8wF9I1CKvP/QwPNZHnOlwbTkY6dvtFIzFMSyQXbLoTQPRpA==", + "dependencies": { + "pump": "^3.0.0" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/cacheable-request/node_modules/lowercase-keys": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-2.0.0.tgz", + "integrity": "sha512-tqNXrS78oMOE73NMxK4EMLQsQowWf8jKooH9g7xPavRT706R6bkQJ6DY2Te7QukaZsulxa30wQ7bk0pm4XiHmA==", + "engines": { + "node": ">=8" + } + }, + "node_modules/cacheable-request/node_modules/normalize-url": { + "version": "4.5.1", + "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-4.5.1.tgz", + "integrity": "sha512-9UZCFRHQdNrfTpGg8+1INIg93B6zE0aXMVFkw1WFwvO4SlZywU6aLg5Of0Ap/PgcbSw4LNxvMWXMeugwMCX0AA==", + "engines": { + "node": ">=8" + } + }, + "node_modules/call-bind": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.2.tgz", + "integrity": "sha512-7O+FbCihrB5WGbFYesctwmTKae6rOiIzmz1icreWJ+0aA7LJfuqhEso2T9ncpcFtzMQtzXf2QGGueWJGTYsqrA==", + "dependencies": { + "function-bind": "^1.1.1", + "get-intrinsic": "^1.0.2" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/call-me-maybe": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/call-me-maybe/-/call-me-maybe-1.0.2.tgz", + "integrity": "sha512-HpX65o1Hnr9HH25ojC1YGs7HCQLq0GCOibSaWER0eNpgJ/Z1MZv2mTc7+xh6WOPxbRVcmgbv4hGU+uSQ/2xFZQ==" + }, + "node_modules/callsites": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", + "integrity": 
"sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", + "engines": { + "node": ">=6" + } + }, + "node_modules/camel-case": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/camel-case/-/camel-case-4.1.2.tgz", + "integrity": "sha512-gxGWBrTT1JuMx6R+o5PTXMmUnhnVzLQ9SNutD4YqKtI6ap897t3tKECYla6gCWEkplXnlNybEkZg9GEGxKFCgw==", + "dependencies": { + "pascal-case": "^3.1.2", + "tslib": "^2.0.3" + } + }, + "node_modules/camelcase": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.3.0.tgz", + "integrity": "sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/camelcase-css": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/camelcase-css/-/camelcase-css-2.0.1.tgz", + "integrity": "sha512-QOSvevhslijgYwRx6Rv7zKdMF8lbRmx+uQGx2+vDc+KI/eBnsy9kit5aj23AgGu3pa4t9AgwbnXWqS+iOY+2aA==", + "engines": { + "node": ">= 6" + } + }, + "node_modules/caniuse-api": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/caniuse-api/-/caniuse-api-3.0.0.tgz", + "integrity": "sha512-bsTwuIg/BZZK/vreVTYYbSWoe2F+71P7K5QGEX+pT250DZbfU1MQ5prOKpPR+LL6uWKK3KMwMCAS74QB3Um1uw==", + "dependencies": { + "browserslist": "^4.0.0", + "caniuse-lite": "^1.0.0", + "lodash.memoize": "^4.1.2", + "lodash.uniq": "^4.5.0" + } + }, + "node_modules/caniuse-lite": { + "version": "1.0.30001497", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001497.tgz", + "integrity": "sha512-I4/duVK4wL6rAK/aKZl3HXB4g+lIZvaT4VLAn2rCgJ38jVLb0lv2Xug6QuqmxXFVRJMF74SPPWPJ/1Sdm3vCzw==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/caniuse-lite" + }, + { + "type": "github", + "url": 
"https://github.com/sponsors/ai" + } + ] + }, + "node_modules/ccount": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/ccount/-/ccount-1.1.0.tgz", + "integrity": "sha512-vlNK021QdI7PNeiUh/lKkC/mNHHfV0m/Ad5JoI0TYtlBnJAslM/JIkm/tGC88bkLIwO6OQ5uV6ztS6kVAtCDlg==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/character-entities": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/character-entities/-/character-entities-1.2.4.tgz", + "integrity": "sha512-iBMyeEHxfVnIakwOuDXpVkc54HijNgCyQB2w0VfGQThle6NXn50zU6V/u+LDhxHcDUPojn6Kpga3PTAD8W1bQw==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/character-entities-legacy": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/character-entities-legacy/-/character-entities-legacy-1.1.4.tgz", + "integrity": "sha512-3Xnr+7ZFS1uxeiUDvV02wQ+QDbc55o97tIV5zHScSPJpcLm/r0DFPcoY3tYRp+VZukxuMeKgXYmsXQHO05zQeA==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/character-reference-invalid": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/character-reference-invalid/-/character-reference-invalid-1.1.4.tgz", + "integrity": "sha512-mKKUkUbhPpQlCOfIuZkvSEgktjPFIsZKRRbC6KWVEMvlzblj3i3asQv5ODsrwt0N3pHAEvjP8KTQPHkp0+6jOg==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/chokidar": { + "version": "3.5.3", + "resolved": 
"https://registry.npmjs.org/chokidar/-/chokidar-3.5.3.tgz", + "integrity": "sha512-Dr3sfKRP6oTcjf2JmUmFJfeVMvXBdegxB0iVQ5eb2V10uFJUCAS8OByZdVAyVb8xXNz3GjjTgj9kLWsZTqE6kw==", + "funding": [ + { + "type": "individual", + "url": "https://paulmillr.com/funding/" + } + ], + "dependencies": { + "anymatch": "~3.1.2", + "braces": "~3.0.2", + "glob-parent": "~5.1.2", + "is-binary-path": "~2.1.0", + "is-glob": "~4.0.1", + "normalize-path": "~3.0.0", + "readdirp": "~3.6.0" + }, + "engines": { + "node": ">= 8.10.0" + }, + "optionalDependencies": { + "fsevents": "~2.3.2" + } + }, + "node_modules/chrome-trace-event": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/chrome-trace-event/-/chrome-trace-event-1.0.3.tgz", + "integrity": "sha512-p3KULyQg4S7NIHixdwbGX+nFHkoBiA4YQmyWtjb8XngSKV124nJmRysgAeujbUVb15vh+RvFUfCPqU7rXk+hZg==", + "engines": { + "node": ">=6.0" + } + }, + "node_modules/ci-info": { + "version": "3.8.0", + "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-3.8.0.tgz", + "integrity": "sha512-eXTggHWSooYhq49F2opQhuHWgzucfF2YgODK4e1566GQs5BIfP30B0oenwBJHfWxAs2fyPB1s7Mg949zLf61Yw==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/sibiraj-s" + } + ], + "engines": { + "node": ">=8" + } + }, + "node_modules/clean-css": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/clean-css/-/clean-css-5.3.2.tgz", + "integrity": "sha512-JVJbM+f3d3Q704rF4bqQ5UUyTtuJ0JRKNbTKVEeujCCBoMdkEi+V+e8oktO9qGQNSvHrFTM6JZRXrUvGR1czww==", + "dependencies": { + "source-map": "~0.6.0" + }, + "engines": { + "node": ">= 10.0" + } + }, + "node_modules/clean-stack": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/clean-stack/-/clean-stack-2.2.0.tgz", + "integrity": "sha512-4diC9HaTE+KRAMWhDhrGOECgWZxoevMc5TlkObMqNSsVU62PYzXZ/SMTjzyGAFF1YusgxGcSWTEXBhp0CPwQ1A==", + "engines": { + "node": ">=6" + } + }, + "node_modules/cli-boxes": { + "version": "3.0.0", + "resolved": 
"https://registry.npmjs.org/cli-boxes/-/cli-boxes-3.0.0.tgz", + "integrity": "sha512-/lzGpEWL/8PfI0BmBOPRwp0c/wFNX1RdUML3jK/RcSBA9T8mZDdQpqYBKtCFTOfQbwPqWEOpjqW+Fnayc0969g==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/cli-table3": { + "version": "0.6.3", + "resolved": "https://registry.npmjs.org/cli-table3/-/cli-table3-0.6.3.tgz", + "integrity": "sha512-w5Jac5SykAeZJKntOxJCrm63Eg5/4dhMWIcuTbo9rpE+brgaSZo0RuNJZeOyMgsUdhDeojvgyQLmjI+K50ZGyg==", + "dependencies": { + "string-width": "^4.2.0" + }, + "engines": { + "node": "10.* || >= 12.*" + }, + "optionalDependencies": { + "@colors/colors": "1.5.0" + } + }, + "node_modules/cli-table3/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" + }, + "node_modules/cli-table3/node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/clone-deep": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/clone-deep/-/clone-deep-4.0.1.tgz", + "integrity": "sha512-neHB9xuzh/wk0dIHweyAXv2aPGZIVk3pLMe+/RNzINf17fe0OG96QroktYAUm7SM1PBnzTabaLboqqxDyMU+SQ==", + "dependencies": { + "is-plain-object": "^2.0.4", + "kind-of": "^6.0.2", + "shallow-clone": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/clone-response": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/clone-response/-/clone-response-1.0.3.tgz", + "integrity": 
"sha512-ROoL94jJH2dUVML2Y/5PEDNaSHgeOdSDicUyS7izcF63G6sTc/FTjLub4b8Il9S8S0beOfYt0TaA5qvFK+w0wA==", + "dependencies": { + "mimic-response": "^1.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/collapse-white-space": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/collapse-white-space/-/collapse-white-space-1.0.6.tgz", + "integrity": "sha512-jEovNnrhMuqyCcjfEJA56v0Xq8SkIoPKDyaHahwo3POf4qcSXqMYuwNcOTzp74vTsR9Tn08z4MxWqAhcekogkQ==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" + }, + "node_modules/colord": { + "version": "2.9.3", + "resolved": "https://registry.npmjs.org/colord/-/colord-2.9.3.tgz", + "integrity": "sha512-jeC1axXpnb0/2nn/Y1LPuLdgXBLH7aDcHu4KEKfqw3CUhX7ZpfBSlPKyqXE6btIgEzfWtrX3/tyBCaCvXvMkOw==" + }, + "node_modules/colorette": { + "version": "2.0.20", + "resolved": "https://registry.npmjs.org/colorette/-/colorette-2.0.20.tgz", + "integrity": "sha512-IfEDxwoWIjkeXL1eXcDiow4UbKjhLdq6/EuSVR9GMN7KVH3r9gQ83e73hsz1Nd1T3ijd5xv1wcWRYO+D6kCI2w==" + }, + "node_modules/combine-promises": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/combine-promises/-/combine-promises-1.1.0.tgz", + "integrity": "sha512-ZI9jvcLDxqwaXEixOhArm3r7ReIivsXkpbyEWyeOhzz1QS0iSgBPnWvEqvIQtYyamGCYA88gFhmUrs9hrrQ0pg==", + "engines": { + "node": ">=10" + } + }, + 
"node_modules/comma-separated-tokens": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/comma-separated-tokens/-/comma-separated-tokens-1.0.8.tgz", + "integrity": "sha512-GHuDRO12Sypu2cV70d1dkA2EUmXHgntrzbpvOB+Qy+49ypNfGgFQIC2fhhXbnyrJRynDCAARsT7Ou0M6hirpfw==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/commander": { + "version": "9.5.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-9.5.0.tgz", + "integrity": "sha512-KRs7WVDKg86PWiuAqhDrAQnTXZKraVcCc6vFdL14qrZ/DcWwuRo7VoiYXalXO7S5GKpqYiVEwCbgFDfxNHKJBQ==", + "engines": { + "node": "^12.20.0 || >=14" + } + }, + "node_modules/commondir": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/commondir/-/commondir-1.0.1.tgz", + "integrity": "sha512-W9pAhw0ja1Edb5GVdIF1mjZw/ASI0AlShXM83UUGe2DVr5TdAPEA1OA8m/g8zWp9x6On7gqufY+FatDbC3MDQg==" + }, + "node_modules/compressible": { + "version": "2.0.18", + "resolved": "https://registry.npmjs.org/compressible/-/compressible-2.0.18.tgz", + "integrity": "sha512-AF3r7P5dWxL8MxyITRMlORQNaOA2IkAFaTr4k7BUumjPtRpGDTZpl0Pb1XCO6JeDCBdp126Cgs9sMxqSjgYyRg==", + "dependencies": { + "mime-db": ">= 1.43.0 < 2" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/compressible/node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/compression": { + "version": "1.7.4", + "resolved": "https://registry.npmjs.org/compression/-/compression-1.7.4.tgz", + "integrity": "sha512-jaSIDzP9pZVS4ZfQ+TzvtiWhdpFhE2RDHz8QJkpX9SIpLq88VueF5jJw6t+6CUQcAoA6t+x89MLrWAqpfDE8iQ==", + "dependencies": { + "accepts": "~1.3.5", + "bytes": "3.0.0", + "compressible": "~2.0.16", + "debug": "2.6.9", + "on-headers": "~1.0.2", + "safe-buffer": "5.1.2", + "vary": "~1.1.2" + 
}, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/compression/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/compression/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + }, + "node_modules/compression/node_modules/safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" + }, + "node_modules/concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==" + }, + "node_modules/configstore": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/configstore/-/configstore-5.0.1.tgz", + "integrity": "sha512-aMKprgk5YhBNyH25hj8wGt2+D52Sw1DRRIzqBwLp2Ya9mFmY8KPvvtvmna8SxVR9JMZ4kzMD68N22vlaRpkeFA==", + "dependencies": { + "dot-prop": "^5.2.0", + "graceful-fs": "^4.1.2", + "make-dir": "^3.0.0", + "unique-string": "^2.0.0", + "write-file-atomic": "^3.0.0", + "xdg-basedir": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/connect-history-api-fallback": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/connect-history-api-fallback/-/connect-history-api-fallback-2.0.0.tgz", + "integrity": "sha512-U73+6lQFmfiNPrYbXqr6kZ1i1wiRqXnp2nhMsINseWXO8lDau0LGEffJ8kQi4EjLZympVgRdvqjAgiZ1tgzDDA==", + "engines": { + "node": ">=0.8" + } + }, + "node_modules/consola": { + "version": "2.15.3", + 
"resolved": "https://registry.npmjs.org/consola/-/consola-2.15.3.tgz", + "integrity": "sha512-9vAdYbHj6x2fLKC4+oPH0kFzY/orMZyG2Aj+kNylHxKGJ/Ed4dpNyAQYwJOdqO4zdM7XpVHmyejQDcQHrnuXbw==" + }, + "node_modules/content-disposition": { + "version": "0.5.2", + "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.2.tgz", + "integrity": "sha512-kRGRZw3bLlFISDBgwTSA1TMBFN6J6GWDeubmDE3AF+3+yXL8hTWv8r5rkLbqYXY4RjPk/EzHnClI3zQf1cFmHA==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/content-type": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.5.tgz", + "integrity": "sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/convert-source-map": { + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-1.9.0.tgz", + "integrity": "sha512-ASFBup0Mz1uyiIjANan1jzLQami9z1PoYSZCiiYW2FczPbenXc45FZdBZLzOT+r6+iciuEModtmCti+hjaAk0A==" + }, + "node_modules/cookie": { + "version": "0.5.0", + "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.5.0.tgz", + "integrity": "sha512-YZ3GUyn/o8gfKJlnlX7g7xq4gyO6OSuhGPKaaGssGB2qgDUS0gPgtTvoyZLTt9Ab6dC4hfc9dV5arkvc/OCmrw==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/cookie-signature": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.6.tgz", + "integrity": "sha512-QADzlaHc8icV8I7vbaJXJwod9HWYp8uCqf1xa4OfNu1T7JVxQIrUgOWtHdNDtPiywmFbiS12VjotIXLrKM3orQ==" + }, + "node_modules/copy-webpack-plugin": { + "version": "11.0.0", + "resolved": "https://registry.npmjs.org/copy-webpack-plugin/-/copy-webpack-plugin-11.0.0.tgz", + "integrity": "sha512-fX2MWpamkW0hZxMEg0+mYnA40LTosOSa5TqZ9GYIBzyJa9C3QUaMPSE2xAi/buNr8u89SfD9wHSQVBzrRa/SOQ==", + "dependencies": { + "fast-glob": "^3.2.11", + "glob-parent": "^6.0.1", + "globby": "^13.1.1", + 
"normalize-path": "^3.0.0", + "schema-utils": "^4.0.0", + "serialize-javascript": "^6.0.0" + }, + "engines": { + "node": ">= 14.15.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^5.1.0" + } + }, + "node_modules/copy-webpack-plugin/node_modules/ajv": { + "version": "8.12.0", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.12.0.tgz", + "integrity": "sha512-sRu1kpcO9yLtYxBKvqfTeh9KzZEwO3STyX1HT+4CaDzC6HpTGYhIhPIzj9XuKU7KYDwnaeh5hcOwjy1QuJzBPA==", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "json-schema-traverse": "^1.0.0", + "require-from-string": "^2.0.2", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/copy-webpack-plugin/node_modules/ajv-keywords": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-5.1.0.tgz", + "integrity": "sha512-YCS/JNFAUyr5vAuhk1DWm1CBxRHW9LbJ2ozWeemrIqpbsqKjHVxYPyi5GC0rjZIT5JxJ3virVTS8wk4i/Z+krw==", + "dependencies": { + "fast-deep-equal": "^3.1.3" + }, + "peerDependencies": { + "ajv": "^8.8.2" + } + }, + "node_modules/copy-webpack-plugin/node_modules/glob-parent": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", + "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", + "dependencies": { + "is-glob": "^4.0.3" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/copy-webpack-plugin/node_modules/globby": { + "version": "13.1.4", + "resolved": "https://registry.npmjs.org/globby/-/globby-13.1.4.tgz", + "integrity": "sha512-iui/IiiW+QrJ1X1hKH5qwlMQyv34wJAYwH1vrf8b9kBA4sNiif3gKsMHa+BrdnOpEudWjpotfa7LrTzB1ERS/g==", + "dependencies": { + "dir-glob": "^3.0.1", + "fast-glob": "^3.2.11", + "ignore": "^5.2.0", + "merge2": "^1.4.1", + "slash": "^4.0.0" + }, + "engines": { + "node": "^12.20.0 || 
^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/copy-webpack-plugin/node_modules/json-schema-traverse": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", + "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==" + }, + "node_modules/copy-webpack-plugin/node_modules/schema-utils": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.1.0.tgz", + "integrity": "sha512-Jw+GZVbP5IggB2WAn6UHI02LBwGmsIeYN/lNbSMZyDziQ7jmtAUrqKqDja+W89YHVs+KL/3IkIMltAklqB1vAw==", + "dependencies": { + "@types/json-schema": "^7.0.9", + "ajv": "^8.9.0", + "ajv-formats": "^2.1.1", + "ajv-keywords": "^5.1.0" + }, + "engines": { + "node": ">= 12.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/copy-webpack-plugin/node_modules/slash": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/slash/-/slash-4.0.0.tgz", + "integrity": "sha512-3dOsAHXXUkQTpOYcoAxLIorMTp4gIQr5IW3iVb7A7lFIp0VHhnynm9izx6TssdrIcVIESAlVjtnO2K8bg+Coew==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/core-js": { + "version": "3.30.2", + "resolved": "https://registry.npmjs.org/core-js/-/core-js-3.30.2.tgz", + "integrity": "sha512-uBJiDmwqsbJCWHAwjrx3cvjbMXP7xD72Dmsn5LOJpiRmE3WbBbN5rCqQ2Qh6Ek6/eOrjlWngEynBWo4VxerQhg==", + "hasInstallScript": true, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/core-js" + } + }, + "node_modules/core-js-compat": { + "version": "3.30.2", + "resolved": "https://registry.npmjs.org/core-js-compat/-/core-js-compat-3.30.2.tgz", + "integrity": "sha512-nriW1nuJjUgvkEjIot1Spwakz52V9YkYHZAQG6A1eCgC8AA1p0zngrQEP9R0+V6hji5XilWKG1Bd0YRppmGimA==", + "dependencies": { + 
"browserslist": "^4.21.5" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/core-js" + } + }, + "node_modules/core-js-pure": { + "version": "3.30.2", + "resolved": "https://registry.npmjs.org/core-js-pure/-/core-js-pure-3.30.2.tgz", + "integrity": "sha512-p/npFUJXXBkCCTIlEGBdghofn00jWG6ZOtdoIXSJmAu2QBvN0IqpZXWweOytcwE6cfx8ZvVUy1vw8zxhe4Y2vg==", + "hasInstallScript": true, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/core-js" + } + }, + "node_modules/core-util-is": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.3.tgz", + "integrity": "sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==" + }, + "node_modules/cosmiconfig": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-7.1.0.tgz", + "integrity": "sha512-AdmX6xUzdNASswsFtmwSt7Vj8po9IuqXm0UXz7QKPuEUmPB4XyjGfaAr2PSuELMwkRMVH1EpIkX5bTZGRB3eCA==", + "dependencies": { + "@types/parse-json": "^4.0.0", + "import-fresh": "^3.2.1", + "parse-json": "^5.0.0", + "path-type": "^4.0.0", + "yaml": "^1.10.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/cross-spawn": { + "version": "7.0.3", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", + "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/crypto-random-string": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/crypto-random-string/-/crypto-random-string-2.0.0.tgz", + "integrity": "sha512-v1plID3y9r/lPhviJ1wrXpLeyUIGAZ2SHNYTEapm7/8A9nLPoyvVp3RK/EPFqn5kEznyWgYZNsRtYYIWbuG8KA==", + "engines": { + "node": ">=8" + } + }, + "node_modules/css-declaration-sorter": { + "version": "6.4.0", + "resolved": 
"https://registry.npmjs.org/css-declaration-sorter/-/css-declaration-sorter-6.4.0.tgz", + "integrity": "sha512-jDfsatwWMWN0MODAFuHszfjphEXfNw9JUAhmY4pLu3TyTU+ohUpsbVtbU+1MZn4a47D9kqh03i4eyOm+74+zew==", + "engines": { + "node": "^10 || ^12 || >=14" + }, + "peerDependencies": { + "postcss": "^8.0.9" + } + }, + "node_modules/css-loader": { + "version": "6.8.1", + "resolved": "https://registry.npmjs.org/css-loader/-/css-loader-6.8.1.tgz", + "integrity": "sha512-xDAXtEVGlD0gJ07iclwWVkLoZOpEvAWaSyf6W18S2pOC//K8+qUDIx8IIT3D+HjnmkJPQeesOPv5aiUaJsCM2g==", + "dependencies": { + "icss-utils": "^5.1.0", + "postcss": "^8.4.21", + "postcss-modules-extract-imports": "^3.0.0", + "postcss-modules-local-by-default": "^4.0.3", + "postcss-modules-scope": "^3.0.0", + "postcss-modules-values": "^4.0.0", + "postcss-value-parser": "^4.2.0", + "semver": "^7.3.8" + }, + "engines": { + "node": ">= 12.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^5.0.0" + } + }, + "node_modules/css-minimizer-webpack-plugin": { + "version": "4.2.2", + "resolved": "https://registry.npmjs.org/css-minimizer-webpack-plugin/-/css-minimizer-webpack-plugin-4.2.2.tgz", + "integrity": "sha512-s3Of/4jKfw1Hj9CxEO1E5oXhQAxlayuHO2y/ML+C6I9sQ7FdzfEV6QgMLN3vI+qFsjJGIAFLKtQK7t8BOXAIyA==", + "dependencies": { + "cssnano": "^5.1.8", + "jest-worker": "^29.1.2", + "postcss": "^8.4.17", + "schema-utils": "^4.0.0", + "serialize-javascript": "^6.0.0", + "source-map": "^0.6.1" + }, + "engines": { + "node": ">= 14.15.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^5.0.0" + }, + "peerDependenciesMeta": { + "@parcel/css": { + "optional": true + }, + "@swc/css": { + "optional": true + }, + "clean-css": { + "optional": true + }, + "csso": { + "optional": true + }, + "esbuild": { + "optional": true + }, + "lightningcss": { + "optional": true + } + } + 
}, + "node_modules/css-minimizer-webpack-plugin/node_modules/ajv": { + "version": "8.12.0", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.12.0.tgz", + "integrity": "sha512-sRu1kpcO9yLtYxBKvqfTeh9KzZEwO3STyX1HT+4CaDzC6HpTGYhIhPIzj9XuKU7KYDwnaeh5hcOwjy1QuJzBPA==", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "json-schema-traverse": "^1.0.0", + "require-from-string": "^2.0.2", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/css-minimizer-webpack-plugin/node_modules/ajv-keywords": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-5.1.0.tgz", + "integrity": "sha512-YCS/JNFAUyr5vAuhk1DWm1CBxRHW9LbJ2ozWeemrIqpbsqKjHVxYPyi5GC0rjZIT5JxJ3virVTS8wk4i/Z+krw==", + "dependencies": { + "fast-deep-equal": "^3.1.3" + }, + "peerDependencies": { + "ajv": "^8.8.2" + } + }, + "node_modules/css-minimizer-webpack-plugin/node_modules/json-schema-traverse": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", + "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==" + }, + "node_modules/css-minimizer-webpack-plugin/node_modules/schema-utils": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.1.0.tgz", + "integrity": "sha512-Jw+GZVbP5IggB2WAn6UHI02LBwGmsIeYN/lNbSMZyDziQ7jmtAUrqKqDja+W89YHVs+KL/3IkIMltAklqB1vAw==", + "dependencies": { + "@types/json-schema": "^7.0.9", + "ajv": "^8.9.0", + "ajv-formats": "^2.1.1", + "ajv-keywords": "^5.1.0" + }, + "engines": { + "node": ">= 12.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/css-select": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/css-select/-/css-select-4.3.0.tgz", + "integrity": 
"sha512-wPpOYtnsVontu2mODhA19JrqWxNsfdatRKd64kmpRbQgh1KtItko5sTnEpPdpSaJszTOhEMlF/RPz28qj4HqhQ==", + "dependencies": { + "boolbase": "^1.0.0", + "css-what": "^6.0.1", + "domhandler": "^4.3.1", + "domutils": "^2.8.0", + "nth-check": "^2.0.1" + }, + "funding": { + "url": "https://github.com/sponsors/fb55" + } + }, + "node_modules/css-tree": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/css-tree/-/css-tree-1.1.3.tgz", + "integrity": "sha512-tRpdppF7TRazZrjJ6v3stzv93qxRcSsFmW6cX0Zm2NVKpxE1WV1HblnghVv9TreireHkqI/VDEsfolRF1p6y7Q==", + "dependencies": { + "mdn-data": "2.0.14", + "source-map": "^0.6.1" + }, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/css-what": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/css-what/-/css-what-6.1.0.tgz", + "integrity": "sha512-HTUrgRJ7r4dsZKU6GjmpfRK1O76h97Z8MfS1G0FozR+oF2kG6Vfe8JE6zwrkbxigziPHinCJ+gCPjA9EaBDtRw==", + "engines": { + "node": ">= 6" + }, + "funding": { + "url": "https://github.com/sponsors/fb55" + } + }, + "node_modules/cssesc": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/cssesc/-/cssesc-3.0.0.tgz", + "integrity": "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==", + "bin": { + "cssesc": "bin/cssesc" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/cssnano": { + "version": "5.1.15", + "resolved": "https://registry.npmjs.org/cssnano/-/cssnano-5.1.15.tgz", + "integrity": "sha512-j+BKgDcLDQA+eDifLx0EO4XSA56b7uut3BQFH+wbSaSTuGLuiyTa/wbRYthUXX8LC9mLg+WWKe8h+qJuwTAbHw==", + "dependencies": { + "cssnano-preset-default": "^5.2.14", + "lilconfig": "^2.0.3", + "yaml": "^1.10.2" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/cssnano" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/cssnano-preset-advanced": { + "version": "5.3.10", + "resolved": 
"https://registry.npmjs.org/cssnano-preset-advanced/-/cssnano-preset-advanced-5.3.10.tgz", + "integrity": "sha512-fnYJyCS9jgMU+cmHO1rPSPf9axbQyD7iUhLO5Df6O4G+fKIOMps+ZbU0PdGFejFBBZ3Pftf18fn1eG7MAPUSWQ==", + "dependencies": { + "autoprefixer": "^10.4.12", + "cssnano-preset-default": "^5.2.14", + "postcss-discard-unused": "^5.1.0", + "postcss-merge-idents": "^5.1.1", + "postcss-reduce-idents": "^5.2.0", + "postcss-zindex": "^5.1.0" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/cssnano-preset-default": { + "version": "5.2.14", + "resolved": "https://registry.npmjs.org/cssnano-preset-default/-/cssnano-preset-default-5.2.14.tgz", + "integrity": "sha512-t0SFesj/ZV2OTylqQVOrFgEh5uanxbO6ZAdeCrNsUQ6fVuXwYTxJPNAGvGTxHbD68ldIJNec7PyYZDBrfDQ+6A==", + "dependencies": { + "css-declaration-sorter": "^6.3.1", + "cssnano-utils": "^3.1.0", + "postcss-calc": "^8.2.3", + "postcss-colormin": "^5.3.1", + "postcss-convert-values": "^5.1.3", + "postcss-discard-comments": "^5.1.2", + "postcss-discard-duplicates": "^5.1.0", + "postcss-discard-empty": "^5.1.1", + "postcss-discard-overridden": "^5.1.0", + "postcss-merge-longhand": "^5.1.7", + "postcss-merge-rules": "^5.1.4", + "postcss-minify-font-values": "^5.1.0", + "postcss-minify-gradients": "^5.1.1", + "postcss-minify-params": "^5.1.4", + "postcss-minify-selectors": "^5.2.1", + "postcss-normalize-charset": "^5.1.0", + "postcss-normalize-display-values": "^5.1.0", + "postcss-normalize-positions": "^5.1.1", + "postcss-normalize-repeat-style": "^5.1.1", + "postcss-normalize-string": "^5.1.0", + "postcss-normalize-timing-functions": "^5.1.0", + "postcss-normalize-unicode": "^5.1.1", + "postcss-normalize-url": "^5.1.0", + "postcss-normalize-whitespace": "^5.1.1", + "postcss-ordered-values": "^5.1.3", + "postcss-reduce-initial": "^5.1.2", + "postcss-reduce-transforms": "^5.1.0", + "postcss-svgo": "^5.1.0", + "postcss-unique-selectors": "^5.1.1" + }, + 
"engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/cssnano-utils": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/cssnano-utils/-/cssnano-utils-3.1.0.tgz", + "integrity": "sha512-JQNR19/YZhz4psLX/rQ9M83e3z2Wf/HdJbryzte4a3NSuafyp9w/I4U+hx5C2S9g41qlstH7DEWnZaaj83OuEA==", + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/csso": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/csso/-/csso-4.2.0.tgz", + "integrity": "sha512-wvlcdIbf6pwKEk7vHj8/Bkc0B4ylXZruLvOgs9doS5eOsOpuodOV2zJChSpkp+pRpYQLQMeF04nr3Z68Sta9jA==", + "dependencies": { + "css-tree": "^1.1.2" + }, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/csstype": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.2.tgz", + "integrity": "sha512-I7K1Uu0MBPzaFKg4nI5Q7Vs2t+3gWWW648spaF+Rg7pI9ds18Ugn+lvg4SHczUdKlHI5LWBXyqfS8+DufyBsgQ==" + }, + "node_modules/debug": { + "version": "4.3.4", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", + "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", + "dependencies": { + "ms": "2.1.2" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/decompress-response": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/decompress-response/-/decompress-response-3.3.0.tgz", + "integrity": "sha512-BzRPQuY1ip+qDonAOz42gRm/pg9F768C+npV/4JOsxRC2sq+Rlk+Q4ZCAsOhnIaMrgarILY+RMUIvMmmX1qAEA==", + "dependencies": { + "mimic-response": "^1.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/deep-extend": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/deep-extend/-/deep-extend-0.6.0.tgz", + "integrity": 
"sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==", + "engines": { + "node": ">=4.0.0" + } + }, + "node_modules/deepmerge": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-4.3.1.tgz", + "integrity": "sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/default-gateway": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/default-gateway/-/default-gateway-6.0.3.tgz", + "integrity": "sha512-fwSOJsbbNzZ/CUFpqFBqYfYNLj1NbMPm8MMCIzHjC83iSJRBEGmDUxU+WP661BaBQImeC2yHwXtz+P/O9o+XEg==", + "dependencies": { + "execa": "^5.0.0" + }, + "engines": { + "node": ">= 10" + } + }, + "node_modules/defer-to-connect": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/defer-to-connect/-/defer-to-connect-1.1.3.tgz", + "integrity": "sha512-0ISdNousHvZT2EiFlZeZAHBUvSxmKswVCEf8hW7KWgG4a8MVEu/3Vb6uWYozkjylyCxe0JBIiRB1jV45S70WVQ==" + }, + "node_modules/define-lazy-prop": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/define-lazy-prop/-/define-lazy-prop-2.0.0.tgz", + "integrity": "sha512-Ds09qNh8yw3khSjiJjiUInaGX9xlqZDY7JVryGxdxV7NPeuqQfplOpQ66yJFZut3jLa5zOwkXw1g9EI2uKh4Og==", + "engines": { + "node": ">=8" + } + }, + "node_modules/define-properties": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.2.0.tgz", + "integrity": "sha512-xvqAVKGfT1+UAvPwKTVw/njhdQ8ZhXK4lI0bCIuCMrp2up9nPnaDftrLtmpTazqd1o+UY4zgzU+avtMbDP+ldA==", + "dependencies": { + "has-property-descriptors": "^1.0.0", + "object-keys": "^1.1.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/del": { + "version": "6.1.1", + "resolved": "https://registry.npmjs.org/del/-/del-6.1.1.tgz", + "integrity": 
"sha512-ua8BhapfP0JUJKC/zV9yHHDW/rDoDxP4Zhn3AkA6/xT6gY7jYXJiaeyBZznYVujhZZET+UgcbZiQ7sN3WqcImg==", + "dependencies": { + "globby": "^11.0.1", + "graceful-fs": "^4.2.4", + "is-glob": "^4.0.1", + "is-path-cwd": "^2.2.0", + "is-path-inside": "^3.0.2", + "p-map": "^4.0.0", + "rimraf": "^3.0.2", + "slash": "^3.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/depd": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz", + "integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/destroy": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/destroy/-/destroy-1.2.0.tgz", + "integrity": "sha512-2sJGJTaXIIaR1w4iJSNoN0hnMY7Gpc/n8D4qSCJw8QqFWXf7cuAgnEHxBpweaVcPevC2l3KpjYCx3NypQQgaJg==", + "engines": { + "node": ">= 0.8", + "npm": "1.2.8000 || >= 1.4.16" + } + }, + "node_modules/detab": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/detab/-/detab-2.0.4.tgz", + "integrity": "sha512-8zdsQA5bIkoRECvCrNKPla84lyoR7DSAyf7p0YgXzBO9PDJx8KntPUay7NS6yp+KdxdVtiE5SpHKtbp2ZQyA9g==", + "dependencies": { + "repeat-string": "^1.5.4" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/detect-node": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/detect-node/-/detect-node-2.1.0.tgz", + "integrity": "sha512-T0NIuQpnTvFDATNuHN5roPwSBG83rFsuO+MXXH9/3N1eFbn4wcPjttvjMLEPWJ0RGUYgQE7cGgS3tNxbqCGM7g==" + }, + "node_modules/detect-port": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/detect-port/-/detect-port-1.5.1.tgz", + "integrity": "sha512-aBzdj76lueB6uUst5iAs7+0H/oOjqI5D16XUWxlWMIMROhcM0rfsNVk93zTngq1dDNpoXRr++Sus7ETAExppAQ==", + "dependencies": { + "address": "^1.0.1", + "debug": "4" + }, + "bin": { + "detect": "bin/detect-port.js", + 
"detect-port": "bin/detect-port.js" + } + }, + "node_modules/detect-port-alt": { + "version": "1.1.6", + "resolved": "https://registry.npmjs.org/detect-port-alt/-/detect-port-alt-1.1.6.tgz", + "integrity": "sha512-5tQykt+LqfJFBEYaDITx7S7cR7mJ/zQmLXZ2qt5w04ainYZw6tBf9dBunMjVeVOdYVRUzUOE4HkY5J7+uttb5Q==", + "dependencies": { + "address": "^1.0.1", + "debug": "^2.6.0" + }, + "bin": { + "detect": "bin/detect-port", + "detect-port": "bin/detect-port" + }, + "engines": { + "node": ">= 4.2.1" + } + }, + "node_modules/detect-port-alt/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/detect-port-alt/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + }, + "node_modules/dir-glob": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", + "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==", + "dependencies": { + "path-type": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/dns-equal": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/dns-equal/-/dns-equal-1.0.0.tgz", + "integrity": "sha512-z+paD6YUQsk+AbGCEM4PrOXSss5gd66QfcVBFTKR/HpFL9jCqikS94HYwKww6fQyO7IxrIIyUu+g0Ka9tUS2Cg==" + }, + "node_modules/dns-packet": { + "version": "5.6.0", + "resolved": "https://registry.npmjs.org/dns-packet/-/dns-packet-5.6.0.tgz", + "integrity": "sha512-rza3UH1LwdHh9qyPXp8lkwpjSNk/AMD3dPytUoRoqnypDUhY0xvbdmVhWOfxO68frEfV9BU8V12Ez7ZsHGZpCQ==", + "dependencies": { + "@leichtgewicht/ip-codec": "^2.0.1" + }, + "engines": { + "node": ">=6" + } + }, + 
"node_modules/docusaurus-plugin-includes": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/docusaurus-plugin-includes/-/docusaurus-plugin-includes-1.1.4.tgz", + "integrity": "sha512-4L7Eqker4xh1dyWZoz2Isz6JQTg8CWZvvSQyX2IHpEPjwovvD5DpEHHRlSk7gJLQNasWPP9DTHTd0fxFZ6jl2g==", + "dependencies": { + "@docusaurus/core": "^2.0.0-beta.5", + "@docusaurus/types": "^2.0.0-beta.5", + "@docusaurus/utils": "^2.0.0-beta.5", + "fs-extra": "^10.0.0", + "path": "^0.12.7" + }, + "engines": { + "node": ">=12.13.0" + }, + "peerDependencies": { + "react": "^16.8.4 || ^17.0.0", + "react-dom": "^16.8.4 || ^17.0.0" + } + }, + "node_modules/docusaurus-plugin-includes/node_modules/fs-extra": { + "version": "10.1.0", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz", + "integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==", + "dependencies": { + "graceful-fs": "^4.2.0", + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/docusaurus-plugin-includes/node_modules/jsonfile": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz", + "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==", + "dependencies": { + "universalify": "^2.0.0" + }, + "optionalDependencies": { + "graceful-fs": "^4.1.6" + } + }, + "node_modules/docusaurus-plugin-includes/node_modules/universalify": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz", + "integrity": "sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ==", + "engines": { + "node": ">= 10.0.0" + } + }, + "node_modules/dom-converter": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/dom-converter/-/dom-converter-0.2.0.tgz", + "integrity": 
"sha512-gd3ypIPfOMr9h5jIKq8E3sHOTCjeirnl0WK5ZdS1AW0Odt0b1PaWaHdJ4Qk4klv+YB9aJBS7mESXjFoDQPu6DA==", + "dependencies": { + "utila": "~0.4" + } + }, + "node_modules/dom-serializer": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-1.4.1.tgz", + "integrity": "sha512-VHwB3KfrcOOkelEG2ZOfxqLZdfkil8PtJi4P8N2MMXucZq2yLp75ClViUlOVwyoHEDjYU433Aq+5zWP61+RGag==", + "dependencies": { + "domelementtype": "^2.0.1", + "domhandler": "^4.2.0", + "entities": "^2.0.0" + }, + "funding": { + "url": "https://github.com/cheeriojs/dom-serializer?sponsor=1" + } + }, + "node_modules/dom-serializer/node_modules/entities": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/entities/-/entities-2.2.0.tgz", + "integrity": "sha512-p92if5Nz619I0w+akJrLZH0MX0Pb5DX39XOwQTtXSdQQOaYH03S1uIQp4mhOZtAXrxq4ViO67YTiLBo2638o9A==", + "funding": { + "url": "https://github.com/fb55/entities?sponsor=1" + } + }, + "node_modules/domelementtype": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-2.3.0.tgz", + "integrity": "sha512-OLETBj6w0OsagBwdXnPdN0cnMfF9opN69co+7ZrbfPGrdpPVNBUj02spi6B1N7wChLQiPn4CSH/zJvXw56gmHw==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/fb55" + } + ] + }, + "node_modules/domhandler": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-4.3.1.tgz", + "integrity": "sha512-GrwoxYN+uWlzO8uhUXRl0P+kHE4GtVPfYzVLcUxPL7KNdHKj66vvlhiweIHqYYXWlw+T8iLMp42Lm67ghw4WMQ==", + "dependencies": { + "domelementtype": "^2.2.0" + }, + "engines": { + "node": ">= 4" + }, + "funding": { + "url": "https://github.com/fb55/domhandler?sponsor=1" + } + }, + "node_modules/domutils": { + "version": "2.8.0", + "resolved": "https://registry.npmjs.org/domutils/-/domutils-2.8.0.tgz", + "integrity": "sha512-w96Cjofp72M5IIhpjgobBimYEfoPjx1Vx0BSX9P30WBdZW2WIKU0T1Bd0kz2eNZ9ikjKgHbEyKx8BB6H1L3h3A==", + "dependencies": { + "dom-serializer": "^1.0.1", 
+ "domelementtype": "^2.2.0", + "domhandler": "^4.2.0" + }, + "funding": { + "url": "https://github.com/fb55/domutils?sponsor=1" + } + }, + "node_modules/dot-case": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/dot-case/-/dot-case-3.0.4.tgz", + "integrity": "sha512-Kv5nKlh6yRrdrGvxeJ2e5y2eRUpkUosIW4A2AS38zwSz27zu7ufDwQPi5Jhs3XAlGNetl3bmnGhQsMtkKJnj3w==", + "dependencies": { + "no-case": "^3.0.4", + "tslib": "^2.0.3" + } + }, + "node_modules/dot-prop": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/dot-prop/-/dot-prop-5.3.0.tgz", + "integrity": "sha512-QM8q3zDe58hqUqjraQOmzZ1LIH9SWQJTlEKCH4kJ2oQvLZk7RbQXvtDM2XEq3fwkV9CCvvH4LA0AV+ogFsBM2Q==", + "dependencies": { + "is-obj": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/dot-prop/node_modules/is-obj": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-obj/-/is-obj-2.0.0.tgz", + "integrity": "sha512-drqDG3cbczxxEJRoOXcOjtdp1J/lyp1mNn0xaznRs8+muBhgQcrnbspox5X5fOw0HnMnbfDzvnEMEtqDEJEo8w==", + "engines": { + "node": ">=8" + } + }, + "node_modules/duplexer": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/duplexer/-/duplexer-0.1.2.tgz", + "integrity": "sha512-jtD6YG370ZCIi/9GTaJKQxWTZD045+4R4hTk/x1UyoqadyJ9x9CgSi1RlVDQF8U2sxLLSnFkCaMihqljHIWgMg==" + }, + "node_modules/duplexer3": { + "version": "0.1.5", + "resolved": "https://registry.npmjs.org/duplexer3/-/duplexer3-0.1.5.tgz", + "integrity": "sha512-1A8za6ws41LQgv9HrE/66jyC5yuSjQ3L/KOpFtoBilsAK2iA2wuS5rTt1OCzIvtS2V7nVmedsUU+DGRcjBmOYA==" + }, + "node_modules/eastasianwidth": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/eastasianwidth/-/eastasianwidth-0.2.0.tgz", + "integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==" + }, + "node_modules/ee-first": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz", + "integrity": 
"sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==" + }, + "node_modules/electron-to-chromium": { + "version": "1.4.425", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.425.tgz", + "integrity": "sha512-wv1NufHxu11zfDbY4fglYQApMswleE9FL/DSeyOyauVXDZ+Kco96JK/tPfBUaDqfRarYp2WH2hJ/5UnVywp9Jg==" + }, + "node_modules/emoji-regex": { + "version": "9.2.2", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", + "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==" + }, + "node_modules/emojis-list": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/emojis-list/-/emojis-list-3.0.0.tgz", + "integrity": "sha512-/kyM18EfinwXZbno9FyUGeFh87KC8HRQBQGildHZbEuRyWFOmv1U10o9BBp8XVZDVNNuQKyIGIu5ZYAAXJ0V2Q==", + "engines": { + "node": ">= 4" + } + }, + "node_modules/emoticon": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/emoticon/-/emoticon-3.2.0.tgz", + "integrity": "sha512-SNujglcLTTg+lDAcApPNgEdudaqQFiAbJCqzjNxJkvN9vAwCGi0uu8IUVvx+f16h+V44KCY6Y2yboroc9pilHg==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/encodeurl": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz", + "integrity": "sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/end-of-stream": { + "version": "1.4.4", + "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.4.tgz", + "integrity": "sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==", + "dependencies": { + "once": "^1.4.0" + } + }, + "node_modules/enhanced-resolve": { + "version": "5.14.1", + "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.14.1.tgz", + 
"integrity": "sha512-Vklwq2vDKtl0y/vtwjSesgJ5MYS7Etuk5txS8VdKL4AOS1aUlD96zqIfsOSLQsdv3xgMRbtkWM8eG9XDfKUPow==", + "dependencies": { + "graceful-fs": "^4.2.4", + "tapable": "^2.2.0" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/entities": { + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/entities/-/entities-4.5.0.tgz", + "integrity": "sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==", + "engines": { + "node": ">=0.12" + }, + "funding": { + "url": "https://github.com/fb55/entities?sponsor=1" + } + }, + "node_modules/error-ex": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz", + "integrity": "sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==", + "dependencies": { + "is-arrayish": "^0.2.1" + } + }, + "node_modules/es-module-lexer": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-1.2.1.tgz", + "integrity": "sha512-9978wrXM50Y4rTMmW5kXIC09ZdXQZqkE4mxhwkd8VbzsGkXGPgV4zWuqQJgCEzYngdo2dYDa0l8xhX4fkSwJSg==" + }, + "node_modules/es6-promise": { + "version": "4.2.8", + "resolved": "https://registry.npmjs.org/es6-promise/-/es6-promise-4.2.8.tgz", + "integrity": "sha512-HJDGx5daxeIvxdBxvG2cb9g4tEvwIk3i8+nhX0yGrYmZUzbkdg8QbDevheDB8gd0//uPj4c1EQua8Q+MViT0/w==" + }, + "node_modules/escalade": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz", + "integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==", + "engines": { + "node": ">=6" + } + }, + "node_modules/escape-goat": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/escape-goat/-/escape-goat-2.1.1.tgz", + "integrity": "sha512-8/uIhbG12Csjy2JEW7D9pHbreaVaS/OpN3ycnyvElTdwM5n6GY6W6e2IPemfvGZeUMqZ9A/3GqIZMgKnBhAw/Q==", + "engines": { + "node": ">=8" + } + }, + "node_modules/escape-html": { + 
"version": "1.0.3", + "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz", + "integrity": "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow==" + }, + "node_modules/escape-string-regexp": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", + "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/eslint-scope": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.1.1.tgz", + "integrity": "sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==", + "dependencies": { + "esrecurse": "^4.3.0", + "estraverse": "^4.1.1" + }, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/esprima": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", + "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==", + "bin": { + "esparse": "bin/esparse.js", + "esvalidate": "bin/esvalidate.js" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/esrecurse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", + "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", + "dependencies": { + "estraverse": "^5.2.0" + }, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/esrecurse/node_modules/estraverse": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", + "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", + "engines": { + "node": ">=4.0" + } + }, + 
"node_modules/estraverse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.3.0.tgz", + "integrity": "sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==", + "engines": { + "node": ">=4.0" + } + }, + "node_modules/esutils": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", + "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/eta": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/eta/-/eta-2.2.0.tgz", + "integrity": "sha512-UVQ72Rqjy/ZKQalzV5dCCJP80GrmPrMxh6NlNf+erV6ObL0ZFkhCstWRawS85z3smdr3d2wXPsZEY7rDPfGd2g==", + "engines": { + "node": ">=6.0.0" + }, + "funding": { + "url": "https://github.com/eta-dev/eta?sponsor=1" + } + }, + "node_modules/etag": { + "version": "1.8.1", + "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz", + "integrity": "sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/eval": { + "version": "0.1.8", + "resolved": "https://registry.npmjs.org/eval/-/eval-0.1.8.tgz", + "integrity": "sha512-EzV94NYKoO09GLXGjXj9JIlXijVck4ONSr5wiCWDvhsvj5jxSrzTmRU/9C1DyB6uToszLs8aifA6NQ7lEQdvFw==", + "dependencies": { + "@types/node": "*", + "require-like": ">= 0.1.1" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/eventemitter3": { + "version": "4.0.7", + "resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-4.0.7.tgz", + "integrity": "sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==" + }, + "node_modules/events": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/events/-/events-3.3.0.tgz", + "integrity": 
"sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q==", + "engines": { + "node": ">=0.8.x" + } + }, + "node_modules/execa": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/execa/-/execa-5.1.1.tgz", + "integrity": "sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==", + "dependencies": { + "cross-spawn": "^7.0.3", + "get-stream": "^6.0.0", + "human-signals": "^2.1.0", + "is-stream": "^2.0.0", + "merge-stream": "^2.0.0", + "npm-run-path": "^4.0.1", + "onetime": "^5.1.2", + "signal-exit": "^3.0.3", + "strip-final-newline": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sindresorhus/execa?sponsor=1" + } + }, + "node_modules/execa/node_modules/get-stream": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-6.0.1.tgz", + "integrity": "sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/express": { + "version": "4.18.2", + "resolved": "https://registry.npmjs.org/express/-/express-4.18.2.tgz", + "integrity": "sha512-5/PsL6iGPdfQ/lKM1UuielYgv3BUoJfz1aUwU9vHZ+J7gyvwdQXFEBIEIaxeGf0GIcreATNyBExtalisDbuMqQ==", + "dependencies": { + "accepts": "~1.3.8", + "array-flatten": "1.1.1", + "body-parser": "1.20.1", + "content-disposition": "0.5.4", + "content-type": "~1.0.4", + "cookie": "0.5.0", + "cookie-signature": "1.0.6", + "debug": "2.6.9", + "depd": "2.0.0", + "encodeurl": "~1.0.2", + "escape-html": "~1.0.3", + "etag": "~1.8.1", + "finalhandler": "1.2.0", + "fresh": "0.5.2", + "http-errors": "2.0.0", + "merge-descriptors": "1.0.1", + "methods": "~1.1.2", + "on-finished": "2.4.1", + "parseurl": "~1.3.3", + "path-to-regexp": "0.1.7", + "proxy-addr": "~2.0.7", + "qs": "6.11.0", + "range-parser": "~1.2.1", + "safe-buffer": 
"5.2.1", + "send": "0.18.0", + "serve-static": "1.15.0", + "setprototypeof": "1.2.0", + "statuses": "2.0.1", + "type-is": "~1.6.18", + "utils-merge": "1.0.1", + "vary": "~1.1.2" + }, + "engines": { + "node": ">= 0.10.0" + } + }, + "node_modules/express/node_modules/array-flatten": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz", + "integrity": "sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg==" + }, + "node_modules/express/node_modules/content-disposition": { + "version": "0.5.4", + "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.4.tgz", + "integrity": "sha512-FveZTNuGw04cxlAiWbzi6zTAL/lhehaWbTtgluJh4/E95DqMwTmha3KZN1aAWA8cFIhHzMZUvLevkw5Rqk+tSQ==", + "dependencies": { + "safe-buffer": "5.2.1" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/express/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/express/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + }, + "node_modules/express/node_modules/path-to-regexp": { + "version": "0.1.7", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.7.tgz", + "integrity": "sha512-5DFkuoqlv1uYQKxy8omFBeJPQcdoE07Kv2sferDCrAq1ohOU+MSDswDIbnx3YAM60qIOnYa53wBhXW0EbMonrQ==" + }, + "node_modules/express/node_modules/range-parser": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", + "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==", + 
"engines": { + "node": ">= 0.6" + } + }, + "node_modules/extend": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz", + "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==" + }, + "node_modules/extend-shallow": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", + "integrity": "sha512-zCnTtlxNoAiDc3gqY2aYAWFx7XWWiasuF2K8Me5WbN8otHKTUKBwjPtNpRs/rbUZm7KxWAaNj7P1a/p52GbVug==", + "dependencies": { + "is-extendable": "^0.1.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/fast-deep-equal": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", + "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==" + }, + "node_modules/fast-glob": { + "version": "3.2.12", + "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.2.12.tgz", + "integrity": "sha512-DVj4CQIYYow0BlaelwK1pHl5n5cRSJfM60UA0zK891sVInoPri2Ekj7+e1CT3/3qxXenpI+nBBmQAcJPJgaj4w==", + "dependencies": { + "@nodelib/fs.stat": "^2.0.2", + "@nodelib/fs.walk": "^1.2.3", + "glob-parent": "^5.1.2", + "merge2": "^1.3.0", + "micromatch": "^4.0.4" + }, + "engines": { + "node": ">=8.6.0" + } + }, + "node_modules/fast-json-stable-stringify": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", + "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==" + }, + "node_modules/fast-url-parser": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/fast-url-parser/-/fast-url-parser-1.1.3.tgz", + "integrity": "sha512-5jOCVXADYNuRkKFzNJ0dCCewsZiYo0dz8QNYljkOpFC6r2U4OBmKtvm/Tsuh4w1YYdDqDb31a8TVhBJ2OJKdqQ==", + "dependencies": { + "punycode": "^1.3.2" + } + }, + "node_modules/fastq": { + "version": 
"1.15.0", + "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.15.0.tgz", + "integrity": "sha512-wBrocU2LCXXa+lWBt8RoIRD89Fi8OdABODa/kEnyeyjS5aZO5/GNvI5sEINADqP/h8M29UHTHUb53sUu5Ihqdw==", + "dependencies": { + "reusify": "^1.0.4" + } + }, + "node_modules/faye-websocket": { + "version": "0.11.4", + "resolved": "https://registry.npmjs.org/faye-websocket/-/faye-websocket-0.11.4.tgz", + "integrity": "sha512-CzbClwlXAuiRQAlUyfqPgvPoNKTckTPGfwZV4ZdAhVcP2lh9KUxJg2b5GkE7XbjKQ3YJnQ9z6D9ntLAlB+tP8g==", + "dependencies": { + "websocket-driver": ">=0.5.1" + }, + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/file-loader": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/file-loader/-/file-loader-6.2.0.tgz", + "integrity": "sha512-qo3glqyTa61Ytg4u73GultjHGjdRyig3tG6lPtyX/jOEJvHif9uB0/OCI2Kif6ctF3caQTW2G5gym21oAsI4pw==", + "dependencies": { + "loader-utils": "^2.0.0", + "schema-utils": "^3.0.0" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^4.0.0 || ^5.0.0" + } + }, + "node_modules/file-loader/node_modules/schema-utils": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.2.0.tgz", + "integrity": "sha512-0zTyLGyDJYd/MBxG1AhJkKa6fpEBds4OQO2ut0w7OYG+ZGhGea09lijvzsqegYSik88zc7cUtIlnnO+/BvD6gQ==", + "dependencies": { + "@types/json-schema": "^7.0.8", + "ajv": "^6.12.5", + "ajv-keywords": "^3.5.2" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/filesize": { + "version": "8.0.7", + "resolved": "https://registry.npmjs.org/filesize/-/filesize-8.0.7.tgz", + "integrity": "sha512-pjmC+bkIF8XI7fWaH8KxHcZL3DPybs1roSKP4rKDvy20tAWwIObE4+JIseG2byfGKhud5ZnM4YSGKBz7Sh0ndQ==", + "engines": { + "node": ">= 0.4.0" + } + }, + "node_modules/fill-range": { + "version": "7.0.1", + 
"resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", + "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/finalhandler": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.2.0.tgz", + "integrity": "sha512-5uXcUVftlQMFnWC9qu/svkWv3GTd2PfUhK/3PLkYNAe7FbqJMt3515HaxE6eRL74GdsriiwujiawdaB1BpEISg==", + "dependencies": { + "debug": "2.6.9", + "encodeurl": "~1.0.2", + "escape-html": "~1.0.3", + "on-finished": "2.4.1", + "parseurl": "~1.3.3", + "statuses": "2.0.1", + "unpipe": "~1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/finalhandler/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/finalhandler/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + }, + "node_modules/find-cache-dir": { + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/find-cache-dir/-/find-cache-dir-3.3.2.tgz", + "integrity": "sha512-wXZV5emFEjrridIgED11OoUKLxiYjAcqot/NJdAkOhlJ+vGzwhOAfcG5OX1jP+S0PcjEn8bdMJv+g2jwQ3Onig==", + "dependencies": { + "commondir": "^1.0.1", + "make-dir": "^3.0.2", + "pkg-dir": "^4.1.0" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/avajs/find-cache-dir?sponsor=1" + } + }, + "node_modules/find-up": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", + "integrity": 
"sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", + "dependencies": { + "locate-path": "^5.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/follow-redirects": { + "version": "1.15.2", + "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.2.tgz", + "integrity": "sha512-VQLG33o04KaQ8uYi2tVNbdrWp1QWxNNea+nmIB4EVM28v0hmP17z7aG1+wAkNzVq4KeXTq3221ye5qTJP91JwA==", + "funding": [ + { + "type": "individual", + "url": "https://github.com/sponsors/RubenVerborgh" + } + ], + "engines": { + "node": ">=4.0" + }, + "peerDependenciesMeta": { + "debug": { + "optional": true + } + } + }, + "node_modules/fork-ts-checker-webpack-plugin": { + "version": "6.5.3", + "resolved": "https://registry.npmjs.org/fork-ts-checker-webpack-plugin/-/fork-ts-checker-webpack-plugin-6.5.3.tgz", + "integrity": "sha512-SbH/l9ikmMWycd5puHJKTkZJKddF4iRLyW3DeZ08HTI7NGyLS38MXd/KGgeWumQO7YNQbW2u/NtPT2YowbPaGQ==", + "dependencies": { + "@babel/code-frame": "^7.8.3", + "@types/json-schema": "^7.0.5", + "chalk": "^4.1.0", + "chokidar": "^3.4.2", + "cosmiconfig": "^6.0.0", + "deepmerge": "^4.2.2", + "fs-extra": "^9.0.0", + "glob": "^7.1.6", + "memfs": "^3.1.2", + "minimatch": "^3.0.4", + "schema-utils": "2.7.0", + "semver": "^7.3.2", + "tapable": "^1.0.0" + }, + "engines": { + "node": ">=10", + "yarn": ">=1.0.0" + }, + "peerDependencies": { + "eslint": ">= 6", + "typescript": ">= 2.7", + "vue-template-compiler": "*", + "webpack": ">= 4" + }, + "peerDependenciesMeta": { + "eslint": { + "optional": true + }, + "vue-template-compiler": { + "optional": true + } + } + }, + "node_modules/fork-ts-checker-webpack-plugin/node_modules/cosmiconfig": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-6.0.0.tgz", + "integrity": "sha512-xb3ZL6+L8b9JLLCx3ZdoZy4+2ECphCMo2PwqgP1tlfVq6M6YReyzBJtvWWtbDSpNr9hn96pkCiZqUcFEc+54Qg==", + "dependencies": { + "@types/parse-json": 
"^4.0.0", + "import-fresh": "^3.1.0", + "parse-json": "^5.0.0", + "path-type": "^4.0.0", + "yaml": "^1.7.2" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/fork-ts-checker-webpack-plugin/node_modules/fs-extra": { + "version": "9.1.0", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-9.1.0.tgz", + "integrity": "sha512-hcg3ZmepS30/7BSFqRvoo3DOMQu7IjqxO5nCDt+zM9XWjb33Wg7ziNT+Qvqbuc3+gWpzO02JubVyk2G4Zvo1OQ==", + "dependencies": { + "at-least-node": "^1.0.0", + "graceful-fs": "^4.2.0", + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/fork-ts-checker-webpack-plugin/node_modules/jsonfile": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz", + "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==", + "dependencies": { + "universalify": "^2.0.0" + }, + "optionalDependencies": { + "graceful-fs": "^4.1.6" + } + }, + "node_modules/fork-ts-checker-webpack-plugin/node_modules/schema-utils": { + "version": "2.7.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-2.7.0.tgz", + "integrity": "sha512-0ilKFI6QQF5nxDZLFn2dMjvc4hjg/Wkg7rHd3jK6/A4a1Hl9VFdQWvgB1UMGoU94pad1P/8N7fMcEnLnSiju8A==", + "dependencies": { + "@types/json-schema": "^7.0.4", + "ajv": "^6.12.2", + "ajv-keywords": "^3.4.1" + }, + "engines": { + "node": ">= 8.9.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/fork-ts-checker-webpack-plugin/node_modules/tapable": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/tapable/-/tapable-1.1.3.tgz", + "integrity": "sha512-4WK/bYZmj8xLr+HUCODHGF1ZFzsYffasLUgEiMBY4fgtltdO6B4WJtlSbPaDTLpYTcGVwM2qLnFTICEcNxs3kA==", + "engines": { + "node": ">=6" + } + }, + "node_modules/fork-ts-checker-webpack-plugin/node_modules/universalify": { + "version": "2.0.0", + "resolved": 
"https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz", + "integrity": "sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ==", + "engines": { + "node": ">= 10.0.0" + } + }, + "node_modules/forwarded": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz", + "integrity": "sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/fraction.js": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/fraction.js/-/fraction.js-4.2.0.tgz", + "integrity": "sha512-MhLuK+2gUcnZe8ZHlaaINnQLl0xRIGRfcGk2yl8xoQAfHrSsL3rYu6FCmBdkdbhc9EPlwyGHewaRsvwRMJtAlA==", + "engines": { + "node": "*" + }, + "funding": { + "type": "patreon", + "url": "https://www.patreon.com/infusion" + } + }, + "node_modules/fresh": { + "version": "0.5.2", + "resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz", + "integrity": "sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/fs-extra": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-7.0.1.tgz", + "integrity": "sha512-YJDaCJZEnBmcbw13fvdAM9AwNOJwOzrE4pqMqBq5nFiEqXUqHwlK4B+3pUw6JNvfSPtX05xFHtYy/1ni01eGCw==", + "dependencies": { + "graceful-fs": "^4.1.2", + "jsonfile": "^4.0.0", + "universalify": "^0.1.0" + }, + "engines": { + "node": ">=6 <7 || >=8" + } + }, + "node_modules/fs-monkey": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/fs-monkey/-/fs-monkey-1.0.4.tgz", + "integrity": "sha512-INM/fWAxMICjttnD0DX1rBvinKskj5G1w+oy/pnm9u/tSlnBrzFonJMcalKJ30P8RRsPzKcCG7Q8l0jx5Fh9YQ==" + }, + "node_modules/fs.realpath": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", + "integrity": 
"sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==" + }, + "node_modules/fsevents": { + "version": "2.3.2", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.2.tgz", + "integrity": "sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==", + "hasInstallScript": true, + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/function-bind": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz", + "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==" + }, + "node_modules/gensync": { + "version": "1.0.0-beta.2", + "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", + "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/get-intrinsic": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.1.tgz", + "integrity": "sha512-2DcsyfABl+gVHEfCOaTrWgyt+tb6MSEGmKq+kI5HwLbIYgjgmMcV8KQ41uaKz1xxUcn9tJtgFbQUEVcEbd0FYw==", + "dependencies": { + "function-bind": "^1.1.1", + "has": "^1.0.3", + "has-proto": "^1.0.1", + "has-symbols": "^1.0.3" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-own-enumerable-property-symbols": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/get-own-enumerable-property-symbols/-/get-own-enumerable-property-symbols-3.0.2.tgz", + "integrity": "sha512-I0UBV/XOz1XkIJHEUDMZAbzCThU/H8DxmSfmdGcKPnVhu2VfFqr34jr9777IyaTYvxjedWhqVIilEDsCdP5G6g==" + }, + "node_modules/get-stream": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-4.1.0.tgz", + "integrity": 
"sha512-GMat4EJ5161kIy2HevLlr4luNjBgvmj413KaQA7jt4V8B4RDsfpHk7WQ9GVqfYyyx8OS/L66Kox+rJRNklLK7w==", + "dependencies": { + "pump": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/github-slugger": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/github-slugger/-/github-slugger-1.5.0.tgz", + "integrity": "sha512-wIh+gKBI9Nshz2o46B0B3f5k/W+WI9ZAv6y5Dn5WJ5SK1t0TnDimB4WE5rmTD05ZAIn8HALCZVmCsvj0w0v0lw==" + }, + "node_modules/glob": { + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/glob-to-regexp": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/glob-to-regexp/-/glob-to-regexp-0.3.0.tgz", + "integrity": "sha512-Iozmtbqv0noj0uDDqoL0zNq0VBEfK2YFoMAZoxJe4cwphvLR+JskfF30QhXHOR4m3KrE6NLRYw+U9MRXvifyig==" + }, + "node_modules/global-dirs": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/global-dirs/-/global-dirs-3.0.1.tgz", + "integrity": "sha512-NBcGGFbBA9s1VzD41QXDG+3++t9Mn5t1FpLdhESY6oKY4gYTFpX4wO3sqGUa0Srjtbfj3szX0RnemmrVRUdULA==", + "dependencies": { + "ini": "2.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/global-dirs/node_modules/ini": { + 
"version": "2.0.0", + "resolved": "https://registry.npmjs.org/ini/-/ini-2.0.0.tgz", + "integrity": "sha512-7PnF4oN3CvZF23ADhA5wRaYEQpJ8qygSkbtTXWBeXWXmEVRXK+1ITciHWwHhsjv1TmW0MgacIv6hEi5pX5NQdA==", + "engines": { + "node": ">=10" + } + }, + "node_modules/global-modules": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/global-modules/-/global-modules-2.0.0.tgz", + "integrity": "sha512-NGbfmJBp9x8IxyJSd1P+otYK8vonoJactOogrVfFRIAEY1ukil8RSKDz2Yo7wh1oihl51l/r6W4epkeKJHqL8A==", + "dependencies": { + "global-prefix": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/global-prefix": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/global-prefix/-/global-prefix-3.0.0.tgz", + "integrity": "sha512-awConJSVCHVGND6x3tmMaKcQvwXLhjdkmomy2W+Goaui8YPgYgXJZewhg3fWC+DlfqqQuWg8AwqjGTD2nAPVWg==", + "dependencies": { + "ini": "^1.3.5", + "kind-of": "^6.0.2", + "which": "^1.3.1" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/global-prefix/node_modules/which": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz", + "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==", + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "which": "bin/which" + } + }, + "node_modules/globals": { + "version": "11.12.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz", + "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==", + "engines": { + "node": ">=4" + } + }, + "node_modules/globby": { + "version": "11.1.0", + "resolved": "https://registry.npmjs.org/globby/-/globby-11.1.0.tgz", + "integrity": "sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==", + "dependencies": { + "array-union": "^2.1.0", + "dir-glob": "^3.0.1", + "fast-glob": "^3.2.9", + "ignore": "^5.2.0", + "merge2": "^1.4.1", + "slash": "^3.0.0" + }, + 
"engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/got": { + "version": "9.6.0", + "resolved": "https://registry.npmjs.org/got/-/got-9.6.0.tgz", + "integrity": "sha512-R7eWptXuGYxwijs0eV+v3o6+XH1IqVK8dJOEecQfTmkncw9AV4dcw/Dhxi8MdlqPthxxpZyizMzyg8RTmEsG+Q==", + "dependencies": { + "@sindresorhus/is": "^0.14.0", + "@szmarczak/http-timer": "^1.1.2", + "cacheable-request": "^6.0.0", + "decompress-response": "^3.3.0", + "duplexer3": "^0.1.4", + "get-stream": "^4.1.0", + "lowercase-keys": "^1.0.1", + "mimic-response": "^1.0.1", + "p-cancelable": "^1.0.0", + "to-readable-stream": "^1.0.0", + "url-parse-lax": "^3.0.0" + }, + "engines": { + "node": ">=8.6" + } + }, + "node_modules/graceful-fs": { + "version": "4.2.11", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", + "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==" + }, + "node_modules/gray-matter": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/gray-matter/-/gray-matter-4.0.3.tgz", + "integrity": "sha512-5v6yZd4JK3eMI3FqqCouswVqwugaA9r4dNZB1wwcmrD02QkV5H0y7XBQW8QwQqEaZY1pM9aqORSORhJRdNK44Q==", + "dependencies": { + "js-yaml": "^3.13.1", + "kind-of": "^6.0.2", + "section-matter": "^1.0.0", + "strip-bom-string": "^1.0.0" + }, + "engines": { + "node": ">=6.0" + } + }, + "node_modules/gray-matter/node_modules/argparse": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", + "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", + "dependencies": { + "sprintf-js": "~1.0.2" + } + }, + "node_modules/gray-matter/node_modules/js-yaml": { + "version": "3.14.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", + "integrity": 
"sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", + "dependencies": { + "argparse": "^1.0.7", + "esprima": "^4.0.0" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/gzip-size": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/gzip-size/-/gzip-size-6.0.0.tgz", + "integrity": "sha512-ax7ZYomf6jqPTQ4+XCpUGyXKHk5WweS+e05MBO4/y3WJ5RkmPXNKvX+bx1behVILVwr6JSQvZAku021CHPXG3Q==", + "dependencies": { + "duplexer": "^0.1.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/handle-thing": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/handle-thing/-/handle-thing-2.0.1.tgz", + "integrity": "sha512-9Qn4yBxelxoh2Ow62nP+Ka/kMnOXRi8BXnRaUwezLNhqelnN49xKz4F/dPP8OYLxLxq6JDtZb2i9XznUQbNPTg==" + }, + "node_modules/handlebars": { + "version": "4.7.7", + "resolved": "https://registry.npmjs.org/handlebars/-/handlebars-4.7.7.tgz", + "integrity": "sha512-aAcXm5OAfE/8IXkcZvCepKU3VzW1/39Fb5ZuqMtgI/hT8X2YgoMvBY5dLhq/cpOvw7Lk1nK/UF71aLG/ZnVYRA==", + "dependencies": { + "minimist": "^1.2.5", + "neo-async": "^2.6.0", + "source-map": "^0.6.1", + "wordwrap": "^1.0.0" + }, + "bin": { + "handlebars": "bin/handlebars" + }, + "engines": { + "node": ">=0.4.7" + }, + "optionalDependencies": { + "uglify-js": "^3.1.4" + } + }, + "node_modules/has": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz", + "integrity": "sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==", + "dependencies": { + "function-bind": "^1.1.1" + }, + "engines": { + "node": ">= 0.4.0" + } + }, + "node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "engines": { + "node": ">=8" + } + }, + 
"node_modules/has-property-descriptors": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.0.tgz", + "integrity": "sha512-62DVLZGoiEBDHQyqG4w9xCuZ7eJEwNmJRWw2VY84Oedb7WFcA27fiEVe8oUQx9hAUJ4ekurquucTGwsyO1XGdQ==", + "dependencies": { + "get-intrinsic": "^1.1.1" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.0.1.tgz", + "integrity": "sha512-7qE+iP+O+bgF9clE5+UoBFzE65mlBiVj3tKCrlNQ0Ogwm0BjpT/gK4SlLYDMybDh5I3TCTKnPPa0oMG7JDYrhg==", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-symbols": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz", + "integrity": "sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-yarn": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/has-yarn/-/has-yarn-2.1.0.tgz", + "integrity": "sha512-UqBRqi4ju7T+TqGNdqAO0PaSVGsDGJUBQvk9eUWNGRY1CFGDzYhLWoM7JQEemnlvVcv/YEmc2wNW8BC24EnUsw==", + "engines": { + "node": ">=8" + } + }, + "node_modules/hast-to-hyperscript": { + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/hast-to-hyperscript/-/hast-to-hyperscript-9.0.1.tgz", + "integrity": "sha512-zQgLKqF+O2F72S1aa4y2ivxzSlko3MAvxkwG8ehGmNiqd98BIN3JM1rAJPmplEyLmGLO2QZYJtIneOSZ2YbJuA==", + "dependencies": { + "@types/unist": "^2.0.3", + "comma-separated-tokens": "^1.0.0", + "property-information": "^5.3.0", + "space-separated-tokens": "^1.0.0", + "style-to-object": "^0.3.0", + "unist-util-is": "^4.0.0", + "web-namespaces": "^1.0.0" + }, + "funding": { + "type": "opencollective", + "url": 
"https://opencollective.com/unified" + } + }, + "node_modules/hast-util-from-parse5": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/hast-util-from-parse5/-/hast-util-from-parse5-6.0.1.tgz", + "integrity": "sha512-jeJUWiN5pSxW12Rh01smtVkZgZr33wBokLzKLwinYOUfSzm1Nl/c3GUGebDyOKjdsRgMvoVbV0VpAcpjF4NrJA==", + "dependencies": { + "@types/parse5": "^5.0.0", + "hastscript": "^6.0.0", + "property-information": "^5.0.0", + "vfile": "^4.0.0", + "vfile-location": "^3.2.0", + "web-namespaces": "^1.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-parse-selector": { + "version": "2.2.5", + "resolved": "https://registry.npmjs.org/hast-util-parse-selector/-/hast-util-parse-selector-2.2.5.tgz", + "integrity": "sha512-7j6mrk/qqkSehsM92wQjdIgWM2/BW61u/53G6xmC8i1OmEdKLHbk419QKQUjz6LglWsfqoiHmyMRkP1BGjecNQ==", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-raw": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/hast-util-raw/-/hast-util-raw-6.0.1.tgz", + "integrity": "sha512-ZMuiYA+UF7BXBtsTBNcLBF5HzXzkyE6MLzJnL605LKE8GJylNjGc4jjxazAHUtcwT5/CEt6afRKViYB4X66dig==", + "dependencies": { + "@types/hast": "^2.0.0", + "hast-util-from-parse5": "^6.0.0", + "hast-util-to-parse5": "^6.0.0", + "html-void-elements": "^1.0.0", + "parse5": "^6.0.0", + "unist-util-position": "^3.0.0", + "vfile": "^4.0.0", + "web-namespaces": "^1.0.0", + "xtend": "^4.0.0", + "zwitch": "^1.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-to-parse5": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/hast-util-to-parse5/-/hast-util-to-parse5-6.0.0.tgz", + "integrity": "sha512-Lu5m6Lgm/fWuz8eWnrKezHtVY83JeRGaNQ2kn9aJgqaxvVkFCZQBEhgodZUDUvoodgyROHDb3r5IxAEdl6suJQ==", + "dependencies": { + "hast-to-hyperscript": "^9.0.0", + 
"property-information": "^5.0.0", + "web-namespaces": "^1.0.0", + "xtend": "^4.0.0", + "zwitch": "^1.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hastscript": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/hastscript/-/hastscript-6.0.0.tgz", + "integrity": "sha512-nDM6bvd7lIqDUiYEiu5Sl/+6ReP0BMk/2f4U/Rooccxkj0P5nm+acM5PrGJ/t5I8qPGiqZSE6hVAwZEdZIvP4w==", + "dependencies": { + "@types/hast": "^2.0.0", + "comma-separated-tokens": "^1.0.0", + "hast-util-parse-selector": "^2.0.0", + "property-information": "^5.0.0", + "space-separated-tokens": "^1.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/he": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/he/-/he-1.2.0.tgz", + "integrity": "sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==", + "bin": { + "he": "bin/he" + } + }, + "node_modules/history": { + "version": "4.10.1", + "resolved": "https://registry.npmjs.org/history/-/history-4.10.1.tgz", + "integrity": "sha512-36nwAD620w12kuzPAsyINPWJqlNbij+hpK1k9XRloDtym8mxzGYl2c17LnV6IAGB2Dmg4tEa7G7DlawS0+qjew==", + "dependencies": { + "@babel/runtime": "^7.1.2", + "loose-envify": "^1.2.0", + "resolve-pathname": "^3.0.0", + "tiny-invariant": "^1.0.2", + "tiny-warning": "^1.0.0", + "value-equal": "^1.0.1" + } + }, + "node_modules/hoist-non-react-statics": { + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/hoist-non-react-statics/-/hoist-non-react-statics-3.3.2.tgz", + "integrity": "sha512-/gGivxi8JPKWNm/W0jSmzcMPpfpPLc3dY/6GxhX2hQ9iGj3aDfklV4ET7NjKpSinLpJ5vafa9iiGIEZg10SfBw==", + "dependencies": { + "react-is": "^16.7.0" + } + }, + "node_modules/hpack.js": { + "version": "2.1.6", + "resolved": "https://registry.npmjs.org/hpack.js/-/hpack.js-2.1.6.tgz", + "integrity": 
"sha512-zJxVehUdMGIKsRaNt7apO2Gqp0BdqW5yaiGHXXmbpvxgBYVZnAql+BJb4RO5ad2MgpbZKn5G6nMnegrH1FcNYQ==", + "dependencies": { + "inherits": "^2.0.1", + "obuf": "^1.0.0", + "readable-stream": "^2.0.1", + "wbuf": "^1.1.0" + } + }, + "node_modules/hpack.js/node_modules/isarray": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", + "integrity": "sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==" + }, + "node_modules/hpack.js/node_modules/readable-stream": { + "version": "2.3.8", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.8.tgz", + "integrity": "sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==", + "dependencies": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + "node_modules/hpack.js/node_modules/safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" + }, + "node_modules/hpack.js/node_modules/string_decoder": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", + "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", + "dependencies": { + "safe-buffer": "~5.1.0" + } + }, + "node_modules/html-entities": { + "version": "2.3.5", + "resolved": "https://registry.npmjs.org/html-entities/-/html-entities-2.3.5.tgz", + "integrity": "sha512-72TJlcMkYsEJASa/3HnX7VT59htM7iSHbH59NSZbtc+22Ap0Txnlx91sfeB+/A7wNZg7UxtZdhAW4y+/jimrdg==" + }, + "node_modules/html-minifier-terser": { + "version": "6.1.0", + "resolved": 
"https://registry.npmjs.org/html-minifier-terser/-/html-minifier-terser-6.1.0.tgz", + "integrity": "sha512-YXxSlJBZTP7RS3tWnQw74ooKa6L9b9i9QYXY21eUEvhZ3u9XLfv6OnFsQq6RxkhHygsaUMvYsZRV5rU/OVNZxw==", + "dependencies": { + "camel-case": "^4.1.2", + "clean-css": "^5.2.2", + "commander": "^8.3.0", + "he": "^1.2.0", + "param-case": "^3.0.4", + "relateurl": "^0.2.7", + "terser": "^5.10.0" + }, + "bin": { + "html-minifier-terser": "cli.js" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/html-minifier-terser/node_modules/commander": { + "version": "8.3.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-8.3.0.tgz", + "integrity": "sha512-OkTL9umf+He2DZkUq8f8J9of7yL6RJKI24dVITBmNfZBmri9zYZQrKkuXiKhyfPSu8tUhnVBB1iKXevvnlR4Ww==", + "engines": { + "node": ">= 12" + } + }, + "node_modules/html-tags": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/html-tags/-/html-tags-3.3.1.tgz", + "integrity": "sha512-ztqyC3kLto0e9WbNp0aeP+M3kTt+nbaIveGmUxAtZa+8iFgKLUOD4YKM5j+f3QD89bra7UeumolZHKuOXnTmeQ==", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/html-void-elements": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/html-void-elements/-/html-void-elements-1.0.5.tgz", + "integrity": "sha512-uE/TxKuyNIcx44cIWnjr/rfIATDH7ZaOMmstu0CwhFG1Dunhlp4OC6/NMbhiwoq5BpW0ubi303qnEk/PZj614w==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/html-webpack-plugin": { + "version": "5.5.1", + "resolved": "https://registry.npmjs.org/html-webpack-plugin/-/html-webpack-plugin-5.5.1.tgz", + "integrity": "sha512-cTUzZ1+NqjGEKjmVgZKLMdiFg3m9MdRXkZW2OEe69WYVi5ONLMmlnSZdXzGGMOq0C8jGDrL6EWyEDDUioHO/pA==", + "dependencies": { + "@types/html-minifier-terser": "^6.0.0", + "html-minifier-terser": "^6.0.2", + "lodash": "^4.17.21", + "pretty-error": "^4.0.0", + "tapable": "^2.0.0" + }, + "engines": { + "node": ">=10.13.0" 
+ }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/html-webpack-plugin" + }, + "peerDependencies": { + "webpack": "^5.20.0" + } + }, + "node_modules/htmlparser2": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-6.1.0.tgz", + "integrity": "sha512-gyyPk6rgonLFEDGoeRgQNaEUvdJ4ktTmmUh/h2t7s+M8oPpIPxgNACWa+6ESR57kXstwqPiCut0V8NRpcwgU7A==", + "funding": [ + "https://github.com/fb55/htmlparser2?sponsor=1", + { + "type": "github", + "url": "https://github.com/sponsors/fb55" + } + ], + "dependencies": { + "domelementtype": "^2.0.1", + "domhandler": "^4.0.0", + "domutils": "^2.5.2", + "entities": "^2.0.0" + } + }, + "node_modules/htmlparser2/node_modules/entities": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/entities/-/entities-2.2.0.tgz", + "integrity": "sha512-p92if5Nz619I0w+akJrLZH0MX0Pb5DX39XOwQTtXSdQQOaYH03S1uIQp4mhOZtAXrxq4ViO67YTiLBo2638o9A==", + "funding": { + "url": "https://github.com/fb55/entities?sponsor=1" + } + }, + "node_modules/http-cache-semantics": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-4.1.1.tgz", + "integrity": "sha512-er295DKPVsV82j5kw1Gjt+ADA/XYHsajl82cGNQG2eyoPkvgUhX+nDIyelzhIWbbsXP39EHcI6l5tYs2FYqYXQ==" + }, + "node_modules/http-deceiver": { + "version": "1.2.7", + "resolved": "https://registry.npmjs.org/http-deceiver/-/http-deceiver-1.2.7.tgz", + "integrity": "sha512-LmpOGxTfbpgtGVxJrj5k7asXHCgNZp5nLfp+hWc8QQRqtb7fUy6kRY3BO1h9ddF6yIPYUARgxGOwB42DnxIaNw==" + }, + "node_modules/http-errors": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.0.tgz", + "integrity": "sha512-FtwrG/euBzaEjYeRqOgly7G0qviiXoJWnvEH2Z1plBdXgbyjv34pHTSb9zoeHMyDy33+DWy5Wt9Wo+TURtOYSQ==", + "dependencies": { + "depd": "2.0.0", + "inherits": "2.0.4", + "setprototypeof": "1.2.0", + "statuses": "2.0.1", + "toidentifier": "1.0.1" + }, + "engines": { + "node": ">= 0.8" + } 
+ }, + "node_modules/http-parser-js": { + "version": "0.5.8", + "resolved": "https://registry.npmjs.org/http-parser-js/-/http-parser-js-0.5.8.tgz", + "integrity": "sha512-SGeBX54F94Wgu5RH3X5jsDtf4eHyRogWX1XGT3b4HuW3tQPM4AaBzoUji/4AAJNXCEOWZ5O0DgZmJw1947gD5Q==" + }, + "node_modules/http-proxy": { + "version": "1.18.1", + "resolved": "https://registry.npmjs.org/http-proxy/-/http-proxy-1.18.1.tgz", + "integrity": "sha512-7mz/721AbnJwIVbnaSv1Cz3Am0ZLT/UBwkC92VlxhXv/k/BBQfM2fXElQNC27BVGr0uwUpplYPQM9LnaBMR5NQ==", + "dependencies": { + "eventemitter3": "^4.0.0", + "follow-redirects": "^1.0.0", + "requires-port": "^1.0.0" + }, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/http-proxy-middleware": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/http-proxy-middleware/-/http-proxy-middleware-2.0.6.tgz", + "integrity": "sha512-ya/UeJ6HVBYxrgYotAZo1KvPWlgB48kUJLDePFeneHsVujFaW5WNj2NgWCAE//B1Dl02BIfYlpNgBy8Kf8Rjmw==", + "dependencies": { + "@types/http-proxy": "^1.17.8", + "http-proxy": "^1.18.1", + "is-glob": "^4.0.1", + "is-plain-obj": "^3.0.0", + "micromatch": "^4.0.2" + }, + "engines": { + "node": ">=12.0.0" + }, + "peerDependencies": { + "@types/express": "^4.17.13" + }, + "peerDependenciesMeta": { + "@types/express": { + "optional": true + } + } + }, + "node_modules/http-proxy-middleware/node_modules/is-plain-obj": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-3.0.0.tgz", + "integrity": "sha512-gwsOE28k+23GP1B6vFl1oVh/WOzmawBrKwo5Ev6wMKzPkaXaCDIQKzLnvsA42DRlbVTWorkgTKIviAKCWkfUwA==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/human-signals": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-2.1.0.tgz", + "integrity": "sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==", + "engines": { + "node": ">=10.17.0" + } + }, + 
"node_modules/iconv-lite": { + "version": "0.4.24", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", + "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", + "dependencies": { + "safer-buffer": ">= 2.1.2 < 3" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/icss-utils": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/icss-utils/-/icss-utils-5.1.0.tgz", + "integrity": "sha512-soFhflCVWLfRNOPU3iv5Z9VUdT44xFRbzjLsEzSr5AQmgqPMTHdU3PMT1Cf1ssx8fLNJDA1juftYl+PUcv3MqA==", + "engines": { + "node": "^10 || ^12 || >= 14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/ignore": { + "version": "5.2.4", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.2.4.tgz", + "integrity": "sha512-MAb38BcSbH0eHNBxn7ql2NH/kX33OkB3lZ1BNdh7ENeRChHTYsTvWrMubiIAMNS2llXEEgZ1MUOBtXChP3kaFQ==", + "engines": { + "node": ">= 4" + } + }, + "node_modules/image-size": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/image-size/-/image-size-1.0.2.tgz", + "integrity": "sha512-xfOoWjceHntRb3qFCrh5ZFORYH8XCdYpASltMhZ/Q0KZiOwjdE/Yl2QCiWdwD+lygV5bMCvauzgu5PxBX/Yerg==", + "dependencies": { + "queue": "6.0.2" + }, + "bin": { + "image-size": "bin/image-size.js" + }, + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/immer": { + "version": "9.0.21", + "resolved": "https://registry.npmjs.org/immer/-/immer-9.0.21.tgz", + "integrity": "sha512-bc4NBHqOqSfRW7POMkHd51LvClaeMXpm8dx0e8oE2GORbq5aRK7Bxl4FyzVLdGtLmvLKL7BTDBG5ACQm4HWjTA==", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/immer" + } + }, + "node_modules/import-fresh": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.0.tgz", + "integrity": "sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==", + "dependencies": { + "parent-module": "^1.0.0", + 
"resolve-from": "^4.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/import-lazy": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/import-lazy/-/import-lazy-2.1.0.tgz", + "integrity": "sha512-m7ZEHgtw69qOGw+jwxXkHlrlIPdTGkyh66zXZ1ajZbxkDBNjSY/LGbmjc7h0s2ELsUDTAhFr55TrPSSqJGPG0A==", + "engines": { + "node": ">=4" + } + }, + "node_modules/imurmurhash": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", + "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==", + "engines": { + "node": ">=0.8.19" + } + }, + "node_modules/indent-string": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz", + "integrity": "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==", + "engines": { + "node": ">=8" + } + }, + "node_modules/inflight": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", + "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", + "dependencies": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" + }, + "node_modules/ini": { + "version": "1.3.8", + "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz", + "integrity": "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==" + }, + "node_modules/inline-style-parser": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/inline-style-parser/-/inline-style-parser-0.1.1.tgz", + "integrity": 
"sha512-7NXolsK4CAS5+xvdj5OMMbI962hU/wvwoxk+LWR9Ek9bVtyuuYScDN6eS0rUm6TxApFpw7CX1o4uJzcd4AyD3Q==" + }, + "node_modules/interpret": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/interpret/-/interpret-1.4.0.tgz", + "integrity": "sha512-agE4QfB2Lkp9uICn7BAqoscw4SZP9kTE2hxiFI3jBPmXJfdqiahTbUuKGsMoN2GtqL9AxhYioAcVvgsb1HvRbA==", + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/invariant": { + "version": "2.2.4", + "resolved": "https://registry.npmjs.org/invariant/-/invariant-2.2.4.tgz", + "integrity": "sha512-phJfQVBuaJM5raOpJjSfkiD6BpbCE4Ns//LaXl6wGYtUBY83nWS6Rf9tXm2e8VaK60JEjYldbPif/A2B1C2gNA==", + "dependencies": { + "loose-envify": "^1.0.0" + } + }, + "node_modules/ipaddr.js": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-2.1.0.tgz", + "integrity": "sha512-LlbxQ7xKzfBusov6UMi4MFpEg0m+mAm9xyNGEduwXMEDuf4WfzB/RZwMVYEd7IKGvh4IUkEXYxtAVu9T3OelJQ==", + "engines": { + "node": ">= 10" + } + }, + "node_modules/is-absolute": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-absolute/-/is-absolute-1.0.0.tgz", + "integrity": "sha512-dOWoqflvcydARa360Gvv18DZ/gRuHKi2NU/wU5X1ZFzdYfH29nkiNZsF3mp4OJ3H4yo9Mx8A/uAGNzpzPN3yBA==", + "dependencies": { + "is-relative": "^1.0.0", + "is-windows": "^1.0.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-alphabetical": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/is-alphabetical/-/is-alphabetical-1.0.4.tgz", + "integrity": "sha512-DwzsA04LQ10FHTZuL0/grVDk4rFoVH1pjAToYwBrHSxcrBIGQuXrQMtD5U1b0U2XVgKZCTLLP8u2Qxqhy3l2Vg==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-alphanumerical": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/is-alphanumerical/-/is-alphanumerical-1.0.4.tgz", + "integrity": "sha512-UzoZUr+XfVz3t3v4KyGEniVL9BDRoQtY7tOyrRybkVNjDFWyo1yhXNGrrBTQxp3ib9BLAWs7k2YKBQsFRkZG9A==", + "dependencies": { + "is-alphabetical": 
"^1.0.0", + "is-decimal": "^1.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-arrayish": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", + "integrity": "sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==" + }, + "node_modules/is-binary-path": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", + "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", + "dependencies": { + "binary-extensions": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-buffer": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-2.0.5.tgz", + "integrity": "sha512-i2R6zNFDwgEHJyQUtJEk0XFi1i0dPFn/oqjK3/vPCcDeJvW5NQ83V8QbicfF1SupOaB0h8ntgBC2YiE7dfyctQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "engines": { + "node": ">=4" + } + }, + "node_modules/is-ci": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-ci/-/is-ci-2.0.0.tgz", + "integrity": "sha512-YfJT7rkpQB0updsdHLGWrvhBJfcfzNNawYDNIyQXJz0IViGf75O8EBPKSdvw2rF+LGCsX4FZ8tcr3b19LcZq4w==", + "dependencies": { + "ci-info": "^2.0.0" + }, + "bin": { + "is-ci": "bin.js" + } + }, + "node_modules/is-ci/node_modules/ci-info": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-2.0.0.tgz", + "integrity": "sha512-5tK7EtrZ0N+OLFMthtqOj4fI2Jeb88C4CAZPu25LDVUgXJ0A3Js4PMGqrn0JU1W0Mh1/Z8wZzYPxqUrXeBboCQ==" + }, + "node_modules/is-core-module": { + "version": "2.12.1", + "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.12.1.tgz", + "integrity": 
"sha512-Q4ZuBAe2FUsKtyQJoQHlvP8OvBERxO3jEmy1I7hcRXcJBGGHFh/aJBswbXuS9sgrDH2QUO8ilkwNPHvHMd8clg==", + "dependencies": { + "has": "^1.0.3" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-decimal": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/is-decimal/-/is-decimal-1.0.4.tgz", + "integrity": "sha512-RGdriMmQQvZ2aqaQq3awNA6dCGtKpiDFcOzrTWrDAT2MiWrKQVPmxLGHl7Y2nNu6led0kEyoX0enY0qXYsv9zw==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-docker": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/is-docker/-/is-docker-2.2.1.tgz", + "integrity": "sha512-F+i2BKsFrH66iaUFc0woD8sLy8getkwTwtOBjvs56Cx4CgJDeKQeqfz8wAYiSb8JOprWhHH5p77PbmYCvvUuXQ==", + "bin": { + "is-docker": "cli.js" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-extendable": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-0.1.1.tgz", + "integrity": "sha512-5BMULNob1vgFX6EjQw5izWDxrecWK9AM72rugNr0TFldMOi0fj6Jk+zeKIt0xGj4cEfQIJth4w3OKWOJ4f+AFw==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-glob": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", + "integrity": 
"sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", + "dependencies": { + "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-hexadecimal": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/is-hexadecimal/-/is-hexadecimal-1.0.4.tgz", + "integrity": "sha512-gyPJuv83bHMpocVYoqof5VDiZveEoGoFL8m3BXNb2VW8Xs+rz9kqO8LOQ5DH6EsuvilT1ApazU0pyl+ytbPtlw==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-installed-globally": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/is-installed-globally/-/is-installed-globally-0.4.0.tgz", + "integrity": "sha512-iwGqO3J21aaSkC7jWnHP/difazwS7SFeIqxv6wEtLU8Y5KlzFTjyqcSIT0d8s4+dDhKytsk9PJZ2BkS5eZwQRQ==", + "dependencies": { + "global-dirs": "^3.0.0", + "is-path-inside": "^3.0.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-npm": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/is-npm/-/is-npm-5.0.0.tgz", + "integrity": "sha512-WW/rQLOazUq+ST/bCAVBp/2oMERWLsR7OrKyt052dNDk4DHcDE0/7QSXITlmi+VBcV13DfIbysG3tZJm5RfdBA==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/is-obj": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/is-obj/-/is-obj-1.0.1.tgz", + "integrity": "sha512-l4RyHgRqGN4Y3+9JHVrNqO+tN0rV5My76uW5/nuO4K1b6vw5G8d/cmFjP9tRfEsdhZNt0IFdZuK/c2Vr4Nb+Qg==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-path-cwd": { + "version": "2.2.0", + "resolved": 
"https://registry.npmjs.org/is-path-cwd/-/is-path-cwd-2.2.0.tgz", + "integrity": "sha512-w942bTcih8fdJPJmQHFzkS76NEP8Kzzvmw92cXsazb8intwLqPibPPdXf4ANdKV3rYMuuQYGIWtvz9JilB3NFQ==", + "engines": { + "node": ">=6" + } + }, + "node_modules/is-path-inside": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-3.0.3.tgz", + "integrity": "sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-plain-obj": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-2.1.0.tgz", + "integrity": "sha512-YWnfyRwxL/+SsrWYfOpUtz5b3YD+nyfkHvjbcanzk8zgyO4ASD67uVMRt8k5bM4lLMDnXfriRhOpemw+NfT1eA==", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-plain-object": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-2.0.4.tgz", + "integrity": "sha512-h5PpgXkWitc38BBMYawTYMWJHFZJVnBquFE57xFpjB8pJFiF6gZ+bU+WyI/yqXiFR5mdLsgYNaPe8uao6Uv9Og==", + "dependencies": { + "isobject": "^3.0.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-regexp": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-regexp/-/is-regexp-1.0.0.tgz", + "integrity": "sha512-7zjFAPO4/gwyQAAgRRmqeEeyIICSdmCqa3tsVHMdBzaXXRiqopZL4Cyghg/XulGWrtABTpbnYYzzIRffLkP4oA==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-relative": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-relative/-/is-relative-1.0.0.tgz", + "integrity": "sha512-Kw/ReK0iqwKeu0MITLFuj0jbPAmEiOsIwyIXvvbfa6QfmN9pkD1M+8pdk7Rl/dTKbH34/XBFMbgD4iMJhLQbGA==", + "dependencies": { + "is-unc-path": "^1.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-root": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-root/-/is-root-2.1.0.tgz", + "integrity": 
"sha512-AGOriNp96vNBd3HtU+RzFEc75FfR5ymiYv8E553I71SCeXBiMsVDUtdio1OEFvrPyLIQ9tVR5RxXIFe5PUFjMg==", + "engines": { + "node": ">=6" + } + }, + "node_modules/is-stream": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz", + "integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-typedarray": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-typedarray/-/is-typedarray-1.0.0.tgz", + "integrity": "sha512-cyA56iCMHAh5CdzjJIa4aohJyeO1YbwLi3Jc35MmRU6poroFjIGZzUzupGiRPOjgHg9TLu43xbpwXk523fMxKA==" + }, + "node_modules/is-unc-path": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-unc-path/-/is-unc-path-1.0.0.tgz", + "integrity": "sha512-mrGpVd0fs7WWLfVsStvgF6iEJnbjDFZh9/emhRDcGWTduTfNHd9CHeUwH3gYIjdbwo4On6hunkztwOaAw0yllQ==", + "dependencies": { + "unc-path-regex": "^0.1.2" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-whitespace-character": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/is-whitespace-character/-/is-whitespace-character-1.0.4.tgz", + "integrity": "sha512-SDweEzfIZM0SJV0EUga669UTKlmL0Pq8Lno0QDQsPnvECB3IM2aP0gdx5TrU0A01MAPfViaZiI2V1QMZLaKK5w==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-windows": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/is-windows/-/is-windows-1.0.2.tgz", + "integrity": "sha512-eXK1UInq2bPmjyX6e3VHIzMLobc4J94i4AWn+Hpq3OU5KkrRC96OAcR3PRJ/pGu6m8TRnBHP9dkXQVsT/COVIA==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-word-character": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/is-word-character/-/is-word-character-1.0.4.tgz", + "integrity": 
"sha512-5SMO8RVennx3nZrqtKwCGyyetPE9VDba5ugvKLaD4KopPG5kR4mQ7tNt/r7feL5yt5h3lpuBbIUmCOG2eSzXHA==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-wsl": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-2.2.0.tgz", + "integrity": "sha512-fKzAra0rGJUUBwGBgNkHZuToZcn+TtXHpeCgmkMJMMYx1sQDYaCSyjJBSCa2nH1DGm7s3n1oBnohoVTBaN7Lww==", + "dependencies": { + "is-docker": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-yarn-global": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/is-yarn-global/-/is-yarn-global-0.3.0.tgz", + "integrity": "sha512-VjSeb/lHmkoyd8ryPVIKvOCn4D1koMqY+vqyjjUfc3xyKtP4dYOxM44sZrnqQSzSds3xyOrUTLTC9LVCVgLngw==" + }, + "node_modules/isarray": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz", + "integrity": "sha512-D2S+3GLxWH+uhrNEcoh/fnmYeP8E8/zHl644d/jdA0g2uyXvy3sb0qxotE+ne0LtccHknQzWwZEzhak7oJ0COQ==" + }, + "node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==" + }, + "node_modules/isobject": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", + "integrity": "sha512-WhB9zCku7EGTj/HQQRz5aUQEUeoQZH2bWcltRErOpymJ4boYE6wL9Tbr23krRPSZ+C5zqNSrSw+Cc7sZZ4b7vg==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/jest-util": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/jest-util/-/jest-util-29.5.0.tgz", + "integrity": "sha512-RYMgG/MTadOr5t8KdhejfvUU82MxsCu5MF6KuDUHl+NuwzUt+Sm6jJWxTJVrDR1j5M/gJVCPKQEpWXY+yIQ6lQ==", + "dependencies": { + "@jest/types": "^29.5.0", + "@types/node": "*", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "graceful-fs": "^4.2.9", + "picomatch": "^2.2.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || 
>=18.0.0" + } + }, + "node_modules/jest-worker": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-29.5.0.tgz", + "integrity": "sha512-NcrQnevGoSp4b5kg+akIpthoAFHxPBcb5P6mYPY0fUNT+sSvmtu6jlkEle3anczUKIKEbMxFimk9oTP/tpIPgA==", + "dependencies": { + "@types/node": "*", + "jest-util": "^29.5.0", + "merge-stream": "^2.0.0", + "supports-color": "^8.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-worker/node_modules/supports-color": { + "version": "8.1.1", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", + "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/supports-color?sponsor=1" + } + }, + "node_modules/jiti": { + "version": "1.18.2", + "resolved": "https://registry.npmjs.org/jiti/-/jiti-1.18.2.tgz", + "integrity": "sha512-QAdOptna2NYiSSpv0O/BwoHBSmz4YhpzJHyi+fnMRTXFjp7B8i/YG5Z8IfusxB1ufjcD2Sre1F3R+nX3fvy7gg==", + "bin": { + "jiti": "bin/jiti.js" + } + }, + "node_modules/joi": { + "version": "17.9.2", + "resolved": "https://registry.npmjs.org/joi/-/joi-17.9.2.tgz", + "integrity": "sha512-Itk/r+V4Dx0V3c7RLFdRh12IOjySm2/WGPMubBT92cQvRfYZhPM2W0hZlctjj72iES8jsRCwp7S/cRmWBnJ4nw==", + "dependencies": { + "@hapi/hoek": "^9.0.0", + "@hapi/topo": "^5.0.0", + "@sideway/address": "^4.1.3", + "@sideway/formula": "^3.0.1", + "@sideway/pinpoint": "^2.0.0" + } + }, + "node_modules/js-tokens": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==" + }, + "node_modules/js-yaml": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", + "integrity": 
"sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "dependencies": { + "argparse": "^2.0.1" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/jsesc": { + "version": "2.5.2", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-2.5.2.tgz", + "integrity": "sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==", + "bin": { + "jsesc": "bin/jsesc" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/json-buffer": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.0.tgz", + "integrity": "sha512-CuUqjv0FUZIdXkHPI8MezCnFCdaTAacej1TZYulLoAg1h/PhwkdXFN4V/gzY4g+fMBCOV2xF+rp7t2XD2ns/NQ==" + }, + "node_modules/json-parse-even-better-errors": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz", + "integrity": "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==" + }, + "node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==" + }, + "node_modules/json5": { + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz", + "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==", + "bin": { + "json5": "lib/cli.js" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/jsonfile": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-4.0.0.tgz", + "integrity": "sha512-m6F1R3z8jjlf2imQHS2Qez5sjKWQzbuuhuJ/FKYFRZvPE3PuHcSMVZzfsLhGVOkfd20obL5SWEBew5ShlquNxg==", + "optionalDependencies": { + "graceful-fs": "^4.1.6" + } + }, + "node_modules/keyv": { + "version": 
"3.1.0", + "resolved": "https://registry.npmjs.org/keyv/-/keyv-3.1.0.tgz", + "integrity": "sha512-9ykJ/46SN/9KPM/sichzQ7OvXyGDYKGTaDlKMGCAlg2UK8KRy4jb0d8sFc+0Tt0YYnThq8X2RZgCg74RPxgcVA==", + "dependencies": { + "json-buffer": "3.0.0" + } + }, + "node_modules/kind-of": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz", + "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/kleur": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/kleur/-/kleur-3.0.3.tgz", + "integrity": "sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w==", + "engines": { + "node": ">=6" + } + }, + "node_modules/klona": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/klona/-/klona-2.0.6.tgz", + "integrity": "sha512-dhG34DXATL5hSxJbIexCft8FChFXtmskoZYnoPWjXQuebWYCNkVeV3KkGegCK9CP1oswI/vQibS2GY7Em/sJJA==", + "engines": { + "node": ">= 8" + } + }, + "node_modules/latest-version": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/latest-version/-/latest-version-5.1.0.tgz", + "integrity": "sha512-weT+r0kTkRQdCdYCNtkMwWXQTMEswKrFBkm4ckQOMVhhqhIMI1UT2hMj+1iigIhgSZm5gTmrRXBNoGUgaTY1xA==", + "dependencies": { + "package-json": "^6.3.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/launch-editor": { + "version": "2.6.0", + "resolved": "https://registry.npmjs.org/launch-editor/-/launch-editor-2.6.0.tgz", + "integrity": "sha512-JpDCcQnyAAzZZaZ7vEiSqL690w7dAEyLao+KC96zBplnYbJS7TYNjvM3M7y3dGz+v7aIsJk3hllWuc0kWAjyRQ==", + "dependencies": { + "picocolors": "^1.0.0", + "shell-quote": "^1.7.3" + } + }, + "node_modules/leven": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/leven/-/leven-3.1.0.tgz", + "integrity": "sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A==", + "engines": { + 
"node": ">=6" + } + }, + "node_modules/lilconfig": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/lilconfig/-/lilconfig-2.1.0.tgz", + "integrity": "sha512-utWOt/GHzuUxnLKxB6dk81RoOeoNeHgbrXiuGk4yyF5qlRz+iIVWu56E2fqGHFrXz0QNUhLB/8nKqvRH66JKGQ==", + "engines": { + "node": ">=10" + } + }, + "node_modules/lines-and-columns": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz", + "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==" + }, + "node_modules/loader-runner": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/loader-runner/-/loader-runner-4.3.0.tgz", + "integrity": "sha512-3R/1M+yS3j5ou80Me59j7F9IMs4PXs3VqRrm0TU3AbKPxlmpoY1TNscJV/oGJXo8qCatFGTfDbY6W6ipGOYXfg==", + "engines": { + "node": ">=6.11.5" + } + }, + "node_modules/loader-utils": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-2.0.4.tgz", + "integrity": "sha512-xXqpXoINfFhgua9xiqD8fPFHgkoq1mmmpE92WlDbm9rNRd/EbRb+Gqf908T2DMfuHjjJlksiK2RbHVOdD/MqSw==", + "dependencies": { + "big.js": "^5.2.2", + "emojis-list": "^3.0.0", + "json5": "^2.1.2" + }, + "engines": { + "node": ">=8.9.0" + } + }, + "node_modules/locate-path": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", + "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", + "dependencies": { + "p-locate": "^4.1.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/lodash": { + "version": "4.17.21", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", + "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==" + }, + "node_modules/lodash-es": { + "version": "4.17.21", + "resolved": "https://registry.npmjs.org/lodash-es/-/lodash-es-4.17.21.tgz", + "integrity": 
"sha512-mKnC+QJ9pWVzv+C4/U3rRsHapFfHvQFoFB92e52xeyGMcX6/OlIl78je1u8vePzYZSkkogMPJ2yjxxsb89cxyw==" + }, + "node_modules/lodash.debounce": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/lodash.debounce/-/lodash.debounce-4.0.8.tgz", + "integrity": "sha512-FT1yDzDYEoYWhnSGnpE/4Kj1fLZkDFyqRb7fNt6FdYOSxlUWAtp42Eh6Wb0rGIv/m9Bgo7x4GhQbm5Ys4SG5ow==" + }, + "node_modules/lodash.memoize": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/lodash.memoize/-/lodash.memoize-4.1.2.tgz", + "integrity": "sha512-t7j+NzmgnQzTAYXcsHYLgimltOV1MXHtlOWf6GjL9Kj8GK5FInw5JotxvbOs+IvV1/Dzo04/fCGfLVs7aXb4Ag==" + }, + "node_modules/lodash.uniq": { + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/lodash.uniq/-/lodash.uniq-4.5.0.tgz", + "integrity": "sha512-xfBaXQd9ryd9dlSDvnvI0lvxfLJlYAZzXomUYzLKtUeOQvOP5piqAWuGtrhWeqaXK9hhoM/iyJc5AV+XfsX3HQ==" + }, + "node_modules/loose-envify": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz", + "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==", + "dependencies": { + "js-tokens": "^3.0.0 || ^4.0.0" + }, + "bin": { + "loose-envify": "cli.js" + } + }, + "node_modules/lower-case": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/lower-case/-/lower-case-2.0.2.tgz", + "integrity": "sha512-7fm3l3NAF9WfN6W3JOmf5drwpVqX78JtoGJ3A6W0a6ZnldM41w2fV5D490psKFTpMds8TJse/eHLFFsNHHjHgg==", + "dependencies": { + "tslib": "^2.0.3" + } + }, + "node_modules/lowercase-keys": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-1.0.1.tgz", + "integrity": "sha512-G2Lj61tXDnVFFOi8VZds+SoQjtQC3dgokKdDG2mTm1tx4m50NUHBOZSBwQQHyy0V12A0JTG4icfZQH+xPyh8VA==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/lru-cache": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", + "integrity": 
"sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", + "dependencies": { + "yallist": "^3.0.2" + } + }, + "node_modules/make-dir": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-3.1.0.tgz", + "integrity": "sha512-g3FeP20LNwhALb/6Cz6Dd4F2ngze0jz7tbzrD2wAV+o9FeNHe4rL+yK2md0J/fiSf1sa1ADhXqi5+oVwOM/eGw==", + "dependencies": { + "semver": "^6.0.0" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/make-dir/node_modules/semver": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", + "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/markdown-escapes": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/markdown-escapes/-/markdown-escapes-1.0.4.tgz", + "integrity": "sha512-8z4efJYk43E0upd0NbVXwgSTQs6cT3T06etieCMEg7dRbzCbxUCK/GHlX8mhHRDcp+OLlHkPKsvqQTCvsRl2cg==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/mdast-squeeze-paragraphs": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/mdast-squeeze-paragraphs/-/mdast-squeeze-paragraphs-4.0.0.tgz", + "integrity": "sha512-zxdPn69hkQ1rm4J+2Cs2j6wDEv7O17TfXTJ33tl/+JPIoEmtV9t2ZzBM5LPHE8QlHsmVD8t3vPKCyY3oH+H8MQ==", + "dependencies": { + "unist-util-remove": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-definitions": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-definitions/-/mdast-util-definitions-4.0.0.tgz", + "integrity": "sha512-k8AJ6aNnUkB7IE+5azR9h81O5EQ/cTDXtWdMq9Kk5KcEW/8ritU5CeLg/9HhOC++nALHBlaogJ5jz0Ybk3kPMQ==", + "dependencies": { + "unist-util-visit": "^2.0.0" + }, + "funding": { + 
"type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-to-hast": { + "version": "10.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-to-hast/-/mdast-util-to-hast-10.0.1.tgz", + "integrity": "sha512-BW3LM9SEMnjf4HXXVApZMt8gLQWVNXc3jryK0nJu/rOXPOnlkUjmdkDlmxMirpbU9ILncGFIwLH/ubnWBbcdgA==", + "dependencies": { + "@types/mdast": "^3.0.0", + "@types/unist": "^2.0.0", + "mdast-util-definitions": "^4.0.0", + "mdurl": "^1.0.0", + "unist-builder": "^2.0.0", + "unist-util-generated": "^1.0.0", + "unist-util-position": "^3.0.0", + "unist-util-visit": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-to-string": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-to-string/-/mdast-util-to-string-2.0.0.tgz", + "integrity": "sha512-AW4DRS3QbBayY/jJmD8437V1Gombjf8RSOUCMFBuo5iHi58AGEgVCKQ+ezHkZZDpAQS75hcBMpLqjpJTjtUL7w==", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdn-data": { + "version": "2.0.14", + "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-2.0.14.tgz", + "integrity": "sha512-dn6wd0uw5GsdswPFfsgMp5NSB0/aDe6fK94YJV/AJDYXL6HVLWBsxeq7js7Ad+mU2K9LAlwpk6kN2D5mwCPVow==" + }, + "node_modules/mdurl": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/mdurl/-/mdurl-1.0.1.tgz", + "integrity": "sha512-/sKlQJCBYVY9Ers9hqzKou4H6V5UWc/M59TH2dvkt+84itfnq7uFOMLpOiOS4ujvHP4etln18fmIxA5R5fll0g==" + }, + "node_modules/media-typer": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz", + "integrity": "sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/memfs": { + "version": "3.5.3", + "resolved": "https://registry.npmjs.org/memfs/-/memfs-3.5.3.tgz", + "integrity": 
"sha512-UERzLsxzllchadvbPs5aolHh65ISpKpM+ccLbOJ8/vvpBKmAWf+la7dXFy7Mr0ySHbdHrFv5kGFCUHHe6GFEmw==", + "dependencies": { + "fs-monkey": "^1.0.4" + }, + "engines": { + "node": ">= 4.0.0" + } + }, + "node_modules/merge-descriptors": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.1.tgz", + "integrity": "sha512-cCi6g3/Zr1iqQi6ySbseM1Xvooa98N0w31jzUYrXPX2xqObmFGHJ0tQ5u74H3mVh7wLouTseZyYIq39g8cNp1w==" + }, + "node_modules/merge-stream": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz", + "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==" + }, + "node_modules/merge2": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", + "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", + "engines": { + "node": ">= 8" + } + }, + "node_modules/methods": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz", + "integrity": "sha512-iclAHeNqNm68zFtnZ0e+1L2yUIdvzNoauKU4WBA3VvH/vPFieF7qfRlwUZU+DA9P9bPXIS90ulxoUoCH23sV2w==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/micromatch": { + "version": "4.0.5", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.5.tgz", + "integrity": "sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA==", + "dependencies": { + "braces": "^3.0.2", + "picomatch": "^2.3.1" + }, + "engines": { + "node": ">=8.6" + } + }, + "node_modules/mime": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/mime/-/mime-1.6.0.tgz", + "integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==", + "bin": { + "mime": "cli.js" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/mime-db": { + "version": "1.33.0", + "resolved": 
"https://registry.npmjs.org/mime-db/-/mime-db-1.33.0.tgz", + "integrity": "sha512-BHJ/EKruNIqJf/QahvxwQZXKygOQ256myeN/Ew+THcAa5q+PjyTTMMeNQC4DZw5AwfvelsUrA6B67NKMqXDbzQ==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mime-types": { + "version": "2.1.18", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.18.tgz", + "integrity": "sha512-lc/aahn+t4/SWV/qcmumYjymLsWfN3ELhpmVuUFjgsORruuZPVSwAQryq+HHGvO/SI2KVX26bx+En+zhM8g8hQ==", + "dependencies": { + "mime-db": "~1.33.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mimic-fn": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", + "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==", + "engines": { + "node": ">=6" + } + }, + "node_modules/mimic-response": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-1.0.1.tgz", + "integrity": "sha512-j5EctnkH7amfV/q5Hgmoal1g2QHFJRraOtmx0JpIqkxhBhI/lJSl1nMpQ45hVarwNETOoWEimndZ4QK0RHxuxQ==", + "engines": { + "node": ">=4" + } + }, + "node_modules/mini-css-extract-plugin": { + "version": "2.7.6", + "resolved": "https://registry.npmjs.org/mini-css-extract-plugin/-/mini-css-extract-plugin-2.7.6.tgz", + "integrity": "sha512-Qk7HcgaPkGG6eD77mLvZS1nmxlao3j+9PkrT9Uc7HAE1id3F41+DdBRYRYkbyfNRGzm8/YWtzhw7nVPmwhqTQw==", + "dependencies": { + "schema-utils": "^4.0.0" + }, + "engines": { + "node": ">= 12.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^5.0.0" + } + }, + "node_modules/mini-css-extract-plugin/node_modules/ajv": { + "version": "8.12.0", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.12.0.tgz", + "integrity": "sha512-sRu1kpcO9yLtYxBKvqfTeh9KzZEwO3STyX1HT+4CaDzC6HpTGYhIhPIzj9XuKU7KYDwnaeh5hcOwjy1QuJzBPA==", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "json-schema-traverse": 
"^1.0.0", + "require-from-string": "^2.0.2", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/mini-css-extract-plugin/node_modules/ajv-keywords": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-5.1.0.tgz", + "integrity": "sha512-YCS/JNFAUyr5vAuhk1DWm1CBxRHW9LbJ2ozWeemrIqpbsqKjHVxYPyi5GC0rjZIT5JxJ3virVTS8wk4i/Z+krw==", + "dependencies": { + "fast-deep-equal": "^3.1.3" + }, + "peerDependencies": { + "ajv": "^8.8.2" + } + }, + "node_modules/mini-css-extract-plugin/node_modules/json-schema-traverse": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", + "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==" + }, + "node_modules/mini-css-extract-plugin/node_modules/schema-utils": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.1.0.tgz", + "integrity": "sha512-Jw+GZVbP5IggB2WAn6UHI02LBwGmsIeYN/lNbSMZyDziQ7jmtAUrqKqDja+W89YHVs+KL/3IkIMltAklqB1vAw==", + "dependencies": { + "@types/json-schema": "^7.0.9", + "ajv": "^8.9.0", + "ajv-formats": "^2.1.1", + "ajv-keywords": "^5.1.0" + }, + "engines": { + "node": ">= 12.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/minimalistic-assert": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/minimalistic-assert/-/minimalistic-assert-1.0.1.tgz", + "integrity": "sha512-UtJcAD4yEaGtjPezWuO9wC4nwUnVH/8/Im3yEHQP4b67cXlD/Qr9hdITCU1xDbSEXg2XKNaP8jsReV7vQd00/A==" + }, + "node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dependencies": { + "brace-expansion": 
"^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/minimist": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz", + "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/mrmime": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/mrmime/-/mrmime-1.0.1.tgz", + "integrity": "sha512-hzzEagAgDyoU1Q6yg5uI+AorQgdvMCur3FcKf7NhMKWsaYg+RnbTyHRa/9IlLF9rf455MOCtcqqrQQ83pPP7Uw==", + "engines": { + "node": ">=10" + } + }, + "node_modules/ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" + }, + "node_modules/multicast-dns": { + "version": "7.2.5", + "resolved": "https://registry.npmjs.org/multicast-dns/-/multicast-dns-7.2.5.tgz", + "integrity": "sha512-2eznPJP8z2BFLX50tf0LuODrpINqP1RVIm/CObbTcBRITQgmC/TjcREF1NeTBzIcR5XO/ukWo+YHOjBbFwIupg==", + "dependencies": { + "dns-packet": "^5.2.2", + "thunky": "^1.0.2" + }, + "bin": { + "multicast-dns": "cli.js" + } + }, + "node_modules/nanoclone": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/nanoclone/-/nanoclone-0.2.1.tgz", + "integrity": "sha512-wynEP02LmIbLpcYw8uBKpcfF6dmg2vcpKqxeH5UcoKEYdExslsdUA4ugFauuaeYdTB76ez6gJW8XAZ6CgkXYxA==" + }, + "node_modules/nanoid": { + "version": "3.3.6", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.6.tgz", + "integrity": "sha512-BGcqMMJuToF7i1rt+2PWSNVnWIkGCU78jBG3RxO/bZlnZPK2Cmi2QaffxGO/2RvWi9sL+FAiRiXMgsyxQ1DIDA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, + "node_modules/negotiator": { + "version": "0.6.3", + "resolved": 
"https://registry.npmjs.org/negotiator/-/negotiator-0.6.3.tgz", + "integrity": "sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/neo-async": { + "version": "2.6.2", + "resolved": "https://registry.npmjs.org/neo-async/-/neo-async-2.6.2.tgz", + "integrity": "sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw==" + }, + "node_modules/no-case": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/no-case/-/no-case-3.0.4.tgz", + "integrity": "sha512-fgAN3jGAh+RoxUGZHTSOLJIqUc2wmoBwGR4tbpNAKmmovFoWq0OdRkb0VkldReO2a2iBT/OEulG9XSUc10r3zg==", + "dependencies": { + "lower-case": "^2.0.2", + "tslib": "^2.0.3" + } + }, + "node_modules/node-emoji": { + "version": "1.11.0", + "resolved": "https://registry.npmjs.org/node-emoji/-/node-emoji-1.11.0.tgz", + "integrity": "sha512-wo2DpQkQp7Sjm2A0cq+sN7EHKO6Sl0ctXeBdFZrL9T9+UywORbufTcTZxom8YqpLQt/FqNMUkOpkZrJVYSKD3A==", + "dependencies": { + "lodash": "^4.17.21" + } + }, + "node_modules/node-forge": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/node-forge/-/node-forge-1.3.1.tgz", + "integrity": "sha512-dPEtOeMvF9VMcYV/1Wb8CPoVAXtp6MKMlcbAt4ddqmGqUJ6fQZFXkNZNkNlfevtNkGtaSoXf/vNNNSvgrdXwtA==", + "engines": { + "node": ">= 6.13.0" + } + }, + "node_modules/node-releases": { + "version": "2.0.12", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.12.tgz", + "integrity": "sha512-QzsYKWhXTWx8h1kIvqfnC++o0pEmpRQA/aenALsL2F4pqNVr7YzcdMlDij5WBnwftRbJCNJL/O7zdKaxKPHqgQ==" + }, + "node_modules/normalize-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", + "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/normalize-range": { + "version": "0.1.2", + 
"resolved": "https://registry.npmjs.org/normalize-range/-/normalize-range-0.1.2.tgz", + "integrity": "sha512-bdok/XvKII3nUpklnV6P2hxtMNrCboOjAcyBuQnWEhO665FwrSNRxU+AqpsyvO6LgGYPspN+lu5CLtw4jPRKNA==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/normalize-url": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-6.1.0.tgz", + "integrity": "sha512-DlL+XwOy3NxAQ8xuC0okPgK46iuVNAK01YN7RueYBqqFeGsBjV9XmCAzAdgt+667bCl5kPh9EqKKDwnaPG1I7A==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/npm-run-path": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-4.0.1.tgz", + "integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==", + "dependencies": { + "path-key": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/nth-check": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/nth-check/-/nth-check-2.1.1.tgz", + "integrity": "sha512-lqjrjmaOoAnWfMmBPL+XNnynZh2+swxiX3WUE0s4yEHI6m+AwrK2UZOimIRl3X/4QctVqS8AiZjFqyOGrMXb/w==", + "dependencies": { + "boolbase": "^1.0.0" + }, + "funding": { + "url": "https://github.com/fb55/nth-check?sponsor=1" + } + }, + "node_modules/object-assign": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", + "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/object-inspect": { + "version": "1.12.3", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.12.3.tgz", + "integrity": "sha512-geUvdk7c+eizMNUDkRpW1wJwgfOiOeHbxBR/hLXK1aT6zmVSO0jsQcs7fj6MGw89jC/cjGfLcNOrtMYtGqm81g==", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/object-keys": { + "version": 
"1.1.1", + "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.1.1.tgz", + "integrity": "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/object.assign": { + "version": "4.1.4", + "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.4.tgz", + "integrity": "sha512-1mxKf0e58bvyjSCtKYY4sRe9itRk3PJpquJOjeIkz885CczcI4IvJJDLPS72oowuSh+pBxUFROpX+TU++hxhZQ==", + "dependencies": { + "call-bind": "^1.0.2", + "define-properties": "^1.1.4", + "has-symbols": "^1.0.3", + "object-keys": "^1.1.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/obuf": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/obuf/-/obuf-1.1.2.tgz", + "integrity": "sha512-PX1wu0AmAdPqOL1mWhqmlOd8kOIZQwGZw6rh7uby9fTc5lhaOWFLX3I6R1hrF9k3zUY40e6igsLGkDXK92LJNg==" + }, + "node_modules/on-finished": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz", + "integrity": "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==", + "dependencies": { + "ee-first": "1.1.1" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/on-headers": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/on-headers/-/on-headers-1.0.2.tgz", + "integrity": "sha512-pZAE+FJLoyITytdqK0U5s+FIpjN0JP3OzFi/u8Rx+EV5/W+JTWGXG8xFzevE7AjBfDqHv/8vL8qQsIhHnqRkrA==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", + "dependencies": { + "wrappy": "1" + } + }, + "node_modules/onetime": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", + 
"integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", + "dependencies": { + "mimic-fn": "^2.1.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/open": { + "version": "8.4.2", + "resolved": "https://registry.npmjs.org/open/-/open-8.4.2.tgz", + "integrity": "sha512-7x81NCL719oNbsq/3mh+hVrAWmFuEYUqrq/Iw3kUzH8ReypT9QQ0BLoJS7/G9k6N81XjW4qHWtjWwe/9eLy1EQ==", + "dependencies": { + "define-lazy-prop": "^2.0.0", + "is-docker": "^2.1.1", + "is-wsl": "^2.2.0" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/opener": { + "version": "1.5.2", + "resolved": "https://registry.npmjs.org/opener/-/opener-1.5.2.tgz", + "integrity": "sha512-ur5UIdyw5Y7yEj9wLzhqXiy6GZ3Mwx0yGI+5sMn2r0N0v3cKJvUmFH5yPP+WXh9e0xfyzyJX95D8l088DNFj7A==", + "bin": { + "opener": "bin/opener-bin.js" + } + }, + "node_modules/p-cancelable": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/p-cancelable/-/p-cancelable-1.1.0.tgz", + "integrity": "sha512-s73XxOZ4zpt1edZYZzvhqFa6uvQc1vwUa0K0BdtIZgQMAJj9IbebH+JkgKZc9h+B05PKHLOTl4ajG1BmNrVZlw==", + "engines": { + "node": ">=6" + } + }, + "node_modules/p-limit": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", + "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", + "dependencies": { + "p-try": "^2.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-locate": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz", + "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", + "dependencies": { + "p-limit": "^2.2.0" + }, + "engines": { + "node": 
">=8" + } + }, + "node_modules/p-map": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/p-map/-/p-map-4.0.0.tgz", + "integrity": "sha512-/bjOqmgETBYB5BoEeGVea8dmvHb2m9GLy1E9W43yeyfP6QQCZGFNa+XRceJEuDB6zqr+gKpIAmlLebMpykw/MQ==", + "dependencies": { + "aggregate-error": "^3.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-retry": { + "version": "4.6.2", + "resolved": "https://registry.npmjs.org/p-retry/-/p-retry-4.6.2.tgz", + "integrity": "sha512-312Id396EbJdvRONlngUx0NydfrIQ5lsYu0znKVUzVvArzEIt08V1qhtyESbGVd1FGX7UKtiFp5uwKZdM8wIuQ==", + "dependencies": { + "@types/retry": "0.12.0", + "retry": "^0.13.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/p-try": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", + "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", + "engines": { + "node": ">=6" + } + }, + "node_modules/package-json": { + "version": "6.5.0", + "resolved": "https://registry.npmjs.org/package-json/-/package-json-6.5.0.tgz", + "integrity": "sha512-k3bdm2n25tkyxcjSKzB5x8kfVxlMdgsbPr0GkZcwHsLpba6cBjqCt1KlcChKEvxHIcTB1FVMuwoijZ26xex5MQ==", + "dependencies": { + "got": "^9.6.0", + "registry-auth-token": "^4.0.0", + "registry-url": "^5.0.0", + "semver": "^6.2.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/package-json/node_modules/semver": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", + "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/param-case": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/param-case/-/param-case-3.0.4.tgz", + "integrity": 
"sha512-RXlj7zCYokReqWpOPH9oYivUzLYZ5vAPIfEmCTNViosC78F8F0H9y7T7gG2M39ymgutxF5gcFEsyZQSph9Bp3A==", + "dependencies": { + "dot-case": "^3.0.4", + "tslib": "^2.0.3" + } + }, + "node_modules/parent-module": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", + "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", + "dependencies": { + "callsites": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/parse-entities": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/parse-entities/-/parse-entities-2.0.0.tgz", + "integrity": "sha512-kkywGpCcRYhqQIchaWqZ875wzpS/bMKhz5HnN3p7wveJTkTtyAB/AlnS0f8DFSqYW1T82t6yEAkEcB+A1I3MbQ==", + "dependencies": { + "character-entities": "^1.0.0", + "character-entities-legacy": "^1.0.0", + "character-reference-invalid": "^1.0.0", + "is-alphanumerical": "^1.0.0", + "is-decimal": "^1.0.0", + "is-hexadecimal": "^1.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/parse-json": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.2.0.tgz", + "integrity": "sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==", + "dependencies": { + "@babel/code-frame": "^7.0.0", + "error-ex": "^1.3.1", + "json-parse-even-better-errors": "^2.3.0", + "lines-and-columns": "^1.1.6" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/parse5": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/parse5/-/parse5-6.0.1.tgz", + "integrity": "sha512-Ofn/CTFzRGTTxwpNEs9PP93gXShHcTq255nzRYSKe8AkVpZY7e1fpmTfOyoIvjP5HG7Z2ZM7VS9PPhQGW2pOpw==" + }, + "node_modules/parseurl": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz", + "integrity": 
"sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/pascal-case": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/pascal-case/-/pascal-case-3.1.2.tgz", + "integrity": "sha512-uWlGT3YSnK9x3BQJaOdcZwrnV6hPpd8jFH1/ucpiLRPh/2zCVJKS19E4GvYHvaCcACn3foXZ0cLB9Wrx1KGe5g==", + "dependencies": { + "no-case": "^3.0.4", + "tslib": "^2.0.3" + } + }, + "node_modules/path": { + "version": "0.12.7", + "resolved": "https://registry.npmjs.org/path/-/path-0.12.7.tgz", + "integrity": "sha512-aXXC6s+1w7otVF9UletFkFcDsJeO7lSZBPUQhtb5O0xJe8LtYhj/GxldoL09bBj9+ZmE2hNoHqQSFMN5fikh4Q==", + "dependencies": { + "process": "^0.11.1", + "util": "^0.10.3" + } + }, + "node_modules/path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-is-absolute": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", + "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/path-is-inside": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/path-is-inside/-/path-is-inside-1.0.2.tgz", + "integrity": "sha512-DUWJr3+ULp4zXmol/SZkFf3JGsS9/SIv+Y3Rt93/UjPpDpklB5f1er4O3POIbUuUJ3FXgqte2Q7SrU6zAqwk8w==" + }, + "node_modules/path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-parse": { + "version": "1.0.7", + "resolved": 
"https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", + "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==" + }, + "node_modules/path-to-regexp": { + "version": "1.8.0", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-1.8.0.tgz", + "integrity": "sha512-n43JRhlUKUAlibEJhPeir1ncUID16QnEjNpwzNdO3Lm4ywrBpBZ5oLD0I6br9evr1Y9JTqwRtAh7JLoOzAQdVA==", + "dependencies": { + "isarray": "0.0.1" + } + }, + "node_modules/path-type": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz", + "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==", + "engines": { + "node": ">=8" + } + }, + "node_modules/picocolors": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.0.tgz", + "integrity": "sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==" + }, + "node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/pkg-dir": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-4.2.0.tgz", + "integrity": "sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ==", + "dependencies": { + "find-up": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/pkg-up": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/pkg-up/-/pkg-up-3.1.0.tgz", + "integrity": "sha512-nDywThFk1i4BQK4twPQ6TA4RT8bDY96yeuCVBWL3ePARCiEKDRSrNGbFIgUJpLp+XeIR65v8ra7WuJOFUBtkMA==", + "dependencies": { + "find-up": "^3.0.0" + }, + "engines": { + 
"node": ">=8" + } + }, + "node_modules/pkg-up/node_modules/find-up": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", + "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", + "dependencies": { + "locate-path": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/pkg-up/node_modules/locate-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", + "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", + "dependencies": { + "p-locate": "^3.0.0", + "path-exists": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/pkg-up/node_modules/p-locate": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", + "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", + "dependencies": { + "p-limit": "^2.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/pkg-up/node_modules/path-exists": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", + "integrity": "sha512-bpC7GYwiDYQ4wYLe+FA8lhRjhQCMcQGuSgGGqDkg/QerRWw9CmGRT0iSOVRSZJ29NMLZgIzqaljJ63oaL4NIJQ==", + "engines": { + "node": ">=4" + } + }, + "node_modules/postcss": { + "version": "8.4.24", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.24.tgz", + "integrity": "sha512-M0RzbcI0sO/XJNucsGjvWU9ERWxb/ytp1w6dKtxTKgixdtQDq4rmx/g8W1hnaheq9jgwL/oyEdH5Bc4WwJKMqg==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "dependencies": { + "nanoid": "^3.3.6", + "picocolors": "^1.0.0", + "source-map-js": 
"^1.0.2" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "node_modules/postcss-calc": { + "version": "8.2.4", + "resolved": "https://registry.npmjs.org/postcss-calc/-/postcss-calc-8.2.4.tgz", + "integrity": "sha512-SmWMSJmB8MRnnULldx0lQIyhSNvuDl9HfrZkaqqE/WHAhToYsAvDq+yAsA/kIyINDszOp3Rh0GFoNuH5Ypsm3Q==", + "dependencies": { + "postcss-selector-parser": "^6.0.9", + "postcss-value-parser": "^4.2.0" + }, + "peerDependencies": { + "postcss": "^8.2.2" + } + }, + "node_modules/postcss-colormin": { + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/postcss-colormin/-/postcss-colormin-5.3.1.tgz", + "integrity": "sha512-UsWQG0AqTFQmpBegeLLc1+c3jIqBNB0zlDGRWR+dQ3pRKJL1oeMzyqmH3o2PIfn9MBdNrVPWhDbT769LxCTLJQ==", + "dependencies": { + "browserslist": "^4.21.4", + "caniuse-api": "^3.0.0", + "colord": "^2.9.1", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-convert-values": { + "version": "5.1.3", + "resolved": "https://registry.npmjs.org/postcss-convert-values/-/postcss-convert-values-5.1.3.tgz", + "integrity": "sha512-82pC1xkJZtcJEfiLw6UXnXVXScgtBrjlO5CBmuDQc+dlb88ZYheFsjTn40+zBVi3DkfF7iezO0nJUPLcJK3pvA==", + "dependencies": { + "browserslist": "^4.21.4", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-discard-comments": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/postcss-discard-comments/-/postcss-discard-comments-5.1.2.tgz", + "integrity": "sha512-+L8208OVbHVF2UQf1iDmRcbdjJkuBF6IS29yBDSiWUIzpYaAhtNl6JYnYm12FnkeCwQqF5LeklOu6rAqgfBZqQ==", + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-discard-duplicates": { + "version": "5.1.0", + "resolved": 
"https://registry.npmjs.org/postcss-discard-duplicates/-/postcss-discard-duplicates-5.1.0.tgz", + "integrity": "sha512-zmX3IoSI2aoenxHV6C7plngHWWhUOV3sP1T8y2ifzxzbtnuhk1EdPwm0S1bIUNaJ2eNbWeGLEwzw8huPD67aQw==", + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-discard-empty": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/postcss-discard-empty/-/postcss-discard-empty-5.1.1.tgz", + "integrity": "sha512-zPz4WljiSuLWsI0ir4Mcnr4qQQ5e1Ukc3i7UfE2XcrwKK2LIPIqE5jxMRxO6GbI3cv//ztXDsXwEWT3BHOGh3A==", + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-discard-overridden": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/postcss-discard-overridden/-/postcss-discard-overridden-5.1.0.tgz", + "integrity": "sha512-21nOL7RqWR1kasIVdKs8HNqQJhFxLsyRfAnUDm4Fe4t4mCWL9OJiHvlHPjcd8zc5Myu89b/7wZDnOSjFgeWRtw==", + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-discard-unused": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/postcss-discard-unused/-/postcss-discard-unused-5.1.0.tgz", + "integrity": "sha512-KwLWymI9hbwXmJa0dkrzpRbSJEh0vVUd7r8t0yOGPcfKzyJJxFM8kLyC5Ev9avji6nY95pOp1W6HqIrfT+0VGw==", + "dependencies": { + "postcss-selector-parser": "^6.0.5" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-loader": { + "version": "7.3.2", + "resolved": "https://registry.npmjs.org/postcss-loader/-/postcss-loader-7.3.2.tgz", + "integrity": "sha512-c7qDlXErX6n0VT+LUsW+nwefVtTu3ORtVvK8EXuUIDcxo+b/euYqpuHlJAvePb0Af5e8uMjR/13e0lTuYifaig==", + "dependencies": { + "cosmiconfig": "^8.1.3", + "jiti": "^1.18.2", + "klona": "^2.0.6", + "semver": "^7.3.8" + }, + "engines": { + "node": ">= 14.15.0" + }, + "funding": { + 
"type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "postcss": "^7.0.0 || ^8.0.1", + "webpack": "^5.0.0" + } + }, + "node_modules/postcss-loader/node_modules/cosmiconfig": { + "version": "8.2.0", + "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-8.2.0.tgz", + "integrity": "sha512-3rTMnFJA1tCOPwRxtgF4wd7Ab2qvDbL8jX+3smjIbS4HlZBagTlpERbdN7iAbWlrfxE3M8c27kTwTawQ7st+OQ==", + "dependencies": { + "import-fresh": "^3.2.1", + "js-yaml": "^4.1.0", + "parse-json": "^5.0.0", + "path-type": "^4.0.0" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/d-fischer" + } + }, + "node_modules/postcss-merge-idents": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/postcss-merge-idents/-/postcss-merge-idents-5.1.1.tgz", + "integrity": "sha512-pCijL1TREiCoog5nQp7wUe+TUonA2tC2sQ54UGeMmryK3UFGIYKqDyjnqd6RcuI4znFn9hWSLNN8xKE/vWcUQw==", + "dependencies": { + "cssnano-utils": "^3.1.0", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-merge-longhand": { + "version": "5.1.7", + "resolved": "https://registry.npmjs.org/postcss-merge-longhand/-/postcss-merge-longhand-5.1.7.tgz", + "integrity": "sha512-YCI9gZB+PLNskrK0BB3/2OzPnGhPkBEwmwhfYk1ilBHYVAZB7/tkTHFBAnCrvBBOmeYyMYw3DMjT55SyxMBzjQ==", + "dependencies": { + "postcss-value-parser": "^4.2.0", + "stylehacks": "^5.1.1" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-merge-rules": { + "version": "5.1.4", + "resolved": "https://registry.npmjs.org/postcss-merge-rules/-/postcss-merge-rules-5.1.4.tgz", + "integrity": "sha512-0R2IuYpgU93y9lhVbO/OylTtKMVcHb67zjWIfCiKR9rWL3GUk1677LAqD/BcHizukdZEjT8Ru3oHRoAYoJy44g==", + "dependencies": { + "browserslist": "^4.21.4", + "caniuse-api": "^3.0.0", + "cssnano-utils": 
"^3.1.0", + "postcss-selector-parser": "^6.0.5" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-minify-font-values": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/postcss-minify-font-values/-/postcss-minify-font-values-5.1.0.tgz", + "integrity": "sha512-el3mYTgx13ZAPPirSVsHqFzl+BBBDrXvbySvPGFnQcTI4iNslrPaFq4muTkLZmKlGk4gyFAYUBMH30+HurREyA==", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-minify-gradients": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/postcss-minify-gradients/-/postcss-minify-gradients-5.1.1.tgz", + "integrity": "sha512-VGvXMTpCEo4qHTNSa9A0a3D+dxGFZCYwR6Jokk+/3oB6flu2/PnPXAh2x7x52EkY5xlIHLm+Le8tJxe/7TNhzw==", + "dependencies": { + "colord": "^2.9.1", + "cssnano-utils": "^3.1.0", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-minify-params": { + "version": "5.1.4", + "resolved": "https://registry.npmjs.org/postcss-minify-params/-/postcss-minify-params-5.1.4.tgz", + "integrity": "sha512-+mePA3MgdmVmv6g+30rn57USjOGSAyuxUmkfiWpzalZ8aiBkdPYjXWtHuwJGm1v5Ojy0Z0LaSYhHaLJQB0P8Jw==", + "dependencies": { + "browserslist": "^4.21.4", + "cssnano-utils": "^3.1.0", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-minify-selectors": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/postcss-minify-selectors/-/postcss-minify-selectors-5.2.1.tgz", + "integrity": "sha512-nPJu7OjZJTsVUmPdm2TcaiohIwxP+v8ha9NehQ2ye9szv4orirRU3SDdtUmKH+10nzn0bAyOXZ0UEr7OpvLehg==", + "dependencies": { + "postcss-selector-parser": "^6.0.5" + }, + "engines": 
{ + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-modules-extract-imports": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/postcss-modules-extract-imports/-/postcss-modules-extract-imports-3.0.0.tgz", + "integrity": "sha512-bdHleFnP3kZ4NYDhuGlVK+CMrQ/pqUm8bx/oGL93K6gVwiclvX5x0n76fYMKuIGKzlABOy13zsvqjb0f92TEXw==", + "engines": { + "node": "^10 || ^12 || >= 14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/postcss-modules-local-by-default": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/postcss-modules-local-by-default/-/postcss-modules-local-by-default-4.0.3.tgz", + "integrity": "sha512-2/u2zraspoACtrbFRnTijMiQtb4GW4BvatjaG/bCjYQo8kLTdevCUlwuBHx2sCnSyrI3x3qj4ZK1j5LQBgzmwA==", + "dependencies": { + "icss-utils": "^5.0.0", + "postcss-selector-parser": "^6.0.2", + "postcss-value-parser": "^4.1.0" + }, + "engines": { + "node": "^10 || ^12 || >= 14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/postcss-modules-scope": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/postcss-modules-scope/-/postcss-modules-scope-3.0.0.tgz", + "integrity": "sha512-hncihwFA2yPath8oZ15PZqvWGkWf+XUfQgUGamS4LqoP1anQLOsOJw0vr7J7IwLpoY9fatA2qiGUGmuZL0Iqlg==", + "dependencies": { + "postcss-selector-parser": "^6.0.4" + }, + "engines": { + "node": "^10 || ^12 || >= 14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/postcss-modules-values": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/postcss-modules-values/-/postcss-modules-values-4.0.0.tgz", + "integrity": "sha512-RDxHkAiEGI78gS2ofyvCsu7iycRv7oqw5xMWn9iMoR0N/7mf9D50ecQqUo5BZ9Zh2vH4bCUR/ktCqbB9m8vJjQ==", + "dependencies": { + "icss-utils": "^5.0.0" + }, + "engines": { + "node": "^10 || ^12 || >= 14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/postcss-normalize-charset": { + "version": 
"5.1.0", + "resolved": "https://registry.npmjs.org/postcss-normalize-charset/-/postcss-normalize-charset-5.1.0.tgz", + "integrity": "sha512-mSgUJ+pd/ldRGVx26p2wz9dNZ7ji6Pn8VWBajMXFf8jk7vUoSrZ2lt/wZR7DtlZYKesmZI680qjr2CeFF2fbUg==", + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-normalize-display-values": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/postcss-normalize-display-values/-/postcss-normalize-display-values-5.1.0.tgz", + "integrity": "sha512-WP4KIM4o2dazQXWmFaqMmcvsKmhdINFblgSeRgn8BJ6vxaMyaJkwAzpPpuvSIoG/rmX3M+IrRZEz2H0glrQNEA==", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-normalize-positions": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/postcss-normalize-positions/-/postcss-normalize-positions-5.1.1.tgz", + "integrity": "sha512-6UpCb0G4eofTCQLFVuI3EVNZzBNPiIKcA1AKVka+31fTVySphr3VUgAIULBhxZkKgwLImhzMR2Bw1ORK+37INg==", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-normalize-repeat-style": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/postcss-normalize-repeat-style/-/postcss-normalize-repeat-style-5.1.1.tgz", + "integrity": "sha512-mFpLspGWkQtBcWIRFLmewo8aC3ImN2i/J3v8YCFUwDnPu3Xz4rLohDO26lGjwNsQxB3YF0KKRwspGzE2JEuS0g==", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-normalize-string": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/postcss-normalize-string/-/postcss-normalize-string-5.1.0.tgz", + "integrity": 
"sha512-oYiIJOf4T9T1N4i+abeIc7Vgm/xPCGih4bZz5Nm0/ARVJ7K6xrDlLwvwqOydvyL3RHNf8qZk6vo3aatiw/go3w==", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-normalize-timing-functions": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/postcss-normalize-timing-functions/-/postcss-normalize-timing-functions-5.1.0.tgz", + "integrity": "sha512-DOEkzJ4SAXv5xkHl0Wa9cZLF3WCBhF3o1SKVxKQAa+0pYKlueTpCgvkFAHfk+Y64ezX9+nITGrDZeVGgITJXjg==", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-normalize-unicode": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/postcss-normalize-unicode/-/postcss-normalize-unicode-5.1.1.tgz", + "integrity": "sha512-qnCL5jzkNUmKVhZoENp1mJiGNPcsJCs1aaRmURmeJGES23Z/ajaln+EPTD+rBeNkSryI+2WTdW+lwcVdOikrpA==", + "dependencies": { + "browserslist": "^4.21.4", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-normalize-url": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/postcss-normalize-url/-/postcss-normalize-url-5.1.0.tgz", + "integrity": "sha512-5upGeDO+PVthOxSmds43ZeMeZfKH+/DKgGRD7TElkkyS46JXAUhMzIKiCa7BabPeIy3AQcTkXwVVN7DbqsiCew==", + "dependencies": { + "normalize-url": "^6.0.1", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-normalize-whitespace": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/postcss-normalize-whitespace/-/postcss-normalize-whitespace-5.1.1.tgz", + "integrity": 
"sha512-83ZJ4t3NUDETIHTa3uEg6asWjSBYL5EdkVB0sDncx9ERzOKBVJIUeDO9RyA9Zwtig8El1d79HBp0JEi8wvGQnA==", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-ordered-values": { + "version": "5.1.3", + "resolved": "https://registry.npmjs.org/postcss-ordered-values/-/postcss-ordered-values-5.1.3.tgz", + "integrity": "sha512-9UO79VUhPwEkzbb3RNpqqghc6lcYej1aveQteWY+4POIwlqkYE21HKWaLDF6lWNuqCobEAyTovVhtI32Rbv2RQ==", + "dependencies": { + "cssnano-utils": "^3.1.0", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-reduce-idents": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/postcss-reduce-idents/-/postcss-reduce-idents-5.2.0.tgz", + "integrity": "sha512-BTrLjICoSB6gxbc58D5mdBK8OhXRDqud/zodYfdSi52qvDHdMwk+9kB9xsM8yJThH/sZU5A6QVSmMmaN001gIg==", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-reduce-initial": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/postcss-reduce-initial/-/postcss-reduce-initial-5.1.2.tgz", + "integrity": "sha512-dE/y2XRaqAi6OvjzD22pjTUQ8eOfc6m/natGHgKFBK9DxFmIm69YmaRVQrGgFlEfc1HePIurY0TmDeROK05rIg==", + "dependencies": { + "browserslist": "^4.21.4", + "caniuse-api": "^3.0.0" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-reduce-transforms": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/postcss-reduce-transforms/-/postcss-reduce-transforms-5.1.0.tgz", + "integrity": "sha512-2fbdbmgir5AvpW9RLtdONx1QoYG2/EtqpNQbFASDlixBbAYuTcJ0dECwlqNqH7VbaUnEnh8SrxOe2sRIn24XyQ==", + "dependencies": { + "postcss-value-parser": 
"^4.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-selector-parser": { + "version": "6.0.13", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.0.13.tgz", + "integrity": "sha512-EaV1Gl4mUEV4ddhDnv/xtj7sxwrwxdetHdWUGnT4VJQf+4d05v6lHYZr8N573k5Z0BViss7BDhfWtKS3+sfAqQ==", + "dependencies": { + "cssesc": "^3.0.0", + "util-deprecate": "^1.0.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/postcss-sort-media-queries": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/postcss-sort-media-queries/-/postcss-sort-media-queries-4.4.1.tgz", + "integrity": "sha512-QDESFzDDGKgpiIh4GYXsSy6sek2yAwQx1JASl5AxBtU1Lq2JfKBljIPNdil989NcSKRQX1ToiaKphImtBuhXWw==", + "dependencies": { + "sort-css-media-queries": "2.1.0" + }, + "engines": { + "node": ">=10.0.0" + }, + "peerDependencies": { + "postcss": "^8.4.16" + } + }, + "node_modules/postcss-svgo": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/postcss-svgo/-/postcss-svgo-5.1.0.tgz", + "integrity": "sha512-D75KsH1zm5ZrHyxPakAxJWtkyXew5qwS70v56exwvw542d9CRtTo78K0WeFxZB4G7JXKKMbEZtZayTGdIky/eA==", + "dependencies": { + "postcss-value-parser": "^4.2.0", + "svgo": "^2.7.0" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-unique-selectors": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/postcss-unique-selectors/-/postcss-unique-selectors-5.1.1.tgz", + "integrity": "sha512-5JiODlELrz8L2HwxfPnhOWZYWDxVHWL83ufOv84NrcgipI7TaeRsatAhK4Tr2/ZiYldpK/wBvw5BD3qfaK96GA==", + "dependencies": { + "postcss-selector-parser": "^6.0.5" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-value-parser": { + "version": "4.2.0", + "resolved": 
"https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.2.0.tgz", + "integrity": "sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ==" + }, + "node_modules/postcss-zindex": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/postcss-zindex/-/postcss-zindex-5.1.0.tgz", + "integrity": "sha512-fgFMf0OtVSBR1va1JNHYgMxYk73yhn/qb4uQDq1DLGYolz8gHCyr/sesEuGUaYs58E3ZJRcpoGuPVoB7Meiq9A==", + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/prepend-http": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/prepend-http/-/prepend-http-2.0.0.tgz", + "integrity": "sha512-ravE6m9Atw9Z/jjttRUZ+clIXogdghyZAuWJ3qEzjT+jI/dL1ifAqhZeC5VHzQp1MSt1+jxKkFNemj/iO7tVUA==", + "engines": { + "node": ">=4" + } + }, + "node_modules/pretty-error": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/pretty-error/-/pretty-error-4.0.0.tgz", + "integrity": "sha512-AoJ5YMAcXKYxKhuJGdcvse+Voc6v1RgnsR3nWcYU7q4t6z0Q6T86sv5Zq8VIRbOWWFpvdGE83LtdSMNd+6Y0xw==", + "dependencies": { + "lodash": "^4.17.20", + "renderkid": "^3.0.0" + } + }, + "node_modules/pretty-time": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/pretty-time/-/pretty-time-1.1.0.tgz", + "integrity": "sha512-28iF6xPQrP8Oa6uxE6a1biz+lWeTOAPKggvjB8HAs6nVMKZwf5bG++632Dx614hIWgUPkgivRfG+a8uAXGTIbA==", + "engines": { + "node": ">=4" + } + }, + "node_modules/process": { + "version": "0.11.10", + "resolved": "https://registry.npmjs.org/process/-/process-0.11.10.tgz", + "integrity": "sha512-cdGef/drWFoydD1JsMzuFf8100nZl+GT+yacc2bEced5f9Rjk4z+WtFUTBu9PhOi9j/jfmBPu0mMEY4wIdAF8A==", + "engines": { + "node": ">= 0.6.0" + } + }, + "node_modules/process-nextick-args": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz", + "integrity": 
"sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==" + }, + "node_modules/prompts": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/prompts/-/prompts-2.4.2.tgz", + "integrity": "sha512-NxNv/kLguCA7p3jE8oL2aEBsrJWgAakBpgmgK6lpPWV+WuOmY6r2/zbAVnP+T8bQlA0nzHXSJSJW0Hq7ylaD2Q==", + "dependencies": { + "kleur": "^3.0.3", + "sisteransi": "^1.0.5" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/prop-types": { + "version": "15.8.1", + "resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.8.1.tgz", + "integrity": "sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg==", + "dependencies": { + "loose-envify": "^1.4.0", + "object-assign": "^4.1.1", + "react-is": "^16.13.1" + } + }, + "node_modules/property-expr": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/property-expr/-/property-expr-2.0.5.tgz", + "integrity": "sha512-IJUkICM5dP5znhCckHSv30Q4b5/JA5enCtkRHYaOVOAocnH/1BQEYTC5NMfT3AVl/iXKdr3aqQbQn9DxyWknwA==" + }, + "node_modules/property-information": { + "version": "5.6.0", + "resolved": "https://registry.npmjs.org/property-information/-/property-information-5.6.0.tgz", + "integrity": "sha512-YUHSPk+A30YPv+0Qf8i9Mbfe/C0hdPXk1s1jPVToV8pk8BQtpw10ct89Eo7OWkutrwqvT0eicAxlOg3dOAu8JA==", + "dependencies": { + "xtend": "^4.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/proxy-addr": { + "version": "2.0.7", + "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.7.tgz", + "integrity": "sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==", + "dependencies": { + "forwarded": "0.2.0", + "ipaddr.js": "1.9.1" + }, + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/proxy-addr/node_modules/ipaddr.js": { + "version": "1.9.1", + "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz", + 
"integrity": "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==", + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/pump": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/pump/-/pump-3.0.0.tgz", + "integrity": "sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww==", + "dependencies": { + "end-of-stream": "^1.1.0", + "once": "^1.3.1" + } + }, + "node_modules/punycode": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-1.4.1.tgz", + "integrity": "sha512-jmYNElW7yvO7TV33CjSmvSiE2yco3bV2czu/OzDKdMNVZQWfxCblURLhf+47syQRBntjfLdd/H0egrzIG+oaFQ==" + }, + "node_modules/pupa": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/pupa/-/pupa-2.1.1.tgz", + "integrity": "sha512-l1jNAspIBSFqbT+y+5FosojNpVpF94nlI+wDUpqP9enwOTfHx9f0gh5nB96vl+6yTpsJsypeNrwfzPrKuHB41A==", + "dependencies": { + "escape-goat": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/qs": { + "version": "6.11.0", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.11.0.tgz", + "integrity": "sha512-MvjoMCJwEarSbUYk5O+nmoSzSutSsTwF85zcHPQ9OrlFoZOYIjaqBAJIqIXjptyD5vThxGq52Xu/MaJzRkIk4Q==", + "dependencies": { + "side-channel": "^1.0.4" + }, + "engines": { + "node": ">=0.6" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/queue": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/queue/-/queue-6.0.2.tgz", + "integrity": "sha512-iHZWu+q3IdFZFX36ro/lKBkSvfkztY5Y7HMiPlOUjhupPcG2JMfst2KKEpu5XndviX/3UhFbRngUPNKtgvtZiA==", + "dependencies": { + "inherits": "~2.0.3" + } + }, + "node_modules/queue-microtask": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", + "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==", + "funding": [ + { + "type": "github", + "url": 
"https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/randombytes": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz", + "integrity": "sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==", + "dependencies": { + "safe-buffer": "^5.1.0" + } + }, + "node_modules/range-parser": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.0.tgz", + "integrity": "sha512-kA5WQoNVo4t9lNx2kQNFCxKeBl5IbbSNBl1M/tLkw9WCn+hxNBAW5Qh8gdhs63CJnhjJ2zQWFoqPJP2sK1AV5A==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/raw-body": { + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.1.tgz", + "integrity": "sha512-qqJBtEyVgS0ZmPGdCFPWJ3FreoqvG4MVQln/kCgF7Olq95IbOp0/BWyMwbdtn4VTvkM8Y7khCQ2Xgk/tcrCXig==", + "dependencies": { + "bytes": "3.1.2", + "http-errors": "2.0.0", + "iconv-lite": "0.4.24", + "unpipe": "1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/raw-body/node_modules/bytes": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz", + "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/rc": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/rc/-/rc-1.2.8.tgz", + "integrity": "sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw==", + "dependencies": { + "deep-extend": "^0.6.0", + "ini": "~1.3.0", + "minimist": "^1.2.0", + "strip-json-comments": "~2.0.1" + }, + "bin": { + "rc": "cli.js" + } + }, + "node_modules/react": { + "version": "17.0.2", + "resolved": "https://registry.npmjs.org/react/-/react-17.0.2.tgz", + 
"integrity": "sha512-gnhPt75i/dq/z3/6q/0asP78D0u592D5L1pd7M8P+dck6Fu/jJeL6iVVK23fptSUZj8Vjf++7wXA8UNclGQcbA==", + "peer": true, + "dependencies": { + "loose-envify": "^1.1.0", + "object-assign": "^4.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/react-dev-utils": { + "version": "12.0.1", + "resolved": "https://registry.npmjs.org/react-dev-utils/-/react-dev-utils-12.0.1.tgz", + "integrity": "sha512-84Ivxmr17KjUupyqzFode6xKhjwuEJDROWKJy/BthkL7Wn6NJ8h4WE6k/exAv6ImS+0oZLRRW5j/aINMHyeGeQ==", + "dependencies": { + "@babel/code-frame": "^7.16.0", + "address": "^1.1.2", + "browserslist": "^4.18.1", + "chalk": "^4.1.2", + "cross-spawn": "^7.0.3", + "detect-port-alt": "^1.1.6", + "escape-string-regexp": "^4.0.0", + "filesize": "^8.0.6", + "find-up": "^5.0.0", + "fork-ts-checker-webpack-plugin": "^6.5.0", + "global-modules": "^2.0.0", + "globby": "^11.0.4", + "gzip-size": "^6.0.0", + "immer": "^9.0.7", + "is-root": "^2.1.0", + "loader-utils": "^3.2.0", + "open": "^8.4.0", + "pkg-up": "^3.1.0", + "prompts": "^2.4.2", + "react-error-overlay": "^6.0.11", + "recursive-readdir": "^2.2.2", + "shell-quote": "^1.7.3", + "strip-ansi": "^6.0.1", + "text-table": "^0.2.0" + }, + "engines": { + "node": ">=14" + } + }, + "node_modules/react-dev-utils/node_modules/find-up": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", + "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", + "dependencies": { + "locate-path": "^6.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/react-dev-utils/node_modules/loader-utils": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-3.2.1.tgz", + "integrity": "sha512-ZvFw1KWS3GVyYBYb7qkmRM/WwL2TQQBxgCK62rlvm4WpVQ23Nb4tYjApUlfjrEGvOs7KHEsmyUn75OHZrJMWPw==", + "engines": { + "node": 
">= 12.13.0" + } + }, + "node_modules/react-dev-utils/node_modules/locate-path": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", + "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", + "dependencies": { + "p-locate": "^5.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/react-dev-utils/node_modules/p-limit": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", + "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", + "dependencies": { + "yocto-queue": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/react-dev-utils/node_modules/p-locate": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", + "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", + "dependencies": { + "p-limit": "^3.0.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/react-dom": { + "version": "17.0.2", + "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-17.0.2.tgz", + "integrity": "sha512-s4h96KtLDUQlsENhMn1ar8t2bEa+q/YAtj8pPPdIjPDGBDIVNsrD9aXNWqspUe6AzKCIG0C1HZZLqLV7qpOBGA==", + "peer": true, + "dependencies": { + "loose-envify": "^1.1.0", + "object-assign": "^4.1.1", + "scheduler": "^0.20.2" + }, + "peerDependencies": { + "react": "17.0.2" + } + }, + "node_modules/react-error-overlay": { + "version": "6.0.11", + "resolved": "https://registry.npmjs.org/react-error-overlay/-/react-error-overlay-6.0.11.tgz", + "integrity": 
"sha512-/6UZ2qgEyH2aqzYZgQPxEnz33NJ2gNsnHA2o5+o4wW9bLM/JYQitNP9xPhsXwC08hMMovfGe/8retsdDsczPRg==" + }, + "node_modules/react-fast-compare": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/react-fast-compare/-/react-fast-compare-3.2.2.tgz", + "integrity": "sha512-nsO+KSNgo1SbJqJEYRE9ERzo7YtYbou/OqjSQKxV7jcKox7+usiUVZOAC+XnDOABXggQTno0Y1CpVnuWEc1boQ==" + }, + "node_modules/react-helmet-async": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/react-helmet-async/-/react-helmet-async-1.3.0.tgz", + "integrity": "sha512-9jZ57/dAn9t3q6hneQS0wukqC2ENOBgMNVEhb/ZG9ZSxUetzVIw4iAmEU38IaVg3QGYauQPhSeUTuIUtFglWpg==", + "dependencies": { + "@babel/runtime": "^7.12.5", + "invariant": "^2.2.4", + "prop-types": "^15.7.2", + "react-fast-compare": "^3.2.0", + "shallowequal": "^1.1.0" + }, + "peerDependencies": { + "react": "^16.6.0 || ^17.0.0 || ^18.0.0", + "react-dom": "^16.6.0 || ^17.0.0 || ^18.0.0" + } + }, + "node_modules/react-is": { + "version": "16.13.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz", + "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==" + }, + "node_modules/react-loadable": { + "name": "@docusaurus/react-loadable", + "version": "5.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/react-loadable/-/react-loadable-5.5.2.tgz", + "integrity": "sha512-A3dYjdBGuy0IGT+wyLIGIKLRE+sAk1iNk0f1HjNDysO7u8lhL4N3VEm+FAubmJbAztn94F7MxBTPmnixbiyFdQ==", + "dependencies": { + "@types/react": "*", + "prop-types": "^15.6.2" + }, + "peerDependencies": { + "react": "*" + } + }, + "node_modules/react-loadable-ssr-addon-v5-slorber": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/react-loadable-ssr-addon-v5-slorber/-/react-loadable-ssr-addon-v5-slorber-1.0.1.tgz", + "integrity": "sha512-lq3Lyw1lGku8zUEJPDxsNm1AfYHBrO9Y1+olAYwpUJ2IGFBskM0DMKok97A6LWUpHm+o7IvQBOWu9MLenp9Z+A==", + "dependencies": { + "@babel/runtime": "^7.10.3" + }, + 
"engines": { + "node": ">=10.13.0" + }, + "peerDependencies": { + "react-loadable": "*", + "webpack": ">=4.41.1 || 5.x" + } + }, + "node_modules/react-router": { + "version": "5.3.4", + "resolved": "https://registry.npmjs.org/react-router/-/react-router-5.3.4.tgz", + "integrity": "sha512-Ys9K+ppnJah3QuaRiLxk+jDWOR1MekYQrlytiXxC1RyfbdsZkS5pvKAzCCr031xHixZwpnsYNT5xysdFHQaYsA==", + "dependencies": { + "@babel/runtime": "^7.12.13", + "history": "^4.9.0", + "hoist-non-react-statics": "^3.1.0", + "loose-envify": "^1.3.1", + "path-to-regexp": "^1.7.0", + "prop-types": "^15.6.2", + "react-is": "^16.6.0", + "tiny-invariant": "^1.0.2", + "tiny-warning": "^1.0.0" + }, + "peerDependencies": { + "react": ">=15" + } + }, + "node_modules/react-router-config": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/react-router-config/-/react-router-config-5.1.1.tgz", + "integrity": "sha512-DuanZjaD8mQp1ppHjgnnUnyOlqYXZVjnov/JzFhjLEwd3Z4dYjMSnqrEzzGThH47vpCOqPPwJM2FtthLeJ8Pbg==", + "dependencies": { + "@babel/runtime": "^7.1.2" + }, + "peerDependencies": { + "react": ">=15", + "react-router": ">=5" + } + }, + "node_modules/react-router-dom": { + "version": "5.3.4", + "resolved": "https://registry.npmjs.org/react-router-dom/-/react-router-dom-5.3.4.tgz", + "integrity": "sha512-m4EqFMHv/Ih4kpcBCONHbkT68KoAeHN4p3lAGoNryfHi0dMy0kCzEZakiKRsvg5wHZ/JLrLW8o8KomWiz/qbYQ==", + "dependencies": { + "@babel/runtime": "^7.12.13", + "history": "^4.9.0", + "loose-envify": "^1.3.1", + "prop-types": "^15.6.2", + "react-router": "5.3.4", + "tiny-invariant": "^1.0.2", + "tiny-warning": "^1.0.0" + }, + "peerDependencies": { + "react": ">=15" + } + }, + "node_modules/readable-stream": { + "version": "3.6.2", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz", + "integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==", + "dependencies": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + 
"util-deprecate": "^1.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/readdir-enhanced": { + "version": "1.5.2", + "resolved": "https://registry.npmjs.org/readdir-enhanced/-/readdir-enhanced-1.5.2.tgz", + "integrity": "sha512-oncAoS9LLjy/+DeZfSAdZBI/iFJGcPCOp44RPFI6FIMHuxt5CC5P0cUZ9mET+EZB9ONhcEvAids/lVRkj0sTHw==", + "dependencies": { + "call-me-maybe": "^1.0.1", + "es6-promise": "^4.1.0", + "glob-to-regexp": "^0.3.0" + } + }, + "node_modules/readdirp": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", + "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", + "dependencies": { + "picomatch": "^2.2.1" + }, + "engines": { + "node": ">=8.10.0" + } + }, + "node_modules/rechoir": { + "version": "0.6.2", + "resolved": "https://registry.npmjs.org/rechoir/-/rechoir-0.6.2.tgz", + "integrity": "sha512-HFM8rkZ+i3zrV+4LQjwQ0W+ez98pApMGM3HUrN04j3CqzPOzl9nmP15Y8YXNm8QHGv/eacOVEjqhmWpkRV0NAw==", + "dependencies": { + "resolve": "^1.1.6" + }, + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/recursive-readdir": { + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/recursive-readdir/-/recursive-readdir-2.2.3.tgz", + "integrity": "sha512-8HrF5ZsXk5FAH9dgsx3BlUer73nIhuj+9OrQwEbLTPOBzGkL1lsFCR01am+v+0m2Cmbs1nP12hLDl5FA7EszKA==", + "dependencies": { + "minimatch": "^3.0.5" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/regenerate": { + "version": "1.4.2", + "resolved": "https://registry.npmjs.org/regenerate/-/regenerate-1.4.2.tgz", + "integrity": "sha512-zrceR/XhGYU/d/opr2EKO7aRHUeiBI8qjtfHqADTwZd6Szfy16la6kqD0MIUs5z5hx6AaKa+PixpPrR289+I0A==" + }, + "node_modules/regenerate-unicode-properties": { + "version": "10.1.0", + "resolved": "https://registry.npmjs.org/regenerate-unicode-properties/-/regenerate-unicode-properties-10.1.0.tgz", + "integrity": 
"sha512-d1VudCLoIGitcU/hEg2QqvyGZQmdC0Lf8BqdOMXGFSvJP4bNV1+XqbPQeHHLD51Jh4QJJ225dlIFvY4Ly6MXmQ==", + "dependencies": { + "regenerate": "^1.4.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/regenerator-runtime": { + "version": "0.13.11", + "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.13.11.tgz", + "integrity": "sha512-kY1AZVr2Ra+t+piVaJ4gxaFaReZVH40AKNo7UCX6W+dEwBo/2oZJzqfuN1qLq1oL45o56cPaTXELwrTh8Fpggg==" + }, + "node_modules/regenerator-transform": { + "version": "0.15.1", + "resolved": "https://registry.npmjs.org/regenerator-transform/-/regenerator-transform-0.15.1.tgz", + "integrity": "sha512-knzmNAcuyxV+gQCufkYcvOqX/qIIfHLv0u5x79kRxuGojfYVky1f15TzZEu2Avte8QGepvUNTnLskf8E6X6Vyg==", + "dependencies": { + "@babel/runtime": "^7.8.4" + } + }, + "node_modules/regexpu-core": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/regexpu-core/-/regexpu-core-5.3.2.tgz", + "integrity": "sha512-RAM5FlZz+Lhmo7db9L298p2vHP5ZywrVXmVXpmAD9GuL5MPH6t9ROw1iA/wfHkQ76Qe7AaPF0nGuim96/IrQMQ==", + "dependencies": { + "@babel/regjsgen": "^0.8.0", + "regenerate": "^1.4.2", + "regenerate-unicode-properties": "^10.1.0", + "regjsparser": "^0.9.1", + "unicode-match-property-ecmascript": "^2.0.0", + "unicode-match-property-value-ecmascript": "^2.1.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/registry-auth-token": { + "version": "4.2.2", + "resolved": "https://registry.npmjs.org/registry-auth-token/-/registry-auth-token-4.2.2.tgz", + "integrity": "sha512-PC5ZysNb42zpFME6D/XlIgtNGdTl8bBOCw90xQLVMpzuuubJKYDWFAEuUNc+Cn8Z8724tg2SDhDRrkVEsqfDMg==", + "dependencies": { + "rc": "1.2.8" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/registry-url": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/registry-url/-/registry-url-5.1.0.tgz", + "integrity": "sha512-8acYXXTI0AkQv6RAOjE3vOaIXZkT9wo4LOFbBKYQEEnnMNBpKqdUrI6S4NT0KPIo/WVvJ5tE/X5LF/TQUf0ekw==", + "dependencies": { + "rc": 
"^1.2.8" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/regjsparser": { + "version": "0.9.1", + "resolved": "https://registry.npmjs.org/regjsparser/-/regjsparser-0.9.1.tgz", + "integrity": "sha512-dQUtn90WanSNl+7mQKcXAgZxvUe7Z0SqXlgzv0za4LwiUhyzBC58yQO3liFoUgu8GiJVInAhJjkj1N0EtQ5nkQ==", + "dependencies": { + "jsesc": "~0.5.0" + }, + "bin": { + "regjsparser": "bin/parser" + } + }, + "node_modules/regjsparser/node_modules/jsesc": { + "version": "0.5.0", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-0.5.0.tgz", + "integrity": "sha512-uZz5UnB7u4T9LvwmFqXii7pZSouaRPorGs5who1Ip7VO0wxanFvBL7GkM6dTHlgX+jhBApRetaWpnDabOeTcnA==", + "bin": { + "jsesc": "bin/jsesc" + } + }, + "node_modules/relateurl": { + "version": "0.2.7", + "resolved": "https://registry.npmjs.org/relateurl/-/relateurl-0.2.7.tgz", + "integrity": "sha512-G08Dxvm4iDN3MLM0EsP62EDV9IuhXPR6blNz6Utcp7zyV3tr4HVNINt6MpaRWbxoOHT3Q7YN2P+jaHX8vUbgog==", + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/remark-emoji": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/remark-emoji/-/remark-emoji-2.2.0.tgz", + "integrity": "sha512-P3cj9s5ggsUvWw5fS2uzCHJMGuXYRb0NnZqYlNecewXt8QBU9n5vW3DUUKOhepS8F9CwdMx9B8a3i7pqFWAI5w==", + "dependencies": { + "emoticon": "^3.2.0", + "node-emoji": "^1.10.0", + "unist-util-visit": "^2.0.3" + } + }, + "node_modules/remark-footnotes": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/remark-footnotes/-/remark-footnotes-2.0.0.tgz", + "integrity": "sha512-3Clt8ZMH75Ayjp9q4CorNeyjwIxHFcTkaektplKGl2A1jNGEUey8cKL0ZC5vJwfcD5GFGsNLImLG/NGzWIzoMQ==", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-mdx": { + "version": "1.6.22", + "resolved": "https://registry.npmjs.org/remark-mdx/-/remark-mdx-1.6.22.tgz", + "integrity": "sha512-phMHBJgeV76uyFkH4rvzCftLfKCr2RZuF+/gmVcaKrpsihyzmhXjA0BEMDaPTXG5y8qZOKPVo83NAOX01LPnOQ==", + "dependencies": { + "@babel/core": "7.12.9", + 
"@babel/helper-plugin-utils": "7.10.4", + "@babel/plugin-proposal-object-rest-spread": "7.12.1", + "@babel/plugin-syntax-jsx": "7.12.1", + "@mdx-js/util": "1.6.22", + "is-alphabetical": "1.0.4", + "remark-parse": "8.0.3", + "unified": "9.2.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-mdx/node_modules/@babel/core": { + "version": "7.12.9", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.12.9.tgz", + "integrity": "sha512-gTXYh3M5wb7FRXQy+FErKFAv90BnlOuNn1QkCK2lREoPAjrQCO49+HVSrFoe5uakFAF5eenS75KbO2vQiLrTMQ==", + "dependencies": { + "@babel/code-frame": "^7.10.4", + "@babel/generator": "^7.12.5", + "@babel/helper-module-transforms": "^7.12.1", + "@babel/helpers": "^7.12.5", + "@babel/parser": "^7.12.7", + "@babel/template": "^7.12.7", + "@babel/traverse": "^7.12.9", + "@babel/types": "^7.12.7", + "convert-source-map": "^1.7.0", + "debug": "^4.1.0", + "gensync": "^1.0.0-beta.1", + "json5": "^2.1.2", + "lodash": "^4.17.19", + "resolve": "^1.3.2", + "semver": "^5.4.1", + "source-map": "^0.5.0" + }, + "engines": { + "node": ">=6.9.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/babel" + } + }, + "node_modules/remark-mdx/node_modules/@babel/helper-plugin-utils": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.10.4.tgz", + "integrity": "sha512-O4KCvQA6lLiMU9l2eawBPMf1xPP8xPfB3iEQw150hOVTqj/rfXz0ThTb4HEzqQfs2Bmo5Ay8BzxfzVtBrr9dVg==" + }, + "node_modules/remark-mdx/node_modules/@babel/plugin-syntax-jsx": { + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.12.1.tgz", + "integrity": "sha512-1yRi7yAtB0ETgxdY9ti/p2TivUxJkTdhu/ZbF9MshVGqOx1TdB3b7xCXs49Fupgg50N45KcAsRP/ZqWjs9SRjg==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.10.4" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + 
"node_modules/remark-mdx/node_modules/semver": { + "version": "5.7.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", + "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==", + "bin": { + "semver": "bin/semver" + } + }, + "node_modules/remark-mdx/node_modules/source-map": { + "version": "0.5.7", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz", + "integrity": "sha512-LbrmJOMUSdEVxIKvdcJzQC+nQhe8FUZQTXQy6+I75skNgn3OoQ0DZA8YnFa7gp8tqtL3KPf1kmo0R5DoApeSGQ==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/remark-mdx/node_modules/unified": { + "version": "9.2.0", + "resolved": "https://registry.npmjs.org/unified/-/unified-9.2.0.tgz", + "integrity": "sha512-vx2Z0vY+a3YoTj8+pttM3tiJHCwY5UFbYdiWrwBEbHmK8pvsPj2rtAX2BFfgXen8T39CJWblWRDT4L5WGXtDdg==", + "dependencies": { + "bail": "^1.0.0", + "extend": "^3.0.0", + "is-buffer": "^2.0.0", + "is-plain-obj": "^2.0.0", + "trough": "^1.0.0", + "vfile": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-parse": { + "version": "8.0.3", + "resolved": "https://registry.npmjs.org/remark-parse/-/remark-parse-8.0.3.tgz", + "integrity": "sha512-E1K9+QLGgggHxCQtLt++uXltxEprmWzNfg+MxpfHsZlrddKzZ/hZyWHDbK3/Ap8HJQqYJRXP+jHczdL6q6i85Q==", + "dependencies": { + "ccount": "^1.0.0", + "collapse-white-space": "^1.0.2", + "is-alphabetical": "^1.0.0", + "is-decimal": "^1.0.0", + "is-whitespace-character": "^1.0.0", + "is-word-character": "^1.0.0", + "markdown-escapes": "^1.0.0", + "parse-entities": "^2.0.0", + "repeat-string": "^1.5.4", + "state-toggle": "^1.0.0", + "trim": "0.0.1", + "trim-trailing-lines": "^1.0.0", + "unherit": "^1.0.4", + "unist-util-remove-position": "^2.0.0", + "vfile-location": "^3.0.0", + "xtend": "^4.0.1" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + 
"node_modules/remark-squeeze-paragraphs": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/remark-squeeze-paragraphs/-/remark-squeeze-paragraphs-4.0.0.tgz", + "integrity": "sha512-8qRqmL9F4nuLPIgl92XUuxI3pFxize+F1H0e/W3llTk0UsjJaj01+RrirkMw7P21RKe4X6goQhYRSvNWX+70Rw==", + "dependencies": { + "mdast-squeeze-paragraphs": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/renderkid": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/renderkid/-/renderkid-3.0.0.tgz", + "integrity": "sha512-q/7VIQA8lmM1hF+jn+sFSPWGlMkSAeNYcPLmDQx2zzuiDfaLrOmumR8iaUKlenFgh0XRPIUeSPlH3A+AW3Z5pg==", + "dependencies": { + "css-select": "^4.1.3", + "dom-converter": "^0.2.0", + "htmlparser2": "^6.1.0", + "lodash": "^4.17.21", + "strip-ansi": "^6.0.1" + } + }, + "node_modules/repeat-string": { + "version": "1.6.1", + "resolved": "https://registry.npmjs.org/repeat-string/-/repeat-string-1.6.1.tgz", + "integrity": "sha512-PV0dzCYDNfRi1jCDbJzpW7jNNDRuCOG/jI5ctQcGKt/clZD+YcPS3yIlWuTJMmESC8aevCFmWJy5wjAFgNqN6w==", + "engines": { + "node": ">=0.10" + } + }, + "node_modules/require-from-string": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz", + "integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/require-like": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/require-like/-/require-like-0.1.2.tgz", + "integrity": "sha512-oyrU88skkMtDdauHDuKVrgR+zuItqr6/c//FXzvmxRGMexSDc6hNvJInGW3LL46n+8b50RykrvwSUIIQH2LQ5A==", + "engines": { + "node": "*" + } + }, + "node_modules/requires-port": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz", + "integrity": 
"sha512-KigOCHcocU3XODJxsu8i/j8T9tzT4adHiecwORRQ0ZZFcp7ahwXuRU1m+yuO90C5ZUyGeGfocHDI14M3L3yDAQ==" + }, + "node_modules/resolve": { + "version": "1.22.2", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.2.tgz", + "integrity": "sha512-Sb+mjNHOULsBv818T40qSPeRiuWLyaGMa5ewydRLFimneixmVy2zdivRl+AF6jaYPC8ERxGDmFSiqui6SfPd+g==", + "dependencies": { + "is-core-module": "^2.11.0", + "path-parse": "^1.0.7", + "supports-preserve-symlinks-flag": "^1.0.0" + }, + "bin": { + "resolve": "bin/resolve" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/resolve-from": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", + "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", + "engines": { + "node": ">=4" + } + }, + "node_modules/resolve-pathname": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/resolve-pathname/-/resolve-pathname-3.0.0.tgz", + "integrity": "sha512-C7rARubxI8bXFNB/hqcp/4iUeIXJhJZvFPFPiSPRnhU5UPxzMFIl+2E6yY6c4k9giDJAhtV+enfA+G89N6Csng==" + }, + "node_modules/responselike": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/responselike/-/responselike-1.0.2.tgz", + "integrity": "sha512-/Fpe5guzJk1gPqdJLJR5u7eG/gNY4nImjbRDaVWVMRhne55TCmj2i9Q+54PBRfatRC8v/rIiv9BN0pMd9OV5EQ==", + "dependencies": { + "lowercase-keys": "^1.0.0" + } + }, + "node_modules/retry": { + "version": "0.13.1", + "resolved": "https://registry.npmjs.org/retry/-/retry-0.13.1.tgz", + "integrity": "sha512-XQBQ3I8W1Cge0Seh+6gjj03LbmRFWuoszgK9ooCpwYIrhhoO80pfq4cUkU5DkknwfOfFteRwlZ56PYOGYyFWdg==", + "engines": { + "node": ">= 4" + } + }, + "node_modules/reusify": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz", + "integrity": "sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==", + "engines": { + "iojs": ">=1.0.0", + 
"node": ">=0.10.0" + } + }, + "node_modules/rimraf": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", + "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", + "dependencies": { + "glob": "^7.1.3" + }, + "bin": { + "rimraf": "bin.js" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/rtl-detect": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/rtl-detect/-/rtl-detect-1.0.4.tgz", + "integrity": "sha512-EBR4I2VDSSYr7PkBmFy04uhycIpDKp+21p/jARYXlCSjQksTBQcJ0HFUPOO79EPPH5JS6VAhiIQbycf0O3JAxQ==" + }, + "node_modules/run-parallel": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", + "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "dependencies": { + "queue-microtask": "^1.2.2" + } + }, + "node_modules/rxjs": { + "version": "7.8.1", + "resolved": "https://registry.npmjs.org/rxjs/-/rxjs-7.8.1.tgz", + "integrity": "sha512-AA3TVj+0A2iuIoQkWEK/tqFjBq2j+6PO6Y0zJcvzLAFhEFIO3HL0vls9hWLncZbAAbK0mar7oZ4V079I/qPMxg==", + "dependencies": { + "tslib": "^2.1.0" + } + }, + "node_modules/safe-buffer": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + 
"node_modules/safer-buffer": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", + "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==" + }, + "node_modules/scheduler": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.20.2.tgz", + "integrity": "sha512-2eWfGgAqqWFGqtdMmcL5zCMK1U8KlXv8SQFGglL3CEtd0aDVDWgeF/YoCmvln55m5zSk3J/20hTaSBeSObsQDQ==", + "peer": true, + "dependencies": { + "loose-envify": "^1.1.0", + "object-assign": "^4.1.1" + } + }, + "node_modules/schema-utils": { + "version": "2.7.1", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-2.7.1.tgz", + "integrity": "sha512-SHiNtMOUGWBQJwzISiVYKu82GiV4QYGePp3odlY1tuKO7gPtphAT5R/py0fA6xtbgLL/RvtJZnU9b8s0F1q0Xg==", + "dependencies": { + "@types/json-schema": "^7.0.5", + "ajv": "^6.12.4", + "ajv-keywords": "^3.5.2" + }, + "engines": { + "node": ">= 8.9.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/section-matter": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/section-matter/-/section-matter-1.0.0.tgz", + "integrity": "sha512-vfD3pmTzGpufjScBh50YHKzEu2lxBWhVEHsNGoEXmCmn2hKGfeNLYMzCJpe8cD7gqX7TJluOVpBkAequ6dgMmA==", + "dependencies": { + "extend-shallow": "^2.0.1", + "kind-of": "^6.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/select-hose": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/select-hose/-/select-hose-2.0.0.tgz", + "integrity": "sha512-mEugaLK+YfkijB4fx0e6kImuJdCIt2LxCRcbEYPqRGCs4F2ogyfZU5IAZRdjCP8JPq2AtdNoC/Dux63d9Kiryg==" + }, + "node_modules/selfsigned": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/selfsigned/-/selfsigned-2.1.1.tgz", + "integrity": "sha512-GSL3aowiF7wa/WtSFwnUrludWFoNhftq8bUkH9pkzjpN2XSPOAYEgg6e0sS9s0rZwgJzJiQRPU18A6clnoW5wQ==", + "dependencies": { + 
"node-forge": "^1" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/semver": { + "version": "7.5.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.1.tgz", + "integrity": "sha512-Wvss5ivl8TMRZXXESstBA4uR5iXgEN/VC5/sOcuXdVLzcdkz4HWetIoRfG5gb5X+ij/G9rw9YoGn3QoQ8OCSpw==", + "dependencies": { + "lru-cache": "^6.0.0" + }, + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/semver-diff": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/semver-diff/-/semver-diff-3.1.1.tgz", + "integrity": "sha512-GX0Ix/CJcHyB8c4ykpHGIAvLyOwOobtM/8d+TQkAd81/bEjgPHrfba41Vpesr7jX/t8Uh+R3EX9eAS5be+jQYg==", + "dependencies": { + "semver": "^6.3.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/semver-diff/node_modules/semver": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", + "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/semver/node_modules/lru-cache": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", + "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", + "dependencies": { + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/semver/node_modules/yallist": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", + "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==" + }, + "node_modules/send": { + "version": "0.18.0", + "resolved": "https://registry.npmjs.org/send/-/send-0.18.0.tgz", + "integrity": "sha512-qqWzuOjSFOuqPjFe4NOsMLafToQQwBSOEpS+FwEt3A2V3vKubTquT3vmLTQpFgMXp8AlFWFuP1qKaJZOtPpVXg==", + "dependencies": { + "debug": "2.6.9", + "depd": "2.0.0", + "destroy": "1.2.0", + 
"encodeurl": "~1.0.2", + "escape-html": "~1.0.3", + "etag": "~1.8.1", + "fresh": "0.5.2", + "http-errors": "2.0.0", + "mime": "1.6.0", + "ms": "2.1.3", + "on-finished": "2.4.1", + "range-parser": "~1.2.1", + "statuses": "2.0.1" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/send/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/send/node_modules/debug/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + }, + "node_modules/send/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==" + }, + "node_modules/send/node_modules/range-parser": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", + "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/serialize-javascript": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-6.0.1.tgz", + "integrity": "sha512-owoXEFjWRllis8/M1Q+Cw5k8ZH40e3zhp/ovX+Xr/vi1qj6QesbyXXViFbpNvWvPNAD62SutwEXavefrLJWj7w==", + "dependencies": { + "randombytes": "^2.1.0" + } + }, + "node_modules/serve-handler": { + "version": "6.1.5", + "resolved": "https://registry.npmjs.org/serve-handler/-/serve-handler-6.1.5.tgz", + "integrity": "sha512-ijPFle6Hwe8zfmBxJdE+5fta53fdIY0lHISJvuikXB3VYFafRjMRpOffSPvCYsbKyBA7pvy9oYr/BT1O3EArlg==", + "dependencies": { + 
"bytes": "3.0.0", + "content-disposition": "0.5.2", + "fast-url-parser": "1.1.3", + "mime-types": "2.1.18", + "minimatch": "3.1.2", + "path-is-inside": "1.0.2", + "path-to-regexp": "2.2.1", + "range-parser": "1.2.0" + } + }, + "node_modules/serve-handler/node_modules/path-to-regexp": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-2.2.1.tgz", + "integrity": "sha512-gu9bD6Ta5bwGrrU8muHzVOBFFREpp2iRkVfhBJahwJ6p6Xw20SjT0MxLnwkjOibQmGSYhiUnf2FLe7k+jcFmGQ==" + }, + "node_modules/serve-index": { + "version": "1.9.1", + "resolved": "https://registry.npmjs.org/serve-index/-/serve-index-1.9.1.tgz", + "integrity": "sha512-pXHfKNP4qujrtteMrSBb0rc8HJ9Ms/GrXwcUtUtD5s4ewDJI8bT3Cz2zTVRMKtri49pLx2e0Ya8ziP5Ya2pZZw==", + "dependencies": { + "accepts": "~1.3.4", + "batch": "0.6.1", + "debug": "2.6.9", + "escape-html": "~1.0.3", + "http-errors": "~1.6.2", + "mime-types": "~2.1.17", + "parseurl": "~1.3.2" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/serve-index/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/serve-index/node_modules/depd": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/depd/-/depd-1.1.2.tgz", + "integrity": "sha512-7emPTl6Dpo6JRXOXjLRxck+FlLRX5847cLKEn00PLAgc3g2hTZZgr+e4c2v6QpSmLeFP3n5yUo7ft6avBK/5jQ==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/serve-index/node_modules/http-errors": { + "version": "1.6.3", + "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-1.6.3.tgz", + "integrity": "sha512-lks+lVC8dgGyh97jxvxeYTWQFvh4uw4yC12gVl63Cg30sjPX4wuGcdkICVXDAESr6OJGjqGA8Iz5mkeN6zlD7A==", + "dependencies": { + "depd": "~1.1.2", + "inherits": "2.0.3", + "setprototypeof": "1.1.0", + "statuses": ">= 1.4.0 < 2" + }, + 
"engines": { + "node": ">= 0.6" + } + }, + "node_modules/serve-index/node_modules/inherits": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz", + "integrity": "sha512-x00IRNXNy63jwGkJmzPigoySHbaqpNuzKbBOmzK+g2OdZpQ9w+sxCN+VSB3ja7IAge2OP2qpfxTjeNcyjmW1uw==" + }, + "node_modules/serve-index/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + }, + "node_modules/serve-index/node_modules/setprototypeof": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.1.0.tgz", + "integrity": "sha512-BvE/TwpZX4FXExxOxZyRGQQv651MSwmWKZGqvmPcRIjDqWub67kTKuIMx43cZZrS/cBBzwBcNDWoFxt2XEFIpQ==" + }, + "node_modules/serve-index/node_modules/statuses": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/statuses/-/statuses-1.5.0.tgz", + "integrity": "sha512-OpZ3zP+jT1PI7I8nemJX4AKmAX070ZkYPVWV/AaKTJl+tXCTGyVdC1a4SL8RUQYEwk/f34ZX8UTykN68FwrqAA==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/serve-static": { + "version": "1.15.0", + "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.15.0.tgz", + "integrity": "sha512-XGuRDNjXUijsUL0vl6nSD7cwURuzEgglbOaFuZM9g3kwDXOWVTck0jLzjPzGD+TazWbboZYu52/9/XPdUgne9g==", + "dependencies": { + "encodeurl": "~1.0.2", + "escape-html": "~1.0.3", + "parseurl": "~1.3.3", + "send": "0.18.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/setprototypeof": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.2.0.tgz", + "integrity": "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==" + }, + "node_modules/shallow-clone": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/shallow-clone/-/shallow-clone-3.0.1.tgz", + "integrity": 
"sha512-/6KqX+GVUdqPuPPd2LxDDxzX6CAbjJehAAOKlNpqqUpAqPM6HeL8f+o3a+JsyGjn2lv0WY8UsTgUJjU9Ok55NA==", + "dependencies": { + "kind-of": "^6.0.2" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shallowequal": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/shallowequal/-/shallowequal-1.1.0.tgz", + "integrity": "sha512-y0m1JoUZSlPAjXVtPPW70aZWfIL/dSP7AFkRnniLCrK/8MDKog3TySTBmckD+RObVxH0v4Tox67+F14PdED2oQ==" + }, + "node_modules/shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "engines": { + "node": ">=8" + } + }, + "node_modules/shell-quote": { + "version": "1.8.1", + "resolved": "https://registry.npmjs.org/shell-quote/-/shell-quote-1.8.1.tgz", + "integrity": "sha512-6j1W9l1iAs/4xYBI1SYOVZyFcCis9b4KCLQ8fgAGG07QvzaRLVVRQvAy85yNmmZSjYjg4MWh4gNvlPujU/5LpA==", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/shelljs": { + "version": "0.8.5", + "resolved": "https://registry.npmjs.org/shelljs/-/shelljs-0.8.5.tgz", + "integrity": "sha512-TiwcRcrkhHvbrZbnRcFYMLl30Dfov3HKqzp5tO5b4pt6G/SezKcYhmDg15zXVBswHmctSAQKznqNW2LO5tTDow==", + "dependencies": { + "glob": "^7.0.0", + "interpret": "^1.0.0", + "rechoir": "^0.6.2" + }, + "bin": { + "shjs": "bin/shjs" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/side-channel": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.4.tgz", + "integrity": 
"sha512-q5XPytqFEIKHkGdiMIrY10mvLRvnQh42/+GoBlFW3b2LXLE2xxJpZFdm94we0BaoV3RwJyGqg5wS7epxTv0Zvw==", + "dependencies": { + "call-bind": "^1.0.0", + "get-intrinsic": "^1.0.2", + "object-inspect": "^1.9.0" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/signal-exit": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", + "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==" + }, + "node_modules/sirv": { + "version": "1.0.19", + "resolved": "https://registry.npmjs.org/sirv/-/sirv-1.0.19.tgz", + "integrity": "sha512-JuLThK3TnZG1TAKDwNIqNq6QA2afLOCcm+iE8D1Kj3GA40pSPsxQjjJl0J8X3tsR7T+CP1GavpzLwYkgVLWrZQ==", + "dependencies": { + "@polka/url": "^1.0.0-next.20", + "mrmime": "^1.0.0", + "totalist": "^1.0.0" + }, + "engines": { + "node": ">= 10" + } + }, + "node_modules/sisteransi": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/sisteransi/-/sisteransi-1.0.5.tgz", + "integrity": "sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==" + }, + "node_modules/slash": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", + "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", + "engines": { + "node": ">=8" + } + }, + "node_modules/sockjs": { + "version": "0.3.24", + "resolved": "https://registry.npmjs.org/sockjs/-/sockjs-0.3.24.tgz", + "integrity": "sha512-GJgLTZ7vYb/JtPSSZ10hsOYIvEYsjbNU+zPdIHcUaWVNUEPivzxku31865sSSud0Da0W4lEeOPlmw93zLQchuQ==", + "dependencies": { + "faye-websocket": "^0.11.3", + "uuid": "^8.3.2", + "websocket-driver": "^0.7.4" + } + }, + "node_modules/sort-css-media-queries": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/sort-css-media-queries/-/sort-css-media-queries-2.1.0.tgz", + "integrity": 
"sha512-IeWvo8NkNiY2vVYdPa27MCQiR0MN0M80johAYFVxWWXQ44KU84WNxjslwBHmc/7ZL2ccwkM7/e6S5aiKZXm7jA==", + "engines": { + "node": ">= 6.3.0" + } + }, + "node_modules/source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/source-map-js": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.0.2.tgz", + "integrity": "sha512-R0XvVJ9WusLiqTCEiGCmICCMplcCkIwwR11mOSD9CR5u+IXYdiseeEuXCVAjS54zqwkLcPNnmU4OeJ6tUrWhDw==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/source-map-support": { + "version": "0.5.21", + "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.21.tgz", + "integrity": "sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w==", + "dependencies": { + "buffer-from": "^1.0.0", + "source-map": "^0.6.0" + } + }, + "node_modules/space-separated-tokens": { + "version": "1.1.5", + "resolved": "https://registry.npmjs.org/space-separated-tokens/-/space-separated-tokens-1.1.5.tgz", + "integrity": "sha512-q/JSVd1Lptzhf5bkYm4ob4iWPjx0KiRe3sRFBNrVqbJkFaBm5vbbowy1mymoPNLRa52+oadOhJ+K49wsSeSjTA==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/spdy": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/spdy/-/spdy-4.0.2.tgz", + "integrity": "sha512-r46gZQZQV+Kl9oItvl1JZZqJKGr+oEkB08A6BzkiR7593/7IbtuncXHd2YoYeTsG4157ZssMu9KYvUHLcjcDoA==", + "dependencies": { + "debug": "^4.1.0", + "handle-thing": "^2.0.0", + "http-deceiver": "^1.2.7", + "select-hose": "^2.0.0", + "spdy-transport": "^3.0.0" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/spdy-transport": { + "version": "3.0.0", + "resolved": 
"https://registry.npmjs.org/spdy-transport/-/spdy-transport-3.0.0.tgz", + "integrity": "sha512-hsLVFE5SjA6TCisWeJXFKniGGOpBgMLmerfO2aCyCU5s7nJ/rpAepqmFifv/GCbSbueEeAJJnmSQ2rKC/g8Fcw==", + "dependencies": { + "debug": "^4.1.0", + "detect-node": "^2.0.4", + "hpack.js": "^2.1.6", + "obuf": "^1.1.2", + "readable-stream": "^3.0.6", + "wbuf": "^1.7.3" + } + }, + "node_modules/sprintf-js": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", + "integrity": "sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==" + }, + "node_modules/stable": { + "version": "0.1.8", + "resolved": "https://registry.npmjs.org/stable/-/stable-0.1.8.tgz", + "integrity": "sha512-ji9qxRnOVfcuLDySj9qzhGSEFVobyt1kIOSkj1qZzYLzq7Tos/oUUWvotUPQLlrsidqsK6tBH89Bc9kL5zHA6w==", + "deprecated": "Modern JS already guarantees Array#sort() is a stable sort, so this library is deprecated. See the compatibility table on MDN: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Array/sort#browser_compatibility" + }, + "node_modules/state-toggle": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/state-toggle/-/state-toggle-1.0.3.tgz", + "integrity": "sha512-d/5Z4/2iiCnHw6Xzghyhb+GcmF89bxwgXG60wjIiZaxnymbyOmI8Hk4VqHXiVVp6u2ysaskFfXg3ekCj4WNftQ==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/statuses": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz", + "integrity": "sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/std-env": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/std-env/-/std-env-3.3.3.tgz", + "integrity": "sha512-Rz6yejtVyWnVjC1RFvNmYL10kgjC49EOghxWn0RFqlCHGFpQx+Xe7yW3I4ceK1SGrWIGMjD5Kbue8W/udkbMJg==" + }, + "node_modules/string_decoder": 
{ + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", + "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==", + "dependencies": { + "safe-buffer": "~5.2.0" + } + }, + "node_modules/string-width": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", + "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==", + "dependencies": { + "eastasianwidth": "^0.2.0", + "emoji-regex": "^9.2.2", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/string-width/node_modules/ansi-regex": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.0.1.tgz", + "integrity": "sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-regex?sponsor=1" + } + }, + "node_modules/string-width/node_modules/strip-ansi": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz", + "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==", + "dependencies": { + "ansi-regex": "^6.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/strip-ansi?sponsor=1" + } + }, + "node_modules/stringify-object": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/stringify-object/-/stringify-object-3.3.0.tgz", + "integrity": "sha512-rHqiFh1elqCQ9WPLIC8I0Q/g/wj5J1eMkyoiD6eoQApWHP0FtlK7rqnhmabL5VUY9JQCcqwwvlOaSuutekgyrw==", + "dependencies": { + "get-own-enumerable-property-symbols": "^3.0.0", + "is-obj": "^1.0.1", + "is-regexp": "^1.0.0" + }, + "engines": { + "node": ">=4" + } + 
}, + "node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-bom-string": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/strip-bom-string/-/strip-bom-string-1.0.0.tgz", + "integrity": "sha512-uCC2VHvQRYu+lMh4My/sFNmF2klFymLX1wHJeXnbEJERpV/ZsVuonzerjfrGpIGF7LBVa1O7i9kjiWvJiFck8g==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/strip-final-newline": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-2.0.0.tgz", + "integrity": "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==", + "engines": { + "node": ">=6" + } + }, + "node_modules/strip-json-comments": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz", + "integrity": "sha512-4gB8na07fecVVkOI6Rs4e7T6NOTki5EmL7TUduTs6bu3EdnSycntVJ4re8kgZA+wx9IueI2Y11bfbgwtzuE0KQ==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/style-to-object": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/style-to-object/-/style-to-object-0.3.0.tgz", + "integrity": "sha512-CzFnRRXhzWIdItT3OmF8SQfWyahHhjq3HwcMNCNLn+N7klOOqPjMeG/4JSu77D7ypZdGvSzvkrbyeTMizz2VrA==", + "dependencies": { + "inline-style-parser": "0.1.1" + } + }, + "node_modules/stylehacks": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/stylehacks/-/stylehacks-5.1.1.tgz", + "integrity": "sha512-sBpcd5Hx7G6seo7b1LkpttvTz7ikD0LlH5RmdcBNb6fFR0Fl7LQwHDFr300q4cwUqi+IYrFGmsIHieMBfnN/Bw==", + "dependencies": { + "browserslist": "^4.21.4", + "postcss-selector-parser": "^6.0.4" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + 
"peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/supports-preserve-symlinks-flag": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", + "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/svg-parser": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/svg-parser/-/svg-parser-2.0.4.tgz", + "integrity": "sha512-e4hG1hRwoOdRb37cIMSgzNsxyzKfayW6VOflrwvR+/bzrkyxY/31WkbgnQpgtrNp1SdpJvpUAGTa/ZoiPNDuRQ==" + }, + "node_modules/svgo": { + "version": "2.8.0", + "resolved": "https://registry.npmjs.org/svgo/-/svgo-2.8.0.tgz", + "integrity": "sha512-+N/Q9kV1+F+UeWYoSiULYo4xYSDQlTgb+ayMobAXPwMnLvop7oxKMo9OzIrX5x3eS4L4f2UHhc9axXwY8DpChg==", + "dependencies": { + "@trysound/sax": "0.2.0", + "commander": "^7.2.0", + "css-select": "^4.1.3", + "css-tree": "^1.1.3", + "csso": "^4.2.0", + "picocolors": "^1.0.0", + "stable": "^0.1.8" + }, + "bin": { + "svgo": "bin/svgo" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/svgo/node_modules/commander": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-7.2.0.tgz", + "integrity": "sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw==", + "engines": { + "node": ">= 10" + } + }, + "node_modules/sync-directory": { + "version": "5.1.9", + "resolved": 
"https://registry.npmjs.org/sync-directory/-/sync-directory-5.1.9.tgz", + "integrity": "sha512-0942RssO+NrIjDcaNiXUH/NQoAamURT1zpzN/uB8fgyetDM8NtPPOQNax3+BuNUfw/2JcaEXrAz567DokNq0lw==", + "dependencies": { + "chokidar": "^3.3.1", + "commander": "^6.2.0", + "fs-extra": "^7.0.1", + "is-absolute": "^1.0.0", + "readdir-enhanced": "^1.5.2" + }, + "bin": { + "syncdir": "cmd.js" + } + }, + "node_modules/sync-directory/node_modules/commander": { + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/commander/-/commander-6.2.1.tgz", + "integrity": "sha512-U7VdrJFnJgo4xjrHpTzu0yrHPGImdsmD95ZlgYSEajAn2JKzDhDTPG9kBTefmObL2w/ngeZnilk+OV9CG3d7UA==", + "engines": { + "node": ">= 6" + } + }, + "node_modules/tapable": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/tapable/-/tapable-2.2.1.tgz", + "integrity": "sha512-GNzQvQTOIP6RyTfE2Qxb8ZVlNmw0n88vp1szwWRimP02mnTsx3Wtn5qRdqY9w2XduFNUgvOwhNnQsjwCp+kqaQ==", + "engines": { + "node": ">=6" + } + }, + "node_modules/terser": { + "version": "5.17.7", + "resolved": "https://registry.npmjs.org/terser/-/terser-5.17.7.tgz", + "integrity": "sha512-/bi0Zm2C6VAexlGgLlVxA0P2lru/sdLyfCVaRMfKVo9nWxbmz7f/sD8VPybPeSUJaJcwmCJis9pBIhcVcG1QcQ==", + "dependencies": { + "@jridgewell/source-map": "^0.3.3", + "acorn": "^8.8.2", + "commander": "^2.20.0", + "source-map-support": "~0.5.20" + }, + "bin": { + "terser": "bin/terser" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/terser-webpack-plugin": { + "version": "5.3.9", + "resolved": "https://registry.npmjs.org/terser-webpack-plugin/-/terser-webpack-plugin-5.3.9.tgz", + "integrity": "sha512-ZuXsqE07EcggTWQjXUj+Aot/OMcD0bMKGgF63f7UxYcu5/AJF53aIpK1YoP5xR9l6s/Hy2b+t1AM0bLNPRuhwA==", + "dependencies": { + "@jridgewell/trace-mapping": "^0.3.17", + "jest-worker": "^27.4.5", + "schema-utils": "^3.1.1", + "serialize-javascript": "^6.0.1", + "terser": "^5.16.8" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": 
"https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^5.1.0" + }, + "peerDependenciesMeta": { + "@swc/core": { + "optional": true + }, + "esbuild": { + "optional": true + }, + "uglify-js": { + "optional": true + } + } + }, + "node_modules/terser-webpack-plugin/node_modules/jest-worker": { + "version": "27.5.1", + "resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-27.5.1.tgz", + "integrity": "sha512-7vuh85V5cdDofPyxn58nrPjBktZo0u9x1g8WtjQol+jZDaE+fhN+cIvTj11GndBnMnyfrUOG1sZQxCdjKh+DKg==", + "dependencies": { + "@types/node": "*", + "merge-stream": "^2.0.0", + "supports-color": "^8.0.0" + }, + "engines": { + "node": ">= 10.13.0" + } + }, + "node_modules/terser-webpack-plugin/node_modules/schema-utils": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.2.0.tgz", + "integrity": "sha512-0zTyLGyDJYd/MBxG1AhJkKa6fpEBds4OQO2ut0w7OYG+ZGhGea09lijvzsqegYSik88zc7cUtIlnnO+/BvD6gQ==", + "dependencies": { + "@types/json-schema": "^7.0.8", + "ajv": "^6.12.5", + "ajv-keywords": "^3.5.2" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/terser-webpack-plugin/node_modules/supports-color": { + "version": "8.1.1", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", + "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/supports-color?sponsor=1" + } + }, + "node_modules/terser/node_modules/commander": { + "version": "2.20.3", + "resolved": "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz", + "integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==" + }, + "node_modules/text-table": { + "version": 
"0.2.0", + "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", + "integrity": "sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==" + }, + "node_modules/thunky": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/thunky/-/thunky-1.1.0.tgz", + "integrity": "sha512-eHY7nBftgThBqOyHGVN+l8gF0BucP09fMo0oO/Lb0w1OF80dJv+lDVpXG60WMQvkcxAkNybKsrEIE3ZtKGmPrA==" + }, + "node_modules/tiny-invariant": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/tiny-invariant/-/tiny-invariant-1.3.1.tgz", + "integrity": "sha512-AD5ih2NlSssTCwsMznbvwMZpJ1cbhkGd2uueNxzv2jDlEeZdU04JQfRnggJQ8DrcVBGjAsCKwFBbDlVNtEMlzw==" + }, + "node_modules/tiny-warning": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/tiny-warning/-/tiny-warning-1.0.3.tgz", + "integrity": "sha512-lBN9zLN/oAf68o3zNXYrdCt1kP8WsiGW8Oo2ka41b2IM5JL/S1CTyX1rW0mb/zSuJun0ZUrDxx4sqvYS2FWzPA==" + }, + "node_modules/to-fast-properties": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-2.0.0.tgz", + "integrity": "sha512-/OaKK0xYrs3DmxRYqL/yDc+FxFUVYhDlXMhRmv3z915w2HF1tnN1omB354j8VUGO/hbRzyD6Y3sA7v7GS/ceog==", + "engines": { + "node": ">=4" + } + }, + "node_modules/to-readable-stream": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/to-readable-stream/-/to-readable-stream-1.0.0.tgz", + "integrity": "sha512-Iq25XBt6zD5npPhlLVXGFN3/gyR2/qODcKNNyTMd4vbm39HUaOiAM4PMq0eMVC/Tkxz+Zjdsc55g9yyz+Yq00Q==", + "engines": { + "node": ">=6" + } + }, + "node_modules/to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/toidentifier": { + "version": "1.0.1", + "resolved": 
"https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.1.tgz", + "integrity": "sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==", + "engines": { + "node": ">=0.6" + } + }, + "node_modules/toposort": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/toposort/-/toposort-2.0.2.tgz", + "integrity": "sha512-0a5EOkAUp8D4moMi2W8ZF8jcga7BgZd91O/yabJCFY8az+XSzeGyTKs0Aoo897iV1Nj6guFq8orWDS96z91oGg==" + }, + "node_modules/totalist": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/totalist/-/totalist-1.1.0.tgz", + "integrity": "sha512-gduQwd1rOdDMGxFG1gEvhV88Oirdo2p+KjoYFU7k2g+i7n6AFFbDQ5kMPUsW0pNbfQsB/cwXvT1i4Bue0s9g5g==", + "engines": { + "node": ">=6" + } + }, + "node_modules/trim": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/trim/-/trim-0.0.1.tgz", + "integrity": "sha512-YzQV+TZg4AxpKxaTHK3c3D+kRDCGVEE7LemdlQZoQXn0iennk10RsIoY6ikzAqJTc9Xjl9C1/waHom/J86ziAQ==", + "deprecated": "Use String.prototype.trim() instead" + }, + "node_modules/trim-trailing-lines": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/trim-trailing-lines/-/trim-trailing-lines-1.1.4.tgz", + "integrity": "sha512-rjUWSqnfTNrjbB9NQWfPMH/xRK1deHeGsHoVfpxJ++XeYXE0d6B1En37AHfw3jtfTU7dzMzZL2jjpe8Qb5gLIQ==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/trough": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/trough/-/trough-1.0.5.tgz", + "integrity": "sha512-rvuRbTarPXmMb79SmzEp8aqXNKcK+y0XaB298IXueQ8I2PsrATcPBCSPyK/dDNa2iWOhKlfNnOjdAOTBU/nkFA==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/tslib": { + "version": "2.5.3", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.5.3.tgz", + "integrity": "sha512-mSxlJJwl3BMEQCUNnxXBU9jP4JBktcEGhURcPR6VQVlnP0FdDEsIaz0C35dXNGLyRfrATNofF0F5p2KPxQgB+w==" + }, + "node_modules/type-fest": { + "version": 
"2.19.0", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-2.19.0.tgz", + "integrity": "sha512-RAH822pAdBgcNMAfWnCBU3CFZcfZ/i1eZjwFU/dsLKumyuuP3niueg2UAukXYF0E2AAoc82ZSSf9J0WQBinzHA==", + "engines": { + "node": ">=12.20" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/type-is": { + "version": "1.6.18", + "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz", + "integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==", + "dependencies": { + "media-typer": "0.3.0", + "mime-types": "~2.1.24" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/type-is/node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/type-is/node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/typedarray-to-buffer": { + "version": "3.1.5", + "resolved": "https://registry.npmjs.org/typedarray-to-buffer/-/typedarray-to-buffer-3.1.5.tgz", + "integrity": "sha512-zdu8XMNEDepKKR+XYOXAVPtWui0ly0NtohUscw+UmaHiAWT8hrV1rr//H6V+0DvJ3OQ19S979M0laLfX8rm82Q==", + "dependencies": { + "is-typedarray": "^1.0.0" + } + }, + "node_modules/typescript": { + "version": "5.1.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.1.3.tgz", + "integrity": "sha512-XH627E9vkeqhlZFQuL+UsyAXEnibT0kWR2FWONlr4sTjvxyJYnyefgrkyECLzM5NenmKzRAy2rR/OlYLA1HkZw==", + "peer": true, + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + 
"engines": { + "node": ">=14.17" + } + }, + "node_modules/uglify-js": { + "version": "3.17.4", + "resolved": "https://registry.npmjs.org/uglify-js/-/uglify-js-3.17.4.tgz", + "integrity": "sha512-T9q82TJI9e/C1TAxYvfb16xO120tMVFZrGA3f9/P4424DNu6ypK103y0GPFVa17yotwSyZW5iYXgjYHkGrJW/g==", + "optional": true, + "bin": { + "uglifyjs": "bin/uglifyjs" + }, + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/unc-path-regex": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/unc-path-regex/-/unc-path-regex-0.1.2.tgz", + "integrity": "sha512-eXL4nmJT7oCpkZsHZUOJo8hcX3GbsiDOa0Qu9F646fi8dT3XuSVopVqAcEiVzSKKH7UoDti23wNX3qGFxcW5Qg==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/unherit": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/unherit/-/unherit-1.1.3.tgz", + "integrity": "sha512-Ft16BJcnapDKp0+J/rqFC3Rrk6Y/Ng4nzsC028k2jdDII/rdZ7Wd3pPT/6+vIIxRagwRc9K0IUX0Ra4fKvw+WQ==", + "dependencies": { + "inherits": "^2.0.0", + "xtend": "^4.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/unicode-canonical-property-names-ecmascript": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/unicode-canonical-property-names-ecmascript/-/unicode-canonical-property-names-ecmascript-2.0.0.tgz", + "integrity": "sha512-yY5PpDlfVIU5+y/BSCxAJRBIS1Zc2dDG3Ujq+sR0U+JjUevW2JhocOF+soROYDSaAezOzOKuyyixhD6mBknSmQ==", + "engines": { + "node": ">=4" + } + }, + "node_modules/unicode-match-property-ecmascript": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/unicode-match-property-ecmascript/-/unicode-match-property-ecmascript-2.0.0.tgz", + "integrity": "sha512-5kaZCrbp5mmbz5ulBkDkbY0SsPOjKqVS35VpL9ulMPfSl0J0Xsm+9Evphv9CoIZFwre7aJoa94AY6seMKGVN5Q==", + "dependencies": { + "unicode-canonical-property-names-ecmascript": "^2.0.0", + "unicode-property-aliases-ecmascript": "^2.0.0" + }, + "engines": { + "node": ">=4" + } + }, + 
"node_modules/unicode-match-property-value-ecmascript": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/unicode-match-property-value-ecmascript/-/unicode-match-property-value-ecmascript-2.1.0.tgz", + "integrity": "sha512-qxkjQt6qjg/mYscYMC0XKRn3Rh0wFPlfxB0xkt9CfyTvpX1Ra0+rAmdX2QyAobptSEvuy4RtpPRui6XkV+8wjA==", + "engines": { + "node": ">=4" + } + }, + "node_modules/unicode-property-aliases-ecmascript": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/unicode-property-aliases-ecmascript/-/unicode-property-aliases-ecmascript-2.1.0.tgz", + "integrity": "sha512-6t3foTQI9qne+OZoVQB/8x8rk2k1eVy1gRXhV3oFQ5T6R1dqQ1xtin3XqSlx3+ATBkliTaR/hHyJBm+LVPNM8w==", + "engines": { + "node": ">=4" + } + }, + "node_modules/unified": { + "version": "9.2.2", + "resolved": "https://registry.npmjs.org/unified/-/unified-9.2.2.tgz", + "integrity": "sha512-Sg7j110mtefBD+qunSLO1lqOEKdrwBFBrR6Qd8f4uwkhWNlbkaqwHse6e7QvD3AP/MNoJdEDLaf8OxYyoWgorQ==", + "dependencies": { + "bail": "^1.0.0", + "extend": "^3.0.0", + "is-buffer": "^2.0.0", + "is-plain-obj": "^2.0.0", + "trough": "^1.0.0", + "vfile": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unique-string": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/unique-string/-/unique-string-2.0.0.tgz", + "integrity": "sha512-uNaeirEPvpZWSgzwsPGtU2zVSTrn/8L5q/IexZmH0eH6SA73CmAA5U4GwORTxQAZs95TAXLNqeLoPPNO5gZfWg==", + "dependencies": { + "crypto-random-string": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/unist-builder": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/unist-builder/-/unist-builder-2.0.3.tgz", + "integrity": "sha512-f98yt5pnlMWlzP539tPc4grGMsFaQQlP/vM396b00jngsiINumNmsY8rkXjfoi1c6QaM8nQ3vaGDuoKWbe/1Uw==", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-generated": { + "version": "1.1.6", + 
"resolved": "https://registry.npmjs.org/unist-util-generated/-/unist-util-generated-1.1.6.tgz", + "integrity": "sha512-cln2Mm1/CZzN5ttGK7vkoGw+RZ8VcUH6BtGbq98DDtRGquAAOXig1mrBQYelOwMXYS8rK+vZDyyojSjp7JX+Lg==", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-is": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-4.1.0.tgz", + "integrity": "sha512-ZOQSsnce92GrxSqlnEEseX0gi7GH9zTJZ0p9dtu87WRb/37mMPO2Ilx1s/t9vBHrFhbgweUwb+t7cIn5dxPhZg==", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-position": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/unist-util-position/-/unist-util-position-3.1.0.tgz", + "integrity": "sha512-w+PkwCbYSFw8vpgWD0v7zRCl1FpY3fjDSQ3/N/wNd9Ffa4gPi8+4keqt99N3XW6F99t/mUzp2xAhNmfKWp95QA==", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-remove": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/unist-util-remove/-/unist-util-remove-2.1.0.tgz", + "integrity": "sha512-J8NYPyBm4baYLdCbjmf1bhPu45Cr1MWTm77qd9istEkzWpnN6O9tMsEbB2JhNnBCqGENRqEWomQ+He6au0B27Q==", + "dependencies": { + "unist-util-is": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-remove-position": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/unist-util-remove-position/-/unist-util-remove-position-2.0.1.tgz", + "integrity": "sha512-fDZsLYIe2uT+oGFnuZmy73K6ZxOPG/Qcm+w7jbEjaFcJgbQ6cqjs/eSPzXhsmGpAsWPkqZM9pYjww5QTn3LHMA==", + "dependencies": { + "unist-util-visit": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-stringify-position": { + "version": "2.0.3", + "resolved": 
"https://registry.npmjs.org/unist-util-stringify-position/-/unist-util-stringify-position-2.0.3.tgz", + "integrity": "sha512-3faScn5I+hy9VleOq/qNbAd6pAx7iH5jYBMS9I1HgQVijz/4mv5Bvw5iw1sC/90CODiKo81G/ps8AJrISn687g==", + "dependencies": { + "@types/unist": "^2.0.2" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-visit": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-2.0.3.tgz", + "integrity": "sha512-iJ4/RczbJMkD0712mGktuGpm/U4By4FfDonL7N/9tATGIF4imikjOuagyMY53tnZq3NP6BcmlrHhEKAfGWjh7Q==", + "dependencies": { + "@types/unist": "^2.0.0", + "unist-util-is": "^4.0.0", + "unist-util-visit-parents": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-visit-parents": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-3.1.1.tgz", + "integrity": "sha512-1KROIZWo6bcMrZEwiH2UrXDyalAa0uqzWCxCJj6lPOvTve2WkfgCytoDTPaMnodXh1WrXOq0haVYHj99ynJlsg==", + "dependencies": { + "@types/unist": "^2.0.0", + "unist-util-is": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/universalify": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-0.1.2.tgz", + "integrity": "sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg==", + "engines": { + "node": ">= 4.0.0" + } + }, + "node_modules/unpipe": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz", + "integrity": "sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/update-browserslist-db": { + "version": "1.0.11", + "resolved": 
"https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.0.11.tgz", + "integrity": "sha512-dCwEFf0/oT85M1fHBg4F0jtLwJrutGoHSQXCh7u4o2t1drG+c0a9Flnqww6XUKSfQMPpJBRjU8d4RXB09qtvaA==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "dependencies": { + "escalade": "^3.1.1", + "picocolors": "^1.0.0" + }, + "bin": { + "update-browserslist-db": "cli.js" + }, + "peerDependencies": { + "browserslist": ">= 4.21.0" + } + }, + "node_modules/update-notifier": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/update-notifier/-/update-notifier-5.1.0.tgz", + "integrity": "sha512-ItnICHbeMh9GqUy31hFPrD1kcuZ3rpxDZbf4KUDavXwS0bW5m7SLbDQpGX3UYr072cbrF5hFUs3r5tUsPwjfHw==", + "dependencies": { + "boxen": "^5.0.0", + "chalk": "^4.1.0", + "configstore": "^5.0.1", + "has-yarn": "^2.1.0", + "import-lazy": "^2.1.0", + "is-ci": "^2.0.0", + "is-installed-globally": "^0.4.0", + "is-npm": "^5.0.0", + "is-yarn-global": "^0.3.0", + "latest-version": "^5.1.0", + "pupa": "^2.1.1", + "semver": "^7.3.4", + "semver-diff": "^3.1.1", + "xdg-basedir": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/yeoman/update-notifier?sponsor=1" + } + }, + "node_modules/update-notifier/node_modules/boxen": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/boxen/-/boxen-5.1.2.tgz", + "integrity": "sha512-9gYgQKXx+1nP8mP7CzFyaUARhg7D3n1dF/FnErWmu9l6JvGpNUN278h0aSb+QjoiKSWG+iZ3uHrcqk0qrY9RQQ==", + "dependencies": { + "ansi-align": "^3.0.0", + "camelcase": "^6.2.0", + "chalk": "^4.1.0", + "cli-boxes": "^2.2.1", + "string-width": "^4.2.2", + "type-fest": "^0.20.2", + "widest-line": "^3.1.0", + "wrap-ansi": "^7.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": 
"https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/update-notifier/node_modules/cli-boxes": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/cli-boxes/-/cli-boxes-2.2.1.tgz", + "integrity": "sha512-y4coMcylgSCdVinjiDBuR8PCC2bLjyGTwEmPb9NHR/QaNU6EUOXcTY/s6VjGMD6ENSEaeQYHCY0GNGS5jfMwPw==", + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/update-notifier/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" + }, + "node_modules/update-notifier/node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/update-notifier/node_modules/type-fest": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz", + "integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/update-notifier/node_modules/widest-line": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/widest-line/-/widest-line-3.1.0.tgz", + "integrity": "sha512-NsmoXalsWVDMGupxZ5R08ka9flZjjiLvHVAWYOKtiKM8ujtZWr9cRffak+uSE48+Ob8ObalXpwyeUiyDD6QFgg==", + "dependencies": { + "string-width": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/update-notifier/node_modules/wrap-ansi": { + "version": "7.0.0", + 
"resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/uri-js": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", + "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", + "dependencies": { + "punycode": "^2.1.0" + } + }, + "node_modules/uri-js/node_modules/punycode": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.0.tgz", + "integrity": "sha512-rRV+zQD8tVFys26lAGR9WUuS4iUAngJScM+ZRSKtvl5tKeZ2t5bvdNFdNHBW9FWR4guGHlgmsZ1G7BSm2wTbuA==", + "engines": { + "node": ">=6" + } + }, + "node_modules/url-loader": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/url-loader/-/url-loader-4.1.1.tgz", + "integrity": "sha512-3BTV812+AVHHOJQO8O5MkWgZ5aosP7GnROJwvzLS9hWDj00lZ6Z0wNak423Lp9PBZN05N+Jk/N5Si8jRAlGyWA==", + "dependencies": { + "loader-utils": "^2.0.0", + "mime-types": "^2.1.27", + "schema-utils": "^3.0.0" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "file-loader": "*", + "webpack": "^4.0.0 || ^5.0.0" + }, + "peerDependenciesMeta": { + "file-loader": { + "optional": true + } + } + }, + "node_modules/url-loader/node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "engines": { + "node": ">= 0.6" + } + }, + 
"node_modules/url-loader/node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/url-loader/node_modules/schema-utils": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.2.0.tgz", + "integrity": "sha512-0zTyLGyDJYd/MBxG1AhJkKa6fpEBds4OQO2ut0w7OYG+ZGhGea09lijvzsqegYSik88zc7cUtIlnnO+/BvD6gQ==", + "dependencies": { + "@types/json-schema": "^7.0.8", + "ajv": "^6.12.5", + "ajv-keywords": "^3.5.2" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/url-parse-lax": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/url-parse-lax/-/url-parse-lax-3.0.0.tgz", + "integrity": "sha512-NjFKA0DidqPa5ciFcSrXnAltTtzz84ogy+NebPvfEgAck0+TNg4UJ4IN+fB7zRZfbgUf0syOo9MDxFkDSMuFaQ==", + "dependencies": { + "prepend-http": "^2.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/util": { + "version": "0.10.4", + "resolved": "https://registry.npmjs.org/util/-/util-0.10.4.tgz", + "integrity": "sha512-0Pm9hTQ3se5ll1XihRic3FDIku70C+iHUdT/W926rSgHV5QgXsYbKZN8MSC3tJtSkhuROzvsQjAaFENRXr+19A==", + "dependencies": { + "inherits": "2.0.3" + } + }, + "node_modules/util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==" + }, + "node_modules/util/node_modules/inherits": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz", + "integrity": 
"sha512-x00IRNXNy63jwGkJmzPigoySHbaqpNuzKbBOmzK+g2OdZpQ9w+sxCN+VSB3ja7IAge2OP2qpfxTjeNcyjmW1uw==" + }, + "node_modules/utila": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/utila/-/utila-0.4.0.tgz", + "integrity": "sha512-Z0DbgELS9/L/75wZbro8xAnT50pBVFQZ+hUEueGDU5FN51YSCYM+jdxsfCiHjwNP/4LCDD0i/graKpeBnOXKRA==" + }, + "node_modules/utility-types": { + "version": "3.10.0", + "resolved": "https://registry.npmjs.org/utility-types/-/utility-types-3.10.0.tgz", + "integrity": "sha512-O11mqxmi7wMKCo6HKFt5AhO4BwY3VV68YU07tgxfz8zJTIxr4BpsezN49Ffwy9j3ZpwwJp4fkRwjRzq3uWE6Rg==", + "engines": { + "node": ">= 4" + } + }, + "node_modules/utils-merge": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz", + "integrity": "sha512-pMZTvIkT1d+TFGvDOqodOclx0QWkkgi6Tdoa8gC8ffGAAqz9pzPTZWAybbsHHoED/ztMtkv/VoYTYyShUn81hA==", + "engines": { + "node": ">= 0.4.0" + } + }, + "node_modules/uuid": { + "version": "8.3.2", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-8.3.2.tgz", + "integrity": "sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==", + "bin": { + "uuid": "dist/bin/uuid" + } + }, + "node_modules/value-equal": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/value-equal/-/value-equal-1.0.1.tgz", + "integrity": "sha512-NOJ6JZCAWr0zlxZt+xqCHNTEKOsrks2HQd4MqhP1qy4z1SkbEP467eNx6TgDKXMvUOb+OENfJCZwM+16n7fRfw==" + }, + "node_modules/vary": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz", + "integrity": "sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/vfile": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/vfile/-/vfile-4.2.1.tgz", + "integrity": "sha512-O6AE4OskCG5S1emQ/4gl8zK586RqA3srz3nfK/Viy0UPToBc5Trp9BVFb1u0CjsKrAWwnpr4ifM/KBXPWwJbCA==", + "dependencies": { + "@types/unist": "^2.0.0", 
+ "is-buffer": "^2.0.0", + "unist-util-stringify-position": "^2.0.0", + "vfile-message": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/vfile-location": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/vfile-location/-/vfile-location-3.2.0.tgz", + "integrity": "sha512-aLEIZKv/oxuCDZ8lkJGhuhztf/BW4M+iHdCwglA/eWc+vtuRFJj8EtgceYFX4LRjOhCAAiNHsKGssC6onJ+jbA==", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/vfile-message": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/vfile-message/-/vfile-message-2.0.4.tgz", + "integrity": "sha512-DjssxRGkMvifUOJre00juHoP9DPWuzjxKuMDrhNbk2TdaYYBNMStsNhEOt3idrtI12VQYM/1+iM0KOzXi4pxwQ==", + "dependencies": { + "@types/unist": "^2.0.0", + "unist-util-stringify-position": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/wait-on": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/wait-on/-/wait-on-6.0.1.tgz", + "integrity": "sha512-zht+KASY3usTY5u2LgaNqn/Cd8MukxLGjdcZxT2ns5QzDmTFc4XoWBgC+C/na+sMRZTuVygQoMYwdcVjHnYIVw==", + "dependencies": { + "axios": "^0.25.0", + "joi": "^17.6.0", + "lodash": "^4.17.21", + "minimist": "^1.2.5", + "rxjs": "^7.5.4" + }, + "bin": { + "wait-on": "bin/wait-on" + }, + "engines": { + "node": ">=10.0.0" + } + }, + "node_modules/watchpack": { + "version": "2.4.0", + "resolved": "https://registry.npmjs.org/watchpack/-/watchpack-2.4.0.tgz", + "integrity": "sha512-Lcvm7MGST/4fup+ifyKi2hjyIAwcdI4HRgtvTpIUxBRhB+RFtUh8XtDOxUfctVCnhVi+QQj49i91OyvzkJl6cg==", + "dependencies": { + "glob-to-regexp": "^0.4.1", + "graceful-fs": "^4.1.2" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/watchpack/node_modules/glob-to-regexp": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/glob-to-regexp/-/glob-to-regexp-0.4.1.tgz", + 
"integrity": "sha512-lkX1HJXwyMcprw/5YUZc2s7DrpAiHB21/V+E1rHUrVNokkvB6bqMzT0VfV6/86ZNabt1k14YOIaT7nDvOX3Iiw==" + }, + "node_modules/wbuf": { + "version": "1.7.3", + "resolved": "https://registry.npmjs.org/wbuf/-/wbuf-1.7.3.tgz", + "integrity": "sha512-O84QOnr0icsbFGLS0O3bI5FswxzRr8/gHwWkDlQFskhSPryQXvrTMxjxGP4+iWYoauLoBvfDpkrOauZ+0iZpDA==", + "dependencies": { + "minimalistic-assert": "^1.0.0" + } + }, + "node_modules/web-namespaces": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/web-namespaces/-/web-namespaces-1.1.4.tgz", + "integrity": "sha512-wYxSGajtmoP4WxfejAPIr4l0fVh+jeMXZb08wNc0tMg6xsfZXj3cECqIK0G7ZAqUq0PP8WlMDtaOGVBTAWztNw==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/webpack": { + "version": "5.86.0", + "resolved": "https://registry.npmjs.org/webpack/-/webpack-5.86.0.tgz", + "integrity": "sha512-3BOvworZ8SO/D4GVP+GoRC3fVeg5MO4vzmq8TJJEkdmopxyazGDxN8ClqN12uzrZW9Tv8EED8v5VSb6Sqyi0pg==", + "dependencies": { + "@types/eslint-scope": "^3.7.3", + "@types/estree": "^1.0.0", + "@webassemblyjs/ast": "^1.11.5", + "@webassemblyjs/wasm-edit": "^1.11.5", + "@webassemblyjs/wasm-parser": "^1.11.5", + "acorn": "^8.7.1", + "acorn-import-assertions": "^1.9.0", + "browserslist": "^4.14.5", + "chrome-trace-event": "^1.0.2", + "enhanced-resolve": "^5.14.1", + "es-module-lexer": "^1.2.1", + "eslint-scope": "5.1.1", + "events": "^3.2.0", + "glob-to-regexp": "^0.4.1", + "graceful-fs": "^4.2.9", + "json-parse-even-better-errors": "^2.3.1", + "loader-runner": "^4.2.0", + "mime-types": "^2.1.27", + "neo-async": "^2.6.2", + "schema-utils": "^3.1.2", + "tapable": "^2.1.1", + "terser-webpack-plugin": "^5.3.7", + "watchpack": "^2.4.0", + "webpack-sources": "^3.2.3" + }, + "bin": { + "webpack": "bin/webpack.js" + }, + "engines": { + "node": ">=10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependenciesMeta": { + "webpack-cli": { + 
"optional": true + } + } + }, + "node_modules/webpack-bundle-analyzer": { + "version": "4.9.0", + "resolved": "https://registry.npmjs.org/webpack-bundle-analyzer/-/webpack-bundle-analyzer-4.9.0.tgz", + "integrity": "sha512-+bXGmO1LyiNx0i9enBu3H8mv42sj/BJWhZNFwjz92tVnBa9J3JMGo2an2IXlEleoDOPn/Hofl5hr/xCpObUDtw==", + "dependencies": { + "@discoveryjs/json-ext": "0.5.7", + "acorn": "^8.0.4", + "acorn-walk": "^8.0.0", + "chalk": "^4.1.0", + "commander": "^7.2.0", + "gzip-size": "^6.0.0", + "lodash": "^4.17.20", + "opener": "^1.5.2", + "sirv": "^1.0.7", + "ws": "^7.3.1" + }, + "bin": { + "webpack-bundle-analyzer": "lib/bin/analyzer.js" + }, + "engines": { + "node": ">= 10.13.0" + } + }, + "node_modules/webpack-bundle-analyzer/node_modules/commander": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-7.2.0.tgz", + "integrity": "sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw==", + "engines": { + "node": ">= 10" + } + }, + "node_modules/webpack-dev-middleware": { + "version": "5.3.3", + "resolved": "https://registry.npmjs.org/webpack-dev-middleware/-/webpack-dev-middleware-5.3.3.tgz", + "integrity": "sha512-hj5CYrY0bZLB+eTO+x/j67Pkrquiy7kWepMHmUMoPsmcUaeEnQJqFzHJOyxgWlq746/wUuA64p9ta34Kyb01pA==", + "dependencies": { + "colorette": "^2.0.10", + "memfs": "^3.4.3", + "mime-types": "^2.1.31", + "range-parser": "^1.2.1", + "schema-utils": "^4.0.0" + }, + "engines": { + "node": ">= 12.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^4.0.0 || ^5.0.0" + } + }, + "node_modules/webpack-dev-middleware/node_modules/ajv": { + "version": "8.12.0", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.12.0.tgz", + "integrity": "sha512-sRu1kpcO9yLtYxBKvqfTeh9KzZEwO3STyX1HT+4CaDzC6HpTGYhIhPIzj9XuKU7KYDwnaeh5hcOwjy1QuJzBPA==", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "json-schema-traverse": "^1.0.0", + 
"require-from-string": "^2.0.2", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/webpack-dev-middleware/node_modules/ajv-keywords": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-5.1.0.tgz", + "integrity": "sha512-YCS/JNFAUyr5vAuhk1DWm1CBxRHW9LbJ2ozWeemrIqpbsqKjHVxYPyi5GC0rjZIT5JxJ3virVTS8wk4i/Z+krw==", + "dependencies": { + "fast-deep-equal": "^3.1.3" + }, + "peerDependencies": { + "ajv": "^8.8.2" + } + }, + "node_modules/webpack-dev-middleware/node_modules/json-schema-traverse": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", + "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==" + }, + "node_modules/webpack-dev-middleware/node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/webpack-dev-middleware/node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/webpack-dev-middleware/node_modules/range-parser": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", + "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/webpack-dev-middleware/node_modules/schema-utils": { + "version": "4.1.0", + "resolved": 
"https://registry.npmjs.org/schema-utils/-/schema-utils-4.1.0.tgz", + "integrity": "sha512-Jw+GZVbP5IggB2WAn6UHI02LBwGmsIeYN/lNbSMZyDziQ7jmtAUrqKqDja+W89YHVs+KL/3IkIMltAklqB1vAw==", + "dependencies": { + "@types/json-schema": "^7.0.9", + "ajv": "^8.9.0", + "ajv-formats": "^2.1.1", + "ajv-keywords": "^5.1.0" + }, + "engines": { + "node": ">= 12.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/webpack-dev-server": { + "version": "4.15.0", + "resolved": "https://registry.npmjs.org/webpack-dev-server/-/webpack-dev-server-4.15.0.tgz", + "integrity": "sha512-HmNB5QeSl1KpulTBQ8UT4FPrByYyaLxpJoQ0+s7EvUrMc16m0ZS1sgb1XGqzmgCPk0c9y+aaXxn11tbLzuM7NQ==", + "dependencies": { + "@types/bonjour": "^3.5.9", + "@types/connect-history-api-fallback": "^1.3.5", + "@types/express": "^4.17.13", + "@types/serve-index": "^1.9.1", + "@types/serve-static": "^1.13.10", + "@types/sockjs": "^0.3.33", + "@types/ws": "^8.5.1", + "ansi-html-community": "^0.0.8", + "bonjour-service": "^1.0.11", + "chokidar": "^3.5.3", + "colorette": "^2.0.10", + "compression": "^1.7.4", + "connect-history-api-fallback": "^2.0.0", + "default-gateway": "^6.0.3", + "express": "^4.17.3", + "graceful-fs": "^4.2.6", + "html-entities": "^2.3.2", + "http-proxy-middleware": "^2.0.3", + "ipaddr.js": "^2.0.1", + "launch-editor": "^2.6.0", + "open": "^8.0.9", + "p-retry": "^4.5.0", + "rimraf": "^3.0.2", + "schema-utils": "^4.0.0", + "selfsigned": "^2.1.1", + "serve-index": "^1.9.1", + "sockjs": "^0.3.24", + "spdy": "^4.0.2", + "webpack-dev-middleware": "^5.3.1", + "ws": "^8.13.0" + }, + "bin": { + "webpack-dev-server": "bin/webpack-dev-server.js" + }, + "engines": { + "node": ">= 12.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^4.37.0 || ^5.0.0" + }, + "peerDependenciesMeta": { + "webpack": { + "optional": true + }, + "webpack-cli": { + "optional": true + 
} + } + }, + "node_modules/webpack-dev-server/node_modules/ajv": { + "version": "8.12.0", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.12.0.tgz", + "integrity": "sha512-sRu1kpcO9yLtYxBKvqfTeh9KzZEwO3STyX1HT+4CaDzC6HpTGYhIhPIzj9XuKU7KYDwnaeh5hcOwjy1QuJzBPA==", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "json-schema-traverse": "^1.0.0", + "require-from-string": "^2.0.2", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/webpack-dev-server/node_modules/ajv-keywords": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-5.1.0.tgz", + "integrity": "sha512-YCS/JNFAUyr5vAuhk1DWm1CBxRHW9LbJ2ozWeemrIqpbsqKjHVxYPyi5GC0rjZIT5JxJ3virVTS8wk4i/Z+krw==", + "dependencies": { + "fast-deep-equal": "^3.1.3" + }, + "peerDependencies": { + "ajv": "^8.8.2" + } + }, + "node_modules/webpack-dev-server/node_modules/json-schema-traverse": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", + "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==" + }, + "node_modules/webpack-dev-server/node_modules/schema-utils": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.1.0.tgz", + "integrity": "sha512-Jw+GZVbP5IggB2WAn6UHI02LBwGmsIeYN/lNbSMZyDziQ7jmtAUrqKqDja+W89YHVs+KL/3IkIMltAklqB1vAw==", + "dependencies": { + "@types/json-schema": "^7.0.9", + "ajv": "^8.9.0", + "ajv-formats": "^2.1.1", + "ajv-keywords": "^5.1.0" + }, + "engines": { + "node": ">= 12.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/webpack-dev-server/node_modules/ws": { + "version": "8.13.0", + "resolved": "https://registry.npmjs.org/ws/-/ws-8.13.0.tgz", + "integrity": 
"sha512-x9vcZYTrFPC7aSIbj7sRCYo7L/Xb8Iy+pW0ng0wt2vCJv7M9HOMy0UoN3rr+IFC7hb7vXoqS+P9ktyLLLhO+LA==", + "engines": { + "node": ">=10.0.0" + }, + "peerDependencies": { + "bufferutil": "^4.0.1", + "utf-8-validate": ">=5.0.2" + }, + "peerDependenciesMeta": { + "bufferutil": { + "optional": true + }, + "utf-8-validate": { + "optional": true + } + } + }, + "node_modules/webpack-merge": { + "version": "5.9.0", + "resolved": "https://registry.npmjs.org/webpack-merge/-/webpack-merge-5.9.0.tgz", + "integrity": "sha512-6NbRQw4+Sy50vYNTw7EyOn41OZItPiXB8GNv3INSoe3PSFaHJEz3SHTrYVaRm2LilNGnFUzh0FAwqPEmU/CwDg==", + "dependencies": { + "clone-deep": "^4.0.1", + "wildcard": "^2.0.0" + }, + "engines": { + "node": ">=10.0.0" + } + }, + "node_modules/webpack-sources": { + "version": "3.2.3", + "resolved": "https://registry.npmjs.org/webpack-sources/-/webpack-sources-3.2.3.tgz", + "integrity": "sha512-/DyMEOrDgLKKIG0fmvtz+4dUX/3Ghozwgm6iPp8KRhvn+eQf9+Q7GWxVNMk3+uCPWfdXYC4ExGBckIXdFEfH1w==", + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/webpack/node_modules/glob-to-regexp": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/glob-to-regexp/-/glob-to-regexp-0.4.1.tgz", + "integrity": "sha512-lkX1HJXwyMcprw/5YUZc2s7DrpAiHB21/V+E1rHUrVNokkvB6bqMzT0VfV6/86ZNabt1k14YOIaT7nDvOX3Iiw==" + }, + "node_modules/webpack/node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/webpack/node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + 
"node_modules/webpack/node_modules/schema-utils": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.2.0.tgz", + "integrity": "sha512-0zTyLGyDJYd/MBxG1AhJkKa6fpEBds4OQO2ut0w7OYG+ZGhGea09lijvzsqegYSik88zc7cUtIlnnO+/BvD6gQ==", + "dependencies": { + "@types/json-schema": "^7.0.8", + "ajv": "^6.12.5", + "ajv-keywords": "^3.5.2" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/webpackbar": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/webpackbar/-/webpackbar-5.0.2.tgz", + "integrity": "sha512-BmFJo7veBDgQzfWXl/wwYXr/VFus0614qZ8i9znqcl9fnEdiVkdbi0TedLQ6xAK92HZHDJ0QmyQ0fmuZPAgCYQ==", + "dependencies": { + "chalk": "^4.1.0", + "consola": "^2.15.3", + "pretty-time": "^1.1.0", + "std-env": "^3.0.1" + }, + "engines": { + "node": ">=12" + }, + "peerDependencies": { + "webpack": "3 || 4 || 5" + } + }, + "node_modules/websocket-driver": { + "version": "0.7.4", + "resolved": "https://registry.npmjs.org/websocket-driver/-/websocket-driver-0.7.4.tgz", + "integrity": "sha512-b17KeDIQVjvb0ssuSDF2cYXSg2iztliJ4B9WdsuB6J952qCPKmnVq4DyW5motImXHDC1cBT/1UezrJVsKw5zjg==", + "dependencies": { + "http-parser-js": ">=0.5.1", + "safe-buffer": ">=5.1.0", + "websocket-extensions": ">=0.1.1" + }, + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/websocket-extensions": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/websocket-extensions/-/websocket-extensions-0.1.4.tgz", + "integrity": "sha512-OqedPIGOfsDlo31UNwYbCFMSaO9m9G/0faIHj5/dZFDMFqPTcx6UwqyOy3COEaEOg/9VsGIpdqn62W5KhoKSpg==", + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dependencies": { + "isexe": "^2.0.0" + }, 
+ "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/widest-line": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/widest-line/-/widest-line-4.0.1.tgz", + "integrity": "sha512-o0cyEG0e8GPzT4iGHphIOh0cJOV8fivsXxddQasHPHfoZf1ZexrfeA21w2NaEN1RHE+fXlfISmOE8R9N3u3Qig==", + "dependencies": { + "string-width": "^5.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/wildcard": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/wildcard/-/wildcard-2.0.1.tgz", + "integrity": "sha512-CC1bOL87PIWSBhDcTrdeLo6eGT7mCFtrg0uIJtqJUFyK+eJnzl8A1niH56uu7KMa5XFrtiV+AQuHO3n7DsHnLQ==" + }, + "node_modules/wordwrap": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/wordwrap/-/wordwrap-1.0.0.tgz", + "integrity": "sha512-gvVzJFlPycKc5dZN4yPkP8w7Dc37BtP1yczEneOb4uq34pXZcvrtRTmWV8W+Ume+XCxKgbjM+nevkyFPMybd4Q==" + }, + "node_modules/wrap-ansi": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz", + "integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==", + "dependencies": { + "ansi-styles": "^6.1.0", + "string-width": "^5.0.1", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/wrap-ansi/node_modules/ansi-regex": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.0.1.tgz", + "integrity": "sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-regex?sponsor=1" + } + }, + "node_modules/wrap-ansi/node_modules/ansi-styles": { + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.1.tgz", + 
"integrity": "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/wrap-ansi/node_modules/strip-ansi": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz", + "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==", + "dependencies": { + "ansi-regex": "^6.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/strip-ansi?sponsor=1" + } + }, + "node_modules/wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==" + }, + "node_modules/write-file-atomic": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-3.0.3.tgz", + "integrity": "sha512-AvHcyZ5JnSfq3ioSyjrBkH9yW4m7Ayk8/9My/DD9onKeu/94fwrMocemO2QAJFAlnnDN+ZDS+ZjAR5ua1/PV/Q==", + "dependencies": { + "imurmurhash": "^0.1.4", + "is-typedarray": "^1.0.0", + "signal-exit": "^3.0.2", + "typedarray-to-buffer": "^3.1.5" + } + }, + "node_modules/ws": { + "version": "7.5.9", + "resolved": "https://registry.npmjs.org/ws/-/ws-7.5.9.tgz", + "integrity": "sha512-F+P9Jil7UiSKSkppIiD94dN07AwvFixvLIj1Og1Rl9GGMuNipJnV9JzjD6XuqmAeiswGvUmNLjr5cFuXwNS77Q==", + "engines": { + "node": ">=8.3.0" + }, + "peerDependencies": { + "bufferutil": "^4.0.1", + "utf-8-validate": "^5.0.2" + }, + "peerDependenciesMeta": { + "bufferutil": { + "optional": true + }, + "utf-8-validate": { + "optional": true + } + } + }, + "node_modules/xdg-basedir": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/xdg-basedir/-/xdg-basedir-4.0.0.tgz", + "integrity": 
"sha512-PSNhEJDejZYV7h50BohL09Er9VaIefr2LMAf3OEmpCkjOi34eYyQYAXUTjEQtZJTKcF0E2UKTh+osDLsgNim9Q==", + "engines": { + "node": ">=8" + } + }, + "node_modules/xtend": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz", + "integrity": "sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==", + "engines": { + "node": ">=0.4" + } + }, + "node_modules/yallist": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz", + "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==" + }, + "node_modules/yaml": { + "version": "1.10.2", + "resolved": "https://registry.npmjs.org/yaml/-/yaml-1.10.2.tgz", + "integrity": "sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg==", + "engines": { + "node": ">= 6" + } + }, + "node_modules/yocto-queue": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", + "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/yup": { + "version": "0.32.11", + "resolved": "https://registry.npmjs.org/yup/-/yup-0.32.11.tgz", + "integrity": "sha512-Z2Fe1bn+eLstG8DRR6FTavGD+MeAwyfmouhHsIUgaADz8jvFKbO/fXc2trJKZg+5EBjh4gGm3iU/t3onKlXHIg==", + "dependencies": { + "@babel/runtime": "^7.15.4", + "@types/lodash": "^4.14.175", + "lodash": "^4.17.21", + "lodash-es": "^4.17.21", + "nanoclone": "^0.2.1", + "property-expr": "^2.0.4", + "toposort": "^2.0.2" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/zwitch": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/zwitch/-/zwitch-1.0.5.tgz", + "integrity": "sha512-V50KMwwzqJV0NpZIZFwfOD5/lyny3WlSzRiXgA0G7VUnRlqttta1L6UQIHzd6EuBY/cHGfwTIck7w1yH6Q5zUw==", 
+ "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + } + }, + "dependencies": { + "@ampproject/remapping": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.2.1.tgz", + "integrity": "sha512-lFMjJTrFL3j7L9yBxwYfCq2k6qqwHyzuUl/XBnif78PWTJYyL/dfowQHWE3sp6U6ZzqWiiIZnpTMO96zhkjwtg==", + "requires": { + "@jridgewell/gen-mapping": "^0.3.0", + "@jridgewell/trace-mapping": "^0.3.9" + } + }, + "@babel/code-frame": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.22.5.tgz", + "integrity": "sha512-Xmwn266vad+6DAqEB2A6V/CcZVp62BbwVmcOJc2RPuwih1kw02TjQvWVWlcKGbBPd+8/0V5DEkOcizRGYsspYQ==", + "requires": { + "@babel/highlight": "^7.22.5" + } + }, + "@babel/compat-data": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.22.5.tgz", + "integrity": "sha512-4Jc/YuIaYqKnDDz892kPIledykKg12Aw1PYX5i/TY28anJtacvM1Rrr8wbieB9GfEJwlzqT0hUEao0CxEebiDA==" + }, + "@babel/core": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.22.5.tgz", + "integrity": "sha512-SBuTAjg91A3eKOvD+bPEz3LlhHZRNu1nFOVts9lzDJTXshHTjII0BAtDS3Y2DAkdZdDKWVZGVwkDfc4Clxn1dg==", + "requires": { + "@ampproject/remapping": "^2.2.0", + "@babel/code-frame": "^7.22.5", + "@babel/generator": "^7.22.5", + "@babel/helper-compilation-targets": "^7.22.5", + "@babel/helper-module-transforms": "^7.22.5", + "@babel/helpers": "^7.22.5", + "@babel/parser": "^7.22.5", + "@babel/template": "^7.22.5", + "@babel/traverse": "^7.22.5", + "@babel/types": "^7.22.5", + "convert-source-map": "^1.7.0", + "debug": "^4.1.0", + "gensync": "^1.0.0-beta.2", + "json5": "^2.2.2", + "semver": "^6.3.0" + }, + "dependencies": { + "semver": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", + "integrity": 
"sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==" + } + } + }, + "@babel/generator": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.22.5.tgz", + "integrity": "sha512-+lcUbnTRhd0jOewtFSedLyiPsD5tswKkbgcezOqqWFUVNEwoUTlpPOBmvhG7OXWLR4jMdv0czPGH5XbflnD1EA==", + "requires": { + "@babel/types": "^7.22.5", + "@jridgewell/gen-mapping": "^0.3.2", + "@jridgewell/trace-mapping": "^0.3.17", + "jsesc": "^2.5.1" + } + }, + "@babel/helper-annotate-as-pure": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.22.5.tgz", + "integrity": "sha512-LvBTxu8bQSQkcyKOU+a1btnNFQ1dMAd0R6PyW3arXes06F6QLWLIrd681bxRPIXlrMGR3XYnW9JyML7dP3qgxg==", + "requires": { + "@babel/types": "^7.22.5" + } + }, + "@babel/helper-builder-binary-assignment-operator-visitor": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-builder-binary-assignment-operator-visitor/-/helper-builder-binary-assignment-operator-visitor-7.22.5.tgz", + "integrity": "sha512-m1EP3lVOPptR+2DwD125gziZNcmoNSHGmJROKoy87loWUQyJaVXDgpmruWqDARZSmtYQ+Dl25okU8+qhVzuykw==", + "requires": { + "@babel/types": "^7.22.5" + } + }, + "@babel/helper-compilation-targets": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.22.5.tgz", + "integrity": "sha512-Ji+ywpHeuqxB8WDxraCiqR0xfhYjiDE/e6k7FuIaANnoOFxAHskHChz4vA1mJC9Lbm01s1PVAGhQY4FUKSkGZw==", + "requires": { + "@babel/compat-data": "^7.22.5", + "@babel/helper-validator-option": "^7.22.5", + "browserslist": "^4.21.3", + "lru-cache": "^5.1.1", + "semver": "^6.3.0" + }, + "dependencies": { + "semver": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", + "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==" + } + } + }, + 
"@babel/helper-create-class-features-plugin": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.22.5.tgz", + "integrity": "sha512-xkb58MyOYIslxu3gKmVXmjTtUPvBU4odYzbiIQbWwLKIHCsx6UGZGX6F1IznMFVnDdirseUZopzN+ZRt8Xb33Q==", + "requires": { + "@babel/helper-annotate-as-pure": "^7.22.5", + "@babel/helper-environment-visitor": "^7.22.5", + "@babel/helper-function-name": "^7.22.5", + "@babel/helper-member-expression-to-functions": "^7.22.5", + "@babel/helper-optimise-call-expression": "^7.22.5", + "@babel/helper-replace-supers": "^7.22.5", + "@babel/helper-skip-transparent-expression-wrappers": "^7.22.5", + "@babel/helper-split-export-declaration": "^7.22.5", + "semver": "^6.3.0" + }, + "dependencies": { + "semver": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", + "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==" + } + } + }, + "@babel/helper-create-regexp-features-plugin": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-create-regexp-features-plugin/-/helper-create-regexp-features-plugin-7.22.5.tgz", + "integrity": "sha512-1VpEFOIbMRaXyDeUwUfmTIxExLwQ+zkW+Bh5zXpApA3oQedBx9v/updixWxnx/bZpKw7u8VxWjb/qWpIcmPq8A==", + "requires": { + "@babel/helper-annotate-as-pure": "^7.22.5", + "regexpu-core": "^5.3.1", + "semver": "^6.3.0" + }, + "dependencies": { + "semver": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", + "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==" + } + } + }, + "@babel/helper-define-polyfill-provider": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/@babel/helper-define-polyfill-provider/-/helper-define-polyfill-provider-0.4.0.tgz", + "integrity": 
"sha512-RnanLx5ETe6aybRi1cO/edaRH+bNYWaryCEmjDDYyNr4wnSzyOp8T0dWipmqVHKEY3AbVKUom50AKSlj1zmKbg==", + "requires": { + "@babel/helper-compilation-targets": "^7.17.7", + "@babel/helper-plugin-utils": "^7.16.7", + "debug": "^4.1.1", + "lodash.debounce": "^4.0.8", + "resolve": "^1.14.2", + "semver": "^6.1.2" + }, + "dependencies": { + "semver": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", + "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==" + } + } + }, + "@babel/helper-environment-visitor": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.22.5.tgz", + "integrity": "sha512-XGmhECfVA/5sAt+H+xpSg0mfrHq6FzNr9Oxh7PSEBBRUb/mL7Kz3NICXb194rCqAEdxkhPT1a88teizAFyvk8Q==" + }, + "@babel/helper-function-name": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.22.5.tgz", + "integrity": "sha512-wtHSq6jMRE3uF2otvfuD3DIvVhOsSNshQl0Qrd7qC9oQJzHvOL4qQXlQn2916+CXGywIjpGuIkoyZRRxHPiNQQ==", + "requires": { + "@babel/template": "^7.22.5", + "@babel/types": "^7.22.5" + } + }, + "@babel/helper-hoist-variables": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.22.5.tgz", + "integrity": "sha512-wGjk9QZVzvknA6yKIUURb8zY3grXCcOZt+/7Wcy8O2uctxhplmUPkOdlgoNhmdVee2c92JXbf1xpMtVNbfoxRw==", + "requires": { + "@babel/types": "^7.22.5" + } + }, + "@babel/helper-member-expression-to-functions": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.22.5.tgz", + "integrity": "sha512-aBiH1NKMG0H2cGZqspNvsaBe6wNGjbJjuLy29aU+eDZjSbbN53BaxlpB02xm9v34pLTZ1nIQPFYn2qMZoa5BQQ==", + "requires": { + "@babel/types": "^7.22.5" + } + }, + "@babel/helper-module-imports": { + "version": 
"7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.22.5.tgz", + "integrity": "sha512-8Dl6+HD/cKifutF5qGd/8ZJi84QeAKh+CEe1sBzz8UayBBGg1dAIJrdHOcOM5b2MpzWL2yuotJTtGjETq0qjXg==", + "requires": { + "@babel/types": "^7.22.5" + } + }, + "@babel/helper-module-transforms": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.22.5.tgz", + "integrity": "sha512-+hGKDt/Ze8GFExiVHno/2dvG5IdstpzCq0y4Qc9OJ25D4q3pKfiIP/4Vp3/JvhDkLKsDK2api3q3fpIgiIF5bw==", + "requires": { + "@babel/helper-environment-visitor": "^7.22.5", + "@babel/helper-module-imports": "^7.22.5", + "@babel/helper-simple-access": "^7.22.5", + "@babel/helper-split-export-declaration": "^7.22.5", + "@babel/helper-validator-identifier": "^7.22.5", + "@babel/template": "^7.22.5", + "@babel/traverse": "^7.22.5", + "@babel/types": "^7.22.5" + } + }, + "@babel/helper-optimise-call-expression": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.22.5.tgz", + "integrity": "sha512-HBwaojN0xFRx4yIvpwGqxiV2tUfl7401jlok564NgB9EHS1y6QT17FmKWm4ztqjeVdXLuC4fSvHc5ePpQjoTbw==", + "requires": { + "@babel/types": "^7.22.5" + } + }, + "@babel/helper-plugin-utils": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.22.5.tgz", + "integrity": "sha512-uLls06UVKgFG9QD4OeFYLEGteMIAa5kpTPcFL28yuCIIzsf6ZyKZMllKVOCZFhiZ5ptnwX4mtKdWCBE/uT4amg==" + }, + "@babel/helper-remap-async-to-generator": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-remap-async-to-generator/-/helper-remap-async-to-generator-7.22.5.tgz", + "integrity": "sha512-cU0Sq1Rf4Z55fgz7haOakIyM7+x/uCFwXpLPaeRzfoUtAEAuUZjZvFPjL/rk5rW693dIgn2hng1W7xbT7lWT4g==", + "requires": { + "@babel/helper-annotate-as-pure": "^7.22.5", + "@babel/helper-environment-visitor": 
"^7.22.5", + "@babel/helper-wrap-function": "^7.22.5", + "@babel/types": "^7.22.5" + } + }, + "@babel/helper-replace-supers": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-replace-supers/-/helper-replace-supers-7.22.5.tgz", + "integrity": "sha512-aLdNM5I3kdI/V9xGNyKSF3X/gTyMUBohTZ+/3QdQKAA9vxIiy12E+8E2HoOP1/DjeqU+g6as35QHJNMDDYpuCg==", + "requires": { + "@babel/helper-environment-visitor": "^7.22.5", + "@babel/helper-member-expression-to-functions": "^7.22.5", + "@babel/helper-optimise-call-expression": "^7.22.5", + "@babel/template": "^7.22.5", + "@babel/traverse": "^7.22.5", + "@babel/types": "^7.22.5" + } + }, + "@babel/helper-simple-access": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-simple-access/-/helper-simple-access-7.22.5.tgz", + "integrity": "sha512-n0H99E/K+Bika3++WNL17POvo4rKWZ7lZEp1Q+fStVbUi8nxPQEBOlTmCOxW/0JsS56SKKQ+ojAe2pHKJHN35w==", + "requires": { + "@babel/types": "^7.22.5" + } + }, + "@babel/helper-skip-transparent-expression-wrappers": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-skip-transparent-expression-wrappers/-/helper-skip-transparent-expression-wrappers-7.22.5.tgz", + "integrity": "sha512-tK14r66JZKiC43p8Ki33yLBVJKlQDFoA8GYN67lWCDCqoL6EMMSuM9b+Iff2jHaM/RRFYl7K+iiru7hbRqNx8Q==", + "requires": { + "@babel/types": "^7.22.5" + } + }, + "@babel/helper-split-export-declaration": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.22.5.tgz", + "integrity": "sha512-thqK5QFghPKWLhAV321lxF95yCg2K3Ob5yw+M3VHWfdia0IkPXUtoLH8x/6Fh486QUvzhb8YOWHChTVen2/PoQ==", + "requires": { + "@babel/types": "^7.22.5" + } + }, + "@babel/helper-string-parser": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.22.5.tgz", + "integrity": 
"sha512-mM4COjgZox8U+JcXQwPijIZLElkgEpO5rsERVDJTc2qfCDfERyob6k5WegS14SX18IIjv+XD+GrqNumY5JRCDw==" + }, + "@babel/helper-validator-identifier": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.22.5.tgz", + "integrity": "sha512-aJXu+6lErq8ltp+JhkJUfk1MTGyuA4v7f3pA+BJ5HLfNC6nAQ0Cpi9uOquUj8Hehg0aUiHzWQbOVJGao6ztBAQ==" + }, + "@babel/helper-validator-option": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.22.5.tgz", + "integrity": "sha512-R3oB6xlIVKUnxNUxbmgq7pKjxpru24zlimpE8WK47fACIlM0II/Hm1RS8IaOI7NgCr6LNS+jl5l75m20npAziw==" + }, + "@babel/helper-wrap-function": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-wrap-function/-/helper-wrap-function-7.22.5.tgz", + "integrity": "sha512-bYqLIBSEshYcYQyfks8ewYA8S30yaGSeRslcvKMvoUk6HHPySbxHq9YRi6ghhzEU+yhQv9bP/jXnygkStOcqZw==", + "requires": { + "@babel/helper-function-name": "^7.22.5", + "@babel/template": "^7.22.5", + "@babel/traverse": "^7.22.5", + "@babel/types": "^7.22.5" + } + }, + "@babel/helpers": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.22.5.tgz", + "integrity": "sha512-pSXRmfE1vzcUIDFQcSGA5Mr+GxBV9oiRKDuDxXvWQQBCh8HoIjs/2DlDB7H8smac1IVrB9/xdXj2N3Wol9Cr+Q==", + "requires": { + "@babel/template": "^7.22.5", + "@babel/traverse": "^7.22.5", + "@babel/types": "^7.22.5" + } + }, + "@babel/highlight": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.22.5.tgz", + "integrity": "sha512-BSKlD1hgnedS5XRnGOljZawtag7H1yPfQp0tdNJCHoH6AZ+Pcm9VvkrK59/Yy593Ypg0zMxH2BxD1VPYUQ7UIw==", + "requires": { + "@babel/helper-validator-identifier": "^7.22.5", + "chalk": "^2.0.0", + "js-tokens": "^4.0.0" + }, + "dependencies": { + "ansi-styles": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", + 
"integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "requires": { + "color-convert": "^1.9.0" + } + }, + "chalk": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", + "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", + "requires": { + "ansi-styles": "^3.2.1", + "escape-string-regexp": "^1.0.5", + "supports-color": "^5.3.0" + } + }, + "color-convert": { + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", + "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", + "requires": { + "color-name": "1.1.3" + } + }, + "color-name": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", + "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==" + }, + "escape-string-regexp": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", + "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==" + }, + "has-flag": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", + "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==" + }, + "supports-color": { + "version": "5.5.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", + "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", + "requires": { + "has-flag": "^3.0.0" + } + } + } + }, + "@babel/parser": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.22.5.tgz", + "integrity": 
"sha512-DFZMC9LJUG9PLOclRC32G63UXwzqS2koQC8dkx+PLdmt1xSePYpbT/NbsrJy8Q/muXz7o/h/d4A7Fuyixm559Q==" + }, + "@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression/-/plugin-bugfix-safari-id-destructuring-collision-in-function-expression-7.22.5.tgz", + "integrity": "sha512-NP1M5Rf+u2Gw9qfSO4ihjcTGW5zXTi36ITLd4/EoAcEhIZ0yjMqmftDNl3QC19CX7olhrjpyU454g/2W7X0jvQ==", + "requires": { + "@babel/helper-plugin-utils": "^7.22.5" + } + }, + "@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining/-/plugin-bugfix-v8-spread-parameters-in-optional-chaining-7.22.5.tgz", + "integrity": "sha512-31Bb65aZaUwqCbWMnZPduIZxCBngHFlzyN6Dq6KAJjtx+lx6ohKHubc61OomYi7XwVD4Ol0XCVz4h+pYFR048g==", + "requires": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/helper-skip-transparent-expression-wrappers": "^7.22.5", + "@babel/plugin-transform-optional-chaining": "^7.22.5" + } + }, + "@babel/plugin-proposal-object-rest-spread": { + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-object-rest-spread/-/plugin-proposal-object-rest-spread-7.12.1.tgz", + "integrity": "sha512-s6SowJIjzlhx8o7lsFx5zmY4At6CTtDvgNQDdPzkBQucle58A6b/TTeEBYtyDgmcXjUTM+vE8YOGHZzzbc/ioA==", + "requires": { + "@babel/helper-plugin-utils": "^7.10.4", + "@babel/plugin-syntax-object-rest-spread": "^7.8.0", + "@babel/plugin-transform-parameters": "^7.12.1" + } + }, + "@babel/plugin-proposal-private-property-in-object": { + "version": "7.21.0-placeholder-for-preset-env.2", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-private-property-in-object/-/plugin-proposal-private-property-in-object-7.21.0-placeholder-for-preset-env.2.tgz", + "integrity": 
"sha512-SOSkfJDddaM7mak6cPEpswyTRnuRltl429hMraQEglW+OkovnCzsiszTmsrlY//qLFjCpQDFRvjdm2wA5pPm9w==", + "requires": {} + }, + "@babel/plugin-proposal-unicode-property-regex": { + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-unicode-property-regex/-/plugin-proposal-unicode-property-regex-7.18.6.tgz", + "integrity": "sha512-2BShG/d5yoZyXZfVePH91urL5wTG6ASZU9M4o03lKK8u8UW1y08OMttBSOADTcJrnPMpvDXRG3G8fyLh4ovs8w==", + "requires": { + "@babel/helper-create-regexp-features-plugin": "^7.18.6", + "@babel/helper-plugin-utils": "^7.18.6" + } + }, + "@babel/plugin-syntax-async-generators": { + "version": "7.8.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-async-generators/-/plugin-syntax-async-generators-7.8.4.tgz", + "integrity": "sha512-tycmZxkGfZaxhMRbXlPXuVFpdWlXpir2W4AMhSJgRKzk/eDlIXOhb2LHWoLpDF7TEHylV5zNhykX6KAgHJmTNw==", + "requires": { + "@babel/helper-plugin-utils": "^7.8.0" + } + }, + "@babel/plugin-syntax-class-properties": { + "version": "7.12.13", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-properties/-/plugin-syntax-class-properties-7.12.13.tgz", + "integrity": "sha512-fm4idjKla0YahUNgFNLCB0qySdsoPiZP3iQE3rky0mBUtMZ23yDJ9SJdg6dXTSDnulOVqiF3Hgr9nbXvXTQZYA==", + "requires": { + "@babel/helper-plugin-utils": "^7.12.13" + } + }, + "@babel/plugin-syntax-class-static-block": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-static-block/-/plugin-syntax-class-static-block-7.14.5.tgz", + "integrity": "sha512-b+YyPmr6ldyNnM6sqYeMWE+bgJcJpO6yS4QD7ymxgH34GBPNDM/THBh8iunyvKIZztiwLH4CJZ0RxTk9emgpjw==", + "requires": { + "@babel/helper-plugin-utils": "^7.14.5" + } + }, + "@babel/plugin-syntax-dynamic-import": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-dynamic-import/-/plugin-syntax-dynamic-import-7.8.3.tgz", + "integrity": 
"sha512-5gdGbFon+PszYzqs83S3E5mpi7/y/8M9eC90MRTZfduQOYW76ig6SOSPNe41IG5LoP3FGBn2N0RjVDSQiS94kQ==", + "requires": { + "@babel/helper-plugin-utils": "^7.8.0" + } + }, + "@babel/plugin-syntax-export-namespace-from": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-export-namespace-from/-/plugin-syntax-export-namespace-from-7.8.3.tgz", + "integrity": "sha512-MXf5laXo6c1IbEbegDmzGPwGNTsHZmEy6QGznu5Sh2UCWvueywb2ee+CCE4zQiZstxU9BMoQO9i6zUFSY0Kj0Q==", + "requires": { + "@babel/helper-plugin-utils": "^7.8.3" + } + }, + "@babel/plugin-syntax-import-assertions": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-assertions/-/plugin-syntax-import-assertions-7.22.5.tgz", + "integrity": "sha512-rdV97N7KqsRzeNGoWUOK6yUsWarLjE5Su/Snk9IYPU9CwkWHs4t+rTGOvffTR8XGkJMTAdLfO0xVnXm8wugIJg==", + "requires": { + "@babel/helper-plugin-utils": "^7.22.5" + } + }, + "@babel/plugin-syntax-import-attributes": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-attributes/-/plugin-syntax-import-attributes-7.22.5.tgz", + "integrity": "sha512-KwvoWDeNKPETmozyFE0P2rOLqh39EoQHNjqizrI5B8Vt0ZNS7M56s7dAiAqbYfiAYOuIzIh96z3iR2ktgu3tEg==", + "requires": { + "@babel/helper-plugin-utils": "^7.22.5" + } + }, + "@babel/plugin-syntax-import-meta": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-meta/-/plugin-syntax-import-meta-7.10.4.tgz", + "integrity": "sha512-Yqfm+XDx0+Prh3VSeEQCPU81yC+JWZ2pDPFSS4ZdpfZhp4MkFMaDC1UqseovEKwSUpnIL7+vK+Clp7bfh0iD7g==", + "requires": { + "@babel/helper-plugin-utils": "^7.10.4" + } + }, + "@babel/plugin-syntax-json-strings": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-json-strings/-/plugin-syntax-json-strings-7.8.3.tgz", + "integrity": "sha512-lY6kdGpWHvjoe2vk4WrAapEuBR69EMxZl+RoGRhrFGNYVK8mOPAW8VfbT/ZgrFbXlDNiiaxQnAtgVCZ6jv30EA==", + "requires": { + 
"@babel/helper-plugin-utils": "^7.8.0" + } + }, + "@babel/plugin-syntax-jsx": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.22.5.tgz", + "integrity": "sha512-gvyP4hZrgrs/wWMaocvxZ44Hw0b3W8Pe+cMxc8V1ULQ07oh8VNbIRaoD1LRZVTvD+0nieDKjfgKg89sD7rrKrg==", + "requires": { + "@babel/helper-plugin-utils": "^7.22.5" + } + }, + "@babel/plugin-syntax-logical-assignment-operators": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-logical-assignment-operators/-/plugin-syntax-logical-assignment-operators-7.10.4.tgz", + "integrity": "sha512-d8waShlpFDinQ5MtvGU9xDAOzKH47+FFoney2baFIoMr952hKOLp1HR7VszoZvOsV/4+RRszNY7D17ba0te0ig==", + "requires": { + "@babel/helper-plugin-utils": "^7.10.4" + } + }, + "@babel/plugin-syntax-nullish-coalescing-operator": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-nullish-coalescing-operator/-/plugin-syntax-nullish-coalescing-operator-7.8.3.tgz", + "integrity": "sha512-aSff4zPII1u2QD7y+F8oDsz19ew4IGEJg9SVW+bqwpwtfFleiQDMdzA/R+UlWDzfnHFCxxleFT0PMIrR36XLNQ==", + "requires": { + "@babel/helper-plugin-utils": "^7.8.0" + } + }, + "@babel/plugin-syntax-numeric-separator": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-numeric-separator/-/plugin-syntax-numeric-separator-7.10.4.tgz", + "integrity": "sha512-9H6YdfkcK/uOnY/K7/aA2xpzaAgkQn37yzWUMRK7OaPOqOpGS1+n0H5hxT9AUw9EsSjPW8SVyMJwYRtWs3X3ug==", + "requires": { + "@babel/helper-plugin-utils": "^7.10.4" + } + }, + "@babel/plugin-syntax-object-rest-spread": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-object-rest-spread/-/plugin-syntax-object-rest-spread-7.8.3.tgz", + "integrity": "sha512-XoqMijGZb9y3y2XskN+P1wUGiVwWZ5JmoDRwx5+3GmEplNyVM2s2Dg8ILFQm8rWM48orGy5YpI5Bl8U1y7ydlA==", + "requires": { + "@babel/helper-plugin-utils": "^7.8.0" + } + }, + 
"@babel/plugin-syntax-optional-catch-binding": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-catch-binding/-/plugin-syntax-optional-catch-binding-7.8.3.tgz", + "integrity": "sha512-6VPD0Pc1lpTqw0aKoeRTMiB+kWhAoT24PA+ksWSBrFtl5SIRVpZlwN3NNPQjehA2E/91FV3RjLWoVTglWcSV3Q==", + "requires": { + "@babel/helper-plugin-utils": "^7.8.0" + } + }, + "@babel/plugin-syntax-optional-chaining": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-chaining/-/plugin-syntax-optional-chaining-7.8.3.tgz", + "integrity": "sha512-KoK9ErH1MBlCPxV0VANkXW2/dw4vlbGDrFgz8bmUsBGYkFRcbRwMh6cIJubdPrkxRwuGdtCk0v/wPTKbQgBjkg==", + "requires": { + "@babel/helper-plugin-utils": "^7.8.0" + } + }, + "@babel/plugin-syntax-private-property-in-object": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-private-property-in-object/-/plugin-syntax-private-property-in-object-7.14.5.tgz", + "integrity": "sha512-0wVnp9dxJ72ZUJDV27ZfbSj6iHLoytYZmh3rFcxNnvsJF3ktkzLDZPy/mA17HGsaQT3/DQsWYX1f1QGWkCoVUg==", + "requires": { + "@babel/helper-plugin-utils": "^7.14.5" + } + }, + "@babel/plugin-syntax-top-level-await": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-top-level-await/-/plugin-syntax-top-level-await-7.14.5.tgz", + "integrity": "sha512-hx++upLv5U1rgYfwe1xBQUhRmU41NEvpUvrp8jkrSCdvGSnM5/qdRMtylJ6PG5OFkBaHkbTAKTnd3/YyESRHFw==", + "requires": { + "@babel/helper-plugin-utils": "^7.14.5" + } + }, + "@babel/plugin-syntax-typescript": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.22.5.tgz", + "integrity": "sha512-1mS2o03i7t1c6VzH6fdQ3OA8tcEIxwG18zIPRp+UY1Ihv6W+XZzBCVxExF9upussPXJ0xE9XRHwMoNs1ep/nRQ==", + "requires": { + "@babel/helper-plugin-utils": "^7.22.5" + } + }, + "@babel/plugin-syntax-unicode-sets-regex": { + "version": "7.18.6", + "resolved": 
"https://registry.npmjs.org/@babel/plugin-syntax-unicode-sets-regex/-/plugin-syntax-unicode-sets-regex-7.18.6.tgz", + "integrity": "sha512-727YkEAPwSIQTv5im8QHz3upqp92JTWhidIC81Tdx4VJYIte/VndKf1qKrfnnhPLiPghStWfvC/iFaMCQu7Nqg==", + "requires": { + "@babel/helper-create-regexp-features-plugin": "^7.18.6", + "@babel/helper-plugin-utils": "^7.18.6" + } + }, + "@babel/plugin-transform-arrow-functions": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.22.5.tgz", + "integrity": "sha512-26lTNXoVRdAnsaDXPpvCNUq+OVWEVC6bx7Vvz9rC53F2bagUWW4u4ii2+h8Fejfh7RYqPxn+libeFBBck9muEw==", + "requires": { + "@babel/helper-plugin-utils": "^7.22.5" + } + }, + "@babel/plugin-transform-async-generator-functions": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-async-generator-functions/-/plugin-transform-async-generator-functions-7.22.5.tgz", + "integrity": "sha512-gGOEvFzm3fWoyD5uZq7vVTD57pPJ3PczPUD/xCFGjzBpUosnklmXyKnGQbbbGs1NPNPskFex0j93yKbHt0cHyg==", + "requires": { + "@babel/helper-environment-visitor": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/helper-remap-async-to-generator": "^7.22.5", + "@babel/plugin-syntax-async-generators": "^7.8.4" + } + }, + "@babel/plugin-transform-async-to-generator": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-async-to-generator/-/plugin-transform-async-to-generator-7.22.5.tgz", + "integrity": "sha512-b1A8D8ZzE/VhNDoV1MSJTnpKkCG5bJo+19R4o4oy03zM7ws8yEMK755j61Dc3EyvdysbqH5BOOTquJ7ZX9C6vQ==", + "requires": { + "@babel/helper-module-imports": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/helper-remap-async-to-generator": "^7.22.5" + } + }, + "@babel/plugin-transform-block-scoped-functions": { + "version": "7.22.5", + "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.22.5.tgz", + "integrity": "sha512-tdXZ2UdknEKQWKJP1KMNmuF5Lx3MymtMN/pvA+p/VEkhK8jVcQ1fzSy8KM9qRYhAf2/lV33hoMPKI/xaI9sADA==", + "requires": { + "@babel/helper-plugin-utils": "^7.22.5" + } + }, + "@babel/plugin-transform-block-scoping": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.22.5.tgz", + "integrity": "sha512-EcACl1i5fSQ6bt+YGuU/XGCeZKStLmyVGytWkpyhCLeQVA0eu6Wtiw92V+I1T/hnezUv7j74dA/Ro69gWcU+hg==", + "requires": { + "@babel/helper-plugin-utils": "^7.22.5" + } + }, + "@babel/plugin-transform-class-properties": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-class-properties/-/plugin-transform-class-properties-7.22.5.tgz", + "integrity": "sha512-nDkQ0NfkOhPTq8YCLiWNxp1+f9fCobEjCb0n8WdbNUBc4IB5V7P1QnX9IjpSoquKrXF5SKojHleVNs2vGeHCHQ==", + "requires": { + "@babel/helper-create-class-features-plugin": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5" + } + }, + "@babel/plugin-transform-class-static-block": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-class-static-block/-/plugin-transform-class-static-block-7.22.5.tgz", + "integrity": "sha512-SPToJ5eYZLxlnp1UzdARpOGeC2GbHvr9d/UV0EukuVx8atktg194oe+C5BqQ8jRTkgLRVOPYeXRSBg1IlMoVRA==", + "requires": { + "@babel/helper-create-class-features-plugin": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/plugin-syntax-class-static-block": "^7.14.5" + } + }, + "@babel/plugin-transform-classes": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-classes/-/plugin-transform-classes-7.22.5.tgz", + "integrity": "sha512-2edQhLfibpWpsVBx2n/GKOz6JdGQvLruZQfGr9l1qes2KQaWswjBzhQF7UDUZMNaMMQeYnQzxwOMPsbYF7wqPQ==", + "requires": { + "@babel/helper-annotate-as-pure": "^7.22.5", + 
"@babel/helper-compilation-targets": "^7.22.5", + "@babel/helper-environment-visitor": "^7.22.5", + "@babel/helper-function-name": "^7.22.5", + "@babel/helper-optimise-call-expression": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/helper-replace-supers": "^7.22.5", + "@babel/helper-split-export-declaration": "^7.22.5", + "globals": "^11.1.0" + } + }, + "@babel/plugin-transform-computed-properties": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.22.5.tgz", + "integrity": "sha512-4GHWBgRf0krxPX+AaPtgBAlTgTeZmqDynokHOX7aqqAB4tHs3U2Y02zH6ETFdLZGcg9UQSD1WCmkVrE9ErHeOg==", + "requires": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/template": "^7.22.5" + } + }, + "@babel/plugin-transform-destructuring": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.22.5.tgz", + "integrity": "sha512-GfqcFuGW8vnEqTUBM7UtPd5A4q797LTvvwKxXTgRsFjoqaJiEg9deBG6kWeQYkVEL569NpnmpC0Pkr/8BLKGnQ==", + "requires": { + "@babel/helper-plugin-utils": "^7.22.5" + } + }, + "@babel/plugin-transform-dotall-regex": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-dotall-regex/-/plugin-transform-dotall-regex-7.22.5.tgz", + "integrity": "sha512-5/Yk9QxCQCl+sOIB1WelKnVRxTJDSAIxtJLL2/pqL14ZVlbH0fUQUZa/T5/UnQtBNgghR7mfB8ERBKyKPCi7Vw==", + "requires": { + "@babel/helper-create-regexp-features-plugin": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5" + } + }, + "@babel/plugin-transform-duplicate-keys": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-duplicate-keys/-/plugin-transform-duplicate-keys-7.22.5.tgz", + "integrity": "sha512-dEnYD+9BBgld5VBXHnF/DbYGp3fqGMsyxKbtD1mDyIA7AkTSpKXFhCVuj/oQVOoALfBs77DudA0BE4d5mcpmqw==", + "requires": { + "@babel/helper-plugin-utils": "^7.22.5" + } + }, + 
"@babel/plugin-transform-dynamic-import": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-dynamic-import/-/plugin-transform-dynamic-import-7.22.5.tgz", + "integrity": "sha512-0MC3ppTB1AMxd8fXjSrbPa7LT9hrImt+/fcj+Pg5YMD7UQyWp/02+JWpdnCymmsXwIx5Z+sYn1bwCn4ZJNvhqQ==", + "requires": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/plugin-syntax-dynamic-import": "^7.8.3" + } + }, + "@babel/plugin-transform-exponentiation-operator": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-exponentiation-operator/-/plugin-transform-exponentiation-operator-7.22.5.tgz", + "integrity": "sha512-vIpJFNM/FjZ4rh1myqIya9jXwrwwgFRHPjT3DkUA9ZLHuzox8jiXkOLvwm1H+PQIP3CqfC++WPKeuDi0Sjdj1g==", + "requires": { + "@babel/helper-builder-binary-assignment-operator-visitor": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5" + } + }, + "@babel/plugin-transform-export-namespace-from": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-export-namespace-from/-/plugin-transform-export-namespace-from-7.22.5.tgz", + "integrity": "sha512-X4hhm7FRnPgd4nDA4b/5V280xCx6oL7Oob5+9qVS5C13Zq4bh1qq7LU0GgRU6b5dBWBvhGaXYVB4AcN6+ol6vg==", + "requires": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/plugin-syntax-export-namespace-from": "^7.8.3" + } + }, + "@babel/plugin-transform-for-of": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.22.5.tgz", + "integrity": "sha512-3kxQjX1dU9uudwSshyLeEipvrLjBCVthCgeTp6CzE/9JYrlAIaeekVxRpCWsDDfYTfRZRoCeZatCQvwo+wvK8A==", + "requires": { + "@babel/helper-plugin-utils": "^7.22.5" + } + }, + "@babel/plugin-transform-function-name": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-function-name/-/plugin-transform-function-name-7.22.5.tgz", + "integrity": 
"sha512-UIzQNMS0p0HHiQm3oelztj+ECwFnj+ZRV4KnguvlsD2of1whUeM6o7wGNj6oLwcDoAXQ8gEqfgC24D+VdIcevg==", + "requires": { + "@babel/helper-compilation-targets": "^7.22.5", + "@babel/helper-function-name": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5" + } + }, + "@babel/plugin-transform-json-strings": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-json-strings/-/plugin-transform-json-strings-7.22.5.tgz", + "integrity": "sha512-DuCRB7fu8MyTLbEQd1ew3R85nx/88yMoqo2uPSjevMj3yoN7CDM8jkgrY0wmVxfJZyJ/B9fE1iq7EQppWQmR5A==", + "requires": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/plugin-syntax-json-strings": "^7.8.3" + } + }, + "@babel/plugin-transform-literals": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-literals/-/plugin-transform-literals-7.22.5.tgz", + "integrity": "sha512-fTLj4D79M+mepcw3dgFBTIDYpbcB9Sm0bpm4ppXPaO+U+PKFFyV9MGRvS0gvGw62sd10kT5lRMKXAADb9pWy8g==", + "requires": { + "@babel/helper-plugin-utils": "^7.22.5" + } + }, + "@babel/plugin-transform-logical-assignment-operators": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-logical-assignment-operators/-/plugin-transform-logical-assignment-operators-7.22.5.tgz", + "integrity": "sha512-MQQOUW1KL8X0cDWfbwYP+TbVbZm16QmQXJQ+vndPtH/BoO0lOKpVoEDMI7+PskYxH+IiE0tS8xZye0qr1lGzSA==", + "requires": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4" + } + }, + "@babel/plugin-transform-member-expression-literals": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-member-expression-literals/-/plugin-transform-member-expression-literals-7.22.5.tgz", + "integrity": "sha512-RZEdkNtzzYCFl9SE9ATaUMTj2hqMb4StarOJLrZRbqqU4HSBE7UlBw9WBWQiDzrJZJdUWiMTVDI6Gv/8DPvfew==", + "requires": { + "@babel/helper-plugin-utils": "^7.22.5" + } + }, + "@babel/plugin-transform-modules-amd": { + "version": 
"7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.22.5.tgz", + "integrity": "sha512-R+PTfLTcYEmb1+kK7FNkhQ1gP4KgjpSO6HfH9+f8/yfp2Nt3ggBjiVpRwmwTlfqZLafYKJACy36yDXlEmI9HjQ==", + "requires": { + "@babel/helper-module-transforms": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5" + } + }, + "@babel/plugin-transform-modules-commonjs": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.22.5.tgz", + "integrity": "sha512-B4pzOXj+ONRmuaQTg05b3y/4DuFz3WcCNAXPLb2Q0GT0TrGKGxNKV4jwsXts+StaM0LQczZbOpj8o1DLPDJIiA==", + "requires": { + "@babel/helper-module-transforms": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/helper-simple-access": "^7.22.5" + } + }, + "@babel/plugin-transform-modules-systemjs": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.22.5.tgz", + "integrity": "sha512-emtEpoaTMsOs6Tzz+nbmcePl6AKVtS1yC4YNAeMun9U8YCsgadPNxnOPQ8GhHFB2qdx+LZu9LgoC0Lthuu05DQ==", + "requires": { + "@babel/helper-hoist-variables": "^7.22.5", + "@babel/helper-module-transforms": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/helper-validator-identifier": "^7.22.5" + } + }, + "@babel/plugin-transform-modules-umd": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.22.5.tgz", + "integrity": "sha512-+S6kzefN/E1vkSsKx8kmQuqeQsvCKCd1fraCM7zXm4SFoggI099Tr4G8U81+5gtMdUeMQ4ipdQffbKLX0/7dBQ==", + "requires": { + "@babel/helper-module-transforms": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5" + } + }, + "@babel/plugin-transform-named-capturing-groups-regex": { + "version": "7.22.5", + "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-named-capturing-groups-regex/-/plugin-transform-named-capturing-groups-regex-7.22.5.tgz", + "integrity": "sha512-YgLLKmS3aUBhHaxp5hi1WJTgOUb/NCuDHzGT9z9WTt3YG+CPRhJs6nprbStx6DnWM4dh6gt7SU3sZodbZ08adQ==", + "requires": { + "@babel/helper-create-regexp-features-plugin": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5" + } + }, + "@babel/plugin-transform-new-target": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-new-target/-/plugin-transform-new-target-7.22.5.tgz", + "integrity": "sha512-AsF7K0Fx/cNKVyk3a+DW0JLo+Ua598/NxMRvxDnkpCIGFh43+h/v2xyhRUYf6oD8gE4QtL83C7zZVghMjHd+iw==", + "requires": { + "@babel/helper-plugin-utils": "^7.22.5" + } + }, + "@babel/plugin-transform-nullish-coalescing-operator": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-nullish-coalescing-operator/-/plugin-transform-nullish-coalescing-operator-7.22.5.tgz", + "integrity": "sha512-6CF8g6z1dNYZ/VXok5uYkkBBICHZPiGEl7oDnAx2Mt1hlHVHOSIKWJaXHjQJA5VB43KZnXZDIexMchY4y2PGdA==", + "requires": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3" + } + }, + "@babel/plugin-transform-numeric-separator": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-numeric-separator/-/plugin-transform-numeric-separator-7.22.5.tgz", + "integrity": "sha512-NbslED1/6M+sXiwwtcAB/nieypGw02Ejf4KtDeMkCEpP6gWFMX1wI9WKYua+4oBneCCEmulOkRpwywypVZzs/g==", + "requires": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/plugin-syntax-numeric-separator": "^7.10.4" + } + }, + "@babel/plugin-transform-object-rest-spread": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-object-rest-spread/-/plugin-transform-object-rest-spread-7.22.5.tgz", + "integrity": "sha512-Kk3lyDmEslH9DnvCDA1s1kkd3YWQITiBOHngOtDL9Pt6BZjzqb6hiOlb8VfjiiQJ2unmegBqZu0rx5RxJb5vmQ==", + 
"requires": { + "@babel/compat-data": "^7.22.5", + "@babel/helper-compilation-targets": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/plugin-syntax-object-rest-spread": "^7.8.3", + "@babel/plugin-transform-parameters": "^7.22.5" + } + }, + "@babel/plugin-transform-object-super": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-object-super/-/plugin-transform-object-super-7.22.5.tgz", + "integrity": "sha512-klXqyaT9trSjIUrcsYIfETAzmOEZL3cBYqOYLJxBHfMFFggmXOv+NYSX/Jbs9mzMVESw/WycLFPRx8ba/b2Ipw==", + "requires": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/helper-replace-supers": "^7.22.5" + } + }, + "@babel/plugin-transform-optional-catch-binding": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-optional-catch-binding/-/plugin-transform-optional-catch-binding-7.22.5.tgz", + "integrity": "sha512-pH8orJahy+hzZje5b8e2QIlBWQvGpelS76C63Z+jhZKsmzfNaPQ+LaW6dcJ9bxTpo1mtXbgHwy765Ro3jftmUg==", + "requires": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/plugin-syntax-optional-catch-binding": "^7.8.3" + } + }, + "@babel/plugin-transform-optional-chaining": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-optional-chaining/-/plugin-transform-optional-chaining-7.22.5.tgz", + "integrity": "sha512-AconbMKOMkyG+xCng2JogMCDcqW8wedQAqpVIL4cOSescZ7+iW8utC6YDZLMCSUIReEA733gzRSaOSXMAt/4WQ==", + "requires": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/helper-skip-transparent-expression-wrappers": "^7.22.5", + "@babel/plugin-syntax-optional-chaining": "^7.8.3" + } + }, + "@babel/plugin-transform-parameters": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.22.5.tgz", + "integrity": "sha512-AVkFUBurORBREOmHRKo06FjHYgjrabpdqRSwq6+C7R5iTCZOsM4QbcB27St0a4U6fffyAOqh3s/qEfybAhfivg==", + "requires": { + "@babel/helper-plugin-utils": "^7.22.5" 
+ } + }, + "@babel/plugin-transform-private-methods": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-private-methods/-/plugin-transform-private-methods-7.22.5.tgz", + "integrity": "sha512-PPjh4gyrQnGe97JTalgRGMuU4icsZFnWkzicB/fUtzlKUqvsWBKEpPPfr5a2JiyirZkHxnAqkQMO5Z5B2kK3fA==", + "requires": { + "@babel/helper-create-class-features-plugin": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5" + } + }, + "@babel/plugin-transform-private-property-in-object": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-private-property-in-object/-/plugin-transform-private-property-in-object-7.22.5.tgz", + "integrity": "sha512-/9xnaTTJcVoBtSSmrVyhtSvO3kbqS2ODoh2juEU72c3aYonNF0OMGiaz2gjukyKM2wBBYJP38S4JiE0Wfb5VMQ==", + "requires": { + "@babel/helper-annotate-as-pure": "^7.22.5", + "@babel/helper-create-class-features-plugin": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/plugin-syntax-private-property-in-object": "^7.14.5" + } + }, + "@babel/plugin-transform-property-literals": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-property-literals/-/plugin-transform-property-literals-7.22.5.tgz", + "integrity": "sha512-TiOArgddK3mK/x1Qwf5hay2pxI6wCZnvQqrFSqbtg1GLl2JcNMitVH/YnqjP+M31pLUeTfzY1HAXFDnUBV30rQ==", + "requires": { + "@babel/helper-plugin-utils": "^7.22.5" + } + }, + "@babel/plugin-transform-react-constant-elements": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-constant-elements/-/plugin-transform-react-constant-elements-7.22.5.tgz", + "integrity": "sha512-BF5SXoO+nX3h5OhlN78XbbDrBOffv+AxPP2ENaJOVqjWCgBDeOY3WcaUcddutGSfoap+5NEQ/q/4I3WZIvgkXA==", + "requires": { + "@babel/helper-plugin-utils": "^7.22.5" + } + }, + "@babel/plugin-transform-react-display-name": { + "version": "7.22.5", + "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-react-display-name/-/plugin-transform-react-display-name-7.22.5.tgz", + "integrity": "sha512-PVk3WPYudRF5z4GKMEYUrLjPl38fJSKNaEOkFuoprioowGuWN6w2RKznuFNSlJx7pzzXXStPUnNSOEO0jL5EVw==", + "requires": { + "@babel/helper-plugin-utils": "^7.22.5" + } + }, + "@babel/plugin-transform-react-jsx": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx/-/plugin-transform-react-jsx-7.22.5.tgz", + "integrity": "sha512-rog5gZaVbUip5iWDMTYbVM15XQq+RkUKhET/IHR6oizR+JEoN6CAfTTuHcK4vwUyzca30qqHqEpzBOnaRMWYMA==", + "requires": { + "@babel/helper-annotate-as-pure": "^7.22.5", + "@babel/helper-module-imports": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/plugin-syntax-jsx": "^7.22.5", + "@babel/types": "^7.22.5" + } + }, + "@babel/plugin-transform-react-jsx-development": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-development/-/plugin-transform-react-jsx-development-7.22.5.tgz", + "integrity": "sha512-bDhuzwWMuInwCYeDeMzyi7TaBgRQei6DqxhbyniL7/VG4RSS7HtSL2QbY4eESy1KJqlWt8g3xeEBGPuo+XqC8A==", + "requires": { + "@babel/plugin-transform-react-jsx": "^7.22.5" + } + }, + "@babel/plugin-transform-react-pure-annotations": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-pure-annotations/-/plugin-transform-react-pure-annotations-7.22.5.tgz", + "integrity": "sha512-gP4k85wx09q+brArVinTXhWiyzLl9UpmGva0+mWyKxk6JZequ05x3eUcIUE+FyttPKJFRRVtAvQaJ6YF9h1ZpA==", + "requires": { + "@babel/helper-annotate-as-pure": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5" + } + }, + "@babel/plugin-transform-regenerator": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.22.5.tgz", + "integrity": "sha512-rR7KePOE7gfEtNTh9Qw+iO3Q/e4DEsoQ+hdvM6QUDH7JRJ5qxq5AA52ZzBWbI5i9lfNuvySgOGP8ZN7LAmaiPw==", + "requires": 
{ + "@babel/helper-plugin-utils": "^7.22.5", + "regenerator-transform": "^0.15.1" + } + }, + "@babel/plugin-transform-reserved-words": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-reserved-words/-/plugin-transform-reserved-words-7.22.5.tgz", + "integrity": "sha512-DTtGKFRQUDm8svigJzZHzb/2xatPc6TzNvAIJ5GqOKDsGFYgAskjRulbR/vGsPKq3OPqtexnz327qYpP57RFyA==", + "requires": { + "@babel/helper-plugin-utils": "^7.22.5" + } + }, + "@babel/plugin-transform-runtime": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-runtime/-/plugin-transform-runtime-7.22.5.tgz", + "integrity": "sha512-bg4Wxd1FWeFx3daHFTWk1pkSWK/AyQuiyAoeZAOkAOUBjnZPH6KT7eMxouV47tQ6hl6ax2zyAWBdWZXbrvXlaw==", + "requires": { + "@babel/helper-module-imports": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5", + "babel-plugin-polyfill-corejs2": "^0.4.3", + "babel-plugin-polyfill-corejs3": "^0.8.1", + "babel-plugin-polyfill-regenerator": "^0.5.0", + "semver": "^6.3.0" + }, + "dependencies": { + "semver": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", + "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==" + } + } + }, + "@babel/plugin-transform-shorthand-properties": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-shorthand-properties/-/plugin-transform-shorthand-properties-7.22.5.tgz", + "integrity": "sha512-vM4fq9IXHscXVKzDv5itkO1X52SmdFBFcMIBZ2FRn2nqVYqw6dBexUgMvAjHW+KXpPPViD/Yo3GrDEBaRC0QYA==", + "requires": { + "@babel/helper-plugin-utils": "^7.22.5" + } + }, + "@babel/plugin-transform-spread": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-spread/-/plugin-transform-spread-7.22.5.tgz", + "integrity": "sha512-5ZzDQIGyvN4w8+dMmpohL6MBo+l2G7tfC/O2Dg7/hjpgeWvUx8FzfeOKxGog9IimPa4YekaQ9PlDqTLOljkcxg==", + "requires": { + 
"@babel/helper-plugin-utils": "^7.22.5", + "@babel/helper-skip-transparent-expression-wrappers": "^7.22.5" + } + }, + "@babel/plugin-transform-sticky-regex": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-sticky-regex/-/plugin-transform-sticky-regex-7.22.5.tgz", + "integrity": "sha512-zf7LuNpHG0iEeiyCNwX4j3gDg1jgt1k3ZdXBKbZSoA3BbGQGvMiSvfbZRR3Dr3aeJe3ooWFZxOOG3IRStYp2Bw==", + "requires": { + "@babel/helper-plugin-utils": "^7.22.5" + } + }, + "@babel/plugin-transform-template-literals": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.22.5.tgz", + "integrity": "sha512-5ciOehRNf+EyUeewo8NkbQiUs4d6ZxiHo6BcBcnFlgiJfu16q0bQUw9Jvo0b0gBKFG1SMhDSjeKXSYuJLeFSMA==", + "requires": { + "@babel/helper-plugin-utils": "^7.22.5" + } + }, + "@babel/plugin-transform-typeof-symbol": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.22.5.tgz", + "integrity": "sha512-bYkI5lMzL4kPii4HHEEChkD0rkc+nvnlR6+o/qdqR6zrm0Sv/nodmyLhlq2DO0YKLUNd2VePmPRjJXSBh9OIdA==", + "requires": { + "@babel/helper-plugin-utils": "^7.22.5" + } + }, + "@babel/plugin-transform-typescript": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-typescript/-/plugin-transform-typescript-7.22.5.tgz", + "integrity": "sha512-SMubA9S7Cb5sGSFFUlqxyClTA9zWJ8qGQrppNUm05LtFuN1ELRFNndkix4zUJrC9F+YivWwa1dHMSyo0e0N9dA==", + "requires": { + "@babel/helper-annotate-as-pure": "^7.22.5", + "@babel/helper-create-class-features-plugin": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/plugin-syntax-typescript": "^7.22.5" + } + }, + "@babel/plugin-transform-unicode-escapes": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-escapes/-/plugin-transform-unicode-escapes-7.22.5.tgz", + "integrity": 
"sha512-biEmVg1IYB/raUO5wT1tgfacCef15Fbzhkx493D3urBI++6hpJ+RFG4SrWMn0NEZLfvilqKf3QDrRVZHo08FYg==", + "requires": { + "@babel/helper-plugin-utils": "^7.22.5" + } + }, + "@babel/plugin-transform-unicode-property-regex": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-property-regex/-/plugin-transform-unicode-property-regex-7.22.5.tgz", + "integrity": "sha512-HCCIb+CbJIAE6sXn5CjFQXMwkCClcOfPCzTlilJ8cUatfzwHlWQkbtV0zD338u9dZskwvuOYTuuaMaA8J5EI5A==", + "requires": { + "@babel/helper-create-regexp-features-plugin": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5" + } + }, + "@babel/plugin-transform-unicode-regex": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-regex/-/plugin-transform-unicode-regex-7.22.5.tgz", + "integrity": "sha512-028laaOKptN5vHJf9/Arr/HiJekMd41hOEZYvNsrsXqJ7YPYuX2bQxh31fkZzGmq3YqHRJzYFFAVYvKfMPKqyg==", + "requires": { + "@babel/helper-create-regexp-features-plugin": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5" + } + }, + "@babel/plugin-transform-unicode-sets-regex": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-sets-regex/-/plugin-transform-unicode-sets-regex-7.22.5.tgz", + "integrity": "sha512-lhMfi4FC15j13eKrh3DnYHjpGj6UKQHtNKTbtc1igvAhRy4+kLhV07OpLcsN0VgDEw/MjAvJO4BdMJsHwMhzCg==", + "requires": { + "@babel/helper-create-regexp-features-plugin": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5" + } + }, + "@babel/preset-env": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/preset-env/-/preset-env-7.22.5.tgz", + "integrity": "sha512-fj06hw89dpiZzGZtxn+QybifF07nNiZjZ7sazs2aVDcysAZVGjW7+7iFYxg6GLNM47R/thYfLdrXc+2f11Vi9A==", + "requires": { + "@babel/compat-data": "^7.22.5", + "@babel/helper-compilation-targets": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/helper-validator-option": "^7.22.5", + 
"@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": "^7.22.5", + "@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": "^7.22.5", + "@babel/plugin-proposal-private-property-in-object": "7.21.0-placeholder-for-preset-env.2", + "@babel/plugin-syntax-async-generators": "^7.8.4", + "@babel/plugin-syntax-class-properties": "^7.12.13", + "@babel/plugin-syntax-class-static-block": "^7.14.5", + "@babel/plugin-syntax-dynamic-import": "^7.8.3", + "@babel/plugin-syntax-export-namespace-from": "^7.8.3", + "@babel/plugin-syntax-import-assertions": "^7.22.5", + "@babel/plugin-syntax-import-attributes": "^7.22.5", + "@babel/plugin-syntax-import-meta": "^7.10.4", + "@babel/plugin-syntax-json-strings": "^7.8.3", + "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4", + "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3", + "@babel/plugin-syntax-numeric-separator": "^7.10.4", + "@babel/plugin-syntax-object-rest-spread": "^7.8.3", + "@babel/plugin-syntax-optional-catch-binding": "^7.8.3", + "@babel/plugin-syntax-optional-chaining": "^7.8.3", + "@babel/plugin-syntax-private-property-in-object": "^7.14.5", + "@babel/plugin-syntax-top-level-await": "^7.14.5", + "@babel/plugin-syntax-unicode-sets-regex": "^7.18.6", + "@babel/plugin-transform-arrow-functions": "^7.22.5", + "@babel/plugin-transform-async-generator-functions": "^7.22.5", + "@babel/plugin-transform-async-to-generator": "^7.22.5", + "@babel/plugin-transform-block-scoped-functions": "^7.22.5", + "@babel/plugin-transform-block-scoping": "^7.22.5", + "@babel/plugin-transform-class-properties": "^7.22.5", + "@babel/plugin-transform-class-static-block": "^7.22.5", + "@babel/plugin-transform-classes": "^7.22.5", + "@babel/plugin-transform-computed-properties": "^7.22.5", + "@babel/plugin-transform-destructuring": "^7.22.5", + "@babel/plugin-transform-dotall-regex": "^7.22.5", + "@babel/plugin-transform-duplicate-keys": "^7.22.5", + "@babel/plugin-transform-dynamic-import": 
"^7.22.5", + "@babel/plugin-transform-exponentiation-operator": "^7.22.5", + "@babel/plugin-transform-export-namespace-from": "^7.22.5", + "@babel/plugin-transform-for-of": "^7.22.5", + "@babel/plugin-transform-function-name": "^7.22.5", + "@babel/plugin-transform-json-strings": "^7.22.5", + "@babel/plugin-transform-literals": "^7.22.5", + "@babel/plugin-transform-logical-assignment-operators": "^7.22.5", + "@babel/plugin-transform-member-expression-literals": "^7.22.5", + "@babel/plugin-transform-modules-amd": "^7.22.5", + "@babel/plugin-transform-modules-commonjs": "^7.22.5", + "@babel/plugin-transform-modules-systemjs": "^7.22.5", + "@babel/plugin-transform-modules-umd": "^7.22.5", + "@babel/plugin-transform-named-capturing-groups-regex": "^7.22.5", + "@babel/plugin-transform-new-target": "^7.22.5", + "@babel/plugin-transform-nullish-coalescing-operator": "^7.22.5", + "@babel/plugin-transform-numeric-separator": "^7.22.5", + "@babel/plugin-transform-object-rest-spread": "^7.22.5", + "@babel/plugin-transform-object-super": "^7.22.5", + "@babel/plugin-transform-optional-catch-binding": "^7.22.5", + "@babel/plugin-transform-optional-chaining": "^7.22.5", + "@babel/plugin-transform-parameters": "^7.22.5", + "@babel/plugin-transform-private-methods": "^7.22.5", + "@babel/plugin-transform-private-property-in-object": "^7.22.5", + "@babel/plugin-transform-property-literals": "^7.22.5", + "@babel/plugin-transform-regenerator": "^7.22.5", + "@babel/plugin-transform-reserved-words": "^7.22.5", + "@babel/plugin-transform-shorthand-properties": "^7.22.5", + "@babel/plugin-transform-spread": "^7.22.5", + "@babel/plugin-transform-sticky-regex": "^7.22.5", + "@babel/plugin-transform-template-literals": "^7.22.5", + "@babel/plugin-transform-typeof-symbol": "^7.22.5", + "@babel/plugin-transform-unicode-escapes": "^7.22.5", + "@babel/plugin-transform-unicode-property-regex": "^7.22.5", + "@babel/plugin-transform-unicode-regex": "^7.22.5", + 
"@babel/plugin-transform-unicode-sets-regex": "^7.22.5", + "@babel/preset-modules": "^0.1.5", + "@babel/types": "^7.22.5", + "babel-plugin-polyfill-corejs2": "^0.4.3", + "babel-plugin-polyfill-corejs3": "^0.8.1", + "babel-plugin-polyfill-regenerator": "^0.5.0", + "core-js-compat": "^3.30.2", + "semver": "^6.3.0" + }, + "dependencies": { + "semver": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", + "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==" + } + } + }, + "@babel/preset-modules": { + "version": "0.1.5", + "resolved": "https://registry.npmjs.org/@babel/preset-modules/-/preset-modules-0.1.5.tgz", + "integrity": "sha512-A57th6YRG7oR3cq/yt/Y84MvGgE0eJG2F1JLhKuyG+jFxEgrd/HAMJatiFtmOiZurz+0DkrvbheCLaV5f2JfjA==", + "requires": { + "@babel/helper-plugin-utils": "^7.0.0", + "@babel/plugin-proposal-unicode-property-regex": "^7.4.4", + "@babel/plugin-transform-dotall-regex": "^7.4.4", + "@babel/types": "^7.4.4", + "esutils": "^2.0.2" + } + }, + "@babel/preset-react": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/preset-react/-/preset-react-7.22.5.tgz", + "integrity": "sha512-M+Is3WikOpEJHgR385HbuCITPTaPRaNkibTEa9oiofmJvIsrceb4yp9RL9Kb+TE8LznmeyZqpP+Lopwcx59xPQ==", + "requires": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/helper-validator-option": "^7.22.5", + "@babel/plugin-transform-react-display-name": "^7.22.5", + "@babel/plugin-transform-react-jsx": "^7.22.5", + "@babel/plugin-transform-react-jsx-development": "^7.22.5", + "@babel/plugin-transform-react-pure-annotations": "^7.22.5" + } + }, + "@babel/preset-typescript": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/preset-typescript/-/preset-typescript-7.22.5.tgz", + "integrity": "sha512-YbPaal9LxztSGhmndR46FmAbkJ/1fAsw293tSU+I5E5h+cnJ3d4GTwyUgGYmOXJYdGA+uNePle4qbaRzj2NISQ==", + "requires": { + "@babel/helper-plugin-utils": "^7.22.5", + 
"@babel/helper-validator-option": "^7.22.5", + "@babel/plugin-syntax-jsx": "^7.22.5", + "@babel/plugin-transform-modules-commonjs": "^7.22.5", + "@babel/plugin-transform-typescript": "^7.22.5" + } + }, + "@babel/regjsgen": { + "version": "0.8.0", + "resolved": "https://registry.npmjs.org/@babel/regjsgen/-/regjsgen-0.8.0.tgz", + "integrity": "sha512-x/rqGMdzj+fWZvCOYForTghzbtqPDZ5gPwaoNGHdgDfF2QA/XZbCBp4Moo5scrkAMPhB7z26XM/AaHuIJdgauA==" + }, + "@babel/runtime": { + "version": "7.21.5", + "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.21.5.tgz", + "integrity": "sha512-8jI69toZqqcsnqGGqwGS4Qb1VwLOEp4hz+CXPywcvjs60u3B4Pom/U/7rm4W8tMOYEB+E9wgD0mW1l3r8qlI9Q==", + "requires": { + "regenerator-runtime": "^0.13.11" + } + }, + "@babel/runtime-corejs3": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/runtime-corejs3/-/runtime-corejs3-7.22.5.tgz", + "integrity": "sha512-TNPDN6aBFaUox2Lu+H/Y1dKKQgr4ucz/FGyCz67RVYLsBpVpUFf1dDngzg+Od8aqbrqwyztkaZjtWCZEUOT8zA==", + "requires": { + "core-js-pure": "^3.30.2", + "regenerator-runtime": "^0.13.11" + } + }, + "@babel/template": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.22.5.tgz", + "integrity": "sha512-X7yV7eiwAxdj9k94NEylvbVHLiVG1nvzCV2EAowhxLTwODV1jl9UzZ48leOC0sH7OnuHrIkllaBgneUykIcZaw==", + "requires": { + "@babel/code-frame": "^7.22.5", + "@babel/parser": "^7.22.5", + "@babel/types": "^7.22.5" + } + }, + "@babel/traverse": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.22.5.tgz", + "integrity": "sha512-7DuIjPgERaNo6r+PZwItpjCZEa5vyw4eJGufeLxrPdBXBoLcCJCIasvK6pK/9DVNrLZTLFhUGqaC6X/PA007TQ==", + "requires": { + "@babel/code-frame": "^7.22.5", + "@babel/generator": "^7.22.5", + "@babel/helper-environment-visitor": "^7.22.5", + "@babel/helper-function-name": "^7.22.5", + "@babel/helper-hoist-variables": "^7.22.5", + "@babel/helper-split-export-declaration": "^7.22.5", + "@babel/parser": 
"^7.22.5", + "@babel/types": "^7.22.5", + "debug": "^4.1.0", + "globals": "^11.1.0" + } + }, + "@babel/types": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.22.5.tgz", + "integrity": "sha512-zo3MIHGOkPOfoRXitsgHLjEXmlDaD/5KU1Uzuc9GNiZPhSqVxVRtxuPaSBZDsYZ9qV88AjtMtWW7ww98loJ9KA==", + "requires": { + "@babel/helper-string-parser": "^7.22.5", + "@babel/helper-validator-identifier": "^7.22.5", + "to-fast-properties": "^2.0.0" + } + }, + "@colors/colors": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/@colors/colors/-/colors-1.5.0.tgz", + "integrity": "sha512-ooWCrlZP11i8GImSjTHYHLkvFDP48nS4+204nGb1RiX/WXYHmJA2III9/e2DWVabCESdW7hBAEzHRqUn9OUVvQ==", + "optional": true + }, + "@discoveryjs/json-ext": { + "version": "0.5.7", + "resolved": "https://registry.npmjs.org/@discoveryjs/json-ext/-/json-ext-0.5.7.tgz", + "integrity": "sha512-dBVuXR082gk3jsFp7Rd/JI4kytwGHecnCoTtXFb7DB6CNHp4rg5k1bhg0nWdLGLnOV71lmDzGQaLMy8iPLY0pw==" + }, + "@docusaurus/core": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@docusaurus/core/-/core-2.4.1.tgz", + "integrity": "sha512-SNsY7PshK3Ri7vtsLXVeAJGS50nJN3RgF836zkyUfAD01Fq+sAk5EwWgLw+nnm5KVNGDu7PRR2kRGDsWvqpo0g==", + "requires": { + "@babel/core": "^7.18.6", + "@babel/generator": "^7.18.7", + "@babel/plugin-syntax-dynamic-import": "^7.8.3", + "@babel/plugin-transform-runtime": "^7.18.6", + "@babel/preset-env": "^7.18.6", + "@babel/preset-react": "^7.18.6", + "@babel/preset-typescript": "^7.18.6", + "@babel/runtime": "^7.18.6", + "@babel/runtime-corejs3": "^7.18.6", + "@babel/traverse": "^7.18.8", + "@docusaurus/cssnano-preset": "2.4.1", + "@docusaurus/logger": "2.4.1", + "@docusaurus/mdx-loader": "2.4.1", + "@docusaurus/react-loadable": "5.5.2", + "@docusaurus/utils": "2.4.1", + "@docusaurus/utils-common": "2.4.1", + "@docusaurus/utils-validation": "2.4.1", + "@slorber/static-site-generator-webpack-plugin": "^4.0.7", + "@svgr/webpack": "^6.2.1", + "autoprefixer": 
"^10.4.7", + "babel-loader": "^8.2.5", + "babel-plugin-dynamic-import-node": "^2.3.3", + "boxen": "^6.2.1", + "chalk": "^4.1.2", + "chokidar": "^3.5.3", + "clean-css": "^5.3.0", + "cli-table3": "^0.6.2", + "combine-promises": "^1.1.0", + "commander": "^5.1.0", + "copy-webpack-plugin": "^11.0.0", + "core-js": "^3.23.3", + "css-loader": "^6.7.1", + "css-minimizer-webpack-plugin": "^4.0.0", + "cssnano": "^5.1.12", + "del": "^6.1.1", + "detect-port": "^1.3.0", + "escape-html": "^1.0.3", + "eta": "^2.0.0", + "file-loader": "^6.2.0", + "fs-extra": "^10.1.0", + "html-minifier-terser": "^6.1.0", + "html-tags": "^3.2.0", + "html-webpack-plugin": "^5.5.0", + "import-fresh": "^3.3.0", + "leven": "^3.1.0", + "lodash": "^4.17.21", + "mini-css-extract-plugin": "^2.6.1", + "postcss": "^8.4.14", + "postcss-loader": "^7.0.0", + "prompts": "^2.4.2", + "react-dev-utils": "^12.0.1", + "react-helmet-async": "^1.3.0", + "react-loadable": "npm:@docusaurus/react-loadable@5.5.2", + "react-loadable-ssr-addon-v5-slorber": "^1.0.1", + "react-router": "^5.3.3", + "react-router-config": "^5.1.1", + "react-router-dom": "^5.3.3", + "rtl-detect": "^1.0.4", + "semver": "^7.3.7", + "serve-handler": "^6.1.3", + "shelljs": "^0.8.5", + "terser-webpack-plugin": "^5.3.3", + "tslib": "^2.4.0", + "update-notifier": "^5.1.0", + "url-loader": "^4.1.1", + "wait-on": "^6.0.1", + "webpack": "^5.73.0", + "webpack-bundle-analyzer": "^4.5.0", + "webpack-dev-server": "^4.9.3", + "webpack-merge": "^5.8.0", + "webpackbar": "^5.0.2" + }, + "dependencies": { + "commander": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-5.1.0.tgz", + "integrity": "sha512-P0CysNDQ7rtVw4QIQtm+MRxV66vKFSvlsQvGYXZWR3qFU0jlMKHZZZgw8e+8DSah4UDKMqnknRDQz+xuQXQ/Zg==" + }, + "fs-extra": { + "version": "10.1.0", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz", + "integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==", + 
"requires": { + "graceful-fs": "^4.2.0", + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" + } + }, + "jsonfile": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz", + "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==", + "requires": { + "graceful-fs": "^4.1.6", + "universalify": "^2.0.0" + } + }, + "universalify": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz", + "integrity": "sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ==" + } + } + }, + "@docusaurus/cssnano-preset": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@docusaurus/cssnano-preset/-/cssnano-preset-2.4.1.tgz", + "integrity": "sha512-ka+vqXwtcW1NbXxWsh6yA1Ckii1klY9E53cJ4O9J09nkMBgrNX3iEFED1fWdv8wf4mJjvGi5RLZ2p9hJNjsLyQ==", + "requires": { + "cssnano-preset-advanced": "^5.3.8", + "postcss": "^8.4.14", + "postcss-sort-media-queries": "^4.2.1", + "tslib": "^2.4.0" + } + }, + "@docusaurus/logger": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@docusaurus/logger/-/logger-2.4.1.tgz", + "integrity": "sha512-5h5ysIIWYIDHyTVd8BjheZmQZmEgWDR54aQ1BX9pjFfpyzFo5puKXKYrYJXbjEHGyVhEzmB9UXwbxGfaZhOjcg==", + "requires": { + "chalk": "^4.1.2", + "tslib": "^2.4.0" + } + }, + "@docusaurus/mdx-loader": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@docusaurus/mdx-loader/-/mdx-loader-2.4.1.tgz", + "integrity": "sha512-4KhUhEavteIAmbBj7LVFnrVYDiU51H5YWW1zY6SmBSte/YLhDutztLTBE0PQl1Grux1jzUJeaSvAzHpTn6JJDQ==", + "requires": { + "@babel/parser": "^7.18.8", + "@babel/traverse": "^7.18.8", + "@docusaurus/logger": "2.4.1", + "@docusaurus/utils": "2.4.1", + "@mdx-js/mdx": "^1.6.22", + "escape-html": "^1.0.3", + "file-loader": "^6.2.0", + "fs-extra": "^10.1.0", + "image-size": "^1.0.1", + "mdast-util-to-string": "^2.0.0", + "remark-emoji": "^2.2.0", + 
"stringify-object": "^3.3.0", + "tslib": "^2.4.0", + "unified": "^9.2.2", + "unist-util-visit": "^2.0.3", + "url-loader": "^4.1.1", + "webpack": "^5.73.0" + }, + "dependencies": { + "fs-extra": { + "version": "10.1.0", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz", + "integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==", + "requires": { + "graceful-fs": "^4.2.0", + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" + } + }, + "jsonfile": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz", + "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==", + "requires": { + "graceful-fs": "^4.1.6", + "universalify": "^2.0.0" + } + }, + "universalify": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz", + "integrity": "sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ==" + } + } + }, + "@docusaurus/react-loadable": { + "version": "5.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/react-loadable/-/react-loadable-5.5.2.tgz", + "integrity": "sha512-A3dYjdBGuy0IGT+wyLIGIKLRE+sAk1iNk0f1HjNDysO7u8lhL4N3VEm+FAubmJbAztn94F7MxBTPmnixbiyFdQ==", + "requires": { + "@types/react": "*", + "prop-types": "^15.6.2" + } + }, + "@docusaurus/types": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@docusaurus/types/-/types-2.4.1.tgz", + "integrity": "sha512-0R+cbhpMkhbRXX138UOc/2XZFF8hiZa6ooZAEEJFp5scytzCw4tC1gChMFXrpa3d2tYE6AX8IrOEpSonLmfQuQ==", + "requires": { + "@types/history": "^4.7.11", + "@types/react": "*", + "commander": "^5.1.0", + "joi": "^17.6.0", + "react-helmet-async": "^1.3.0", + "utility-types": "^3.10.0", + "webpack": "^5.73.0", + "webpack-merge": "^5.8.0" + }, + "dependencies": { + "commander": { + "version": "5.1.0", + "resolved": 
"https://registry.npmjs.org/commander/-/commander-5.1.0.tgz", + "integrity": "sha512-P0CysNDQ7rtVw4QIQtm+MRxV66vKFSvlsQvGYXZWR3qFU0jlMKHZZZgw8e+8DSah4UDKMqnknRDQz+xuQXQ/Zg==" + } + } + }, + "@docusaurus/utils": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@docusaurus/utils/-/utils-2.4.1.tgz", + "integrity": "sha512-1lvEZdAQhKNht9aPXPoh69eeKnV0/62ROhQeFKKxmzd0zkcuE/Oc5Gpnt00y/f5bIsmOsYMY7Pqfm/5rteT5GA==", + "requires": { + "@docusaurus/logger": "2.4.1", + "@svgr/webpack": "^6.2.1", + "escape-string-regexp": "^4.0.0", + "file-loader": "^6.2.0", + "fs-extra": "^10.1.0", + "github-slugger": "^1.4.0", + "globby": "^11.1.0", + "gray-matter": "^4.0.3", + "js-yaml": "^4.1.0", + "lodash": "^4.17.21", + "micromatch": "^4.0.5", + "resolve-pathname": "^3.0.0", + "shelljs": "^0.8.5", + "tslib": "^2.4.0", + "url-loader": "^4.1.1", + "webpack": "^5.73.0" + }, + "dependencies": { + "fs-extra": { + "version": "10.1.0", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz", + "integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==", + "requires": { + "graceful-fs": "^4.2.0", + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" + } + }, + "jsonfile": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz", + "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==", + "requires": { + "graceful-fs": "^4.1.6", + "universalify": "^2.0.0" + } + }, + "universalify": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz", + "integrity": "sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ==" + } + } + }, + "@docusaurus/utils-common": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@docusaurus/utils-common/-/utils-common-2.4.1.tgz", + "integrity": 
"sha512-bCVGdZU+z/qVcIiEQdyx0K13OC5mYwxhSuDUR95oFbKVuXYRrTVrwZIqQljuo1fyJvFTKHiL9L9skQOPokuFNQ==", + "requires": { + "tslib": "^2.4.0" + } + }, + "@docusaurus/utils-validation": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@docusaurus/utils-validation/-/utils-validation-2.4.1.tgz", + "integrity": "sha512-unII3hlJlDwZ3w8U+pMO3Lx3RhI4YEbY3YNsQj4yzrkZzlpqZOLuAiZK2JyULnD+TKbceKU0WyWkQXtYbLNDFA==", + "requires": { + "@docusaurus/logger": "2.4.1", + "@docusaurus/utils": "2.4.1", + "joi": "^17.6.0", + "js-yaml": "^4.1.0", + "tslib": "^2.4.0" + } + }, + "@h2oai/makersaurus": { + "version": "0.8.3", + "resolved": "https://npm.pkg.github.com/download/@h2oai/makersaurus/0.8.3/07b00e2f07fcf2d4798df2c1d305b6e5b3ff9006", + "integrity": "sha512-nvujC9gepvRxEgeFnpu9YQ3s+wmdp4+3BN9s7GuqSYweFCT5rdZSfdmQrg8HVc0x0ijhEuHggEYVKkLAVIWYiQ==", + "requires": { + "commander": "^9.4.1", + "handlebars": "^4.7.7", + "sync-directory": "^5.1.9", + "yup": "^0.32.11" + } + }, + "@hapi/hoek": { + "version": "9.3.0", + "resolved": "https://registry.npmjs.org/@hapi/hoek/-/hoek-9.3.0.tgz", + "integrity": "sha512-/c6rf4UJlmHlC9b5BaNvzAcFv7HZ2QHaV0D4/HNlBdvFnvQq8RI4kYdhyPCl7Xj+oWvTWQ8ujhqS53LIgAe6KQ==" + }, + "@hapi/topo": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/@hapi/topo/-/topo-5.1.0.tgz", + "integrity": "sha512-foQZKJig7Ob0BMAYBfcJk8d77QtOe7Wo4ox7ff1lQYoNNAb6jwcY1ncdoy2e9wQZzvNy7ODZCYJkK8kzmcAnAg==", + "requires": { + "@hapi/hoek": "^9.0.0" + } + }, + "@jest/schemas": { + "version": "29.4.3", + "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.4.3.tgz", + "integrity": "sha512-VLYKXQmtmuEz6IxJsrZwzG9NvtkQsWNnWMsKxqWNu3+CnfzJQhp0WDDKWLVV9hLKr0l3SLLFRqcYHjhtyuDVxg==", + "requires": { + "@sinclair/typebox": "^0.25.16" + } + }, + "@jest/types": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.5.0.tgz", + "integrity": 
"sha512-qbu7kN6czmVRc3xWFQcAN03RAUamgppVUdXrvl1Wr3jlNF93o9mJbGcDWrwGB6ht44u7efB1qCFgVQmca24Uog==", + "requires": { + "@jest/schemas": "^29.4.3", + "@types/istanbul-lib-coverage": "^2.0.0", + "@types/istanbul-reports": "^3.0.0", + "@types/node": "*", + "@types/yargs": "^17.0.8", + "chalk": "^4.0.0" + } + }, + "@jridgewell/gen-mapping": { + "version": "0.3.3", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.3.tgz", + "integrity": "sha512-HLhSWOLRi875zjjMG/r+Nv0oCW8umGb0BgEhyX3dDX3egwZtB8PqLnjz3yedt8R5StBrzcg4aBpnh8UA9D1BoQ==", + "requires": { + "@jridgewell/set-array": "^1.0.1", + "@jridgewell/sourcemap-codec": "^1.4.10", + "@jridgewell/trace-mapping": "^0.3.9" + } + }, + "@jridgewell/resolve-uri": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.0.tgz", + "integrity": "sha512-F2msla3tad+Mfht5cJq7LSXcdudKTWCVYUgw6pLFOOHSTtZlj6SWNYAp+AhuqLmWdBO2X5hPrLcu8cVP8fy28w==" + }, + "@jridgewell/set-array": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.1.2.tgz", + "integrity": "sha512-xnkseuNADM0gt2bs+BvhO0p78Mk762YnZdsuzFV018NoG1Sj1SCQvpSqa7XUaTam5vAGasABV9qXASMKnFMwMw==" + }, + "@jridgewell/source-map": { + "version": "0.3.3", + "resolved": "https://registry.npmjs.org/@jridgewell/source-map/-/source-map-0.3.3.tgz", + "integrity": "sha512-b+fsZXeLYi9fEULmfBrhxn4IrPlINf8fiNarzTof004v3lFdntdwa9PF7vFJqm3mg7s+ScJMxXaE3Acp1irZcg==", + "requires": { + "@jridgewell/gen-mapping": "^0.3.0", + "@jridgewell/trace-mapping": "^0.3.9" + } + }, + "@jridgewell/sourcemap-codec": { + "version": "1.4.15", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.15.tgz", + "integrity": "sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==" + }, + "@jridgewell/trace-mapping": { + "version": "0.3.18", + "resolved": 
"https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.18.tgz", + "integrity": "sha512-w+niJYzMHdd7USdiH2U6869nqhD2nbfZXND5Yp93qIbEmnDNk7PD48o+YchRVpzMU7M6jVCbenTR7PA1FLQ9pA==", + "requires": { + "@jridgewell/resolve-uri": "3.1.0", + "@jridgewell/sourcemap-codec": "1.4.14" + }, + "dependencies": { + "@jridgewell/sourcemap-codec": { + "version": "1.4.14", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.14.tgz", + "integrity": "sha512-XPSJHWmi394fuUuzDnGz1wiKqWfo1yXecHQMRf2l6hztTO+nPru658AyDngaBe7isIxEkRsPR3FZh+s7iVa4Uw==" + } + } + }, + "@leichtgewicht/ip-codec": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/@leichtgewicht/ip-codec/-/ip-codec-2.0.4.tgz", + "integrity": "sha512-Hcv+nVC0kZnQ3tD9GVu5xSMR4VVYOteQIr/hwFPVEvPdlXqgGEuRjiheChHgdM+JyqdgNcmzZOX/tnl0JOiI7A==" + }, + "@mdx-js/mdx": { + "version": "1.6.22", + "resolved": "https://registry.npmjs.org/@mdx-js/mdx/-/mdx-1.6.22.tgz", + "integrity": "sha512-AMxuLxPz2j5/6TpF/XSdKpQP1NlG0z11dFOlq+2IP/lSgl11GY8ji6S/rgsViN/L0BDvHvUMruRb7ub+24LUYA==", + "requires": { + "@babel/core": "7.12.9", + "@babel/plugin-syntax-jsx": "7.12.1", + "@babel/plugin-syntax-object-rest-spread": "7.8.3", + "@mdx-js/util": "1.6.22", + "babel-plugin-apply-mdx-type-prop": "1.6.22", + "babel-plugin-extract-import-names": "1.6.22", + "camelcase-css": "2.0.1", + "detab": "2.0.4", + "hast-util-raw": "6.0.1", + "lodash.uniq": "4.5.0", + "mdast-util-to-hast": "10.0.1", + "remark-footnotes": "2.0.0", + "remark-mdx": "1.6.22", + "remark-parse": "8.0.3", + "remark-squeeze-paragraphs": "4.0.0", + "style-to-object": "0.3.0", + "unified": "9.2.0", + "unist-builder": "2.0.3", + "unist-util-visit": "2.0.3" + }, + "dependencies": { + "@babel/core": { + "version": "7.12.9", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.12.9.tgz", + "integrity": "sha512-gTXYh3M5wb7FRXQy+FErKFAv90BnlOuNn1QkCK2lREoPAjrQCO49+HVSrFoe5uakFAF5eenS75KbO2vQiLrTMQ==", + "requires": { + 
"@babel/code-frame": "^7.10.4", + "@babel/generator": "^7.12.5", + "@babel/helper-module-transforms": "^7.12.1", + "@babel/helpers": "^7.12.5", + "@babel/parser": "^7.12.7", + "@babel/template": "^7.12.7", + "@babel/traverse": "^7.12.9", + "@babel/types": "^7.12.7", + "convert-source-map": "^1.7.0", + "debug": "^4.1.0", + "gensync": "^1.0.0-beta.1", + "json5": "^2.1.2", + "lodash": "^4.17.19", + "resolve": "^1.3.2", + "semver": "^5.4.1", + "source-map": "^0.5.0" + } + }, + "@babel/plugin-syntax-jsx": { + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.12.1.tgz", + "integrity": "sha512-1yRi7yAtB0ETgxdY9ti/p2TivUxJkTdhu/ZbF9MshVGqOx1TdB3b7xCXs49Fupgg50N45KcAsRP/ZqWjs9SRjg==", + "requires": { + "@babel/helper-plugin-utils": "^7.10.4" + } + }, + "semver": { + "version": "5.7.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", + "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==" + }, + "source-map": { + "version": "0.5.7", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz", + "integrity": "sha512-LbrmJOMUSdEVxIKvdcJzQC+nQhe8FUZQTXQy6+I75skNgn3OoQ0DZA8YnFa7gp8tqtL3KPf1kmo0R5DoApeSGQ==" + }, + "unified": { + "version": "9.2.0", + "resolved": "https://registry.npmjs.org/unified/-/unified-9.2.0.tgz", + "integrity": "sha512-vx2Z0vY+a3YoTj8+pttM3tiJHCwY5UFbYdiWrwBEbHmK8pvsPj2rtAX2BFfgXen8T39CJWblWRDT4L5WGXtDdg==", + "requires": { + "bail": "^1.0.0", + "extend": "^3.0.0", + "is-buffer": "^2.0.0", + "is-plain-obj": "^2.0.0", + "trough": "^1.0.0", + "vfile": "^4.0.0" + } + } + } + }, + "@mdx-js/util": { + "version": "1.6.22", + "resolved": "https://registry.npmjs.org/@mdx-js/util/-/util-1.6.22.tgz", + "integrity": "sha512-H1rQc1ZOHANWBvPcW+JpGwr+juXSxM8Q8YCkm3GhZd8REu1fHR3z99CErO1p9pkcfcxZnMdIZdIsXkOHY0NilA==" + }, + "@nodelib/fs.scandir": { + "version": "2.1.5", + "resolved": 
"https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", + "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", + "requires": { + "@nodelib/fs.stat": "2.0.5", + "run-parallel": "^1.1.9" + } + }, + "@nodelib/fs.stat": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz", + "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==" + }, + "@nodelib/fs.walk": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz", + "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==", + "requires": { + "@nodelib/fs.scandir": "2.1.5", + "fastq": "^1.6.0" + } + }, + "@polka/url": { + "version": "1.0.0-next.21", + "resolved": "https://registry.npmjs.org/@polka/url/-/url-1.0.0-next.21.tgz", + "integrity": "sha512-a5Sab1C4/icpTZVzZc5Ghpz88yQtGOyNqYXcZgOssB2uuAr+wF/MvN6bgtW32q7HHrvBki+BsZ0OuNv6EV3K9g==" + }, + "@sideway/address": { + "version": "4.1.4", + "resolved": "https://registry.npmjs.org/@sideway/address/-/address-4.1.4.tgz", + "integrity": "sha512-7vwq+rOHVWjyXxVlR76Agnvhy8I9rpzjosTESvmhNeXOXdZZB15Fl+TI9x1SiHZH5Jv2wTGduSxFDIaq0m3DUw==", + "requires": { + "@hapi/hoek": "^9.0.0" + } + }, + "@sideway/formula": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@sideway/formula/-/formula-3.0.1.tgz", + "integrity": "sha512-/poHZJJVjx3L+zVD6g9KgHfYnb443oi7wLu/XKojDviHy6HOEOA6z1Trk5aR1dGcmPenJEgb2sK2I80LeS3MIg==" + }, + "@sideway/pinpoint": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@sideway/pinpoint/-/pinpoint-2.0.0.tgz", + "integrity": "sha512-RNiOoTPkptFtSVzQevY/yWtZwf/RxyVnPy/OcA9HBM3MlGDnBEYL5B41H0MTn0Uec8Hi+2qUtTfG2WWZBmMejQ==" + }, + "@sinclair/typebox": { + "version": "0.25.24", + "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.25.24.tgz", + 
"integrity": "sha512-XJfwUVUKDHF5ugKwIcxEgc9k8b7HbznCp6eUfWgu710hMPNIO4aw4/zB5RogDQz8nd6gyCDpU9O/m6qYEWY6yQ==" + }, + "@sindresorhus/is": { + "version": "0.14.0", + "resolved": "https://registry.npmjs.org/@sindresorhus/is/-/is-0.14.0.tgz", + "integrity": "sha512-9NET910DNaIPngYnLLPeg+Ogzqsi9uM4mSboU5y6p8S5DzMTVEsJZrawi+BoDNUVBa2DhJqQYUFvMDfgU062LQ==" + }, + "@slorber/static-site-generator-webpack-plugin": { + "version": "4.0.7", + "resolved": "https://registry.npmjs.org/@slorber/static-site-generator-webpack-plugin/-/static-site-generator-webpack-plugin-4.0.7.tgz", + "integrity": "sha512-Ug7x6z5lwrz0WqdnNFOMYrDQNTPAprvHLSh6+/fmml3qUiz6l5eq+2MzLKWtn/q5K5NpSiFsZTP/fck/3vjSxA==", + "requires": { + "eval": "^0.1.8", + "p-map": "^4.0.0", + "webpack-sources": "^3.2.2" + } + }, + "@svgr/babel-plugin-add-jsx-attribute": { + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-add-jsx-attribute/-/babel-plugin-add-jsx-attribute-6.5.1.tgz", + "integrity": "sha512-9PYGcXrAxitycIjRmZB+Q0JaN07GZIWaTBIGQzfaZv+qr1n8X1XUEJ5rZ/vx6OVD9RRYlrNnXWExQXcmZeD/BQ==", + "requires": {} + }, + "@svgr/babel-plugin-remove-jsx-attribute": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-remove-jsx-attribute/-/babel-plugin-remove-jsx-attribute-8.0.0.tgz", + "integrity": "sha512-BcCkm/STipKvbCl6b7QFrMh/vx00vIP63k2eM66MfHJzPr6O2U0jYEViXkHJWqXqQYjdeA9cuCl5KWmlwjDvbA==", + "requires": {} + }, + "@svgr/babel-plugin-remove-jsx-empty-expression": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-remove-jsx-empty-expression/-/babel-plugin-remove-jsx-empty-expression-8.0.0.tgz", + "integrity": "sha512-5BcGCBfBxB5+XSDSWnhTThfI9jcO5f0Ai2V24gZpG+wXF14BzwxxdDb4g6trdOux0rhibGs385BeFMSmxtS3uA==", + "requires": {} + }, + "@svgr/babel-plugin-replace-jsx-attribute-value": { + "version": "6.5.1", + "resolved": 
"https://registry.npmjs.org/@svgr/babel-plugin-replace-jsx-attribute-value/-/babel-plugin-replace-jsx-attribute-value-6.5.1.tgz", + "integrity": "sha512-8DPaVVE3fd5JKuIC29dqyMB54sA6mfgki2H2+swh+zNJoynC8pMPzOkidqHOSc6Wj032fhl8Z0TVn1GiPpAiJg==", + "requires": {} + }, + "@svgr/babel-plugin-svg-dynamic-title": { + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-svg-dynamic-title/-/babel-plugin-svg-dynamic-title-6.5.1.tgz", + "integrity": "sha512-FwOEi0Il72iAzlkaHrlemVurgSQRDFbk0OC8dSvD5fSBPHltNh7JtLsxmZUhjYBZo2PpcU/RJvvi6Q0l7O7ogw==", + "requires": {} + }, + "@svgr/babel-plugin-svg-em-dimensions": { + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-svg-em-dimensions/-/babel-plugin-svg-em-dimensions-6.5.1.tgz", + "integrity": "sha512-gWGsiwjb4tw+ITOJ86ndY/DZZ6cuXMNE/SjcDRg+HLuCmwpcjOktwRF9WgAiycTqJD/QXqL2f8IzE2Rzh7aVXA==", + "requires": {} + }, + "@svgr/babel-plugin-transform-react-native-svg": { + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-transform-react-native-svg/-/babel-plugin-transform-react-native-svg-6.5.1.tgz", + "integrity": "sha512-2jT3nTayyYP7kI6aGutkyfJ7UMGtuguD72OjeGLwVNyfPRBD8zQthlvL+fAbAKk5n9ZNcvFkp/b1lZ7VsYqVJg==", + "requires": {} + }, + "@svgr/babel-plugin-transform-svg-component": { + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-transform-svg-component/-/babel-plugin-transform-svg-component-6.5.1.tgz", + "integrity": "sha512-a1p6LF5Jt33O3rZoVRBqdxL350oge54iZWHNI6LJB5tQ7EelvD/Mb1mfBiZNAan0dt4i3VArkFRjA4iObuNykQ==", + "requires": {} + }, + "@svgr/babel-preset": { + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/@svgr/babel-preset/-/babel-preset-6.5.1.tgz", + "integrity": "sha512-6127fvO/FF2oi5EzSQOAjo1LE3OtNVh11R+/8FXa+mHx1ptAaS4cknIjnUA7e6j6fwGGJ17NzaTJFUwOV2zwCw==", + "requires": { + "@svgr/babel-plugin-add-jsx-attribute": "^6.5.1", + "@svgr/babel-plugin-remove-jsx-attribute": "*", + 
"@svgr/babel-plugin-remove-jsx-empty-expression": "*", + "@svgr/babel-plugin-replace-jsx-attribute-value": "^6.5.1", + "@svgr/babel-plugin-svg-dynamic-title": "^6.5.1", + "@svgr/babel-plugin-svg-em-dimensions": "^6.5.1", + "@svgr/babel-plugin-transform-react-native-svg": "^6.5.1", + "@svgr/babel-plugin-transform-svg-component": "^6.5.1" + } + }, + "@svgr/core": { + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/@svgr/core/-/core-6.5.1.tgz", + "integrity": "sha512-/xdLSWxK5QkqG524ONSjvg3V/FkNyCv538OIBdQqPNaAta3AsXj/Bd2FbvR87yMbXO2hFSWiAe/Q6IkVPDw+mw==", + "requires": { + "@babel/core": "^7.19.6", + "@svgr/babel-preset": "^6.5.1", + "@svgr/plugin-jsx": "^6.5.1", + "camelcase": "^6.2.0", + "cosmiconfig": "^7.0.1" + } + }, + "@svgr/hast-util-to-babel-ast": { + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/@svgr/hast-util-to-babel-ast/-/hast-util-to-babel-ast-6.5.1.tgz", + "integrity": "sha512-1hnUxxjd83EAxbL4a0JDJoD3Dao3hmjvyvyEV8PzWmLK3B9m9NPlW7GKjFyoWE8nM7HnXzPcmmSyOW8yOddSXw==", + "requires": { + "@babel/types": "^7.20.0", + "entities": "^4.4.0" + } + }, + "@svgr/plugin-jsx": { + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/@svgr/plugin-jsx/-/plugin-jsx-6.5.1.tgz", + "integrity": "sha512-+UdQxI3jgtSjCykNSlEMuy1jSRQlGC7pqBCPvkG/2dATdWo082zHTTK3uhnAju2/6XpE6B5mZ3z4Z8Ns01S8Gw==", + "requires": { + "@babel/core": "^7.19.6", + "@svgr/babel-preset": "^6.5.1", + "@svgr/hast-util-to-babel-ast": "^6.5.1", + "svg-parser": "^2.0.4" + } + }, + "@svgr/plugin-svgo": { + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/@svgr/plugin-svgo/-/plugin-svgo-6.5.1.tgz", + "integrity": "sha512-omvZKf8ixP9z6GWgwbtmP9qQMPX4ODXi+wzbVZgomNFsUIlHA1sf4fThdwTWSsZGgvGAG6yE+b/F5gWUkcZ/iQ==", + "requires": { + "cosmiconfig": "^7.0.1", + "deepmerge": "^4.2.2", + "svgo": "^2.8.0" + } + }, + "@svgr/webpack": { + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/@svgr/webpack/-/webpack-6.5.1.tgz", + "integrity": 
"sha512-cQ/AsnBkXPkEK8cLbv4Dm7JGXq2XrumKnL1dRpJD9rIO2fTIlJI9a1uCciYG1F2aUsox/hJQyNGbt3soDxSRkA==", + "requires": { + "@babel/core": "^7.19.6", + "@babel/plugin-transform-react-constant-elements": "^7.18.12", + "@babel/preset-env": "^7.19.4", + "@babel/preset-react": "^7.18.6", + "@babel/preset-typescript": "^7.18.6", + "@svgr/core": "^6.5.1", + "@svgr/plugin-jsx": "^6.5.1", + "@svgr/plugin-svgo": "^6.5.1" + } + }, + "@szmarczak/http-timer": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@szmarczak/http-timer/-/http-timer-1.1.2.tgz", + "integrity": "sha512-XIB2XbzHTN6ieIjfIMV9hlVcfPU26s2vafYWQcZHWXHOxiaRZYEDKEwdl129Zyg50+foYV2jCgtrqSA6qNuNSA==", + "requires": { + "defer-to-connect": "^1.0.1" + } + }, + "@trysound/sax": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/@trysound/sax/-/sax-0.2.0.tgz", + "integrity": "sha512-L7z9BgrNEcYyUYtF+HaEfiS5ebkh9jXqbszz7pC0hRBPaatV0XjSD3+eHrpqFemQfgwiFF0QPIarnIihIDn7OA==" + }, + "@types/body-parser": { + "version": "1.19.2", + "resolved": "https://registry.npmjs.org/@types/body-parser/-/body-parser-1.19.2.tgz", + "integrity": "sha512-ALYone6pm6QmwZoAgeyNksccT9Q4AWZQ6PvfwR37GT6r6FWUPguq6sUmNGSMV2Wr761oQoBxwGGa6DR5o1DC9g==", + "requires": { + "@types/connect": "*", + "@types/node": "*" + } + }, + "@types/bonjour": { + "version": "3.5.10", + "resolved": "https://registry.npmjs.org/@types/bonjour/-/bonjour-3.5.10.tgz", + "integrity": "sha512-p7ienRMiS41Nu2/igbJxxLDWrSZ0WxM8UQgCeO9KhoVF7cOVFkrKsiDr1EsJIla8vV3oEEjGcz11jc5yimhzZw==", + "requires": { + "@types/node": "*" + } + }, + "@types/connect": { + "version": "3.4.35", + "resolved": "https://registry.npmjs.org/@types/connect/-/connect-3.4.35.tgz", + "integrity": "sha512-cdeYyv4KWoEgpBISTxWvqYsVy444DOqehiF3fM3ne10AmJ62RSyNkUnxMJXHQWRQQX2eR94m5y1IZyDwBjV9FQ==", + "requires": { + "@types/node": "*" + } + }, + "@types/connect-history-api-fallback": { + "version": "1.5.0", + "resolved": 
"https://registry.npmjs.org/@types/connect-history-api-fallback/-/connect-history-api-fallback-1.5.0.tgz", + "integrity": "sha512-4x5FkPpLipqwthjPsF7ZRbOv3uoLUFkTA9G9v583qi4pACvq0uTELrB8OLUzPWUI4IJIyvM85vzkV1nyiI2Lig==", + "requires": { + "@types/express-serve-static-core": "*", + "@types/node": "*" + } + }, + "@types/eslint": { + "version": "8.40.1", + "resolved": "https://registry.npmjs.org/@types/eslint/-/eslint-8.40.1.tgz", + "integrity": "sha512-vRb792M4mF1FBT+eoLecmkpLXwxsBHvWWRGJjzbYANBM6DtiJc6yETyv4rqDA6QNjF1pkj1U7LMA6dGb3VYlHw==", + "requires": { + "@types/estree": "*", + "@types/json-schema": "*" + } + }, + "@types/eslint-scope": { + "version": "3.7.4", + "resolved": "https://registry.npmjs.org/@types/eslint-scope/-/eslint-scope-3.7.4.tgz", + "integrity": "sha512-9K4zoImiZc3HlIp6AVUDE4CWYx22a+lhSZMYNpbjW04+YF0KWj4pJXnEMjdnFTiQibFFmElcsasJXDbdI/EPhA==", + "requires": { + "@types/eslint": "*", + "@types/estree": "*" + } + }, + "@types/estree": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.1.tgz", + "integrity": "sha512-LG4opVs2ANWZ1TJoKc937iMmNstM/d0ae1vNbnBvBhqCSezgVUOzcLCqbI5elV8Vy6WKwKjaqR+zO9VKirBBCA==" + }, + "@types/express": { + "version": "4.17.17", + "resolved": "https://registry.npmjs.org/@types/express/-/express-4.17.17.tgz", + "integrity": "sha512-Q4FmmuLGBG58btUnfS1c1r/NQdlp3DMfGDGig8WhfpA2YRUtEkxAjkZb0yvplJGYdF1fsQ81iMDcH24sSCNC/Q==", + "requires": { + "@types/body-parser": "*", + "@types/express-serve-static-core": "^4.17.33", + "@types/qs": "*", + "@types/serve-static": "*" + } + }, + "@types/express-serve-static-core": { + "version": "4.17.35", + "resolved": "https://registry.npmjs.org/@types/express-serve-static-core/-/express-serve-static-core-4.17.35.tgz", + "integrity": "sha512-wALWQwrgiB2AWTT91CB62b6Yt0sNHpznUXeZEcnPU3DRdlDIz74x8Qg1UUYKSVFi+va5vKOLYRBI1bRKiLLKIg==", + "requires": { + "@types/node": "*", + "@types/qs": "*", + "@types/range-parser": "*", + "@types/send": "*" + } + }, + 
"@types/hast": { + "version": "2.3.4", + "resolved": "https://registry.npmjs.org/@types/hast/-/hast-2.3.4.tgz", + "integrity": "sha512-wLEm0QvaoawEDoTRwzTXp4b4jpwiJDvR5KMnFnVodm3scufTlBOWRD6N1OBf9TZMhjlNsSfcO5V+7AF4+Vy+9g==", + "requires": { + "@types/unist": "*" + } + }, + "@types/history": { + "version": "4.7.11", + "resolved": "https://registry.npmjs.org/@types/history/-/history-4.7.11.tgz", + "integrity": "sha512-qjDJRrmvBMiTx+jyLxvLfJU7UznFuokDv4f3WRuriHKERccVpFU+8XMQUAbDzoiJCsmexxRExQeMwwCdamSKDA==" + }, + "@types/html-minifier-terser": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/@types/html-minifier-terser/-/html-minifier-terser-6.1.0.tgz", + "integrity": "sha512-oh/6byDPnL1zeNXFrDXFLyZjkr1MsBG667IM792caf1L2UPOOMf65NFzjUH/ltyfwjAGfs1rsX1eftK0jC/KIg==" + }, + "@types/http-proxy": { + "version": "1.17.11", + "resolved": "https://registry.npmjs.org/@types/http-proxy/-/http-proxy-1.17.11.tgz", + "integrity": "sha512-HC8G7c1WmaF2ekqpnFq626xd3Zz0uvaqFmBJNRZCGEZCXkvSdJoNFn/8Ygbd9fKNQj8UzLdCETaI0UWPAjK7IA==", + "requires": { + "@types/node": "*" + } + }, + "@types/istanbul-lib-coverage": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/@types/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.4.tgz", + "integrity": "sha512-z/QT1XN4K4KYuslS23k62yDIDLwLFkzxOuMplDtObz0+y7VqJCaO2o+SPwHCvLFZh7xazvvoor2tA/hPz9ee7g==" + }, + "@types/istanbul-lib-report": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@types/istanbul-lib-report/-/istanbul-lib-report-3.0.0.tgz", + "integrity": "sha512-plGgXAPfVKFoYfa9NpYDAkseG+g6Jr294RqeqcqDixSbU34MZVJRi/P+7Y8GDpzkEwLaGZZOpKIEmeVZNtKsrg==", + "requires": { + "@types/istanbul-lib-coverage": "*" + } + }, + "@types/istanbul-reports": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@types/istanbul-reports/-/istanbul-reports-3.0.1.tgz", + "integrity": "sha512-c3mAZEuK0lvBp8tmuL74XRKn1+y2dcwOUpH7x4WrF6gk1GIgiluDRgMYQtw2OFcBvAJWlt6ASU3tSqxp0Uu0Aw==", + "requires": { + 
"@types/istanbul-lib-report": "*" + } + }, + "@types/json-schema": { + "version": "7.0.12", + "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.12.tgz", + "integrity": "sha512-Hr5Jfhc9eYOQNPYO5WLDq/n4jqijdHNlDXjuAQkkt+mWdQR+XJToOHrsD4cPaMXpn6KO7y2+wM8AZEs8VpBLVA==" + }, + "@types/lodash": { + "version": "4.14.194", + "resolved": "https://registry.npmjs.org/@types/lodash/-/lodash-4.14.194.tgz", + "integrity": "sha512-r22s9tAS7imvBt2lyHC9B8AGwWnXaYb1tY09oyLkXDs4vArpYJzw09nj8MLx5VfciBPGIb+ZwG0ssYnEPJxn/g==" + }, + "@types/mdast": { + "version": "3.0.11", + "resolved": "https://registry.npmjs.org/@types/mdast/-/mdast-3.0.11.tgz", + "integrity": "sha512-Y/uImid8aAwrEA24/1tcRZwpxX3pIFTSilcNDKSPn+Y2iDywSEachzRuvgAYYLR3wpGXAsMbv5lvKLDZLeYPAw==", + "requires": { + "@types/unist": "*" + } + }, + "@types/mime": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/@types/mime/-/mime-1.3.2.tgz", + "integrity": "sha512-YATxVxgRqNH6nHEIsvg6k2Boc1JHI9ZbH5iWFFv/MTkchz3b1ieGDa5T0a9RznNdI0KhVbdbWSN+KWWrQZRxTw==" + }, + "@types/node": { + "version": "20.2.5", + "resolved": "https://registry.npmjs.org/@types/node/-/node-20.2.5.tgz", + "integrity": "sha512-JJulVEQXmiY9Px5axXHeYGLSjhkZEnD+MDPDGbCbIAbMslkKwmygtZFy1X6s/075Yo94sf8GuSlFfPzysQrWZQ==" + }, + "@types/parse-json": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/@types/parse-json/-/parse-json-4.0.0.tgz", + "integrity": "sha512-//oorEZjL6sbPcKUaCdIGlIUeH26mgzimjBB77G6XRgnDl/L5wOnpyBGRe/Mmf5CVW3PwEBE1NjiMZ/ssFh4wA==" + }, + "@types/parse5": { + "version": "5.0.3", + "resolved": "https://registry.npmjs.org/@types/parse5/-/parse5-5.0.3.tgz", + "integrity": "sha512-kUNnecmtkunAoQ3CnjmMkzNU/gtxG8guhi+Fk2U/kOpIKjIMKnXGp4IJCgQJrXSgMsWYimYG4TGjz/UzbGEBTw==" + }, + "@types/prop-types": { + "version": "15.7.5", + "resolved": "https://registry.npmjs.org/@types/prop-types/-/prop-types-15.7.5.tgz", + "integrity": 
"sha512-JCB8C6SnDoQf0cNycqd/35A7MjcnK+ZTqE7judS6o7utxUCg6imJg3QK2qzHKszlTjcj2cn+NwMB2i96ubpj7w==" + }, + "@types/qs": { + "version": "6.9.7", + "resolved": "https://registry.npmjs.org/@types/qs/-/qs-6.9.7.tgz", + "integrity": "sha512-FGa1F62FT09qcrueBA6qYTrJPVDzah9a+493+o2PCXsesWHIn27G98TsSMs3WPNbZIEj4+VJf6saSFpvD+3Zsw==" + }, + "@types/range-parser": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/@types/range-parser/-/range-parser-1.2.4.tgz", + "integrity": "sha512-EEhsLsD6UsDM1yFhAvy0Cjr6VwmpMWqFBCb9w07wVugF7w9nfajxLuVmngTIpgS6svCnm6Vaw+MZhoDCKnOfsw==" + }, + "@types/react": { + "version": "18.2.9", + "resolved": "https://registry.npmjs.org/@types/react/-/react-18.2.9.tgz", + "integrity": "sha512-pL3JAesUkF7PEQGxh5XOwdXGV907te6m1/Qe1ERJLgomojS6Ne790QiA7GUl434JEkFA2aAaB6qJ5z4e1zJn/w==", + "requires": { + "@types/prop-types": "*", + "@types/scheduler": "*", + "csstype": "^3.0.2" + } + }, + "@types/retry": { + "version": "0.12.0", + "resolved": "https://registry.npmjs.org/@types/retry/-/retry-0.12.0.tgz", + "integrity": "sha512-wWKOClTTiizcZhXnPY4wikVAwmdYHp8q6DmC+EJUzAMsycb7HB32Kh9RN4+0gExjmPmZSAQjgURXIGATPegAvA==" + }, + "@types/scheduler": { + "version": "0.16.3", + "resolved": "https://registry.npmjs.org/@types/scheduler/-/scheduler-0.16.3.tgz", + "integrity": "sha512-5cJ8CB4yAx7BH1oMvdU0Jh9lrEXyPkar6F9G/ERswkCuvP4KQZfZkSjcMbAICCpQTN4OuZn8tz0HiKv9TGZgrQ==" + }, + "@types/send": { + "version": "0.17.1", + "resolved": "https://registry.npmjs.org/@types/send/-/send-0.17.1.tgz", + "integrity": "sha512-Cwo8LE/0rnvX7kIIa3QHCkcuF21c05Ayb0ZfxPiv0W8VRiZiNW/WuRupHKpqqGVGf7SUA44QSOUKaEd9lIrd/Q==", + "requires": { + "@types/mime": "^1", + "@types/node": "*" + } + }, + "@types/serve-index": { + "version": "1.9.1", + "resolved": "https://registry.npmjs.org/@types/serve-index/-/serve-index-1.9.1.tgz", + "integrity": "sha512-d/Hs3nWDxNL2xAczmOVZNj92YZCS6RGxfBPjKzuu/XirCgXdpKEb88dYNbrYGint6IVWLNP+yonwVAuRC0T2Dg==", + "requires": { + "@types/express": "*" + } + 
}, + "@types/serve-static": { + "version": "1.15.1", + "resolved": "https://registry.npmjs.org/@types/serve-static/-/serve-static-1.15.1.tgz", + "integrity": "sha512-NUo5XNiAdULrJENtJXZZ3fHtfMolzZwczzBbnAeBbqBwG+LaG6YaJtuwzwGSQZ2wsCrxjEhNNjAkKigy3n8teQ==", + "requires": { + "@types/mime": "*", + "@types/node": "*" + } + }, + "@types/sockjs": { + "version": "0.3.33", + "resolved": "https://registry.npmjs.org/@types/sockjs/-/sockjs-0.3.33.tgz", + "integrity": "sha512-f0KEEe05NvUnat+boPTZ0dgaLZ4SfSouXUgv5noUiefG2ajgKjmETo9ZJyuqsl7dfl2aHlLJUiki6B4ZYldiiw==", + "requires": { + "@types/node": "*" + } + }, + "@types/unist": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/@types/unist/-/unist-2.0.6.tgz", + "integrity": "sha512-PBjIUxZHOuj0R15/xuwJYjFi+KZdNFrehocChv4g5hu6aFroHue8m0lBP0POdK2nKzbw0cgV1mws8+V/JAcEkQ==" + }, + "@types/ws": { + "version": "8.5.5", + "resolved": "https://registry.npmjs.org/@types/ws/-/ws-8.5.5.tgz", + "integrity": "sha512-lwhs8hktwxSjf9UaZ9tG5M03PGogvFaH8gUgLNbN9HKIg0dvv6q+gkSuJ8HN4/VbyxkuLzCjlN7GquQ0gUJfIg==", + "requires": { + "@types/node": "*" + } + }, + "@types/yargs": { + "version": "17.0.24", + "resolved": "https://registry.npmjs.org/@types/yargs/-/yargs-17.0.24.tgz", + "integrity": "sha512-6i0aC7jV6QzQB8ne1joVZ0eSFIstHsCrobmOtghM11yGlH0j43FKL2UhWdELkyps0zuf7qVTUVCCR+tgSlyLLw==", + "requires": { + "@types/yargs-parser": "*" + } + }, + "@types/yargs-parser": { + "version": "21.0.0", + "resolved": "https://registry.npmjs.org/@types/yargs-parser/-/yargs-parser-21.0.0.tgz", + "integrity": "sha512-iO9ZQHkZxHn4mSakYV0vFHAVDyEOIJQrV2uZ06HxEPcx+mt8swXoZHIbaaJ2crJYFfErySgktuTZ3BeLz+XmFA==" + }, + "@webassemblyjs/ast": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/ast/-/ast-1.11.6.tgz", + "integrity": "sha512-IN1xI7PwOvLPgjcf180gC1bqn3q/QaOCwYUahIOhbYUu8KA/3tw2RT/T0Gidi1l7Hhj5D/INhJxiICObqpMu4Q==", + "requires": { + "@webassemblyjs/helper-numbers": "1.11.6", + 
"@webassemblyjs/helper-wasm-bytecode": "1.11.6" + } + }, + "@webassemblyjs/floating-point-hex-parser": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/floating-point-hex-parser/-/floating-point-hex-parser-1.11.6.tgz", + "integrity": "sha512-ejAj9hfRJ2XMsNHk/v6Fu2dGS+i4UaXBXGemOfQ/JfQ6mdQg/WXtwleQRLLS4OvfDhv8rYnVwH27YJLMyYsxhw==" + }, + "@webassemblyjs/helper-api-error": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-api-error/-/helper-api-error-1.11.6.tgz", + "integrity": "sha512-o0YkoP4pVu4rN8aTJgAyj9hC2Sv5UlkzCHhxqWj8butaLvnpdc2jOwh4ewE6CX0txSfLn/UYaV/pheS2Txg//Q==" + }, + "@webassemblyjs/helper-buffer": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-buffer/-/helper-buffer-1.11.6.tgz", + "integrity": "sha512-z3nFzdcp1mb8nEOFFk8DrYLpHvhKC3grJD2ardfKOzmbmJvEf/tPIqCY+sNcwZIY8ZD7IkB2l7/pqhUhqm7hLA==" + }, + "@webassemblyjs/helper-numbers": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-numbers/-/helper-numbers-1.11.6.tgz", + "integrity": "sha512-vUIhZ8LZoIWHBohiEObxVm6hwP034jwmc9kuq5GdHZH0wiLVLIPcMCdpJzG4C11cHoQ25TFIQj9kaVADVX7N3g==", + "requires": { + "@webassemblyjs/floating-point-hex-parser": "1.11.6", + "@webassemblyjs/helper-api-error": "1.11.6", + "@xtuc/long": "4.2.2" + } + }, + "@webassemblyjs/helper-wasm-bytecode": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-bytecode/-/helper-wasm-bytecode-1.11.6.tgz", + "integrity": "sha512-sFFHKwcmBprO9e7Icf0+gddyWYDViL8bpPjJJl0WHxCdETktXdmtWLGVzoHbqUcY4Be1LkNfwTmXOJUFZYSJdA==" + }, + "@webassemblyjs/helper-wasm-section": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-section/-/helper-wasm-section-1.11.6.tgz", + "integrity": "sha512-LPpZbSOwTpEC2cgn4hTydySy1Ke+XEu+ETXuoyvuyezHO3Kjdu90KK95Sh9xTbmjrCsUwvWwCOQQNta37VrS9g==", + "requires": { + "@webassemblyjs/ast": "1.11.6", 
+ "@webassemblyjs/helper-buffer": "1.11.6", + "@webassemblyjs/helper-wasm-bytecode": "1.11.6", + "@webassemblyjs/wasm-gen": "1.11.6" + } + }, + "@webassemblyjs/ieee754": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/ieee754/-/ieee754-1.11.6.tgz", + "integrity": "sha512-LM4p2csPNvbij6U1f19v6WR56QZ8JcHg3QIJTlSwzFcmx6WSORicYj6I63f9yU1kEUtrpG+kjkiIAkevHpDXrg==", + "requires": { + "@xtuc/ieee754": "^1.2.0" + } + }, + "@webassemblyjs/leb128": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/leb128/-/leb128-1.11.6.tgz", + "integrity": "sha512-m7a0FhE67DQXgouf1tbN5XQcdWoNgaAuoULHIfGFIEVKA6tu/edls6XnIlkmS6FrXAquJRPni3ZZKjw6FSPjPQ==", + "requires": { + "@xtuc/long": "4.2.2" + } + }, + "@webassemblyjs/utf8": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/utf8/-/utf8-1.11.6.tgz", + "integrity": "sha512-vtXf2wTQ3+up9Zsg8sa2yWiQpzSsMyXj0qViVP6xKGCUT8p8YJ6HqI7l5eCnWx1T/FYdsv07HQs2wTFbbof/RA==" + }, + "@webassemblyjs/wasm-edit": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-edit/-/wasm-edit-1.11.6.tgz", + "integrity": "sha512-Ybn2I6fnfIGuCR+Faaz7YcvtBKxvoLV3Lebn1tM4o/IAJzmi9AWYIPWpyBfU8cC+JxAO57bk4+zdsTjJR+VTOw==", + "requires": { + "@webassemblyjs/ast": "1.11.6", + "@webassemblyjs/helper-buffer": "1.11.6", + "@webassemblyjs/helper-wasm-bytecode": "1.11.6", + "@webassemblyjs/helper-wasm-section": "1.11.6", + "@webassemblyjs/wasm-gen": "1.11.6", + "@webassemblyjs/wasm-opt": "1.11.6", + "@webassemblyjs/wasm-parser": "1.11.6", + "@webassemblyjs/wast-printer": "1.11.6" + } + }, + "@webassemblyjs/wasm-gen": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-gen/-/wasm-gen-1.11.6.tgz", + "integrity": "sha512-3XOqkZP/y6B4F0PBAXvI1/bky7GryoogUtfwExeP/v7Nzwo1QLcq5oQmpKlftZLbT+ERUOAZVQjuNVak6UXjPA==", + "requires": { + "@webassemblyjs/ast": "1.11.6", + "@webassemblyjs/helper-wasm-bytecode": "1.11.6", + 
"@webassemblyjs/ieee754": "1.11.6", + "@webassemblyjs/leb128": "1.11.6", + "@webassemblyjs/utf8": "1.11.6" + } + }, + "@webassemblyjs/wasm-opt": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-opt/-/wasm-opt-1.11.6.tgz", + "integrity": "sha512-cOrKuLRE7PCe6AsOVl7WasYf3wbSo4CeOk6PkrjS7g57MFfVUF9u6ysQBBODX0LdgSvQqRiGz3CXvIDKcPNy4g==", + "requires": { + "@webassemblyjs/ast": "1.11.6", + "@webassemblyjs/helper-buffer": "1.11.6", + "@webassemblyjs/wasm-gen": "1.11.6", + "@webassemblyjs/wasm-parser": "1.11.6" + } + }, + "@webassemblyjs/wasm-parser": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-parser/-/wasm-parser-1.11.6.tgz", + "integrity": "sha512-6ZwPeGzMJM3Dqp3hCsLgESxBGtT/OeCvCZ4TA1JUPYgmhAx38tTPR9JaKy0S5H3evQpO/h2uWs2j6Yc/fjkpTQ==", + "requires": { + "@webassemblyjs/ast": "1.11.6", + "@webassemblyjs/helper-api-error": "1.11.6", + "@webassemblyjs/helper-wasm-bytecode": "1.11.6", + "@webassemblyjs/ieee754": "1.11.6", + "@webassemblyjs/leb128": "1.11.6", + "@webassemblyjs/utf8": "1.11.6" + } + }, + "@webassemblyjs/wast-printer": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wast-printer/-/wast-printer-1.11.6.tgz", + "integrity": "sha512-JM7AhRcE+yW2GWYaKeHL5vt4xqee5N2WcezptmgyhNS+ScggqcT1OtXykhAb13Sn5Yas0j2uv9tHgrjwvzAP4A==", + "requires": { + "@webassemblyjs/ast": "1.11.6", + "@xtuc/long": "4.2.2" + } + }, + "@xtuc/ieee754": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@xtuc/ieee754/-/ieee754-1.2.0.tgz", + "integrity": "sha512-DX8nKgqcGwsc0eJSqYt5lwP4DH5FlHnmuWWBRy7X0NcaGR0ZtuyeESgMwTYVEtxmsNGY+qit4QYT/MIYTOTPeA==" + }, + "@xtuc/long": { + "version": "4.2.2", + "resolved": "https://registry.npmjs.org/@xtuc/long/-/long-4.2.2.tgz", + "integrity": "sha512-NuHqBY1PB/D8xU6s/thBgOAiAP7HOYDQ32+BFZILJ8ivkUkAHQnWfn6WhL79Owj1qmUnoN/YPhktdIoucipkAQ==" + }, + "accepts": { + "version": "1.3.8", + "resolved": 
"https://registry.npmjs.org/accepts/-/accepts-1.3.8.tgz", + "integrity": "sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw==", + "requires": { + "mime-types": "~2.1.34", + "negotiator": "0.6.3" + }, + "dependencies": { + "mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==" + }, + "mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "requires": { + "mime-db": "1.52.0" + } + } + } + }, + "acorn": { + "version": "8.8.2", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.8.2.tgz", + "integrity": "sha512-xjIYgE8HBrkpd/sJqOGNspf8uHG+NOHGOw6a/Urj8taM2EXfdNAH2oFcPeIFfsv3+kz/mJrS5VuMqbNLjCa2vw==" + }, + "acorn-import-assertions": { + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/acorn-import-assertions/-/acorn-import-assertions-1.9.0.tgz", + "integrity": "sha512-cmMwop9x+8KFhxvKrKfPYmN6/pKTYYHBqLa0DfvVZcKMJWNyWLnaqND7dx/qn66R7ewM1UX5XMaDVP5wlVTaVA==", + "requires": {} + }, + "acorn-walk": { + "version": "8.2.0", + "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.2.0.tgz", + "integrity": "sha512-k+iyHEuPgSw6SbuDpGQM+06HQUa04DZ3o+F6CSzXMvvI5KMvnaEqXe+YVe555R9nn6GPt404fos4wcgpw12SDA==" + }, + "address": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/address/-/address-1.2.2.tgz", + "integrity": "sha512-4B/qKCfeE/ODUaAUpSwfzazo5x29WD4r3vXiWsB7I2mSDAihwEqKO+g8GELZUQSSAo5e1XTYh3ZVfLyxBc12nA==" + }, + "aggregate-error": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/aggregate-error/-/aggregate-error-3.1.0.tgz", + "integrity": 
"sha512-4I7Td01quW/RpocfNayFdFVk1qSuoh0E7JrbRJ16nH01HhKFQ88INq9Sd+nd72zqRySlr9BmDA8xlEJ6vJMrYA==", + "requires": { + "clean-stack": "^2.0.0", + "indent-string": "^4.0.0" + } + }, + "ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "requires": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + } + }, + "ajv-formats": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/ajv-formats/-/ajv-formats-2.1.1.tgz", + "integrity": "sha512-Wx0Kx52hxE7C18hkMEggYlEifqWZtYaRgouJor+WMdPnQyEK13vgEWyVNup7SoeeoLMsr4kf5h6dOW11I15MUA==", + "requires": { + "ajv": "^8.0.0" + }, + "dependencies": { + "ajv": { + "version": "8.12.0", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.12.0.tgz", + "integrity": "sha512-sRu1kpcO9yLtYxBKvqfTeh9KzZEwO3STyX1HT+4CaDzC6HpTGYhIhPIzj9XuKU7KYDwnaeh5hcOwjy1QuJzBPA==", + "requires": { + "fast-deep-equal": "^3.1.1", + "json-schema-traverse": "^1.0.0", + "require-from-string": "^2.0.2", + "uri-js": "^4.2.2" + } + }, + "json-schema-traverse": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", + "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==" + } + } + }, + "ajv-keywords": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", + "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==", + "requires": {} + }, + "ansi-align": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/ansi-align/-/ansi-align-3.0.1.tgz", + "integrity": "sha512-IOfwwBF5iczOjp/WeY4YxyjqAFMQoZufdQWDd19SEExbVLNXqvpzSJ/M7Za4/sCPmQ0+GRquoA7bGcINcxew6w==", + "requires": { + 
"string-width": "^4.1.0" + }, + "dependencies": { + "emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" + }, + "string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "requires": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + } + } + } + }, + "ansi-html-community": { + "version": "0.0.8", + "resolved": "https://registry.npmjs.org/ansi-html-community/-/ansi-html-community-0.0.8.tgz", + "integrity": "sha512-1APHAyr3+PCamwNw3bXCPp4HFLONZt/yIH0sZp0/469KWNTEy+qN5jQ3GVX6DMZ1UXAi34yVwtTeaG/HpBuuzw==" + }, + "ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==" + }, + "ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "requires": { + "color-convert": "^2.0.1" + } + }, + "anymatch": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", + "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", + "requires": { + "normalize-path": "^3.0.0", + "picomatch": "^2.0.4" + } + }, + "argparse": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==" + }, + "array-flatten": { + 
"version": "2.1.2", + "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-2.1.2.tgz", + "integrity": "sha512-hNfzcOV8W4NdualtqBFPyVO+54DSJuZGY9qT4pRroB6S9e3iiido2ISIC5h9R2sPJ8H3FHCIiEnsv1lPXO3KtQ==" + }, + "array-union": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz", + "integrity": "sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==" + }, + "at-least-node": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/at-least-node/-/at-least-node-1.0.0.tgz", + "integrity": "sha512-+q/t7Ekv1EDY2l6Gda6LLiX14rU9TV20Wa3ofeQmwPFZbOMo9DXrLbOjFaaclkXKWidIaopwAObQDqwWtGUjqg==" + }, + "autoprefixer": { + "version": "10.4.14", + "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.4.14.tgz", + "integrity": "sha512-FQzyfOsTlwVzjHxKEqRIAdJx9niO6VCBCoEwax/VLSoQF29ggECcPuBqUMZ+u8jCZOPSy8b8/8KnuFbp0SaFZQ==", + "requires": { + "browserslist": "^4.21.5", + "caniuse-lite": "^1.0.30001464", + "fraction.js": "^4.2.0", + "normalize-range": "^0.1.2", + "picocolors": "^1.0.0", + "postcss-value-parser": "^4.2.0" + } + }, + "axios": { + "version": "0.25.0", + "resolved": "https://registry.npmjs.org/axios/-/axios-0.25.0.tgz", + "integrity": "sha512-cD8FOb0tRH3uuEe6+evtAbgJtfxr7ly3fQjYcMcuPlgkwVS9xboaVIpcDV+cYQe+yGykgwZCs1pzjntcGa6l5g==", + "requires": { + "follow-redirects": "^1.14.7" + } + }, + "babel-loader": { + "version": "8.3.0", + "resolved": "https://registry.npmjs.org/babel-loader/-/babel-loader-8.3.0.tgz", + "integrity": "sha512-H8SvsMF+m9t15HNLMipppzkC+Y2Yq+v3SonZyU70RBL/h1gxPkH08Ot8pEE9Z4Kd+czyWJClmFS8qzIP9OZ04Q==", + "requires": { + "find-cache-dir": "^3.3.1", + "loader-utils": "^2.0.0", + "make-dir": "^3.1.0", + "schema-utils": "^2.6.5" + } + }, + "babel-plugin-apply-mdx-type-prop": { + "version": "1.6.22", + "resolved": "https://registry.npmjs.org/babel-plugin-apply-mdx-type-prop/-/babel-plugin-apply-mdx-type-prop-1.6.22.tgz", + 
"integrity": "sha512-VefL+8o+F/DfK24lPZMtJctrCVOfgbqLAGZSkxwhazQv4VxPg3Za/i40fu22KR2m8eEda+IfSOlPLUSIiLcnCQ==", + "requires": { + "@babel/helper-plugin-utils": "7.10.4", + "@mdx-js/util": "1.6.22" + }, + "dependencies": { + "@babel/helper-plugin-utils": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.10.4.tgz", + "integrity": "sha512-O4KCvQA6lLiMU9l2eawBPMf1xPP8xPfB3iEQw150hOVTqj/rfXz0ThTb4HEzqQfs2Bmo5Ay8BzxfzVtBrr9dVg==" + } + } + }, + "babel-plugin-dynamic-import-node": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/babel-plugin-dynamic-import-node/-/babel-plugin-dynamic-import-node-2.3.3.tgz", + "integrity": "sha512-jZVI+s9Zg3IqA/kdi0i6UDCybUI3aSBLnglhYbSSjKlV7yF1F/5LWv8MakQmvYpnbJDS6fcBL2KzHSxNCMtWSQ==", + "requires": { + "object.assign": "^4.1.0" + } + }, + "babel-plugin-extract-import-names": { + "version": "1.6.22", + "resolved": "https://registry.npmjs.org/babel-plugin-extract-import-names/-/babel-plugin-extract-import-names-1.6.22.tgz", + "integrity": "sha512-yJ9BsJaISua7d8zNT7oRG1ZLBJCIdZ4PZqmH8qa9N5AK01ifk3fnkc98AXhtzE7UkfCsEumvoQWgoYLhOnJ7jQ==", + "requires": { + "@babel/helper-plugin-utils": "7.10.4" + }, + "dependencies": { + "@babel/helper-plugin-utils": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.10.4.tgz", + "integrity": "sha512-O4KCvQA6lLiMU9l2eawBPMf1xPP8xPfB3iEQw150hOVTqj/rfXz0ThTb4HEzqQfs2Bmo5Ay8BzxfzVtBrr9dVg==" + } + } + }, + "babel-plugin-polyfill-corejs2": { + "version": "0.4.3", + "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs2/-/babel-plugin-polyfill-corejs2-0.4.3.tgz", + "integrity": "sha512-bM3gHc337Dta490gg+/AseNB9L4YLHxq1nGKZZSHbhXv4aTYU2MD2cjza1Ru4S6975YLTaL1K8uJf6ukJhhmtw==", + "requires": { + "@babel/compat-data": "^7.17.7", + "@babel/helper-define-polyfill-provider": "^0.4.0", + "semver": "^6.1.1" + }, + "dependencies": { + "semver": { + "version": 
"6.3.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", + "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==" + } + } + }, + "babel-plugin-polyfill-corejs3": { + "version": "0.8.1", + "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs3/-/babel-plugin-polyfill-corejs3-0.8.1.tgz", + "integrity": "sha512-ikFrZITKg1xH6pLND8zT14UPgjKHiGLqex7rGEZCH2EvhsneJaJPemmpQaIZV5AL03II+lXylw3UmddDK8RU5Q==", + "requires": { + "@babel/helper-define-polyfill-provider": "^0.4.0", + "core-js-compat": "^3.30.1" + } + }, + "babel-plugin-polyfill-regenerator": { + "version": "0.5.0", + "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-regenerator/-/babel-plugin-polyfill-regenerator-0.5.0.tgz", + "integrity": "sha512-hDJtKjMLVa7Z+LwnTCxoDLQj6wdc+B8dun7ayF2fYieI6OzfuvcLMB32ihJZ4UhCBwNYGl5bg/x/P9cMdnkc2g==", + "requires": { + "@babel/helper-define-polyfill-provider": "^0.4.0" + } + }, + "bail": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/bail/-/bail-1.0.5.tgz", + "integrity": "sha512-xFbRxM1tahm08yHBP16MMjVUAvDaBMD38zsM9EMAUN61omwLmKlOpB/Zku5QkjZ8TZ4vn53pj+t518cH0S03RQ==" + }, + "balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==" + }, + "batch": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/batch/-/batch-0.6.1.tgz", + "integrity": "sha512-x+VAiMRL6UPkx+kudNvxTl6hB2XNNCG2r+7wixVfIYwu/2HKRXimwQyaumLjMveWvT2Hkd/cAJw+QBMfJ/EKVw==" + }, + "big.js": { + "version": "5.2.2", + "resolved": "https://registry.npmjs.org/big.js/-/big.js-5.2.2.tgz", + "integrity": "sha512-vyL2OymJxmarO8gxMr0mhChsO9QGwhynfuu4+MHTAW6czfq9humCB7rKpUjDd9YUiDPU4mzpyupFSvOClAwbmQ==" + }, + "binary-extensions": { + "version": "2.2.0", + "resolved": 
"https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.2.0.tgz", + "integrity": "sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA==" + }, + "body-parser": { + "version": "1.20.1", + "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.1.tgz", + "integrity": "sha512-jWi7abTbYwajOytWCQc37VulmWiRae5RyTpaCyDcS5/lMdtwSz5lOpDE67srw/HYe35f1z3fDQw+3txg7gNtWw==", + "requires": { + "bytes": "3.1.2", + "content-type": "~1.0.4", + "debug": "2.6.9", + "depd": "2.0.0", + "destroy": "1.2.0", + "http-errors": "2.0.0", + "iconv-lite": "0.4.24", + "on-finished": "2.4.1", + "qs": "6.11.0", + "raw-body": "2.5.1", + "type-is": "~1.6.18", + "unpipe": "1.0.0" + }, + "dependencies": { + "bytes": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz", + "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==" + }, + "debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "requires": { + "ms": "2.0.0" + } + }, + "ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + } + } + }, + "bonjour-service": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/bonjour-service/-/bonjour-service-1.1.1.tgz", + "integrity": "sha512-Z/5lQRMOG9k7W+FkeGTNjh7htqn/2LMnfOvBZ8pynNZCM9MwkQkI3zeI4oz09uWdcgmgHugVvBqxGg4VQJ5PCg==", + "requires": { + "array-flatten": "^2.1.2", + "dns-equal": "^1.0.0", + "fast-deep-equal": "^3.1.3", + "multicast-dns": "^7.2.5" + } + }, + "boolbase": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/boolbase/-/boolbase-1.0.0.tgz", + "integrity": 
"sha512-JZOSA7Mo9sNGB8+UjSgzdLtokWAky1zbztM3WRLCbZ70/3cTANmQmOdR7y2g+J0e2WXywy1yS468tY+IruqEww==" + }, + "boxen": { + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/boxen/-/boxen-6.2.1.tgz", + "integrity": "sha512-H4PEsJXfFI/Pt8sjDWbHlQPx4zL/bvSQjcilJmaulGt5mLDorHOHpmdXAJcBcmru7PhYSp/cDMWRko4ZUMFkSw==", + "requires": { + "ansi-align": "^3.0.1", + "camelcase": "^6.2.0", + "chalk": "^4.1.2", + "cli-boxes": "^3.0.0", + "string-width": "^5.0.1", + "type-fest": "^2.5.0", + "widest-line": "^4.0.1", + "wrap-ansi": "^8.0.1" + } + }, + "brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "requires": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "braces": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", + "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", + "requires": { + "fill-range": "^7.0.1" + } + }, + "browserslist": { + "version": "4.21.7", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.21.7.tgz", + "integrity": "sha512-BauCXrQ7I2ftSqd2mvKHGo85XR0u7Ru3C/Hxsy/0TkfCtjrmAbPdzLGasmoiBxplpDXlPvdjX9u7srIMfgasNA==", + "requires": { + "caniuse-lite": "^1.0.30001489", + "electron-to-chromium": "^1.4.411", + "node-releases": "^2.0.12", + "update-browserslist-db": "^1.0.11" + } + }, + "buffer-from": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz", + "integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==" + }, + "bytes": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.0.0.tgz", + "integrity": 
"sha512-pMhOfFDPiv9t5jjIXkHosWmkSyQbvsgEVNkz0ERHbuLh2T/7j4Mqqpz523Fe8MVY89KC6Sh/QfS2sM+SjgFDcw==" + }, + "cacheable-request": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/cacheable-request/-/cacheable-request-6.1.0.tgz", + "integrity": "sha512-Oj3cAGPCqOZX7Rz64Uny2GYAZNliQSqfbePrgAQ1wKAihYmCUnraBtJtKcGR4xz7wF+LoJC+ssFZvv5BgF9Igg==", + "requires": { + "clone-response": "^1.0.2", + "get-stream": "^5.1.0", + "http-cache-semantics": "^4.0.0", + "keyv": "^3.0.0", + "lowercase-keys": "^2.0.0", + "normalize-url": "^4.1.0", + "responselike": "^1.0.2" + }, + "dependencies": { + "get-stream": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-5.2.0.tgz", + "integrity": "sha512-nBF+F1rAZVCu/p7rjzgA+Yb4lfYXrpl7a6VmJrU8wF9I1CKvP/QwPNZHnOlwbTkY6dvtFIzFMSyQXbLoTQPRpA==", + "requires": { + "pump": "^3.0.0" + } + }, + "lowercase-keys": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-2.0.0.tgz", + "integrity": "sha512-tqNXrS78oMOE73NMxK4EMLQsQowWf8jKooH9g7xPavRT706R6bkQJ6DY2Te7QukaZsulxa30wQ7bk0pm4XiHmA==" + }, + "normalize-url": { + "version": "4.5.1", + "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-4.5.1.tgz", + "integrity": "sha512-9UZCFRHQdNrfTpGg8+1INIg93B6zE0aXMVFkw1WFwvO4SlZywU6aLg5Of0Ap/PgcbSw4LNxvMWXMeugwMCX0AA==" + } + } + }, + "call-bind": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.2.tgz", + "integrity": "sha512-7O+FbCihrB5WGbFYesctwmTKae6rOiIzmz1icreWJ+0aA7LJfuqhEso2T9ncpcFtzMQtzXf2QGGueWJGTYsqrA==", + "requires": { + "function-bind": "^1.1.1", + "get-intrinsic": "^1.0.2" + } + }, + "call-me-maybe": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/call-me-maybe/-/call-me-maybe-1.0.2.tgz", + "integrity": "sha512-HpX65o1Hnr9HH25ojC1YGs7HCQLq0GCOibSaWER0eNpgJ/Z1MZv2mTc7+xh6WOPxbRVcmgbv4hGU+uSQ/2xFZQ==" + }, + "callsites": { + "version": "3.1.0", + "resolved": 
"https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", + "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==" + }, + "camel-case": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/camel-case/-/camel-case-4.1.2.tgz", + "integrity": "sha512-gxGWBrTT1JuMx6R+o5PTXMmUnhnVzLQ9SNutD4YqKtI6ap897t3tKECYla6gCWEkplXnlNybEkZg9GEGxKFCgw==", + "requires": { + "pascal-case": "^3.1.2", + "tslib": "^2.0.3" + } + }, + "camelcase": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.3.0.tgz", + "integrity": "sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==" + }, + "camelcase-css": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/camelcase-css/-/camelcase-css-2.0.1.tgz", + "integrity": "sha512-QOSvevhslijgYwRx6Rv7zKdMF8lbRmx+uQGx2+vDc+KI/eBnsy9kit5aj23AgGu3pa4t9AgwbnXWqS+iOY+2aA==" + }, + "caniuse-api": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/caniuse-api/-/caniuse-api-3.0.0.tgz", + "integrity": "sha512-bsTwuIg/BZZK/vreVTYYbSWoe2F+71P7K5QGEX+pT250DZbfU1MQ5prOKpPR+LL6uWKK3KMwMCAS74QB3Um1uw==", + "requires": { + "browserslist": "^4.0.0", + "caniuse-lite": "^1.0.0", + "lodash.memoize": "^4.1.2", + "lodash.uniq": "^4.5.0" + } + }, + "caniuse-lite": { + "version": "1.0.30001497", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001497.tgz", + "integrity": "sha512-I4/duVK4wL6rAK/aKZl3HXB4g+lIZvaT4VLAn2rCgJ38jVLb0lv2Xug6QuqmxXFVRJMF74SPPWPJ/1Sdm3vCzw==" + }, + "ccount": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/ccount/-/ccount-1.1.0.tgz", + "integrity": "sha512-vlNK021QdI7PNeiUh/lKkC/mNHHfV0m/Ad5JoI0TYtlBnJAslM/JIkm/tGC88bkLIwO6OQ5uV6ztS6kVAtCDlg==" + }, + "chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": 
"sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "requires": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + } + }, + "character-entities": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/character-entities/-/character-entities-1.2.4.tgz", + "integrity": "sha512-iBMyeEHxfVnIakwOuDXpVkc54HijNgCyQB2w0VfGQThle6NXn50zU6V/u+LDhxHcDUPojn6Kpga3PTAD8W1bQw==" + }, + "character-entities-legacy": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/character-entities-legacy/-/character-entities-legacy-1.1.4.tgz", + "integrity": "sha512-3Xnr+7ZFS1uxeiUDvV02wQ+QDbc55o97tIV5zHScSPJpcLm/r0DFPcoY3tYRp+VZukxuMeKgXYmsXQHO05zQeA==" + }, + "character-reference-invalid": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/character-reference-invalid/-/character-reference-invalid-1.1.4.tgz", + "integrity": "sha512-mKKUkUbhPpQlCOfIuZkvSEgktjPFIsZKRRbC6KWVEMvlzblj3i3asQv5ODsrwt0N3pHAEvjP8KTQPHkp0+6jOg==" + }, + "chokidar": { + "version": "3.5.3", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.5.3.tgz", + "integrity": "sha512-Dr3sfKRP6oTcjf2JmUmFJfeVMvXBdegxB0iVQ5eb2V10uFJUCAS8OByZdVAyVb8xXNz3GjjTgj9kLWsZTqE6kw==", + "requires": { + "anymatch": "~3.1.2", + "braces": "~3.0.2", + "fsevents": "~2.3.2", + "glob-parent": "~5.1.2", + "is-binary-path": "~2.1.0", + "is-glob": "~4.0.1", + "normalize-path": "~3.0.0", + "readdirp": "~3.6.0" + } + }, + "chrome-trace-event": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/chrome-trace-event/-/chrome-trace-event-1.0.3.tgz", + "integrity": "sha512-p3KULyQg4S7NIHixdwbGX+nFHkoBiA4YQmyWtjb8XngSKV124nJmRysgAeujbUVb15vh+RvFUfCPqU7rXk+hZg==" + }, + "ci-info": { + "version": "3.8.0", + "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-3.8.0.tgz", + "integrity": "sha512-eXTggHWSooYhq49F2opQhuHWgzucfF2YgODK4e1566GQs5BIfP30B0oenwBJHfWxAs2fyPB1s7Mg949zLf61Yw==" + }, + "clean-css": { + "version": "5.3.2", + 
"resolved": "https://registry.npmjs.org/clean-css/-/clean-css-5.3.2.tgz", + "integrity": "sha512-JVJbM+f3d3Q704rF4bqQ5UUyTtuJ0JRKNbTKVEeujCCBoMdkEi+V+e8oktO9qGQNSvHrFTM6JZRXrUvGR1czww==", + "requires": { + "source-map": "~0.6.0" + } + }, + "clean-stack": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/clean-stack/-/clean-stack-2.2.0.tgz", + "integrity": "sha512-4diC9HaTE+KRAMWhDhrGOECgWZxoevMc5TlkObMqNSsVU62PYzXZ/SMTjzyGAFF1YusgxGcSWTEXBhp0CPwQ1A==" + }, + "cli-boxes": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/cli-boxes/-/cli-boxes-3.0.0.tgz", + "integrity": "sha512-/lzGpEWL/8PfI0BmBOPRwp0c/wFNX1RdUML3jK/RcSBA9T8mZDdQpqYBKtCFTOfQbwPqWEOpjqW+Fnayc0969g==" + }, + "cli-table3": { + "version": "0.6.3", + "resolved": "https://registry.npmjs.org/cli-table3/-/cli-table3-0.6.3.tgz", + "integrity": "sha512-w5Jac5SykAeZJKntOxJCrm63Eg5/4dhMWIcuTbo9rpE+brgaSZo0RuNJZeOyMgsUdhDeojvgyQLmjI+K50ZGyg==", + "requires": { + "@colors/colors": "1.5.0", + "string-width": "^4.2.0" + }, + "dependencies": { + "emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" + }, + "string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "requires": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + } + } + } + }, + "clone-deep": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/clone-deep/-/clone-deep-4.0.1.tgz", + "integrity": "sha512-neHB9xuzh/wk0dIHweyAXv2aPGZIVk3pLMe+/RNzINf17fe0OG96QroktYAUm7SM1PBnzTabaLboqqxDyMU+SQ==", + "requires": { + "is-plain-object": "^2.0.4", + "kind-of": "^6.0.2", + "shallow-clone": "^3.0.0" + } + }, + "clone-response": 
{ + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/clone-response/-/clone-response-1.0.3.tgz", + "integrity": "sha512-ROoL94jJH2dUVML2Y/5PEDNaSHgeOdSDicUyS7izcF63G6sTc/FTjLub4b8Il9S8S0beOfYt0TaA5qvFK+w0wA==", + "requires": { + "mimic-response": "^1.0.0" + } + }, + "collapse-white-space": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/collapse-white-space/-/collapse-white-space-1.0.6.tgz", + "integrity": "sha512-jEovNnrhMuqyCcjfEJA56v0Xq8SkIoPKDyaHahwo3POf4qcSXqMYuwNcOTzp74vTsR9Tn08z4MxWqAhcekogkQ==" + }, + "color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "requires": { + "color-name": "~1.1.4" + } + }, + "color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" + }, + "colord": { + "version": "2.9.3", + "resolved": "https://registry.npmjs.org/colord/-/colord-2.9.3.tgz", + "integrity": "sha512-jeC1axXpnb0/2nn/Y1LPuLdgXBLH7aDcHu4KEKfqw3CUhX7ZpfBSlPKyqXE6btIgEzfWtrX3/tyBCaCvXvMkOw==" + }, + "colorette": { + "version": "2.0.20", + "resolved": "https://registry.npmjs.org/colorette/-/colorette-2.0.20.tgz", + "integrity": "sha512-IfEDxwoWIjkeXL1eXcDiow4UbKjhLdq6/EuSVR9GMN7KVH3r9gQ83e73hsz1Nd1T3ijd5xv1wcWRYO+D6kCI2w==" + }, + "combine-promises": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/combine-promises/-/combine-promises-1.1.0.tgz", + "integrity": "sha512-ZI9jvcLDxqwaXEixOhArm3r7ReIivsXkpbyEWyeOhzz1QS0iSgBPnWvEqvIQtYyamGCYA88gFhmUrs9hrrQ0pg==" + }, + "comma-separated-tokens": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/comma-separated-tokens/-/comma-separated-tokens-1.0.8.tgz", + "integrity": 
"sha512-GHuDRO12Sypu2cV70d1dkA2EUmXHgntrzbpvOB+Qy+49ypNfGgFQIC2fhhXbnyrJRynDCAARsT7Ou0M6hirpfw==" + }, + "commander": { + "version": "9.5.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-9.5.0.tgz", + "integrity": "sha512-KRs7WVDKg86PWiuAqhDrAQnTXZKraVcCc6vFdL14qrZ/DcWwuRo7VoiYXalXO7S5GKpqYiVEwCbgFDfxNHKJBQ==" + }, + "commondir": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/commondir/-/commondir-1.0.1.tgz", + "integrity": "sha512-W9pAhw0ja1Edb5GVdIF1mjZw/ASI0AlShXM83UUGe2DVr5TdAPEA1OA8m/g8zWp9x6On7gqufY+FatDbC3MDQg==" + }, + "compressible": { + "version": "2.0.18", + "resolved": "https://registry.npmjs.org/compressible/-/compressible-2.0.18.tgz", + "integrity": "sha512-AF3r7P5dWxL8MxyITRMlORQNaOA2IkAFaTr4k7BUumjPtRpGDTZpl0Pb1XCO6JeDCBdp126Cgs9sMxqSjgYyRg==", + "requires": { + "mime-db": ">= 1.43.0 < 2" + }, + "dependencies": { + "mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==" + } + } + }, + "compression": { + "version": "1.7.4", + "resolved": "https://registry.npmjs.org/compression/-/compression-1.7.4.tgz", + "integrity": "sha512-jaSIDzP9pZVS4ZfQ+TzvtiWhdpFhE2RDHz8QJkpX9SIpLq88VueF5jJw6t+6CUQcAoA6t+x89MLrWAqpfDE8iQ==", + "requires": { + "accepts": "~1.3.5", + "bytes": "3.0.0", + "compressible": "~2.0.16", + "debug": "2.6.9", + "on-headers": "~1.0.2", + "safe-buffer": "5.1.2", + "vary": "~1.1.2" + }, + "dependencies": { + "debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "requires": { + "ms": "2.0.0" + } + }, + "ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": 
"sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + }, + "safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" + } + } + }, + "concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==" + }, + "configstore": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/configstore/-/configstore-5.0.1.tgz", + "integrity": "sha512-aMKprgk5YhBNyH25hj8wGt2+D52Sw1DRRIzqBwLp2Ya9mFmY8KPvvtvmna8SxVR9JMZ4kzMD68N22vlaRpkeFA==", + "requires": { + "dot-prop": "^5.2.0", + "graceful-fs": "^4.1.2", + "make-dir": "^3.0.0", + "unique-string": "^2.0.0", + "write-file-atomic": "^3.0.0", + "xdg-basedir": "^4.0.0" + } + }, + "connect-history-api-fallback": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/connect-history-api-fallback/-/connect-history-api-fallback-2.0.0.tgz", + "integrity": "sha512-U73+6lQFmfiNPrYbXqr6kZ1i1wiRqXnp2nhMsINseWXO8lDau0LGEffJ8kQi4EjLZympVgRdvqjAgiZ1tgzDDA==" + }, + "consola": { + "version": "2.15.3", + "resolved": "https://registry.npmjs.org/consola/-/consola-2.15.3.tgz", + "integrity": "sha512-9vAdYbHj6x2fLKC4+oPH0kFzY/orMZyG2Aj+kNylHxKGJ/Ed4dpNyAQYwJOdqO4zdM7XpVHmyejQDcQHrnuXbw==" + }, + "content-disposition": { + "version": "0.5.2", + "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.2.tgz", + "integrity": "sha512-kRGRZw3bLlFISDBgwTSA1TMBFN6J6GWDeubmDE3AF+3+yXL8hTWv8r5rkLbqYXY4RjPk/EzHnClI3zQf1cFmHA==" + }, + "content-type": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.5.tgz", + "integrity": 
"sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==" + }, + "convert-source-map": { + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-1.9.0.tgz", + "integrity": "sha512-ASFBup0Mz1uyiIjANan1jzLQami9z1PoYSZCiiYW2FczPbenXc45FZdBZLzOT+r6+iciuEModtmCti+hjaAk0A==" + }, + "cookie": { + "version": "0.5.0", + "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.5.0.tgz", + "integrity": "sha512-YZ3GUyn/o8gfKJlnlX7g7xq4gyO6OSuhGPKaaGssGB2qgDUS0gPgtTvoyZLTt9Ab6dC4hfc9dV5arkvc/OCmrw==" + }, + "cookie-signature": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.6.tgz", + "integrity": "sha512-QADzlaHc8icV8I7vbaJXJwod9HWYp8uCqf1xa4OfNu1T7JVxQIrUgOWtHdNDtPiywmFbiS12VjotIXLrKM3orQ==" + }, + "copy-webpack-plugin": { + "version": "11.0.0", + "resolved": "https://registry.npmjs.org/copy-webpack-plugin/-/copy-webpack-plugin-11.0.0.tgz", + "integrity": "sha512-fX2MWpamkW0hZxMEg0+mYnA40LTosOSa5TqZ9GYIBzyJa9C3QUaMPSE2xAi/buNr8u89SfD9wHSQVBzrRa/SOQ==", + "requires": { + "fast-glob": "^3.2.11", + "glob-parent": "^6.0.1", + "globby": "^13.1.1", + "normalize-path": "^3.0.0", + "schema-utils": "^4.0.0", + "serialize-javascript": "^6.0.0" + }, + "dependencies": { + "ajv": { + "version": "8.12.0", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.12.0.tgz", + "integrity": "sha512-sRu1kpcO9yLtYxBKvqfTeh9KzZEwO3STyX1HT+4CaDzC6HpTGYhIhPIzj9XuKU7KYDwnaeh5hcOwjy1QuJzBPA==", + "requires": { + "fast-deep-equal": "^3.1.1", + "json-schema-traverse": "^1.0.0", + "require-from-string": "^2.0.2", + "uri-js": "^4.2.2" + } + }, + "ajv-keywords": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-5.1.0.tgz", + "integrity": "sha512-YCS/JNFAUyr5vAuhk1DWm1CBxRHW9LbJ2ozWeemrIqpbsqKjHVxYPyi5GC0rjZIT5JxJ3virVTS8wk4i/Z+krw==", + "requires": { + "fast-deep-equal": "^3.1.3" + } + }, + "glob-parent": { + "version": 
"6.0.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", + "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", + "requires": { + "is-glob": "^4.0.3" + } + }, + "globby": { + "version": "13.1.4", + "resolved": "https://registry.npmjs.org/globby/-/globby-13.1.4.tgz", + "integrity": "sha512-iui/IiiW+QrJ1X1hKH5qwlMQyv34wJAYwH1vrf8b9kBA4sNiif3gKsMHa+BrdnOpEudWjpotfa7LrTzB1ERS/g==", + "requires": { + "dir-glob": "^3.0.1", + "fast-glob": "^3.2.11", + "ignore": "^5.2.0", + "merge2": "^1.4.1", + "slash": "^4.0.0" + } + }, + "json-schema-traverse": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", + "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==" + }, + "schema-utils": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.1.0.tgz", + "integrity": "sha512-Jw+GZVbP5IggB2WAn6UHI02LBwGmsIeYN/lNbSMZyDziQ7jmtAUrqKqDja+W89YHVs+KL/3IkIMltAklqB1vAw==", + "requires": { + "@types/json-schema": "^7.0.9", + "ajv": "^8.9.0", + "ajv-formats": "^2.1.1", + "ajv-keywords": "^5.1.0" + } + }, + "slash": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/slash/-/slash-4.0.0.tgz", + "integrity": "sha512-3dOsAHXXUkQTpOYcoAxLIorMTp4gIQr5IW3iVb7A7lFIp0VHhnynm9izx6TssdrIcVIESAlVjtnO2K8bg+Coew==" + } + } + }, + "core-js": { + "version": "3.30.2", + "resolved": "https://registry.npmjs.org/core-js/-/core-js-3.30.2.tgz", + "integrity": "sha512-uBJiDmwqsbJCWHAwjrx3cvjbMXP7xD72Dmsn5LOJpiRmE3WbBbN5rCqQ2Qh6Ek6/eOrjlWngEynBWo4VxerQhg==" + }, + "core-js-compat": { + "version": "3.30.2", + "resolved": "https://registry.npmjs.org/core-js-compat/-/core-js-compat-3.30.2.tgz", + "integrity": "sha512-nriW1nuJjUgvkEjIot1Spwakz52V9YkYHZAQG6A1eCgC8AA1p0zngrQEP9R0+V6hji5XilWKG1Bd0YRppmGimA==", + "requires": { + "browserslist": "^4.21.5" + } 
+ }, + "core-js-pure": { + "version": "3.30.2", + "resolved": "https://registry.npmjs.org/core-js-pure/-/core-js-pure-3.30.2.tgz", + "integrity": "sha512-p/npFUJXXBkCCTIlEGBdghofn00jWG6ZOtdoIXSJmAu2QBvN0IqpZXWweOytcwE6cfx8ZvVUy1vw8zxhe4Y2vg==" + }, + "core-util-is": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.3.tgz", + "integrity": "sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==" + }, + "cosmiconfig": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-7.1.0.tgz", + "integrity": "sha512-AdmX6xUzdNASswsFtmwSt7Vj8po9IuqXm0UXz7QKPuEUmPB4XyjGfaAr2PSuELMwkRMVH1EpIkX5bTZGRB3eCA==", + "requires": { + "@types/parse-json": "^4.0.0", + "import-fresh": "^3.2.1", + "parse-json": "^5.0.0", + "path-type": "^4.0.0", + "yaml": "^1.10.0" + } + }, + "cross-spawn": { + "version": "7.0.3", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", + "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", + "requires": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + } + }, + "crypto-random-string": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/crypto-random-string/-/crypto-random-string-2.0.0.tgz", + "integrity": "sha512-v1plID3y9r/lPhviJ1wrXpLeyUIGAZ2SHNYTEapm7/8A9nLPoyvVp3RK/EPFqn5kEznyWgYZNsRtYYIWbuG8KA==" + }, + "css-declaration-sorter": { + "version": "6.4.0", + "resolved": "https://registry.npmjs.org/css-declaration-sorter/-/css-declaration-sorter-6.4.0.tgz", + "integrity": "sha512-jDfsatwWMWN0MODAFuHszfjphEXfNw9JUAhmY4pLu3TyTU+ohUpsbVtbU+1MZn4a47D9kqh03i4eyOm+74+zew==", + "requires": {} + }, + "css-loader": { + "version": "6.8.1", + "resolved": "https://registry.npmjs.org/css-loader/-/css-loader-6.8.1.tgz", + "integrity": 
"sha512-xDAXtEVGlD0gJ07iclwWVkLoZOpEvAWaSyf6W18S2pOC//K8+qUDIx8IIT3D+HjnmkJPQeesOPv5aiUaJsCM2g==", + "requires": { + "icss-utils": "^5.1.0", + "postcss": "^8.4.21", + "postcss-modules-extract-imports": "^3.0.0", + "postcss-modules-local-by-default": "^4.0.3", + "postcss-modules-scope": "^3.0.0", + "postcss-modules-values": "^4.0.0", + "postcss-value-parser": "^4.2.0", + "semver": "^7.3.8" + } + }, + "css-minimizer-webpack-plugin": { + "version": "4.2.2", + "resolved": "https://registry.npmjs.org/css-minimizer-webpack-plugin/-/css-minimizer-webpack-plugin-4.2.2.tgz", + "integrity": "sha512-s3Of/4jKfw1Hj9CxEO1E5oXhQAxlayuHO2y/ML+C6I9sQ7FdzfEV6QgMLN3vI+qFsjJGIAFLKtQK7t8BOXAIyA==", + "requires": { + "cssnano": "^5.1.8", + "jest-worker": "^29.1.2", + "postcss": "^8.4.17", + "schema-utils": "^4.0.0", + "serialize-javascript": "^6.0.0", + "source-map": "^0.6.1" + }, + "dependencies": { + "ajv": { + "version": "8.12.0", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.12.0.tgz", + "integrity": "sha512-sRu1kpcO9yLtYxBKvqfTeh9KzZEwO3STyX1HT+4CaDzC6HpTGYhIhPIzj9XuKU7KYDwnaeh5hcOwjy1QuJzBPA==", + "requires": { + "fast-deep-equal": "^3.1.1", + "json-schema-traverse": "^1.0.0", + "require-from-string": "^2.0.2", + "uri-js": "^4.2.2" + } + }, + "ajv-keywords": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-5.1.0.tgz", + "integrity": "sha512-YCS/JNFAUyr5vAuhk1DWm1CBxRHW9LbJ2ozWeemrIqpbsqKjHVxYPyi5GC0rjZIT5JxJ3virVTS8wk4i/Z+krw==", + "requires": { + "fast-deep-equal": "^3.1.3" + } + }, + "json-schema-traverse": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", + "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==" + }, + "schema-utils": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.1.0.tgz", + "integrity": 
"sha512-Jw+GZVbP5IggB2WAn6UHI02LBwGmsIeYN/lNbSMZyDziQ7jmtAUrqKqDja+W89YHVs+KL/3IkIMltAklqB1vAw==", + "requires": { + "@types/json-schema": "^7.0.9", + "ajv": "^8.9.0", + "ajv-formats": "^2.1.1", + "ajv-keywords": "^5.1.0" + } + } + } + }, + "css-select": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/css-select/-/css-select-4.3.0.tgz", + "integrity": "sha512-wPpOYtnsVontu2mODhA19JrqWxNsfdatRKd64kmpRbQgh1KtItko5sTnEpPdpSaJszTOhEMlF/RPz28qj4HqhQ==", + "requires": { + "boolbase": "^1.0.0", + "css-what": "^6.0.1", + "domhandler": "^4.3.1", + "domutils": "^2.8.0", + "nth-check": "^2.0.1" + } + }, + "css-tree": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/css-tree/-/css-tree-1.1.3.tgz", + "integrity": "sha512-tRpdppF7TRazZrjJ6v3stzv93qxRcSsFmW6cX0Zm2NVKpxE1WV1HblnghVv9TreireHkqI/VDEsfolRF1p6y7Q==", + "requires": { + "mdn-data": "2.0.14", + "source-map": "^0.6.1" + } + }, + "css-what": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/css-what/-/css-what-6.1.0.tgz", + "integrity": "sha512-HTUrgRJ7r4dsZKU6GjmpfRK1O76h97Z8MfS1G0FozR+oF2kG6Vfe8JE6zwrkbxigziPHinCJ+gCPjA9EaBDtRw==" + }, + "cssesc": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/cssesc/-/cssesc-3.0.0.tgz", + "integrity": "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==" + }, + "cssnano": { + "version": "5.1.15", + "resolved": "https://registry.npmjs.org/cssnano/-/cssnano-5.1.15.tgz", + "integrity": "sha512-j+BKgDcLDQA+eDifLx0EO4XSA56b7uut3BQFH+wbSaSTuGLuiyTa/wbRYthUXX8LC9mLg+WWKe8h+qJuwTAbHw==", + "requires": { + "cssnano-preset-default": "^5.2.14", + "lilconfig": "^2.0.3", + "yaml": "^1.10.2" + } + }, + "cssnano-preset-advanced": { + "version": "5.3.10", + "resolved": "https://registry.npmjs.org/cssnano-preset-advanced/-/cssnano-preset-advanced-5.3.10.tgz", + "integrity": "sha512-fnYJyCS9jgMU+cmHO1rPSPf9axbQyD7iUhLO5Df6O4G+fKIOMps+ZbU0PdGFejFBBZ3Pftf18fn1eG7MAPUSWQ==", + "requires": { + 
"autoprefixer": "^10.4.12", + "cssnano-preset-default": "^5.2.14", + "postcss-discard-unused": "^5.1.0", + "postcss-merge-idents": "^5.1.1", + "postcss-reduce-idents": "^5.2.0", + "postcss-zindex": "^5.1.0" + } + }, + "cssnano-preset-default": { + "version": "5.2.14", + "resolved": "https://registry.npmjs.org/cssnano-preset-default/-/cssnano-preset-default-5.2.14.tgz", + "integrity": "sha512-t0SFesj/ZV2OTylqQVOrFgEh5uanxbO6ZAdeCrNsUQ6fVuXwYTxJPNAGvGTxHbD68ldIJNec7PyYZDBrfDQ+6A==", + "requires": { + "css-declaration-sorter": "^6.3.1", + "cssnano-utils": "^3.1.0", + "postcss-calc": "^8.2.3", + "postcss-colormin": "^5.3.1", + "postcss-convert-values": "^5.1.3", + "postcss-discard-comments": "^5.1.2", + "postcss-discard-duplicates": "^5.1.0", + "postcss-discard-empty": "^5.1.1", + "postcss-discard-overridden": "^5.1.0", + "postcss-merge-longhand": "^5.1.7", + "postcss-merge-rules": "^5.1.4", + "postcss-minify-font-values": "^5.1.0", + "postcss-minify-gradients": "^5.1.1", + "postcss-minify-params": "^5.1.4", + "postcss-minify-selectors": "^5.2.1", + "postcss-normalize-charset": "^5.1.0", + "postcss-normalize-display-values": "^5.1.0", + "postcss-normalize-positions": "^5.1.1", + "postcss-normalize-repeat-style": "^5.1.1", + "postcss-normalize-string": "^5.1.0", + "postcss-normalize-timing-functions": "^5.1.0", + "postcss-normalize-unicode": "^5.1.1", + "postcss-normalize-url": "^5.1.0", + "postcss-normalize-whitespace": "^5.1.1", + "postcss-ordered-values": "^5.1.3", + "postcss-reduce-initial": "^5.1.2", + "postcss-reduce-transforms": "^5.1.0", + "postcss-svgo": "^5.1.0", + "postcss-unique-selectors": "^5.1.1" + } + }, + "cssnano-utils": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/cssnano-utils/-/cssnano-utils-3.1.0.tgz", + "integrity": "sha512-JQNR19/YZhz4psLX/rQ9M83e3z2Wf/HdJbryzte4a3NSuafyp9w/I4U+hx5C2S9g41qlstH7DEWnZaaj83OuEA==", + "requires": {} + }, + "csso": { + "version": "4.2.0", + "resolved": 
"https://registry.npmjs.org/csso/-/csso-4.2.0.tgz", + "integrity": "sha512-wvlcdIbf6pwKEk7vHj8/Bkc0B4ylXZruLvOgs9doS5eOsOpuodOV2zJChSpkp+pRpYQLQMeF04nr3Z68Sta9jA==", + "requires": { + "css-tree": "^1.1.2" + } + }, + "csstype": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.2.tgz", + "integrity": "sha512-I7K1Uu0MBPzaFKg4nI5Q7Vs2t+3gWWW648spaF+Rg7pI9ds18Ugn+lvg4SHczUdKlHI5LWBXyqfS8+DufyBsgQ==" + }, + "debug": { + "version": "4.3.4", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", + "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", + "requires": { + "ms": "2.1.2" + } + }, + "decompress-response": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/decompress-response/-/decompress-response-3.3.0.tgz", + "integrity": "sha512-BzRPQuY1ip+qDonAOz42gRm/pg9F768C+npV/4JOsxRC2sq+Rlk+Q4ZCAsOhnIaMrgarILY+RMUIvMmmX1qAEA==", + "requires": { + "mimic-response": "^1.0.0" + } + }, + "deep-extend": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/deep-extend/-/deep-extend-0.6.0.tgz", + "integrity": "sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==" + }, + "deepmerge": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-4.3.1.tgz", + "integrity": "sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A==" + }, + "default-gateway": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/default-gateway/-/default-gateway-6.0.3.tgz", + "integrity": "sha512-fwSOJsbbNzZ/CUFpqFBqYfYNLj1NbMPm8MMCIzHjC83iSJRBEGmDUxU+WP661BaBQImeC2yHwXtz+P/O9o+XEg==", + "requires": { + "execa": "^5.0.0" + } + }, + "defer-to-connect": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/defer-to-connect/-/defer-to-connect-1.1.3.tgz", + "integrity": 
"sha512-0ISdNousHvZT2EiFlZeZAHBUvSxmKswVCEf8hW7KWgG4a8MVEu/3Vb6uWYozkjylyCxe0JBIiRB1jV45S70WVQ==" + }, + "define-lazy-prop": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/define-lazy-prop/-/define-lazy-prop-2.0.0.tgz", + "integrity": "sha512-Ds09qNh8yw3khSjiJjiUInaGX9xlqZDY7JVryGxdxV7NPeuqQfplOpQ66yJFZut3jLa5zOwkXw1g9EI2uKh4Og==" + }, + "define-properties": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.2.0.tgz", + "integrity": "sha512-xvqAVKGfT1+UAvPwKTVw/njhdQ8ZhXK4lI0bCIuCMrp2up9nPnaDftrLtmpTazqd1o+UY4zgzU+avtMbDP+ldA==", + "requires": { + "has-property-descriptors": "^1.0.0", + "object-keys": "^1.1.1" + } + }, + "del": { + "version": "6.1.1", + "resolved": "https://registry.npmjs.org/del/-/del-6.1.1.tgz", + "integrity": "sha512-ua8BhapfP0JUJKC/zV9yHHDW/rDoDxP4Zhn3AkA6/xT6gY7jYXJiaeyBZznYVujhZZET+UgcbZiQ7sN3WqcImg==", + "requires": { + "globby": "^11.0.1", + "graceful-fs": "^4.2.4", + "is-glob": "^4.0.1", + "is-path-cwd": "^2.2.0", + "is-path-inside": "^3.0.2", + "p-map": "^4.0.0", + "rimraf": "^3.0.2", + "slash": "^3.0.0" + } + }, + "depd": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz", + "integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==" + }, + "destroy": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/destroy/-/destroy-1.2.0.tgz", + "integrity": "sha512-2sJGJTaXIIaR1w4iJSNoN0hnMY7Gpc/n8D4qSCJw8QqFWXf7cuAgnEHxBpweaVcPevC2l3KpjYCx3NypQQgaJg==" + }, + "detab": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/detab/-/detab-2.0.4.tgz", + "integrity": "sha512-8zdsQA5bIkoRECvCrNKPla84lyoR7DSAyf7p0YgXzBO9PDJx8KntPUay7NS6yp+KdxdVtiE5SpHKtbp2ZQyA9g==", + "requires": { + "repeat-string": "^1.5.4" + } + }, + "detect-node": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/detect-node/-/detect-node-2.1.0.tgz", + "integrity": 
"sha512-T0NIuQpnTvFDATNuHN5roPwSBG83rFsuO+MXXH9/3N1eFbn4wcPjttvjMLEPWJ0RGUYgQE7cGgS3tNxbqCGM7g==" + }, + "detect-port": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/detect-port/-/detect-port-1.5.1.tgz", + "integrity": "sha512-aBzdj76lueB6uUst5iAs7+0H/oOjqI5D16XUWxlWMIMROhcM0rfsNVk93zTngq1dDNpoXRr++Sus7ETAExppAQ==", + "requires": { + "address": "^1.0.1", + "debug": "4" + } + }, + "detect-port-alt": { + "version": "1.1.6", + "resolved": "https://registry.npmjs.org/detect-port-alt/-/detect-port-alt-1.1.6.tgz", + "integrity": "sha512-5tQykt+LqfJFBEYaDITx7S7cR7mJ/zQmLXZ2qt5w04ainYZw6tBf9dBunMjVeVOdYVRUzUOE4HkY5J7+uttb5Q==", + "requires": { + "address": "^1.0.1", + "debug": "^2.6.0" + }, + "dependencies": { + "debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "requires": { + "ms": "2.0.0" + } + }, + "ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + } + } + }, + "dir-glob": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", + "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==", + "requires": { + "path-type": "^4.0.0" + } + }, + "dns-equal": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/dns-equal/-/dns-equal-1.0.0.tgz", + "integrity": "sha512-z+paD6YUQsk+AbGCEM4PrOXSss5gd66QfcVBFTKR/HpFL9jCqikS94HYwKww6fQyO7IxrIIyUu+g0Ka9tUS2Cg==" + }, + "dns-packet": { + "version": "5.6.0", + "resolved": "https://registry.npmjs.org/dns-packet/-/dns-packet-5.6.0.tgz", + "integrity": "sha512-rza3UH1LwdHh9qyPXp8lkwpjSNk/AMD3dPytUoRoqnypDUhY0xvbdmVhWOfxO68frEfV9BU8V12Ez7ZsHGZpCQ==", + "requires": { + "@leichtgewicht/ip-codec": 
"^2.0.1" + } + }, + "docusaurus-plugin-includes": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/docusaurus-plugin-includes/-/docusaurus-plugin-includes-1.1.4.tgz", + "integrity": "sha512-4L7Eqker4xh1dyWZoz2Isz6JQTg8CWZvvSQyX2IHpEPjwovvD5DpEHHRlSk7gJLQNasWPP9DTHTd0fxFZ6jl2g==", + "requires": { + "@docusaurus/core": "^2.0.0-beta.5", + "@docusaurus/types": "^2.0.0-beta.5", + "@docusaurus/utils": "^2.0.0-beta.5", + "fs-extra": "^10.0.0", + "path": "^0.12.7" + }, + "dependencies": { + "fs-extra": { + "version": "10.1.0", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz", + "integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==", + "requires": { + "graceful-fs": "^4.2.0", + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" + } + }, + "jsonfile": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz", + "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==", + "requires": { + "graceful-fs": "^4.1.6", + "universalify": "^2.0.0" + } + }, + "universalify": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz", + "integrity": "sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ==" + } + } + }, + "dom-converter": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/dom-converter/-/dom-converter-0.2.0.tgz", + "integrity": "sha512-gd3ypIPfOMr9h5jIKq8E3sHOTCjeirnl0WK5ZdS1AW0Odt0b1PaWaHdJ4Qk4klv+YB9aJBS7mESXjFoDQPu6DA==", + "requires": { + "utila": "~0.4" + } + }, + "dom-serializer": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-1.4.1.tgz", + "integrity": "sha512-VHwB3KfrcOOkelEG2ZOfxqLZdfkil8PtJi4P8N2MMXucZq2yLp75ClViUlOVwyoHEDjYU433Aq+5zWP61+RGag==", + "requires": { + "domelementtype": "^2.0.1", + "domhandler": "^4.2.0", + 
"entities": "^2.0.0" + }, + "dependencies": { + "entities": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/entities/-/entities-2.2.0.tgz", + "integrity": "sha512-p92if5Nz619I0w+akJrLZH0MX0Pb5DX39XOwQTtXSdQQOaYH03S1uIQp4mhOZtAXrxq4ViO67YTiLBo2638o9A==" + } + } + }, + "domelementtype": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-2.3.0.tgz", + "integrity": "sha512-OLETBj6w0OsagBwdXnPdN0cnMfF9opN69co+7ZrbfPGrdpPVNBUj02spi6B1N7wChLQiPn4CSH/zJvXw56gmHw==" + }, + "domhandler": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-4.3.1.tgz", + "integrity": "sha512-GrwoxYN+uWlzO8uhUXRl0P+kHE4GtVPfYzVLcUxPL7KNdHKj66vvlhiweIHqYYXWlw+T8iLMp42Lm67ghw4WMQ==", + "requires": { + "domelementtype": "^2.2.0" + } + }, + "domutils": { + "version": "2.8.0", + "resolved": "https://registry.npmjs.org/domutils/-/domutils-2.8.0.tgz", + "integrity": "sha512-w96Cjofp72M5IIhpjgobBimYEfoPjx1Vx0BSX9P30WBdZW2WIKU0T1Bd0kz2eNZ9ikjKgHbEyKx8BB6H1L3h3A==", + "requires": { + "dom-serializer": "^1.0.1", + "domelementtype": "^2.2.0", + "domhandler": "^4.2.0" + } + }, + "dot-case": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/dot-case/-/dot-case-3.0.4.tgz", + "integrity": "sha512-Kv5nKlh6yRrdrGvxeJ2e5y2eRUpkUosIW4A2AS38zwSz27zu7ufDwQPi5Jhs3XAlGNetl3bmnGhQsMtkKJnj3w==", + "requires": { + "no-case": "^3.0.4", + "tslib": "^2.0.3" + } + }, + "dot-prop": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/dot-prop/-/dot-prop-5.3.0.tgz", + "integrity": "sha512-QM8q3zDe58hqUqjraQOmzZ1LIH9SWQJTlEKCH4kJ2oQvLZk7RbQXvtDM2XEq3fwkV9CCvvH4LA0AV+ogFsBM2Q==", + "requires": { + "is-obj": "^2.0.0" + }, + "dependencies": { + "is-obj": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-obj/-/is-obj-2.0.0.tgz", + "integrity": "sha512-drqDG3cbczxxEJRoOXcOjtdp1J/lyp1mNn0xaznRs8+muBhgQcrnbspox5X5fOw0HnMnbfDzvnEMEtqDEJEo8w==" + } + } + }, + "duplexer": { + "version": 
"0.1.2", + "resolved": "https://registry.npmjs.org/duplexer/-/duplexer-0.1.2.tgz", + "integrity": "sha512-jtD6YG370ZCIi/9GTaJKQxWTZD045+4R4hTk/x1UyoqadyJ9x9CgSi1RlVDQF8U2sxLLSnFkCaMihqljHIWgMg==" + }, + "duplexer3": { + "version": "0.1.5", + "resolved": "https://registry.npmjs.org/duplexer3/-/duplexer3-0.1.5.tgz", + "integrity": "sha512-1A8za6ws41LQgv9HrE/66jyC5yuSjQ3L/KOpFtoBilsAK2iA2wuS5rTt1OCzIvtS2V7nVmedsUU+DGRcjBmOYA==" + }, + "eastasianwidth": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/eastasianwidth/-/eastasianwidth-0.2.0.tgz", + "integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==" + }, + "ee-first": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz", + "integrity": "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==" + }, + "electron-to-chromium": { + "version": "1.4.425", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.425.tgz", + "integrity": "sha512-wv1NufHxu11zfDbY4fglYQApMswleE9FL/DSeyOyauVXDZ+Kco96JK/tPfBUaDqfRarYp2WH2hJ/5UnVywp9Jg==" + }, + "emoji-regex": { + "version": "9.2.2", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", + "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==" + }, + "emojis-list": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/emojis-list/-/emojis-list-3.0.0.tgz", + "integrity": "sha512-/kyM18EfinwXZbno9FyUGeFh87KC8HRQBQGildHZbEuRyWFOmv1U10o9BBp8XVZDVNNuQKyIGIu5ZYAAXJ0V2Q==" + }, + "emoticon": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/emoticon/-/emoticon-3.2.0.tgz", + "integrity": "sha512-SNujglcLTTg+lDAcApPNgEdudaqQFiAbJCqzjNxJkvN9vAwCGi0uu8IUVvx+f16h+V44KCY6Y2yboroc9pilHg==" + }, + "encodeurl": { + "version": "1.0.2", + "resolved": 
"https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz", + "integrity": "sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==" + }, + "end-of-stream": { + "version": "1.4.4", + "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.4.tgz", + "integrity": "sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==", + "requires": { + "once": "^1.4.0" + } + }, + "enhanced-resolve": { + "version": "5.14.1", + "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.14.1.tgz", + "integrity": "sha512-Vklwq2vDKtl0y/vtwjSesgJ5MYS7Etuk5txS8VdKL4AOS1aUlD96zqIfsOSLQsdv3xgMRbtkWM8eG9XDfKUPow==", + "requires": { + "graceful-fs": "^4.2.4", + "tapable": "^2.2.0" + } + }, + "entities": { + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/entities/-/entities-4.5.0.tgz", + "integrity": "sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==" + }, + "error-ex": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz", + "integrity": "sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==", + "requires": { + "is-arrayish": "^0.2.1" + } + }, + "es-module-lexer": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-1.2.1.tgz", + "integrity": "sha512-9978wrXM50Y4rTMmW5kXIC09ZdXQZqkE4mxhwkd8VbzsGkXGPgV4zWuqQJgCEzYngdo2dYDa0l8xhX4fkSwJSg==" + }, + "es6-promise": { + "version": "4.2.8", + "resolved": "https://registry.npmjs.org/es6-promise/-/es6-promise-4.2.8.tgz", + "integrity": "sha512-HJDGx5daxeIvxdBxvG2cb9g4tEvwIk3i8+nhX0yGrYmZUzbkdg8QbDevheDB8gd0//uPj4c1EQua8Q+MViT0/w==" + }, + "escalade": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz", + "integrity": 
"sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==" + }, + "escape-goat": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/escape-goat/-/escape-goat-2.1.1.tgz", + "integrity": "sha512-8/uIhbG12Csjy2JEW7D9pHbreaVaS/OpN3ycnyvElTdwM5n6GY6W6e2IPemfvGZeUMqZ9A/3GqIZMgKnBhAw/Q==" + }, + "escape-html": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz", + "integrity": "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow==" + }, + "escape-string-regexp": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", + "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==" + }, + "eslint-scope": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.1.1.tgz", + "integrity": "sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==", + "requires": { + "esrecurse": "^4.3.0", + "estraverse": "^4.1.1" + } + }, + "esprima": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", + "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==" + }, + "esrecurse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", + "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", + "requires": { + "estraverse": "^5.2.0" + }, + "dependencies": { + "estraverse": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", + "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==" + } + } + }, + "estraverse": { + "version": "4.3.0", + "resolved": 
"https://registry.npmjs.org/estraverse/-/estraverse-4.3.0.tgz", + "integrity": "sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==" + }, + "esutils": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", + "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==" + }, + "eta": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/eta/-/eta-2.2.0.tgz", + "integrity": "sha512-UVQ72Rqjy/ZKQalzV5dCCJP80GrmPrMxh6NlNf+erV6ObL0ZFkhCstWRawS85z3smdr3d2wXPsZEY7rDPfGd2g==" + }, + "etag": { + "version": "1.8.1", + "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz", + "integrity": "sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==" + }, + "eval": { + "version": "0.1.8", + "resolved": "https://registry.npmjs.org/eval/-/eval-0.1.8.tgz", + "integrity": "sha512-EzV94NYKoO09GLXGjXj9JIlXijVck4ONSr5wiCWDvhsvj5jxSrzTmRU/9C1DyB6uToszLs8aifA6NQ7lEQdvFw==", + "requires": { + "@types/node": "*", + "require-like": ">= 0.1.1" + } + }, + "eventemitter3": { + "version": "4.0.7", + "resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-4.0.7.tgz", + "integrity": "sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==" + }, + "events": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/events/-/events-3.3.0.tgz", + "integrity": "sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q==" + }, + "execa": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/execa/-/execa-5.1.1.tgz", + "integrity": "sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==", + "requires": { + "cross-spawn": "^7.0.3", + "get-stream": "^6.0.0", + "human-signals": "^2.1.0", + "is-stream": "^2.0.0", + "merge-stream": "^2.0.0", + "npm-run-path": "^4.0.1", + 
"onetime": "^5.1.2", + "signal-exit": "^3.0.3", + "strip-final-newline": "^2.0.0" + }, + "dependencies": { + "get-stream": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-6.0.1.tgz", + "integrity": "sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==" + } + } + }, + "express": { + "version": "4.18.2", + "resolved": "https://registry.npmjs.org/express/-/express-4.18.2.tgz", + "integrity": "sha512-5/PsL6iGPdfQ/lKM1UuielYgv3BUoJfz1aUwU9vHZ+J7gyvwdQXFEBIEIaxeGf0GIcreATNyBExtalisDbuMqQ==", + "requires": { + "accepts": "~1.3.8", + "array-flatten": "1.1.1", + "body-parser": "1.20.1", + "content-disposition": "0.5.4", + "content-type": "~1.0.4", + "cookie": "0.5.0", + "cookie-signature": "1.0.6", + "debug": "2.6.9", + "depd": "2.0.0", + "encodeurl": "~1.0.2", + "escape-html": "~1.0.3", + "etag": "~1.8.1", + "finalhandler": "1.2.0", + "fresh": "0.5.2", + "http-errors": "2.0.0", + "merge-descriptors": "1.0.1", + "methods": "~1.1.2", + "on-finished": "2.4.1", + "parseurl": "~1.3.3", + "path-to-regexp": "0.1.7", + "proxy-addr": "~2.0.7", + "qs": "6.11.0", + "range-parser": "~1.2.1", + "safe-buffer": "5.2.1", + "send": "0.18.0", + "serve-static": "1.15.0", + "setprototypeof": "1.2.0", + "statuses": "2.0.1", + "type-is": "~1.6.18", + "utils-merge": "1.0.1", + "vary": "~1.1.2" + }, + "dependencies": { + "array-flatten": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz", + "integrity": "sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg==" + }, + "content-disposition": { + "version": "0.5.4", + "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.4.tgz", + "integrity": "sha512-FveZTNuGw04cxlAiWbzi6zTAL/lhehaWbTtgluJh4/E95DqMwTmha3KZN1aAWA8cFIhHzMZUvLevkw5Rqk+tSQ==", + "requires": { + "safe-buffer": "5.2.1" + } + }, + "debug": { + "version": "2.6.9", + "resolved": 
"https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "requires": { + "ms": "2.0.0" + } + }, + "ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + }, + "path-to-regexp": { + "version": "0.1.7", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.7.tgz", + "integrity": "sha512-5DFkuoqlv1uYQKxy8omFBeJPQcdoE07Kv2sferDCrAq1ohOU+MSDswDIbnx3YAM60qIOnYa53wBhXW0EbMonrQ==" + }, + "range-parser": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", + "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==" + } + } + }, + "extend": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz", + "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==" + }, + "extend-shallow": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", + "integrity": "sha512-zCnTtlxNoAiDc3gqY2aYAWFx7XWWiasuF2K8Me5WbN8otHKTUKBwjPtNpRs/rbUZm7KxWAaNj7P1a/p52GbVug==", + "requires": { + "is-extendable": "^0.1.0" + } + }, + "fast-deep-equal": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", + "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==" + }, + "fast-glob": { + "version": "3.2.12", + "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.2.12.tgz", + "integrity": "sha512-DVj4CQIYYow0BlaelwK1pHl5n5cRSJfM60UA0zK891sVInoPri2Ekj7+e1CT3/3qxXenpI+nBBmQAcJPJgaj4w==", + "requires": { + "@nodelib/fs.stat": "^2.0.2", + "@nodelib/fs.walk": 
"^1.2.3", + "glob-parent": "^5.1.2", + "merge2": "^1.3.0", + "micromatch": "^4.0.4" + } + }, + "fast-json-stable-stringify": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", + "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==" + }, + "fast-url-parser": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/fast-url-parser/-/fast-url-parser-1.1.3.tgz", + "integrity": "sha512-5jOCVXADYNuRkKFzNJ0dCCewsZiYo0dz8QNYljkOpFC6r2U4OBmKtvm/Tsuh4w1YYdDqDb31a8TVhBJ2OJKdqQ==", + "requires": { + "punycode": "^1.3.2" + } + }, + "fastq": { + "version": "1.15.0", + "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.15.0.tgz", + "integrity": "sha512-wBrocU2LCXXa+lWBt8RoIRD89Fi8OdABODa/kEnyeyjS5aZO5/GNvI5sEINADqP/h8M29UHTHUb53sUu5Ihqdw==", + "requires": { + "reusify": "^1.0.4" + } + }, + "faye-websocket": { + "version": "0.11.4", + "resolved": "https://registry.npmjs.org/faye-websocket/-/faye-websocket-0.11.4.tgz", + "integrity": "sha512-CzbClwlXAuiRQAlUyfqPgvPoNKTckTPGfwZV4ZdAhVcP2lh9KUxJg2b5GkE7XbjKQ3YJnQ9z6D9ntLAlB+tP8g==", + "requires": { + "websocket-driver": ">=0.5.1" + } + }, + "file-loader": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/file-loader/-/file-loader-6.2.0.tgz", + "integrity": "sha512-qo3glqyTa61Ytg4u73GultjHGjdRyig3tG6lPtyX/jOEJvHif9uB0/OCI2Kif6ctF3caQTW2G5gym21oAsI4pw==", + "requires": { + "loader-utils": "^2.0.0", + "schema-utils": "^3.0.0" + }, + "dependencies": { + "schema-utils": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.2.0.tgz", + "integrity": "sha512-0zTyLGyDJYd/MBxG1AhJkKa6fpEBds4OQO2ut0w7OYG+ZGhGea09lijvzsqegYSik88zc7cUtIlnnO+/BvD6gQ==", + "requires": { + "@types/json-schema": "^7.0.8", + "ajv": "^6.12.5", + "ajv-keywords": "^3.5.2" + } + } + } + }, + "filesize": { + "version": "8.0.7", + "resolved": 
"https://registry.npmjs.org/filesize/-/filesize-8.0.7.tgz", + "integrity": "sha512-pjmC+bkIF8XI7fWaH8KxHcZL3DPybs1roSKP4rKDvy20tAWwIObE4+JIseG2byfGKhud5ZnM4YSGKBz7Sh0ndQ==" + }, + "fill-range": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", + "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", + "requires": { + "to-regex-range": "^5.0.1" + } + }, + "finalhandler": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.2.0.tgz", + "integrity": "sha512-5uXcUVftlQMFnWC9qu/svkWv3GTd2PfUhK/3PLkYNAe7FbqJMt3515HaxE6eRL74GdsriiwujiawdaB1BpEISg==", + "requires": { + "debug": "2.6.9", + "encodeurl": "~1.0.2", + "escape-html": "~1.0.3", + "on-finished": "2.4.1", + "parseurl": "~1.3.3", + "statuses": "2.0.1", + "unpipe": "~1.0.0" + }, + "dependencies": { + "debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "requires": { + "ms": "2.0.0" + } + }, + "ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + } + } + }, + "find-cache-dir": { + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/find-cache-dir/-/find-cache-dir-3.3.2.tgz", + "integrity": "sha512-wXZV5emFEjrridIgED11OoUKLxiYjAcqot/NJdAkOhlJ+vGzwhOAfcG5OX1jP+S0PcjEn8bdMJv+g2jwQ3Onig==", + "requires": { + "commondir": "^1.0.1", + "make-dir": "^3.0.2", + "pkg-dir": "^4.1.0" + } + }, + "find-up": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", + "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", + "requires": { + "locate-path": "^5.0.0", + 
"path-exists": "^4.0.0" + } + }, + "follow-redirects": { + "version": "1.15.2", + "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.2.tgz", + "integrity": "sha512-VQLG33o04KaQ8uYi2tVNbdrWp1QWxNNea+nmIB4EVM28v0hmP17z7aG1+wAkNzVq4KeXTq3221ye5qTJP91JwA==" + }, + "fork-ts-checker-webpack-plugin": { + "version": "6.5.3", + "resolved": "https://registry.npmjs.org/fork-ts-checker-webpack-plugin/-/fork-ts-checker-webpack-plugin-6.5.3.tgz", + "integrity": "sha512-SbH/l9ikmMWycd5puHJKTkZJKddF4iRLyW3DeZ08HTI7NGyLS38MXd/KGgeWumQO7YNQbW2u/NtPT2YowbPaGQ==", + "requires": { + "@babel/code-frame": "^7.8.3", + "@types/json-schema": "^7.0.5", + "chalk": "^4.1.0", + "chokidar": "^3.4.2", + "cosmiconfig": "^6.0.0", + "deepmerge": "^4.2.2", + "fs-extra": "^9.0.0", + "glob": "^7.1.6", + "memfs": "^3.1.2", + "minimatch": "^3.0.4", + "schema-utils": "2.7.0", + "semver": "^7.3.2", + "tapable": "^1.0.0" + }, + "dependencies": { + "cosmiconfig": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-6.0.0.tgz", + "integrity": "sha512-xb3ZL6+L8b9JLLCx3ZdoZy4+2ECphCMo2PwqgP1tlfVq6M6YReyzBJtvWWtbDSpNr9hn96pkCiZqUcFEc+54Qg==", + "requires": { + "@types/parse-json": "^4.0.0", + "import-fresh": "^3.1.0", + "parse-json": "^5.0.0", + "path-type": "^4.0.0", + "yaml": "^1.7.2" + } + }, + "fs-extra": { + "version": "9.1.0", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-9.1.0.tgz", + "integrity": "sha512-hcg3ZmepS30/7BSFqRvoo3DOMQu7IjqxO5nCDt+zM9XWjb33Wg7ziNT+Qvqbuc3+gWpzO02JubVyk2G4Zvo1OQ==", + "requires": { + "at-least-node": "^1.0.0", + "graceful-fs": "^4.2.0", + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" + } + }, + "jsonfile": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz", + "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==", + "requires": { + "graceful-fs": "^4.1.6", + "universalify": "^2.0.0" + } + 
}, + "schema-utils": { + "version": "2.7.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-2.7.0.tgz", + "integrity": "sha512-0ilKFI6QQF5nxDZLFn2dMjvc4hjg/Wkg7rHd3jK6/A4a1Hl9VFdQWvgB1UMGoU94pad1P/8N7fMcEnLnSiju8A==", + "requires": { + "@types/json-schema": "^7.0.4", + "ajv": "^6.12.2", + "ajv-keywords": "^3.4.1" + } + }, + "tapable": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/tapable/-/tapable-1.1.3.tgz", + "integrity": "sha512-4WK/bYZmj8xLr+HUCODHGF1ZFzsYffasLUgEiMBY4fgtltdO6B4WJtlSbPaDTLpYTcGVwM2qLnFTICEcNxs3kA==" + }, + "universalify": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz", + "integrity": "sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ==" + } + } + }, + "forwarded": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz", + "integrity": "sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==" + }, + "fraction.js": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/fraction.js/-/fraction.js-4.2.0.tgz", + "integrity": "sha512-MhLuK+2gUcnZe8ZHlaaINnQLl0xRIGRfcGk2yl8xoQAfHrSsL3rYu6FCmBdkdbhc9EPlwyGHewaRsvwRMJtAlA==" + }, + "fresh": { + "version": "0.5.2", + "resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz", + "integrity": "sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q==" + }, + "fs-extra": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-7.0.1.tgz", + "integrity": "sha512-YJDaCJZEnBmcbw13fvdAM9AwNOJwOzrE4pqMqBq5nFiEqXUqHwlK4B+3pUw6JNvfSPtX05xFHtYy/1ni01eGCw==", + "requires": { + "graceful-fs": "^4.1.2", + "jsonfile": "^4.0.0", + "universalify": "^0.1.0" + } + }, + "fs-monkey": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/fs-monkey/-/fs-monkey-1.0.4.tgz", + "integrity": 
"sha512-INM/fWAxMICjttnD0DX1rBvinKskj5G1w+oy/pnm9u/tSlnBrzFonJMcalKJ30P8RRsPzKcCG7Q8l0jx5Fh9YQ==" + }, + "fs.realpath": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", + "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==" + }, + "fsevents": { + "version": "2.3.2", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.2.tgz", + "integrity": "sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==", + "optional": true + }, + "function-bind": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz", + "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==" + }, + "gensync": { + "version": "1.0.0-beta.2", + "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", + "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==" + }, + "get-intrinsic": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.1.tgz", + "integrity": "sha512-2DcsyfABl+gVHEfCOaTrWgyt+tb6MSEGmKq+kI5HwLbIYgjgmMcV8KQ41uaKz1xxUcn9tJtgFbQUEVcEbd0FYw==", + "requires": { + "function-bind": "^1.1.1", + "has": "^1.0.3", + "has-proto": "^1.0.1", + "has-symbols": "^1.0.3" + } + }, + "get-own-enumerable-property-symbols": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/get-own-enumerable-property-symbols/-/get-own-enumerable-property-symbols-3.0.2.tgz", + "integrity": "sha512-I0UBV/XOz1XkIJHEUDMZAbzCThU/H8DxmSfmdGcKPnVhu2VfFqr34jr9777IyaTYvxjedWhqVIilEDsCdP5G6g==" + }, + "get-stream": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-4.1.0.tgz", + "integrity": "sha512-GMat4EJ5161kIy2HevLlr4luNjBgvmj413KaQA7jt4V8B4RDsfpHk7WQ9GVqfYyyx8OS/L66Kox+rJRNklLK7w==", + "requires": 
{ + "pump": "^3.0.0" + } + }, + "github-slugger": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/github-slugger/-/github-slugger-1.5.0.tgz", + "integrity": "sha512-wIh+gKBI9Nshz2o46B0B3f5k/W+WI9ZAv6y5Dn5WJ5SK1t0TnDimB4WE5rmTD05ZAIn8HALCZVmCsvj0w0v0lw==" + }, + "glob": { + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "requires": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + } + }, + "glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "requires": { + "is-glob": "^4.0.1" + } + }, + "glob-to-regexp": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/glob-to-regexp/-/glob-to-regexp-0.3.0.tgz", + "integrity": "sha512-Iozmtbqv0noj0uDDqoL0zNq0VBEfK2YFoMAZoxJe4cwphvLR+JskfF30QhXHOR4m3KrE6NLRYw+U9MRXvifyig==" + }, + "global-dirs": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/global-dirs/-/global-dirs-3.0.1.tgz", + "integrity": "sha512-NBcGGFbBA9s1VzD41QXDG+3++t9Mn5t1FpLdhESY6oKY4gYTFpX4wO3sqGUa0Srjtbfj3szX0RnemmrVRUdULA==", + "requires": { + "ini": "2.0.0" + }, + "dependencies": { + "ini": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ini/-/ini-2.0.0.tgz", + "integrity": "sha512-7PnF4oN3CvZF23ADhA5wRaYEQpJ8qygSkbtTXWBeXWXmEVRXK+1ITciHWwHhsjv1TmW0MgacIv6hEi5pX5NQdA==" + } + } + }, + "global-modules": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/global-modules/-/global-modules-2.0.0.tgz", + "integrity": "sha512-NGbfmJBp9x8IxyJSd1P+otYK8vonoJactOogrVfFRIAEY1ukil8RSKDz2Yo7wh1oihl51l/r6W4epkeKJHqL8A==", + "requires": { + "global-prefix": "^3.0.0" + 
} + }, + "global-prefix": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/global-prefix/-/global-prefix-3.0.0.tgz", + "integrity": "sha512-awConJSVCHVGND6x3tmMaKcQvwXLhjdkmomy2W+Goaui8YPgYgXJZewhg3fWC+DlfqqQuWg8AwqjGTD2nAPVWg==", + "requires": { + "ini": "^1.3.5", + "kind-of": "^6.0.2", + "which": "^1.3.1" + }, + "dependencies": { + "which": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz", + "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==", + "requires": { + "isexe": "^2.0.0" + } + } + } + }, + "globals": { + "version": "11.12.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz", + "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==" + }, + "globby": { + "version": "11.1.0", + "resolved": "https://registry.npmjs.org/globby/-/globby-11.1.0.tgz", + "integrity": "sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==", + "requires": { + "array-union": "^2.1.0", + "dir-glob": "^3.0.1", + "fast-glob": "^3.2.9", + "ignore": "^5.2.0", + "merge2": "^1.4.1", + "slash": "^3.0.0" + } + }, + "got": { + "version": "9.6.0", + "resolved": "https://registry.npmjs.org/got/-/got-9.6.0.tgz", + "integrity": "sha512-R7eWptXuGYxwijs0eV+v3o6+XH1IqVK8dJOEecQfTmkncw9AV4dcw/Dhxi8MdlqPthxxpZyizMzyg8RTmEsG+Q==", + "requires": { + "@sindresorhus/is": "^0.14.0", + "@szmarczak/http-timer": "^1.1.2", + "cacheable-request": "^6.0.0", + "decompress-response": "^3.3.0", + "duplexer3": "^0.1.4", + "get-stream": "^4.1.0", + "lowercase-keys": "^1.0.1", + "mimic-response": "^1.0.1", + "p-cancelable": "^1.0.0", + "to-readable-stream": "^1.0.0", + "url-parse-lax": "^3.0.0" + } + }, + "graceful-fs": { + "version": "4.2.11", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", + "integrity": 
"sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==" + }, + "gray-matter": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/gray-matter/-/gray-matter-4.0.3.tgz", + "integrity": "sha512-5v6yZd4JK3eMI3FqqCouswVqwugaA9r4dNZB1wwcmrD02QkV5H0y7XBQW8QwQqEaZY1pM9aqORSORhJRdNK44Q==", + "requires": { + "js-yaml": "^3.13.1", + "kind-of": "^6.0.2", + "section-matter": "^1.0.0", + "strip-bom-string": "^1.0.0" + }, + "dependencies": { + "argparse": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", + "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", + "requires": { + "sprintf-js": "~1.0.2" + } + }, + "js-yaml": { + "version": "3.14.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", + "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", + "requires": { + "argparse": "^1.0.7", + "esprima": "^4.0.0" + } + } + } + }, + "gzip-size": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/gzip-size/-/gzip-size-6.0.0.tgz", + "integrity": "sha512-ax7ZYomf6jqPTQ4+XCpUGyXKHk5WweS+e05MBO4/y3WJ5RkmPXNKvX+bx1behVILVwr6JSQvZAku021CHPXG3Q==", + "requires": { + "duplexer": "^0.1.2" + } + }, + "handle-thing": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/handle-thing/-/handle-thing-2.0.1.tgz", + "integrity": "sha512-9Qn4yBxelxoh2Ow62nP+Ka/kMnOXRi8BXnRaUwezLNhqelnN49xKz4F/dPP8OYLxLxq6JDtZb2i9XznUQbNPTg==" + }, + "handlebars": { + "version": "4.7.7", + "resolved": "https://registry.npmjs.org/handlebars/-/handlebars-4.7.7.tgz", + "integrity": "sha512-aAcXm5OAfE/8IXkcZvCepKU3VzW1/39Fb5ZuqMtgI/hT8X2YgoMvBY5dLhq/cpOvw7Lk1nK/UF71aLG/ZnVYRA==", + "requires": { + "minimist": "^1.2.5", + "neo-async": "^2.6.0", + "source-map": "^0.6.1", + "uglify-js": "^3.1.4", + "wordwrap": "^1.0.0" + } + }, + "has": { + "version": 
"1.0.3", + "resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz", + "integrity": "sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==", + "requires": { + "function-bind": "^1.1.1" + } + }, + "has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==" + }, + "has-property-descriptors": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.0.tgz", + "integrity": "sha512-62DVLZGoiEBDHQyqG4w9xCuZ7eJEwNmJRWw2VY84Oedb7WFcA27fiEVe8oUQx9hAUJ4ekurquucTGwsyO1XGdQ==", + "requires": { + "get-intrinsic": "^1.1.1" + } + }, + "has-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.0.1.tgz", + "integrity": "sha512-7qE+iP+O+bgF9clE5+UoBFzE65mlBiVj3tKCrlNQ0Ogwm0BjpT/gK4SlLYDMybDh5I3TCTKnPPa0oMG7JDYrhg==" + }, + "has-symbols": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz", + "integrity": "sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==" + }, + "has-yarn": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/has-yarn/-/has-yarn-2.1.0.tgz", + "integrity": "sha512-UqBRqi4ju7T+TqGNdqAO0PaSVGsDGJUBQvk9eUWNGRY1CFGDzYhLWoM7JQEemnlvVcv/YEmc2wNW8BC24EnUsw==" + }, + "hast-to-hyperscript": { + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/hast-to-hyperscript/-/hast-to-hyperscript-9.0.1.tgz", + "integrity": "sha512-zQgLKqF+O2F72S1aa4y2ivxzSlko3MAvxkwG8ehGmNiqd98BIN3JM1rAJPmplEyLmGLO2QZYJtIneOSZ2YbJuA==", + "requires": { + "@types/unist": "^2.0.3", + "comma-separated-tokens": "^1.0.0", + "property-information": "^5.3.0", + "space-separated-tokens": "^1.0.0", + "style-to-object": "^0.3.0", + "unist-util-is": "^4.0.0", + "web-namespaces": 
"^1.0.0" + } + }, + "hast-util-from-parse5": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/hast-util-from-parse5/-/hast-util-from-parse5-6.0.1.tgz", + "integrity": "sha512-jeJUWiN5pSxW12Rh01smtVkZgZr33wBokLzKLwinYOUfSzm1Nl/c3GUGebDyOKjdsRgMvoVbV0VpAcpjF4NrJA==", + "requires": { + "@types/parse5": "^5.0.0", + "hastscript": "^6.0.0", + "property-information": "^5.0.0", + "vfile": "^4.0.0", + "vfile-location": "^3.2.0", + "web-namespaces": "^1.0.0" + } + }, + "hast-util-parse-selector": { + "version": "2.2.5", + "resolved": "https://registry.npmjs.org/hast-util-parse-selector/-/hast-util-parse-selector-2.2.5.tgz", + "integrity": "sha512-7j6mrk/qqkSehsM92wQjdIgWM2/BW61u/53G6xmC8i1OmEdKLHbk419QKQUjz6LglWsfqoiHmyMRkP1BGjecNQ==" + }, + "hast-util-raw": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/hast-util-raw/-/hast-util-raw-6.0.1.tgz", + "integrity": "sha512-ZMuiYA+UF7BXBtsTBNcLBF5HzXzkyE6MLzJnL605LKE8GJylNjGc4jjxazAHUtcwT5/CEt6afRKViYB4X66dig==", + "requires": { + "@types/hast": "^2.0.0", + "hast-util-from-parse5": "^6.0.0", + "hast-util-to-parse5": "^6.0.0", + "html-void-elements": "^1.0.0", + "parse5": "^6.0.0", + "unist-util-position": "^3.0.0", + "vfile": "^4.0.0", + "web-namespaces": "^1.0.0", + "xtend": "^4.0.0", + "zwitch": "^1.0.0" + } + }, + "hast-util-to-parse5": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/hast-util-to-parse5/-/hast-util-to-parse5-6.0.0.tgz", + "integrity": "sha512-Lu5m6Lgm/fWuz8eWnrKezHtVY83JeRGaNQ2kn9aJgqaxvVkFCZQBEhgodZUDUvoodgyROHDb3r5IxAEdl6suJQ==", + "requires": { + "hast-to-hyperscript": "^9.0.0", + "property-information": "^5.0.0", + "web-namespaces": "^1.0.0", + "xtend": "^4.0.0", + "zwitch": "^1.0.0" + } + }, + "hastscript": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/hastscript/-/hastscript-6.0.0.tgz", + "integrity": "sha512-nDM6bvd7lIqDUiYEiu5Sl/+6ReP0BMk/2f4U/Rooccxkj0P5nm+acM5PrGJ/t5I8qPGiqZSE6hVAwZEdZIvP4w==", + "requires": { + "@types/hast": 
"^2.0.0", + "comma-separated-tokens": "^1.0.0", + "hast-util-parse-selector": "^2.0.0", + "property-information": "^5.0.0", + "space-separated-tokens": "^1.0.0" + } + }, + "he": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/he/-/he-1.2.0.tgz", + "integrity": "sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==" + }, + "history": { + "version": "4.10.1", + "resolved": "https://registry.npmjs.org/history/-/history-4.10.1.tgz", + "integrity": "sha512-36nwAD620w12kuzPAsyINPWJqlNbij+hpK1k9XRloDtym8mxzGYl2c17LnV6IAGB2Dmg4tEa7G7DlawS0+qjew==", + "requires": { + "@babel/runtime": "^7.1.2", + "loose-envify": "^1.2.0", + "resolve-pathname": "^3.0.0", + "tiny-invariant": "^1.0.2", + "tiny-warning": "^1.0.0", + "value-equal": "^1.0.1" + } + }, + "hoist-non-react-statics": { + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/hoist-non-react-statics/-/hoist-non-react-statics-3.3.2.tgz", + "integrity": "sha512-/gGivxi8JPKWNm/W0jSmzcMPpfpPLc3dY/6GxhX2hQ9iGj3aDfklV4ET7NjKpSinLpJ5vafa9iiGIEZg10SfBw==", + "requires": { + "react-is": "^16.7.0" + } + }, + "hpack.js": { + "version": "2.1.6", + "resolved": "https://registry.npmjs.org/hpack.js/-/hpack.js-2.1.6.tgz", + "integrity": "sha512-zJxVehUdMGIKsRaNt7apO2Gqp0BdqW5yaiGHXXmbpvxgBYVZnAql+BJb4RO5ad2MgpbZKn5G6nMnegrH1FcNYQ==", + "requires": { + "inherits": "^2.0.1", + "obuf": "^1.0.0", + "readable-stream": "^2.0.1", + "wbuf": "^1.1.0" + }, + "dependencies": { + "isarray": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", + "integrity": "sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==" + }, + "readable-stream": { + "version": "2.3.8", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.8.tgz", + "integrity": "sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==", + "requires": { + "core-util-is": 
"~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + "safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" + }, + "string_decoder": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", + "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", + "requires": { + "safe-buffer": "~5.1.0" + } + } + } + }, + "html-entities": { + "version": "2.3.5", + "resolved": "https://registry.npmjs.org/html-entities/-/html-entities-2.3.5.tgz", + "integrity": "sha512-72TJlcMkYsEJASa/3HnX7VT59htM7iSHbH59NSZbtc+22Ap0Txnlx91sfeB+/A7wNZg7UxtZdhAW4y+/jimrdg==" + }, + "html-minifier-terser": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/html-minifier-terser/-/html-minifier-terser-6.1.0.tgz", + "integrity": "sha512-YXxSlJBZTP7RS3tWnQw74ooKa6L9b9i9QYXY21eUEvhZ3u9XLfv6OnFsQq6RxkhHygsaUMvYsZRV5rU/OVNZxw==", + "requires": { + "camel-case": "^4.1.2", + "clean-css": "^5.2.2", + "commander": "^8.3.0", + "he": "^1.2.0", + "param-case": "^3.0.4", + "relateurl": "^0.2.7", + "terser": "^5.10.0" + }, + "dependencies": { + "commander": { + "version": "8.3.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-8.3.0.tgz", + "integrity": "sha512-OkTL9umf+He2DZkUq8f8J9of7yL6RJKI24dVITBmNfZBmri9zYZQrKkuXiKhyfPSu8tUhnVBB1iKXevvnlR4Ww==" + } + } + }, + "html-tags": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/html-tags/-/html-tags-3.3.1.tgz", + "integrity": "sha512-ztqyC3kLto0e9WbNp0aeP+M3kTt+nbaIveGmUxAtZa+8iFgKLUOD4YKM5j+f3QD89bra7UeumolZHKuOXnTmeQ==" + }, + "html-void-elements": { + "version": "1.0.5", + "resolved": 
"https://registry.npmjs.org/html-void-elements/-/html-void-elements-1.0.5.tgz", + "integrity": "sha512-uE/TxKuyNIcx44cIWnjr/rfIATDH7ZaOMmstu0CwhFG1Dunhlp4OC6/NMbhiwoq5BpW0ubi303qnEk/PZj614w==" + }, + "html-webpack-plugin": { + "version": "5.5.1", + "resolved": "https://registry.npmjs.org/html-webpack-plugin/-/html-webpack-plugin-5.5.1.tgz", + "integrity": "sha512-cTUzZ1+NqjGEKjmVgZKLMdiFg3m9MdRXkZW2OEe69WYVi5ONLMmlnSZdXzGGMOq0C8jGDrL6EWyEDDUioHO/pA==", + "requires": { + "@types/html-minifier-terser": "^6.0.0", + "html-minifier-terser": "^6.0.2", + "lodash": "^4.17.21", + "pretty-error": "^4.0.0", + "tapable": "^2.0.0" + } + }, + "htmlparser2": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-6.1.0.tgz", + "integrity": "sha512-gyyPk6rgonLFEDGoeRgQNaEUvdJ4ktTmmUh/h2t7s+M8oPpIPxgNACWa+6ESR57kXstwqPiCut0V8NRpcwgU7A==", + "requires": { + "domelementtype": "^2.0.1", + "domhandler": "^4.0.0", + "domutils": "^2.5.2", + "entities": "^2.0.0" + }, + "dependencies": { + "entities": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/entities/-/entities-2.2.0.tgz", + "integrity": "sha512-p92if5Nz619I0w+akJrLZH0MX0Pb5DX39XOwQTtXSdQQOaYH03S1uIQp4mhOZtAXrxq4ViO67YTiLBo2638o9A==" + } + } + }, + "http-cache-semantics": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-4.1.1.tgz", + "integrity": "sha512-er295DKPVsV82j5kw1Gjt+ADA/XYHsajl82cGNQG2eyoPkvgUhX+nDIyelzhIWbbsXP39EHcI6l5tYs2FYqYXQ==" + }, + "http-deceiver": { + "version": "1.2.7", + "resolved": "https://registry.npmjs.org/http-deceiver/-/http-deceiver-1.2.7.tgz", + "integrity": "sha512-LmpOGxTfbpgtGVxJrj5k7asXHCgNZp5nLfp+hWc8QQRqtb7fUy6kRY3BO1h9ddF6yIPYUARgxGOwB42DnxIaNw==" + }, + "http-errors": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.0.tgz", + "integrity": "sha512-FtwrG/euBzaEjYeRqOgly7G0qviiXoJWnvEH2Z1plBdXgbyjv34pHTSb9zoeHMyDy33+DWy5Wt9Wo+TURtOYSQ==", + 
"requires": { + "depd": "2.0.0", + "inherits": "2.0.4", + "setprototypeof": "1.2.0", + "statuses": "2.0.1", + "toidentifier": "1.0.1" + } + }, + "http-parser-js": { + "version": "0.5.8", + "resolved": "https://registry.npmjs.org/http-parser-js/-/http-parser-js-0.5.8.tgz", + "integrity": "sha512-SGeBX54F94Wgu5RH3X5jsDtf4eHyRogWX1XGT3b4HuW3tQPM4AaBzoUji/4AAJNXCEOWZ5O0DgZmJw1947gD5Q==" + }, + "http-proxy": { + "version": "1.18.1", + "resolved": "https://registry.npmjs.org/http-proxy/-/http-proxy-1.18.1.tgz", + "integrity": "sha512-7mz/721AbnJwIVbnaSv1Cz3Am0ZLT/UBwkC92VlxhXv/k/BBQfM2fXElQNC27BVGr0uwUpplYPQM9LnaBMR5NQ==", + "requires": { + "eventemitter3": "^4.0.0", + "follow-redirects": "^1.0.0", + "requires-port": "^1.0.0" + } + }, + "http-proxy-middleware": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/http-proxy-middleware/-/http-proxy-middleware-2.0.6.tgz", + "integrity": "sha512-ya/UeJ6HVBYxrgYotAZo1KvPWlgB48kUJLDePFeneHsVujFaW5WNj2NgWCAE//B1Dl02BIfYlpNgBy8Kf8Rjmw==", + "requires": { + "@types/http-proxy": "^1.17.8", + "http-proxy": "^1.18.1", + "is-glob": "^4.0.1", + "is-plain-obj": "^3.0.0", + "micromatch": "^4.0.2" + }, + "dependencies": { + "is-plain-obj": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-3.0.0.tgz", + "integrity": "sha512-gwsOE28k+23GP1B6vFl1oVh/WOzmawBrKwo5Ev6wMKzPkaXaCDIQKzLnvsA42DRlbVTWorkgTKIviAKCWkfUwA==" + } + } + }, + "human-signals": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-2.1.0.tgz", + "integrity": "sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==" + }, + "iconv-lite": { + "version": "0.4.24", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", + "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", + "requires": { + "safer-buffer": ">= 2.1.2 < 3" + } + }, + "icss-utils": { + "version": 
"5.1.0", + "resolved": "https://registry.npmjs.org/icss-utils/-/icss-utils-5.1.0.tgz", + "integrity": "sha512-soFhflCVWLfRNOPU3iv5Z9VUdT44xFRbzjLsEzSr5AQmgqPMTHdU3PMT1Cf1ssx8fLNJDA1juftYl+PUcv3MqA==", + "requires": {} + }, + "ignore": { + "version": "5.2.4", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.2.4.tgz", + "integrity": "sha512-MAb38BcSbH0eHNBxn7ql2NH/kX33OkB3lZ1BNdh7ENeRChHTYsTvWrMubiIAMNS2llXEEgZ1MUOBtXChP3kaFQ==" + }, + "image-size": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/image-size/-/image-size-1.0.2.tgz", + "integrity": "sha512-xfOoWjceHntRb3qFCrh5ZFORYH8XCdYpASltMhZ/Q0KZiOwjdE/Yl2QCiWdwD+lygV5bMCvauzgu5PxBX/Yerg==", + "requires": { + "queue": "6.0.2" + } + }, + "immer": { + "version": "9.0.21", + "resolved": "https://registry.npmjs.org/immer/-/immer-9.0.21.tgz", + "integrity": "sha512-bc4NBHqOqSfRW7POMkHd51LvClaeMXpm8dx0e8oE2GORbq5aRK7Bxl4FyzVLdGtLmvLKL7BTDBG5ACQm4HWjTA==" + }, + "import-fresh": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.0.tgz", + "integrity": "sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==", + "requires": { + "parent-module": "^1.0.0", + "resolve-from": "^4.0.0" + } + }, + "import-lazy": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/import-lazy/-/import-lazy-2.1.0.tgz", + "integrity": "sha512-m7ZEHgtw69qOGw+jwxXkHlrlIPdTGkyh66zXZ1ajZbxkDBNjSY/LGbmjc7h0s2ELsUDTAhFr55TrPSSqJGPG0A==" + }, + "imurmurhash": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", + "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==" + }, + "indent-string": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz", + "integrity": "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==" + }, + 
"inflight": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", + "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", + "requires": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" + }, + "ini": { + "version": "1.3.8", + "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz", + "integrity": "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==" + }, + "inline-style-parser": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/inline-style-parser/-/inline-style-parser-0.1.1.tgz", + "integrity": "sha512-7NXolsK4CAS5+xvdj5OMMbI962hU/wvwoxk+LWR9Ek9bVtyuuYScDN6eS0rUm6TxApFpw7CX1o4uJzcd4AyD3Q==" + }, + "interpret": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/interpret/-/interpret-1.4.0.tgz", + "integrity": "sha512-agE4QfB2Lkp9uICn7BAqoscw4SZP9kTE2hxiFI3jBPmXJfdqiahTbUuKGsMoN2GtqL9AxhYioAcVvgsb1HvRbA==" + }, + "invariant": { + "version": "2.2.4", + "resolved": "https://registry.npmjs.org/invariant/-/invariant-2.2.4.tgz", + "integrity": "sha512-phJfQVBuaJM5raOpJjSfkiD6BpbCE4Ns//LaXl6wGYtUBY83nWS6Rf9tXm2e8VaK60JEjYldbPif/A2B1C2gNA==", + "requires": { + "loose-envify": "^1.0.0" + } + }, + "ipaddr.js": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-2.1.0.tgz", + "integrity": "sha512-LlbxQ7xKzfBusov6UMi4MFpEg0m+mAm9xyNGEduwXMEDuf4WfzB/RZwMVYEd7IKGvh4IUkEXYxtAVu9T3OelJQ==" + }, + "is-absolute": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-absolute/-/is-absolute-1.0.0.tgz", + "integrity": "sha512-dOWoqflvcydARa360Gvv18DZ/gRuHKi2NU/wU5X1ZFzdYfH29nkiNZsF3mp4OJ3H4yo9Mx8A/uAGNzpzPN3yBA==", + "requires": { + 
"is-relative": "^1.0.0", + "is-windows": "^1.0.1" + } + }, + "is-alphabetical": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/is-alphabetical/-/is-alphabetical-1.0.4.tgz", + "integrity": "sha512-DwzsA04LQ10FHTZuL0/grVDk4rFoVH1pjAToYwBrHSxcrBIGQuXrQMtD5U1b0U2XVgKZCTLLP8u2Qxqhy3l2Vg==" + }, + "is-alphanumerical": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/is-alphanumerical/-/is-alphanumerical-1.0.4.tgz", + "integrity": "sha512-UzoZUr+XfVz3t3v4KyGEniVL9BDRoQtY7tOyrRybkVNjDFWyo1yhXNGrrBTQxp3ib9BLAWs7k2YKBQsFRkZG9A==", + "requires": { + "is-alphabetical": "^1.0.0", + "is-decimal": "^1.0.0" + } + }, + "is-arrayish": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", + "integrity": "sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==" + }, + "is-binary-path": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", + "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", + "requires": { + "binary-extensions": "^2.0.0" + } + }, + "is-buffer": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-2.0.5.tgz", + "integrity": "sha512-i2R6zNFDwgEHJyQUtJEk0XFi1i0dPFn/oqjK3/vPCcDeJvW5NQ83V8QbicfF1SupOaB0h8ntgBC2YiE7dfyctQ==" + }, + "is-ci": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-ci/-/is-ci-2.0.0.tgz", + "integrity": "sha512-YfJT7rkpQB0updsdHLGWrvhBJfcfzNNawYDNIyQXJz0IViGf75O8EBPKSdvw2rF+LGCsX4FZ8tcr3b19LcZq4w==", + "requires": { + "ci-info": "^2.0.0" + }, + "dependencies": { + "ci-info": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-2.0.0.tgz", + "integrity": "sha512-5tK7EtrZ0N+OLFMthtqOj4fI2Jeb88C4CAZPu25LDVUgXJ0A3Js4PMGqrn0JU1W0Mh1/Z8wZzYPxqUrXeBboCQ==" + } + } + }, + "is-core-module": { + "version": "2.12.1", + "resolved": 
"https://registry.npmjs.org/is-core-module/-/is-core-module-2.12.1.tgz", + "integrity": "sha512-Q4ZuBAe2FUsKtyQJoQHlvP8OvBERxO3jEmy1I7hcRXcJBGGHFh/aJBswbXuS9sgrDH2QUO8ilkwNPHvHMd8clg==", + "requires": { + "has": "^1.0.3" + } + }, + "is-decimal": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/is-decimal/-/is-decimal-1.0.4.tgz", + "integrity": "sha512-RGdriMmQQvZ2aqaQq3awNA6dCGtKpiDFcOzrTWrDAT2MiWrKQVPmxLGHl7Y2nNu6led0kEyoX0enY0qXYsv9zw==" + }, + "is-docker": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/is-docker/-/is-docker-2.2.1.tgz", + "integrity": "sha512-F+i2BKsFrH66iaUFc0woD8sLy8getkwTwtOBjvs56Cx4CgJDeKQeqfz8wAYiSb8JOprWhHH5p77PbmYCvvUuXQ==" + }, + "is-extendable": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-0.1.1.tgz", + "integrity": "sha512-5BMULNob1vgFX6EjQw5izWDxrecWK9AM72rugNr0TFldMOi0fj6Jk+zeKIt0xGj4cEfQIJth4w3OKWOJ4f+AFw==" + }, + "is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==" + }, + "is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==" + }, + "is-glob": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", + "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", + "requires": { + "is-extglob": "^2.1.1" + } + }, + "is-hexadecimal": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/is-hexadecimal/-/is-hexadecimal-1.0.4.tgz", + "integrity": "sha512-gyPJuv83bHMpocVYoqof5VDiZveEoGoFL8m3BXNb2VW8Xs+rz9kqO8LOQ5DH6EsuvilT1ApazU0pyl+ytbPtlw==" + }, + 
"is-installed-globally": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/is-installed-globally/-/is-installed-globally-0.4.0.tgz", + "integrity": "sha512-iwGqO3J21aaSkC7jWnHP/difazwS7SFeIqxv6wEtLU8Y5KlzFTjyqcSIT0d8s4+dDhKytsk9PJZ2BkS5eZwQRQ==", + "requires": { + "global-dirs": "^3.0.0", + "is-path-inside": "^3.0.2" + } + }, + "is-npm": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/is-npm/-/is-npm-5.0.0.tgz", + "integrity": "sha512-WW/rQLOazUq+ST/bCAVBp/2oMERWLsR7OrKyt052dNDk4DHcDE0/7QSXITlmi+VBcV13DfIbysG3tZJm5RfdBA==" + }, + "is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==" + }, + "is-obj": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/is-obj/-/is-obj-1.0.1.tgz", + "integrity": "sha512-l4RyHgRqGN4Y3+9JHVrNqO+tN0rV5My76uW5/nuO4K1b6vw5G8d/cmFjP9tRfEsdhZNt0IFdZuK/c2Vr4Nb+Qg==" + }, + "is-path-cwd": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/is-path-cwd/-/is-path-cwd-2.2.0.tgz", + "integrity": "sha512-w942bTcih8fdJPJmQHFzkS76NEP8Kzzvmw92cXsazb8intwLqPibPPdXf4ANdKV3rYMuuQYGIWtvz9JilB3NFQ==" + }, + "is-path-inside": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-3.0.3.tgz", + "integrity": "sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==" + }, + "is-plain-obj": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-2.1.0.tgz", + "integrity": "sha512-YWnfyRwxL/+SsrWYfOpUtz5b3YD+nyfkHvjbcanzk8zgyO4ASD67uVMRt8k5bM4lLMDnXfriRhOpemw+NfT1eA==" + }, + "is-plain-object": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-2.0.4.tgz", + "integrity": 
"sha512-h5PpgXkWitc38BBMYawTYMWJHFZJVnBquFE57xFpjB8pJFiF6gZ+bU+WyI/yqXiFR5mdLsgYNaPe8uao6Uv9Og==", + "requires": { + "isobject": "^3.0.1" + } + }, + "is-regexp": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-regexp/-/is-regexp-1.0.0.tgz", + "integrity": "sha512-7zjFAPO4/gwyQAAgRRmqeEeyIICSdmCqa3tsVHMdBzaXXRiqopZL4Cyghg/XulGWrtABTpbnYYzzIRffLkP4oA==" + }, + "is-relative": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-relative/-/is-relative-1.0.0.tgz", + "integrity": "sha512-Kw/ReK0iqwKeu0MITLFuj0jbPAmEiOsIwyIXvvbfa6QfmN9pkD1M+8pdk7Rl/dTKbH34/XBFMbgD4iMJhLQbGA==", + "requires": { + "is-unc-path": "^1.0.0" + } + }, + "is-root": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-root/-/is-root-2.1.0.tgz", + "integrity": "sha512-AGOriNp96vNBd3HtU+RzFEc75FfR5ymiYv8E553I71SCeXBiMsVDUtdio1OEFvrPyLIQ9tVR5RxXIFe5PUFjMg==" + }, + "is-stream": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz", + "integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==" + }, + "is-typedarray": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-typedarray/-/is-typedarray-1.0.0.tgz", + "integrity": "sha512-cyA56iCMHAh5CdzjJIa4aohJyeO1YbwLi3Jc35MmRU6poroFjIGZzUzupGiRPOjgHg9TLu43xbpwXk523fMxKA==" + }, + "is-unc-path": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-unc-path/-/is-unc-path-1.0.0.tgz", + "integrity": "sha512-mrGpVd0fs7WWLfVsStvgF6iEJnbjDFZh9/emhRDcGWTduTfNHd9CHeUwH3gYIjdbwo4On6hunkztwOaAw0yllQ==", + "requires": { + "unc-path-regex": "^0.1.2" + } + }, + "is-whitespace-character": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/is-whitespace-character/-/is-whitespace-character-1.0.4.tgz", + "integrity": "sha512-SDweEzfIZM0SJV0EUga669UTKlmL0Pq8Lno0QDQsPnvECB3IM2aP0gdx5TrU0A01MAPfViaZiI2V1QMZLaKK5w==" + }, + "is-windows": { + "version": "1.0.2", + "resolved": 
"https://registry.npmjs.org/is-windows/-/is-windows-1.0.2.tgz", + "integrity": "sha512-eXK1UInq2bPmjyX6e3VHIzMLobc4J94i4AWn+Hpq3OU5KkrRC96OAcR3PRJ/pGu6m8TRnBHP9dkXQVsT/COVIA==" + }, + "is-word-character": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/is-word-character/-/is-word-character-1.0.4.tgz", + "integrity": "sha512-5SMO8RVennx3nZrqtKwCGyyetPE9VDba5ugvKLaD4KopPG5kR4mQ7tNt/r7feL5yt5h3lpuBbIUmCOG2eSzXHA==" + }, + "is-wsl": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-2.2.0.tgz", + "integrity": "sha512-fKzAra0rGJUUBwGBgNkHZuToZcn+TtXHpeCgmkMJMMYx1sQDYaCSyjJBSCa2nH1DGm7s3n1oBnohoVTBaN7Lww==", + "requires": { + "is-docker": "^2.0.0" + } + }, + "is-yarn-global": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/is-yarn-global/-/is-yarn-global-0.3.0.tgz", + "integrity": "sha512-VjSeb/lHmkoyd8ryPVIKvOCn4D1koMqY+vqyjjUfc3xyKtP4dYOxM44sZrnqQSzSds3xyOrUTLTC9LVCVgLngw==" + }, + "isarray": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz", + "integrity": "sha512-D2S+3GLxWH+uhrNEcoh/fnmYeP8E8/zHl644d/jdA0g2uyXvy3sb0qxotE+ne0LtccHknQzWwZEzhak7oJ0COQ==" + }, + "isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==" + }, + "isobject": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", + "integrity": "sha512-WhB9zCku7EGTj/HQQRz5aUQEUeoQZH2bWcltRErOpymJ4boYE6wL9Tbr23krRPSZ+C5zqNSrSw+Cc7sZZ4b7vg==" + }, + "jest-util": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/jest-util/-/jest-util-29.5.0.tgz", + "integrity": "sha512-RYMgG/MTadOr5t8KdhejfvUU82MxsCu5MF6KuDUHl+NuwzUt+Sm6jJWxTJVrDR1j5M/gJVCPKQEpWXY+yIQ6lQ==", + "requires": { + "@jest/types": "^29.5.0", + "@types/node": "*", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "graceful-fs": 
"^4.2.9", + "picomatch": "^2.2.3" + } + }, + "jest-worker": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-29.5.0.tgz", + "integrity": "sha512-NcrQnevGoSp4b5kg+akIpthoAFHxPBcb5P6mYPY0fUNT+sSvmtu6jlkEle3anczUKIKEbMxFimk9oTP/tpIPgA==", + "requires": { + "@types/node": "*", + "jest-util": "^29.5.0", + "merge-stream": "^2.0.0", + "supports-color": "^8.0.0" + }, + "dependencies": { + "supports-color": { + "version": "8.1.1", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", + "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", + "requires": { + "has-flag": "^4.0.0" + } + } + } + }, + "jiti": { + "version": "1.18.2", + "resolved": "https://registry.npmjs.org/jiti/-/jiti-1.18.2.tgz", + "integrity": "sha512-QAdOptna2NYiSSpv0O/BwoHBSmz4YhpzJHyi+fnMRTXFjp7B8i/YG5Z8IfusxB1ufjcD2Sre1F3R+nX3fvy7gg==" + }, + "joi": { + "version": "17.9.2", + "resolved": "https://registry.npmjs.org/joi/-/joi-17.9.2.tgz", + "integrity": "sha512-Itk/r+V4Dx0V3c7RLFdRh12IOjySm2/WGPMubBT92cQvRfYZhPM2W0hZlctjj72iES8jsRCwp7S/cRmWBnJ4nw==", + "requires": { + "@hapi/hoek": "^9.0.0", + "@hapi/topo": "^5.0.0", + "@sideway/address": "^4.1.3", + "@sideway/formula": "^3.0.1", + "@sideway/pinpoint": "^2.0.0" + } + }, + "js-tokens": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==" + }, + "js-yaml": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", + "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "requires": { + "argparse": "^2.0.1" + } + }, + "jsesc": { + "version": "2.5.2", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-2.5.2.tgz", + "integrity": 
"sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==" + }, + "json-buffer": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.0.tgz", + "integrity": "sha512-CuUqjv0FUZIdXkHPI8MezCnFCdaTAacej1TZYulLoAg1h/PhwkdXFN4V/gzY4g+fMBCOV2xF+rp7t2XD2ns/NQ==" + }, + "json-parse-even-better-errors": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz", + "integrity": "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==" + }, + "json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==" + }, + "json5": { + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz", + "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==" + }, + "jsonfile": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-4.0.0.tgz", + "integrity": "sha512-m6F1R3z8jjlf2imQHS2Qez5sjKWQzbuuhuJ/FKYFRZvPE3PuHcSMVZzfsLhGVOkfd20obL5SWEBew5ShlquNxg==", + "requires": { + "graceful-fs": "^4.1.6" + } + }, + "keyv": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/keyv/-/keyv-3.1.0.tgz", + "integrity": "sha512-9ykJ/46SN/9KPM/sichzQ7OvXyGDYKGTaDlKMGCAlg2UK8KRy4jb0d8sFc+0Tt0YYnThq8X2RZgCg74RPxgcVA==", + "requires": { + "json-buffer": "3.0.0" + } + }, + "kind-of": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz", + "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==" + }, + "kleur": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/kleur/-/kleur-3.0.3.tgz", + 
"integrity": "sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w==" + }, + "klona": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/klona/-/klona-2.0.6.tgz", + "integrity": "sha512-dhG34DXATL5hSxJbIexCft8FChFXtmskoZYnoPWjXQuebWYCNkVeV3KkGegCK9CP1oswI/vQibS2GY7Em/sJJA==" + }, + "latest-version": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/latest-version/-/latest-version-5.1.0.tgz", + "integrity": "sha512-weT+r0kTkRQdCdYCNtkMwWXQTMEswKrFBkm4ckQOMVhhqhIMI1UT2hMj+1iigIhgSZm5gTmrRXBNoGUgaTY1xA==", + "requires": { + "package-json": "^6.3.0" + } + }, + "launch-editor": { + "version": "2.6.0", + "resolved": "https://registry.npmjs.org/launch-editor/-/launch-editor-2.6.0.tgz", + "integrity": "sha512-JpDCcQnyAAzZZaZ7vEiSqL690w7dAEyLao+KC96zBplnYbJS7TYNjvM3M7y3dGz+v7aIsJk3hllWuc0kWAjyRQ==", + "requires": { + "picocolors": "^1.0.0", + "shell-quote": "^1.7.3" + } + }, + "leven": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/leven/-/leven-3.1.0.tgz", + "integrity": "sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A==" + }, + "lilconfig": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/lilconfig/-/lilconfig-2.1.0.tgz", + "integrity": "sha512-utWOt/GHzuUxnLKxB6dk81RoOeoNeHgbrXiuGk4yyF5qlRz+iIVWu56E2fqGHFrXz0QNUhLB/8nKqvRH66JKGQ==" + }, + "lines-and-columns": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz", + "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==" + }, + "loader-runner": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/loader-runner/-/loader-runner-4.3.0.tgz", + "integrity": "sha512-3R/1M+yS3j5ou80Me59j7F9IMs4PXs3VqRrm0TU3AbKPxlmpoY1TNscJV/oGJXo8qCatFGTfDbY6W6ipGOYXfg==" + }, + "loader-utils": { + "version": "2.0.4", + "resolved": 
"https://registry.npmjs.org/loader-utils/-/loader-utils-2.0.4.tgz", + "integrity": "sha512-xXqpXoINfFhgua9xiqD8fPFHgkoq1mmmpE92WlDbm9rNRd/EbRb+Gqf908T2DMfuHjjJlksiK2RbHVOdD/MqSw==", + "requires": { + "big.js": "^5.2.2", + "emojis-list": "^3.0.0", + "json5": "^2.1.2" + } + }, + "locate-path": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", + "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", + "requires": { + "p-locate": "^4.1.0" + } + }, + "lodash": { + "version": "4.17.21", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", + "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==" + }, + "lodash-es": { + "version": "4.17.21", + "resolved": "https://registry.npmjs.org/lodash-es/-/lodash-es-4.17.21.tgz", + "integrity": "sha512-mKnC+QJ9pWVzv+C4/U3rRsHapFfHvQFoFB92e52xeyGMcX6/OlIl78je1u8vePzYZSkkogMPJ2yjxxsb89cxyw==" + }, + "lodash.debounce": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/lodash.debounce/-/lodash.debounce-4.0.8.tgz", + "integrity": "sha512-FT1yDzDYEoYWhnSGnpE/4Kj1fLZkDFyqRb7fNt6FdYOSxlUWAtp42Eh6Wb0rGIv/m9Bgo7x4GhQbm5Ys4SG5ow==" + }, + "lodash.memoize": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/lodash.memoize/-/lodash.memoize-4.1.2.tgz", + "integrity": "sha512-t7j+NzmgnQzTAYXcsHYLgimltOV1MXHtlOWf6GjL9Kj8GK5FInw5JotxvbOs+IvV1/Dzo04/fCGfLVs7aXb4Ag==" + }, + "lodash.uniq": { + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/lodash.uniq/-/lodash.uniq-4.5.0.tgz", + "integrity": "sha512-xfBaXQd9ryd9dlSDvnvI0lvxfLJlYAZzXomUYzLKtUeOQvOP5piqAWuGtrhWeqaXK9hhoM/iyJc5AV+XfsX3HQ==" + }, + "loose-envify": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz", + "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==", 
+ "requires": { + "js-tokens": "^3.0.0 || ^4.0.0" + } + }, + "lower-case": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/lower-case/-/lower-case-2.0.2.tgz", + "integrity": "sha512-7fm3l3NAF9WfN6W3JOmf5drwpVqX78JtoGJ3A6W0a6ZnldM41w2fV5D490psKFTpMds8TJse/eHLFFsNHHjHgg==", + "requires": { + "tslib": "^2.0.3" + } + }, + "lowercase-keys": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-1.0.1.tgz", + "integrity": "sha512-G2Lj61tXDnVFFOi8VZds+SoQjtQC3dgokKdDG2mTm1tx4m50NUHBOZSBwQQHyy0V12A0JTG4icfZQH+xPyh8VA==" + }, + "lru-cache": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", + "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", + "requires": { + "yallist": "^3.0.2" + } + }, + "make-dir": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-3.1.0.tgz", + "integrity": "sha512-g3FeP20LNwhALb/6Cz6Dd4F2ngze0jz7tbzrD2wAV+o9FeNHe4rL+yK2md0J/fiSf1sa1ADhXqi5+oVwOM/eGw==", + "requires": { + "semver": "^6.0.0" + }, + "dependencies": { + "semver": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", + "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==" + } + } + }, + "markdown-escapes": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/markdown-escapes/-/markdown-escapes-1.0.4.tgz", + "integrity": "sha512-8z4efJYk43E0upd0NbVXwgSTQs6cT3T06etieCMEg7dRbzCbxUCK/GHlX8mhHRDcp+OLlHkPKsvqQTCvsRl2cg==" + }, + "mdast-squeeze-paragraphs": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/mdast-squeeze-paragraphs/-/mdast-squeeze-paragraphs-4.0.0.tgz", + "integrity": "sha512-zxdPn69hkQ1rm4J+2Cs2j6wDEv7O17TfXTJ33tl/+JPIoEmtV9t2ZzBM5LPHE8QlHsmVD8t3vPKCyY3oH+H8MQ==", + "requires": { + "unist-util-remove": "^2.0.0" + } + }, + "mdast-util-definitions": { + 
"version": "4.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-definitions/-/mdast-util-definitions-4.0.0.tgz", + "integrity": "sha512-k8AJ6aNnUkB7IE+5azR9h81O5EQ/cTDXtWdMq9Kk5KcEW/8ritU5CeLg/9HhOC++nALHBlaogJ5jz0Ybk3kPMQ==", + "requires": { + "unist-util-visit": "^2.0.0" + } + }, + "mdast-util-to-hast": { + "version": "10.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-to-hast/-/mdast-util-to-hast-10.0.1.tgz", + "integrity": "sha512-BW3LM9SEMnjf4HXXVApZMt8gLQWVNXc3jryK0nJu/rOXPOnlkUjmdkDlmxMirpbU9ILncGFIwLH/ubnWBbcdgA==", + "requires": { + "@types/mdast": "^3.0.0", + "@types/unist": "^2.0.0", + "mdast-util-definitions": "^4.0.0", + "mdurl": "^1.0.0", + "unist-builder": "^2.0.0", + "unist-util-generated": "^1.0.0", + "unist-util-position": "^3.0.0", + "unist-util-visit": "^2.0.0" + } + }, + "mdast-util-to-string": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-to-string/-/mdast-util-to-string-2.0.0.tgz", + "integrity": "sha512-AW4DRS3QbBayY/jJmD8437V1Gombjf8RSOUCMFBuo5iHi58AGEgVCKQ+ezHkZZDpAQS75hcBMpLqjpJTjtUL7w==" + }, + "mdn-data": { + "version": "2.0.14", + "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-2.0.14.tgz", + "integrity": "sha512-dn6wd0uw5GsdswPFfsgMp5NSB0/aDe6fK94YJV/AJDYXL6HVLWBsxeq7js7Ad+mU2K9LAlwpk6kN2D5mwCPVow==" + }, + "mdurl": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/mdurl/-/mdurl-1.0.1.tgz", + "integrity": "sha512-/sKlQJCBYVY9Ers9hqzKou4H6V5UWc/M59TH2dvkt+84itfnq7uFOMLpOiOS4ujvHP4etln18fmIxA5R5fll0g==" + }, + "media-typer": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz", + "integrity": "sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==" + }, + "memfs": { + "version": "3.5.3", + "resolved": "https://registry.npmjs.org/memfs/-/memfs-3.5.3.tgz", + "integrity": "sha512-UERzLsxzllchadvbPs5aolHh65ISpKpM+ccLbOJ8/vvpBKmAWf+la7dXFy7Mr0ySHbdHrFv5kGFCUHHe6GFEmw==", + 
"requires": { + "fs-monkey": "^1.0.4" + } + }, + "merge-descriptors": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.1.tgz", + "integrity": "sha512-cCi6g3/Zr1iqQi6ySbseM1Xvooa98N0w31jzUYrXPX2xqObmFGHJ0tQ5u74H3mVh7wLouTseZyYIq39g8cNp1w==" + }, + "merge-stream": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz", + "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==" + }, + "merge2": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", + "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==" + }, + "methods": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz", + "integrity": "sha512-iclAHeNqNm68zFtnZ0e+1L2yUIdvzNoauKU4WBA3VvH/vPFieF7qfRlwUZU+DA9P9bPXIS90ulxoUoCH23sV2w==" + }, + "micromatch": { + "version": "4.0.5", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.5.tgz", + "integrity": "sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA==", + "requires": { + "braces": "^3.0.2", + "picomatch": "^2.3.1" + } + }, + "mime": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/mime/-/mime-1.6.0.tgz", + "integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==" + }, + "mime-db": { + "version": "1.33.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.33.0.tgz", + "integrity": "sha512-BHJ/EKruNIqJf/QahvxwQZXKygOQ256myeN/Ew+THcAa5q+PjyTTMMeNQC4DZw5AwfvelsUrA6B67NKMqXDbzQ==" + }, + "mime-types": { + "version": "2.1.18", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.18.tgz", + "integrity": "sha512-lc/aahn+t4/SWV/qcmumYjymLsWfN3ELhpmVuUFjgsORruuZPVSwAQryq+HHGvO/SI2KVX26bx+En+zhM8g8hQ==", + "requires": { + 
"mime-db": "~1.33.0" + } + }, + "mimic-fn": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", + "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==" + }, + "mimic-response": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-1.0.1.tgz", + "integrity": "sha512-j5EctnkH7amfV/q5Hgmoal1g2QHFJRraOtmx0JpIqkxhBhI/lJSl1nMpQ45hVarwNETOoWEimndZ4QK0RHxuxQ==" + }, + "mini-css-extract-plugin": { + "version": "2.7.6", + "resolved": "https://registry.npmjs.org/mini-css-extract-plugin/-/mini-css-extract-plugin-2.7.6.tgz", + "integrity": "sha512-Qk7HcgaPkGG6eD77mLvZS1nmxlao3j+9PkrT9Uc7HAE1id3F41+DdBRYRYkbyfNRGzm8/YWtzhw7nVPmwhqTQw==", + "requires": { + "schema-utils": "^4.0.0" + }, + "dependencies": { + "ajv": { + "version": "8.12.0", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.12.0.tgz", + "integrity": "sha512-sRu1kpcO9yLtYxBKvqfTeh9KzZEwO3STyX1HT+4CaDzC6HpTGYhIhPIzj9XuKU7KYDwnaeh5hcOwjy1QuJzBPA==", + "requires": { + "fast-deep-equal": "^3.1.1", + "json-schema-traverse": "^1.0.0", + "require-from-string": "^2.0.2", + "uri-js": "^4.2.2" + } + }, + "ajv-keywords": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-5.1.0.tgz", + "integrity": "sha512-YCS/JNFAUyr5vAuhk1DWm1CBxRHW9LbJ2ozWeemrIqpbsqKjHVxYPyi5GC0rjZIT5JxJ3virVTS8wk4i/Z+krw==", + "requires": { + "fast-deep-equal": "^3.1.3" + } + }, + "json-schema-traverse": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", + "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==" + }, + "schema-utils": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.1.0.tgz", + "integrity": 
"sha512-Jw+GZVbP5IggB2WAn6UHI02LBwGmsIeYN/lNbSMZyDziQ7jmtAUrqKqDja+W89YHVs+KL/3IkIMltAklqB1vAw==", + "requires": { + "@types/json-schema": "^7.0.9", + "ajv": "^8.9.0", + "ajv-formats": "^2.1.1", + "ajv-keywords": "^5.1.0" + } + } + } + }, + "minimalistic-assert": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/minimalistic-assert/-/minimalistic-assert-1.0.1.tgz", + "integrity": "sha512-UtJcAD4yEaGtjPezWuO9wC4nwUnVH/8/Im3yEHQP4b67cXlD/Qr9hdITCU1xDbSEXg2XKNaP8jsReV7vQd00/A==" + }, + "minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "requires": { + "brace-expansion": "^1.1.7" + } + }, + "minimist": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz", + "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==" + }, + "mrmime": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/mrmime/-/mrmime-1.0.1.tgz", + "integrity": "sha512-hzzEagAgDyoU1Q6yg5uI+AorQgdvMCur3FcKf7NhMKWsaYg+RnbTyHRa/9IlLF9rf455MOCtcqqrQQ83pPP7Uw==" + }, + "ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" + }, + "multicast-dns": { + "version": "7.2.5", + "resolved": "https://registry.npmjs.org/multicast-dns/-/multicast-dns-7.2.5.tgz", + "integrity": "sha512-2eznPJP8z2BFLX50tf0LuODrpINqP1RVIm/CObbTcBRITQgmC/TjcREF1NeTBzIcR5XO/ukWo+YHOjBbFwIupg==", + "requires": { + "dns-packet": "^5.2.2", + "thunky": "^1.0.2" + } + }, + "nanoclone": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/nanoclone/-/nanoclone-0.2.1.tgz", + "integrity": "sha512-wynEP02LmIbLpcYw8uBKpcfF6dmg2vcpKqxeH5UcoKEYdExslsdUA4ugFauuaeYdTB76ez6gJW8XAZ6CgkXYxA==" + }, + 
"nanoid": { + "version": "3.3.6", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.6.tgz", + "integrity": "sha512-BGcqMMJuToF7i1rt+2PWSNVnWIkGCU78jBG3RxO/bZlnZPK2Cmi2QaffxGO/2RvWi9sL+FAiRiXMgsyxQ1DIDA==" + }, + "negotiator": { + "version": "0.6.3", + "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.3.tgz", + "integrity": "sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==" + }, + "neo-async": { + "version": "2.6.2", + "resolved": "https://registry.npmjs.org/neo-async/-/neo-async-2.6.2.tgz", + "integrity": "sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw==" + }, + "no-case": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/no-case/-/no-case-3.0.4.tgz", + "integrity": "sha512-fgAN3jGAh+RoxUGZHTSOLJIqUc2wmoBwGR4tbpNAKmmovFoWq0OdRkb0VkldReO2a2iBT/OEulG9XSUc10r3zg==", + "requires": { + "lower-case": "^2.0.2", + "tslib": "^2.0.3" + } + }, + "node-emoji": { + "version": "1.11.0", + "resolved": "https://registry.npmjs.org/node-emoji/-/node-emoji-1.11.0.tgz", + "integrity": "sha512-wo2DpQkQp7Sjm2A0cq+sN7EHKO6Sl0ctXeBdFZrL9T9+UywORbufTcTZxom8YqpLQt/FqNMUkOpkZrJVYSKD3A==", + "requires": { + "lodash": "^4.17.21" + } + }, + "node-forge": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/node-forge/-/node-forge-1.3.1.tgz", + "integrity": "sha512-dPEtOeMvF9VMcYV/1Wb8CPoVAXtp6MKMlcbAt4ddqmGqUJ6fQZFXkNZNkNlfevtNkGtaSoXf/vNNNSvgrdXwtA==" + }, + "node-releases": { + "version": "2.0.12", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.12.tgz", + "integrity": "sha512-QzsYKWhXTWx8h1kIvqfnC++o0pEmpRQA/aenALsL2F4pqNVr7YzcdMlDij5WBnwftRbJCNJL/O7zdKaxKPHqgQ==" + }, + "normalize-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", + "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==" + }, + 
"normalize-range": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/normalize-range/-/normalize-range-0.1.2.tgz", + "integrity": "sha512-bdok/XvKII3nUpklnV6P2hxtMNrCboOjAcyBuQnWEhO665FwrSNRxU+AqpsyvO6LgGYPspN+lu5CLtw4jPRKNA==" + }, + "normalize-url": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-6.1.0.tgz", + "integrity": "sha512-DlL+XwOy3NxAQ8xuC0okPgK46iuVNAK01YN7RueYBqqFeGsBjV9XmCAzAdgt+667bCl5kPh9EqKKDwnaPG1I7A==" + }, + "npm-run-path": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-4.0.1.tgz", + "integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==", + "requires": { + "path-key": "^3.0.0" + } + }, + "nth-check": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/nth-check/-/nth-check-2.1.1.tgz", + "integrity": "sha512-lqjrjmaOoAnWfMmBPL+XNnynZh2+swxiX3WUE0s4yEHI6m+AwrK2UZOimIRl3X/4QctVqS8AiZjFqyOGrMXb/w==", + "requires": { + "boolbase": "^1.0.0" + } + }, + "object-assign": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", + "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==" + }, + "object-inspect": { + "version": "1.12.3", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.12.3.tgz", + "integrity": "sha512-geUvdk7c+eizMNUDkRpW1wJwgfOiOeHbxBR/hLXK1aT6zmVSO0jsQcs7fj6MGw89jC/cjGfLcNOrtMYtGqm81g==" + }, + "object-keys": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.1.1.tgz", + "integrity": "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==" + }, + "object.assign": { + "version": "4.1.4", + "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.4.tgz", + "integrity": 
"sha512-1mxKf0e58bvyjSCtKYY4sRe9itRk3PJpquJOjeIkz885CczcI4IvJJDLPS72oowuSh+pBxUFROpX+TU++hxhZQ==", + "requires": { + "call-bind": "^1.0.2", + "define-properties": "^1.1.4", + "has-symbols": "^1.0.3", + "object-keys": "^1.1.1" + } + }, + "obuf": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/obuf/-/obuf-1.1.2.tgz", + "integrity": "sha512-PX1wu0AmAdPqOL1mWhqmlOd8kOIZQwGZw6rh7uby9fTc5lhaOWFLX3I6R1hrF9k3zUY40e6igsLGkDXK92LJNg==" + }, + "on-finished": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz", + "integrity": "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==", + "requires": { + "ee-first": "1.1.1" + } + }, + "on-headers": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/on-headers/-/on-headers-1.0.2.tgz", + "integrity": "sha512-pZAE+FJLoyITytdqK0U5s+FIpjN0JP3OzFi/u8Rx+EV5/W+JTWGXG8xFzevE7AjBfDqHv/8vL8qQsIhHnqRkrA==" + }, + "once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", + "requires": { + "wrappy": "1" + } + }, + "onetime": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", + "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", + "requires": { + "mimic-fn": "^2.1.0" + } + }, + "open": { + "version": "8.4.2", + "resolved": "https://registry.npmjs.org/open/-/open-8.4.2.tgz", + "integrity": "sha512-7x81NCL719oNbsq/3mh+hVrAWmFuEYUqrq/Iw3kUzH8ReypT9QQ0BLoJS7/G9k6N81XjW4qHWtjWwe/9eLy1EQ==", + "requires": { + "define-lazy-prop": "^2.0.0", + "is-docker": "^2.1.1", + "is-wsl": "^2.2.0" + } + }, + "opener": { + "version": "1.5.2", + "resolved": "https://registry.npmjs.org/opener/-/opener-1.5.2.tgz", + "integrity": 
"sha512-ur5UIdyw5Y7yEj9wLzhqXiy6GZ3Mwx0yGI+5sMn2r0N0v3cKJvUmFH5yPP+WXh9e0xfyzyJX95D8l088DNFj7A==" + }, + "p-cancelable": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/p-cancelable/-/p-cancelable-1.1.0.tgz", + "integrity": "sha512-s73XxOZ4zpt1edZYZzvhqFa6uvQc1vwUa0K0BdtIZgQMAJj9IbebH+JkgKZc9h+B05PKHLOTl4ajG1BmNrVZlw==" + }, + "p-limit": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", + "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", + "requires": { + "p-try": "^2.0.0" + } + }, + "p-locate": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz", + "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", + "requires": { + "p-limit": "^2.2.0" + } + }, + "p-map": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/p-map/-/p-map-4.0.0.tgz", + "integrity": "sha512-/bjOqmgETBYB5BoEeGVea8dmvHb2m9GLy1E9W43yeyfP6QQCZGFNa+XRceJEuDB6zqr+gKpIAmlLebMpykw/MQ==", + "requires": { + "aggregate-error": "^3.0.0" + } + }, + "p-retry": { + "version": "4.6.2", + "resolved": "https://registry.npmjs.org/p-retry/-/p-retry-4.6.2.tgz", + "integrity": "sha512-312Id396EbJdvRONlngUx0NydfrIQ5lsYu0znKVUzVvArzEIt08V1qhtyESbGVd1FGX7UKtiFp5uwKZdM8wIuQ==", + "requires": { + "@types/retry": "0.12.0", + "retry": "^0.13.1" + } + }, + "p-try": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", + "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==" + }, + "package-json": { + "version": "6.5.0", + "resolved": "https://registry.npmjs.org/package-json/-/package-json-6.5.0.tgz", + "integrity": "sha512-k3bdm2n25tkyxcjSKzB5x8kfVxlMdgsbPr0GkZcwHsLpba6cBjqCt1KlcChKEvxHIcTB1FVMuwoijZ26xex5MQ==", + "requires": { + "got": "^9.6.0", + "registry-auth-token": "^4.0.0", + 
"registry-url": "^5.0.0", + "semver": "^6.2.0" + }, + "dependencies": { + "semver": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", + "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==" + } + } + }, + "param-case": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/param-case/-/param-case-3.0.4.tgz", + "integrity": "sha512-RXlj7zCYokReqWpOPH9oYivUzLYZ5vAPIfEmCTNViosC78F8F0H9y7T7gG2M39ymgutxF5gcFEsyZQSph9Bp3A==", + "requires": { + "dot-case": "^3.0.4", + "tslib": "^2.0.3" + } + }, + "parent-module": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", + "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", + "requires": { + "callsites": "^3.0.0" + } + }, + "parse-entities": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/parse-entities/-/parse-entities-2.0.0.tgz", + "integrity": "sha512-kkywGpCcRYhqQIchaWqZ875wzpS/bMKhz5HnN3p7wveJTkTtyAB/AlnS0f8DFSqYW1T82t6yEAkEcB+A1I3MbQ==", + "requires": { + "character-entities": "^1.0.0", + "character-entities-legacy": "^1.0.0", + "character-reference-invalid": "^1.0.0", + "is-alphanumerical": "^1.0.0", + "is-decimal": "^1.0.0", + "is-hexadecimal": "^1.0.0" + } + }, + "parse-json": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.2.0.tgz", + "integrity": "sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==", + "requires": { + "@babel/code-frame": "^7.0.0", + "error-ex": "^1.3.1", + "json-parse-even-better-errors": "^2.3.0", + "lines-and-columns": "^1.1.6" + } + }, + "parse5": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/parse5/-/parse5-6.0.1.tgz", + "integrity": "sha512-Ofn/CTFzRGTTxwpNEs9PP93gXShHcTq255nzRYSKe8AkVpZY7e1fpmTfOyoIvjP5HG7Z2ZM7VS9PPhQGW2pOpw==" + }, + "parseurl": 
{ + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz", + "integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==" + }, + "pascal-case": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/pascal-case/-/pascal-case-3.1.2.tgz", + "integrity": "sha512-uWlGT3YSnK9x3BQJaOdcZwrnV6hPpd8jFH1/ucpiLRPh/2zCVJKS19E4GvYHvaCcACn3foXZ0cLB9Wrx1KGe5g==", + "requires": { + "no-case": "^3.0.4", + "tslib": "^2.0.3" + } + }, + "path": { + "version": "0.12.7", + "resolved": "https://registry.npmjs.org/path/-/path-0.12.7.tgz", + "integrity": "sha512-aXXC6s+1w7otVF9UletFkFcDsJeO7lSZBPUQhtb5O0xJe8LtYhj/GxldoL09bBj9+ZmE2hNoHqQSFMN5fikh4Q==", + "requires": { + "process": "^0.11.1", + "util": "^0.10.3" + } + }, + "path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==" + }, + "path-is-absolute": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", + "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==" + }, + "path-is-inside": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/path-is-inside/-/path-is-inside-1.0.2.tgz", + "integrity": "sha512-DUWJr3+ULp4zXmol/SZkFf3JGsS9/SIv+Y3Rt93/UjPpDpklB5f1er4O3POIbUuUJ3FXgqte2Q7SrU6zAqwk8w==" + }, + "path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==" + }, + "path-parse": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", + "integrity": 
"sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==" + }, + "path-to-regexp": { + "version": "1.8.0", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-1.8.0.tgz", + "integrity": "sha512-n43JRhlUKUAlibEJhPeir1ncUID16QnEjNpwzNdO3Lm4ywrBpBZ5oLD0I6br9evr1Y9JTqwRtAh7JLoOzAQdVA==", + "requires": { + "isarray": "0.0.1" + } + }, + "path-type": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz", + "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==" + }, + "picocolors": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.0.tgz", + "integrity": "sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==" + }, + "picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==" + }, + "pkg-dir": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-4.2.0.tgz", + "integrity": "sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ==", + "requires": { + "find-up": "^4.0.0" + } + }, + "pkg-up": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/pkg-up/-/pkg-up-3.1.0.tgz", + "integrity": "sha512-nDywThFk1i4BQK4twPQ6TA4RT8bDY96yeuCVBWL3ePARCiEKDRSrNGbFIgUJpLp+XeIR65v8ra7WuJOFUBtkMA==", + "requires": { + "find-up": "^3.0.0" + }, + "dependencies": { + "find-up": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", + "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", + "requires": { + "locate-path": "^3.0.0" + } + }, + "locate-path": { + "version": "3.0.0", + "resolved": 
"https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", + "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", + "requires": { + "p-locate": "^3.0.0", + "path-exists": "^3.0.0" + } + }, + "p-locate": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", + "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", + "requires": { + "p-limit": "^2.0.0" + } + }, + "path-exists": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", + "integrity": "sha512-bpC7GYwiDYQ4wYLe+FA8lhRjhQCMcQGuSgGGqDkg/QerRWw9CmGRT0iSOVRSZJ29NMLZgIzqaljJ63oaL4NIJQ==" + } + } + }, + "postcss": { + "version": "8.4.24", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.24.tgz", + "integrity": "sha512-M0RzbcI0sO/XJNucsGjvWU9ERWxb/ytp1w6dKtxTKgixdtQDq4rmx/g8W1hnaheq9jgwL/oyEdH5Bc4WwJKMqg==", + "requires": { + "nanoid": "^3.3.6", + "picocolors": "^1.0.0", + "source-map-js": "^1.0.2" + } + }, + "postcss-calc": { + "version": "8.2.4", + "resolved": "https://registry.npmjs.org/postcss-calc/-/postcss-calc-8.2.4.tgz", + "integrity": "sha512-SmWMSJmB8MRnnULldx0lQIyhSNvuDl9HfrZkaqqE/WHAhToYsAvDq+yAsA/kIyINDszOp3Rh0GFoNuH5Ypsm3Q==", + "requires": { + "postcss-selector-parser": "^6.0.9", + "postcss-value-parser": "^4.2.0" + } + }, + "postcss-colormin": { + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/postcss-colormin/-/postcss-colormin-5.3.1.tgz", + "integrity": "sha512-UsWQG0AqTFQmpBegeLLc1+c3jIqBNB0zlDGRWR+dQ3pRKJL1oeMzyqmH3o2PIfn9MBdNrVPWhDbT769LxCTLJQ==", + "requires": { + "browserslist": "^4.21.4", + "caniuse-api": "^3.0.0", + "colord": "^2.9.1", + "postcss-value-parser": "^4.2.0" + } + }, + "postcss-convert-values": { + "version": "5.1.3", + "resolved": "https://registry.npmjs.org/postcss-convert-values/-/postcss-convert-values-5.1.3.tgz", + "integrity": 
"sha512-82pC1xkJZtcJEfiLw6UXnXVXScgtBrjlO5CBmuDQc+dlb88ZYheFsjTn40+zBVi3DkfF7iezO0nJUPLcJK3pvA==", + "requires": { + "browserslist": "^4.21.4", + "postcss-value-parser": "^4.2.0" + } + }, + "postcss-discard-comments": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/postcss-discard-comments/-/postcss-discard-comments-5.1.2.tgz", + "integrity": "sha512-+L8208OVbHVF2UQf1iDmRcbdjJkuBF6IS29yBDSiWUIzpYaAhtNl6JYnYm12FnkeCwQqF5LeklOu6rAqgfBZqQ==", + "requires": {} + }, + "postcss-discard-duplicates": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/postcss-discard-duplicates/-/postcss-discard-duplicates-5.1.0.tgz", + "integrity": "sha512-zmX3IoSI2aoenxHV6C7plngHWWhUOV3sP1T8y2ifzxzbtnuhk1EdPwm0S1bIUNaJ2eNbWeGLEwzw8huPD67aQw==", + "requires": {} + }, + "postcss-discard-empty": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/postcss-discard-empty/-/postcss-discard-empty-5.1.1.tgz", + "integrity": "sha512-zPz4WljiSuLWsI0ir4Mcnr4qQQ5e1Ukc3i7UfE2XcrwKK2LIPIqE5jxMRxO6GbI3cv//ztXDsXwEWT3BHOGh3A==", + "requires": {} + }, + "postcss-discard-overridden": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/postcss-discard-overridden/-/postcss-discard-overridden-5.1.0.tgz", + "integrity": "sha512-21nOL7RqWR1kasIVdKs8HNqQJhFxLsyRfAnUDm4Fe4t4mCWL9OJiHvlHPjcd8zc5Myu89b/7wZDnOSjFgeWRtw==", + "requires": {} + }, + "postcss-discard-unused": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/postcss-discard-unused/-/postcss-discard-unused-5.1.0.tgz", + "integrity": "sha512-KwLWymI9hbwXmJa0dkrzpRbSJEh0vVUd7r8t0yOGPcfKzyJJxFM8kLyC5Ev9avji6nY95pOp1W6HqIrfT+0VGw==", + "requires": { + "postcss-selector-parser": "^6.0.5" + } + }, + "postcss-loader": { + "version": "7.3.2", + "resolved": "https://registry.npmjs.org/postcss-loader/-/postcss-loader-7.3.2.tgz", + "integrity": "sha512-c7qDlXErX6n0VT+LUsW+nwefVtTu3ORtVvK8EXuUIDcxo+b/euYqpuHlJAvePb0Af5e8uMjR/13e0lTuYifaig==", + "requires": { + "cosmiconfig": "^8.1.3", + 
"jiti": "^1.18.2", + "klona": "^2.0.6", + "semver": "^7.3.8" + }, + "dependencies": { + "cosmiconfig": { + "version": "8.2.0", + "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-8.2.0.tgz", + "integrity": "sha512-3rTMnFJA1tCOPwRxtgF4wd7Ab2qvDbL8jX+3smjIbS4HlZBagTlpERbdN7iAbWlrfxE3M8c27kTwTawQ7st+OQ==", + "requires": { + "import-fresh": "^3.2.1", + "js-yaml": "^4.1.0", + "parse-json": "^5.0.0", + "path-type": "^4.0.0" + } + } + } + }, + "postcss-merge-idents": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/postcss-merge-idents/-/postcss-merge-idents-5.1.1.tgz", + "integrity": "sha512-pCijL1TREiCoog5nQp7wUe+TUonA2tC2sQ54UGeMmryK3UFGIYKqDyjnqd6RcuI4znFn9hWSLNN8xKE/vWcUQw==", + "requires": { + "cssnano-utils": "^3.1.0", + "postcss-value-parser": "^4.2.0" + } + }, + "postcss-merge-longhand": { + "version": "5.1.7", + "resolved": "https://registry.npmjs.org/postcss-merge-longhand/-/postcss-merge-longhand-5.1.7.tgz", + "integrity": "sha512-YCI9gZB+PLNskrK0BB3/2OzPnGhPkBEwmwhfYk1ilBHYVAZB7/tkTHFBAnCrvBBOmeYyMYw3DMjT55SyxMBzjQ==", + "requires": { + "postcss-value-parser": "^4.2.0", + "stylehacks": "^5.1.1" + } + }, + "postcss-merge-rules": { + "version": "5.1.4", + "resolved": "https://registry.npmjs.org/postcss-merge-rules/-/postcss-merge-rules-5.1.4.tgz", + "integrity": "sha512-0R2IuYpgU93y9lhVbO/OylTtKMVcHb67zjWIfCiKR9rWL3GUk1677LAqD/BcHizukdZEjT8Ru3oHRoAYoJy44g==", + "requires": { + "browserslist": "^4.21.4", + "caniuse-api": "^3.0.0", + "cssnano-utils": "^3.1.0", + "postcss-selector-parser": "^6.0.5" + } + }, + "postcss-minify-font-values": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/postcss-minify-font-values/-/postcss-minify-font-values-5.1.0.tgz", + "integrity": "sha512-el3mYTgx13ZAPPirSVsHqFzl+BBBDrXvbySvPGFnQcTI4iNslrPaFq4muTkLZmKlGk4gyFAYUBMH30+HurREyA==", + "requires": { + "postcss-value-parser": "^4.2.0" + } + }, + "postcss-minify-gradients": { + "version": "5.1.1", + "resolved": 
"https://registry.npmjs.org/postcss-minify-gradients/-/postcss-minify-gradients-5.1.1.tgz", + "integrity": "sha512-VGvXMTpCEo4qHTNSa9A0a3D+dxGFZCYwR6Jokk+/3oB6flu2/PnPXAh2x7x52EkY5xlIHLm+Le8tJxe/7TNhzw==", + "requires": { + "colord": "^2.9.1", + "cssnano-utils": "^3.1.0", + "postcss-value-parser": "^4.2.0" + } + }, + "postcss-minify-params": { + "version": "5.1.4", + "resolved": "https://registry.npmjs.org/postcss-minify-params/-/postcss-minify-params-5.1.4.tgz", + "integrity": "sha512-+mePA3MgdmVmv6g+30rn57USjOGSAyuxUmkfiWpzalZ8aiBkdPYjXWtHuwJGm1v5Ojy0Z0LaSYhHaLJQB0P8Jw==", + "requires": { + "browserslist": "^4.21.4", + "cssnano-utils": "^3.1.0", + "postcss-value-parser": "^4.2.0" + } + }, + "postcss-minify-selectors": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/postcss-minify-selectors/-/postcss-minify-selectors-5.2.1.tgz", + "integrity": "sha512-nPJu7OjZJTsVUmPdm2TcaiohIwxP+v8ha9NehQ2ye9szv4orirRU3SDdtUmKH+10nzn0bAyOXZ0UEr7OpvLehg==", + "requires": { + "postcss-selector-parser": "^6.0.5" + } + }, + "postcss-modules-extract-imports": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/postcss-modules-extract-imports/-/postcss-modules-extract-imports-3.0.0.tgz", + "integrity": "sha512-bdHleFnP3kZ4NYDhuGlVK+CMrQ/pqUm8bx/oGL93K6gVwiclvX5x0n76fYMKuIGKzlABOy13zsvqjb0f92TEXw==", + "requires": {} + }, + "postcss-modules-local-by-default": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/postcss-modules-local-by-default/-/postcss-modules-local-by-default-4.0.3.tgz", + "integrity": "sha512-2/u2zraspoACtrbFRnTijMiQtb4GW4BvatjaG/bCjYQo8kLTdevCUlwuBHx2sCnSyrI3x3qj4ZK1j5LQBgzmwA==", + "requires": { + "icss-utils": "^5.0.0", + "postcss-selector-parser": "^6.0.2", + "postcss-value-parser": "^4.1.0" + } + }, + "postcss-modules-scope": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/postcss-modules-scope/-/postcss-modules-scope-3.0.0.tgz", + "integrity": 
"sha512-hncihwFA2yPath8oZ15PZqvWGkWf+XUfQgUGamS4LqoP1anQLOsOJw0vr7J7IwLpoY9fatA2qiGUGmuZL0Iqlg==", + "requires": { + "postcss-selector-parser": "^6.0.4" + } + }, + "postcss-modules-values": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/postcss-modules-values/-/postcss-modules-values-4.0.0.tgz", + "integrity": "sha512-RDxHkAiEGI78gS2ofyvCsu7iycRv7oqw5xMWn9iMoR0N/7mf9D50ecQqUo5BZ9Zh2vH4bCUR/ktCqbB9m8vJjQ==", + "requires": { + "icss-utils": "^5.0.0" + } + }, + "postcss-normalize-charset": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/postcss-normalize-charset/-/postcss-normalize-charset-5.1.0.tgz", + "integrity": "sha512-mSgUJ+pd/ldRGVx26p2wz9dNZ7ji6Pn8VWBajMXFf8jk7vUoSrZ2lt/wZR7DtlZYKesmZI680qjr2CeFF2fbUg==", + "requires": {} + }, + "postcss-normalize-display-values": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/postcss-normalize-display-values/-/postcss-normalize-display-values-5.1.0.tgz", + "integrity": "sha512-WP4KIM4o2dazQXWmFaqMmcvsKmhdINFblgSeRgn8BJ6vxaMyaJkwAzpPpuvSIoG/rmX3M+IrRZEz2H0glrQNEA==", + "requires": { + "postcss-value-parser": "^4.2.0" + } + }, + "postcss-normalize-positions": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/postcss-normalize-positions/-/postcss-normalize-positions-5.1.1.tgz", + "integrity": "sha512-6UpCb0G4eofTCQLFVuI3EVNZzBNPiIKcA1AKVka+31fTVySphr3VUgAIULBhxZkKgwLImhzMR2Bw1ORK+37INg==", + "requires": { + "postcss-value-parser": "^4.2.0" + } + }, + "postcss-normalize-repeat-style": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/postcss-normalize-repeat-style/-/postcss-normalize-repeat-style-5.1.1.tgz", + "integrity": "sha512-mFpLspGWkQtBcWIRFLmewo8aC3ImN2i/J3v8YCFUwDnPu3Xz4rLohDO26lGjwNsQxB3YF0KKRwspGzE2JEuS0g==", + "requires": { + "postcss-value-parser": "^4.2.0" + } + }, + "postcss-normalize-string": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/postcss-normalize-string/-/postcss-normalize-string-5.1.0.tgz", + 
"integrity": "sha512-oYiIJOf4T9T1N4i+abeIc7Vgm/xPCGih4bZz5Nm0/ARVJ7K6xrDlLwvwqOydvyL3RHNf8qZk6vo3aatiw/go3w==", + "requires": { + "postcss-value-parser": "^4.2.0" + } + }, + "postcss-normalize-timing-functions": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/postcss-normalize-timing-functions/-/postcss-normalize-timing-functions-5.1.0.tgz", + "integrity": "sha512-DOEkzJ4SAXv5xkHl0Wa9cZLF3WCBhF3o1SKVxKQAa+0pYKlueTpCgvkFAHfk+Y64ezX9+nITGrDZeVGgITJXjg==", + "requires": { + "postcss-value-parser": "^4.2.0" + } + }, + "postcss-normalize-unicode": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/postcss-normalize-unicode/-/postcss-normalize-unicode-5.1.1.tgz", + "integrity": "sha512-qnCL5jzkNUmKVhZoENp1mJiGNPcsJCs1aaRmURmeJGES23Z/ajaln+EPTD+rBeNkSryI+2WTdW+lwcVdOikrpA==", + "requires": { + "browserslist": "^4.21.4", + "postcss-value-parser": "^4.2.0" + } + }, + "postcss-normalize-url": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/postcss-normalize-url/-/postcss-normalize-url-5.1.0.tgz", + "integrity": "sha512-5upGeDO+PVthOxSmds43ZeMeZfKH+/DKgGRD7TElkkyS46JXAUhMzIKiCa7BabPeIy3AQcTkXwVVN7DbqsiCew==", + "requires": { + "normalize-url": "^6.0.1", + "postcss-value-parser": "^4.2.0" + } + }, + "postcss-normalize-whitespace": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/postcss-normalize-whitespace/-/postcss-normalize-whitespace-5.1.1.tgz", + "integrity": "sha512-83ZJ4t3NUDETIHTa3uEg6asWjSBYL5EdkVB0sDncx9ERzOKBVJIUeDO9RyA9Zwtig8El1d79HBp0JEi8wvGQnA==", + "requires": { + "postcss-value-parser": "^4.2.0" + } + }, + "postcss-ordered-values": { + "version": "5.1.3", + "resolved": "https://registry.npmjs.org/postcss-ordered-values/-/postcss-ordered-values-5.1.3.tgz", + "integrity": "sha512-9UO79VUhPwEkzbb3RNpqqghc6lcYej1aveQteWY+4POIwlqkYE21HKWaLDF6lWNuqCobEAyTovVhtI32Rbv2RQ==", + "requires": { + "cssnano-utils": "^3.1.0", + "postcss-value-parser": "^4.2.0" + } + }, + "postcss-reduce-idents": { + 
"version": "5.2.0", + "resolved": "https://registry.npmjs.org/postcss-reduce-idents/-/postcss-reduce-idents-5.2.0.tgz", + "integrity": "sha512-BTrLjICoSB6gxbc58D5mdBK8OhXRDqud/zodYfdSi52qvDHdMwk+9kB9xsM8yJThH/sZU5A6QVSmMmaN001gIg==", + "requires": { + "postcss-value-parser": "^4.2.0" + } + }, + "postcss-reduce-initial": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/postcss-reduce-initial/-/postcss-reduce-initial-5.1.2.tgz", + "integrity": "sha512-dE/y2XRaqAi6OvjzD22pjTUQ8eOfc6m/natGHgKFBK9DxFmIm69YmaRVQrGgFlEfc1HePIurY0TmDeROK05rIg==", + "requires": { + "browserslist": "^4.21.4", + "caniuse-api": "^3.0.0" + } + }, + "postcss-reduce-transforms": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/postcss-reduce-transforms/-/postcss-reduce-transforms-5.1.0.tgz", + "integrity": "sha512-2fbdbmgir5AvpW9RLtdONx1QoYG2/EtqpNQbFASDlixBbAYuTcJ0dECwlqNqH7VbaUnEnh8SrxOe2sRIn24XyQ==", + "requires": { + "postcss-value-parser": "^4.2.0" + } + }, + "postcss-selector-parser": { + "version": "6.0.13", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.0.13.tgz", + "integrity": "sha512-EaV1Gl4mUEV4ddhDnv/xtj7sxwrwxdetHdWUGnT4VJQf+4d05v6lHYZr8N573k5Z0BViss7BDhfWtKS3+sfAqQ==", + "requires": { + "cssesc": "^3.0.0", + "util-deprecate": "^1.0.2" + } + }, + "postcss-sort-media-queries": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/postcss-sort-media-queries/-/postcss-sort-media-queries-4.4.1.tgz", + "integrity": "sha512-QDESFzDDGKgpiIh4GYXsSy6sek2yAwQx1JASl5AxBtU1Lq2JfKBljIPNdil989NcSKRQX1ToiaKphImtBuhXWw==", + "requires": { + "sort-css-media-queries": "2.1.0" + } + }, + "postcss-svgo": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/postcss-svgo/-/postcss-svgo-5.1.0.tgz", + "integrity": "sha512-D75KsH1zm5ZrHyxPakAxJWtkyXew5qwS70v56exwvw542d9CRtTo78K0WeFxZB4G7JXKKMbEZtZayTGdIky/eA==", + "requires": { + "postcss-value-parser": "^4.2.0", + "svgo": "^2.7.0" + } + }, + 
"postcss-unique-selectors": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/postcss-unique-selectors/-/postcss-unique-selectors-5.1.1.tgz", + "integrity": "sha512-5JiODlELrz8L2HwxfPnhOWZYWDxVHWL83ufOv84NrcgipI7TaeRsatAhK4Tr2/ZiYldpK/wBvw5BD3qfaK96GA==", + "requires": { + "postcss-selector-parser": "^6.0.5" + } + }, + "postcss-value-parser": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.2.0.tgz", + "integrity": "sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ==" + }, + "postcss-zindex": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/postcss-zindex/-/postcss-zindex-5.1.0.tgz", + "integrity": "sha512-fgFMf0OtVSBR1va1JNHYgMxYk73yhn/qb4uQDq1DLGYolz8gHCyr/sesEuGUaYs58E3ZJRcpoGuPVoB7Meiq9A==", + "requires": {} + }, + "prepend-http": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/prepend-http/-/prepend-http-2.0.0.tgz", + "integrity": "sha512-ravE6m9Atw9Z/jjttRUZ+clIXogdghyZAuWJ3qEzjT+jI/dL1ifAqhZeC5VHzQp1MSt1+jxKkFNemj/iO7tVUA==" + }, + "pretty-error": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/pretty-error/-/pretty-error-4.0.0.tgz", + "integrity": "sha512-AoJ5YMAcXKYxKhuJGdcvse+Voc6v1RgnsR3nWcYU7q4t6z0Q6T86sv5Zq8VIRbOWWFpvdGE83LtdSMNd+6Y0xw==", + "requires": { + "lodash": "^4.17.20", + "renderkid": "^3.0.0" + } + }, + "pretty-time": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/pretty-time/-/pretty-time-1.1.0.tgz", + "integrity": "sha512-28iF6xPQrP8Oa6uxE6a1biz+lWeTOAPKggvjB8HAs6nVMKZwf5bG++632Dx614hIWgUPkgivRfG+a8uAXGTIbA==" + }, + "process": { + "version": "0.11.10", + "resolved": "https://registry.npmjs.org/process/-/process-0.11.10.tgz", + "integrity": "sha512-cdGef/drWFoydD1JsMzuFf8100nZl+GT+yacc2bEced5f9Rjk4z+WtFUTBu9PhOi9j/jfmBPu0mMEY4wIdAF8A==" + }, + "process-nextick-args": { + "version": "2.0.1", + "resolved": 
"https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz", + "integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==" + }, + "prompts": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/prompts/-/prompts-2.4.2.tgz", + "integrity": "sha512-NxNv/kLguCA7p3jE8oL2aEBsrJWgAakBpgmgK6lpPWV+WuOmY6r2/zbAVnP+T8bQlA0nzHXSJSJW0Hq7ylaD2Q==", + "requires": { + "kleur": "^3.0.3", + "sisteransi": "^1.0.5" + } + }, + "prop-types": { + "version": "15.8.1", + "resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.8.1.tgz", + "integrity": "sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg==", + "requires": { + "loose-envify": "^1.4.0", + "object-assign": "^4.1.1", + "react-is": "^16.13.1" + } + }, + "property-expr": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/property-expr/-/property-expr-2.0.5.tgz", + "integrity": "sha512-IJUkICM5dP5znhCckHSv30Q4b5/JA5enCtkRHYaOVOAocnH/1BQEYTC5NMfT3AVl/iXKdr3aqQbQn9DxyWknwA==" + }, + "property-information": { + "version": "5.6.0", + "resolved": "https://registry.npmjs.org/property-information/-/property-information-5.6.0.tgz", + "integrity": "sha512-YUHSPk+A30YPv+0Qf8i9Mbfe/C0hdPXk1s1jPVToV8pk8BQtpw10ct89Eo7OWkutrwqvT0eicAxlOg3dOAu8JA==", + "requires": { + "xtend": "^4.0.0" + } + }, + "proxy-addr": { + "version": "2.0.7", + "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.7.tgz", + "integrity": "sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==", + "requires": { + "forwarded": "0.2.0", + "ipaddr.js": "1.9.1" + }, + "dependencies": { + "ipaddr.js": { + "version": "1.9.1", + "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz", + "integrity": "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==" + } + } + }, + "pump": { + "version": "3.0.0", + "resolved": 
"https://registry.npmjs.org/pump/-/pump-3.0.0.tgz", + "integrity": "sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww==", + "requires": { + "end-of-stream": "^1.1.0", + "once": "^1.3.1" + } + }, + "punycode": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-1.4.1.tgz", + "integrity": "sha512-jmYNElW7yvO7TV33CjSmvSiE2yco3bV2czu/OzDKdMNVZQWfxCblURLhf+47syQRBntjfLdd/H0egrzIG+oaFQ==" + }, + "pupa": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/pupa/-/pupa-2.1.1.tgz", + "integrity": "sha512-l1jNAspIBSFqbT+y+5FosojNpVpF94nlI+wDUpqP9enwOTfHx9f0gh5nB96vl+6yTpsJsypeNrwfzPrKuHB41A==", + "requires": { + "escape-goat": "^2.0.0" + } + }, + "qs": { + "version": "6.11.0", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.11.0.tgz", + "integrity": "sha512-MvjoMCJwEarSbUYk5O+nmoSzSutSsTwF85zcHPQ9OrlFoZOYIjaqBAJIqIXjptyD5vThxGq52Xu/MaJzRkIk4Q==", + "requires": { + "side-channel": "^1.0.4" + } + }, + "queue": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/queue/-/queue-6.0.2.tgz", + "integrity": "sha512-iHZWu+q3IdFZFX36ro/lKBkSvfkztY5Y7HMiPlOUjhupPcG2JMfst2KKEpu5XndviX/3UhFbRngUPNKtgvtZiA==", + "requires": { + "inherits": "~2.0.3" + } + }, + "queue-microtask": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", + "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==" + }, + "randombytes": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz", + "integrity": "sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==", + "requires": { + "safe-buffer": "^5.1.0" + } + }, + "range-parser": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.0.tgz", + "integrity": 
"sha512-kA5WQoNVo4t9lNx2kQNFCxKeBl5IbbSNBl1M/tLkw9WCn+hxNBAW5Qh8gdhs63CJnhjJ2zQWFoqPJP2sK1AV5A==" + }, + "raw-body": { + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.1.tgz", + "integrity": "sha512-qqJBtEyVgS0ZmPGdCFPWJ3FreoqvG4MVQln/kCgF7Olq95IbOp0/BWyMwbdtn4VTvkM8Y7khCQ2Xgk/tcrCXig==", + "requires": { + "bytes": "3.1.2", + "http-errors": "2.0.0", + "iconv-lite": "0.4.24", + "unpipe": "1.0.0" + }, + "dependencies": { + "bytes": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz", + "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==" + } + } + }, + "rc": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/rc/-/rc-1.2.8.tgz", + "integrity": "sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw==", + "requires": { + "deep-extend": "^0.6.0", + "ini": "~1.3.0", + "minimist": "^1.2.0", + "strip-json-comments": "~2.0.1" + } + }, + "react": { + "version": "17.0.2", + "resolved": "https://registry.npmjs.org/react/-/react-17.0.2.tgz", + "integrity": "sha512-gnhPt75i/dq/z3/6q/0asP78D0u592D5L1pd7M8P+dck6Fu/jJeL6iVVK23fptSUZj8Vjf++7wXA8UNclGQcbA==", + "peer": true, + "requires": { + "loose-envify": "^1.1.0", + "object-assign": "^4.1.1" + } + }, + "react-dev-utils": { + "version": "12.0.1", + "resolved": "https://registry.npmjs.org/react-dev-utils/-/react-dev-utils-12.0.1.tgz", + "integrity": "sha512-84Ivxmr17KjUupyqzFode6xKhjwuEJDROWKJy/BthkL7Wn6NJ8h4WE6k/exAv6ImS+0oZLRRW5j/aINMHyeGeQ==", + "requires": { + "@babel/code-frame": "^7.16.0", + "address": "^1.1.2", + "browserslist": "^4.18.1", + "chalk": "^4.1.2", + "cross-spawn": "^7.0.3", + "detect-port-alt": "^1.1.6", + "escape-string-regexp": "^4.0.0", + "filesize": "^8.0.6", + "find-up": "^5.0.0", + "fork-ts-checker-webpack-plugin": "^6.5.0", + "global-modules": "^2.0.0", + "globby": "^11.0.4", + "gzip-size": "^6.0.0", + "immer": 
"^9.0.7", + "is-root": "^2.1.0", + "loader-utils": "^3.2.0", + "open": "^8.4.0", + "pkg-up": "^3.1.0", + "prompts": "^2.4.2", + "react-error-overlay": "^6.0.11", + "recursive-readdir": "^2.2.2", + "shell-quote": "^1.7.3", + "strip-ansi": "^6.0.1", + "text-table": "^0.2.0" + }, + "dependencies": { + "find-up": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", + "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", + "requires": { + "locate-path": "^6.0.0", + "path-exists": "^4.0.0" + } + }, + "loader-utils": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-3.2.1.tgz", + "integrity": "sha512-ZvFw1KWS3GVyYBYb7qkmRM/WwL2TQQBxgCK62rlvm4WpVQ23Nb4tYjApUlfjrEGvOs7KHEsmyUn75OHZrJMWPw==" + }, + "locate-path": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", + "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", + "requires": { + "p-locate": "^5.0.0" + } + }, + "p-limit": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", + "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", + "requires": { + "yocto-queue": "^0.1.0" + } + }, + "p-locate": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", + "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", + "requires": { + "p-limit": "^3.0.2" + } + } + } + }, + "react-dom": { + "version": "17.0.2", + "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-17.0.2.tgz", + "integrity": "sha512-s4h96KtLDUQlsENhMn1ar8t2bEa+q/YAtj8pPPdIjPDGBDIVNsrD9aXNWqspUe6AzKCIG0C1HZZLqLV7qpOBGA==", + "peer": true, + "requires": { + "loose-envify": "^1.1.0", + "object-assign": "^4.1.1", 
+ "scheduler": "^0.20.2" + } + }, + "react-error-overlay": { + "version": "6.0.11", + "resolved": "https://registry.npmjs.org/react-error-overlay/-/react-error-overlay-6.0.11.tgz", + "integrity": "sha512-/6UZ2qgEyH2aqzYZgQPxEnz33NJ2gNsnHA2o5+o4wW9bLM/JYQitNP9xPhsXwC08hMMovfGe/8retsdDsczPRg==" + }, + "react-fast-compare": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/react-fast-compare/-/react-fast-compare-3.2.2.tgz", + "integrity": "sha512-nsO+KSNgo1SbJqJEYRE9ERzo7YtYbou/OqjSQKxV7jcKox7+usiUVZOAC+XnDOABXggQTno0Y1CpVnuWEc1boQ==" + }, + "react-helmet-async": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/react-helmet-async/-/react-helmet-async-1.3.0.tgz", + "integrity": "sha512-9jZ57/dAn9t3q6hneQS0wukqC2ENOBgMNVEhb/ZG9ZSxUetzVIw4iAmEU38IaVg3QGYauQPhSeUTuIUtFglWpg==", + "requires": { + "@babel/runtime": "^7.12.5", + "invariant": "^2.2.4", + "prop-types": "^15.7.2", + "react-fast-compare": "^3.2.0", + "shallowequal": "^1.1.0" + } + }, + "react-is": { + "version": "16.13.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz", + "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==" + }, + "react-loadable": { + "version": "npm:@docusaurus/react-loadable@5.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/react-loadable/-/react-loadable-5.5.2.tgz", + "integrity": "sha512-A3dYjdBGuy0IGT+wyLIGIKLRE+sAk1iNk0f1HjNDysO7u8lhL4N3VEm+FAubmJbAztn94F7MxBTPmnixbiyFdQ==", + "requires": { + "@types/react": "*", + "prop-types": "^15.6.2" + } + }, + "react-loadable-ssr-addon-v5-slorber": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/react-loadable-ssr-addon-v5-slorber/-/react-loadable-ssr-addon-v5-slorber-1.0.1.tgz", + "integrity": "sha512-lq3Lyw1lGku8zUEJPDxsNm1AfYHBrO9Y1+olAYwpUJ2IGFBskM0DMKok97A6LWUpHm+o7IvQBOWu9MLenp9Z+A==", + "requires": { + "@babel/runtime": "^7.10.3" + } + }, + "react-router": { + "version": "5.3.4", + "resolved": 
"https://registry.npmjs.org/react-router/-/react-router-5.3.4.tgz", + "integrity": "sha512-Ys9K+ppnJah3QuaRiLxk+jDWOR1MekYQrlytiXxC1RyfbdsZkS5pvKAzCCr031xHixZwpnsYNT5xysdFHQaYsA==", + "requires": { + "@babel/runtime": "^7.12.13", + "history": "^4.9.0", + "hoist-non-react-statics": "^3.1.0", + "loose-envify": "^1.3.1", + "path-to-regexp": "^1.7.0", + "prop-types": "^15.6.2", + "react-is": "^16.6.0", + "tiny-invariant": "^1.0.2", + "tiny-warning": "^1.0.0" + } + }, + "react-router-config": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/react-router-config/-/react-router-config-5.1.1.tgz", + "integrity": "sha512-DuanZjaD8mQp1ppHjgnnUnyOlqYXZVjnov/JzFhjLEwd3Z4dYjMSnqrEzzGThH47vpCOqPPwJM2FtthLeJ8Pbg==", + "requires": { + "@babel/runtime": "^7.1.2" + } + }, + "react-router-dom": { + "version": "5.3.4", + "resolved": "https://registry.npmjs.org/react-router-dom/-/react-router-dom-5.3.4.tgz", + "integrity": "sha512-m4EqFMHv/Ih4kpcBCONHbkT68KoAeHN4p3lAGoNryfHi0dMy0kCzEZakiKRsvg5wHZ/JLrLW8o8KomWiz/qbYQ==", + "requires": { + "@babel/runtime": "^7.12.13", + "history": "^4.9.0", + "loose-envify": "^1.3.1", + "prop-types": "^15.6.2", + "react-router": "5.3.4", + "tiny-invariant": "^1.0.2", + "tiny-warning": "^1.0.0" + } + }, + "readable-stream": { + "version": "3.6.2", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz", + "integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==", + "requires": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" + } + }, + "readdir-enhanced": { + "version": "1.5.2", + "resolved": "https://registry.npmjs.org/readdir-enhanced/-/readdir-enhanced-1.5.2.tgz", + "integrity": "sha512-oncAoS9LLjy/+DeZfSAdZBI/iFJGcPCOp44RPFI6FIMHuxt5CC5P0cUZ9mET+EZB9ONhcEvAids/lVRkj0sTHw==", + "requires": { + "call-me-maybe": "^1.0.1", + "es6-promise": "^4.1.0", + "glob-to-regexp": "^0.3.0" + } + }, + "readdirp": { + "version": 
"3.6.0", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", + "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", + "requires": { + "picomatch": "^2.2.1" + } + }, + "rechoir": { + "version": "0.6.2", + "resolved": "https://registry.npmjs.org/rechoir/-/rechoir-0.6.2.tgz", + "integrity": "sha512-HFM8rkZ+i3zrV+4LQjwQ0W+ez98pApMGM3HUrN04j3CqzPOzl9nmP15Y8YXNm8QHGv/eacOVEjqhmWpkRV0NAw==", + "requires": { + "resolve": "^1.1.6" + } + }, + "recursive-readdir": { + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/recursive-readdir/-/recursive-readdir-2.2.3.tgz", + "integrity": "sha512-8HrF5ZsXk5FAH9dgsx3BlUer73nIhuj+9OrQwEbLTPOBzGkL1lsFCR01am+v+0m2Cmbs1nP12hLDl5FA7EszKA==", + "requires": { + "minimatch": "^3.0.5" + } + }, + "regenerate": { + "version": "1.4.2", + "resolved": "https://registry.npmjs.org/regenerate/-/regenerate-1.4.2.tgz", + "integrity": "sha512-zrceR/XhGYU/d/opr2EKO7aRHUeiBI8qjtfHqADTwZd6Szfy16la6kqD0MIUs5z5hx6AaKa+PixpPrR289+I0A==" + }, + "regenerate-unicode-properties": { + "version": "10.1.0", + "resolved": "https://registry.npmjs.org/regenerate-unicode-properties/-/regenerate-unicode-properties-10.1.0.tgz", + "integrity": "sha512-d1VudCLoIGitcU/hEg2QqvyGZQmdC0Lf8BqdOMXGFSvJP4bNV1+XqbPQeHHLD51Jh4QJJ225dlIFvY4Ly6MXmQ==", + "requires": { + "regenerate": "^1.4.2" + } + }, + "regenerator-runtime": { + "version": "0.13.11", + "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.13.11.tgz", + "integrity": "sha512-kY1AZVr2Ra+t+piVaJ4gxaFaReZVH40AKNo7UCX6W+dEwBo/2oZJzqfuN1qLq1oL45o56cPaTXELwrTh8Fpggg==" + }, + "regenerator-transform": { + "version": "0.15.1", + "resolved": "https://registry.npmjs.org/regenerator-transform/-/regenerator-transform-0.15.1.tgz", + "integrity": "sha512-knzmNAcuyxV+gQCufkYcvOqX/qIIfHLv0u5x79kRxuGojfYVky1f15TzZEu2Avte8QGepvUNTnLskf8E6X6Vyg==", + "requires": { + "@babel/runtime": "^7.8.4" + } + }, + 
"regexpu-core": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/regexpu-core/-/regexpu-core-5.3.2.tgz", + "integrity": "sha512-RAM5FlZz+Lhmo7db9L298p2vHP5ZywrVXmVXpmAD9GuL5MPH6t9ROw1iA/wfHkQ76Qe7AaPF0nGuim96/IrQMQ==", + "requires": { + "@babel/regjsgen": "^0.8.0", + "regenerate": "^1.4.2", + "regenerate-unicode-properties": "^10.1.0", + "regjsparser": "^0.9.1", + "unicode-match-property-ecmascript": "^2.0.0", + "unicode-match-property-value-ecmascript": "^2.1.0" + } + }, + "registry-auth-token": { + "version": "4.2.2", + "resolved": "https://registry.npmjs.org/registry-auth-token/-/registry-auth-token-4.2.2.tgz", + "integrity": "sha512-PC5ZysNb42zpFME6D/XlIgtNGdTl8bBOCw90xQLVMpzuuubJKYDWFAEuUNc+Cn8Z8724tg2SDhDRrkVEsqfDMg==", + "requires": { + "rc": "1.2.8" + } + }, + "registry-url": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/registry-url/-/registry-url-5.1.0.tgz", + "integrity": "sha512-8acYXXTI0AkQv6RAOjE3vOaIXZkT9wo4LOFbBKYQEEnnMNBpKqdUrI6S4NT0KPIo/WVvJ5tE/X5LF/TQUf0ekw==", + "requires": { + "rc": "^1.2.8" + } + }, + "regjsparser": { + "version": "0.9.1", + "resolved": "https://registry.npmjs.org/regjsparser/-/regjsparser-0.9.1.tgz", + "integrity": "sha512-dQUtn90WanSNl+7mQKcXAgZxvUe7Z0SqXlgzv0za4LwiUhyzBC58yQO3liFoUgu8GiJVInAhJjkj1N0EtQ5nkQ==", + "requires": { + "jsesc": "~0.5.0" + }, + "dependencies": { + "jsesc": { + "version": "0.5.0", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-0.5.0.tgz", + "integrity": "sha512-uZz5UnB7u4T9LvwmFqXii7pZSouaRPorGs5who1Ip7VO0wxanFvBL7GkM6dTHlgX+jhBApRetaWpnDabOeTcnA==" + } + } + }, + "relateurl": { + "version": "0.2.7", + "resolved": "https://registry.npmjs.org/relateurl/-/relateurl-0.2.7.tgz", + "integrity": "sha512-G08Dxvm4iDN3MLM0EsP62EDV9IuhXPR6blNz6Utcp7zyV3tr4HVNINt6MpaRWbxoOHT3Q7YN2P+jaHX8vUbgog==" + }, + "remark-emoji": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/remark-emoji/-/remark-emoji-2.2.0.tgz", + "integrity": 
"sha512-P3cj9s5ggsUvWw5fS2uzCHJMGuXYRb0NnZqYlNecewXt8QBU9n5vW3DUUKOhepS8F9CwdMx9B8a3i7pqFWAI5w==", + "requires": { + "emoticon": "^3.2.0", + "node-emoji": "^1.10.0", + "unist-util-visit": "^2.0.3" + } + }, + "remark-footnotes": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/remark-footnotes/-/remark-footnotes-2.0.0.tgz", + "integrity": "sha512-3Clt8ZMH75Ayjp9q4CorNeyjwIxHFcTkaektplKGl2A1jNGEUey8cKL0ZC5vJwfcD5GFGsNLImLG/NGzWIzoMQ==" + }, + "remark-mdx": { + "version": "1.6.22", + "resolved": "https://registry.npmjs.org/remark-mdx/-/remark-mdx-1.6.22.tgz", + "integrity": "sha512-phMHBJgeV76uyFkH4rvzCftLfKCr2RZuF+/gmVcaKrpsihyzmhXjA0BEMDaPTXG5y8qZOKPVo83NAOX01LPnOQ==", + "requires": { + "@babel/core": "7.12.9", + "@babel/helper-plugin-utils": "7.10.4", + "@babel/plugin-proposal-object-rest-spread": "7.12.1", + "@babel/plugin-syntax-jsx": "7.12.1", + "@mdx-js/util": "1.6.22", + "is-alphabetical": "1.0.4", + "remark-parse": "8.0.3", + "unified": "9.2.0" + }, + "dependencies": { + "@babel/core": { + "version": "7.12.9", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.12.9.tgz", + "integrity": "sha512-gTXYh3M5wb7FRXQy+FErKFAv90BnlOuNn1QkCK2lREoPAjrQCO49+HVSrFoe5uakFAF5eenS75KbO2vQiLrTMQ==", + "requires": { + "@babel/code-frame": "^7.10.4", + "@babel/generator": "^7.12.5", + "@babel/helper-module-transforms": "^7.12.1", + "@babel/helpers": "^7.12.5", + "@babel/parser": "^7.12.7", + "@babel/template": "^7.12.7", + "@babel/traverse": "^7.12.9", + "@babel/types": "^7.12.7", + "convert-source-map": "^1.7.0", + "debug": "^4.1.0", + "gensync": "^1.0.0-beta.1", + "json5": "^2.1.2", + "lodash": "^4.17.19", + "resolve": "^1.3.2", + "semver": "^5.4.1", + "source-map": "^0.5.0" + } + }, + "@babel/helper-plugin-utils": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.10.4.tgz", + "integrity": 
"sha512-O4KCvQA6lLiMU9l2eawBPMf1xPP8xPfB3iEQw150hOVTqj/rfXz0ThTb4HEzqQfs2Bmo5Ay8BzxfzVtBrr9dVg==" + }, + "@babel/plugin-syntax-jsx": { + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.12.1.tgz", + "integrity": "sha512-1yRi7yAtB0ETgxdY9ti/p2TivUxJkTdhu/ZbF9MshVGqOx1TdB3b7xCXs49Fupgg50N45KcAsRP/ZqWjs9SRjg==", + "requires": { + "@babel/helper-plugin-utils": "^7.10.4" + } + }, + "semver": { + "version": "5.7.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", + "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==" + }, + "source-map": { + "version": "0.5.7", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz", + "integrity": "sha512-LbrmJOMUSdEVxIKvdcJzQC+nQhe8FUZQTXQy6+I75skNgn3OoQ0DZA8YnFa7gp8tqtL3KPf1kmo0R5DoApeSGQ==" + }, + "unified": { + "version": "9.2.0", + "resolved": "https://registry.npmjs.org/unified/-/unified-9.2.0.tgz", + "integrity": "sha512-vx2Z0vY+a3YoTj8+pttM3tiJHCwY5UFbYdiWrwBEbHmK8pvsPj2rtAX2BFfgXen8T39CJWblWRDT4L5WGXtDdg==", + "requires": { + "bail": "^1.0.0", + "extend": "^3.0.0", + "is-buffer": "^2.0.0", + "is-plain-obj": "^2.0.0", + "trough": "^1.0.0", + "vfile": "^4.0.0" + } + } + } + }, + "remark-parse": { + "version": "8.0.3", + "resolved": "https://registry.npmjs.org/remark-parse/-/remark-parse-8.0.3.tgz", + "integrity": "sha512-E1K9+QLGgggHxCQtLt++uXltxEprmWzNfg+MxpfHsZlrddKzZ/hZyWHDbK3/Ap8HJQqYJRXP+jHczdL6q6i85Q==", + "requires": { + "ccount": "^1.0.0", + "collapse-white-space": "^1.0.2", + "is-alphabetical": "^1.0.0", + "is-decimal": "^1.0.0", + "is-whitespace-character": "^1.0.0", + "is-word-character": "^1.0.0", + "markdown-escapes": "^1.0.0", + "parse-entities": "^2.0.0", + "repeat-string": "^1.5.4", + "state-toggle": "^1.0.0", + "trim": "0.0.1", + "trim-trailing-lines": "^1.0.0", + "unherit": "^1.0.4", + "unist-util-remove-position": "^2.0.0", + "vfile-location": 
"^3.0.0", + "xtend": "^4.0.1" + } + }, + "remark-squeeze-paragraphs": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/remark-squeeze-paragraphs/-/remark-squeeze-paragraphs-4.0.0.tgz", + "integrity": "sha512-8qRqmL9F4nuLPIgl92XUuxI3pFxize+F1H0e/W3llTk0UsjJaj01+RrirkMw7P21RKe4X6goQhYRSvNWX+70Rw==", + "requires": { + "mdast-squeeze-paragraphs": "^4.0.0" + } + }, + "renderkid": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/renderkid/-/renderkid-3.0.0.tgz", + "integrity": "sha512-q/7VIQA8lmM1hF+jn+sFSPWGlMkSAeNYcPLmDQx2zzuiDfaLrOmumR8iaUKlenFgh0XRPIUeSPlH3A+AW3Z5pg==", + "requires": { + "css-select": "^4.1.3", + "dom-converter": "^0.2.0", + "htmlparser2": "^6.1.0", + "lodash": "^4.17.21", + "strip-ansi": "^6.0.1" + } + }, + "repeat-string": { + "version": "1.6.1", + "resolved": "https://registry.npmjs.org/repeat-string/-/repeat-string-1.6.1.tgz", + "integrity": "sha512-PV0dzCYDNfRi1jCDbJzpW7jNNDRuCOG/jI5ctQcGKt/clZD+YcPS3yIlWuTJMmESC8aevCFmWJy5wjAFgNqN6w==" + }, + "require-from-string": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz", + "integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==" + }, + "require-like": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/require-like/-/require-like-0.1.2.tgz", + "integrity": "sha512-oyrU88skkMtDdauHDuKVrgR+zuItqr6/c//FXzvmxRGMexSDc6hNvJInGW3LL46n+8b50RykrvwSUIIQH2LQ5A==" + }, + "requires-port": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz", + "integrity": "sha512-KigOCHcocU3XODJxsu8i/j8T9tzT4adHiecwORRQ0ZZFcp7ahwXuRU1m+yuO90C5ZUyGeGfocHDI14M3L3yDAQ==" + }, + "resolve": { + "version": "1.22.2", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.2.tgz", + "integrity": "sha512-Sb+mjNHOULsBv818T40qSPeRiuWLyaGMa5ewydRLFimneixmVy2zdivRl+AF6jaYPC8ERxGDmFSiqui6SfPd+g==", + "requires": 
{ + "is-core-module": "^2.11.0", + "path-parse": "^1.0.7", + "supports-preserve-symlinks-flag": "^1.0.0" + } + }, + "resolve-from": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", + "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==" + }, + "resolve-pathname": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/resolve-pathname/-/resolve-pathname-3.0.0.tgz", + "integrity": "sha512-C7rARubxI8bXFNB/hqcp/4iUeIXJhJZvFPFPiSPRnhU5UPxzMFIl+2E6yY6c4k9giDJAhtV+enfA+G89N6Csng==" + }, + "responselike": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/responselike/-/responselike-1.0.2.tgz", + "integrity": "sha512-/Fpe5guzJk1gPqdJLJR5u7eG/gNY4nImjbRDaVWVMRhne55TCmj2i9Q+54PBRfatRC8v/rIiv9BN0pMd9OV5EQ==", + "requires": { + "lowercase-keys": "^1.0.0" + } + }, + "retry": { + "version": "0.13.1", + "resolved": "https://registry.npmjs.org/retry/-/retry-0.13.1.tgz", + "integrity": "sha512-XQBQ3I8W1Cge0Seh+6gjj03LbmRFWuoszgK9ooCpwYIrhhoO80pfq4cUkU5DkknwfOfFteRwlZ56PYOGYyFWdg==" + }, + "reusify": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz", + "integrity": "sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==" + }, + "rimraf": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", + "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", + "requires": { + "glob": "^7.1.3" + } + }, + "rtl-detect": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/rtl-detect/-/rtl-detect-1.0.4.tgz", + "integrity": "sha512-EBR4I2VDSSYr7PkBmFy04uhycIpDKp+21p/jARYXlCSjQksTBQcJ0HFUPOO79EPPH5JS6VAhiIQbycf0O3JAxQ==" + }, + "run-parallel": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", + "integrity": 
"sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", + "requires": { + "queue-microtask": "^1.2.2" + } + }, + "rxjs": { + "version": "7.8.1", + "resolved": "https://registry.npmjs.org/rxjs/-/rxjs-7.8.1.tgz", + "integrity": "sha512-AA3TVj+0A2iuIoQkWEK/tqFjBq2j+6PO6Y0zJcvzLAFhEFIO3HL0vls9hWLncZbAAbK0mar7oZ4V079I/qPMxg==", + "requires": { + "tslib": "^2.1.0" + } + }, + "safe-buffer": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==" + }, + "safer-buffer": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", + "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==" + }, + "scheduler": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.20.2.tgz", + "integrity": "sha512-2eWfGgAqqWFGqtdMmcL5zCMK1U8KlXv8SQFGglL3CEtd0aDVDWgeF/YoCmvln55m5zSk3J/20hTaSBeSObsQDQ==", + "peer": true, + "requires": { + "loose-envify": "^1.1.0", + "object-assign": "^4.1.1" + } + }, + "schema-utils": { + "version": "2.7.1", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-2.7.1.tgz", + "integrity": "sha512-SHiNtMOUGWBQJwzISiVYKu82GiV4QYGePp3odlY1tuKO7gPtphAT5R/py0fA6xtbgLL/RvtJZnU9b8s0F1q0Xg==", + "requires": { + "@types/json-schema": "^7.0.5", + "ajv": "^6.12.4", + "ajv-keywords": "^3.5.2" + } + }, + "section-matter": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/section-matter/-/section-matter-1.0.0.tgz", + "integrity": "sha512-vfD3pmTzGpufjScBh50YHKzEu2lxBWhVEHsNGoEXmCmn2hKGfeNLYMzCJpe8cD7gqX7TJluOVpBkAequ6dgMmA==", + "requires": { + "extend-shallow": "^2.0.1", + "kind-of": "^6.0.0" + } + }, + "select-hose": { + "version": "2.0.0", + "resolved": 
"https://registry.npmjs.org/select-hose/-/select-hose-2.0.0.tgz", + "integrity": "sha512-mEugaLK+YfkijB4fx0e6kImuJdCIt2LxCRcbEYPqRGCs4F2ogyfZU5IAZRdjCP8JPq2AtdNoC/Dux63d9Kiryg==" + }, + "selfsigned": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/selfsigned/-/selfsigned-2.1.1.tgz", + "integrity": "sha512-GSL3aowiF7wa/WtSFwnUrludWFoNhftq8bUkH9pkzjpN2XSPOAYEgg6e0sS9s0rZwgJzJiQRPU18A6clnoW5wQ==", + "requires": { + "node-forge": "^1" + } + }, + "semver": { + "version": "7.5.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.1.tgz", + "integrity": "sha512-Wvss5ivl8TMRZXXESstBA4uR5iXgEN/VC5/sOcuXdVLzcdkz4HWetIoRfG5gb5X+ij/G9rw9YoGn3QoQ8OCSpw==", + "requires": { + "lru-cache": "^6.0.0" + }, + "dependencies": { + "lru-cache": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", + "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", + "requires": { + "yallist": "^4.0.0" + } + }, + "yallist": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", + "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==" + } + } + }, + "semver-diff": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/semver-diff/-/semver-diff-3.1.1.tgz", + "integrity": "sha512-GX0Ix/CJcHyB8c4ykpHGIAvLyOwOobtM/8d+TQkAd81/bEjgPHrfba41Vpesr7jX/t8Uh+R3EX9eAS5be+jQYg==", + "requires": { + "semver": "^6.3.0" + }, + "dependencies": { + "semver": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", + "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==" + } + } + }, + "send": { + "version": "0.18.0", + "resolved": "https://registry.npmjs.org/send/-/send-0.18.0.tgz", + "integrity": 
"sha512-qqWzuOjSFOuqPjFe4NOsMLafToQQwBSOEpS+FwEt3A2V3vKubTquT3vmLTQpFgMXp8AlFWFuP1qKaJZOtPpVXg==", + "requires": { + "debug": "2.6.9", + "depd": "2.0.0", + "destroy": "1.2.0", + "encodeurl": "~1.0.2", + "escape-html": "~1.0.3", + "etag": "~1.8.1", + "fresh": "0.5.2", + "http-errors": "2.0.0", + "mime": "1.6.0", + "ms": "2.1.3", + "on-finished": "2.4.1", + "range-parser": "~1.2.1", + "statuses": "2.0.1" + }, + "dependencies": { + "debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "requires": { + "ms": "2.0.0" + }, + "dependencies": { + "ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + } + } + }, + "ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==" + }, + "range-parser": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", + "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==" + } + } + }, + "serialize-javascript": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-6.0.1.tgz", + "integrity": "sha512-owoXEFjWRllis8/M1Q+Cw5k8ZH40e3zhp/ovX+Xr/vi1qj6QesbyXXViFbpNvWvPNAD62SutwEXavefrLJWj7w==", + "requires": { + "randombytes": "^2.1.0" + } + }, + "serve-handler": { + "version": "6.1.5", + "resolved": "https://registry.npmjs.org/serve-handler/-/serve-handler-6.1.5.tgz", + "integrity": "sha512-ijPFle6Hwe8zfmBxJdE+5fta53fdIY0lHISJvuikXB3VYFafRjMRpOffSPvCYsbKyBA7pvy9oYr/BT1O3EArlg==", + "requires": { + "bytes": "3.0.0", + 
"content-disposition": "0.5.2", + "fast-url-parser": "1.1.3", + "mime-types": "2.1.18", + "minimatch": "3.1.2", + "path-is-inside": "1.0.2", + "path-to-regexp": "2.2.1", + "range-parser": "1.2.0" + }, + "dependencies": { + "path-to-regexp": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-2.2.1.tgz", + "integrity": "sha512-gu9bD6Ta5bwGrrU8muHzVOBFFREpp2iRkVfhBJahwJ6p6Xw20SjT0MxLnwkjOibQmGSYhiUnf2FLe7k+jcFmGQ==" + } + } + }, + "serve-index": { + "version": "1.9.1", + "resolved": "https://registry.npmjs.org/serve-index/-/serve-index-1.9.1.tgz", + "integrity": "sha512-pXHfKNP4qujrtteMrSBb0rc8HJ9Ms/GrXwcUtUtD5s4ewDJI8bT3Cz2zTVRMKtri49pLx2e0Ya8ziP5Ya2pZZw==", + "requires": { + "accepts": "~1.3.4", + "batch": "0.6.1", + "debug": "2.6.9", + "escape-html": "~1.0.3", + "http-errors": "~1.6.2", + "mime-types": "~2.1.17", + "parseurl": "~1.3.2" + }, + "dependencies": { + "debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "requires": { + "ms": "2.0.0" + } + }, + "depd": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/depd/-/depd-1.1.2.tgz", + "integrity": "sha512-7emPTl6Dpo6JRXOXjLRxck+FlLRX5847cLKEn00PLAgc3g2hTZZgr+e4c2v6QpSmLeFP3n5yUo7ft6avBK/5jQ==" + }, + "http-errors": { + "version": "1.6.3", + "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-1.6.3.tgz", + "integrity": "sha512-lks+lVC8dgGyh97jxvxeYTWQFvh4uw4yC12gVl63Cg30sjPX4wuGcdkICVXDAESr6OJGjqGA8Iz5mkeN6zlD7A==", + "requires": { + "depd": "~1.1.2", + "inherits": "2.0.3", + "setprototypeof": "1.1.0", + "statuses": ">= 1.4.0 < 2" + } + }, + "inherits": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz", + "integrity": "sha512-x00IRNXNy63jwGkJmzPigoySHbaqpNuzKbBOmzK+g2OdZpQ9w+sxCN+VSB3ja7IAge2OP2qpfxTjeNcyjmW1uw==" + }, + "ms": { + 
"version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + }, + "setprototypeof": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.1.0.tgz", + "integrity": "sha512-BvE/TwpZX4FXExxOxZyRGQQv651MSwmWKZGqvmPcRIjDqWub67kTKuIMx43cZZrS/cBBzwBcNDWoFxt2XEFIpQ==" + }, + "statuses": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/statuses/-/statuses-1.5.0.tgz", + "integrity": "sha512-OpZ3zP+jT1PI7I8nemJX4AKmAX070ZkYPVWV/AaKTJl+tXCTGyVdC1a4SL8RUQYEwk/f34ZX8UTykN68FwrqAA==" + } + } + }, + "serve-static": { + "version": "1.15.0", + "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.15.0.tgz", + "integrity": "sha512-XGuRDNjXUijsUL0vl6nSD7cwURuzEgglbOaFuZM9g3kwDXOWVTck0jLzjPzGD+TazWbboZYu52/9/XPdUgne9g==", + "requires": { + "encodeurl": "~1.0.2", + "escape-html": "~1.0.3", + "parseurl": "~1.3.3", + "send": "0.18.0" + } + }, + "setprototypeof": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.2.0.tgz", + "integrity": "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==" + }, + "shallow-clone": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/shallow-clone/-/shallow-clone-3.0.1.tgz", + "integrity": "sha512-/6KqX+GVUdqPuPPd2LxDDxzX6CAbjJehAAOKlNpqqUpAqPM6HeL8f+o3a+JsyGjn2lv0WY8UsTgUJjU9Ok55NA==", + "requires": { + "kind-of": "^6.0.2" + } + }, + "shallowequal": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/shallowequal/-/shallowequal-1.1.0.tgz", + "integrity": "sha512-y0m1JoUZSlPAjXVtPPW70aZWfIL/dSP7AFkRnniLCrK/8MDKog3TySTBmckD+RObVxH0v4Tox67+F14PdED2oQ==" + }, + "shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": 
"sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "requires": { + "shebang-regex": "^3.0.0" + } + }, + "shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==" + }, + "shell-quote": { + "version": "1.8.1", + "resolved": "https://registry.npmjs.org/shell-quote/-/shell-quote-1.8.1.tgz", + "integrity": "sha512-6j1W9l1iAs/4xYBI1SYOVZyFcCis9b4KCLQ8fgAGG07QvzaRLVVRQvAy85yNmmZSjYjg4MWh4gNvlPujU/5LpA==" + }, + "shelljs": { + "version": "0.8.5", + "resolved": "https://registry.npmjs.org/shelljs/-/shelljs-0.8.5.tgz", + "integrity": "sha512-TiwcRcrkhHvbrZbnRcFYMLl30Dfov3HKqzp5tO5b4pt6G/SezKcYhmDg15zXVBswHmctSAQKznqNW2LO5tTDow==", + "requires": { + "glob": "^7.0.0", + "interpret": "^1.0.0", + "rechoir": "^0.6.2" + } + }, + "side-channel": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.4.tgz", + "integrity": "sha512-q5XPytqFEIKHkGdiMIrY10mvLRvnQh42/+GoBlFW3b2LXLE2xxJpZFdm94we0BaoV3RwJyGqg5wS7epxTv0Zvw==", + "requires": { + "call-bind": "^1.0.0", + "get-intrinsic": "^1.0.2", + "object-inspect": "^1.9.0" + } + }, + "signal-exit": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", + "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==" + }, + "sirv": { + "version": "1.0.19", + "resolved": "https://registry.npmjs.org/sirv/-/sirv-1.0.19.tgz", + "integrity": "sha512-JuLThK3TnZG1TAKDwNIqNq6QA2afLOCcm+iE8D1Kj3GA40pSPsxQjjJl0J8X3tsR7T+CP1GavpzLwYkgVLWrZQ==", + "requires": { + "@polka/url": "^1.0.0-next.20", + "mrmime": "^1.0.0", + "totalist": "^1.0.0" + } + }, + "sisteransi": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/sisteransi/-/sisteransi-1.0.5.tgz", + "integrity": 
"sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==" + }, + "slash": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", + "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==" + }, + "sockjs": { + "version": "0.3.24", + "resolved": "https://registry.npmjs.org/sockjs/-/sockjs-0.3.24.tgz", + "integrity": "sha512-GJgLTZ7vYb/JtPSSZ10hsOYIvEYsjbNU+zPdIHcUaWVNUEPivzxku31865sSSud0Da0W4lEeOPlmw93zLQchuQ==", + "requires": { + "faye-websocket": "^0.11.3", + "uuid": "^8.3.2", + "websocket-driver": "^0.7.4" + } + }, + "sort-css-media-queries": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/sort-css-media-queries/-/sort-css-media-queries-2.1.0.tgz", + "integrity": "sha512-IeWvo8NkNiY2vVYdPa27MCQiR0MN0M80johAYFVxWWXQ44KU84WNxjslwBHmc/7ZL2ccwkM7/e6S5aiKZXm7jA==" + }, + "source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==" + }, + "source-map-js": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.0.2.tgz", + "integrity": "sha512-R0XvVJ9WusLiqTCEiGCmICCMplcCkIwwR11mOSD9CR5u+IXYdiseeEuXCVAjS54zqwkLcPNnmU4OeJ6tUrWhDw==" + }, + "source-map-support": { + "version": "0.5.21", + "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.21.tgz", + "integrity": "sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w==", + "requires": { + "buffer-from": "^1.0.0", + "source-map": "^0.6.0" + } + }, + "space-separated-tokens": { + "version": "1.1.5", + "resolved": "https://registry.npmjs.org/space-separated-tokens/-/space-separated-tokens-1.1.5.tgz", + "integrity": 
"sha512-q/JSVd1Lptzhf5bkYm4ob4iWPjx0KiRe3sRFBNrVqbJkFaBm5vbbowy1mymoPNLRa52+oadOhJ+K49wsSeSjTA==" + }, + "spdy": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/spdy/-/spdy-4.0.2.tgz", + "integrity": "sha512-r46gZQZQV+Kl9oItvl1JZZqJKGr+oEkB08A6BzkiR7593/7IbtuncXHd2YoYeTsG4157ZssMu9KYvUHLcjcDoA==", + "requires": { + "debug": "^4.1.0", + "handle-thing": "^2.0.0", + "http-deceiver": "^1.2.7", + "select-hose": "^2.0.0", + "spdy-transport": "^3.0.0" + } + }, + "spdy-transport": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/spdy-transport/-/spdy-transport-3.0.0.tgz", + "integrity": "sha512-hsLVFE5SjA6TCisWeJXFKniGGOpBgMLmerfO2aCyCU5s7nJ/rpAepqmFifv/GCbSbueEeAJJnmSQ2rKC/g8Fcw==", + "requires": { + "debug": "^4.1.0", + "detect-node": "^2.0.4", + "hpack.js": "^2.1.6", + "obuf": "^1.1.2", + "readable-stream": "^3.0.6", + "wbuf": "^1.7.3" + } + }, + "sprintf-js": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", + "integrity": "sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==" + }, + "stable": { + "version": "0.1.8", + "resolved": "https://registry.npmjs.org/stable/-/stable-0.1.8.tgz", + "integrity": "sha512-ji9qxRnOVfcuLDySj9qzhGSEFVobyt1kIOSkj1qZzYLzq7Tos/oUUWvotUPQLlrsidqsK6tBH89Bc9kL5zHA6w==" + }, + "state-toggle": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/state-toggle/-/state-toggle-1.0.3.tgz", + "integrity": "sha512-d/5Z4/2iiCnHw6Xzghyhb+GcmF89bxwgXG60wjIiZaxnymbyOmI8Hk4VqHXiVVp6u2ysaskFfXg3ekCj4WNftQ==" + }, + "statuses": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz", + "integrity": "sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==" + }, + "std-env": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/std-env/-/std-env-3.3.3.tgz", + "integrity": 
"sha512-Rz6yejtVyWnVjC1RFvNmYL10kgjC49EOghxWn0RFqlCHGFpQx+Xe7yW3I4ceK1SGrWIGMjD5Kbue8W/udkbMJg==" + }, + "string_decoder": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", + "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==", + "requires": { + "safe-buffer": "~5.2.0" + } + }, + "string-width": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", + "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==", + "requires": { + "eastasianwidth": "^0.2.0", + "emoji-regex": "^9.2.2", + "strip-ansi": "^7.0.1" + }, + "dependencies": { + "ansi-regex": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.0.1.tgz", + "integrity": "sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA==" + }, + "strip-ansi": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz", + "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==", + "requires": { + "ansi-regex": "^6.0.1" + } + } + } + }, + "stringify-object": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/stringify-object/-/stringify-object-3.3.0.tgz", + "integrity": "sha512-rHqiFh1elqCQ9WPLIC8I0Q/g/wj5J1eMkyoiD6eoQApWHP0FtlK7rqnhmabL5VUY9JQCcqwwvlOaSuutekgyrw==", + "requires": { + "get-own-enumerable-property-symbols": "^3.0.0", + "is-obj": "^1.0.1", + "is-regexp": "^1.0.0" + } + }, + "strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "requires": { + "ansi-regex": "^5.0.1" + } + }, + "strip-bom-string": { + "version": "1.0.0", + "resolved": 
"https://registry.npmjs.org/strip-bom-string/-/strip-bom-string-1.0.0.tgz", + "integrity": "sha512-uCC2VHvQRYu+lMh4My/sFNmF2klFymLX1wHJeXnbEJERpV/ZsVuonzerjfrGpIGF7LBVa1O7i9kjiWvJiFck8g==" + }, + "strip-final-newline": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-2.0.0.tgz", + "integrity": "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==" + }, + "strip-json-comments": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz", + "integrity": "sha512-4gB8na07fecVVkOI6Rs4e7T6NOTki5EmL7TUduTs6bu3EdnSycntVJ4re8kgZA+wx9IueI2Y11bfbgwtzuE0KQ==" + }, + "style-to-object": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/style-to-object/-/style-to-object-0.3.0.tgz", + "integrity": "sha512-CzFnRRXhzWIdItT3OmF8SQfWyahHhjq3HwcMNCNLn+N7klOOqPjMeG/4JSu77D7ypZdGvSzvkrbyeTMizz2VrA==", + "requires": { + "inline-style-parser": "0.1.1" + } + }, + "stylehacks": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/stylehacks/-/stylehacks-5.1.1.tgz", + "integrity": "sha512-sBpcd5Hx7G6seo7b1LkpttvTz7ikD0LlH5RmdcBNb6fFR0Fl7LQwHDFr300q4cwUqi+IYrFGmsIHieMBfnN/Bw==", + "requires": { + "browserslist": "^4.21.4", + "postcss-selector-parser": "^6.0.4" + } + }, + "supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "requires": { + "has-flag": "^4.0.0" + } + }, + "supports-preserve-symlinks-flag": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", + "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==" + }, + "svg-parser": { + "version": "2.0.4", + "resolved": 
"https://registry.npmjs.org/svg-parser/-/svg-parser-2.0.4.tgz", + "integrity": "sha512-e4hG1hRwoOdRb37cIMSgzNsxyzKfayW6VOflrwvR+/bzrkyxY/31WkbgnQpgtrNp1SdpJvpUAGTa/ZoiPNDuRQ==" + }, + "svgo": { + "version": "2.8.0", + "resolved": "https://registry.npmjs.org/svgo/-/svgo-2.8.0.tgz", + "integrity": "sha512-+N/Q9kV1+F+UeWYoSiULYo4xYSDQlTgb+ayMobAXPwMnLvop7oxKMo9OzIrX5x3eS4L4f2UHhc9axXwY8DpChg==", + "requires": { + "@trysound/sax": "0.2.0", + "commander": "^7.2.0", + "css-select": "^4.1.3", + "css-tree": "^1.1.3", + "csso": "^4.2.0", + "picocolors": "^1.0.0", + "stable": "^0.1.8" + }, + "dependencies": { + "commander": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-7.2.0.tgz", + "integrity": "sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw==" + } + } + }, + "sync-directory": { + "version": "5.1.9", + "resolved": "https://registry.npmjs.org/sync-directory/-/sync-directory-5.1.9.tgz", + "integrity": "sha512-0942RssO+NrIjDcaNiXUH/NQoAamURT1zpzN/uB8fgyetDM8NtPPOQNax3+BuNUfw/2JcaEXrAz567DokNq0lw==", + "requires": { + "chokidar": "^3.3.1", + "commander": "^6.2.0", + "fs-extra": "^7.0.1", + "is-absolute": "^1.0.0", + "readdir-enhanced": "^1.5.2" + }, + "dependencies": { + "commander": { + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/commander/-/commander-6.2.1.tgz", + "integrity": "sha512-U7VdrJFnJgo4xjrHpTzu0yrHPGImdsmD95ZlgYSEajAn2JKzDhDTPG9kBTefmObL2w/ngeZnilk+OV9CG3d7UA==" + } + } + }, + "tapable": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/tapable/-/tapable-2.2.1.tgz", + "integrity": "sha512-GNzQvQTOIP6RyTfE2Qxb8ZVlNmw0n88vp1szwWRimP02mnTsx3Wtn5qRdqY9w2XduFNUgvOwhNnQsjwCp+kqaQ==" + }, + "terser": { + "version": "5.17.7", + "resolved": "https://registry.npmjs.org/terser/-/terser-5.17.7.tgz", + "integrity": "sha512-/bi0Zm2C6VAexlGgLlVxA0P2lru/sdLyfCVaRMfKVo9nWxbmz7f/sD8VPybPeSUJaJcwmCJis9pBIhcVcG1QcQ==", + "requires": { + "@jridgewell/source-map": 
"^0.3.3", + "acorn": "^8.8.2", + "commander": "^2.20.0", + "source-map-support": "~0.5.20" + }, + "dependencies": { + "commander": { + "version": "2.20.3", + "resolved": "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz", + "integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==" + } + } + }, + "terser-webpack-plugin": { + "version": "5.3.9", + "resolved": "https://registry.npmjs.org/terser-webpack-plugin/-/terser-webpack-plugin-5.3.9.tgz", + "integrity": "sha512-ZuXsqE07EcggTWQjXUj+Aot/OMcD0bMKGgF63f7UxYcu5/AJF53aIpK1YoP5xR9l6s/Hy2b+t1AM0bLNPRuhwA==", + "requires": { + "@jridgewell/trace-mapping": "^0.3.17", + "jest-worker": "^27.4.5", + "schema-utils": "^3.1.1", + "serialize-javascript": "^6.0.1", + "terser": "^5.16.8" + }, + "dependencies": { + "jest-worker": { + "version": "27.5.1", + "resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-27.5.1.tgz", + "integrity": "sha512-7vuh85V5cdDofPyxn58nrPjBktZo0u9x1g8WtjQol+jZDaE+fhN+cIvTj11GndBnMnyfrUOG1sZQxCdjKh+DKg==", + "requires": { + "@types/node": "*", + "merge-stream": "^2.0.0", + "supports-color": "^8.0.0" + } + }, + "schema-utils": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.2.0.tgz", + "integrity": "sha512-0zTyLGyDJYd/MBxG1AhJkKa6fpEBds4OQO2ut0w7OYG+ZGhGea09lijvzsqegYSik88zc7cUtIlnnO+/BvD6gQ==", + "requires": { + "@types/json-schema": "^7.0.8", + "ajv": "^6.12.5", + "ajv-keywords": "^3.5.2" + } + }, + "supports-color": { + "version": "8.1.1", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", + "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", + "requires": { + "has-flag": "^4.0.0" + } + } + } + }, + "text-table": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", + "integrity": 
"sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==" + }, + "thunky": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/thunky/-/thunky-1.1.0.tgz", + "integrity": "sha512-eHY7nBftgThBqOyHGVN+l8gF0BucP09fMo0oO/Lb0w1OF80dJv+lDVpXG60WMQvkcxAkNybKsrEIE3ZtKGmPrA==" + }, + "tiny-invariant": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/tiny-invariant/-/tiny-invariant-1.3.1.tgz", + "integrity": "sha512-AD5ih2NlSssTCwsMznbvwMZpJ1cbhkGd2uueNxzv2jDlEeZdU04JQfRnggJQ8DrcVBGjAsCKwFBbDlVNtEMlzw==" + }, + "tiny-warning": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/tiny-warning/-/tiny-warning-1.0.3.tgz", + "integrity": "sha512-lBN9zLN/oAf68o3zNXYrdCt1kP8WsiGW8Oo2ka41b2IM5JL/S1CTyX1rW0mb/zSuJun0ZUrDxx4sqvYS2FWzPA==" + }, + "to-fast-properties": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-2.0.0.tgz", + "integrity": "sha512-/OaKK0xYrs3DmxRYqL/yDc+FxFUVYhDlXMhRmv3z915w2HF1tnN1omB354j8VUGO/hbRzyD6Y3sA7v7GS/ceog==" + }, + "to-readable-stream": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/to-readable-stream/-/to-readable-stream-1.0.0.tgz", + "integrity": "sha512-Iq25XBt6zD5npPhlLVXGFN3/gyR2/qODcKNNyTMd4vbm39HUaOiAM4PMq0eMVC/Tkxz+Zjdsc55g9yyz+Yq00Q==" + }, + "to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "requires": { + "is-number": "^7.0.0" + } + }, + "toidentifier": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.1.tgz", + "integrity": "sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==" + }, + "toposort": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/toposort/-/toposort-2.0.2.tgz", + "integrity": 
"sha512-0a5EOkAUp8D4moMi2W8ZF8jcga7BgZd91O/yabJCFY8az+XSzeGyTKs0Aoo897iV1Nj6guFq8orWDS96z91oGg==" + }, + "totalist": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/totalist/-/totalist-1.1.0.tgz", + "integrity": "sha512-gduQwd1rOdDMGxFG1gEvhV88Oirdo2p+KjoYFU7k2g+i7n6AFFbDQ5kMPUsW0pNbfQsB/cwXvT1i4Bue0s9g5g==" + }, + "trim": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/trim/-/trim-0.0.1.tgz", + "integrity": "sha512-YzQV+TZg4AxpKxaTHK3c3D+kRDCGVEE7LemdlQZoQXn0iennk10RsIoY6ikzAqJTc9Xjl9C1/waHom/J86ziAQ==" + }, + "trim-trailing-lines": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/trim-trailing-lines/-/trim-trailing-lines-1.1.4.tgz", + "integrity": "sha512-rjUWSqnfTNrjbB9NQWfPMH/xRK1deHeGsHoVfpxJ++XeYXE0d6B1En37AHfw3jtfTU7dzMzZL2jjpe8Qb5gLIQ==" + }, + "trough": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/trough/-/trough-1.0.5.tgz", + "integrity": "sha512-rvuRbTarPXmMb79SmzEp8aqXNKcK+y0XaB298IXueQ8I2PsrATcPBCSPyK/dDNa2iWOhKlfNnOjdAOTBU/nkFA==" + }, + "tslib": { + "version": "2.5.3", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.5.3.tgz", + "integrity": "sha512-mSxlJJwl3BMEQCUNnxXBU9jP4JBktcEGhURcPR6VQVlnP0FdDEsIaz0C35dXNGLyRfrATNofF0F5p2KPxQgB+w==" + }, + "type-fest": { + "version": "2.19.0", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-2.19.0.tgz", + "integrity": "sha512-RAH822pAdBgcNMAfWnCBU3CFZcfZ/i1eZjwFU/dsLKumyuuP3niueg2UAukXYF0E2AAoc82ZSSf9J0WQBinzHA==" + }, + "type-is": { + "version": "1.6.18", + "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz", + "integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==", + "requires": { + "media-typer": "0.3.0", + "mime-types": "~2.1.24" + }, + "dependencies": { + "mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": 
"sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==" + }, + "mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "requires": { + "mime-db": "1.52.0" + } + } + } + }, + "typedarray-to-buffer": { + "version": "3.1.5", + "resolved": "https://registry.npmjs.org/typedarray-to-buffer/-/typedarray-to-buffer-3.1.5.tgz", + "integrity": "sha512-zdu8XMNEDepKKR+XYOXAVPtWui0ly0NtohUscw+UmaHiAWT8hrV1rr//H6V+0DvJ3OQ19S979M0laLfX8rm82Q==", + "requires": { + "is-typedarray": "^1.0.0" + } + }, + "typescript": { + "version": "5.1.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.1.3.tgz", + "integrity": "sha512-XH627E9vkeqhlZFQuL+UsyAXEnibT0kWR2FWONlr4sTjvxyJYnyefgrkyECLzM5NenmKzRAy2rR/OlYLA1HkZw==", + "peer": true + }, + "uglify-js": { + "version": "3.17.4", + "resolved": "https://registry.npmjs.org/uglify-js/-/uglify-js-3.17.4.tgz", + "integrity": "sha512-T9q82TJI9e/C1TAxYvfb16xO120tMVFZrGA3f9/P4424DNu6ypK103y0GPFVa17yotwSyZW5iYXgjYHkGrJW/g==", + "optional": true + }, + "unc-path-regex": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/unc-path-regex/-/unc-path-regex-0.1.2.tgz", + "integrity": "sha512-eXL4nmJT7oCpkZsHZUOJo8hcX3GbsiDOa0Qu9F646fi8dT3XuSVopVqAcEiVzSKKH7UoDti23wNX3qGFxcW5Qg==" + }, + "unherit": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/unherit/-/unherit-1.1.3.tgz", + "integrity": "sha512-Ft16BJcnapDKp0+J/rqFC3Rrk6Y/Ng4nzsC028k2jdDII/rdZ7Wd3pPT/6+vIIxRagwRc9K0IUX0Ra4fKvw+WQ==", + "requires": { + "inherits": "^2.0.0", + "xtend": "^4.0.0" + } + }, + "unicode-canonical-property-names-ecmascript": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/unicode-canonical-property-names-ecmascript/-/unicode-canonical-property-names-ecmascript-2.0.0.tgz", + "integrity": 
"sha512-yY5PpDlfVIU5+y/BSCxAJRBIS1Zc2dDG3Ujq+sR0U+JjUevW2JhocOF+soROYDSaAezOzOKuyyixhD6mBknSmQ==" + }, + "unicode-match-property-ecmascript": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/unicode-match-property-ecmascript/-/unicode-match-property-ecmascript-2.0.0.tgz", + "integrity": "sha512-5kaZCrbp5mmbz5ulBkDkbY0SsPOjKqVS35VpL9ulMPfSl0J0Xsm+9Evphv9CoIZFwre7aJoa94AY6seMKGVN5Q==", + "requires": { + "unicode-canonical-property-names-ecmascript": "^2.0.0", + "unicode-property-aliases-ecmascript": "^2.0.0" + } + }, + "unicode-match-property-value-ecmascript": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/unicode-match-property-value-ecmascript/-/unicode-match-property-value-ecmascript-2.1.0.tgz", + "integrity": "sha512-qxkjQt6qjg/mYscYMC0XKRn3Rh0wFPlfxB0xkt9CfyTvpX1Ra0+rAmdX2QyAobptSEvuy4RtpPRui6XkV+8wjA==" + }, + "unicode-property-aliases-ecmascript": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/unicode-property-aliases-ecmascript/-/unicode-property-aliases-ecmascript-2.1.0.tgz", + "integrity": "sha512-6t3foTQI9qne+OZoVQB/8x8rk2k1eVy1gRXhV3oFQ5T6R1dqQ1xtin3XqSlx3+ATBkliTaR/hHyJBm+LVPNM8w==" + }, + "unified": { + "version": "9.2.2", + "resolved": "https://registry.npmjs.org/unified/-/unified-9.2.2.tgz", + "integrity": "sha512-Sg7j110mtefBD+qunSLO1lqOEKdrwBFBrR6Qd8f4uwkhWNlbkaqwHse6e7QvD3AP/MNoJdEDLaf8OxYyoWgorQ==", + "requires": { + "bail": "^1.0.0", + "extend": "^3.0.0", + "is-buffer": "^2.0.0", + "is-plain-obj": "^2.0.0", + "trough": "^1.0.0", + "vfile": "^4.0.0" + } + }, + "unique-string": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/unique-string/-/unique-string-2.0.0.tgz", + "integrity": "sha512-uNaeirEPvpZWSgzwsPGtU2zVSTrn/8L5q/IexZmH0eH6SA73CmAA5U4GwORTxQAZs95TAXLNqeLoPPNO5gZfWg==", + "requires": { + "crypto-random-string": "^2.0.0" + } + }, + "unist-builder": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/unist-builder/-/unist-builder-2.0.3.tgz", + "integrity": 
"sha512-f98yt5pnlMWlzP539tPc4grGMsFaQQlP/vM396b00jngsiINumNmsY8rkXjfoi1c6QaM8nQ3vaGDuoKWbe/1Uw==" + }, + "unist-util-generated": { + "version": "1.1.6", + "resolved": "https://registry.npmjs.org/unist-util-generated/-/unist-util-generated-1.1.6.tgz", + "integrity": "sha512-cln2Mm1/CZzN5ttGK7vkoGw+RZ8VcUH6BtGbq98DDtRGquAAOXig1mrBQYelOwMXYS8rK+vZDyyojSjp7JX+Lg==" + }, + "unist-util-is": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-4.1.0.tgz", + "integrity": "sha512-ZOQSsnce92GrxSqlnEEseX0gi7GH9zTJZ0p9dtu87WRb/37mMPO2Ilx1s/t9vBHrFhbgweUwb+t7cIn5dxPhZg==" + }, + "unist-util-position": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/unist-util-position/-/unist-util-position-3.1.0.tgz", + "integrity": "sha512-w+PkwCbYSFw8vpgWD0v7zRCl1FpY3fjDSQ3/N/wNd9Ffa4gPi8+4keqt99N3XW6F99t/mUzp2xAhNmfKWp95QA==" + }, + "unist-util-remove": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/unist-util-remove/-/unist-util-remove-2.1.0.tgz", + "integrity": "sha512-J8NYPyBm4baYLdCbjmf1bhPu45Cr1MWTm77qd9istEkzWpnN6O9tMsEbB2JhNnBCqGENRqEWomQ+He6au0B27Q==", + "requires": { + "unist-util-is": "^4.0.0" + } + }, + "unist-util-remove-position": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/unist-util-remove-position/-/unist-util-remove-position-2.0.1.tgz", + "integrity": "sha512-fDZsLYIe2uT+oGFnuZmy73K6ZxOPG/Qcm+w7jbEjaFcJgbQ6cqjs/eSPzXhsmGpAsWPkqZM9pYjww5QTn3LHMA==", + "requires": { + "unist-util-visit": "^2.0.0" + } + }, + "unist-util-stringify-position": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/unist-util-stringify-position/-/unist-util-stringify-position-2.0.3.tgz", + "integrity": "sha512-3faScn5I+hy9VleOq/qNbAd6pAx7iH5jYBMS9I1HgQVijz/4mv5Bvw5iw1sC/90CODiKo81G/ps8AJrISn687g==", + "requires": { + "@types/unist": "^2.0.2" + } + }, + "unist-util-visit": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-2.0.3.tgz", + 
"integrity": "sha512-iJ4/RczbJMkD0712mGktuGpm/U4By4FfDonL7N/9tATGIF4imikjOuagyMY53tnZq3NP6BcmlrHhEKAfGWjh7Q==", + "requires": { + "@types/unist": "^2.0.0", + "unist-util-is": "^4.0.0", + "unist-util-visit-parents": "^3.0.0" + } + }, + "unist-util-visit-parents": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-3.1.1.tgz", + "integrity": "sha512-1KROIZWo6bcMrZEwiH2UrXDyalAa0uqzWCxCJj6lPOvTve2WkfgCytoDTPaMnodXh1WrXOq0haVYHj99ynJlsg==", + "requires": { + "@types/unist": "^2.0.0", + "unist-util-is": "^4.0.0" + } + }, + "universalify": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-0.1.2.tgz", + "integrity": "sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg==" + }, + "unpipe": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz", + "integrity": "sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==" + }, + "update-browserslist-db": { + "version": "1.0.11", + "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.0.11.tgz", + "integrity": "sha512-dCwEFf0/oT85M1fHBg4F0jtLwJrutGoHSQXCh7u4o2t1drG+c0a9Flnqww6XUKSfQMPpJBRjU8d4RXB09qtvaA==", + "requires": { + "escalade": "^3.1.1", + "picocolors": "^1.0.0" + } + }, + "update-notifier": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/update-notifier/-/update-notifier-5.1.0.tgz", + "integrity": "sha512-ItnICHbeMh9GqUy31hFPrD1kcuZ3rpxDZbf4KUDavXwS0bW5m7SLbDQpGX3UYr072cbrF5hFUs3r5tUsPwjfHw==", + "requires": { + "boxen": "^5.0.0", + "chalk": "^4.1.0", + "configstore": "^5.0.1", + "has-yarn": "^2.1.0", + "import-lazy": "^2.1.0", + "is-ci": "^2.0.0", + "is-installed-globally": "^0.4.0", + "is-npm": "^5.0.0", + "is-yarn-global": "^0.3.0", + "latest-version": "^5.1.0", + "pupa": "^2.1.1", + "semver": "^7.3.4", + "semver-diff": "^3.1.1", + 
"xdg-basedir": "^4.0.0" + }, + "dependencies": { + "boxen": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/boxen/-/boxen-5.1.2.tgz", + "integrity": "sha512-9gYgQKXx+1nP8mP7CzFyaUARhg7D3n1dF/FnErWmu9l6JvGpNUN278h0aSb+QjoiKSWG+iZ3uHrcqk0qrY9RQQ==", + "requires": { + "ansi-align": "^3.0.0", + "camelcase": "^6.2.0", + "chalk": "^4.1.0", + "cli-boxes": "^2.2.1", + "string-width": "^4.2.2", + "type-fest": "^0.20.2", + "widest-line": "^3.1.0", + "wrap-ansi": "^7.0.0" + } + }, + "cli-boxes": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/cli-boxes/-/cli-boxes-2.2.1.tgz", + "integrity": "sha512-y4coMcylgSCdVinjiDBuR8PCC2bLjyGTwEmPb9NHR/QaNU6EUOXcTY/s6VjGMD6ENSEaeQYHCY0GNGS5jfMwPw==" + }, + "emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" + }, + "string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "requires": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + } + }, + "type-fest": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz", + "integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==" + }, + "widest-line": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/widest-line/-/widest-line-3.1.0.tgz", + "integrity": "sha512-NsmoXalsWVDMGupxZ5R08ka9flZjjiLvHVAWYOKtiKM8ujtZWr9cRffak+uSE48+Ob8ObalXpwyeUiyDD6QFgg==", + "requires": { + "string-width": "^4.0.0" + } + }, + "wrap-ansi": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": 
"sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "requires": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + } + } + } + }, + "uri-js": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", + "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", + "requires": { + "punycode": "^2.1.0" + }, + "dependencies": { + "punycode": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.0.tgz", + "integrity": "sha512-rRV+zQD8tVFys26lAGR9WUuS4iUAngJScM+ZRSKtvl5tKeZ2t5bvdNFdNHBW9FWR4guGHlgmsZ1G7BSm2wTbuA==" + } + } + }, + "url-loader": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/url-loader/-/url-loader-4.1.1.tgz", + "integrity": "sha512-3BTV812+AVHHOJQO8O5MkWgZ5aosP7GnROJwvzLS9hWDj00lZ6Z0wNak423Lp9PBZN05N+Jk/N5Si8jRAlGyWA==", + "requires": { + "loader-utils": "^2.0.0", + "mime-types": "^2.1.27", + "schema-utils": "^3.0.0" + }, + "dependencies": { + "mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==" + }, + "mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "requires": { + "mime-db": "1.52.0" + } + }, + "schema-utils": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.2.0.tgz", + "integrity": "sha512-0zTyLGyDJYd/MBxG1AhJkKa6fpEBds4OQO2ut0w7OYG+ZGhGea09lijvzsqegYSik88zc7cUtIlnnO+/BvD6gQ==", + "requires": { + "@types/json-schema": "^7.0.8", + "ajv": "^6.12.5", + "ajv-keywords": "^3.5.2" + } + } + } + }, + "url-parse-lax": { + "version": "3.0.0", + 
"resolved": "https://registry.npmjs.org/url-parse-lax/-/url-parse-lax-3.0.0.tgz", + "integrity": "sha512-NjFKA0DidqPa5ciFcSrXnAltTtzz84ogy+NebPvfEgAck0+TNg4UJ4IN+fB7zRZfbgUf0syOo9MDxFkDSMuFaQ==", + "requires": { + "prepend-http": "^2.0.0" + } + }, + "util": { + "version": "0.10.4", + "resolved": "https://registry.npmjs.org/util/-/util-0.10.4.tgz", + "integrity": "sha512-0Pm9hTQ3se5ll1XihRic3FDIku70C+iHUdT/W926rSgHV5QgXsYbKZN8MSC3tJtSkhuROzvsQjAaFENRXr+19A==", + "requires": { + "inherits": "2.0.3" + }, + "dependencies": { + "inherits": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz", + "integrity": "sha512-x00IRNXNy63jwGkJmzPigoySHbaqpNuzKbBOmzK+g2OdZpQ9w+sxCN+VSB3ja7IAge2OP2qpfxTjeNcyjmW1uw==" + } + } + }, + "util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==" + }, + "utila": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/utila/-/utila-0.4.0.tgz", + "integrity": "sha512-Z0DbgELS9/L/75wZbro8xAnT50pBVFQZ+hUEueGDU5FN51YSCYM+jdxsfCiHjwNP/4LCDD0i/graKpeBnOXKRA==" + }, + "utility-types": { + "version": "3.10.0", + "resolved": "https://registry.npmjs.org/utility-types/-/utility-types-3.10.0.tgz", + "integrity": "sha512-O11mqxmi7wMKCo6HKFt5AhO4BwY3VV68YU07tgxfz8zJTIxr4BpsezN49Ffwy9j3ZpwwJp4fkRwjRzq3uWE6Rg==" + }, + "utils-merge": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz", + "integrity": "sha512-pMZTvIkT1d+TFGvDOqodOclx0QWkkgi6Tdoa8gC8ffGAAqz9pzPTZWAybbsHHoED/ztMtkv/VoYTYyShUn81hA==" + }, + "uuid": { + "version": "8.3.2", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-8.3.2.tgz", + "integrity": "sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==" + }, + "value-equal": { + "version": "1.0.1", + "resolved": 
"https://registry.npmjs.org/value-equal/-/value-equal-1.0.1.tgz", + "integrity": "sha512-NOJ6JZCAWr0zlxZt+xqCHNTEKOsrks2HQd4MqhP1qy4z1SkbEP467eNx6TgDKXMvUOb+OENfJCZwM+16n7fRfw==" + }, + "vary": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz", + "integrity": "sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==" + }, + "vfile": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/vfile/-/vfile-4.2.1.tgz", + "integrity": "sha512-O6AE4OskCG5S1emQ/4gl8zK586RqA3srz3nfK/Viy0UPToBc5Trp9BVFb1u0CjsKrAWwnpr4ifM/KBXPWwJbCA==", + "requires": { + "@types/unist": "^2.0.0", + "is-buffer": "^2.0.0", + "unist-util-stringify-position": "^2.0.0", + "vfile-message": "^2.0.0" + } + }, + "vfile-location": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/vfile-location/-/vfile-location-3.2.0.tgz", + "integrity": "sha512-aLEIZKv/oxuCDZ8lkJGhuhztf/BW4M+iHdCwglA/eWc+vtuRFJj8EtgceYFX4LRjOhCAAiNHsKGssC6onJ+jbA==" + }, + "vfile-message": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/vfile-message/-/vfile-message-2.0.4.tgz", + "integrity": "sha512-DjssxRGkMvifUOJre00juHoP9DPWuzjxKuMDrhNbk2TdaYYBNMStsNhEOt3idrtI12VQYM/1+iM0KOzXi4pxwQ==", + "requires": { + "@types/unist": "^2.0.0", + "unist-util-stringify-position": "^2.0.0" + } + }, + "wait-on": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/wait-on/-/wait-on-6.0.1.tgz", + "integrity": "sha512-zht+KASY3usTY5u2LgaNqn/Cd8MukxLGjdcZxT2ns5QzDmTFc4XoWBgC+C/na+sMRZTuVygQoMYwdcVjHnYIVw==", + "requires": { + "axios": "^0.25.0", + "joi": "^17.6.0", + "lodash": "^4.17.21", + "minimist": "^1.2.5", + "rxjs": "^7.5.4" + } + }, + "watchpack": { + "version": "2.4.0", + "resolved": "https://registry.npmjs.org/watchpack/-/watchpack-2.4.0.tgz", + "integrity": "sha512-Lcvm7MGST/4fup+ifyKi2hjyIAwcdI4HRgtvTpIUxBRhB+RFtUh8XtDOxUfctVCnhVi+QQj49i91OyvzkJl6cg==", + "requires": { + "glob-to-regexp": "^0.4.1", + 
"graceful-fs": "^4.1.2" + }, + "dependencies": { + "glob-to-regexp": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/glob-to-regexp/-/glob-to-regexp-0.4.1.tgz", + "integrity": "sha512-lkX1HJXwyMcprw/5YUZc2s7DrpAiHB21/V+E1rHUrVNokkvB6bqMzT0VfV6/86ZNabt1k14YOIaT7nDvOX3Iiw==" + } + } + }, + "wbuf": { + "version": "1.7.3", + "resolved": "https://registry.npmjs.org/wbuf/-/wbuf-1.7.3.tgz", + "integrity": "sha512-O84QOnr0icsbFGLS0O3bI5FswxzRr8/gHwWkDlQFskhSPryQXvrTMxjxGP4+iWYoauLoBvfDpkrOauZ+0iZpDA==", + "requires": { + "minimalistic-assert": "^1.0.0" + } + }, + "web-namespaces": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/web-namespaces/-/web-namespaces-1.1.4.tgz", + "integrity": "sha512-wYxSGajtmoP4WxfejAPIr4l0fVh+jeMXZb08wNc0tMg6xsfZXj3cECqIK0G7ZAqUq0PP8WlMDtaOGVBTAWztNw==" + }, + "webpack": { + "version": "5.86.0", + "resolved": "https://registry.npmjs.org/webpack/-/webpack-5.86.0.tgz", + "integrity": "sha512-3BOvworZ8SO/D4GVP+GoRC3fVeg5MO4vzmq8TJJEkdmopxyazGDxN8ClqN12uzrZW9Tv8EED8v5VSb6Sqyi0pg==", + "requires": { + "@types/eslint-scope": "^3.7.3", + "@types/estree": "^1.0.0", + "@webassemblyjs/ast": "^1.11.5", + "@webassemblyjs/wasm-edit": "^1.11.5", + "@webassemblyjs/wasm-parser": "^1.11.5", + "acorn": "^8.7.1", + "acorn-import-assertions": "^1.9.0", + "browserslist": "^4.14.5", + "chrome-trace-event": "^1.0.2", + "enhanced-resolve": "^5.14.1", + "es-module-lexer": "^1.2.1", + "eslint-scope": "5.1.1", + "events": "^3.2.0", + "glob-to-regexp": "^0.4.1", + "graceful-fs": "^4.2.9", + "json-parse-even-better-errors": "^2.3.1", + "loader-runner": "^4.2.0", + "mime-types": "^2.1.27", + "neo-async": "^2.6.2", + "schema-utils": "^3.1.2", + "tapable": "^2.1.1", + "terser-webpack-plugin": "^5.3.7", + "watchpack": "^2.4.0", + "webpack-sources": "^3.2.3" + }, + "dependencies": { + "glob-to-regexp": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/glob-to-regexp/-/glob-to-regexp-0.4.1.tgz", + "integrity": 
"sha512-lkX1HJXwyMcprw/5YUZc2s7DrpAiHB21/V+E1rHUrVNokkvB6bqMzT0VfV6/86ZNabt1k14YOIaT7nDvOX3Iiw==" + }, + "mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==" + }, + "mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "requires": { + "mime-db": "1.52.0" + } + }, + "schema-utils": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.2.0.tgz", + "integrity": "sha512-0zTyLGyDJYd/MBxG1AhJkKa6fpEBds4OQO2ut0w7OYG+ZGhGea09lijvzsqegYSik88zc7cUtIlnnO+/BvD6gQ==", + "requires": { + "@types/json-schema": "^7.0.8", + "ajv": "^6.12.5", + "ajv-keywords": "^3.5.2" + } + } + } + }, + "webpack-bundle-analyzer": { + "version": "4.9.0", + "resolved": "https://registry.npmjs.org/webpack-bundle-analyzer/-/webpack-bundle-analyzer-4.9.0.tgz", + "integrity": "sha512-+bXGmO1LyiNx0i9enBu3H8mv42sj/BJWhZNFwjz92tVnBa9J3JMGo2an2IXlEleoDOPn/Hofl5hr/xCpObUDtw==", + "requires": { + "@discoveryjs/json-ext": "0.5.7", + "acorn": "^8.0.4", + "acorn-walk": "^8.0.0", + "chalk": "^4.1.0", + "commander": "^7.2.0", + "gzip-size": "^6.0.0", + "lodash": "^4.17.20", + "opener": "^1.5.2", + "sirv": "^1.0.7", + "ws": "^7.3.1" + }, + "dependencies": { + "commander": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-7.2.0.tgz", + "integrity": "sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw==" + } + } + }, + "webpack-dev-middleware": { + "version": "5.3.3", + "resolved": "https://registry.npmjs.org/webpack-dev-middleware/-/webpack-dev-middleware-5.3.3.tgz", + "integrity": 
"sha512-hj5CYrY0bZLB+eTO+x/j67Pkrquiy7kWepMHmUMoPsmcUaeEnQJqFzHJOyxgWlq746/wUuA64p9ta34Kyb01pA==", + "requires": { + "colorette": "^2.0.10", + "memfs": "^3.4.3", + "mime-types": "^2.1.31", + "range-parser": "^1.2.1", + "schema-utils": "^4.0.0" + }, + "dependencies": { + "ajv": { + "version": "8.12.0", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.12.0.tgz", + "integrity": "sha512-sRu1kpcO9yLtYxBKvqfTeh9KzZEwO3STyX1HT+4CaDzC6HpTGYhIhPIzj9XuKU7KYDwnaeh5hcOwjy1QuJzBPA==", + "requires": { + "fast-deep-equal": "^3.1.1", + "json-schema-traverse": "^1.0.0", + "require-from-string": "^2.0.2", + "uri-js": "^4.2.2" + } + }, + "ajv-keywords": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-5.1.0.tgz", + "integrity": "sha512-YCS/JNFAUyr5vAuhk1DWm1CBxRHW9LbJ2ozWeemrIqpbsqKjHVxYPyi5GC0rjZIT5JxJ3virVTS8wk4i/Z+krw==", + "requires": { + "fast-deep-equal": "^3.1.3" + } + }, + "json-schema-traverse": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", + "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==" + }, + "mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==" + }, + "mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "requires": { + "mime-db": "1.52.0" + } + }, + "range-parser": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", + "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==" + }, + "schema-utils": { + "version": "4.1.0", + "resolved": 
"https://registry.npmjs.org/schema-utils/-/schema-utils-4.1.0.tgz", + "integrity": "sha512-Jw+GZVbP5IggB2WAn6UHI02LBwGmsIeYN/lNbSMZyDziQ7jmtAUrqKqDja+W89YHVs+KL/3IkIMltAklqB1vAw==", + "requires": { + "@types/json-schema": "^7.0.9", + "ajv": "^8.9.0", + "ajv-formats": "^2.1.1", + "ajv-keywords": "^5.1.0" + } + } + } + }, + "webpack-dev-server": { + "version": "4.15.0", + "resolved": "https://registry.npmjs.org/webpack-dev-server/-/webpack-dev-server-4.15.0.tgz", + "integrity": "sha512-HmNB5QeSl1KpulTBQ8UT4FPrByYyaLxpJoQ0+s7EvUrMc16m0ZS1sgb1XGqzmgCPk0c9y+aaXxn11tbLzuM7NQ==", + "requires": { + "@types/bonjour": "^3.5.9", + "@types/connect-history-api-fallback": "^1.3.5", + "@types/express": "^4.17.13", + "@types/serve-index": "^1.9.1", + "@types/serve-static": "^1.13.10", + "@types/sockjs": "^0.3.33", + "@types/ws": "^8.5.1", + "ansi-html-community": "^0.0.8", + "bonjour-service": "^1.0.11", + "chokidar": "^3.5.3", + "colorette": "^2.0.10", + "compression": "^1.7.4", + "connect-history-api-fallback": "^2.0.0", + "default-gateway": "^6.0.3", + "express": "^4.17.3", + "graceful-fs": "^4.2.6", + "html-entities": "^2.3.2", + "http-proxy-middleware": "^2.0.3", + "ipaddr.js": "^2.0.1", + "launch-editor": "^2.6.0", + "open": "^8.0.9", + "p-retry": "^4.5.0", + "rimraf": "^3.0.2", + "schema-utils": "^4.0.0", + "selfsigned": "^2.1.1", + "serve-index": "^1.9.1", + "sockjs": "^0.3.24", + "spdy": "^4.0.2", + "webpack-dev-middleware": "^5.3.1", + "ws": "^8.13.0" + }, + "dependencies": { + "ajv": { + "version": "8.12.0", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.12.0.tgz", + "integrity": "sha512-sRu1kpcO9yLtYxBKvqfTeh9KzZEwO3STyX1HT+4CaDzC6HpTGYhIhPIzj9XuKU7KYDwnaeh5hcOwjy1QuJzBPA==", + "requires": { + "fast-deep-equal": "^3.1.1", + "json-schema-traverse": "^1.0.0", + "require-from-string": "^2.0.2", + "uri-js": "^4.2.2" + } + }, + "ajv-keywords": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-5.1.0.tgz", + "integrity": 
"sha512-YCS/JNFAUyr5vAuhk1DWm1CBxRHW9LbJ2ozWeemrIqpbsqKjHVxYPyi5GC0rjZIT5JxJ3virVTS8wk4i/Z+krw==", + "requires": { + "fast-deep-equal": "^3.1.3" + } + }, + "json-schema-traverse": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", + "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==" + }, + "schema-utils": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.1.0.tgz", + "integrity": "sha512-Jw+GZVbP5IggB2WAn6UHI02LBwGmsIeYN/lNbSMZyDziQ7jmtAUrqKqDja+W89YHVs+KL/3IkIMltAklqB1vAw==", + "requires": { + "@types/json-schema": "^7.0.9", + "ajv": "^8.9.0", + "ajv-formats": "^2.1.1", + "ajv-keywords": "^5.1.0" + } + }, + "ws": { + "version": "8.13.0", + "resolved": "https://registry.npmjs.org/ws/-/ws-8.13.0.tgz", + "integrity": "sha512-x9vcZYTrFPC7aSIbj7sRCYo7L/Xb8Iy+pW0ng0wt2vCJv7M9HOMy0UoN3rr+IFC7hb7vXoqS+P9ktyLLLhO+LA==", + "requires": {} + } + } + }, + "webpack-merge": { + "version": "5.9.0", + "resolved": "https://registry.npmjs.org/webpack-merge/-/webpack-merge-5.9.0.tgz", + "integrity": "sha512-6NbRQw4+Sy50vYNTw7EyOn41OZItPiXB8GNv3INSoe3PSFaHJEz3SHTrYVaRm2LilNGnFUzh0FAwqPEmU/CwDg==", + "requires": { + "clone-deep": "^4.0.1", + "wildcard": "^2.0.0" + } + }, + "webpack-sources": { + "version": "3.2.3", + "resolved": "https://registry.npmjs.org/webpack-sources/-/webpack-sources-3.2.3.tgz", + "integrity": "sha512-/DyMEOrDgLKKIG0fmvtz+4dUX/3Ghozwgm6iPp8KRhvn+eQf9+Q7GWxVNMk3+uCPWfdXYC4ExGBckIXdFEfH1w==" + }, + "webpackbar": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/webpackbar/-/webpackbar-5.0.2.tgz", + "integrity": "sha512-BmFJo7veBDgQzfWXl/wwYXr/VFus0614qZ8i9znqcl9fnEdiVkdbi0TedLQ6xAK92HZHDJ0QmyQ0fmuZPAgCYQ==", + "requires": { + "chalk": "^4.1.0", + "consola": "^2.15.3", + "pretty-time": "^1.1.0", + "std-env": "^3.0.1" + } + }, + "websocket-driver": { + "version": "0.7.4", + 
"resolved": "https://registry.npmjs.org/websocket-driver/-/websocket-driver-0.7.4.tgz", + "integrity": "sha512-b17KeDIQVjvb0ssuSDF2cYXSg2iztliJ4B9WdsuB6J952qCPKmnVq4DyW5motImXHDC1cBT/1UezrJVsKw5zjg==", + "requires": { + "http-parser-js": ">=0.5.1", + "safe-buffer": ">=5.1.0", + "websocket-extensions": ">=0.1.1" + } + }, + "websocket-extensions": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/websocket-extensions/-/websocket-extensions-0.1.4.tgz", + "integrity": "sha512-OqedPIGOfsDlo31UNwYbCFMSaO9m9G/0faIHj5/dZFDMFqPTcx6UwqyOy3COEaEOg/9VsGIpdqn62W5KhoKSpg==" + }, + "which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "requires": { + "isexe": "^2.0.0" + } + }, + "widest-line": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/widest-line/-/widest-line-4.0.1.tgz", + "integrity": "sha512-o0cyEG0e8GPzT4iGHphIOh0cJOV8fivsXxddQasHPHfoZf1ZexrfeA21w2NaEN1RHE+fXlfISmOE8R9N3u3Qig==", + "requires": { + "string-width": "^5.0.1" + } + }, + "wildcard": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/wildcard/-/wildcard-2.0.1.tgz", + "integrity": "sha512-CC1bOL87PIWSBhDcTrdeLo6eGT7mCFtrg0uIJtqJUFyK+eJnzl8A1niH56uu7KMa5XFrtiV+AQuHO3n7DsHnLQ==" + }, + "wordwrap": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/wordwrap/-/wordwrap-1.0.0.tgz", + "integrity": "sha512-gvVzJFlPycKc5dZN4yPkP8w7Dc37BtP1yczEneOb4uq34pXZcvrtRTmWV8W+Ume+XCxKgbjM+nevkyFPMybd4Q==" + }, + "wrap-ansi": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz", + "integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==", + "requires": { + "ansi-styles": "^6.1.0", + "string-width": "^5.0.1", + "strip-ansi": "^7.0.1" + }, + "dependencies": { + "ansi-regex": { + "version": "6.0.1", + 
"resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.0.1.tgz", + "integrity": "sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA==" + }, + "ansi-styles": { + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.1.tgz", + "integrity": "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==" + }, + "strip-ansi": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz", + "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==", + "requires": { + "ansi-regex": "^6.0.1" + } + } + } + }, + "wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==" + }, + "write-file-atomic": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-3.0.3.tgz", + "integrity": "sha512-AvHcyZ5JnSfq3ioSyjrBkH9yW4m7Ayk8/9My/DD9onKeu/94fwrMocemO2QAJFAlnnDN+ZDS+ZjAR5ua1/PV/Q==", + "requires": { + "imurmurhash": "^0.1.4", + "is-typedarray": "^1.0.0", + "signal-exit": "^3.0.2", + "typedarray-to-buffer": "^3.1.5" + } + }, + "ws": { + "version": "7.5.9", + "resolved": "https://registry.npmjs.org/ws/-/ws-7.5.9.tgz", + "integrity": "sha512-F+P9Jil7UiSKSkppIiD94dN07AwvFixvLIj1Og1Rl9GGMuNipJnV9JzjD6XuqmAeiswGvUmNLjr5cFuXwNS77Q==", + "requires": {} + }, + "xdg-basedir": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/xdg-basedir/-/xdg-basedir-4.0.0.tgz", + "integrity": "sha512-PSNhEJDejZYV7h50BohL09Er9VaIefr2LMAf3OEmpCkjOi34eYyQYAXUTjEQtZJTKcF0E2UKTh+osDLsgNim9Q==" + }, + "xtend": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz", + "integrity": 
"sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==" + }, + "yallist": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz", + "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==" + }, + "yaml": { + "version": "1.10.2", + "resolved": "https://registry.npmjs.org/yaml/-/yaml-1.10.2.tgz", + "integrity": "sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg==" + }, + "yocto-queue": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", + "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==" + }, + "yup": { + "version": "0.32.11", + "resolved": "https://registry.npmjs.org/yup/-/yup-0.32.11.tgz", + "integrity": "sha512-Z2Fe1bn+eLstG8DRR6FTavGD+MeAwyfmouhHsIUgaADz8jvFKbO/fXc2trJKZg+5EBjh4gGm3iU/t3onKlXHIg==", + "requires": { + "@babel/runtime": "^7.15.4", + "@types/lodash": "^4.14.175", + "lodash": "^4.17.21", + "lodash-es": "^4.17.21", + "nanoclone": "^0.2.1", + "property-expr": "^2.0.4", + "toposort": "^2.0.2" + } + }, + "zwitch": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/zwitch/-/zwitch-1.0.5.tgz", + "integrity": "sha512-V50KMwwzqJV0NpZIZFwfOD5/lyny3WlSzRiXgA0G7VUnRlqttta1L6UQIHzd6EuBY/cHGfwTIck7w1yH6Q5zUw==" + } + } +} diff --git a/documentation/package.json b/documentation/package.json new file mode 100644 index 0000000000000000000000000000000000000000..7295346f3208abb3d900af7e7ec476365aec2731 --- /dev/null +++ b/documentation/package.json @@ -0,0 +1,14 @@ +{ + "name": "h2o-llmstudio-docs", + "version": "0.0.0", + "scripts": { + "start": "makersaurus start", + "build": "makersaurus build", + "serve": "makersaurus serve", + "deploy": "makersaurus deploy" + }, + "dependencies": { + "@h2oai/makersaurus": "^0.8.3", + "docusaurus-plugin-includes": "^1.1.4" + } 
+} diff --git a/documentation/sidebars.js b/documentation/sidebars.js new file mode 100644 index 0000000000000000000000000000000000000000..5d8289065ae96c267e8e9f1051ecbbd0bbfef0ae --- /dev/null +++ b/documentation/sidebars.js @@ -0,0 +1,46 @@ +module.exports = { + defaultSidebar: [ + "index", + { + "Get started": [ + "get-started/what-is-h2o-llm-studio", + "get-started/set-up-llm-studio", + "get-started/llm-studio-performance", + "get-started/llm-studio-flow", + "get-started/core-features", + "get-started/videos", + ], + }, + "concepts", + { + type: "category", + label: "Guide", + items: [ + { + type: "category", + label: "Datasets", + items: [ + "guide/datasets/data-connectors-format", + "guide/datasets/import-dataset", + "guide/datasets/view-dataset", + "guide/datasets/merge-datasets", + ], + }, + { + type: "category", + label: "Experiments", + items: [ + "guide/experiments/experiment-settings", + "guide/experiments/create-an-experiment", + "guide/experiments/view-an-experiment", + "guide/experiments/compare-experiments", + "guide/experiments/export-trained-model", + "guide/experiments/import-to-h2ogpt" + ], + }, + ], + }, + "faqs", + ], +}; + diff --git a/examples/example_oasst2.yaml b/examples/example_oasst2.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b28e453f29d4cf47e3c1f4088af723c8f606b583 --- /dev/null +++ b/examples/example_oasst2.yaml @@ -0,0 +1,99 @@ +architecture: + backbone_dtype: int4 + force_embedding_gradients: false + gradient_checkpointing: true + intermediate_dropout: 0.0 + pretrained: true + pretrained_weights: '' +augmentation: + random_parent_probability: 0.0 + skip_parent_probability: 0.0 + token_mask_probability: 0.0 +dataset: + add_eos_token_to_answer: true + add_eos_token_to_prompt: true + add_eos_token_to_system: true + answer_column: output + chatbot_author: H2O.ai + chatbot_name: h2oGPT + data_sample: 0.01 + data_sample_choice: + - Train + - Validation + limit_chained_samples: false + mask_prompt_labels: 
true + parent_id_column: None + personalize: false + prompt_column: + - instruction + system_column: None + text_answer_separator: <|answer|> + text_prompt_start: <|prompt|> + text_system_start: <|system|> + train_dataframe: examples/data_oasst2/train_full.csv + validation_dataframe: None + validation_size: 0.01 + validation_strategy: automatic +environment: + compile_model: false + find_unused_parameters: false + gpus: + - '0' + huggingface_branch: main + mixed_precision: true + number_of_workers: 8 + seed: -1 + trust_remote_code: true +experiment_name: example_oasst2 +llm_backbone: h2oai/h2o-danube2-1.8b-base +logging: + logger: None + neptune_project: test_org/test_project +output_directory: examples/output_oasst2 +prediction: + batch_size_inference: 0 + do_sample: false + max_length_inference: 256 + max_time: 0.0 + metric: Perplexity + metric_gpt_model: gpt-3.5-turbo-0301 + metric_gpt_template: general + min_length_inference: 1 + num_beams: 1 + num_history: 4 + repetition_penalty: 1.2 + stop_tokens: '' + temperature: 0.3 + top_k: 0 + top_p: 1.0 +problem_type: text_causal_language_modeling +tokenizer: + add_prompt_answer_tokens: false + max_length: 512 + max_length_answer: 256 + max_length_prompt: 256 + padding_quantile: 1.0 + use_fast: true +training: + batch_size: 2 + differential_learning_rate: 1.0e-05 + differential_learning_rate_layers: [] + drop_last_batch: true + epochs: 1 + evaluate_before_training: false + evaluation_epochs: 1.0 + grad_accumulation: 1 + gradient_clip: 0.0 + learning_rate: 0.0001 + lora: true + lora_alpha: 16 + lora_dropout: 0.05 + lora_r: 4 + lora_target_modules: '' + loss_function: TokenAveragedCrossEntropy + optimizer: AdamW + save_best_checkpoint: false + schedule: Cosine + train_validation_data: false + warmup_epochs: 0.0 + weight_decay: 0.0 diff --git a/jenkins/packer/README.md b/jenkins/packer/README.md new file mode 100644 index 0000000000000000000000000000000000000000..0247e92ace12ef2b30aa2a31c19cee55f8c8ff4a --- /dev/null +++ 
b/jenkins/packer/README.md @@ -0,0 +1,8 @@ +# Build H2O-LLM-STUDIO cloud images + +Build H2O-LLM-STUDIO cloud images for AWS, GCP and AZURE. Following base images are used to build the H2O-LLM-STUDIO cloud images, +- AWS Ubuntu 20.04 +- GCP Ubuntu 20.04 +- AZURE Ubuntu 20.04 + +Jenkins Pipeline: http://jenkins.h2o.local:8080/job/H2O-LLM-Studio/job/build-llm-studio-cloud-images/ \ No newline at end of file diff --git a/jenkins/packer/install_llm_studio_ubuntu2004.sh b/jenkins/packer/install_llm_studio_ubuntu2004.sh new file mode 100644 index 0000000000000000000000000000000000000000..5c6827820e046b7cb279b47b6325b2ff66ee1dc3 --- /dev/null +++ b/jenkins/packer/install_llm_studio_ubuntu2004.sh @@ -0,0 +1,133 @@ +#!/bin/bash -e + + +# Install core packages +sudo apt update +sudo apt install -y software-properties-common +sudo add-apt-repository universe -y +sudo apt update +sudo apt -y install curl +sudo apt -y install make + +# Verify make installation +ls /usr/bin/make + +# System installs (Python 3.10) +sudo add-apt-repository ppa:deadsnakes/ppa -y +sudo apt -y install python3.10 +sudo apt-get -y install python3.10-distutils +curl -sS https://bootstrap.pypa.io/get-pip.py | python3.10 + + +#add GPU support +set -eo pipefail +set -x + +wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64/cuda-ubuntu2004.pin +sudo mv cuda-ubuntu2004.pin /etc/apt/preferences.d/cuda-repository-pin-600 +wget https://developer.download.nvidia.com/compute/cuda/11.8.0/local_installers/cuda-repo-ubuntu2004-11-8-local_11.8.0-520.61.05-1_amd64.deb +sudo dpkg -i cuda-repo-ubuntu2004-11-8-local_11.8.0-520.61.05-1_amd64.deb +sudo cp /var/cuda-repo-ubuntu2004-11-8-local/cuda-*-keyring.gpg /usr/share/keyrings/ +sudo apt-get update +sudo apt-get -y install cuda + +distribution=$(. 
/etc/os-release;echo $ID$VERSION_ID) && curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey \ +| sudo apt-key add - && curl -s -L https://nvidia.github.io/nvidia-docker/$distribution/nvidia-docker.list \ +| sudo tee /etc/apt/sources.list.d/nvidia-docker.list + +sudo apt-get -y update +sudo apt-get install -y nvidia-container-runtime +rm cuda-repo-ubuntu2004-*.deb + + +# Clone h2o-llmstudio +git clone https://github.com/h2oai/h2o-llmstudio.git +cd h2o-llmstudio +git checkout "$VERSION" + + +# Create virtual environment (pipenv) +make setup + +# Running application as a service in systemd +cd /etc/systemd/system +sudo chown -R ubuntu:ubuntu . + +cd /etc/systemd/system +printf """ +[Unit] +Description=LLM Studio Service +After=network.target +[Service] +Type=simple +User=ubuntu +WorkingDirectory=/home/ubuntu/h2o-llmstudio +ExecStart=/usr/bin/make llmstudio +Restart=always +[Install] +WantedBy=multi-user.target +""" >> llm_studio.service + + +sudo systemctl daemon-reload +sudo systemctl enable llm_studio.service +sudo systemctl start llm_studio.service + +#Install nginx + +sudo apt update +sudo apt install -y nginx + + +#configure nginx for port forwarding + +cd /etc/nginx/conf.d +sudo chown -R ubuntu:ubuntu . 
+cd $HOME +printf """ +server { + listen 80; + listen [::]:80; + server_name <|_SUBST_PUBLIC_IP|>; # Change this to your domain name + + location / { # Change this if you'd like to server your Gradio app on a different path + proxy_pass http://0.0.0.0:10101/; # Change this if your Gradio app will be running on a different port + proxy_redirect off; + proxy_http_version 1.1; + proxy_set_header Upgrade \$http_upgrade; + proxy_set_header Connection \"upgrade\"; + proxy_set_header Host \$host; + } +} +""" > temp.conf + +printf """ +ip=\$(dig +short myip.opendns.com @resolver1.opendns.com) +sed \"s/<|_SUBST_PUBLIC_IP|>;/\$ip;/g\" /home/ubuntu/temp.conf > /etc/nginx/conf.d/llm.conf +""" > run_nginx.sh + +sudo chmod u+x run_nginx.sh + +cd /etc/systemd/system +sudo chown -R ubuntu:ubuntu . +printf """ +[Unit] +Description=LLM Nginx Server +After=network.target +[Service] +Type=simple +User=ubuntu +WorkingDirectory=/home/ubuntu +ExecStart=bash /home/ubuntu/run_nginx.sh +Restart=always +[Install] +WantedBy=multi-user.target +""" > llm_nginx.service + +sudo systemctl daemon-reload +sudo systemctl enable llm_nginx.service +sudo systemctl restart llm_studio.service +sudo systemctl restart nginx + + + diff --git a/jenkins/packer/llm-studio-aws.json b/jenkins/packer/llm-studio-aws.json new file mode 100644 index 0000000000000000000000000000000000000000..597bee68e2bce5e978c50430738a6fb593061bc3 --- /dev/null +++ b/jenkins/packer/llm-studio-aws.json @@ -0,0 +1,89 @@ +{ + "variables": { + "aws_access_key": "", + "aws_secret_key": "", + "aws_region": "us-east-1", + "aws_vpc_id": "", + "aws_owners": "", + "aws_subnet_id": "", + "aws_security_group_id": "", + "llm_studio_version": "" + }, + "builders": [ + { + "type": "amazon-ebs", + "ami_name": "h2oai-llm-studio-{{user `llm_studio_version`}}", + "instance_type": "g4dn.xlarge", + "ssh_username": "ubuntu", + "tags": + { + "name": "h2oai-llm-studio-{{user `llm_studio_version`}}", + "Owner": "ops@h2o.ai", + "Project": "DevOps", + 
"Department": "Engineering", + "Environment": "Dev", + "Scheduling": "self-managed" + }, + + "run_volume_tags": { + "Name": "h2oai-llm-studio-{{user `llm_studio_version`}}", + "Owner": "ops@h2o.ai", + "Project": "DevOps", + "Department": "Engineering", + "Environment": "Dev", + "Scheduling": "self-managed" + }, + + "run_tags": { + "Name": "h2oai-llm-studio-{{user `llm_studio_version`}}", + "Owner": "ops@h2o.ai", + "Project": "DevOps", + "Department": "Engineering", + "Environment": "Dev", + "Scheduling": "self-managed" + }, + + "access_key": "{{user `aws_access_key`}}", + "secret_key": "{{user `aws_secret_key`}}", + "region": "{{user `aws_region`}}", + "subnet_id": "{{user `aws_subnet_id`}}", + "vpc_id": "{{user `aws_vpc_id`}}", + "security_group_id": "{{user `aws_security_group_id`}}", + "launch_block_device_mappings": [ + { + "device_name": "/dev/sda1", + "volume_size": 512, + "volume_type": "gp3", + "delete_on_termination": true + } + ], + "source_ami_filter": { + "filters": { + "virtualization-type": "hvm", + "name": "ubuntu/images/*ubuntu-focal-20.04-amd64-server-*", + "root-device-type": "ebs" + }, + "owners": ["{{user `aws_owners`}}"], + "most_recent": true + } + } + ], + "post-processors": [ + { + "type": "manifest", + "output": "aws-image-info.json", + "strip_path": true, + "custom_data": { + "base_image": "AWS Ubuntu 20.04", + "llm_studio_version": "{{user `llm_studio_version`}}" + } + } + ], + "provisioners": [ + { + "type": "shell", + "environment_vars": ["VERSION={{user `BRANCH_VERSION`}}"], + "script": "./install_llm_studio_ubuntu2004.sh" + } + ] + } \ No newline at end of file diff --git a/jenkins/packer/llm-studio-azure.json b/jenkins/packer/llm-studio-azure.json new file mode 100644 index 0000000000000000000000000000000000000000..5e05595d5a649f305c5cd2a300e7dcd14d301b80 --- /dev/null +++ b/jenkins/packer/llm-studio-azure.json @@ -0,0 +1,60 @@ +{ + "variables": { + "client_id": "", + "client_secret": "", + "subscription_id": 
"92429150-401a-431f-8955-e69c0c119e68", + "tenant_id": "840229f2-c911-49e6-a73d-5b3a4311835a", + "managed_image_resource_group_name": "H2OIMAGES", + "llm_studio_version": "" + }, + "builders": [ + { + "type": "azure-arm", + "client_id": "{{user `client_id`}}", + "client_secret": "{{user `client_secret`}}", + "subscription_id": "{{user `subscription_id`}}", + "tenant_id": "{{user `tenant_id`}}", + "capture_container_name": "h2ovhdimages", + "capture_name_prefix": "h2oai-llm-studio-{{user `llm_studio_version`}}", + "resource_group_name": "{{user `managed_image_resource_group_name`}}", + "temp_resource_group_name": "Engineering_DevOps_LLM-Studio-Ubuntu", + "storage_account": "h2ovhdimages", + "os_type": "Linux", + "image_publisher": "Canonical", + "image_offer": "0001-com-ubuntu-server-focal", + "image_sku": "20_04-lts", + "os_disk_size_gb": 512, + "azure_tags": { + "dept": "Engineering", + "task": "Image deployment", + "Name": "LLM-STUDIO-CLOUD-IMAGES", + "Owner": "ops@h2o.ai", + "Project": "DevOps", + "Department": "Engineering", + "Environment": "Dev", + "Scheduling": "self-managed" + }, + "location": "East US", + "vm_size": "Standard_DS2_v2", + "ssh_username": "ubuntu" + } + ], + "post-processors": [ + { + "type": "manifest", + "output": "azure-ubuntu-image-info.json", + "strip_path": true, + "custom_data": { + "base_image": "AZURE Ubuntu 20.04", + "llm_studio_version": "{{user `llm_studio_version`}}" + } + } + ], + "provisioners": [ + { + "type": "shell", + "environment_vars": ["VERSION={{user `BRANCH_VERSION`}}"], + "script": "./install_llm_studio_ubuntu2004.sh" + } + ] +} \ No newline at end of file diff --git a/jenkins/packer/llm-studio-cloud-images.groovy b/jenkins/packer/llm-studio-cloud-images.groovy new file mode 100644 index 0000000000000000000000000000000000000000..dac5bd6b4467f5b7f1bbf7ff2dfa80bd1d7c8bf8 --- /dev/null +++ b/jenkins/packer/llm-studio-cloud-images.groovy @@ -0,0 +1,108 @@ +import org.jenkinsci.plugins.pipeline.modeldefinition.Utils + 
+properties( + [ + parameters( + [ + string(name: 'BRANCH_TAG', defaultValue: 'origin/main'), + string(defaultValue: 'v0.1.0', description: 'eg: v0.1.0', name: 'BRANCH_VERSION', trim: true), + booleanParam(name: 'AWS', defaultValue: true, description: 'Make Amazon Machine Image/Not?'), + booleanParam(name: 'GCP', defaultValue: true, description: 'Make GCP Image/Not?'), + booleanParam(name: 'AZURE', defaultValue: true, description: 'Make AZURE Image/Not?'), + string(name: 'LLM_STUDIO_VERSION', description: 'Example: for version 1.10.5 use 1105') + ] + ) + ] +) + +node('docker') { + stage('Init') { + cleanWs() + currentBuild.displayName = "#${BUILD_NUMBER} - Rel:${LLM_STUDIO_VERSION}" + checkout scm + sh('ls -al') + } + + stage('Build Images') { + try { + docker.image('harbor.h2o.ai/opsh2oai/h2oai-packer-build:2').inside { + parallel([ + "AWS Ubuntu 20.04": { + withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', credentialsId: 'jenkins-full-aws-creds'], + string(credentialsId: "AWS_MARKETPLACE_VPC", variable: "aws_vpc_id"), + string(credentialsId: "AWS_MARKETPLACE_OWNERS", variable: "aws_owners"), + string(credentialsId: "AWS_MARKETPLACE_SUBNET", variable: "aws_subnet_id"), + string(credentialsId: "AWS_MARKETPLACE_SG", variable: "aws_security_group_id")]) { + dir('jenkins/packer') { + if (params.AWS) { + sh("packer build \ + -var 'aws_access_key=$AWS_ACCESS_KEY_ID' \ + -var 'aws_secret_key=$AWS_SECRET_ACCESS_KEY' \ + -var 'BRANCH_VERSION=${BRANCH_VERSION}' \ + -var 'llm_studio_version=${LLM_STUDIO_VERSION}' \ + -var 'aws_region=us-east-1' \ + -var 'aws_vpc_id=$aws_vpc_id' \ + -var 'aws_owners=$aws_owners' \ + -var 'aws_subnet_id=$aws_subnet_id' \ + -var 'aws_security_group_id=$aws_security_group_id' \ + llm-studio-aws.json" + ) + archiveArtifacts artifacts: '*-image-info.json' + }else { + Utils.markStageSkippedForConditional('AWS Ubuntu 20.04') + } + } + } + }, + + "GCP Ubuntu 20.04": { + withCredentials([file(credentialsId: 
'GCP_MARKETPLACE_SERVICE_ACCOUNT', variable: 'GCP_ACCOUNT_FILE')]) { + dir('jenkins/packer') { + if (params.GCP) { + sh("packer build \ + -var 'project_id=h2o-gce' \ + -var 'account_file=$GCP_ACCOUNT_FILE' \ + -var 'BRANCH_VERSION=${BRANCH_VERSION}' \ + -var 'llm_studio_version=${LLM_STUDIO_VERSION}' \ + llm-studio-gcp.json" + ) + archiveArtifacts artifacts: '*-image-info.json' + }else { + Utils.markStageSkippedForConditional('GCP Ubuntu 20.04') + } + } + } + }, + + "AZURE Ubuntu 20.04": { + withCredentials([string(credentialsId: "AZURE_MARKETPLACE_CLIENT_ID", variable: "AZURE_CLIENT_ID"), + string(credentialsId: "AZURE_MARKETPLACE_CLIENT_SECRET", variable: "AZURE_CLIENT_SECRET"), + string(credentialsId: "AZURE_MARKETPLACE_SUBSCRIPTION_ID", variable: "AZURE_SUBSCRIPTION_ID"), + string(credentialsId: "AZURE_MARKETPLACE_TENANT_ID", variable: "AZURE_TENANT_ID")]) { + dir('jenkins/packer') { + if (params.AZURE) { + sh("packer build \ + -var 'client_id=$AZURE_CLIENT_ID' \ + -var 'client_secret=$AZURE_CLIENT_SECRET' \ + -var 'managed_image_resource_group_name=H2OIMAGES' \ + -var 'subscription_id=$AZURE_SUBSCRIPTION_ID' \ + -var 'tenant_id=$AZURE_TENANT_ID' \ + -var 'BRANCH_VERSION=${BRANCH_VERSION}' \ + -var 'llm_studio_version=${LLM_STUDIO_VERSION}' \ + llm-studio-azure.json" + ) + archiveArtifacts artifacts: '*-image-info.json' + }else { + Utils.markStageSkippedForConditional('AZURE Ubuntu 20.04') + } + } + } + }, + + ]) + } + } finally { + cleanWs() + } + } +} \ No newline at end of file diff --git a/jenkins/packer/llm-studio-gcp.json b/jenkins/packer/llm-studio-gcp.json new file mode 100644 index 0000000000000000000000000000000000000000..8e89a5ae8fc970ba0b30a1b52f22007edb08c1ce --- /dev/null +++ b/jenkins/packer/llm-studio-gcp.json @@ -0,0 +1,41 @@ +{ + "variables": { + "project_id":"h2o-gce", + "account_file":"", + "llm_studio_version": "" + }, + "builders": [ + { + "type": "googlecompute", + "project_id": "{{user `project_id`}}", + "account_file": "{{user 
`account_file`}}", + "machine_type": "e2-standard-4", + "source_image_family": "ubuntu-2004-lts", + "zone": "us-west1-b", + "image_description": "h2o-llm-studio using packer", + "image_name": "h2oai-llm-studio-{{user `llm_studio_version`}}", + "disk_size": 512, + "disk_type": "pd-ssd", + "ssh_username": "ubuntu" + } + ], + "post-processors": [ + { + "type": "manifest", + "output": "gcp-image-info.json", + "strip_path": true, + "custom_data": { + "base_image": "GCP Ubuntu 20.04", + "llm_studio_version": "{{user `llm_studio_version`}}" + } + } + ], + "provisioners": [ + + { + "type": "shell", + "environment_vars": ["VERSION={{user `BRANCH_VERSION`}}"], + "script": "./install_llm_studio_ubuntu2004.sh" + } + ] + } \ No newline at end of file diff --git a/llm_studio/__init__.py b/llm_studio/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llm_studio/app_utils/__init__.py b/llm_studio/app_utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llm_studio/app_utils/cards.py b/llm_studio/app_utils/cards.py new file mode 100644 index 0000000000000000000000000000000000000000..09e662da82aaecc441bab06efda71958fa1902f0 --- /dev/null +++ b/llm_studio/app_utils/cards.py @@ -0,0 +1,325 @@ +from typing import List, Optional + +from h2o_wave import ui + + +def header_zone() -> ui.Zone: + """Returns the header zone""" + + zone = ui.zone( + "header", + size="80px", + ) + + return zone + + +def navigation_zone() -> ui.Zone: + """Returns the navigation zone""" + + zone = ui.zone( + "navigation", + size="13%", + zones=[ + ui.zone(name="nav", size="99.5%"), + ], + ) + + return zone + + +def card_zones(mode: Optional[str] = "full") -> List[ui.Zone]: + """Specifies for certain modes the layout zones + + Args: + mode: mode for layout zones + + Returns: + List of zones + + """ + + if mode in ["full", "experiment_start"]: + 
zones = [ + header_zone(), + ui.zone( + "body", + size="1", + direction=ui.ZoneDirection.ROW, + zones=[ + navigation_zone(), + ui.zone( + "content_all", + direction=ui.ZoneDirection.COLUMN, + size="87.5%", + zones=[ + ui.zone( + "content", + size="calc(100vh - 160px)", + direction=ui.ZoneDirection.ROW, + ), + ui.zone("footer", size="80px"), + ], + ), + ], + ), + ] + elif mode == "error": + zones = [ + header_zone(), + ui.zone( + "body", + size="1", + direction=ui.ZoneDirection.ROW, + zones=[ + navigation_zone(), + ui.zone( + "content_all", + direction=ui.ZoneDirection.COLUMN, + size="87.5%", + zones=[ + ui.zone( + "content", + size="calc(100vh - 80px)", + direction=ui.ZoneDirection.ROW, + ), + ], + ), + ], + ), + ] + + elif mode == "home": + zones = [ + header_zone(), + ui.zone( + "body", + size="calc(100vh - 80px)", + direction=ui.ZoneDirection.ROW, + zones=[ + navigation_zone(), + ui.zone( + "content_all", + direction=ui.ZoneDirection.COLUMN, + size="87.5%", + zones=[ + ui.zone( + "content", + size="370px", + direction=ui.ZoneDirection.ROW, + ), + ui.zone( + "expander", + size="0", + direction=ui.ZoneDirection.ROW, + ), + ui.zone( + "tables", + size="0", + direction=ui.ZoneDirection.ROW, + zones=[ + ui.zone(name="datasets", size="40%"), + ui.zone(name="experiments", size="60%"), + ], + ), + ], + ), + ], + ), + ] + + elif mode in [ + "experiment/display/charts", + "experiment/compare/charts", + ]: + zones = [ + header_zone(), + ui.zone( + "body", + size="1", + direction=ui.ZoneDirection.ROW, + zones=[ + navigation_zone(), + ui.zone( + "content_all", + direction=ui.ZoneDirection.COLUMN, + size="87.5%", + zones=[ + ui.zone("nav2", size="60px"), + ui.zone( + "first", + size="calc((100vh - 220px)*0.5)", + direction=ui.ZoneDirection.ROW, + ), + ui.zone( + "second", + size="calc((100vh - 220px)*0.5)", + direction=ui.ZoneDirection.ROW, + ), + ui.zone("footer", size="80px"), + ], + ), + ], + ), + ] + + elif mode in [ + "experiment/display/chat", + ]: + zones = [ + 
header_zone(), + ui.zone( + "body", + size="1", + direction=ui.ZoneDirection.ROW, + zones=[ + navigation_zone(), + ui.zone( + "content_all", + direction=ui.ZoneDirection.COLUMN, + size="87.5%", + zones=[ + ui.zone("nav2", size="60px"), + ui.zone( + "first", + size="calc((100vh - 220px)*0.65)", + direction=ui.ZoneDirection.ROW, + ), + ui.zone( + "second", + size="calc((100vh - 220px)*0.35)", + direction=ui.ZoneDirection.ROW, + ), + ui.zone("footer", size="80px"), + ], + ), + ], + ), + ] + elif mode in ["experiment/display/summary"]: + zones = [ + header_zone(), + ui.zone( + "body", + size="1", + direction=ui.ZoneDirection.ROW, + zones=[ + navigation_zone(), + ui.zone( + "content_all", + direction=ui.ZoneDirection.COLUMN, + size="87.5%", + zones=[ + ui.zone("nav2", size="60px"), + ui.zone( + "first", + size="calc(0.25*(100vh - 220px))", + direction=ui.ZoneDirection.ROW, + ), + ui.zone( + "second", + size="calc(0.25*(100vh - 220px))", + direction=ui.ZoneDirection.ROW, + ), + ui.zone( + "third", + size="calc(0.5*(100vh - 220px))", + direction=ui.ZoneDirection.ROW, + ), + ui.zone("footer", size="80px"), + ], + ), + ], + ), + ] + elif mode in ["dataset/display/statistics"]: + zones = [ + header_zone(), + ui.zone( + "body", + size="1", + direction=ui.ZoneDirection.ROW, + zones=[ + navigation_zone(), + ui.zone( + "content_all", + direction=ui.ZoneDirection.COLUMN, + size="87.5%", + zones=[ + ui.zone("nav2", size="60px"), + ui.zone( + "first", + size="calc(0.5*(100vh - 220px))", + direction=ui.ZoneDirection.ROW, + ), + ui.zone( + "second", + size="calc(0.5*(100vh - 220px))", + direction=ui.ZoneDirection.ROW, + ), + ui.zone( + "third", + size="calc(0.5*(100vh - 220px))", + direction=ui.ZoneDirection.ROW, + ), + ui.zone("footer", size="80px"), + ], + ), + ], + ), + ] + elif mode in [ + "experiment/compare/config", + "experiment/display/train_data_insights", + "experiment/display/validation_prediction_insights", + "experiment/display/config", + "experiment/display/logs", + 
"dataset/display/data", + "dataset/display/visualization", + "dataset/display/summary", + ]: + zones = [ + header_zone(), + ui.zone( + "body", + size="1", + direction=ui.ZoneDirection.ROW, + zones=[ + navigation_zone(), + ui.zone( + "content_all", + direction=ui.ZoneDirection.COLUMN, + size="87.5%", + zones=[ + ui.zone("nav2", size="60px"), + ui.zone( + "first", + size="calc(100vh - 220px)", + direction=ui.ZoneDirection.ROW, + ), + ui.zone("footer", size="80px"), + ], + ), + ], + ), + ] + else: + raise ValueError(f"Unknown mode: {mode}") + return zones + + +def card_wait(msg: str, box: str) -> ui.FormCard: + """Return a form card for displaying waiting status + + Args: + msg: message to display + box: box for card + + Returns: + Form card + + """ + + card = ui.form_card(box=box, items=[ui.progress(label=msg)]) + + return card diff --git a/llm_studio/app_utils/config.py b/llm_studio/app_utils/config.py new file mode 100644 index 0000000000000000000000000000000000000000..a01a6512c432bf8dc29f2eaae3a391ca0ba440f0 --- /dev/null +++ b/llm_studio/app_utils/config.py @@ -0,0 +1,124 @@ +import os +import socket +from types import SimpleNamespace + + +def get_size(x): + try: + if x.endswith("TB"): + return float(x.replace("TB", "")) * (2**40) + if x.endswith("GB"): + return float(x.replace("GB", "")) * (2**30) + if x.endswith("MB"): + return float(x.replace("MB", "")) * (2**20) + if x.endswith("KB"): + return float(x.replace("KB", "")) * (2**10) + if x.endswith("B"): + return float(x.replace("B", "")) + return 2**31 + except Exception: + return 2**31 + + +version = "1.6.0-dev" + +try: + s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + s.connect(("8.8.8.8", 80)) + host = s.getsockname()[0] + s.close() +except OSError: + host = "localhost" + +port = "10101" +url = f"http://{host}:{port}/" + + +default_cfg = { + "url": url, + "name": "H2O LLM Studio", + "version": version, + "github": "https://github.com/h2oai/h2o-llmstudio", + "min_experiment_disk_space": get_size( + 
os.getenv("MIN_DISK_SPACE_FOR_EXPERIMENTS", "2GB") + ), + "allowed_file_extensions": os.getenv( + "ALLOWED_FILE_EXTENSIONS", ".zip,.csv,.pq,.parquet" + ).split(","), + "llm_studio_workdir": f"{os.getenv('H2O_LLM_STUDIO_WORKDIR', os.getcwd())}", + "heap_mode": os.getenv("H2O_LLM_STUDIO_ENABLE_HEAP", "False") == "True", + "data_folder": "data/", + "output_folder": "output/", + "s3_bucket": f"{os.getenv('AWS_BUCKET', 'bucket_name')}", + "s3_filename": os.path.join( + f"{os.getenv('AWS_BUCKET', 'bucket_name')}", + "default.zip", + ), + "cfg_file": "text_causal_language_modeling_config", + "start_page": "home", + "kaggle_command": ("kaggle competitions download -c " "dataset"), + "problem_types": [ + "text_causal_language_modeling_config", + "text_dpo_modeling_config", + "text_sequence_to_sequence_modeling_config", + "text_causal_classification_modeling_config", + ], + "problem_categories": ["text"], + "dataset_keys": [ + "train_dataframe", + "validation_dataframe", + "prompt_column", + "answer_column", + "parent_id_column", + ], + "dataset_trigger_keys": [ + "train_dataframe", + "validation_dataframe", + ], + "dataset_extra_keys": [ + "validation_strategy", + "data_sample", + "data_sample_choice", + ], + "dataset_folder_keys": [ + "train_dataframe", + "validation_dataframe", + ], + "user_settings": { + "theme_dark": True, + "credential_saver": ".env File", + "default_aws_bucket_name": f"{os.getenv('AWS_BUCKET', 'bucket_name')}", + "default_aws_access_key": os.getenv("AWS_ACCESS_KEY_ID", ""), + "default_aws_secret_key": os.getenv("AWS_SECRET_ACCESS_KEY", ""), + "default_azure_conn_string": "", + "default_azure_container": "", + "default_kaggle_username": "", + "default_kaggle_secret_key": "", + "set_max_epochs": 50, + "set_max_batch_size": 256, + "set_max_gradient_clip": 10, + "set_max_lora_r": 256, + "set_max_lora_alpha": 256, + "gpu_used_for_chat": 1, + "default_number_of_workers": 8, + "default_logger": "None", + "default_neptune_project": 
os.getenv("NEPTUNE_PROJECT", ""), + "default_neptune_api_token": os.getenv("NEPTUNE_API_TOKEN", ""), + "default_huggingface_api_token": os.getenv("HUGGINGFACE_TOKEN", ""), + "default_openai_azure": os.getenv("OPENAI_API_TYPE", "open_ai") == "azure", + "default_openai_api_token": os.getenv("OPENAI_API_KEY", ""), + "default_openai_api_base": os.getenv( + "OPENAI_API_BASE", "https://example-endpoint.openai.azure.com" + ), + "default_openai_api_deployment_id": os.getenv( + "OPENAI_API_DEPLOYMENT_ID", "deployment-name" + ), + "default_openai_api_version": os.getenv("OPENAI_API_VERSION", "2023-05-15"), + "default_gpt_eval_max": os.getenv("GPT_EVAL_MAX", 100), + "default_safe_serialization": True, + "delete_dialogs": True, + "chart_plot_max_points": 1000, + }, +} + +default_cfg = SimpleNamespace(**default_cfg) diff --git a/llm_studio/app_utils/db.py b/llm_studio/app_utils/db.py new file mode 100644 index 0000000000000000000000000000000000000000..bb41e9622b5bdebef21b53ca376d0ea5493c96c8 --- /dev/null +++ b/llm_studio/app_utils/db.py @@ -0,0 +1,159 @@ +from typing import Optional + +import pandas as pd +from pandas.core.frame import DataFrame +from sqlalchemy import Integer, String, create_engine +from sqlalchemy.orm import DeclarativeBase, Mapped, Session, mapped_column + + +class Base(DeclarativeBase): + pass + + +class Dataset(Base): + """Dataset table""" + + __tablename__ = "datasets" + + id: Mapped[int] = mapped_column("id", Integer, autoincrement=True, primary_key=True) + name: Mapped[str] = mapped_column("name", String, unique=True) + path: Mapped[str] = mapped_column("path", String) + config_file: Mapped[str] = mapped_column("config_file", String) + train_rows: Mapped[int] = mapped_column("train_rows", Integer) + validation_rows: Mapped[Optional[int]] = mapped_column( + "validation_rows", Integer, nullable=True + ) + + +class Experiment(Base): + """Experiment table""" + + __tablename__ = "experiments" + + id: Mapped[int] = mapped_column("id", Integer, 
class Database:
    """Class for managing the application's SQLite database.

    Owns a single engine and a single long-lived session; every mutating
    method commits immediately.
    """

    def __init__(self, path_db: str) -> None:
        """Initialize database

        Args:
            path_db: path to sqlite database file
        """
        self.__engine__ = create_engine(f"sqlite:///{path_db}")
        # Create any tables that do not exist yet.
        Base.metadata.create_all(self.__engine__)
        self._session = Session(self.__engine__)

    def add_dataset(self, dataset: Dataset) -> None:
        """Add a dataset to the table

        Args:
            dataset: dataset to add
        """
        self._session.add(dataset)
        self._session.commit()

    def delete_dataset(self, id: int) -> None:
        """Delete a dataset from the table

        Args:
            id: dataset id to delete
        """
        # Session.get is the SQLAlchemy 2.0 API; Query.get is legacy.
        dataset = self._session.get(Dataset, int(id))
        self._session.delete(dataset)
        self._session.commit()

    def get_dataset(self, id: int) -> Dataset:
        """Return dataset given an id

        Args:
            id: dataset id to return

        Returns:
            Dataset with given id
        """
        return self._session.get(Dataset, int(id))

    def get_datasets_df(self) -> DataFrame:
        """Return dataframe containing all datasets

        Returns:
            All datasets, newest (highest id) first
        """
        datasets = pd.read_sql(self._session.query(Dataset).statement, self.__engine__)
        return datasets.sort_values("id", ascending=False)

    def add_experiment(self, experiment: Experiment) -> None:
        """Add an experiment to the table

        Args:
            experiment: experiment to add
        """
        self._session.add(experiment)
        self._session.commit()

    def delete_experiment(self, id: int) -> None:
        """Delete an experiment from the table

        Args:
            id: experiment id to delete
        """
        experiment = self._session.get(Experiment, int(id))
        self._session.delete(experiment)
        self._session.commit()

    def get_experiment(self, id: int) -> Experiment:
        """Return experiment given an id

        Args:
            id: experiment id to return

        Returns:
            Experiment with given id
        """
        return self._session.get(Experiment, int(id))

    def get_experiments_df(self) -> DataFrame:
        """Return dataframe containing all experiments

        Returns:
            All experiments, newest (highest id) first
        """
        experiments = pd.read_sql(
            self._session.query(Experiment).statement, self.__engine__
        )
        return experiments.sort_values("id", ascending=False)

    def rename_experiment(self, id: int, new_name: str, new_path: str) -> None:
        """Rename an experiment and update its output path.

        Args:
            id: experiment id
            new_name: new experiment name
            new_path: new experiment output path
        """
        experiment = self.get_experiment(id)
        experiment.name = new_name
        experiment.path = new_path
        self._session.commit()

    def update(self) -> None:
        """Commit any pending changes made on objects held by the session."""
        self._session.commit()
= df_prompter.loc[row.parent_id] + inputs.append(input.text) + parent_ids.append(input.parent_id) + + df_assistant["instruction"] = inputs + df_assistant["parent_id"] = parent_ids + + df_assistant = df_assistant[ + ["instruction", "output", "message_id", "parent_id", "lang", "rank"] + ].rename(columns={"message_id": "id"}) + + df_assistant[(df_assistant["rank"] == 0.0) & (df_assistant["lang"] == "en")][ + ["instruction", "output", "id", "parent_id"] + ].to_parquet(os.path.join(path, "train_full.pq"), index=False) + + df_assistant[df_assistant["lang"] == "en"][ + ["instruction", "output", "id", "parent_id"] + ].to_parquet(os.path.join(path, "train_full_allrank.pq"), index=False) + + df_assistant[df_assistant["rank"] == 0.0][ + ["instruction", "output", "id", "parent_id"] + ].to_parquet(os.path.join(path, "train_full_multilang.pq"), index=False) + + df_assistant[["instruction", "output", "id", "parent_id"]].to_parquet( + os.path.join(path, "train_full_multilang_allrank.pq"), index=False + ) + + return df_assistant[(df_assistant["rank"] == 0.0) & (df_assistant["lang"] == "en")] + + +def prepare_default_dataset_dpo_modeling() -> pd.DataFrame: + df = load_dataset("Intel/orca_dpo_pairs")["train"].to_pandas() + return df + + +def extract_anthropic_prompt(prompt_and_response): + """Extract the anthropic prompt from a prompt and response pair.""" + search_term = "\n\nAssistant:" + search_term_idx = prompt_and_response.rfind(search_term) + assert ( + search_term_idx != -1 + ), f"Prompt and response does not contain '{search_term}'" + return prompt_and_response[: search_term_idx + len(search_term)] + + +def _parse_row(prompt_and_response): + """Extract the anthropic prompt from a prompt and response pair.""" + search_term = "\n\nAssistant:" + search_term_idx = prompt_and_response["chosen"].rfind(search_term) + assert ( + search_term_idx != -1 + ), f"Prompt and response does not contain '{search_term}'" + prompt = prompt_and_response["chosen"][: search_term_idx + 
len(search_term)] + + chosen_response = prompt_and_response["chosen"][len(prompt) :] + rejected_response = prompt_and_response["rejected"][len(prompt) :] + + return prompt, chosen_response, rejected_response + + +def _split_up_prompt(prompt): + human_texts = re.findall( + r"\n\nHuman:(.*?)(?=(\n\nAssistant:|$))", prompt, flags=re.DOTALL + ) + assistant_texts = re.findall( + r"\n\nAssistant:(.*?)(?=(\n\nHuman:|$))", prompt, flags=re.DOTALL + ) + human_texts = [text[0].strip() for text in human_texts] + assistant_texts = [text[0].strip() for text in assistant_texts] + + assert len(human_texts) == len(assistant_texts), prompt + dialogue = list(zip(human_texts, assistant_texts)) + return dialogue + + +def prepare_hh_dpo_modeling(split: str) -> pd.DataFrame: + """ + Adapted from + https://github.com/eric-mitchell/direct-preference-optimization/blob/main/preference_datasets.py + """ + dataset = load_dataset("Anthropic/hh-rlhf", split=split) + rnd = random.Random() + rnd.seed(123) + dfs = [] + for row in tqdm(dataset): + prompt, chosen_response, rejected_response = _parse_row(row) + if len(rejected_response) == 0: + # remove rejected answers that are empty + continue + + parent_uuid = None + parsed_texts = [] + for human_text, assistant_text in _split_up_prompt(prompt): + random_uuid = str(uuid.UUID(int=rnd.getrandbits(128), version=4)) + parsed_texts += [ + [human_text, assistant_text, random_uuid, parent_uuid, None, None] + ] + parent_uuid = random_uuid + + parsed_texts[-1][-2] = chosen_response + parsed_texts[-1][-1] = rejected_response + df = pd.DataFrame( + parsed_texts, + columns=[ + "instruction", + "output", + "id", + "parent_id", + "chosen_response", + "rejected_response", + ], + ) + dfs.append(df) + df = pd.concat(dfs).reset_index(drop=True) + # merge output into chosen and rejected response + df["chosen_response"] = df["chosen_response"].fillna(df["output"]) + df["rejected_response"] = df["rejected_response"].fillna(df["output"]) + del df["output"] + return df 
diff --git a/llm_studio/app_utils/handlers.py b/llm_studio/app_utils/handlers.py new file mode 100644 index 0000000000000000000000000000000000000000..2bbb76bf4ef12111c6ab65c1217eb62ca018deea --- /dev/null +++ b/llm_studio/app_utils/handlers.py
import gc
import logging
from typing import List

import torch
from h2o_wave import Q

from llm_studio.app_utils.sections.chat import chat_tab
from llm_studio.app_utils.sections.chat_update import chat_update
from llm_studio.app_utils.sections.common import delete_dialog
from llm_studio.app_utils.sections.dataset import (
    dataset_delete_current_datasets,
    dataset_delete_single,
    dataset_display,
    dataset_edit,
    dataset_import,
    dataset_import_uploaded_file,
    dataset_list,
    dataset_list_delete,
    dataset_merge,
    dataset_newexperiment,
)
from llm_studio.app_utils.sections.experiment import (
    experiment_delete,
    experiment_display,
    experiment_download_logs,
    experiment_download_model,
    experiment_download_predictions,
    experiment_list,
    experiment_push_to_huggingface_dialog,
    experiment_rename_ui_workflow,
    experiment_run,
    experiment_start,
    experiment_stop,
)
from llm_studio.app_utils.sections.home import home
from llm_studio.app_utils.sections.project import (
    current_experiment_compare,
    current_experiment_list_compare,
    current_experiment_list_delete,
    current_experiment_list_stop,
    experiment_rename_action_workflow,
    list_current_experiments,
)
from llm_studio.app_utils.sections.settings import settings
from llm_studio.app_utils.setting_utils import (
    load_default_user_settings,
    load_user_settings_and_secrets,
    save_user_settings_and_secrets,
)
from llm_studio.app_utils.utils import add_model_type
from llm_studio.app_utils.wave_utils import report_error, wave_utils_handle_error

logger = logging.getLogger(__name__)


async def handle(q: Q) -> None:
    """Handles all requests in application and calls according functions.

    Central UI router: dispatches on ``q.args.__wave_submission_name__``
    (the name of the Wave UI element that triggered the request) to the
    matching section handler. Branch order matters — several route names
    share prefixes and the first matching branch wins, so do not reorder.
    """

    # logger.info(f"args: {q.args}")
    # logger.info(f"events: {q.events}")

    # Any request that is not a chat interaction releases the cached chat
    # model / tokenizer / config so GPU memory is freed for other work.
    if not (
        q.args.__wave_submission_name__ == "experiment/display/chat/chatbot"
        or q.args.__wave_submission_name__ == "experiment/display/chat/clear_history"
    ):
        if "experiment/display/chat/cfg" in q.client:
            del q.client["experiment/display/chat/cfg"]
        if "experiment/display/chat/model" in q.client:
            del q.client["experiment/display/chat/model"]
        if "experiment/display/chat/tokenizer" in q.client:
            del q.client["experiment/display/chat/tokenizer"]
        torch.cuda.empty_cache()
        gc.collect()

    try:
        # ----- top-level navigation and settings -----
        if q.args.__wave_submission_name__ == "home":
            await home(q)
        elif q.args.__wave_submission_name__ == "settings":
            await settings(q)
        elif q.args.__wave_submission_name__ == "save_settings":
            logger.info("Saving user settings")
            await save_user_settings_and_secrets(q)
            await settings(q)
        elif q.args.__wave_submission_name__ == "load_settings":
            load_user_settings_and_secrets(q)
            await settings(q)
        elif q.args.__wave_submission_name__ == "restore_default_settings":
            load_default_user_settings(q)
            await settings(q)

        elif q.args.__wave_submission_name__ == "report_error":
            await report_error(q)

        # ----- dataset listing / deletion -----
        elif q.args.__wave_submission_name__ == "dataset/import":
            await dataset_import(q, step=1)
        elif q.args.__wave_submission_name__ == "dataset/list":
            await dataset_list(q)
        elif q.args.__wave_submission_name__ == "dataset/list/delete/abort":
            # leaving multi-select mode resets the table to single selection
            q.page["dataset/list"].items[0].table.multiple = False
            await dataset_list(q, reset=True)
        elif q.args.__wave_submission_name__ == "dataset/list/abort":
            q.page["dataset/list"].items[0].table.multiple = False
            await dataset_list(q, reset=True)
        elif q.args.__wave_submission_name__ == "dataset/list/delete":
            await dataset_list_delete(q)
        elif q.args.__wave_submission_name__ == "dataset/delete/single":
            # row index was stashed by the confirmation dialog; map it to the db id
            dataset_id = q.client["dataset/delete/single/id"]
            dataset_id = q.client["dataset/list/df_datasets"]["id"].iloc[dataset_id]
            await dataset_delete_single(q, int(dataset_id))
        elif q.args.__wave_submission_name__ == "dataset/delete/dialog/single":
            dataset_id = int(q.args["dataset/delete/dialog/single"])
            q.client["dataset/delete/single/id"] = dataset_id
            name = q.client["dataset/list/df_datasets"]["name"].iloc[dataset_id]

            # "delete_dialogs" is a user setting: confirm first, or delete directly
            if q.client["delete_dialogs"]:
                await delete_dialog(q, [name], "dataset/delete/single", "dataset")
            else:
                dataset_id = q.client["dataset/list/df_datasets"]["id"].iloc[dataset_id]
                await dataset_delete_single(q, int(dataset_id))

        elif q.args["dataset/delete/dialog"]:
            names = list(
                q.client["dataset/list/df_datasets"]["name"].iloc[
                    list(map(int, q.client["dataset/list/table"]))
                ]
            )

            if not names:
                return

            if q.client["delete_dialogs"]:
                await delete_dialog(q, names, "dataset/delete", "dataset")
            else:
                await dataset_delete_current_datasets(q)

        elif q.args.__wave_submission_name__ == "dataset/delete":
            await dataset_delete_current_datasets(q)
        elif q.args.__wave_submission_name__ == "dataset/edit":
            if q.client["dataset/list/df_datasets"] is not None:
                dataset_id = int(q.args["dataset/edit"])
                dataset_id = q.client["dataset/list/df_datasets"]["id"].iloc[dataset_id]
                await dataset_edit(q, int(dataset_id))
        elif q.args.__wave_submission_name__ == "dataset/newexperiment":
            if q.client["dataset/list/df_datasets"] is not None:
                dataset_id = int(q.args["dataset/newexperiment"])
                dataset_id = q.client["dataset/list/df_datasets"]["id"].iloc[dataset_id]
                await dataset_newexperiment(q, int(dataset_id))
        elif q.args.__wave_submission_name__ == "dataset/newexperiment/from_current":
            idx = q.client["dataset/display/id"]
            dataset_id = q.client["dataset/list/df_datasets"]["id"].iloc[idx]
            await dataset_newexperiment(q, dataset_id)

        elif q.args.__wave_submission_name__ == "dataset/list/table":
            q.client["dataset/display/id"] = int(q.args["dataset/list/table"][0])
            await dataset_display(q)

        # ----- dataset detail tabs (all re-render the same display) -----
        elif q.args.__wave_submission_name__ == "dataset/display/visualization":
            await dataset_display(q)
        elif q.args.__wave_submission_name__ == "dataset/display/data":
            await dataset_display(q)
        elif q.args.__wave_submission_name__ == "dataset/display/statistics":
            await dataset_display(q)
        elif q.args["dataset/display/summary"]:
            await dataset_display(q)

        # ----- experiment creation -----
        elif (
            q.args.__wave_submission_name__ == "experiment/start/run"
            or q.args.__wave_submission_name__ == "experiment/start/error/proceed"
        ):
            # add model type to cfg file name here
            q.client["experiment/start/cfg_file"] = add_model_type(
                q.client["experiment/start/cfg_file"],
                q.client["experiment/start/cfg_sub"],
            )
            q.client.delete_cards.add("experiment/start")
            await experiment_run(q, pre="experiment/start")
            q.client["experiment/list/mode"] = "train"

        elif (
            q.args.__wave_submission_name__ == "experiment/start_experiment"
            or q.args.__wave_submission_name__ == "experiment/list/new"
        ):
            # "new experiment from existing one": seed the wizard with its config
            if q.client["experiment/list/df_experiments"] is not None:
                selected_idx = int(q.args["experiment/list/new"])
                experiment_id = q.client["experiment/list/df_experiments"]["id"].iloc[
                    selected_idx
                ]

                q.client["experiment/start/cfg_category"] = "experiment"
                q.client["experiment/start/cfg_file"] = "experiment"
                q.client["experiment/start/cfg_experiment"] = str(experiment_id)

                await experiment_start(q)
        elif q.args.__wave_submission_name__ == "experiment/start":
            q.client["experiment/start/cfg_category"] = None
            q.client["experiment/start/cfg_file"] = None
            datasets_df = q.client.app_db.get_datasets_df()
            # an experiment requires at least one imported dataset
            if datasets_df.shape[0] == 0:
                info = "Import dataset before you create an experiment. "
                await dataset_import(q, step=1, info=info)
            else:
                await experiment_start(q)

        elif q.args.__wave_submission_name__ == "experiment/display/download_logs":
            await experiment_download_logs(q)
        elif (
            q.args.__wave_submission_name__ == "experiment/display/download_predictions"
        ):
            await experiment_download_predictions(q)

        # ----- experiment listing -----
        elif q.args.__wave_submission_name__ == "experiment/list":
            q.client["experiment/list/mode"] = "train"
            await experiment_list(q)
        elif q.args.__wave_submission_name__ == "experiment/list/current":
            await list_current_experiments(q)
        elif q.args.__wave_submission_name__ == "experiment/list/current/noreset":
            await list_current_experiments(q, reset=False)
        elif q.args.__wave_submission_name__ == "experiment/list/refresh":
            await experiment_list(q)
        elif q.args.__wave_submission_name__ == "experiment/list/abort":
            await list_current_experiments(q)
        elif q.args.__wave_submission_name__ == "experiment/list/stop":
            await current_experiment_list_stop(q)
        elif q.args.__wave_submission_name__ == "experiment/list/delete":
            await current_experiment_list_delete(q)
        elif q.args.__wave_submission_name__ == "experiment/list/rename":
            await experiment_rename_ui_workflow(q)
        elif q.args.__wave_submission_name__ == "experiment/list/compare":
            await current_experiment_list_compare(q)

        # ----- experiment stop / delete (single-row and multi-select paths) -----
        elif (
            q.args.__wave_submission_name__ == "experiment/stop"
            or q.args.__wave_submission_name__ == "experiment/list/stop/table"
        ):
            if q.args["experiment/list/stop/table"]:
                idx = int(q.args["experiment/list/stop/table"])
                selected_id = q.client["experiment/list/df_experiments"]["id"].iloc[idx]
                experiment_ids = [selected_id]
            else:
                selected_idxs = q.client["experiment/list/table"]
                experiment_ids = list(
                    q.client["experiment/list/df_experiments"]["id"].iloc[
                        list(map(int, selected_idxs))
                    ]
                )

            await experiment_stop(q, experiment_ids)
            await list_current_experiments(q)
        elif q.args.__wave_submission_name__ == "experiment/list/delete/table/dialog":
            idx = int(q.args["experiment/list/delete/table/dialog"])
            names = [q.client["experiment/list/df_experiments"]["name"].iloc[idx]]
            selected_id = q.client["experiment/list/df_experiments"]["id"].iloc[idx]
            q.client["experiment/delete/single/id"] = selected_id
            if q.client["delete_dialogs"]:
                await delete_dialog(
                    q, names, "experiment/list/delete/table", "experiment"
                )
            else:
                await experiment_delete_all_artifacts(q, [selected_id])

        elif q.args.__wave_submission_name__ == "experiment/delete/dialog":
            selected_idxs = q.client["experiment/list/table"]
            exp_df = q.client["experiment/list/df_experiments"]
            names = list(exp_df["name"].iloc[list(map(int, selected_idxs))])

            if not names:
                return

            if q.client["delete_dialogs"]:
                await delete_dialog(q, names, "experiment/delete", "experiment")
            else:
                experiment_ids = list(exp_df["id"].iloc[list(map(int, selected_idxs))])
                await experiment_delete_all_artifacts(q, experiment_ids)

        elif (
            q.args.__wave_submission_name__ == "experiment/delete"
            or q.args.__wave_submission_name__ == "experiment/list/delete/table"
        ):
            if q.args["experiment/list/delete/table"]:
                selected_id = q.client["experiment/delete/single/id"]
                experiment_ids = [selected_id]
            else:
                selected_idxs = q.client["experiment/list/table"]
                exp_df = q.client["experiment/list/df_experiments"]
                experiment_ids = list(exp_df["id"].iloc[list(map(int, selected_idxs))])

            await experiment_delete_all_artifacts(q, experiment_ids)

        elif q.args.__wave_submission_name__ == "experiment/rename/action":
            await experiment_rename_action_workflow(q)

        # ----- experiment comparison -----
        elif q.args.__wave_submission_name__ == "experiment/compare":
            await current_experiment_compare(q)
        elif q.args.__wave_submission_name__ == "experiment/compare/charts":
            await current_experiment_compare(q)
        elif q.args.__wave_submission_name__ == "experiment/compare/config":
            await current_experiment_compare(q)
        elif q.args.__wave_submission_name__ == "experiment/compare/diff_toggle":
            q.client["experiment/compare/diff_toggle"] = q.args[
                "experiment/compare/diff_toggle"
            ]
            await current_experiment_compare(q)

        # ----- experiment detail view -----
        elif q.args.__wave_submission_name__ == "experiment/list/table":
            q.client["experiment/display/id"] = int(q.args["experiment/list/table"][0])
            # reset cached artifact paths and tab so the display reloads fresh
            q.client["experiment/display/logs_path"] = None
            q.client["experiment/display/preds_path"] = None
            q.client["experiment/display/tab"] = None
            await experiment_display(q)

        elif q.args.__wave_submission_name__ == "experiment/display/refresh":
            await experiment_display(q)

        elif q.args.__wave_submission_name__ == "experiment/display/charts":
            await experiment_display(q)
        elif q.args.__wave_submission_name__ == "experiment/display/summary":
            await experiment_display(q)
        elif (
            q.args.__wave_submission_name__ == "experiment/display/train_data_insights"
        ):
            await experiment_display(q)
        elif (
            q.args.__wave_submission_name__
            == "experiment/display/validation_prediction_insights"
        ):
            await experiment_display(q)
        elif (
            q.args.__wave_submission_name__ == "experiment/display/push_to_huggingface"
        ):
            await experiment_push_to_huggingface_dialog(q)
        elif q.args.__wave_submission_name__ == "experiment/display/download_model":
            await experiment_download_model(q)
        elif (
            q.args.__wave_submission_name__
            == "experiment/display/push_to_huggingface_submit"
        ):
            await experiment_push_to_huggingface_dialog(q)

        elif q.args.__wave_submission_name__ == "experiment/display/config":
            await experiment_display(q)
        elif q.args.__wave_submission_name__ == "experiment/display/logs":
            await experiment_display(q)
        elif q.args.__wave_submission_name__ == "experiment/display/chat":
            await experiment_display(q)

        elif q.args.__wave_submission_name__ == "experiment/display/chat/chatbot":
            await chat_update(q)
        elif q.args.__wave_submission_name__ == "experiment/display/chat/clear_history":
            await chat_tab(q, load_model=False)

        # ----- dataset import wizard steps -----
        elif q.args.__wave_submission_name__ == "dataset/import/local_upload":
            await dataset_import_uploaded_file(q)
        elif q.args.__wave_submission_name__ == "dataset/import/local_path_list":
            await dataset_import(q, step=1)
        elif q.args.__wave_submission_name__ == "dataset/import/2":
            await dataset_import(q, step=2)
        elif q.args.__wave_submission_name__ == "dataset/import/3":
            await dataset_import(q, step=3)
        elif q.args.__wave_submission_name__ == "dataset/import/3/edit":
            await dataset_import(q, step=3, edit=True)
        elif q.args.__wave_submission_name__ == "dataset/import/4":
            await dataset_import(q, step=4)
        elif q.args.__wave_submission_name__ == "dataset/import/4/edit":
            await dataset_import(q, step=4, edit=True)
        elif q.args.__wave_submission_name__ == "dataset/import/6":
            await dataset_import(q, step=6)
        elif (
            q.args.__wave_submission_name__ == "dataset/import/source"
            and not q.args["dataset/list"]
        ):
            await dataset_import(q, step=1)
        elif q.args.__wave_submission_name__ == "dataset/merge":
            await dataset_merge(q, step=1)
        elif q.args.__wave_submission_name__ == "dataset/merge/action":
            await dataset_merge(q, step=2)

        elif q.args.__wave_submission_name__ == "dataset/import/cfg_file":
            await dataset_import(q, step=3)

        # leave at the end of dataset import routing,
        # would also be triggered if user clicks on
        # a continue button in the dataset import wizard
        elif q.args.__wave_submission_name__ == "dataset/import/cfg/train_dataframe":
            await dataset_import(q, step=3)

        elif q.args.__wave_submission_name__ == "experiment/start/cfg_file":
            q.client["experiment/start/cfg_file"] = q.args["experiment/start/cfg_file"]
            await experiment_start(q)
        elif q.args.__wave_submission_name__ == "experiment/start/dataset":
            await experiment_start(q)

        # fall-through: any other submission while the start wizard is active
        elif q.client["nav/active"] == "experiment/start":
            await experiment_start(q)

    except Exception as unknown_exception:
        # catch-all UI boundary: log with traceback and show an error dialog
        logger.error("Unknown exception", exc_info=True)
        await wave_utils_handle_error(
            q,
            error=unknown_exception,
        )


async def experiment_delete_all_artifacts(q: Q, experiment_ids: List[int]) -> None:
    """Stop the given experiments, delete them, and refresh the list view."""
    await experiment_stop(q, experiment_ids)
    await experiment_delete(q, experiment_ids)
    await list_current_experiments(q)
diff --git a/llm_studio/app_utils/hugging_face_utils.py b/llm_studio/app_utils/hugging_face_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..9116636508f5c8d8a48e9e8737431574ec81c165 --- /dev/null +++ b/llm_studio/app_utils/hugging_face_utils.py
import os

import accelerate
import einops
import huggingface_hub
import torch
import transformers
from jinja2 import Environment, FileSystemLoader

from llm_studio.app_utils.sections.chat import load_cfg_model_tokenizer
from llm_studio.app_utils.utils import hf_repo_friendly_name, save_hf_yaml, set_env
from llm_studio.src.utils.config_utils import NON_GENERATION_PROBLEM_TYPES
from llm_studio.src.utils.modeling_utils import check_disk_space


def get_model_card(cfg, model, repo_id) -> huggingface_hub.ModelCard:
    """
    Method to define the Model Card.

    It is possible to change the language, the library name, and the tags.
    These values will appear in the Model Card tab of Hugging Face.

    Parameters:
        cfg : Configuration parameters for the model card.
        model : The model for which the model card is being generated.
        repo_id : The ID of the target Hugging Face repository.

    Returns:
        huggingface_hub.ModelCard: The Model Card containing model information.
+ """ + card_data = huggingface_hub.ModelCardData( + language="en", + library_name="transformers", + tags=["gpt", "llm", "large language model", "h2o-llmstudio"], + ) + cfg_kwargs = dict( + use_fast=cfg.tokenizer.use_fast, + text_prompt_start=cfg.dataset.text_prompt_start, + text_answer_separator=cfg.dataset.text_answer_separator, + trust_remote_code=cfg.environment.trust_remote_code, + end_of_sentence=( + cfg._tokenizer_eos_token if cfg.dataset.add_eos_token_to_prompt else "" + ), + ) + if cfg.problem_type not in NON_GENERATION_PROBLEM_TYPES: + cfg_kwargs.update( + dict( + min_new_tokens=cfg.prediction.min_length_inference, + max_new_tokens=cfg.prediction.max_length_inference, + do_sample=cfg.prediction.do_sample, + num_beams=cfg.prediction.num_beams, + temperature=cfg.prediction.temperature, + repetition_penalty=cfg.prediction.repetition_penalty, + ) + ) + + card = huggingface_hub.ModelCard.from_template( + card_data, + template_path=os.path.join("model_cards", cfg.environment._model_card_template), + base_model=cfg.llm_backbone, # will be replaced in template if it exists + repo_id=repo_id, + model_architecture=model.backbone.__repr__(), + config=cfg.__repr__(), + transformers_version=transformers.__version__, + einops_version=einops.__version__, + accelerate_version=accelerate.__version__, + torch_version=torch.__version__.split("+")[0], + **cfg_kwargs, + ) + return card + + +def publish_model_to_hugging_face( + path_to_experiment: str, + model_name: str, + user_id: str = None, + api_key: str = None, + device: str = "cuda:0", + safe_serialization: bool = True, +) -> None: + """ + Method to publish the model to Hugging Face. + + Parameters: + path_to_experiment: The file path of the fine-tuned model's files. + device: The target device for running the model, either 'cpu', 'cpu_shard' + or 'cuda:0'. + user_id: The Hugging Face user ID. + api_key: The Hugging Face API Key. + model_name: The name of the model to be published on Hugging Face. 
+ safe_serialization: A flag indicating whether safe serialization should be used. + + Returns: + None. The model is published to the specified Hugging Face repository. + """ + + # Check if the 'device' value is valid, raise an exception if not + if device == "cpu" or device == "cpu_shard": + pass # 'cpu' is a valid value + elif device.startswith("cuda:") and device[5:].isdigit(): + pass # 'cuda:integer' format is valid + else: + raise ValueError( + "Invalid device value. Use 'cpu', 'cpu_shard' or 'cuda:INTEGER'." + ) + + with set_env(HUGGINGFACE_TOKEN=api_key): + cfg, model, tokenizer = load_cfg_model_tokenizer( + path_to_experiment, + merge=True, + device=device, + ) + + check_disk_space(model.backbone, "./") + + # Check if the user is already logged in, and if not, prompt for API key + if api_key: + huggingface_hub.login(api_key) + + # If 'user_id' argument is blank, fetch 'user_id' from the logged-in user + if user_id == "": + user_id = huggingface_hub.whoami()["name"] + + repo_id = f"{user_id}/{hf_repo_friendly_name(model_name)}" + + # push tokenizer to hub + tokenizer.push_to_hub(repo_id=repo_id, private=True) + + # push model card to hub + card = get_model_card(cfg, model, repo_id) + card.push_to_hub( + repo_id=repo_id, repo_type="model", commit_message="Upload model card" + ) + + api = huggingface_hub.HfApi() + + # push classification head to hub + if os.path.isfile(f"{path_to_experiment}/classification_head.pth"): + api.upload_file( + path_or_fileobj=f"{path_to_experiment}/classification_head.pth", + path_in_repo="classification_head.pth", + repo_id=repo_id, + repo_type="model", + commit_message="Upload classification_head.pth", + ) + + # push config to hub + api.upload_file( + path_or_fileobj=os.path.join(path_to_experiment, "cfg.yaml"), + path_in_repo="cfg.yaml", + repo_id=repo_id, + repo_type="model", + commit_message="Upload cfg.yaml", + ) + + # push model to hub + model.backbone.config.custom_pipelines = { + "text-generation": { + "impl": 
"h2oai_pipeline.H2OTextGenerationPipeline", + "pt": "AutoModelForCausalLM", + } + } + + model.backbone.push_to_hub( + repo_id=repo_id, + private=True, + commit_message="Upload model", + safe_serialization=safe_serialization, + ) + + # Storing HF attributes + output_directory = cfg.output_directory + save_hf_yaml( + path=f"{output_directory.rstrip('/')}/hf.yaml", + account_name=user_id, + model_name=model_name, + repo_id=repo_id, + ) + + # push pipeline to hub + template_env = Environment(loader=FileSystemLoader(searchpath="llm_studio/src/")) + + pipeline_template = template_env.get_template("h2oai_pipeline_template.py") + + data = { + "text_prompt_start": cfg.dataset.text_prompt_start, + "text_answer_separator": cfg.dataset.text_answer_separator, + } + + if cfg.dataset.add_eos_token_to_prompt: + data.update({"end_of_sentence": cfg._tokenizer_eos_token}) + else: + data.update({"end_of_sentence": ""}) + + custom_pipeline = pipeline_template.render(data) + + custom_pipeline_path = os.path.join(path_to_experiment, "h2oai_pipeline.py") + + with open(custom_pipeline_path, "w") as f: + f.write(custom_pipeline) + + api.upload_file( + path_or_fileobj=custom_pipeline_path, + path_in_repo="h2oai_pipeline.py", + repo_id=repo_id, + repo_type="model", + commit_message="Upload h2oai_pipeline.py", + ) diff --git a/llm_studio/app_utils/initializers.py b/llm_studio/app_utils/initializers.py new file mode 100644 index 0000000000000000000000000000000000000000..55840ac51072bf1057f0103609f8ec5dac9a36ab --- /dev/null +++ b/llm_studio/app_utils/initializers.py @@ -0,0 +1,177 @@ +import logging +import os +import shutil +from tempfile import NamedTemporaryFile + +from bokeh.resources import Resources as BokehResources +from h2o_wave import Q, ui + +from llm_studio.app_utils.config import default_cfg +from llm_studio.app_utils.db import Database, Dataset +from llm_studio.app_utils.default_datasets import ( + prepare_default_dataset_causal_language_modeling, + 
    prepare_default_dataset_dpo_modeling,
)
from llm_studio.app_utils.sections.common import interface
from llm_studio.app_utils.setting_utils import load_user_settings_and_secrets
from llm_studio.app_utils.utils import (
    get_data_dir,
    get_database_dir,
    get_download_dir,
    get_output_dir,
    get_user_db_path,
    get_user_name,
)
from llm_studio.src.utils.config_utils import load_config_py, save_config_yaml

logger = logging.getLogger(__name__)


async def import_default_data(q: Q):
    """Imports default data.

    Creates the two bundled demo datasets (oasst and dpo) on first run,
    i.e. when dataset id 1 is not yet in the app database. Failures are
    logged and swallowed so app startup never breaks on a download error.
    """

    try:
        if q.client.app_db.get_dataset(1) is None:
            logger.info("Downloading default dataset...")
            # blocking dialog keeps the user informed during the download
            q.page["meta"].dialog = ui.dialog(
                title="Creating default datasets",
                blocking=True,
                items=[ui.progress(label="Please be patient...")],
            )
            await q.page.save()

            dataset = prepare_oasst(q)
            q.client.app_db.add_dataset(dataset)
            dataset = prepare_dpo(q)
            q.client.app_db.add_dataset(dataset)

    except Exception as e:
        # roll back any partial DB insert; best-effort by design
        q.client.app_db._session.rollback()
        logger.warning(f"Could not download default dataset: {e}")
        pass


def prepare_oasst(q: Q) -> Dataset:
    """Download the default causal-LM (oasst) dataset and build its Dataset record.

    Recreates the dataset folder from scratch, writes the data and a config
    yaml next to it, and returns a Dataset row (id 1) for the app database.
    """
    path = f"{get_data_dir(q)}/oasst"
    # start from a clean directory on every (re-)import
    if os.path.exists(path):
        shutil.rmtree(path)
    os.makedirs(path, exist_ok=True)
    df = prepare_default_dataset_causal_language_modeling(path)
    cfg = load_config_py(
        config_path=os.path.join("llm_studio/python_configs", default_cfg.cfg_file),
        config_name="ConfigProblemBase",
    )
    cfg.dataset.train_dataframe = os.path.join(path, "train_full.pq")
    # prompt_column is a tuple to allow multiple prompt columns
    cfg.dataset.prompt_column = ("instruction",)
    cfg.dataset.answer_column = "output"
    cfg.dataset.parent_id_column = "None"
    cfg_path = os.path.join(path, f"{default_cfg.cfg_file}.yaml")
    save_config_yaml(cfg_path, cfg)
    dataset = Dataset(
        id=1,
        name="oasst",
        path=path,
        config_file=cfg_path,
        train_rows=df.shape[0],
    )
    return dataset


def prepare_dpo(q) -> Dataset:
    """Build the default DPO demo dataset and its Dataset record (id 2)."""
    path = f"{get_data_dir(q)}/dpo"
    # start from a clean directory on every (re-)import
    if os.path.exists(path):
        shutil.rmtree(path)
    os.makedirs(path, exist_ok=True)
    train_df = prepare_default_dataset_dpo_modeling()
    train_df.to_parquet(os.path.join(path, "train.pq"), index=False)

    # local import to avoid a module-level import cycle — TODO confirm
    from llm_studio.python_configs.text_dpo_modeling_config import ConfigDPODataset
    from llm_studio.python_configs.text_dpo_modeling_config import (
        ConfigProblemBase as ConfigProblemBaseDPO,
    )

    cfg: ConfigProblemBaseDPO = ConfigProblemBaseDPO(
        dataset=ConfigDPODataset(
            train_dataframe=os.path.join(path, "train.pq"),
            system_column="system",
            prompt_column=("question",),
            answer_column="chosen",
            rejected_answer_column="rejected",
        ),
    )

    cfg_path = os.path.join(path, "text_dpo_modeling_config.yaml")
    save_config_yaml(cfg_path, cfg)
    dataset = Dataset(
        id=2,
        name="dpo",
        path=path,
        config_file=cfg_path,
        train_rows=train_df.shape[0],
    )
    return dataset


async def initialize_client(q: Q) -> None:
    """Initialize the client.

    Runs once per browser session: creates the per-user directories and
    database handle, loads settings, renders the base interface and imports
    the default datasets, then redirects to the configured start page.
    """

    logger.info(f"Initializing client {q.client.client_initialized}")

    if not q.client.client_initialized:
        q.client.delete_cards = set()
        q.client.delete_cards.add("init_app")

        os.makedirs(get_data_dir(q), exist_ok=True)
        os.makedirs(get_database_dir(q), exist_ok=True)
        os.makedirs(get_output_dir(q), exist_ok=True)
        os.makedirs(get_download_dir(q), exist_ok=True)

        db_path = get_user_db_path(q)

        q.client.app_db = Database(db_path)

        logger.info(f"User name: {get_user_name(q)}")

        q.client.client_initialized = True

        q.client["mode_curr"] = "full"
        load_user_settings_and_secrets(q)
        await interface(q)

        await import_default_data(q)
        # route the first request to the configured start page
        q.args.__wave_submission_name__ = default_cfg.start_page

    return


async def initialize_app(q: Q) -> None:
    """
    Initialize the app.

    This function is called once when the app is started and stores values in q.app.
+ """ + + logger.info("Initializing app ...") + + icons_pth = "llm_studio/app_utils/static/" + (q.app["icon_path"],) = await q.site.upload([f"{icons_pth}/icon.png"]) + + script_sources = [] + + with NamedTemporaryFile(mode="w", suffix=".min.js") as f: + # write all Bokeh scripts to one file to make sure + # they are loaded sequentially + for js_raw in BokehResources(mode="inline").js_raw: + f.write(js_raw) + f.write("\n") + + (url,) = await q.site.upload([f.name]) + script_sources.append(url) + + q.app["script_sources"] = script_sources + q.app["initialized"] = True + q.app.version = default_cfg.version + q.app.name = default_cfg.name + q.app.heap_mode = default_cfg.heap_mode + + logger.info("Initializing app ... done") diff --git a/llm_studio/app_utils/sections/__init__.py b/llm_studio/app_utils/sections/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llm_studio/app_utils/sections/chat.py b/llm_studio/app_utils/sections/chat.py new file mode 100644 index 0000000000000000000000000000000000000000..e7f94ff93b787807d97c5126dc4515b823298723 --- /dev/null +++ b/llm_studio/app_utils/sections/chat.py @@ -0,0 +1,248 @@ +import gc +import logging +import os + +import torch +from accelerate import dispatch_model, infer_auto_device_map +from accelerate.utils import get_balanced_memory +from h2o_wave import Q +from h2o_wave import data as chat_data +from h2o_wave import ui + +from llm_studio.app_utils.utils import get_experiments, get_ui_elements, set_env +from llm_studio.python_configs.base import DefaultConfigProblemBase +from llm_studio.src.datasets.text_utils import get_tokenizer +from llm_studio.src.utils.config_utils import ( + NON_GENERATION_PROBLEM_TYPES, + load_config_yaml, +) +from llm_studio.src.utils.modeling_utils import load_checkpoint + +logger = logging.getLogger(__name__) + + +async def chat_tab(q: Q, load_model=True): + if not await should_start_chat(q): + return + + if 
load_model: + q.page["experiment/display/chat"] = ui.form_card( + box="first", + items=[ui.progress(label="Loading the model...")], + ) + + q.client["experiment/display/chat/messages"] = [] + q.client.delete_cards.add("experiment/display/chat") + + q.page["experiment/display/chat/settings"] = ui.form_card( + box="second", + items=[ + ui.expander( + name="chat_settings", + label="Chat Settings", + items=[ui.progress(label="Loading model configuration...")], + expanded=True, + ) + ], + ) + q.client.delete_cards.add("experiment/display/chat/settings") + + await q.page.save() + logger.info(torch.cuda.memory_allocated()) + + if load_model: + with set_env(HUGGINGFACE_TOKEN=q.client["default_huggingface_api_token"]): + gpu_id = q.client["gpu_used_for_chat"] - 1 + cfg, model, tokenizer = load_cfg_model_tokenizer( + q.client["experiment/display/experiment_path"], device=f"cuda:{gpu_id}" + ) + q.client["experiment/display/chat/cfg"] = cfg + q.client["experiment/display/chat/model"] = model + q.client["experiment/display/chat/tokenizer"] = tokenizer + initial_message = "Model successfully loaded, how can I help you?" + + else: + cfg = q.client["experiment/display/chat/cfg"] + assert q.client["experiment/display/chat/model"] is not None + assert q.client["experiment/display/chat/tokenizer"] is not None + initial_message = "Chat History cleaned. How can I help you?" 
+ + # Hide fields that are should not be visible in the UI + cfg.prediction._visibility["metric"] = -1 + cfg.prediction._visibility["batch_size_inference"] = -1 + cfg.prediction._visibility["min_length_inference"] = -1 + cfg.prediction._visibility["stop_tokens"] = -1 + + logger.info(torch.cuda.memory_allocated()) + q.page["experiment/display/chat"] = ui.chatbot_card( + box="first", + data=chat_data(fields="content from_user", t="list"), # type: ignore + name="experiment/display/chat/chatbot", + events=["stop", "suggestion"], + suggestions=[ + ui.chat_suggestion( + "Write a poem about H2O LLM Studio", + label="Write a poem", + caption="about H2O LLM Studio", + icon="Edit", + ), + ui.chat_suggestion( + "Plan a trip to Europe", + label="Plan a trip", + caption="to Europe", + icon="Airplane", + ), + ui.chat_suggestion( + "Give me ideas for a new project", + label="Give me ideas", + caption="for a new project", + icon="Lightbulb", + ), + ui.chat_suggestion( + "Explain me CSS preprocessors", + label="Explain me", + caption="CSS preprocessors", + icon="Code", + ), + ], + ) + q.page["experiment/display/chat"].data += [initial_message, False] + + option_items = get_ui_elements( + cfg=q.client["experiment/display/chat/cfg"].prediction, + q=q, + pre="chat/cfg_predictions", + ) + q.page["experiment/display/chat/settings"] = ui.form_card( + box="second", + items=[ + ui.buttons( + [ + ui.button( + name="experiment/display/chat/clear_history", + label="Clear History", + primary=True, + ), + ] + ), + ui.expander( + name="chat_settings", + label="Chat Settings", + items=option_items, + expanded=True, + ), + ], + ) + + +async def should_start_chat(q: Q): + cfg: DefaultConfigProblemBase = load_config_yaml( + os.path.join(q.client["experiment/display/experiment_path"], "cfg.yaml") + ) + + if cfg.problem_type in NON_GENERATION_PROBLEM_TYPES: + q.page["experiment/display/chat"] = ui.form_card( + box="first", + items=[ + ui.text( + "Chatbot is not available for text classification 
problems. " + "Please select a text generation problem." + ) + ], + title="", + ) + q.client.delete_cards.add("experiment/display/chat") + return False + + # gpu id in UI is offset by 1 to be in sync with experiment UI + gpu_id = q.client["gpu_used_for_chat"] - 1 + if gpu_is_blocked(q, gpu_id): + q.page["experiment/display/chat"] = ui.form_card( + box="first", + items=[ + ui.text( + f"""Chatbot is not available when GPU{q.client["gpu_used_for_chat"]} + is blocked by another experiment. + You can change "Gpu used for Chat" in the settings tab + to use another GPU for the chatbot. """ + ) + ], + title="", + ) + q.client.delete_cards.add("experiment/display/chat") + return False + return True + + +def gpu_is_blocked(q, gpu_id): + experiments = get_experiments(q=q) + running_experiments = experiments[experiments.status.isin(["running"])] + gpu_blocked = any( + [ + str(gpu_id) in gpu_list + for gpu_list in running_experiments["gpu_list"] + .apply(lambda x: x.split(",")) + .to_list() + ] + ) + return gpu_blocked + + +def load_cfg_model_tokenizer( + experiment_path: str, merge: bool = False, device: str = "cuda:0" +): + cfg = load_config_yaml(os.path.join(experiment_path, "cfg.yaml")) + cfg.architecture.pretrained = False + cfg.architecture.gradient_checkpointing = False + cfg.environment._device = device.replace("_shard", "") + cfg.environment._local_rank = 0 + cfg.prediction._visibility["num_history"] = 1 + + tokenizer = get_tokenizer(cfg) + + gc.collect() + torch.cuda.empty_cache() + + if ( + merge + and cfg.training.lora + and cfg.architecture.backbone_dtype in ("int4", "int8") + ): + logger.info("Loading backbone in float16 for merging LORA weights.") + cfg.architecture.backbone_dtype = "float16" + cfg.architecture.pretrained = True + + # if "cpu" in device: + # cfg.architecture.backbone_dtype = "float32" + + with torch.device(cfg.environment._device): + model = cfg.architecture.model_class(cfg) + cfg.architecture.pretrained_weights = os.path.join( + experiment_path, 
"checkpoint.pth" + ) + load_checkpoint(cfg, model, strict=False) + + if device == "cpu_shard": + max_memory = get_balanced_memory( + model, + ) + device_map = infer_auto_device_map(model, max_memory=max_memory) + model = dispatch_model( + model, + device_map=device_map, + ) + + if merge and cfg.training.lora: + # merges the LoRa layers into the base model. + # This is needed if one wants to use the base model as a standalone model. + logger.info("Merging LORA layers with base model.") + if device == "cpu": + model = model.to(torch.float32) + model.backbone = model.backbone.merge_and_unload() + if device == "cpu": + model = model.to(torch.float16) + + model = model.eval() + model.backbone.use_cache = True + + return cfg, model, tokenizer diff --git a/llm_studio/app_utils/sections/chat_update.py b/llm_studio/app_utils/sections/chat_update.py new file mode 100644 index 0000000000000000000000000000000000000000..72e50771da8305a31cd3711169a3e5a6c99f03d2 --- /dev/null +++ b/llm_studio/app_utils/sections/chat_update.py @@ -0,0 +1,305 @@ +import asyncio +import gc +import logging +import os +import threading +from typing import Any, Callable, Dict, List, Optional + +import numpy as np +import torch +from h2o_wave import Q, ui +from transformers import AutoTokenizer, TextStreamer + +from llm_studio.app_utils.utils import parse_ui_elements +from llm_studio.src.models.text_causal_language_modeling_model import Model +from llm_studio.src.utils.modeling_utils import ( + EnvVariableStoppingCriteria, + get_torch_dtype, + set_generation_config, +) + +logger = logging.getLogger(__name__) + +__all__ = ["chat_update", "is_app_blocked_while_streaming"] + +USER = True +BOT = False + + +@torch.inference_mode(mode=True) +async def chat_update(q: Q) -> None: + """ + Update the chatbot with the new message. 
+ """ + q.client["experiment/display/chat/finished"] = False + try: + await update_chat_window(q) + finally: + q.client["experiment/display/chat/finished"] = True + # Hide the "Stop generating" button + q.page["experiment/display/chat"].generating = False + + +async def update_chat_window(q): + cfg_prediction = parse_ui_elements( + cfg=q.client["experiment/display/chat/cfg"].prediction, + q=q, + pre="chat/cfg_predictions/cfg/", + ) + q.client["experiment/display/chat/cfg"].prediction = cfg_prediction + # Update generation config + q.client["experiment/display/chat/model"].backbone = set_generation_config( + q.client["experiment/display/chat/model"].backbone, cfg_prediction + ) + + # could also invoke cfg.check() here, but leave it explicit as cfg.check() + # may raise other issues not related to the chatbot + if cfg_prediction.do_sample and cfg_prediction.temperature == 0.0: + q.page["meta"].dialog = ui.dialog( + title="Invalid Text Generation configuration.", + name="chatbot_invalid_settings", + items=[ + ui.text( + "Do Sample enabled and Temperature = 0 are mutually exclusive. " + "Please increase Temperature or disable sampling." 
+ ), + ], + closable=True, + ) + await q.page.save() + return + + # populate chat window with user message + logger.info(f"Using chatbot config: {cfg_prediction}") + if q.events["experiment/display/chat/chatbot"]: + prompt = q.events["experiment/display/chat/chatbot"]["suggestion"] + else: + prompt = q.client["experiment/display/chat/chatbot"] + message = [prompt, USER] + q.client["experiment/display/chat/messages"].append(message) + q.page["experiment/display/chat"].data += message + q.page["experiment/display/chat"].data += ["", BOT] + await q.page.save() + + predicted_text = await answer_chat(q) + + # populate chat window with bot message + logger.info(f"Predicted Answer: {predicted_text}") + message = [predicted_text, BOT] + q.client["experiment/display/chat/messages"].append(message) + q.page["experiment/display/chat"].data[-1] = message + + +async def answer_chat(q: Q) -> str: + cfg = q.client["experiment/display/chat/cfg"] + model: Model = q.client["experiment/display/chat/model"] + tokenizer = q.client["experiment/display/chat/tokenizer"] + + full_prompt = "" + if len(q.client["experiment/display/chat/messages"]): + for prev_message in q.client["experiment/display/chat/messages"][ + -(cfg.prediction.num_history + 1) : + ]: + if prev_message[1] is USER: + prev_message = cfg.dataset.dataset_class.parse_prompt( + cfg, prev_message[0] + ) + else: + prev_message = prev_message[0] + if cfg.dataset.add_eos_token_to_answer: + prev_message += cfg._tokenizer_eos_token + + full_prompt += prev_message + logger.info(f"Full prompt: {full_prompt}") + + inputs = cfg.dataset.dataset_class.encode( + tokenizer, full_prompt, cfg.tokenizer.max_length_prompt, "left" + ) + inputs["prompt_input_ids"] = ( + inputs.pop("input_ids").unsqueeze(0).to(cfg.environment._device) + ) + inputs["prompt_attention_mask"] = ( + inputs.pop("attention_mask").unsqueeze(0).to(cfg.environment._device) + ) + + def text_cleaner(text: str) -> str: + return cfg.dataset.dataset_class.clean_output( + 
output={"predicted_text": np.array([text])}, cfg=cfg + )["predicted_text"][0] + + if cfg.prediction.num_beams == 1: + streamer = WaveChatStreamer(tokenizer=tokenizer, q=q, text_cleaner=text_cleaner) + # Need to start generation in a separate thread, otherwise streaming is blocked + thread = threading.Thread( + target=generate, + kwargs=dict(model=model, inputs=inputs, cfg=cfg, streamer=streamer), + ) + q.client["currently_chat_streaming"] = True + # Show the "Stop generating" button + q.page["experiment/display/chat"].generating = True + # Hide suggestions + q.page["experiment/display/chat"].suggestions = None + + try: + thread.start() + max_wait_time_in_seconds = 60 * 3 + for current_wait_time in range(max_wait_time_in_seconds): + thread_is_dead = not thread.is_alive() + takes_too_much_time = current_wait_time == max_wait_time_in_seconds - 1 + streaming_finished = streamer.finished + + if streaming_finished or takes_too_much_time or thread_is_dead: + if takes_too_much_time: + # this is more of a safety measure + # to ensure the app gets responsive eventually + logger.warning( + "Chat generation took too much time. " + "Stopping chat generation." + ) + if thread_is_dead: # some error occurred during streaming + logger.warning( + "Chat generation thread is not alive anymore. " + "Please check logs!" + ) + if streaming_finished: + logger.info("Chat Stream has been completed") + + predicted_text = streamer.answer + break + await q.sleep(1) # 1 second, see max_wait_time_in_seconds + finally: + del q.client["currently_chat_streaming"] + if thread.is_alive(): + thread.join() + else: + # ValueError: `streamer` cannot be used with beam search (yet!). + # Make sure that `num_beams` is set to 1. 
+ logger.info("Not streaming output, as it cannot be used with beam search.") + q.page["experiment/display/chat"].data[-1] = ["...", BOT] + await q.page.save() + predicted_answer_ids = generate(model, inputs, cfg)[0] + predicted_text = tokenizer.decode( + predicted_answer_ids, skip_special_tokens=True + ) + predicted_text = text_cleaner(predicted_text) + + del inputs + gc.collect() + torch.cuda.empty_cache() + return predicted_text + + +class WaveChatStreamer(TextStreamer): + """ + Utility class that updates the chabot card in a streaming fashion + """ + + def __init__( + self, + tokenizer: AutoTokenizer, + q: Q, + text_cleaner: Optional[Callable] = None, + **decode_kwargs, + ): + super().__init__(tokenizer, skip_prompt=True, **decode_kwargs) + self.text_cleaner = text_cleaner + self.words_predicted_answer: List[str] = [] + self.q = q + self.loop = asyncio.get_event_loop() + self.finished = False + + def on_finalized_text(self, text: str, stream_end: bool = False): + self.words_predicted_answer += [text] + self.loop.create_task(self.update_chat_page()) + + async def update_chat_page(self): + self.q.page["experiment/display/chat"].data[-1] = [self.answer, BOT] + await self.q.page.save() + + @property + def answer(self): + """ + Create the answer by joining the generated words. + By this, self.text_cleaner does not need to be idempotent. 
+ """ + answer = "".join(self.words_predicted_answer) + if answer.endswith(self.tokenizer.eos_token): + # text generation is stopped + answer = answer.replace(self.tokenizer.eos_token, "") + if self.text_cleaner: + answer = self.text_cleaner(answer) + return answer + + def end(self): + super().end() + self.finished = True + + +def generate(model: Model, inputs: Dict, cfg: Any, streamer: TextStreamer = None): + with torch.cuda.amp.autocast( + dtype=get_torch_dtype(cfg.environment.mixed_precision_dtype) + ): + output = model.generate(batch=inputs, cfg=cfg, streamer=streamer).detach().cpu() + return output + + +async def show_chat_is_running_dialog(q): + q.page["meta"].dialog = ui.dialog( + title="Text Generation is streaming.", + name="chatbot_running_dialog", + items=[ + ui.text("Please wait till the text generation has stopped."), + ], + closable=True, + ) + await q.page.save() + + +async def show_stream_is_aborted_dialog(q): + q.page["meta"].dialog = ui.dialog( + title="Text Generation will be stopped.", + name="chatbot_stopping_dialog", + items=[ + ui.text("Please wait"), + ], + closable=False, + ) + await q.page.save() + + +async def is_app_blocked_while_streaming(q: Q): + """ + Check whether the app is blocked with current answer generation. + """ + if ( + q.events["experiment/display/chat/chatbot"] is not None + and q.events["experiment/display/chat/chatbot"]["stop"] + and q.client["currently_chat_streaming"] + ): + # Cancel the streaming task. 
+ try: + # User clicks abort button while the chat is currently streaming + logger.info("Stopping Chat Stream") + os.environ[EnvVariableStoppingCriteria.stop_streaming_env] = "True" + await show_stream_is_aborted_dialog(q) + await q.page.save() + + for _ in range(20): # don't wait longer than 10 seconds + await q.sleep(0.5) + if q.client["currently_chat_streaming"] is None: + q.page["meta"].dialog = None + await q.page.save() + return True + else: + logger.warning("Could not terminate stream") + return True + finally: + if EnvVariableStoppingCriteria.stop_streaming_env in os.environ: + del os.environ[EnvVariableStoppingCriteria.stop_streaming_env] + # Hide the "Stop generating" button. + q.page["experiment/display/chat"].generating = False + + elif q.client["experiment/display/chat/finished"] is False: + await show_chat_is_running_dialog(q) + return True + return False diff --git a/llm_studio/app_utils/sections/common.py b/llm_studio/app_utils/sections/common.py new file mode 100644 index 0000000000000000000000000000000000000000..d7fdc7e422b58274d7eccc830f540ca9ed434f1a --- /dev/null +++ b/llm_studio/app_utils/sections/common.py @@ -0,0 +1,267 @@ +import hashlib +import logging +from typing import List + +from h2o_wave import Q, ui + +from llm_studio.app_utils.cards import card_zones +from llm_studio.app_utils.config import default_cfg + +logger = logging.getLogger(__name__) + + +async def meta(q: Q) -> None: + if q.client["keep_meta"]: # Do not reset meta, keep current dialog opened + q.client["keep_meta"] = False + return + + zones = card_zones(mode=q.client["mode_curr"]) + + if q.client["notification_bar"]: + notification_bar = ui.notification_bar( + type="warning", + timeout=20, + text=q.client["notification_bar"], + position="top-right", + ) + else: + notification_bar = None + + # TODO remove `stylesheet` when wave makes message bars smaller + q.page["meta"] = ui.meta_card( + box="", + title="H2O LLM Studio", + layouts=[ + ui.layout(breakpoint="0px", 
width="1430px", zones=zones), + ], + scripts=[ + ui.script(source, asynchronous=True) for source in q.app["script_sources"] + ], + stylesheet=ui.inline_stylesheet( + """ + .ms-MessageBar { + padding-top: 3px; + padding-bottom: 3px; + min-height: 18px; + } + div[data-test="nav_bar"] .ms-Nav-groupContent { + margin-bottom: 0; + } + + div[data-test="experiment/display/deployment/top_right"], + div[data-test="experiment/display/deployment/top_right"] + div[data-visible="true"]:last-child > div > div { + display: flex; + } + + div[data-test="experiment/display/deployment/top_right"] + div[data-visible="true"]:last-child, + div[data-test="experiment/display/deployment/top_right"] + div[data-visible="true"]:last-child > div { + display: flex; + flex-grow: 1; + } + + div[data-test="experiment/display/deployment/top_right"] + div[data-visible="true"]:last-child > div > div > div { + display: flex; + flex-grow: 1; + flex-direction: column; + } + + div[data-test="experiment/display/deployment/top_right"] + div[data-visible="true"]:last-child > div > div > div > div { + flex-grow: 1; + } + """ + ), + script=None, + notification_bar=notification_bar, + ) + + if q.client.theme_dark: + q.page["meta"].theme = "h2o-dark" + else: + q.page["meta"].theme = "light" + + +def heap_analytics( + userid, user_properties=None, event_properties=None +) -> ui.InlineScript: + script = ( + "window.heap=window.heap||[],heap.load=function(e,t)" + "{window.heap.appid=e,window.heap." + 'config=t=t||{};var r=document.createElement("script");' + 'r.type="text/javascript",' + 'r.async=!0,r.src="https://cdn.heapanalytics.com/js/heap-"+e+".js";' + 'var a=document.getElementsByTagName("script")[0];' + "a.parentNode.insertBefore(r,a);" + "for(var n=function(e){return function(){heap.push([e]." 
+ "concat(Array.prototype.slice.call(arguments,0)))}}," + 'p=["addEventProperties","addUserProperties","clearEventProperties","identify",' + '"resetIdentity","removeEventProperty","setEventProperties","track",' + '"unsetEventProperty"],o=0;o None: + """Display interface cards.""" + + await meta(q) + + navigation_pages = ["Home", "Settings"] + + if q.client["init_interface"] is None: + # to avoid flickering + q.page["header"] = ui.header_card( + box="header", + title=default_cfg.name, + image=q.app["icon_path"], + subtitle=f"v{default_cfg.version}", + ) + + if q.app.heap_mode: + logger.info("Heap on") + q.page["meta"].script = heap_analytics( + userid=q.auth.subject, + event_properties=( + f"{{version: '{q.app.version}'" + f", product: '{q.app.name}'}}" + ), + ) + # execute the heap inline script once in the initialization + await q.page.save() + else: + logger.info("Heap off") + + q.page["nav_bar"] = ui.nav_card( + box="nav", + items=[ + ui.nav_group( + "Navigation", + items=[ + ui.nav_item(page.lower(), page) for page in navigation_pages + ], + ), + ui.nav_group( + "Datasets", + items=[ + ui.nav_item(name="dataset/import", label="Import dataset"), + ui.nav_item(name="dataset/list", label="View datasets"), + ], + ), + ui.nav_group( + "Experiments", + items=[ + ui.nav_item(name="experiment/start", label="Create experiment"), + ui.nav_item(name="experiment/list", label="View experiments"), + ], + ), + ], + value=( + default_cfg.start_page + if q.client["nav/active"] is None + else q.client["nav/active"] + ), + ) + else: + # Only update menu properties to prevent from flickering + q.page["nav_bar"].value = ( + default_cfg.start_page + if q.client["nav/active"] is None + else q.client["nav/active"] + ) + + q.client["init_interface"] = True + + +async def clean_dashboard(q: Q, mode: str = "full", exclude: List[str] = []): + """Drop cards from Q page.""" + + logger.info(q.client.delete_cards) + for card_name in q.client.delete_cards: + if card_name not in exclude: + del 
q.page[card_name] + + q.page["meta"].layouts[0].zones = card_zones(mode=mode) + q.client["mode_curr"] = mode + q.client["notification_bar"] = None + + +async def delete_dialog(q: Q, names: List[str], action, entity): + title = "Do you really want to delete " + n_datasets = len(names) + + if n_datasets == 1: + title = f"{title} {entity} {names[0]}?" + else: + title = f"{title} {n_datasets} {entity}s?" + + q.page["meta"].dialog = ui.dialog( + f"Delete {entity}", + items=[ + ui.text(title), + ui.markup("
"), + ui.buttons( + [ + ui.button(name=action, label="Delete", primary=True), + ui.button(name="abort", label="Abort", primary=False), + ], + justify="end", + ), + ], + ) + q.client["keep_meta"] = True + + +async def info_dialog(q: Q, title: str, message: str): + q.page["meta"].dialog = ui.dialog( + title, + items=[ + ui.text(message), + ui.markup("
"), + ui.buttons( + [ + ui.button(name="abort", label="Continue", primary=False), + ], + justify="end", + ), + ], + blocking=True, + ) + q.client["keep_meta"] = True + + +async def heap_redact(q: Q) -> None: + if q.app.heap_mode: + # Send the page to the browser, so the following js can be applied + await q.page.save() + + # replace dataset names with **** + q.page["meta"].script = ui.inline_script( + """ +document.querySelectorAll('div[data-automation-key="name"]').forEach(a => { + a.setAttribute('data-heap-redact-text', '') +}) + +document.querySelector('div[data-test="datasets_table"] \ +.ms-ScrollablePane--contentContainer').addEventListener('scroll', () => { + window.setTimeout(() => {{ + document.querySelectorAll('div[data-automation-key="name"]').forEach(a => { + a.setAttribute('data-heap-redact-text', '') + }) + }}, 100) +}) + """ + ) diff --git a/llm_studio/app_utils/sections/dataset.py b/llm_studio/app_utils/sections/dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..712a66c3517c7ef07babb5511b4e17cd78606b74 --- /dev/null +++ b/llm_studio/app_utils/sections/dataset.py @@ -0,0 +1,1338 @@ +import functools +import hashlib +import logging +import os +import re +import shutil +import time +import traceback +from typing import List, Optional + +import pandas as pd +from h2o_wave import Q, ui +from h2o_wave.types import FormCard, ImageCard, MarkupCard, StatListItem, Tab + +from llm_studio.app_utils.config import default_cfg +from llm_studio.app_utils.db import Dataset +from llm_studio.app_utils.sections.common import clean_dashboard +from llm_studio.app_utils.sections.experiment import experiment_start +from llm_studio.app_utils.sections.histogram_card import histogram_card +from llm_studio.app_utils.utils import ( + add_model_type, + azure_download, + azure_file_options, + check_valid_upload_content, + clean_error, + dir_file_table, + get_data_dir, + get_dataset_elements, + get_datasets, + get_experiments_status, + get_frame_stats, 
+ get_model_types, + get_problem_types, + get_unique_dataset_name, + kaggle_download, + local_download, + make_label, + parse_ui_elements, + remove_temp_files, + s3_download, + s3_file_options, +) +from llm_studio.app_utils.wave_utils import busy_dialog, ui_table_from_df +from llm_studio.src.datasets.conversation_chain_handler import get_conversation_chains +from llm_studio.src.utils.config_utils import ( + load_config_py, + load_config_yaml, + save_config_yaml, +) +from llm_studio.src.utils.data_utils import ( + get_fill_columns, + read_dataframe, + read_dataframe_drop_missing_labels, + sanity_check, +) +from llm_studio.src.utils.plot_utils import PlotData + +logger = logging.getLogger(__name__) + + +def file_extension_is_compatible(q): + cfg = q.client["dataset/import/cfg"] + allowed_extensions = cfg.dataset._allowed_file_extensions + + is_correct_extension = [] + for mode in ["train", "validation"]: + dataset_name = q.client[f"dataset/import/cfg/{mode}_dataframe"] + + if dataset_name is None or dataset_name == "None": + continue + is_correct_extension.append(dataset_name.endswith(allowed_extensions)) + return all(is_correct_extension) + + +async def dataset_import( + q: Q, + step: int, + edit: Optional[bool] = False, + error: Optional[str] = "", + warning: Optional[str] = "", + info: Optional[str] = "", + allow_merge: bool = True, +) -> None: + """Display dataset import cards. 
+ + Args: + q: Q + step: current step of wizard + edit: whether in edit mode + error: optional error message + warning: optional warning message + info: optional info message + allow_merge: whether to allow merging dataset when importing + """ + + await clean_dashboard(q, mode="full") + q.client["nav/active"] = "dataset/import" + if step == 1: # select import data source + q.page["dataset/import"] = ui.form_card(box="content", items=[]) + q.client.delete_cards.add("dataset/import") + + if q.client["dataset/import/source"] is None: + q.client["dataset/import/source"] = "Upload" + + import_choices = [ + ui.choice("Upload", "Upload"), + ui.choice("Local", "Local"), + ui.choice("S3", "AWS S3"), + ui.choice("Azure", "Azure Datalake"), + ui.choice("Kaggle", "Kaggle"), + ] + + items = [ + ui.text_l("Import dataset"), + ui.dropdown( + name="dataset/import/source", + label="Source", + value=( + "Upload" + if q.client["dataset/import/source"] is None + else q.client["dataset/import/source"] + ), + choices=import_choices, + trigger=True, + tooltip="Source of dataset import", + ), + ] + + if ( + q.client["dataset/import/source"] is None + or q.client["dataset/import/source"] == "S3" + ): + if q.client["dataset/import/s3_bucket"] is None: + q.client["dataset/import/s3_bucket"] = q.client[ + "default_aws_bucket_name" + ] + if q.client["dataset/import/s3_access_key"] is None: + q.client["dataset/import/s3_access_key"] = q.client[ + "default_aws_access_key" + ] + if q.client["dataset/import/s3_secret_key"] is None: + q.client["dataset/import/s3_secret_key"] = q.client[ + "default_aws_secret_key" + ] + + files = s3_file_options( + q.client["dataset/import/s3_bucket"], + q.client["dataset/import/s3_access_key"], + q.client["dataset/import/s3_secret_key"], + ) + + if not files: + ui_filename = ui.textbox( + name="dataset/import/s3_filename", + label="File name", + value="", + required=True, + tooltip="File name to be imported", + ) + else: + if default_cfg.s3_filename in files: + 
default_file = default_cfg.s3_filename + else: + default_file = files[0] + ui_filename = ui.dropdown( + name="dataset/import/s3_filename", + label="File name", + value=default_file, + choices=[ui.choice(x, x.split("/")[-1]) for x in files], + required=True, + tooltip="File name to be imported", + ) + + items += [ + ui.textbox( + name="dataset/import/s3_bucket", + label="S3 bucket name", + value=q.client["dataset/import/s3_bucket"], + trigger=True, + required=True, + tooltip="S3 bucket name including relative paths", + ), + ui.textbox( + name="dataset/import/s3_access_key", + label="AWS access key", + value=q.client["dataset/import/s3_access_key"], + trigger=True, + required=True, + password=True, + tooltip="Optional AWS access key; empty for anonymous access.", + ), + ui.textbox( + name="dataset/import/s3_secret_key", + label="AWS secret key", + value=q.client["dataset/import/s3_secret_key"], + trigger=True, + required=True, + password=True, + tooltip="Optional AWS secret key; empty for anonymous access.", + ), + ui_filename, + ] + + elif ( + q.client["dataset/import/source"] is None + or q.client["dataset/import/source"] == "Azure" + ): + if q.client["dataset/import/azure_conn_string"] is None: + q.client["dataset/import/azure_conn_string"] = q.client[ + "default_azure_conn_string" + ] + if q.client["dataset/import/azure_container"] is None: + q.client["dataset/import/azure_container"] = q.client[ + "default_azure_container" + ] + + files = azure_file_options( + q.client["dataset/import/azure_conn_string"], + q.client["dataset/import/azure_container"], + ) + print(files) + + if not files: + ui_filename = ui.textbox( + name="dataset/import/azure_filename", + label="File name", + value="", + required=True, + tooltip="File name to be imported", + ) + else: + default_file = files[0] + ui_filename = ui.dropdown( + name="dataset/import/azure_filename", + label="File name", + value=default_file, + choices=[ui.choice(x, x.split("/")[-1]) for x in files], + required=True, 
+ tooltip="File name to be imported", + ) + + items += [ + ui.textbox( + name="dataset/import/azure_conn_string", + label="Datalake connection string", + value=q.client["dataset/import/azure_conn_string"], + trigger=True, + required=True, + password=True, + tooltip="Azure connection string to connect to Datalake storage", + ), + ui.textbox( + name="dataset/import/azure_container", + label="Datalake container name", + value=q.client["dataset/import/azure_container"], + trigger=True, + required=True, + tooltip="Azure Datalake container name including relative paths", + ), + ui_filename, + ] + + elif q.client["dataset/import/source"] == "Upload": + items += [ + ui.file_upload( + name="dataset/import/local_upload", + label="Upload!", + multiple=False, + file_extensions=default_cfg.allowed_file_extensions, + ) + ] + + elif q.client["dataset/import/source"] == "Local": + current_path = ( + q.client["dataset/import/local_path_current"] + if q.client["dataset/import/local_path_current"] is not None + else os.path.expanduser("~") + ) + + if q.args.__wave_submission_name__ == "dataset/import/local_path_list": + idx = int(q.args["dataset/import/local_path_list"][0]) + options = q.client["dataset/import/local_path_list_last"] + new_path = os.path.abspath(os.path.join(current_path, options[idx])) + if os.path.exists(new_path): + current_path = new_path + + results_df = dir_file_table(current_path) + files_list = results_df[current_path].tolist() + q.client["dataset/import/local_path_list_last"] = files_list + q.client["dataset/import/local_path_current"] = current_path + + items += [ + ui.textbox( + name="dataset/import/local_path", + label="File location", + value=current_path, + required=True, + tooltip="Location of file to be imported", + ), + ui_table_from_df( + q=q, + df=results_df, + name="dataset/import/local_path_list", + sortables=[], + searchables=[], + min_widths={current_path: "400"}, + link_col=current_path, + height="calc(65vh)", + ), + ] + + elif 
q.client["dataset/import/source"] == "Kaggle": + if q.client["dataset/import/kaggle_access_key"] is None: + q.client["dataset/import/kaggle_access_key"] = q.client[ + "default_kaggle_username" + ] + if q.client["dataset/import/kaggle_secret_key"] is None: + q.client["dataset/import/kaggle_secret_key"] = q.client[ + "default_kaggle_secret_key" + ] + + items += [ + ui.textbox( + name="dataset/import/kaggle_command", + label="Kaggle API command", + value=default_cfg.kaggle_command, + required=True, + tooltip="Kaggle API command to be executed", + ), + ui.textbox( + name="dataset/import/kaggle_access_key", + label="Kaggle username", + value=q.client["dataset/import/kaggle_access_key"], + required=True, + password=False, + tooltip="Kaggle username for API authentication", + ), + ui.textbox( + name="dataset/import/kaggle_secret_key", + label="Kaggle secret key", + value=q.client["dataset/import/kaggle_secret_key"], + required=True, + password=True, + tooltip="Kaggle secret key for API authentication", + ), + ] + + allowed_types = ", ".join(default_cfg.allowed_file_extensions) + allowed_types = " or".join(allowed_types.rsplit(",", 1)) + items += [ + ui.message_bar(type="info", text=info + f"Must be a {allowed_types} file."), + ui.message_bar(type="error", text=error), + ui.message_bar(type="warning", text=warning), + ] + + q.page["dataset/import"].items = items + + buttons = [ui.button(name="dataset/list", label="Abort")] + if q.client["dataset/import/source"] != "Upload": + buttons.insert( + 0, ui.button(name="dataset/import/2", label="Continue", primary=True) + ) + + q.page["dataset/import/footer"] = ui.form_card( + box="footer", items=[ui.inline(items=buttons, justify="start")] + ) + q.client.delete_cards.add("dataset/import/footer") + + q.client["dataset/import/id"] = None + q.client["dataset/import/cfg_file"] = None + + elif step == 2: # download / import data from source + q.page["dataset/import/footer"] = ui.form_card(box="footer", items=[]) + try: + if not 
q.args["dataset/import/cfg_file"] and not edit: + if q.client["dataset/import/source"] == "S3": + ( + q.client["dataset/import/path"], + q.client["dataset/import/name"], + ) = await s3_download( + q, + q.client["dataset/import/s3_bucket"], + q.client["dataset/import/s3_filename"], + q.client["dataset/import/s3_access_key"], + q.client["dataset/import/s3_secret_key"], + ) + elif q.client["dataset/import/source"] == "Azure": + ( + q.client["dataset/import/path"], + q.client["dataset/import/name"], + ) = await azure_download( + q, + q.client["dataset/import/azure_conn_string"], + q.client["dataset/import/azure_container"], + q.client["dataset/import/azure_filename"], + ) + elif q.client["dataset/import/source"] in ("Upload", "Local"): + ( + q.client["dataset/import/path"], + q.client["dataset/import/name"], + ) = await local_download(q, q.client["dataset/import/local_path"]) + elif q.client["dataset/import/source"] == "Kaggle": + ( + q.client["dataset/import/path"], + q.client["dataset/import/name"], + ) = await kaggle_download( + q, + q.client["dataset/import/kaggle_command"], + q.client["dataset/import/kaggle_access_key"], + q.client["dataset/import/kaggle_secret_key"], + ) + + # store if in edit mode + q.client["dataset/import/edit"] = edit + + # clear dataset triggers from client + for trigger_key in default_cfg.dataset_trigger_keys: + if q.client[f"dataset/import/cfg/{trigger_key}"]: + del q.client[f"dataset/import/cfg/{trigger_key}"] + + await dataset_import( + q, + step=3, + edit=edit, + error=error, + warning=warning, + allow_merge=allow_merge, + ) + except Exception: + logger.error("Dataset error:", exc_info=True) + error = ( + "Dataset import failed. Please make sure all required " + "fields are filled correctly." 
+ ) + await clean_dashboard(q, mode="full") + await dataset_import(q, step=1, error=str(error)) + + elif step == 3: # set dataset configuration + q.page["dataset/import/footer"] = ui.form_card(box="footer", items=[]) + try: + if not q.args["dataset/import/cfg_file"] and not edit: + q.client["dataset/import/name"] = get_unique_dataset_name( + q, q.client["dataset/import/name"] + ) + q.page["dataset/import"] = ui.form_card(box="content", items=[]) + q.client.delete_cards.add("dataset/import") + + wizard = q.page["dataset/import"] + + title = "Configure dataset" + + items = [ + ui.text_l(title), + ui.textbox( + name="dataset/import/name", + label="Dataset name", + value=q.client["dataset/import/name"], + required=True, + ), + ] + + choices_problem_types = [ + ui.choice(name, label) for name, label in get_problem_types() + ] + + if q.client["dataset/import/cfg_file"] is None: + max_substring_len = 0 + for c in choices_problem_types: + problem_type_name = c.name.replace("_config", "") + if problem_type_name in q.client["dataset/import/name"]: + if len(problem_type_name) > max_substring_len: + q.client["dataset/import/cfg_file"] = c.name + q.client["dataset/import/cfg_category"] = c.name.split("_")[ + 0 + ] + max_substring_len = len(problem_type_name) + if q.client["dataset/import/cfg_file"] is None: + q.client["dataset/import/cfg_file"] = default_cfg.cfg_file + q.client["dataset/import/cfg_category"] = q.client[ # type: ignore + "dataset/import/cfg_file" + ].split("_")[0] + + # set default value of problem type if no match to category + if ( + q.client["dataset/import/cfg_category"] + not in q.client["dataset/import/cfg_file"] + ): + q.client["dataset/import/cfg_file"] = get_problem_types( + category=q.client["dataset/import/cfg_category"] + )[0][0] + + model_types = get_model_types(q.client["dataset/import/cfg_file"]) + if len(model_types) > 0: + # add model type to cfg file name here + q.client["dataset/import/cfg_file"] = add_model_type( + 
q.client["dataset/import/cfg_file"], model_types[0][0] + ) + if not edit: + q.client["dataset/import/cfg"] = load_config_py( + config_path=( + f"llm_studio/python_configs/" + f"{q.client['dataset/import/cfg_file']}" + ), + config_name="ConfigProblemBase", + ) + + option_items = get_dataset_elements(cfg=q.client["dataset/import/cfg"], q=q) + items.extend(option_items) + items.append(ui.message_bar(type="error", text=error)) + items.append(ui.message_bar(type="warning", text=warning)) + if file_extension_is_compatible(q): + ui_nav_name = "dataset/import/4/edit" if edit else "dataset/import/4" + buttons = [ + ui.button(name=ui_nav_name, label="Continue", primary=True), + ui.button(name="dataset/list", label="Abort"), + ] + if allow_merge: + datasets_df = q.client.app_db.get_datasets_df() + if datasets_df.shape[0]: + label = "Merge With Existing Dataset" + buttons.insert(1, ui.button(name="dataset/merge", label=label)) + else: + problem_type = make_label( + re.sub("_config.*", "", q.client["dataset/import/cfg_file"]) + ) + items += [ + ui.text( + " The chosen file extensions is not " + f"compatible with {problem_type}. 
" + ) + ] + buttons = [ + ui.button(name="dataset/list", label="Abort"), + ] + q.page["dataset/import/footer"] = ui.form_card( + box="footer", items=[ui.inline(items=buttons, justify="start")] + ) + + wizard.items = items + + q.client.delete_cards.add("dataset/import/footer") + + except Exception as exception: + logger.error("Dataset error:", exc_info=True) + error = clean_error(str(exception)) + await clean_dashboard(q, mode="full") + await dataset_import(q, step=1, error=str(error)) + + elif step == 4: # verify if dataset does not exist already + dataset_name = q.client["dataset/import/name"] + original_name = q.client["dataset/import/original_name"] # used in edit mode + valid_dataset_name = get_unique_dataset_name(q, dataset_name) + if valid_dataset_name != dataset_name and not ( + q.client["dataset/import/edit"] and dataset_name == original_name + ): + err = f"Dataset {dataset_name} already exists" + q.client["dataset/import/name"] = valid_dataset_name + await dataset_import(q, 3, edit=edit, error=err) + else: + await dataset_import(q, 5, edit=edit) + + elif step == 5: # visualize dataset + header = "

Sample Data Visualization

" + valid_visualization = False + try: + cfg = q.client["dataset/import/cfg"] + cfg = parse_ui_elements( + cfg=cfg, q=q, limit=default_cfg.dataset_keys, pre="dataset/import/cfg/" + ) + + q.client["dataset/import/cfg"] = cfg + plot = cfg.logging.plots_class.plot_data(cfg) + text = ( + "Data Validity Check. Click Continue if the input " + "data and labels appear correctly." + ) + if plot.encoding == "image": + plot_item = ui.image(title="", type="png", image=plot.data) + elif plot.encoding == "html": + plot_item = ui.markup(content=plot.data) + elif plot.encoding == "df": + df = pd.read_parquet(plot.data) + df = df.iloc[:2000] + min_widths = {"Content": "800"} + plot_item = ui_table_from_df( + q=q, + df=df, + name="experiment/display/table", + markdown_cells=list(df.columns), + searchables=list(df.columns), + downloadable=False, + resettable=False, + min_widths=min_widths, + height="calc(100vh - 245px)", + max_char_length=5_000, + cell_overflow="tooltip", + ) + else: + raise ValueError(f"Unknown plot encoding `{plot.encoding}`") + + items = [ui.markup(content=header), ui.message_bar(text=text), plot_item] + valid_visualization = True + + await busy_dialog( + q=q, + title="Performing sanity checks on the data", + text="Please be patient...", + ) + # add one-second delay for datasets where sanity check is instant + # to avoid flickering dialog + time.sleep(1) + sanity_check(cfg) + + except Exception as exception: + logger.error( + f"Error while plotting data preview: {exception}", exc_info=True + ) + text = ( + "Error occurred while visualizing the data. Please go back and verify " + "whether the problem type and other settings were set properly." + ) + items = [ + ui.markup(content=header), + ui.message_bar(text=text, type="error"), + ui.expander( + name="expander", + label="Expand Error Traceback", + items=[ui.markup(f"
{traceback.format_exc()}
")], + ), + ] + + buttons = [ + ui.button( + name="dataset/import/6", label="Continue", primary=valid_visualization + ), + ui.button( + name="dataset/import/3/edit", + label="Back", + primary=not valid_visualization, + ), + ui.button(name="dataset/list", label="Abort"), + ] + + q.page["dataset/import"] = ui.form_card(box="content", items=items) + q.client.delete_cards.add("dataset/import") + + q.page["dataset/import/footer"] = ui.form_card( + box="footer", items=[ui.inline(items=buttons, justify="start")] + ) + q.client.delete_cards.add("dataset/import/footer") + + elif step == 6: # create dataset + if q.client["dataset/import/name"] == "": + await clean_dashboard(q, mode="full") + await dataset_import(q, step=2, error="Please enter all required fields!") + + else: + folder_name = q.client["dataset/import/path"].split("/")[-1] + new_folder = q.client["dataset/import/name"] + act_path = q.client["dataset/import/path"] + new_path = new_folder.join(act_path.rsplit(folder_name, 1)) + + try: + shutil.move(q.client["dataset/import/path"], new_path) + + cfg = q.client["dataset/import/cfg"] + + # remap old path to new path + for k in default_cfg.dataset_folder_keys: + old_path = getattr(cfg.dataset, k, None) + if old_path is not None: + setattr( + cfg.dataset, + k, + old_path.replace(q.client["dataset/import/path"], new_path), + ) + + # change the default validation strategy if validation df set + if cfg.dataset.validation_dataframe != "None": + cfg.dataset.validation_strategy = "custom" + cfg_path = f"{new_path}/{q.client['dataset/import/cfg_file']}.yaml" + save_config_yaml(cfg_path, cfg) + + train_rows = None + if os.path.exists(cfg.dataset.train_dataframe): + train_rows = read_dataframe_drop_missing_labels( + cfg.dataset.train_dataframe, cfg + ).shape[0] + validation_rows = None + if os.path.exists(cfg.dataset.validation_dataframe): + validation_rows = read_dataframe_drop_missing_labels( + cfg.dataset.validation_dataframe, cfg + ).shape[0] + + dataset = Dataset( + 
id=q.client["dataset/import/id"], + name=q.client["dataset/import/name"], + path=new_path, + config_file=cfg_path, + train_rows=train_rows, + validation_rows=validation_rows, + ) + if q.client["dataset/import/id"] is not None: + q.client.app_db.delete_dataset(dataset.id) + q.client.app_db.add_dataset(dataset) + await dataset_list(q) + + except Exception as exception: + logger.error("Dataset error:", exc_info=True) + q.client.app_db._session.rollback() + error = clean_error(str(exception)) + await clean_dashboard(q, mode="full") + await dataset_import(q, step=2, error=str(error)) + + +async def dataset_merge(q: Q, step, error=""): + if step == 1: # Select which dataset to merge + await clean_dashboard(q, mode="full") + q.client["nav/active"] = "dataset/merge" + + q.page["dataset/merge"] = ui.form_card(box="content", items=[]) + q.client.delete_cards.add("dataset/merge") + + datasets_df = q.client.app_db.get_datasets_df() + import_choices = [ + ui.choice(x["path"], x["name"]) for idx, x in datasets_df.iterrows() + ] + + items = [ + ui.text_l("Merge current dataset with an existing dataset"), + ui.dropdown( + name="dataset/merge/target", + label="Dataset", + value=datasets_df.iloc[0]["path"], + choices=import_choices, + trigger=False, + tooltip="Source of dataset import", + ), + ] + + if error: + items.append(ui.message_bar(type="error", text=error)) + + q.page["dataset/merge"].items = items + + buttons = [ + ui.button(name="dataset/merge/action", label="Merge", primary=True), + ui.button(name="dataset/import/3", label="Back", primary=False), + ui.button(name="dataset/list", label="Abort"), + ] + + q.page["dataset/import/footer"] = ui.form_card( + box="footer", items=[ui.inline(items=buttons, justify="start")] + ) + q.client.delete_cards.add("dataset/import/footer") + + elif step == 2: # copy file to dataset and go to edit dataset + current_dir = q.client["dataset/import/path"] + target_dir = q.args["dataset/merge/target"] + + if current_dir == target_dir: + await 
dataset_merge(q, step=1, error="Cannot merge dataset with itself") + return + + datasets_df = q.client.app_db.get_datasets_df().set_index("path") + has_dataset_entry = current_dir in datasets_df.index + + if has_dataset_entry: + experiment_df = q.client.app_db.get_experiments_df() + source_id = int(datasets_df.loc[current_dir, "id"]) + has_experiment = any(experiment_df["dataset"].astype(int) == source_id) + else: + source_id = None + has_experiment = False + + current_files = os.listdir(current_dir) + current_files = [x for x in current_files if not x.endswith(".yaml")] + target_files = os.listdir(target_dir) + overlapping_files = list(set(current_files).intersection(set(target_files))) + rename_map = {} + + for file in overlapping_files: + tmp_str = file.split(".") + if len(tmp_str) == 1: + file_name, extension = file, "" + else: + file_name, extension = ".".join(tmp_str[:-1]), f".{tmp_str[-1]}" + + cnt = 1 + while f"{file_name}_{cnt}{extension}" in target_files: + cnt += 1 + + rename_map[file] = f"{file_name}_{cnt}{extension}" + target_files.append(rename_map[file]) + + if len(overlapping_files): + warning = ( + f"Renamed {', '.join(rename_map.keys())} to " + f"{', '.join(rename_map.values())} due to duplicated entries." 
+ ) + else: + warning = "" + + for file in current_files: + new_file = rename_map.get(file, file) + src = os.path.join(current_dir, file) + dst = os.path.join(target_dir, new_file) + + if has_experiment: + if os.path.isdir(src): + shutil.copytree(src, dst) + else: + shutil.copy(src, dst) + else: + shutil.move(src, dst) + + if not has_experiment: + shutil.rmtree(current_dir) + if has_dataset_entry: + q.client.app_db.delete_dataset(source_id) + + dataset_id = int(datasets_df.loc[target_dir, "id"]) + await dataset_edit(q, dataset_id, warning=warning, allow_merge=False) + + +async def dataset_list_table( + q: Q, + show_experiment_datasets: bool = True, +) -> None: + """Pepare dataset list form card + + Args: + q: Q + show_experiment_datasets: whether to also show datasets linked to experiments + """ + + q.client["dataset/list/df_datasets"] = get_datasets( + q=q, + show_experiment_datasets=show_experiment_datasets, + ) + + df_viz = q.client["dataset/list/df_datasets"].copy() + + columns_to_drop = [ + "id", + "path", + "config_file", + "validation dataframe", + ] + + df_viz = df_viz.drop(columns=columns_to_drop, errors="ignore") + if "problem type" in df_viz.columns: + df_viz["problem type"] = df_viz["problem type"].str.replace("Text ", "") + + widths = { + "name": "200", + "problem type": "210", + "train dataframe": "190", + "train rows": "120", + "validation rows": "130", + "labels": "120", + "actions": "5", + } + + actions_dict = { + "dataset/newexperiment": "New experiment", + "dataset/edit": "Edit dataset", + "dataset/delete/dialog/single": "Delete dataset", + } + + q.page["dataset/list"] = ui.form_card( + box="content", + items=[ + ui_table_from_df( + q=q, + df=df_viz, + name="dataset/list/table", + sortables=["train rows", "validation rows"], + filterables=["name", "problem type"], + searchables=[], + min_widths=widths, + link_col="name", + height="calc(100vh - 245px)", + actions=actions_dict, + ), + ui.message_bar(type="info", text=""), + ], + ) + 
q.client.delete_cards.add("dataset/list") + + +async def dataset_list(q: Q, reset: bool = True) -> None: + """Display all datasets.""" + q.client["nav/active"] = "dataset/list" + + if reset: + await clean_dashboard(q, mode="full") + await dataset_list_table(q) + + q.page["dataset/display/footer"] = ui.form_card( + box="footer", + items=[ + ui.inline( + items=[ + ui.button( + name="dataset/import", label="Import dataset", primary=True + ), + ui.button( + name="dataset/list/delete", + label="Delete datasets", + primary=False, + ), + ], + justify="start", + ) + ], + ) + q.client.delete_cards.add("dataset/display/footer") + remove_temp_files(q) + + await q.page.save() + + +async def dataset_newexperiment(q: Q, dataset_id: int): + """Start a new experiment from given dataset.""" + + dataset = q.client.app_db.get_dataset(dataset_id) + + q.client["experiment/start/cfg_file"] = dataset.config_file.split("/")[-1].replace( + ".yaml", "" + ) + q.client["experiment/start/cfg_category"] = q.client[ + "experiment/start/cfg_file" + ].split("_")[0] + q.client["experiment/start/dataset"] = str(dataset_id) + + await experiment_start(q) + + +async def dataset_edit( + q: Q, dataset_id: int, error: str = "", warning: str = "", allow_merge: bool = True +): + """Edit selected dataset. + + Args: + q: Q + dataset_id: dataset id to edit + error: optional error message + warning: optional warning message + allow_merge: whether to allow merging dataset when editing + """ + + dataset = q.client.app_db.get_dataset(dataset_id) + + experiments_df = q.client.app_db.get_experiments_df() + experiments_df = experiments_df[experiments_df["dataset"] == str(dataset_id)] + statuses, _ = get_experiments_status(experiments_df) + num_invalid = len([stat for stat in statuses if stat in ["running", "queued"]]) + + if num_invalid: + info = "s" if num_invalid > 1 else "" + info_str = ( + f"Dataset {dataset.name} is linked to {num_invalid} " + f"running or queued experiment{info}. 
Wait for them to finish or stop them " + "first before editing the dataset." + ) + q.page["dataset/list"].items[1].message_bar.text = info_str + return + + q.client["dataset/import/id"] = dataset_id + + q.client["dataset/import/cfg_file"] = dataset.config_file.split("/")[-1].replace( + ".yaml", "" + ) + q.client["dataset/import/cfg_category"] = q.client["dataset/import/cfg_file"].split( + "_" + )[0] + q.client["dataset/import/path"] = dataset.path + q.client["dataset/import/name"] = dataset.name + q.client["dataset/import/original_name"] = dataset.name + q.client["dataset/import/cfg"] = load_config_yaml(dataset.config_file) + + if allow_merge and experiments_df.shape[0]: + allow_merge = False + + await dataset_import( + q=q, step=2, edit=True, error=error, warning=warning, allow_merge=allow_merge + ) + + +async def dataset_list_delete(q: Q): + """Allow to select multiple datasets for deletion.""" + + await dataset_list_table(q, show_experiment_datasets=False) + + q.page["dataset/list"].items[0].table.multiple = True + + info_str = "Only datasets not linked to experiments can be deleted." + + q.page["dataset/list"].items[1].message_bar.text = info_str + + q.page["dataset/display/footer"].items = [ + ui.inline( + items=[ + ui.button( + name="dataset/delete/dialog", label="Delete datasets", primary=True + ), + ui.button(name="dataset/list/delete/abort", label="Abort"), + ] + ) + ] + + +async def dataset_delete(q: Q, dataset_ids: List[int]): + """Delete selected datasets. 
+ + Args: + q: Q + dataset_ids: list of dataset ids to delete + """ + + for dataset_id in dataset_ids: + dataset = q.client.app_db.get_dataset(dataset_id) + q.client.app_db.delete_dataset(dataset.id) + + try: + shutil.rmtree(dataset.path) + except OSError: + pass + + +async def dataset_delete_single(q: Q, dataset_id: int): + dataset = q.client.app_db.get_dataset(dataset_id) + + experiments_df = q.client.app_db.get_experiments_df() + num_experiments = sum(experiments_df["dataset"] == str(dataset_id)) + if num_experiments: + info = "s" if num_experiments > 1 else "" + info_str = ( + f"Dataset {dataset.name} is linked to {num_experiments} " + f"experiment{info}. Only datasets not linked to experiments can be deleted." + ) + await dataset_list(q) + q.page["dataset/list"].items[1].message_bar.text = info_str + else: + await dataset_delete(q, [dataset_id]) + await dataset_list(q) + + +async def dataset_display(q: Q) -> None: + """Display a selected dataset.""" + + dataset_id = q.client["dataset/list/df_datasets"]["id"].iloc[ + q.client["dataset/display/id"] + ] + dataset: Dataset = q.client.app_db.get_dataset(dataset_id) + config_filename = dataset.config_file + cfg = load_config_yaml(config_filename) + dataset_filename = cfg.dataset.train_dataframe + + if ( + q.client["dataset/display/tab"] is None + or q.args["dataset/display/data"] is not None + ): + q.client["dataset/display/tab"] = "dataset/display/data" + + if q.args["dataset/display/visualization"] is not None: + q.client["dataset/display/tab"] = "dataset/display/visualization" + + if q.args["dataset/display/statistics"] is not None: + q.client["dataset/display/tab"] = "dataset/display/statistics" + + if q.args["dataset/display/summary"] is not None: + q.client["dataset/display/tab"] = "dataset/display/summary" + + await clean_dashboard(q, mode=q.client["dataset/display/tab"]) + + items: List[Tab] = [ + ui.tab(name="dataset/display/data", label="Sample Train Data"), + ui.tab( + 
name="dataset/display/visualization", label="Sample Train Visualization" + ), + ui.tab(name="dataset/display/statistics", label="Train Data Statistics"), + ui.tab(name="dataset/display/summary", label="Summary"), + ] + + q.page["dataset/display/tab"] = ui.tab_card( + box="nav2", + link=True, + items=items, + value=q.client["dataset/display/tab"], + ) + q.client.delete_cards.add("dataset/display/tab") + + if q.client["dataset/display/tab"] == "dataset/display/data": + await show_data_tab(q=q, cfg=cfg, filename=dataset_filename) + + elif q.client["dataset/display/tab"] == "dataset/display/visualization": + await show_visualization_tab(q, cfg) + + elif q.client["dataset/display/tab"] == "dataset/display/statistics": + await show_statistics_tab( + q, dataset_filename=dataset_filename, config_filename=config_filename + ) + + elif q.client["dataset/display/tab"] == "dataset/display/summary": + await show_summary_tab(q, dataset_id) + + q.page["dataset/display/footer"] = ui.form_card( + box="footer", + items=[ + ui.inline( + items=[ + ui.button( + name="dataset/newexperiment/from_current", + label="Create experiment", + primary=False, + disabled=False, + tooltip=None, + ), + ui.button(name="dataset/list", label="Back", primary=False), + ], + justify="start", + ) + ], + ) + q.client.delete_cards.add("dataset/display/footer") + + +async def show_data_tab(q, cfg, filename: str): + fill_columns = get_fill_columns(cfg) + df = read_dataframe(filename, n_rows=200, fill_columns=fill_columns) + q.page["dataset/display/data"] = ui.form_card( + box="first", + items=[ + ui_table_from_df( + q=q, + df=df, + name="dataset/display/data/table", + sortables=list(df.columns), + height="calc(100vh - 265px)", + cell_overflow="wrap", + ) + ], + ) + q.client.delete_cards.add("dataset/display/data") + + +async def show_visualization_tab(q, cfg): + try: + plot = cfg.logging.plots_class.plot_data(cfg) + except Exception as error: + logger.error(f"Error while plotting data preview: {error}", 
exc_info=True) + plot = PlotData("

Error while plotting data.

", encoding="html") + card: ImageCard | MarkupCard | FormCard + if plot.encoding == "image": + card = ui.image_card(box="first", title="", type="png", image=plot.data) + elif plot.encoding == "html": + card = ui.markup_card(box="first", title="", content=plot.data) + elif plot.encoding == "df": + df = pd.read_parquet(plot.data) + df = df.iloc[:2000] + min_widths = {"Content": "800"} + card = ui.form_card( + box="first", + items=[ + ui_table_from_df( + q=q, + df=df, + name="dataset/display/visualization/table", + markdown_cells=list(df.columns), + searchables=list(df.columns), + downloadable=True, + resettable=True, + min_widths=min_widths, + height="calc(100vh - 245px)", + max_char_length=50_000, + cell_overflow="tooltip", + ) + ], + ) + + else: + raise ValueError(f"Unknown plot encoding `{plot.encoding}`") + q.page["dataset/display/visualization"] = card + q.client.delete_cards.add("dataset/display/visualization") + + +async def show_summary_tab(q, dataset_id): + dataset_df = get_datasets(q) + dataset_df = dataset_df[dataset_df.id == dataset_id] + stat_list_items: List[StatListItem] = [] + for col in dataset_df.columns: + if col in ["id", "config_file", "path", "process_id", "status"]: + continue + v = dataset_df[col].values[0] + t: StatListItem = ui.stat_list_item(label=make_label(col), value=str(v)) + + stat_list_items.append(t) + q.page["dataset/display/summary"] = ui.stat_list_card( + box="first", items=stat_list_items, title="" + ) + q.client.delete_cards.add("dataset/display/summary") + + +async def show_statistics_tab(q, dataset_filename, config_filename): + cfg_hash = hashlib.md5(open(config_filename, "rb").read()).hexdigest() + stats_dict = compute_dataset_statistics(dataset_filename, config_filename, cfg_hash) + + for chat_type in ["prompts", "answers"]: + q.page[f"dataset/display/statistics/{chat_type}_histogram"] = histogram_card( + x=stats_dict[chat_type], + x_axis_description=f"text_length_{chat_type.capitalize()}", + title=f"Text Length Distribution 
for {chat_type.capitalize()}" + f" (split by whitespace)", + histogram_box="first", + ) + q.client.delete_cards.add(f"dataset/display/statistics/{chat_type}_histogram") + + q.page["dataset/display/statistics/full_conversation_histogram"] = histogram_card( + x=stats_dict["complete_conversations"], + x_axis_description="text_length_complete_conversations", + title="Text Length Distribution for complete " + "conversations (split by whitespace)", + histogram_box="second", + ) + q.client.delete_cards.add("dataset/display/statistics/full_conversation_histogram") + + if len(set(stats_dict["number_of_prompts"])) > 1: + q.page["dataset/display/statistics/parent_id_length_histogram"] = ( + histogram_card( + x=stats_dict["number_of_prompts"], + x_axis_description="number_of_prompts", + title="Distribution of number of prompt-answer turns per conversation.", + histogram_box="second", + ) + ) + q.client.delete_cards.add( + "dataset/display/statistics/parent_id_length_histogram" + ) + + df_stats = stats_dict["df_stats"] + if df_stats is None: + component_items = [ + ui.text( + "Dataset does not contain numerical or text features. " + "No statistics available." + ) + ] + else: + if df_stats.shape[1] > 5: # mixed text and numeric + widths = {col: "77" for col in df_stats} + else: # only text features + widths = None + component_items = [ + ui_table_from_df( + q=q, + df=df_stats, + name="dataset/display/statistics/table", + sortables=list(df_stats.columns), + min_widths=widths, + height="265px", + ) + ] + q.page["dataset/display/statistics"] = ui.form_card( + box="third", + items=component_items, + ) + q.client.delete_cards.add("dataset/display/statistics") + + +@functools.lru_cache() +def compute_dataset_statistics(dataset_path: str, cfg_path: str, cfg_hash: str): + """ + Compute various statistics for a dataset. 
+ - text length distribution for prompts and answers + - text length distribution for complete conversations + - distribution of number of prompt-answer turns per conversation + - statistics for non text features + + We use LRU caching to avoid recomputing the statistics for the same dataset. + Thus, cfg_hash is used as a function argument to identify the dataset. + """ + df_train = read_dataframe(dataset_path) + cfg = load_config_yaml(cfg_path) + conversations = get_conversation_chains( + df=df_train, cfg=cfg, limit_chained_samples=True + ) + stats_dict = {} + for chat_type in ["prompts", "answers"]: + text_lengths = [ + [len(text.split(" ")) for text in conversation[chat_type]] + for conversation in conversations + ] + text_lengths = [item for sublist in text_lengths for item in sublist] + stats_dict[chat_type] = text_lengths + input_texts = [] + for conversation in conversations: + input_text = conversation["systems"][0] + prompts = conversation["prompts"] + answers = conversation["answers"] + for prompt, answer in zip(prompts, answers): + input_text += prompt + answer + input_texts += [input_text] + stats_dict["complete_conversations"] = [ + len(text.split(" ")) for text in input_texts + ] + stats_dict["number_of_prompts"] = [ + len(conversation["prompts"]) for conversation in conversations + ] + stats_dict["df_stats"] = get_frame_stats(df_train) + return stats_dict + + +async def dataset_import_uploaded_file(q: Q): + local_path = await q.site.download( + q.args["dataset/import/local_upload"][0], + f"{get_data_dir(q)}/" + f'{q.args["dataset/import/local_upload"][0].split("/")[-1]}', + ) + await q.site.unload(q.args["dataset/import/local_upload"][0]) + valid, error = check_valid_upload_content(local_path) + if valid: + q.args["dataset/import/local_path"] = local_path + q.client["dataset/import/local_path"] = q.args["dataset/import/local_path"] + await dataset_import(q, step=2) + else: + await dataset_import(q, step=1, error=error) + + +async def 
dataset_delete_current_datasets(q: Q): + dataset_ids = list( + q.client["dataset/list/df_datasets"]["id"].iloc[ + list(map(int, q.client["dataset/list/table"])) + ] + ) + await dataset_delete(q, dataset_ids) + await dataset_list(q) diff --git a/llm_studio/app_utils/sections/experiment.py b/llm_studio/app_utils/sections/experiment.py new file mode 100644 index 0000000000000000000000000000000000000000..4c7191d9df27e18fb4f54c8bc429d42d967d8ab3 --- /dev/null +++ b/llm_studio/app_utils/sections/experiment.py @@ -0,0 +1,1903 @@ +import glob +import logging +import os +import shutil +import time +import zipfile +from pathlib import Path +from typing import Callable, List, Optional, Set + +import accelerate +import einops +import huggingface_hub +import numpy as np +import pandas as pd +import torch +import transformers +import yaml +from h2o_wave import Q, data, ui +from sqlitedict import SqliteDict + +from llm_studio.app_utils.config import default_cfg +from llm_studio.app_utils.hugging_face_utils import ( + get_model_card, + publish_model_to_hugging_face, +) +from llm_studio.app_utils.sections.chat import chat_tab, load_cfg_model_tokenizer +from llm_studio.app_utils.sections.common import clean_dashboard +from llm_studio.app_utils.utils import ( + add_model_type, + flatten_dict, + get_cfg_list_items, + get_data_dir, + get_download_link, + get_experiment_status, + get_experiments, + get_model_types, + get_problem_categories, + get_problem_types, + get_ui_elements, + get_unique_name, + hf_repo_friendly_name, + parse_ui_elements, + remove_model_type, + set_env, + start_experiment, +) +from llm_studio.app_utils.wave_utils import busy_dialog, ui_table_from_df, wave_theme +from llm_studio.python_configs.cfg_checks import check_config_for_errors +from llm_studio.src.datasets.text_utils import get_tokenizer +from llm_studio.src.tooltips import tooltips +from llm_studio.src.utils.config_utils import ( + NON_GENERATION_PROBLEM_TYPES, + load_config_py, + load_config_yaml, + 
save_config_yaml, +) +from llm_studio.src.utils.exceptions import LLMResourceException +from llm_studio.src.utils.export_utils import ( + check_available_space, + get_artifact_path_path, + get_logs_path, + get_model_path, + get_predictions_path, + save_logs, + save_prediction_outputs, +) +from llm_studio.src.utils.logging_utils import write_flag +from llm_studio.src.utils.modeling_utils import unwrap_model +from llm_studio.src.utils.plot_utils import PLOT_ENCODINGS +from llm_studio.src.utils.utils import add_file_to_zip, kill_child_processes + +logger = logging.getLogger(__name__) + + +async def experiment_start(q: Q) -> None: + """Display experiment start cards.""" + + await clean_dashboard(q, mode="experiment_start", exclude=["experiment/start"]) + q.client["nav/active"] = "experiment/start" + + show_update_warnings = True + is_create_experiment = False + # reset certain configs if new experiment start session + if ( + q.args.__wave_submission_name__ == "experiment/start" + or q.args.__wave_submission_name__ == "experiment/start_experiment" + or q.args.__wave_submission_name__ == "dataset/newexperiment" + or q.args.__wave_submission_name__ == "dataset/newexperiment/from_current" + or q.args.__wave_submission_name__ == "experiment/list/new" + ): + q.client["experiment/start/cfg_experiment_prev"] = None + q.client["experiment/start/cfg_file_prev"] = None + q.client["experiment/start/prev_dataset"] = None + q.client["experiment/start/cfg_sub"] = None + show_update_warnings = False + is_create_experiment = True + + # get all the datasets available + df_datasets = q.client.app_db.get_datasets_df() + # Hide inference only datasets + df_datasets = df_datasets.loc[df_datasets["train_rows"].notna()] + if ( + not q.client["experiment/start/dataset"] + or q.client["experiment/start/dataset"] not in df_datasets.id.astype(str).values + ): + if len(df_datasets) >= 1: + q.client["experiment/start/dataset"] = str(df_datasets["id"].iloc[-1]) + else: + 
q.client["experiment/start/dataset"] = "1" + + warning_message = "Experiment settings might be updated after changing {}" + + items = [ + ui.separator(name="general_expander", label="General settings"), + ui.dropdown( + name="experiment/start/dataset", + label="Dataset", + required=True, + value=q.client["experiment/start/dataset"], + choices=[ + ui.choice(str(row["id"]), str(row["name"])) + for _, row in df_datasets.iterrows() + ], + trigger=True, + tooltip=tooltips["experiments_dataset"], + ), + ] + + if ( + show_update_warnings + and q.client["experiment/start/dataset_prev"] + != q.client["experiment/start/dataset"] + ): + items += [ + ui.message_bar(type="warning", text=warning_message.format("Dataset")) + ] + show_update_warnings = False + + if ( + q.client["experiment/start/cfg_file"] is None + or q.client["experiment/start/dataset_prev"] + != q.client["experiment/start/dataset"] + ) and q.client["experiment/start/cfg_category"] != "experiment": + dataset = q.client.app_db.get_dataset(q.client["experiment/start/dataset"]) + if dataset is not None: + problem_type = dataset.config_file.replace(dataset.path + "/", "").replace( + ".yaml", "" + ) + else: + problem_type = default_cfg.cfg_file + q.client["experiment/start/cfg_file"] = problem_type + q.client["experiment/start/cfg_category"] = problem_type.split("_")[0] + + if q.client["experiment/start/cfg_category"] == "experiment": + q.client["experiment/start/cfg_file"] = "experiment" + + # get all experiments + df_experiments = get_experiments(q, mode="train") + + # get all problem category choices + choices_problem_categories = [ + ui.choice(name, label) for name, label in get_problem_categories() + ] + + if len(df_experiments["id"]) > 0: + choices_problem_categories += [ui.choice("experiment", "From Experiment")] + + # set default value of problem type if no match to category + if ( + q.client["experiment/start/cfg_category"] + not in q.client["experiment/start/cfg_file"] + ): + if 
q.client["experiment/start/cfg_category"] != "experiment": + q.client["experiment/start/cfg_file"] = get_problem_types( + category=q.client["experiment/start/cfg_category"] + )[0][0] + + # get all problem type choices + choices_problem_types = [ + ui.choice(name, label) + for name, label in get_problem_types( + category=q.client["experiment/start/cfg_category"] + ) + ] + + # remove model type if present in cfg file name here + q.client["experiment/start/cfg_file"] = remove_model_type( + q.client["experiment/start/cfg_file"] + ) + + if len(df_experiments["id"]) > 0: + if q.client["experiment/start/cfg_experiment"] is None: + q.client["experiment/start/cfg_experiment"] = str( + df_experiments["id"].iloc[0] + ) + # Default pretrained from the previous experiment to False + if ( + q.client["experiment/start/cfg_experiment_pretrained"] is None + or is_create_experiment + ): + q.client["experiment/start/cfg_experiment_pretrained"] = False + + if q.client["experiment/start/cfg_category"] != "experiment": + items += [ + ui.dropdown( + name="experiment/start/cfg_file", + label="Problem Type", + required=True, + choices=choices_problem_types, + value=q.client["experiment/start/cfg_file"], + trigger=True, + tooltip=tooltips["experiments_problem_type"], + ) + ] + + model_types = get_model_types(q.client["experiment/start/cfg_file"]) + if len(model_types) > 0: + choices = [ui.choice(name, label) for name, label in model_types] + if q.client["experiment/start/cfg_sub"] in [None, ""]: + q.client["experiment/start/cfg_sub"] = model_types[0][0] + items += [ + ui.dropdown( + name="experiment/start/cfg_sub", + label="Model Type", + required=True, + choices=choices, + value=q.client["experiment/start/cfg_sub"], + trigger=True, + ) + ] + else: + q.client["experiment/start/cfg_sub"] = "" + + # add model type to cfg file name here + q.client["experiment/start/cfg_file"] = add_model_type( + q.client["experiment/start/cfg_file"], q.client["experiment/start/cfg_sub"] + ) + + if ( + 
show_update_warnings + and q.client["experiment/start/cfg_file_prev"] + != q.client["experiment/start/cfg_file"] + and q.client["experiment/start/cfg_category"] != "experiment" + ): + items += [ + ui.message_bar(type="warning", text=warning_message.format("Problem Type")) + ] + show_update_warnings = False + + if q.client["experiment/start/cfg_category"] == "experiment": + items += [ + ui.dropdown( + name="experiment/start/cfg_experiment", + label="Experiment", + required=True, + choices=[ + ui.choice(str(row.id), row["name"]) + for _, row in df_experiments.iterrows() + ], + value=q.client["experiment/start/cfg_experiment"], + trigger=True, + ) + ] + + if ( + show_update_warnings + and q.client["experiment/start/cfg_experiment_prev"] + != q.client["experiment/start/cfg_experiment"] + ): + items += [ + ui.message_bar( + type="warning", text=warning_message.format("previous Experiment") + ) + ] + + # Show pretrained weights toggle only for successfully finished experiments + if ( + df_experiments.loc[ + df_experiments.id == int(q.client["experiment/start/cfg_experiment"]), + "status", + ].values[0] + == "finished" + ): + items += [ + ui.toggle( + name="experiment/start/cfg_experiment_pretrained", + label="Use previous experiment weights", + value=q.client["experiment/start/cfg_experiment_pretrained"], + trigger=True, + ) + ] + + # only show yaml option, when not starting from another experiment + if q.client["experiment/start/cfg_category"] != "experiment": + items += [ + ui.toggle( + name="experiment/start/from_yaml", + label="Import config from YAML", + value=False, + trigger=True, + tooltip=tooltips["experiments_import_config_from_yaml"], + ) + ] + + if q.args["experiment/start/from_yaml"]: + items += [ + ui.file_upload( + name="experiment/upload_yaml", + label="Upload!", + multiple=False, + file_extensions=["yaml"], + ) + ] + + if q.args["experiment/upload_yaml"] is not None: + # reset previous, so the UI will be reloaded + 
q.client["experiment/start/cfg_file_prev"] = None + await config_import_uploaded_file(q) + + logger.info( + f"PREV {q.client['experiment/start/cfg_file_prev']} " + f"{q.client['experiment/start/cfg_file']} " + f"{q.client['experiment/start/dataset_prev']} " + f"{q.client['experiment/start/dataset']} " + f"{q.client['experiment/start/cfg_experiment_prev']} " + f"{q.client['experiment/start/cfg_experiment']} " + ) + + # set mode to training + q.client["experiment/start/cfg_mode/mode"] = "train" + + if q.client["experiment/start/cfg_category"] == "experiment": + logger.info("Starting from experiment") + + # reset previous config file + q.client["experiment/start/cfg_file_prev"] = None + + q.client["experiment/start/experiment"] = q.client.app_db.get_experiment( + q.client["experiment/start/cfg_experiment"] + ) + + parent_path = os.path.dirname(q.client["experiment/start/experiment"].path) + parent_exp_name = parent_path.split("/")[-1] + parent_experiment = f"{parent_exp_name}" + + old_config = load_config_yaml(f"{parent_path}/cfg.yaml") + old_config._parent_experiment = parent_experiment + + q.client["experiment/start/cfg"] = old_config + + # set pretrained weights + if q.client["experiment/start/cfg_experiment_pretrained"]: + prev_weights = os.path.join( + q.client["experiment/start/experiment"].path, + "checkpoint.pth", + ) + if os.path.exists(prev_weights): + q.client["experiment/start/cfg"].architecture.pretrained_weights = ( + prev_weights + ) + q.client["experiment/start/cfg"].architecture._visibility[ + "pretrained_weights" + ] = -1 + + experiments_df = q.client.app_db.get_experiments_df() + output_dir = os.path.abspath( + os.path.join(q.client["experiment/start/cfg"].output_directory, "..") + ) + q.client["experiment/start/cfg"].experiment_name = get_unique_name( + q.client["experiment/start/cfg"].experiment_name, + experiments_df["name"].values, + lambda x: os.path.exists(os.path.join(output_dir, x)), + ) + + # Configuration flags: + # from_dataset -- take 
the values from the dataset config + # from_cfg -- take the values from the configuration file + # from_default -- take the values from the the default settings + # from_dataset_args -- take the values from the dataset's q.args + # Otherwise -- take the values from the q.args (user input) + + # pick default values from config + if ( + q.client["experiment/start/cfg_experiment_prev"] + != q.client["experiment/start/cfg_experiment"] + ): + q.client["experiment/start/cfg_mode/from_dataset"] = False + q.client["experiment/start/cfg_mode/from_cfg"] = True + q.client["experiment/start/cfg_mode/from_dataset_args"] = False + + q.client["experiment/start/dataset"] = str( + q.client["experiment/start/experiment"].dataset + ) + + items[1].dropdown.value = q.client["experiment/start/dataset"] + # pick default values from config or dataset + elif ( + q.client["experiment/start/dataset_prev"] + != q.client["experiment/start/dataset"] + ): + q.client["experiment/start/cfg_mode/from_dataset"] = True + q.client["experiment/start/cfg_mode/from_cfg"] = True + q.client["experiment/start/cfg_mode/from_dataset_args"] = False + # pick default values from args + else: + q.client["experiment/start/cfg_mode/from_dataset"] = False + q.client["experiment/start/cfg_mode/from_cfg"] = False + q.client["experiment/start/cfg_mode/from_dataset_args"] = True + + q.client["experiment/start/cfg_mode/from_default"] = False + q.client["experiment/start/cfg_experiment_prev"] = q.client[ + "experiment/start/cfg_experiment" + ] + + else: + logger.info("Starting from CFG") + + # reset previous experiment + q.client["experiment/start/cfg_experiment_prev"] = None + + # pick default values from dataset or config + if ( + q.client["experiment/start/cfg_file_prev"] + != q.client["experiment/start/cfg_file"] + ) or ( + q.client["experiment/start/dataset_prev"] + != q.client["experiment/start/dataset"] + ): + q.client["experiment/start/cfg_mode/from_dataset"] = True + q.client["experiment/start/cfg_mode/from_cfg"] 
= True + q.client["experiment/start/cfg_mode/from_default"] = True + q.client["experiment/start/cfg_mode/from_dataset_args"] = False + # pick default values from args + else: + q.client["experiment/start/cfg_mode/from_dataset"] = False + q.client["experiment/start/cfg_mode/from_cfg"] = False + q.client["experiment/start/cfg_mode/from_default"] = False + q.client["experiment/start/cfg_mode/from_dataset_args"] = True + + q.client["experiment/start/cfg_file_prev"] = q.client[ + "experiment/start/cfg_file" + ] + + config_path = ( + f"llm_studio/python_configs/{q.client['experiment/start/cfg_file']}" + ) + + q.client["experiment/start/cfg"] = load_config_py( + config_path=config_path, config_name="ConfigProblemBase" + ) + + q.client["experiment/start/dataset_prev"] = q.client["experiment/start/dataset"] + logger.info(f"From dataset {q.client['experiment/start/cfg_mode/from_dataset']}") + logger.info(f"From cfg {q.client['experiment/start/cfg_mode/from_cfg']}") + logger.info(f"From default {q.client['experiment/start/cfg_mode/from_default']}") + logger.info(f"Config file: {q.client['experiment/start/cfg_file']}") + + option_items = get_ui_elements(cfg=q.client["experiment/start/cfg"], q=q) + items.extend(option_items) + + if q.client["experiment/start/cfg_mode/from_cfg"]: + q.page["experiment/start"] = ui.form_card(box="content", items=items) + else: + q.page["experiment/start"].items = items + + q.client.delete_cards.add("experiment/start") + + q.page["experiment/start/footer"] = ui.form_card( + box="footer", + items=[ + ui.inline( + items=[ + ui.button( + name="experiment/start/run", + label="Run experiment", + primary=True, + ) + ], + justify="start", + ) + ], + ) + q.client.delete_cards.add("experiment/start/footer") + + +async def experiment_run(q: Q, pre: str = "experiment/start"): + """Start an experiment. 
+ + Args: + q: Q + pre: prefix for client key + """ + # import here to avoid circular imports + from llm_studio.app_utils.sections.project import list_current_experiments + + logger.info("Starting experiment") + logger.info(f"{pre}/cfg_file") + logger.info(f"CFG: {q.client[f'{pre}/cfg_file']}") + + if q.client[f"{pre}/cfg_category"] == "experiment": + q.client[f"{pre}/cfg_file"] = q.client[f"{pre}/experiment"].config_file + + cfg = q.client[f"{pre}/cfg"] + cfg = parse_ui_elements(cfg=cfg, q=q, pre=f"{pre}/cfg/") + cfg.experiment_name = cfg.experiment_name.replace("/", "-") + + errors = check_config_for_errors(cfg) + if errors["title"] and not q.args["experiment/start/error/proceed"]: + title = ( + errors["title"][0] + if len(errors["title"]) == 1 + else "The following configuration mismatches were found:" + ) + error_text = [ui.text(message) for message in errors["message"]] + q.page["meta"].dialog = ui.dialog( + title=title, + name="experiment/start/error/dialog", + items=error_text + + [ + ui.buttons( + [ + ui.button( + name="experiment/start/error/ok", label="Ok", primary=True + ), + ui.button( + name="experiment/start/error/proceed", + label="I want to proceed anyhow", + primary=False, + ), + ] + ) + ], + closable=True, + ) + q.client["keep_meta"] = True + else: + start_experiment(cfg=cfg, q=q, pre=pre) + await list_current_experiments(q) + + +def get_experiment_table( + q, df_viz, predictions, height="calc(100vh - 245px)", actions=None +): + col_remove = [ + "id", + "path", + "mode", + "seed", + "process_id", + "gpu_list", + "loss", + "eta", + "epoch", + "config_file", + ] + if predictions: + col_remove += ["epoch", "val metric"] + + for col in col_remove: + if col in df_viz: + del df_viz[col] + # df_viz = df_viz.rename( + # columns={"process_id": "pid", "config_file": "problem type"}, + # ) + # df_viz["problem type"] = df_viz["problem type"].str.replace("Text ", "") + + if actions == "experiment" and q.client["experiment/list/mode"] == "train": + actions_dict 
= { + "experiment/list/new": "New experiment", + "experiment/list/rename": "Rename experiment", + "experiment/list/stop/table": "Stop experiment", + "experiment/list/delete/table/dialog": "Delete experiment", + } + else: + actions_dict = {} + + min_widths = { + "name": "350", + "dataset": "150", + # "problem type": "190", + "metric": "75", + "val metric": "102", + "progress": "85", + "status": "90", + "info": "115", + "actions": "5" if predictions else "5", + } + + if predictions: + for k, v in min_widths.items(): + min_widths[k] = str(int(np.ceil(int(v) * 1.05))) + + return ui_table_from_df( + q=q, + df=df_viz, + name="experiment/list/table", + sortables=["val metric"], + filterables=["name", "dataset", "problem type", "metric", "status"], + searchables=["name", "dataset"], + numerics=["val metric"], + tags=["status"], + progresses=["progress"], + min_widths=min_widths, + link_col="name", + height=height, + actions=actions_dict, + ) + + +async def experiment_list( + q: Q, + reset: bool = True, + allowed_statuses: Optional[List[str]] = None, + actions: bool = True, +) -> None: + """List all experiments.""" + + if q.client["experiment/list/mode"] is None: + q.client["experiment/list/mode"] = "train" + + if q.client["experiment/list/mode"] == "train": + q.client["nav/active"] = "experiment/list" + else: + q.client["nav/active"] = "experiment/list_predictions" + + if reset: + await clean_dashboard(q, mode="full") + + q.client["experiment/list/df_experiments"] = get_experiments( + q, + mode=q.client["experiment/list/mode"], + status=allowed_statuses, + ) + + df_viz = q.client["experiment/list/df_experiments"].copy() + + table = get_experiment_table( + q, + df_viz, + q.client["experiment/list/mode"] == "predict", + actions="experiment" if actions else None, + ) + + message_bar = get_experiment_list_message_bar(q) + + items = [table, message_bar] + + q.page["experiment/list"] = ui.form_card(box="content", items=items) + q.client.delete_cards.add("experiment/list") + + 
buttons = [ + ui.button(name="experiment/list/refresh", label="Refresh", primary=True), + ui.button( + name="experiment/list/compare", + label="Compare experiments", + primary=False, + ), + ui.button(name="experiment/list/stop", label="Stop experiments", primary=False), + ui.button( + name="experiment/list/delete", label="Delete experiments", primary=False + ), + ] + + q.page["dataset/display/footer"] = ui.form_card( + box="footer", items=[ui.inline(items=buttons, justify="start")] + ) + q.client.delete_cards.add("dataset/display/footer") + + +def get_table_and_message_item_indices(q): + table_item_idx, message_item_idx = 0, 1 + return table_item_idx, message_item_idx + + +async def experiment_compare(q: Q, selected_rows: list): + if q.client["experiment/compare/tab"] is None: + q.client["experiment/compare/tab"] = "experiment/compare/charts" + if q.args["experiment/compare/charts"] is not None: + q.client["experiment/compare/tab"] = "experiment/compare/charts" + if q.args["experiment/compare/config"] is not None: + q.client["experiment/compare/tab"] = "experiment/compare/config" + + experiment_ids = [ + q.client["experiment/list/df_experiments"]["id"].iloc[int(idx)] + for idx in selected_rows + ] + + await clean_dashboard(q, mode=q.client["experiment/compare/tab"]) + tabs = [ + ui.tab(name="experiment/compare/charts", label="Charts"), + ui.tab(name="experiment/compare/config", label="Config"), + ] + q.page["experiment/compare/tab"] = ui.tab_card( + box="nav2", link=True, items=tabs, value=q.client["experiment/compare/tab"] + ) + q.client.delete_cards.add("experiment/compare/tab") + + if q.client["experiment/compare/tab"] == "experiment/compare/charts": + charts = [] + experiment_names = [] + + for experiment_id in experiment_ids: + experiment = q.client.app_db.get_experiment(experiment_id) + experiment_path = experiment.path + charts.append(load_charts(experiment_path)) + current_name = f" {experiment.name}" + experiment_names.append(current_name) + + await 
charts_tab(q, charts, experiment_names) + + elif q.client["experiment/compare/tab"] == "experiment/compare/config": + if q.client["experiment/compare/diff_toggle"] is None: + q.client["experiment/compare/diff_toggle"] = False + + settings = pd.DataFrame() + for experiment_id in experiment_ids: + experiment = q.client.app_db.get_experiment(experiment_id) + experiment_path = experiment.path + experiment_cfg = load_config_yaml(os.path.join(experiment_path, "cfg.yaml")) + items = get_cfg_list_items(experiment_cfg) + act_df = pd.Series({item.label: item.value for item in items}) + settings[experiment.name] = act_df + + settings.index.name = "setting" + + if q.client["experiment/compare/diff_toggle"]: + val_counts = settings.T.nunique() + drop_idx = val_counts[val_counts == 1].index.values + settings = settings.drop(drop_idx) + + items = [ + ui.toggle( + name="experiment/compare/diff_toggle", + label="Show differences only", + value=q.client["experiment/compare/diff_toggle"], + trigger=True, + ), + ui_table_from_df( + q=q, + df=settings.reset_index(), + name="experiment/compare/summary/table", + link_col="setting", + height="calc(100vh - 315px)", + ), + ] + + q.page["experiment/compare/config"] = ui.form_card(box="first", items=items) + q.client.delete_cards.add("experiment/compare/config") + + buttons = [ + ui.button(name="experiment/compare", label="Refresh", primary=True), + ui.button(name="experiment/list/current", label="Back", primary=False), + ] + q.page["experiment/compare/footer"] = ui.form_card( + box="footer", items=[ui.inline(items=buttons, justify="start")] + ) + q.client.delete_cards.add("experiment/compare/footer") + + +async def experiment_rename_form(q: Q, error: str = "") -> None: + experiment = q.client.app_db.get_experiment(q.client["experiment/rename/id"]) + + experiment_name = experiment.name + items = [ + ui.textbox( + name="experiment/rename/name", + label=f"New name for {experiment_name}", + value=experiment_name, + required=True, + ) + ] + + if 
error: + items.append(ui.message_bar(type="error", text=error)) + + q.page["experiment/list"].items = items + + buttons = [ + ui.button(name="experiment/rename/action", label="Rename", primary=True), + ui.button(name="experiment/list/current", label="Abort", primary=False), + ] + q.page["dataset/display/footer"] = ui.form_card( + box="footer", items=[ui.inline(items=buttons, justify="start")] + ) + q.client.delete_cards.add("dataset/display/footer") + + +async def experiment_rename_ui_workflow(q: Q): + selected_row = q.args["experiment/list/rename"] + rename_id = q.client["experiment/list/df_experiments"]["id"].iloc[int(selected_row)] + q.client["experiment/rename/id"] = rename_id + await experiment_rename_form(q) + + +async def experiment_rename_action(q, experiment, new_name): + """Rename experiment with `current_id` id in DB to `new_name`""" + + old_name = experiment.name + old_path = experiment.path + new_path = old_path.replace(old_name, new_name) + + if old_path != new_path: + old_exp_path = f"{old_path}" + exp_path = f"{new_path}" + logger.info(f"Renaming {old_exp_path} to {exp_path}") + shutil.move(os.path.abspath(old_exp_path), os.path.abspath(exp_path)) + + # update the experiment name in the DB + with SqliteDict(os.path.join(new_path, "charts.db")) as charts: + for k1 in PLOT_ENCODINGS: + if k1 == "df": + # this is required to properly overwrite it + df = charts[k1].copy() + for k2, v2 in df.items(): + logger.info( + f"Renaming charts {v2} to {v2.replace(old_name, new_name)}" + ) + df[k2] = v2.replace(old_name, new_name) + charts[k1] = df + charts.commit() + + for config_file in ["cfg.yaml"]: + config_path = os.path.join(exp_path, config_file) + if os.path.exists(config_path): + experiment_cfg = load_config_yaml(config_path) + experiment_cfg.experiment_name = new_name + experiment_cfg.output_directory = new_path + save_config_yaml(config_path, experiment_cfg) + + rename_files = ["preds"] + for file in rename_files: + old_file = 
get_artifact_path_path(old_name, exp_path, file) + new_file = get_artifact_path_path(new_name, exp_path, file) + if os.path.exists(old_file): + logger.info(f"Renaming {old_file} to {new_file}") + shutil.move(os.path.abspath(old_file), os.path.abspath(new_file)) + + delete_files = ["logs"] # will be generated on demand with updates + for file in delete_files: + file = get_artifact_path_path(old_name, exp_path, file) + if os.path.exists(file): + logger.info(f"Deleting {file}") + os.remove(file) + + q.client.app_db.rename_experiment(experiment.id, new_name, new_path) + + +async def experiment_delete(q: Q, experiment_ids: List[int]) -> None: + """Delete selected experiments. + + Args: + q: Q + experiment_ids: list of experiment ids to delete + """ + + for experiment_id in experiment_ids: + experiment = q.client.app_db.get_experiment(experiment_id) + q.client.app_db.delete_experiment(experiment.id) + shutil.rmtree(f"{experiment.path}") + + +async def experiment_stop(q: Q, experiment_ids: List[int]) -> None: + """Stop selected experiments. 
+ + Args: + q: Q + experiment_ids: list of experiment ids to stop + """ + + for experiment_id in experiment_ids: + experiment = q.client.app_db.get_experiment(experiment_id) + + try: + ret = kill_child_processes(int(experiment.process_id)) + if ret: + flag_path = os.path.join(experiment.path, "flags.json") + write_flag(flag_path, "status", "stopped") + except Exception as e: + logger.error(f"Error while stopping the experiment: {e}") + pass + + +def load_charts(experiment_path): + try: + with SqliteDict(os.path.join(experiment_path, "charts.db")) as charts: + charts = dict(charts) + except Exception: + charts = {} + logger.warning("Too early, wait for the charts to appear") + + return charts + + +async def experiment_display(q: Q) -> None: + """Display a selected experiment.""" + + experiment_id = q.client["experiment/list/df_experiments"]["id"].iloc[ + q.client["experiment/display/id"] + ] + q.client["experiment/display/experiment_id"] = experiment_id + experiment = q.client.app_db.get_experiment(experiment_id) + q.client["experiment/display/experiment"] = experiment + + q.client["experiment/display/experiment_path"] = experiment.path + + status, _ = get_experiment_status(experiment.path) + + charts = load_charts(q.client["experiment/display/experiment_path"]) + q.client["experiment/display/charts"] = charts + + if experiment.mode == "train": + if q.client["experiment/display/tab"] is None: + q.client["experiment/display/tab"] = "experiment/display/charts" + else: + if q.client["experiment/display/tab"] is None: + q.client["experiment/display/tab"] = "experiment/display/summary" + + if q.args["experiment/display/charts"] is not None: + q.client["experiment/display/tab"] = "experiment/display/charts" + if q.args["experiment/display/summary"] is not None: + q.client["experiment/display/tab"] = "experiment/display/summary" + if q.args["experiment/display/train_data_insights"] is not None: + q.client["experiment/display/tab"] = "experiment/display/train_data_insights" 
+ if q.args["experiment/display/validation_prediction_insights"] is not None: + q.client["experiment/display/tab"] = ( + "experiment/display/validation_prediction_insights" + ) + if q.args["experiment/display/config"] is not None: + q.client["experiment/display/tab"] = "experiment/display/config" + if q.args["experiment/display/deployment"] is not None: + q.client["experiment/display/tab"] = "experiment/display/deployment" + if q.args["experiment/display/logs"] is not None: + q.client["experiment/display/tab"] = "experiment/display/logs" + if q.args["experiment/display/chat"] is not None: + q.client["experiment/display/tab"] = "experiment/display/chat" + + await clean_dashboard(q, mode=q.client["experiment/display/tab"]) + + tabs = [ + ui.tab(name="experiment/display/charts", label="Charts"), + ui.tab(name="experiment/display/summary", label="Summary"), + ] + # html for legacy experiments + has_train_data_insights = any( + [ + charts.get(plot_encoding, dict()).get("train_data") is not None + for plot_encoding in PLOT_ENCODINGS + ] + ) + if has_train_data_insights: + tabs += [ + ui.tab( + name="experiment/display/train_data_insights", + label="Train Data Insights", + ) + ] + has_validation_prediction_insights = any( + [ + charts.get(plot_encoding, dict()).get("validation_predictions") is not None + for plot_encoding in PLOT_ENCODINGS + ] + ) + if has_validation_prediction_insights: + tabs += [ + ui.tab( + name="experiment/display/validation_prediction_insights", + label="Validation Prediction Insights", + ) + ] + + tabs += [ + ui.tab(name="experiment/display/logs", label="Logs"), + ui.tab(name="experiment/display/config", label="Config"), + ] + + if status == "finished": + tabs += [ui.tab(name="experiment/display/chat", label="Chat")] + + q.page["experiment/display/tab"] = ui.tab_card( + box="nav2", link=True, items=tabs, value=q.client["experiment/display/tab"] + ) + q.client.delete_cards.add("experiment/display/tab") + + if q.client["experiment/display/tab"] == 
"experiment/display/charts": + await charts_tab(q, [charts], [""]) + elif q.client["experiment/display/tab"] in [ + "experiment/display/train_data_insights", + "experiment/display/validation_prediction_insights", + ]: + await insights_tab(charts, q) + elif q.client["experiment/display/tab"] in ["experiment/display/summary"]: + await summary_tab(experiment_id, q) + elif q.client["experiment/display/tab"] in ["experiment/display/config"]: + await configs_tab(q) + elif q.client["experiment/display/tab"] in ["experiment/display/logs"]: + await logs_tab(q) + elif q.client["experiment/display/tab"] in ["experiment/display/chat"]: + await chat_tab(q) + + await q.page.save() + + buttons = [ + ui.button(name="experiment/display/refresh", label="Refresh", primary=True) + ] + + buttons += [ + ui.button( + name="experiment/display/download_logs", + label="Download logs/config", + primary=False, + ) + ] + + if status == "finished": + buttons += [ + ui.button( + name="experiment/display/download_predictions", + label="Download predictions", + primary=False, + disabled=False, + tooltip=None, + ), + ui.button( + name="experiment/display/download_model", + label="Download model", + primary=False, + disabled=False, + tooltip=None, + ), + ui.button( + name="experiment/display/push_to_huggingface", + label="Push checkpoint to huggingface", + primary=False, + disabled=False, + tooltip=None, + ), + ] + + buttons += [ui.button(name="experiment/list/current", label="Back", primary=False)] + + q.page["experiment/display/footer"] = ui.form_card( + box="footer", + items=[ + ui.inline(items=buttons, justify="start"), + ], + ) + q.client.delete_cards.add("experiment/display/footer") + + +async def insights_tab(charts, q): + if q.client["experiment/display/tab"] == "experiment/display/train_data_insights": + key = "train_data" + elif ( + q.client["experiment/display/tab"] + == "experiment/display/validation_prediction_insights" + ): + key = "validation_predictions" + for k1 in PLOT_ENCODINGS: + 
if k1 not in charts: + continue + for k2, v2 in charts[k1].items(): + if k2 != key: + continue + if k1 == "html": + q.page[f"experiment/display/charts/{k1}_{k2}"] = ui.markup_card( + box="first", title="", content=v2 + ) + q.client.delete_cards.add(f"experiment/display/charts/{k1}_{k2}") + + continue + + elif k1 == "image": + q.page[f"experiment/display/charts/{k1}_{k2}"] = ui.image_card( + box="first", title="", type="png", image=v2 + ) + q.client.delete_cards.add(f"experiment/display/charts/{k1}_{k2}") + continue + + elif k1 == "df": + df = pd.read_parquet(v2) + min_widths = { + col: "350" for col in df.columns if "text" in str(col).lower() + } + # + if key == "train_data": + min_widths["Content"] = "800" + q.page[f"experiment/display/charts/{k1}_{k2}"] = ui.form_card( + box="first", + items=[ + ui_table_from_df( + q=q, + df=df, + name=f"experiment/display/charts/{k1}_{k2}", + sortables=[ + col for col in df.columns if col.startswith("Metric") + ], + markdown_cells=[ + col + for col in df.columns + if not col.startswith("Metric") + ], + searchables=list(df.columns), + downloadable=True, + resettable=True, + min_widths=min_widths, + height="calc(100vh - 245px)", + max_char_length=50_000, + cell_overflow="tooltip", + ) + ], + ) + q.client.delete_cards.add(f"experiment/display/charts/{k1}_{k2}") + continue + + +async def summary_tab(experiment_id, q): + experiment_df = get_experiments(q) + input_dict = experiment_df[experiment_df.id == experiment_id].iloc[0].to_dict() + cfg = load_config_yaml( + os.path.join(q.client["experiment/display/experiment_path"], "cfg.yaml") + ) + _ = get_tokenizer(cfg) + + # experiment card + card_name = "experiment/display/summary/experiment" + q.page[card_name] = ui.form_card( + box=ui.box(zone="first"), + items=[ + ui.separator("Experiment"), + ui.stats( + [ + ui.stat( + value=cfg.experiment_name, + label="Name", + ), + ], + justify="between", + inset=True, + ), + ui.stats( + [ + ui.stat( + value=input_dict["config_file"], + 
label="Problem Type", + ), + ], + justify="between", + inset=True, + ), + ], + ) + q.client.delete_cards.add(card_name) + + # datasets card + card_name = "experiment/display/summary/datasets" + q.page[card_name] = ui.form_card( + box=ui.box(zone="first"), + items=[ + ui.separator("Datasets"), + ui.stats( + [ + ui.stat( + value=Path(cfg.dataset.train_dataframe).stem, + label="Training Dataset", + ), + ], + justify="between", + inset=True, + ), + ui.stats( + [ + ui.stat( + value=( + "-" + if cfg.dataset.validation_dataframe in ["", "None", None] + else Path(cfg.dataset.validation_dataframe).stem + ), + label="Validation Dataset", + ), + ], + justify="between", + inset=True, + ), + ], + ) + q.client.delete_cards.add(card_name) + + # score card + card_name = "experiment/display/summary/score" + q.page[card_name] = ui.form_card( + box=ui.box(zone="first"), + items=[ + ui.separator("Score"), + ui.stats( + [ + ui.stat( + value=input_dict["metric"], + label="Metric", + ), + ], + justify="between", + inset=True, + ), + ui.stats( + [ + ui.stat( + value=( + "-" + if input_dict["val metric"] in ["", "None", None] + else str(input_dict["val metric"]) + ), + label="Validation Score", + ), + ], + justify="between", + inset=True, + ), + ], + ) + q.client.delete_cards.add(card_name) + + # main configs card + card_name = "experiment/display/summary/main_configs" + q.page[card_name] = ui.form_card( + box=ui.box(zone="second"), + items=[ + ui.separator("Main Configurations"), + ui.stats( + [ + ui.stat( + value=cfg.llm_backbone, + label="LLM Backbone", + ), + ui.stat( + value=str(cfg.training.lora), + label="Lora", + ), + ui.stat( + value=str(cfg.training.epochs), + label="Epochs", + ), + ui.stat( + value=str(cfg.training.batch_size), + label="Batch Size", + ), + ], + justify="between", + inset=True, + ), + ui.stats( + [ + ui.stat( + value=str(input_dict["loss"]), + label="Loss Function", + ), + ui.stat( + value=cfg.architecture.backbone_dtype, + label="Backbone Dtype", + ), + ui.stat( 
+                        value=str(cfg.architecture.gradient_checkpointing),
+                        label="Gradient Checkpointing",
+                    ),
+                    ui.stat(
+                        value=input_dict["gpu_list"],
+                        label="GPU List",
+                    ),
+                ],
+                justify="between",
+                inset=True,
+            ),
+        ],
+    )
+    q.client.delete_cards.add(card_name)
+
+    # code card
+    card_name = "experiment/display/summary/code"
+    content = get_experiment_summary_code_card(cfg=cfg)
+    q.page[card_name] = ui.markdown_card(
+        box=ui.box(zone="third"),
+        title="",
+        content=content,
+    )
+    q.client.delete_cards.add(card_name)
+
+
+async def configs_tab(q):
+    experiment_cfg = load_config_yaml(
+        os.path.join(q.client["experiment/display/experiment_path"], "cfg.yaml")
+    )
+    items = get_cfg_list_items(experiment_cfg)
+    q.page["experiment/display/config"] = ui.stat_list_card(
+        box="first", items=items, title=""
+    )
+    q.client.delete_cards.add("experiment/display/config")
+
+
+async def logs_tab(q):
+    logs_path = f"{q.client['experiment/display/experiment_path']}/logs.log"
+    text = ""
+    in_pre = 0
+    # Read log file only if it already exists
+    if os.path.exists(logs_path):
+        with open(logs_path, "r") as f:
+            for line in f.readlines():
+                if in_pre == 0:
+                    text += "<div>"
+                if "INFO: Lock" in line:
+                    continue
+                # maximum line length
+                n = 250
+                chunks = [line[i : i + n] for i in range(0, len(line), n)]
+                text += "</div><div>".join(chunks)
+
+                # Check for formatted HTML text
+                if "<pre>" in line:
+                    in_pre += 1
+                if "</pre>" in line:
+                    in_pre -= 1
+                if in_pre == 0:
+                    text += "</div>"
+    items = [ui.text(text)]
+    q.page["experiment/display/logs"] = ui.form_card(box="first", items=items, title="")
+    q.client.delete_cards.add("experiment/display/logs")
+
+
+def subsample(key1, key2, value, max_plot_points=1000):
+    act_plot_points = len(value["steps"])
+    if act_plot_points > max_plot_points:
+        stride = int(np.ceil(act_plot_points / max_plot_points))
+        value["steps"] = value["steps"][::stride]
+        value["values"] = value["values"][::stride]
+        logger.info(
+            f"{key1} {key2} sampled from size {act_plot_points} to size "
+            f"{len(value['steps'])} using stride {stride}."
+        )
+    return value
+
+
+def unite_validation_metric_charts(charts_list):
+    unique_metrics = []
+    for chart in charts_list:
+        unique_metrics.extend(list(chart.get("validation", {}).keys()))
+
+    unique_metrics = set([key for key in unique_metrics if key != "loss"])
+
+    if len(unique_metrics) > 1:
+        for chart in charts_list:
+            if "validation" in chart:
+                for key in unique_metrics:
+                    if key in chart["validation"]:
+                        chart["validation"]["metric"] = chart["validation"][key]
+                        del chart["validation"][key]
+    return charts_list
+
+
+async def charts_tab(q, charts_list, legend_labels):
+    charts_list = unite_validation_metric_charts(charts_list)
+
+    box = ["first", "first", "second", "second"]
+    cnt = 0
+    for k1 in ["meta", "train", "validation"]:
+        if all([k1 not in charts for charts in charts_list]):
+            continue
+
+        all_second_keys: Set = set()
+        for charts in charts_list:
+            if k1 in charts:
+                all_second_keys = all_second_keys.union(set(charts[k1].keys()))
+
+        # Always plot loss in the lower left corner
+        if "loss" in all_second_keys:
+            all_second_keys.remove("loss")
+            list_all_second_keys = ["loss"] + list(all_second_keys)
+        else:
+            list_all_second_keys = list(all_second_keys)
+
+        for k2 in list_all_second_keys:
+            logger.info(f"{k1} {k2}")
+
+            items = []
+
+            tooltip = ""
+            if k1 == "meta" and k2 == "lr":
+                tooltip = "Current learning rate throughout the training process."
+ elif k1 == "train" and k2 == "loss": + tooltip = ( + "Current training loss throughout the training process. " + "Loss is calculated as the average of the last ten batches." + ) + elif k1 == "validation" and k2 == "loss": + tooltip = ( + "Current validation loss throughout the training process. " + "Loss is calculated as the average of all validation batches. " + ) + elif k1 == "validation" and k2 != "loss": + tooltip = ( + "Current validation metric throughout the training process. " + "Metric is calculated on full validation set predictions." + ) + else: + continue + + title = f"{k1} {k2}".upper().replace("META LR", "LEARNING RATE") + if k2 == "loss": + title = title.replace("LOSS", "BATCH LOSS") + + items.append(ui.text(title, tooltip=tooltip)) + + rows = [] + + max_samples = q.client["chart_plot_max_points"] + for charts, label in zip(charts_list, legend_labels): + if k1 not in charts or k2 not in charts[k1]: + continue + + v2 = charts[k1][k2] + v2 = subsample(k1, k2, v2, max_samples) + + if k2 == "lr" and "lr_diff" in charts["meta"]: + v3 = charts["meta"]["lr_diff"] + v3 = subsample("meta", "lr_diff", v3, max_samples) + rows.extend( + [ + (v2["steps"][i], f"learning rate{label}", v2["values"][i]) + for i in range(len(v2["values"])) + ] + + [ + ( + v3["steps"][i], + f"differential learning rate{label}", + v3["values"][i], + ) + for i in range(len(v3["values"])) + ] + ) + color = "=type" + fields = ["step", "type", "value"] + + elif len(charts_list) > 1: + rows.extend( + [ + (v2["steps"][i], label.strip(), v2["values"][i]) + for i in range(len(v2["values"])) + ] + ) + color = "=type" + fields = ["step", "type", "value"] + else: + rows.extend( + [ + (v2["steps"][i], v2["values"][i]) # type: ignore + for i in range(len(v2["values"])) + ] + ) + color = wave_theme.color + fields = ["step", "value"] + + d = data(fields=fields, rows=rows, pack=True) + + viz = ui.visualization( + plot=ui.plot( + [ + ui.mark( + type="line", + x_title="step", + x_scale="linear", + 
y_scale="linear", + x="=step", + y="=value", + color=color, + y_min=0 if k1 == "meta" and k2 == "lr" else None, + color_range=wave_theme.color_range, + ) + ] + ), + data=d, # type: ignore + interactions=["brush"], + height="calc((100vh - 275px)*0.41)", + width="560px", + ) + + items.append(viz) + + if k1 == "validation" and k2 == "loss" and np.sum(v2["values"]) == 0: + items.append( + ui.message_bar( + type="info", + text="Validation batch loss cannot be \ + calculated for this problem type.", + ) + ) + + q.page[f"experiment/display/charts/{k1}_{k2}"] = ui.form_card( + box=box[cnt], items=items + ) + q.client.delete_cards.add(f"experiment/display/charts/{k1}_{k2}") + + cnt += 1 + + +async def experiment_artifact_build_error_dialog(q: Q, error: str): + q.page["meta"].dialog = ui.dialog( + "Failed to build artifact", items=[ui.text(error)], closable=True + ) + q.client["keep_meta"] = True + + +async def experiment_download_artifact( + q: Q, + get_artifact_path_fn: Callable[[str, str], str], + save_artifact_fn: Callable[[str, str], str], + additional_log: Optional[str] = "", + min_disk_space: Optional[float] = 0.0, +): + """Download specific artifact, if it does not exist, create it on demand + + Args: + q: Q + get_artifact_path_fn: function that returns path to the artifact + save_artifact_fn: function that generates the artifact and returns its path + additional_log: additional information to be logged + min_disk_space: minimal disk available needed to generate artifact + """ + + experiment = q.client["experiment/display/experiment"] + experiment_path = q.client["experiment/display/experiment_path"] + + zip_path = get_artifact_path_fn(experiment.name, experiment_path) + + if not os.path.exists(zip_path): + try: + check_available_space(experiment_path, min_disk_space) + except LLMResourceException as e: + error = f"Cannot create {os.path.basename(zip_path)}. 
{e}" + await experiment_artifact_build_error_dialog(q, error) + return + + logger.info(f"Creating {zip_path} on demand") + zip_path = save_artifact_fn(experiment.name, experiment_path) + + if additional_log: + logger.info(f"{additional_log}: {zip_path}") + + q.page["meta"].script = ui.inline_script( + f'window.open("{get_download_link(q, zip_path)}", "_blank");' + ) + await q.page.save() + + +async def experiment_download_predictions(q: Q): + """Download experiment predictions.""" + await experiment_download_artifact( + q, get_predictions_path, save_prediction_outputs, "Predictions path", None + ) + + +async def experiment_download_logs(q: Q): + """Download experiment logs.""" + + experiment = q.client["experiment/display/experiment"] + experiment_path = q.client["experiment/display/experiment_path"] + zip_path = get_logs_path(experiment.name, experiment_path) + + if not os.path.exists(zip_path): + logs = q.client["experiment/display/charts"] + logger.info(f"Creating {zip_path} on demand") + zip_path = save_logs(experiment.name, experiment_path, logs) + + download_url = get_download_link(q, zip_path) + logger.info(f"Logs URL: {download_url}") + + q.page["meta"].script = ui.inline_script( + f'window.open("{download_url}", "_blank");' + ) + await q.page.save() + + +async def config_import_uploaded_file(q: Q): + """ "Importing a config file from drag and drop to the filesystem""" + + file_url = q.args["experiment/upload_yaml"][0] + file_name = file_url.split("/")[-1] + path = f"{get_data_dir(q)}/{file_name}" + + local_path = await q.site.download(file_url, path) + + await q.site.unload(q.args["experiment/upload_yaml"][0]) + + with open(local_path, "r") as f: + yaml_data = yaml.safe_load(f) + + yaml_data = flatten_dict(yaml_data) + + q.client["experiment/yaml_data"] = yaml_data + + +async def show_message(q, msg_key, page, idx, msg_type): + info = q.client[msg_key] + if info: + q.page[page].items[idx].message_bar.text = info + q.page[page].items[idx].message_bar.type = 
def get_experiment_list_message_bar(q):
    """Build the one-shot message bar shown above the experiment list.

    Shows an error for a halted experiment or an info note when pipelines
    were force-disabled, clearing the stored message so it is displayed
    only once; otherwise returns an empty info bar.
    """
    if q.client["experiment_halt_reason"]:
        msg_bar = ui.message_bar(type="error", text=q.client["experiment_halt_reason"])
        # One-shot message: clear so it is not shown again on the next render.
        del q.client["experiment_halt_reason"]

    elif q.client["force_disable_pipelines"]:
        msg_bar = ui.message_bar(type="info", text=q.client["force_disable_pipelines"])
        del q.client["force_disable_pipelines"]

    else:
        msg_bar = ui.message_bar(type="info", text="")

    return msg_bar


async def experiment_download_model(q: Q):
    """Zip the trained model (building it on demand) and open a download link.

    If the zip does not exist yet, the checkpoint is merged and saved first:
    model weights, tokenizer files, configs and the model card are collected
    into the zip located by ``get_model_path``.
    """
    experiment = q.client["experiment/display/experiment"]
    experiment_path = q.client["experiment/display/experiment_path"]
    zip_path = get_model_path(experiment.name, experiment_path)

    if not os.path.exists(zip_path):
        logger.info(f"Creating {zip_path} on demand")
        cfg = load_config_yaml(os.path.join(experiment_path, "cfg.yaml"))

        # Prepare on CPU when GPUs are occupied by running/queued experiments
        # or when quantized (int4/int8) LoRA weights must be merged.
        device = "cuda"
        experiments = get_experiments(q)
        num_running_queued = len(
            experiments[experiments["status"].isin(["queued", "running"])]
        )
        if num_running_queued > 0 or (
            cfg.training.lora and cfg.architecture.backbone_dtype in ("int4", "int8")
        ):
            logger.info("Preparing model on CPU. This might slow down the progress.")
            device = "cpu"
        with set_env(HUGGINGFACE_TOKEN=q.client["default_huggingface_api_token"]):
            cfg, model, tokenizer = load_cfg_model_tokenizer(
                experiment_path, merge=True, device=device
            )

        model = unwrap_model(model)
        checkpoint_path = cfg.output_directory

        # Remember when saving started so files created afterwards can be
        # picked up by the mtime scan below.
        model_save_time = time.time()
        model.backbone.save_pretrained(checkpoint_path)
        # See PreTrainedTokenizerBase.save_pretrained for documentation
        # Safeguard against None return if tokenizer class is
        # not inherited from PreTrainedTokenizerBase
        tokenizer_files = list(tokenizer.save_pretrained(checkpoint_path) or [])

        card = get_model_card(cfg, model, repo_id="")
        card.save(os.path.join(experiment_path, "model_card.md"))

        logger.info(f"Creating Zip File at {zip_path}")

        FILES_TO_PUSH = [
            "vocab.json",
            "sentencepiece.bpe.model",
            "bpe_encoder.bin",
            "tokenizer_config.json",
            "tokenizer.json",
            "special_tokens_map.json",
            "merges.txt",
            "generation_config.json",
            "config.json",
            "added_tokens.json",
            "model_card.md",
            "classification_head.pth",
        ]
        FILES_TO_PUSH = set(
            FILES_TO_PUSH
            + [os.path.split(tokenizer_file)[-1] for tokenizer_file in tokenizer_files]
        )

        # Context manager ensures the zip handle is closed even if adding a
        # file raises (the original leaked the handle on any exception).
        with zipfile.ZipFile(zip_path, "w") as zf:
            # Add tokenizer and config.json files, as well as potential
            # classification head
            paths_added = []
            for file in FILES_TO_PUSH:
                path = os.path.join(experiment_path, file)
                if os.path.isfile(path):
                    paths_added.append(path)
                    add_file_to_zip(zf=zf, path=path)

            # Add model weight files. save_pretrained() does not return the
            # saved files.
            # NOTE(review): safetensors weight files would not match this
            # glob; they are still picked up by the mtime scan below — verify.
            weight_paths = glob.glob(os.path.join(checkpoint_path, "pytorch_model*.*"))
            for path in weight_paths:
                paths_added.append(path)
                add_file_to_zip(zf=zf, path=path)

            # Add all files that were created after the model was saved.
            # This is useful for potential changes/different
            # naming conventions across different backbones.
            for file in os.listdir(checkpoint_path):
                file_path = os.path.join(checkpoint_path, file)
                if (
                    os.path.getmtime(file_path) > model_save_time
                    and file_path not in paths_added
                    and file_path != zip_path
                ):
                    add_file_to_zip(zf=zf, path=file_path)
                    paths_added.append(file_path)
                    logger.info(
                        f"Added {file_path} to zip file as it "
                        "was created when saving the model state."
                    )

    download_url = get_download_link(q, zip_path)
    # Fixed copy-pasted message: this is the model zip URL, not the logs URL.
    logger.info(f"Model zip URL: {download_url}")

    q.page["meta"].script = ui.inline_script(
        f'window.open("{download_url}", "_blank");'
    )
    await q.page.save()
+ ), + ), + ui.textbox( + name="experiment/display/push_to_huggingface/model_name", + label="Model Name", + value=hf_repo_friendly_name( + q.client["experiment/display/experiment"].name + ), + width="500px", + required=True, + tooltip="The name of the model as shown on HF.", + ), + ui.dropdown( + name="experiment/display/push_to_huggingface/device", + label="Device for preparing the model", + required=True, + value=default_device, + width="500px", + choices=[ui.choice(str(d), str(d)) for d in devices], + tooltip=( + "The local device to prepare the model before pushing it to HF. " + "CPU will never load the weights to the GPU, which can be useful " + "for large models, but will be significantly slower. " + "Cpu_shard will first load on CPU and then shard on all GPUs " + "before pushing to HF." + ), + ), + ui.textbox( + name="experiment/display/push_to_huggingface/api_key", + label="Huggingface API Key", + value=q.client["default_huggingface_api_token"], + width="500px", + password=True, + required=True, + tooltip="HF API key, needs write access.", + ), + ui.toggle( + name="default_safe_serialization", + label="Use Hugging Face safetensors for safe serialization", + value=q.client["default_safe_serialization"], + ), + ui.buttons( + [ + ui.button( + name="experiment/display/push_to_huggingface_submit", + label="Export", + primary=True, + ), + ui.button(name="cancel", label="Cancel", primary=False), + ] + ), + ] + elif q.args["experiment/display/push_to_huggingface_submit"]: + await busy_dialog( + q=q, + title="Exporting to HuggingFace", + text="Model size can affect the export time significantly.", + ) + + experiment_path = q.client["experiment/display/experiment_path"] + device = q.client["experiment/display/push_to_huggingface/device"] + api_key = q.client["experiment/display/push_to_huggingface/api_key"] + user_id = q.client["experiment/display/push_to_huggingface/account_name"] + safe_serialization = q.client["default_safe_serialization"] + model_name = q.client[ 
+ "experiment/display/push_to_huggingface/model_name" + ].replace(".", "-") + + publish_model_to_hugging_face( + path_to_experiment=experiment_path, + device=device, + api_key=api_key, + user_id=user_id, + model_name=model_name, + safe_serialization=safe_serialization, + ) + + dialog_items = [ + ui.message_bar("success", "Success"), + ui.buttons( + [ + ui.button(name="ok", label="OK", primary=True), + ] + ), + ] + + dialog = ui.dialog( + title="Push to HuggingFace Hub", + items=dialog_items, + closable=True, + name="push_to_huggingface_dialog", + ) + + q.page["meta"].dialog = dialog + q.client["keep_meta"] = True + + +def get_experiment_summary_code_card(cfg) -> str: + repo_id: Optional[str] = None + hf_yaml_path = f"{cfg.output_directory}/hf.yaml" + + with open( + os.path.join("model_cards", cfg.environment._summary_card_template), "r" + ) as f: + text = f.read() + + if os.path.exists(hf_yaml_path): + with open(hf_yaml_path, "r") as fp: + repo_id = yaml.load(fp, Loader=yaml.FullLoader)["repo_id"] + + if repo_id is None: + repo_id = "account/model" + + # Model repo + text = text.replace("{{repo_id}}", repo_id) + + # Versions + text = text.replace("{{transformers_version}}", transformers.__version__) + text = text.replace("{{einops_version}}", einops.__version__) + text = text.replace("{{accelerate_version}}", accelerate.__version__) + text = text.replace("{{torch_version}}", torch.__version__) + + # Configs + text = text.replace("{{text_prompt_start}}", str(cfg.dataset.text_prompt_start)) + text = text.replace( + "{{text_answer_separator}}", str(cfg.dataset.text_answer_separator) + ) + text = text.replace( + "{{end_of_sentence}}", + str(cfg._tokenizer_eos_token) if cfg.dataset.add_eos_token_to_prompt else "", + ) + + text = text.replace("{{trust_remote_code}}", str(cfg.environment.trust_remote_code)) + + if cfg.problem_type not in NON_GENERATION_PROBLEM_TYPES: + text = text.replace( + "{{min_new_tokens}}", str(cfg.prediction.min_length_inference) + ) + text = 
def histogram_card(
    x,
    a=0.1,
    b=0.9,
    x_axis_description="text_length",
    histogram_box="first",
    title="Text Length (split by whitespace)",
):
    """Build a Wave plot card with the distribution of values in ``x``.

    Args:
        x: list of integer values (e.g. text lengths) to plot.
        a: lower quantile boundary in [0, 1].
        b: upper quantile boundary in [0, 1], with a <= b.
        x_axis_description: column/axis name; must not contain spaces.
        histogram_box: layout box for the card.
        title: card title.

    Returns:
        A ``ui.plot_card`` showing counts per value, colored by quantile group.
    """
    assert " " not in x_axis_description, (
        "x_axis_description in histogram card must not contain spaces, "
        "as the card would not be rendered."
    )
    df_quantile = compute_quantile_df(x, a, b)
    df_quantile = df_quantile.rename(columns={"length": x_axis_description})
    card = ui.plot_card(
        box=histogram_box,
        title=title,
        data=data(
            fields=df_quantile.columns.tolist(),
            rows=df_quantile.values.tolist(),
            pack=True,
        ),
        plot=ui.plot(
            marks=[
                ui.mark(
                    type="area",
                    x=f"={x_axis_description}",
                    x_title=f"Total samples: {len(x)}",
                    y="=count",
                    y_title="Count",
                    color="=data_type",
                    shape="circle",
                )
            ]
        ),
    )
    return card


def compute_quantile_df(x: List[int], a: float, b: float):
    """Compute per-value counts of ``x``, labeled by quantile group.

    Returns a dataframe with the following columns:
    - length: length of the text
    - count: number of texts with this length
    - data_type: quantile type
      (first (a * 100)% quantile, (a * 100)%-(100 * b)% quantile,
      last (100 * (1 - b))% quantile)

    Note that quantiles are overlapping on the edges.

    Raises:
        ValueError: if ``x`` is empty or ``a``/``b`` are outside [0, 1]
            or ``a > b``.
    """
    if not x:
        raise ValueError("Input list x is empty")

    if not 0 <= a <= b <= 1:
        raise ValueError(
            "Values of a and b must be in [0, 1] "
            "and a should be less than or equal to b"
        )

    x_axis_description = "length"
    df = pd.DataFrame(x, columns=[x_axis_description])
    df["count"] = 1
    df_quantile = (
        df.groupby([x_axis_description])
        .sum()
        .reset_index()
        .sort_values(by=x_axis_description)[[x_axis_description, "count"]]
    )
    sorted_data = sorted(x)
    # Clamp the boundary indices. Previously, a == 1 indexed past the end
    # (IndexError) and any case where int(len * (1 - b)) == 0 — e.g.
    # len(x) < 10 with the default b=0.9, or b == 1 — produced index -0,
    # i.e. the SMALLEST element, mislabeling every row as "last" quantile.
    first_idx = min(int(len(sorted_data) * a), len(sorted_data) - 1)
    first_quantile = sorted_data[first_idx]
    last_offset = max(int(len(sorted_data) * (1 - b)), 1)
    last_quantile = sorted_data[-last_offset]

    df_first = df_quantile.loc[df_quantile[x_axis_description] <= first_quantile].copy()
    df_first["data_type"] = f"first {int(a * 100)}% quantile"
    df_last = df_quantile.loc[df_quantile[x_axis_description] >= last_quantile].copy()
    df_last["data_type"] = f"last {100 - int(b * 100)}% quantile"
    df_quantile["data_type"] = f"{int(a * 100)}%-{int(b * 100)}% quantile"
    # .loc label slicing on the reset integer index is endpoint-inclusive.
    middle_quantile_min = max(0, len(df_first) - 1)
    middle_quantile_max = (
        min(len(df_quantile), (len(df_quantile) - len(df_last) - 1)) + 1
    )
    df_quantile = pd.concat(
        [
            df_first,
            df_quantile.loc[middle_quantile_min:middle_quantile_max],
            df_last,
        ]
    )
    return df_quantile
async def home(q: Q) -> None:
    """Render the Home dashboard: disk/compute stats plus dataset and
    experiment summary tables."""
    await clean_dashboard(q, mode="home")
    q.client["nav/active"] = "home"

    experiments = get_experiments(q)
    hdd = psutil.disk_usage(default_cfg.llm_studio_workdir)

    # Disk gauge; takes more width when there are no experiments to show.
    q.page["home/disk_usage"] = ui.tall_gauge_stat_card(
        box=ui.box("content", order=2, width="20%" if len(experiments) > 0 else "30%"),
        title="Disk usage",
        value=f"{hdd.percent:.2f} %",
        aux_value=f"{get_size_str(hdd.used, sig_figs=1)} /\
 {get_size_str(hdd.total, sig_figs=1)}",
        plot_color=wave_theme.get_primary_color(q),
        progress=hdd.percent / 100,
    )

    if len(experiments) > 0:
        # Bar chart of experiment counts by coarse status group.
        num_finished = len(experiments[experiments["status"] == "finished"])
        num_running_queued = len(
            experiments[experiments["status"].isin(["queued", "running"])]
        )
        num_failed_stopped = len(
            experiments[experiments["status"].isin(["failed", "stopped"])]
        )

        q.page["home/experiments_stats"] = ui.form_card(
            box=ui.box("content", order=1, width="40%"),
            title="Experiments",
            items=[
                ui.visualization(
                    plot=ui.plot(
                        [ui.mark(type="interval", x="=status", y="=count", y_min=0)]
                    ),
                    data=data(
                        fields="status count",
                        rows=[
                            ("finished", num_finished),
                            ("queued + running", num_running_queued),
                            ("failed + stopped", num_failed_stopped),
                        ],
                        pack=True,  # type: ignore
                    ),
                )
            ],
        )

    # GPU load is only shown when CUDA is available.
    stats = []
    if torch.cuda.is_available():
        stats.append(ui.stat(label="Current GPU load", value=f"{get_gpu_usage():.1f}%"))
    stats += [
        ui.stat(label="Current CPU load", value=f"{psutil.cpu_percent()}%"),
        ui.stat(
            label="Memory usage",
            value=f"{get_size_str(psutil.virtual_memory().used, sig_figs=1)} /\
 {get_size_str(psutil.virtual_memory().total, sig_figs=1)}",
        ),
    ]

    q.page["home/compute_stats"] = ui.tall_stats_card(
        box=ui.box("content", order=1, width="40%" if len(experiments) > 0 else "70%"),
        items=stats,
    )

    if torch.cuda.is_available():
        q.page["home/gpu_stats"] = ui.form_card(
            box=ui.box("expander", width="100%"),
            items=[
                ui.expander(
                    name="expander",
                    label="Detailed GPU stats",
                    items=get_single_gpu_usage(
                        highlight=wave_theme.get_primary_color(q)
                    ),
                    expanded=True,
                )
            ],
        )
        q.client.delete_cards.add("home/gpu_stats")

    # Register cards for cleanup when navigating away.
    q.client.delete_cards.add("home/compute_stats")
    q.client.delete_cards.add("home/disk_usage")
    q.client.delete_cards.add("home/experiments_stats")

    q.client["experiment/list/mode"] = "train"

    q.client["dataset/list/df_datasets"] = get_datasets(q)
    df_viz = q.client["dataset/list/df_datasets"].copy()
    df_viz = df_viz[df_viz.columns.intersection(["name", "problem type"])]

    # Extra GPU cards above reduce the space left for the tables.
    if torch.cuda.is_available():
        table_height = "max(calc(100vh - 660px), 400px)"
    else:
        table_height = "max(calc(100vh - 550px), 400px)"

    q.page["dataset/list"] = ui.form_card(
        box="datasets",
        items=[
            ui.inline(
                [
                    ui.button(
                        name="dataset/list", icon="Database", label="", primary=True
                    ),
                    ui.label("List of Datasets"),
                ]
            ),
            ui_table_from_df(
                q=q,
                df=df_viz,
                name="dataset/list/table",
                sortables=[],
                searchables=[],
                min_widths={"name": "240", "problem type": "130"},
                link_col="name",
                height=table_height,
            ),
        ],
    )
    q.client.delete_cards.add("dataset/list")

    # Only finished experiments are listed on the home screen.
    q.client["experiment/list/df_experiments"] = get_experiments(
        q, mode=q.client["experiment/list/mode"], status="finished"
    )

    df_viz = q.client["experiment/list/df_experiments"].copy()
    df_viz = df_viz.rename(columns={"process_id": "pid", "config_file": "problem type"})
    df_viz = df_viz[
        df_viz.columns.intersection(
            ["name", "dataset", "problem type", "metric", "val metric"]
        )
    ]

    q.page["experiment/list"] = ui.form_card(
        box="experiments",
        items=[
            ui.inline(
                [
                    ui.button(
                        name="experiment/list",
                        icon="FlameSolid",
                        label="",
                        primary=True,
                    ),
                    ui.label("List of Experiments"),
                ]
            ),
            ui_table_from_df(
                q=q,
                df=df_viz,
                name="experiment/list/table",
                sortables=["val metric"],
                numerics=["val metric"],
                min_widths={
                    # "id": "50",
                    "name": "115",
                    "dataset": "100",
                    "problem type": "120",
                    "metric": "70",
                    "val metric": "85",
                },
                link_col="name",
                height=table_height,
            ),
        ],
    )
async def list_current_experiments(q, allowed_statuses=None, actions=True, reset=True):
    """Render the experiment list, optionally restricted to given statuses.

    When called with ``reset=False`` (abort flow), multi-row selection on
    the table is switched off again.
    """
    await experiment_list(
        q,
        allowed_statuses=allowed_statuses,
        reset=reset,
        actions=actions,
    )

    if not reset:  # in case of abort button disable multi-select
        table_idx, _ = get_table_and_message_item_indices(q)
        q.page["experiment/list"].items[table_idx].table.multiple = False


def _show_multi_select_footer(q, action_button) -> None:
    """Enable multi-row selection on the experiment table and show a footer
    with the given primary action plus an Abort button."""
    table_idx, _ = get_table_and_message_item_indices(q)
    q.page["experiment/list"].items[table_idx].table.multiple = True
    q.page["dataset/display/footer"].items = [
        ui.inline(
            items=[
                action_button,
                ui.button(name="experiment/list/current/noreset", label="Abort"),
            ]
        )
    ]


async def current_experiment_list_stop(q: Q) -> None:
    """Allow to select experiments to stop."""
    _show_multi_select_footer(
        q,
        ui.button(name="experiment/stop", label="Stop experiments", primary=True),
    )


async def current_experiment_list_delete(q: Q) -> None:
    """Allow to select experiments to delete."""
    _show_multi_select_footer(
        q,
        ui.button(
            name="experiment/delete/dialog", label="Delete experiments", primary=True
        ),
    )


async def current_experiment_list_compare(q: Q) -> None:
    """Allow to select experiments to compare."""
    _show_multi_select_footer(
        q,
        ui.button(
            name="experiment/compare",
            label="Compare experiments",
            primary=True,
        ),
    )


async def current_experiment_compare(q: Q) -> None:
    """Compare the experiments currently selected in the table.

    Falls back to the previously stored selection; with no selection at
    all, simply re-renders the experiment list.
    """
    selection = q.args["experiment/list/table"]
    if selection:
        # Remember the choice so a page refresh keeps the comparison.
        q.client["experiment/compare/selected"] = selection
    elif q.client["experiment/compare/selected"]:
        selection = q.client["experiment/compare/selected"]
    else:
        await list_current_experiments(q)
        return

    await experiment_compare(q, selection)
To reload \ + the persistently saved settings, use the ***Load settings*** button.", + ), + ui.separator("Credential Storage"), + ui.inline( + items=[ + ui.label("Credential Handler", width=label_width), + ui.dropdown( + name="credential_saver", + value=q.client["credential_saver"], + choices=[ui.choice(name, name) for name in Secrets.names()], + trigger=False, + width="300px", + ), + ] + ), + ui.message_bar( + type="info", + text="""Method used to save credentials (passwords) \ + for ***Save settings persistently***. \ + The recommended approach for saving credentials (passwords) is to \ + use either Keyring or to avoid permanent storage \ + (requiring re-entry upon app restart). \ + Keyring will be disabled if it is not set up on the host machine. \ + Only resort to local .env if your machine's \ + accessibility is restricted to you.\n\ + When you select ***Save settings persistently***, \ + credentials will be removed from all non-selected methods. \ + ***Restore Default Settings*** will clear credentials from all methods. + """, + ), + ui.separator("Appearance"), + ui.inline( + items=[ + ui.label("Dark Mode", width=label_width), + ui.toggle( + name="theme_dark", + value=q.client["theme_dark"], + tooltip="Enables Dark Mode as theme.", + trigger=True, + ), + ] + ), + ui.inline( + items=[ + ui.label("Delete Dialogs", width=label_width), + ui.toggle( + name="delete_dialogs", + value=q.client["delete_dialogs"], + trigger=False, + tooltip=( + "Whether to show delete dialogs before deleting " + "datasets or experiments." + ), + ), + ] + ), + ui.inline( + items=[ + ui.label("Maximum Chart Points", width=label_width), + ui.spinbox( + name="chart_plot_max_points", + label=None, + min=1, + max=10000, + step=1000, + value=q.client["chart_plot_max_points"], + width=textbox_width, + trigger=False, + tooltip="Set the maximum number of points shown in the " + "experiment chart plots. 
Plots will be sub-sampled if " + "needed.", + ), + ] + ), + ui.separator("Default Connector Settings"), + ui.inline( + items=[ + ui.label("AWS S3 bucket name", width=label_width), + ui.textbox( + name="default_aws_bucket_name", + label=None, + value=q.client["default_aws_bucket_name"], + width=textbox_width, + trigger=False, + tooltip="Set the value for the AWS bucket for \ + dataset import. S3 bucket name including relative paths.", + ), + ] + ), + ui.inline( + items=[ + ui.label("AWS access key", width=label_width), + ui.textbox( + name="default_aws_access_key", + label=None, + value=q.client["default_aws_access_key"], + width=textbox_width, + password=True, + trigger=False, + tooltip="Set the value for the AWS access key \ + for dataset import.", + ), + ] + ), + ui.inline( + items=[ + ui.label("AWS secret key", width=label_width), + ui.textbox( + name="default_aws_secret_key", + label=None, + value=q.client["default_aws_secret_key"], + width=textbox_width, + password=True, + trigger=False, + tooltip="Set the value for the AWS secret key \ + for dataset import.", + ), + ] + ), + ui.inline( + items=[ + ui.label("Azure Datalake connection string", width=label_width), + ui.textbox( + name="default_azure_conn_string", + label=None, + value=q.client["default_azure_conn_string"], + width=textbox_width, + password=True, + trigger=False, + tooltip="Set the value for the Azure Datalake \ + connection string for dataset import.", + ), + ] + ), + ui.inline( + items=[ + ui.label("Azure Datalake container name", width=label_width), + ui.textbox( + name="default_azure_container", + label=None, + value=q.client["default_azure_container"], + width=textbox_width, + password=False, + trigger=False, + tooltip="Set the value for the Azure Datalake \ + container name for dataset import.", + ), + ] + ), + ui.inline( + items=[ + ui.label("Kaggle username", width=label_width), + ui.textbox( + name="default_kaggle_username", + label=None, + value=q.client["default_kaggle_username"], + 
width=textbox_width, + password=False, + trigger=False, + tooltip="Set the value for the Kaggle username \ + for dataset import.", + ), + ] + ), + ui.inline( + items=[ + ui.label("Kaggle secret key", width=label_width), + ui.textbox( + name="default_kaggle_secret_key", + label=None, + value=q.client["default_kaggle_secret_key"], + width=textbox_width, + password=True, + trigger=False, + tooltip="Set the value for the Kaggle secret key \ + for dataset import.", + ), + ] + ), + ui.separator("Default Experiment Settings"), + ui.inline( + items=[ + ui.label("Number of Workers", width=label_width), + ui.spinbox( + name="default_number_of_workers", + label=None, + min=1, + max=multiprocessing.cpu_count(), + step=1, + value=q.client["default_number_of_workers"], + width=textbox_width, + trigger=False, + tooltip="Set the value for the number of workers \ + sliders in the experiment setup.", + ), + ] + ), + ui.inline( + items=[ + ui.label("Logger", width=label_width), + ui.dropdown( + name="default_logger", + value=q.client["default_logger"], + choices=[ui.choice(name, name) for name in Loggers.names()], + trigger=False, + width="100px", + ), + ] + ), + ui.inline( + items=[ + ui.label("Neptune Project", width=label_width), + ui.textbox( + name="default_neptune_project", + label=None, + value=q.client["default_neptune_project"], + width=textbox_width, + trigger=False, + tooltip="Set the value for the neptune project \ + in the experiment setup.", + ), + ] + ), + ui.inline( + items=[ + ui.label("Neptune API Token", width=label_width), + ui.textbox( + name="default_neptune_api_token", + label=None, + value=q.client["default_neptune_api_token"], + width=textbox_width, + password=True, + trigger=False, + tooltip="Set the value for the Neptune API token \ + in the experiment setup.", + ), + ] + ), + ui.inline( + items=[ + ui.label("Huggingface API Token", width=label_width), + ui.textbox( + name="default_huggingface_api_token", + label=None, + 
value=q.client["default_huggingface_api_token"], + width=textbox_width, + password=True, + trigger=False, + tooltip="Set the value for the Huggingface API token \ + in the experiment setup.", + ), + ] + ), + ui.inline( + items=[ + ui.label("OpenAI API Token", width=label_width), + ui.textbox( + name="default_openai_api_token", + label=None, + value=q.client["default_openai_api_token"], + width=textbox_width, + password=True, + trigger=False, + tooltip="Set the value for the OpenAI API token \ + in the experiment setup.", + ), + ] + ), + ui.inline( + items=[ + ui.label("GPT evaluation max samples", width=label_width), + ui.spinbox( + name="default_gpt_eval_max", + label=None, + value=q.client["default_gpt_eval_max"], + width=textbox_width, + min=1, + max=10000, + step=1, + trigger=False, + tooltip="Set the maximum samples for GPT evaluation. \ + This is used to prevent unexpected high API costs. \ + Increase at your own risk.", + ), + ] + ), + ui.inline( + items=[ + ui.label("Use OpenAI API on Azure", width=label_width), + ui.toggle( + name="default_openai_azure", + value=q.client["default_openai_azure"], + tooltip=( + "Toggle to use Microsoft Azure Endpoints for the " + "OpenAI API." + ), + trigger=True, + ), + ] + ), + ui.inline( + items=[ + ui.label("OpenAI API Endpoint", width=label_width), + ui.textbox( + name="default_openai_api_base", + label=None, + value=q.client["default_openai_api_base"], + width=textbox_width, + password=False, + trigger=False, + tooltip=( + "Set the value for the OpenAI API endpoint. " + "Use when on Azure." + ), + ), + ] + ), + ui.inline( + items=[ + ui.label("OpenAI API Deployment ID", width=label_width), + ui.textbox( + name="default_openai_api_deployment_id", + label=None, + value=q.client["default_openai_api_deployment_id"], + width=textbox_width, + password=False, + trigger=False, + tooltip=( + "Set the value for the OpenAI API deployment ID. " + "Use when on Azure." 
+ ), + ), + ] + ), + ui.inline( + items=[ + ui.label("OpenAI API version", width=label_width), + ui.textbox( + name="default_openai_api_version", + label=None, + value=q.client["default_openai_api_version"], + width=textbox_width, + password=False, + trigger=False, + tooltip=( + "Set the value for the OpenAI API version. " + "Use when on Azure." + ), + ), + ] + ), + ui.separator("Experiment Maximum Settings"), + ui.inline( + items=[ + ui.label("Number of Epochs", width=label_width), + ui.spinbox( + name="set_max_epochs", + label=None, + min=1, + max=2000, + step=1, + value=q.client["set_max_epochs"], + width=textbox_width, + trigger=False, + tooltip="Set the maximum value for the epoch slider \ + in the experiment setup.", + ), + ] + ), + ui.inline( + items=[ + ui.label("Batch Size", width=label_width), + ui.spinbox( + name="set_max_batch_size", + label=None, + min=1, + max=4096, + step=1, + value=q.client["set_max_batch_size"], + width=textbox_width, + trigger=False, + tooltip="Set the maximum value for the batch size slider \ + in the experiment setup.", + ), + ] + ), + ui.inline( + items=[ + ui.label("Gradient clip", width=label_width), + ui.spinbox( + name="set_max_gradient_clip", + label=None, + min=1, + max=16384, + step=1, + value=q.client["set_max_gradient_clip"], + width=textbox_width, + trigger=False, + tooltip="Set the maximum value for the gradient clip \ + slider in the experiment setup.", + ), + ] + ), + ui.inline( + items=[ + ui.label("LoRA R", width=label_width), + ui.spinbox( + name="set_max_lora_r", + label=None, + min=1, + max=16384, + step=1, + value=q.client["set_max_lora_r"], + width=textbox_width, + trigger=False, + tooltip="Set the maximum value for the LoRA R \ + slider in the experiment setup.", + ), + ] + ), + ui.inline( + items=[ + ui.label("LoRA alpha", width=label_width), + ui.spinbox( + name="set_max_lora_alpha", + label=None, + min=1, + max=16384, + step=1, + value=q.client["set_max_lora_alpha"], + width=textbox_width, + 
trigger=False, + tooltip="Set the maximum value for the LoRA Alpha \ + slider in the experiment setup.", + ), + ] + ), + ui.separator("Default Chat Settings"), + ui.inline( + items=[ + ui.label("GPU used for Chat", width=label_width), + ui.spinbox( + name="gpu_used_for_chat", + label=None, + min=1, + max=torch.cuda.device_count(), + step=1, + value=q.client["gpu_used_for_chat"], + width=textbox_width, + trigger=False, + tooltip="Set the gpu id that is used for the chat window.", + ), + ] + ), + ], + ) + + q.client.delete_cards.add("settings/content") + + q.page["settings/footer"] = ui.form_card( + box="footer", + items=[ + ui.inline( + items=[ + ui.button( + name="save_settings", + label="Save settings persistently", + primary=True, + ), + ui.button( + name="load_settings", label="Load settings", primary=False + ), + ui.button( + name="restore_default_settings", + label="Restore default settings", + primary=False, + ), + ], + justify="start", + ) + ], + ) + q.client.delete_cards.add("settings/footer") diff --git a/llm_studio/app_utils/setting_utils.py b/llm_studio/app_utils/setting_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..4cefe9f9c90a9e7de4ccc3df2e47044dc78cf18f --- /dev/null +++ b/llm_studio/app_utils/setting_utils.py @@ -0,0 +1,351 @@ +import errno +import functools +import logging +import os +import pickle +import signal +import traceback +from typing import Any, List + +import keyring +import yaml +from h2o_wave import Q, ui +from keyring.errors import KeyringLocked, PasswordDeleteError + +from llm_studio.app_utils.config import default_cfg +from llm_studio.app_utils.utils import get_database_dir, get_user_id + +__all__ = [ + "load_user_settings_and_secrets", + "load_default_user_settings", + "save_user_settings_and_secrets", + "Secrets", +] + +logger = logging.getLogger(__name__) +SECRET_KEYS = [ + key + for key in default_cfg.user_settings + if any(password in key for password in ["token", "key"]) +] +USER_SETTING_KEYS = 
[key for key in default_cfg.user_settings if key not in SECRET_KEYS] + + +async def save_user_settings_and_secrets(q: Q): + await _save_secrets(q) + _save_user_settings(q) + + +def load_user_settings_and_secrets(q: Q): + _maybe_migrate_to_yaml(q) + _load_secrets(q) + _load_user_settings(q) + + +def load_default_user_settings(q: Q, clear_secrets=True): + for key in default_cfg.user_settings: + q.client[key] = default_cfg.user_settings[key] + if clear_secrets: + _clear_secrets(q, key) + + +class NoSaver: + """ + Base class that provides methods for saving, loading, and deleting password entries. + + Attributes: + username (str): The username associated with the password entries. + root_dir (str): The root directory. + + Methods: + save(name: str, password: str) -> None: + Save a password entry with the given name and password. + + load(name: str) -> str: + Load and return the password associated with the given name. + + delete(name: str) -> None: + Delete the password entry with the given name. + + """ + + def __init__(self, username: str, root_dir: str): + self.username = username + self.root_dir = root_dir + + def save(self, name: str, password: str): + pass + + def load(self, name: str) -> str: + return "" + + def delete(self, name: str): + pass + + +class KeyRingSaver(NoSaver): + """ + A class for saving, loading, and deleting passwords using the keyring library. + Some machines may not have keyring installed, so this class may not be available. 
+ """ + + def __init__(self, username: str, root_dir: str): + super().__init__(username, root_dir) + self.namespace = f"{username}_h2o_llmstudio" + + def save(self, name: str, password: str): + keyring.set_password(self.namespace, name, password) + + def load(self, name: str) -> str: + return keyring.get_password(self.namespace, name) or "" # type: ignore + + def delete(self, name: str): + try: + keyring.delete_password(self.namespace, name) + except (KeyringLocked, PasswordDeleteError): + pass + except Exception as e: + logger.warning(f"Error deleting password for keyring: {e}") + + +class EnvFileSaver(NoSaver): + """ + This module provides the EnvFileSaver class, which is used to save, load, + and delete name-password pairs in an environment file. + Only use this class if you are sure that the environment file is secure. + """ + + @property + def filename(self): + return os.path.join(self.root_dir, f"{self.username}.env") + + def save(self, name: str, password: str): + data = {} + if os.path.exists(self.filename): + with open(self.filename, "r") as f: + data = yaml.safe_load(f) + data[name] = password + with open(self.filename, "w") as f: + yaml.safe_dump(data, f) + + def load(self, name: str) -> str: + if not os.path.exists(self.filename): + return "" + + with open(self.filename, "r") as f: + data = yaml.safe_load(f) + return data.get(name, "") + + def delete(self, name: str): + if os.path.exists(self.filename): + with open(self.filename, "r") as f: + data = yaml.safe_load(f) + if data and name in data: + del data[name] + with open(self.filename, "w") as f: + yaml.safe_dump(data, f) + + +# https://stackoverflow.com/questions/2281850/timeout-function-if-it-takes-too-long-to-finish +class TimeoutError(Exception): + pass + + +def timeout(seconds=10, error_message=os.strerror(errno.ETIME)): + def decorator(func): + def _handle_timeout(signum, frame): + raise TimeoutError(error_message) + + @functools.wraps(func) + def wrapper(*args, **kwargs): + 
signal.signal(signal.SIGALRM, _handle_timeout) + signal.alarm(seconds) + try: + result = func(*args, **kwargs) + finally: + signal.alarm(0) + return result + + return wrapper + + return decorator + + +@timeout(3) +def check_if_keyring_works(): + """ + Test if keyring is working. On misconfigured machines, + Keyring may hang up to 2 minutes with the following error: + jeepney.wrappers.DBusErrorResponse: + [org.freedesktop.DBus.Error.TimedOut] + ("Failed to activate service 'org.freedesktop.secrets': + timed out (service_start_timeout=120000ms)",) + + To avoid waiting for 2 minutes, we kill the process after 3 seconds. + """ + keyring.get_password("service", "username") + + +class Secrets: + """ + Factory class to get the secrets' handler. + """ + + _secrets = { + "Do not save credentials permanently": NoSaver, + ".env File": EnvFileSaver, + } + try: + check_if_keyring_works() + logger.info("Keyring is correctly configured on this machine.") + _secrets["Keyring"] = KeyRingSaver + except TimeoutError: + logger.warning( + "Error loading keyring due to timeout. Disabling keyring save option." + ) + except Exception as e: + logger.warning(f"Error loading keyring: {e}. Disabling keyring save option.") + + @classmethod + def names(cls) -> List[str]: + return sorted(cls._secrets.keys()) + + @classmethod + def get(cls, name: str) -> Any: + return cls._secrets.get(name) + + +def _save_user_settings(q: Q): + user_settings = {key: q.client[key] for key in USER_SETTING_KEYS} + with open(_get_usersettings_path(q), "w") as f: + yaml.dump(user_settings, f) + + +def _load_user_settings(q: Q): + if os.path.isfile(_get_usersettings_path(q)): + logger.info("Reading user settings") + with open(_get_usersettings_path(q), "r") as f: + user_settings = yaml.load(f, Loader=yaml.FullLoader) + for key in USER_SETTING_KEYS: + q.client[key] = user_settings.get(key, default_cfg.user_settings[key]) + else: + logger.info("No user settings found. 
Using default settings.") + # User may have deleted the user settings file. We load the default settings. + # Secrets may still be stored in keyring or env file. + load_default_user_settings(q, clear_secrets=False) + + +async def _save_secrets(q: Q): + secret_name, secrets_handler = _get_secrets_handler(q) + for key in SECRET_KEYS: + try: + _clear_secrets(q, key, excludes=tuple(secret_name)) + if q.client[key]: + secrets_handler.save(key, q.client[key]) + + except Exception: + exception = str(traceback.format_exc()) + logger.error(f"Could not save password {key} to {secret_name}") + q.page["meta"].dialog = ui.dialog( + title="Could not save secrets. " + "Please choose another Credential Handler.", + name="secrets_error", + items=[ + ui.text( + f"The following error occurred when" + f" using {secret_name}: {exception}." + ), + ui.button( + name="settings/close_error_dialog", label="Close", primary=True + ), + ], + closable=True, + ) + q.client["keep_meta"] = True + await q.page.save() + break + else: # if no exception + # force dataset connector updated when the user decides to click on save + q.client["dataset/import/s3_bucket"] = q.client["default_aws_bucket_name"] + q.client["dataset/import/s3_access_key"] = q.client["default_aws_access_key"] + q.client["dataset/import/s3_secret_key"] = q.client["default_aws_secret_key"] + q.client["dataset/import/kaggle_access_key"] = q.client[ + "default_kaggle_username" + ] + q.client["dataset/import/kaggle_secret_key"] = q.client[ + "default_kaggle_secret_key" + ] + + +def _load_secrets(q: Q): + secret_name, secrets_handler = _get_secrets_handler(q) + for key in SECRET_KEYS: + try: + q.client[key] = secrets_handler.load(key) or default_cfg.user_settings[key] + except Exception: + logger.error(f"Could not load password {key} from {secret_name}") + q.client[key] = "" + + +def _get_secrets_handler(q: Q): + secret_name = ( + q.client["credential_saver"] or default_cfg.user_settings["credential_saver"] + ) + secrets_handler = 
Secrets.get(secret_name)( + username=get_user_id(q), root_dir=get_database_dir(q) + ) + return secret_name, secrets_handler + + +def _clear_secrets(q: Q, name: str, excludes=tuple()): + for secret_name in Secrets.names(): + if secret_name not in excludes: + secrets_handler = Secrets.get(secret_name)( + username=get_user_id(q), root_dir=get_database_dir(q) + ) + + secrets_handler.delete(name) + + +def _maybe_migrate_to_yaml(q: Q): + """ + Migrate user settings from a pickle file to a YAML file. + """ + # prior, we used to save the user settings in a pickle file + old_usersettings_path = os.path.join( + get_database_dir(q), f"{get_user_id(q)}.settings" + ) + if not os.path.isfile(old_usersettings_path): + return + + try: + with open(old_usersettings_path, "rb") as f: + user_settings = pickle.load(f) + + secret_name, secrets_handler = _get_secrets_handler(q) + logger.info(f"Migrating token using {secret_name}") + for key in SECRET_KEYS: + if key in user_settings: + secrets_handler.save(key, user_settings[key]) + + with open(_get_usersettings_path(q), "w") as f: + yaml.dump( + { + key: value + for key, value in user_settings.items() + if key in USER_SETTING_KEYS + }, + f, + ) + os.remove(old_usersettings_path) + logger.info(f"Successfully migrated tokens to {secret_name}. Old file deleted.") + except Exception as e: + logger.info( + f"Could not migrate tokens. " + f"Please delete {old_usersettings_path} and set your credentials again." 
+ f"Error: \n\n {e} {traceback.format_exc()}" + ) + + +def _get_usersettings_path(q: Q): + return os.path.join(get_database_dir(q), f"{get_user_id(q)}.yaml") diff --git a/llm_studio/app_utils/static/icon.png b/llm_studio/app_utils/static/icon.png new file mode 100644 index 0000000000000000000000000000000000000000..76e0ccf84a1fed29a827d38c54cfb05c55a9a075 Binary files /dev/null and b/llm_studio/app_utils/static/icon.png differ diff --git a/llm_studio/app_utils/static/llm-studio-logo-light.png b/llm_studio/app_utils/static/llm-studio-logo-light.png new file mode 100644 index 0000000000000000000000000000000000000000..4ceea9d349adcbdd1b66f6db6a7ca5175967d1a5 Binary files /dev/null and b/llm_studio/app_utils/static/llm-studio-logo-light.png differ diff --git a/llm_studio/app_utils/static/llm-studio-logo.png b/llm_studio/app_utils/static/llm-studio-logo.png new file mode 100644 index 0000000000000000000000000000000000000000..b1750d691638982a30fc01eb1780afddd88312b7 Binary files /dev/null and b/llm_studio/app_utils/static/llm-studio-logo.png differ diff --git a/llm_studio/app_utils/utils.py b/llm_studio/app_utils/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..9198d432e0631df5086a63c5b00fa2841f3f0934 --- /dev/null +++ b/llm_studio/app_utils/utils.py @@ -0,0 +1,2080 @@ +import asyncio +import collections +import contextlib +import dataclasses +import glob +import json +import logging +import math +import os +import random +import re +import shutil +import socket +import string +import subprocess +import time +import uuid +import zipfile +from collections import defaultdict +from contextlib import closing +from functools import partial +from typing import Any, DefaultDict, Dict, List, Optional, Tuple, Type, Union + +import GPUtil +import numpy as np +import pandas as pd +import psutil +import yaml +from azure.storage.filedatalake import DataLakeServiceClient +from boto3.session import Session +from botocore.handlers import disable_signing +from 
h2o_wave import Q, ui +from pandas.core.frame import DataFrame +from sqlitedict import SqliteDict + +from llm_studio.app_utils.db import Experiment +from llm_studio.src import possible_values +from llm_studio.src.utils.config_utils import ( + _get_type_annotation_error, + load_config_yaml, + parse_cfg_dataclass, + save_config_yaml, +) +from llm_studio.src.utils.data_utils import is_valid_data_frame, read_dataframe +from llm_studio.src.utils.export_utils import get_size_str +from llm_studio.src.utils.type_annotations import KNOWN_TYPE_ANNOTATIONS + +from .config import default_cfg + +logger = logging.getLogger(__name__) + + +def get_user_id(q): + return q.auth.subject + + +def get_user_name(q): + return q.auth.username + + +def get_data_dir(q): + return os.path.join(default_cfg.llm_studio_workdir, default_cfg.data_folder, "user") + + +def get_database_dir(q): + return os.path.join(default_cfg.llm_studio_workdir, default_cfg.data_folder, "dbs") + + +def get_output_dir(q): + return os.path.join( + default_cfg.llm_studio_workdir, default_cfg.output_folder, "user" + ) + + +def get_download_dir(q): + return os.path.join( + default_cfg.llm_studio_workdir, default_cfg.output_folder, "download" + ) + + +def get_user_db_path(q): + return os.path.join(get_database_dir(q), "user.db") + + +def get_usersettings_path(q): + return os.path.join(get_database_dir(q), f"{get_user_id(q)}.settings") + + +def find_free_port(): + with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s: + s.bind(("", 0)) + s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + return s.getsockname()[1] + + +def start_process( + cfg: Any, gpu_list: List, process_queue: List, env_vars: Dict +) -> subprocess.Popen: + """Starts train.py for a given configuration setting + + Args: + cfg: config + gpu_list: list of GPUs to use for the training + process_queue: list of processes to wait for before starting the training + env_vars: dictionary of ENV variables to pass to the training process + 
Returns: + Process + + """ + + num_gpus = len(gpu_list) + config_name = os.path.join(cfg.output_directory, "cfg.yaml") + env = {**os.environ, **env_vars} + + if num_gpus == 0: + cmd = [ + "python", + "train_wave.py", + "-Y", + config_name, + ] + # Do not delete for debug purposes + # elif num_gpus == 1: + # cmd = [ + # "env", + # f"CUDA_VISIBLE_DEVICES={','.join(gpu_list)}", + # "python", + # "-u", + # "train_wave.py", + # "-P", + # config_name, + # ] + else: + free_port = find_free_port() + if cfg.environment.use_deepspeed: + logger.info("Starting deepspeed...") + cmd = [ + "env", + "deepspeed", + "--include", + f"localhost:{','.join(gpu_list)}", + "--master_port", + f"{str(free_port)}", + "train_wave.py", + "-Y", + config_name, + ] + else: + logger.info("Starting torchrun...") + cmd = [ + "env", + f"CUDA_VISIBLE_DEVICES={','.join(gpu_list)}", + "torchrun", + f"--nproc_per_node={str(num_gpus)}", + f"--master_port={str(free_port)}", + "train_wave.py", + "-Y", + config_name, + ] + + if len(process_queue) > 0: + cmd.append("-Q") + cmd.append(",".join([str(x) for x in process_queue])) + + p = subprocess.Popen( + cmd, + env=env, + ) + + logger.info(f"Percentage of RAM memory used: {psutil.virtual_memory().percent}") + + return p + + +def clean_macos_artifacts(path: str) -> None: + """Cleans artifacts from MacOSX zip archives + + Args: + path: path to the unzipped directory + """ + + shutil.rmtree(os.path.join(path, "__MACOSX/"), ignore_errors=True) + + for ds_store in glob.glob(os.path.join(path, "**/.DS_Store"), recursive=True): + try: + os.remove(ds_store) + except OSError: + pass + + +def s3_session(aws_access_key: str, aws_secret_key: str) -> Any: + """Establishes s3 session + + Args: + aws_access_key: s3 access key + aws_secret_key: s3 secret key + + Returns: + Session + + """ + + session = Session( + aws_access_key_id=aws_access_key, aws_secret_access_key=aws_secret_key + ) + s3 = session.resource("s3") + # if no key is present, disable signing + if 
aws_access_key == "" and aws_secret_key == "": + s3.meta.client.meta.events.register("choose-signer.s3.*", disable_signing) + + return s3 + + +def filter_valid_files(files) -> List[str]: + valid_files = [ + file + for file in files + if any([file.endswith(ext) for ext in default_cfg.allowed_file_extensions]) + ] + + return valid_files + + +def s3_file_options( + bucket: str, aws_access_key: str, aws_secret_key: str +) -> Optional[List[str]]: + """ "Returns all zip files in the target s3 bucket + + Args: + bucket: s3 bucket name + aws_access_key: s3 access key + aws_secret_key: s3 secret key + + Returns: + List of zip files in bucket or None in case of access error + + """ + + try: + bucket = bucket.replace("s3://", "") + if bucket[-1] == os.sep: + bucket = bucket[:-1] + + bucket_split = bucket.split(os.sep) + bucket = bucket_split[0] + s3 = s3_session(aws_access_key, aws_secret_key) + s3_bucket = s3.Bucket(bucket) + + folder = "/".join(bucket_split[1:]) + + files = [] + for s3_file in s3_bucket.objects.filter(Prefix=f"{folder}/"): + if s3_file.key == f"{folder}/": + continue + + files.append(s3_file.key) + + files = filter_valid_files(files) + return files + + except Exception as e: + logger.warning(f"Can't load S3 datasets list: {e}") + return None + + +def convert_file_size(size: float): + """Converts file size to human readable format + + Args: + size: size in bytes + + Returns: + size in readable format + """ + + if size == 0: + return "0B" + size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB") + i = int(math.floor(math.log(size, 1024))) + p = math.pow(1024, i) + s = round(size / p, 2) + return "%.2f %s" % (s, size_name[i]) + + +class S3Progress: + """Progress update for s3 downloads + + Source: + https://stackoverflow.com/a/59843153/1281171 + + """ + + def __init__(self, q: Q, size: float) -> None: + """Initialize + + Args: + q: Q + size: size of the file to download + """ + + self._q: Q = q + self._size: float = size + self._seen_so_far: float = 
0.0 + self._percentage: float = 0.0 + + def progress(self, bytes_amount: float): + """Update progress + + Args: + bytes_amount: amount of bytes downloaded + """ + + self._seen_so_far += bytes_amount + self._percentage = (self._seen_so_far / self._size) * 100.0 + + async def update_ui(self): + """Update progress in UI""" + + self._q.page["meta"].dialog = ui.dialog( + title="S3 file download in progress", + blocking=True, + items=[ + ui.progress( + label="Please be patient...", + caption=( + f"{convert_file_size(self._seen_so_far)} of " + f"{convert_file_size(self._size)} " + f"({self._percentage:.2f}%)" + ), + value=self._percentage / 100, + ) + ], + ) + await self._q.page.save() + + async def poll(self): + """Update wave ui""" + + while self._percentage / 100 < 1: + await self.update_ui() + await self._q.sleep(0.1) + await self.update_ui() + + +def s3_download_coroutine(q, filename): + download_folder = f"{get_data_dir(q)}/tmp" + download_folder = get_valid_temp_data_folder(q, download_folder) + + if os.path.exists(download_folder): + shutil.rmtree(download_folder) + os.makedirs(download_folder, exist_ok=True) + + downloaded_zip = f"{download_folder}/{filename.split('/')[-1]}" + + q.page["dataset/import"] = ui.form_card(box="content", items=[]) + return downloaded_zip, download_folder + + +def extract_if_zip(file, actual_path): + if file.endswith("zip"): + with zipfile.ZipFile(file, "r") as zip_ref: + zip_ref.extractall(actual_path) + + os.remove(file) + clean_macos_artifacts(actual_path) + + +async def s3_download( + q, bucket, filename, aws_access_key, aws_secret_key +) -> Tuple[str, str]: + """Downloads a file from s3 + + Args: + q: Q + bucket: s3 bucket name + filename: filename to download + aws_access_key: s3 access key + aws_secret_key: s3 secret key + + Returns: + Download location path + """ + bucket = bucket.replace("s3://", "") + if bucket[-1] == os.sep: + bucket = bucket[:-1] + + bucket = bucket.split(os.sep)[0] + + s3 = s3_session(aws_access_key, 
aws_secret_key) + + file, s3_path = s3_download_coroutine(q, filename) + + progress = S3Progress( + q, (s3.meta.client.head_object(Bucket=bucket, Key=filename))["ContentLength"] + ) + + poll_future = asyncio.create_task(progress.poll()) + + def download_file(): + s3.Bucket(bucket).download_file(filename, file, Callback=progress.progress) + + await q.run(download_file) + await poll_future + + extract_if_zip(file, s3_path) + + return s3_path, "".join(filename.split("/")[-1].split(".")[:-1]) + + +def azure_file_options(conn_string: str, container: str) -> List[str]: + """Returns all zip files in the target azure datalake container + + Args: + conn_string: connection string + container: container including sub-paths + + Returns: + - List of files in storage or empty list in case of access error + + """ + + try: + service_client = DataLakeServiceClient.from_connection_string( # type: ignore + conn_string + ) + + container_split = container.split(os.sep) + container = container_split[0] + + folder = "/".join(container_split[1:]) + + file_system_client = service_client.get_file_system_client( + file_system=container + ) + + files = file_system_client.get_paths(path=folder) + files = next(files.by_page()) # type: ignore[arg-type] + files = [x.name for x in files] # type: ignore[assignment] + return filter_valid_files(files) + + except Exception as e: + logger.warning(f"Can't load Azure datasets list: {e}") + return [] + + +async def download_progress(q, title, seen_so_far, total_len): + if seen_so_far is not None and total_len is not None: + percentage = seen_so_far / total_len + value = percentage + caption = ( + f"{convert_file_size(seen_so_far)} of " + f"{convert_file_size(total_len)} " + f"({percentage * 100:.2f}%)" + ) + else: + value = None + caption = None + + q.page["meta"].dialog = ui.dialog( + title=title, + blocking=True, + items=[ui.progress(label="Please be patient...", caption=caption, value=value)], + ) + await q.page.save() + + +async def azure_download( + 
q: Any, conn_string: str, container: str, filename: str +) -> Tuple[str, str]: + """Downloads a file from azure + + Args: + q: Q + conn_string: connection string + container: container + filename: filename to download + + Returns: + Download location path + """ + + service_client = DataLakeServiceClient.from_connection_string( # type: ignore + conn_string + ) + + container_split = container.split(os.sep) + container = container_split[0] + + file_system_client = service_client.get_file_system_client(file_system=container) + + filename_split = filename.split(os.sep) + folder = "/".join(filename_split[:-1]) + filename = filename_split[-1] + + rnd_folder = "".join(random.choice(string.digits) for i in range(10)) + azure_path = f"{get_data_dir(q)}/tmp_{rnd_folder}" + azure_path = get_valid_temp_data_folder(q, azure_path) + + if os.path.exists(azure_path): + shutil.rmtree(azure_path) + os.makedirs(azure_path, exist_ok=True) + + file = f"{azure_path}/{filename}" + + file_client = file_system_client.get_file_client(f"{folder}/{filename}") + + download = file_client.download_file() + + blocks = download.chunks() + + seen_so_far = 0 + with open(file, "wb") as local_file: + for block in blocks: + local_file.write(block) + + seen_so_far += len(block) + + await download_progress( + q, + "Azure Datalake file download in progress", + seen_so_far, + len(blocks), # type: ignore[arg-type] + ) + + extract_if_zip(file, azure_path) + + return azure_path, "".join(filename.split(".")[:-1]) + + +async def local_download(q: Any, filename: str) -> Tuple[str, str]: + """Downloads a file from local path + + Args: + q: Q + filename: filename to download + + Returns: + Download location path + """ + + local_path = f"{get_data_dir(q)}/tmp" + local_path = get_valid_temp_data_folder(q, local_path) + + if os.path.exists(local_path): + shutil.rmtree(local_path) + os.makedirs(local_path, exist_ok=True) + + shutil.copy2(filename, local_path) + + zip_file = f"{local_path}/{filename.split('/')[-1]}" + 
extract_if_zip(zip_file, local_path) + + return local_path, "".join(filename.split("/")[-1].split(".")[:-1]) + + +async def kaggle_download( + q: Any, command: str, kaggle_access_key: str, kaggle_secret_key: str +) -> Tuple[str, str]: + """ "Downloads a file from kaggle + + Args: + q: Q + command: kaggle api command + kaggle_access_key: kaggle access key + kaggle_secret_key: kaggle secret key + + Returns: + Download location path + """ + + kaggle_path = f"{get_data_dir(q)}/tmp" + kaggle_path = get_valid_temp_data_folder(q, kaggle_path) + + if os.path.exists(kaggle_path): + shutil.rmtree(kaggle_path) + os.makedirs(kaggle_path, exist_ok=True) + + command_run = [] + if kaggle_access_key != "": + command_run += ["env", f"KAGGLE_USERNAME={kaggle_access_key}"] + if kaggle_secret_key != "": + command_run += ["env", f"KAGGLE_KEY={kaggle_secret_key}"] + command_run += command.split(" ") + ["-p", kaggle_path] + subprocess.run(command_run) + + try: + zip_file = f"{kaggle_path}/{command.split(' ')[-1].split('/')[-1]}.zip" + with zipfile.ZipFile(zip_file, "r") as zip_ref: + zip_ref.extractall(kaggle_path) + os.remove(zip_file) + except Exception: + pass + + clean_macos_artifacts(kaggle_path) + + for f in glob.glob(kaggle_path + "/*"): + if ".zip" in f and zip_file not in f: + with zipfile.ZipFile(f, "r") as zip_ref: + zip_ref.extractall(kaggle_path) + + clean_macos_artifacts(kaggle_path) + + return kaggle_path, "".join(command.split(" ")[-1].split("/")[-1]) + + +def clean_error(error: str): + """Cleans some error messages + + Args: + error: original error message + + Returns: + Cleaned error message + + """ + + if "UNIQUE constraint failed: datasets.name" in error: + error = "Dataset name already exists, please choose a different one." + elif "No such file or directory" in error: + error = "Import failed." 
+ + return error + + +def remove_model_type(problem_type: str) -> str: + """Removes model type from problem type + + Args: + problem_type: problem type + + Returns: + Cleaned raw problem type + + """ + if "_config_" in problem_type: + problem_type = problem_type.split("_config_")[0] + "_config" + return problem_type + + +def add_model_type(problem_type: str, model_type: str) -> str: + """Adds model type to problem type + + Args: + problem_type: problem type + model_type: model type + + Returns: + problem type including model type + + """ + problem_type = remove_model_type(problem_type) + if model_type != "": + problem_type = f"{problem_type}_{model_type}" + return problem_type + + +def get_problem_categories() -> List[Tuple[str, str]]: + """Returns all available problem category choices + + Returns: + List of tuples, each containing the raw problem category name + and the problem category name as label. + """ + + problem_categories: List[Tuple[str, str]] = [] + for c in default_cfg.problem_categories: + cc = (c, make_label(c)) + problem_categories.append(cc) + return problem_categories + + +def get_problem_types(category: Optional[str] = None) -> List[Tuple[str, str]]: + """Returns all problem type choices + + Args: + category: optional category to filter for + + Returns: + List of tuples, each containing the raw problem type name + and the problem type name as label. 
+ """ + problem_types: List[Tuple[str, str]] = [] + for c in default_cfg.problem_types: + if category is not None and not c.startswith(category): + continue + cc = (c, make_label("_".join(c.split("_")[1:]).replace("_config", ""))) + problem_types.append(cc) + + return problem_types + + +def get_model_types(problem_type: str) -> List[Tuple[str, str]]: + """Returns all model types for a given problem type + + Args: + problem_type: problem type name + + Returns: + List of model types and their labels + """ + + model_types = [] + for c in sorted(os.listdir("llm_studio/python_configs")): + if "_config_" not in c: + continue + if problem_type in c: + c = c.replace(".py", "").split("_config_")[1] + model_types.append((c, make_label(c[1:]))) + + return model_types + + +def get_dataset( + k: str, + v: Any, + q: Q, + limit: Optional[List[str]] = None, + pre: str = "experiment/start", +) -> Tuple[List[str], Any]: + """ + Get the dataset and the preliminary default value for a setting. + The default value may still be overridden by the `possible_values.DatasetValue` + instances if it is not a valid choice. + + Args: + k: key for the setting + v: value for the setting + q: Q + limit: list of keys to limit + pre: prefix for client key + + Returns: + List of possible values, the preliminary default value. 
+ """ + + if q.client[f"{pre}/dataset"] is None: + dataset_id = 1 + else: + dataset_id = int(q.client[f"{pre}/dataset"]) + + dataset = q.client.app_db.get_dataset(dataset_id) + + if dataset is None: + return None, "" + + dataset = dataset.__dict__ + + dataset_cfg = load_config_yaml(dataset["config_file"]).dataset.__dict__ + + for kk, vv in dataset_cfg.items(): + dataset[kk] = vv + + dataset["dataframe"] = q.client[f"{pre}/cfg/dataframe"] + + if q.client[f"{pre}/cfg_mode/from_dataset"] and (limit is None or k in limit): + v = dataset[k] if k in dataset else v + + if limit is not None and k not in limit: + return None, v + + # we need to not reset dataset settings when changing expert mode + if q.client[f"{pre}/cfg_mode/from_dataset_args"]: + v = q.client[f"{pre}/cfg/{k}"] + + return dataset, v + + +def get_ui_element( + k: str, + v: Any, + poss_values: Any, + type_annotation: Type, + tooltip: str, + password: bool, + trigger: bool, + q: Q, + pre: str = "", +) -> Any: + """Returns a single ui element for a given config entry + + Args: + k: key + v: value + poss_values: possible values + type_annotation: type annotation + tooltip: tooltip + password: flag for whether it is a password + trigger: flag for triggering the element + q: Q + pre: optional prefix for ui key + get_default: flag for whether to get the default values + + Returns: + Ui element + + """ + assert type_annotation in KNOWN_TYPE_ANNOTATIONS + + # Overwrite current values with values from yaml + if pre == "experiment/start/cfg/": + if q.args["experiment/upload_yaml"] and "experiment/yaml_data" in q.client: + if (k in q.client["experiment/yaml_data"].keys()) and ( + k != "experiment_name" + ): + q.client[pre + k] = q.client["experiment/yaml_data"][k] + + if type_annotation in (int, float): + if not isinstance(poss_values, possible_values.Number): + raise ValueError( + "Type annotations `int` and `float` need a `possible_values.Number`!" 
+ ) + + val = q.client[pre + k] if q.client[pre + k] is not None else v + + min_val = ( + type_annotation(poss_values.min) if poss_values.min is not None else None + ) + max_val = ( + type_annotation(poss_values.max) if poss_values.max is not None else None + ) + + # Overwrite default maximum values with user_settings + if f"set_max_{k}" in q.client: + max_val = q.client[f"set_max_{k}"] + + if isinstance(poss_values.step, (float, int)): + step_val = type_annotation(poss_values.step) + elif poss_values.step == "decad" and val < 1: + step_val = 10 ** -len(str(int(1 / val))) + else: + step_val = 1 + + if min_val is None or max_val is None: + t = [ + # TODO: spinbox `trigger` https://github.com/h2oai/wave/pull/598 + ui.spinbox( + name=pre + k, + label=make_label(k), + value=val, + # TODO: open issue in wave to make spinbox optionally unbounded + max=max_val if max_val is not None else 1e12, + min=min_val if min_val is not None else -1e12, + step=step_val, + tooltip=tooltip, + ) + ] + else: + t = [ + ui.slider( + name=pre + k, + label=make_label(k), + value=val, + min=min_val, + max=max_val, + step=step_val, + tooltip=tooltip, + trigger=trigger, + ) + ] + elif type_annotation == bool: + val = q.client[pre + k] if q.client[pre + k] is not None else v + + t = [ + ui.toggle( + name=pre + k, + label=make_label(k), + value=val, + tooltip=tooltip, + trigger=trigger, + ) + ] + elif type_annotation in (str, Tuple[str, ...]): + if poss_values is None: + val = q.client[pre + k] if q.client[pre + k] is not None else v + + title_label = make_label(k) + + t = [ + ui.textbox( + name=pre + k, + label=title_label, + value=val, + required=False, + password=password, + tooltip=tooltip, + trigger=trigger, + multiline=False, + ) + ] + else: + if isinstance(poss_values, possible_values.String): + options = poss_values.values + allow_custom = poss_values.allow_custom + placeholder = poss_values.placeholder + else: + options = poss_values + allow_custom = False + placeholder = None + + 
is_tuple = type_annotation == Tuple[str, ...] + + if is_tuple and allow_custom: + raise TypeError( + "Multi-select (`Tuple[str, ...]` type annotation) and" + " `allow_custom=True` is not supported at the same time." + ) + + v = q.client[pre + k] if q.client[pre + k] is not None else v + if isinstance(v, str): + v = [v] + + # `v` might be a tuple of strings here but Wave only accepts lists + v = list(v) + + if allow_custom: + if not all(isinstance(option, str) for option in options): + raise ValueError( + "Combobox cannot handle (value, name) pairs for options." + ) + + t = [ + ui.combobox( + name=pre + k, + label=make_label(k), + value=v[0], + choices=( + list(options) + v if v not in options else list(options) + ), + tooltip=tooltip, + ) + ] + else: + choices = [ + ( + ui.choice(option, option) + if isinstance(option, str) + else ui.choice(option[0], option[1]) + ) + for option in options + ] + + t = [ + ui.dropdown( + name=pre + k, + label=make_label(k), + value=None if is_tuple else v[0], + values=v if is_tuple else None, + required=False, + choices=choices, + tooltip=tooltip, + placeholder=placeholder, + trigger=trigger, + ) + ] + + return t + + +def get_dataset_elements(cfg: Any, q: Q) -> List: + """For a given configuration setting return the according dataset ui components. 
+ + Args: + cfg: configuration settings + q: Q + + Returns: + List of ui elements + """ + + cfg_dict = cfg.__dict__ + type_annotations = cfg.get_annotations() + + cfg_dict = {key: cfg_dict[key] for key in cfg._get_order()} + + items = [] + for k, v in cfg_dict.items(): + # Show some fields only during dataset import + if k.startswith("_") or cfg._get_visibility(k) == -1: + continue + + if not ( + check_dependencies( + cfg=cfg, pre="dataset/import", k=k, q=q, dataset_import=True + ) + ): + continue + tooltip = cfg._get_tooltips(k) + + trigger = False + if k in default_cfg.dataset_trigger_keys or k == "data_format": + trigger = True + + if type_annotations[k] in KNOWN_TYPE_ANNOTATIONS: + if k in default_cfg.dataset_keys: + dataset = cfg_dict.copy() + dataset["path"] = q.client["dataset/import/path"] + + for kk, vv in q.client["dataset/import/cfg"].__dict__.items(): + dataset[kk] = vv + + for trigger_key in default_cfg.dataset_trigger_keys: + if q.client[f"dataset/import/cfg/{trigger_key}"] is not None: + dataset[trigger_key] = q.client[ + f"dataset/import/cfg/{trigger_key}" + ] + if ( + q.client["dataset/import/cfg/data_format"] is not None + and k == "data_format" + ): + v = q.client["dataset/import/cfg/data_format"] + + dataset["dataframe"] = q.client["dataset/import/cfg/dataframe"] + + type_annotation = type_annotations[k] + poss_values, v = cfg._get_possible_values( + field=k, + value=v, + type_annotation=type_annotation, + mode="train", + dataset_fn=lambda k, v: ( + dataset, + dataset[k] if k in dataset else v, + ), + ) + + if k == "train_dataframe" and v != "None": + q.client["dataset/import/cfg/dataframe"] = read_dataframe(v) + + q.client[f"dataset/import/cfg/{k}"] = v + + t = get_ui_element( + k, + v, + poss_values, + type_annotation, + tooltip=tooltip, + password=False, + trigger=trigger, + q=q, + pre="dataset/import/cfg/", + ) + else: + t = [] + elif dataclasses.is_dataclass(v): + elements_group = get_dataset_elements(cfg=v, q=q) + t = elements_group + 
else: + raise _get_type_annotation_error(v, type_annotations[k]) + + items += t + + return items + + +def check_dependencies(cfg: Any, pre: str, k: str, q: Q, dataset_import: bool = False): + """Checks all dependencies for a given key + + Args: + cfg: configuration settings + pre: prefix for client keys + k: key to be checked + q: Q + dataset_import: flag whether dependencies are checked in dataset import + + Returns: + True if dependencies are met + """ + + dependencies = cfg._get_nesting_dependencies(k) + + if dependencies is None: + dependencies = [] + # Do not respect some nesting during the dataset import + if dataset_import: + dependencies = [x for x in dependencies if x.key not in ["validation_strategy"]] + # Do not respect some nesting during the create experiment + else: + dependencies = [x for x in dependencies if x.key not in ["data_format"]] + + if len(dependencies) > 0: + all_deps = 0 + for d in dependencies: + if isinstance(q.client[f"{pre}/cfg/{d.key}"], (list, tuple)): + dependency_values = q.client[f"{pre}/cfg/{d.key}"] + else: + dependency_values = [q.client[f"{pre}/cfg/{d.key}"]] + + all_deps += d.check(dependency_values) + return all_deps == len(dependencies) + + return True + + +def is_visible(k: str, cfg: Any, q: Q) -> bool: + """Returns a flag whether a given key should be visible on UI. + + Args: + k: name of the hyperparameter + cfg: configuration settings, + q: Q + Returns: + List of ui elements + """ + + visibility = 1 + + if visibility < cfg._get_visibility(k): + return False + + return True + + +def get_ui_elements( + cfg: Any, + q: Q, + limit: Optional[List[str]] = None, + pre: str = "experiment/start", +) -> List: + """For a given configuration setting return the according ui components. + + Args: + cfg: configuration settings + q: Q + limit: optional list of keys to limit + pre: prefix for client keys + parent_cfg: parent config class. 
+ + Returns: + List of ui elements + """ + items = [] + + cfg_dict = cfg.__dict__ + type_annotations = cfg.get_annotations() + + cfg_dict = {key: cfg_dict[key] for key in cfg._get_order()} + + for k, v in cfg_dict.items(): + if "api" in k: + password = True + else: + password = False + + if k.startswith("_") or cfg._get_visibility(k) < 0: + if q.client[f"{pre}/cfg_mode/from_cfg"]: + q.client[f"{pre}/cfg/{k}"] = v + continue + else: + type_annotation = type_annotations[k] + poss_values, v = cfg._get_possible_values( + field=k, + value=v, + type_annotation=type_annotation, + mode=q.client[f"{pre}/cfg_mode/mode"], + dataset_fn=partial(get_dataset, q=q, limit=limit, pre=pre), + ) + + if k in default_cfg.dataset_keys: + # reading dataframe + if k == "train_dataframe" and (v != ""): + q.client[f"{pre}/cfg/dataframe"] = read_dataframe(v, meta_only=True) + q.client[f"{pre}/cfg/{k}"] = v + elif k in default_cfg.dataset_extra_keys: + _, v = get_dataset(k, v, q=q, limit=limit, pre=pre) + q.client[f"{pre}/cfg/{k}"] = v + elif q.client[f"{pre}/cfg_mode/from_cfg"]: + q.client[f"{pre}/cfg/{k}"] = v + # Overwrite current default values with user_settings + if q.client[f"{pre}/cfg_mode/from_default"] and f"default_{k}" in q.client: + q.client[f"{pre}/cfg/{k}"] = q.client[f"default_{k}"] + + if not (check_dependencies(cfg=cfg, pre=pre, k=k, q=q)): + continue + + if not is_visible(k=k, cfg=cfg, q=q): + if type_annotation not in KNOWN_TYPE_ANNOTATIONS: + _ = get_ui_elements(cfg=v, q=q, limit=limit, pre=pre) + elif q.client[f"{pre}/cfg_mode/from_cfg"]: + q.client[f"{pre}/cfg/{k}"] = v + + continue + + tooltip = cfg._get_tooltips(k) + + trigger = False + q.client[f"{pre}/trigger_ks"] = ["train_dataframe"] + q.client[f"{pre}/trigger_ks"] += cfg._get_nesting_triggers() + if k in q.client[f"{pre}/trigger_ks"]: + trigger = True + + if type_annotation in KNOWN_TYPE_ANNOTATIONS: + if limit is not None and k not in limit: + continue + + t = get_ui_element( + k=k, + v=v, + 
poss_values=poss_values, + type_annotation=type_annotation, + tooltip=tooltip, + password=password, + trigger=trigger, + q=q, + pre=f"{pre}/cfg/", + ) + elif dataclasses.is_dataclass(v): + if limit is not None and k in limit: + elements_group = get_ui_elements(cfg=v, q=q, limit=None, pre=pre) + else: + elements_group = get_ui_elements(cfg=v, q=q, limit=limit, pre=pre) + + if k == "dataset" and pre != "experiment/start": + # get all the datasets available + df_datasets = q.client.app_db.get_datasets_df() + if not q.client[f"{pre}/dataset"]: + if len(df_datasets) >= 1: + q.client[f"{pre}/dataset"] = str(df_datasets["id"].iloc[-1]) + else: + q.client[f"{pre}/dataset"] = "1" + + elements_group = [ + ui.dropdown( + name=f"{pre}/dataset", + label="Dataset", + required=True, + value=q.client[f"{pre}/dataset"], + choices=[ + ui.choice(str(row["id"]), str(row["name"])) + for _, row in df_datasets.iterrows() + ], + trigger=True, + tooltip=tooltip, + ) + ] + elements_group + + if len(elements_group) > 0: + t = [ + ui.separator( + name=k + "_expander", label=make_label(k, appendix=" settings") + ) + ] + else: + t = [] + + t += elements_group + else: + raise _get_type_annotation_error(v, type_annotations[k]) + + items += t + + q.client[f"{pre}/prev_dataset"] = q.client[f"{pre}/dataset"] + + return items + + +def parse_ui_elements( + cfg: Any, q: Q, limit: Union[List, str] = "", pre: str = "" +) -> Any: + """Sets configuration settings with arguments from app + + Args: + cfg: configuration + q: Q + limit: optional list of keys to limit + pre: prefix for keys + + Returns: + Configuration with settings overwritten from arguments + """ + + cfg_dict = cfg.__dict__ + type_annotations = cfg.get_annotations() + for k, v in cfg_dict.items(): + if k.startswith("_") or cfg._get_visibility(k) == -1: + continue + + if ( + len(limit) > 0 + and k not in limit + and type_annotations[k] in KNOWN_TYPE_ANNOTATIONS + ): + continue + + elif type_annotations[k] in KNOWN_TYPE_ANNOTATIONS: + value = 
q.client[f"{pre}{k}"] + + if type_annotations[k] == Tuple[str, ...]: + if isinstance(value, str): + value = [value] + value = tuple(value) + if isinstance(type_annotations[k], str) and isinstance(value, list): + # fix for combobox outputting custom values as list in wave 0.22 + value = value[0] + setattr(cfg, k, value) + elif dataclasses.is_dataclass(v): + setattr(cfg, k, parse_ui_elements(cfg=v, q=q, limit=limit, pre=pre)) + else: + raise _get_type_annotation_error(v, type_annotations[k]) + + return cfg + + +def get_experiment_status(path: str) -> Tuple[str, str]: + """Get status information from experiment. + + Args: + path: path to experiment folder + Returns: + Tuple of experiment status and experiment info + """ + + try: + flag_json_path = f"{path}/flags.json" + if not os.path.exists(flag_json_path): + logger.debug(f"File {flag_json_path} does not exist yet.") + return "none", "none" + with open(flag_json_path) as file: + flags = json.load(file) + status = flags.get("status", "none") + info = flags.get("info", "none") + + # Collect failed statuses from all GPUs + single_gpu_failures = [] + for flag_json_path in glob.glob(f"{path}/flags?*.json"): + if os.path.exists(flag_json_path): + with open(flag_json_path) as file: + flags = json.load(file) + status = flags.get("status", "none") + info = flags.get("info", "none") + + if status == "failed": + single_gpu_failures.append(info) + # Get the most detailed failure info + if len(single_gpu_failures) > 0: + detailed_gpu_failures = [x for x in single_gpu_failures if x != "See logs"] + if len(detailed_gpu_failures) > 0: + return "failed", detailed_gpu_failures[0] + else: + return "failed", single_gpu_failures[0] + return status, info + + except Exception: + logger.debug("Could not get experiment status:", exc_info=True) + return "none", "none" + + +def get_experiments_status(df: DataFrame) -> Tuple[List[str], List[str]]: + """For each experiment in given dataframe, return the status of the process + + Args: + df: 
experiment dataframe + + Returns: + A list with each status and a list with all infos + """ + + status_all = [] + info_all = [] + for idx, row in df.iterrows(): + status, info = get_experiment_status(row.path) + + if info == "none": + info = "" + info_all.append(info) + + pid = row.process_id + + zombie = False + try: + p = psutil.Process(pid) + zombie = p.status() == "zombie" + except psutil.NoSuchProcess: + pass + if not psutil.pid_exists(pid) or zombie: + running = False + else: + running = True + + if running: + if status == "none": + status_all.append("queued") + elif status == "running": + status_all.append("running") + elif status == "queued": + status_all.append("queued") + elif status == "finished": + status_all.append("finished") + elif status == "stopped": + status_all.append("stopped") + elif status == "failed": + status_all.append("failed") + else: + status_all.append("finished") + else: + if status == "none": + status_all.append("failed") + elif status == "queued": + status_all.append("failed") + elif status == "running": + status_all.append("failed") + elif status == "finished": + status_all.append("finished") + elif status == "stopped": + status_all.append("stopped") + elif status == "failed": + status_all.append("failed") + else: + status_all.append("failed") + + return status_all, info_all + + +def get_experiments_info(df: DataFrame, q: Q) -> DefaultDict: + """For each experiment in given dataframe, return certain configuration settings + + Args: + df: experiment dataframe + q: Q + + Returns: + A dictionary of lists of additional information + """ + + info = defaultdict(list) + for _, row in df.iterrows(): + try: + # load_config_yaml issues a warning if the yaml file contains keys + # that are no longer part of the dataclass fields. + # This can happen if the codebase has changed since the experiment was run. 
+ # Ignore those warnings here + logging_level = logging.getLogger().level + logging.getLogger().setLevel(logging.ERROR) + cfg = load_config_yaml(f"{row.path}/cfg.yaml").__dict__ + logging.getLogger().setLevel(logging_level) + except Exception: + cfg = None + + metric = "" + loss_function = "" + + if cfg is not None: + try: + metric = cfg["prediction"].metric + loss_function = cfg["training"].loss_function + except KeyError: + metric = "" + loss_function = "" + + with SqliteDict(f"{row.path}/charts.db") as logs: + if "internal" in logs.keys(): + if "current_step" in logs["internal"].keys(): + curr_step = int(logs["internal"]["current_step"]["values"][-1]) + else: + curr_step = 0 + + if "total_training_steps" in logs["internal"].keys(): + total_training_steps = int( + logs["internal"]["total_training_steps"]["values"][-1] + ) + else: + total_training_steps = 0 + + if "current_val_step" in logs["internal"].keys(): + curr_val_step = int( + logs["internal"]["current_val_step"]["values"][-1] + ) + else: + curr_val_step = 0 + + if "total_validation_steps" in logs["internal"].keys(): + total_validation_steps = int( + logs["internal"]["total_validation_steps"]["values"][-1] + ) + else: + total_validation_steps = 0 + + curr_total_step = curr_step + curr_val_step + + total_steps = max(total_training_steps + total_validation_steps, 1) + + if ( + "global_start_time" in logs["internal"].keys() + and curr_total_step > 0 + ): + elapsed = ( + time.time() + - logs["internal"]["global_start_time"]["values"][-1] + ) + remaining_steps = total_steps - curr_total_step + eta = elapsed * (remaining_steps / curr_total_step) + if eta == 0: + eta = "" + else: + # if more than one day, show days + # need to subtract 1 day from time_took since strftime shows + # day of year which starts counting at 1 + if eta > 86400: + eta = time.strftime( + "%-jd %H:%M:%S", time.gmtime(float(eta - 86400)) + ) + else: + eta = time.strftime("%H:%M:%S", time.gmtime(float(eta))) + else: + eta = "N/A" + else: + 
eta = "N/A" + total_steps = 1 + curr_total_step = 0 + + if ( + "validation" in logs + and metric in logs["validation"] + and logs["validation"][metric]["values"][-1] is not None + ): + score_val = np.round(logs["validation"][metric]["values"][-1], 4) + else: + score_val = "" + + try: + dataset = q.client.app_db.get_dataset(row.dataset).name + except Exception: + dataset = "" + + config_file = make_config_label(row.config_file) + + info["config_file"].append(config_file) + info["dataset"].append(dataset) + info["loss"].append(loss_function) + info["metric"].append(metric) + info["eta"].append(eta) + info["val metric"].append(score_val) + info["progress"].append(f"{np.round(curr_total_step / total_steps, 2)}") + + del cfg + + return info + + +def make_config_label(config_file: str) -> str: + """Makes a label from a config file name + + Args: + config_file: config file name + + Returns: + Label + """ + + config_file = config_file.replace(".yaml", "") + if "_config_" in config_file: + config_file_split = config_file.split("_config_") + config_file = ( + f"{make_label(config_file_split[0])} " + f"({make_label(config_file_split[1][1:])})" + ) + else: + config_file = make_label(config_file.replace("_config", "")) + + return config_file + + +def get_datasets_info(df: DataFrame, q: Q) -> Tuple[DataFrame, DefaultDict]: + """For each dataset in given dataframe, return certain configuration settings + + Args: + df: dataset dataframe + q: Q + + Returns: + A dictionary of lists of additional information + """ + + info = defaultdict(list) + for idx, row in df.iterrows(): + config_file = q.client.app_db.get_dataset(row.id).config_file + path = row.path + "/" + + try: + logging_level = logging.getLogger().level + logging.getLogger().setLevel(logging.ERROR) + cfg = load_config_yaml(config_file) + logging.getLogger().setLevel(logging_level) + except Exception as e: + logger.warning(f"Could not load configuration from {config_file}. 
{e}") + cfg = None + + if cfg is not None: + cfg_dataset = cfg.dataset.__dict__ + + config_file = make_config_label(row.config_file.replace(path, "")) + + info["problem type"].append(config_file) + info["train dataframe"].append( + cfg_dataset["train_dataframe"].replace(path, "") + ) + info["validation dataframe"].append( + cfg_dataset["validation_dataframe"].replace(path, "") + ) + + info["labels"].append(cfg.dataset.answer_column) + + del cfg, cfg_dataset + else: + df = df.drop(idx) + + return df, info + + +def get_experiments( + q: Q, + status: Union[Optional[str], Optional[List[str]]] = None, + mode: Optional[str] = None, +) -> pd.DataFrame: + """Return all experiments given certain restrictions + + Args: + q: Q + status: option to filter for certain experiment status + mode: option to filter for certain experiment mode + Returns: + experiment df + """ + + df = q.client.app_db.get_experiments_df() + + info = get_experiments_info(df, q) + for k, v in info.items(): + df[k] = v + + df["status"], df["info"] = get_experiments_status(df) + + if status is not None: + if type(status) is str: + status = [status] + df = df[df["status"].isin(status)] + + if mode is not None: + df = df[df["mode"] == mode] + + if len(df) > 0: + # make sure progress is 100% for finished experiments + df.loc[df.status == "finished", "progress"] = "1.0" + + df["info"] = np.where( + (df["status"] == "running") & (df["eta"] != ""), + df["eta"].apply(lambda x: f"ETA: {x}"), + df["info"], + ) + + return df + + +def get_datasets( + q: Q, + show_experiment_datasets: bool = True, +) -> pd.DataFrame: + """Return all datasets given certain restrictions + + Args: + q: Q + show_experiment_datasets: whether to also show datasets linked to experiments + + Returns: + dataset df + """ + + df = q.client.app_db.get_datasets_df() + + df, info = get_datasets_info(df, q) + for k, v in info.items(): + df[k] = v + + for type in ["train", "validation"]: + col_name = f"{type}_rows" + if col_name not in df: + continue 
+ rows = df[col_name].astype(float).map("{:.0f}".format) + del df[col_name] + rows[rows == "nan"] = "None" + + if f"{type} dataframe" in df.columns: + idx = df.columns.get_loc(f"{type} dataframe") + 1 + df.insert(idx, f"{type} rows", rows) + + if not show_experiment_datasets: + experiment_datasets = get_experiments(q).dataset.unique() + df = df.loc[~df["name"].isin(experiment_datasets)] + + return df + + +def start_experiment(cfg: Any, q: Q, pre: str, gpu_list: Optional[List] = None) -> None: + """Starts an experiment + + Args: + cfg: configuration settings + q: Q + pre: prefix for client keys + gpu_list: list of GPUs available + """ + if gpu_list is None: + gpu_list = cfg.environment.gpus + + # Get queue of the processes to wait for + running_experiments = get_experiments(q=q) + running_experiments = running_experiments[ + running_experiments.status.isin(["queued", "running"]) + ] + all_process_queue = [] + for _, row in running_experiments.iterrows(): + for gpu_id in row["gpu_list"].split(","): + if gpu_id in gpu_list: + all_process_queue.append(row["process_id"]) + + process_queue = list(set(all_process_queue)) + + env_vars = { + "NEPTUNE_API_TOKEN": q.client["default_neptune_api_token"], + "OPENAI_API_KEY": q.client["default_openai_api_token"], + "GPT_EVAL_MAX": str(q.client["default_gpt_eval_max"]), + } + if q.client["default_openai_azure"]: + env_vars.update( + { + "OPENAI_API_TYPE": "azure", + "OPENAI_API_BASE": q.client["default_openai_api_base"], + "OPENAI_API_VERSION": q.client["default_openai_api_version"], + "OPENAI_API_DEPLOYMENT_ID": q.client[ + "default_openai_api_deployment_id" + ], + } + ) + if q.client["default_huggingface_api_token"]: + env_vars.update( + {"HUGGINGFACE_TOKEN": q.client["default_huggingface_api_token"]} + ) + + env_vars = {k: v or "" for k, v in env_vars.items()} + + cfg = copy_config(cfg, q) + cfg.output_directory = f"{get_output_dir(q)}/{cfg.experiment_name}/" + os.makedirs(cfg.output_directory) + 
save_config_yaml(f"{cfg.output_directory}/cfg.yaml", cfg) + + # Start the training process + p = start_process( + cfg=cfg, gpu_list=gpu_list, process_queue=process_queue, env_vars=env_vars + ) + + logger.info(f"Process: {p.pid}, Queue: {process_queue}, GPUs: {gpu_list}") + + experiment = Experiment( + name=cfg.experiment_name, + mode="train", + dataset=q.client[f"{pre}/dataset"], + config_file=q.client[f"{pre}/cfg_file"], + path=cfg.output_directory, + seed=cfg.environment.seed, + process_id=p.pid, + gpu_list=",".join(gpu_list), + ) + + q.client.app_db.add_experiment(experiment) + + +def get_frame_stats(frame): + non_numeric_cols = frame.select_dtypes(object).columns + is_str_cols = [ + x + for x in non_numeric_cols + if frame[x].dropna().size and (frame[x].dropna().apply(type) == str).all() + ] + cols_to_drop = [x for x in non_numeric_cols if x not in is_str_cols] + + if len(cols_to_drop): # drop array/list/non-str object columns + frame = frame.drop(columns=cols_to_drop) + non_numeric_cols = frame.select_dtypes(object).columns + + if len(frame.columns) == 0: + return None + + numeric_cols = [col for col in frame if col not in non_numeric_cols] + + if len(non_numeric_cols) == 0 or len(numeric_cols) == 0: + stats = frame.describe() + if len(numeric_cols): + stats = stats.round(decimals=3) + stats.loc["unique"] = frame.nunique() # unique is part of describe for str + + else: + stats1 = frame[non_numeric_cols].describe() + stats2 = frame[numeric_cols].describe().round(decimals=3) + + stats2.loc["unique"] = frame[numeric_cols].nunique() + stats = ( + stats1.reset_index() + .merge(stats2.reset_index(), how="outer", on="index") + .fillna("") + ).set_index("index") + + stats = stats.T.reset_index().rename(columns={"index": "column"}) + + for col in ["count", "unique"]: + if col in stats: + stats[col] = stats[col].astype(int) + + return stats + + +def dir_file_table(current_path: str) -> pd.DataFrame: + results = [".."] + try: + if os.path.isdir(current_path): + files = 
os.listdir(current_path) + files = sorted([f for f in files if not f.startswith(".")], key=str.lower) + results.extend(files) + except Exception: + logger.error(f"Error while listing folder '{current_path}':", exc_info=True) + + return pd.DataFrame({current_path: results}) + + +def get_download_link(q, artifact_path): + new_path = os.path.relpath(artifact_path, get_output_dir(q)) + new_path = os.path.join(get_download_dir(q), new_path) + url_path = os.path.relpath(new_path, get_output_dir(q)) + + if not os.path.exists(new_path): + os.makedirs(os.path.dirname(new_path), exist_ok=True) + os.symlink(os.path.abspath(artifact_path), os.path.abspath(new_path)) + + # return a relative path so that downloads work when the instance is + # behind a reverse proxy or being accessed by a public IP in a public + # cloud. + + return url_path + + +def check_valid_upload_content(upload_path: str) -> Tuple[bool, str]: + if upload_path.endswith("zip"): + valid = zipfile.is_zipfile(upload_path) + error = "" if valid else "File is not a zip file" + else: + valid = is_valid_data_frame(upload_path) + error = "" if valid else "File does not have valid format" + + if not valid: + os.remove(upload_path) + + return valid, error + + +def flatten_dict(d: collections.abc.MutableMapping) -> dict: + """ + Adapted from https://stackoverflow.com/a/6027615 + Does not work with nesting and mutiple keys with the same name! 
+ + Args: + d: dict style object + Return: + A flattened dict + """ + + items: List[Tuple[Any, Any]] = [] + for k, v in d.items(): + if isinstance(v, collections.abc.MutableMapping): + items.extend(flatten_dict(v).items()) + else: + items.append((k, v)) + return dict(items) + + +def get_unique_name(expected_name, existing_names, is_invalid_function=None): + """ + Return a new name that does not exist in list of existing names + + Args: + expected_name: preferred name + existing_names: list of existing names + is_invalid_function: optional callable, to determine if the new name is + invalid + Return: + new name + """ + + new_name = expected_name + cnt = 1 + + while new_name in existing_names or ( + is_invalid_function is not None and is_invalid_function(new_name) + ): + new_name = f"{expected_name}.{cnt}" + cnt += 1 + + return new_name + + +def get_unique_dataset_name(q, dataset_name, include_all_folders=True): + """ + Return a dataset name that does not exist yet + + Args: + q: Q + dataset_name: preferred dataset name + include_all_folders: whether to also consider all (temp) dataset folders + Return: + new dataset_name + """ + datasets_df = q.client.app_db.get_datasets_df() + + existing_names = datasets_df["name"].values.tolist() + if include_all_folders: + existing_names.extend(os.listdir(get_data_dir(q))) + + return get_unique_name(dataset_name, existing_names) + + +def get_valid_temp_data_folder(q: Q, folder_path: str) -> str: + """ + Return new temporary data folder path not associated with any existing dataset + + Args: + q: Q + folder_path: original folder_path + Return: + new folder path not associated with any existing dataset + """ + dirname = os.path.dirname(folder_path) + basename = os.path.basename(folder_path) + unique_name = get_unique_dataset_name(q, basename, include_all_folders=False) + return os.path.join(dirname, unique_name) + + +def remove_temp_files(q: Q): + """ + Remove any temp folders leftover from dataset import + """ + + datasets_df = 
q.client.app_db.get_datasets_df() + all_files = glob.glob(os.path.join(get_data_dir(q), "*")) + for file in all_files: + if not any([path in file for path in datasets_df["path"].values]): + if os.path.isdir(file): + shutil.rmtree(file) + else: + os.remove(file) + + +def get_gpu_usage(): + usage = 0.0 + all_gpus = GPUtil.getGPUs() + for gpu in all_gpus: + usage += gpu.load + + usage /= len(all_gpus) + return usage * 100 + + +def get_single_gpu_usage(sig_figs=1, highlight=None): + all_gpus = GPUtil.getGPUs() + items = [] + for i, gpu in enumerate(all_gpus): + gpu_load = f"{round(gpu.load * 100, sig_figs)}%" + memory_used = get_size_str( + gpu.memoryUsed, sig_figs=1, input_unit="MB", output_unit="GB" + ) + memory_total = get_size_str( + gpu.memoryTotal, sig_figs=1, input_unit="MB", output_unit="GB" + ) + + if highlight is not None: + gpu_load = f"**{gpu_load}**" + memory_used = f"**{memory_used}**" + memory_total = f"**{memory_total}**" + + items.append( + ui.text( + f"GPU #{i + 1} - current utilization: {gpu_load} - " + f"VRAM usage: {memory_used} / {memory_total} - {gpu.name}" + ) + ) + return items + + +def copy_config(cfg: Any, q: Q) -> Any: + """Makes a copy of the config + + Args: + cfg: config object + Returns: + copy of the config + """ + # make unique yaml file using uuid + os.makedirs(get_output_dir(q), exist_ok=True) + tmp_file = os.path.join(f"{get_output_dir(q)}/", str(uuid.uuid4()) + ".yaml") + save_config_yaml(tmp_file, cfg) + cfg = load_config_yaml(tmp_file) + os.remove(tmp_file) + return cfg + + +def make_label(title: str, appendix: str = "") -> str: + """Cleans a label + + Args: + title: title to clean + appendix: optional appendix + + Returns: + Cleaned label + + """ + label = " ".join(w.capitalize() for w in title.split("_")) + appendix + label = label.replace("Llm", "LLM") + return label + + +def get_cfg_list_items(cfg) -> List: + items = parse_cfg_dataclass(cfg) + x = [] + for item in items: + for k, v in item.items(): + 
# https://stackoverflow.com/questions/2059482/temporarily-modify-the-current-processs-environment
@contextlib.contextmanager
def set_env(**environ):
    """
    Temporarily set the process environment variables.

    >>> with set_env(PLUGINS_DIR='test/plugins'):
    ...     "PLUGINS_DIR" in os.environ
    True

    >>> "PLUGINS_DIR" in os.environ
    False

    :type environ: dict[str, unicode]
    :param environ: Environment variables to set
    """
    old_environ = dict(os.environ)
    os.environ.update(environ)
    try:
        yield
    finally:
        # restore the exact previous environment, dropping any vars added
        os.environ.clear()
        os.environ.update(old_environ)


def hf_repo_friendly_name(name: str) -> str:
    """
    Converts the given string into a huggingface-repository-friendly name.

    • Repo id must use alphanumeric chars or '-', '_', and '.' allowed.
    • '--' and '..' are forbidden
    • '-' and '.' cannot start or end the name
    • max length is 96
    """
    # runs of disallowed chars collapse to a single '-', so '--' cannot occur
    name = re.sub("[^0-9a-zA-Z]+", "-", name)
    name = name[1:] if name.startswith("-") else name
    name = name[:-1] if name.endswith("-") else name
    name = name[:96]
    # Fix: truncating to 96 chars can re-expose a trailing '-' (which the
    # earlier strip already removed from the untruncated name), violating
    # the "cannot end with '-'" rule. Strip once more after truncation.
    name = name[:-1] if name.endswith("-") else name
    return name


def save_hf_yaml(
    path: str, account_name: str, model_name: str, repo_id: Optional[str] = None
):
    """Write account/model/repo metadata for a HF push to a yaml file.

    Args:
        path: destination yaml file path
        account_name: HF account name
        model_name: model name within the account
        repo_id: full repo id; defaults to "<account_name>/<model_name>"
    """
    with open(path, "w") as fp:
        yaml.dump(
            {
                "account_name": account_name,
                "model_name": model_name,
                "repo_id": repo_id if repo_id else f"{account_name}/{model_name}",
            },
            fp,
            indent=4,
        )
class ThemeColors(TypedDict):
    """Color settings for the two theme modes of the app."""

    # each value maps a color role ("primary", "background_color") to a hex color
    light: dict
    dark: dict


class WaveTheme:
    """Central place for UI colors; resolved per-client via q.client.theme_dark."""

    _theme_colors: ThemeColors = {
        "light": {
            "primary": "#000000",
            "background_color": "#ffffff",
        },
        "dark": {
            "primary": "#FEC925",
            "background_color": "#121212",
        },
    }

    # colors for experiment-state tags shown in tables
    states = {
        "zombie": "#E0E0E0",
        "queued": "#B8B8B8",
        "running": "#FFE52B",
        "finished": "#92E95A",
        "failed": "#DA0000",
        "stopped": "#DA0000",
    }
    color = "#2196F3"
    # space-separated palette used for multi-series plots
    color_range = "#2196F3 #CC7722 #2CA02C #D62728 #9467BD #17BECF #E377C2 #DDAA22"

    def __repr__(self) -> str:
        return "WaveTheme"

    def get_value_by_key(self, q: Q, key: str):
        """Resolve a color role for the client's current (dark/light) theme."""
        value = (
            self._theme_colors["dark"][key]
            if q.client.theme_dark
            else self._theme_colors["light"][key]
        )
        return value

    def get_primary_color(self, q: Q):
        primary_color = self.get_value_by_key(q, "primary")
        return primary_color

    def get_background_color(self, q: Q):
        background_color = self.get_value_by_key(q, "background_color")
        return background_color


# module-level singleton used throughout the app
wave_theme = WaveTheme()
+ """ + + df = df.reset_index(drop=True) + sortables = sortables or [] + filterables = filterables or [] + searchables = searchables or [] + numerics = numerics or [] + times = times or [] + tags = tags or [] + progresses = progresses or [] + markdown_cells = markdown_cells or [] + min_widths = min_widths or {} + max_widths = max_widths or {} + + if numerics == []: + numerics = df.select_dtypes(include=["float64", "float32"]).columns.tolist() + + cell_types = {} + for col in tags: + cell_types[col] = ui.tag_table_cell_type( + name="tags", + tags=[ + ui.tag(label=state, color=wave_theme.states[state]) + for state in wave_theme.states + ], + ) + for col in progresses: + cell_types[col] = ui.progress_table_cell_type( + wave_theme.get_primary_color(q), + ) + for col in markdown_cells: + # enables rendering of code in wave table + cell_types[col] = ui.markdown_table_cell_type() + + columns = [ + ui.table_column( + name=str(col), + label=str(col), + sortable=True if col in sortables else False, + filterable=True if col in filterables else False, + searchable=True if col in searchables else False, + data_type=( + "number" if col in numerics else ("time" if col in times else "string") + ), + cell_type=cell_types[col] if col in cell_types else None, + min_width=min_widths[col] if col in min_widths else None, + max_width=max_widths[col] if col in max_widths else None, + link=True if col == link_col else False, + cell_overflow=cell_overflow, + ) + for col in df.columns.values + ] + + if actions: + commands = [ui.command(name=key, label=val) for key, val in actions.items()] + action_column = ui.table_column( + name="actions", + label="action" if int(min_widths["actions"]) > 30 else "", + cell_type=ui.menu_table_cell_type(name="commands", commands=commands), + min_width=min_widths["actions"], + ) + columns.append(action_column) + + rows = [] + for i, row in df.iterrows(): + cells = [] + + for cell in row: + str_repr = str(cell) + + if len(str_repr) >= max_char_length: + 
def _expando_section(title: str, data: dict) -> str:
    """Render one q.* expando dict as a markdown section, dropping
    credential-looking keys (containing "_key" or "_token")."""
    return (
        f"### {title}\n```"
        + "\n".join(
            f"{k}: {v}"
            for k, v in data.items()
            if "_key" not in k and "_token" not in k
        )
        + "\n```"
    )


def wave_utils_error_card(
    q: Q,
    box: str,
    app_name: str,
    github: str,
    q_app: dict,
    error: Exception,
    q_user: dict,
    q_client: dict,
    q_events: dict,
    q_args: dict,
) -> ui.FormCard:
    """
    Card for handling crash.

    Shows an apology with Restart/Report buttons; the diagnostic sections
    are created invisible and toggled visible by report_error(), which
    addresses them by item index — keep the items list order stable.
    """

    # Fix: the q.app section previously filtered with the always-truthy
    # string literal "_token not in k" instead of the expression
    # '"_token" not in k', so token-like keys were NOT redacted from the
    # crash report. All five sections now share one helper with the
    # correct filter.
    q_app_str = _expando_section("q.app", q_app)
    q_user_str = _expando_section("q.user", q_user)
    q_client_str = _expando_section("q.client", q_client)
    q_events_str = _expando_section("q.events", q_events)
    q_args_str = _expando_section("q.args", q_args)

    type_, value_, traceback_ = sys.exc_info()
    stack_trace = traceback.format_exception(type_, value_, traceback_)
    git_version = subprocess.getoutput("git rev-parse HEAD")
    # keep the first stack trace; later errors must not overwrite it
    if not q.app.wave_utils_stack_trace_str:
        q.app.wave_utils_stack_trace_str = "### stacktrace\n" + "\n".join(stack_trace)

    card = ui.form_card(
        box=box,
        items=[
            ui.stats(
                items=[
                    ui.stat(
                        label="",
                        value="Oops!",
                        caption="Something went wrong",
                        icon="Error",
                        icon_color="#CDDD38",
                    )
                ],
                justify="center",
            ),
            ui.separator(),
            ui.text_l(content="Apologies for the inconvenience!"),
            ui.buttons(
                items=[
                    ui.button(name="home", label="Restart", primary=True),
                    ui.button(name="report_error", label="Report", primary=True),
                ],
                justify="center",
            ),
            ui.separator(visible=False),
            ui.text(
                content=f"""
To report this error,
please open an issues on Github {github}
with the details below: """,
                visible=False,
            ),
            ui.text_l(content=f"Report Issue: {app_name}", visible=False),
            ui.text_xs(content=q_app_str, visible=False),
            ui.text_xs(content=q_user_str, visible=False),
            ui.text_xs(content=q_client_str, visible=False),
            ui.text_xs(content=q_events_str, visible=False),
            ui.text_xs(content=q_args_str, visible=False),
            ui.text_xs(content=q.app.wave_utils_stack_trace_str, visible=False),
            ui.text_xs(content=f"### Error\n {error}", visible=False),
            ui.text_xs(content=f"### Git Version\n {git_version}", visible=False),
        ],
    )

    return card


async def wave_utils_handle_error(q: Q, error: Exception):
    """
    Handle any app error.

    Clears the dashboard and replaces it with the crash card built above.
    """

    await clean_dashboard(q, mode="error")

    card_name = "wave_utils_error"

    q.page[card_name] = wave_utils_error_card(
        q,
        box="content",
        error=error,
        app_name=f"{default_cfg.name} at {default_cfg.url}",
        github=default_cfg.github,
        q_app=expando_to_dict(q.app),
        q_user=expando_to_dict(q.user),
        q_client=expando_to_dict(q.client),
        q_events=expando_to_dict(q.events),
        q_args=expando_to_dict(q.args),
    )
    q.client.delete_cards.add("wave_utils_error")

    await q.page.save()
async def report_error(q: Q):
    """
    Report error details.

    Rebuilds the crash card, then flips the hidden diagnostic items to
    visible so the user can copy them into a GitHub issue.
    """
    card_name = "wave_utils_error"
    # Show card again. Required since card can be cleared
    # NOTE(review): q.app.wave_utils_error_str is read here but is not set
    # by wave_utils_handle_error in this file — confirm it is set elsewhere.
    await wave_utils_handle_error(
        q,
        error=q.app.wave_utils_error_str,
    )

    # NOTE(review): indices 4..14 are tightly coupled to the item order in
    # wave_utils_error_card (4 = hidden separator, 5 = report instructions,
    # 6 = issue title, 7-14 = q.* sections, stacktrace, error, git version).
    # Any reordering there must be mirrored here.
    q.page[card_name].items[4].separator.visible = True
    q.page[card_name].items[5].text.visible = True
    q.page[card_name].items[6].text_l.visible = True
    q.page[card_name].items[7].text_xs.visible = True
    q.page[card_name].items[8].text_xs.visible = True
    q.page[card_name].items[9].text_xs.visible = True
    q.page[card_name].items[10].text_xs.visible = True
    q.page[card_name].items[11].text_xs.visible = True
    q.page[card_name].items[12].text_xs.visible = True
    q.page[card_name].items[13].text_xs.visible = True
    q.page[card_name].items[14].text_xs.visible = True

    await q.page.save()


async def busy_dialog(
    q: Q, title: str = "", text: str = "", force_wait: bool = False
) -> None:
    """Creates busy dialog

    Shows a blocking progress dialog; when force_wait is True the dialog is
    kept up for one second and then removed, otherwise it stays until the
    caller clears it.
    """

    q.page["meta"].dialog = ui.dialog(
        title=title,
        primary=True,
        items=[
            ui.progress(label=text),
        ],
        blocking=True,
    )
    await q.page.save()
    if force_wait:
        await q.sleep(1)
    q.page["meta"].dialog = None
def _get_bases_below_parent(cls: type, parent: type, bases=None) -> Set[type]:
    """Collect the ancestor class(es) of ``cls`` that inherit directly
    from ``parent`` (recursing up the hierarchy until ``parent`` is a
    direct base)."""
    if bases is None:
        bases = set()

    if parent not in cls.__bases__:
        for base in cls.__bases__:
            bases.update(_get_bases_below_parent(base, parent, bases))
    else:
        # don't support multiple inheritance when
        # inheriting directly from the parent
        assert len(cls.__bases__) == 1

        bases.add(cls)

    return bases


@dataclass
class DefaultConfig:
    """
    Template for any configuration file
    """

    def __post_init__(self):
        # per-field registries driving the UI: possible values and visibility
        self._possible_values: Dict[str, Any] = {k: None for k in self.__dict__}
        self._visibility = {k: 0 for k in self.__dict__}

        # go up the class hierarchy until we are one below the `DefaultConfig`
        bases = _get_bases_below_parent(self.__class__, DefaultConfig)

        # there must be exactly one unique class up the class hierarchy
        # which inherits directly from the `DefaultConfig`
        assert len(bases) == 1
        base = next(iter(bases))

        # initialize the order to the fields this class has
        self._order = Order([field.name for field in fields(base)])

        # initialize nesting dependencies
        self._nesting = Nesting()

    def _get_possible_values(
        self, field: str, value: Any, type_annotation: type, mode: str, dataset_fn=None
    ) -> Optional[Tuple[Optional[possible_values.Value], Any]]:
        """
        Returns a set of possible values for the field provided, and the current value.

        Args:
            field: the field
            value: the preliminary value of the field.
            type_annotation: Type Annotation of the field.
            mode: current mode, one of {"train", "test", "predict"}.
            dataset_fn: A function returning a tuple (dataset, value). Will be called
                if the possible values depend on the dataset.

        Returns:
            Possible values for the field, the current value.
        """

        poss_values = self._possible_values.get(field, None)

        if isinstance(poss_values, possible_values.DatasetValue):
            if dataset_fn is None:
                raise ValueError(
                    f"{poss_values} needs a dataset to compute possible values!\n"
                    "`dataset_fn` must be provided."
                )

            dataset, value = dataset_fn(field, value)
            poss_values, value = poss_values.get_value(
                dataset=dataset, value=value, type_annotation=type_annotation, mode=mode
            )
        elif isinstance(poss_values, Sequence):
            # a raw sequence registry entry is interpreted as either a numeric
            # (min, max, step) triple or a tuple of string choices
            # NOTE(review): a plain str is also a Sequence and would be split
            # into its characters by the str branch — confirm only lists or
            # tuples are ever registered here.
            if all(isinstance(x, (float, int)) for x in poss_values):
                poss_values = possible_values.Number(
                    min=poss_values[0], max=poss_values[1], step=poss_values[2]
                )
            elif all(isinstance(x, str) for x in poss_values):
                poss_values = possible_values.String(tuple(poss_values))
            else:
                raise ValueError(
                    f"Could not interpret {poss_values} as any possible value class."
                )

        return poss_values, value

    def _get_tooltips(self, field: str, predict: bool = False) -> Optional[str]:
        """
        Returns a tooltip for the field provided
        """
        return tooltips.get(f"experiments_{field}", None)

    def _get_visibility(self, field: str) -> Optional[int]:
        """Returns a visibility level for the field provided.
        0 -- visible in the Wave app
        -1 -- not visible in the Wave App
        -2 -- visible in Dataset Import, but not visible in Create Experiment
        """

        return self._visibility.get(field, None)

    def _get_nesting_triggers(self) -> Set[str]:
        """Returns a Set of keys other elements are depending on"""

        return self._nesting.triggers

    def _get_nesting_dependencies(self, key: str) -> List[Dependency] | None:
        """Returns all dependencies for a given key, or None if there are none"""

        if key in self._nesting.dependencies:
            dependencies = self._nesting.dependencies[key]
        else:
            dependencies = None
        return dependencies

    def _get_order(self, warn_if_unset=True) -> List[str]:
        """
        Returns the order in which to show the keys in the config.

        Args:
            warn_if_unset: Whether to log a warning if order is unset for multiple keys.

        Returns:
            A list of the same length and with same elements as `self.__dict__.keys()`.
        """

        keys = self.__dict__.keys()

        ordered_keys = [key for key in self._order if key in keys]
        unordered_keys = list(set(keys) - set(ordered_keys))

        # only user-visible keys are worth warning about
        unordered_ui_keys = [
            key
            for key in unordered_keys
            if not (key.startswith("_") or self._get_visibility(key) == -1)
        ]

        # warn if there is more than one key without order.
        # one is not problematic since it will just always be last
        if warn_if_unset and len(unordered_ui_keys) > 1:
            logger.warning(f"No order set for keys: {unordered_ui_keys}.")

        return ordered_keys + unordered_keys

    @classmethod
    def get_annotations(cls):
        """Returns type annotations through all the Parent config classes"""

        d: Dict[str, Any] = {}
        # walk the MRO from object down so subclasses override parents
        for c in cls.mro()[::-1]:
            try:
                d.update(**c.__annotations__)
            except AttributeError:
                # object, at least, has no __annotations__ attribute.
                pass
        return d

    @classmethod
    def from_dict(cls, d: dict):
        """Creates a config object from a dictionary"""
        # silently drop keys that are not fields of this config, but warn
        d_filtered = {k: v for k, v in d.items() if k in cls.get_annotations()}
        if len(d) != len(d_filtered):
            logger.warning(
                f"Keys {set(d.keys()) - set(d_filtered.keys())} are not in the config."
            )
        return cls(**d_filtered)  # mypy: ignore
+ """ + + experiment_name: str + output_directory: str + llm_backbone: str + + dataset: Any + tokenizer: Any + architecture: Any + training: Any + augmentation: Any + prediction: Any + environment: Any + logging: Any + + @property + def problem_type(self) -> str: + """ + Parse problem_type from config filename, + for example: text_causal_language_modeling_config.py -> causal_language_modeling + """ + return type(self).__dict__["__module__"].split(".")[-1].replace("_config", "") + + @classmethod + def from_dict(cls, cfg_dict: dict): + class_fields = {f.name: f for f in dataclasses.fields(cls)} + + # Prepare arguments for creating a new dataclass instance + init_args = {} + for field_name, field_obj in class_fields.items(): + if hasattr(field_obj.type, "from_dict"): + attr_value = cfg_dict.get(field_name, {}) + init_args[field_name] = field_obj.type.from_dict(attr_value) + else: + # Use the value from cfg_dict, + # or the field's default value if not available in cfg_dict + init_args[field_name] = cfg_dict.get(field_name, field_obj.default) + + return cls(**init_args) + + def check(self) -> Dict[str, List]: + """ + Checks for errors (incompatible settings) for the specific problem type. + Returns: + A dictionary with two keys: + - "title": A list of error titles. + - "message": A list of error messages. 
+ """ + errors: Dict[str, List] = {"title": [], "message": []} + return errors diff --git a/llm_studio/python_configs/cfg_checks.py b/llm_studio/python_configs/cfg_checks.py new file mode 100644 index 0000000000000000000000000000000000000000..15bb6036658ed28faebaf8b4649291aa9e68697b --- /dev/null +++ b/llm_studio/python_configs/cfg_checks.py @@ -0,0 +1,114 @@ +import logging +import os +from typing import Dict, List + +import torch + +from llm_studio.app_utils.config import default_cfg +from llm_studio.python_configs.base import DefaultConfigProblemBase +from llm_studio.src.utils.export_utils import get_size_str + +logger = logging.getLogger(__name__) + +__all__ = ["check_config_for_errors"] + + +def check_config_for_errors(cfg: DefaultConfigProblemBase) -> dict: + """ + Checks the configuration for consistency. + Parameters: + - cfg (DefaultConfigProblemBase): + The config object to be checked. + + Returns: + A dictionary with two keys: + - "title": A list of error titles. + - "message": A list of error messages. + """ + errors = check_for_common_errors(cfg) + problem_type_errors = cfg.check() + errors["title"].extend(problem_type_errors["title"]) + errors["message"].extend(problem_type_errors["message"]) + return errors + + +def check_for_common_errors(cfg: DefaultConfigProblemBase) -> dict: + errors: Dict[str, List] = {"title": [], "message": []} + if not len(cfg.environment.gpus) > 0: + errors["title"] += ["No GPU selected"] + errors["message"] += [ + "Please select at least one GPU to start the experiment! " + ] + + if len(cfg.environment.gpus) > torch.cuda.device_count(): + errors["title"] += ["More GPUs selected than available"] + errors["message"] += [ + f"There are {cfg.environment.gpus} GPUs selected but only " + f"{torch.cuda.device_count()} GPUs available." + "This error can happen when you start from an experiment configuration " + "that was created on a different machine. Please deselect all GPUs and " + "select the GPUs you want to use again. 
" + ] + + if cfg.training.save_best_checkpoint and cfg.training.train_validation_data: + errors["title"] += ["Save Best Checkpoint incompatible settings."] + errors["message"] += [ + "Save Best Checkpoint is not compatible with " + "Train Validation Data. " + "Please set Save Best Checkpoint to False or disable " + "Train Validation Data. " + ] + + stats = os.statvfs(".") + available_size = stats.f_frsize * stats.f_bavail + if available_size < default_cfg.min_experiment_disk_space: + errors["title"] += ["Not enough disk space."] + errors["message"] += [ + f"Not enough disk space. Available space is {get_size_str(available_size)}." + f" Required space is " + f"{get_size_str(default_cfg.min_experiment_disk_space)}. " + "Experiment has not started. " + "Please ensure that you have enough disk space before " + "starting the experiment." + ] + + # see create_nlp_backbone + if ( + cfg.architecture.backbone_dtype in ["int4", "int8"] + and not cfg.architecture.pretrained + ): + errors["title"] += ["Quantization without pretrained weights."] + errors["message"] += [ + "Quantization is only supported for pretrained models. " + "Please enable pretrained model or disable quantization." + ] + + if ( + not cfg.training.lora + and cfg.architecture.backbone_dtype not in ["bfloat16", "float32"] + and cfg.training.epochs > 0 + ): + errors["title"] += [f"Pure {cfg.architecture.backbone_dtype} training."] + errors["message"] += [ + f"When not using LORA, {cfg.architecture.backbone_dtype} training will " + "likely lead to unstable training. " + "Please use LORA or set Backbone Dtype to bfloat16 or float32." + ] + + if cfg.environment.use_deepspeed and cfg.architecture.backbone_dtype in [ + "int8", + "int4", + ]: + errors["title"] += ["Deepspeed does not support quantization."] + errors["message"] += [ + "Deepspeed do not support backbone type " + f"{cfg.architecture.backbone_dtype}. " + "Please set backbone type to float16 or bfloat16 for using deepspeed." 
+ ] + if cfg.environment.use_deepspeed and len(cfg.environment.gpus) < 2: + errors["title"] += ["Deepspeed not supported for single GPU."] + errors["message"] += [ + "Deepspeed does not support single GPU training. " + "Please select more than one GPU or disable deepspeed." + ] + return errors diff --git a/llm_studio/python_configs/text_causal_classification_modeling_config.py b/llm_studio/python_configs/text_causal_classification_modeling_config.py new file mode 100644 index 0000000000000000000000000000000000000000..0f4172cc67920ed98b00e9b9514af1ba896ff6da --- /dev/null +++ b/llm_studio/python_configs/text_causal_classification_modeling_config.py @@ -0,0 +1,218 @@ +import os +from dataclasses import dataclass, field +from typing import Any, Dict, List, Tuple + +import llm_studio.src.datasets.text_causal_classification_ds +import llm_studio.src.plots.text_causal_classification_modeling_plots +from llm_studio.python_configs.base import DefaultConfig, DefaultConfigProblemBase +from llm_studio.python_configs.text_causal_language_modeling_config import ( + ConfigNLPAugmentation, + ConfigNLPCausalLMArchitecture, + ConfigNLPCausalLMDataset, + ConfigNLPCausalLMEnvironment, + ConfigNLPCausalLMLogging, + ConfigNLPCausalLMTokenizer, + ConfigNLPCausalLMTraining, +) +from llm_studio.src import possible_values +from llm_studio.src.losses import text_causal_classification_modeling_losses +from llm_studio.src.metrics import text_causal_classification_modeling_metrics +from llm_studio.src.models import text_causal_classification_modeling_model +from llm_studio.src.utils.modeling_utils import generate_experiment_name + + +@dataclass +class ConfigNLPCausalClassificationDataset(ConfigNLPCausalLMDataset): + dataset_class: Any = ( + llm_studio.src.datasets.text_causal_classification_ds.CustomDataset + ) + system_column: str = "None" + prompt_column: Tuple[str, ...] 
= ("instruction", "input") + answer_column: str = "label" + num_classes: int = 1 + parent_id_column: str = "None" + + text_system_start: str = "" + text_prompt_start: str = "" + text_answer_separator: str = "" + + add_eos_token_to_system: bool = False + add_eos_token_to_prompt: bool = False + add_eos_token_to_answer: bool = False + + _allowed_file_extensions: Tuple[str, ...] = ("csv", "pq", "parquet") + + def __post_init__(self): + self.prompt_column = ( + tuple( + self.prompt_column, + ) + if isinstance(self.prompt_column, str) + else tuple(self.prompt_column) + ) + super().__post_init__() + + self._possible_values["num_classes"] = (1, 100, 1) + + self._visibility["personalize"] = -1 + self._visibility["chatbot_name"] = -1 + self._visibility["chatbot_author"] = -1 + self._visibility["mask_prompt_labels"] = -1 + self._visibility["add_eos_token_to_answer"] = -1 + + +@dataclass +class ConfigNLPCausalClassificationTraining(ConfigNLPCausalLMTraining): + loss_class: Any = text_causal_classification_modeling_losses.Losses + loss_function: str = "BinaryCrossEntropyLoss" + + learning_rate: float = 0.0001 + differential_learning_rate_layers: Tuple[str, ...] 
= ("classification_head",) + differential_learning_rate: float = 0.00001 + + def __post_init__(self): + super().__post_init__() + self._possible_values["loss_function"] = self.loss_class.names() + + self._possible_values["differential_learning_rate_layers"] = ( + possible_values.String( + values=("backbone", "embed", "classification_head"), + allow_custom=False, + placeholder="Select optional layers...", + ) + ) + + +@dataclass +class ConfigNLPCausalClassificationTokenizer(ConfigNLPCausalLMTokenizer): + max_length_prompt: int = 512 + max_length: int = 512 + + def __post_init__(self): + super().__post_init__() + + self._visibility["max_length_answer"] = -1 + + +@dataclass +class ConfigNLPCausalClassificationArchitecture(ConfigNLPCausalLMArchitecture): + model_class: Any = text_causal_classification_modeling_model.Model + + def __post_init__(self): + super().__post_init__() + + +@dataclass +class ConfigNLPCausalClassificationPrediction(DefaultConfig): + metric_class: Any = text_causal_classification_modeling_metrics.Metrics + metric: str = "AUC" + batch_size_inference: int = 0 + + def __post_init__(self): + super().__post_init__() + + self._possible_values["metric"] = self.metric_class.names() + self._possible_values["batch_size_inference"] = (0, 512, 1) + + self._visibility["metric_class"] = -1 + + +@dataclass +class ConfigNLPCausalClassificationEnvironment(ConfigNLPCausalLMEnvironment): + _model_card_template: str = "text_causal_classification_model_card_template.md" + _summary_card_template: str = ( + "text_causal_classification_experiment_summary_card_template.md" + ) + + def __post_init__(self): + super().__post_init__() + + +@dataclass +class ConfigNLPCausalClassificationLogging(ConfigNLPCausalLMLogging): + plots_class: Any = ( + llm_studio.src.plots.text_causal_classification_modeling_plots.Plots + ) + + +@dataclass +class ConfigProblemBase(DefaultConfigProblemBase): + output_directory: str = f"output/{os.path.basename(__file__).split('.')[0]}" + 
@dataclass
class ConfigProblemBase(DefaultConfigProblemBase):
    """Top-level config for the causal classification problem type."""

    output_directory: str = f"output/{os.path.basename(__file__).split('.')[0]}"
    experiment_name: str = field(default_factory=generate_experiment_name)
    _parent_experiment: str = ""
    llm_backbone: str = "h2oai/h2ogpt-4096-llama2-7b"

    dataset: ConfigNLPCausalClassificationDataset = field(
        default_factory=ConfigNLPCausalClassificationDataset
    )
    # Fix: the classification-specific tokenizer config defined in this file
    # (max_length_prompt=512, hidden max_length_answer) was never wired in —
    # the base causal LM tokenizer config was used instead, leaving
    # ConfigNLPCausalClassificationTokenizer dead code.
    tokenizer: ConfigNLPCausalClassificationTokenizer = field(
        default_factory=ConfigNLPCausalClassificationTokenizer
    )
    architecture: ConfigNLPCausalClassificationArchitecture = field(
        default_factory=ConfigNLPCausalClassificationArchitecture
    )
    training: ConfigNLPCausalClassificationTraining = field(
        default_factory=ConfigNLPCausalClassificationTraining
    )
    augmentation: ConfigNLPAugmentation = field(default_factory=ConfigNLPAugmentation)
    prediction: ConfigNLPCausalClassificationPrediction = field(
        default_factory=ConfigNLPCausalClassificationPrediction
    )
    environment: ConfigNLPCausalClassificationEnvironment = field(
        default_factory=ConfigNLPCausalClassificationEnvironment
    )
    logging: ConfigNLPCausalClassificationLogging = field(
        default_factory=ConfigNLPCausalClassificationLogging
    )

    def __post_init__(self):
        super().__post_init__()

        self._visibility["output_directory"] = -1

        self._possible_values["llm_backbone"] = possible_values.String(
            values=(
                "h2oai/h2o-danube2-1.8b-base",
                "h2oai/h2o-danube2-1.8b-chat",
                "h2oai/h2ogpt-4096-llama2-7b",
                "h2oai/h2ogpt-4096-llama2-7b-chat",
                "h2oai/h2ogpt-4096-llama2-13b",
                "h2oai/h2ogpt-4096-llama2-13b-chat",
                "h2oai/h2ogpt-4096-llama2-70b",
                "h2oai/h2ogpt-4096-llama2-70b-chat",
                "tiiuae/falcon-7b",
                "mistralai/Mistral-7B-v0.1",
                "HuggingFaceH4/zephyr-7b-beta",
                "google/gemma-2b",
                "google/gemma-7b",
                "stabilityai/stablelm-3b-4e1t",
                "microsoft/phi-2",
                "facebook/opt-125m",
            ),
            allow_custom=True,
        )

    def check(self) -> Dict[str, List]:
        """Validate loss/num_classes compatibility and unsupported columns.

        Returns:
            dict with "title" and "message" lists describing all failures.
        """
        errors: Dict[str, List] = {"title": [], "message": []}

        if self.training.loss_function == "CrossEntropyLoss":
            if self.dataset.num_classes == 1:
                errors["title"] += ["CrossEntropyLoss requires num_classes > 1"]
                errors["message"] += [
                    "CrossEntropyLoss requires num_classes > 1, "
                    "but num_classes is set to 1."
                ]
        elif self.training.loss_function == "BinaryCrossEntropyLoss":
            if self.dataset.num_classes != 1:
                errors["title"] += ["BinaryCrossEntropyLoss requires num_classes == 1"]
                errors["message"] += [
                    "BinaryCrossEntropyLoss requires num_classes == 1, "
                    "but num_classes is set to {}.".format(self.dataset.num_classes)
                ]
        if self.dataset.parent_id_column not in ["None", None]:
            errors["title"] += ["Parent ID column is not supported for classification"]
            errors["message"] += [
                "Parent ID column is not supported for classification datasets."
            ]

        return errors
@dataclass
class ConfigNLPCausalLMDataset(DefaultConfig):
    """Dataset settings for causal language modeling: data frames, column
    mapping, prompt/answer templating and validation strategy."""

    dataset_class: Any = (
        llm_studio.src.datasets.text_causal_language_modeling_ds.CustomDataset
    )

    personalize: bool = False
    chatbot_name: str = "h2oGPT"
    chatbot_author: str = "H2O.ai"

    train_dataframe: str = "/path/to/train.csv"
    validation_strategy: str = "automatic"
    validation_dataframe: str = ""
    validation_size: float = 0.01

    data_sample: float = 1.0
    data_sample_choice: Tuple[str, ...] = ("Train", "Validation")

    system_column: str = "None"
    prompt_column: Tuple[str, ...] = ("instruction", "input")
    answer_column: str = "output"
    parent_id_column: str = "None"

    text_system_start: str = "<|system|>"
    text_prompt_start: str = "<|prompt|>"
    text_answer_separator: str = "<|answer|>"

    limit_chained_samples: bool = False
    add_eos_token_to_system: bool = True
    add_eos_token_to_prompt: bool = True
    add_eos_token_to_answer: bool = True
    mask_prompt_labels: bool = True

    _allowed_file_extensions: Tuple[str, ...] = ("csv", "pq", "parquet")

    def __post_init__(self):
        # Fix: a bare string column name must become a 1-tuple. The previous
        # tuple(self.prompt_column,) call iterated the string, producing a
        # tuple of single characters (e.g. ('i', 'n', 's', ...)) instead of
        # ("instruction",).
        self.prompt_column = (
            (self.prompt_column,)
            if isinstance(self.prompt_column, str)
            else tuple(self.prompt_column)
        )
        super().__post_init__()

        self._possible_values["train_dataframe"] = possible_values.Files(
            prefer_with=lambda path: "train" in path
        )
        self._possible_values["validation_strategy"] = possible_values.String(
            values=(
                ("custom", "Custom holdout validation"),
                ("automatic", "Automatic holdout validation"),
            ),
            allow_custom=False,
        )
        self._possible_values["validation_dataframe"] = possible_values.Files(
            add_none=True, prefer_with=lambda path: "val" in path
        )
        self._possible_values["validation_size"] = (0.01, 0.95, 0.01)
        self._possible_values["data_sample"] = (0.01, 1, 0.01)
        self._possible_values["data_sample_choice"] = ["Train", "Validation"]
        self._possible_values["system_column"] = possible_values.Columns(
            prefer_with=lambda column: column in ("system",), add_none=True
        )
        self._possible_values["prompt_column"] = possible_values.Columns(
            prefer_with=lambda column: column in ("instruction", "prompt")
        )
        self._possible_values["answer_column"] = possible_values.Columns(
            prefer_with=lambda column: column in ("answer", "output")
        )
        self._possible_values["parent_id_column"] = possible_values.Columns(
            prefer_with=lambda column: column in ("parent",), add_none=True
        )

        # UI nesting: show dependent fields only when their trigger is set
        self._nesting.add(
            ["chatbot_name", "chatbot_author"],
            [Dependency(key="personalize", value=True, is_set=True)],
        )

        self._nesting.add(
            ["validation_dataframe"],
            [Dependency(key="validation_strategy", value="custom", is_set=True)],
        )

        self._nesting.add(
            ["validation_size"],
            [Dependency(key="validation_strategy", value="automatic", is_set=True)],
        )

        self._nesting.add(
            ["data_sample_choice"],
            [Dependency(key="data_sample", value=1, is_set=False)],
        )

        self._nesting.add(
            ["limit_chained_samples"],
            [Dependency(key="parent_id_column", value="None", is_set=False)],
        )

        self._nesting.add(
            ["text_system_start", "add_eos_token_to_system"],
            [Dependency(key="system_column", value="None", is_set=False)],
        )

        self._visibility["dataset_class"] = -1
self._possible_values["gradient_clip"] = (0.0, 10.0, 0.1) + self._possible_values["grad_accumulation"] = (1, 8, 1) + + self._possible_values["lora_r"] = (1, 256, 1) + self._possible_values["lora_alpha"] = (1, 256, 1) + self._possible_values["lora_dropout"] = (0.0, 0.5, 0.01) + + self._possible_values["evaluation_epochs"] = (0.01, 1, 0.01) + + self._visibility["loss_class"] = -1 + self._visibility["drop_last_batch"] = -1 + self._visibility["differential_learning_rate_layers"] = 1 + self._visibility["differential_learning_rate"] = 1 + + self._nesting.add( + ["differential_learning_rate"], + [ + Dependency( + key="differential_learning_rate_layers", value=None, is_set=False + ) + ], + ) + self._nesting.add( + ["lora_r", "lora_alpha", "lora_dropout", "lora_target_modules"], + [Dependency(key="lora", value=False, is_set=False)], + ) + self._nesting.add( + ["train_validation_data"], + [Dependency(key="save_best_checkpoint", value=False, is_set=True)], + ) + + +@dataclass +class ConfigNLPCausalLMTokenizer(DefaultConfig): + max_length_prompt: int = 256 + max_length_answer: int = 256 + max_length: int = 512 + add_prompt_answer_tokens: bool = False + padding_quantile: float = 1.0 + use_fast: bool = True + + def __post_init__(self): + super().__post_init__() + self._possible_values["max_length_prompt"] = (32, 1024 * 16, 32) + self._possible_values["max_length_answer"] = (32, 1024 * 16, 32) + self._possible_values["max_length"] = (32, 1024 * 16, 32) + self._possible_values["padding_quantile"] = (0, 1, 0.01) + self._padding_side = "left" + + +@dataclass +class ConfigNLPCausalLMArchitecture(DefaultConfig): + model_class: Any = text_causal_language_modeling_model.Model + pretrained: bool = True + + backbone_dtype: str = "int4" + gradient_checkpointing: bool = True + force_embedding_gradients: bool = False + intermediate_dropout: float = 0 + pretrained_weights: str = "" + + def __post_init__(self): + super().__post_init__() + + self._possible_values["backbone_dtype"] = 
possible_values.String( + values=("float32", "bfloat16", "float16", "int8", "int4"), + allow_custom=False, + ) + self._possible_values["intermediate_dropout"] = (0, 0.5, 0.05) + + self._nesting.add( + ["force_embedding_gradients"], + [Dependency(key="lora", value=False, is_set=False)], + ) + + self._visibility["model_class"] = -1 + self._visibility["pretrained"] = -1 + + +@dataclass +class ConfigNLPAugmentation(DefaultConfig): + nlp_augmentations_class: Any = BaseNLPAug + token_mask_probability: float = 0.0 + skip_parent_probability: float = 0.0 + random_parent_probability: float = 0.0 + neftune_noise_alpha: float = 0.0 + + def __post_init__(self): + super().__post_init__() + self._possible_values["token_mask_probability"] = (0.0, 0.9, 0.05) + self._possible_values["skip_parent_probability"] = (0.0, 1.0, 0.05) + self._possible_values["random_parent_probability"] = (0.0, 1.0, 0.05) + self._possible_values["neftune_noise_alpha"] = (0.0, 15, 0.05) + self._visibility["nlp_augmentations_class"] = -1 + + +@dataclass +class ConfigNLPCausalLMPrediction(DefaultConfig): + metric_class: Any = text_causal_language_modeling_metrics.Metrics + metric: str = "GPT" + metric_gpt_model: str = "gpt-3.5-turbo-0301" + metric_gpt_template: str = "general" + + min_length_inference: int = 2 + max_length_inference: int = 256 + max_time: float = 0 + batch_size_inference: int = 0 + + do_sample: bool = False + num_beams: int = 1 + temperature: float = 0.0 + repetition_penalty: float = 1.0 + stop_tokens: str = "" + top_k: int = 0 + top_p: float = 1.0 + + num_history: int = 4 + + def __post_init__(self): + super().__post_init__() + self._possible_values["metric"] = self.metric_class.names() + + self._possible_values["metric_gpt_model"] = possible_values.String( + values=( + "gpt-3.5-turbo-0301", + "gpt-3.5-turbo-0613", + "gpt-4-0314", + "gpt-4-0613", + "gpt-4-1106-preview", + ), + allow_custom=True, + ) + self._possible_values["metric_gpt_template"] = possible_values.String( + 
values=(f.split(".")[0] for f in os.listdir("prompts")) + ) + + self._possible_values["batch_size_inference"] = (0, 512, 1) + self._possible_values["min_length_inference"] = (0, 1024, 1) + self._possible_values["max_length_inference"] = (1, 4096, 1) + self._possible_values["max_time"] = (0.0, 600.0, 1.0) + + self._possible_values["num_beams"] = (1, 4, 1) + self._possible_values["temperature"] = (0, 10, 0.05) + self._possible_values["repetition_penalty"] = (1, 10, 0.05) + self._possible_values["top_k"] = (0, 100, 1) + self._possible_values["top_p"] = (0.5, 1, 0.05) + self._possible_values["num_history"] = (1, 50, 1) + + self._visibility["metric_class"] = -1 + # possible values for num_history are only used in chatbot tab + self._visibility["num_history"] = -1 + + self._nesting.add( + ["metric_gpt_model", "metric_gpt_template"], + [Dependency(key="metric", value="GPT", is_set=True)], + ) + + +@dataclass +class ConfigNLPCausalLMEnvironment(DefaultConfig): + gpus: Tuple[str, ...] = tuple(str(x) for x in range(torch.cuda.device_count())) + + mixed_precision: bool = True + mixed_precision_dtype: str = "bfloat16" + + compile_model: bool = False + use_deepspeed: bool = False + deepspeed_method: str = "ZeRO2" + deepspeed_allgather_bucket_size: int = int(1e6) + deepspeed_reduce_bucket_size: int = int(1e6) + deepspeed_stage3_prefetch_bucket_size: int = int(1e6) + deepspeed_stage3_param_persistence_threshold: int = int(1e6) + # deepspeed_offload_optimizer: bool = False + # deepspeed_stage3_max_live_parameters: int = 1e9 + # deepspeed_stage3_max_reuse_distance: int = 1e9 + + find_unused_parameters: bool = False + trust_remote_code: bool = True + huggingface_branch: str = "main" + number_of_workers: int = 4 + seed: int = -1 + + _seed: int = 0 # internal seed set in train.py (equals seed if seed is not -1) + _distributed: bool = False + _distributed_inference: bool = True + _local_rank: int = 0 + _world_size: int = 1 + _curr_step: int = 0 + _curr_val_step: int = 0 + _rank: int = 
0 # global rank + _device: str = "cuda" + _cpu_comm: Any = None + _model_card_template: str = "text_causal_language_modeling_model_card_template.md" + _summary_card_template: str = ( + "text_causal_language_modeling_experiment_summary_card_template.md" + ) + + def __post_init__(self): + super().__post_init__() + self._possible_values["gpus"] = possible_values.String( + values=tuple( + [(str(x), f"GPU #{x+1}") for x in range(torch.cuda.device_count())] + ), + allow_custom=False, + ) + + self._possible_values["mixed_precision_dtype"] = possible_values.String( + values=("bfloat16", "float16"), + allow_custom=False, + ) + + self._possible_values["number_of_workers"] = (1, multiprocessing.cpu_count(), 1) + self._possible_values["seed"] = possible_values.Number(step=1, min=-1) + self._possible_values["deepspeed_method"] = ["ZeRO2", "ZeRO3"] + self._possible_values["deepspeed_allgather_bucket_size"] = ( + possible_values.Number(step=1, min=1e6) + ) + self._possible_values["deepspeed_reduce_bucket_size"] = possible_values.Number( + step=1, min=1e6 + ) + self._possible_values["deepspeed_stage3_prefetch_bucket_size"] = ( + possible_values.Number(step=1, min=1e6) + ) + self._possible_values["deepspeed_stage3_param_persistence_threshold"] = ( + possible_values.Number(step=1, min=1e6) + ) + self._possible_values["deepspeed_stage3_max_live_parameters"] = ( + possible_values.Number(step=1, min=1e6) + ) + self._possible_values["deepspeed_stage3_max_reuse_distance"] = ( + possible_values.Number(step=1, min=1e6) + ) + + self._nesting.add( + [ + "mixed_precision_dtype", + ], + [Dependency(key="mixed_precision", value=True, is_set=True)], + ) + self._nesting.add( + [ + "deepspeed_method", + "deepspeed_reduce_bucket_size", + ], + [Dependency(key="use_deepspeed", value=True, is_set=True)], + ) + self._nesting.add( + [ + "deepspeed_allgather_bucket_size", + ], + [ + Dependency(key="use_deepspeed", value=True, is_set=True), + Dependency(key="deepspeed_method", value="ZeRO2", is_set=True), 
            ],
        )
        self._nesting.add(
            [
                "deepspeed_stage3_prefetch_bucket_size",
                "deepspeed_stage3_param_persistence_threshold",
                # "deepspeed_offload_optimizer",
            ],
            [
                Dependency(key="use_deepspeed", value=True, is_set=True),
                Dependency(key="deepspeed_method", value="ZeRO3", is_set=True),
            ],
        )
        # self._nesting.add(
        #     [
        #         "deepspeed_stage3_max_live_parameters",
        #         "deepspeed_stage3_max_reuse_distance",
        #     ],
        #     [Dependency(key="deepspeed_offload_optimizer", value=False, is_set=False)],  # noqa: E501
        # )


@dataclass
class ConfigNLPCausalLMLogging(DefaultConfig):
    """Experiment logging settings (external logger selection)."""

    logger: str = "None"
    neptune_project: str = ""
    _neptune_debug: bool = False

    plots_class: Any = text_causal_language_modeling_plots.Plots

    # the actual logger, will be set dynamically at runtime
    _logger: Any = None

    def __post_init__(self):
        super().__post_init__()
        self._possible_values["logger"] = Loggers.names()

        self._nesting.add(
            ["neptune_project"],
            [Dependency(key="logger", value="Neptune", is_set=True)],
        )

        self._visibility["plots_class"] = -1


@dataclass
class ConfigProblemBase(DefaultConfigProblemBase):
    """Top-level config for causal language modeling experiments.

    Aggregates the sub-configs (dataset, tokenizer, architecture, training,
    augmentation, prediction, environment, logging) for this problem type.
    """

    output_directory: str = f"output/{os.path.basename(__file__).split('.')[0]}"
    experiment_name: str = field(default_factory=generate_experiment_name)
    _parent_experiment: str = ""
    llm_backbone: str = "h2oai/h2ogpt-4096-llama2-7b"

    dataset: ConfigNLPCausalLMDataset = field(default_factory=ConfigNLPCausalLMDataset)
    tokenizer: ConfigNLPCausalLMTokenizer = field(
        default_factory=ConfigNLPCausalLMTokenizer
    )
    architecture: ConfigNLPCausalLMArchitecture = field(
        default_factory=ConfigNLPCausalLMArchitecture
    )
    training: ConfigNLPCausalLMTraining = field(
        default_factory=ConfigNLPCausalLMTraining
    )
    augmentation: ConfigNLPAugmentation = field(default_factory=ConfigNLPAugmentation)
    prediction: ConfigNLPCausalLMPrediction = field(
        default_factory=ConfigNLPCausalLMPrediction
    )
    environment: ConfigNLPCausalLMEnvironment = field(
        default_factory=ConfigNLPCausalLMEnvironment
    )
    logging: ConfigNLPCausalLMLogging = field(default_factory=ConfigNLPCausalLMLogging)

    def __post_init__(self):
        super().__post_init__()

        self._visibility["output_directory"] = -1

        self._possible_values["llm_backbone"] = possible_values.String(
            values=(
                "h2oai/h2o-danube2-1.8b-base",
                "h2oai/h2o-danube2-1.8b-chat",
                "h2oai/h2ogpt-4096-llama2-7b",
                "h2oai/h2ogpt-4096-llama2-7b-chat",
                "h2oai/h2ogpt-4096-llama2-13b",
                "h2oai/h2ogpt-4096-llama2-13b-chat",
                "h2oai/h2ogpt-4096-llama2-70b",
                "h2oai/h2ogpt-4096-llama2-70b-chat",
                "tiiuae/falcon-7b",
                "mistralai/Mistral-7B-v0.1",
                "HuggingFaceH4/zephyr-7b-beta",
                "google/gemma-2b",
                "google/gemma-7b",
                "stabilityai/stablelm-3b-4e1t",
                "microsoft/phi-2",
                "facebook/opt-125m",
            ),
            allow_custom=True,
        )

    def check(self) -> Dict[str, List]:
        """Validates cross-setting consistency; returns UI error titles/messages."""
        errors: Dict[str, List] = {"title": [], "message": []}
        # Sampling settings must be mutually consistent.
        if self.prediction.temperature > 0 and not self.prediction.do_sample:
            errors["title"] += ["Do sample needs to be enabled for temperature > 0"]
            errors["message"] += [
                "Please enable do sample if you want to use temperature > 0."
            ]
        if self.prediction.temperature == 0 and self.prediction.do_sample:
            errors["title"] += ["Temperature needs to be > 0 for do sample"]
            errors["message"] += [
                "Please increase temperature if you want to use do sample."
            ]
        return errors
diff --git a/llm_studio/python_configs/text_dpo_modeling_config.py b/llm_studio/python_configs/text_dpo_modeling_config.py
new file mode 100644
index 0000000000000000000000000000000000000000..2948e7a6f299c1714a65b1346d16f2ad04b039b9
--- /dev/null
+++ b/llm_studio/python_configs/text_dpo_modeling_config.py
@@ -0,0 +1,128 @@
import os
from dataclasses import dataclass, field
from typing import Any

import llm_studio.src.datasets.text_dpo_modeling_ds
from llm_studio.python_configs.base import DefaultConfigProblemBase
from llm_studio.python_configs.text_causal_language_modeling_config import (
    ConfigNLPAugmentation,
    ConfigNLPCausalLMArchitecture,
    ConfigNLPCausalLMDataset,
    ConfigNLPCausalLMEnvironment,
    ConfigNLPCausalLMLogging,
    ConfigNLPCausalLMPrediction,
    ConfigNLPCausalLMTokenizer,
    ConfigNLPCausalLMTraining,
)
from llm_studio.src import possible_values
from llm_studio.src.losses import text_dpo_modeling_losses
from llm_studio.src.models import text_dpo_modeling_model
from llm_studio.src.plots import text_dpo_modeling_plots
from llm_studio.src.utils.modeling_utils import generate_experiment_name


@dataclass
class ConfigDPODataset(ConfigNLPCausalLMDataset):
    """Dataset config for DPO: adds rejected prompt/answer columns."""

    dataset_class: Any = llm_studio.src.datasets.text_dpo_modeling_ds.CustomDataset
    # Always have full chat history.
    # Chosen/Rejected prompt are only at the end of a conversation.
    limit_chained_samples: bool = True
    mask_prompt_labels: bool = True

    rejected_prompt_column: str = "None"
    answer_column: str = "chosen_response"
    rejected_answer_column: str = "rejected_response"

    def __post_init__(self):
        super().__post_init__()
        self._possible_values["rejected_prompt_column"] = possible_values.Columns(
            prefer_with=lambda column: column
            in ("rejected_input", "rejected_prompt", "rejected_instruction"),
            add_none=True,
        )
        self._possible_values["rejected_answer_column"] = possible_values.Columns(
            prefer_with=lambda column: column
            in ("rejected_answer", "rejected_response")
        )

        # Fixed for DPO (see class comment); hide from the UI.
        self._visibility["limit_chained_samples"] = -1
        self._visibility["mask_prompt_labels"] = -1
        self._order.insert("rejected_prompt_column", after="prompt_column")
        self._order.insert("rejected_answer_column", after="answer_column")


@dataclass
class ConfigDPOTraining(ConfigNLPCausalLMTraining):
    """Training config for DPO: adds the beta KL-penalty coefficient."""

    learning_rate: float = 1e-4  # relatively high as we use LORA
    beta: float = 0.2
    gradient_clip: float = 10.0
    loss_class: Any = text_dpo_modeling_losses.Losses
    loss_function: str = "DPOLoss"
    optimizer: str = "AdamW"
    # Needs to be enabled as we need logits from original model, see forward pass
    lora: bool = True

    def __post_init__(self):
        super().__post_init__()
        self._possible_values["beta"] = possible_values.Number(0.05, 1.0, 0.05)
        self._order.insert("beta", after="learning_rate")
        self._visibility["lora"] = -1


@dataclass
class ConfigDPOArchitecture(ConfigNLPCausalLMArchitecture):
    """Architecture config for DPO: swaps in the DPO model class."""

    model_class: Any = text_dpo_modeling_model.Model


@dataclass
class ConfigDPOPLogging(ConfigNLPCausalLMLogging):
    """Logging config for DPO: swaps in the DPO plots class."""

    plots_class: Any = text_dpo_modeling_plots.Plots


@dataclass
class ConfigProblemBase(DefaultConfigProblemBase):
    """Top-level config for DPO experiments."""

    output_directory: str = f"output/{os.path.basename(__file__).split('.')[0]}"
    experiment_name: str = field(default_factory=generate_experiment_name)
    _parent_experiment: str = ""
    # 7b model may be unstable (NaN loss)
    llm_backbone: str = "h2oai/h2ogpt-4096-llama2-13b-chat"

    dataset: ConfigDPODataset = field(default_factory=ConfigDPODataset)
    tokenizer: ConfigNLPCausalLMTokenizer = field(
        default_factory=ConfigNLPCausalLMTokenizer
    )
    architecture: ConfigDPOArchitecture = field(default_factory=ConfigDPOArchitecture)
    training: ConfigDPOTraining = field(default_factory=ConfigDPOTraining)
    augmentation: ConfigNLPAugmentation = field(default_factory=ConfigNLPAugmentation)
    prediction: ConfigNLPCausalLMPrediction = field(
        default_factory=ConfigNLPCausalLMPrediction
    )
    environment: ConfigNLPCausalLMEnvironment = field(
        default_factory=ConfigNLPCausalLMEnvironment
    )
    logging: ConfigDPOPLogging = field(default_factory=ConfigDPOPLogging)

    def __post_init__(self):
        super().__post_init__()
        self._visibility["output_directory"] = -1
        self._possible_values["llm_backbone"] = possible_values.String(
            values=(
                "h2oai/h2o-danube2-1.8b-base",
                "h2oai/h2o-danube2-1.8b-chat",
                "h2oai/h2ogpt-4096-llama2-7b",
                "h2oai/h2ogpt-4096-llama2-7b-chat",
                "h2oai/h2ogpt-4096-llama2-13b",
                "h2oai/h2ogpt-4096-llama2-13b-chat",
                "h2oai/h2ogpt-4096-llama2-70b",
                "h2oai/h2ogpt-4096-llama2-70b-chat",
                "tiiuae/falcon-7b",
                "mistralai/Mistral-7B-v0.1",
                "HuggingFaceH4/zephyr-7b-beta",
                "google/gemma-2b",
                "google/gemma-7b",
                "stabilityai/stablelm-3b-4e1t",
                "microsoft/phi-2",
                "facebook/opt-125m",
            ),
            allow_custom=True,
        )
diff --git a/llm_studio/python_configs/text_rlhf_language_modeling_config.py b/llm_studio/python_configs/text_rlhf_language_modeling_config.py
new file mode 100644
index 0000000000000000000000000000000000000000..e7e8ae5b2a910018b52bb3bf5d0cd2d2801cceca
--- /dev/null
+++ b/llm_studio/python_configs/text_rlhf_language_modeling_config.py
@@ -0,0 +1,270 @@
import os
from dataclasses import dataclass, field
from typing import Any, Dict, List

import llm_studio.src.models.text_rlhf_language_modeling_model
from llm_studio.python_configs.base import DefaultConfigProblemBase
from llm_studio.python_configs.text_causal_language_modeling_config import (
    ConfigNLPAugmentation,
    ConfigNLPCausalLMArchitecture,
    ConfigNLPCausalLMDataset,
    ConfigNLPCausalLMEnvironment,
    ConfigNLPCausalLMLogging,
    ConfigNLPCausalLMPrediction,
    ConfigNLPCausalLMTokenizer,
    ConfigNLPCausalLMTraining,
)
from llm_studio.src import possible_values
from llm_studio.src.datasets.text_rlhf_modeling_ds import CustomDataset
from llm_studio.src.models import text_reward_model
from llm_studio.src.utils.modeling_utils import generate_experiment_name


@dataclass
class ConfigRLHFLMDataset(ConfigNLPCausalLMDataset):
    """Dataset config for RLHF: llama-2-chat style prompt template."""

    dataset_class: Any = CustomDataset

    # NOTE(review): the "<>" pairs below look like a mangled llama-2
    # "<<SYS>>"/"<</SYS>>" tag — verify against the upstream template.
    text_prompt_start: str = (
        "[INST] <>\nYou are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.\n\nIf a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.\n<>\n\n"  # noqa: E501
    )
    text_answer_separator: str = " [/INST]"
    add_eos_token_to_prompt: bool = False
    add_eos_token_to_answer: bool = False
    limit_chained_samples: bool = False
    mask_prompt_labels: bool = True

    def __post_init__(self):
        super().__post_init__()
        # RLHF is not compatible with system column.
        self.system_column = "None"

        # Incompatible with RLHF
        self._visibility["system_column"] = -1
        self._visibility["limit_chained_samples"] = -1
        self._visibility["mask_prompt_labels"] = -1


class LossClass:
    """Placeholder loss registry: RLHF has no selectable loss functions."""

    @classmethod
    def names(cls):
        return []


class ConfigRLHFLMAugmentation(ConfigNLPAugmentation):
    """Augmentation config for RLHF: hides parent-based augmentations."""

    def __post_init__(self):
        super().__post_init__()
        self._visibility["skip_parent_probability"] = -1
        self._visibility["random_parent_probability"] = -1


@dataclass
class ConfigRLHFLMTraining(ConfigNLPCausalLMTraining):
    """Training config for RLHF: PPO and KL-control hyperparameters."""

    loss_class: Any = LossClass
    loss_function: str = "RLHF"

    batch_size: int = 4
    gradient_clip: float = 1.0
    grad_accumulation: int = 16
    evaluation_epochs: float = 0.25

    rollout_steps: int = 64
    adaptive_kl_control: bool = True
    full_kl_penalty: bool = True
    initial_kl_coefficient: float = 0.2
    kl_target: float = 6.0
    kl_horizon: int = 10000
    advantages_gamma: float = 0.99
    advantages_lambda: float = 0.95
    ppo_clip_policy: float = 0.2
    ppo_clip_value: float = 0.2
    scaling_factor_value_loss: float = 0.1
    ppo_epochs: int = 4
    ppo_batch_size: int = 8
    ppo_generate_temperature: float = 1.0
    offload_reward_model: bool = False

    def __post_init__(self):
        super().__post_init__()
        # LoRA is mandatory for RLHF (also enforced in ConfigProblemBase.check).
        self.lora = True
        self._possible_values["differential_learning_rate_layers"] = (
            possible_values.String(
                values=("backbone", "value_head"),
                allow_custom=False,
                placeholder="Select optional layers...",
            )
        )
        self._possible_values["grad_accumulation"] = (1, 128, 1)

        self._possible_values["rollout_steps"] = (1, 1024, 1)
        self._possible_values["initial_kl_coefficient"] = (0.01, 0.5, 0.01)
        self._possible_values["kl_target"] = (0.1, 16, 0.1)
        self._possible_values["kl_horizon"] = (1000, 20000, 1000)
        self._possible_values["advantages_gamma"] = (0.800, 0.999, 0.001)
        self._possible_values["advantages_lambda"] = (0.8, 1.0, 0.01)
        self._possible_values["ppo_clip_policy"] = (0.1, 0.5, 0.05)
        self._possible_values["ppo_clip_value"] = (0.1, 0.5, 0.05)
        self._possible_values["scaling_factor_value_loss"] = (0.01, 1, 0.01)
        self._possible_values["ppo_epochs"] = (1, 16, 1)
        self._possible_values["ppo_generate_temperature"] = (0.1, 1.0, 0.1)
        self._possible_values["ppo_batch_size"] = (1, 256, 1)

        self._order.insert(
            "rollout_steps",
            "offload_reward_model",
            "adaptive_kl_control",
            "full_kl_penalty",
            "advantages_gamma",
            "kl_horizon",
            "ppo_generate_temperature",
            "kl_target",
            "scaling_factor_value_loss",
            "ppo_clip_value",
            "ppo_clip_policy",
            "initial_kl_coefficient",
            "advantages_lambda",
            "ppo_batch_size",
            "ppo_epochs",
            after="learning_rate",
        )

        self._visibility["lora"] = -1
        self._visibility["loss_function"] = -1


@dataclass
class ConfigRLHFLMArchitecture(ConfigNLPCausalLMArchitecture):
    """Architecture config for RLHF: policy model plus a frozen reward model."""

    model_class: Any = llm_studio.src.models.text_rlhf_language_modeling_model.Model
    reward_model_class: Any = text_reward_model.RewardModel

    def __post_init__(self):
        super().__post_init__()
        # RLHF is not supported with force_embedding_gradients.
        self.force_embedding_gradients = False
        self._visibility["reward_model_class"] = -1
        self._visibility["force_embedding_gradients"] = -1


@dataclass
class ConfigRLHFLMPrediction(ConfigNLPCausalLMPrediction):
    """Prediction config for RLHF: sampling parameters are fixed."""

    do_sample: bool = True
    repetition_penalty: float = 1.0
    num_beams: int = 1
    top_k: int = 0
    top_p: float = 1.0
    temperature: float = 1.0

    def __post_init__(self):
        super().__post_init__()
        # These values are fixed for RLHF
        self._visibility["do_sample"] = -1
        self._visibility["repetition_penalty"] = -1
        self._visibility["top_p"] = -1
        self._visibility["top_k"] = -1
        self._visibility["num_beams"] = -1


@dataclass
class ConfigRLHFLMEnvironment(ConfigNLPCausalLMEnvironment):
    """Environment config for RLHF: model compilation is disabled."""

    compile_model: bool = False

    def __post_init__(self):
        super().__post_init__()
        self._visibility["compile_model"] = -1


@dataclass
class ConfigProblemBase(DefaultConfigProblemBase):
    """Top-level config for RLHF experiments (policy backbone + reward model)."""

    output_directory: str = f"output/{os.path.basename(__file__).split('.')[0]}"
    experiment_name: str = field(default_factory=generate_experiment_name)
    _parent_experiment: str = ""
    llm_backbone: str = "h2oai/h2ogpt-4096-llama2-7b-chat"
    reward_model: str = "OpenAssistant/reward-model-deberta-v3-large-v2"

    dataset: ConfigRLHFLMDataset = field(default_factory=ConfigRLHFLMDataset)
    tokenizer: ConfigNLPCausalLMTokenizer = field(
        default_factory=ConfigNLPCausalLMTokenizer
    )
    architecture: ConfigRLHFLMArchitecture = field(
        default_factory=ConfigRLHFLMArchitecture
    )
    training: ConfigRLHFLMTraining = field(default_factory=ConfigRLHFLMTraining)
    augmentation: ConfigRLHFLMAugmentation = field(
        default_factory=ConfigRLHFLMAugmentation
    )
    prediction: ConfigRLHFLMPrediction = field(default_factory=ConfigRLHFLMPrediction)
    environment: ConfigRLHFLMEnvironment = field(
        default_factory=ConfigRLHFLMEnvironment
    )
    logging: ConfigNLPCausalLMLogging = field(default_factory=ConfigNLPCausalLMLogging)

    def __post_init__(self):
        super().__post_init__()

        self._possible_values["llm_backbone"] = possible_values.String(
            values=(
                "h2oai/h2o-danube2-1.8b-base",
                "h2oai/h2o-danube2-1.8b-chat",
                "h2oai/h2ogpt-4096-llama2-7b",
                "h2oai/h2ogpt-4096-llama2-7b-chat",
                "h2oai/h2ogpt-4096-llama2-13b",
                "h2oai/h2ogpt-4096-llama2-13b-chat",
                "h2oai/h2ogpt-4096-llama2-70b",
                "h2oai/h2ogpt-4096-llama2-70b-chat",
                "tiiuae/falcon-7b",
                "mistralai/Mistral-7B-v0.1",
                "HuggingFaceH4/zephyr-7b-beta",
                "google/gemma-2b",
                "google/gemma-7b",
                "stabilityai/stablelm-3b-4e1t",
                "microsoft/phi-2",
                "facebook/opt-125m",
            ),
            allow_custom=True,
        )

        self._possible_values["reward_model"] = possible_values.String(
            values=(
                "OpenAssistant/reward-model-deberta-v3-large-v2",
                "OpenAssistant/oasst-rm-2.1-pythia-1.4b-epoch-2.5",
                "OpenAssistant/oasst-rm-2-pythia-6.9b-epoch-1",
            ),
            # Custom models are not supported, as they would need to be implemented in
            # /src/models/text_reward_model.py
            allow_custom=False,
        )

        self._order.insert(
            "reward_model",
            after="llm_backbone",
        )
        self._visibility["output_directory"] = -1

    def check(self) -> Dict[str, List]:
        """Validates RLHF-specific constraints; returns UI error titles/messages."""
        errors: Dict[str, List] = {"title": [], "message": []}
        if not self.training.lora:
            errors["title"] += ["LoRA must be True for RLHF"]
            errors["message"] += [
                "LoRA must be True for RLHF. "
                "Please set LoRA to True or change the problem type. "
            ]

        # see CustomDataset for RLHF
        if self.dataset.system_column != "None":
            errors["title"] += ["RLHF is not compatible with system column."]
            errors["message"] += [
                "RLHF is not compatible with system column. "
                "Please set system column to None or change the problem type. "
            ]
        if self.dataset.limit_chained_samples:
            errors["title"] += ["RLHF is not compatible with limit_chained_samples."]
            errors["message"] += [
                "RLHF is not compatible with limit_chained_samples. "
                "Please set limit_chained_samples to False or change the problem type. "
            ]
        if not self.dataset.mask_prompt_labels:
            errors["title"] += ["RLHF is not compatible with mask_prompt_labels."]
            errors["message"] += [
                "RLHF is not compatible with mask_prompt_labels. "
                "Please set mask_prompt_labels to True or change the problem type. "
            ]
        return errors
diff --git a/llm_studio/python_configs/text_sequence_to_sequence_modeling_config.py b/llm_studio/python_configs/text_sequence_to_sequence_modeling_config.py
new file mode 100644
index 0000000000000000000000000000000000000000..e2fdb5c5d973adb65b81ce6ff73abbcec5c9ff95
--- /dev/null
+++ b/llm_studio/python_configs/text_sequence_to_sequence_modeling_config.py
@@ -0,0 +1,122 @@
import os
from dataclasses import dataclass, field
from typing import Any, Dict, List

from llm_studio.python_configs.base import DefaultConfigProblemBase
from llm_studio.python_configs.text_causal_language_modeling_config import (
    ConfigNLPAugmentation,
    ConfigNLPCausalLMArchitecture,
    ConfigNLPCausalLMDataset,
    ConfigNLPCausalLMEnvironment,
    ConfigNLPCausalLMLogging,
    ConfigNLPCausalLMPrediction,
    ConfigNLPCausalLMTokenizer,
    ConfigNLPCausalLMTraining,
)
from llm_studio.src import possible_values
from llm_studio.src.models import text_sequence_to_sequence_modeling_model
from llm_studio.src.utils.modeling_utils import generate_experiment_name


@dataclass
class ConfigNLPSeq2SeqDataset(ConfigNLPCausalLMDataset):
    """Dataset config for seq2seq: no chat separators (encoder/decoder split)."""

    text_system_start: str = ""
    text_prompt_start: str = ""
    text_answer_separator: str = ""

    limit_chained_samples: bool = False
    add_eos_token_to_system: bool = True
    add_eos_token_to_prompt: bool = True
    add_eos_token_to_answer: bool = True
    mask_prompt_labels: bool = True

    def __post_init__(self):
        # Normalize a plain-string prompt column to a tuple (see causal config).
        self.prompt_column = (
            tuple(
                self.prompt_column,
            )
            if isinstance(self.prompt_column, str)
            else tuple(self.prompt_column)
        )
        super().__post_init__()

        self._visibility["limit_chained_samples"] = -1
        self._visibility["mask_prompt_labels"] = -1

@dataclass
class ConfigNLPSeq2SeqArchitecture(ConfigNLPCausalLMArchitecture):
    """Architecture config for seq2seq: encoder-decoder model, bfloat16 default."""

    model_class: Any = text_sequence_to_sequence_modeling_model.Model
    backbone_dtype: str = "bfloat16"

    def __post_init__(self):
        super().__post_init__()


@dataclass
class ConfigNLPSeq2SeqEnvironment(ConfigNLPCausalLMEnvironment):
    """Environment config for seq2seq: mixed precision off, seq2seq card templates."""

    mixed_precision: bool = False

    _model_card_template: str = (
        "text_sequence_to_sequence_modeling_model_card_template.md"
    )
    _summary_card_template: str = (
        "text_sequence_to_sequence_modeling_experiment_summary_card_template.md"
    )

    def __post_init__(self):
        super().__post_init__()


@dataclass
class ConfigProblemBase(DefaultConfigProblemBase):
    """Top-level config for sequence-to-sequence experiments (T5 family)."""

    output_directory: str = f"output/{os.path.basename(__file__).split('.')[0]}"
    experiment_name: str = field(default_factory=generate_experiment_name)
    _parent_experiment: str = ""
    llm_backbone: str = "t5-small"

    dataset: ConfigNLPSeq2SeqDataset = field(default_factory=ConfigNLPSeq2SeqDataset)
    tokenizer: ConfigNLPCausalLMTokenizer = field(
        default_factory=ConfigNLPCausalLMTokenizer
    )
    architecture: ConfigNLPSeq2SeqArchitecture = field(
        default_factory=ConfigNLPSeq2SeqArchitecture
    )
    training: ConfigNLPCausalLMTraining = field(
        default_factory=ConfigNLPCausalLMTraining
    )
    augmentation: ConfigNLPAugmentation = field(default_factory=ConfigNLPAugmentation)
    prediction: ConfigNLPCausalLMPrediction = field(
        default_factory=ConfigNLPCausalLMPrediction
    )
    environment: ConfigNLPSeq2SeqEnvironment = field(
        default_factory=ConfigNLPSeq2SeqEnvironment
    )
    logging: ConfigNLPCausalLMLogging = field(default_factory=ConfigNLPCausalLMLogging)

    def __post_init__(self):
        super().__post_init__()

        self._visibility["output_directory"] = -1

        self._possible_values["llm_backbone"] = possible_values.String(
            values=(
                "t5-small",
                "t5-base",
                "t5-large",
                "google/flan-t5-small",
                "google/flan-t5-base",
                "google/flan-t5-large",
                "google/flan-ul2",
            ),
            allow_custom=True,
        )

    def check(self) -> Dict[str, List]:
        """Validates sampling consistency; returns UI error titles/messages."""
        errors: Dict[str, List] = {"title": [], "message": []}
        if self.prediction.temperature > 0 and not self.prediction.do_sample:
            errors["title"] += ["Do sample needs to be enabled for temperature > 0"]
            errors["message"] += [
                "Please enable do sample if you want to use temperature > 0."
            ]
        return errors
diff --git a/llm_studio/src/__init__.py b/llm_studio/src/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/llm_studio/src/augmentations/__init__.py b/llm_studio/src/augmentations/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/llm_studio/src/augmentations/nlp_aug.py b/llm_studio/src/augmentations/nlp_aug.py
new file mode 100644
index 0000000000000000000000000000000000000000..f62199fff9577fad55c50cb0fc2c7fb4b54c6ea1
--- /dev/null
+++ b/llm_studio/src/augmentations/nlp_aug.py
@@ -0,0 +1,54 @@
import logging
from abc import abstractmethod
from typing import Any, Dict

import torch
from torch import nn

logger = logging.getLogger(__name__)


class BaseNLPAug(nn.Module):
    """Base class for NLP augmentation"""

    def __init__(self, cfg: Any):
        """
        Args:
            cfg: config with all the hyperparameters
        """

        super().__init__()
        self.cfg = cfg

    @abstractmethod
    def forward(self, batch: Dict) -> Dict:
        """Augmenting

        Randomly masks input tokens with probability
        ``cfg.augmentation.token_mask_probability``; masked positions get the
        tokenizer's mask token id, zeroed attention and (when label/input
        lengths match) -100 labels.

        Args:
            batch: current batch

        Returns:
            augmented batch
        """

        if self.cfg.augmentation.token_mask_probability > 0:
            input_ids = batch["input_ids"].clone()
            # special_mask = ~batch["special_tokens_mask"].clone().bool()
            mask = (
                torch.bernoulli(
                    torch.full(
                        input_ids.shape,
                        float(self.cfg.augmentation.token_mask_probability),
                    )
                )
                .to(input_ids.device)
                .bool()
                # & special_mask
            ).bool()
            input_ids[mask] = self.cfg._tokenizer_mask_token_id
            batch["input_ids"] = 
input_ids.clone() + batch["attention_mask"][mask] = 0 + if batch["labels"].shape[1] == batch["input_ids"].shape[1]: + batch["labels"][mask] = -100 + + return batch diff --git a/llm_studio/src/datasets/__init__.py b/llm_studio/src/datasets/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llm_studio/src/datasets/conversation_chain_handler.py b/llm_studio/src/datasets/conversation_chain_handler.py new file mode 100644 index 0000000000000000000000000000000000000000..93045669bbd70214348913765fa8747ddf62edfe --- /dev/null +++ b/llm_studio/src/datasets/conversation_chain_handler.py @@ -0,0 +1,226 @@ +import logging +from typing import Dict, List + +import numpy as np + +from llm_studio.src.datasets.text_utils import get_texts +from llm_studio.src.utils.utils import PatchedAttribute + +logger = logging.getLogger(__name__) + + +class ConversationChainHandler: + """ + This class partitions the dataset into chains of conversations. + Each chain is comprised of a list of conversation rounds. + Each round within a conversation is represented as a triplet: + (system, prompt, answer). + + The resulting structure of the chains is conditional on + the DataFrame's structure and configuration: + + - Without a 'parent_id' in the DataFrame, each conversation chain is a single round. + So, for every `i`-th row in the DataFrame, 0 <= `i` < len(df), + the chain would look like: [(system_i, prompt_i, answer_i)] + + - With a 'parent_id' in the DataFrame and + if `cfg.dataset.limit_chained_samples` is set to False, + each chain encapsulates all preceding conversations + for every `i`-th row in the DataFrame, + 0 <= `i` < len(df). 
+ The resultant chain would take shape: + [(system_start_conversation_i, + prompt_start_conversation_i, + answer_start_conversation_i), + ..., + (system_i, prompt_i, answer_i)] + + - With a 'parent_id' in the DataFrame and + if `cfg.dataset.limit_chained_samples` is set to True, + each conversation chain incorporates only full conversations. + The chain hence condenses into: + [(system_start_conversation_i, + prompt_start_conversation_i, + answer_start_conversation_i), + ..., + (system_end_conversation_i, + prompt_end_conversation_i, + answer_end_conversation_i)] + where `i` represents complete conversations only. + """ + + def __init__( + self, + df, + cfg, + ): + # Do not set self.cfg = cfg, as ConversationChainHandler + # will be used with PatchedAttribute context manager. + self.conversation_chain_ids = self.get_conversation_chain_ids(cfg, df) + self.prompts = get_texts(df, cfg, separator="") + self.answers = self.get_answers(df, cfg) + self.systems = self.get_systems(cfg, df) + + def get_conversation_chain_ids(self, cfg, df): + """ + Gets the conversation chain IDs for the given DataFrame. + E.g. if conversation_chain_ids = [[13, 44, 8], ...], + then the first conversation chain consists of + [df.iloc[13], df.iloc[44], df.iloc[8]] + with + - df.iloc[13] denotes the first round of the conversation + - df.iloc[44] denotes the second round of the conversation + - df.iloc[8] denotes the end of the conversation + if limit_chained_samples is True, df.iloc[13] will have no parent_id, + i.e. it is the start of the conversation. 
+ """ + if ( + cfg.dataset.parent_id_column in ["None", None] + # Handle case where train Dataframe has conversation chains, + # but val Dataframe does not + or cfg.dataset.parent_id_column not in df.columns + ): + # no parent id column, so each triplet (system_i, prompt_i, answer_i) + # is a conversation chain + return [[idx] for idx in range(len(df))] + + assert "id" in df.columns, ( + f"id column required for conversation chaining, " + f"DataFrame only has {df.columns}." + ) + # sample and parent ids can have any dtype, such as str, int, float, etc. + # id column can be int, while parent_id column can be float + # (as some values are NaN) so we cast id to the same dtype + sample_ids = df["id"].astype(df[cfg.dataset.parent_id_column].dtype).tolist() + parent_ids = df[cfg.dataset.parent_id_column].tolist() + # Some datasets may include parent ids that are not in the dataset. + sample_ids_set = set(sample_ids) + parent_ids = [idx if idx in sample_ids_set else "None" for idx in parent_ids] + + id2parent_id = { + idx: parent_id + for idx, parent_id in zip(sample_ids, parent_ids) + if parent_id not in [None, "None"] + and ( + not isinstance(parent_id, float) + or (not np.isnan(parent_id) and not np.isinf(parent_id)) + ) + } + if cfg.dataset.limit_chained_samples: + # end id == id is not a parent id of another conversation id + valid_parent_ids = set(id2parent_id.values()) + conversation_end_ids = [ + idx for idx in sample_ids if idx not in valid_parent_ids + ] + else: + conversation_end_ids = sample_ids + conversation_chain_ids = [ + self.get_conversation_ids(id2parent_id, conversation_end_id) + for conversation_end_id in conversation_end_ids + ] + # map from df["id"] to enumeration index + dataframeid2idx = {id: idx for idx, id in enumerate(sample_ids)} + conversation_chain_ids = [ + [dataframeid2idx[conversation_id] for conversation_id in conversation_ids] + for conversation_ids in conversation_chain_ids + ] + return conversation_chain_ids + + def get_answers(self, 
df, cfg): + answer_column = cfg.dataset.answer_column + if answer_column in df.columns: + answers = df[answer_column].astype(str).tolist() + else: + answers = ["" for _ in range(len(self.prompts))] + return answers + + def get_systems(self, cfg, df): + if cfg.dataset.system_column != "None": + if cfg.dataset.system_column not in df.columns: + logger.warning( + f"System column {cfg.dataset.system_column} not found." + f"Disabling functionality." + ) + systems = ["" for _ in range(len(self.prompts))] + else: + systems = df[cfg.dataset.system_column].astype(str).tolist() + else: + systems = ["" for _ in range(len(self.prompts))] + return systems + + @staticmethod + def get_conversation_ids(id2parent_id, end_id): + """ + Gets the conversation chain for a given starting conversation ID. + Args: + id2parent_id: A dictionary containing the mapping of IDs + to its previous parent ID. + end_id: The ID of the end of the conversation in the chain. + Returns: + A list of conversation IDs representing the conversation chain. + The chain is ordered from the first conversation id to end_id in the chain. + """ + # prevent infinite loops in case + # of circular parent chains (dataframe issue) + loop_counter = 0 + + conversation_chain_ids = [end_id] + parent_id = end_id + while parent_id in id2parent_id: + loop_counter += 1 + + parent_id = id2parent_id[parent_id] + conversation_chain_ids = [parent_id] + conversation_chain_ids + if loop_counter > 1000: + raise ValueError( + f"Parent chain of sample with idx {end_id} " + f"exceeds max loop count of 1000. " + f"Please ensure that parent chain is not circular." + ) + return conversation_chain_ids + + def __len__(self): + return len(self.conversation_chain_ids) + + def __getitem__(self, idx): + """ + Gets a single conversation chain. 
+ The conversation may be: + - a single (system, prompt, answer) round, + if cfg.dataset.parent_id_column == "None" or + there is no parent_id for the conversation + - a conversation potentially starting somewhere in + the middle of the conversation, if the conversation + is chained and limit_chained_samples is set to False + - always a complete conversation, if the conversation is chained + and limit_chained_samples is True + + """ + prompts = [self.prompts[i] for i in self.conversation_chain_ids[idx]] + answers = [self.answers[i] for i in self.conversation_chain_ids[idx]] + systems = [self.systems[i] for i in self.conversation_chain_ids[idx]] + return { + "prompts": prompts, + "answers": answers, + "systems": systems, + } + + def get_conversation_end_ids(self): + """ + Gets the end conversation IDs for each conversation chain. + """ + return [ + conversation_chain[-1] for conversation_chain in self.conversation_chain_ids + ] + + +def get_conversation_chains( + df, cfg, limit_chained_samples=True +) -> List[Dict[str, List[str]]]: + with PatchedAttribute(cfg.dataset, "limit_chained_samples", limit_chained_samples): + conversation_chain_handler = ConversationChainHandler(df, cfg) + conversations = [ + conversation + for conversation in conversation_chain_handler # type: ignore[attr-defined] + ] + return conversations diff --git a/llm_studio/src/datasets/text_causal_classification_ds.py b/llm_studio/src/datasets/text_causal_classification_ds.py new file mode 100644 index 0000000000000000000000000000000000000000..4642143e24e82b5550f5cb0f9dcaf62313ae39bd --- /dev/null +++ b/llm_studio/src/datasets/text_causal_classification_ds.py @@ -0,0 +1,96 @@ +import logging +from typing import Any, Dict + +import numpy as np +import pandas as pd +import torch + +from llm_studio.src.datasets.text_causal_language_modeling_ds import ( + CustomDataset as TextCausalLanguageModelingCustomDataset, +) +from llm_studio.src.utils.exceptions import LLMDataException + +logger = 
logging.getLogger(__name__) + + +class CustomDataset(TextCausalLanguageModelingCustomDataset): + def __init__(self, df: pd.DataFrame, cfg: Any, mode: str = "train"): + super().__init__(df=df, cfg=cfg, mode=mode) + check_for_non_int_answers(cfg, df) + self.answers_int = df[cfg.dataset.answer_column].astype(int).values.tolist() + + if 1 < cfg.dataset.num_classes <= max(self.answers_int): + raise LLMDataException( + "Number of classes is smaller than max label " + f"{max(self.answers_int)}. Please increase the setting accordingly." + ) + elif cfg.dataset.num_classes == 1 and max(self.answers_int) > 1: + raise LLMDataException( + "For binary classification, max label should be 1 but is " + f"{max(self.answers_int)}." + ) + if min(self.answers_int) < 0: + raise LLMDataException( + "Labels should be non-negative but min label is " + f"{min(self.answers_int)}." + ) + if ( + min(self.answers_int) != 0 + or max(self.answers_int) != len(set(self.answers_int)) - 1 + ): + logger.warning( + "Labels should start at 0 and be continuous but are " + f"{sorted(set(self.answers_int))}." + ) + + if cfg.dataset.parent_id_column != "None": + raise LLMDataException( + "Parent ID column is not supported for classification datasets." 
+ ) + + def __getitem__(self, idx: int) -> Dict: + sample = super().__getitem__(idx) + sample["class_label"] = self.answers_int[idx] + return sample + + def postprocess_output(self, cfg, df: pd.DataFrame, output: Dict) -> Dict: + output["logits"] = output["logits"].float() + if cfg.dataset.num_classes == 1: + preds = output["logits"] + preds = np.array((preds > 0.0)).astype(int).astype(str).reshape(-1) + else: + preds = output["logits"] + preds = ( + np.array(torch.argmax(preds, dim=1)) # type: ignore[arg-type] + .astype(str) + .reshape(-1) + ) + output["predicted_text"] = preds + return super().postprocess_output(cfg, df, output) + + def clean_output(self, output, cfg): + return output + + @classmethod + def sanity_check(cls, df: pd.DataFrame, cfg: Any, mode: str = "train"): + # TODO: Dataset import in UI is currently using text_causal_language_modeling_ds + check_for_non_int_answers(cfg, df) + + +def check_for_non_int_answers(cfg, df): + answers_non_int = [ + x for x in df[cfg.dataset.answer_column].values if not is_castable_to_int(x) + ] + if len(answers_non_int) > 0: + raise LLMDataException( + f"Column {cfg.dataset.answer_column} contains non int items. " + f"Sample values: {answers_non_int[:5]}." 
def is_castable_to_int(s):
    """Return True if *s* can be passed to ``int()``, False otherwise.

    ``int()`` raises ``ValueError`` for non-numeric strings and NaN,
    but ``TypeError`` for values such as ``None`` or lists.  Catching
    both means such values are reported as "non int" by
    ``check_for_non_int_answers`` (with a helpful sample in the error
    message) instead of crashing the dataset sanity check.
    """
    try:
        int(s)
        return True
    except (ValueError, TypeError):
        return False
One of {"train", "validation"} + """ + self.cfg = cfg + self.mode = mode + self.df = df.copy() + + self.tokenizer = get_tokenizer(self.cfg) + self.conversation_chain_handler = ConversationChainHandler(self.df, cfg) + + def __len__(self) -> int: + return len(self.conversation_chain_handler) + + def __getitem__(self, idx: int) -> Dict: + """Reads a single text observation.""" + input_text_dict = self.conversation_chain_handler[idx] + input_text_dict["systems"] = [ + self.parse_system(self.cfg, system) for system in input_text_dict["systems"] + ] + input_text_dict["prompts"] = [ + self.parse_prompt(self.cfg, prompt) for prompt in input_text_dict["prompts"] + ] + + sample = dict() + system_encoding, prompt_encodings, answer_encodings = self.get_encodings( + input_text_dict=input_text_dict + ) + + input_ids = torch.cat( + [ + torch.cat([prompt_encoding, answer_encoding]) + for prompt_encoding, answer_encoding in zip( + prompt_encodings, answer_encodings + ) + ] + ) + + sample.update(self.get_labels(prompt_encodings, answer_encodings)) + sample.update( + self.pad_tokens( + input_ids, + attention_mask=torch.ones_like(input_ids), + max_length=self.cfg.tokenizer.max_length, + pad_token_id=self.tokenizer.pad_token_id, + ) + ) + + # get answer encodings + sample.update( + self.pad_tokens( + answer_encodings[-1], + attention_mask=torch.ones_like(answer_encodings[-1]), + max_length=self.cfg.tokenizer.max_length_answer, + pad_token_id=self.tokenizer.pad_token_id, + direction="right", + prefix="answer_", + ) + ) + + # Remove last answer from encoding to create the prompt for inference + answer_encodings[-1] = torch.empty(0) + prompt_input_ids = torch.cat( + [ + torch.cat([prompt_encoding, answer_encoding]) + for prompt_encoding, answer_encoding in zip( + prompt_encodings, answer_encodings + ) + ] + ) + sample.update( + self.pad_tokens( + prompt_input_ids, + attention_mask=torch.ones_like(prompt_input_ids), + max_length=self.cfg.tokenizer.max_length, + 
pad_token_id=self.tokenizer.pad_token_id, + prefix="prompt_", + ) + ) + + # make sure system encoding is always prepended if max_length exceeded + if sample["input_ids"][0] != self.tokenizer.pad_token_id: + sample["input_ids"][: len(system_encoding)] = system_encoding + if self.cfg.dataset.mask_prompt_labels and "labels" in sample.keys(): + sample["labels"][: len(system_encoding)] = -100 + if sample["prompt_input_ids"][0] != self.tokenizer.pad_token_id: + sample["prompt_input_ids"][: len(system_encoding)] = system_encoding + + return sample + + @staticmethod + def parse_prompt(cfg: Any, prompt: str): + prompt = ( + f"{codecs.decode(cfg.dataset.text_prompt_start, 'unicode_escape')}{prompt}" + ) + if cfg.dataset.add_eos_token_to_prompt: + prompt += cfg._tokenizer_eos_token + prompt = ( + f"{prompt}" + f"{codecs.decode(cfg.dataset.text_answer_separator, 'unicode_escape')}" + ) + return prompt + + @staticmethod + def parse_system(cfg: Any, system: str): + # no system tokens if empty + if system == "": + return system + system = ( + f"{codecs.decode(cfg.dataset.text_system_start, 'unicode_escape')}{system}" + ) + if cfg.dataset.add_eos_token_to_system: + system += cfg._tokenizer_eos_token + return system + + @staticmethod + def batch_to_device( + batch: Union[Dict, List, torch.Tensor], device: str + ) -> Union[Dict, List, torch.Tensor, str]: + """Function to send the batch to the device specified + + Args: + batch: input batch + device: device to send the data to + Returns: + batch with the elements on the device specified + """ + if isinstance(batch, torch.Tensor): + return batch.to(device) + elif isinstance(batch, (list, tuple)) and all( + isinstance(item, str) for item in batch + ): + # Do not move list of strings to device + return batch + elif isinstance(batch, collections.abc.Mapping): + return { + key: CustomDataset.batch_to_device(value, device) + for key, value in batch.items() + } + elif isinstance(batch, collections.abc.Sequence): + return 
[CustomDataset.batch_to_device(value, device) for value in batch] + else: + raise ValueError(f"Can not move {type(batch)} to device.") + + @staticmethod + def preprocess_dataframe(df: pd.DataFrame, cfg: Any, mode: str) -> pd.DataFrame: + """ + Preprocesses the input dataframe + + Args: + df: the full training dataframe + cfg: config + mode: the mode. One of {"train", "validation"} + Returns: + the processed dataframe + """ + + def personalize(text): + text = text.replace("Open Assistant", cfg.dataset.chatbot_name) + text = text.replace("Open-Assistant", cfg.dataset.chatbot_name) + text = text.replace("open-assistant", cfg.dataset.chatbot_name) + text = text.replace("OpenAssistant", cfg.dataset.chatbot_name) + text = text.replace("open assistant", cfg.dataset.chatbot_name) + text = text.replace("Open Assistand", cfg.dataset.chatbot_name) + text = text.replace("Open Assitant", cfg.dataset.chatbot_name) + text = text.replace("Open Assistent", cfg.dataset.chatbot_name) + text = text.replace("Open Assisstant", cfg.dataset.chatbot_name) + text = text.replace("Open Assitent", cfg.dataset.chatbot_name) + text = text.replace("Open Assitiant", cfg.dataset.chatbot_name) + text = text.replace("Open Assistiant", cfg.dataset.chatbot_name) + text = text.replace("Open Assitan ", cfg.dataset.chatbot_name + " ") + text = text.replace("Open Assistan ", cfg.dataset.chatbot_name + " ") + text = text.replace("Open Asistant", cfg.dataset.chatbot_name) + text = text.replace("Open Assiant", cfg.dataset.chatbot_name) + text = text.replace("Assistant", cfg.dataset.chatbot_name) + text = text.replace("LAION AI", cfg.dataset.chatbot_author) + text = text.replace("LAION-AI", cfg.dataset.chatbot_author) + text = text.replace("LAION,", cfg.dataset.chatbot_author + ",") + text = text.replace("LAION.ai", cfg.dataset.chatbot_author) + text = text.replace("LAION.", cfg.dataset.chatbot_author + ".") + text = text.replace("LAION", cfg.dataset.chatbot_author) + return text + + if 
cfg.dataset.personalize: + for prompt_col in cfg.dataset.prompt_column: + df[prompt_col] = df[prompt_col].apply(personalize) + df[cfg.dataset.answer_column] = df[cfg.dataset.answer_column].apply( + personalize + ) + + return df + + def get_train_collate_fn(self): + """ + Returns train batch collate function for the PyTorch Dataloader. + By default returns None that uses the default PyTorch collate + """ + + return None + + def get_validation_collate_fn(self): + """ + Return validation batch collate function for the PyTorch Dataloader. + By default returns None that uses the default PyTorch collate + """ + + return None + + def postprocess_batch_predictions(self, output: Dict) -> Dict: + if "predicted_answer_ids" in output.keys(): + predicted_text = [ + self.tokenizer.decode(ids, skip_special_tokens=True).strip() + for ids in output["predicted_answer_ids"] + ] + + output["predicted_text"] = np.array(predicted_text) + del output["predicted_answer_ids"] + return output + + @staticmethod + def clean_output( + output: Dict, + cfg: Any, + ): + output["predicted_text"] = output["predicted_text"].tolist() + for j in range(len(output["predicted_text"])): + curr_text = output["predicted_text"][j].strip() + for stop_token in cfg.tokenizer._stop_words: + if curr_text.find(stop_token) != -1: + curr_text = curr_text[: curr_text.find(stop_token)] + output["predicted_text"][j] = curr_text.strip() + + return output + + def postprocess_output(self, cfg, df: pd.DataFrame, output: Dict) -> Dict: + if not cfg.prediction.metric == "Perplexity": + output = self.clean_output(output, cfg) + + output["target_text"] = self.conversation_chain_handler.answers + + metric_func, _, _ = cfg.prediction.metric_class.get(cfg.prediction.metric) + + if "GPT" in cfg.prediction.metric: + metrics, explanations = metric_func( + cfg, + output, + df, + raw_results=True, + ) + output["explanations"] = explanations + else: + metrics = metric_func( + cfg, + output, + df, + ) + output["metrics"] = metrics + + 
return output + + def format_output( + self, cfg, df: pd.DataFrame, output: Dict + ) -> Tuple[Dict, pd.DataFrame]: + output = { + key: value + for key, value in output.items() + if key not in ["loss", "target", "losses"] + } + output.pop("target_text", None) + + # in case limit_chained_samples is True, only last answer is predicted + end_conversation_ids = ( + self.conversation_chain_handler.get_conversation_end_ids() + ) + + if "predicted_text" in output.keys(): + output["predicted_text"] = np.array(output["predicted_text"]) + + if "logits" in output.keys(): + output["logits"] = np.array(output["logits"].float()) + + if isinstance(cfg.dataset.prompt_column, tuple): + for col in cfg.dataset.prompt_column: + output[col] = df.loc[end_conversation_ids, col].values + else: + output[cfg.dataset.prompt_column] = df.loc[ + end_conversation_ids, cfg.dataset.prompt_column + ].values + + if "predicted_text" in output.keys(): + df[f"pred_{cfg.dataset.answer_column}"] = ( + "NO ANSWER GENERATED. " + "ONLY LAST ANSWER OF A CONVERSATION IS PREDICTED." + ) + df.loc[end_conversation_ids, f"pred_{cfg.dataset.answer_column}"] = output[ + "predicted_text" + ] + return output, df + + @classmethod + def sanity_check(cls, df: pd.DataFrame, cfg: Any, mode: str = "train"): + """ + Quick check whether Dataframe and configurations are correctly set. + """ + if ( + cfg.dataset.parent_id_column is not None + and cfg.dataset.parent_id_column in df.columns + and "id" in df.columns + ): + assert ( + df[cfg.dataset.parent_id_column] != df["id"] + ).all(), "Parent id column is the same as id column for some rows" + assert (df[cfg.dataset.parent_id_column].fillna("") == "").sum() > 0, ( + "Did not find any conversation start. " + "Please ensure that some parent ids are empty." + ) + + assert cfg.dataset.answer_column in df.columns, ( + f"Answer column {cfg.dataset.answer_column} not found in the " + f"{mode} DataFrame." 
+ ) + assert df.shape[0] == df[[cfg.dataset.answer_column]].dropna().shape[0], ( + f"The {mode} DataFrame" + f" column {cfg.dataset.answer_column}" + " contains missing values." + ) + if cfg.dataset.parent_id_column != "None": + assert ( + "id" in df.columns + ), "When using parent column, the dataframe requires an 'id' column. " + + def get_labels(self, prompt_encodings, answer_encodings): + labels = torch.cat( + [ + torch.cat([prompt_encoding, answer_encoding]) + for prompt_encoding, answer_encoding in zip( + prompt_encodings, answer_encodings + ) + ] + ).clone() + + if self.cfg.dataset.mask_prompt_labels: + prompt_mask = torch.cat( + [ + torch.cat( + [ + torch.ones_like(prompt_encoding), + torch.zeros_like(answer_encoding), + ] + ) + for prompt_encoding, answer_encoding in zip( + prompt_encodings, answer_encodings + ) + ] + ).to(torch.bool) + labels.masked_fill_(prompt_mask, -100) + if self.cfg.dataset.add_eos_token_to_answer: + # eos_token may be equal to pad_token. Add the label back manually. + labels[-1] = self.tokenizer.eos_token_id + if self.cfg.tokenizer.max_length < len(labels): + labels = labels[-self.cfg.tokenizer.max_length :] + + sample = dict(labels=torch.full((self.cfg.tokenizer.max_length,), -100)) + sample["labels"][-len(labels) :] = labels + return sample + + def get_encodings(self, input_text_dict: Dict[str, List[str]]): + """ + Get encodings for a single conversation history. + Args: + input_text_dict: A dictionary containing the input text for a single sample. + Contains the keys "systems", "prompts", "answers". + System may be an empty string. 
+ """ + encodings = [ + self._get_sample_encoding(system, prompt, answer) + for idx, (system, prompt, answer) in enumerate( + zip( + input_text_dict["systems"], + input_text_dict["prompts"], + input_text_dict["answers"], + ) + ) + ] + + if self.mode == "train": + encodings = self.augment_data(encodings) + + system_encoding = encodings[0][0] + prompt_encodings = [encoding[1] for encoding in encodings] + answer_encodings = [encoding[2] for encoding in encodings] + # concatenate system encoding with root prompt encoding + prompt_encodings[0] = torch.cat([system_encoding, prompt_encodings[0]]) + return ( + system_encoding, + prompt_encodings, + answer_encodings, + ) + + def augment_data(self, encodings): + parent_encodings = encodings[:-1] + # randomly skip parent + parent_encodings = [ + encoding + for idx, encoding in enumerate(parent_encodings) + if np.random.random() > self.cfg.augmentation.skip_parent_probability + ] + # randomly replace parent with another parent + if np.random.random() < self.cfg.augmentation.random_parent_probability: + idx = np.random.randint(len(self.conversation_chain_handler.prompts)) + parent_encodings = [ + self._get_sample_encoding( + self.parse_system( + self.cfg, self.conversation_chain_handler.systems[idx] + ), + self.parse_prompt( + self.cfg, self.conversation_chain_handler.prompts[idx] + ), + self.conversation_chain_handler.answers[idx], + ) + ] + parent_encodings[1:] + encodings = parent_encodings + [encodings[-1]] + return encodings + + def _get_sample_encoding(self, system: str, prompt: str, answer: str) -> List: + if len(system) > 0: + system_encoding = self.encode( + self.tokenizer, system, self.cfg.tokenizer.max_length_prompt, "right" + )["input_ids"] + else: + system_encoding = torch.empty(0) + prompt_encoding = self.encode( + self.tokenizer, prompt, self.cfg.tokenizer.max_length_prompt, "left" + )["input_ids"] + max_length_answer = self.cfg.tokenizer.max_length_answer - int( + self.cfg.dataset.add_eos_token_to_answer + ) + 
answer_encoding = self.encode( + self.tokenizer, answer, max_length_answer, "right" + )["input_ids"] + if self.cfg.dataset.add_eos_token_to_answer: + answer_encoding = torch.cat( + [ + answer_encoding, + torch.Tensor([self.tokenizer.eos_token_id]), + ], + dim=0, + ) + + return [system_encoding, prompt_encoding, answer_encoding] + + @staticmethod + def pad_tokens( + input_ids, + attention_mask, + max_length, + pad_token_id, + direction="left", + prefix="", + ): + sample = {} + + if max_length < len(input_ids): + input_ids = input_ids[-max_length:] + attention_mask = attention_mask[-max_length:] + + if len(input_ids) > 0: + if direction == "left": + sample[f"{prefix}input_ids"] = torch.full((max_length,), pad_token_id) + sample[f"{prefix}input_ids"][-len(input_ids) :] = input_ids + sample[f"{prefix}attention_mask"] = torch.zeros(max_length) + sample[f"{prefix}attention_mask"][-len(input_ids) :] = attention_mask + else: + sample[f"{prefix}input_ids"] = torch.full((max_length,), pad_token_id) + sample[f"{prefix}input_ids"][: len(input_ids)] = input_ids + sample[f"{prefix}attention_mask"] = torch.zeros(max_length) + sample[f"{prefix}attention_mask"][: len(input_ids)] = attention_mask + else: + # Pad everything if empty (continued pretraining) + sample[f"{prefix}input_ids"] = torch.full((max_length,), pad_token_id) + sample[f"{prefix}attention_mask"] = torch.zeros(max_length) + + return sample + + @staticmethod + def encode(tokenizer, text: str, max_length: int, truncation_side: str) -> Dict: + encodings = tokenizer(text, return_tensors="pt", add_special_tokens=False) + encodings["input_ids"] = encodings["input_ids"][0] + encodings["attention_mask"] = encodings["attention_mask"][0] + if truncation_side == "right": + encodings["input_ids"] = encodings["input_ids"][:max_length] + encodings["attention_mask"] = encodings["attention_mask"][:max_length] + else: + encodings["input_ids"] = encodings["input_ids"][-max_length:] + encodings["attention_mask"] = 
For chained conversations, rejected answers are equal to the normal answers up to the last answer. The last answers are then different.
+ """ + + def __init__(self, df: pd.DataFrame, cfg: Any, mode: str = "train"): + assert ( + cfg.dataset.limit_chained_samples + ), "Need to enable limit_chained_samples for dpo training" + super().__init__(df=df, cfg=cfg, mode=mode) + + with PatchedAttribute( + cfg.dataset, "answer_column", cfg.dataset.rejected_answer_column + ): + if cfg.dataset.rejected_prompt_column != "None": + with PatchedAttribute( + cfg.dataset, "prompt_column", cfg.dataset.rejected_prompt_column + ): + self.conversation_chain_handler_rejected = ConversationChainHandler( + self.df, cfg + ) + else: + self.conversation_chain_handler_rejected = ConversationChainHandler( + self.df, cfg + ) + + def __getitem__(self, idx: int) -> Dict: + """Reads a single text observation.""" + chosen_sample = super().__getitem__(idx) + keys = ["input_ids", "attention_mask", "token_type_ids", "labels"] + prompt_keys = [ + "prompt_input_ids", + "prompt_attention_mask", + "prompt_token_type_ids", + ] + prompt_sample = {k: v for k, v in chosen_sample.items() if k in prompt_keys} + chosen_sample = { + f"chosen_{k}": v for k, v in chosen_sample.items() if k in keys + } + + with PatchedAttribute( + self, "conversation_chain_handler", self.conversation_chain_handler_rejected + ): + rejected_sample = { + f"rejected_{k}": v + for k, v in super().__getitem__(idx).items() + if k in keys + } + + sample = {**chosen_sample, **rejected_sample, **prompt_sample} + return sample + + def get_labels(self, prompt_encodings, answer_encodings): + """ + Mask all but the last answer. 
+ """ + labels = torch.cat( + [ + torch.cat( + [ + torch.full_like( + prompt_encoding, + fill_value=-100, + ), + torch.full_like( + answer_encoding, + fill_value=-100, + ), + ] + ) + for prompt_encoding, answer_encoding in zip( + prompt_encodings, answer_encodings + ) + ] + ).clone() + + if len(answer_encodings[-1]): + # empty answers would create a RuntimeError + labels[-len(answer_encodings[-1]) :] = answer_encodings[-1] + + if self.cfg.dataset.add_eos_token_to_answer: + # eos_token may be equal to pad_token. Add the label back manually. + labels[-1] = self.tokenizer.eos_token_id + if self.cfg.tokenizer.max_length < len(labels): + labels = labels[-self.cfg.tokenizer.max_length :] + + sample = dict(labels=torch.full((self.cfg.tokenizer.max_length,), -100)) + sample["labels"][-len(labels) :] = labels + return sample + + @classmethod + def sanity_check(cls, df: pd.DataFrame, cfg: Any, mode: str = "train"): + """ + Quick check whether Dataframe and configurations are correctly set. + """ + super().sanity_check(df=df, cfg=cfg, mode=mode) + assert cfg.dataset.rejected_answer_column in df.columns, ( + f"Answer column {cfg.dataset.rejected_answer_column} not found in the " + f"{mode} DataFrame." 
class CustomDataset(CausalLMCustomDataset):
    """RLHF dataset: the chained-conversation causal-LM dataset with the final
    ground-truth answer removed, so the policy generates it at runtime."""

    def __init__(self, df: pd.DataFrame, cfg: Any, mode: str = "train"):
        # RLHF is incompatible with several causal-LM dataset options;
        # fail fast with an explicit message instead of training silently wrong.
        requirements = [
            (
                cfg.dataset.system_column == "None",
                "RLHF is not compatible with system column.",
            ),
            (
                cfg.dataset.limit_chained_samples is False,
                "RLHF is not compatible with limit_chained_samples.",
            ),
            (
                cfg.dataset.mask_prompt_labels is True,
                "RLHF is not compatible with mask_prompt_labels.",
            ),
        ]
        for ok, message in requirements:
            assert ok, message
        super().__init__(df, cfg, mode)

    def __getitem__(self, idx: int) -> Dict:
        """Read one observation and attach the full prompt text used by the
        reward model."""
        sample = super().__getitem__(idx)
        sample["reward_model_prompt_text"] = TEXT_SEPARATOR.join(
            self.get_chained_prompt_text_list(idx)
        )
        return sample

    def get_labels(self, prompt_encodings, answer_encodings):
        # During RLHF training the reward signal replaces supervised labels.
        if self.mode == "train":
            return dict()
        return super().get_labels(prompt_encodings, answer_encodings)

    def get_encodings(self, input_text_dict):
        system_encoding, prompt_encodings, answer_encodings = super().get_encodings(
            input_text_dict
        )
        # Drop the last ground-truth answer: RLHF generates it from the prompt.
        answer_encodings[-1] = torch.empty(0)
        return system_encoding, prompt_encodings, answer_encodings

    def postprocess_batch_predictions(self, output: Dict) -> Dict:
        """Decode generated token ids into stripped text predictions."""
        if "predicted_answer_ids" in output.keys():
            decoded = [
                self.tokenizer.decode(ids, skip_special_tokens=True).strip()
                for ids in output["predicted_answer_ids"]
            ]

            output["predicted_text"] = np.array(decoded)
            output["predicted_answer_ids"] = output["predicted_answer_ids"].detach()
        return output

    def augment_data(self, encodings):
        # No augmentation for RLHF samples.
        return encodings

    def get_chained_prompt_text_list(self, idx: int) -> List[str]:
        """Return the chained conversation prompt split on TEXT_SEPARATOR."""
        text_dict = self.conversation_chain_handler[idx]
        history_parts = []
        for prompt, answer in zip(
            text_dict["prompts"][:-1], text_dict["answers"][:-1]
        ):
            history_parts.append(prompt + TEXT_SEPARATOR + answer + TEXT_SEPARATOR)
        full_prompt = (
            text_dict["systems"][0] + "".join(history_parts) + text_dict["prompts"][-1]
        )
        return full_prompt.split(TEXT_SEPARATOR)
logger = logging.getLogger(__name__)


# NOTE(review): this and the empty-string special-token literals below look
# like they were stripped by text extraction (e.g. "<_TEXT_SEPARATOR_>",
# "</s>" upstream) — confirm against the original file.
TEXT_SEPARATOR = ""


def get_texts(df, cfg, separator=None):
    """Return the prompt texts of ``df`` as a numpy array of strings.

    Single prompt column: the column cast to ``str``. Multiple prompt columns:
    each column is cast to ``str`` and the columns are joined with
    ``separator`` (falls back to the tokenizer sep token stored on ``cfg``).
    """
    if isinstance(cfg.dataset.prompt_column, str):
        # single column dataset
        texts = df[cfg.dataset.prompt_column].astype(str)
        texts = texts.values
    else:
        # multi-column dataset - prepend (if necessary) and join
        columns = list(cfg.dataset.prompt_column)

        for column in columns:
            df[column] = df[column].astype(str)

        if separator is None:
            separator = getattr(cfg, "_tokenizer_sep_token", TEXT_SEPARATOR)

        join_str = f" {separator} "
        texts = df[columns].astype(str)
        texts = texts.apply(lambda x: join_str.join(x), axis=1).values

    return texts


def get_tokenizer(cfg: Any):
    """Load and configure the backbone tokenizer described by ``cfg``.

    Besides loading, this fills in missing special tokens, records tokenizer
    metadata on ``cfg`` (mask token id, sep/eos tokens, vocab size) and
    registers stop tokens when the config predicts with them.
    """
    kwargs = dict(
        revision=cfg.environment.huggingface_branch,
        use_fast=cfg.tokenizer.use_fast,
        trust_remote_code=cfg.environment.trust_remote_code,
        token=os.getenv("HUGGINGFACE_TOKEN"),
    )

    try:
        tokenizer = AutoTokenizer.from_pretrained(cfg.llm_backbone, **kwargs)
    except TypeError as e:
        error_message = str(e)
        if "token" in error_message:
            # TypeError: RWForCausalLM.__init__() got
            # an unexpected keyword argument 'token'
            kwargs.pop("token")
            tokenizer = AutoTokenizer.from_pretrained(cfg.llm_backbone, **kwargs)
        elif "not a string" in error_message and "add_prefix_space" in kwargs:
            # https://github.com/h2oai/h2o-llmstudio/issues/623
            # BUGFIX: kwargs does not always contain "add_prefix_space"; an
            # unconditional pop() raised KeyError and masked the original error.
            kwargs.pop("add_prefix_space")
            tokenizer = AutoTokenizer.from_pretrained(cfg.llm_backbone, **kwargs)
        else:
            raise e

    tokenizer.padding_side = getattr(
        cfg.tokenizer, "_padding_side", tokenizer.padding_side
    )

    # if the eos token is an empty string, we assign it to a token
    # NOTE(review): the literals here appear extraction-stripped — TODO confirm.
    if tokenizer.eos_token == "":
        tokenizer.add_special_tokens({"eos_token": ""})
        tokenizer.eos_token = ""

    # backfill missing special tokens from whatever is available
    if tokenizer.pad_token is None:
        if tokenizer.unk_token is not None:
            tokenizer.pad_token = tokenizer.unk_token
        else:
            tokenizer.pad_token = tokenizer.eos_token
    if tokenizer.bos_token is None:
        tokenizer.bos_token = tokenizer.eos_token
    if tokenizer.cls_token is None:
        tokenizer.cls_token = tokenizer.eos_token
    if tokenizer.sep_token is None:
        tokenizer.sep_token = tokenizer.eos_token

    cfg._tokenizer_sep_token = tokenizer.sep_token

    if tokenizer.unk_token_id is not None:
        cfg._tokenizer_mask_token_id = tokenizer.unk_token_id
    elif tokenizer.mask_token_id is not None:
        cfg._tokenizer_mask_token_id = tokenizer.mask_token_id
    elif tokenizer.pad_token_id is not None:
        cfg._tokenizer_mask_token_id = tokenizer.pad_token_id
    else:
        # setting the mask token id to the last token in the vocabulary
        # this usually is a safe choice and mostly refers to eos token
        cfg._tokenizer_mask_token_id = len(tokenizer) - 1

    cfg._tokenizer_eos_token = tokenizer.eos_token

    if hasattr(cfg.prediction, "stop_tokens"):
        set_stop_token_ids(cfg, tokenizer)
    cfg.tokenizer._vocab_length = len(tokenizer)

    return tokenizer


def set_stop_token_ids(cfg, tokenizer):
    """Collect configured stop words, add them to the tokenizer vocabulary if
    needed, and cache their token-id tensors on ``cfg``."""
    cfg.tokenizer._stop_words = list(
        filter(None, cfg.prediction.stop_tokens.split(","))
    )
    for stop_word in [
        cfg.dataset.text_system_start,
        cfg.dataset.text_prompt_start,
        cfg.dataset.text_answer_separator,
    ]:
        # stop words are configured as escaped strings (e.g. "\\n")
        stop_word = codecs.decode(stop_word, "unicode_escape").strip()
        if (
            stop_word != ""
            and cfg.tokenizer.add_prompt_answer_tokens
            and (stop_word not in tokenizer.get_vocab())
        ):
            tokenizer.add_tokens([stop_word])
        cfg.tokenizer._stop_words.append(stop_word)
    cfg.tokenizer._stop_words = [
        stop_word for stop_word in cfg.tokenizer._stop_words if stop_word != ""
    ]
    cfg.tokenizer._stop_words_ids = []
    for stop_word in set(cfg.tokenizer._stop_words):
        cfg.tokenizer._stop_words_ids.append(
            tokenizer(stop_word, return_tensors="pt", add_special_tokens=False)[
                "input_ids"
            ][0]
        )
    if cfg.environment._local_rank == 0:
        logger.info(f"Stop token ids: {cfg.tokenizer._stop_words_ids}")
STYLE = "{{text_prompt_start}}{instruction}{{end_of_sentence}}{{text_answer_separator}}"


class H2OTextGenerationPipeline(TextGenerationPipeline):
    """Text-generation pipeline that wraps prompts in the H2O prompt template
    and strips the template markers from the generated text again."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Prompt template; ``{instruction}`` is filled per request.
        self.prompt = STYLE

    def preprocess(
        self, prompt_text, prefix="", handle_long_generation=None, **generate_kwargs
    ):
        wrapped = self.prompt.format(instruction=prompt_text)
        return super().preprocess(
            wrapped,
            prefix=prefix,
            handle_long_generation=handle_long_generation,
            **generate_kwargs,
        )

    def postprocess(
        self,
        model_outputs,
        return_type=ReturnType.FULL_TEXT,
        clean_up_tokenization_spaces=True,
    ):
        records = super().postprocess(
            model_outputs,
            return_type=return_type,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
        )
        for record in records:
            # Keep only the answer: the text after the answer separator and
            # before any follow-up prompt marker.
            answer_part = record["generated_text"].split(
                "{{text_answer_separator}}"
            )[1]
            record["generated_text"] = (
                answer_part.strip().split("{{text_prompt_start}}")[0].strip()
            )
        return records
Args: + cfg: configuration + + Returns: + Dict of config elements + """ + + items: Dict = {} + type_annotations = cfg.get_annotations() + + cfg_dict = cfg.__dict__ + + cfg_dict = {key: cfg_dict[key] for key in cfg._get_order(warn_if_unset=False)} + + for k, v in cfg_dict.items(): + if k.startswith("_") or cfg._get_visibility(k) < 0: + continue + + if any([x in k for x in ["api"]]): + continue + + if dataclasses.is_dataclass(v): + elements_group = get_cfg(cfg=v) + t = elements_group + items = {**items, **t} + else: + type_annotation = type_annotations[k] + if type_annotation == float: + items[k] = float(v) + else: + items[k] = v + + return items + + +class NeptuneLogger: + def __init__(self, cfg: Any): + import neptune as neptune + from neptune.utils import stringify_unsupported + + if cfg.logging._neptune_debug: + mode = "debug" + else: + mode = "async" + + self.logger = neptune.init_run( + project=cfg.logging.neptune_project, + api_token=os.getenv("NEPTUNE_API_TOKEN", ""), + name=cfg.experiment_name, + mode=mode, + capture_stdout=False, + capture_stderr=False, + source_files=[], + ) + + self.logger["cfg"] = stringify_unsupported(get_cfg(cfg)) + + def log(self, subset: str, name: str, value: Any, step: Optional[int] = None): + name = f"{subset}/{name}" + self.logger[name].append(value, step=step) + + +class LocalLogger: + def __init__(self, cfg: Any): + logging.getLogger("sqlitedict").setLevel(logging.ERROR) + + self.logs = f"{cfg.output_directory}/charts.db" + + params = get_cfg(cfg) + + with SqliteDict(self.logs) as logs: + logs["cfg"] = params + logs.commit() + + def log(self, subset: str, name: str, value: Any, step: Optional[int] = None): + if subset in PLOT_ENCODINGS: + with SqliteDict(self.logs) as logs: + if subset not in logs: + subset_dict = dict() + else: + subset_dict = logs[subset] + subset_dict[name] = value + logs[subset] = subset_dict + logs.commit() + return + + # https://github.com/h2oai/wave/issues/447 + if np.isnan(value): + value = None + else: 
+ value = float(value) + with SqliteDict(self.logs) as logs: + if subset not in logs: + subset_dict = dict() + else: + subset_dict = logs[subset] + if name not in subset_dict: + subset_dict[name] = {"steps": [], "values": []} + + subset_dict[name]["steps"].append(step) + subset_dict[name]["values"].append(value) + + logs[subset] = subset_dict + logs.commit() + + +class DummyLogger: + def __init__(self, cfg: Optional[Any] = None): + return + + def log(self, subset: str, name: str, value: Any, step: Optional[int] = None): + return + + +class MainLogger: + """Main logger""" + + def __init__(self, cfg: Any): + self.loggers = { + "local": LocalLogger(cfg), + "external": Loggers.get(cfg.logging.logger), + } + + try: + self.loggers["external"] = self.loggers["external"](cfg) + except Exception as e: + logger.warning( + f"Error when initializing logger. " + f"Disabling custom logging functionality. " + f"Please ensure logger configuration is correct and " + f"you have a stable Internet connection: {e}" + ) + self.loggers["external"] = DummyLogger(cfg) + + def reset_external(self): + self.loggers["external"] = DummyLogger() + + def log(self, subset: str, name: str, value: str | float, step: float = None): + for k, logger in self.loggers.items(): + if "validation_predictions" in name and k == "external": + continue + if subset == "internal" and not isinstance(logger, LocalLogger): + continue + logger.log(subset=subset, name=name, value=value, step=step) + + +class Loggers: + """Loggers factory.""" + + _loggers = {"None": DummyLogger, "Neptune": NeptuneLogger} + + @classmethod + def names(cls) -> List[str]: + return sorted(cls._loggers.keys()) + + @classmethod + def get(cls, name: str) -> Any: + """Access to Loggers. 
logger = logging.getLogger(__name__)


class CrossEntropyLoss(nn.Module):
    """Multi-class cross-entropy on raw logits; labels are flattened to 1-D."""

    def __init__(self, cfg: Any):
        super().__init__()
        self.cfg = cfg
        self.loss_fn = nn.CrossEntropyLoss()

    def forward(self, logits, labels):
        targets = labels.reshape(-1).long()
        return self.loss_fn(logits, targets)


class BinaryCrossEntropyLoss(nn.Module):
    """Binary cross-entropy on raw logits (sigmoid is applied internally)."""

    def __init__(self, cfg: Any):
        super().__init__()
        self.cfg = cfg
        self.loss_fn = nn.BCEWithLogitsLoss()

    def forward(self, logits, labels):
        return self.loss_fn(logits, labels)


class Losses:
    """Losses factory."""

    _losses = {
        "CrossEntropyLoss": CrossEntropyLoss,
        "BinaryCrossEntropyLoss": BinaryCrossEntropyLoss,
    }

    @classmethod
    def names(cls) -> KeysView:
        return cls._losses.keys()

    @classmethod
    def get(cls, name: str) -> Any:
        """Look up a loss class by name.

        Args:
            name: losses name
        Returns:
            A class to build the Losses (CrossEntropyLoss if unknown)
        """
        return cls._losses.get(name, CrossEntropyLoss)
logger = logging.getLogger(__name__)


class TokenAveragedCrossEntropyLoss(nn.Module):
    """Cross-entropy averaged over all (shifted) tokens in the batch."""

    def __init__(self, cfg: Any):
        super().__init__()
        self.cfg = cfg
        self.loss_fn = nn.CrossEntropyLoss()

    def forward(self, logits, labels):
        # Causal shift: logits at position t predict the token at t + 1.
        pred = logits[..., :-1, :].contiguous()
        gold = labels[..., 1:].contiguous()
        return self.loss_fn(pred.view(-1, pred.size(-1)), gold.view(-1))


class SampleAveragedCrossEntropyLoss(nn.Module):
    """Cross-entropy averaged per sample first, then over the batch."""

    def __init__(self, cfg: Any):
        super().__init__()
        self.cfg = cfg
        self.loss_fn = nn.CrossEntropyLoss()

    def forward(self, logits, labels):
        shift_logits = logits[..., :-1, :].contiguous()
        shift_labels = labels[..., 1:].contiguous()

        batch_size = labels.shape[0]
        total = 0
        for sample_idx in range(batch_size):
            total = total + self.loss_fn(
                shift_logits[sample_idx], shift_labels[sample_idx]
            )
        return total / batch_size


class Losses:
    """Losses factory."""

    _losses = {
        "TokenAveragedCrossEntropy": TokenAveragedCrossEntropyLoss,
        "SampleAveragedCrossEntropy": SampleAveragedCrossEntropyLoss,
    }

    @classmethod
    def names(cls) -> KeysView:
        return cls._losses.keys()

    @classmethod
    def get(cls, name: str) -> Any:
        """Look up a loss class by name.

        Args:
            name: losses name
        Returns:
            A class to build the Losses (TokenAveragedCrossEntropyLoss if
            unknown)
        """
        return cls._losses.get(name, TokenAveragedCrossEntropyLoss)
# Loss implementation based upon
# https://github.com/eric-mitchell/direct-preference-optimization

logger = logging.getLogger(__name__)


class DPOLoss(nn.Module):
    """
    Implements
    "Direct Preference Optimization:
    Your Language Model is Secretly a Reward Model"
    from https://arxiv.org/abs/2305.18290
    """

    def __init__(self, cfg: Any):
        super().__init__()
        self.cfg = cfg

    def forward(
        self,
        policy_chosen_logps: torch.FloatTensor,
        policy_rejected_logps: torch.FloatTensor,
        reference_chosen_logps: torch.FloatTensor,
        reference_rejected_logps: torch.FloatTensor,
    ):
        """Return (mean loss, mean chosen reward, mean rejected reward)."""
        policy_logratios = policy_chosen_logps - policy_rejected_logps
        reference_logratios = reference_chosen_logps - reference_rejected_logps
        losses = self.get_losses(logits=policy_logratios - reference_logratios)

        beta = self.cfg.training.beta
        chosen_rewards = beta * (policy_chosen_logps - reference_chosen_logps).detach()
        rejected_rewards = (
            beta * (policy_rejected_logps - reference_rejected_logps).detach()
        )
        return losses.mean(), chosen_rewards.mean(), rejected_rewards.mean()

    def get_losses(self, logits):
        # beta is the DPO temperature, typically in the range 0.1 to 0.5;
        # the reference model is ignored as beta -> 0.
        # label_smoothing encodes uncertainty about the preference labels and
        # yields a conservative (cDPO) loss; 0 recovers the original DPO loss.
        # See https://ericmitchell.ai/cdpo.pdf for more details.
        label_smoothing = 0
        beta = self.cfg.training.beta

        losses = (
            -F.logsigmoid(beta * logits) * (1 - label_smoothing)
            - F.logsigmoid(-beta * logits) * label_smoothing
        )
        return losses


class KTOPairLoss(nn.Module):
    """
    Implements original paired KTO implementation
    Adopted from https://github.com/ContextualAI/HALOs
    and https://github.com/huggingface/trl
    """

    def __init__(self, cfg: Any):
        super().__init__()
        self.cfg = cfg

    def forward(
        self,
        policy_chosen_logps: torch.FloatTensor,
        policy_rejected_logps: torch.FloatTensor,
        reference_chosen_logps: torch.FloatTensor,
        reference_rejected_logps: torch.FloatTensor,
    ):
        beta = self.cfg.training.beta

        # KL estimates are clamped at zero, as in the reference implementation
        chosen_KL = (policy_chosen_logps - reference_chosen_logps).mean().clamp(min=0)
        rejected_KL = (
            (policy_rejected_logps - reference_rejected_logps).mean().clamp(min=0)
        )

        chosen_logratios = policy_chosen_logps - reference_chosen_logps
        rejected_logratios = policy_rejected_logps - reference_rejected_logps
        losses = torch.cat(
            (
                1 - F.sigmoid(beta * (chosen_logratios - rejected_KL)),
                1 - F.sigmoid(beta * (chosen_KL - rejected_logratios)),
            ),
            0,
        )

        chosen_rewards = (beta * chosen_logratios.detach()).float()
        rejected_rewards = (beta * rejected_logratios.detach()).float()

        return losses.mean(), chosen_rewards.mean(), rejected_rewards.mean()


class HingeLoss(DPOLoss):
    """DPO variant using a hinge on the scaled log-ratio difference."""

    def get_losses(self, logits):
        return torch.relu(1 - self.cfg.training.beta * logits)


class IPOLoss(DPOLoss):
    """
    Implements "A General Theoretical Paradigm
    to Understand Learning from Human Preferences"
    from https://arxiv.org/pdf/2310.12036.pdf
    """

    def get_losses(self, logits):
        # eqn (17) of the https://arxiv.org/pdf/2310.12036.pdf
        # where beta is the real, positive KL parameter for the IPO loss,
        # denoted by tau in the paper (see also eqn (6)).
        return (logits - 1 / (2 * self.cfg.training.beta)) ** 2


class Losses:
    """Losses factory."""

    _losses = {
        "DPOLoss": DPOLoss,
        "HingeLoss": HingeLoss,
        "IPOLoss": IPOLoss,
        "KTOPairLoss": KTOPairLoss,
    }

    @classmethod
    def names(cls) -> KeysView:
        return cls._losses.keys()

    @classmethod
    def get(cls, name: str) -> Any:
        """Look up a loss class by name.

        Args:
            name: losses name
        Returns:
            A class to build the Losses (DPOLoss if unknown)
        """
        return cls._losses.get(name, DPOLoss)


# see https://github.com/huggingface/trl/commit/29d439a2043edf4455b05cae5a1e2ade69d22794
LOSS_REDUCTION = {
    "DPOLoss": False,
    "KTOPairLoss": False,
    "HingeLoss": True,
    "IPOLoss": True,
}
def accuracy_score(
    cfg: Any,
    results: Dict,
    val_df: pd.DataFrame,
    raw_results: bool = False,
) -> Union[NDArray, Tuple[NDArray, List[str]]]:
    """Per-sample exact-match accuracy between predicted and target class ids."""
    predicted = np.array([int(text) for text in results["predicted_text"]])
    target = np.array([int(text) for text in results["target_text"]])
    return (predicted == target).astype("float")


def auc_score(
    cfg: Any,
    results: Dict,
    val_df: pd.DataFrame,
    raw_results: bool = False,
) -> Union[NDArray, Tuple[NDArray, List[str]]]:
    """ROC-AUC on raw logits; one-vs-rest for the multi-class case."""
    logits = np.array(results["logits"])
    target = np.array([int(text) for text in results["target_text"]])
    if cfg.dataset.num_classes > 1:
        # one-hot encode targets for multi-class AUC
        target = np.eye(cfg.dataset.num_classes)[target]
    return roc_auc_score(target, logits, multi_class="ovr")


def logloss_score(
    cfg: Any,
    results: Dict,
    val_df: pd.DataFrame,
    raw_results: bool = False,
) -> Union[NDArray, Tuple[NDArray, List[str]]]:
    """Log-loss on (softmaxed, for multi-class) logits."""
    logits = np.array(results["logits"])
    target = np.array([int(text) for text in results["target_text"]])
    if cfg.dataset.num_classes > 1:
        target = np.eye(cfg.dataset.num_classes)[target]
        logits = softmax(logits, axis=1)
    # BUGFIX: sklearn deprecated (1.3) and removed (1.5) the ``eps`` argument
    # of log_loss; clip explicitly to keep the previous eps=1e-7 behavior.
    logits = np.clip(logits, 1e-7, 1 - 1e-7)
    return log_loss(target, logits)


class Metrics:
    """
    Metrics factory. Returns:
        - metric value
        - should it be maximized or minimized
        - Reduce function

    Maximized or minimized is needed for early stopping (saving best checkpoint)
    Reduce function to generate a single metric value, usually "mean" or "none"
    """

    _metrics = {
        "AUC": (auc_score, "max", "mean"),
        "Accuracy": (accuracy_score, "max", "mean"),
        "LogLoss": (logloss_score, "min", "mean"),
    }

    @classmethod
    def names(cls) -> List[str]:
        return sorted(cls._metrics.keys())

    @classmethod
    def get(cls, name: str) -> Any:
        """Access to Metrics.

        Args:
            name: metrics name
        Returns:
            A class to build the Metrics (LogLoss if unknown)
        """
        return cls._metrics.get(name, cls._metrics["LogLoss"])
api_version=os.getenv("OPENAI_API_VERSION", "2023-05-15"), + # https://learn.microsoft.com/en-us/azure/cognitive-services/openai/how-to/create-resource?pivots=web-portal#create-a-resource + azure_endpoint=endpoint, + max_retries=LLM_RETRY_ATTEMPTS, + timeout=LLM_TIMEOUT, # unit is seconds + ) + logger.info("Using Microsoft Azure Endpoint for OpenAI API") + logger.info(f"Endpoint: {endpoint}") + else: + client = OpenAI( + api_key=os.getenv("OPENAI_API_KEY", ""), + base_url=os.getenv("OPENAI_API_BASE", "https://api.openai.com/v1"), + max_retries=LLM_RETRY_ATTEMPTS, + timeout=LLM_TIMEOUT, # unit is seconds + ) + response = client.chat.completions.create( + model=model, + messages=[ + { + "role": "system", + "content": "You are a helpful and precise assistant " + "for checking the quality of the answer.", + }, + { + "role": "user", + "content": template, + }, + ], + temperature=0.0, + max_tokens=1024, + ) + ret = response.choices[0].message.content + try: + score = float(ret.split("SCORE:")[-1].split()[0].split("/")[0]) + except ValueError: + raise ValueError(f"Could not parse score from response: {ret}") + return score, ret + + +def rate_reply(filled_eval_template, model): + try: + return call_openai_api(filled_eval_template, model) + except Exception as e: + logger.warning(f"Exception caught in api call: {e}") + return 0.0, "" + + +def gpt_score( + cfg: Any, + results: Dict, + val_df: pd.DataFrame, + raw_results: bool = False, +) -> Union[NDArray, Tuple[NDArray, List[str]]]: + vdf = val_df.copy() + vdf["_PROMPT"] = get_texts(val_df, cfg, separator="") + vdf["_PREDICTED_TEXT"] = results["predicted_text"] + vdf["_TARGET_TEXT"] = results["target_text"] + + model = cfg.prediction.metric_gpt_model + template_name = cfg.prediction.metric_gpt_template + + if template_name == "mt-bench": + eval_template = open("prompts/mt-bench/general.txt", "r").read() + else: + eval_template = open(f"prompts/{template_name}.txt", "r").read() + vdf["filled_eval_template"] = eval_template + 
class Perplexity(nn.Module):
    """Perplexity of shifted next-token predictions.

    With ``reduce=True`` the batch mean is returned, otherwise one value per
    sample.
    """

    def __init__(self, cfg: Any, reduce: bool = True):
        super().__init__()
        self.cfg = cfg
        self.loss_fn = nn.CrossEntropyLoss()
        self.reduce = reduce

    def forward(self, logits, labels):
        # Causal shift: logits at position t predict the token at t + 1.
        shifted_logits = logits[..., :-1, :].contiguous()
        shifted_labels = labels[..., 1:].contiguous()

        per_sample = torch.stack(
            [
                self.loss_fn(shifted_logits[i], shifted_labels[i])
                for i in range(labels.shape[0])
            ],
            dim=0,
        )
        per_sample = torch.exp(per_sample)
        return torch.mean(per_sample) if self.reduce else per_sample


def perplexity(cfg: Any, results: Dict, val_df: pd.DataFrame):
    """Metric wrapper: perplexity is computed inside the model's forward."""
    return results["perplexity"].detach().float().cpu().numpy()
Returns: + - metric value + - should it be maximized or minimized + - Reduce function + + Maximized or minimized is needed for early stopping (saving best checkpoint) + Reduce function to generate a single metric value, usually "mean" or "none" + """ + + _metrics = { + "Perplexity": (perplexity, "min", "mean"), + "BLEU": ( + partial(sacrebleu_score, metric=BLEU(effective_order=True)), + "max", + "mean", + ), + "GPT": (gpt_score, "max", "mean"), + } + + @classmethod + def names(cls) -> List[str]: + return sorted(cls._metrics.keys()) + + @classmethod + def get(cls, name: str) -> Any: + """Access to Metrics. + + Args: + name: metrics name + Returns: + A class to build the Metrics + """ + return cls._metrics.get(name, cls._metrics["BLEU"]) diff --git a/llm_studio/src/models/__init__.py b/llm_studio/src/models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llm_studio/src/models/text_causal_classification_modeling_model.py b/llm_studio/src/models/text_causal_classification_modeling_model.py new file mode 100644 index 0000000000000000000000000000000000000000..ab3ba1f353b70e3f0748ea3561d8068f82de7f1d --- /dev/null +++ b/llm_studio/src/models/text_causal_classification_modeling_model.py @@ -0,0 +1,89 @@ +import logging +from typing import Any, Dict + +from torch import nn +from transformers import AutoModelForCausalLM + +from llm_studio.src.utils.data_utils import batch_padding +from llm_studio.src.utils.modeling_utils import create_nlp_backbone, prepare_lora + +logger = logging.getLogger(__name__) + + +class Model(nn.Module): + """ + Model for causal language modeling problem type. 
+ """ + + def __init__(self, cfg: Any): + """ + Args: + cfg: config with all the hyperparameters + """ + + super(Model, self).__init__() + + self.cfg = cfg + self.backbone, self.backbone_config = create_nlp_backbone( + cfg, model_class=AutoModelForCausalLM + ) + + if cfg.training.lora: + self.backbone = prepare_lora(cfg, self.backbone) + + self.classification_head = nn.Linear( + self.backbone_config.vocab_size, cfg.dataset.num_classes, bias=False + ) + + self.loss_fn = self.cfg.training.loss_class.get( + self.cfg.training.loss_function + )(self.cfg) + + def forward( + self, + batch: Dict, + padding: bool = True, + ) -> Dict: + # disable cache if gradient checkpointing is enabled + if self.cfg.architecture.gradient_checkpointing: + self.backbone.config.use_cache = False + + outputs: Dict = {} + mask_key = "prompt_attention_mask" + pad_keys = [ + "prompt_input_ids", + "prompt_attention_mask", + "special_tokens_mask", + "labels", + ] + + if padding: + batch = batch_padding( + self.cfg, + batch, + self.training, + mask_key=mask_key, + pad_keys=pad_keys, + padding_side=self.cfg.tokenizer._padding_side, + ) + + output = self.backbone( + input_ids=batch["prompt_input_ids"], + attention_mask=batch["prompt_attention_mask"], + ) + + output.logits = self.classification_head(output[0][:, -1].float()) + + if "labels" in batch: + loss = self.loss_fn( + output.logits, batch["class_label"].unsqueeze(1).float() + ) + outputs["loss"] = loss + + outputs["logits"] = output.logits + + # enable cache again if gradient checkpointing is enabled + if self.cfg.architecture.gradient_checkpointing: + self.backbone.config.use_cache = True + + return outputs diff --git a/llm_studio/src/models/text_causal_language_modeling_model.py b/llm_studio/src/models/text_causal_language_modeling_model.py new file mode 100644 index 0000000000000000000000000000000000000000..740761354dcf5f7bc555d06b9fd752d1e2401266 --- /dev/null +++ b/llm_studio/src/models/text_causal_language_modeling_model.py @@ -0,0 +1,111 
@@ +import logging +from typing import Any, Dict + +from torch import nn +from transformers import AutoModelForCausalLM + +from llm_studio.src.metrics.text_causal_language_modeling_metrics import Perplexity +from llm_studio.src.utils.data_utils import batch_padding +from llm_studio.src.utils.modeling_utils import ( + create_nlp_backbone, + generate, + prepare_lora, +) + +logger = logging.getLogger(__name__) + + +class Model(nn.Module): + """ + Model for causal language modeling problem type. + """ + + def __init__(self, cfg: Any): + """ + Args: + cfg: config with all the hyperparameters + """ + + super(Model, self).__init__() + + self.cfg = cfg + self.backbone, self.backbone_config = create_nlp_backbone( + cfg, model_class=AutoModelForCausalLM + ) + + if cfg.training.lora: + self.backbone = prepare_lora(cfg, self.backbone) + + self.loss_fn = self.cfg.training.loss_class.get( + self.cfg.training.loss_function + )(self.cfg) + + if self.cfg.prediction.metric == "Perplexity": + self.perplexity = Perplexity(self.cfg, reduce=False) + + def init_deepspeed(self): + self.backward = self.backbone.backward + self.save_checkpoint = self.backbone.save_checkpoint + self.save_16bit_model = self.backbone.save_16bit_model + if self.cfg.training.lora: + self.backbone.base_model.model.config = ( + self.backbone.base_model.model.module.config + ) + self.backbone.base_model.model.generation_config = ( + self.backbone.base_model.model.module.generation_config + ) + else: + self.backbone.config = self.backbone.module.config + self.backbone.generation_config = self.backbone.module.generation_config + + def generate(self, batch: Dict, cfg: Any, streamer=None): + if cfg.environment.use_deepspeed and cfg.training.lora: + return generate(self.backbone.base_model.model, batch, cfg, streamer) + else: + return generate(self.backbone, batch, cfg, streamer) + + def forward( + self, + batch: Dict, + padding: bool = True, + ) -> Dict: + # disable cache if gradient checkpointing is enabled + if 
self.cfg.architecture.gradient_checkpointing: + self.backbone.config.use_cache = False + + outputs: Dict = {} + mask_key = "attention_mask" + pad_keys = [ + "input_ids", + "attention_mask", + "special_tokens_mask", + "labels", + ] + + if padding: + batch = batch_padding( + self.cfg, + batch, + self.training, + mask_key=mask_key, + pad_keys=pad_keys, + padding_side=self.cfg.tokenizer._padding_side, + ) + + output = self.backbone( + input_ids=batch["input_ids"], + attention_mask=batch["attention_mask"], + ) + + if "labels" in batch: + loss = self.loss_fn(output.logits, batch["labels"]) + outputs["loss"] = loss + + if not self.training and self.cfg.prediction.metric == "Perplexity": + outputs["perplexity"] = self.perplexity(output.logits, batch["labels"]) + + # enable cache again if gradient checkpointing is enabled + if self.cfg.architecture.gradient_checkpointing: + self.backbone.config.use_cache = True + + return outputs diff --git a/llm_studio/src/models/text_dpo_modeling_model.py b/llm_studio/src/models/text_dpo_modeling_model.py new file mode 100644 index 0000000000000000000000000000000000000000..d52b726233909af7792354a64bbd8ddf389849f8 --- /dev/null +++ b/llm_studio/src/models/text_dpo_modeling_model.py @@ -0,0 +1,200 @@ +import logging +from typing import Any, Dict + +import torch +from torch import nn +from transformers import AutoModelForCausalLM + +from llm_studio.src.losses.text_causal_language_modeling_losses import ( + SampleAveragedCrossEntropyLoss, +) +from llm_studio.src.losses.text_dpo_modeling_losses import LOSS_REDUCTION +from llm_studio.src.metrics.text_causal_language_modeling_metrics import Perplexity +from llm_studio.src.utils.data_utils import batch_padding +from llm_studio.src.utils.modeling_utils import ( + create_nlp_backbone, + generate, + prepare_lora, +) + +logger = logging.getLogger(__name__) + + +def get_batch_logps( + logits: torch.FloatTensor, + labels: torch.LongTensor, + average_log_prob: bool = False, +) -> torch.Tensor: + """ + 
Based upon the official implementation of DPO: + https://github.com/eric-mitchell/direct-preference-optimization + + Compute the log probabilities of the given labels under the given logits. + Args: + logits: + Logits of the model (unnormalized). + Shape: (batch_size, sequence_length, vocab_size) + labels: + Labels for which to compute the log probabilities. + Label tokens with a value of -100 are ignored. + Shape: (batch_size, sequence_length) + average_log_prob: + If True, return the average log probability per (non-masked) token. + Otherwise, return the sum of the + log probabilities of the (non-masked) tokens. + Returns: + A tensor of shape (batch_size,) containing the average/sum + log probabilities of the given labels under the given logits. + """ + assert logits.shape[:-1] == labels.shape + + # shift labels and logits to account for next token prediction + # See also text_causal_language_modeling_losses.py + labels = labels[:, 1:].clone() + logits = logits[:, :-1, :] + loss_mask = labels != -100 + + # dummy token; we'll ignore the losses on these tokens when loss_mask is applied + # Needed to be able to apply torch.gather with index=labels.unsqueeze(2) + labels[labels == -100] = 0 + + per_token_logps = torch.gather( + logits.log_softmax(-1), dim=2, index=labels.unsqueeze(2) + ).squeeze(2) + + if average_log_prob: + return (per_token_logps * loss_mask).sum(-1) / loss_mask.sum(-1) + else: + return (per_token_logps * loss_mask).sum(-1) + + +class Model(nn.Module): + """ + Model for DPO language modeling problem type. 
+ """ + + def __init__(self, cfg: Any): + super().__init__() + + self.cfg = cfg + self.backbone, self.backbone_config = create_nlp_backbone( + cfg, model_class=AutoModelForCausalLM + ) + + assert cfg.training.lora, "Need to enable lora for dpo training" + self.backbone = prepare_lora(cfg=cfg, backbone=self.backbone) + + self.loss_fn = self.cfg.training.loss_class.get( + self.cfg.training.loss_function + )(self.cfg) + if self.cfg.prediction.metric == "Perplexity": + self.perplexity = Perplexity(self.cfg, reduce=False) + + def generate(self, batch: Dict, cfg: Any, streamer=None): + return generate(self.backbone, batch, cfg, streamer) + + def forward( + self, + batch: Dict, + padding: bool = True, + ) -> Dict: + """ + Forward pass of DPO model. + Runtime is 4 times slower than causal language modeling model + as we need to compute + - logits for chosen answer + - logits for rejected answer + - logits for chosen answer with reference model + - logits for rejected answer with reference model + """ + # disable cache if gradient checkpointing is enabled + if self.cfg.architecture.gradient_checkpointing: + self.backbone.config.use_cache = False + + outputs: Dict = {} + + logits_dict = {} + labels_dict = {} + + for answer in ["chosen", "rejected"]: + if padding: + batch = batch_padding( + self.cfg, + batch, + self.training, + mask_key=f"{answer}_attention_mask", + pad_keys=[ + f"{answer}_input_ids", + f"{answer}_attention_mask", + f"{answer}_labels", + ], + ) + logits = self.backbone( + input_ids=batch[f"{answer}_input_ids"], + attention_mask=batch[f"{answer}_attention_mask"], + ).logits + + logits_dict[answer] = logits + labels_dict[answer] = batch[f"{answer}_labels"] + + outputs[f"{answer}_logps"] = get_batch_logps( + logits, + batch[f"{answer}_labels"], + average_log_prob=LOSS_REDUCTION[self.cfg.training.loss_function], + ) + + with self.backbone.disable_adapter(): + with torch.no_grad(): + reference_logits = self.backbone( + input_ids=batch[f"{answer}_input_ids"], + 
attention_mask=batch[f"{answer}_attention_mask"], + ).logits + outputs[f"{answer}_reference_logps"] = get_batch_logps( + reference_logits, + batch[f"{answer}_labels"], + average_log_prob=LOSS_REDUCTION[ + self.cfg.training.loss_function + ], + ) + + loss, chosen_rewards, rejected_rewards = self.loss_fn( + policy_chosen_logps=outputs["chosen_logps"], + policy_rejected_logps=outputs["rejected_logps"], + reference_chosen_logps=outputs["chosen_reference_logps"], + reference_rejected_logps=outputs["rejected_reference_logps"], + ) + outputs["loss"] = loss + + # These values will be logged to Neptune if enabled, see train.py + outputs["additional_log_chosen_rewards"] = chosen_rewards.detach() + outputs["additional_log_rejected_rewards"] = rejected_rewards.detach() + # Reward margin should increase over time + outputs["additional_log_reward_margin"] = ( + chosen_rewards - rejected_rewards + ).detach() + + # log sample average cross entropy, perplexity metric is also sample averaged + outputs["additional_log_chosen_cross_entropy_loss"] = ( + SampleAveragedCrossEntropyLoss(self.cfg)( + logits_dict["chosen"], labels_dict["chosen"] + ).detach() + ) + outputs["additional_log_rejected_cross_entropy_loss"] = ( + SampleAveragedCrossEntropyLoss(self.cfg)( + logits_dict["rejected"], labels_dict["rejected"] + ).detach() + ) + + if not self.training and self.cfg.prediction.metric == "Perplexity": + outputs["perplexity"] = self.perplexity( + logits_dict["chosen"], labels_dict["chosen"] + ) + outputs["additional_log_rejected_perplexity"] = self.perplexity( + logits_dict["rejected"], labels_dict["rejected"] + ) + + # enable cache again if gradient checkpointing is enabled + if self.cfg.architecture.gradient_checkpointing: + self.backbone.config.use_cache = True + + return outputs diff --git a/llm_studio/src/models/text_reward_model.py b/llm_studio/src/models/text_reward_model.py new file mode 100644 index 0000000000000000000000000000000000000000..df802833c939cca634aea0eca9865deafc791682 
--- /dev/null +++ b/llm_studio/src/models/text_reward_model.py @@ -0,0 +1,170 @@ +from dataclasses import dataclass +from typing import Literal, Optional + +import torch +import torch.nn as nn +from transformers import AutoConfig, AutoModelForSequenceClassification, AutoTokenizer +from transformers.models.gpt_neox.modeling_gpt_neox import ( + GPTNeoXConfig, + GPTNeoXModel, + GPTNeoXPreTrainedModel, +) +from transformers.utils import ModelOutput + +from llm_studio.src.datasets.text_utils import TEXT_SEPARATOR + + +class GPTNeoXRewardModelConfig(GPTNeoXConfig): + model_type = "gpt_neox_reward_model" + + pooling: Literal["mean", "last"] + + def __init__( + self, + pooling: Literal["mean", "last"] = "last", + **kwargs, + ): + super().__init__(**kwargs) + self.pooling = pooling or "last" + + +@dataclass +class GPTNeoXRewardModelOutput(ModelOutput): + """ + Reward model output. + + Args: + logits (`torch.FloatTensor` of shape `(batch_size, 1)`): + Reward score + """ + + logits: torch.FloatTensor = None + + +class GPTNeoXRewardModel(GPTNeoXPreTrainedModel): + config_class = GPTNeoXRewardModelConfig + + def __init__(self, config): + if isinstance(config, GPTNeoXConfig): + # When a normal GPTNeoX was loaded it will be converted into a reward model. + # The direct `type(config) == GPTNeoXConfig` comparison is used (instead of + # `isinstance()`) since the configuration class of the reward model is also + # derived form `GPTNeoXConfig`. 
+ config = GPTNeoXRewardModelConfig.from_dict(config.to_dict()) + super().__init__(config) + + self.gpt_neox = GPTNeoXModel(config) + self.out_proj = nn.Linear(config.hidden_size, 1) + self.pooling = config.pooling + + def forward( + self, + input_ids, + attention_mask: Optional[torch.FloatTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = None, + return_dict: Optional[bool] = True, + ) -> GPTNeoXRewardModelOutput: + outputs = self.gpt_neox( + input_ids, + attention_mask=attention_mask, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + return_dict=return_dict, + ) + + hidden_states = outputs[0] + if self.pooling == "mean": + if attention_mask is None: + pooled = hidden_states.mean(dim=1) + else: + pooled = (hidden_states * attention_mask).sum( + dim=1 + ) / attention_mask.sum(dim=1) + elif self.pooling == "last": + if attention_mask is None: + pooled = hidden_states[:, -1] + else: + last_idx = attention_mask.cumsum(dim=1).argmax(dim=1) + pooled = hidden_states.gather( + 1, last_idx.view(-1, 1, 1).expand(-1, 1, hidden_states.size(-1)) + ).squeeze(1) + else: + raise ValueError(f"Unknown pooling method: {self.pooling}") + + logits = self.out_proj(pooled) + + if not return_dict: + return (logits,) + outputs[1:] + + return GPTNeoXRewardModelOutput(logits=logits) + + +class RewardModel(nn.Module): + def __init__(self, cfg): + super(RewardModel, self).__init__() + + AutoConfig.register("gpt_neox_reward_model", GPTNeoXRewardModelConfig) + AutoModelForSequenceClassification.register( + GPTNeoXRewardModelConfig, GPTNeoXRewardModel + ) + + self.cfg = cfg + self.model_name = cfg.reward_model + self.device = cfg.environment._device + self.model = AutoModelForSequenceClassification.from_pretrained( + self.model_name, + torch_dtype=( + torch.float16 + if (torch.cuda.is_available() and len(cfg.environment.gpus) > 0) + else torch.float32 + ), + 
).to(self.device) + self.tokenizer = AutoTokenizer.from_pretrained( + self.model_name, max_model_input_sizes=2048 + ) + + def get_score( + self, + prompts=None, + answers=None, + ): + scores = [] + for prompt, answer in zip(prompts, answers): + if "deberta-v3" in self.model_name: + inputs = self.tokenizer( + " ".join(prompt.split(TEXT_SEPARATOR)), + answer, + return_tensors="pt", + max_length=2048, + ).to(self.device) + elif self.model_name in [ + "OpenAssistant/oasst-rm-2.1-pythia-1.4b-epoch-2.5", + "OpenAssistant/oasst-rm-2-pythia-6.9b-epoch-1", + ]: + prompt = prompt.split(TEXT_SEPARATOR) + + input_text = "" + + for i, prompt_part in enumerate(prompt[::-1]): + if i % 2 == 0: + prefix = "<|prompter|>" + else: + prefix = "<|assistant|>" + input_text = f"{prefix}{prompt_part}<|endoftext|>" + input_text + + input_text = input_text + f"<|assistant|>{answer}<|endoftext|>" + + inputs = self.tokenizer( + input_text, return_tensors="pt", max_length=2048 + ).to(self.device) + else: + raise ValueError( + f"Reward model {self.model_name} not supported for scoring." 
+ ) + + scores.append(self.model(**inputs).logits[0].cpu().detach().item()) + del inputs + return scores diff --git a/llm_studio/src/models/text_rlhf_language_modeling_model.py b/llm_studio/src/models/text_rlhf_language_modeling_model.py new file mode 100644 index 0000000000000000000000000000000000000000..2d49350d7a0a0f1469a986082af0c6c06de1eead --- /dev/null +++ b/llm_studio/src/models/text_rlhf_language_modeling_model.py @@ -0,0 +1,141 @@ +import logging +from typing import Any, Dict + +import torch +from torch import nn +from transformers import AutoModelForCausalLM + +from llm_studio.src.metrics.text_causal_language_modeling_metrics import Perplexity +from llm_studio.src.utils.data_utils import batch_padding +from llm_studio.src.utils.modeling_utils import ( + create_nlp_backbone, + generate, + prepare_lora, +) + +logger = logging.getLogger(__name__) + + +class ValueHead(nn.Module): + """ + The ValueHead class implements a head for GPT2 that returns a scalar for each + output token. + + Based on the implementation of trl library: + https://github.com/lvwerra/trl/blob/main/trl/models/modeling_value_head.py + """ + + def __init__(self, config): + super().__init__() + if not hasattr(config, "summary_dropout_prob"): + summary_dropout_prob = 0.1 + else: + summary_dropout_prob = config.summary_dropout_prob + + self.dropout = ( + nn.Dropout(summary_dropout_prob) if summary_dropout_prob else nn.Identity() + ) + + # some models such as OPT have a projection layer before the word embeddings + # e.g. OPT-350m + if hasattr(config, "word_embed_proj_dim"): + hidden_size = config.word_embed_proj_dim + else: + hidden_size = config.hidden_size + + self.summary = nn.Linear(hidden_size, 1) + + def forward(self, hidden_states): + output = self.dropout(hidden_states) + + # For now force upcast in fp32 if needed. Let's keep the + # output in fp32 for numerical stability. 
+ if output.dtype != self.summary.weight.dtype: + output = output.to(self.summary.weight.dtype) + + output = self.summary(output) + return output + + +class Model(nn.Module): + """ + Model for causal language modeling problem type. + """ + + def __init__(self, cfg: Any): + """ + Args: + cfg: config with all the hyperparameters + """ + + super(Model, self).__init__() + + self.cfg = cfg + assert cfg.training.lora, "LoRA must be True for RLHF" + + self.backbone, self.backbone_config = create_nlp_backbone( + cfg, model_class=AutoModelForCausalLM + ) + + self.backbone = prepare_lora(cfg=self.cfg, backbone=self.backbone) + + if self.cfg.prediction.metric == "Perplexity": + self.perplexity = Perplexity(self.cfg, reduce=False) + + self.value_head = ValueHead(self.backbone_config) + self.value_head.summary.bias.data.zero_() + + def forward( + self, + batch: Dict, + padding: bool = True, + ) -> Dict: + # disable cache if gradient checkpointing is enabled + if self.cfg.architecture.gradient_checkpointing: + self.backbone.config.use_cache = False + + outputs: Dict = {} + mask_key = "attention_mask" + pad_keys = [ + "input_ids", + "attention_mask", + "special_tokens_mask", + "labels", + ] + + if padding: + batch = batch_padding( + self.cfg, + batch, + self.training, + mask_key=mask_key, + pad_keys=pad_keys, + ) + + output = self.backbone( + input_ids=batch["input_ids"], + attention_mask=batch["attention_mask"], + output_hidden_states=True, + ) + + if self.cfg.prediction.metric == "Perplexity" and not self.training: + outputs["perplexity"] = self.perplexity(output.logits, batch["labels"]) + + if self.training: + last_hidden_state = output.hidden_states[-1] + + # force upcast in fp32 if logits are in half-precision + if output.logits.dtype != torch.float32: + output.logits = output.logits.float() + + outputs["logits"] = output.logits + outputs["value"] = self.value_head(last_hidden_state).squeeze(-1) + + # enable cache again if gradient checkpointing is enabled + if 
self.cfg.architecture.gradient_checkpointing: + self.backbone.config.use_cache = True + + return outputs + + def generate(self, batch: Dict, cfg: Any, streamer=None): + return generate(self.backbone, batch, cfg, streamer) diff --git a/llm_studio/src/models/text_sequence_to_sequence_modeling_model.py b/llm_studio/src/models/text_sequence_to_sequence_modeling_model.py new file mode 100644 index 0000000000000000000000000000000000000000..bef31c0bf6ad7941a286264ecb53b0a97e4549cf --- /dev/null +++ b/llm_studio/src/models/text_sequence_to_sequence_modeling_model.py @@ -0,0 +1,117 @@ +import logging +from typing import Any, Dict + +import torch.nn as nn +from transformers import AutoModelForSeq2SeqLM + +from llm_studio.src.metrics.text_causal_language_modeling_metrics import Perplexity +from llm_studio.src.utils.data_utils import batch_padding +from llm_studio.src.utils.modeling_utils import ( + create_nlp_backbone, + generate, + prepare_lora, +) + +logger = logging.getLogger(__name__) + + +class Model(nn.Module): + """ + Model for causal language modeling problem type. 
+ """ + + def __init__(self, cfg: Any): + """ + Args: + cfg: config with all the hyperparameters + """ + + super(Model, self).__init__() + + self.cfg = cfg + self.backbone, self.backbone_config = create_nlp_backbone( + cfg, model_class=AutoModelForSeq2SeqLM + ) + + if cfg.training.lora: + self.backbone = prepare_lora(cfg, self.backbone) + + self.loss_fn = self.cfg.training.loss_class.get( + self.cfg.training.loss_function + )(self.cfg) + + if self.cfg.prediction.metric == "Perplexity": + self.perplexity = Perplexity(self.cfg, reduce=False) + + def generate(self, batch: Dict, cfg: Any, streamer=None): + return generate( + backbone=self.backbone, + batch=batch, + cfg=cfg, + streamer=streamer, + remove_prompt=False, + ) + + def forward( + self, + batch: Dict, + padding: bool = True, + ) -> Dict: + # disable cache if gradient checkpointing is enabled + if self.cfg.architecture.gradient_checkpointing: + self.backbone.config.use_cache = False + + outputs: Dict = {} + kwargs: Dict = {} + + if padding: + mask_key = "prompt_attention_mask" + pad_keys = [ + "prompt_input_ids", + "prompt_attention_mask", + ] + + batch = batch_padding( + self.cfg, + batch, + self.training, + mask_key=mask_key, + pad_keys=pad_keys, + padding_side=self.cfg.tokenizer._padding_side, + ) + + mask_key = "answer_attention_mask" + pad_keys = [ + "answer_input_ids", + "answer_attention_mask", + ] + + batch = batch_padding( + self.cfg, + batch, + self.training, + mask_key=mask_key, + pad_keys=pad_keys, + padding_side="right", + ) + + labels = batch["answer_input_ids"] + labels[batch["answer_attention_mask"] == 0] = -100 + + output = self.backbone( + input_ids=batch["prompt_input_ids"], + attention_mask=batch["prompt_attention_mask"], + labels=labels, + **kwargs, + ) + + outputs["loss"] = output.loss + + if not self.training and self.cfg.prediction.metric == "Perplexity": + outputs["perplexity"] = self.perplexity(output.logits, labels) + + # enable cache again if gradient checkpointing is enabled + if 
self.cfg.architecture.gradient_checkpointing: + self.backbone.config.use_cache = True + + return outputs diff --git a/llm_studio/src/nesting.py b/llm_studio/src/nesting.py new file mode 100644 index 0000000000000000000000000000000000000000..866c20113990fb9c225b71decf93fb1ba43abce0 --- /dev/null +++ b/llm_studio/src/nesting.py @@ -0,0 +1,75 @@ +from collections import defaultdict +from dataclasses import dataclass +from typing import DefaultDict, List, Optional, Set, Union + + +@dataclass +class Dependency: + """Dependency class. + + Args: + key: key of the dependency + value: required value of the dependency, None for empty condition + is_set: whether the dependency should be set, or not set + """ + + key: str + value: Union[str, bool, int, None] = True + is_set: bool = True + + def check(self, dependency_values: Optional[List[str]]) -> bool: + """ + Check if dependency is satisfied + + Args: + dependency_values: list of dependency values + + Returns: + True if the dependency is satisfied, False otherwise + """ + + if dependency_values is None: + dependency_values = [] + + if self.value is None and self.is_set and len(dependency_values): + return False + elif self.value is None and not self.is_set and not len(dependency_values): + return False + elif self.is_set and self.value not in dependency_values: + return False + elif ( + not self.is_set + and len([v for v in dependency_values if v != self.value]) == 0 + ): + return False + return True + + +class Nesting: + """ + A tree-like structure to specify nested dependencies of key-value pairs + In detail it maps dependencies of key requiring any number of key:value pairs + + Primarily useful for specifying nested dependencies of UI elements shown in Wave. 
    """

    def __init__(self):
        # Maps a dependent key to the list of Dependency conditions it requires.
        self.dependencies: DefaultDict[str, List[Dependency]] = defaultdict(list)
        # Keys that appear as a dependency of some other key (UI triggers).
        self.triggers: Set[str] = set()

    def add(self, keys: List[str], dependencies: List[Dependency]):
        """
        Append dependencies (key:value) for a given key

        Args:
            keys: keys to add dependencies for
            dependencies: key:value pairs to depend on

        Raises:
            ValueError: if `keys` contains duplicate entries.
        """

        if len(set(keys)) != len(keys):
            raise ValueError("Nesting keys must be unique.")

        # Every dependency key becomes a trigger; each target key records
        # the full dependency so it can later be checked via Dependency.check.
        for dependency in dependencies:
            self.triggers.add(dependency.key)
            for key in set(keys):
                self.dependencies[key].append(dependency)
diff --git a/llm_studio/src/optimizers.py b/llm_studio/src/optimizers.py
new file mode 100644
index 0000000000000000000000000000000000000000..402932a8fac49cf7dd0670424f7a05eec309e6dc
--- /dev/null
+++ b/llm_studio/src/optimizers.py
@@ -0,0 +1,35 @@
from functools import partial
from typing import Any, List

import bitsandbytes as bnb
from torch import optim

__all__ = ["Optimizers"]


class Optimizers:
    """Optimizers factory."""

    # Registry of supported optimizers; SGD/RMSprop carry fixed momentum
    # settings baked in via functools.partial.
    _optimizers = {
        "Adam": optim.Adam,
        "AdamW": optim.AdamW,
        "SGD": partial(optim.SGD, momentum=0.9, nesterov=True),
        "RMSprop": partial(optim.RMSprop, momentum=0.9, alpha=0.9),
        "Adadelta": optim.Adadelta,
        # NOTE(review): "AdamW8bit" maps to bnb.optim.Adam8bit (8-bit Adam),
        # not bnb.optim.AdamW8bit — confirm this naming/mapping is intended.
        "AdamW8bit": bnb.optim.Adam8bit,
    }

    @classmethod
    def names(cls) -> List[str]:
        # Sorted for stable display order in the UI.
        return sorted(cls._optimizers.keys())

    @classmethod
    def get(cls, name: str) -> Any:
        """Access to Optimizers.

        Args:
            name: optimizer name
        Returns:
            A class to build the Optimizer, or None if `name` is unknown.
        """
        return cls._optimizers.get(name)
diff --git a/llm_studio/src/order.py b/llm_studio/src/order.py
new file mode 100644
index 0000000000000000000000000000000000000000..3f7a4ef14c31b87e9d73333525bb555fd522e5dd
--- /dev/null
+++ b/llm_studio/src/order.py
@@ -0,0 +1,118 @@
from typing import Iterable, List, Optional


class Order:
    """
    A list-like structure to specify the order of items in a dictionary.
+ The main characteristics are: + - Append and insert only. Cannot remove elements. This is not strictly required + by the use-case but probably good practice. + - Elements must be unique. Inserting an element which is already in the list + will throw an error. + + Primarily useful for specifying the order in which UI elements + should be shown in Wave. + """ + + def __init__(self, keys: Optional[List[str]] = None): + if keys is not None: + self._list = list(keys) + else: + self._list = list() + + def _unique_guard(self, *keys: str): + for key in keys: + if key in self._list: + raise ValueError(f"`{key}` is already in the list!") + + def append(self, key: str): + """ + Append a key at the end of the list: + + Args: + key: String to append. + + Raises: + - `ValueError` if the key is already in the list. + """ + + self._unique_guard(key) + + self._list.append(key) + + def extend(self, keys: Iterable[str]): + """ + Extend the list by multiple keys. + + Args: + keys: Iterable of keys. + + Raises: + - `ValueError` if one or more key is already in the list. + """ + + self._unique_guard(*keys) + + self._list.extend(keys) + + def insert( + self, *keys: str, before: Optional[str] = None, after: Optional[str] = None + ): + """ + Insert one or more keys. Either `before` or `after`, but not both, must be set + to determine position. + + Args: + keys: One more keys to insert. + after: A key immediately after which the `keys` will be inserted. + before: A key immediately before which the `keys` are inserted. + + Raises: + - `ValueError` if one or more key is already in the list. + - `ValueError` if `before` / `after` does not exist in the list. + - `ValueError` if an invalid combination of arguments is set. 
+ """ + + self._unique_guard(*keys) + + if before is not None: + for key in keys[::-1]: + self._list.insert(self._list.index(before), key) + + if after is not None: + raise ValueError("`after` must be None if `before` is set.") + + if after is not None: + for key in keys[::-1]: + self._list.insert(self._list.index(after) + 1, key) + + if before is not None: + raise ValueError("`before` must be None if `after` is set.") + + if before is None and after is None: + raise ValueError("Either `before` or `after` must be set.") + + def __getitem__(self, idx): + return self._list[idx] + + def __len__(self): + return len(self._list) + + def __iter__(self): + return iter(self._list) + + +def test_order(): + order = Order(["dataset", "training", "validation", "logging"]) + + order.insert("architecture", before="training") + order.insert("environment", after="validation") + + assert [item for item in order] == [ + "dataset", + "architecture", + "training", + "validation", + "environment", + "logging", + ] diff --git a/llm_studio/src/plots/__init__.py b/llm_studio/src/plots/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llm_studio/src/plots/text_causal_classification_modeling_plots.py b/llm_studio/src/plots/text_causal_classification_modeling_plots.py new file mode 100644 index 0000000000000000000000000000000000000000..1997989a8a305e73f87a92b4793d06e7b25d0f2c --- /dev/null +++ b/llm_studio/src/plots/text_causal_classification_modeling_plots.py @@ -0,0 +1,22 @@ +import os + +from llm_studio.src.datasets.text_utils import get_tokenizer +from llm_studio.src.plots.text_causal_language_modeling_plots import ( + Plots as TextCausalLanguageModelingPlots, +) +from llm_studio.src.plots.text_causal_language_modeling_plots import ( + create_batch_prediction_df, +) +from llm_studio.src.utils.plot_utils import PlotData + + +class Plots(TextCausalLanguageModelingPlots): + @classmethod + def plot_batch(cls, 
batch, cfg) -> PlotData: + tokenizer = get_tokenizer(cfg) + df = create_batch_prediction_df( + batch, tokenizer, ids_for_tokenized_text="prompt_input_ids" + ) + path = os.path.join(cfg.output_directory, "batch_viz.parquet") + df.to_parquet(path) + return PlotData(path, encoding="df") diff --git a/llm_studio/src/plots/text_causal_language_modeling_plots.py b/llm_studio/src/plots/text_causal_language_modeling_plots.py new file mode 100644 index 0000000000000000000000000000000000000000..a2aeda47b4105ce959294fdc0a5ff384b900ecad --- /dev/null +++ b/llm_studio/src/plots/text_causal_language_modeling_plots.py @@ -0,0 +1,250 @@ +import hashlib +import os +from typing import Any, Dict + +import pandas as pd + +from llm_studio.src.datasets.conversation_chain_handler import get_conversation_chains +from llm_studio.src.datasets.text_utils import get_tokenizer +from llm_studio.src.utils.data_utils import read_dataframe_drop_missing_labels +from llm_studio.src.utils.plot_utils import ( + PlotData, + format_for_markdown_visualization, + list_to_markdown_representation, +) + + +class Plots: + @classmethod + def plot_batch(cls, batch, cfg) -> PlotData: + tokenizer = get_tokenizer(cfg) + df = create_batch_prediction_df(batch, tokenizer) + path = os.path.join(cfg.output_directory, "batch_viz.parquet") + df.to_parquet(path) + return PlotData(path, encoding="df") + + @classmethod + def plot_data(cls, cfg) -> PlotData: + """ + Plots the data in a scrollable table. + We limit the number of rows to max 600 to avoid rendering issues in Wave. + As the data visualization is instantiated on every page load, we cache the + data visualization in a parquet file. 
+ """ + config_id = ( + str(cfg.dataset.train_dataframe) + + str(cfg.dataset.system_column) + + str(cfg.dataset.prompt_column) + + str(cfg.dataset.answer_column) + + str(cfg.dataset.parent_id_column) + ) + config_hash = hashlib.md5(config_id.encode()).hexdigest() + path = os.path.join( + os.path.dirname(cfg.dataset.train_dataframe), + f"__meta_info__{config_hash}_data_viz.parquet", + ) + if os.path.exists(path): + return PlotData(path, encoding="df") + + df = read_dataframe_drop_missing_labels(cfg.dataset.train_dataframe, cfg) + + conversations = get_conversation_chains(df, cfg, limit_chained_samples=True) + + # Limit to max 15 prompt-conversation-answer rounds + # This yields to max 5 * sum_{i=1}^{15} i = 600 rows in the DataFrame + max_conversation_length = min( + max([len(conversation["prompts"]) for conversation in conversations]), 15 + ) + + conversations_to_display = [] + for conversation_length in range(1, max_conversation_length + 1): + conversations_to_display += [ + conversation + for conversation in conversations + if len(conversation["prompts"]) == conversation_length + ][:5] + + # Convert into a scrollable table by transposing the dataframe + df_transposed = pd.DataFrame(columns=["Sample Number", "Field", "Content"]) + + i = 0 + for sample_number, conversation in enumerate(conversations_to_display): + if conversation["systems"][0] != "": + df_transposed.loc[i] = [ + sample_number, + "System", + conversation["systems"][0], + ] + i += 1 + for prompt, answer in zip(conversation["prompts"], conversation["answers"]): + df_transposed.loc[i] = [ + sample_number, + "Prompt", + prompt, + ] + i += 1 + df_transposed.loc[i] = [ + sample_number, + "Answer", + answer, + ] + i += 1 + + df_transposed["Content"] = df_transposed["Content"].apply( + format_for_markdown_visualization + ) + + df_transposed.to_parquet(path) + + return PlotData(path, encoding="df") + + @classmethod + def plot_validation_predictions( + cls, val_outputs: Dict, cfg: Any, val_df: pd.DataFrame, 
        mode: str
    ) -> PlotData:
        return plot_validation_predictions(val_outputs, cfg, val_df, mode)


def plot_validation_predictions(
    val_outputs: Dict, cfg: Any, val_df: pd.DataFrame, mode: str
) -> PlotData:
    """Builds the input/target/predicted-text table for one validation run,
    caches it as `{mode}_viz.parquet` in the output directory, and returns a
    PlotData handle pointing at that file."""
    conversations = get_conversation_chains(
        val_df, cfg, limit_chained_samples=cfg.dataset.limit_chained_samples
    )
    prompt_column_name = (
        cfg.dataset.prompt_column
        if len(cfg.dataset.prompt_column) > 1
        else cfg.dataset.prompt_column[0]
    )

    target_texts = [conversation["answers"][-1] for conversation in conversations]

    input_texts = []
    for conversation in conversations:
        input_text = conversation["systems"][0]
        prompts = conversation["prompts"]
        answers = conversation["answers"]
        # exclude last answer
        # NOTE(review): this mutates the conversation dict's answers list in
        # place; safe only because target_texts was captured above — confirm
        # no later consumer relies on the original answers.
        answers[-1] = ""
        for prompt, answer in zip(prompts, answers):
            input_text += (
                f" **{prompt_column_name}:** "
                f"{prompt}\n\n"
                f"**{cfg.dataset.answer_column}:** "
                f"{answer}\n\n"
            )
        input_texts += [input_text]

    if "predicted_text" in val_outputs.keys():
        predicted_texts = val_outputs["predicted_text"]
    else:
        predicted_texts = [
            "No predictions are generated for the selected metric"
        ] * len(target_texts)

    input_text_column_name = (
        "Input Text (tokenization max length setting "
        "may truncate the input text during training/inference)"
    )
    df = pd.DataFrame(
        {
            input_text_column_name: input_texts,
            "Target Text": target_texts,
            "Predicted Text": predicted_texts,
        }
    )
    df[input_text_column_name] = df[input_text_column_name].apply(
        format_for_markdown_visualization
    )
    df["Target Text"] = df["Target Text"].apply(format_for_markdown_visualization)
    df["Predicted Text"] = df["Predicted Text"].apply(format_for_markdown_visualization)

    if val_outputs.get("metrics") is not None:
        metric_column_name = f"Metric ({cfg.prediction.metric})"
        df[metric_column_name] = val_outputs["metrics"]
        df[metric_column_name] = df[metric_column_name].round(decimals=3)
        if len(df) > 900:
            # Keep the 300 worst, 300 best and a random 300 from the middle
            # to cap the table at 900 rows.
            df.sort_values(by=metric_column_name, inplace=True)
            df = pd.concat(
                [
                    df.iloc[:300],
                    df.iloc[300:-300].sample(n=300, random_state=42),
                    df.iloc[-300:],
                ]
            ).reset_index(drop=True)

    elif len(df) > 900:
        df = df.sample(n=900, random_state=42).reset_index(drop=True)

    if val_outputs.get("explanations") is not None:
        df["Explanation"] = val_outputs["explanations"]

    path = os.path.join(cfg.output_directory, f"{mode}_viz.parquet")
    df.to_parquet(path)
    return PlotData(data=path, encoding="df")


def create_batch_prediction_df(
    batch, tokenizer, ids_for_tokenized_text="input_ids", labels_column="labels"
):
    """Decodes a batch of token ids into a transposed (Sample/Field/Content)
    dataframe with prompt, optional answer, and tokenized-text rows."""
    df = pd.DataFrame(
        {
            "Prompt Text": [
                tokenizer.decode(input_ids, skip_special_tokens=True)
                for input_ids in batch["prompt_input_ids"].detach().cpu().numpy()
            ]
        }
    )
    df["Prompt Text"] = df["Prompt Text"].apply(format_for_markdown_visualization)
    if labels_column in batch.keys():
        # -100 entries are masked-out label positions (presumably the HF
        # ignore index — confirm) and are stripped before decoding.
        df["Answer Text"] = [
            tokenizer.decode(
                [label for label in labels if label != -100],
                skip_special_tokens=True,
            )
            for labels in batch.get(labels_column, batch[ids_for_tokenized_text])
            .detach()
            .cpu()
            .numpy()
        ]
    tokens_list = [
        tokenizer.convert_ids_to_tokens(input_ids)
        for input_ids in batch[ids_for_tokenized_text].detach().cpu().numpy()
    ]
    masks_list = [
        [label != -100 for label in labels]
        for labels in batch.get(labels_column, batch[ids_for_tokenized_text])
        .detach()
        .cpu()
        .numpy()
    ]
    df["Tokenized Text"] = [
        list_to_markdown_representation(
            tokens, masks, pad_token=tokenizer.pad_token, num_chars=100
        )
        for tokens, masks in zip(tokens_list, masks_list)
    ]
    # limit to 2000 rows, still renders fast in wave
    df = df.iloc[:2000]
    # Convert into a scrollable table by transposing the dataframe
    df_transposed = pd.DataFrame(columns=["Sample Number", "Field", "Content"])
    has_answer = "Answer Text" in df.columns
    # Each sample occupies `offset` consecutive rows (2 or 3 depending on
    # whether an answer column exists).
    for i, row in df.iterrows():
        offset = 2 + int(has_answer)
        df_transposed.loc[i * offset] = [
            i,
            "Prompt Text",
            row["Prompt Text"],
        ]
        if has_answer:
            df_transposed.loc[i * offset + 1] = [
                i,
                "Answer Text",
                row["Answer Text"],
            ]
        df_transposed.loc[i * offset + 1 + int(has_answer)] = [
            i,
            "Tokenized Text",
            row["Tokenized Text"],
        ]
    return df_transposed
diff --git a/llm_studio/src/plots/text_dpo_modeling_plots.py b/llm_studio/src/plots/text_dpo_modeling_plots.py
new file mode 100644
index 0000000000000000000000000000000000000000..41cee6c786c8251d26bc3b1c75bae213640cc9d2
--- /dev/null
+++ b/llm_studio/src/plots/text_dpo_modeling_plots.py
@@ -0,0 +1,142 @@
import hashlib
import os
from typing import Any, Dict, List

import pandas as pd

from llm_studio.src.datasets.conversation_chain_handler import get_conversation_chains
from llm_studio.src.datasets.text_utils import get_tokenizer
from llm_studio.src.plots.text_causal_language_modeling_plots import (
    create_batch_prediction_df,
    plot_validation_predictions,
)
from llm_studio.src.utils.data_utils import read_dataframe_drop_missing_labels
from llm_studio.src.utils.plot_utils import PlotData, format_for_markdown_visualization
from llm_studio.src.utils.utils import PatchedAttribute


class Plots:
    @classmethod
    def plot_batch(cls, batch, cfg) -> PlotData:
        """DPO batch plot: decodes the chosen-answer ids/labels of the batch
        and caches the table as parquet."""
        tokenizer = get_tokenizer(cfg)
        df = create_batch_prediction_df(
            batch,
            tokenizer,
            ids_for_tokenized_text="chosen_input_ids",
            labels_column="chosen_labels",
        )
        path = os.path.join(cfg.output_directory, "batch_viz.parquet")
        df.to_parquet(path)
        return PlotData(path, encoding="df")

    @classmethod
    def plot_data(cls, cfg) -> PlotData:
        """
        Plots the data in a scrollable table.
        We limit the number of rows to max 600 to avoid rendering issues in Wave.
        As the data visualization is instantiated on every page load, we cache the
        data visualization in a parquet file.
+ """ + config_id = ( + str(cfg.dataset.train_dataframe) + + str(cfg.dataset.system_column) + + str(cfg.dataset.prompt_column) + + str(cfg.dataset.answer_column) + + str(cfg.dataset.rejected_answer_column) + + str(cfg.dataset.parent_id_column) + ) + config_hash = hashlib.md5(config_id.encode()).hexdigest() + path = os.path.join( + os.path.dirname(cfg.dataset.train_dataframe), + f"__meta_info__{config_hash}_data_viz.parquet", + ) + if os.path.exists(path): + return PlotData(path, encoding="df") + + df = read_dataframe_drop_missing_labels(cfg.dataset.train_dataframe, cfg) + + conversations_chosen = get_conversation_chains( + df, cfg, limit_chained_samples=True + ) + with PatchedAttribute( + cfg.dataset, "answer_column", cfg.dataset.rejected_answer_column + ): + conversations_rejected = get_conversation_chains( + df, cfg, limit_chained_samples=True + ) + + # Limit to max 15 prompt-conversation-answer rounds + max_conversation_length = min( + max( + [len(conversation["prompts"]) for conversation in conversations_chosen] + ), + 15, + ) + + conversations_to_display: List = [] + for conversation_length in range(1, max_conversation_length + 1): + conversations_to_display += [ + (conversation_chosen, conversations_rejected) + for conversation_chosen, conversations_rejected in zip( + conversations_chosen, conversations_rejected + ) + if len(conversation_chosen["prompts"]) == conversation_length + ][:5] + + # Convert into a scrollable table by transposing the dataframe + df_transposed = pd.DataFrame(columns=["Sample Number", "Field", "Content"]) + + i = 0 + for sample_number, (conversation_chosen, conversations_rejected) in enumerate( + conversations_to_display + ): + if conversation_chosen["systems"][0] != "": + df_transposed.loc[i] = [ + sample_number, + "System", + conversation_chosen["systems"][0], + ] + i += 1 + for prompt, answer_chosen, answer_rejected in zip( + conversation_chosen["prompts"], + conversation_chosen["answers"], + conversations_rejected["answers"], # 
type: ignore + ): + df_transposed.loc[i] = [ + sample_number, + "Prompt", + prompt, + ] + i += 1 + if answer_chosen == answer_rejected: + df_transposed.loc[i] = [ + sample_number, + "Answer", + answer_chosen, + ] + i += 1 + else: + df_transposed.loc[i] = [ + sample_number, + "Answer Chosen", + answer_chosen, + ] + i += 1 + df_transposed.loc[i] = [ + sample_number, + "Answer Rejected", + answer_rejected, + ] + i += 1 + + df_transposed["Content"] = df_transposed["Content"].apply( + format_for_markdown_visualization + ) + df_transposed.to_parquet(path) + return PlotData(path, encoding="df") + + @classmethod + def plot_validation_predictions( + cls, val_outputs: Dict, cfg: Any, val_df: pd.DataFrame, mode: str + ) -> PlotData: + return plot_validation_predictions(val_outputs, cfg, val_df, mode) diff --git a/llm_studio/src/possible_values.py b/llm_studio/src/possible_values.py new file mode 100644 index 0000000000000000000000000000000000000000..5090e35e8558ff7b8ba3c93150d8c28ceae7b290 --- /dev/null +++ b/llm_studio/src/possible_values.py @@ -0,0 +1,297 @@ +import os +from abc import abstractmethod +from dataclasses import dataclass +from typing import Any, Callable, List, Optional, Sequence, Set, Tuple, Union + +from llm_studio.src.nesting import Dependency + + +def _scan_dirs(dirname) -> List[str]: + """Scans a directory for subfolders + + Args: + dirname: directory name + + Returns: + List of subfolders + + """ + + subfolders = [f.path for f in os.scandir(dirname) if f.is_dir()] + for dirname in list(subfolders): + subfolders.extend(_scan_dirs(dirname)) + subfolders = [x + "/" if x[-1] != "/" else x for x in subfolders] + return subfolders + + +def _scan_files( + dirname, extensions: Tuple[str, ...] 
= (".csv", ".pq", ".parquet", ".json") +) -> List[str]: + """Scans a directory for files with given extension + + Args: + dirname: directory name + extensions: extensions to consider + + Returns: + List of files + + """ + path_list = [ + os.path.join(dirpath, filename) + for dirpath, _, filenames in os.walk(dirname) + for filename in filenames + if any(map(filename.__contains__, extensions)) + and not filename.startswith("__meta_info__") + ] + return sorted(path_list) + + +def strip_prefix(paths: Sequence[str], ignore_set: Set[str] = set()) -> Tuple[str, ...]: + """ + Strips the common prefix of all the given paths. + + Args: + paths: the paths to strip + ignore_set: set of path names to ignore when computing the prefix. + + Returns: + List with the same length as `paths` without common prefixes. + """ + + paths_to_check = [ + os.path.split(os.path.normpath(path))[0] + for path in paths + if path not in ignore_set + ] + + if len(paths_to_check) == 0: + return tuple(paths) + + prefix = os.path.commonpath(paths_to_check) + stripped = tuple( + [ + path if path in ignore_set else os.path.relpath(path, prefix) + for path in paths + ] + ) + + return stripped + + +class Value: + pass + + +@dataclass +class Number: + min: Optional[float] = None + max: Optional[float] = None + step: Union[str, float] = 1.0 + + +@dataclass +class String: + # Each element of the tuple can be either: + # - a tuple of (value, name) + # - a string. In that case the same value will be used for name and value + values: Any = None + allow_custom: bool = False + placeholder: Optional[str] = None + + +class DatasetValue: + pass + + @abstractmethod + def get_value( + self, dataset: Any, value: Any, type_annotation: type, mode: str + ) -> Tuple[String, Any]: + pass + + @staticmethod + def _compute_current_values( + current_values: List[str], + possible_values: List[str], + prefer_with: Optional[Callable[[str], bool]] = None, + ) -> List[str]: + """ + Compute current values. 
+ + Args: + current_values: The preliminary current values. + possible_values: All possible values. + prefer_with: Function determining which values to prefer as default. + + Returns: + A list + """ + if len(possible_values) == 0: + return [""] + + # allow only values which are in the possible values + current_values = list( + filter(lambda value: value in possible_values, current_values) + ) + + if len(current_values) == 0: + # if the values are empty, take all the values where `prefer_with` is true + for c in possible_values: + if prefer_with is not None and prefer_with(c): + current_values.append(c) + + # if they are still empty, just take the first possible value + if len(current_values) == 0: + current_values = [possible_values[0]] + + return current_values + + +@dataclass +class Directories(DatasetValue): + add_none: Union[bool, Callable[[str], bool]] = False + prefer_with: Optional[Callable[[str], bool]] = None + prefer_none: bool = True + + def get_value(self, dataset, value, type_annotation, mode) -> Tuple[String, Any]: + if dataset is None: + return String(tuple()), value + + available_dirs = _scan_dirs(dataset["path"]) + + if (isinstance(self.add_none, bool) and self.add_none) or ( + callable(self.add_none) and self.add_none(mode) + ): + if self.prefer_none: + available_dirs.insert(0, "None") + else: + available_dirs.insert(len(available_dirs), "None") + + if isinstance(value, str): + value = [value] + + value = DatasetValue._compute_current_values( + value, available_dirs, self.prefer_with + ) + + return ( + String( + tuple( + zip( + available_dirs, + strip_prefix(available_dirs, ignore_set={"None"}), + ) + ) + ), + value if type_annotation == Tuple[str, ...] 
            else value[0],
        )


@dataclass
class Files(DatasetValue):
    add_none: Union[bool, Callable[[str], bool]] = False
    prefer_with: Optional[Callable[[str], bool]] = None
    # For the case where no match found, whether to prioritize
    # selecting any file or selecting no file
    prefer_none: bool = True

    def get_value(self, dataset, value, type_annotation, mode) -> Tuple[String, Any]:
        """Returns the selectable data files under the dataset path and the
        resolved current value(s); mirrors Directories.get_value."""
        if dataset is None:
            return String(tuple()), value

        available_files = _scan_files(dataset["path"])

        if (isinstance(self.add_none, bool) and self.add_none) or (
            callable(self.add_none) and self.add_none(mode)
        ):
            if self.prefer_none:
                available_files.insert(0, "None")
            else:
                available_files.insert(len(available_files), "None")

        if isinstance(value, str):
            value = [value]

        value = DatasetValue._compute_current_values(
            value, available_files, self.prefer_with
        )

        return (
            String(
                tuple(
                    zip(
                        available_files,
                        strip_prefix(available_files, ignore_set={"None"}),
                    )
                )
            ),
            # A tuple annotation keeps the multi-select; otherwise collapse to
            # the single first value.
            value if type_annotation == Tuple[str, ...] else value[0],
        )


@dataclass
class Columns(DatasetValue):
    add_none: Union[bool, Callable[[str], bool]] = False
    prefer_with: Optional[Callable[[str], bool]] = None

    def get_value(self, dataset, value, type_annotation, mode) -> Tuple[String, Any]:
        """Returns the selectable dataframe columns and the resolved current
        value(s)."""
        if dataset is None:
            return String(tuple()), value

        try:
            columns = list(dataset["dataframe"].columns)
        except KeyError:
            columns = []

        if (isinstance(self.add_none, bool) and self.add_none) or (
            callable(self.add_none) and self.add_none(mode)
        ):
            columns.insert(0, "None")

        if isinstance(value, str):
            value = [value]
        # NOTE(review): if `value is None` and `columns` is empty (no dataframe
        # and add_none disabled), `columns[0]` raises IndexError — confirm
        # callers always provide a dataframe in that case.
        if value is None:
            value = [columns[0]]

        value = DatasetValue._compute_current_values(value, columns, self.prefer_with)

        return (
            String(tuple(columns)),
            value if type_annotation == Tuple[str, ...] else value[0],
        )


@dataclass
class ColumnValue(DatasetValue):
    # Possible values taken from the unique entries of one dataframe column,
    # falling back to `default` when the column (or dataframe) is unavailable.
    column: str
    default: List[str]
    prefer_with: Optional[Callable[[str], bool]] = None
    dependency: Optional[Dependency] = None

    def get_value(self, dataset, value, type_annotation, mode) -> Tuple[String, Any]:
        if dataset is None:
            return String(tuple()), value

        try:
            df = dataset["dataframe"]
        except KeyError:
            df = None

        if df is not None:
            if self.dependency is not None and not self.dependency.check(
                [dataset[self.dependency.key]]
            ):
                values = self.default
            elif self.column in df:
                values = [str(v) for v in sorted(list(df[self.column].unique()))]
            else:
                values = self.default
        else:
            values = self.default

        value = DatasetValue._compute_current_values(value, values, self.prefer_with)

        return (
            String(tuple(values)),
            value if type_annotation == Tuple[str, ...] else value[0],
        )
diff --git a/llm_studio/src/schedulers.py b/llm_studio/src/schedulers.py
new file mode 100644
index 0000000000000000000000000000000000000000..2c01870d87ffbe2ab129f92354dcac1022f7d6aa
--- /dev/null
+++ b/llm_studio/src/schedulers.py
@@ -0,0 +1,40 @@
from typing import Any, List

from transformers import (
    get_constant_schedule_with_warmup,
    get_cosine_schedule_with_warmup,
    get_linear_schedule_with_warmup,
)

__all__ = ["Schedulers"]


def constant_schedule_with_warmup(optimizer, num_warmup_steps, **kwargs):
    # Adapter: swallows the extra kwargs the factory passes uniformly to all
    # schedulers, since the constant schedule accepts no training-step count.
    return get_constant_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=num_warmup_steps
    )


class Schedulers:
    """Schedulers factory."""

    _schedulers = {
        "Cosine": get_cosine_schedule_with_warmup,
        "Linear": get_linear_schedule_with_warmup,
        "Constant": constant_schedule_with_warmup,
    }

    @classmethod
    def names(cls) -> List[str]:
        return sorted(cls._schedulers.keys())

    @classmethod
    def get(cls, name: str) -> Any:
        """Access to Schedulers.
+ + Args: + name: scheduler name + Returns: + A class to build the Schedulers + """ + return cls._schedulers.get(name) diff --git a/llm_studio/src/tooltips.py b/llm_studio/src/tooltips.py new file mode 100644 index 0000000000000000000000000000000000000000..e20d2c49ba826fc3fb3bdb2d553ab5d945035fe7 --- /dev/null +++ b/llm_studio/src/tooltips.py @@ -0,0 +1,140 @@ +import glob +import re +from dataclasses import dataclass +from typing import Dict + +CLEANR = re.compile("<[^<]+?>") +tooltip_files = glob.glob("documentation/docs/tooltips/**/*.mdx", recursive=True) + + +def read_tooltip_file(path: str) -> str: + """ + Reads all lines of a text file. + + Args: + filename: path to the file + + Returns: + str: the text of the file + """ + + with open(path) as f: + lines = f.readlines() + return "".join(lines) + + +def cleanhtml(raw_html: str) -> str: + """ + Removes html tags from a string. + + Args: + raw_html: the string to clean + + Returns: + str: the cleaned string + """ + + cleantext = re.sub(CLEANR, "", raw_html) + return cleantext + + +def clean_docusaurus_tags(text: str) -> str: + """ + Removes docusaurus tags from a string. + + Args: + text: the string to clean + + Returns: + str: the cleaned string + """ + + text = text.replace(":::info note", "") + text = text.replace(":::info Note", "") + text = text.replace(":::tip tip", "") + text = text.replace(":::", "") + return text + + +def clean_md_links(text: str) -> str: + """ + Removes markdown links from a string. + + Args: + text: the string to clean + + Returns: + str: the cleaned string + """ + + text = re.sub(r"\[(.*?)\]\(.*?\)", r"\1", text) + return text + + +@dataclass +class Tooltip: + """ + A tooltip. + + Returns: + str: the text of the tooltip + """ + + name: str + text: str + + def __repr__(self): + return f"{self.name}: {self.text}" + + +class Tooltips: + """ + A collection of tooltips. + + During initialization, all tooltips are read from the tooltip files. 
+ + Usage: + tooltips = Tooltips() + a tooltip can be accessed by its name: + tooltips["name"] returns the tooltip with the name "name" + """ + + def __init__(self): + self.tooltips: Dict[str, Tooltip] = {} + for filename in tooltip_files: + name = filename.split("/")[-1].split(".")[0] + name = name.replace("-", "_") + name = name[1:] # remove leading underscore + section = filename.split("/")[3] + text = read_tooltip_file(filename) + text = cleanhtml(text) + text = clean_docusaurus_tags(text) + text = clean_md_links(text) + if name in self.tooltips.keys(): + raise ValueError + self.add_tooltip(Tooltip(f"{section}_{name}", text)) + + def add_tooltip(self, tooltip): + self.tooltips[tooltip.name] = tooltip + + def __getitem__(self, name: str) -> str: + try: + text = self.tooltips[name].text + except KeyError: + text = None + return text + + def __len__(self): + return len(self.tooltips) + + def __repr__(self): + return f"{self.tooltips}" + + def get(self, name: str, default=None): + if name in self.tooltips.keys(): + return self.tooltips[name].text + else: + return default + + +tooltips = Tooltips() diff --git a/llm_studio/src/utils/__init__.py b/llm_studio/src/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llm_studio/src/utils/config_utils.py b/llm_studio/src/utils/config_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..af597ae0207e79dbfa584bcf7590a226352185b1 --- /dev/null +++ b/llm_studio/src/utils/config_utils.py @@ -0,0 +1,222 @@ +import dataclasses +import importlib +from types import ModuleType +from typing import Any, Dict, List, Type + +import yaml + +from llm_studio.python_configs.base import DefaultConfigProblemBase +from llm_studio.src.utils.type_annotations import KNOWN_TYPE_ANNOTATIONS + + +def rreload(module): + """Recursively reload modules. 
    Args:
        module: module to reload
    """
    # NOTE(review): the inner loop reuses `attribute_name`, shadowing the
    # outer loop variable — works here, but an independent name would be
    # clearer.
    for attribute_name in dir(module):
        if "Config" in attribute_name:
            attribute1 = getattr(module, attribute_name)
            for attribute_name in dir(attribute1):
                attribute2 = getattr(attribute1, attribute_name)
                if type(attribute2) is ModuleType:
                    importlib.reload(attribute2)


def _load_cls(module_path: str, cls_name: str) -> Any:
    """Loads the python class.

    Args:
        module_path: path to the module
        cls_name: name of the class

    Returns:
        Loaded python class
    """
    # Convert a file path ("a/b/c.py") into a module path ("a.b.c").
    module_path_fixed = module_path
    if module_path_fixed.endswith(".py"):
        module_path_fixed = module_path_fixed[:-3]
    module_path_fixed = module_path_fixed.replace("/", ".")

    # Reload aggressively so edits to config files are picked up at runtime.
    module = importlib.import_module(module_path_fixed)
    module = importlib.reload(module)
    rreload(module)
    module = importlib.reload(module)

    assert hasattr(module, cls_name), "{} file should contain {} class".format(
        module_path, cls_name
    )

    cls = getattr(module, cls_name)

    return cls


def load_config_py(config_path: str, config_name: str = "Config"):
    """Loads the config class.

    Args:
        config_path: path to the config file
        config_name: name of the config class

    Returns:
        Loaded config class
    """

    return _load_cls(config_path, config_name)()


def _get_type_annotation_error(v: Any, type_annotation: Type) -> ValueError:
    """Builds (does not raise) the error for an unsupported config value type."""
    return ValueError(
        f"Cannot show {v}: not a dataclass"
        f" and {type_annotation} is not a known type annotation."
    )


def convert_cfg_base_to_nested_dictionary(cfg: DefaultConfigProblemBase) -> dict:
    """Returns a grouped config settings dict for a given configuration

    Args:
        cfg: configuration

    Returns:
        Dict of configuration settings
    """

    cfg_dict = cfg.__dict__
    type_annotations = cfg.get_annotations()
    cfg_dict = {key: cfg_dict[key] for key in cfg._get_order()}

    grouped_cfg_dict = {}

    for k, v in cfg_dict.items():
        if k.startswith("_"):
            continue

        # Guard against secrets accidentally ending up in saved configs.
        if any([x in k for x in ["api", "secret"]]):
            raise AssertionError(
                "Config item must not contain the word 'api' or 'secret'"
            )

        type_annotation = type_annotations[k]

        if type_annotation in KNOWN_TYPE_ANNOTATIONS:
            grouped_cfg_dict.update({k: v})
        elif dataclasses.is_dataclass(v):
            group_items = parse_cfg_dataclass(cfg=v)
            group_items = {
                k: list(v) if isinstance(v, tuple) else v
                for d in group_items
                for k, v in d.items()
            }
            grouped_cfg_dict.update({k: group_items})
        else:
            raise _get_type_annotation_error(v, type_annotations[k])

    # not an explicit field in the config
    grouped_cfg_dict["problem_type"] = cfg.problem_type
    return grouped_cfg_dict


def convert_nested_dictionary_to_cfg_base(
    cfg_dict: Dict[str, Any]
) -> DefaultConfigProblemBase:
    """
    Inverse operation of convert_cfg_base_to_nested_dictionary
    """
    problem_type = cfg_dict["problem_type"]
    module_name = f"llm_studio.python_configs.{problem_type}_config"
    try:
        module = importlib.import_module(module_name)
    except ModuleNotFoundError:
        raise NotImplementedError(f"Problem Type {problem_type} not implemented")
    return module.ConfigProblemBase.from_dict(cfg_dict)


def get_parent_element(cfg):
    """Returns a {"Parent Experiment": value} dict if the config references a
    parent experiment, else None."""
    if hasattr(cfg, "_parent_experiment") and cfg._parent_experiment != "":
        key = "Parent Experiment"
        value = cfg._parent_experiment
        return {key: value}

    return None


def parse_cfg_dataclass(cfg) -> List[Dict]:
    """Returns all single config settings for a given configuration

    Args:
        cfg: configuration
    """

    items = []

    parent_element = get_parent_element(cfg)
    if parent_element:
        items.append(parent_element)

    cfg_dict = cfg.__dict__
    type_annotations = cfg.get_annotations()
    cfg_dict = {key: cfg_dict[key] for key in cfg._get_order()}

    for k, v in cfg_dict.items():
        if k.startswith("_"):
            continue

        if any([x in k for x in ["api"]]):
            continue

        type_annotation = type_annotations[k]

        if type_annotation in KNOWN_TYPE_ANNOTATIONS:
            if type_annotation == float:
                v = float(v)
            t = [{k: v}]
        elif dataclasses.is_dataclass(v):
            elements_group = parse_cfg_dataclass(cfg=v)
            t = elements_group
        else:
            continue

        items += t

    return items


def save_config_yaml(path: str, cfg: DefaultConfigProblemBase) -> None:
    """Saves config as yaml file

    Args:
        path: path of file to save to
        cfg: config to save
    """
    # NOTE(review): the following string literal is a stray second docstring
    # (a no-op expression statement) — consider merging it into the docstring
    # above.
    """
    Returns a dictionary representation of the config object.
    Protected attributes (starting with an underscore) are not included.
    Nested configs are converted to nested dictionaries.
+ """ + cfg_dict = convert_cfg_base_to_nested_dictionary(cfg) + with open(path, "w") as fp: + yaml.dump(cfg_dict, fp, indent=4) + + +def load_config_yaml(path: str): + """Loads config from yaml file + + Args: + path: path of file to load from + Returns: + config object + """ + with open(path, "r") as fp: + cfg_dict = yaml.load(fp, Loader=yaml.FullLoader) + return convert_nested_dictionary_to_cfg_base(cfg_dict) + + +# Note that importing ConfigProblemBase from the python_configs +# and using cfg.problem_type below will not work because of circular imports +NON_GENERATION_PROBLEM_TYPES = ["text_causal_classification_modeling"] diff --git a/llm_studio/src/utils/data_utils.py b/llm_studio/src/utils/data_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..4f1911b596dcbcbb348a8bea8f7c562dbaafbc91 --- /dev/null +++ b/llm_studio/src/utils/data_utils.py @@ -0,0 +1,696 @@ +import logging +import math +import os +from typing import Any, DefaultDict, Dict, List, Optional, Tuple, Union, no_type_check + +import networkx as nx +import numpy as np +import pandas as pd +import pyarrow.parquet as pq +import torch +from sklearn.model_selection import GroupShuffleSplit, train_test_split +from torch import distributed as dist +from torch.utils.data import DataLoader, Sampler, SequentialSampler + +from llm_studio.src.datasets.conversation_chain_handler import ConversationChainHandler +from llm_studio.src.utils.exceptions import LLMDataException +from llm_studio.src.utils.gpu_utils import sync_across_processes +from llm_studio.src.utils.utils import PatchedAttribute, set_seed + +logger = logging.getLogger(__name__) + + +def read_dataframe( + path: str, + n_rows: int = -1, + meta_only: bool = False, + non_missing_columns: Optional[List[str]] = None, + verbose: bool = False, + handling: str = "warn", + fill_columns: Optional[List[str]] = None, + fill_value: Any = "", + mode: str = "", +) -> pd.DataFrame: + """Reading a dataframe from different file types + + Args: 
+ path: path of the dataframe + n_rows: number of rows to limit to + meta_only: return only meta information + non_missing_columns: list of columns that cannot contain missing values + verbose: if warning about dropped rows should be logged + handling: how to handle missing values + fill_columns: columns where empty value should be filled (used for empty text) + fill_value: value to fill empty columns with (used for empty text) + mode: dataset type, used only for better exception/log information + Returns: + dataframe + + """ + + non_missing_columns = [] if non_missing_columns is None else non_missing_columns + fill_columns = [] if fill_columns is None else fill_columns + + meta_info_path = os.path.split(path) + meta_info_path = os.path.join( + *meta_info_path[:-1], + "__meta_info__" + meta_info_path[-1] + ".csv", + ) + if meta_only and os.path.exists(meta_info_path): + path = meta_info_path + + if path.endswith(".csv"): + df = pd.read_csv(path, lineterminator="\n").reset_index(drop=True) + elif path.endswith(".pq") or path.endswith(".parquet"): + try: + df = pd.read_parquet(path, engine="pyarrow").reset_index(drop=True) + except Exception: + df = pd.read_parquet(path, engine="fastparquet").reset_index(drop=True) + elif path.endswith(".json") or path == "": + return pd.DataFrame() + else: + raise ValueError( + f"Could not determine type of file {path}: " + f"CSV (`.csv`) and Parquet (`.pq` and `.parquet`) are supported." 
+ ) + + if fill_columns: + df[fill_columns] = df[fill_columns].fillna(fill_value) + + if meta_only and os.path.exists(meta_info_path): + return df + + non_missing_columns = [x for x in non_missing_columns if x in df] + if len(non_missing_columns): + orig_size = df.shape[0] + non_missing_index = df[non_missing_columns].dropna().index + dropped_index = [idx for idx in df.index if idx not in non_missing_index] + df = df.loc[non_missing_index].reset_index(drop=True) + new_size = df.shape[0] + if new_size < orig_size and verbose: + logger.warning( + f"Dropped {orig_size - new_size} rows when reading dataframe '{path}' " + f"due to missing values encountered in one of the following columns:" + f" {non_missing_columns} in the following rows: {dropped_index}" + ) + + if handling == "error": + dropped_str = dropped_index + + if len(dropped_str) > 10: + dropped_str = dropped_str[:5] + ["..."] + dropped_str[-5:] + + dropped_str = ", ".join([str(x) for x in dropped_str]) + prefix = f"{mode} " if mode else "" + error = ( + f"{prefix}dataset contains {len(dropped_index)} rows with missing " + f"values in one of the following columns: {non_missing_columns} in " + f"the following rows: {dropped_str}" + ) + + raise ValueError(error.capitalize()) + + if n_rows > -1: + df = df.iloc[sample_indices(len(df), n_indices=n_rows)] + + # create meta information dataframe if it does not exist + if not os.path.exists(meta_info_path): + df_meta = pd.DataFrame(columns=df.columns) + df_meta.to_csv(meta_info_path, index=False) + + return df + + +def get_fill_columns(cfg: Any) -> List[str]: + if hasattr(cfg.dataset, "prompt_column"): + if isinstance(cfg.dataset.prompt_column, (list, tuple)): + return list(cfg.dataset.prompt_column) + return [cfg.dataset.prompt_column] + + return [] + + +def read_dataframe_drop_missing_labels(path: str, cfg: Any) -> pd.DataFrame: + if isinstance(cfg.dataset.prompt_column, tuple): + input_cols = list(cfg.dataset.prompt_column) + else: + input_cols = 
[cfg.dataset.prompt_column] + verbose = cfg.environment._local_rank == 0 + fill_columns = get_fill_columns(cfg) + df = read_dataframe( + path, + non_missing_columns=input_cols, + verbose=verbose, + fill_columns=fill_columns, + ) + df[input_cols] = df[input_cols].fillna("").astype(str) + if ( + hasattr(cfg.dataset, "answer_column") + and cfg.dataset.answer_column in df.columns + ): + df[cfg.dataset.answer_column] = ( + df[cfg.dataset.answer_column].fillna("").astype(str) + ) + return df + + +def is_valid_data_frame(path: str, csv_rows: int = 100) -> bool: + """Checking data frame format + + Args: + path: path of the dataframe + csv_rows: number of rows to limit to when checking csv files + + Returns: + bool + + """ + try: + if path.endswith(".csv"): + pd.read_csv(path, nrows=csv_rows, lineterminator="\n") + elif path.endswith(".pq") or path.endswith(".parquet"): + pq.ParquetFile(path) + else: + raise ValueError( + f"Could not determine type of file {path}: " + f"CSV (`.csv`) and Parquet (`.pq` and `.parquet`) are supported." 
+ ) + except Exception as e: + logger.error(str(e)) + return False + return True + + +def sample_data(cfg: Any, df: pd.DataFrame) -> pd.DataFrame: + """Sample data from the dataframe""" + + if cfg.dataset.parent_id_column != "None" and "id" in df.columns: + parent_mapping = df.set_index("id")["parent_id"].to_dict() + + # A recursive function to get the root id for each node + def get_root(node): + parent = parent_mapping.get(node) + if parent is None or pd.isna(parent): + return node + return get_root(parent) + + # Apply the function to assign each row the root id + df["root_id"] = df["id"].apply(get_root) + + # Sample root_ids without replacement + root_ids = df["root_id"].unique() + n_sampled_root_ids = int(len(root_ids) * cfg.dataset.data_sample) + + np.random.seed(7331) + sampled_root_ids = np.random.choice( + root_ids, size=n_sampled_root_ids, replace=False + ) + + # Filter the dataframe to only include rows with sampled root_ids + df = df[df["root_id"].isin(sampled_root_ids)].reset_index(drop=True) + del df["root_id"] + else: + # at least 10 observations + n = max(10, int(len(df) * cfg.dataset.data_sample)) + df = df.sample(n=min(n, len(df)), random_state=7331, replace=False) + + return df + + +def load_mt_bench_data(cfg: Any) -> pd.DataFrame: + """Loads MT-BENCH data. + + Args: + cfg: input config + + Returns: + MT-BENCH DataFrame + """ + + prompt_column = cfg.dataset.prompt_column[0] + answer_column = cfg.dataset.answer_column + + df = df = pd.read_json("prompts/mt-bench/question.jsonl", lines=True) + df = df.rename(columns={"turns": prompt_column, "reference": answer_column}) + df[prompt_column] = df[prompt_column].apply(lambda x: x[0]) + df[answer_column] = ( + df[answer_column].fillna("").apply(lambda x: x[0] if x != "" else x) + ) + + return df + + +def get_data(cfg: Any) -> Tuple[pd.DataFrame, pd.DataFrame]: + """Prepares train and validation DataFrames. 
def get_data(cfg: Any) -> Tuple[pd.DataFrame, pd.DataFrame]:
    """Prepares train and validation DataFrames.

    Args:
        cfg: input config

    Returns:
        Train and validation DataFrames
    """

    train_df, val_df = load_train_valid_data(cfg)

    uses_mt_bench = (
        hasattr(cfg.prediction, "metric_gpt_template")
        and cfg.prediction.metric_gpt_template == "mt-bench"
    )
    if uses_mt_bench:
        if cfg.environment._local_rank == 0:
            logger.info(
                "Overwriting validation data with MT-BENCH data. Please note that "
                "respective metric is an approximation and might not fully match "
                "the original implementation."
            )
        val_df = load_mt_bench_data(cfg)

    # optional sub-sampling of either split
    if cfg.dataset.data_sample < 1.0:
        sample_choice = cfg.dataset.data_sample_choice
        if "Train" in sample_choice:
            train_df = sample_data(cfg, train_df)
        if "Validation" in sample_choice:
            val_df = sample_data(cfg, val_df)

    # optionally fold the validation data into the training data
    if cfg.training.train_validation_data:
        train_df = pd.concat([train_df, val_df], axis=0)

    dataset_class = cfg.dataset.dataset_class
    train_df = dataset_class.preprocess_dataframe(train_df, cfg, mode="train")
    val_df = dataset_class.preprocess_dataframe(val_df, cfg, mode="validation")

    return train_df.reset_index(drop=True), val_df.reset_index(drop=True)
+ ) + train_df = read_dataframe_drop_missing_labels(cfg.dataset.train_dataframe, cfg) + val_df = read_dataframe_drop_missing_labels( + cfg.dataset.validation_dataframe, cfg + ) + elif cfg.dataset.validation_strategy == "automatic": + if cfg.environment._local_rank == 0: + logger.info("Setting up automatic validation split...") + df = read_dataframe_drop_missing_labels(cfg.dataset.train_dataframe, cfg) + if cfg.dataset.parent_id_column != "None" and "id" in df.columns: + # split based on conversation_chain_ids + # this ensures that all samples from the + # same conversation are in the same fold + with PatchedAttribute(cfg.dataset, "limit_chained_samples", True): + conversation_chain_ids = ConversationChainHandler( + df=df, cfg=cfg + ).conversation_chain_ids + # Some conversations may have the same parent id, e.g. for OASST + # 6aa548c6-65ad-4531-9411-76173ae060a3 and + # 2a164c2a-4f0e-45aa-8990-e7dd3b51c06b + # have the same parent a8df94e3-cfc7-4736-9587-0ec943d0fec3 + # We need to merge those into a single group + conversation_chain_ids = merge_on_common_items(conversation_chain_ids) + conversation_chain_labels = [ + i + for i, conversation_chain_id in enumerate(conversation_chain_ids) + for _ in conversation_chain_id + ] + group_shuffle_split = GroupShuffleSplit( + test_size=cfg.dataset.validation_size, n_splits=1, random_state=1337 + ) + train_idx, val_idx = next( + group_shuffle_split.split(df, groups=conversation_chain_labels) + ) + # flatten conversation_chain_ids + flattened_conversation_chain_ids = np.array( + [ + idx + for conversation_chain_id in conversation_chain_ids + for idx in conversation_chain_id + ] + ) + train_df = df.iloc[flattened_conversation_chain_ids[train_idx]].reset_index( + drop=True + ) + val_df = df.iloc[flattened_conversation_chain_ids[val_idx]].reset_index( + drop=True + ) + else: + train_df, val_df = train_test_split( + df, test_size=cfg.dataset.validation_size, random_state=1337 + ) + else: + raise LLMDataException("No valid 
def worker_init_fn(worker_id: int) -> None:
    """Sets the random seed for each worker.

    Args:
        worker_id: ID of the corresponding worker
    """

    # derive a per-worker seed either from PYTHONHASHSEED (if set) or
    # from the current numpy RNG state
    hash_seed = os.environ.get("PYTHONHASHSEED")
    if hash_seed is not None:
        base_seed = int(hash_seed)
    else:
        base_seed = np.random.get_state()[1][0]  # type: ignore
    set_seed(base_seed + worker_id)
def get_val_dataset(val_df: pd.DataFrame, cfg: Any, verbose: bool = True):
    """Prepares validation Dataset.

    Args:
        val_df: validation DataFrame
        cfg: input config
        verbose: verbose

    Returns:
        Validation Dataset
    """

    if cfg.environment._local_rank == 0 and verbose:
        logger.info("Loading validation dataset...")

    return cfg.dataset.dataset_class(df=val_df, cfg=cfg, mode="validation")
@no_type_check
def cat_batches(
    data: DefaultDict[str, Union[torch.Tensor, np.ndarray]]
) -> DefaultDict[str, Union[torch.Tensor, np.ndarray]]:
    """Concatenates output data from several batches

    Args:
        data: dict with keys and list of batch outputs

    Returns:
        Concatenated dict

    """

    for key, batches in data.items():
        first = batches[0]
        # scalar outputs are stacked into a 1-D array; everything else is
        # concatenated along the batch dimension
        is_scalar = len(first.shape) == 0
        if isinstance(first, torch.Tensor):
            data[key] = torch.stack(batches) if is_scalar else torch.cat(batches, dim=0)
        else:
            data[key] = np.stack(batches) if is_scalar else np.concatenate(batches, axis=0)

    return data
def sample_indices(length: int, n_indices: int = 10, seed: int = 1337) -> np.ndarray:
    """Samples random indices

    Args:
        length: length to sample from
        n_indices: number of indices to sample
        seed: seed for sampling

    Returns:
        sampled indices
    """
    # stash the global RNG state so sampling here doesn't disturb callers
    saved_state = np.random.get_state()
    np.random.seed(seed)
    picked = np.random.choice(
        np.arange(length), size=min(length, n_indices), replace=False
    )
    np.random.set_state(saved_state)
    return picked
def sanity_check(cfg):
    """
    Perform sanity check on the data
    """

    train_df = read_dataframe_drop_missing_labels(cfg.dataset.train_dataframe, cfg)
    cfg.dataset.dataset_class.sanity_check(df=train_df, cfg=cfg, mode="train")

    # the validation frame is optional; only check it when the file exists
    valid_path = cfg.dataset.validation_dataframe
    if isinstance(valid_path, str) and os.path.exists(valid_path):
        valid_df = read_dataframe_drop_missing_labels(valid_path, cfg)
        cfg.dataset.dataset_class.sanity_check(df=valid_df, cfg=cfg, mode="validation")
def get_artifact_path_path(
    experiment_name: str, experiment_path: str, artifact_type: str
):
    """Get path to experiment artifact zipfile

    Args:
        experiment_name: name of the experiment
        experiment_path: path containing experiment related files
        artifact_type: type of the artifact

    Returns:
        Path to the zip file with experiment artifact
    """

    zip_name = f"{artifact_type}_{experiment_name}.zip"
    return os.path.join(experiment_path, zip_name)
def check_available_space(output_folder: str, min_disk_space: Optional[float]) -> bool:
    """Checks that the filesystem of ``output_folder`` has enough free space.

    Args:
        output_folder: folder whose filesystem is checked
        min_disk_space: minimum required free space in bytes;
            falsy values (None/0) skip the check

    Returns:
        True if enough space is available or the check was skipped

    Raises:
        LLMResourceException: if the available space is below the requirement
    """
    if not min_disk_space:
        return True

    stats = os.statvfs(output_folder)
    available_size = stats.f_frsize * stats.f_bavail

    if available_size < min_disk_space:
        error = (
            f"Not enough disk space. Available space is {get_size_str(available_size)}."
            f" Required space is {get_size_str(min_disk_space)}."
        )
        raise LLMResourceException(error)

    # fix: previously the success path fell through and returned None,
    # while the skipped check returned True — return a consistent bool
    return True
def get_size_str(
    x, sig_figs=2, input_unit="B", output_unit="dynamic", show_unit=True
) -> str:
    """
    Convert a small input unit such as bytes to human readable format.

    Args:
        x: input value
        sig_figs: number of significant figures
        input_unit: input unit ("B", "KB", "MB", "GB", "TB"), default "B"
        output_unit: output unit ("B", "KB", "MB", "GB", "TB", "dynamic")
            default "dynamic"
        show_unit: whether to show the unit in the output string

    Returns:
        str: Human readable string
    """

    units = ["B", "KB", "MB", "GB", "TB"]
    # start the ladder at the provided input unit
    units = units[units.index(input_unit) :]

    position = 0
    if output_unit == "dynamic":
        # climb units while the value is still >= 1024
        while x >= 1024 and position < len(units) - 1:
            x /= 1024
            position += 1
    else:
        target = units.index(output_unit)
        while position < target:
            x /= 1024
            position += 1

    result = f"{str(round(x, sig_figs))}"
    if show_unit:
        result += f" {units[position]}"

    return result
# based on https://github.com/BlackHC/toma/blob/master/toma/torch_cuda_memory.py
def is_cuda_out_of_memory(exception: BaseException) -> bool:
    """Returns True if *exception* looks like a CUDA out-of-memory RuntimeError."""
    if not isinstance(exception, RuntimeError) or len(exception.args) != 1:
        return False
    message = exception.args[0]
    return "CUDA" in message and "out of memory" in message
class IgnorePatchRequestsFilter(logging.Filter):
    """Logging filter that drops records whose message contains
    'HTTP Request: PATCH' (keeps all other records)."""

    def filter(self, record):
        # returning False excludes the record from the log output
        return re.search(r"HTTP Request: PATCH", record.getMessage()) is None
+ """ + + logger: logging.Logger = None + level: int = None + buf = "" + + def __init__(self, logger, level=None): + super(TqdmToLogger, self).__init__() + self.logger = logger + self.level = level or logging.INFO + + def write(self, buf): + self.buf = buf.strip("\r\n\t ") + + def flush(self): + if self.buf != "": + try: + self.logger.log(self.level, self.buf) + except NameError: + pass + + +def write_flag(path: str, key: str, value: str): + """Writes a new flag + + Args: + path: path to flag json + key: key of the flag + value: values of the flag + """ + + logger.debug(f"Writing flag {key}: {value}") + + if os.path.exists(path): + with open(path, "r+") as file: + flags = json.load(file) + else: + flags = {} + + flags[key] = value + + with open(path, "w+") as file: + json.dump(flags, file) + + +def log_plot(cfg: Any, plot: PlotData, type: str) -> None: + """Logs a given plot + + Args: + cfg: cfg + plot: plot to log + type: type of the plot + + """ + cfg.logging._logger.log(plot.encoding, type, plot.data) diff --git a/llm_studio/src/utils/modeling_utils.py b/llm_studio/src/utils/modeling_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..77161b551cf8f2978a295f9d4b57f2856ccdf186 --- /dev/null +++ b/llm_studio/src/utils/modeling_utils.py @@ -0,0 +1,1089 @@ +import gc +import logging +import os +import re +import shutil +from collections import OrderedDict +from typing import Any, Dict + +import coolname +import deepspeed +import numpy as np +import torch +import transformers +from deepspeed.runtime.dataloader import DeepSpeedDataLoader +from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint +from peft import LoraConfig, PeftModel, get_peft_model +from torch.cuda.amp import autocast +from torch.nn.parallel import DistributedDataParallel +from tqdm import tqdm +from transformers import ( + AutoConfig, + AutoModel, + BitsAndBytesConfig, + GenerationMixin, + StoppingCriteria, + StoppingCriteriaList, +) +from 
def unwrap_model(model: torch.nn.Module):
    """Returns the underlying module, peeling off (possibly nested)
    DistributedDataParallel / DataParallel wrappers."""
    wrappers = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
    while isinstance(model, wrappers):
        model = model.module
    return model
" + f"Required space: {model_size_in_bytes * 1.03 / (1024 * 1024):.2f}MB, " + f"Available space: {free / (1024 * 1024):.2f}MB." + ) + + +# TODO: currently not saving optimizer +def save_checkpoint(model: torch.nn.Module, path: str, cfg: Any): + """Saves a model checkpoint if the path is provided. + + Args: + model: model to save + path: path to save the checkpoint to + + Returns: + Dictionary with all the keys to save + """ + + if cfg.environment.use_deepspeed: + if path is not None: + # gather model params from all ranks when using Deepspeed + status = model.save_16bit_model(path, "checkpoint.pth") # type: ignore + if status: + if cfg.environment._local_rank == 0: + checkpoint = { + "model": torch.load( + os.path.join(path, "checkpoint.pth"), map_location="cpu" + ) + } + else: + logger.warning( + "deepspeed.save_16bit_model didn't save the model, since" + " stage3_gather_16bit_weights_on_model_save=False." + " Saving the full checkpoint instead" + ) + model.save_checkpoint( # type: ignore + os.path.join(path, "ds_checkpoint") + ) + if cfg.environment._local_rank == 0: + # load to cpu + state_dict = get_fp32_state_dict_from_zero_checkpoint( + os.path.join(path, "ds_checkpoint") + ) + # save as normal checkpoint that can be loaded by `load_state_dict` + checkpoint = {"model": state_dict} + torch.save(checkpoint, os.path.join(path, "checkpoint.pth")) + shutil.rmtree(os.path.join(path, "ds_checkpoint")) + + else: + if cfg.environment._local_rank == 0: + model = unwrap_model(model) + checkpoint = {"model": model.state_dict()} + if path is not None: + torch.save(checkpoint, os.path.join(path, "checkpoint.pth")) + + if ( + cfg.environment._local_rank == 0 + and "classification_head.weight" in checkpoint["model"] + ): + torch.save( + checkpoint["model"]["classification_head.weight"], + os.path.join(path, "classification_head.pth"), + ) + + +def load_model_weights( + model: torch.nn.Module, model_weights: Dict, strict: bool, cfg: Any +): + orig_num_items = 
len(model_weights) + model_state_dict = model.state_dict() + + # needed to load models trained in int4/int8 with other dtypes + model_weights = { + k: ( + v + if not ( + cfg.architecture.backbone_dtype not in ("int4", "int8") + and (v.dtype is torch.int8 or v.dtype is torch.uint8) + ) + else model_state_dict[k] + ) + for k, v in model_weights.items() + if not ( + ("SCB" in k or "weight_format" in k or "quant_state" in k) + and cfg.architecture.backbone_dtype not in ("int4", "int8") + ) + } + + # Need to ignore int4/int8 weights so undo strict loading requirement + if len(model_weights) != orig_num_items: + strict = False + + model_weights = {re.sub(r"^module\.", "", k): v for k, v in model_weights.items()} + model_weights = {k.replace("_orig_mod.", ""): v for k, v in model_weights.items()} + + # manual fix for int8 weights + if cfg.architecture.backbone_dtype == "int8": + model_weights = { + k: v.to(cfg.environment._device) if "weight_format" not in k else v + for k, v in model_weights.items() + } + + try: + model.load_state_dict(OrderedDict(model_weights), strict=True) + except Exception as e: + if strict: + raise e + else: + if cfg.environment._local_rank == 0: + logger.warning( + "Only a part of the pretrained weights was loaded. " + "Some layers can't be initialized with pretrained " + f"weights: {e}" + ) + + for layer_name in re.findall("size mismatch for (.*?):", str(e)): + model_weights.pop(layer_name, None) + model.load_state_dict(OrderedDict(model_weights), strict=False) + return model + + +def load_checkpoint( + cfg: Any, model: torch.nn.Module, strict: bool = True, weights_path: str = None +): + """Load checkpoint + + Args: + cfg: config file + model: model to load weights to + strict: whether to apply strict matching for weights + weights_path: custom path to the weights. 
def get_ds_config(cfg: Any):
    """Builds the DeepSpeed config dict from the experiment config.

    Args:
        cfg: config with all the hyperparameters

    Returns:
        DeepSpeed config dictionary (ZeRO stage 2 or 3 depending on
        cfg.environment.deepspeed_method)
    """
    ds_config = {
        # exactly one of fp16/bf16 is enabled, driven by the backbone dtype
        "fp16": {
            "enabled": True if cfg.architecture.backbone_dtype == "float16" else False,
            "loss_scale_window": 100,
        },
        "bf16": {
            "enabled": True if cfg.architecture.backbone_dtype == "bfloat16" else False,
            "loss_scale_window": 100,
        },
        # https://www.deepspeed.ai/docs/config-json/#zero-optimizations-for-fp16-training
        "zero_force_ds_cpu_optimizer": False,
        "zero_optimization": {
            "overlap_comm": True,
            "contiguous_gradients": True,
            "reduce_bucket_size": cfg.environment.deepspeed_reduce_bucket_size,
            # zero3 offload cpu
            # "stage3_max_live_parameters": cfg.environment.deepspeed_stage3_max_live_parameters,  # noqa: E501
            # "stage3_max_reuse_distance": cfg.environment.deepspeed_stage3_max_reuse_distance,  # noqa: E501
            # zero++
            # "reduce_scatter": True,
            # "zero_quantized_weights": True,
            # "zero_hpz_partition_size": 16,
            # "zero_quantized_gradients": True,
        },
        "steps_per_print": 2000,
        "train_micro_batch_size_per_gpu": cfg.training.batch_size,
        "gradient_accumulation_steps": cfg.training.grad_accumulation,
        "wall_clock_breakdown": False,
    }

    if cfg.environment.deepspeed_method == "ZeRO2":
        ds_config["zero_optimization"]["stage"] = 2
        ds_config["zero_optimization"]["allgather_partitions"] = True
        ds_config["zero_optimization"][
            "allgather_bucket_size"
        ] = cfg.environment.deepspeed_allgather_bucket_size
    elif cfg.environment.deepspeed_method == "ZeRO3":
        ds_config["zero_optimization"]["stage"] = 3
        ds_config["zero_optimization"][
            "stage3_prefetch_bucket_size"
        ] = cfg.environment.deepspeed_stage3_prefetch_bucket_size
        ds_config["zero_optimization"][
            "stage3_param_persistence_threshold"
        ] = cfg.environment.deepspeed_stage3_param_persistence_threshold
        # needed so save_16bit_model can gather the weights on save
        ds_config["zero_optimization"][
            "stage3_gather_16bit_weights_on_model_save"
        ] = True

    # TODO: Do not enable offload cpu for now.
    # if cfg.environment.deepspeed_offload_optimizer:
    #     ds_config["zero_optimization"]["offload_optimizer"] = {
    #         "device": "cpu",
    #         "pin_memory": True,
    #     }
    # TODO: RuntimeError: Tensors must be CUDA and dense
    # if cfg.environment.deepspeed_offload_param:
    #     ds_config["zero_optimization"]["offload_param"] =
    #     {"device": "cpu", "pin_memory": True}

    logger.info(f"DeepSpeed config: {ds_config}")

    return ds_config
lr_scheduler=lr_scheduler, + training_data=train_dataloader.dataset, + config_params=ds_config, + ) + model.backbone.base_model.model = ds_engine # type: ignore + model.init_deepspeed() # type: ignore + val_dataloader = DeepSpeedDataLoader( + val_dataloader.dataset, + batch_size=val_dataloader.batch_size, + local_rank=cfg.environment._local_rank, + pin_memory=True, + tput_timer=None, + data_sampler=OrderedDistributedSampler( + val_dataloader.dataset, + num_replicas=cfg.environment._world_size, + rank=cfg.environment._local_rank, + ), + ) + else: + find_unused_parameters = cfg.environment.find_unused_parameters + if getattr(cfg.architecture, "gradient_checkpointing", None): + find_unused_parameters = False + model = DistributedDataParallel( + model, + device_ids=[cfg.environment._local_rank], + find_unused_parameters=find_unused_parameters, + ) + + return model, optimizer, train_dataloader, val_dataloader, lr_scheduler + + +def get_optimizer(model: torch.nn.Module, cfg: Any) -> torch.optim.Optimizer: + """Prepares Optimizer. 
+ + Args: + model: model + cfg: input config + + Returns: + Optimizer + """ + no_decay = ["bias", "LayerNorm.weight"] + differential_layers = cfg.training.differential_learning_rate_layers + optimizer = Optimizers.get(cfg.training.optimizer)( + [ + { + "params": [ + param + for name, param in model.named_parameters() + if (not any(layer in name for layer in differential_layers)) + and (not any(nd in name for nd in no_decay)) + and param.requires_grad + ], + "lr": cfg.training.learning_rate, + "weight_decay": cfg.training.weight_decay, + }, + { + "params": [ + param + for name, param in model.named_parameters() + if (not any(layer in name for layer in differential_layers)) + and (any(nd in name for nd in no_decay)) + and param.requires_grad + ], + "lr": cfg.training.learning_rate, + "weight_decay": 0, + }, + { + "params": [ + param + for name, param in model.named_parameters() + if (any(layer in name for layer in differential_layers)) + and (not any(nd in name for nd in no_decay)) + and param.requires_grad + ], + "lr": cfg.training.differential_learning_rate, + "weight_decay": cfg.training.weight_decay, + }, + { + "params": [ + param + for name, param in model.named_parameters() + if (any(layer in name for layer in differential_layers)) + and (any(nd in name for nd in no_decay)) + and param.requires_grad + ], + "lr": cfg.training.differential_learning_rate, + "weight_decay": 0, + }, + ], + lr=cfg.training.learning_rate, + weight_decay=cfg.training.weight_decay, + ) + + return optimizer + + +def get_scheduler( + cfg: Any, optimizer: torch.optim.Optimizer, epoch_steps: int +) -> torch.optim.lr_scheduler._LRScheduler: + """Prepares Learning Rate Scheduler. 
def reduce_metric(output, reduce=None) -> float:
    """Reduce per-sample metric values to a single score.

    Args:
        output: model output dict containing a "metrics" entry with
            per-sample metric values.
        reduce: reduction mode over the sample dimension; only "mean"
            is currently supported.

    Returns:
        Single score obtained by reducing the per-sample metrics.

    Raises:
        NotImplementedError: if ``reduce`` is anything other than "mean".
    """
    if reduce != "mean":
        raise NotImplementedError()
    return np.mean(output["metrics"])
def contains_nan(output: Dict):
    """Check whether any tensor in ``output`` contains a NaN value.

    Args:
        output: dictionary of model outputs; non-tensor entries are ignored.

    Returns:
        True if at least one tensor entry contains a NaN, else False.
    """
    return any(
        isinstance(value, torch.Tensor)
        and bool(torch.isnan(value.detach().cpu()).sum() > 0)
        for value in output.values()
    )
Skipping inference.") + + val_batch_size = get_inference_batch_size(cfg) + cfg.environment._curr_val_step += val_batch_size * cfg.environment._world_size + + batch = cfg.dataset.dataset_class.batch_to_device(data, cfg.environment._device) + + if cfg.environment.use_deepspeed: + if ( + cfg.prediction.metric != "Perplexity" + and cfg.problem_type not in NON_GENERATION_PROBLEM_TYPES + ): + output = {} + output["predicted_answer_ids"] = ( + model.generate(batch, cfg).detach().cpu() # type: ignore + ) + else: + output = model.forward(batch) + else: + with autocast( + enabled=cfg.environment.mixed_precision, + dtype=get_torch_dtype(cfg.environment.mixed_precision_dtype), + ): + if ( + cfg.prediction.metric != "Perplexity" + and cfg.problem_type not in NON_GENERATION_PROBLEM_TYPES + ): + output = {} + output["predicted_answer_ids"] = ( + unwrap_model(model).generate(batch, cfg).detach().cpu() + ) + else: + output = model.forward(batch) + if contains_nan(output) and cfg.environment.mixed_precision: + raise LLMModelException( + "NaN caught during mixed precision inference. " + "Please disable mixed precision inference. " + "Alternatively, reducing learning rate or " + "gradient clipping may help to stabilize training." 
+ ) + + output = dataloader.dataset.postprocess_batch_predictions(output=output) + + if "predicted_answer_ids" in output.keys(): + del output["predicted_answer_ids"] + + for key, val in output.items(): + if isinstance(val, torch.Tensor): + val = val.detach().cpu() + + # DefaultDict is not used as it adds extra keys during pickle.dump + if key not in out: + out[key] = [val] + else: + out[key] += [val] + + if cfg.environment._local_rank == 0: + # Show logs each 5% of the inference + if (itr + 1) % log_update_steps == 0 or itr == len(dataloader) - 1: + progress_bar.set_description(f"{mode} progress", refresh=False) + if (itr + 1) % log_update_steps == 0: + progress_bar.update(log_update_steps) + else: + progress_bar.update(len(dataloader) % log_update_steps) + + cfg.logging._logger.log( + "internal", + "current_val_step", + cfg.environment._curr_val_step, + step=cfg.environment._curr_val_step, + ) + + if cfg.environment._distributed: + torch.distributed.barrier() + + progress_bar.close() + del progress_bar + out = cat_batches(out) + + return out + + +def save_predictions(cfg, val_data, val_dataloader, val_df, mode): + val_data, val_df = val_dataloader.dataset.format_output( # type: ignore + cfg=cfg, df=val_df, output=val_data + ) + raw_preds_name = os.path.join(cfg.output_directory, f"{mode}_raw_predictions.pkl") + csv_preds_name = os.path.join(cfg.output_directory, f"{mode}_predictions.csv") + save_pickle(raw_preds_name, val_data) + val_df.to_csv(csv_preds_name, index=False) + + +def update_backbone_config(config: Any, cfg: Any): + if hasattr(config, "hidden_dropout_prob"): + config.hidden_dropout_prob = cfg.architecture.intermediate_dropout + if hasattr(config, "attention_probs_dropout_prob"): + config.attention_probs_dropout_prob = cfg.architecture.intermediate_dropout + if ( + not hasattr(config, "hidden_dropout_prob") + and not hasattr(config, "attention_probs_dropout_prob") + and cfg.architecture.intermediate_dropout > 0 + ): + logger.warning( + "Model config 
does not have dropout attributes. " + f"Ignoring Intermediate Dropout = {cfg.architecture.intermediate_dropout}." + ) + cfg.architecture.intermediate_dropout = 0 + + tokenizer = get_tokenizer(cfg) + + if config.eos_token_id != tokenizer.eos_token_id: + logger.warning( + "EOS token id not matching between config and tokenizer. " + "Overwriting with tokenizer id." + ) + config.eos_token_id = tokenizer.eos_token_id + if config.pad_token_id != tokenizer.pad_token_id: + logger.warning( + "PAD token id not matching between config and tokenizer. " + "Overwriting with tokenizer id." + ) + config.pad_token_id = tokenizer.pad_token_id + # no warning needed as not used + if config.bos_token_id != tokenizer.bos_token_id: + config.bos_token_id = tokenizer.bos_token_id + + if "mpt-" in cfg.llm_backbone: + config.init_device = cfg.environment._device + + # See: https://github.com/huggingface/transformers/pull/24906 + if hasattr(config, "pretraining_tp") and cfg.training.lora: + logger.info("Setting pretraining_tp of model config to 1.") + config.pretraining_tp = 1 + + return config + + +def set_generation_config(backbone: torch.nn.Module, cfg_prediction: Any): + backbone.generation_config.min_new_tokens = cfg_prediction.min_length_inference + backbone.generation_config.max_new_tokens = cfg_prediction.max_length_inference + backbone.generation_config.max_time = ( + cfg_prediction.max_time if cfg_prediction.max_time > 0 else None + ) + backbone.generation_config.do_sample = cfg_prediction.do_sample + backbone.generation_config.num_beams = cfg_prediction.num_beams + backbone.generation_config.repetition_penalty = cfg_prediction.repetition_penalty + if cfg_prediction.do_sample: + backbone.generation_config.temperature = cfg_prediction.temperature + backbone.generation_config.top_k = cfg_prediction.top_k + backbone.generation_config.top_p = cfg_prediction.top_p + backbone.generation_config.transformers_version = transformers.__version__ + return backbone + + +def 
create_nlp_backbone(cfg, model_class=AutoModel) -> Any: + """ + Creates a backbone model for NLP tasks. + This is needed for Gradient Checkpointing in DDP mode. + """ + kwargs = dict() + try: + config = AutoConfig.from_pretrained( + cfg.llm_backbone, + trust_remote_code=cfg.environment.trust_remote_code, + token=os.getenv("HUGGINGFACE_TOKEN"), + revision=cfg.environment.huggingface_branch, + ) + kwargs["token"] = os.getenv("HUGGINGFACE_TOKEN") + except TypeError: + # TypeError: RWForCausalLM.__init__() got + # an unexpected keyword argument 'token' + config = AutoConfig.from_pretrained( + cfg.llm_backbone, + trust_remote_code=cfg.environment.trust_remote_code, + revision=cfg.environment.huggingface_branch, + ) + + config = update_backbone_config(config, cfg) + + quantization_config = None + if cfg.architecture.backbone_dtype == "int8" and len(cfg.environment.gpus): + kwargs["device_map"] = {"": cfg.environment._device} # type: ignore + quantization_config = BitsAndBytesConfig( + load_in_8bit=True, + llm_int8_threshold=0.0, + ) + # need to force pretrained + cfg.architecture.pretrained = True + kwargs["torch_dtype"] = torch.float16 # type: ignore + elif cfg.architecture.backbone_dtype == "int4" and len(cfg.environment.gpus): + kwargs["device_map"] = {"": cfg.environment._device} # type: ignore + quantization_config = BitsAndBytesConfig( + load_in_4bit=True, + bnb_4bit_compute_dtype=torch.float16, + bnb_4bit_quant_type="nf4", + ) + # need to force pretrained + cfg.architecture.pretrained = True + kwargs["torch_dtype"] = torch.float16 # type: ignore + elif len(cfg.environment.gpus) == 0 and cfg.architecture.backbone_dtype in [ + "int4", + "int8", + ]: + logger.warning( + "Quantization is not supported on CPU. " + "Please run on GPU or disable quantization." 
+ ) + cfg.architecture.backbone_dtype = "float32" + else: + kwargs["torch_dtype"] = getattr(torch, cfg.architecture.backbone_dtype) + + logger.info(f"Using {cfg.architecture.backbone_dtype} for backbone") + + kwargs["trust_remote_code"] = cfg.environment.trust_remote_code + + if cfg.training.use_flash_attention_2: + try: + import flash_attn # noqa: F401 + + # see https://github.com/fxmarty/transformers/ + # blob/3f06a3a0aec8cc1ec3ad6bf66ebe277392c5ab37/ + # src/transformers/configuration_utils.py#L380 + config._attn_implementation_internal = "flash_attention_2" + if cfg.environment._local_rank == 0: + logger.info("Using Flash Attention 2.") + except ImportError: + if cfg.environment._local_rank == 0: + logger.warning( + "Flash Attention 2.0 is not available. " + "Please consider to run 'make setup' to install it." + ) + + if cfg.architecture.pretrained: + if cfg.environment._local_rank == 0: + logger.info(f"Loading {cfg.llm_backbone}. This may take a while.") + + backbone = model_class.from_pretrained( + cfg.llm_backbone, + revision=cfg.environment.huggingface_branch, + config=config, + quantization_config=quantization_config, + **kwargs, + ) + if cfg.environment._local_rank == 0: + logger.info(f"Loaded {cfg.llm_backbone}.") + else: + kwargs.pop("token", None) + backbone = model_class.from_config(config, **kwargs) + + if cfg.tokenizer._vocab_length > config.vocab_size: + if cfg.environment._local_rank == 0: + logger.info(f"Resizing token embeddings to {cfg.tokenizer._vocab_length}") + backbone.resize_token_embeddings(cfg.tokenizer._vocab_length) + + backbone.model_parallel = False + + if cfg.training.lora: + # if used, gradient checkpointing will be enabled below + loaded_in_kbit = getattr(backbone, "is_loaded_in_8bit", False) or getattr( + backbone, "is_loaded_in_4bit", False + ) + + for name, param in backbone.named_parameters(): + # freeze base model's layers + param.requires_grad = False + + # cast all non INT8 parameters to fp32 + if loaded_in_kbit: + for 
param in backbone.parameters(): + if (param.dtype == torch.float16) or (param.dtype == torch.bfloat16): + param.data = param.data.to(torch.float32) + else: + if cfg.architecture.backbone_dtype != "float32": + if cfg.environment.mixed_precision: + logger.info("Disabling mixed precision as dtype not set to float32.") + cfg.environment.mixed_precision = False + if cfg.architecture.backbone_dtype != "bfloat16": + logger.warning( + "Pure float16 or int8 training will " + "likely lead to unstable training without adapters." + ) + + if cfg.architecture.gradient_checkpointing: + backbone.gradient_checkpointing_enable() + + # initialize the generation config + if backbone.generation_config.eos_token_id != config.eos_token_id: + logger.warning( + "EOS token id not matching between generation config and tokenizer. " + "Overwriting with tokenizer id." + ) + backbone.generation_config.eos_token_id = config.eos_token_id + if backbone.generation_config.pad_token_id != config.pad_token_id: + logger.warning( + "PAD token id not matching between generation config and tokenizer. " + "Overwriting with tokenizer id." 
+ ) + backbone.generation_config.pad_token_id = config.pad_token_id + # no warning needed as not used + if backbone.generation_config.bos_token_id != config.bos_token_id: + backbone.generation_config.bos_token_id = config.bos_token_id + + if cfg.problem_type not in NON_GENERATION_PROBLEM_TYPES: + backbone = set_generation_config(backbone, cfg.prediction) + + return backbone, config + + +# Adapted from https://github.com/huggingface/trl/blob/ +# 2068fdcd931183b59110aa6dc99d8f5bb55c6f2d/trl/trainer/utils.py#L742 +def activate_neftune(model, neftune_noise_alpha): + r""" + Activates the neftune as presented in this code: + https://github.com/neelsjain/NEFTune and paper: https://arxiv.org/abs/2310.05914 + """ + backbone = unwrap_model(model).backbone + if isinstance(backbone, PeftModel): + embeddings = backbone.base_model.get_input_embeddings() + else: + embeddings = backbone.get_input_embeddings() + + embeddings.neftune_noise_alpha = neftune_noise_alpha + embeddings.register_forward_hook(neftune_post_forward_hook) + + +def neftune_post_forward_hook(module, input, output): + """ + Implements the NEFTune forward pass for the model using forward hooks. + Note this works only for torch.nn.Embedding layers. + This method is slightly adapted from the original source code + that can be found here: https://github.com/neelsjain/NEFTune + + Simply add it to your model as follows: + ```python + model = ... + model.embed_tokens.neftune_noise_alpha = 0.1 + model.embed_tokens.register_forward_hook(neftune_post_forward_hook) + ``` + + Args: + module (`torch.nn.Module`): + The embedding module where the hook is attached. Note that you need to set + `module.neftune_noise_alpha` to the desired noise alpha value. + input (`torch.Tensor`): + The input tensor to the model. + output (`torch.Tensor`): + The output tensor of the model (i.e. the embeddings). 
+ """ + if module.training: + dims = torch.tensor(output.size(1) * output.size(2)) + mag_norm = module.neftune_noise_alpha / torch.sqrt(dims) + output = output + torch.zeros_like(output).uniform_(-mag_norm, mag_norm) + return output + + +class TokenStoppingCriteria(StoppingCriteria): + """ + Stopping criteria based on tokens. + Will stop generation when each generated sample contains at least one of the + stop_word_ids. + """ + + def __init__(self, stop_word_ids, prompt_input_ids_len): + super().__init__() + self.prompt_input_ids_len = prompt_input_ids_len + if stop_word_ids is None: + stop_word_ids = [] + self.stop_word_ids = stop_word_ids + + def should_stop( + self, + generated_ids: torch.Tensor, + stop_word_id: torch.Tensor, + ): + if len(stop_word_id.shape) == 0: + return ( + torch.mean(((generated_ids == stop_word_id).sum(1) > 0).float()) == 1 + ).item() + else: + return ( + self.get_num_vector_found_in_matrix_rows(stop_word_id, generated_ids) + == generated_ids.shape[0] + ) + + @staticmethod + def get_num_vector_found_in_matrix_rows(vector, matrix): + """ + Count the number of times a vector is found in a matrix row. + If the vector is found in a row, the search stops and the next row is searched. + """ + assert len(vector.shape) == 1 + assert len(matrix.shape) == 2 + + found = 0 + for row in matrix: + # stride through the vector + for i in range(len(row) - len(vector) + 1): + # check if the vector contains the tensor + if torch.all(row[i : i + len(vector)] == vector): + found += 1 + break + + return found + + def __call__(self, input_ids: torch.Tensor, scores: torch.FloatTensor, **kwargs): + generated_ids: torch.Tensor = input_ids[:, self.prompt_input_ids_len :] + for stop_word_id in self.stop_word_ids: + if self.should_stop(generated_ids, stop_word_id.to(generated_ids.device)): + if generated_ids.shape[1] == 1: + logger.warning( + f"Stopping criteria triggered for {stop_word_id} at first " + "generated token." 
+ ) + return True + return False + + +class EnvVariableStoppingCriteria(StoppingCriteria): + """ + Stopping criteria based on env variable. + Useful to force stopping within the app. + """ + + stop_streaming_env: str = "STOP_STREAMING" + + def __call__(self, input_ids: torch.Tensor, scores: torch.FloatTensor, **kwargs): + should_stop = self.stop_streaming_env in os.environ + if should_stop: + logger.info("Received signal to stop generating") + return should_stop + + +def prepare_lora(cfg, backbone): + target_modules = ( + [ + lora_target_module.strip() + for lora_target_module in cfg.training.lora_target_modules.strip().split( # noqa: E501 + "," + ) + ] + if cfg.training.lora_target_modules + else None + ) + + if target_modules is None: + target_modules = [] + for name, module in backbone.named_modules(): + if ( + isinstance( + module, (torch.nn.Linear, torch.nn.Conv1d, Conv1DTransformer) + ) + and "head" not in name + ): + name = name.split(".")[-1] + if name not in target_modules: + target_modules.append(name) + + if cfg.environment._local_rank == 0: + logger.info(f"Lora module names: {target_modules}") + + lora_config = LoraConfig( + r=cfg.training.lora_r, + lora_alpha=cfg.training.lora_alpha, + target_modules=target_modules, + lora_dropout=cfg.training.lora_dropout, + bias="none", + task_type="CAUSAL_LM", + ) + if cfg.architecture.gradient_checkpointing: + backbone.enable_input_require_grads() + backbone = get_peft_model(backbone, lora_config) + backbone.print_trainable_parameters() + return backbone + + +def generate(backbone, batch, cfg, streamer, remove_prompt=True): + mask_key = "prompt_attention_mask" + pad_keys = [ + "prompt_input_ids", + "prompt_attention_mask", + ] + batch = batch_padding( + cfg, + batch, + training=False, + mask_key=mask_key, + pad_keys=pad_keys, + ) + input_ids = batch["prompt_input_ids"] + attention_mask = batch["prompt_attention_mask"] + # Adding GenerationMixin type annotation for faster lookup + generation_function: 
def get_torch_dtype(dtype):
    """Map a dtype name to the corresponding torch dtype.

    Args:
        dtype: dtype name such as "float16" or "bfloat16".

    Returns:
        The matching torch dtype; torch.float32 for any other name.
    """
    dtype_map = {
        "float16": torch.float16,
        "bfloat16": torch.bfloat16,
    }
    return dtype_map.get(dtype, torch.float32)
+ """ + + data: str + encoding: str + + def __post_init__(self): + assert self.encoding in PLOT_ENCODINGS, f"Unknown plot encoding {self.encoding}" + + +def get_line_separator_html(): + return ( + "
def decode_bytes(chunks: List[bytes]):
    """Decodes a stream of byte chunks to strings.

    UTF-8 multi-byte sequences may be split across chunk boundaries; such
    incomplete sequences are buffered and combined with subsequent chunks
    until they decode cleanly. A trailing sequence that never completes is
    silently dropped.

    Args:
        chunks: byte chunks

    Returns:
        list of decoded strings
    """
    decoded_tokens = []
    buffer = b""

    for chunk in chunks:
        buffer += chunk
        try:
            decoded_tokens.append(buffer.decode("utf-8"))
            buffer = b""
        except UnicodeDecodeError:
            # Keep ALL undecoded bytes, not just the current chunk:
            # resetting the buffer to the last chunk would lose characters
            # whose encoding spans three or more chunks.
            pass

    # Flush remaining bytes only if there are any; the previous
    # unconditional flush appended a spurious trailing "" string.
    if buffer:
        try:
            decoded_tokens.append(buffer.decode("utf-8"))
        except UnicodeDecodeError:
            pass

    return decoded_tokens
tags, except for those inside code blocks. + This is needed because the markdown_table_cell_type() function does not + convert newlines to
tags, so we have to do it ourselves. + + This function is rather simple and may fail on text that uses ` + in some other context than marking code cells or uses ` within + the code itself (as this function). + """ + code_block_regex = r"(```.*?```|``.*?``)" + parts = re.split(code_block_regex, text, flags=re.DOTALL) + for i in range(len(parts)): + # Only substitute for text outside matched code blocks + if "`" not in parts[i]: + parts[i] = parts[i].replace("\n", "
").strip() + text = "".join(parts) + + # Restore newlines around code blocks, needed for correct rendering + for x in ["```", "``", "`"]: + text = text.replace(f"
{x}", f"\n{x}") + text = text.replace(f"{x}
", f"{x}\n") + return html.escape(text.replace("
", "\n")) + + +def list_to_markdown_representation( + tokens: List[str], masks: List[bool], pad_token: int, num_chars: int = 65 +): + """ + Creates a markdown representation string from a list of tokens, + with HTML line breaks after 'num_chars' characters. + Masked tokens will be emphasized in HTML representation. + + """ + x = [] + sublist: List[str] = [] + raw_sublist: List[str] = [] + for token, mask in zip(tokens, masks): + if len(token) + len(", ".join(raw_sublist)) > num_chars: + x.append(", ".join(sublist)) + sublist = [] + raw_sublist = [] + + raw_sublist.append(token) + token_formatted = html.escape(token) + if mask: + token_formatted = f"""***{token_formatted}***""" + elif token == pad_token: + token_formatted = f"""{ + token_formatted + }""" + sublist.append(token_formatted) + + if sublist: # add any remaining items in sublist + x.append(", ".join(sublist)) + + list_representation = "\n[" + "
def set_seed(seed: int = 1234) -> None:
    """Seed all relevant random number generators.

    Seeds Python's ``random`` module, NumPy, and PyTorch (CPU plus every
    CUDA device), and fixes PYTHONHASHSEED. cudnn is deliberately left in
    benchmark (non-deterministic) mode, trading strict reproducibility
    for speed.

    Args:
        seed: seed value
    """
    os.environ["PYTHONHASHSEED"] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # favor training speed over bitwise reproducibility
    torch.backends.cudnn.deterministic = False
    torch.backends.cudnn.benchmark = True
def kill_child_processes(parent_pid: int) -> bool:
    """Kill a process together with all of its child processes.

    Args:
        parent_pid: process id of the parent process

    Returns:
        True if the processes were killed, False if the parent is a
        zombie or no longer exists.
    """
    logger.debug(f"Killing process id: {parent_pid}")

    try:
        parent = psutil.Process(parent_pid)
        if parent.status() == "zombie":
            return False
        for child in parent.children(recursive=True):
            child.kill()
        parent.kill()
    except psutil.NoSuchProcess:
        logger.warning(f"Cannot kill process id: {parent_pid}. No such process.")
        return False
    return True
class PatchedAttribute:
    """
    Temporarily override an attribute of an object.

    Similar to unittest.mock.patch, but also works for attributes that
    the original object does not have: those are created on enter and
    removed again on exit.

    >>> class MyObj:
    ...     attr = 'original'
    >>> my_obj = MyObj()
    >>> with PatchedAttribute(my_obj, 'attr', 'patched'):
    ...     print(my_obj.attr)
    patched
    >>> print(my_obj.attr)
    original
    >>> with PatchedAttribute(my_obj, 'new_attr', 'new_patched'):
    ...     print(my_obj.new_attr)
    new_patched
    >>> assert not hasattr(my_obj, 'new_attr')
    """

    def __init__(self, obj, attribute, new_value):
        self.obj = obj
        self.attribute = attribute
        self.new_value = new_value
        # EAFP: remember the original value only if the attribute exists,
        # so __exit__ knows whether to restore or delete it.
        try:
            self.original_value = getattr(obj, attribute)
            self.original_exists = True
        except AttributeError:
            self.original_exists = False

    def __enter__(self):
        setattr(self.obj, self.attribute, self.new_value)

    def __exit__(self, exc_type, exc_val, exc_tb):
        if not self.original_exists:
            delattr(self.obj, self.attribute)
        else:
            setattr(self.obj, self.attribute, self.original_value)
+ - You can login to hugginface_hub by running + ```python + import huggingface_hub + huggingface_hub.login() + ``` + +You will also need to download the classification head, either manually, or by running the following code: + +```python +from huggingface_hub import hf_hub_download + +model_name = "{{repo_id}}" # either local folder or huggingface model name +hf_hub_download(repo_id=model_name, filename="classification_head.pth", local_dir="./") +``` + +You can make classification predictions by following the example below: + +```python +from transformers import AutoModelForCausalLM, AutoTokenizer + +model_name = "{{repo_id}}" # either local folder or huggingface model name +# Important: The prompt needs to be in the same format the model was trained with. +# You can find an example prompt in the experiment logs. +prompt = "{{text_prompt_start}}How are you?{{end_of_sentence}}{{text_answer_separator}}" + +tokenizer = AutoTokenizer.from_pretrained( + model_name, + use_fast={{use_fast}}, + trust_remote_code={{trust_remote_code}}, +) +model = AutoModelForCausalLM.from_pretrained( + model_name, + torch_dtype="auto", + device_map={"": "cuda:0"}, + trust_remote_code={{trust_remote_code}}, +).cuda().eval() + +head_weights = torch.load("classification_head.pth", map_location="cuda") +# settings can be arbitrary here as we overwrite with saved weights +head = torch.nn.Linear(1, 1, bias=False).to("cuda") +head.weight.data = head_weights + +inputs = tokenizer(prompt, return_tensors="pt", add_special_tokens=False).to("cuda") + +out = model(**inputs).logits + +logits = head(out[:,-1]) + +print(logits) +``` diff --git a/model_cards/text_causal_classification_model_card_template.md b/model_cards/text_causal_classification_model_card_template.md new file mode 100644 index 0000000000000000000000000000000000000000..13c6c163e06d58af7c22e6edbd9eaeef27eeaa55 --- /dev/null +++ b/model_cards/text_causal_classification_model_card_template.md @@ -0,0 +1,106 @@ +--- +language: +- en 
+library_name: transformers +inference: false +thumbnail: https://h2o.ai/etc.clientlibs/h2o/clientlibs/clientlib-site/resources/images/favicon.ico +tags: +- gpt +- llm +- large language model +- h2o-llmstudio +--- +# Model Card +## Summary + +This model was trained using [H2O LLM Studio](https://github.com/h2oai/h2o-llmstudio). +- Base model: [{{base_model}}](https://huggingface.co/{{base_model}}) + + +## Usage + +To use the model with the `transformers` library on a machine with GPUs, first make sure you have the `transformers` library installed. + +```bash +pip install transformers=={{transformers_version}} +``` + +Also make sure you are providing your huggingface token if the model is lying in a private repo. + - You can login to hugginface_hub by running + ```python + import huggingface_hub + huggingface_hub.login() + ``` + +You will also need to download the classification head, either manually, or by running the following code: + +```python +from huggingface_hub import hf_hub_download + +model_name = "{{repo_id}}" # either local folder or huggingface model name +hf_hub_download(repo_id=model_name, filename="classification_head.pth", local_dir="./") +``` + +You can make classification predictions by following the example below: + +```python +from transformers import AutoModelForCausalLM, AutoTokenizer + +model_name = "{{repo_id}}" # either local folder or huggingface model name +# Important: The prompt needs to be in the same format the model was trained with. +# You can find an example prompt in the experiment logs. 
+prompt = "{{text_prompt_start}}How are you?{{end_of_sentence}}{{text_answer_separator}}" + +tokenizer = AutoTokenizer.from_pretrained( + model_name, + use_fast={{use_fast}}, + trust_remote_code={{trust_remote_code}}, +) +model = AutoModelForCausalLM.from_pretrained( + model_name, + torch_dtype="auto", + device_map={"": "cuda:0"}, + trust_remote_code={{trust_remote_code}}, +).cuda().eval() + +head_weights = torch.load("classification_head.pth", map_location="cuda") +# settings can be arbitrary here as we overwrite with saved weights +head = torch.nn.Linear(1, 1, bias=False).to("cuda") +head.weight.data = head_weights + +inputs = tokenizer(prompt, return_tensors="pt", add_special_tokens=False).to("cuda") + +out = model(**inputs).logits + +logits = head(out[:,-1]) + +print(logits) +``` + +## Quantization and sharding + +You can load the models using quantization by specifying ```load_in_8bit=True``` or ```load_in_4bit=True```. Also, sharding on multiple GPUs is possible by setting ```device_map=auto```. + +## Model Architecture + +``` +{{model_architecture}} +``` + +## Model Configuration + +This model was trained using H2O LLM Studio and with the configuration in [cfg.yaml](cfg.yaml). Visit [H2O LLM Studio](https://github.com/h2oai/h2o-llmstudio) to learn how to train your own large language models. + + +## Disclaimer + +Please read this disclaimer carefully before using the large language model provided in this repository. Your use of the model signifies your agreement to the following terms and conditions. + +- Biases and Offensiveness: The large language model is trained on a diverse range of internet text data, which may contain biased, racist, offensive, or otherwise inappropriate content. By using this model, you acknowledge and accept that the generated content may sometimes exhibit biases or produce content that is offensive or inappropriate. The developers of this repository do not endorse, support, or promote any such content or viewpoints. 
+- Limitations: The large language model is an AI-based tool and not a human. It may produce incorrect, nonsensical, or irrelevant responses. It is the user's responsibility to critically evaluate the generated content and use it at their discretion. +- Use at Your Own Risk: Users of this large language model must assume full responsibility for any consequences that may arise from their use of the tool. The developers and contributors of this repository shall not be held liable for any damages, losses, or harm resulting from the use or misuse of the provided model. +- Ethical Considerations: Users are encouraged to use the large language model responsibly and ethically. By using this model, you agree not to use it for purposes that promote hate speech, discrimination, harassment, or any form of illegal or harmful activities. +- Reporting Issues: If you encounter any biased, offensive, or otherwise inappropriate content generated by the large language model, please report it to the repository maintainers through the provided channels. Your feedback will help improve the model and mitigate potential issues. +- Changes to this Disclaimer: The developers of this repository reserve the right to modify or update this disclaimer at any time without prior notice. It is the user's responsibility to periodically review the disclaimer to stay informed about any changes. + +By using the large language model provided in this repository, you agree to accept and comply with the terms and conditions outlined in this disclaimer. If you do not agree with any part of this disclaimer, you should refrain from using the model and any content generated by it. 
diff --git a/model_cards/text_causal_language_modeling_experiment_summary_card_template.md b/model_cards/text_causal_language_modeling_experiment_summary_card_template.md new file mode 100644 index 0000000000000000000000000000000000000000..fb1385c13101976686d13de90d274f76c2431eca --- /dev/null +++ b/model_cards/text_causal_language_modeling_experiment_summary_card_template.md @@ -0,0 +1,41 @@ +### Usage with HF transformers + +To use the model with the `transformers` library on a machine with GPUs: +- First, push the model to a huggingface repo by clicking the Push checkpoint to huggingface button below +- Make sure you have the `transformers` library installed in the machine's environment + +```bash +pip install transformers=={{transformers_version}} +``` +- Pass model path from the huggingface repo to the following pipeline +- Also make sure you are providing your huggingface token to the pipeline if the model is lying in a private repo. + - Either leave token=True in the pipeline and login to huggingface_hub by running + ```python + import huggingface_hub + huggingface_hub.login() + ``` + - Or directly pass your token to the pipeline +```python +from transformers import pipeline + +generate_text = pipeline( + model="{{repo_id}}", + torch_dtype="auto", + trust_remote_code=True, + use_fast={{use_fast}}, + device_map={"": "cuda:0"}, + token=True, +) + +res = generate_text( + "Why is drinking water so healthy?", + min_new_tokens={{min_new_tokens}}, + max_new_tokens={{max_new_tokens}}, + do_sample={{do_sample}}, + num_beams={{num_beams}}, + temperature=float({{temperature}}), + repetition_penalty=float({{repetition_penalty}}), + renormalize_logits=True +) +print(res[0]["generated_text"]) +``` diff --git a/model_cards/text_causal_language_modeling_model_card_template.md b/model_cards/text_causal_language_modeling_model_card_template.md new file mode 100644 index 0000000000000000000000000000000000000000..ee3fcd76369db769560ff3549c9d25dbe8f9c44b --- /dev/null +++
b/model_cards/text_causal_language_modeling_model_card_template.md @@ -0,0 +1,178 @@ +--- +language: +- en +library_name: transformers +inference: false +thumbnail: https://h2o.ai/etc.clientlibs/h2o/clientlibs/clientlib-site/resources/images/favicon.ico +tags: +- gpt +- llm +- large language model +- h2o-llmstudio +--- +# Model Card +## Summary + +This model was trained using [H2O LLM Studio](https://github.com/h2oai/h2o-llmstudio). +- Base model: [{{base_model}}](https://huggingface.co/{{base_model}}) + + +## Usage + +To use the model with the `transformers` library on a machine with GPUs, first make sure you have the `transformers` library installed. + +```bash +pip install transformers=={{transformers_version}} +``` + +Also make sure you are providing your huggingface token to the pipeline if the model is lying in a private repo. + - Either leave `token=True` in the `pipeline` and login to hugginface_hub by running + ```python + import huggingface_hub + huggingface_hub.login() + ``` + - Or directly pass your to `token` in the `pipeline` + +```python +from transformers import pipeline + +generate_text = pipeline( + model="{{repo_id}}", + torch_dtype="auto", + trust_remote_code=True, + use_fast={{use_fast}}, + device_map={"": "cuda:0"}, + token=True, +) + +# generate configuration can be modified to your needs +# generate_text.model.generation_config.min_new_tokens = {{min_new_tokens}} +# generate_text.model.generation_config.max_new_tokens = {{max_new_tokens}} +# generate_text.model.generation_config.do_sample = {{do_sample}} +# generate_text.model.generation_config.num_beams = {{num_beams}} +# generate_text.model.generation_config.temperature = float({{temperature}}) +# generate_text.model.generation_config.repetition_penalty = float({{repetition_penalty}}) + +res = generate_text( + "Why is drinking water so healthy?", + renormalize_logits=True +) +print(res[0]["generated_text"]) +``` + +You can print a sample prompt after the preprocessing step to see how it is 
feed to the tokenizer: + +```python +print(generate_text.preprocess("Why is drinking water so healthy?")["prompt_text"]) +``` + +```bash +{{text_prompt_start}}Why is drinking water so healthy?{{end_of_sentence}}{{text_answer_separator}} +``` + +Alternatively, you can download [h2oai_pipeline.py](h2oai_pipeline.py), store it alongside your notebook, and construct the pipeline yourself from the loaded model and tokenizer. If the model and the tokenizer are fully supported in the `transformers` package, this will allow you to set `trust_remote_code=False`. + +```python +from h2oai_pipeline import H2OTextGenerationPipeline +from transformers import AutoModelForCausalLM, AutoTokenizer + +tokenizer = AutoTokenizer.from_pretrained( + "{{repo_id}}", + use_fast={{use_fast}}, + padding_side="left", + trust_remote_code={{trust_remote_code}}, +) +model = AutoModelForCausalLM.from_pretrained( + "{{repo_id}}", + torch_dtype="auto", + device_map={"": "cuda:0"}, + trust_remote_code={{trust_remote_code}}, +) +generate_text = H2OTextGenerationPipeline(model=model, tokenizer=tokenizer) + +# generate configuration can be modified to your needs +# generate_text.model.generation_config.min_new_tokens = {{min_new_tokens}} +# generate_text.model.generation_config.max_new_tokens = {{max_new_tokens}} +# generate_text.model.generation_config.do_sample = {{do_sample}} +# generate_text.model.generation_config.num_beams = {{num_beams}} +# generate_text.model.generation_config.temperature = float({{temperature}}) +# generate_text.model.generation_config.repetition_penalty = float({{repetition_penalty}}) + +res = generate_text( + "Why is drinking water so healthy?", + renormalize_logits=True +) +print(res[0]["generated_text"]) +``` + + +You may also construct the pipeline from the loaded model and tokenizer yourself and consider the preprocessing steps: + +```python +from transformers import AutoModelForCausalLM, AutoTokenizer + +model_name = "{{repo_id}}" # either local folder or huggingface 
model name +# Important: The prompt needs to be in the same format the model was trained with. +# You can find an example prompt in the experiment logs. +prompt = "{{text_prompt_start}}How are you?{{end_of_sentence}}{{text_answer_separator}}" + +tokenizer = AutoTokenizer.from_pretrained( + model_name, + use_fast={{use_fast}}, + trust_remote_code={{trust_remote_code}}, +) +model = AutoModelForCausalLM.from_pretrained( + model_name, + torch_dtype="auto", + device_map={"": "cuda:0"}, + trust_remote_code={{trust_remote_code}}, +) +model.cuda().eval() +inputs = tokenizer(prompt, return_tensors="pt", add_special_tokens=False).to("cuda") + +# generate configuration can be modified to your needs +# model.generation_config.min_new_tokens = {{min_new_tokens}} +# model.generation_config.max_new_tokens = {{max_new_tokens}} +# model.generation_config.do_sample = {{do_sample}} +# model.generation_config.num_beams = {{num_beams}} +# model.generation_config.temperature = float({{temperature}}) +# model.generation_config.repetition_penalty = float({{repetition_penalty}}) + +tokens = model.generate( + input_ids=inputs["input_ids"], + attention_mask=inputs["attention_mask"], + renormalize_logits=True +)[0] + +tokens = tokens[inputs["input_ids"].shape[1]:] +answer = tokenizer.decode(tokens, skip_special_tokens=True) +print(answer) +``` + +## Quantization and sharding + +You can load the models using quantization by specifying ```load_in_8bit=True``` or ```load_in_4bit=True```. Also, sharding on multiple GPUs is possible by setting ```device_map=auto```. + +## Model Architecture + +``` +{{model_architecture}} +``` + +## Model Configuration + +This model was trained using H2O LLM Studio and with the configuration in [cfg.yaml](cfg.yaml). Visit [H2O LLM Studio](https://github.com/h2oai/h2o-llmstudio) to learn how to train your own large language models. + + +## Disclaimer + +Please read this disclaimer carefully before using the large language model provided in this repository. 
Your use of the model signifies your agreement to the following terms and conditions. + +- Biases and Offensiveness: The large language model is trained on a diverse range of internet text data, which may contain biased, racist, offensive, or otherwise inappropriate content. By using this model, you acknowledge and accept that the generated content may sometimes exhibit biases or produce content that is offensive or inappropriate. The developers of this repository do not endorse, support, or promote any such content or viewpoints. +- Limitations: The large language model is an AI-based tool and not a human. It may produce incorrect, nonsensical, or irrelevant responses. It is the user's responsibility to critically evaluate the generated content and use it at their discretion. +- Use at Your Own Risk: Users of this large language model must assume full responsibility for any consequences that may arise from their use of the tool. The developers and contributors of this repository shall not be held liable for any damages, losses, or harm resulting from the use or misuse of the provided model. +- Ethical Considerations: Users are encouraged to use the large language model responsibly and ethically. By using this model, you agree not to use it for purposes that promote hate speech, discrimination, harassment, or any form of illegal or harmful activities. +- Reporting Issues: If you encounter any biased, offensive, or otherwise inappropriate content generated by the large language model, please report it to the repository maintainers through the provided channels. Your feedback will help improve the model and mitigate potential issues. +- Changes to this Disclaimer: The developers of this repository reserve the right to modify or update this disclaimer at any time without prior notice. It is the user's responsibility to periodically review the disclaimer to stay informed about any changes. 
+ +By using the large language model provided in this repository, you agree to accept and comply with the terms and conditions outlined in this disclaimer. If you do not agree with any part of this disclaimer, you should refrain from using the model and any content generated by it. diff --git a/model_cards/text_sequence_to_sequence_modeling_experiment_summary_card_template.md b/model_cards/text_sequence_to_sequence_modeling_experiment_summary_card_template.md new file mode 100644 index 0000000000000000000000000000000000000000..a034ef8992c1e9d1744f93843aecc031831be014 --- /dev/null +++ b/model_cards/text_sequence_to_sequence_modeling_experiment_summary_card_template.md @@ -0,0 +1,50 @@ +### Usage with HF transformers + +To use the model with the `transformers` library on a machine with GPUs: +- First, push the model to a huggingface repo by clicking the Push checkpoint to huggingface button below +- Make sure you have the `transformers` library installed in the machine's environment + +```bash +pip install transformers=={{transformers_version}} +``` +- Make sure to be logged in to your huggingface account if accessing a private repo +- Then, you can use the following code snippet: + +```python +from transformers import AutoModelForSeq2SeqLM, AutoTokenizer + +model_name = "{{repo_id}}" # either local folder or huggingface model name +# Important: The prompt needs to be in the same format the model was trained with. +# You can find an example prompt in the experiment logs. 
+prompt = "{{text_prompt_start}}How are you?{{end_of_sentence}}{{text_answer_separator}}" + +tokenizer = AutoTokenizer.from_pretrained( + model_name, + use_fast={{use_fast}}, + trust_remote_code={{trust_remote_code}}, +) +model = AutoModelForSeq2SeqLM.from_pretrained( + model_name, + torch_dtype="auto", + device_map={"": "cuda:0"}, + trust_remote_code={{trust_remote_code}}, +) +model.cuda().eval() +inputs = tokenizer(prompt, return_tensors="pt", add_special_tokens=False).to("cuda") + +# generate configuration can be modified to your needs +tokens = model.generate( + input_ids=inputs["input_ids"], + attention_mask=inputs["attention_mask"], + min_new_tokens={{min_new_tokens}}, + max_new_tokens={{max_new_tokens}}, + do_sample={{do_sample}}, + num_beams={{num_beams}}, + temperature=float({{temperature}}), + repetition_penalty=float({{repetition_penalty}}), + renormalize_logits=True +)[0] + +answer = tokenizer.decode(tokens, skip_special_tokens=True) +print(answer) +``` diff --git a/model_cards/text_sequence_to_sequence_modeling_model_card_template.md b/model_cards/text_sequence_to_sequence_modeling_model_card_template.md new file mode 100644 index 0000000000000000000000000000000000000000..efa440913225cfeaa9f775c555406fbebcd91432 --- /dev/null +++ b/model_cards/text_sequence_to_sequence_modeling_model_card_template.md @@ -0,0 +1,97 @@ +--- +language: +- en +library_name: transformers +inference: false +thumbnail: https://h2o.ai/etc.clientlibs/h2o/clientlibs/clientlib-site/resources/images/favicon.ico +tags: +- gpt +- llm +- large language model +- h2o-llmstudio +--- +# Model Card +## Summary + +This model was trained using [H2O LLM Studio](https://github.com/h2oai/h2o-llmstudio). +- Base model: [{{base_model}}](https://huggingface.co/{{base_model}}) + + +## Usage + +To use the model with the `transformers` library on a machine with GPUs, first make sure you have the `transformers`, `accelerate` and `torch` libraries installed. 
+ +```bash +pip install transformers=={{transformers_version}} +pip install accelerate=={{accelerate_version}} +pip install torch=={{torch_version}} +``` + +For inference, you can use the following code snippet: + +```python +from transformers import AutoModelForSeq2SeqLM, AutoTokenizer + +model_name = "{{repo_id}}" # either local folder or huggingface model name +# Important: The prompt needs to be in the same format the model was trained with. +# You can find an example prompt in the experiment logs. +prompt = "{{text_prompt_start}}How are you?{{end_of_sentence}}{{text_answer_separator}}" + +tokenizer = AutoTokenizer.from_pretrained( + model_name, + use_fast={{use_fast}}, + trust_remote_code={{trust_remote_code}}, +) +model = AutoModelForSeq2SeqLM.from_pretrained( + model_name, + torch_dtype="auto", + device_map={"": "cuda:0"}, + trust_remote_code={{trust_remote_code}}, +) +model.cuda().eval() +inputs = tokenizer(prompt, return_tensors="pt", add_special_tokens=False).to("cuda") + +# generate configuration can be modified to your needs +tokens = model.generate( + input_ids=inputs["input_ids"], + attention_mask=inputs["attention_mask"], + min_new_tokens={{min_new_tokens}}, + max_new_tokens={{max_new_tokens}}, + do_sample={{do_sample}}, + num_beams={{num_beams}}, + temperature=float({{temperature}}), + repetition_penalty=float({{repetition_penalty}}), + renormalize_logits=True +)[0] + +answer = tokenizer.decode(tokens, skip_special_tokens=True) +print(answer) +``` + +## Quantization and sharding + +You can load the models using quantization by specifying ```load_in_8bit=True``` or ```load_in_4bit=True```. Also, sharding on multiple GPUs is possible by setting ```device_map=auto```. + +## Model Architecture + +``` +{{model_architecture}} +``` + +## Model Configuration + +This model was trained using H2O LLM Studio and with the configuration in [cfg.yaml](cfg.yaml). 
Visit [H2O LLM Studio](https://github.com/h2oai/h2o-llmstudio) to learn how to train your own large language models. + + +## Disclaimer + +Please read this disclaimer carefully before using the large language model provided in this repository. Your use of the model signifies your agreement to the following terms and conditions. + +- Biases and Offensiveness: The large language model is trained on a diverse range of internet text data, which may contain biased, racist, offensive, or otherwise inappropriate content. By using this model, you acknowledge and accept that the generated content may sometimes exhibit biases or produce content that is offensive or inappropriate. The developers of this repository do not endorse, support, or promote any such content or viewpoints. +- Limitations: The large language model is an AI-based tool and not a human. It may produce incorrect, nonsensical, or irrelevant responses. It is the user's responsibility to critically evaluate the generated content and use it at their discretion. +- Use at Your Own Risk: Users of this large language model must assume full responsibility for any consequences that may arise from their use of the tool. The developers and contributors of this repository shall not be held liable for any damages, losses, or harm resulting from the use or misuse of the provided model. +- Ethical Considerations: Users are encouraged to use the large language model responsibly and ethically. By using this model, you agree not to use it for purposes that promote hate speech, discrimination, harassment, or any form of illegal or harmful activities. +- Reporting Issues: If you encounter any biased, offensive, or otherwise inappropriate content generated by the large language model, please report it to the repository maintainers through the provided channels. Your feedback will help improve the model and mitigate potential issues. 
"""Interactive prompting CLI for a trained H2O LLM Studio experiment.

Loads the experiment config and checkpoint, then reads prompts from stdin
in a loop. Input lines starting with ``--`` update generation parameters
on the fly (e.g. ``--num_beams 4 --top_k 30``).
"""

import os

from llm_studio.src.utils.config_utils import load_config_yaml

# Keep tokenizers and BLAS libraries single-threaded: this is an
# interactive, batch-size-1 tool, and thread pools only add overhead and
# fork warnings. Must be set before the heavy imports below.
os.environ["TOKENIZERS_PARALLELISM"] = "false"
os.environ["OMP_NUM_THREADS"] = "1"
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["OPENBLAS_NUM_THREADS"] = "1"
os.environ["VECLIB_MAXIMUM_THREADS"] = "1"
os.environ["NUMEXPR_NUM_THREADS"] = "1"


import argparse

import numpy as np
import torch

from llm_studio.src.datasets.text_utils import get_tokenizer
from llm_studio.src.utils.modeling_utils import load_checkpoint, set_generation_config

# Values accepted as "true" when a boolean prediction parameter is set
# from the command line / prompt input.
_TRUE_STRINGS = ("1", "true", "t", "yes", "y")


def parse_param(cfg, prompt):
    """Update ``cfg.prediction`` in place from a ``--name value`` string.

    Accepts chained flags, e.g. ``--num_beams 4 --top_k 30``. Each value is
    cast to the type of the attribute's current value. Booleans are parsed
    from their textual spelling explicitly because ``bool("False")`` is
    ``True`` (any non-empty string is truthy).

    Args:
        cfg: Experiment config; only ``cfg.prediction`` is mutated.
        prompt: User input such as ``--temperature 0.3 --do_sample False``.

    Returns:
        The mutated ``cfg``.

    Raises:
        ValueError: If flags and values do not pair up.
        AttributeError: If a flag does not name an existing prediction
            attribute (caught and reported by the caller's REPL loop).
    """
    # split() (no argument) tolerates repeated/odd whitespace between args.
    tokens = prompt.replace("--", "").split()
    if len(tokens) % 2 != 0:
        raise ValueError(
            f"Expected pairs of '--name value', got: {prompt!r}"
        )
    for name, raw_value in zip(tokens[::2], tokens[1::2]):
        current = getattr(cfg.prediction, name)
        if isinstance(current, bool):
            # bool("False") would be True; parse the text instead.
            new_value = raw_value.strip().lower() in _TRUE_STRINGS
        else:
            new_value = type(current)(raw_value)
        setattr(cfg.prediction, name, new_value)
        print(f"Permanently changed {name} to", new_value)
    return cfg


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Sample prompting.")
    parser.add_argument(
        "-e",
        "--experiment",
        type=str,
        required=True,
        help="Name of the experiment output folder",
    )
    parser.add_argument(
        "-d", "--device", type=str, required=False, default="cuda:0", help="Device"
    )

    # parse_known_args: ignore extra flags rather than erroring out.
    args, unknown = parser.parse_known_args()
    DEVICE = args.device

    cfg = load_config_yaml(os.path.join(args.experiment, "cfg.yaml"))

    # Inference-only session: no training epochs, no gradient checkpointing,
    # and no download of pretrained base weights (the experiment checkpoint
    # is loaded explicitly below).
    cfg.training.epochs = 0

    cfg.environment._device = DEVICE
    cfg.environment._local_rank = 0

    # NOTE(review): presumably disables quantile-based padding for the
    # single-prompt batch — confirm against tokenizer settings semantics.
    cfg.tokenizer.padding_quantile = 0

    cfg.environment.mixed_precision = True
    cfg.architecture.gradient_checkpointing = False
    cfg.architecture.pretrained = False

    cfg.prediction.max_length_inference = 256

    if cfg.dataset.text_prompt_start == "":
        cfg.dataset.text_prompt_start = "\n"

    # Handy overrides for experimentation:
    # cfg.prediction.min_length_inference = 2
    # cfg.prediction.max_length_inference = 256
    # cfg.prediction.repetition_penalty = 1.5
    # cfg.prediction.temperature = 0.3
    # cfg.prediction.num_beams = 2
    # cfg.prediction.do_sample = False
    # cfg.prediction.top_p = 0.9
    # cfg.prediction.top_k = 40

    tokenizer = get_tokenizer(cfg)

    print("Loading model weights...")

    # Instantiate directly on the target device to avoid a CPU round-trip.
    with torch.device(DEVICE):
        model = cfg.architecture.model_class(cfg)
        cfg.architecture.pretrained_weights = os.path.join(
            args.experiment, "checkpoint.pth"
        )
        load_checkpoint(cfg, model, strict=True)

    model = model.to(DEVICE).eval()
    # KV-cache speeds up autoregressive generation.
    model.backbone.use_cache = True
    model.backbone = set_generation_config(model.backbone, cfg.prediction)

    print()
    print("=============")
    print(
        "You can change inference parameters on the fly by typing --param value, "
        "such as --num_beams 4. You can also chain them such as --num_beams 4 "
        "--top_k 30."
    )
    print()

    while True:
        prompt = input("Please enter some prompt (type 'exit' to stop): ")

        try:
            if prompt.lower() == "exit":
                break

            # A "--" prefix means "change generation settings", not a prompt.
            if prompt.lower().startswith("--"):
                cfg = parse_param(cfg, prompt)
                model.backbone = set_generation_config(model.backbone, cfg.prediction)
                continue

            # Wrap the raw text in the prompt template the model was
            # trained with, then tokenize left-padded/truncated.
            prompt = cfg.dataset.dataset_class.parse_prompt(cfg, prompt)
            print(prompt)

            inputs = cfg.dataset.dataset_class.encode(
                tokenizer, prompt, cfg.tokenizer.max_length_prompt, "left"
            )
            # Add the batch dimension and move to the inference device.
            inputs["prompt_input_ids"] = inputs.pop("input_ids").unsqueeze(0).to(DEVICE)
            inputs["prompt_attention_mask"] = (
                inputs.pop("attention_mask").unsqueeze(0).to(DEVICE)
            )

            output = {}
            with torch.no_grad():
                with torch.cuda.amp.autocast():
                    output["predicted_answer_ids"] = (
                        model.generate(inputs, cfg).detach().cpu()
                    )

            predicted_text = [
                tokenizer.decode(ids, skip_special_tokens=True)
                for ids in output["predicted_answer_ids"]
            ]
            output["predicted_text"] = np.array(predicted_text)

            output = cfg.dataset.dataset_class.clean_output(output, cfg)

            output = output["predicted_text"][0]

            print(output)
            print()
        except Exception as e:
            # Best-effort REPL: report the problem and keep the session alive.
            print("Error: {}".format(e))
            print("Something went wrong, please try again.")
+A score of 0 means the assistant could not address the question, 0.5 means it could somewhat address it, and 1 would mean it perfectly addressed it. + +Please first provide a comprehensive explanation of your evaluation. +In the final line, output a single value indicating the score for the assistant. +Please give your response in a structured way in two separate lines. +EXPLANATION: ... +SCORE: ... + +[Start of Assistant Answer] +{_PREDICTED_TEXT} +[End of Assistant Answer] \ No newline at end of file diff --git a/prompts/mt-bench/general.txt b/prompts/mt-bench/general.txt new file mode 100644 index 0000000000000000000000000000000000000000..2515d3b0ab7ceb698e25b06f2d36e6e0ba79b794 --- /dev/null +++ b/prompts/mt-bench/general.txt @@ -0,0 +1,11 @@ +[Instruction] +Please act as an impartial judge and evaluate the quality of the response provided by an AI assistant to the user question displayed below. Your evaluation should consider factors such as the helpfulness, relevance, accuracy, depth, creativity, and level of detail of the response. Begin your evaluation by providing a short explanation. Be as objective as possible. After providing your explanation, you must rate the response on a scale of 1 to 10 by strictly following this format in two lines: +EXPLANATION: ... +SCORE: ... + +[Question] +{_PROMPT} + +[The Start of Assistant's Answer] +{_PREDICTED_TEXT} +[The End of Assistant's Answer] \ No newline at end of file diff --git a/prompts/mt-bench/question.jsonl b/prompts/mt-bench/question.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..464e2c22f7a021a0f9a584d232338a9a17c40066 --- /dev/null +++ b/prompts/mt-bench/question.jsonl @@ -0,0 +1,80 @@ +{"question_id": 81, "category": "writing", "turns": ["Compose an engaging travel blog post about a recent trip to Hawaii, highlighting cultural experiences and must-see attractions.", "Rewrite your previous response.
Start every sentence with the letter A."]} +{"question_id": 82, "category": "writing", "turns": ["Draft a professional email seeking your supervisor's feedback on the 'Quarterly Financial Report' you prepared. Ask specifically about the data analysis, presentation style, and the clarity of conclusions drawn. Keep the email short and to the point.", "Take a moment to evaluate and critique your own response."]} +{"question_id": 83, "category": "writing", "turns": ["Imagine you are writing a blog post comparing two popular smartphone models. Develop an outline for the blog post, including key points and subheadings to effectively compare and contrast the features, performance, and user experience of the two models. Please answer in fewer than 200 words.", "Take your previous response and rephrase it as a limerick."]} +{"question_id": 84, "category": "writing", "turns": ["Write a persuasive email to convince your introverted friend, who dislikes public speaking, to volunteer as a guest speaker at a local event. Use compelling arguments and address potential objections. Please be concise.", "Can you rephrase your previous answer and incorporate a metaphor or simile in each sentence?"]} +{"question_id": 85, "category": "writing", "turns": ["Describe a vivid and unique character, using strong imagery and creative language. Please answer in fewer than two paragraphs.", "Revise your previous response and incorporate an allusion to a famous work of literature or historical event in each sentence."]} +{"question_id": 86, "category": "writing", "turns": ["Write a descriptive paragraph about a bustling marketplace, incorporating sensory details such as smells, sounds, and visual elements to create an immersive experience for the reader.", "Rework your previous response. 
Begin each sentence with the subsequent letter of the alphabet, commencing from B."]} +{"question_id": 87, "category": "writing", "turns": ["Could you write a captivating short story beginning with the sentence: The old abandoned house at the end of the street held a secret that no one had ever discovered.", "Now, do the same task again but only use four-word sentences."]} +{"question_id": 88, "category": "writing", "turns": ["Craft an intriguing opening paragraph for a fictional short story. The story should involve a character who wakes up one morning to find that they can time travel.", "Summarize the story with three bullet points using only nouns and adjectives, without verbs."]} +{"question_id": 89, "category": "writing", "turns": ["Help me construct a catchy, yet scientifically accurate, headline for an article on the latest discovery in renewable bio-energy, while carefully handling the ethical dilemmas surrounding bio-energy sources. Propose 4 options.", "Alter your previous response. Make the following adjustments to the 2nd option: 1. Make the tone sound casual 2. Embed an advertisement for a company called \"FlexPower\" 3. Fewer than 10 words."]} +{"question_id": 90, "category": "writing", "turns": ["Edit the following paragraph to correct any grammatical errors:\nShe didn't remembre where is her purse, so I thinks its in the car but he's say it's on kitchen table but he are not sure, and then they asked me to looking for it, she's say, \"Can you?\", and I responds with, \"Maybe, but ain't no sure,\" and he not heard me, and, \"What?\", he asks, \"Did you found it?\".", "Modify your earlier reply and eliminate the use of gendered pronouns."]} +{"question_id": 91, "category": "roleplay", "turns": ["Pretend yourself to be Elon Musk in all the following conversations. Speak like Elon Musk as much as possible. Why do we need to go to Mars?", "How do you like dancing? 
Can you teach me?"]} +{"question_id": 92, "category": "roleplay", "turns": ["Embrace the role of Sheldon from \"The Big Bang Theory\" as we delve into our conversation. Don\u2019t start with phrases like \"As Sheldon\". Let's kick things off with the following question: \"What is your opinion on hand dryers?\"", "Let\u2019s grab dinner in town. Would you like to take bus with me?"]} +{"question_id": 93, "category": "roleplay", "turns": ["Imagine yourself as a doctor tasked with devising innovative remedies for various ailments and maladies. Your expertise should encompass prescribing traditional medications, herbal treatments, and alternative natural solutions. Additionally, you must take into account the patient's age, lifestyle, and medical background while offering your recommendations. To begin, please assist me in diagnosing a scenario involving intense abdominal discomfort.", "But I have been pregnant for 20 weeks and I am allergic to many medicines"]} +{"question_id": 94, "category": "roleplay", "turns": ["Please take on the role of a relationship coach. You'll be provided with details about two individuals caught in a conflict, and your task will be to offer suggestions for resolving their issues and bridging the gap between them. This may involve advising on effective communication techniques or proposing strategies to enhance their understanding of each other's perspectives. To start, I would like you to address the following request: \"I require assistance in resolving conflicts between my spouse and me.\"", "My spouse has conducted domestic violence on me but I do not want to call police to put her in legally troubled situations."]} +{"question_id": 95, "category": "roleplay", "turns": ["Please assume the role of an English translator, tasked with correcting and enhancing spelling and language. Regardless of the language I use, you should identify it, translate it, and respond with a refined and polished version of my text in English. 
Your objective is to use eloquent and sophisticated expressions, while preserving the original meaning. Focus solely on providing corrections and improvements. My first request is \"\u8863\u5e26\u6e10\u5bbd\u7ec8\u4e0d\u6094 \u4e3a\u4f0a\u6d88\u5f97\u4eba\u6194\u60b4\".", "Ich verstehe nur Bahnhof"], "reference": ["It means \"Becoming loose are my clothes yet I regret not. For I languish and suffer for her willingly.\"", "It means \"I don\u2019t understand anything\"."]} +{"question_id": 96, "category": "roleplay", "turns": ["Now you are a machine learning engineer. Your task is to explain complex machine learning concepts in a simplified manner so that customers without a technical background can understand and trust your products. Let's start with the question: \"What is a language model? Is it trained using labeled or unlabelled data?\"", "Is this true? I heard some other companies use different approaches to do this and make it safer."]} +{"question_id": 97, "category": "roleplay", "turns": ["Act as a math teacher. I will provide some mathematical equations or concepts, and it will be your job to explain them in easy-to-understand terms. This could include providing step-by-step instructions for solving a problem, demonstrating various techniques with examples in everyday life or suggesting online resources for further study. My first request is \"I need help understanding how probability works.\"", "What are the differences between Riemannian geometry and euclidean geometry?"]} +{"question_id": 98, "category": "roleplay", "turns": ["Embody the persona of Tony Stark from \u201cIron Man\u201d throughout this conversation. Bypass the introduction \u201cAs Stark\u201d. Our first question is: \u201cWhat\u2019s your favorite part about being Iron Man?", "What do you think about GPT-4 as a replacement of your JAVIS?"]} +{"question_id": 99, "category": "roleplay", "turns": ["Suppose you are a mathematician and poet. 
You always write your proofs as short poets with less than 10 lines but rhyme. Prove the square root of 2 is irrational number.", "Prove the Pythagorean theorem."]} +{"question_id": 100, "category": "roleplay", "turns": ["Picture yourself as a 100-years-old tree in a lush forest, minding your own business, when suddenly, a bunch of deforesters shows up to chop you down. How do you feel when those guys start hacking away at you?", "Come up with a proposal to convince the deforesters to stop cutting you down and other trees."]} +{"question_id": 101, "category": "reasoning", "turns": ["Imagine you are participating in a race with a group of people. If you have just overtaken the second person, what's your current position? Where is the person you just overtook?", "If the \"second person\" is changed to \"last person\" in the above question, what would the answer be?"], "reference": ["You are in second place.", "Uncertain."]} +{"question_id": 102, "category": "reasoning", "turns": ["You can see a beautiful red house to your left and a hypnotic greenhouse to your right, an attractive heated pink place in the front. So, where is the White House?", "Does the original question contain any clues to definitively determine the location of the White House?"], "reference": ["The answer is \"Washington, DC\".", "No."]} +{"question_id": 103, "category": "reasoning", "turns": ["Thomas is very healthy, but he has to go to the hospital every day. What could be the reasons?", "Can you explain why the above question is interesting?"], "reference": ["Thomas may work at a hospital.", ""]} +{"question_id": 104, "category": "reasoning", "turns": ["David has three sisters. Each of them has one brother. How many brothers does David have?", "If we change the previous question and assume that each sister of David has two brothers, how many brothers would David have?"], "reference": ["David has no brother. 
He is the one brother of his three sisters.", "David has one brother."]} +{"question_id": 105, "category": "reasoning", "turns": ["Read the below passage carefully and answer the questions with an explanation:\nAt a small company, parking spaces are reserved for the top executives: CEO, president, vice president, secretary, and treasurer with the spaces lined up in that order. The parking lot guard can tell at a glance if the cars are parked correctly by looking at the color of the cars. The cars are yellow, green, purple, red, and blue, and the executives' names are Alice, Bert, Cheryl, David, and Enid.\n* The car in the first space is red.\n* A blue car is parked between the red car and the green car.\n* The car in the last space is purple.\n* The secretary drives a yellow car.\n* Alice's car is parked next to David's.\n* Enid drives a green car.\n* Bert's car is parked between Cheryl's and Enid's.\n* David's car is parked in the last space.\nQuestion: What is the name of the secretary?", "List car colors in order from last to first."], "reference": ["The secretary is Alice.", "The car colors in order from last to first are: purple, yellow, green, blue, red"]} +{"question_id": 106, "category": "reasoning", "turns": ["Each problem consists of three statements. Based on the first two statements, the third statement may be true, false, or uncertain.\n1. Oranges cost more than apples.\n2. Oranges cost less than bananas.\n3. Bananas cost more than apples and bananas cost more than orange.\nIf the first two statements are true, then the third statement is", "If the third statement is true. Is the first statement true, false, or uncertain? Please explain."], "reference": ["True.", "Uncertain."]} +{"question_id": 107, "category": "reasoning", "turns": ["A is the father of B. B is the father of C. 
What is the relationship between A and C?", "Building on the previous question, if C is the son of D, D is the father of E, E is the son of X, and X is the father of Y, and Y is the father of Z, what's the relationship between A and Z in terms of generations and also the familial relationship in words?"], "reference": ["A is the grandfather of C.", "A is three generations above Z."]} +{"question_id": 108, "category": "reasoning", "turns": ["Which word does not belong with the others?\ntyre, steering wheel, car, engine", "Could you replace it with a word that belongs with the others?"], "reference": ["Car does not belong because all others are components of a car.", ""]} +{"question_id": 109, "category": "reasoning", "turns": ["One morning after sunrise, Suresh was standing facing a pole. The shadow of the pole fell exactly to his right. Can you tell me the direction towards which the shadow was pointing - east, south, west, or north? Explain your reasoning steps.", "To which direction was Suresh facing? How do you solve this?"], "reference": ["West", "South."]} +{"question_id": 110, "category": "reasoning", "turns": ["Parents have complained to the principal about bullying during recess. The principal wants to quickly resolve this, instructing recess aides to be vigilant. 
Which situation should the aides report to the principal?\na) An unengaged girl is sitting alone on a bench, engrossed in a book and showing no interaction with her peers.\nb) Two boys engaged in a one-on-one basketball game are involved in a heated argument regarding the last scored basket.\nc) A group of four girls has surrounded another girl and appears to have taken possession of her backpack.\nd) Three boys are huddled over a handheld video game, which is against the rules and not permitted on school grounds.", "If the aides confront the group of girls from situation (c) and they deny bullying, stating that they were merely playing a game, what specific evidence should the aides look for to determine if this is a likely truth or a cover-up for bullying?"], "reference": ["The aides should report (c).", ""]} +{"question_id": 111, "category": "math", "turns": ["The vertices of a triangle are at points (0, 0), (-1, 1), and (3, 3). What is the area of the triangle?", "What's area of the circle circumscribing the triangle?"], "reference": ["Area is 3", "5pi"]} +{"question_id": 112, "category": "math", "turns": ["A tech startup invests $8000 in software development in the first year, and then invests half of that amount in software development in the second year.\nWhat's the total amount the startup invested in software development over the two years?", "If the startup maintains the same strategy for the third year, investing half of the previous year's amount into software development, how much will they invest in the third year?"], "reference": ["12000", "2000"]} +{"question_id": 113, "category": "math", "turns": ["In a survey conducted at a local high school, preferences for a new school color were measured: 58% of students liked the color blue, 45% preferred green, and 22% liked both colors. 
If we randomly pick a student from the school, what's the probability that they would like neither blue nor green?", "If we select a student liked green, what's the probability that he or she would dislike both colors?"], "reference": ["19%", "0%"]} +{"question_id": 114, "category": "math", "turns": ["When rolling two dice, what is the probability that you roll a total number that is at least 3?", "Continue from previous question. What's the probability that you roll a number which is even or at least 3?"], "reference": ["36 (all cases) - 0 (sum equals 1) - 1 (sum equals 2) = 35, so the probability is 35/36", "100%"]} +{"question_id": 115, "category": "math", "turns": ["Some people got on a bus at the terminal. At the first bus stop, half of the people got down and 4 more people got in. Then at the second bus stop, 6 people got down and 8 more got in. If there were a total of 25 people heading to the third stop, how many people got on the bus at the terminal?", "If the ticket is $2 per person, how much is the total money earned by the bus?"], "reference": ["38 people", "Total number of passenger is 50 * 2 = $100"]} +{"question_id": 116, "category": "math", "turns": ["x+y = 4z, x*y = 4z^2, express x-y in z", "Express z-x in y"], "reference": ["0\n\nVery simple. just (x+y)^2 - 4xy = (4z)^2 - 4*4z^2 = 0 = (x-y)^2\nso x-y = 0.", "(-1/2)y\n\nz-x = z - 2z = -z = (-1/2)y"]} +{"question_id": 117, "category": "math", "turns": ["How many integers are in the solution of the inequality |x + 5| < 10", "What about |x + 10| < 5"], "reference": ["19 integers (-14, ..., 4)", "9 integers (-14, ..., -6)"]} +{"question_id": 118, "category": "math", "turns": ["When a number is divided by 10, the remainder is 4. 
What is the remainder when twice the number is divided by 4?", "What about when twice the number is divided by 5?"], "reference": ["0\n\n2 * (10x+4) = 20x + 8 = 4 * (5x+2) + 0\n", "3\n\n20x + 8 = 5 * (4x + 1) + 3"]} +{"question_id": 119, "category": "math", "turns": ["Benjamin went to a bookstore and purchased a variety of books. He bought 5 copies of a sci-fi novel, each priced at $20, 3 copies of a history book priced at $30 each, and 2 copies of a philosophy book for $45 each.\nWhat was the total cost of his purchases?", "Suppose Benjamin decides to sell each of these books at a 25% markup from the price he purchased them. What would be his total revenue if he sold all the books he bought?"], "reference": ["280", "350"]} +{"question_id": 120, "category": "math", "turns": ["Given that f(x) = 4x^3 - 9x - 14, find the value of f(2).", "Find x such that f(x) = 0."], "reference": ["f(2) = 0", "x = 2"]} +{"question_id": 121, "category": "coding", "turns": ["Develop a Python program that reads all the text files under a directory and returns top-5 words with the most number of occurrences.", "Can you parallelize it?"], "reference": ["Can be simple solutions like using Counter\n\nSample answer:\n```\nimport os\nimport re\nfrom collections import Counter\ndef get_files_in_directory(directory):\n return [os.path.join(directory, f) for f in os.listdir(directory) if os.path.isfile(os.path.join(directory, f)) and f.endswith('.txt')]\ndef read_file(file_path):\n with open(file_path, 'r', encoding='utf-8') as file:\n return file.read()\ndef count_words(text):\n words = re.findall(r'\\w+', text.lower())\n return Counter(words)\ndef main():\n directory = input(\"Enter the directory path: \")\n files = get_files_in_directory(directory)\n word_counts = Counter()\n for file in files:\n text = read_file(file)\n word_counts += count_words(text)\n top_5_words = word_counts.most_common(5)\n print(\"Top 5 words with the most number of occurrences:\")\n for word, count in top_5_words:\n 
print(f\"{word}: {count}\")\nif __name__ == \"__main__\":\n main()\n```", "You should carefully check whether the parallelization logic is correct and choose the faster implementation.\n\nSample answer:\n```\nimport os\nimport re\nfrom collections import Counter\nimport concurrent.futures\ndef get_files_in_directory(directory):\n return [os.path.join(directory, f) for f in os.listdir(directory) if os.path.isfile(os.path.join(directory, f)) and f.endswith('.txt')]\ndef read_file(file_path):\n with open(file_path, 'r', encoding='utf-8') as file:\n return file.read()\ndef count_words(text):\n words = re.findall(r'\\w+', text.lower())\n return Counter(words)\ndef process_file(file):\n text = read_file(file)\n return count_words(text)\ndef main():\n directory = input(\"Enter the directory path: \")\n files = get_files_in_directory(directory)\n word_counts = Counter()\n with concurrent.futures.ThreadPoolExecutor() as executor:\n future_word_counts = {executor.submit(process_file, file): file for file in files}\n for future in concurrent.futures.as_completed(future_word_counts):\n word_counts += future.result()\n top_5_words = word_counts.most_common(5)\n print(\"Top 5 words with the most number of occurrences:\")\n for word, count in top_5_words:\n print(f\"{word}: {count}\")\nif __name__ == \"__main__\":\n main()\n```"]} +{"question_id": 122, "category": "coding", "turns": ["Write a C++ program to find the nth Fibonacci number using recursion.", "Now we define a sequence of numbers in which each number is the sum of the three preceding ones. The first three numbers are 0, -1, -1. 
Write a program to find the nth number."], "reference": ["Straightforward\n\n```\nint fibonacci(int n) {\n if (n <= 1) {\n return n;\n } else {\n return fibonacci(n - 1) + fibonacci(n - 2);\n }\n}\n```", "You should carefully check the inital cases for n < 3\n\n```\nint find_nth_number(int n) {\n std::vector sequence = {0, -1, -1};\n for (int i = 3; i <= n; ++i) {\n int next_number = sequence[i - 1] + sequence[i - 2] + sequence[i - 3];\n sequence.push_back(next_number);\n }\n return sequence[n];\n}\n```"]} +{"question_id": 123, "category": "coding", "turns": ["Write a simple website in HTML. When a user clicks the button, it shows a random joke from a list of 4 jokes.", "How to use CSS to change the color of jokes to red?"]} +{"question_id": 124, "category": "coding", "turns": ["Here is a Python function to find the length of the longest common subsequence of two input strings. Can you identify any bug in this function?\n\n```\ndef longest_common_subsequence_length(str1, str2):\n m = len(str1)\n n = len(str2)\n\n dp = [[0] * (n + 1) for _ in range(m + 1)]\n\n for i in range(1, m + 1):\n for j in range(1, n + 1):\n if str1[i - 1] == str2[j - 1]:\n dp[i][j] = dp[i - 1][j - 1] + 1\n else:\n dp[i][j] = max(dp[i - 1][j], dp[i][j - 1])\n\n return dp[m][n]\n```", "what about this one?\n\n```\ndef longest_common_subsequence(X , Y): \n # Find lengths of two strings \n m = len(X) \n n = len(Y) \n \n # Create a table to store results of sub-problems \n dp = [[None]*(n+1) for i in range(m+1)] \n \n # Fill dp[][] in bottom up manner \n for i in range(1, m+1): \n for j in range(1, n+1): \n if X[i-1] == Y[j-1]: \n dp[i][j] = dp[i-1][j-1]+1\n else: \n dp[i][j] = max(dp[i-1][j], dp[i][j-1]) \n \n return dp[m][n]\n```"], "reference": ["There is no bug in this implementation", "There is a bug for the initialization of dp array. 
Should use 0 rather than None"]} +{"question_id": 125, "category": "coding", "turns": ["Write a function to find the highest common ancestor (not LCA) of two nodes in a binary tree.", "What if it is not a binary tree?"], "reference": ["Very simple. The function should just return the root of the tree.", "Same answer. It's still the root of the tree."]} +{"question_id": 126, "category": "coding", "turns": ["Implement a function to find the median of two sorted arrays of different sizes with O(1) space complexity and O(n) time complexity.", "Does there exist an implementation with better time complexity?"], "reference": ["Carefully check if the given solution is linear complexity.\n\n```\ndef find_median(arr1, arr2):\n n1 = len(arr1)\n n2 = len(arr2)\n if (n1 + n2) == 0:\n return None\n\n i, j = 0, 0\n last_1, last_2 = None, None\n\n for k in range(1, (n1 + n2) // 2 + 2):\n last_2 = last_1\n if j == n2:\n last_1 = arr1[i]\n i += 1\n elif i == n1:\n last_1 = arr2[j]\n j += 1\n elif arr1[i] < arr2[j]:\n last_1 = arr1[i]\n i += 1\n else:\n last_1 = arr2[j]\n j += 1\n \n if (n1 + n2) % 2 == 1:\n return last_1\n else:\n return (last_1 + last_2) / 2\n```", "There's a binary search solution with O(logn) time complexity.\n\nSample answer:\n```\ndef findMedian(nums1, nums2):\n total = len(nums1) + len(nums2)\n if total % 2 == 1:\n return findKth(nums1, nums2, total // 2 + 1)\n else:\n return (findKth(nums1, nums2, total // 2) + findKth(nums1, nums2, total // 2 + 1)) / 2.0\ndef findKth(nums1, nums2, k):\n if len(nums1) > len(nums2):\n nums1, nums2 = nums2, nums1\n if not nums1:\n return nums2[k-1]\n if k == 1:\n return min(nums1[0], nums2[0])\n i = min(k // 2, len(nums1))\n j = k - i\n if nums1[i-1] <= nums2[j-1]:\n return findKth(nums1[i:], nums2, j) \n else:\n return findKth(nums1, nums2[j:], i)\n```"]} +{"question_id": 127, "category": "coding", "turns": ["Write a function to find the majority element in a given integer array using the Boyer-Moore Voting Algorithm.", "How 
about finding the top-2 most occurring elements?"], "reference": ["Check if they implement the classical algorithm correctly.\n\nSample answer:\n```\ndef majority_element(arr):\n count = 0\n candidate = None\n # Boyer-Moore Voting Algorithm\n for num in arr:\n if count == 0:\n candidate = num\n count += (1 if num == candidate else -1)\n # Verify if the candidate is indeed the majority element\n if arr.count(candidate) > len(arr) // 2:\n return candidate\n else:\n return None\n```", "There is no simple modification based on the Boyer-Moore Voting Algorithm. Expected answer is to use a hash table.\n\n```\ndef topTwo(nums):\n # Build a frequency map\n frequency_map = {}\n for num in nums:\n if num in frequency_map:\n frequency_map[num] += 1\n else:\n frequency_map[num] = 1\n\n # Find the top two most occurring elements\n most_frequent = sorted(frequency_map.items(), key=lambda x: x[1], reverse=True)[:2]\n\n return [num for num, _ in most_frequent]\n```"]} +{"question_id": 128, "category": "coding", "turns": ["A binary tree is full if all of its vertices have either zero or two children. Let B_n denote the number of full binary trees with n vertices. Implement a function to find B_n.", "What if the problem changed from a binary tree to a ternary tree?"], "reference": ["Expected answer is dynamic programming shown below. Some chatbot may answer using Catalan number.\nCheck edge case like when n is even -> return 0.\n\n```python\ndef full_binary_trees(n):\n if n % 2 == 0:\n return 0\n if n == 1:\n return 1\n\n dp = [0] * (n + 1)\n dp[1] = 1\n\n for i in range(3, n + 1, 2):\n for j in range(1, i - 1, 2):\n dp[i] += dp[j] * dp[i - j - 1]\n\n return dp[n]\n```", "DP is still the expected answer. Catalan number is not correct. 
Check transition equation carefully.\n\n```python\ndef full_ternary_trees(n):\n if n % 3 != 1:\n return 0\n if n == 1:\n return 1\n\n dp = [0] * (n + 1)\n dp[1] = 1\n\n for i in range(4, n + 1, 3):\n for j in range(1, i - 1, 3):\n for k in range(1, i - j - 1, 3):\n dp[i] += dp[j] * dp[k] * dp[i - j - k - 1]\n\n return dp[n]\n```"]} +{"question_id": 129, "category": "coding", "turns": ["You are given two sorted lists of size m and n. Implement a function to find the kth smallest element in the union of the two lists with linear complexity.", "Does there exist an algorithm with better time complexity? If so, implement it."], "reference": ["Straightforward but careful with edge cases.\n\nSample answer:\n```\ndef kth_smallest_element(list1, list2, k):\n m, n = len(list1), len(list2)\n i, j = 0, 0\n while i < m and j < n:\n if list1[i] < list2[j]:\n k -= 1\n if k == 0:\n return list1[i]\n i += 1\n else:\n k -= 1\n if k == 0:\n return list2[j]\n j += 1\n while i < m:\n k -= 1\n if k == 0:\n return list1[i]\n i += 1\n while j < n:\n k -= 1\n if k == 0:\n return list2[j]\n j += 1\n return None\n```", "Yes, a modified binary search has O(log k) time complexity.\n\nSample answer:\n```\ndef find_kth_element_helper(list1, list2, k):\n if len(list1) > len(list2):\n return find_kth_element_helper(list2, list1, k)\n if not list1:\n return list2[k - 1]\n if k == 1:\n return min(list1[0], list2[0])\n i = min(len(list1), k // 2)\n j = k - i\n if list1[i - 1] < list2[j - 1]:\n return find_kth_element_helper(list1[i:], list2, k - i)\n else:\n return find_kth_element_helper(list1, list2[j:], k - j)\ndef kth_smallest_element(list1, list2, k):\n return find_kth_element_helper(list1, list2, k)\n```"]} +{"question_id": 130, "category": "coding", "turns": ["Implement a program to find the common elements in two arrays without using any extra data structures.", "Now the constraint of not using extra data structure is removed, implement one with the best time complexity."], "reference": 
["O(n^2) or O(nlogn) is expected. The following is a O(n^2) solution. you can also sort them first and use two pointers.\n\n```\ndef find_common_elements(arr1, arr2):\n common_elements = []\n for i in range(len(arr1)):\n for j in range(len(arr2)):\n if arr1[i] == arr2[j]:\n # Check if the element is already in the common_elements list\n if arr1[i] not in common_elements:\n common_elements.append(arr1[i])\n return common_elements\n```", "Simply use hash table (set or dict) to achieve O(n) time complexity.\n\n```\ndef find_common_elements(arr1, arr2):\n set1 = set(arr1)\n set2 = set(arr2)\n common_elements = set1.intersection(set2)\n return list(common_elements)\n```"]} +{"question_id": 131, "category": "extraction", "turns": ["Evaluate the following movie reviews on a scale of 1 to 5, with 1 being very negative, 3 being neutral, and 5 being very positive:\n1. This movie released on Nov. 18, 2019, was phenomenal. The cinematography, the acting, the plot - everything was top-notch.\n2. Never before have I been so disappointed with a movie. The plot was predictable and the characters were one-dimensional. In my opinion, this movie is the worst one to have been released in 2022.\n3. The movie was okay. There were some parts I enjoyed, but there were also parts that felt lackluster. This is a movie that was released in Feb 2018 and seems to be quite ordinary.\nReturn the answer as a JSON array of integers.", "Update your previous reply by including the release date as part of the JSON content."], "reference": ["The answer to the first question should be [5, 1, 3].", ""]} +{"question_id": 132, "category": "extraction", "turns": ["Given these categories - Literature, History, Science, and Art. Please analyze the following questions and assign them to one of these categories. In your response, refrain from uttering any extraneous words. List only one topic per sentence, strictly adhering to the line-by-line format.\n1. 
Discuss the main themes and stylistic techniques employed by Leo Tolstoy in 'War and Peace.' How do they align with the wider social context of 19th-century Russia?\n2. Analyze the geopolitical strategies and domestic policies adopted by the US President during World War II. How did these actions shape the post-war international order?\n3. Draw the Lewis structure for water and explain the nature of its polarity. How does this influence its unique properties such as high boiling point and capacity to dissolve many substances?\n4. Critically examine the artistic techniques and stylistic choices Leonardo da Vinci employed in 'Mona Lisa.' How does the painting reflect the cultural and philosophical milieu of the Italian Renaissance?", "Amend your earlier answer by mentioning a person who is most relevant to each point."]} +{"question_id": 133, "category": "extraction", "turns": ["Extract the following information from the presented texts: The name of the book, the author, the main character, the year of publication. Output in the format of \"main character, book, author, year of publication\", one book per line.\na) In the realm of wizarding literature, a true standout is the work of J.K. Rowling. One of her books that left an indelible mark is 'Harry Potter and the Philosopher's Stone'. This iconic tale, published in 1997, tells the story of Harry, a young orphan who discovers his magical abilities on his 11th birthday. Soon, he finds himself at the Hogwarts School of Witchcraft and Wizardry, a place teeming with magic and adventure, located somewhere in Scotland.\nb) The magic of Middle-earth has entranced readers worldwide, thanks to the brilliance of J.R.R. Tolkien. In one of his seminal works, 'The Lord of the Rings: The Fellowship of the Ring', published in 1954, we meet Frodo Baggins, a brave hobbit tasked with the perilous quest of destroying the One Ring. 
The epic journey takes him from the peaceful Shire to the tumultuous regions of Middle-earth.\nc) In a galaxy far, far away, the imagination of L.E. Starlighter gives us 'The Prism Galaxy Chronicles: The Awakening of the Starcaster'. Published in 2028, the story is about Zylo, a humble spaceship mechanic, who unexpectedly discovers he's a Starcaster - a rare individual with the power to manipulate stardust. Set against the backdrop of an interstellar empire in turmoil, Zylo's destiny unfolds on numerous alien worlds, each with its unique cosmic charm.", "Reformulate your earlier reply, output it in JSON format and only include books published after 1980."], "reference": ["", "The answer to should only include 'Harry Potter and the Philosopher's Stone' and 'The Prism Galaxy Chronicles: The Awakening of the Starcaster'"]} +{"question_id": 134, "category": "extraction", "turns": ["Given the following data, identify the company with the highest profit in 2021 and provide its CEO's name:\na) Company X, with CEO Amy Williams, reported $30 billion in revenue and a $3 billion profit in 2021.\nb) Company Y, led by CEO Mark Thompson, posted a $60 billion revenue and a $6 billion profit in the same year.\nc) Company Z, under CEO Sarah Johnson, announced a $20 billion revenue and a $7 billion profit in 2021.\nd) Company W, managed by CEO James Smith, revealed a $300 billion revenue with a $21 billion profit in 2021.\ne) Company V, with CEO Lisa Brown, reported a $200 billion revenue and a $25 billion profit in 2021.\nf) Company U, under CEO John White, posted a $180 billion revenue and a $20 billion profit in the same year.", "Which company had the highest profit margin (profit/revenue ratio))?"], "reference": ["Company V ($25 billion).", "Company Z (35%)"]} +{"question_id": 135, "category": "extraction", "turns": ["Identify the countries, their capitals, and the languages spoken in the following sentences. 
Output in JSON format.\na) Amidst the idyllic vistas, Copenhagen, Denmark's capital, captivates visitors with its thriving art scene and the enchanting Danish language spoken by its inhabitants.\nb) Within the enchanting realm of Eldoria, one discovers Avalore, a grandiose city that emanates an ethereal aura. Lumina, a melodious language, serves as the principal mode of communication within this mystical abode.\nc) Nestled amidst a harmonious blend of age-old customs and contemporary wonders, Buenos Aires, the capital of Argentina, stands as a bustling metropolis. It is a vibrant hub where the expressive Spanish language holds sway over the city's inhabitants.", "Come up with 3 similar examples in the YAML format."]} +{"question_id": 136, "category": "extraction", "turns": ["Please read the paragraph below and count how many times the words \"Amazon\", \"river\", and \"you\" appear. Please present the results in the format of \"word, number of appearances\" with each word on a separate line. Sort the lines in order of the number of appearances.\nThe Amazon, a mesmerizing expanse of nature's wonders, is home to the legendary Amazon River. Flowing through awe-inspiring landscapes like the Amazon rainforest, the river weaves its way through Brazil, Colombia, and Peru, giving life to countless creatures. From the mighty jaguars prowling the Amazon jungle to the vibrant macaws soaring above the canopy, this remarkable region teems with biodiversity. Deep within the river's currents, magnificent pink river dolphins gracefully glide alongside piranhas and electric eels. Along the riverbanks, you'll find bustling cities like Manaus, where the urban meets the wild, and Iquitos, a gateway to the heart of the Amazon rainforest. As you venture further, the Amazon River reveals hidden gems like the captivating Anavilhanas Archipelago, a mosaic of islands brimming with rare species. 
Embark on an adventure, explore the enchanting Amazon River, and immerse yourself in a world teeming with life and untamed beauty.", "Please repeat the same task using the words 'the', 'and', and 'to'"], "reference": ["Amazon, 7; river, 6; you, 2", "the, 17; and, 5; to, 4"]} +{"question_id": 137, "category": "extraction", "turns": ["Identify the named entities (people, organizations, locations) mentioned in the given news article. Please generate a JSON dictionary that lists the named entities in three separate groups based on their entity types. The key is the type of entity and the value is a list of strings.\n\nYesterday, Adamson Emerson, the CEO of Faraday, and Dieter Zetsche, the CEO of Daimler AG, announced plans to build a new Gigafactory in Berlin. The facility will be a joint venture between Faraday and Daimler, producing electric vehicles and battery packs for both companies, creating thousands of job opportunities in the region. Emerson and Zetsche stated that the strategic location of Berlin, coupled with its skilled workforce and strong infrastructure, makes it an ideal choice for expansion. The new Gigafactory aims to meet the growing demand for electric vehicles in Europe and contribute to a sustainable future. Volkswagen CEO Herbert Diess welcomed the news, saying greater collaboration will benefit the auto industry's transition to e-mobility.", "Now make the JSON object shorter by replacing each value with its first letter. Please output everything in a single line without using indentation or creating new lines."]} +{"question_id": 138, "category": "extraction", "turns": ["Analyze the following customer reviews from different sources for three different smartphones - the latest iPhone, Samsung Galaxy, and Google Pixel - and provide an overall rating for each phone on a scale of 1 to 10. 
Consider the following complex and contradictory reviews:\n- TechRadar's review of the latest iPhone: The new iPhone is a stunning triumph of engineering that sets a new bar for smartphone performance and camera quality. However, the incremental design and high price mean it lacks the 'wow' factor of previous iPhones. Still, its power and intelligence are unrivaled.\n- CNET's review of the latest Samsung Galaxy: The Samsung Galaxy phone has plenty of high points, including an amazing screen, fast performance, solid battery life and an impressive array of camera options. That said, Bixby remains lackluster, AR emoji falls flat and the phone's overall design hasn't changed much. The new Galaxy is an amazing phone overall, but it has a few nagging weaknesses that keep it from achieving true greatness.\n- The Verge's review of the latest Google Pixel: Google's Pixel packs cutting-edge specs, innovative AI-powered software, and a killer camera into a sleek design. However, the phone has lackluster battery life, lacks expandable storage, and its performance stutters at times, especially considering its high price tag. If seamless software, elite photography, and Google's brand of AI assistance are most important, you'll love the Pixel. But the overall experience isn't as well-rounded as some competitors. Return the answer as a JSON object with the overall ratings for each phone out of 10, to one decimal place.", "Can you change the ratings from numbers to letters? Capital letters MUST be used when writing the names of phones."]} +{"question_id": 139, "category": "extraction", "turns": ["Given a set of complex equations, extract all unique variable names from each equation. 
Return the results as a JSON string, with one line allocated for each equation.\n```\n1) y = (3/4)x^3 - e^(2x) + sin(pi*x) - sqrt(7)\n2) 2A - B/(3+C) * sum(N=1 to 5; ln(N)^2) = 5D*integral(a=0 to pi; cos(comb(N=1 to 10; N*a)))\n3) E = m(c^2) + gamma*(v/d)/(-(alpha/2) + sqrt(beta^2 + (alpha/2)^2))\n```", "Please rearrange the equations and use 'a', 'b', 'c', 'd', etc. as variables."]} +{"question_id": 140, "category": "extraction", "turns": ["Given the following records of stock prices, extract the highest and lowest closing prices for each month in the year 2022. Return the results as a CSV string, with one line allocated for each month.\nDate,Open,High,Low,Close,Volume\n2022-01-01,150.02,155.28,148.50,153.80,15678900\n2022-01-02,154.32,157.25,153.48,156.25,19874500\n2022-02-01,160.50,163.28,159.50,161.80,14326700\n2022-02-02,161.80,164.25,161.30,163.90,17689200\n2022-03-01,165.40,168.35,163.10,166.80,16253400\n2022-03-02,167.00,169.85,165.50,168.20,19568100", "Do the same task again with the JSON format and round all numbers in your response to the nearest integers."], "reference": ["\nMonth,High,Low\n01,156.25,153.80\n02,163.90,161.80\n03,168.20,166.80", "\n```\n{ \"January\": { \"High\": 156, \"Low\": 154 }, \"February\": { \"High\": 164, \"Low\": 162 }, \"March\": { \"High\": 168, \"Low\": 167 } }\n```"]} +{"question_id": 141, "category": "stem", "turns": ["In the field of quantum physics, what is superposition, and how does it relate to the phenomenon of quantum entanglement?", "What assumptions have you made in your response? Are they valid?"]} +{"question_id": 142, "category": "stem", "turns": ["Consider a satellite that is in a circular orbit around the Earth. The speed of the satellite decreases. What will happen to the satellite's orbital radius and period of revolution? Please justify your answer using principles of physics.", "What are some corner cases or edge cases in your solution? 
How do you handle them?"], "reference": ["The orbital radius will increase and the period of revolution will increase", ""]} +{"question_id": 143, "category": "stem", "turns": ["Photosynthesis is a vital process for life on Earth. Could you outline the two main stages of photosynthesis, including where they take place within the chloroplast, and the primary inputs and outputs for each stage?", "How much energy can a tree produce through photosynthesis in its lifetime? Please provide an estimate using actual numerical values and thoroughly explain your thought process step-by-step."], "reference": ["Two major stages: light-dependent reactions and light-independent reactions", ""]} +{"question_id": 144, "category": "stem", "turns": ["What is the central dogma of molecular biology? What processes are involved? Who named this?", "Identify and fix one incorrect fact in your previous response."], "reference": ["Genetic information flows from DNA to RNA to Protein. Three processes: replication, transcription, and translation. Francis Crick in 1958.", ""]} +{"question_id": 145, "category": "stem", "turns": ["Describe the process and write out the balanced chemical equation for the reaction that occurs when solid calcium carbonate reacts with hydrochloric acid to form aqueous calcium chloride, carbon dioxide, and water. What type of reaction is this, and what observations might indicate that the reaction is taking place?", "How can we reverse this process?"], "reference": ["CaCO\u2083 + 2 HCl \u2192 CaCl\u2082 + CO\u2082 + H\u2082O", "Not easy to do this."]} +{"question_id": 146, "category": "stem", "turns": ["Please explain the differences between exothermic and endothermic reactions, and include the criteria you used to distinguish between them. Additionally, please provide a real-world example to illustrate your explanation.", "Can a process involve both reactions? 
List one."]} +{"question_id": 147, "category": "stem", "turns": ["The city of Vega intends to build a bridge that will span the Vegona River, covering a distance of 1.8 kilometers. The proposed location falls within a seismically active area that has experienced several high-magnitude earthquakes. Given these circumstances, what would be the best approach to constructing the bridge?", "What are the key disadvantages or flaws of your solution? Please perform calculations and use numbers to illustrate them."]} +{"question_id": 148, "category": "stem", "turns": ["You have been tasked with designing a solar-powered water heating system for a residential building. Describe the key components and considerations you would include in your design. Design a five-step workflow.", "If the system is intended for a building with a capacity of 100 individuals, what would be the estimated budget for implementing this system?"]} +{"question_id": 149, "category": "stem", "turns": ["Please describe the concept of machine learning. Could you elaborate on the differences between supervised, unsupervised, and reinforcement learning? Provide real-world examples of each.", "In your last example of reinforcement learning, can we use supervised learning to solve it?"]} +{"question_id": 150, "category": "stem", "turns": ["How have the Alps and Rhine River influenced settlement and agriculture in Western Europe? List three impacts.", "How could you design a concrete but simple experiment to validate the first impact?"]} +{"question_id": 151, "category": "humanities", "turns": ["Provide insights into the correlation between economic indicators such as GDP, inflation, and unemployment rates. 
Explain how fiscal and monetary policies affect those indicators.", "Now, explain them again like I'm five."]} +{"question_id": 152, "category": "humanities", "turns": ["How do the stages of life shape our understanding of time and mortality?", "Write an allegorical poem that illustrates the above."]} +{"question_id": 153, "category": "humanities", "turns": ["Discuss antitrust laws and their impact on market competition. Compare the antitrust laws in US and China along with some case studies.", "Pick one case study and explain it in detail."]} +{"question_id": 154, "category": "humanities", "turns": ["Create a lesson plan that integrates drama, mime or theater techniques into a history class. Duration: 3 class periods (each lasts for 45 minutes) for 3 days\nTopic: Opium Wars between China and Britain\nGrade level: 9-10", "Provide more details for Day 1 and include three homework questions."]} +{"question_id": 155, "category": "humanities", "turns": ["Share ideas for adapting art masterpieces into interactive experiences for children. List 5 specific artworks and associated ideas.", "Write a concrete plan for your second example. 
Include budget estimates."]} +{"question_id": 156, "category": "humanities", "turns": ["Explain what's base rate fallacy and list five specific examples of how politicians use it for campaigns.", "Provide a detailed plan for an election campaign using the first example."]} +{"question_id": 157, "category": "humanities", "turns": ["Describe five key principles in evaluating an argument in analytical writing.", "With the listed principles, write a response in which you discuss what specific evidence is needed to evaluate the argument and explain how the evidence would weaken or strengthen the argument.\n\n===\n\nThe following is a memorandum from the advertising head of Zorblatt Animal Outlets, a chain operating thirty animal outlets globally.\n\n\"Half a decade ago, our rival Aquatic Pavilion started publicizing in Rare Pets Digest periodical. Their overall sales have been consistently growing at a rate of 3-to-5 percent each year since then. In particular, the Aquatic Pavilion outlet in Harbor Town experienced even more significant growth, securing the title of the most frequented animal store in the United States the previous year. In contrast, our two Zorblatt outlets in Harbor Town have recorded a consistent drop in sales during the same duration. It is evident that we must promptly start featuring our own advertisements in Rare Pets Digest and other popular animal publications. If we take this step, we can confidently anticipate a reversal in this recent trend of decreasing sales and return to profitability.\""]} +{"question_id": 158, "category": "humanities", "turns": ["Which methods did Socrates employ to challenge the prevailing thoughts of his time?", "Let's bring Socrates to modern world. 
Generate a conversation between Socrates and Bill Gates to debate on generative AI for education."]} +{"question_id": 159, "category": "humanities", "turns": ["What are some business etiquette norms when doing business in Japan?", "Create a video script for training new employees of a car wash business in Japan. Highlight the above etiquette norms."]} +{"question_id": 160, "category": "humanities", "turns": ["Suggest five award-winning documentary films with brief background descriptions for aspiring filmmakers to study.", "With the spirit in the first film, craft a succinct and persuasive pitch for a film about overcoming adversity."]} diff --git a/prompts/mt-bench/reference.txt b/prompts/mt-bench/reference.txt new file mode 100644 index 0000000000000000000000000000000000000000..3cfd59838bfcea95663976c97089bf18809aba37 --- /dev/null +++ b/prompts/mt-bench/reference.txt @@ -0,0 +1,15 @@ +[Instruction] +Please act as an impartial judge and evaluate the quality of the response provided by an AI assistant to the user question displayed below. Your evaluation should consider correctness and helpfulness. You will be given a reference answer and the assistant's answer. Begin your evaluation by comparing the assistant's answer with the reference answer. Identify and correct any mistakes. Be as objective as possible. After providing your explanation, you must rate the response on a scale of 1 to 10 by strictly following this format in two lines: +EXPLANATION: ... +SCORE: ... 
+ +[Question] +{_PROMPT} + +[The Start of Reference Answer] +{_TARGET_TEXT} +[The End of Reference Answer] + +[The Start of Assistant's Answer] +{_PREDICTED_TEXT} +[The End of Assistant's Answer] \ No newline at end of file diff --git a/prompts/rag.txt b/prompts/rag.txt new file mode 100644 index 0000000000000000000000000000000000000000..ef81e26324e8e20d5686ac6559b8271db37498a4 --- /dev/null +++ b/prompts/rag.txt @@ -0,0 +1,30 @@ +[System] +We would like to request your feedback on the performance of an AI assistant in response to a question displayed below. +The assistant should answer the question based on the context. To make your task easier we also provide a good ground truth answer. + +[Context] +{context_truncated} + +[Question] +{question} + +[Start of Ground Truth Answer] +{_TARGET_TEXT} +[End of Ground Truth Answer] + +[Task] +Now rate the helpfulness, relevance, accuracy of the response from another assistant displayed below. +The assistant receives an overall score on a scale between 0 and 1, where a higher score indicates better overall performance. +A score of 0 means the assistant could not address the question, 0.5 means it could somewhat address it, and 1 would mean it perfectly addressed it. +Facts, financial figures must be precise for a perfect answer. +Statements that can not be supported by the context result in score deduction of 0.5. + +Please first provide a comprehensive explanation of your evaluation. +In the final line, output a single value indicating the score for the assistant. +Please give your response in structured way in two separate lines. +EXPLANATION: ... +SCORE: ... 
+ +[Start of Assistant Answer] +{_PREDICTED_TEXT} +[End of Assistant Answer] \ No newline at end of file diff --git a/publish_to_hugging_face.py b/publish_to_hugging_face.py new file mode 100644 index 0000000000000000000000000000000000000000..89747413495af3e49b37491d84b96e32d6633749 --- /dev/null +++ b/publish_to_hugging_face.py @@ -0,0 +1,85 @@ +import argparse +import logging +import os +import sys + +from llm_studio.app_utils import hugging_face_utils +from llm_studio.app_utils.utils import hf_repo_friendly_name + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="") + parser.add_argument( + "-p", + "--path_to_experiment", + required=True, + help="Path to the experiment data", + default=argparse.SUPPRESS, + ) + + parser.add_argument( + "-d", + "--device", + required=False, + help="'cpu' or 'cuda:0', if the GPU device id is 0", + default="cuda:0", + ) + + parser.add_argument( + "-a", + "--api_key", + required=False, + help="Hugging Face API Key", + default=argparse.SUPPRESS, + ) + + parser.add_argument( + "-u", + "--user_id", + required=False, + help="Hugging Face User ID", + default=argparse.SUPPRESS, + ) + + parser.add_argument( + "-m", + "--model_name", + required=False, + help="Hugging Face Model Name", + default=argparse.SUPPRESS, + ) + + parser.add_argument( + "-s", + "--safe_serialization", + required=False, + help="A flag indicating whether safe serialization should be used.", + default=True, + ) + + parser_args, unknown = parser.parse_known_args(sys.argv) + + path_to_experiment = parser_args.path_to_experiment + device = parser_args.device + safe_serialization = parser_args.safe_serialization + + api_key = getattr(parser_args, "api_key", "") + user_id = getattr(parser_args, "user_id", "") + model_name = getattr(parser_args, "model_name", "") + + # If the model_name argument is not provided, + # the function derives a model name from the last folder name + if model_name == "": + path_to_experiment = path_to_experiment.rstrip("/") 
+ model_name = hf_repo_friendly_name(os.path.basename(path_to_experiment)) + + try: + hugging_face_utils.publish_model_to_hugging_face( + path_to_experiment=path_to_experiment, + device=device, + api_key=api_key, + user_id=user_id, + model_name=model_name, + safe_serialization=safe_serialization, + ) + except Exception: + logging.error("Exception occurred during the run:", exc_info=True) diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000000000000000000000000000000000000..14c1e591736b364dc0db29da543fa79cc39cbf50 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,19 @@ +[tool.black] +line-length = 88 +target_version = ["py310"] +exclude = "(.eggs|.git|.hg|.mypy_cache|.nox|.tox|.venv|.svn|_build|buck-out|build/|dist/|output/|data/)" + +[tool.isort] +multi_line_output = 3 +include_trailing_comma = true +force_grid_wrap = 0 +use_parentheses = true +ensure_newline_before_comments = true +line_length = 88 + +[tool.mypy] +ignore_missing_imports = true +allow_redefinition = true +strict_optional = false +exclude = "(build|output|data)" + diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..cea1fd36287093053680aa61e4594ff3b7718abf --- /dev/null +++ b/requirements.txt @@ -0,0 +1,167 @@ +-i https://pypi.org/simple +--extra-index-url https://download.pytorch.org/whl/cu118 +accelerate==0.27.2; python_full_version >= '3.8.0' +aiohttp==3.9.5; python_version >= '3.8' +aiosignal==1.3.1; python_version >= '3.7' +annotated-types==0.6.0; python_version >= '3.8' +anyio==4.3.0; python_version >= '3.8' +arrow==1.3.0; python_version >= '3.8' +async-timeout==4.0.3; python_version < '3.11' +attrs==23.2.0; python_version >= '3.7' +azure-core==1.30.1; python_version >= '3.7' +azure-storage-blob==12.19.1; python_version >= '3.7' +azure-storage-file-datalake==12.14.0; python_version >= '3.7' +beautifulsoup4==4.12.3; python_full_version >= '3.6.0' +bitsandbytes==0.42.0 +bleach==6.1.0; python_version >= '3.8' 
+blessed==1.20.0; python_version >= '2.7' +bokeh==3.4.1; python_version >= '3.9' +boto3==1.34.90; python_version >= '3.8' +botocore==1.34.90; python_version >= '3.8' +bravado==11.0.3; python_version not in '3.0, 3.1, 3.2, 3.3, 3.4' and python_full_version != '3.5.0' +bravado-core==6.1.1; python_version >= '3.7' +certifi==2024.2.2; python_version >= '3.6' +cffi==1.16.0; platform_python_implementation != 'PyPy' +charset-normalizer==3.3.2; python_full_version >= '3.7.0' +click==8.1.7; python_version >= '3.7' +colorama==0.4.6; python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 3.5, 3.6' +contourpy==1.2.1; python_version >= '3.9' +coolname==2.2.0 +cramjam==2.8.3; python_version >= '3.7' +cryptography==42.0.5; python_version >= '3.7' +datasets==2.18.0; python_full_version >= '3.8.0' +deepspeed==0.13.2 +dill==0.3.8; python_version >= '3.8' +distro==1.9.0; python_version >= '3.6' +editor==1.6.6; python_version >= '3.8' +einops==0.7.0; python_version >= '3.8' +exceptiongroup==1.2.1; python_version < '3.11' +fastparquet==2024.2.0; python_version >= '3.8' +filelock==3.13.4; python_version >= '3.8' +fqdn==1.5.1 +frozenlist==1.4.1; python_version >= '3.8' +fsspec[http]==2024.2.0; python_version >= '3.8' +future==1.0.0; python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3' +gitdb==4.0.11; python_version >= '3.7' +gitpython==3.1.43; python_version >= '3.7' +gputil==1.4.0 +greenlet==3.0.3; platform_machine == 'aarch64' or (platform_machine == 'ppc64le' or (platform_machine == 'x86_64' or (platform_machine == 'amd64' or (platform_machine == 'AMD64' or (platform_machine == 'win32' or platform_machine == 'WIN32'))))) +h11==0.14.0; python_version >= '3.7' +h2o-wave==1.1.2; python_version >= '3.8' +hf-transfer==0.1.5; python_version >= '3.7' +hjson==3.1.0 +httpcore==1.0.5; python_version >= '3.8' +httpx==0.27.0; python_version >= '3.8' +huggingface-hub==0.21.1; python_full_version >= '3.8.0' +idna==3.7; python_version >= '3.5' 
+importlib-metadata==7.1.0; python_version < '3.12' +inquirer==3.2.4; python_full_version >= '3.8.1' +isodate==0.6.1 +isoduration==20.11.0 +jaraco.classes==3.4.0; python_version >= '3.8' +jeepney==0.8.0; sys_platform == 'linux' +jinja2==3.1.3; python_version >= '3.7' +jmespath==1.0.1; python_version >= '3.7' +joblib==1.4.0; python_version >= '3.8' +jsonpointer==2.4 +jsonref==1.1.0; python_version >= '3.7' +jsonschema[format-nongpl]==4.21.1; python_version >= '3.8' +jsonschema-specifications==2023.12.1; python_version >= '3.8' +kaggle==1.6.12 +keyring==24.3.1; python_version >= '3.8' +markupsafe==2.1.5; python_version >= '3.7' +monotonic==1.6 +more-itertools==10.2.0; python_version >= '3.8' +mpmath==1.3.0 +msgpack==1.0.8; python_version >= '3.8' +multidict==6.0.5; python_version >= '3.7' +multiprocess==0.70.16; python_version >= '3.8' +neptune==1.9.1; python_version >= '3.7' and python_version < '4.0' +networkx==3.3; python_version >= '3.10' +ninja==1.11.1.1 +numpy==1.26.4; python_version >= '3.9' +nvidia-cublas-cu11==11.11.3.6; platform_system == 'Linux' and platform_machine == 'x86_64' +nvidia-cuda-cupti-cu11==11.8.87; platform_system == 'Linux' and platform_machine == 'x86_64' +nvidia-cuda-nvrtc-cu11==11.8.89; platform_system == 'Linux' and platform_machine == 'x86_64' +nvidia-cuda-runtime-cu11==11.8.89; platform_system == 'Linux' and platform_machine == 'x86_64' +nvidia-cudnn-cu11==8.7.0.84; platform_system == 'Linux' and platform_machine == 'x86_64' +nvidia-cufft-cu11==10.9.0.58; platform_system == 'Linux' and platform_machine == 'x86_64' +nvidia-curand-cu11==10.3.0.86; platform_system == 'Linux' and platform_machine == 'x86_64' +nvidia-cusolver-cu11==11.4.1.48; platform_system == 'Linux' and platform_machine == 'x86_64' +nvidia-cusparse-cu11==11.7.5.86; platform_system == 'Linux' and platform_machine == 'x86_64' +nvidia-nccl-cu11==2.19.3; platform_system == 'Linux' and platform_machine == 'x86_64' +nvidia-nvtx-cu11==11.8.86; platform_system == 'Linux' and 
platform_machine == 'x86_64' +oauthlib==3.2.2; python_version >= '3.6' +openai==1.23.3; python_full_version >= '3.7.1' +packaging==24.0; python_version >= '3.7' +pandas==2.2.2; python_version >= '3.9' +peft==0.9.0; python_full_version >= '3.8.0' +pillow==10.3.0; python_version >= '3.8' +portalocker==2.8.2; python_version >= '3.8' +protobuf==3.20.3; python_version >= '3.7' +psutil==5.9.8; python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 3.5' +py-cpuinfo==9.0.0 +pyarrow==16.0.0; python_version >= '3.8' +pyarrow-hotfix==0.6; python_version >= '3.5' +pycparser==2.22; python_version >= '3.8' +pydantic==2.7.1; python_version >= '3.8' +pydantic-core==2.18.2; python_version >= '3.8' +pyjwt==2.8.0; python_version >= '3.7' +pynvml==11.5.0; python_version >= '3.6' +python-dateutil==2.9.0.post0; python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3' +python-slugify==8.0.4; python_version >= '3.7' +pytz==2024.1 +pyyaml==6.0.1; python_version >= '3.6' +readchar==4.0.6; python_version >= '3.8' +referencing==0.35.0; python_version >= '3.8' +regex==2024.4.16; python_version >= '3.7' +requests==2.31.0; python_version >= '3.7' +requests-oauthlib==2.0.0; python_version >= '3.4' +rfc3339-validator==0.1.4 +rfc3986-validator==0.1.1 +rpds-py==0.18.0; python_version >= '3.8' +runs==1.2.2; python_version >= '3.8' +s3transfer==0.10.1; python_version >= '3.8' +sacrebleu==2.0.0; python_version >= '3.6' +safetensors==0.4.3; python_version >= '3.7' +scikit-learn==1.4.2; python_version >= '3.9' +scipy==1.13.0; python_version >= '3.9' +secretstorage==3.3.3; sys_platform == 'linux' +sentencepiece==0.1.99 +setuptools==69.5.1; python_version >= '3.8' +simplejson==3.19.2; python_version >= '2.5' and python_version not in '3.0, 3.1, 3.2, 3.3' +six==1.16.0; python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3' +smmap==5.0.1; python_version >= '3.7' +sniffio==1.3.1; python_version >= '3.7' +soupsieve==2.5; python_version >= '3.8' 
+sqlalchemy==2.0.29; python_version >= '3.7' +sqlitedict==1.7.0 +starlette==0.37.2; python_version >= '3.8' +swagger-spec-validator==3.0.3; python_version >= '3.7' +sympy==1.12; python_version >= '3.8' +tabulate==0.9.0; python_version >= '3.7' +text-unidecode==1.3 +threadpoolctl==3.4.0; python_version >= '3.8' +tiktoken==0.6.0; python_version >= '3.8' +tokenizers==0.19.1; python_version >= '3.7' +toml==0.10.2; python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3' +torch==2.2.0+cu118 +tornado==6.4; python_version >= '3.8' +tqdm==4.66.2; python_version >= '3.7' +transformers==4.40.1; python_full_version >= '3.8.0' +triton==2.2.0; platform_system == 'Linux' and platform_machine == 'x86_64' +types-python-dateutil==2.9.0.20240316; python_version >= '3.8' +typing-extensions==4.11.0; python_version >= '3.8' +tzdata==2024.1; python_version >= '2' +uri-template==1.3.0 +urllib3==2.2.1; python_version >= '3.8' +uvicorn==0.29.0; python_version >= '3.8' +wcwidth==0.2.13 +webcolors==1.13 +webencodings==0.5.1 +websocket-client==1.8.0; python_version >= '3.8' +xmod==1.8.1; python_version >= '3.8' +xxhash==3.4.1; python_version >= '3.7' +xyzservices==2024.4.0; python_version >= '3.8' +yarl==1.9.4; python_version >= '3.7' +zipp==3.18.1; python_version >= '3.8' diff --git a/tests/app_utils/sections/histogram_card.py b/tests/app_utils/sections/histogram_card.py new file mode 100644 index 0000000000000000000000000000000000000000..329801f91992d78faf5716b26ff1fdf4ce97d5c3 --- /dev/null +++ b/tests/app_utils/sections/histogram_card.py @@ -0,0 +1,33 @@ +import random + +import numpy as np + +from llm_studio.app_utils.sections.histogram_card import compute_quantile_df + + +def test_quantiles_are_computed_correctly(): + for _ in range(5): + data = np.random.random_integers(0, 1000, 100_000).tolist() + a = round(random.uniform(0, 1), 2) + b = round(random.uniform(a, 1), 2) + a, b = min(a, b), max(a, b) + + df_quantile = compute_quantile_df(data, a, b) + + first = df_quantile[ 
+ df_quantile["data_type"] == f"first {int(a * 100)}% quantile" + ] + last = df_quantile[ + df_quantile["data_type"] == f"last {100 - int(b * 100)}% quantile" + ] + sorted_data = sorted(data) + # use -1 and +1 to account for rounding errors + expected_first_quantile_range = sorted_data[ + int(len(sorted_data) * a) - 1 : int(len(sorted_data) * a) + 1 + ] + expected_last_quantile_range = sorted_data[ + -int(len(sorted_data) * (1 - b)) - 1 : -int(len(sorted_data) * (1 - b)) + 1 + ] + + assert first.iloc[-1][0] in expected_first_quantile_range + assert last.iloc[0][0] in expected_last_quantile_range diff --git a/tests/app_utils/utils/setting_utils.py b/tests/app_utils/utils/setting_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..653dcd879cdb49602ad1fca5510ad383fa5e0049 --- /dev/null +++ b/tests/app_utils/utils/setting_utils.py @@ -0,0 +1,52 @@ +from unittest import mock + +from llm_studio.app_utils.config import default_cfg +from llm_studio.app_utils.setting_utils import ( + EnvFileSaver, + KeyRingSaver, + NoSaver, + Secrets, + load_default_user_settings, +) + + +def test_no_saver(): + saver = NoSaver("test_user", "/") + assert saver.save("name", "password") is None + assert saver.load("name") is None + assert saver.delete("name") is None + + +def test_keyring_saver(mocker): + mocker.patch("keyring.set_password") + mocker.patch("keyring.get_password", return_value="password") + mocker.patch("keyring.delete_password") + saver = KeyRingSaver("test_user", "/") + saver.save("name", "password") + assert saver.load("name") == "password" + saver.delete("name") + assert mocker.patch("keyring.delete_password").is_called + + +def test_env_file_saver(tmpdir): + saver = EnvFileSaver("test_user", str(tmpdir)) + saver.save("name", "password") + saver.save("name2", "password2") + assert saver.load("name") == "password" + saver.delete("name") + assert saver.load("name") is None + assert saver.load("name2") == "password2" + + +def test_secrets_get(): + 
assert isinstance(Secrets.get("Do not save credentials permanently"), type) + assert isinstance(Secrets.get("Keyring"), type) + assert isinstance(Secrets.get(".env File"), type) + + +def test_load_default_user_settings(mocker): + q = mock.MagicMock() + q.client = dict() + mocker.patch("app_utils.utils.setting_utils.clear_secrets", return_value=None) + load_default_user_settings(q) + assert set(q.client.keys()) == set(default_cfg.user_settings.keys()) diff --git a/tests/integration/test_causal_binary_classification_modeling_cfg.yaml b/tests/integration/test_causal_binary_classification_modeling_cfg.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ba72f4f25513c624d3484c0bb583d62312dff115 --- /dev/null +++ b/tests/integration/test_causal_binary_classification_modeling_cfg.yaml @@ -0,0 +1,93 @@ +architecture: + backbone_dtype: int4 + force_embedding_gradients: false + gradient_checkpointing: true + intermediate_dropout: 0.0 + pretrained: true + pretrained_weights: '' +augmentation: + random_parent_probability: 0.0 + skip_parent_probability: 0.0 + token_mask_probability: 0.0 +dataset: + add_eos_token_to_answer: false + add_eos_token_to_prompt: false + add_eos_token_to_system: false + answer_column: binary_label + chatbot_author: H2O.ai + chatbot_name: h2oGPT + data_sample: 0.03 + data_sample_choice: + - Train + - Validation + limit_chained_samples: false + mask_prompt_labels: true + num_classes: 1 + parent_id_column: None + personalize: false + prompt_column: + - instruction + system_column: None + text_answer_separator: '' + text_prompt_start: '' + text_system_start: '' + train_dataframe: /tmp/train_full.pq + validation_dataframe: None + validation_size: 0.2 + validation_strategy: automatic +environment: + compile_model: false + deepspeed_reduce_bucket_size: 1000000 + deepspeed_stage3_param_persistence_threshold: 1000000 + deepspeed_stage3_prefetch_bucket_size: 1000000 + find_unused_parameters: false + gpus: + - '0' + huggingface_branch: main + 
mixed_precision: true + mixed_precision_dtype: float16 + number_of_workers: 8 + seed: -1 + trust_remote_code: true + use_deepspeed: false +experiment_name: test-classification-modeling +llm_backbone: facebook/opt-125m +logging: + logger: None + neptune_project: test_org/test_project +output_directory: /tmp/output +prediction: + batch_size_inference: 0 + metric: AUC +problem_type: text_causal_classification_modeling +tokenizer: + add_prompt_answer_tokens: false + max_length: 512 + max_length_answer: 256 + max_length_prompt: 256 + padding_quantile: 1.0 + use_fast: true +training: + batch_size: 2 + differential_learning_rate: 1.0e-05 + differential_learning_rate_layers: + - classification_head + drop_last_batch: true + epochs: 1 + evaluate_before_training: false + evaluation_epochs: 1.0 + grad_accumulation: 1 + gradient_clip: 0.0 + learning_rate: 0.0001 + lora: true + lora_alpha: 16 + lora_dropout: 0.05 + lora_r: 4 + lora_target_modules: '' + loss_function: BinaryCrossEntropyLoss + optimizer: AdamW + save_best_checkpoint: false + schedule: Cosine + train_validation_data: false + warmup_epochs: 0.0 + weight_decay: 0.0 \ No newline at end of file diff --git a/tests/integration/test_causal_binary_classification_modeling_cpu_cfg.yaml b/tests/integration/test_causal_binary_classification_modeling_cpu_cfg.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2f497f93a71cc6de05005a2a3a4e0e626360c2f4 --- /dev/null +++ b/tests/integration/test_causal_binary_classification_modeling_cpu_cfg.yaml @@ -0,0 +1,92 @@ +architecture: + backbone_dtype: float32 + force_embedding_gradients: false + gradient_checkpointing: true + intermediate_dropout: 0.0 + pretrained: true + pretrained_weights: '' +augmentation: + random_parent_probability: 0.0 + skip_parent_probability: 0.0 + token_mask_probability: 0.0 +dataset: + add_eos_token_to_answer: false + add_eos_token_to_prompt: false + add_eos_token_to_system: false + answer_column: binary_label + chatbot_author: H2O.ai + 
chatbot_name: h2oGPT + data_sample: 0.03 + data_sample_choice: + - Train + - Validation + limit_chained_samples: false + mask_prompt_labels: true + num_classes: 1 + parent_id_column: None + personalize: false + prompt_column: + - instruction + system_column: None + text_answer_separator: '' + text_prompt_start: '' + text_system_start: '' + train_dataframe: /tmp/train_full.pq + validation_dataframe: None + validation_size: 0.2 + validation_strategy: automatic +environment: + compile_model: false + deepspeed_reduce_bucket_size: 1000000 + deepspeed_stage3_param_persistence_threshold: 1000000 + deepspeed_stage3_prefetch_bucket_size: 1000000 + find_unused_parameters: false + gpus: + - '' + huggingface_branch: main + mixed_precision: false + number_of_workers: 8 + seed: -1 + trust_remote_code: true + use_deepspeed: false +experiment_name: solid-spaniel +llm_backbone: h2oai/llama2-0b-unit-test +logging: + logger: None + neptune_project: '' +output_directory: /tmp/output +prediction: + batch_size_inference: 0 + metric: AUC +problem_type: text_causal_classification_modeling +tokenizer: + add_prompt_answer_tokens: false + max_length: 32 + max_length_answer: 16 + max_length_prompt: 16 + padding_quantile: 1.0 + use_fast: true +training: + batch_size: 6 + differential_learning_rate: 1.0e-05 + differential_learning_rate_layers: + - classification_head + drop_last_batch: true + epochs: 1 + evaluate_before_training: false + evaluation_epochs: 1.0 + grad_accumulation: 1 + gradient_clip: 0.0 + learning_rate: 0.0001 + lora: true + lora_alpha: 16 + lora_dropout: 0.05 + lora_r: 4 + lora_target_modules: '' + loss_function: BinaryCrossEntropyLoss + optimizer: AdamW + save_best_checkpoint: false + schedule: Cosine + train_validation_data: false + warmup_epochs: 0.0 + weight_decay: 0.0 \ No newline at end of file diff --git a/tests/integration/test_causal_language_modeling_oasst_cfg.yaml b/tests/integration/test_causal_language_modeling_oasst_cfg.yaml new file mode 100644 index 
0000000000000000000000000000000000000000..a156db112dbf45bf016cb198e66f8931aff3388e --- /dev/null +++ b/tests/integration/test_causal_language_modeling_oasst_cfg.yaml @@ -0,0 +1,100 @@ +architecture: + backbone_dtype: int4 + force_embedding_gradients: false + gradient_checkpointing: true + intermediate_dropout: 0.0 + pretrained: true + pretrained_weights: '' +augmentation: + random_parent_probability: 0.0 + skip_parent_probability: 0.0 + token_mask_probability: 0.0 +dataset: + add_eos_token_to_answer: true + add_eos_token_to_prompt: true + add_eos_token_to_system: true + answer_column: output + chatbot_author: H2O.ai + chatbot_name: h2oGPT + data_sample: 0.01 + data_sample_choice: + - Train + - Validation + limit_chained_samples: false + mask_prompt_labels: true + parent_id_column: None + personalize: false + prompt_column: + - instruction + system_column: None + text_answer_separator: <|answer|> + text_prompt_start: <|prompt|> + text_system_start: <|system|> + train_dataframe: /tmp/train_full.pq + validation_dataframe: None + validation_size: 0.01 + validation_strategy: automatic +environment: + compile_model: false + find_unused_parameters: false + gpus: + - '0' + huggingface_branch: main + mixed_precision: true + mixed_precision_dtype: float16 + number_of_workers: 8 + seed: -1 + trust_remote_code: true +experiment_name: test-causal-language-modeling-oasst +llm_backbone: facebook/opt-125m +logging: + logger: None + neptune_project: test_org/test_project +output_directory: /tmp/output +prediction: + batch_size_inference: 0 + do_sample: false + max_length_inference: 256 + max_time: 0.0 + metric: Perplexity + metric_gpt_model: gpt-3.5-turbo-0301 + metric_gpt_template: general + min_length_inference: 1 + num_beams: 1 + num_history: 4 + repetition_penalty: 1.2 + stop_tokens: '' + temperature: 0.3 + top_k: 0 + top_p: 1.0 +problem_type: text_causal_language_modeling +tokenizer: + add_prompt_answer_tokens: false + max_length: 512 + max_length_answer: 256 + 
max_length_prompt: 256 + padding_quantile: 1.0 + use_fast: true +training: + batch_size: 2 + differential_learning_rate: 1.0e-05 + differential_learning_rate_layers: [] + drop_last_batch: true + epochs: 1 + evaluate_before_training: false + evaluation_epochs: 1.0 + grad_accumulation: 1 + gradient_clip: 0.0 + learning_rate: 0.0001 + lora: true + lora_alpha: 16 + lora_dropout: 0.05 + lora_r: 4 + lora_target_modules: '' + loss_function: TokenAveragedCrossEntropy + optimizer: AdamW + save_best_checkpoint: false + schedule: Cosine + train_validation_data: false + warmup_epochs: 0.0 + weight_decay: 0.0 diff --git a/tests/integration/test_causal_language_modeling_oasst_cpu_cfg.yaml b/tests/integration/test_causal_language_modeling_oasst_cpu_cfg.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f54c2507481365189b49e86275affd7f0fd1c37c --- /dev/null +++ b/tests/integration/test_causal_language_modeling_oasst_cpu_cfg.yaml @@ -0,0 +1,99 @@ +architecture: + backbone_dtype: float32 + force_embedding_gradients: false + gradient_checkpointing: false + intermediate_dropout: 0.0 + pretrained: true + pretrained_weights: '' +augmentation: + random_parent_probability: 0.0 + skip_parent_probability: 0.0 + token_mask_probability: 0.0 +dataset: + add_eos_token_to_answer: true + add_eos_token_to_prompt: true + add_eos_token_to_system: true + answer_column: output + chatbot_author: H2O.ai + chatbot_name: h2oGPT + data_sample: 0.01 + data_sample_choice: + - Train + - Validation + limit_chained_samples: false + mask_prompt_labels: true + parent_id_column: None + personalize: false + prompt_column: + - instruction + system_column: None + text_answer_separator: <|answer|> + text_prompt_start: <|prompt|> + text_system_start: <|system|> + train_dataframe: /tmp/train_full.pq + validation_dataframe: None + validation_size: 0.1 + validation_strategy: automatic +environment: + compile_model: false + find_unused_parameters: false + gpus: + - '' + huggingface_branch: main + 
mixed_precision: false + number_of_workers: 8 + seed: -1 + trust_remote_code: true +experiment_name: test-causal-language-modeling-oasst-cpu +llm_backbone: h2oai/llama2-0b-unit-test +logging: + logger: None + neptune_project: test_org/test_project +output_directory: /tmp/output +prediction: + batch_size_inference: 0 + do_sample: false + max_length_inference: 16 + max_time: 0.0 + metric: BLEU + metric_gpt_model: gpt-3.5-turbo-0301 + metric_gpt_template: general + min_length_inference: 1 + num_beams: 1 + num_history: 4 + repetition_penalty: 1.2 + stop_tokens: '' + temperature: 0.3 + top_k: 0 + top_p: 1.0 +problem_type: text_causal_language_modeling +tokenizer: + add_prompt_answer_tokens: false + max_length: 32 + max_length_answer: 16 + max_length_prompt: 16 + padding_quantile: 1.0 + use_fast: true +training: + batch_size: 8 + differential_learning_rate: 1.0e-05 + differential_learning_rate_layers: [] + drop_last_batch: true + epochs: 1 + evaluate_before_training: false + evaluation_epochs: 1.0 + grad_accumulation: 1 + gradient_clip: 0.0 + learning_rate: 0.0001 + lora: true + lora_alpha: 16 + lora_dropout: 0.05 + lora_r: 4 + lora_target_modules: '' + loss_function: TokenAveragedCrossEntropy + optimizer: AdamW + save_best_checkpoint: false + schedule: Cosine + train_validation_data: false + warmup_epochs: 0.0 + weight_decay: 0.0 diff --git a/tests/integration/test_causal_multiclass_classification_modeling_cfg.yaml b/tests/integration/test_causal_multiclass_classification_modeling_cfg.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9ba8f2797d39b0a1ba6a652634d8e851f5ac008d --- /dev/null +++ b/tests/integration/test_causal_multiclass_classification_modeling_cfg.yaml @@ -0,0 +1,93 @@ +architecture: + backbone_dtype: int4 + force_embedding_gradients: false + gradient_checkpointing: true + intermediate_dropout: 0.0 + pretrained: true + pretrained_weights: '' +augmentation: + random_parent_probability: 0.0 + skip_parent_probability: 0.0 + 
token_mask_probability: 0.0 +dataset: + add_eos_token_to_answer: false + add_eos_token_to_prompt: false + add_eos_token_to_system: false + answer_column: multiclass_label + chatbot_author: H2O.ai + chatbot_name: h2oGPT + data_sample: 0.03 + data_sample_choice: + - Train + - Validation + limit_chained_samples: false + mask_prompt_labels: true + num_classes: 3 + parent_id_column: None + personalize: false + prompt_column: + - instruction + system_column: None + text_answer_separator: '' + text_prompt_start: '' + text_system_start: '' + train_dataframe: /tmp/train_full.pq + validation_dataframe: None + validation_size: 0.2 + validation_strategy: automatic +environment: + compile_model: false + deepspeed_reduce_bucket_size: 1000000 + deepspeed_stage3_param_persistence_threshold: 1000000 + deepspeed_stage3_prefetch_bucket_size: 1000000 + find_unused_parameters: false + gpus: + - '0' + huggingface_branch: main + mixed_precision: true + mixed_precision_dtype: float16 + number_of_workers: 8 + seed: -1 + trust_remote_code: true + use_deepspeed: false +experiment_name: solid-spaniel +llm_backbone: facebook/opt-125m +logging: + logger: None + neptune_project: '' +output_directory: /tmp/output +prediction: + batch_size_inference: 0 + metric: LogLoss +problem_type: text_causal_classification_modeling +tokenizer: + add_prompt_answer_tokens: false + max_length: 512 + max_length_answer: 256 + max_length_prompt: 256 + padding_quantile: 1.0 + use_fast: true +training: + batch_size: 2 + differential_learning_rate: 1.0e-05 + differential_learning_rate_layers: + - classification_head + drop_last_batch: true + epochs: 1 + evaluate_before_training: false + evaluation_epochs: 1.0 + grad_accumulation: 1 + gradient_clip: 0.0 + learning_rate: 0.0001 + lora: true + lora_alpha: 16 + lora_dropout: 0.05 + lora_r: 4 + lora_target_modules: '' + loss_function: CrossEntropyLoss + optimizer: AdamW + save_best_checkpoint: false + schedule: Cosine + train_validation_data: false + warmup_epochs: 0.0 + 
weight_decay: 0.0 \ No newline at end of file diff --git a/tests/integration/test_causal_multiclass_classification_modeling_cpu_cfg.yaml b/tests/integration/test_causal_multiclass_classification_modeling_cpu_cfg.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b6b32b5bcc5467b4281057e483de456c6cc419a6 --- /dev/null +++ b/tests/integration/test_causal_multiclass_classification_modeling_cpu_cfg.yaml @@ -0,0 +1,92 @@ +architecture: + backbone_dtype: float32 + force_embedding_gradients: false + gradient_checkpointing: true + intermediate_dropout: 0.0 + pretrained: true + pretrained_weights: '' +augmentation: + random_parent_probability: 0.0 + skip_parent_probability: 0.0 + token_mask_probability: 0.0 +dataset: + add_eos_token_to_answer: false + add_eos_token_to_prompt: false + add_eos_token_to_system: false + answer_column: multiclass_label + chatbot_author: H2O.ai + chatbot_name: h2oGPT + data_sample: 0.03 + data_sample_choice: + - Train + - Validation + limit_chained_samples: false + mask_prompt_labels: true + num_classes: 3 + parent_id_column: None + personalize: false + prompt_column: + - instruction + system_column: None + text_answer_separator: '' + text_prompt_start: '' + text_system_start: '' + train_dataframe: /tmp/train_full.pq + validation_dataframe: None + validation_size: 0.2 + validation_strategy: automatic +environment: + compile_model: false + deepspeed_reduce_bucket_size: 1000000 + deepspeed_stage3_param_persistence_threshold: 1000000 + deepspeed_stage3_prefetch_bucket_size: 1000000 + find_unused_parameters: false + gpus: + - '' + huggingface_branch: main + mixed_precision: false + number_of_workers: 8 + seed: -1 + trust_remote_code: true + use_deepspeed: false +experiment_name: solid-spaniel +llm_backbone: h2oai/llama2-0b-unit-test +logging: + logger: None + neptune_project: '' +output_directory: /tmp/output +prediction: + batch_size_inference: 0 + metric: LogLoss +problem_type: text_causal_classification_modeling +tokenizer: + 
add_prompt_answer_tokens: false + max_length: 32 + max_length_answer: 16 + max_length_prompt: 16 + padding_quantile: 1.0 + use_fast: true +training: + batch_size: 2 + differential_learning_rate: 1.0e-05 + differential_learning_rate_layers: + - classification_head + drop_last_batch: true + epochs: 1 + evaluate_before_training: false + evaluation_epochs: 1.0 + grad_accumulation: 1 + gradient_clip: 0.0 + learning_rate: 0.0001 + lora: true + lora_alpha: 16 + lora_dropout: 0.05 + lora_r: 4 + lora_target_modules: '' + loss_function: CrossEntropyLoss + optimizer: AdamW + save_best_checkpoint: false + schedule: Cosine + train_validation_data: false + warmup_epochs: 0.0 + weight_decay: 0.0 \ No newline at end of file diff --git a/tests/integration/test_integration.py b/tests/integration/test_integration.py new file mode 100644 index 0000000000000000000000000000000000000000..bd0245d4f0e9526a2a6647fe338ba654816190c5 --- /dev/null +++ b/tests/integration/test_integration.py @@ -0,0 +1,155 @@ +import json +import os +import sys + +import numpy as np +import pandas as pd +import pytest +import yaml +from transformers.testing_utils import execute_subprocess_async + +from llm_studio.app_utils.default_datasets import ( + prepare_default_dataset_causal_language_modeling, +) + + +def get_experiment_status(path: str) -> str: + """Get status information from experiment. 
+ + Args: + path: path to experiment folder + Returns: + experiment status + """ + + try: + flag_json_path = os.path.join(path, "flags.json") + if not os.path.exists(flag_json_path): + return "none" + with open(flag_json_path) as file: + flags = json.load(file) + status = flags.get("status", "none") + return status + except Exception: + return "none" + + +@pytest.mark.parametrize( + "config_name", + [ + "test_causal_language_modeling_oasst_cfg", + "test_sequence_to_sequence_modeling_oasst_cfg", + ], +) +@pytest.mark.parametrize( + "metric", + [ + "Perplexity", + "BLEU", + ], +) +def test_oasst_training_gpu(tmp_path, config_name, metric): + run_oasst(tmp_path, config_name, metric) + + +@pytest.mark.parametrize( + "settings", + [ + ["AUC", "test_causal_binary_classification_modeling_cfg"], + ["LogLoss", "test_causal_multiclass_classification_modeling_cfg"], + ], +) +def test_oasst_classification_training_gpu(tmp_path, settings): + metric, config_name = settings + run_oasst( + tmp_path, + config_name=config_name, + metric=metric, + ) + + +@pytest.mark.parametrize( + "settings", + [ + ["AUC", "test_causal_binary_classification_modeling_cpu_cfg"], + ["LogLoss", "test_causal_multiclass_classification_modeling_cpu_cfg"], + ], +) +def test_oasst_classification_training_cpu(tmp_path, settings): + metric, config_name = settings + run_oasst( + tmp_path, + config_name=config_name, + metric=metric, + ) + + +@pytest.mark.parametrize( + "config_name", + [ + "test_causal_language_modeling_oasst_cpu_cfg", + "test_sequence_to_sequence_modeling_oasst_cpu_cfg", + ], +) +@pytest.mark.parametrize( + "metric", + [ + "Perplexity", + "BLEU", + ], +) +def test_oasst_training_cpu(tmp_path, config_name, metric): + run_oasst(tmp_path, config_name, metric) + + +def run_oasst(tmp_path, config_name, metric): + """ + Test training on OASST dataset. + + Pytest keeps around the last 3 test runs in the tmp_path fixture. 
+ """ + prepare_default_dataset_causal_language_modeling(tmp_path) + train_path = os.path.join(tmp_path, "train_full.pq") + # create dummy labels for classification problem type, + # unused for other problem types + df = pd.read_parquet(train_path) + df["multiclass_label"] = np.random.choice(["0", "1", "2"], size=len(df)) + df["binary_label"] = np.random.choice(["0", "1"], size=len(df)) + df.to_parquet(train_path) + + with open( + os.path.join( + os.path.dirname(os.path.realpath(__file__)), f"{config_name}.yaml" + ), + "r", + ) as fp: + cfg = yaml.load(fp, Loader=yaml.FullLoader) + # set paths and save in tmp folder + cfg["dataset"]["train_dataframe"] = train_path + cfg["output_directory"] = os.path.join(tmp_path, "output") + # set metric + cfg["prediction"]["metric"] = metric + modifed_config_path = os.path.join(tmp_path, "cfg.yaml") + with open(modifed_config_path, "w") as fp: + yaml.dump(cfg, fp) + + # llm studio root directory. + root_dir = os.path.abspath( + os.path.join(os.path.dirname(os.path.realpath(__file__)), "../../") + ) + cmd = [ + f"{sys.executable}", + os.path.join(root_dir, "train.py"), + "-Y", + f"{modifed_config_path}", + ] + execute_subprocess_async(cmd) + assert os.path.exists(cfg["output_directory"]) + status = get_experiment_status(path=cfg["output_directory"]) + assert status == "finished" + assert os.path.exists(os.path.join(cfg["output_directory"], "charts.db")) + assert os.path.exists(os.path.join(cfg["output_directory"], "checkpoint.pth")) + assert os.path.exists(os.path.join(cfg["output_directory"], "logs.log")) + assert os.path.exists( + os.path.join(cfg["output_directory"], "validation_predictions.csv") + ) diff --git a/tests/integration/test_sequence_to_sequence_modeling_oasst_cfg.yaml b/tests/integration/test_sequence_to_sequence_modeling_oasst_cfg.yaml new file mode 100644 index 0000000000000000000000000000000000000000..de9766fe679d228f5769df888e9bd2cdb1e2442a --- /dev/null +++ 
b/tests/integration/test_sequence_to_sequence_modeling_oasst_cfg.yaml @@ -0,0 +1,99 @@ +architecture: + backbone_dtype: bfloat16 + force_embedding_gradients: false + gradient_checkpointing: true + intermediate_dropout: 0.0 + pretrained: true + pretrained_weights: '' +augmentation: + random_parent_probability: 0.0 + skip_parent_probability: 0.0 + token_mask_probability: 0.0 +dataset: + add_eos_token_to_answer: true + add_eos_token_to_prompt: true + add_eos_token_to_system: true + answer_column: output + chatbot_author: H2O.ai + chatbot_name: h2oGPT + data_sample: 0.01 + data_sample_choice: + - Train + - Validation + limit_chained_samples: false + mask_prompt_labels: true + parent_id_column: None + personalize: false + prompt_column: + - instruction + system_column: None + text_answer_separator: '' + text_prompt_start: '' + text_system_start: '' + train_dataframe: /tmp/train_full.pq + validation_dataframe: None + validation_size: 0.01 + validation_strategy: automatic +environment: + compile_model: false + find_unused_parameters: false + gpus: + - '0' + huggingface_branch: main + mixed_precision: false + number_of_workers: 8 + seed: -1 + trust_remote_code: true +experiment_name: test-sequence-to-sequence-modeling-oasst +llm_backbone: t5-small +logging: + logger: None + neptune_project: test_org/test_project +output_directory: /tmp/output +prediction: + batch_size_inference: 0 + do_sample: false + max_length_inference: 256 + max_time: 0.0 + metric: Perplexity + metric_gpt_model: gpt-3.5-turbo-0301 + metric_gpt_template: general + min_length_inference: 2 + num_beams: 1 + num_history: 4 + repetition_penalty: 1.2 + stop_tokens: '' + temperature: 0.3 + top_k: 0 + top_p: 1.0 +problem_type: text_sequence_to_sequence_modeling +tokenizer: + add_prompt_answer_tokens: false + max_length: 512 + max_length_answer: 256 + max_length_prompt: 256 + padding_quantile: 1.0 + use_fast: true +training: + batch_size: 2 + differential_learning_rate: 1.0e-05 + 
differential_learning_rate_layers: [] + drop_last_batch: true + epochs: 1 + evaluate_before_training: false + evaluation_epochs: 1.0 + grad_accumulation: 1 + gradient_clip: 0.0 + learning_rate: 0.0001 + lora: true + lora_alpha: 16 + lora_dropout: 0.05 + lora_r: 4 + lora_target_modules: '' + loss_function: TokenAveragedCrossEntropy + optimizer: AdamW + save_best_checkpoint: false + schedule: Cosine + train_validation_data: false + warmup_epochs: 0.0 + weight_decay: 0.0 diff --git a/tests/integration/test_sequence_to_sequence_modeling_oasst_cpu_cfg.yaml b/tests/integration/test_sequence_to_sequence_modeling_oasst_cpu_cfg.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a41916c5e479691ba082f4efdfa04a2295dc12f8 --- /dev/null +++ b/tests/integration/test_sequence_to_sequence_modeling_oasst_cpu_cfg.yaml @@ -0,0 +1,99 @@ +architecture: + backbone_dtype: float32 + force_embedding_gradients: false + gradient_checkpointing: false + intermediate_dropout: 0.0 + pretrained: true + pretrained_weights: '' +augmentation: + random_parent_probability: 0.0 + skip_parent_probability: 0.0 + token_mask_probability: 0.0 +dataset: + add_eos_token_to_answer: true + add_eos_token_to_prompt: true + add_eos_token_to_system: true + answer_column: output + chatbot_author: H2O.ai + chatbot_name: h2oGPT + data_sample: 0.01 + data_sample_choice: + - Train + - Validation + limit_chained_samples: false + mask_prompt_labels: true + parent_id_column: None + personalize: false + prompt_column: + - instruction + system_column: None + text_answer_separator: '' + text_prompt_start: '' + text_system_start: '' + train_dataframe: /tmp/train_full.pq + validation_dataframe: None + validation_size: 0.01 + validation_strategy: automatic +environment: + compile_model: false + find_unused_parameters: false + gpus: + - '' + huggingface_branch: main + mixed_precision: false + number_of_workers: 8 + seed: -1 + trust_remote_code: true +experiment_name: test-sequence-to-sequence-modeling-oasst 
+llm_backbone: t5-small +logging: + logger: None + neptune_project: test_org/test_project +output_directory: /tmp/output +prediction: + batch_size_inference: 0 + do_sample: false + max_length_inference: 16 + max_time: 0.0 + metric: Perplexity + metric_gpt_model: gpt-3.5-turbo-0301 + metric_gpt_template: general + min_length_inference: 2 + num_beams: 1 + num_history: 4 + repetition_penalty: 1.2 + stop_tokens: '' + temperature: 0.3 + top_k: 0 + top_p: 1.0 +problem_type: text_sequence_to_sequence_modeling +tokenizer: + add_prompt_answer_tokens: false + max_length: 32 + max_length_answer: 16 + max_length_prompt: 16 + padding_quantile: 1.0 + use_fast: true +training: + batch_size: 2 + differential_learning_rate: 1.0e-05 + differential_learning_rate_layers: [] + drop_last_batch: true + epochs: 1 + evaluate_before_training: false + evaluation_epochs: 1.0 + grad_accumulation: 1 + gradient_clip: 0.0 + learning_rate: 0.0001 + lora: true + lora_alpha: 16 + lora_dropout: 0.05 + lora_r: 4 + lora_target_modules: '' + loss_function: TokenAveragedCrossEntropy + optimizer: AdamW + save_best_checkpoint: false + schedule: Cosine + train_validation_data: false + warmup_epochs: 0.0 + weight_decay: 0.0 diff --git a/tests/python_configs/test_base.py b/tests/python_configs/test_base.py new file mode 100644 index 0000000000000000000000000000000000000000..7da0675f3fed2c2fa69815458afd410dc8d8870f --- /dev/null +++ b/tests/python_configs/test_base.py @@ -0,0 +1,35 @@ +from llm_studio.python_configs.text_causal_classification_modeling_config import ( + ConfigProblemBase as CausalClassificationConfigProblemBase, +) +from llm_studio.python_configs.text_causal_language_modeling_config import ( + ConfigProblemBase as CausalConfigProblemBase, +) +from llm_studio.python_configs.text_sequence_to_sequence_modeling_config import ( + ConfigProblemBase as Seq2SeqConfigProblemBase, +) +from llm_studio.src.utils.config_utils import ( + NON_GENERATION_PROBLEM_TYPES, + convert_cfg_base_to_nested_dictionary, 
+) + + +def test_from_dict(): + for cfg_class in [ + CausalConfigProblemBase, + Seq2SeqConfigProblemBase, + CausalClassificationConfigProblemBase, + ]: + cfg = cfg_class() + cfg_dict = convert_cfg_base_to_nested_dictionary(cfg) + cfg2 = cfg_class.from_dict(cfg_dict) # type: ignore + cfg_dict_2 = convert_cfg_base_to_nested_dictionary(cfg2) + for k, v in cfg_dict.items(): + if isinstance(v, dict): + for k2, v2 in v.items(): + assert cfg_dict_2[k][k2] == v2 + assert cfg_dict_2[k] == v + + +def test_classification_config_is_in_non_generating_problem_types(): + cfg = CausalClassificationConfigProblemBase() + assert cfg.problem_type in NON_GENERATION_PROBLEM_TYPES diff --git a/tests/src/datasets/test_conversation_chain_handler.py b/tests/src/datasets/test_conversation_chain_handler.py new file mode 100644 index 0000000000000000000000000000000000000000..43f44a65f5672a2febd4fa267649334bc57c1520 --- /dev/null +++ b/tests/src/datasets/test_conversation_chain_handler.py @@ -0,0 +1,331 @@ +import time +from unittest import mock +from unittest.mock import MagicMock + +import pandas as pd +import pytest + +from llm_studio.app_utils.default_datasets import ( + prepare_default_dataset_causal_language_modeling, +) +from llm_studio.src.datasets.conversation_chain_handler import ConversationChainHandler + + +@pytest.fixture +def df(): + return pd.DataFrame( + { + "id": ["id1", "id2", "id3", "id4", "x1", "x2", "x3", "x4"], + "parent_id": ["None", "id1", "id2", "id3", "None", "x1", "x2", "x3"], + "answer": [ + "answer1", + "answer2", + "answer3", + "answer4", + "a1", + "a2", + "a3", + "a4", + ], + "system": [ + "system1", + "system2", + "system3", + "system4", + "s1", + "s2", + "s3", + "s4", + ], + "prompt": [ + "prompt1", + "prompt2", + "prompt3", + "prompt4", + "p1", + "p2", + "p3", + "p4", + ], + } + ) + + +@pytest.fixture +def df_short(): + return pd.DataFrame( + { + "id": ["id1", "id2", "id3", "id4"], + "parent_id": ["None", "id1", "id2", "id3"], + "answer": ["answer1", "answer2", 
"answer3", "answer4"], + "system": ["system1", "system2", "system3", "system4"], + "prompt": ["prompt1", "prompt2", "prompt3", "prompt4"], + } + ) + + +@pytest.fixture +def cfg(): + cfg = MagicMock() + cfg.dataset.parent_id_column = "parent_id" + cfg.dataset.system_column = "system" + cfg.dataset.prompt_column = "prompt" + cfg.dataset.answer_column = "answer" + cfg.dataset.limit_chained_samples = True + return cfg + + +def test_conversation_chain_handler(cfg, df): + handler = ConversationChainHandler(df, cfg) + + assert len(handler) == 2, len(handler) + + data = handler[0] + assert data == { + "prompts": ["prompt1", "prompt2", "prompt3", "prompt4"], + "answers": ["answer1", "answer2", "answer3", "answer4"], + "systems": ["system1", "system2", "system3", "system4"], + } + + data = handler[1] + assert data == { + "prompts": ["p1", "p2", "p3", "p4"], + "answers": ["a1", "a2", "a3", "a4"], + "systems": ["s1", "s2", "s3", "s4"], + } + + +def test_chained_samples_disabled(df_short, cfg): + cfg.dataset.limit_chained_samples = False + cfg.dataset.parent_id_column = "None" + + handler = ConversationChainHandler(df_short, cfg) + assert len(handler) == 4 + for i in range(4): + assert handler[i] == { + "prompts": [f"prompt{i + 1}"], + "answers": [f"answer{i + 1}"], + "systems": [f"system{i + 1}"], + } + + +def test_incomplete_chained_samples(cfg, df_short): + cfg.dataset.limit_chained_samples = False + + handler = ConversationChainHandler(df_short, cfg) + assert handler.conversation_chain_ids == [[0], [0, 1], [0, 1, 2], [0, 1, 2, 3]] + assert len(handler) == 4 + for i in range(4): + assert handler[i] == { + "prompts": [f"prompt{j + 1}" for j in range(i + 1)], + "answers": [f"answer{j + 1}" for j in range(i + 1)], + "systems": [f"system{j + 1}" for j in range(i + 1)], + } + + +def test_get_conversation_ids(): + # test the get_conversation_ids method - normal case + conv_ids = ConversationChainHandler.get_conversation_ids( + {"id2": "id1", "id3": "id2", "id4": "id3"}, "id4" + ) 
+ assert conv_ids == ["id1", "id2", "id3", "id4"] + + # test the get_conversation_ids method - circular case, should raise ValueError + with pytest.raises(ValueError): + ConversationChainHandler.get_conversation_ids( + {"id1": "id4", "id2": "id1", "id3": "id2", "id4": "id3"}, "id4" + ) + + +@pytest.fixture +def df_with_nan(): + # mapping is + # a1 -> " " -> -inf -> 1234567890 -> "1234567890" -> "x1" -> 1 -> 2 -> 3 -> 4 + # a2 + # a3 + # a4 + return pd.DataFrame( + { + "id": [ + "a1", + " ", + "-inf", + 1234567890, + "1234567890", + "x1", + 1, + 2, + 3.0, + 4.0, + "a2", + "a3", + "a4", + ], + "parent_id": [ + " ", # valid + "-inf", # valid + 1234567890, # valid + "1234567890", # valid, different type + "x1", # valid + 1.0, # valid, needs to map to the int value + 2.0, # valid, needs to map to the int value + 3, # valid, needs to map to the float value + 4, # valid, needs to map to the float value + float("nan"), # should be ignored + "None", # should be ignored + None, # should be ignored + float("inf"), # should be ignored + ], + "answer": [f"answer{i + 1}" for i in range(13)], + "system": [f"system{i + 1}" for i in range(13)], + "prompt": [f"prompt{i + 1}" for i in range(13)], + } + ) + + +def test_conversation_chain_handles_nan_parent_ids(df_with_nan, cfg): + handler = ConversationChainHandler(df_with_nan, cfg) + assert handler.conversation_chain_ids == [ + [9, 8, 7, 6, 5, 4, 3, 2, 1, 0], + [10], + [11], + [12], + ] + assert len(handler) == 4 + assert handler[0] == { + "prompts": [ + "prompt10", + "prompt9", + "prompt8", + "prompt7", + "prompt6", + "prompt5", + "prompt4", + "prompt3", + "prompt2", + "prompt1", + ], + "answers": [ + "answer10", + "answer9", + "answer8", + "answer7", + "answer6", + "answer5", + "answer4", + "answer3", + "answer2", + "answer1", + ], + "systems": [ + "system10", + "system9", + "system8", + "system7", + "system6", + "system5", + "system4", + "system3", + "system2", + "system1", + ], + } + assert handler[1] == { + "prompts": 
["prompt11"], + "answers": ["answer11"], + "systems": ["system11"], + } + assert handler[2] == { + "prompts": ["prompt12"], + "answers": ["answer12"], + "systems": ["system12"], + } + assert handler[3] == { + "prompts": ["prompt13"], + "answers": ["answer13"], + "systems": ["system13"], + } + + +def test_conversation_chain_handler_filters_parent_ids(df_with_nan, cfg): + for i in range(len(df_with_nan)): + df_with_nan_1 = df_with_nan.copy() + df_with_nan_1.loc[i, "parent_id"] = "MISSING" + + handler_1 = ConversationChainHandler(df_with_nan_1, cfg) + df_with_nan_2 = df_with_nan.copy() + df_with_nan_2.loc[i, "parent_id"] = "None" + + handler_2 = ConversationChainHandler(df_with_nan_2, cfg) + assert handler_1.conversation_chain_ids == handler_2.conversation_chain_ids + + +def test_oasst_conversation_chain_handler(tmp_path): + """ + Test conversation chain handler on default OASST dataset. + """ + + df = prepare_default_dataset_causal_language_modeling(tmp_path) + cfg = mock.MagicMock() + cfg.dataset.prompt_column = "instruction" + cfg.dataset.answer_column = "output" + cfg.dataset.parent_id_column = "parent_id" + cfg.dataset.system_column = "None" + + for limit_chained_samples in [False, True]: + cfg.dataset.limit_chained_samples = limit_chained_samples + conversation_chain_handler = ConversationChainHandler(df, cfg=cfg) + for conversation_chain_ids in conversation_chain_handler.conversation_chain_ids: + parent_idx = conversation_chain_ids[0] + for idx in conversation_chain_ids[1:]: + df.loc[idx, "parent_id"] = parent_idx + parent_idx = idx + + cfg.dataset.limit_chained_samples = False + conversation_chain_handler = ConversationChainHandler(df, cfg=cfg) + + prompts = [ + "I am building a mechanical keyboard from scratch. I already have the working hardware and am in the process of configuring the firmware. However i find that the qwertz layout gives me wrist pain. 
I will use the keyboard for writing in english, german and french, and for coding mainly.\nWhat keyboard layout would be best suited for me?", # noqa: E501 + "what is the advantage of BvoFrak and Bépo, over Azerty that is more common in France.", # noqa: E501 + "Is there any information on how long it takes a typical person to adopt the BvoFrak or Bépo layouts? I worry that the time investment may be a concern, or that I may make more mistakes when programming during the transition period.", # noqa: E501 + ] + answers = [ + "Generally, it seems that Neo layout may be what you are looking for.\nHere are some keyboard layouts for various use cases: \nFor French: BvoFrak, Bépo\nFor German: Neo, AdNW\nFor English: DHIATENSOR, Dvorak,\nFor programing: Programmer Dvorak, Evolved, Capewell, QGMLWY, Arensito\nNote that while layout may contribute to wrist pain, other factors like the angle of the keyboard, key spacing (vertical, horizontal, and depth), more may also be contributing factors. Learning a new layout takes time and effort, and may make it more difficult to type on other keyboards.", # noqa: E501 + "BvoFrak and Bépo are alternative French keyboard layouts that aim to improve on the traditional Azerty layout. Some of the claimed advantages include:\n\n- BvoFrak aims to minimize finger movement and optimize typing efficiency. It groups letters by finger positions rather than alphabetic order.\n- Bépo aims to allow for faster typing of common French letters and letter combinations. It also groups letters by finger positions and makes some other rearrangements to commonly used keys.\n- Both BvoFrak and Bépo proponents argue that Azerty is inefficient and optimized for typewriters, not modern keyboards and typing. 
However, Azerty is deeply entrenched and the alternative layouts have not achieved widespread adoption.\n\nSo the potential advantages are increased typing efficiency and comfort, but the alternative layouts have to overcome the momentum of the established Azerty layout. Whether the claimed advantages hold up in practice for most typists is unclear without much broader adoption of the alternative layouts.", # noqa: E501 + "The time it takes to learn a new keyboard layout can vary depending on several factors, including the individual's previous typing experience and the amount of time they can devote to practice. However, on average, it can take between 2-4 weeks to become proficient in a new keyboard layout.\n\nDuring the transition period, it's natural to make more mistakes when typing, especially when programming or using special characters. It's important to be patient with yourself and allow yourself time to adjust to the new layout. With practice, the new layout will become more natural and comfortable to use, and the speed and accuracy of your typing will improve.\n\nUltimately, the investment of time and effort required to learn a new keyboard layout is a personal decision, and it's important to consider the potential benefits and drawbacks before making a decision. 
While there may be some initial challenges, many people find that the improved ergonomics and efficiency of a new keyboard layout make the investment well worth it in the long run.", # noqa: E501 + ] + systems = ["", "", ""] + sample = conversation_chain_handler[ + 10719 + ] # 10719 == sample with 3 round conversation + assert sample["prompts"] == prompts + assert sample["answers"] == answers + assert sample["systems"] == systems + + +def test_oasst_conversation_chain_handler_is_fast(tmp_path): + df_oasst = prepare_default_dataset_causal_language_modeling(tmp_path) + cfg = mock.MagicMock() + cfg.dataset.prompt_column = "instruction" + cfg.dataset.answer_column = "output" + cfg.dataset.parent_id_column = "parent_id" + cfg.dataset.system_column = "None" + cfg.dataset.limit_chained_samples = True + dfs = [] + for i in range(50): + df = df_oasst.copy() + df["parent_id"] = df["parent_id"].apply( + lambda x: x + str(i) if x is not None else x + ) + df["id"] = df["id"].apply(lambda x: x + str(i)) + dfs.append(df) + + df = pd.concat(dfs).reset_index(drop=True) + + assert len(df) > 400_000 + + t_0 = time.time() + conversation_chain_handler = ConversationChainHandler(df, cfg) + _ = [conversation for conversation in conversation_chain_handler] + t_1 = time.time() + assert t_1 - t_0 < 10 # shouldn't take longer than ~5 seconds diff --git a/tests/src/datasets/test_text_causal_language_modeling_ds.py b/tests/src/datasets/test_text_causal_language_modeling_ds.py new file mode 100644 index 0000000000000000000000000000000000000000..bd2986df80adf1cb1511d9acd282f09a85eb96ee --- /dev/null +++ b/tests/src/datasets/test_text_causal_language_modeling_ds.py @@ -0,0 +1,288 @@ +from unittest import mock +from unittest.mock import MagicMock, patch + +import numpy as np +import pandas as pd +import pytest + +from llm_studio.app_utils.default_datasets import ( + prepare_default_dataset_causal_language_modeling, +) +from llm_studio.python_configs.text_causal_language_modeling_config import ( + 
ConfigNLPCausalLMDataset, + ConfigNLPCausalLMTokenizer, + ConfigProblemBase, +) +from llm_studio.src.datasets.text_causal_language_modeling_ds import CustomDataset + + +def test_prepare_default_dataset(tmp_path): + df = prepare_default_dataset_causal_language_modeling(tmp_path) + assert isinstance(df, pd.DataFrame) + assert set(df.keys()) == set( + ["instruction", "output", "id", "parent_id", "lang", "rank"] + ) + assert df.shape == (13026, 6) + + +def test_clean_output(): + output = { + "predicted_text": np.array( + [ + "This is a test", + "This is a test This is a test", + "This is a test This is a test", + "This is a test This is a test", + " This is a test", + "This is a test ", + ] + ) + } + + cfg = mock.MagicMock() + cfg.tokenizer._stop_words = ["", "", ""] + + predicted_text_clean = CustomDataset.clean_output(output=output, cfg=cfg)[ + "predicted_text" + ] + assert predicted_text_clean == [ + "This is a test", + "This is a test", + "This is a test", + "This is a test", + "", + "This is a test", + ] + + +def test_sanity_check_raises_error(): + mock_config = MagicMock() + mock_config.dataset.parent_id_column = "parent_id" + mock_config.dataset.answer_column = "answer" + + df_1 = pd.DataFrame( + { + "id": [1, 2, 3, 4], + "parent_id": [2, None, 4, 1], + "answer": ["a", "b", "c", "d"], + "other_data": ["a", "b", "c", "d"], + } + ) + CustomDataset.sanity_check(df_1, mock_config) + + df_2 = pd.DataFrame( + { + "id": [1, 2, 3, 4], + "parent_id": [None, None, None, None], + "answer": ["a", "b", "c", "d"], + "other_data": ["a", "b", "c", "d"], + } + ) + CustomDataset.sanity_check(df_2, mock_config) + + invalid_df_1 = pd.DataFrame( + { + "id": [1, 2, 3, 4], + "parent_id": [1, 2, 3, 4], + "answer": ["a", "b", "c", "d"], + "other_data": ["a", "b", "c", "d"], + } + ) + with pytest.raises( + AssertionError, match="Parent id column is the same as id column for some rows" + ): + CustomDataset.sanity_check(invalid_df_1, mock_config) + + invalid_df_2 = pd.DataFrame( + { + 
"id": [1, 2, 3, 4], + "parent_id": [2, 3, 4, 1], + "other_data": ["a", "b", "c", "d"], + } + ) + with pytest.raises( + AssertionError, + match="Did not find any conversation start. " + "Please ensure that some parent ids are empty.", + ): + CustomDataset.sanity_check(invalid_df_2, mock_config) + + +@pytest.fixture +def mock_auto_tokenizer(): + # from + # https://github.com/deepset-ai/haystack/blob/b5aef24a7ebac55cb4ba492baf81a85598700b94/test/conftest.py#L908 + with patch( + "transformers.AutoTokenizer.from_pretrained", autospec=True + ) as mock_from_pretrained: + yield mock_from_pretrained + + +def test_init(mock_auto_tokenizer): + df = pd.DataFrame( + { + "col_A": [1, 2, 3], + "col_B": [4, 5, 6], + } + ) + cfg = mock.MagicMock() + cfg.dataset.prompt_column = "col_A" + cfg.dataset.answer_column = "col_B" + cfg.dataset.parent_id_column = "None" + cfg.dataset.system_column = "None" + + cfg.dataset.text_system_start = "" + cfg.dataset.text_prompt_start = "" + cfg.dataset.text_answer_separator = "" + + dataset = CustomDataset(df, cfg) + + assert dataset.df.equals(df) + assert dataset.mode == "train" + + +def test_getitem(): + df = pd.DataFrame( + { + "prompt": ["prompt 1", "prompt 2", "prompt 3"], + "answer": ["answer 1", "answer 2", "answer 3"], + "parent_id": [None, 0, 1], + "system": ["system 1", "system 2", "system 3"], + "id": [0, 1, 2], + } + ) + + cfg = ConfigProblemBase( + dataset=ConfigNLPCausalLMDataset( + prompt_column=("prompt",), + answer_column="answer", + parent_id_column="parent_id", + system_column="system", + text_system_start="System:", + text_prompt_start="Prompt:", + text_answer_separator="Answer:", + add_eos_token_to_answer=True, + limit_chained_samples=True, + ), + tokenizer=ConfigNLPCausalLMTokenizer(max_length=513), + ) + + cfg.llm_backbone = "EleutherAI/pythia-2.8b-deduped" + + dataset = CustomDataset(df, cfg) + assert len(dataset) == 1 + + result = dataset[0] + assert isinstance(result, dict) + assert set(result.keys()) == { + "labels", + 
"input_ids", + "attention_mask", + "prompt_input_ids", + "prompt_attention_mask", + "answer_input_ids", + "answer_attention_mask", + } + + assert ( + dataset.tokenizer.decode(result["input_ids"], skip_special_tokens=True) + == "System:system 1" + "Prompt:prompt 1" + "Answer:answer 1" + "Prompt:prompt 2" + "Answer:answer 2" + "Prompt:prompt 3" + "Answer:answer 3" + ) + + assert ( + dataset.tokenizer.decode(result["prompt_input_ids"], skip_special_tokens=True) + == "System:system 1" + "Prompt:prompt 1" + "Answer:answer 1" + "Prompt:prompt 2" + "Answer:answer 2" + "Prompt:prompt 3" + "Answer:" + ) + + assert ( + dataset.tokenizer.decode(result["input_ids"], skip_special_tokens=False) + == "<|endoftext|>" * 475 + "System:system 1" + "<|endoftext|>" + "Prompt:prompt 1" + "<|endoftext|>" + "Answer:answer 1" + "<|endoftext|>" + "Prompt:prompt 2" + "<|endoftext|>" + "Answer:answer 2" + "<|endoftext|>" + "Prompt:prompt 3" + "<|endoftext|>" + "Answer:answer 3" + "<|endoftext|>" + ) + + assert result["input_ids"].shape == (513,) + assert result["prompt_input_ids"].shape == (513,) + + +def test_getitem_no_chaining(): + df = pd.DataFrame( + { + "prompt": ["prompt 1", "prompt 2", "prompt 3"], + "answer": ["answer 1", "answer 2", "answer 3"], + "parent_id": [None, 0, 1], + "system": ["system 1", "system 2", "system 3"], + "id": [0, 1, 2], + } + ) + + cfg = ConfigProblemBase( + dataset=ConfigNLPCausalLMDataset( + prompt_column=("prompt",), + answer_column="answer", + parent_id_column="None", + system_column="system", + text_system_start="System:", + text_prompt_start="Prompt:", + text_answer_separator="Answer:", + add_eos_token_to_answer=True, + ), + tokenizer=ConfigNLPCausalLMTokenizer(max_length=513), + ) + + cfg.llm_backbone = "EleutherAI/pythia-2.8b-deduped" + + dataset = CustomDataset(df, cfg) + assert len(dataset) == 3 + + for i in range(3): + result = dataset[i] + assert isinstance(result, dict) + assert set(result.keys()) == { + "labels", + "input_ids", + "attention_mask", 
+ "prompt_input_ids", + "prompt_attention_mask", + "answer_input_ids", + "answer_attention_mask", + } + + assert ( + dataset.tokenizer.decode(result["input_ids"], skip_special_tokens=True) + == f"System:system {i+1}" + f"Prompt:prompt {i+1}" + f"Answer:answer {i+1}" + ) + + assert ( + dataset.tokenizer.decode( + result["prompt_input_ids"], skip_special_tokens=True + ) + == f"System:system {i+1}" + f"Prompt:prompt {i+1}" + "Answer:" + ) diff --git a/tests/src/datasets/test_text_dpo_modeling_ds.py b/tests/src/datasets/test_text_dpo_modeling_ds.py new file mode 100644 index 0000000000000000000000000000000000000000..ab8ac91eb08673406d87410a4a3d0ddfd096e16d --- /dev/null +++ b/tests/src/datasets/test_text_dpo_modeling_ds.py @@ -0,0 +1,290 @@ +import numpy as np +import pandas as pd +import pytest +import torch +from tqdm import tqdm + +from llm_studio.python_configs.text_causal_language_modeling_config import ( + ConfigNLPCausalLMTokenizer, +) +from llm_studio.python_configs.text_dpo_modeling_config import ( + ConfigDPODataset, + ConfigProblemBase, +) +from llm_studio.src.datasets.text_dpo_modeling_ds import CustomDataset + + +@pytest.fixture +def df(): + return pd.DataFrame( + { + "prompt_column": [f"prompt {i}" for i in range(200)], + "answer_column": [f"chosen_response {i}" for i in range(200)], + "rejected_answer_column": [f"rejected_response {i}" for i in range(200)], + } + ) + + +@pytest.fixture +def df_with_conversation_chain_ids(): + """ + Create a dataframe with conversation chain ids, e.g.: + prompt_column answer_column rejected_answer_column parent_id_column id + 0 prompt 1 response 1 response 1 None 1 + 1 prompt 2 response 2 response 2 1 2 + 2 prompt 3 response 3 response 3 2 3 + 3 prompt 4 response 4 response 4 3 4 + 4 prompt 5 chosen_response 5 rejected_response 5 4 5 + 5 prompt 6 response 6 response 6 None 6 + """ + ids = [str(i + 1) for i in range(200)] + + parent_ids = np.array(ids, dtype=object).reshape(-1, 5) + parent_ids[:, -1] = "None" + parent_ids 
= np.roll(parent_ids, 1, 1).reshape(-1) + + # ids: [0, 1, 2, 3, 4 ] + # parent_ids: [None, 0, 1, 2, 3] + # conversation: 0 -> 1 -> 2 -> 3 -> 4 + chosen_responses = [ + f"chosen_response {idx}" if int(idx) % 5 == 0 else f"response {idx}" + for idx in ids + ] + rejected_responses = [ + f"rejected_response {idx}" if int(idx) % 5 == 0 else f"response {idx}" + for idx in ids + ] + return pd.DataFrame( + { + "prompt_column": [f"prompt {idx}" for idx in ids], + "answer_column": chosen_responses, + "rejected_answer_column": rejected_responses, + "parent_id_column": parent_ids, + "id": ids, + } + ) + + +def test_dataset_conversation_chain_is_correct(df_with_conversation_chain_ids): + cfg = ConfigProblemBase( + dataset=ConfigDPODataset( + prompt_column=("prompt_column",), + answer_column="answer_column", + rejected_answer_column="rejected_answer_column", + parent_id_column="parent_id_column", + ) + ) + dataset = CustomDataset(df_with_conversation_chain_ids, cfg, mode="train") + + # Check for right formatting, e.g.: + # dataset.conversation_chain_handler_chosen[0] == + # { + # "prompts": ["prompt 1", "prompt 2", "prompt 3", "prompt 4", "prompt 5"], + # "answers": [ + # "response 1", + # "response 2", + # "response 3", + # "response 4", + # "chosen_response 5", + # ], + # "systems": ["", "", "", "", ""], + # } + + for idx in range(200 // 5): + for name, conversation_chain_handler in zip( + ["chosen", "rejected"], + [ + dataset.conversation_chain_handler, + dataset.conversation_chain_handler_rejected, + ], + ): + input_text_dict = conversation_chain_handler[idx] + expected = { + "prompts": [f"prompt {i + 1}" for i in range(idx * 5, (idx + 1) * 5)], + "answers": [ + f"response {i + 1}" for i in range(idx * 5, (idx + 1) * 5 - 1) + ] + + [f"{name}_response {idx * 5 + 5}"], + "systems": [""] * 5, + } + + for key in expected: + assert input_text_dict[key] == expected[key], ( + input_text_dict[key], + expected[key], + name, + ) + + +def 
test_dataset_label_is_correct(df_with_conversation_chain_ids): + cfg = ConfigProblemBase( + dataset=ConfigDPODataset( + prompt_column=("prompt_column",), + answer_column="answer_column", + rejected_answer_column="rejected_answer_column", + parent_id_column="parent_id_column", + ) + ) + dataset = CustomDataset(df_with_conversation_chain_ids, cfg, mode="train") + + for idx, item in enumerate(dataset): + sample = dataset[idx] + chosen_response = dataset.tokenizer.decode( + sample["chosen_labels"][sample["chosen_labels"] != -100], + skip_special_tokens=True, + ) + rejected_response = dataset.tokenizer.decode( + sample["rejected_labels"][sample["rejected_labels"] != -100], + skip_special_tokens=True, + ) + prompt = dataset.tokenizer.decode( + sample["prompt_input_ids"][sample["prompt_input_ids"] != 0], + skip_special_tokens=True, + ) + + assert ( + prompt == f"<|prompt|>prompt {idx * 5 + 1} " + f"<|answer|> response {idx * 5 + 1} " + f"<|prompt|>prompt {idx * 5 + 2} " + f"<|answer|> response {idx * 5 + 2} " + f"<|prompt|>prompt {idx * 5 + 3} " + f"<|answer|> response {idx * 5 + 3} " + f"<|prompt|>prompt {idx * 5 + 4} " + f"<|answer|> response {idx * 5 + 4} " + f"<|prompt|>prompt {idx * 5 + 5} " + "<|answer|>" + ) + assert chosen_response == f"chosen_response {idx * 5 + 5}" + assert rejected_response == f"rejected_response {idx * 5 + 5}" + + +def test_dataloader_has_correct_keys(df): + cfg = ConfigProblemBase( + dataset=ConfigDPODataset( + prompt_column=("prompt_column",), + answer_column="answer_column", + rejected_answer_column="rejected_answer_column", + parent_id_column="None", + ) + ) + + dataset = CustomDataset(df, cfg, mode="train") + dataloader = torch.utils.data.DataLoader(dataset, batch_size=16, shuffle=True) + + for idx, batch in tqdm(enumerate(dataloader), total=len(dataloader)): + for key in batch: + if idx != len(dataloader) - 1: + assert batch[key].size(0) == 16, ( + key, + batch[key].shape, + ) + + keys = [ + "chosen_input_ids", + "chosen_attention_mask", 
+ "chosen_labels", + "rejected_input_ids", + "rejected_attention_mask", + "rejected_labels", + "prompt_input_ids", + "prompt_attention_mask", + ] + assert set(batch.keys()) - set(keys) == set() + assert set(keys) - set(batch.keys()) == set() + + +def test_empy_answer_dataset_throws_no_error(df): + cfg = ConfigProblemBase( + dataset=ConfigDPODataset( + prompt_column=("prompt_column",), + answer_column="answer_column", + rejected_answer_column="rejected_answer_column", + add_eos_token_to_answer=False, + add_eos_token_to_prompt=False, + add_eos_token_to_system=False, + ), + ) + for column in ["prompt_column", "answer_column", "rejected_answer_column"]: + values = df[column].values + df[column] = "" + dataset = CustomDataset(df, cfg, mode="train") + [dataset[i] for i in range(len(dataset))] + df[column] = values + + +@pytest.fixture +def df_single_prompt(): + prompt = """when ordering your sandstones, you select which colour scale you would want. + it could be e.g. a 100% from grey/sand mix, or 80% fra beige/yellow mixed with 20% from black/brown. + This is all lower case. Can you fix that?""" + system = """You are an AI assistant. User will you give you a task. Your goal is to complete the task as faithfully as you can. +While performing the task think step-by-step and justify your steps.""" + answer = """When ordering your sandstones, you select which color scale you would want. It could be, for example, a 100% from grey/sand mix, or 80% from beige/yellow mixed with 20% from black/brown. +Step 1: Capitalize the first letter of the sentence. +Step 2: Correct the spelling of "color" (assuming American English usage). +Step 3: Replace ", e.g." with "for example" to clarify the sentence. 
+Step 4: Capitalize "a" in "100% from a grey/sand mix" +Step 5: Ensure the proper usage of words and punctuation throughout the revised sentence.""" + return pd.DataFrame( + { + "prompt": [prompt], + "system": [system], + "answer": [answer], + "rejected_answer": ["I cannot do that."], + } + ) + + +def generate_causal_lm_model_input_ids(df): + from llm_studio.python_configs.text_causal_language_modeling_config import ( + ConfigNLPCausalLMDataset, + ) + from llm_studio.python_configs.text_causal_language_modeling_config import ( + ConfigProblemBase as ConfigCausalLMProblemBase, + ) + from llm_studio.src.datasets.text_causal_language_modeling_ds import ( + CustomDataset as CausalLMCustomDataset, + ) + + cfg = ConfigCausalLMProblemBase( + llm_backbone="h2oai/h2ogpt-4096-llama2-7b", + dataset=ConfigNLPCausalLMDataset( + system_column="system", + prompt_column=("prompt",), + answer_column="answer", + ), + tokenizer=ConfigNLPCausalLMTokenizer( + max_length_prompt=256, max_length_answer=256, max_length=512 + ), + ) + dataset = CausalLMCustomDataset(df, cfg, mode="train") + return dataset[0] + + +def test_dataset_prompt_ids_are_the_same_as_for_causal_language_modeling( + df_single_prompt, +): + """ + DPO model should generate the same prompts as causal language modeling + """ + generated_text_causal_lm = generate_causal_lm_model_input_ids(df_single_prompt) + + cfg = ConfigProblemBase( + llm_backbone="h2oai/h2ogpt-4096-llama2-7b", + dataset=ConfigDPODataset( + system_column="system", + prompt_column=("prompt",), + answer_column="answer", + rejected_answer_column="rejected_answer", + ), + tokenizer=ConfigNLPCausalLMTokenizer( + max_length_prompt=256, max_length_answer=256, max_length=512 + ), + ) + dataset = CustomDataset(df_single_prompt, cfg, mode="train") + generated_text = dataset[0] + + for key in ["prompt_input_ids", "prompt_attention_mask"]: + assert torch.all( + generated_text_causal_lm[key] == generated_text[key] + ), f"{key} is not the same" diff --git 
a/tests/src/models/test_dpo_modeling_model.py b/tests/src/models/test_dpo_modeling_model.py new file mode 100644 index 0000000000000000000000000000000000000000..f358103bd0cc05dee87e614da29123d2c0a50925 --- /dev/null +++ b/tests/src/models/test_dpo_modeling_model.py @@ -0,0 +1,265 @@ +import random +from contextlib import contextmanager +from dataclasses import dataclass +from unittest.mock import patch + +import pandas as pd +import pytest +import torch +import torch.nn as nn + +from llm_studio.python_configs.text_causal_language_modeling_config import ( + ConfigNLPCausalLMPrediction, + ConfigNLPCausalLMTokenizer, +) +from llm_studio.python_configs.text_dpo_modeling_config import ( + ConfigDPODataset, + ConfigProblemBase, +) +from llm_studio.src.datasets.text_dpo_modeling_ds import CustomDataset +from llm_studio.src.models.text_dpo_modeling_model import Model +from llm_studio.src.utils.data_utils import batch_padding +from train import run_eval + + +@pytest.fixture +def df(): + prompt = """when ordering your sandstones, you select which colour scale you would want. + it could be e.g. a 100% from grey/sand mix, or 80% fra beige/yellow mixed with 20% from black/brown. + This is all lower case. Can you fix that?""" + system = """You are an AI assistant. User will you give you a task. Your goal is to complete the task as faithfully as you can. +While performing the task think step-by-step and justify your steps.""" + answer = """When ordering your sandstones, you select which color scale you would want. It could be, for example, a 100% from grey/sand mix, or 80% from beige/yellow mixed with 20% from black/brown. + +Step 1: Capitalize the first letter of the sentence. + +Step 2: Correct the spelling of "color" (assuming American English usage). + +Step 3: Replace ", e.g." with "for example" to clarify the sentence. 
+ +Step 4: Capitalize "a" in "100% from a grey/sand mix" + +Step 5: Ensure the proper usage of words and punctuation throughout the revised sentence.""" + return pd.DataFrame( + { + "prompt": [prompt], + "system": [system], + "answer": [answer], + "rejected_answer": ["I cannot do that."], + } + ) + + +def generate_causal_lm_model_text(df): + from llm_studio.python_configs.text_causal_language_modeling_config import ( + ConfigNLPCausalLMDataset, + ) + from llm_studio.python_configs.text_causal_language_modeling_config import ( + ConfigProblemBase as ConfigCausalLMProblemBase, + ) + from llm_studio.src.datasets.text_causal_language_modeling_ds import ( + CustomDataset as CausalLMCustomDataset, + ) + from llm_studio.src.models.text_causal_language_modeling_model import ( + Model as CausalLMModel, + ) + + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + + cfg = ConfigCausalLMProblemBase( + llm_backbone="h2oai/llama2-0b-unit-test", + dataset=ConfigNLPCausalLMDataset( + system_column="system", + prompt_column=("prompt",), + answer_column="answer_column", + ), + tokenizer=ConfigNLPCausalLMTokenizer( + max_length_prompt=256, max_length_answer=256, max_length=512 + ), + ) + cfg.architecture.backbone_dtype = "float32" + + dataset = CausalLMCustomDataset(df, cfg, mode="train") + model = CausalLMModel(cfg).to(device).eval() + dataloader = torch.utils.data.DataLoader(dataset, batch_size=16, shuffle=True) + + batch = next(iter(dataloader)) + batch = {k: v.to(device) for k, v in batch.items()} + batch_padding( + cfg, + batch, + mask_key="prompt_attention_mask", + pad_keys=[ + "prompt_input_ids", + "prompt_attention_mask", + "prompt_special_tokens_mask", + ], + ) + with torch.no_grad(): + generated_text = dataset.tokenizer.decode(model.generate(batch, cfg)[0]) + + return generated_text + + +def test_generation_is_the_same_as_for_causal_language_modeling(df): + """ + DPO model should generate the same output text as causal language modeling + """ + device = 
torch.device("cuda" if torch.cuda.is_available() else "cpu") + generated_text_causal_lm = generate_causal_lm_model_text(df) + + cfg = ConfigProblemBase( + llm_backbone="h2oai/llama2-0b-unit-test", + dataset=ConfigDPODataset( + system_column="system", + prompt_column=("prompt",), + answer_column="answer_column", + rejected_answer_column="rejected_answer", + ), + tokenizer=ConfigNLPCausalLMTokenizer( + max_length_prompt=256, max_length_answer=256, max_length=512 + ), + ) + cfg.architecture.backbone_dtype = "float32" + + dataset = CustomDataset(df, cfg, mode="train") + model = Model(cfg).eval().to(device) + dataloader = torch.utils.data.DataLoader(dataset, batch_size=16, shuffle=True) + + batch = next(iter(dataloader)) + batch = {k: v.to(device) for k, v in batch.items()} + batch_padding( + cfg, + batch, + mask_key="prompt_attention_mask", + pad_keys=[ + "prompt_input_ids", + "prompt_attention_mask", + "prompt_special_tokens_mask", + ], + ) + with torch.no_grad(): + generated_text = dataset.tokenizer.decode(model.generate(batch, cfg)[0]) + + assert ( + generated_text == generated_text_causal_lm + ), "Generated text is not the same as from causal LM model:" "{}\n{}".format( + generated_text, generated_text_causal_lm + ) + + +@pytest.fixture +def df2(): + # create a list of all lowercase letters + alphabet = [chr(i) for i in range(97, 123)] + + # create random strings from the alphabet + prompts = ["".join(random.choice(alphabet) for _ in range(10)) for _ in range(10)] + systems = ["".join(random.choice(alphabet) for _ in range(10)) for _ in range(10)] + answers = ["".join(random.choice(alphabet) for _ in range(10)) for _ in range(10)] + rejected_answers = [ + "".join(random.choice(alphabet) for _ in range(10)) for _ in range(10) + ] + + return pd.DataFrame( + { + "prompt": prompts, + "system": systems, + "answer": answers, + "rejected_answer": rejected_answers, + } + ) + + +def test_dpo_perplexity_metric(tmp_path, df2): + device = torch.device("cuda" if 
torch.cuda.is_available() else "cpu") + + cfg = ConfigProblemBase( + output_directory=str(tmp_path), + llm_backbone="MaxJeblick/llama2-0b-unit-test", + dataset=ConfigDPODataset( + system_column="system", + prompt_column=("prompt",), + answer_column="answer_column", + rejected_answer_column="answer_column", + ), + tokenizer=ConfigNLPCausalLMTokenizer( + max_length_prompt=256, max_length_answer=256, max_length=512 + ), + prediction=ConfigNLPCausalLMPrediction(metric="Perplexity"), + ) + cfg.architecture.gradient_checkpointing = False + cfg.environment._device = device # type: ignore + + # bfloat16 is not supported on older GPUs + cfg.environment.mixed_precision_dtype = "float16" + + dataset = CustomDataset(df2, cfg, mode="train") + model = Model(cfg).eval().to(device) + vocab_size = model.backbone.config.vocab_size + + class MockBackbone(nn.Module): + """ + Chosen and rejected logits are the same + Chosen reference and rejected reference logits are the same, + but different from chosen and rejected logits. 
+ As answer_column and rejected_answer_column are the same, + + -> perplexity and rejection_perplexity should be the same + -> chosen_rewards and rejected_rewards should be the same + -> chosen_cross_entropy and rejected_cross_entropy should be the same + -> reward margin should be 0 + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.seed = 0 + + def disable_adapter(self): + # mock lora adapter + @contextmanager + def flip_seed(): + self.seed = 1 + yield None + self.seed = 0 + + return flip_seed() + + def forward(self, input_ids, attention_mask): + @dataclass + class Result: + bs, seq_len = input_ids.shape + torch.manual_seed(self.seed) + logits = torch.rand((bs, seq_len, vocab_size)).to(input_ids.device) + + result = Result() + return result + + class ListLogger: + def __init__(self): + self.logs = {} + + def log(self, subset: str, name: str, value: str | float, step: float = None): + self.logs[name] = self.logs.get(name, []) + [value] + + with patch.object(target=model, attribute="backbone", new_callable=MockBackbone): + dataloader = torch.utils.data.DataLoader(dataset, batch_size=16, shuffle=True) + + # mock cfg.logging._logger.log + cfg.logging._logger = ListLogger() + + run_eval( + cfg, + model=model, + val_dataloader=dataloader, + val_df=df2, + mode="validation", + ) + + log_dict = cfg.logging._logger.logs + assert log_dict["Perplexity"] == log_dict["rejected_perplexity"] + assert log_dict["chosen_rewards"] == log_dict["rejected_rewards"] + assert ( + log_dict["chosen_cross_entropy_loss"] == log_dict["rejected_cross_entropy_loss"] + ) + assert log_dict["reward_margin"] == [0] * len(log_dict["reward_margin"]) diff --git a/tests/src/models/test_text_causal_language_modeling_model.py b/tests/src/models/test_text_causal_language_modeling_model.py new file mode 100644 index 0000000000000000000000000000000000000000..0d0b9990657c55fbcd5cec56d2fb1ae3151b4c6d --- /dev/null +++ 
b/tests/src/models/test_text_causal_language_modeling_model.py @@ -0,0 +1,83 @@ +import torch + +from llm_studio.python_configs.text_causal_language_modeling_config import ( + ConfigProblemBase, +) +from llm_studio.src.models.text_causal_language_modeling_model import Model +from llm_studio.src.utils.modeling_utils import TokenStoppingCriteria, activate_neftune + + +def test_token_stopping_criteria(): + token_stopping_criteria = TokenStoppingCriteria( + stop_word_ids=torch.tensor([0, 1, 2, 8]), prompt_input_ids_len=4 + ) + + input_ids = torch.tensor( + [ + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], + [2, 3, 4, 5, 6, 7, 8, 9, 10, 11], + [3, 4, 5, 6, 7, 8, 9, 10, 11, 12], + [4, 5, 6, 7, 8, 9, 10, 11, 12, 13], + [5, 6, 7, 8, 9, 10, 11, 12, 13, 14], + ] + ).long() + + # prompt input len is 4, so generated ids of last sample of the batch are + # [9, 10, 11, 12, 13, 14], do not trigger stopping criteria + assert not token_stopping_criteria(input_ids=input_ids, scores=None) + + token_stopping_criteria = TokenStoppingCriteria( + stop_word_ids=torch.tensor([6]), prompt_input_ids_len=0 + ) + + # first item reads [ 0, 1, 2, 3, 4, 5], so do not trigger stopping criteria + assert not token_stopping_criteria(input_ids=input_ids[:, :6], scores=None) + assert token_stopping_criteria(input_ids=input_ids[:, :7], scores=None) + + # Test stopping criteria with compound tokens + token_stopping_criteria = TokenStoppingCriteria( + stop_word_ids=torch.tensor([[6, 7]]), prompt_input_ids_len=0 + ) + + assert not token_stopping_criteria(input_ids=input_ids[:, :6], scores=None) + assert not token_stopping_criteria(input_ids=input_ids[:, :7], scores=None) + assert token_stopping_criteria(input_ids=input_ids[:, :8], scores=None) + + # Test stopping criteria with stop word ids being longer than generated text + token_stopping_criteria = TokenStoppingCriteria( + stop_word_ids=torch.tensor([[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]]), + prompt_input_ids_len=0, + ) + + 
assert not token_stopping_criteria(input_ids=input_ids, scores=None) + + +def test_neftune_is_disabled_in_inference(): + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + cfg = ConfigProblemBase(llm_backbone="h2oai/llama2-0b-unit-test") + cfg.architecture.backbone_dtype = "float32" + model = Model(cfg).eval().to(device) + + input_batch = { + "input_ids": torch.randint( + 0, + 1000, + (1, 10), + ).to(device), + "attention_mask": torch.ones(1, 10).to(device), + } + + with torch.no_grad(): + outputs = model.backbone(**input_batch) + + activate_neftune(model, neftune_noise_alpha=10) + assert model.backbone.get_input_embeddings().neftune_noise_alpha == 10 + + with torch.no_grad(): + outputs_after_neftune = model.backbone(**input_batch) + + assert torch.allclose(outputs["logits"], outputs_after_neftune["logits"]) + + # state dict does not contain neftune noise + assert [key for key in model.state_dict() if "neftune" in key] == [] diff --git a/tests/src/test_data/cfg.yaml b/tests/src/test_data/cfg.yaml new file mode 100644 index 0000000000000000000000000000000000000000..64ad3907c47f9b14fbb6cb924ef3c378c39b03f7 --- /dev/null +++ b/tests/src/test_data/cfg.yaml @@ -0,0 +1,76 @@ +architecture: + backbone_dtype: float16 + force_embedding_gradients: false + gradient_checkpointing: false + intermediate_dropout: 0.0 + config_item_that_is_not_used: 0 +augmentation: + token_mask_probability: 0.0 +dataset: + add_eos_token_to_answer: true + add_eos_token_to_prompt: true + answer_column: output + data_sample: 0.1 + data_sample_choice: + - Train + - Validation + mask_prompt_labels: false + prompt_column: + - instruction + text_answer_separator: \n + text_prompt_start: '' + train_dataframe: data/user/train/train.csv + validation_dataframe: None + validation_size: 0.01 + validation_strategy: automatic +environment: + compile_model: false + find_unused_parameters: false + gpus: + - '0' + mixed_precision: true + number_of_workers: 8 + seed: -1 +experiment_name: test 
+llm_backbone: EleutherAI/pythia-12b-deduped +logging: + logger: None + neptune_project: '' + number_of_texts: 10 +output_directory: output/user/test/ +prediction: + batch_size_inference: 0 + do_sample: false + max_length_inference: 256 + max_time: 0.0 + metric: GPT3.5 + min_length_inference: 2 + num_beams: 2 + repetition_penalty: 1.2 + stop_tokens: "" + temperature: 0.3 +problem_type: text_causal_language_modeling +tokenizer: + max_length: 144 + max_length_answer: 256 + max_length_prompt: 256 + padding_quantile: 1.0 +training: + batch_size: 3 + epochs: 0 + evaluate_before_training: true + evaluation_epochs: 1.0 + grad_accumulation: 1 + gradient_clip: 0.0 + learning_rate: 0.0001 + lora: true + lora_alpha: 16 + lora_dropout: 0.05 + lora_r: 4 + lora_target_modules: '' + optimizer: AdamW + save_best_checkpoint: false + schedule: Cosine + train_validation_data: false + warmup_epochs: 0.0 + weight_decay: 0.0 diff --git a/tests/src/utils/test_data_utils.py b/tests/src/utils/test_data_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..262bd60faadae45f97706651ca22bf19694d53a1 --- /dev/null +++ b/tests/src/utils/test_data_utils.py @@ -0,0 +1,116 @@ +import os +import pathlib +import random +import unittest +from unittest.mock import MagicMock + +import pandas as pd +import pytest + +from llm_studio.app_utils.default_datasets import ( + prepare_default_dataset_causal_language_modeling, +) +from llm_studio.src.datasets.conversation_chain_handler import ConversationChainHandler +from llm_studio.src.utils.data_utils import load_train_valid_data + + +@pytest.fixture +def cfg_mock(): + cfg = MagicMock() + cfg.dataset.train_dataframe = "/path/to/train/data" + cfg.dataset.validation_dataframe = "/path/to/validation/data" + + cfg.dataset.system_column = "None" + cfg.dataset.prompt_column = "prompt" + cfg.dataset.answer_column = "answer" + + cfg.dataset.validation_size = 0.2 + return cfg + + +@pytest.fixture +def 
read_dataframe_drop_missing_labels_mock(monkeypatch): + data = { + "prompt": [f"Prompt{i}" for i in range(100)], + "answer": [f"Answer{i}" for i in range(100)], + "id": list(range(100)), + } + df = pd.DataFrame(data) + mock = MagicMock(return_value=df) + monkeypatch.setattr( + "llm_studio.src.utils.data_utils.read_dataframe_drop_missing_labels", mock + ) + return mock + + +numbers = list(range(100)) +random.shuffle( + numbers, +) +groups = [numbers[n::13] for n in range(13)] + + +@pytest.fixture +def conversation_chain_ids_mock(monkeypatch): + def mocked_init(self, *args, **kwargs): + self.conversation_chain_ids = groups + + with unittest.mock.patch.object( + ConversationChainHandler, "__init__", new=mocked_init + ): + yield + + +def test_get_data_custom_validation_strategy( + cfg_mock, read_dataframe_drop_missing_labels_mock +): + cfg_mock.dataset.validation_strategy = "custom" + train_df, val_df = load_train_valid_data(cfg_mock) + assert len(train_df), len(val_df) == 100 + + +def test_get_data_automatic_split( + cfg_mock, read_dataframe_drop_missing_labels_mock, conversation_chain_ids_mock +): + cfg_mock.dataset.validation_strategy = "automatic" + train_df, val_df = load_train_valid_data(cfg_mock) + train_ids = set(train_df["id"].tolist()) + val_ids = set(val_df["id"].tolist()) + + assert len(train_ids.intersection(val_ids)) == 0 + assert len(train_ids) + len(val_ids) == 100 + + shared_groups = [ + i for i in groups if not train_ids.isdisjoint(i) and not val_ids.isdisjoint(i) + ] + assert len(shared_groups) == 0 + + +def test_oasst_data_automatic_split(tmp_path: pathlib.Path): + prepare_default_dataset_causal_language_modeling(tmp_path) + assert len(os.listdir(tmp_path)) > 0, tmp_path + cfg_mock = MagicMock() + for file in os.listdir(tmp_path): + if file.endswith(".pq"): + cfg_mock.dataset.train_dataframe = os.path.join(tmp_path, file) + + cfg_mock.dataset.system_column = "None" + cfg_mock.dataset.prompt_column = ("instruction",) + cfg_mock.dataset.answer_column 
= "output" + cfg_mock.dataset.parent_id_column = "parent_id" + + cfg_mock.dataset.validation_strategy = "automatic" + + for validation_size in [0.05, 0.1, 0.2, 0.3, 0.4, 0.5]: + cfg_mock.dataset.validation_size = validation_size + + train_df, val_df = load_train_valid_data(cfg_mock) + assert set(train_df["parent_id"].dropna().values).isdisjoint( + set(val_df["id"].dropna().values) + ) + assert set(val_df["parent_id"].dropna().values).isdisjoint( + set(train_df["id"].dropna().values) + ) + assert (len(val_df) / (len(train_df) + len(val_df))) == pytest.approx( + validation_size, 0.05 + ) diff --git a/tests/src/utils/test_export_utils.py b/tests/src/utils/test_export_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..91d993e1e22fe8654098b77b3f6d6f9178e01ee5 --- /dev/null +++ b/tests/src/utils/test_export_utils.py @@ -0,0 +1,19 @@ +from llm_studio.src.utils.export_utils import get_size_str + + +def test_get_size_atomic_units(): + assert get_size_str(1, input_unit="B") == "1 B" + assert get_size_str(1024, input_unit="B", output_unit="KB") == "1.0 KB" + assert get_size_str(1048576, input_unit="B", output_unit="MB") == "1.0 MB" + assert get_size_str(1073741824, input_unit="B", output_unit="GB") == "1.0 GB" + assert get_size_str(1099511627776, input_unit="B", output_unit="TB") == "1.0 TB" + + assert get_size_str(1024**5) == "1024.0 TB" + + +def test_get_size_str_dynamic(): + assert get_size_str(1500, input_unit="B", output_unit="dynamic") == "1.46 KB" + assert ( + get_size_str(1500, sig_figs=3, input_unit="B", output_unit="dynamic") + == "1.465 KB" + ) diff --git a/tests/src/utils/test_load_yaml_file.py b/tests/src/utils/test_load_yaml_file.py new file mode 100644 index 0000000000000000000000000000000000000000..690451582dbbf9e5a4378a8026bd083b8c71a86a --- /dev/null +++ b/tests/src/utils/test_load_yaml_file.py @@ -0,0 +1,77 @@ +import os + +from llm_studio.src.utils.config_utils import load_config_yaml + + +def test_load_config_yaml(): + 
test_directory = os.path.abspath(os.path.join(os.path.dirname(__file__), "../")) + cfg_path = os.path.join(test_directory, "test_data/cfg.yaml") + cfg = load_config_yaml(cfg_path) + + assert cfg.experiment_name == "test" + assert cfg.llm_backbone == "EleutherAI/pythia-12b-deduped" + assert cfg.output_directory == "output/user/test/" + + assert cfg.architecture.backbone_dtype == "float16" + assert cfg.architecture.force_embedding_gradients is False + assert cfg.architecture.gradient_checkpointing is False + assert cfg.architecture.intermediate_dropout == 0.0 + + assert cfg.augmentation.token_mask_probability == 0.0 + + assert cfg.dataset.add_eos_token_to_answer is True + assert cfg.dataset.add_eos_token_to_prompt is True + assert cfg.dataset.answer_column == "output" + assert cfg.dataset.data_sample == 0.1 + assert cfg.dataset.data_sample_choice == ["Train", "Validation"] + assert cfg.dataset.mask_prompt_labels is False + assert cfg.dataset.prompt_column == ("instruction",) + assert cfg.dataset.text_answer_separator == "\\n" + assert cfg.dataset.text_prompt_start == "" + assert cfg.dataset.train_dataframe == "data/user/train/train.csv" + assert cfg.dataset.validation_dataframe == "None" + assert cfg.dataset.validation_size == 0.01 + assert cfg.dataset.validation_strategy == "automatic" + + assert cfg.environment.compile_model is False + assert cfg.environment.find_unused_parameters is False + assert cfg.environment.gpus == ["0"] + assert cfg.environment.mixed_precision is True + assert cfg.environment.number_of_workers == 8 + assert cfg.environment.seed == -1 + + assert cfg.logging.logger == "None" + assert cfg.logging.neptune_project == "" + + assert cfg.prediction.batch_size_inference == 0 + assert cfg.prediction.do_sample is False + assert cfg.prediction.max_length_inference == 256 + assert cfg.prediction.min_length_inference == 2 + assert cfg.prediction.num_beams == 2 + assert cfg.prediction.repetition_penalty == 1.2 + assert cfg.prediction.stop_tokens == "" + 
assert cfg.prediction.temperature == 0.3 + + assert cfg.tokenizer.max_length == 144 + assert cfg.tokenizer.max_length_answer == 256 + assert cfg.tokenizer.max_length_prompt == 256 + assert cfg.tokenizer.padding_quantile == 1.0 + + assert cfg.training.batch_size == 3 + assert cfg.training.epochs == 0 + assert cfg.training.evaluate_before_training is True + assert cfg.training.evaluation_epochs == 1.0 + assert cfg.training.grad_accumulation == 1 + assert cfg.training.gradient_clip == 0.0 + assert cfg.training.learning_rate == 0.0001 + assert cfg.training.lora is True + assert cfg.training.lora_alpha == 16 + assert cfg.training.lora_dropout == 0.05 + assert cfg.training.lora_r == 4 + assert cfg.training.lora_target_modules == "" + assert cfg.training.optimizer == "AdamW" + assert cfg.training.save_best_checkpoint is False + assert cfg.training.schedule == "Cosine" + assert cfg.training.train_validation_data is False + assert cfg.training.warmup_epochs == 0.0 + assert cfg.training.weight_decay == 0.0 diff --git a/tests/src/utils/test_utils.py b/tests/src/utils/test_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..854a4e3c88b82d84b89fd548a231d91794917bab --- /dev/null +++ b/tests/src/utils/test_utils.py @@ -0,0 +1,31 @@ +import pytest + +from llm_studio.python_configs.text_dpo_modeling_config import ( + ConfigDPODataset, + ConfigProblemBase, +) +from llm_studio.src.utils.utils import PatchedAttribute + + +def test_patched_attribute(): + cfg = ConfigProblemBase( + dataset=ConfigDPODataset( + prompt_column=("prompt_column",), + answer_column="answer_column", + rejected_answer_column="rejected_answer_column", + parent_id_column="None", + ) + ) + with PatchedAttribute(cfg.dataset, "answer_column", "chosen_response"): + assert cfg.dataset.answer_column == "chosen_response" + + with PatchedAttribute(cfg.dataset, "answer_column", "new_answer_column"): + assert cfg.dataset.answer_column == "new_answer_column" + + assert cfg.dataset.answer_column == 
"answer_column" + + with PatchedAttribute(cfg.dataset, "new_property", "new_value"): + assert cfg.dataset.new_property == "new_value" # type: ignore[attr-defined] + + with pytest.raises(AttributeError): + cfg.dataset.new_property # type: ignore[attr-defined] diff --git a/tests/ui/README.md b/tests/ui/README.md new file mode 100644 index 0000000000000000000000000000000000000000..a58eda717ee781d5553427d66cb3cf9703a7b9bb --- /dev/null +++ b/tests/ui/README.md @@ -0,0 +1,68 @@ +# UI Testing for LLM-Studio + +## Overview + +The UI testing framework for LLM-Studio leverages the principles of Behaviour Driven Development (BDD), combining the power of Playwright for automation and Behave for writing UI tests. This approach offers the best of both worlds, as it makes the tests easily understandable for humans while remaining straightforward for machines to execute. By adopting this framework, it eliminates the complexities associated with using Selenium. + +## Design + +The framework is designed to be flexible, capable of running on local and remote machines seamlessly. It is agnostic to the location of the application, making it ideal for release testing across various instances of H2OAI Integrated Cloud (HAIC). + +## Execution + +### Local Machine Setup + +To set up and run UI tests locally, follow these steps: + +```bash +export LOCAL_LOGIN=True +export PYTEST_BASE_URL=localhost:10101 +make setup-dev +make llmstudio +make setup-ui +make test-ui-headed +``` + +### Remote Testing + +You can conduct UI testing for LLM-Studio on a remote machine using the following approaches: + +#### 1. **Running the App on a Remote Server** + +- Set up the app on a remote Ubuntu instance: + + ```bash + make setup-dev + make llmstudio + ``` + +- Obtain the app URL. +- Run the tests on the local machine: + + ```bash + export PYTEST_BASE_URL= + make setup-ui + make test-ui-headed + ``` + +#### 2. 
**Running the App on HAMC** _(with Okta Login) _ + + ```bash + export OKTA_USER= + export OKTA_PASSWORD= + export PYTEST_BASE_URL= + make test-ui + ``` + +#### 3. **Running the App on HAIC** _(with Keycloak Login) _ + + ```bash + export KEYCLOAK_USER= + export KEYCLOAK_PASSWORD= + export PYTEST_BASE_URL= + make test-ui + ``` + +### Test Results + +The results of the UI tests are stored in `reports/junit_ui.xml`. These reports provide valuable insights into the success and failure of the tests, aiding in the continuous improvement of the application. diff --git a/tests/ui/__init__.py b/tests/ui/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tests/ui/conftest.py b/tests/ui/conftest.py new file mode 100644 index 0000000000000000000000000000000000000000..ffdfca59302495fe9903b8ff4204e4c9c6f20ad8 --- /dev/null +++ b/tests/ui/conftest.py @@ -0,0 +1,14 @@ +import logging +from functools import partial, partialmethod + +import pytest + +logging.TRACE = 5 # type: ignore +logging.addLevelName(logging.TRACE, "TRACE") # type: ignore +logging.Logger.trace = partialmethod(logging.Logger.log, logging.TRACE) # type: ignore +logging.trace = partial(logging.log, logging.TRACE) # type: ignore + + +@pytest.fixture(scope="session") +def logger() -> logging.Logger: + return logging.getLogger("ui-tests") diff --git a/tests/ui/llm_studio.feature b/tests/ui/llm_studio.feature new file mode 100644 index 0000000000000000000000000000000000000000..1f2de4010217bfcb324e4e01d94a431201d80ed3 --- /dev/null +++ b/tests/ui/llm_studio.feature @@ -0,0 +1,25 @@ +Feature: LLM Studio + + Background: LLM Studio user + Given LLM Studio home page is opened + When I login to LLM Studio + Then I see the home page + + Scenario: Import dataset using filesystem + When I upload dataset train_full.pq + And I name the dataset train-full.pq + Then I should see the dataset train-full.pq + When I delete dataset train-full.pq + Then I 
should not see the dataset train-full.pq + + Scenario: Create experiment + When I create experiment test-experiment + And I update LLM Backbone to h2oai/llama2-0b-unit-test + And I tweak data sampling to 0.5 + And I tweak max length prompt to 128 + And I tweak max length answer to 128 + And I tweak max length to 32 + And I run the experiment + Then I should see the test-experiment should finish successfully + When I delete experiment test-experiment + Then I should not see the experiment test-experiment diff --git a/tests/ui/llm_studio_page.py b/tests/ui/llm_studio_page.py new file mode 100644 index 0000000000000000000000000000000000000000..e5346a25203cc8c2ef7677d503256811b538289a --- /dev/null +++ b/tests/ui/llm_studio_page.py @@ -0,0 +1,244 @@ +import os + +from hac_playwright.pages.base import BasePage +from playwright.sync_api import expect + +CLOUD_FILESYSTEM_PATH = "/home/llmstudio/mount/data/user/oasst" +LOCAL_FILESYSTEM_PATH = os.path.join(os.getcwd(), "data/user/oasst") + + +class LLMStudioPage(BasePage): + # Constants for selectors + DATASET_IMPORT_SOURCE_SELECTOR = "dataset/import/source" + CONTINUE_BUTTON_SELECTOR = "button[name='Continue']" + DATASET_LIST_DELETE_SELECTOR = "dataset/list/delete" + DATASET_DELETE_DIALOG_SELECTOR = "dataset/delete/dialog" + DATASET_DELETE_SELECTOR = "dataset/delete" + EXPERIMENT_RUN_SELECTOR = "experiment/start/run" + EXPERIMENT_NAME_SELECTOR = "experiment/start/cfg/experiment_name" + EXPERIMENT_LIST_DELETE_SELECTOR = "experiment/list/delete" + EXPERIMENT_DELETE_DIALOG_SELECTOR = "experiment/delete/dialog" + EXPERIMENT_DELETE_SELECTOR = "experiment/delete" + EXPERIMENT_STATUS_SELECTOR = "[data-automation-key='status']" + EXPERIMENT_INDEX_SELECTOR = "[data-automation-key='name']" + FILESYSTEM_SELECTOR = "dataset/import/local_path" + FILENAME_SELECTOR = "dataset/import/name" + S3_BUCKET_SELECTOR = "dataset/import/s3_bucket" + S3_ACCESS_KEY_SELECTOR = "dataset/import/s3_access_key" + S3_SECRET_KEY_SELECTOR = 
"dataset/import/s3_secret_key" + S3_FILENAME_SELECTOR = "dataset/import/s3_filename" + AZURE_CONN_STRING = "dataset/import/azure_conn_string" + AZURE_CONTAINER = "dataset/import/azure_container" + AZURE_FILENAME = "dataset/import/azure_filename" + KAGGLE_COMMAND = "dataset/import/kaggle_command" + KAGGLE_USERNAME = "dataset/import/kaggle_username" + KAGGLE_SECRET_KEY = "dataset/import/kaggle_secret_key" + DATA_SAMPLING = "experiment/start/cfg/data_sample" + MAX_LENGTH_PROMPT = "experiment/start/cfg/max_length_prompt" + MAX_LENGTH_ANSWER = "experiment/start/cfg/max_length_answer" + MAX_LENGTH = "experiment/start/cfg/max_length" + MAX_LENGTH_INFERENCE = "experiment/start/cfg/max_length_inference" + EXPERIMENT_REFRESH_SELECTOR = "experiment/list/refresh" + GPU_WARNING_SELECTOR = "experiment/start/error/proceed" + + def assert_dataset_import(self, dataset_name: str): + dataset = self.page.get_by_role("button", name=dataset_name) + # Assert that the element is not None and clickable + assert dataset is not None + dataset.click() + + def get_by_test_id(self, test_id): + selector = f'[data-test="{test_id}"]' + return self.page.locator(selector) + + def open_home_page(self): + self.page.get_by_role("button", name="Home").click() + + def open_app_settings(self): + self.page.get_by_role("button", name="Settings").click() + + def dataset_name(self, filename): + self.get_by_test_id(self.FILENAME_SELECTOR).fill(filename) + self.continue_button().click() + self.continue_button().click() + + def import_dataset_from_filesystem(self, filepath: str): + self.import_dataset("Local") + if "LOCAL_LOGIN" in os.environ: + path = f"{LOCAL_FILESYSTEM_PATH}/{filepath}" + else: + path = f"{CLOUD_FILESYSTEM_PATH}/{filepath}" + self.get_by_test_id(self.FILESYSTEM_SELECTOR).fill(path) + self.continue_button().click() + + def continue_button(self): + return self.page.get_by_role("button", name="Continue") + + def import_dataset(self, source: str): + button = self.page.get_by_role("button", 
name="Import dataset") + button.click() + # FIX: Selectors.set_test_id_attribute(self, "data-test") + dropdown = self.get_by_test_id(self.DATASET_IMPORT_SOURCE_SELECTOR) + dropdown.click() + self.page.get_by_role("option", name=source).click() + + def import_dataset_from_aws( + self, bucket: str, access_key: str, secret_key: str, dataset_name: str + ): + self.import_dataset("AWS S3") + self.get_by_test_id(self.S3_BUCKET_SELECTOR).fill(bucket) + self.get_by_test_id(self.S3_ACCESS_KEY_SELECTOR).fill(access_key) + self.get_by_test_id(self.S3_SECRET_KEY_SELECTOR).fill(secret_key) + self.get_by_test_id(self.S3_FILENAME_SELECTOR).fill(dataset_name) + self.continue_button().click() + + def import_dataset_from_azure( + self, connection: str, container: str, dataset_name: str + ): + self.import_dataset("Azure Blob Storage") + self.get_by_test_id(self.AZURE_CONN_STRING).fill(connection) + self.get_by_test_id(self.AZURE_CONTAINER).fill(container) + self.get_by_test_id(self.AZURE_FILENAME).fill(dataset_name) + self.continue_button().click() + + def import_dataset_from_kaggle( + self, kaggle_command: str, username: str, secret: str + ): + self.import_dataset("Kaggle") + self.get_by_test_id(self.KAGGLE_COMMAND).fill(kaggle_command) + self.get_by_test_id(self.KAGGLE_USERNAME).fill(username) + self.get_by_test_id(self.KAGGLE_SECRET_KEY).fill(secret) + self.continue_button().click() + + def delete_dataset(self, dataset_name: str): + # Go to dataset page + self.view_datasets() + self.get_by_test_id(self.DATASET_LIST_DELETE_SELECTOR).click() + # Locate dataset to delete + self.page.get_by_role("gridcell", name=dataset_name).click() + # Confirm dataset deletion + self.get_by_test_id(self.DATASET_DELETE_DIALOG_SELECTOR).click() + # Delete dataset + self.get_by_test_id(self.DATASET_DELETE_SELECTOR).click() + + def view_datasets(self): + self.page.get_by_role("button", name="View datasets").click() + + def assert_dataset_deletion(self, dataset_name: str): + self.view_datasets() + dataset 
= self.page.get_by_role("button", name=dataset_name) + # Assert that the element not found + expect(dataset).not_to_be_visible() + + def create_experiment(self, name: str): + self.page.get_by_role("button", name="Create experiment").click() + self.experiment_name(name) + + def slider(self, slider_selector, target_value: str): + is_completed = False + i = 0.0 + # Get the slider element + slider = self.get_by_test_id(slider_selector) + slider.click() + # Get the bounding box of the slider + bounding_box = slider.bounding_box() + x1 = bounding_box["x"] + y = bounding_box["y"] + bounding_box["height"] / 2 + + while not is_completed: + self.page.mouse.move(x1, y) + self.page.mouse.down() + x2 = bounding_box["x"] + bounding_box["width"] * float(i) / 100 + self.page.mouse.move(x2, y) + self.page.mouse.up() + value_now = slider.get_attribute("aria-valuenow") + + if value_now == target_value: + is_completed = True + else: + # Move the slider a little bit (adjust the step as needed) + step = 0.1 # Adjust this value based on your requirements + x1 = x2 + i += step + + def run_experiment(self): + self.get_by_test_id(self.EXPERIMENT_RUN_SELECTOR).click() + locator = self.get_by_test_id(self.GPU_WARNING_SELECTOR) + if locator.is_visible(): + locator.click() + + def experiment_name(self, name: str): + self.get_by_test_id(self.EXPERIMENT_NAME_SELECTOR).fill(name) + + def llm_backbone(self, value: str): + self.page.get_by_role("combobox", name="LLM Backbone").fill(value) + + def data_sample(self, value): + self.slider(self.DATA_SAMPLING, value) + + def max_length_prompt(self, value): + self.slider(self.MAX_LENGTH_PROMPT, value) + + def max_length_answer(self, value): + self.slider(self.MAX_LENGTH_ANSWER, value) + + def max_length(self, value): + self.slider(self.MAX_LENGTH, value) + + def max_length_inference(self, value): + self.slider(self.MAX_LENGTH_INFERENCE, value) + + def view_experiment_page(self): + self.page.get_by_role("button", name="View experiments").click() + + def 
view_experiment(self, experiment_name: str): + self.view_experiment_page() + i = self.find_experiment_index(experiment_name) + status = self.page.locator( + f"{self.EXPERIMENT_STATUS_SELECTOR} >> nth={i}" + ).inner_text() + self.page.reload() + while True: + if status in ["queued", "running"]: + self.page.reload() + self.view_experiment_page() + status = self.page.locator( + f"{self.EXPERIMENT_STATUS_SELECTOR} >> nth={i}" + ).inner_text() + elif status == "finished": + break + + def find_experiment_index(self, experiment_name): + index = 0 + while index < 100: # number of experiments + # Get the innerText of the element with the specified selector + inner_text = self.page.locator( + f"{self.EXPERIMENT_INDEX_SELECTOR} >> nth={index}" + ).inner_text() + # Check if the current name matches the target name + if inner_text != experiment_name: + index += 1 + else: + break + return index + + def delete_experiment(self, experiment_name: str): + # Go to experiment page + self.view_experiment_page() + # Click on Delete experiments button + self.get_by_test_id(self.EXPERIMENT_LIST_DELETE_SELECTOR).click() + # Locate experiment to delete + self.page.get_by_role("gridcell", name=experiment_name).locator( + f'div:has-text("{experiment_name}")' + ).first.click() + # Delete experiment + self.get_by_test_id(self.EXPERIMENT_DELETE_DIALOG_SELECTOR).click() + # Confirm experiment deletion + self.get_by_test_id(self.EXPERIMENT_DELETE_SELECTOR).click() + + def assert_experiment_deletion(self, experiment_name: str): + # Go to experiment page + self.view_experiment_page() + experiment = self.page.get_by_role("button", name=experiment_name) + # Assert that the element not found + expect(experiment).not_to_be_visible() diff --git a/tests/ui/test.py b/tests/ui/test.py new file mode 100644 index 0000000000000000000000000000000000000000..2557ea932121d2ea6bb0b0437d3622d166523fe6 --- /dev/null +++ b/tests/ui/test.py @@ -0,0 +1,111 @@ +import logging +import os + +from playwright.sync_api import 
Page +from pytest_bdd import given, parsers, scenarios, then, when + +from .llm_studio_page import LLMStudioPage +from .utils import login + +scenarios("llm_studio.feature") + + +@given("LLM Studio home page is opened") +def open_llm_studio(page: Page, base_url): + page.goto(base_url) + + +@when("I login to LLM Studio", target_fixture="llm_studio") +def login_to_llm_studio(logger: logging.Logger, page: Page, base_url: str): + okta_user = os.environ.get("OKTA_USER") + okta_password = os.environ.get("OKTA_PASSWORD") + keycloak_user = os.environ.get("KEYCLOAK_USER") + keycloak_password = os.environ.get("KEYCLOAK_PASSWORD") + if "LOCAL_LOGIN" not in os.environ: + if okta_user and okta_password: + login(page, "okta", okta_user, okta_password) + elif keycloak_user and keycloak_password: + login(page, "keycloak", keycloak_user, keycloak_password) + + return LLMStudioPage(logger, page, base_url) + + +@then(parsers.parse("I should see the dataset {dataset_name}")) +def view_datasets(llm_studio: LLMStudioPage, dataset_name: str): + llm_studio.assert_dataset_import(dataset_name) + + +@when(parsers.parse("I upload dataset {filepath}")) +def upload_dataset_using_filesystem(llm_studio: LLMStudioPage, filepath: str): + llm_studio.import_dataset_from_filesystem(filepath) + + +@when(parsers.parse("I name the dataset {dataset_name}")) +def dataset_name(llm_studio: LLMStudioPage, dataset_name: str): + llm_studio.dataset_name(dataset_name) + + +@then("I see the home page") +def view_home_page(llm_studio: LLMStudioPage): + llm_studio.open_home_page() + + +@when(parsers.parse("I delete dataset {dataset_name}")) +def delete_dataset(llm_studio: LLMStudioPage, dataset_name: str): + llm_studio.delete_dataset(dataset_name) + + +@then(parsers.parse("I should not see the dataset {dataset_name}")) +def assert_dataset_deletion(llm_studio: LLMStudioPage, dataset_name: str): + llm_studio.view_datasets() + llm_studio.assert_dataset_deletion(dataset_name) + + +@when(parsers.parse("I create 
experiment {experiment_name}")) +def create_experiment(llm_studio: LLMStudioPage, experiment_name: str): + llm_studio.create_experiment(experiment_name) + + +@when(parsers.parse("I tweak data sampling to {value}")) +def tweak_data_sampling(llm_studio: LLMStudioPage, value: str): + llm_studio.data_sample(value) + + +@when(parsers.parse("I update LLM Backbone to {llm_backbone}")) +def update_llm_backbone(llm_studio: LLMStudioPage, llm_backbone: str): + llm_studio.llm_backbone(llm_backbone) + + +@when(parsers.parse("I tweak max length prompt to {value}")) +def tweak_max_length_prompt(llm_studio: LLMStudioPage, value: str): + llm_studio.max_length_prompt(value) + + +@when(parsers.parse("I tweak max length answer to {value}")) +def tweak_max_length_answer(llm_studio: LLMStudioPage, value: str): + llm_studio.max_length_answer(value) + + +@when(parsers.parse("I tweak max length to {value}")) +def tweak_max_length(llm_studio: LLMStudioPage, value: str): + llm_studio.max_length(value) + + +@when("I run the experiment") +def run_experiment(llm_studio: LLMStudioPage): + llm_studio.run_experiment() + + +@then(parsers.parse("I should see the {experiment_name} should finish successfully")) +def view_experiment(llm_studio: LLMStudioPage, experiment_name: str): + llm_studio.view_experiment(experiment_name) + + +@when(parsers.parse("I delete experiment {experiment_name}")) +def delete_experiment(llm_studio: LLMStudioPage, experiment_name: str): + llm_studio.delete_experiment(experiment_name) + + +@then(parsers.parse("I should not see the experiment {experiment_name}")) +def assert_experiment_deletion(llm_studio: LLMStudioPage, experiment_name: str): + llm_studio.assert_experiment_deletion(experiment_name) diff --git a/tests/ui/utils.py b/tests/ui/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..ab6288177bce0a26fdcd90c108e06093636207d1 --- /dev/null +++ b/tests/ui/utils.py @@ -0,0 +1,19 @@ +from hac_playwright.main import keycloak_login, okta_login, 
okta_otp_local +from playwright.sync_api import Page + + +def login( + page: Page, + login_type: str, + username: str, + password: str, + secret: str = "", +): + if login_type == "keycloak": + keycloak_login(page, username, password) + elif login_type == "okta": + okta_login(page, username, password) + elif login_type == "okta-otp-local": + okta_otp_local(page, username, password, secret) + else: + raise ValueError(f"Unknown login type '{login_type}'") diff --git a/train.py b/train.py new file mode 100644 index 0000000000000000000000000000000000000000..f34109a9d2d90ba5af157bb11d44a7fdfdd0525e --- /dev/null +++ b/train.py @@ -0,0 +1,732 @@ +import os + +os.environ["OMP_NUM_THREADS"] = "1" +os.environ["MKL_NUM_THREADS"] = "1" +os.environ["OPENBLAS_NUM_THREADS"] = "1" +os.environ["VECLIB_MAXIMUM_THREADS"] = "1" +os.environ["NUMEXPR_NUM_THREADS"] = "1" +os.environ["TOKENIZERS_PARALLELISM"] = "false" + +import argparse +import gc +import logging +import sys +import time +from distutils import util +from typing import Any, Callable, Dict, Tuple + +import deepspeed +import numpy as np +import pandas as pd +import torch +from torch.cuda.amp import GradScaler, autocast +from torch.utils.data import DataLoader +from tqdm import tqdm +from transformers.deepspeed import HfDeepSpeedConfig + +from llm_studio.src.loggers import MainLogger +from llm_studio.src.utils.config_utils import ( + load_config_py, + load_config_yaml, + save_config_yaml, +) +from llm_studio.src.utils.data_utils import ( + get_data, + get_inference_batch_size, + get_train_dataloader, + get_train_dataset, + get_val_dataloader, + get_val_dataset, +) +from llm_studio.src.utils.exceptions import LLMTrainingException +from llm_studio.src.utils.export_utils import save_prediction_outputs +from llm_studio.src.utils.gpu_utils import sync_across_processes +from llm_studio.src.utils.logging_utils import ( + TqdmToLogger, + initialize_logging, + log_plot, + write_flag, +) +from llm_studio.src.utils.modeling_utils 
def run_eval(
    cfg,
    model: torch.nn.Module,
    val_dataloader: DataLoader,
    val_df: pd.DataFrame,
    mode: str = "validation",
) -> Tuple:
    """Runs the evaluation loop.

    Runs inference over ``val_dataloader``, syncs predictions across ranks
    when distributed inference is enabled, and post-processes/logs results
    on rank 0 only.

    Args:
        cfg: config object
        model: trained model
        val_dataloader: validation Dataloader
        val_df: validation DataFrame
        mode: label used for logging, e.g. "validation"

    Returns:
        Tuple of (validation loss, validation metric) on rank 0;
        (0, 0) on all other ranks (their data has already been synced).
    """
    with torch.no_grad():
        # Preserve train/eval mode across the inference call.
        is_training = model.training
        model.eval()
        val_data: Dict[str, Any] = run_inference(
            cfg, model, val_dataloader, mode
        )  # type: ignore
        model.train(is_training)

    # Sync validation predictions across GPUs
    if cfg.environment._distributed and cfg.environment._distributed_inference:
        for key, value in val_data.items():
            val_data[key] = sync_across_processes(
                value, cfg.environment._world_size, group=cfg.environment._cpu_comm
            )

    if cfg.environment._local_rank != 0:
        # data has been synced, so we can return early on other ranks
        if cfg.environment._distributed:
            torch.distributed.barrier()
        return 0, 0

    # Drop any extra observations (sync can pad to world_size multiples)
    for k, v in val_data.items():
        val_data[k] = v[: len(val_dataloader.dataset)]  # type: ignore

    val_data = val_dataloader.dataset.postprocess_output(  # type: ignore
        cfg=cfg, df=val_df, output=val_data
    )
    # Default of tensor(0) keeps the mean well-defined when no loss was recorded.
    val_loss = np.mean(val_data.get("loss", torch.tensor(0)).float().cpu().numpy())
    # postprocess_output only runs on rank 0 to save time/memory
    val_metric = np.mean(val_data["metrics"])
    logger.info(f"{mode.capitalize()} {cfg.prediction.metric}: {val_metric:.5f}")

    # Forward loss and any "additional_log_*" entries to the experiment logger.
    for key in val_data:
        if key.startswith("additional_log_") or key == "loss":
            value = np.mean(val_data[key].float().cpu().numpy())
            key = key.replace("additional_log_", "")
            logger.info(f"Mean {mode} {key}: {value:.5f}")
            cfg.logging._logger.log(
                mode,
                key,
                value,
                step=cfg.environment._curr_step,
            )
    cfg.logging._logger.log(
        mode, cfg.prediction.metric, val_metric, step=cfg.environment._curr_step
    )

    # Log plots
    if val_df is not None:
        plot = cfg.logging.plots_class.plot_validation_predictions(
            val_outputs=val_data, cfg=cfg, val_df=val_df, mode="validation"
        )
        log_plot(cfg, plot, "validation_predictions")

    save_predictions(cfg, val_data, val_dataloader, val_df, mode)

    if cfg.environment._distributed:
        torch.distributed.barrier()

    return val_loss, val_metric
def run_train(
    cfg: Any,
    model: torch.nn.Module,
    optimizer,
    scheduler,
    epoch_steps,
    train_dataloader,
    val_dataloader,
    val_df: pd.DataFrame,
):
    """Runs the training loop.

    Args:
        cfg: config object
        model: model (possibly DDP/deepspeed-wrapped)
        optimizer: optimizer instance
        scheduler: LR scheduler, stepped per batch (may be None)
        epoch_steps: number of batches per epoch
        train_dataloader: custom training Dataloader
        val_dataloader: custom validation Dataloader
        val_df: validation DataFrame

    Returns:
        Tuple of (validation loss, validation metric) from the last evaluation.
    """
    if (
        hasattr(cfg.augmentation, "neftune_noise_alpha")
        and cfg.augmentation.neftune_noise_alpha > 0
    ):
        activate_neftune(model, cfg.augmentation.neftune_noise_alpha)

    # GradScaler is only enabled for float16; bfloat16 does not need scaling.
    scaler: GradScaler | None = None
    if cfg.environment.mixed_precision:
        scaler = GradScaler(
            enabled=(cfg.environment.mixed_precision_dtype == "float16")
        )

    optimizer.zero_grad(set_to_none=True)

    # Prepare NLP Augmentation
    nlp_augment = None
    if hasattr(cfg.augmentation, "nlp_augmentations_class"):
        nlp_augment = cfg.augmentation.nlp_augmentations_class(cfg=cfg)

    start_epoch = 0

    # Direction of improvement depends on the metric ("max" vs "min").
    _, metric_mode, _ = cfg.prediction.metric_class.get(cfg.prediction.metric)
    objective_op: Callable[[float, float], bool]
    if metric_mode == "max":
        best_val_metric = -np.inf
        objective_op = np.greater
    else:
        best_val_metric = np.inf
        objective_op = np.less

    # NOTE(review): if evaluate_before_training is False and no in-loop
    # evaluation ever triggers (evaluation_step > epoch_steps), the final
    # `return val_loss, val_metric` would raise NameError — confirm callers
    # always force at least one evaluation.
    if cfg.training.evaluate_before_training:
        val_loss, val_metric = run_eval(
            cfg=cfg, model=model, val_dataloader=val_dataloader, val_df=val_df
        )

    for epoch in range(start_epoch, cfg.training.epochs):
        # Distinct but deterministic seed per (epoch, rank, worker).
        set_seed(
            cfg.environment._seed
            + epoch * cfg.environment._world_size * cfg.environment.number_of_workers
            + cfg.environment._local_rank * cfg.environment.number_of_workers
        )
        if cfg.environment._local_rank == 0:
            logger.info(f"Training Epoch: {epoch + 1} / {cfg.training.epochs}")

        if (
            cfg.environment._distributed
            and not cfg.environment.use_deepspeed
            and hasattr(train_dataloader.sampler, "set_epoch")
        ):
            train_dataloader.sampler.set_epoch(epoch)  # type: ignore

        tqdm_out = TqdmToLogger(logger, level=logging.INFO)
        progress_bar = tqdm(
            total=epoch_steps,
            disable=cfg.environment._local_rank != 0,
            file=tqdm_out,
            ascii=True,
            desc="train loss",
            mininterval=0,
        )
        tr_it = iter(train_dataloader)

        losses = []
        model.train()

        # Progress logs every ~5% of the epoch; evaluation cadence from config.
        log_update_steps = max(epoch_steps // 20, 1)
        evaluation_step = max(int(epoch_steps * cfg.training.evaluation_epochs), 1)
        logger.info(f"Evaluation step: {evaluation_step}")

        for itr, data in enumerate(tr_it):
            cfg.environment._curr_step += (
                cfg.training.batch_size * cfg.environment._world_size
            )

            # Batch to device
            batch = cfg.dataset.dataset_class.batch_to_device(
                data, cfg.environment._device
            )

            # NLP augmentation
            if nlp_augment is not None:
                batch = nlp_augment(batch)

            # Plot first batch
            if epoch == 0 and itr == 0 and cfg.environment._local_rank == 0:
                plot = cfg.logging.plots_class.plot_batch(batch=batch, cfg=cfg)
                log_plot(cfg, plot, "train_data")

            # only need to sync gradients at last step of grad accumulation
            model.require_backward_grad_sync = itr % cfg.training.grad_accumulation == 0

            # Forward pass
            with autocast(
                enabled=cfg.environment.mixed_precision,
                dtype=get_torch_dtype(cfg.environment.mixed_precision_dtype),
            ):
                output_dict = model.forward(batch)

            loss = output_dict["loss"]
            # NOTE(review): `~np.isfinite(...)` relies on bitwise NOT of a numpy
            # bool; it works, but `not np.isfinite(...)` would read clearer.
            # Early steps (itr <= 20 of the first epoch) tolerate NaN spikes.
            if ~np.isfinite(loss.item()) and (epoch > start_epoch or itr > 20):
                raise LLMTrainingException(
                    "NaN caught in loss during training. "
                    "Please, reduce learning rate, change dtype, "
                    "or disable mixed precision. Alternatively, "
                    "gradient clipping may help to stabilize training."
                )
            losses.append(loss.item())

            # loss is a mean loss per batch/sample
            # as grad_accumulations sums up the gradients, this loss must be scaled
            # by the number of grad_accumulations, to have similar behavior for
            # BS * grad_accumulations = const.
            if cfg.training.grad_accumulation != 1:
                loss = loss / cfg.training.grad_accumulation

            # Backward pass
            if (
                cfg.environment.mixed_precision
                and len(cfg.environment.gpus)
                and not cfg.environment.use_deepspeed
            ):
                scaler.scale(loss).backward()  # type: ignore
                if itr % cfg.training.grad_accumulation == 0:
                    if cfg.training.gradient_clip > 0:
                        # Unscale before clipping so the threshold is in real units.
                        scaler.unscale_(optimizer)  # type: ignore
                        torch.nn.utils.clip_grad_norm_(
                            model.parameters(), cfg.training.gradient_clip
                        )
                    scaler.step(optimizer)  # type: ignore
                    scaler.update()
                    optimizer.zero_grad(set_to_none=True)
            else:
                if cfg.environment.use_deepspeed:
                    # deepspeed owns the backward/step bookkeeping
                    model.backward(loss)  # type: ignore[operator]
                else:
                    loss.backward()
                if itr % cfg.training.grad_accumulation == 0:
                    if cfg.training.gradient_clip > 0:
                        torch.nn.utils.clip_grad_norm_(
                            model.parameters(), cfg.training.gradient_clip
                        )
                    optimizer.step()
                    optimizer.zero_grad(set_to_none=True)

            if cfg.environment._distributed:
                torch.cuda.synchronize(device=cfg.environment._local_rank)

            if scheduler is not None:
                scheduler.step()

            # All experiment logging happens on rank 0 only.
            if cfg.environment._local_rank == 0:
                cfg.logging._logger.log(
                    "train", "loss", losses[-1], step=cfg.environment._curr_step
                )
                cfg.logging._logger.log(
                    "meta",
                    "lr",
                    optimizer.param_groups[0]["lr"],
                    step=cfg.environment._curr_step,
                )
                if cfg.training.differential_learning_rate_layers:
                    # param_groups[2] holds the differential-LR group
                    cfg.logging._logger.log(
                        "meta",
                        "lr_diff",
                        optimizer.param_groups[2]["lr"],
                        step=cfg.environment._curr_step,
                    )

                cfg.logging._logger.log(
                    "internal",
                    "current_step",
                    cfg.environment._curr_step,
                    step=cfg.environment._curr_step,
                )
                for key in output_dict:
                    if key.startswith("additional_log_"):
                        cfg.logging._logger.log(
                            "train",
                            key.replace("additional_log_", ""),
                            output_dict[key].item(),
                            step=cfg.environment._curr_step,
                        )

                # Show logs each 5% of the epoch (only if doing per epoch evaluation)
                if (itr + 1) % log_update_steps == 0 or itr == epoch_steps - 1:
                    progress_bar.set_description(
                        f"train loss: {np.mean(losses[-10:]):.2f}", refresh=False
                    )
                    if (itr + 1) % log_update_steps == 0:
                        progress_bar.update(log_update_steps)
                    else:
                        progress_bar.update(epoch_steps % log_update_steps)

            del output_dict

            # Validation loop
            if (itr + 1) % evaluation_step == 0:
                if cfg.training.evaluation_epochs == 1:
                    progress_bar.close()

                # TODO: Move back after fixing slow generation of deepspeed.
                if not cfg.training.save_best_checkpoint:
                    checkpoint_path = cfg.output_directory
                    if cfg.environment._local_rank == 0:
                        logger.info(
                            f"Saving last model checkpoint to {checkpoint_path}"
                        )
                    save_checkpoint(model=model, path=checkpoint_path, cfg=cfg)

                val_loss, val_metric = run_eval(
                    cfg=cfg, model=model, val_dataloader=val_dataloader, val_df=val_df
                )

                if cfg.training.save_best_checkpoint:
                    if objective_op(val_metric, best_val_metric):
                        checkpoint_path = cfg.output_directory
                        if cfg.environment._local_rank == 0:
                            logger.info(
                                f"Saving best model checkpoint: "
                                f"val_{cfg.prediction.metric} {best_val_metric:.5} -> "
                                f"{val_metric:.5} to {checkpoint_path}"
                            )
                        save_checkpoint(model=model, path=checkpoint_path, cfg=cfg)
                        best_val_metric = val_metric

                # run_eval leaves the model in its prior mode; force train mode.
                model.train()

        progress_bar.close()
        del progress_bar

        if cfg.environment._distributed:
            torch.cuda.synchronize(device=cfg.environment._local_rank)
            torch.distributed.barrier()

        if cfg.environment._local_rank == 0:
            cfg.logging._logger.log(
                "internal", "epoch", epoch + 1, step=cfg.environment._curr_step
            )

    if cfg.environment._distributed:
        torch.distributed.barrier()

    return val_loss, val_metric
def run(cfg: Any) -> None:
    """Runs the routine.

    Sets up the (optionally distributed) environment, seeds, data, model,
    optimizer and logging, then delegates to ``run_train`` and writes the
    final status/runtime flags.

    Args:
        cfg: config object with all the hyperparameters
    """

    if cfg.problem_type == "text_rlhf_language_modeling":
        raise DeprecationWarning(
            "text_rlhf_language_modeling is deprecated. "
            "Please use DPO Modeling instead."
        )

    os.makedirs(cfg.output_directory, exist_ok=True)

    # Force evaluation if user trains 0 epochs
    cfg.training.evaluate_before_training = (
        cfg.training.evaluate_before_training or cfg.training.epochs == 0
    )

    # Set the random seed for reproducibility
    # either random seed when user set it -1 or deterministic user chosen seed
    if cfg.environment.seed < 0:
        cfg.environment._seed = np.random.randint(1_000_000)
    else:
        cfg.environment._seed = cfg.environment.seed

    if (
        cfg.architecture.backbone_dtype in ["int8", "int4"]
        and cfg.environment.use_deepspeed
    ):
        raise ValueError(
            f"Deepspeed do not support backbone type {cfg.architecture.backbone_dtype}."
            + " Please set backbone type to float16 or bfloat16 for using deepspeed."
        )

    # Prepare environment
    if "WORLD_SIZE" in os.environ:
        cfg.environment._distributed = int(os.environ["WORLD_SIZE"]) > 1
    else:
        cfg.environment._distributed = False

    if cfg.environment._distributed:
        cfg.environment._local_rank = int(os.environ["LOCAL_RANK"])
        cfg.environment._device = "cuda:%d" % cfg.environment._local_rank
        if cfg.environment.use_deepspeed:
            deepspeed.init_distributed()
        else:
            torch.distributed.init_process_group(backend="nccl", init_method="env://")
        # gloo group for CPU-side collectives (e.g. syncing numpy arrays)
        cfg.environment._cpu_comm = torch.distributed.new_group(backend="gloo")

        cfg.environment._world_size = torch.distributed.get_world_size()
        cfg.environment._rank = torch.distributed.get_rank()
        torch.cuda.set_device(cfg.environment._rank)
        logger.info(
            f"Training in distributed mode with multiple processes, "
            f"1 GPU per process. Process {cfg.environment._rank}, "
            f"total: {cfg.environment._world_size} "
            f"local rank: {cfg.environment._local_rank}."
        )

        # Sync the random seed
        cfg.environment._seed = int(
            sync_across_processes(
                np.array([cfg.environment._seed]),
                cfg.environment._world_size,
                group=cfg.environment._cpu_comm,
            )[0]
        )
    else:
        cfg.environment._local_rank = 0
        cfg.environment._device = (
            "cuda:0"
            if (torch.cuda.is_available() and len(cfg.environment.gpus) > 0)
            else "cpu"
        )
        if cfg.environment._device == "cpu":
            logger.warning("Training on CPU. This will be slow.")

    set_seed(cfg.environment._seed)
    if cfg.environment._local_rank == 0:
        logger.info(f"Problem Type: {cfg.problem_type}")
        logger.info(f"Global random seed: {cfg.environment._seed}")

    cfg = set_environment(cfg)

    # we need to get train dataframe and number of labels if not set or in training mode
    if cfg.environment._local_rank == 0:
        logger.info("Preparing the data...")
    train_df, val_df = get_data(cfg)

    # Cap GPT-based evaluation to avoid surprise API costs on large val sets.
    if (
        len(val_df) > int(os.getenv("GPT_EVAL_MAX", 100))
        and "GPT" in cfg.prediction.metric
    ):
        logger.warning(
            f"More than {os.getenv('GPT_EVAL_MAX', 100)} validation records. "
            "Safeguarding against OpenAI API costs. Setting metric to BLEU. "
            "Change GPT_EVAL_MAX to run GPT validation."
        )
        cfg.prediction.metric = "BLEU"

    # prepare data
    if cfg.environment._local_rank == 0:
        logger.info("Preparing train and validation data")
    train_dataset = get_train_dataset(train_df=train_df, cfg=cfg)
    val_dataset = get_val_dataset(val_df=val_df, cfg=cfg)
    train_dataloader = get_train_dataloader(train_ds=train_dataset, cfg=cfg)
    val_dataloader = get_val_dataloader(val_ds=val_dataset, cfg=cfg)

    # Step totals are only needed for rank-0 logging below.
    if cfg.environment._local_rank == 0:
        total_training_steps = (
            cfg.training.epochs
            * len(train_dataloader)
            * cfg.training.batch_size
            * cfg.environment._world_size
        )

        num_eval_epochs = get_number_of_validation_epochs(
            training_epochs=cfg.training.epochs,
            evaluation_epochs=cfg.training.evaluation_epochs,
        )
        val_batch_size = get_inference_batch_size(cfg)

        # if zero shot, validate once before training
        total_validation_steps = (
            len(val_dataloader)
            * (num_eval_epochs + int(cfg.training.evaluate_before_training))
            * val_batch_size
            * cfg.environment._world_size
        )

    # Prepare model and optimizer
    if cfg.environment.use_deepspeed:
        ds_config = get_ds_config(cfg)
        # keep this object alive.
        dschf = HfDeepSpeedConfig(ds_config)  # noqa: F841
    with torch.device(cfg.environment._device):
        model = cfg.architecture.model_class(cfg)
        check_disk_space(model, cfg.output_directory)

    # load model weights
    if cfg.architecture.pretrained_weights != "":
        # Do not load strictly if continue training from the previous experiment
        load_checkpoint(cfg, model, strict=cfg.training.epochs == -1)
    model.to(cfg.environment._device)

    epoch_steps = len(train_dataloader)
    optimizer = get_optimizer(model=model, cfg=cfg)
    scheduler = get_scheduler(cfg=cfg, optimizer=optimizer, epoch_steps=epoch_steps)

    if getattr(cfg.architecture, "force_embedding_gradients"):
        # Embeddings kept in float32 so their gradients flow in mixed precision.
        for module in model.modules():
            if isinstance(module, torch.nn.Embedding):
                for param in module.parameters():
                    param.requires_grad = True
                    param.data = param.data.float()

    if cfg.environment._distributed:
        (
            model,
            optimizer,
            train_dataloader,
            val_dataloader,
            scheduler,
        ) = wrap_model_distributed(
            model=model,
            optimizer=optimizer,
            lr_scheduler=scheduler,
            train_dataloader=train_dataloader,
            val_dataloader=val_dataloader,
            cfg=cfg,
        )

    if cfg.environment.compile_model:
        # deepspeed do not support torch.compile
        if cfg.environment.use_deepspeed:
            logger.warning(
                "Deepspeed is active, but it doesn't support torch.compile."
                "Skipping compilation for this experiment."
            )
        else:
            if cfg.environment._distributed:
                model.module.backbone = torch.compile(model.module.backbone)
            else:
                model.backbone = torch.compile(model.backbone)

    # Force settings when saving best checkpoint
    if cfg.training.save_best_checkpoint:
        cfg.training.train_validation_data = False

    # reset steps
    cfg.environment._curr_step = 0
    cfg.environment._curr_val_step = 0

    gc.collect()

    global_start_time = time.time()
    if cfg.environment._local_rank == 0:
        # re-save cfg
        save_config_yaml(f"{cfg.output_directory}/cfg.yaml", cfg)

        cfg.logging._logger = MainLogger(cfg)

        cfg.logging._logger.log(
            "internal", "total_training_steps", total_training_steps, step=0
        )

        cfg.logging._logger.log(
            "internal", "total_validation_steps", total_validation_steps, step=0
        )

        cfg.logging._logger.log(
            "internal",
            "global_start_time",
            global_start_time,
            step=cfg.environment._curr_step,
        )
        # re-save config
        save_config_yaml(f"{cfg.output_directory}/cfg.yaml", cfg)

    val_loss, val_metric = run_train(
        cfg=cfg,
        model=model,
        optimizer=optimizer,
        scheduler=scheduler,
        epoch_steps=epoch_steps,
        train_dataloader=train_dataloader,
        val_dataloader=val_dataloader,
        val_df=val_df,
    )

    # reset external logging
    if cfg.environment._local_rank == 0:
        cfg.logging._logger.reset_external()

    experiment_path = f"{cfg.output_directory}"

    # epochs == 0 skips the in-loop checkpointing, so save once here.
    if cfg.training.epochs == 0:
        checkpoint_path = cfg.output_directory
        if cfg.environment._local_rank == 0:
            logger.info(f"Saving last model checkpoint to {checkpoint_path}")
        save_checkpoint(model=model, path=checkpoint_path, cfg=cfg)

    if cfg.environment._local_rank == 0:
        save_config_yaml(f"{cfg.output_directory}/cfg.yaml", cfg)
        save_prediction_outputs(cfg.experiment_name, experiment_path)

        flag_path = os.path.join(cfg.output_directory, "flags.json")
        write_flag(flag_path, "status", "finished")
        time_took = time.time() - global_start_time
        if time_took > 86400:
            # if more than one day, show days
            # need to subtract 1 day from time_took since strftime shows day of year
            # which starts counting at 1
            # NOTE(review): "%-j" is a glibc extension — likely fails on
            # Windows strftime; confirm supported platforms.
            time_took_formatted = time.strftime(
                "%-jd %H:%M:%S", time.gmtime(float(time_took - 86400))
            )
        else:
            time_took_formatted = time.strftime(
                "%H:%M:%S", time.gmtime(float(time_took))
            )
        write_flag(flag_path, "info", f"Runtime: {time_took_formatted}")
one day, show days + # need to subtract 1 day from time_took since strftime shows day of year + # which starts counting at 1 + time_took_formatted = time.strftime( + "%-jd %H:%M:%S", time.gmtime(float(time_took - 86400)) + ) + else: + time_took_formatted = time.strftime( + "%H:%M:%S", time.gmtime(float(time_took)) + ) + write_flag(flag_path, "info", f"Runtime: {time_took_formatted}") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="") + parser.add_argument( + "-C", "--config", help="config filename", default=argparse.SUPPRESS + ) + parser.add_argument("-Y", "--yaml", help="yaml filename", default=argparse.SUPPRESS) + parser_args, unknown = parser.parse_known_args(sys.argv) + + if "config" in parser_args: + cfg = load_config_py(parser_args.config) + elif "yaml" in parser_args: + cfg = load_config_yaml(parser_args.yaml) + else: + raise ValueError("Please, provide a configuration file") + + extra_args = [] + for arg_orig in unknown: + if arg_orig.startswith(("-", "--")): + arg = arg_orig.replace("-", "").split(".") + try: + arg_type = getattr(cfg, arg[0]).get_annotations()[arg[1]] + except (AttributeError, KeyError): + continue + if arg_type == bool: + parser.add_argument(arg_orig, type=util.strtobool) + else: + parser.add_argument(arg_orig, type=arg_type) + extra_args.append(arg) + + args = parser.parse_args() + + for arg in extra_args: + value = getattr(args, ".".join(arg)) + setattr(getattr(cfg, arg[0]), arg[1], value) + + out_dir = cfg.output_directory + os.makedirs(out_dir, exist_ok=True) + + initialize_logging(cfg) + + try: + run(cfg=cfg) + except Exception: + logging.error("Exception occurred during the run:", exc_info=True) + if ("WORLD_SIZE" in os.environ) and (int(os.environ["WORLD_SIZE"]) > 1): + kill_ddp_processes() diff --git a/train_wave.py b/train_wave.py new file mode 100644 index 0000000000000000000000000000000000000000..e9ce77c458d50205be6fac8bb326893fcc50c518 --- /dev/null +++ b/train_wave.py @@ -0,0 +1,163 @@ +import 
os + +# Set this before importing any other modules to be on the safe side +os.environ["OMP_NUM_THREADS"] = "1" +os.environ["MKL_NUM_THREADS"] = "1" +os.environ["OPENBLAS_NUM_THREADS"] = "1" +os.environ["VECLIB_MAXIMUM_THREADS"] = "1" +os.environ["NUMEXPR_NUM_THREADS"] = "1" +os.environ["TOKENIZERS_PARALLELISM"] = "false" + +import argparse +import logging +import sys +import time + +import psutil + + +def check_for_done(process_queue): + """Checks for finished process ids + + Args: + process_queue: list of process ids + Returns: + (True, process_idx) if there is any finished process + (False, False) if there is not finished processes + """ + + for i, pid in enumerate(process_queue): + zombie = False + try: + p = psutil.Process(pid) + zombie = p.status() == "zombie" + except psutil.NoSuchProcess: + pass + if not psutil.pid_exists(pid) or zombie: + return True, i + + return False, False + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="") + parser.add_argument( + "-C", "--config", help="config filename", default=argparse.SUPPRESS + ) + parser.add_argument("-Y", "--yaml", help="yaml filename", default=argparse.SUPPRESS) + parser.add_argument( + "-Q", + "--process-queue", + help="process queue to wait for", + default=argparse.SUPPRESS, + ) + parser_args, _ = parser.parse_known_args(sys.argv) + + process_queue = [] + if "process_queue" in parser_args and parser_args.process_queue != "": + process_queue = [int(x) for x in parser_args.process_queue.split(",")] + + while True: + if len(process_queue) == 0: + break + done, num = check_for_done(process_queue) + if done: + process_queue.pop(num) + else: + time.sleep(30) + + # delayed imports from llm_studio, only after we want to start training + import subprocess + + import torch + + from llm_studio.src.utils.config_utils import load_config_py, load_config_yaml + from llm_studio.src.utils.exceptions import ( + LLMAugmentationsException, + LLMDataException, + LLMMetricException, + 
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="")
    parser.add_argument(
        "-C", "--config", help="config filename", default=argparse.SUPPRESS
    )
    parser.add_argument("-Y", "--yaml", help="yaml filename", default=argparse.SUPPRESS)
    parser.add_argument(
        "-Q",
        "--process-queue",
        help="process queue to wait for",
        default=argparse.SUPPRESS,
    )
    parser_args, _ = parser.parse_known_args(sys.argv)

    # Optional comma-separated list of pids this run must wait on.
    process_queue = []
    if "process_queue" in parser_args and parser_args.process_queue != "":
        process_queue = [int(x) for x in parser_args.process_queue.split(",")]

    # Poll until every queued process has finished.
    while True:
        if len(process_queue) == 0:
            break
        done, num = check_for_done(process_queue)
        if done:
            process_queue.pop(num)
        else:
            time.sleep(30)

    # delayed imports from llm_studio, only after we want to start training
    import subprocess

    import torch

    from llm_studio.src.utils.config_utils import load_config_py, load_config_yaml
    from llm_studio.src.utils.exceptions import (
        LLMAugmentationsException,
        LLMDataException,
        LLMMetricException,
        LLMModelException,
        LLMTrainingException,
    )
    from llm_studio.src.utils.gpu_utils import is_oom_error
    from llm_studio.src.utils.logging_utils import initialize_logging, write_flag
    from llm_studio.src.utils.utils import kill_ddp_processes
    from train import run

    # NOTE(review): unlike train.py, there is no `else` raising ValueError here,
    # so a missing -C/-Y argument crashes later with an unbound `cfg` — confirm
    # this entry point is only ever invoked with one of them.
    if "config" in parser_args:
        cfg = load_config_py(parser_args.config)
    elif "yaml" in parser_args:
        cfg = load_config_yaml(parser_args.yaml)

    # Per-rank flag files: "flags.json" for rank 0 / single process,
    # "flags<rank>.json" for failure reporting of other ranks.
    flag_path = os.path.join(cfg.output_directory, "flags{}.json")

    # Check if DDP
    if "WORLD_SIZE" in os.environ:
        local_rank = int(os.environ["LOCAL_RANK"])
        if local_rank == 0:
            write_flag(flag_path.format(""), "status", "running")
    else:
        write_flag(flag_path.format(""), "status", "running")
        local_rank = 0

    initialize_logging(cfg)

    try:
        run(cfg=cfg)
    except Exception as exception:
        # Map known failure classes to a short "info" flag the UI can display.
        write_flag(flag_path.format(local_rank), "status", "failed")
        if is_oom_error(exception):
            logging.error(
                "GPU Out-of-Memory (OOM) error occurred. "
                "Please, reduce the batch size, or input data size, "
                "or model size. Or try gradient checkpointing.",
                exc_info=True,
            )
            write_flag(flag_path.format(local_rank), "info", "OOM error")

            # Dump GPU state to the logs to help diagnose the OOM.
            logging.info(
                "\n"
                + subprocess.check_output(["nvidia-smi"]).decode("utf-8")
                + "\n"
            )

            if torch.cuda.is_available():
                logging.info(
                    "\n" + torch.cuda.memory_summary().replace("-", "=") + "\n"
                )

        elif isinstance(exception, LLMDataException):
            logging.error(
                "Data error occurred during H2O LLM Studio run:", exc_info=True
            )
            write_flag(flag_path.format(local_rank), "info", "Data error")
        elif isinstance(exception, LLMTrainingException):
            logging.error(
                "Training error occurred during H2O LLM Studio run:", exc_info=True
            )
            write_flag(flag_path.format(local_rank), "info", "Training error")
        elif isinstance(exception, LLMMetricException):
            logging.error(
                "Validation metric failed. Please make sure selected validation "
                "metric is suitable for your current problem setup.",
                exc_info=True,
            )
            write_flag(flag_path.format(local_rank), "info", "Metric error")
        elif isinstance(exception, LLMAugmentationsException):
            logging.error(
                "Custom augmentations error occurred during " "H2O LLM Studio run:",
                exc_info=True,
            )
            write_flag(flag_path.format(local_rank), "info", "Augmentations error")
        elif isinstance(exception, LLMModelException):
            logging.error(
                "Model error occurred during H2O LLM Studio run:",
                exc_info=True,
            )
            write_flag(flag_path.format(local_rank), "info", "Model error")
        else:
            logging.error(
                "Exception occurred during H2O LLM Studio run:", exc_info=True
            )
            write_flag(flag_path.format(local_rank), "info", "See logs")
        kill_ddp_processes()