diff --git a/.github/build/Containerfile b/.github/build/Containerfile index 3c1393cc..8345fbe1 100644 --- a/.github/build/Containerfile +++ b/.github/build/Containerfile @@ -5,10 +5,18 @@ LABEL summary="Toolchain for running pre-commit hooks." \ io.k8s.display-name="Pre-Commit Toolchain" USER root -RUN dnf install nodejs +RUN dnf install nodejs -y && \ + dnf clean all && \ + rm -rf /var/cache/dnf +ADD https://mirror.openshift.com/pub/openshift-v4/clients/oc/latest/linux/oc.tar.gz $TMPDIR/ +RUN tar -C /usr/local/bin -xvf $TMPDIR/oc.tar.gz && \ + chmod +x /usr/local/bin/oc && \ + rm $TMPDIR/oc.tar.gz USER $USERID -COPY requirements-dev.txt /tmp/requirements-dev.txt -RUN python -m pip install -r /tmp/requirements-dev.txt +RUN pip3 install poetry && \ + poetry config virtualenvs.create false +COPY pyproject.toml ./ +RUN poetry install CMD bash diff --git a/.github/build/README.md b/.github/build/README.md index 9273edb8..9731ef30 100644 --- a/.github/build/README.md +++ b/.github/build/README.md @@ -1,3 +1,5 @@ # Pre-Commit Build Artifacts -This directory contains the artifacts required to build the codeflare-sdk pre-commit image. As of right now, we will need to manually update `requirements-dev.txt` in this directory as well. +This directory contains the artifacts required to build the codeflare-sdk pre-commit image. + +To build the image run `podman build -f .github/build/Containerfile .` from the root directory. 
diff --git a/.github/build/requirements-dev.txt b/.github/build/requirements-dev.txt deleted file mode 100644 index 986bc512..00000000 --- a/.github/build/requirements-dev.txt +++ /dev/null @@ -1,6 +0,0 @@ -pre-commit -poetry -pytest -pytest-mock -coverage -black==22.3.0 diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 6a7695c0..9d2233c4 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -1,6 +1,45 @@ +# Please see the documentation for all configuration options: +# https://docs.github.com/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file + version: 2 updates: + # This is to update requirements.txt files in the guided-demos, and e2e directories. + - package-ecosystem: "pip" + directories: + - "**/demo-notebooks/guided-demos*" + - "/tests/e2e" + schedule: + interval: "daily" + ignore: + - dependency-name: "*" + update-types: ["version-update:semver-patch"] + open-pull-requests-limit: 1 + labels: + - "dependabot" + - "test-guided-notebooks" + + # pip means poetry in this case, this keeps poetry.lock up to date with constraints in pyproject.toml. - package-ecosystem: "pip" directory: "/" schedule: - interval: "weekly" + interval: "daily" + ignore: + - dependency-name: "*" + update-types: ["version-update:semver-patch"] + open-pull-requests-limit: 1 + labels: + - "dependabot" + - "test-guided-notebooks" + + # npm means yarn in this case, this keeps yarn.lock up to date with constraints in package.json. 
+ - package-ecosystem: "npm" + directory: "/ui-tests" + schedule: + interval: "daily" + ignore: + - dependency-name: "*" + update-types: ["version-update:semver-patch"] + open-pull-requests-limit: 1 + labels: + - "dependabot" + - "test-ui-notebooks" diff --git a/.github/resources/minio_remote_config_cell.json b/.github/resources/minio_remote_config_cell.json new file mode 100644 index 00000000..e36c4b18 --- /dev/null +++ b/.github/resources/minio_remote_config_cell.json @@ -0,0 +1,20 @@ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "@ray.remote\n", + "def get_minio_run_config():\n", + " import s3fs\n", + " import pyarrow\n", + " s3_fs = s3fs.S3FileSystem(\n", + " key = \"minio\",\n", + " secret = \"minio123\",\n", + " endpoint_url = \"http://minio-service.default.svc.cluster.local:9000\"\n", + " )\n", + " custom_fs = pyarrow.fs.PyFileSystem(pyarrow.fs.FSSpecHandler(s3_fs))\n", + " run_config = ray.train.RunConfig(storage_path='training', storage_filesystem=custom_fs)\n", + " return run_config" + ] + } diff --git a/.github/resources/wait_for_job_cell.json b/.github/resources/wait_for_job_cell.json new file mode 100644 index 00000000..eb8805bd --- /dev/null +++ b/.github/resources/wait_for_job_cell.json @@ -0,0 +1,20 @@ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from time import sleep\n", + "\n", + "finished = False\n", + "while not finished:\n", + " sleep(5)\n", + " status = client.get_job_status(submission_id)\n", + " finished = (status == \"SUCCEEDED\" or status == \"FAILED\" or status == \"STOPPED\")\n", + " print(status)\n", + "print(\"Job status \" + status)\n", + "print(\"Logs: \")\n", + "print(client.get_job_logs(submission_id))\n", + "assert status == \"SUCCEEDED\", \"Job failed or was stopped!\"" + ] + } diff --git a/.github/workflows/additional_demo_notebook_tests.yaml b/.github/workflows/additional_demo_notebook_tests.yaml new file 
mode 100644 index 00000000..096cb509 --- /dev/null +++ b/.github/workflows/additional_demo_notebook_tests.yaml @@ -0,0 +1,255 @@ +name: Additional demo notebooks tests + +on: + pull_request: + types: [ labeled ] + workflow_dispatch: + +concurrency: + group: ${{ github.head_ref }}-${{ github.workflow }} + cancel-in-progress: true + +env: + CODEFLARE_OPERATOR_IMG: "quay.io/project-codeflare/codeflare-operator:dev" + +jobs: + verify-local_interactive: + if: ${{ github.event.label.name == 'test-additional-notebooks' }} + runs-on: ubuntu-latest-4core + + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + submodules: recursive + + - name: Checkout common repo code + uses: actions/checkout@v4 + with: + repository: 'project-codeflare/codeflare-common' + ref: 'main' + path: 'common' + + - name: Checkout CodeFlare operator repository + uses: actions/checkout@v4 + with: + repository: project-codeflare/codeflare-operator + path: codeflare-operator + + - name: Set Go + uses: actions/setup-go@v5 + with: + go-version-file: './codeflare-operator/go.mod' + cache-dependency-path: "./codeflare-operator/go.sum" + + - name: Set up gotestfmt + uses: gotesttools/gotestfmt-action@v2 + with: + token: ${{ secrets.GITHUB_TOKEN }} + + - name: Set up specific Python version + uses: actions/setup-python@v5 + with: + python-version: '3.11' + cache: 'pip' # caching pip dependencies + + - name: Setup and start KinD cluster + uses: ./common/github-actions/kind + + - name: Deploy CodeFlare stack + id: deploy + run: | + cd codeflare-operator + echo Setting up CodeFlare stack + make setup-e2e + echo Deploying CodeFlare operator + make deploy -e IMG="${CODEFLARE_OPERATOR_IMG}" -e ENV="e2e" + kubectl wait --timeout=120s --for=condition=Available=true deployment -n openshift-operators codeflare-operator-manager + cd .. + + - name: Setup Additional demo notebooks execution + run: | + echo "Installing papermill and dependencies..." 
+ pip install poetry papermill ipython ipykernel + # Disable virtualenv due to problems using packaged in virtualenv in papermill + poetry config virtualenvs.create false + + echo "Installing SDK..." + poetry install --with test,docs + + - name: Run local_interactive.ipynb + run: | + set -euo pipefail + + # Remove login/logout cells, as KinD doesn't support authentication using token + jq -r 'del(.cells[] | select(.source[] | contains("Create authentication object and log in to desired user account")))' local_interactive.ipynb > local_interactive.ipynb.tmp && mv local_interactive.ipynb.tmp local_interactive.ipynb + jq -r 'del(.cells[] | select(.source[] | contains("auth.logout()")))' local_interactive.ipynb > local_interactive.ipynb.tmp && mv local_interactive.ipynb.tmp local_interactive.ipynb + # Rewrite cluster_uri() to local_client_url() to retrieve client URL available out of cluster, as the test is executed outside of cluster + sed -i "s/cluster_uri()/local_client_url()/g" local_interactive.ipynb + # Replace async logs with waiting for job to finish, async logs don't work properly in papermill + JOB_WAIT=$(jq -r '.' 
${GITHUB_WORKSPACE}/.github/resources/wait_for_job_cell.json) + jq --argjson job_wait "$JOB_WAIT" -r '(.cells[] | select(.source[] | contains("async for lines in client.tail_job_logs"))) |= $job_wait' local_interactive.ipynb > local_interactive.ipynb.tmp && mv local_interactive.ipynb.tmp local_interactive.ipynb + # Set explicit namespace as SDK need it (currently) to resolve local queues + sed -i "s/worker_cpu_requests=1,/worker_cpu_requests='250m', namespace='default',/" local_interactive.ipynb + # Run notebook + poetry run papermill local_interactive.ipynb local_interactive_out.ipynb --log-output --execution-timeout 1200 + env: + GRPC_DNS_RESOLVER: "native" + working-directory: demo-notebooks/additional-demos + + - name: Print CodeFlare operator logs + if: always() && steps.deploy.outcome == 'success' + run: | + echo "Printing CodeFlare operator logs" + kubectl logs -n openshift-operators --tail -1 -l app.kubernetes.io/name=codeflare-operator | tee ${TEMP_DIR}/codeflare-operator.log + + - name: Print Kueue operator logs + if: always() && steps.deploy.outcome == 'success' + run: | + echo "Printing Kueue operator logs" + KUEUE_CONTROLLER_POD=$(kubectl get pods -n kueue-system | grep kueue-controller | awk '{print $1}') + kubectl logs -n kueue-system --tail -1 ${KUEUE_CONTROLLER_POD} | tee ${TEMP_DIR}/kueue.log + + - name: Print KubeRay operator logs + if: always() && steps.deploy.outcome == 'success' + run: | + echo "Printing KubeRay operator logs" + kubectl logs -n ray-system --tail -1 -l app.kubernetes.io/name=kuberay | tee ${TEMP_DIR}/kuberay.log + + - name: Export all KinD pod logs + uses: ./common/github-actions/kind-export-logs + if: always() && steps.deploy.outcome == 'success' + with: + output-directory: ${TEMP_DIR} + + - name: Upload logs + uses: actions/upload-artifact@v4 + if: always() && steps.deploy.outcome == 'success' + with: + name: logs-local_interactive + retention-days: 10 + path: | + ${{ env.TEMP_DIR }}/**/*.log + + verify-ray_job_client: + if: 
${{ github.event.label.name == 'test-additional-notebooks' }} + runs-on: ubuntu-latest-4core + + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + submodules: recursive + + - name: Checkout common repo code + uses: actions/checkout@v4 + with: + repository: 'project-codeflare/codeflare-common' + ref: 'main' + path: 'common' + + - name: Checkout CodeFlare operator repository + uses: actions/checkout@v4 + with: + repository: project-codeflare/codeflare-operator + path: codeflare-operator + + - name: Set Go + uses: actions/setup-go@v5 + with: + go-version-file: './codeflare-operator/go.mod' + cache-dependency-path: "./codeflare-operator/go.sum" + + - name: Set up gotestfmt + uses: gotesttools/gotestfmt-action@v2 + with: + token: ${{ secrets.GITHUB_TOKEN }} + + - name: Set up specific Python version + uses: actions/setup-python@v5 + with: + python-version: '3.11' + cache: 'pip' # caching pip dependencies + + - name: Setup and start KinD cluster + uses: ./common/github-actions/kind + + - name: Deploy CodeFlare stack + id: deploy + run: | + cd codeflare-operator + echo Setting up CodeFlare stack + make setup-e2e + echo Deploying CodeFlare operator + make deploy -e IMG="${CODEFLARE_OPERATOR_IMG}" -e ENV="e2e" + kubectl wait --timeout=120s --for=condition=Available=true deployment -n openshift-operators codeflare-operator-manager + cd .. + + - name: Setup Additional demo notebooks execution + run: | + echo "Installing papermill and dependencies..." + pip install poetry papermill ipython ipykernel + # Disable virtualenv due to problems using packaged in virtualenv in papermill + poetry config virtualenvs.create false + + echo "Installing SDK..." 
+ poetry install --with test,docs + + - name: Run ray_job_client.ipynb + run: | + set -euo pipefail + + # Remove login/logout cells, as KinD doesn't support authentication using token + jq -r 'del(.cells[] | select(.source[] | contains("Create authentication object for user permissions")))' ray_job_client.ipynb > ray_job_client.ipynb.tmp && mv ray_job_client.ipynb.tmp ray_job_client.ipynb + jq -r 'del(.cells[] | select(.source[] | contains("auth.logout()")))' ray_job_client.ipynb > ray_job_client.ipynb.tmp && mv ray_job_client.ipynb.tmp ray_job_client.ipynb + # Rewrite cluster_uri() to local_client_url() to retrieve client URL available out of cluster, as the test is executed outside of cluster + sed -i "s/cluster_uri()/local_client_url()/g" ray_job_client.ipynb + # Replace async logs with waiting for job to finish, async logs don't work properly in papermill + JOB_WAIT=$(jq -r '.' ${GITHUB_WORKSPACE}/.github/resources/wait_for_job_cell.json) + jq --argjson job_wait "$JOB_WAIT" -r '(.cells[] | select(.source[] | contains("async for lines in client.tail_job_logs"))) |= $job_wait' ray_job_client.ipynb > ray_job_client.ipynb.tmp && mv ray_job_client.ipynb.tmp ray_job_client.ipynb + # Set explicit namespace as SDK need it (currently) to resolve local queues + sed -i "s/worker_cpu_requests=1,/worker_cpu_requests='250m', namespace='default',/" ray_job_client.ipynb + sed -i "s/worker_memory_requests=4,/worker_memory_requests=1,/" ray_job_client.ipynb + sed -i "s/worker_memory_limits=4,/worker_memory_limits=1,/" ray_job_client.ipynb + sed -i "s/'Authorization': .*/'Authorization': None\",/" ray_job_client.ipynb + sed -i "s/num_workers=2/num_workers=1/" ray_job_client.ipynb + sed -i "s/RayJobClient(address=ray_dashboard, headers=header, verify=True)/RayJobClient(address=ray_dashboard, verify=False)/" ray_job_client.ipynb + # Run notebook + poetry run papermill ray_job_client.ipynb hf_interactive_out.ipynb --log-output --execution-timeout 1200 + env: + GRPC_DNS_RESOLVER: 
"native" + working-directory: demo-notebooks/additional-demos + + - name: Print CodeFlare operator logs + if: always() && steps.deploy.outcome == 'success' + run: | + echo "Printing CodeFlare operator logs" + kubectl logs -n openshift-operators --tail -1 -l app.kubernetes.io/name=codeflare-operator | tee ${TEMP_DIR}/codeflare-operator.log + + - name: Print Kueue operator logs + if: always() && steps.deploy.outcome == 'success' + run: | + echo "Printing Kueue operator logs" + KUEUE_CONTROLLER_POD=$(kubectl get pods -n kueue-system | grep kueue-controller | awk '{print $1}') + kubectl logs -n kueue-system --tail -1 ${KUEUE_CONTROLLER_POD} | tee ${TEMP_DIR}/kueue.log + + - name: Print KubeRay operator logs + if: always() && steps.deploy.outcome == 'success' + run: | + echo "Printing KubeRay operator logs" + kubectl logs -n ray-system --tail -1 -l app.kubernetes.io/name=kuberay | tee ${TEMP_DIR}/kuberay.log + + - name: Export all KinD pod logs + uses: ./common/github-actions/kind-export-logs + if: always() && steps.deploy.outcome == 'success' + with: + output-directory: ${TEMP_DIR} + + - name: Upload logs + uses: actions/upload-artifact@v4 + if: always() && steps.deploy.outcome == 'success' + with: + name: logs-ray_job_client + retention-days: 10 + path: | + ${{ env.TEMP_DIR }}/**/*.log diff --git a/.github/workflows/auto-add-issues.yaml b/.github/workflows/auto-add-issues.yaml deleted file mode 100644 index a8be31eb..00000000 --- a/.github/workflows/auto-add-issues.yaml +++ /dev/null @@ -1,25 +0,0 @@ -name: Label new issues as needs-triage and add to CodeFlare Sprint Board -on: - issues: - types: - - opened -jobs: - add_label: - name: Add needs-triage label to new issues - runs-on: ubuntu-latest - permissions: - issues: write - steps: - - uses: actions/checkout@v3 - - run: gh issue edit ${{ github.event.issue.number }} --add-label "triage/needs-triage" - env: - GH_TOKEN: ${{ github.token }} - - add-to-project: - name: Add issue to project - runs-on: ubuntu-latest - 
steps: - - uses: actions/add-to-project@v0.5.0 - with: - project-url: https://github.com/orgs/project-codeflare/projects/8 - github-token: ${{ secrets.CODEFLARE_MACHINE_ACCOUNT_TOKEN }} diff --git a/.github/workflows/coverage-badge.yaml b/.github/workflows/coverage-badge.yaml index 625576fc..2c3b40fa 100644 --- a/.github/workflows/coverage-badge.yaml +++ b/.github/workflows/coverage-badge.yaml @@ -4,7 +4,7 @@ name: Coverage Badge on: push: - branches: [ main ] + branches: [ main, ray-jobs-feature ] jobs: report: @@ -12,27 +12,27 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 - - name: Set up Python 3.9 - uses: actions/setup-python@v2 + - uses: actions/checkout@v4 + - name: Set up Python 3.11 + uses: actions/setup-python@v5 with: - python-version: 3.9 + python-version: 3.11 - name: Install dependencies run: | python -m pip install --upgrade pip - pip install pytest==6.2.4 - pip install pytest-mock==3.6.1 - pip install coverage - if [ -f requirements.txt ]; then pip install -r requirements.txt; fi + pip install poetry + poetry config virtualenvs.create false + poetry lock + poetry install --with test - name: Generate coverage report run: | - coverage run -m --source=src pytest -v tests/unit_test.py + coverage run --omit="src/**/test_*.py,src/codeflare_sdk/common/utils/unit_test_support.py" -m pytest - name: Coverage Badge - uses: tj-actions/coverage-badge-py@v1.8 + uses: tj-actions/coverage-badge-py@v2 - name: Verify Changed files - uses: tj-actions/verify-changed-files@v12 + uses: tj-actions/verify-changed-files@v18 id: changed_files with: files: coverage.svg diff --git a/.github/workflows/dependabot-labeler.yaml b/.github/workflows/dependabot-labeler.yaml new file mode 100644 index 00000000..f9bd27f9 --- /dev/null +++ b/.github/workflows/dependabot-labeler.yaml @@ -0,0 +1,27 @@ +# This workflow file adds the 'lgtm' and 'approved' labels to Dependabot PRs +# This is done to ensure that the PRs that pass required status checks are 
automatically merged by the CodeFlare bot +name: Dependabot Labeler + +on: + pull_request_target: + branches: [ main ] + +jobs: + add-approve-lgtm-label: + if: ${{ github.actor == 'dependabot[bot]' && contains(github.event.pull_request.labels.*.name, 'dependabot') }} + runs-on: ubuntu-latest + + # Permission required to edit a PR + permissions: + pull-requests: write + issues: write + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Add approve and lgtm labels to Dependabot PR + run: | + gh pr edit ${{ github.event.pull_request.number }} --add-label "lgtm" --add-label "approved" + env: + GITHUB_TOKEN: ${{ secrets.GH_CLI_TOKEN }} diff --git a/.github/workflows/e2e_tests.yaml b/.github/workflows/e2e_tests.yaml new file mode 100644 index 00000000..ba59a9e1 --- /dev/null +++ b/.github/workflows/e2e_tests.yaml @@ -0,0 +1,164 @@ +# e2e tests workflow for CodeFlare-SDK +name: e2e + +on: + pull_request: + branches: + - main + - 'release-*' + - ray-jobs-feature + paths-ignore: + - 'docs/**' + - '**.adoc' + - '**.md' + - 'LICENSE' + +concurrency: + group: ${{ github.head_ref }}-${{ github.workflow }} + cancel-in-progress: true + +env: + CODEFLARE_OPERATOR_IMG: "quay.io/project-codeflare/codeflare-operator:dev" + +jobs: + kubernetes: + runs-on: gpu-t4-4-core + + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + submodules: recursive + + - name: Checkout common repo code + uses: actions/checkout@v4 + with: + repository: 'project-codeflare/codeflare-common' + ref: 'main' + path: 'common' + + - name: Checkout CodeFlare operator repository + uses: actions/checkout@v4 + with: + repository: project-codeflare/codeflare-operator + path: codeflare-operator + + - name: Set Go + uses: actions/setup-go@v5 + with: + go-version-file: './codeflare-operator/go.mod' + cache-dependency-path: "./codeflare-operator/go.sum" + + - name: Set up gotestfmt + uses: gotesttools/gotestfmt-action@v2 + with: + token: ${{ secrets.GITHUB_TOKEN }} + + - name: Set up 
specific Python version + uses: actions/setup-python@v5 + with: + python-version: '3.12' + cache: 'pip' # caching pip dependencies + + - name: Setup NVidia GPU environment for KinD + uses: ./common/github-actions/nvidia-gpu-setup + + - name: Setup and start KinD cluster + uses: ./common/github-actions/kind + with: + worker-nodes: 1 + + - name: Install NVidia GPU operator for KinD + uses: ./common/github-actions/nvidia-gpu-operator + + - name: Deploy CodeFlare stack + id: deploy + run: | + cd codeflare-operator + echo Setting up CodeFlare stack + make setup-e2e + echo Deploying CodeFlare operator + make deploy -e IMG="${CODEFLARE_OPERATOR_IMG}" -e ENV="e2e" + kubectl wait --timeout=120s --for=condition=Available=true deployment -n openshift-operators codeflare-operator-manager + cd .. + + - name: Add user to KinD + uses: ./common/github-actions/kind-add-user + with: + user-name: sdk-user + + - name: Configure RBAC for sdk user with limited permissions + run: | + kubectl create clusterrole list-ingresses --verb=get,list --resource=ingresses + kubectl create clusterrolebinding sdk-user-list-ingresses --clusterrole=list-ingresses --user=sdk-user + kubectl create clusterrole namespace-creator --verb=get,list,create,delete,patch --resource=namespaces + kubectl create clusterrolebinding sdk-user-namespace-creator --clusterrole=namespace-creator --user=sdk-user + kubectl create clusterrole raycluster-creator --verb=get,list,create,delete,patch --resource=rayclusters + kubectl create clusterrolebinding sdk-user-raycluster-creator --clusterrole=raycluster-creator --user=sdk-user + kubectl create clusterrole appwrapper-creator --verb=get,list,create,delete,patch --resource=appwrappers + kubectl create clusterrolebinding sdk-user-appwrapper-creator --clusterrole=appwrapper-creator --user=sdk-user + kubectl create clusterrole resourceflavor-creator --verb=get,list,create,delete --resource=resourceflavors + kubectl create clusterrolebinding sdk-user-resourceflavor-creator 
--clusterrole=resourceflavor-creator --user=sdk-user + kubectl create clusterrole clusterqueue-creator --verb=get,list,create,delete,patch --resource=clusterqueues + kubectl create clusterrolebinding sdk-user-clusterqueue-creator --clusterrole=clusterqueue-creator --user=sdk-user + kubectl create clusterrole localqueue-creator --verb=get,list,create,delete,patch --resource=localqueues + kubectl create clusterrolebinding sdk-user-localqueue-creator --clusterrole=localqueue-creator --user=sdk-user + kubectl create clusterrole list-secrets --verb=get,list --resource=secrets + kubectl create clusterrolebinding sdk-user-list-secrets --clusterrole=list-secrets --user=sdk-user + kubectl create clusterrole pod-creator --verb=get,list,watch --resource=pods + kubectl create clusterrolebinding sdk-user-pod-creator --clusterrole=pod-creator --user=sdk-user + kubectl create clusterrole service-reader --verb=get,list,watch --resource=services + kubectl create clusterrolebinding sdk-user-service-reader --clusterrole=service-reader --user=sdk-user + kubectl create clusterrole port-forward-pods --verb=create --resource=pods/portforward + kubectl create clusterrolebinding sdk-user-port-forward-pods-binding --clusterrole=port-forward-pods --user=sdk-user + kubectl config use-context sdk-user + + - name: Run e2e tests + run: | + export CODEFLARE_TEST_OUTPUT_DIR=${{ env.TEMP_DIR }} + echo "CODEFLARE_TEST_OUTPUT_DIR=${CODEFLARE_TEST_OUTPUT_DIR}" >> $GITHUB_ENV + + set -euo pipefail + pip install poetry + poetry install --with test,docs + echo "Running e2e tests..." 
+ poetry run pytest -v -s ./tests/e2e -m 'kind and nvidia_gpu' > ${CODEFLARE_TEST_OUTPUT_DIR}/pytest_output.log 2>&1 + env: + GRPC_DNS_RESOLVER: "native" + + - name: Switch to kind-cluster context to print logs + if: always() && steps.deploy.outcome == 'success' + run: kubectl config use-context kind-cluster + + - name: Print Pytest output log + if: always() && steps.deploy.outcome == 'success' + run: | + echo "Printing Pytest output logs" + cat ${CODEFLARE_TEST_OUTPUT_DIR}/pytest_output.log + + - name: Print CodeFlare operator logs + if: always() && steps.deploy.outcome == 'success' + run: | + echo "Printing CodeFlare operator logs" + kubectl logs -n openshift-operators --tail -1 -l app.kubernetes.io/name=codeflare-operator | tee ${CODEFLARE_TEST_OUTPUT_DIR}/codeflare-operator.log + + - name: Print KubeRay operator logs + if: always() && steps.deploy.outcome == 'success' + run: | + echo "Printing KubeRay operator logs" + kubectl logs -n ray-system --tail -1 -l app.kubernetes.io/name=kuberay | tee ${CODEFLARE_TEST_OUTPUT_DIR}/kuberay.log + + - name: Export all KinD pod logs + uses: ./common/github-actions/kind-export-logs + if: always() && steps.deploy.outcome == 'success' + with: + output-directory: ${CODEFLARE_TEST_OUTPUT_DIR} + + - name: Upload logs + uses: actions/upload-artifact@v4 + if: always() && steps.deploy.outcome == 'success' + with: + name: logs + retention-days: 10 + path: | + ${{ env.CODEFLARE_TEST_OUTPUT_DIR }}/**/*.log diff --git a/.github/workflows/guided_notebook_tests.yaml b/.github/workflows/guided_notebook_tests.yaml new file mode 100644 index 00000000..3309c6a1 --- /dev/null +++ b/.github/workflows/guided_notebook_tests.yaml @@ -0,0 +1,382 @@ +name: Guided notebooks tests + +on: + pull_request: + branches: [ main ] + types: [ labeled ] + +concurrency: + group: ${{ github.head_ref }}-${{ github.workflow }} + cancel-in-progress: true + +env: + CODEFLARE_OPERATOR_IMG: "quay.io/project-codeflare/codeflare-operator:dev" + +jobs: + 
verify-0_basic_ray: + if: ${{ contains(github.event.pull_request.labels.*.name, 'test-guided-notebooks') }} + runs-on: ubuntu-latest-4core + + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + submodules: recursive + + - name: Checkout common repo code + uses: actions/checkout@v4 + with: + repository: 'project-codeflare/codeflare-common' + ref: 'main' + path: 'common' + + - name: Checkout CodeFlare operator repository + uses: actions/checkout@v4 + with: + repository: project-codeflare/codeflare-operator + path: codeflare-operator + + - name: Set Go + uses: actions/setup-go@v5 + with: + go-version-file: './codeflare-operator/go.mod' + cache-dependency-path: "./codeflare-operator/go.sum" + + - name: Set up gotestfmt + uses: gotesttools/gotestfmt-action@v2 + with: + token: ${{ secrets.GITHUB_TOKEN }} + + - name: Set up specific Python version + uses: actions/setup-python@v5 + with: + python-version: '3.11' + cache: 'pip' # caching pip dependencies + + - name: Setup and start KinD cluster + uses: ./common/github-actions/kind + + - name: Deploy CodeFlare stack + id: deploy + run: | + cd codeflare-operator + echo Setting up CodeFlare stack + make setup-e2e + echo Deploying CodeFlare operator + make deploy -e IMG="${CODEFLARE_OPERATOR_IMG}" -e ENV="e2e" + kubectl wait --timeout=120s --for=condition=Available=true deployment -n openshift-operators codeflare-operator-manager + cd .. + + - name: Setup Guided notebooks execution + run: | + echo "Installing papermill and dependencies..." + pip install poetry papermill ipython ipykernel + # Disable virtualenv due to problems using packaged in virtualenv in papermill + poetry config virtualenvs.create false + + echo "Installing SDK..." 
+ poetry install --with test,docs + + - name: Run 0_basic_ray.ipynb + run: | + set -euo pipefail + + # Remove login/logout cells, as KinD doesn't support authentication using token + jq -r 'del(.cells[] | select(.source[] | contains("Create authentication object for user permissions")))' 0_basic_ray.ipynb > 0_basic_ray.ipynb.tmp && mv 0_basic_ray.ipynb.tmp 0_basic_ray.ipynb + jq -r 'del(.cells[] | select(.source[] | contains("auth.logout()")))' 0_basic_ray.ipynb > 0_basic_ray.ipynb.tmp && mv 0_basic_ray.ipynb.tmp 0_basic_ray.ipynb + # Set explicit namespace as SDK need it (currently) to resolve local queues + sed -i "s/head_memory_limits=2,/head_memory_limits=2, namespace='default',/" 0_basic_ray.ipynb + # Run notebook + poetry run papermill 0_basic_ray.ipynb 0_basic_ray_out.ipynb --log-output --execution-timeout 600 + working-directory: demo-notebooks/guided-demos + + - name: Print CodeFlare operator logs + if: always() && steps.deploy.outcome == 'success' + run: | + echo "Printing CodeFlare operator logs" + kubectl logs -n openshift-operators --tail -1 -l app.kubernetes.io/name=codeflare-operator | tee ${TEMP_DIR}/codeflare-operator.log + + - name: Print Kueue operator logs + if: always() && steps.deploy.outcome == 'success' + run: | + echo "Printing Kueue operator logs" + KUEUE_CONTROLLER_POD=$(kubectl get pods -n kueue-system | grep kueue-controller | awk '{print $1}') + kubectl logs -n kueue-system --tail -1 ${KUEUE_CONTROLLER_POD} | tee ${TEMP_DIR}/kueue.log + + - name: Print KubeRay operator logs + if: always() && steps.deploy.outcome == 'success' + run: | + echo "Printing KubeRay operator logs" + kubectl logs -n ray-system --tail -1 -l app.kubernetes.io/name=kuberay | tee ${TEMP_DIR}/kuberay.log + + - name: Export all KinD pod logs + uses: ./common/github-actions/kind-export-logs + if: always() && steps.deploy.outcome == 'success' + with: + output-directory: ${TEMP_DIR} + + - name: Upload logs + uses: actions/upload-artifact@v4 + if: always() && 
steps.deploy.outcome == 'success' + with: + name: logs-0_basic_ray + retention-days: 10 + path: | + ${{ env.TEMP_DIR }}/**/*.log + + verify-1_cluster_job_client: + if: ${{ contains(github.event.pull_request.labels.*.name, 'test-guided-notebooks') }} + runs-on: gpu-t4-4-core + + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + submodules: recursive + + - name: Checkout common repo code + uses: actions/checkout@v4 + with: + repository: 'project-codeflare/codeflare-common' + ref: 'main' + path: 'common' + + - name: Checkout CodeFlare operator repository + uses: actions/checkout@v4 + with: + repository: project-codeflare/codeflare-operator + path: codeflare-operator + + - name: Set Go + uses: actions/setup-go@v5 + with: + go-version-file: './codeflare-operator/go.mod' + cache-dependency-path: "./codeflare-operator/go.sum" + + - name: Set up gotestfmt + uses: gotesttools/gotestfmt-action@v2 + with: + token: ${{ secrets.GITHUB_TOKEN }} + + - name: Set up specific Python version + uses: actions/setup-python@v5 + with: + python-version: '3.11' + cache: 'pip' # caching pip dependencies + + - name: Setup NVidia GPU environment for KinD + uses: ./common/github-actions/nvidia-gpu-setup + + - name: Setup and start KinD cluster + uses: ./common/github-actions/kind + + - name: Install NVidia GPU operator for KinD + uses: ./common/github-actions/nvidia-gpu-operator + with: + enable-time-slicing: 'true' + + - name: Deploy CodeFlare stack + id: deploy + run: | + cd codeflare-operator + echo Setting up CodeFlare stack + make setup-e2e + echo Deploying CodeFlare operator + make deploy -e IMG="${CODEFLARE_OPERATOR_IMG}" -e ENV="e2e" + kubectl wait --timeout=120s --for=condition=Available=true deployment -n openshift-operators codeflare-operator-manager + cd .. + + - name: Setup Guided notebooks execution + run: | + echo "Installing papermill and dependencies..." 
+ pip install poetry papermill ipython ipykernel + # Disable virtualenv due to problems using packaged in virtualenv in papermill + poetry config virtualenvs.create false + + echo "Installing SDK..." + poetry install --with test,docs + + - name: Run 1_cluster_job_client.ipynb + run: | + set -euo pipefail + + # Remove login/logout cells, as KinD doesn't support authentication using token + jq -r 'del(.cells[] | select(.source[] | contains("Create authentication object for user permissions")))' 1_cluster_job_client.ipynb > 1_cluster_job_client.ipynb.tmp && mv 1_cluster_job_client.ipynb.tmp 1_cluster_job_client.ipynb + jq -r 'del(.cells[] | select(.source[] | contains("auth.logout()")))' 1_cluster_job_client.ipynb > 1_cluster_job_client.ipynb.tmp && mv 1_cluster_job_client.ipynb.tmp 1_cluster_job_client.ipynb + # Replace async logs with waiting for job to finish, async logs don't work properly in papermill + JOB_WAIT=$(jq -r '.' ${GITHUB_WORKSPACE}/.github/resources/wait_for_job_cell.json) + jq --argjson job_wait "$JOB_WAIT" -r '(.cells[] | select(.source[] | contains("async for lines in client.tail_job_logs"))) |= $job_wait' 1_cluster_job_client.ipynb > 1_cluster_job_client.ipynb.tmp && mv 1_cluster_job_client.ipynb.tmp 1_cluster_job_client.ipynb + # Set explicit namespace as SDK need it (currently) to resolve local queues + sed -i "s/head_cpu_limits=1,/head_cpu_limits=1, namespace='default',/" 1_cluster_job_client.ipynb + # Run notebook + poetry run papermill 1_cluster_job_client.ipynb 1_cluster_job_client_out.ipynb --log-output --execution-timeout 1200 + working-directory: demo-notebooks/guided-demos + + - name: Print CodeFlare operator logs + if: always() && steps.deploy.outcome == 'success' + run: | + echo "Printing CodeFlare operator logs" + kubectl logs -n openshift-operators --tail -1 -l app.kubernetes.io/name=codeflare-operator | tee ${TEMP_DIR}/codeflare-operator.log + + - name: Print Kueue operator logs + if: always() && steps.deploy.outcome == 'success' + 
run: | + echo "Printing Kueue operator logs" + KUEUE_CONTROLLER_POD=$(kubectl get pods -n kueue-system | grep kueue-controller | awk '{print $1}') + kubectl logs -n kueue-system --tail -1 ${KUEUE_CONTROLLER_POD} | tee ${TEMP_DIR}/kueue.log + + - name: Print KubeRay operator logs + if: always() && steps.deploy.outcome == 'success' + run: | + echo "Printing KubeRay operator logs" + kubectl logs -n ray-system --tail -1 -l app.kubernetes.io/name=kuberay | tee ${TEMP_DIR}/kuberay.log + + - name: Export all KinD pod logs + uses: ./common/github-actions/kind-export-logs + if: always() && steps.deploy.outcome == 'success' + with: + output-directory: ${TEMP_DIR} + + - name: Upload logs + uses: actions/upload-artifact@v4 + if: always() && steps.deploy.outcome == 'success' + with: + name: logs-1_cluster_job_client + retention-days: 10 + path: | + ${{ env.TEMP_DIR }}/**/*.log + + verify-2_basic_interactive: + if: ${{ contains(github.event.pull_request.labels.*.name, 'test-guided-notebooks') }} + runs-on: gpu-t4-4-core + + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + submodules: recursive + + - name: Checkout common repo code + uses: actions/checkout@v4 + with: + repository: 'project-codeflare/codeflare-common' + ref: 'main' + path: 'common' + + - name: Checkout CodeFlare operator repository + uses: actions/checkout@v4 + with: + repository: project-codeflare/codeflare-operator + path: codeflare-operator + + - name: Set Go + uses: actions/setup-go@v5 + with: + go-version-file: './codeflare-operator/go.mod' + cache-dependency-path: "./codeflare-operator/go.sum" + + - name: Set up gotestfmt + uses: gotesttools/gotestfmt-action@v2 + with: + token: ${{ secrets.GITHUB_TOKEN }} + + - name: Set up specific Python version + uses: actions/setup-python@v5 + with: + python-version: '3.11' + cache: 'pip' # caching pip dependencies + + - name: Setup NVidia GPU environment for KinD + uses: ./common/github-actions/nvidia-gpu-setup + + - name: Setup and start KinD 
cluster + uses: ./common/github-actions/kind + + - name: Install NVidia GPU operator for KinD + uses: ./common/github-actions/nvidia-gpu-operator + with: + enable-time-slicing: 'true' + + - name: Deploy CodeFlare stack + id: deploy + run: | + cd codeflare-operator + echo Setting up CodeFlare stack + make setup-e2e + echo Deploying CodeFlare operator + make deploy -e IMG="${CODEFLARE_OPERATOR_IMG}" -e ENV="e2e" + kubectl wait --timeout=120s --for=condition=Available=true deployment -n openshift-operators codeflare-operator-manager + cd .. + + - name: Install MINIO + run: | + kubectl apply -f ./tests/e2e/minio_deployment.yaml + kubectl wait --timeout=120s --for=condition=Available=true deployment -n default minio + + - name: Setup Guided notebooks execution + run: | + echo "Installing papermill and dependencies..." + pip install poetry papermill ipython ipykernel + # Disable virtualenv due to problems using packages in virtualenv in papermill + poetry config virtualenvs.create false + + echo "Installing SDK..." 
+ poetry install --with test,docs + + - name: Run 2_basic_interactive.ipynb + run: | + set -euo pipefail + + # Remove login/logout cells, as KinD doesn't support authentication using token + jq -r 'del(.cells[] | select(.source[] | contains("Create authentication object for user permissions")))' 2_basic_interactive.ipynb > 2_basic_interactive.ipynb.tmp && mv 2_basic_interactive.ipynb.tmp 2_basic_interactive.ipynb + jq -r 'del(.cells[] | select(.source[] | contains("auth.logout()")))' 2_basic_interactive.ipynb > 2_basic_interactive.ipynb.tmp && mv 2_basic_interactive.ipynb.tmp 2_basic_interactive.ipynb + # Rewrite cluster_uri() to local_client_url() to retrieve client URL available out of cluster, as the test is executed outside of cluster + sed -i "s/cluster_uri()/local_client_url()/" 2_basic_interactive.ipynb + # Set explicit namespace as SDK need it (currently) to resolve local queues + sed -i "s/head_cpu_limits=1,/head_cpu_limits=1, namespace='default',/" 2_basic_interactive.ipynb + # Add MINIO related modules to runtime environment + sed -i "s/\\\\\"transformers/\\\\\"s3fs\\\\\", \\\\\"pyarrow\\\\\", \\\\\"transformers/" 2_basic_interactive.ipynb + # Replace markdown cell with remote configuration for MINIO + MINIO_CONFIG=$(jq -r '.' 
${GITHUB_WORKSPACE}/.github/resources/minio_remote_config_cell.json) + jq --argjson minio_config "$MINIO_CONFIG" -r '(.cells[] | select(.source[] | contains("Now that we are connected"))) |= $minio_config' 2_basic_interactive.ipynb > 2_basic_interactive.ipynb.tmp && mv 2_basic_interactive.ipynb.tmp 2_basic_interactive.ipynb + # Configure persistent storage for Ray trainer + sed -i -E "s/# run_config.*\)/, run_config=ray.get(get_minio_run_config.remote())/" 2_basic_interactive.ipynb + # Run notebook + poetry run papermill 2_basic_interactive.ipynb 2_basic_interactive_out.ipynb --log-output --execution-timeout 1200 + env: + GRPC_DNS_RESOLVER: "native" + working-directory: demo-notebooks/guided-demos + + - name: Print CodeFlare operator logs + if: always() && steps.deploy.outcome == 'success' + run: | + echo "Printing CodeFlare operator logs" + kubectl logs -n openshift-operators --tail -1 -l app.kubernetes.io/name=codeflare-operator | tee ${TEMP_DIR}/codeflare-operator.log + + - name: Print Kueue operator logs + if: always() && steps.deploy.outcome == 'success' + run: | + echo "Printing Kueue operator logs" + KUEUE_CONTROLLER_POD=$(kubectl get pods -n kueue-system | grep kueue-controller | awk '{print $1}') + kubectl logs -n kueue-system --tail -1 ${KUEUE_CONTROLLER_POD} | tee ${TEMP_DIR}/kueue.log + + - name: Print KubeRay operator logs + if: always() && steps.deploy.outcome == 'success' + run: | + echo "Printing KubeRay operator logs" + kubectl logs -n ray-system --tail -1 -l app.kubernetes.io/name=kuberay | tee ${TEMP_DIR}/kuberay.log + + - name: Export all KinD pod logs + uses: ./common/github-actions/kind-export-logs + if: always() && steps.deploy.outcome == 'success' + with: + output-directory: ${TEMP_DIR} + + - name: Upload logs + uses: actions/upload-artifact@v4 + if: always() && steps.deploy.outcome == 'success' + with: + name: logs-2_basic_interactive + retention-days: 10 + path: | + ${{ env.TEMP_DIR }}/**/*.log diff --git 
a/.github/workflows/image-build-and-push.yaml b/.github/workflows/image-build-and-push.yaml deleted file mode 100644 index 8aaf7703..00000000 --- a/.github/workflows/image-build-and-push.yaml +++ /dev/null @@ -1,51 +0,0 @@ -name: Notebook Image Build and Push - -on: - workflow_dispatch: - inputs: - release-version: - type: string - required: true - description: 'Version number (for example: 0.1.0)' - is-stable: - description: 'Select if the built image should be tagged as stable' - required: true - type: boolean - quay-organization: - description: 'Quay organization used to push the built images to' - required: true - default: 'project-codeflare' - python_version: - type: string - default: "3.8" - required: true - poetry_version: - type: string - default: "1.5.1" - required: true - -jobs: - release: - runs-on: ubuntu-latest - steps: - - name: Checkout the repository - uses: actions/checkout@v3 - - name: Install Python - uses: actions/setup-python@v4 - with: - python-version: ${{ github.event.inputs.python_version }} - - name: Image Build - run: | - cd custom-nb-image - podman build --build-arg SDK_VERSION="${{ github.event.inputs.release-version }}" -t quay.io/${{ github.event.inputs.quay-organization }}/notebook:v${{ github.event.inputs.release-version }} . 
- - name: Login to Quay.io - uses: redhat-actions/podman-login@v1 - with: - registry: quay.io - username: ${{ secrets.QUAY_ID }} - password: ${{ secrets.QUAY_TOKEN }} - - name: Image Push - run: podman push quay.io/${{ github.event.inputs.quay-organization }}/notebook:v${{ github.event.inputs.release-version }} - - name: Image Push Stable - if: ${{ inputs.is-stable }} - run: podman push quay.io/${{ github.event.inputs.quay-organization }}/notebook:v${{ github.event.inputs.release-version }} quay.io/${{ github.event.inputs.quay-organization }}/notebook:stable diff --git a/.github/workflows/nightly-image-build.yaml b/.github/workflows/nightly-image-build.yaml deleted file mode 100644 index 43e53fc2..00000000 --- a/.github/workflows/nightly-image-build.yaml +++ /dev/null @@ -1,50 +0,0 @@ -name: Nightly Image Build - -on: - workflow_dispatch: - push: - branches: - - main - -env: - PYTHON_VERSION: 3.8 - POETRY_VERSION: 1.5.1 - QUAY_ORGANIZATION: 'project-codeflare' - SDK_FILE: 'codeflare_sdk-0.0.0.dev0.tar.gz' - IMAGE_TAG: 'dev' - -jobs: - nightly-build: - runs-on: ubuntu-latest - steps: - - name: Checkout the repository - uses: actions/checkout@v3 - - name: Install Python - uses: actions/setup-python@v4 - with: - python-version: ${{ env.PYTHON_VERSION }} - - name: Install Poetry - uses: abatilo/actions-poetry@v2 - with: - poetry-version: ${{ env.POETRY_VERSION }} - - name: Run poetry install - run: poetry install --with docs - - name: Run poetry build - run: poetry build - - name: Copy SDK package - run: cp dist/${SDK_FILE} custom-nb-image - - name: Modify Dockerfile to use locally built SDK - run: | - sed -i "s/ARG SDK_VERSION=.*/COPY ${{ env.SDK_FILE }} ./" custom-nb-image/Dockerfile - sed -i "s/codeflare-sdk==.*/${{ env.SDK_FILE }}\\\\/" custom-nb-image/Dockerfile - - name: Image Build - working-directory: custom-nb-image - run: docker build -t quay.io/${{ env.QUAY_ORGANIZATION }}/notebook:${{ env.IMAGE_TAG }} . 
- - name: Login to Quay.io - uses: docker/login-action@v2 - with: - registry: quay.io - username: ${{ secrets.QUAY_ID }} - password: ${{ secrets.QUAY_TOKEN }} - - name: Image Push - run: docker push quay.io/${{ env.QUAY_ORGANIZATION }}/notebook:${{ env.IMAGE_TAG }} diff --git a/.github/workflows/odh-notebooks-sync.yml b/.github/workflows/odh-notebooks-sync.yml new file mode 100644 index 00000000..91f5aecb --- /dev/null +++ b/.github/workflows/odh-notebooks-sync.yml @@ -0,0 +1,165 @@ +# The aim of this GitHub workflow is to update the pipfile to sync with Codeflare-SDK release. +name: Sync ODH-notebooks with codeflare-sdk release +on: + workflow_dispatch: + inputs: + upstream-repository-organization: + required: true + description: "Owner of target upstream notebooks repository used to open a PR against" + default: "opendatahub-io" + notebooks-target-branch: + required: true + description: "Target branch of upstream repository" + default: "main" + python-version: + required: true + description: "Provide the python version to be used for the notebooks" + default: "3.11" + codeflare-repository-organization: + required: true + description: "Owner of origin notebooks repository used to open a PR" + default: "project-codeflare" + + codeflare_sdk_release_version: + required: true + description: "Provide version of the Codeflare-SDK release" + +env: + BRANCH_NAME: ${{ github.event.inputs.notebooks-target-branch }} + PYTHON_VERSION: ${{ github.event.inputs.python-version }} + CODEFLARE_RELEASE_VERSION: ${{ github.event.inputs.codeflare_sdk_release_version }} + UPDATER_BRANCH: odh-sync-updater-${{ github.run_id }} + UPSTREAM_OWNER: ${{ github.event.inputs.upstream-repository-organization }} + REPO_OWNER: ${{ github.event.inputs.codeflare-repository-organization }} + REPO_NAME: notebooks + GITHUB_TOKEN: ${{ secrets.CODEFLARE_MACHINE_ACCOUNT_TOKEN }} + MINIMUM_SUPPORTED_PYTHON_VERSION: 3.11 + +jobs: + build: + runs-on: ubuntu-latest-8core + steps: + - name: Clone repository 
and Sync + run: | + git clone https://x-access-token:${GITHUB_TOKEN}@github.com/$REPO_OWNER/$REPO_NAME.git $REPO_NAME + cd $REPO_NAME + git remote add upstream https://github.com/$UPSTREAM_OWNER/$REPO_NAME.git + git config --global user.email "138894154+codeflare-machine-account@users.noreply.github.com" + git config --global user.name "codeflare-machine-account" + git remote -v + git checkout $BRANCH_NAME + git pull upstream $BRANCH_NAME && git push origin $BRANCH_NAME + + - name: Setup Python environment + uses: actions/setup-python@v4 + with: + python-version: ${{ env.PYTHON_VERSION }} + cache: 'pipenv' + + - name: Install pipenv and pip-versions + run: pip install pipenv==2024.4.0 pip-versions + + - name: Update Pipfiles in accordance with Codeflare-SDK latest release + run: | + package_name=codeflare-sdk + available_python_versions=("$PYTHON_VERSION") # add space separated python versions according to 'python-versions' specified in 'Setup Python Environment' step + install_package_using_pipenv(){ + # args allow custom names for Pipfile and Pipfile.lock + if [ $# -eq 2 ]; then + mv "${1}" Pipfile + mv "${2}" Pipfile.lock + fi + # replace existing version of cf-sdk with new version in Pipfile + sed -i "s/codeflare-sdk = .*$/codeflare-sdk = \"~=$CODEFLARE_RELEASE_VERSION\"/g" Pipfile + # Lock dependencies, ensuring pre-release are included and clear previous state + if ! pipenv lock --verbose --pre --clear ; then + echo "Failed to lock dependencies" + exit 1 + fi + # remove virtual env and clear cache + if ! pipenv --rm --clear ; then + echo "Failed to remove virtual environment" + exit 1 + fi + if [ $# -eq 2 ]; then + mv Pipfile "${1}" + mv Pipfile.lock "${2}" + fi + } + # Get the list of available versions for the package + if ! 
versions=$(pipenv run pip-versions list $package_name);then + echo "Failed to retrieve versions for $package_name" + exit 1 + fi + # Check if the desired version exists in the list + if echo "$versions" | grep -q "${CODEFLARE_RELEASE_VERSION}"; then + echo "Version ${CODEFLARE_RELEASE_VERSION} is available for $package_name" + # list all Pipfile paths having Codeflare-SDK listed + # Extracting only directories from file paths, excluding a `.gitworkflow` and `.git` directory + # Extracting Intel directories as they are not supported in RHOAI + # Removing tensorflow image TEMPORARILY until solution has been made for the tf2onnx package dependency resolution + directories+=($(grep --exclude-dir=.git --exclude-dir=.github --exclude-dir=intel --exclude-dir=tensorflow --exclude-dir=rocm-tensorflow --include="Pipfile*" -rl "${package_name} = \"~=.*\"" | xargs dirname | sort | uniq)) + counter=0 + total=${#directories[@]} + for dir in "${directories[@]}"; do + counter=$((counter+1)) + echo "--Processing directory $counter '$dir' of total $total" + cd "$dir" + minimum_supported_python_version_major=$(echo "${MINIMUM_SUPPORTED_PYTHON_VERSION}" | awk -F '.' '{print $1}') #integer of MINIMUM_SUPPORTED_PYTHON_VERSION env variable + minimum_supported_python_version_minor=$(echo "${MINIMUM_SUPPORTED_PYTHON_VERSION}" | awk -F '.' '{print $2}') #decimal of MINIMUM_SUPPORTED_PYTHON_VERSION env variable + if ! [ -f "Pipfile" ]; then + if [ -f "Pipfile.cpu" ]; then + pipfile_python_version=$(grep -E '^python_version' ./Pipfile.cpu | cut -d '"' -f 2) # extracted from pipfile.cpu + fi + else + pipfile_python_version=$(grep -E '^python_version' ./Pipfile | cut -d '"' -f 2) # extracted from pipfile + fi + pipfile_python_version_major=$(echo "$pipfile_python_version" | awk -F '.' '{print $1}') + pipfile_python_version_minor=$(echo "$pipfile_python_version" | awk -F '.' 
'{print $2}') + if [[ " ${available_python_versions[@]} " =~ " ${pipfile_python_version} " && "$pipfile_python_version_major" -ge "$minimum_supported_python_version_major" && "$pipfile_python_version_minor" -ge "$minimum_supported_python_version_minor" ]]; then + if ! [ -f "Pipfile" ]; then + if [ -f "Pipfile.cpu" ]; then + install_package_using_pipenv Pipfile.cpu Pipfile.lock.cpu + fi + if [ -f "Pipfile.gpu" ]; then + install_package_using_pipenv Pipfile.gpu Pipfile.lock.gpu + fi + else + #install specified package + install_package_using_pipenv + fi + else + echo "Skipped installation of ${package_name} with version ${CODEFLARE_RELEASE_VERSION} in $dir" + fi + cd - + echo "$((total-counter)) directories remaining.." + done + else + versions_list=$(echo "$versions" | tr '\n' ' ' | sed 's/, $//') + versions="${versions_list%,}" + echo "Version '${CODEFLARE_RELEASE_VERSION}' is not available for $package_name" + echo "Available versions for $package_name: $versions" + exit 1 + fi + + - name: Push changes + run: | + cd $REPO_NAME + git add . && git status && git checkout -b ${{ env.UPDATER_BRANCH }} && \ + git commit -am "Updated notebooks via ${{ env.UPDATER_BRANCH }} GitHub action" --signoff && + git remote set-url origin https://x-access-token:${GITHUB_TOKEN}@github.com/$REPO_OWNER/$REPO_NAME.git + git push origin ${{ env.UPDATER_BRANCH }} + + - name: Create Pull Request + run: | + gh pr create --repo $UPSTREAM_OWNER/$REPO_NAME \ + --title "$pr_title" \ + --body "$pr_body" \ + --head $REPO_OWNER:$UPDATER_BRANCH \ + --base $BRANCH_NAME + env: + pr_title: "[Codeflare Action] Update notebook's pipfile to sync with Codeflare-SDK release ${{ env.CODEFLARE_RELEASE_VERSION }}" + pr_body: | + :rocket: This is an automated Pull Request generated by [odh-notebooks-sync.yml](https://github.com/project-codeflare/codeflare-sdk/blob/main/.github/workflows/odh-notebooks-sync.yml) workflow. 
+ + This PR updates the `Pipfile` to sync with latest Codeflare-SDK release. diff --git a/.github/workflows/precommit.yaml b/.github/workflows/pre-commit.yaml similarity index 73% rename from .github/workflows/precommit.yaml rename to .github/workflows/pre-commit.yaml index b2ed80ca..1575a654 100644 --- a/.github/workflows/precommit.yaml +++ b/.github/workflows/pre-commit.yaml @@ -1,10 +1,5 @@ name: Pre-commit on: - push: - branches: - - '**' - tags-ignore: - - 'v*' pull_request: workflow_dispatch: @@ -14,7 +9,7 @@ jobs: container: image: quay.io/project-codeflare/codeflare-sdk-precommit:v0.0.1 steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Run pre-commit checks run: pre-commit run --all-files diff --git a/.github/workflows/publish-documentation.yaml b/.github/workflows/publish-documentation.yaml new file mode 100644 index 00000000..a96891c3 --- /dev/null +++ b/.github/workflows/publish-documentation.yaml @@ -0,0 +1,45 @@ +name: Publish Documentation + +on: + workflow_dispatch: + inputs: + codeflare_sdk_release_version: + type: string + required: true + description: 'Version number (for example: 0.1.0)' + +permissions: + contents: write + +jobs: + docs: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Install Python + uses: actions/setup-python@v5 + with: + python-version: 3.11 + - name: Install Sphinx + run: | + sudo apt-get update + sudo apt-get install python3-sphinx + - name: Install Poetry + uses: abatilo/actions-poetry@v2 + with: + poetry-version: 1.8.3 + - name: Create new documentation + run: | + python3 -m venv .venv + source .venv/bin/activate + poetry install --with docs + sed -i 's/release = "v[0-9]\+\.[0-9]\+\.[0-9]\+"/release = "${{ github.event.inputs.codeflare_sdk_release_version }}"/' docs/sphinx/conf.py + sphinx-apidoc -o docs/sphinx src/codeflare_sdk "**/*test_*" --force # Generate docs but ignore test files + make html -C docs/sphinx + - name: Deploy to GitHub Pages + uses: 
peaceiris/actions-gh-pages@v3 + with: + publish_branch: gh-pages + github_token: ${{ secrets.GITHUB_TOKEN }} + publish_dir: docs/sphinx/_build/html + force_orphan: true diff --git a/.github/workflows/python-app.yml b/.github/workflows/python-app.yml deleted file mode 100644 index f22e8f22..00000000 --- a/.github/workflows/python-app.yml +++ /dev/null @@ -1,38 +0,0 @@ -# This workflow will install Python dependencies, run tests and lint with a single version of Python -# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions - -name: Python application - -on: - push: - branches: [ main ] - pull_request: - branches: [ main ] - -jobs: - build: - - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v2 - - name: Set up Python 3.9 - uses: actions/setup-python@v2 - with: - python-version: 3.9 - - name: Install dependencies - run: | - python -m pip install --upgrade pip - pip install pytest==6.2.4 - pip install pytest-mock==3.6.1 - pip install coverage - pip install black==22.3.0 - if [ -f requirements.txt ]; then pip install -r requirements.txt; fi - - name: Check formatting with black - run: | - black --check . 
- - name: Test with pytest and check coverage - run: | - coverage run -m --source=src pytest -v tests/unit_test.py - coverage=$(coverage report -m | tail -1 | tail -c 4 | head -c 2) - if (( $coverage < 90 )); then exit 1; else echo "Coverage passed, ${coverage}%"; fi diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index a13d92f3..c3e47dab 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -17,29 +17,34 @@ on: default: 'project-codeflare' python_version: type: string - default: "3.8" + default: "3.11" required: true poetry_version: type: string - default: "1.5.1" + default: "1.8.3" required: true codeflare-repository-organization: type: string default: "project-codeflare" +env: + PR_BRANCH_NAME: snyk-tag-monitoring-${{ github.run_id }} + jobs: release: runs-on: ubuntu-latest permissions: contents: write id-token: write # This permission is required for trusted publishing - env: - PR_BRANCH_NAME: adjustments-release-${{ github.event.inputs.release-version }} + pull-requests: write # This permission is required for creating PRs + actions: write # This permission is required for running actions steps: - name: Checkout the repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 + with: + token: ${{ secrets.GH_CLI_TOKEN }} - name: Install Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ github.event.inputs.python_version }} - name: Install Poetry @@ -50,53 +55,54 @@ jobs: run: poetry version "${{ github.event.inputs.release-version }}" - name: Run poetry install run: poetry install --with docs - - name: Run poetry build - run: poetry build - name: Create new documentation - run: poetry run pdoc --html -o docs src/codeflare_sdk && pushd docs && rm -rf cluster job utils && mv codeflare_sdk/* . 
&& rm -rf codeflare_sdk && popd && find docs -type f -name "*.html" -exec bash -c "echo '' >> {}" \; - - - name: Commit changes in docs - uses: stefanzweifel/git-auto-commit-action@v4 - with: - file_pattern: 'docs' - commit_message: "Changes in docs for release: v${{ github.event.inputs.release-version }}" - create_branch: true - branch: ${{ env.PR_BRANCH_NAME }} - - name: Create a PR with code changes run: | - if git branch -a | grep "${{ env.PR_BRANCH_NAME }}"; then - GIT_BRANCH=${GITHUB_REF#refs/heads/} - gh pr create --base "$GIT_BRANCH" --fill --head "${{ env.PR_BRANCH_NAME }}" --label "lgtm" --label "approved" - fi + gh workflow run publish-documentation.yaml \ + --repo ${{ github.event.inputs.codeflare-repository-organization }}/codeflare-sdk \ + --ref ${{ github.ref }} \ + --field codeflare_sdk_release_version=${{ github.event.inputs.release-version }} env: GITHUB_TOKEN: ${{ secrets.CODEFLARE_MACHINE_ACCOUNT_TOKEN }} - - name: Wait until PR with code changes is merged - run: | - if git branch -a | grep "${{ env.PR_BRANCH_NAME }}"; then - timeout 3600 bash -c 'until [[ $(gh pr view '${{ env.PR_BRANCH_NAME }}' --json state --jq .state) == "MERGED" ]]; do sleep 5 && echo "$(gh pr view '${{ env.PR_BRANCH_NAME }}' --json state --jq .state)"; done' - fi + - name: Copy demo notebooks into SDK package + run: cp -r demo-notebooks src/codeflare_sdk/demo-notebooks + - name: Run poetry build + run: poetry build env: - GITHUB_TOKEN: ${{ github.TOKEN }} + GITHUB_TOKEN: ${{ secrets.CODEFLARE_MACHINE_ACCOUNT_TOKEN }} - name: Create Github release uses: ncipollo/release-action@v1 with: tag: "v${{ github.event.inputs.release-version }}" + generateReleaseNotes: true - name: Publish package distributions to PyPI uses: pypa/gh-action-pypi-publish@release/v1 - - name: Notebook Image Build and Push + - name: Sync ODH Notebooks run: | - gh workflow run image-build-and-push.yml --repo ${{ github.event.inputs.codeflare-repository-organization }}/codeflare-sdk --ref ${{ github.ref }} 
--field is-stable=${{ github.event.inputs.is-stable }} --field release-version=${{ github.event.inputs.release-version }} --field quay-organization=${{ github.event.inputs.quay-organization }} + gh workflow run odh-notebooks-sync.yml \ + --repo ${{ github.event.inputs.codeflare-repository-organization }}/codeflare-sdk \ + --ref ${{ github.ref }} \ + --field upstream-repository-organization=opendatahub-io \ + --field codeflare-repository-organization=${{ github.event.inputs.codeflare-repository-organization }} \ + --field codeflare_sdk_release_version=${{ github.event.inputs.release-version }} env: GITHUB_TOKEN: ${{ secrets.CODEFLARE_MACHINE_ACCOUNT_TOKEN }} shell: bash - - name: Wait for Notebook image build and push to finish - run: | - # wait for a while for Run to be started - sleep 5 - run_id=$(gh run list --workflow image-build-and-push.yaml --repo ${{ github.event.inputs.codeflare-repository-organization }}/codeflare-sdk --limit 1 --json databaseId --jq .[].databaseId) - gh run watch ${run_id} --repo ${{ github.event.inputs.codeflare-repository-organization }}/codeflare-sdk --interval 10 --exit-status + - name: Install Snyk CLI and setup monitoring for new release tag env: - GITHUB_TOKEN: ${{ secrets.CODEFLARE_MACHINE_ACCOUNT_TOKEN }} - shell: bash + SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }} + SNYK_ORG: ${{ secrets.SNYK_ORG }} + run: | + echo "Installing Snyk CLI" + npm install -g snyk + + echo "Fetching tags" + git fetch origin 'refs/tags/*:refs/tags/*' + + echo "Authenticating with Snyk" + snyk auth ${SNYK_TOKEN} + + echo "Scanning project: codeflare-sdk/v${{ github.event.inputs.release-version }}" + git checkout v${{ github.event.inputs.release-version }} + snyk monitor --all-projects --exclude=requirements.txt --org=${SNYK_ORG} --target-reference="$(git describe --tags)" diff --git a/.github/workflows/snyk-security.yaml b/.github/workflows/snyk-security.yaml new file mode 100644 index 00000000..ba4af2dc --- /dev/null +++ b/.github/workflows/snyk-security.yaml 
@@ -0,0 +1,29 @@ +name: Snyk Security +on: + push: + branches: + - main + +jobs: + snyk-scan: + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Install Snyk CLI + run: npm install -g snyk + + - name: Snyk Monitor and Test multiple projects + env: + SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }} + SNYK_ORG: ${{ secrets.SNYK_ORG }} + run: | + echo "Fetching tags" + git fetch origin 'refs/tags/*:refs/tags/*' + + echo "Authenticating with Snyk" + snyk auth ${SNYK_TOKEN} + + echo "Scanning project: codeflare-sdk/main" + snyk monitor --all-projects --exclude=requirements.txt --org=${SNYK_ORG} --target-reference="main" diff --git a/.github/workflows/ui_notebooks_test.yaml b/.github/workflows/ui_notebooks_test.yaml new file mode 100644 index 00000000..1b5ad524 --- /dev/null +++ b/.github/workflows/ui_notebooks_test.yaml @@ -0,0 +1,115 @@ +name: UI notebooks tests + +on: + pull_request: + branches: [ main ] + types: [ labeled ] + +concurrency: + group: ${{ github.head_ref }}-${{ github.workflow }} + cancel-in-progress: true + +env: + CODEFLARE_OPERATOR_IMG: "quay.io/project-codeflare/codeflare-operator:dev" + +jobs: + verify-3_widget_example: + if: ${{ contains(github.event.pull_request.labels.*.name, 'test-guided-notebooks') || contains(github.event.pull_request.labels.*.name, 'test-ui-notebooks') }} + runs-on: ubuntu-latest-4core + + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + submodules: recursive + + - name: Checkout common repo code + uses: actions/checkout@v4 + with: + repository: "project-codeflare/codeflare-common" + ref: "main" + path: "common" + + - name: Checkout CodeFlare operator repository + uses: actions/checkout@v4 + with: + repository: project-codeflare/codeflare-operator + path: codeflare-operator + + - name: Set Go + uses: actions/setup-go@v5 + with: + go-version-file: "./codeflare-operator/go.mod" + cache-dependency-path: "./codeflare-operator/go.sum" + + - name: Set up gotestfmt + uses: 
gotesttools/gotestfmt-action@v2 + with: + token: ${{ secrets.GITHUB_TOKEN }} + + - name: Set up specific Python version + uses: actions/setup-python@v5 + with: + python-version: "3.11" + cache: "pip" # caching pip dependencies + + - name: Setup and start KinD cluster + uses: ./common/github-actions/kind + + - name: Deploy CodeFlare stack + id: deploy + run: | + cd codeflare-operator + echo Setting up CodeFlare stack + make setup-e2e + echo Deploying CodeFlare operator + make deploy -e IMG="${CODEFLARE_OPERATOR_IMG}" -e ENV="e2e" + kubectl wait --timeout=120s --for=condition=Available=true deployment -n openshift-operators codeflare-operator-manager + cd .. + + - name: Setup Guided notebooks execution + run: | + echo "Installing papermill and dependencies..." + pip install poetry ipython ipykernel + poetry config virtualenvs.create false + echo "Installing SDK..." + poetry install --with test,docs + + - name: Install Yarn dependencies + run: | + poetry run yarn install + poetry run yarn playwright install chromium + working-directory: ui-tests + + - name: Fix 3_widget_example.ipynb notebook for test + run: | + # Remove login/logout cells, as KinD doesn't support authentication using token + jq -r 'del(.cells[] | select(.source[] | contains("Create authentication object for user permissions")))' 3_widget_example.ipynb > 3_widget_example.ipynb.tmp && mv 3_widget_example.ipynb.tmp 3_widget_example.ipynb + jq -r 'del(.cells[] | select(.source[] | contains("auth.logout()")))' 3_widget_example.ipynb > 3_widget_example.ipynb.tmp && mv 3_widget_example.ipynb.tmp 3_widget_example.ipynb + # Set explicit namespace as SDK need it (currently) to resolve local queues + sed -i "s|head_memory_limits=2,|head_memory_limits=2, namespace='default',|" 3_widget_example.ipynb + sed -i "s|view_clusters()|view_clusters('default')|" 3_widget_example.ipynb + working-directory: demo-notebooks/guided-demos + + - name: Run UI notebook tests + run: | + set -euo pipefail + + poetry run yarn test + 
working-directory: ui-tests + + - name: Upload Playwright Test assets + if: always() + uses: actions/upload-artifact@v4 + with: + name: ipywidgets-test-assets + path: | + ui-tests/test-results + + - name: Upload Playwright Test report + if: always() + uses: actions/upload-artifact@v4 + with: + name: ipywidgets-test-report + path: | + ui-tests/playwright-report diff --git a/.github/workflows/unit-tests.yml b/.github/workflows/unit-tests.yml new file mode 100755 index 00000000..e38e6973 --- /dev/null +++ b/.github/workflows/unit-tests.yml @@ -0,0 +1,35 @@ +name: Python Tests + +on: + pull_request: + branches: [ main, ray-jobs-feature ] + push: + branches: [ main, ray-jobs-feature ] + +jobs: + unit-tests: + + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Set up python + uses: actions/setup-python@v5 + with: + python-version: '3.11' + - name: Install poetry + run: pip install poetry + - name: Install dependencies with poetry + run: | + poetry config virtualenvs.create false + poetry lock + poetry install --with test + - name: Test with pytest and check coverage + run: | + coverage run --omit="src/**/test_*.py,src/codeflare_sdk/common/utils/unit_test_support.py" -m pytest + coverage=$(coverage report -m | tail -1 | tail -c 4 | head -c 2) + if (( $coverage < 90 )); then echo "Coverage failed at ${coverage}%"; exit 1; else echo "Coverage passed, ${coverage}%"; fi + - name: Upload to Codecov + uses: codecov/codecov-action@v4 + with: + token: ${{ secrets.CODECOV_TOKEN }} diff --git a/.gitignore b/.gitignore index fbb31b2b..43684cb4 100644 --- a/.gitignore +++ b/.gitignore @@ -4,8 +4,17 @@ __pycache__/ .coverage Pipfile Pipfile.lock -poetry.lock .venv* build/ tls-cluster-namespace quicktest.yaml +node_modules +.DS_Store +ui-tests/playwright-report +ui-tests/test-results +/src/codeflare_sdk.egg-info/ +docs/sphinx/_build +docs/sphinx/codeflare_sdk.*.rst +docs/sphinx/codeflare_sdk.rst +docs/sphinx/modules.rst +.idea/ diff --git 
a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 89e037cd..7928084d 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -7,6 +7,7 @@ repos: - id: trailing-whitespace - id: end-of-file-fixer - id: check-yaml + args: [--allow-multiple-documents] - id: check-added-large-files - repo: https://github.com/psf/black rev: 23.3.0 diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 00000000..1d6371db --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,90 @@ +# Contributing to the CodeFlare SDK + +Thank you for your interest in contributing to the CodeFlare SDK! + +## Getting Started + +### Prerequisites + +- Python 3.11 +- [Poetry](https://python-poetry.org/) + +### Setting Up Your Development Environment + +1. **Clone the repository:** + + ```sh + git clone https://github.com/project-codeflare/codeflare-sdk.git + cd codeflare-sdk + ``` + +2. Create a Poetry virtual environment: + + ```sh + poetry shell + ``` + +3. Install dependencies: + + ```sh + poetry install + ``` + + - To include test dependencies, run: + + ```sh + poetry install --with test + ``` + + - To include docs dependencies, run: + + ```sh + poetry install --with docs + ``` + + - To include both test and docs dependencies, run: + + ```sh + poetry install --with test,docs + ``` + +## Development Workflow + +### Pre-commit + +We use pre-commit to ensure consistent code formatting. To enable pre-commit hooks, run: + +```sh +pre-commit install +``` + +## Testing + +To install CodeFlare SDK in editable mode, run: + +```sh +pip install -e . 
+``` + +### Unit Testing + +To run the unit tests, execute: + +```sh +pytest -v src/codeflare_sdk +``` + +### Local e2e Testing + +- Please follow the [e2e documentation](https://github.com/project-codeflare/codeflare-sdk/blob/main/docs/sphinx/user-docs/e2e.rst) + +#### Code Coverage + +- Run tests with the following command: `coverage run -m pytest` +- To then view a code coverage report w/ missing lines, run `coverage report -m` + +### Code Formatting + +- To check file formatting, in top-level dir run `black --check .` +- To auto-reformat all files, remove the `--check` flag +- To reformat an individual file, run `black ` diff --git a/OWNERS b/OWNERS index d6c91274..78bda8e8 100644 --- a/OWNERS +++ b/OWNERS @@ -1,8 +1,33 @@ approvers: - - maxusmusti - - MichaelClifford + - astefanutti + - Bobbins228 + - CathalOConnorRH + - chipspeak + - ChristianZaccaria + - dimakis + - Fiona-Waters + - franciscojavierarceo + - kpostoffice + - kryanbeane + - laurafitzgerald + - pawelpaszki + - pmccarthy + - szaher + - varshaprasad96 reviewers: - - anishasthana + - astefanutti + - Bobbins228 + - CathalOConnorRH + - chipspeak + - ChristianZaccaria + - dimakis + - Fiona-Waters + - franciscojavierarceo - kpostoffice - - maxusmusti - - MichaelClifford + - kryanbeane + - laurafitzgerald + - pawelpaszki + - pmccarthy + - szaher + - varshaprasad96 + - Ygnas diff --git a/README.md b/README.md index 6f5e6881..ffc22626 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ -# Codeflare-SDK +# CodeFlare SDK -[![Python application](https://github.com/project-codeflare/codeflare-sdk/actions/workflows/python-app.yml/badge.svg?branch=main)](https://github.com/project-codeflare/codeflare-sdk/actions/workflows/python-app.yml) +[![Python 
application](https://github.com/project-codeflare/codeflare-sdk/actions/workflows/unit-tests.yml/badge.svg?branch=main)](https://github.com/project-codeflare/codeflare-sdk/actions/workflows/unit-tests.yml) ![coverage badge](./coverage.svg) An intuitive, easy-to-use python interface for batch resource requesting, access, job submission, and observation. Simplifying the developer's life while enabling access to high-performance compute resources, either in the cloud or on-prem. @@ -8,10 +8,10 @@ An intuitive, easy-to-use python interface for batch resource requesting, access For guided demos and basics walkthroughs, check out the following links: - Guided demo notebooks available [here](https://github.com/project-codeflare/codeflare-sdk/tree/main/demo-notebooks/guided-demos), and copies of the notebooks with [expected output](https://github.com/project-codeflare/codeflare-sdk/tree/main/demo-notebooks/guided-demos/notebook-ex-outputs) also available -- Note that these notebooks will work with the latest `codeflare-sdk` PyPI release. For testing and experimentation with `main` branch, please use the [preview notebooks](https://github.com/project-codeflare/codeflare-sdk/tree/main/demo-notebooks/guided-demos/preview_nbs) +- these demos can be copied into your current working directory when using the `codeflare-sdk` by using the `codeflare_sdk.copy_demo_nbs()` function - Additionally, we have a [video walkthrough](https://www.youtube.com/watch?v=U76iIfd9EmE) of these basic demos from June, 2023 -Full documentation can be found [here](https://project-codeflare.github.io/codeflare-sdk/) +Full documentation can be found [here](https://project-codeflare.github.io/codeflare-sdk/index.html) ## Installation @@ -19,39 +19,7 @@ Can be installed via `pip`: `pip install codeflare-sdk` ## Development -### Prerequisites - -We recommend using Python 3.9 for development. 
-Install development specific dependencies: - `$ pip install -r requirements-dev.txt` - -Additional dependencies can be found in `requirements.txt`: `$ pip install -r requirements.txt` - -### Pre-commit - -We use pre-commit to make sure the code is consistently formatted. To make sure that pre-commit is run every time you commit changes, simply run `pre-commit install` - -### Testing - -- To install codeflare-sdk in editable mode, run `pip install -e .` from the repo root. -- To run the unit tests, run `pytest -v tests/unit_test.py` -- Any new test functions/scripts can be added into the `tests` folder -- NOTE: Functional tests coming soon, will live in `tests/func_test.py` - -#### Code Coverage - -- Run tests with the following command: `coverage run -m --source=src pytest tests/unit_test.py` -- To then view a code coverage report w/ missing lines, run `coverage report -m` - -### Code Formatting - -- To check file formatting, in top-level dir run `black --check .` -- To auto-reformat all files, remove the `--check` flag -- To reformat an individual file, run `black ` - -### Package Build - -To build the python package: `$ poetry build` +Please see our [CONTRIBUTING.md](./CONTRIBUTING.md) for detailed instructions. ## Release Instructions @@ -64,17 +32,10 @@ It is possible to use the Release Github workflow to do the release. This is gen The following instructions apply when doing release manually. This may be required in instances where the automation is failing. - Check and update the version in "pyproject.toml" file. -- Generate new documentation. -`pdoc --html -o docs src/codeflare_sdk && pushd docs && rm -rf cluster job utils && mv codeflare_sdk/* . && rm -rf codeflare_sdk && popd && find docs -type f -name "*.html" -exec bash -c "echo '' >> {}" \;` (it is possible to install **pdoc** using the following command `poetry install --with docs`) - Commit all the changes to the repository. - Create Github release (). - Build the Python package. 
`poetry build` - If not present already, add the API token to Poetry. `poetry config pypi-token.pypi API_TOKEN` - Publish the Python package. `poetry publish` -- Change directory to custom-nb-image. `cd custom-nb-image` -- Set tag `export tag=TAG` -- Build the container image. `podman build --build-arg SDK_VERSION= -t quay.io/project-codeflare/notebook:${tag} .` -- Login to quay.io. `podman login quay.io` -- Push the image. `podman push quay.io/project-codeflare/notebook:${tag}` -- Push the stable image tag `podman push quay.io/project-codeflare/notebook:${tag} quay.io/project-codeflare/notebook:stable` +- Trigger the [Publish Documentation](https://github.com/project-codeflare/codeflare-sdk/actions/workflows/publish-documentation.yaml) workflow diff --git a/codecov.yml b/codecov.yml new file mode 100644 index 00000000..550965e6 --- /dev/null +++ b/codecov.yml @@ -0,0 +1,3 @@ +ignore: + - "**/*.ipynb" + - "demo-notebooks/**" diff --git a/coverage.svg b/coverage.svg index 607d3de4..a8c7e72a 100644 --- a/coverage.svg +++ b/coverage.svg @@ -15,7 +15,7 @@ coverage coverage - 91% - 91% + 92% + 92% diff --git a/custom-nb-image/Dockerfile b/custom-nb-image/Dockerfile deleted file mode 100644 index e5393f8b..00000000 --- a/custom-nb-image/Dockerfile +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright 2022 IBM, Red Hat -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -FROM quay.io/opendatahub/notebooks:jupyter-minimal-ubi8-python-3.8-4c8f26e -# Install: torch (v1.12), ray (v2.1.0) and others - -COPY requirements.txt requirements.txt - -RUN pip install -r requirements.txt - -RUN pip uninstall pickle5 -y - -# Install codeflare-sdk and other libraries -ARG SDK_VERSION=0.* -RUN pip install codeflare-sdk==${SDK_VERSION} \ - datasets==2.6.1 \ - transformers==4.23.1 \ - evaluate==0.3.0 - -RUN chmod -R g+w /opt/app-root/lib/python3.8/site-packages && \ - fix-permissions /opt/app-root -P diff --git a/custom-nb-image/imagestream.yaml b/custom-nb-image/imagestream.yaml deleted file mode 100644 index bd17076f..00000000 --- a/custom-nb-image/imagestream.yaml +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright 2022 IBM, Red Hat -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -kind: ImageStream -apiVersion: image.openshift.io/v1 -metadata: - name: codeflare-notebook - labels: - opendatahub.io/notebook-image: 'true' - annotations: - opendatahub.io/notebook-image-name: - "CodeFlare Notebook" - opendatahub.io/notebook-image-desc: "Custom Jupyter notebook image with CodeFlare SDK, Python 3.8, Ray 2.5.0 and PyTorch 1.12.1" -spec: - lookupPolicy: - local: true - tags: - - annotations: - openshift.io/imported-from: quay.io/project-codeflare/notebook - name: latest - from: - kind: DockerImage - name: quay.io/project-codeflare/notebook:latest - importPolicy: - scheduled: true diff --git a/custom-nb-image/requirements.txt b/custom-nb-image/requirements.txt deleted file mode 100644 index 35b5d559..00000000 --- a/custom-nb-image/requirements.txt +++ /dev/null @@ -1,205 +0,0 @@ -# -# These requirements were autogenerated by pipenv -# To regenerate from the project's Pipfile, run: -# -# pipenv lock --requirements -# - --i https://pypi.org/simple -aiohttp-cors==0.7.0 -aiohttp==3.8.3 -aiorwlock==1.3.0 -aiosignal==1.2.0; python_version >= '3.6' -anyio==3.6.1; python_full_version >= '3.6.2' -argon2-cffi-bindings==21.2.0; python_version >= '3.6' -argon2-cffi==21.3.0; python_version >= '3.6' -asgiref==3.5.2; python_version >= '3.7' -asttokens==2.0.8 -astunparse==1.6.3 -async-timeout==4.0.2; python_version >= '3.6' -attrs==22.1.0; python_version >= '3.5' -babel==2.10.3; python_version >= '3.6' -backcall==0.2.0 -bcrypt==4.0.0; python_version >= '3.6' -beautifulsoup4==4.11.1; python_version >= '3.6' -black==22.8.0; python_full_version >= '3.6.2' -bleach==5.0.1; python_version >= '3.7' -blessed==1.19.1; python_version >= '2.7' -boto3==1.17.11 -botocore==1.20.112; python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 3.5' -cachetools==5.2.0; python_version ~= '3.7' -certifi==2022.9.24; python_version >= '3.6' -cffi==1.15.1 -charset-normalizer==2.1.1; python_version >= '3.6' -click==8.0.4; python_version >= '3.6' -cloudpickle==2.2.0; 
python_version >= '3.6' -codeflare==0.1.2.dev0 -colorama==0.4.5; python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4' -colorful==0.5.4 -contourpy==1.0.5; python_version >= '3.7' -cryptography==38.0.1; python_version >= '3.6' -cycler==0.11.0; python_version >= '3.6' -cython==0.29.32 -dask[array,dataframe]==2021.2.0 -dataclasses==0.6 -debugpy==1.6.3; python_version >= '3.7' -decorator==5.1.1; python_version >= '3.5' -defusedxml==0.7.1; python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4' -distlib==0.3.6 -entrypoints==0.4; python_version >= '3.6' -executing==1.1.0 -fastapi==0.85.0 -fastjsonschema==2.16.2 -filelock==3.8.0; python_version >= '3.7' -flatbuffers==22.9.24 -fonttools==4.37.3; python_version >= '3.7' -frozenlist==1.3.1; python_version >= '3.7' -fsspec==2022.8.2 -future==0.18.2; python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3' -gitdb==4.0.9; python_version >= '3.6' -gitpython==3.1.27; python_version >= '3.7' -google-api-core==2.10.1; python_version >= '3.6' -google-auth==2.12.0; python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 3.5' -googleapis-common-protos==1.56.4; python_version >= '3.7' -gpustat==1.0.0 -graphviz==0.20.1; python_version >= '3.7' -greenery==3.3.3 -grpcio==1.43.0; python_version >= '3.6' -h11==0.14.0; python_version >= '3.7' -hyperopt==0.2.5 -idna==3.4; python_version >= '3.5' -importlib-metadata==4.12.0; python_version < '3.10' -importlib-resources==5.9.0; python_version < '3.9' -ipykernel==6.16.0; python_version >= '3.7' -ipython-genutils==0.2.0 -ipython==8.5.0; python_version >= '3.8' -ipywidgets==8.0.2 -iso8601==1.1.0; python_version < '4' and python_full_version >= '3.6.2' -jedi==0.18.1; python_version >= '3.6' -jinja2==3.1.2; python_version >= '3.7' -jmespath==0.10.0; python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3' -joblib==1.2.0; python_version >= '3.7' -json5==0.9.10 -jsonref==0.2 -jsonschema==4.16.0; python_version >= 
'3.7' -jsonsubschema==0.0.6 -jupyter-client==7.3.5; python_version >= '3.7' -jupyter-core==4.11.1; python_version >= '3.7' -jupyter-server-mathjax==0.2.6; python_version >= '3.7' -jupyter-server==1.19.1; python_version >= '3.7' -jupyterlab-git==0.30.0 -jupyterlab-pygments==0.2.2; python_version >= '3.7' -jupyterlab-s3-browser==0.10.1 -jupyterlab-server==2.15.2; python_version >= '3.7' -jupyterlab-widgets==3.0.3; python_version >= '3.7' -jupyterlab==3.4.7; python_version >= '3.7' -kiwisolver==1.4.4; python_version >= '3.7' -kopf==1.35.6 -kubernetes==24.2.0 -lale==0.6.19 -locket==1.0.0; python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3' -lxml==4.9.1; python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4' -markupsafe==2.1.1; python_version >= '3.7' -matplotlib-inline==0.1.6; python_version >= '3.5' -matplotlib==3.6.0 -memory-profiler==0.60.0 -mistune==2.0.4 -msgpack==1.0.4 -multidict==6.0.2; python_version >= '3.7' -mypy-extensions==0.4.3 -nbclassic==0.4.3; python_version >= '3.7' -nbclient==0.6.8; python_version >= '3.7' -nbconvert==7.0.0; python_version >= '3.7' -nbdime==3.1.1; python_version >= '3.6' -nbformat==5.6.1; python_version >= '3.7' -nest-asyncio==1.5.5; python_version >= '3.5' -networkx==2.8.6; python_version >= '3.8' -notebook-shim==0.1.0; python_version >= '3.7' -notebook==6.4.12; python_version >= '3.7' -numpy==1.23.3 -nvidia-ml-py==11.495.46 -oauthlib==3.2.1; python_version >= '3.6' -opencensus-context==0.1.3 -opencensus==0.11.0 -openshift-client==1.0.18 -packaging==21.3 -pandas==1.5.0 -pandocfilters==1.5.0; python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3' -paramiko==2.11.0 -parso==0.8.3; python_version >= '3.6' -partd==1.3.0 -pathspec==0.10.1; python_version >= '3.7' -pexpect==4.8.0; sys_platform != 'win32' -pickleshare==0.7.5 -pillow==9.2.0; python_version >= '3.7' -pkgutil-resolve-name==1.3.10; python_version < '3.9' -platformdirs==2.5.2; python_version >= '3.7' -portion==2.3.0; 
python_version ~= '3.6' -prometheus-client==0.13.1 -prompt-toolkit==3.0.31; python_full_version >= '3.6.2' -protobuf==3.20.1; python_version >= '3.7' -psutil==5.9.2; python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3' -ptyprocess==0.7.0; os_name != 'nt' -pure-eval==0.2.2 -py-spy==0.3.14 -pyarrow==6.0.1 -pyasn1-modules==0.2.8 -pyasn1==0.4.8 -pycparser==2.21 -pydantic==1.10.2; python_version >= '3.7' -pygments==2.13.0; python_version >= '3.6' -pynacl==1.5.0; python_version >= '3.6' -pyparsing==3.0.9; python_full_version >= '3.6.8' -pyrsistent==0.18.1; python_version >= '3.7' -python-dateutil==2.8.2; python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3' -python-dotenv==0.21.0 -python-json-logger==2.0.4; python_version >= '3.5' -pytz==2022.2.1 -pyyaml==6.0; python_version >= '3.6' -pyzmq==24.0.1; python_version >= '3.6' -ray[default]==2.5.0 -requests-oauthlib==1.3.1; python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3' -requests==2.28.1; python_version >= '3.7' and python_version < '4' -rsa==4.9; python_version >= '3.6' -s3fs==0.3.4 -s3transfer==0.3.7 -scikit-learn==1.1.1 -scipy==1.8.1 -send2trash==1.8.0 -singleton-decorator==1.0.0 -six==1.16.0; python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3' -sklearn==0.0 -smart-open==6.2.0 -smmap==5.0.0; python_version >= '3.6' -sniffio==1.3.0; python_version >= '3.7' -sortedcontainers==2.4.0 -soupsieve==2.3.2.post1; python_version >= '3.6' -stack-data==0.5.1 -starlette==0.20.4 -tabulate==0.8.10 -tensorboardx==2.5.1 -terminado==0.15.0; python_version >= '3.7' -threadpoolctl==3.1.0; python_version >= '3.6' -tinycss2==1.1.1; python_version >= '3.6' -tomli==2.0.1; python_full_version < '3.11.0a7' -toolz==0.12.0 -torch==1.12.1 -torchvision==0.13.1 -tornado==6.2; python_version >= '3.7' -tqdm==4.64.1; python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3' -traitlets==5.4.0; python_version >= '3.7' -tune-sklearn==0.4.3 -typing-extensions==4.3.0; 
python_version < '3.10' -urllib3==1.26.12 -uvicorn==0.16.0 -virtualenv==20.16.5; python_version >= '3.6' -wcwidth==0.2.5 -webencodings==0.5.1 -websocket-client==1.4.1; python_version >= '3.7' -wheel==0.37.1; python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4' -widgetsnbextension==4.0.3; python_version >= '3.7' -wrapt==1.14.1; python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4' -xgboost-ray==0.1.10 -xgboost==1.6.2 -yarl==1.8.1; python_version >= '3.7' -zipp==3.8.1; python_version < '3.10' diff --git a/demo-notebooks/additional-demos/batch-inference/remote_offline_bi.ipynb b/demo-notebooks/additional-demos/batch-inference/remote_offline_bi.ipynb new file mode 100644 index 00000000..68b514c4 --- /dev/null +++ b/demo-notebooks/additional-demos/batch-inference/remote_offline_bi.ipynb @@ -0,0 +1,214 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Remote Offline Batch Inference with Ray Data & vLLM Example\n", + "\n", + "This notebook presumes:\n", + "- You have a Ray Cluster URL given to you to run workloads on\n" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "from codeflare_sdk import RayJobClient\n", + "\n", + "# Setup Authentication Configuration\n", + "auth_token = \"XXXX\"\n", + "header = {\"Authorization\": f\"Bearer {auth_token}\"}" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "# Gather the dashboard URL (provided by the creator of the RayCluster)\n", + "ray_dashboard = \"XXXX\" # Replace with the Ray dashboard URL\n", + "\n", + "# Initialize the RayJobClient\n", + "client = RayJobClient(address=ray_dashboard, headers=header, verify=True)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Simple Example Explanation\n", + "\n", + "With the RayJobClient instantiated, lets run some batch inference. 
The following code is stored in `simple_batch_inf.py`, and is used as the entrypoint for the RayJob.\n", + "\n", + "What this processor configuration does:\n", + "- Set up a vLLM engine with your model\n", + "- Configure some settings for GPU processing\n", + "- Defines batch processing parameters (8 requests per batch, 2 GPU workers)\n", + "\n", + "#### Model Source Configuration\n", + "\n", + "The `model_source` parameter supports several loading methods:\n", + "\n", + "* **Hugging Face Hub** (default): Use repository ID `model_source=\"meta-llama/Llama-2-7b-chat-hf\"`\n", + "* **Local Directory**: Use file path `model_source=\"/path/to/my/local/model\"`\n", + "* **Other Sources**: ModelScope via environment variables `VLLM_MODELSCOPE_DOWNLOADS_DIR`\n", + "\n", + "For complete model support and options, see the [official vLLM documentation](https://docs.vllm.ai/en/latest/models/supported_models.html).\n", + "\n", + "```python\n", + "import ray\n", + "from ray.data.llm import build_llm_processor, vLLMEngineProcessorConfig\n", + "\n", + "processor_config = vLLMEngineProcessorConfig(\n", + " model_source=\"replace-me\",\n", + " engine_kwargs=dict(\n", + " enable_lora=False,\n", + " dtype=\"half\",\n", + " max_model_len=1024,\n", + " ),\n", + " # Batch size: Larger batches increase throughput but reduce fault tolerance\n", + " # - Small batches (4-8): Better for fault tolerance and memory constraints\n", + " # - Large batches (16-32): Higher throughput, better GPU utilization\n", + " # - Choose based on your Ray Cluster size and memory availability\n", + " batch_size=8,\n", + " # Concurrency: Number of vLLM engine workers to spawn \n", + " # - Set to match your total GPU count for maximum utilization\n", + " # - Each worker gets assigned to a GPU automatically by Ray scheduler\n", + " # - Can use all GPUs across head and worker nodes\n", + " concurrency=2,\n", + ")\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "With the config 
defined, we can instantiate the processor. This enables batch inference by processing multiple requests through the vLLM engine, with two key steps:\n", + "- **Preprocess**: Converts each row into a structured chat format with system instructions and user queries, preparing the input for the LLM\n", + "- **Postprocess**: Extracts only the generated text from the model response, cleaning up the output\n", + "\n", + "The processor defines the pipeline that will be applied to each row in the dataset, enabling efficient batch processing through Ray Data's distributed execution framework.\n", + "\n", + "```python\n", + "processor = build_llm_processor(\n", + " processor_config,\n", + " preprocess=lambda row: dict(\n", + " messages=[\n", + " {\n", + " \"role\": \"system\",\n", + " \"content\": \"You are a calculator. Please only output the answer \"\n", + " \"of the given equation.\",\n", + " },\n", + " {\"role\": \"user\", \"content\": f\"{row['id']} ** 3 = ?\"},\n", + " ],\n", + " sampling_params=dict(\n", + " temperature=0.3,\n", + " max_tokens=20,\n", + " detokenize=False,\n", + " ),\n", + " ),\n", + " postprocess=lambda row: {\n", + " \"resp\": row[\"generated_text\"],\n", + " },\n", + ")\n", + "```" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ + "#### Running the Pipeline\n", + "Now we can run the batch inference pipeline on our data, it will:\n", + "- In the background, the processor will download the model into memory where vLLM serves it locally (on Ray Cluster) for use in inference\n", + "- Generate a sample Ray Dataset with 32 rows (0-31) to process\n", + "- Run the LLM processor on the dataset, triggering the preprocessing, inference, and postprocessing steps\n", + "- Execute the lazy pipeline and load results into memory\n", + "- Iterate through all outputs and print each response \n", + "\n", + "```python\n", + "ds = ray.data.range(32)\n", + "ds = processor(ds)\n", + "ds = ds.materialize()\n", + "\n", + "for out in
ds.take_all():\n", + " print(out)\n", + " print(\"==========\")\n", + "```\n", + "\n", + "### Job Submission\n", + "\n", + "Now we can submit this job against the Ray Cluster using the `RayJobClient` from earlier " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import tempfile\n", + "import shutil\n", + "\n", + "# Create a clean directory with ONLY your script\n", + "temp_dir = tempfile.mkdtemp()\n", + "shutil.copy(\"simple_batch_inf.py\", temp_dir)\n", + "\n", + "entrypoint_command = \"python simple_batch_inf.py\"\n", + "\n", + "submission_id = client.submit_job(\n", + " entrypoint=entrypoint_command,\n", + " runtime_env={\"working_dir\": temp_dir, \"pip\": \"requirements.txt\"},\n", + ")\n", + "\n", + "print(submission_id + \" successfully submitted\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Get the job's status\n", + "client.get_job_status(submission_id)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Get the job's logs\n", + "client.get_job_logs(submission_id)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.12" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/demo-notebooks/additional-demos/batch-inference/requirements.txt b/demo-notebooks/additional-demos/batch-inference/requirements.txt new file mode 100644 index 00000000..d9e8b73b --- /dev/null +++ b/demo-notebooks/additional-demos/batch-inference/requirements.txt @@ -0,0 +1,4 @@ +vllm +transformers +triton>=2.0.0 +torch>=2.0.0 diff --git 
a/demo-notebooks/additional-demos/batch-inference/simple_batch_inf.py b/demo-notebooks/additional-demos/batch-inference/simple_batch_inf.py new file mode 100644 index 00000000..c86ed15b --- /dev/null +++ b/demo-notebooks/additional-demos/batch-inference/simple_batch_inf.py @@ -0,0 +1,62 @@ +import ray +from ray.data.llm import build_llm_processor, vLLMEngineProcessorConfig + + +# 1. Construct a vLLM processor config. +processor_config = vLLMEngineProcessorConfig( + # The base model. + model_source="unsloth/Llama-3.2-1B-Instruct", + # vLLM engine config. + engine_kwargs=dict( + enable_lora=False, + # # Older GPUs (e.g. T4) don't support bfloat16. You should remove + # # this line if you're using later GPUs. + dtype="half", + # Reduce the model length to fit small GPUs. You should remove + # this line if you're using large GPUs. + max_model_len=1024, + ), + # The batch size used in Ray Data. + batch_size=8, + # Use one GPU in this example. + concurrency=1, + # If you save the LoRA adapter in S3, you can set the following path. + # dynamic_lora_loading_path="s3://your-lora-bucket/", +) + +# 2. Construct a processor using the processor config. +processor = build_llm_processor( + processor_config, + preprocess=lambda row: dict( + # Remove the LoRA model specification + messages=[ + { + "role": "system", + "content": "You are a calculator. Please only output the answer " + "of the given equation.", + }, + {"role": "user", "content": f"{row['id']} ** 3 = ?"}, + ], + sampling_params=dict( + temperature=0.3, + max_tokens=20, + detokenize=False, + ), + ), + postprocess=lambda row: { + "resp": row["generated_text"], + }, +) + +# 3. Synthesize a dataset with 32 rows. +ds = ray.data.range(32) +# 4. Apply the processor to the dataset. Note that this line won't kick off +# anything because processor is execution lazily. +ds = processor(ds) +# Materialization kicks off the pipeline execution. +ds = ds.materialize() + +# 5. Print all outputs. 
+for out in ds.take_all(): + print(out) + print("==========") diff --git a/demo-notebooks/additional-demos/hf_interactive.ipynb b/demo-notebooks/additional-demos/hf_interactive.ipynb index 32e7be41..9b32ab2e 100644 --- a/demo-notebooks/additional-demos/hf_interactive.ipynb +++ b/demo-notebooks/additional-demos/hf_interactive.ipynb @@ -13,7 +13,7 @@ "id": "d4acfb10-1aa1-445d-947e-396ea5ebed1a", "metadata": {}, "source": [ - "In this notebook you will learn how to leverage the **[huggingface](https://huggingface.co/)** support in ray ecosystem to carry out a text classification task using transfer learning. We will be referencing the example **[here](https://huggingface.co/docs/transformers/tasks/sequence_classification)**" + "In this notebook you will learn how to leverage the **[huggingface](https://huggingface.co/)** support in ray ecosystem to carry out a text classification task using transfer learning. We will be referencing the examples **[here](https://huggingface.co/docs/transformers/tasks/sequence_classification)** and **[here](https://docs.ray.io/en/latest/train/getting-started-transformers.html)**." ] }, { @@ -21,9 +21,7 @@ "id": "70b77929-e96c-434e-ada3-8b14795bfbb1", "metadata": {}, "source": [ - "The example carries out a text classification task on **[imdb dataset](https://huggingface.co/datasets/imdb)** and tries to classify the movie reviews as positive or negative. Huggingface library provides an easy way to build a model and the dataset to carry out this classification task. 
In this case we will be using **distilbert-base-uncased** model which is a **BERT** based model.\n", - "\n", - "Huggingface has a **[built in support for ray ecosystem](https://docs.ray.io/en/releases-1.13.0/_modules/ray/ml/train/integrations/huggingface/huggingface_trainer.html)** which allows the huggingface trainer to scale on CodeFlare and can scale the training as we add additional gpus and can run distributed training across multiple GPUs that will help scale out the training.\n" + "The example carries out a text classification task on **[imdb dataset](https://huggingface.co/datasets/imdb)** and tries to classify the movie reviews as positive or negative. Huggingface library provides an easy way to build a model and the dataset to carry out this classification task. In this case we will be using **distilbert-base-uncased** model which is a **BERT** based model." ] }, { @@ -36,14 +34,13 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": null, "id": "c737a768-6e31-4767-a301-60ae932b4ed9", "metadata": {}, "outputs": [], "source": [ "# Import pieces from codeflare-sdk\n", - "from codeflare_sdk.cluster.cluster import Cluster, ClusterConfiguration\n", - "from codeflare_sdk.cluster.auth import TokenAuthentication" + "from codeflare_sdk import Cluster, ClusterConfiguration, TokenAuthentication" ] }, { @@ -69,35 +66,38 @@ "id": "bc27f84c", "metadata": {}, "source": [ - "Here, we want to define our cluster by specifying the resources we require for our batch workload. Below, we define our cluster object (which generates a corresponding AppWrapper)." + "Here, we want to define our cluster by specifying the resources we require for our batch workload. 
Below, we define our cluster object (which generates a corresponding Ray Cluster).\n", + "\n", + "NOTE: The default images used by the CodeFlare SDK for creating a RayCluster resource depend on the installed Python version:\n", + "\n", + "- For Python 3.11: 'quay.io/modh/ray:2.47.1-py311-cu121'\n", + "- For Python 3.12: 'quay.io/modh/ray:2.47.1-py312-cu128'\n", + "\n", + "If you prefer to use a custom Ray image that better suits your needs, you can specify it in the image field to override the default." ] }, { "cell_type": "code", - "execution_count": 13, + "execution_count": null, "id": "220b9d85-3a3c-4c0c-aaf2-0d866823dcd8", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Written to: hfgputest.yaml\n" - ] - } - ], + "outputs": [], "source": [ - "# Create our cluster and submit appwrapper\n", - "cluster = Cluster(ClusterConfiguration(name='hfgputest', \n", - " namespace=\"default\",\n", + "# Create our cluster and submit\n", + "# The SDK will try to find the name of your default local queue based on the annotation \"kueue.x-k8s.io/default-queue\": \"true\" unless you specify the local queue manually below\n", + "cluster_name= \"hfgputest\"\n", + "cluster = Cluster(ClusterConfiguration(name=cluster_name, \n", + " head_extended_resource_requests={'nvidia.com/gpu':1}, # For GPU enabled workloads set the head_extended_resource_requests and worker_extended_resource_requests\n", + " worker_extended_resource_requests={'nvidia.com/gpu':1},\n", " num_workers=1,\n", - " min_cpus=8, \n", - " max_cpus=8, \n", - " min_memory=16, \n", - " max_memory=16, \n", - " num_gpus=4,\n", - " image=\"quay.io/project-codeflare/ray:2.5.0-py38-cu116\",\n", - " instascale=True, machine_types=[\"m5.xlarge\", \"p3.8xlarge\"]))" + " worker_cpu_requests=8, \n", + " worker_cpu_limits=8, \n", + " worker_memory_requests=16, \n", + " worker_memory_limits=16, \n", + " # image=\"\", # Optional Field \n", + " write_to_file=False, # When enabled Ray 
Cluster yaml files are written to /HOME/.codeflare/resources \n", + " # local_queue=\"local-queue-name\" # Specify the local queue manually\n", + " ))" ] }, { @@ -105,7 +105,7 @@ "id": "12eef53c", "metadata": {}, "source": [ - "Next, we want to bring our cluster up, so we call the `up()` function below to submit our cluster AppWrapper yaml onto the MCAD queue, and begin the process of obtaining our resource cluster." + "Next, we want to bring our cluster up, so we call the `up()` function below to submit our Ray Cluster onto the queue, and begin the process of obtaining our resource cluster." ] }, { @@ -115,7 +115,7 @@ "metadata": {}, "outputs": [], "source": [ - "cluster.up()" + "cluster.apply()" ] }, { @@ -128,52 +128,10 @@ }, { "cell_type": "code", - "execution_count": 17, + "execution_count": null, "id": "4d0db5f5-22f1-4806-ae7e-a0ee865625c1", "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "
╭─────────────────────────╮\n",
-       "│   🚀 List of CodeFlare  │\n",
-       "│   clusters in queue🚀   │\n",
-       "│ +-----------+---------+ │\n",
-       "│ | Name      | Status  | │\n",
-       "│ +===========+=========+ │\n",
-       "│ | hfgputest | pending | │\n",
-       "│ |           |         | │\n",
-       "│ +-----------+---------+ │\n",
-       "╰─────────────────────────╯\n",
-       "
\n" - ], - "text/plain": [ - "╭─────────────────────────╮\n", - "│ \u001b[3m \u001b[0m\u001b[1;3m 🚀 List of CodeFlare\u001b[0m\u001b[3m \u001b[0m │\n", - "│ \u001b[3m \u001b[0m\u001b[1;3mclusters in queue🚀\u001b[0m\u001b[3m \u001b[0m │\n", - "│ +-----------+---------+ │\n", - "│ |\u001b[1m \u001b[0m\u001b[1mName \u001b[0m\u001b[1m \u001b[0m|\u001b[1m \u001b[0m\u001b[1mStatus \u001b[0m\u001b[1m \u001b[0m| │\n", - "│ +===========+=========+ │\n", - "│ |\u001b[36m \u001b[0m\u001b[36mhfgputest\u001b[0m\u001b[36m \u001b[0m|\u001b[35m \u001b[0m\u001b[35mpending\u001b[0m\u001b[35m \u001b[0m| │\n", - "│ |\u001b[36m \u001b[0m\u001b[36m \u001b[0m\u001b[36m \u001b[0m|\u001b[35m \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m| │\n", - "│ +-----------+---------+ │\n", - "╰─────────────────────────╯\n" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/plain": [ - "(False, )" - ] - }, - "execution_count": 17, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "cluster.status()" ] @@ -208,75 +166,17 @@ }, { "cell_type": "code", - "execution_count": 18, + "execution_count": null, "id": "06a54428-f186-4c27-948e-4eaf9c0e34b5", "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "
                  🚀 List of CodeFlare clusters 🚀                  \n",
-       "                                                                    \n",
-       " ╭────────────────────────────────────────────────────────────────╮ \n",
-       " │   Owner                                                        │ \n",
-       " │   hfgputest                                        Active ✅   │ \n",
-       " │                                                                │ \n",
-       " │   URI: ray://hfgputest-head-svc.default.svc:10001              │ \n",
-       " │                                                                │ \n",
-       " │   Dashboard🔗                                                  │ \n",
-       " │                                                                │ \n",
-       " │                      Cluster Resources                         │ \n",
-       " │   ╭─ Workers ──╮  ╭───────── Worker specs(each) ─────────╮     │ \n",
-       " │   │  Min  Max  │  │  Memory      CPU         GPU         │     │ \n",
-       " │   │            │  │                                      │     │ \n",
-       " │   │  1    1    │  │  16G~16G     8           4           │     │ \n",
-       " │   │            │  │                                      │     │ \n",
-       " │   ╰────────────╯  ╰──────────────────────────────────────╯     │ \n",
-       " ╰────────────────────────────────────────────────────────────────╯ \n",
-       "
\n" - ], - "text/plain": [ - "\u001b[3m \u001b[0m\u001b[1;3m 🚀 List of CodeFlare clusters 🚀\u001b[0m\u001b[3m \u001b[0m\n", - "\u001b[1m \u001b[0m\u001b[1m \u001b[0m\u001b[1m \u001b[0m\n", - " ╭────────────────────────────────────────────────────────────────╮ \n", - " │ \u001b[1;37;42mOwner\u001b[0m │ \n", - " │ \u001b[1;4mhfgputest\u001b[0m Active ✅ │ \n", - " │ │ \n", - " │ \u001b[1mURI:\u001b[0m ray://hfgputest-head-svc.default.svc:10001 │ \n", - " │ │ \n", - " │ \u001b]8;id=552692;ray-dashboard-hfgputest-default.apps.prepfullinstall.psap.aws.rhperfscale.org\u001b\\\u001b[4;34mDashboard🔗\u001b[0m\u001b]8;;\u001b\\ │ \n", - " │ │ \n", - " │ \u001b[3m Cluster Resources \u001b[0m │ \n", - " │ ╭─ Workers ──╮ ╭───────── Worker specs(each) ─────────╮ │ \n", - " │ │ \u001b[1m \u001b[0m\u001b[1mMin\u001b[0m\u001b[1m \u001b[0m\u001b[1m \u001b[0m\u001b[1mMax\u001b[0m\u001b[1m \u001b[0m │ │ \u001b[1m \u001b[0m\u001b[1mMemory \u001b[0m\u001b[1m \u001b[0m\u001b[1m \u001b[0m\u001b[1mCPU \u001b[0m\u001b[1m \u001b[0m\u001b[1m \u001b[0m\u001b[1mGPU \u001b[0m\u001b[1m \u001b[0m │ │ \n", - " │ │ \u001b[36m \u001b[0m\u001b[36m \u001b[0m\u001b[36m \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m │ │ \u001b[36m \u001b[0m\u001b[36m \u001b[0m\u001b[36m \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m │ │ \n", - " │ │ \u001b[36m \u001b[0m\u001b[36m1 \u001b[0m\u001b[36m \u001b[0m\u001b[35m \u001b[0m\u001b[35m1 \u001b[0m\u001b[35m \u001b[0m │ │ \u001b[36m \u001b[0m\u001b[36m16G~16G \u001b[0m\u001b[36m \u001b[0m\u001b[35m \u001b[0m\u001b[35m8 \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m\u001b[35m4 \u001b[0m\u001b[35m \u001b[0m │ │ \n", - " │ │ \u001b[36m \u001b[0m\u001b[36m \u001b[0m\u001b[36m \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m │ │ \u001b[36m \u001b[0m\u001b[36m \u001b[0m\u001b[36m \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m\u001b[35m 
\u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m │ │ \n", - " │ ╰────────────╯ ╰──────────────────────────────────────╯ │ \n", - " ╰────────────────────────────────────────────────────────────────╯ \n" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/plain": [ - "" - ] - }, - "execution_count": 18, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "cluster.details()" ] }, { "cell_type": "code", - "execution_count": 19, + "execution_count": null, "id": "8ac46c87-70f1-4c70-9648-881151665355", "metadata": {}, "outputs": [], @@ -284,6 +184,27 @@ "ray_cluster_uri = cluster.cluster_uri()" ] }, + { + "cell_type": "markdown", + "id": "64d65c3c", + "metadata": {}, + "source": [ + "Now we can connect directly to our Ray cluster via the Ray python client:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "60276d86", + "metadata": {}, + "outputs": [], + "source": [ + "from codeflare_sdk import generate_cert\n", + "# Create required TLS cert and export the environment variables to enable TLS\n", + "generate_cert.generate_tls_cert(cluster_name, cluster.config.namespace)\n", + "generate_cert.export_env(cluster_name, cluster.config.namespace)" + ] + }, { "cell_type": "markdown", "id": "44dba6a0-8275-4726-8911-6b6ec467b6a3", @@ -294,31 +215,22 @@ }, { "cell_type": "code", - "execution_count": 20, + "execution_count": null, "id": "4c458589-5a17-47c6-a8db-625427ae4fe7", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Ray cluster is up and running: True\n" - ] - } - ], + "outputs": [], "source": [ "#before proceeding make sure the cluster exists and the uri is not empty\n", "assert ray_cluster_uri, \"Ray cluster needs to be started and set before proceeding\"\n", "\n", "import ray\n", - "from ray.air.config import ScalingConfig\n", "\n", "# reset the ray context in case there's already one. 
\n", "ray.shutdown()\n", "# establish connection to ray cluster\n", "\n", "#install additional libraries that will be required for this training\n", - "runtime_env = {\"pip\": [\"transformers\", \"datasets\", \"evaluate\", \"pyarrow<7.0.0\", \"accelerate\"]}\n", + "runtime_env = {\"pip\": [\"transformers==4.41.2\", \"datasets==2.17.0\", \"accelerate==0.31.0\", \"scikit-learn==1.5.0\"]}\n", "\n", "# NOTE: This will work for in-cluster notebook servers (RHODS/ODH), but not for local machines\n", "# To see how to connect from your laptop, go to demo-notebooks/additional-demos/local_interactive.ipynb\n", @@ -348,78 +260,95 @@ "id": "8bdbe888-4f38-4e9a-ae43-67ce89ff9d42", "metadata": {}, "source": [ - "We are using the code based on the example **[here](https://huggingface.co/docs/transformers/tasks/sequence_classification)** . " + "We are using the code based on the examples **[here](https://huggingface.co/docs/transformers/tasks/sequence_classification)** and **[here](https://docs.ray.io/en/latest/train/getting-started-transformers.html)**. 
" ] }, { "cell_type": "code", - "execution_count": 21, + "execution_count": null, "id": "e69994b4-1a13-43fe-b698-2a5374cb941b", "metadata": {}, "outputs": [], "source": [ "@ray.remote\n", "def train_fn():\n", - " from datasets import load_dataset\n", - " import transformers\n", - " from transformers import AutoTokenizer, TrainingArguments\n", - " from transformers import AutoModelForSequenceClassification\n", + " import os\n", " import numpy as np\n", - " from datasets import load_metric\n", - " import ray\n", - " from ray import tune\n", - " from ray.train.huggingface import HuggingFaceTrainer\n", - "\n", - " dataset = load_dataset(\"imdb\")\n", - " tokenizer = AutoTokenizer.from_pretrained(\"distilbert-base-uncased\")\n", - "\n", - " def tokenize_function(examples):\n", - " return tokenizer(examples[\"text\"], padding=\"max_length\", truncation=True)\n", + " from datasets import load_dataset, load_metric\n", + " import transformers\n", + " from transformers import (\n", + " Trainer,\n", + " TrainingArguments,\n", + " AutoTokenizer,\n", + " AutoModelForSequenceClassification,\n", + " )\n", + " import ray.train.huggingface.transformers\n", + " from ray.train import ScalingConfig\n", + " from ray.train.torch import TorchTrainer\n", "\n", - " tokenized_datasets = dataset.map(tokenize_function, batched=True)\n", + " # When running in a multi-node cluster you will need persistent storage that is accessible across all worker nodes. 
\n", + " # See www.github.com/project-codeflare/codeflare-sdk/tree/main/docs/s3-compatible-storage.md for more information.\n", + " \n", + " def train_func():\n", + " # Datasets\n", + " dataset = load_dataset(\"imdb\")\n", + " tokenizer = AutoTokenizer.from_pretrained(\"distilbert-base-uncased\")\n", "\n", - " #using a fraction of dataset but you can run with the full dataset\n", - " small_train_dataset = tokenized_datasets[\"train\"].shuffle(seed=42).select(range(100))\n", - " small_eval_dataset = tokenized_datasets[\"test\"].shuffle(seed=42).select(range(100))\n", + " def tokenize_function(examples):\n", + " return tokenizer(examples[\"text\"], padding=\"max_length\", truncation=True)\n", "\n", - " print(f\"len of train {small_train_dataset} and test {small_eval_dataset}\")\n", + " small_train_dataset = (\n", + " dataset[\"train\"].select(range(100)).map(tokenize_function, batched=True)\n", + " )\n", + " small_eval_dataset = (\n", + " dataset[\"test\"].select(range(100)).map(tokenize_function, batched=True)\n", + " )\n", "\n", - " ray_train_ds = ray.data.from_huggingface(small_train_dataset)\n", - " ray_evaluation_ds = ray.data.from_huggingface(small_eval_dataset)\n", + " # Model\n", + " model = AutoModelForSequenceClassification.from_pretrained(\n", + " \"distilbert-base-uncased\", num_labels=2\n", + " )\n", "\n", - " def compute_metrics(eval_pred):\n", - " metric = load_metric(\"accuracy\")\n", - " logits, labels = eval_pred\n", - " predictions = np.argmax(logits, axis=-1)\n", - " return metric.compute(predictions=predictions, references=labels)\n", + " def compute_metrics(eval_pred):\n", + " metric = load_metric(\"accuracy\")\n", + " logits, labels = eval_pred\n", + " predictions = np.argmax(logits, axis=-1)\n", + " return metric.compute(predictions=predictions, references=labels)\n", "\n", - " def trainer_init_per_worker(train_dataset, eval_dataset, **config):\n", - " model = 
AutoModelForSequenceClassification.from_pretrained(\"distilbert-base-uncased\", num_labels=2)\n", + " # Hugging Face Trainer\n", + " training_args = TrainingArguments(\n", + " output_dir=\"test_trainer\",\n", + " evaluation_strategy=\"epoch\",\n", + " save_strategy=\"epoch\",\n", + " report_to=\"none\",\n", + " )\n", "\n", - " training_args = TrainingArguments(\"/tmp/hf_imdb/test\", eval_steps=1, disable_tqdm=True, \n", - " num_train_epochs=1, skip_memory_metrics=True,\n", - " learning_rate=2e-5,\n", - " per_device_train_batch_size=16,\n", - " per_device_eval_batch_size=16, \n", - " weight_decay=0.01,)\n", - " return transformers.Trainer(\n", + " trainer = Trainer(\n", " model=model,\n", " args=training_args,\n", - " train_dataset=train_dataset,\n", - " eval_dataset=eval_dataset,\n", - " compute_metrics=compute_metrics\n", + " train_dataset=small_train_dataset,\n", + " eval_dataset=small_eval_dataset,\n", + " compute_metrics=compute_metrics,\n", " )\n", "\n", - " scaling_config = ScalingConfig(num_workers=4, use_gpu=True) #num workers is the number of gpus\n", "\n", - " # we are using the ray native HuggingFaceTrainer, but you can swap out to use non ray Huggingface Trainer. Both have the same method signature. 
\n", - " # the ray native HFTrainer has built in support for scaling to multiple GPUs\n", - " trainer = HuggingFaceTrainer(\n", - " trainer_init_per_worker=trainer_init_per_worker,\n", - " scaling_config=scaling_config,\n", - " datasets={\"train\": ray_train_ds, \"evaluation\": ray_evaluation_ds},\n", + " callback = ray.train.huggingface.transformers.RayTrainReportCallback()\n", + " trainer.add_callback(callback)\n", + "\n", + " trainer = ray.train.huggingface.transformers.prepare_trainer(trainer)\n", + "\n", + " trainer.train()\n", + "\n", + "\n", + " ray_trainer = TorchTrainer(\n", + " train_func,\n", + " scaling_config=ScalingConfig(num_workers=2, use_gpu=True),\n", + " # Configure persistent storage that is accessible across \n", + " # all worker nodes.\n", + " # Uncomment and update the RunConfig below to include your storage details.\n", + " # run_config=ray.train.RunConfig(storage_path=\"storage path\"),\n", " )\n", - " result = trainer.fit()\n" + " result: ray.train.Result = ray_trainer.fit()" ] }, { @@ -432,972 +361,10 @@ }, { "cell_type": "code", - "execution_count": 22, + "execution_count": null, "id": "7f0985e9-5e88-4d36-ab38-c3001c13f97c", "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Downloading builder script: 100%|██████████| 4.31k/4.31k [00:00<00:00, 5.60MB/s]\n", - "Downloading metadata: 100%|██████████| 2.17k/2.17k [00:00<00:00, 3.13MB/s]\n", - "Downloading readme: 100%|██████████| 7.59k/7.59k [00:00<00:00, 9.75MB/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\u001b[2m\u001b[36m(train_fn pid=250)\u001b[0m Downloading and preparing dataset imdb/plain_text to /home/ray/.cache/huggingface/datasets/imdb/plain_text/1.0.0/2fdd8b9bcadd6e7055e742a706876ba43f19faee861df134affd7a3f60fc38a1...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Downloading data: 0%| | 0.00/84.1M [00:00\n", - "
\n", - "

Ray

\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "\n", - "\n", - "
Python version:3.8.13
Ray version: 2.1.0
Dashboard:http://10.254.20.41:8265
\n", - "
\n", - "\n" - ], - "text/plain": [ - "ClientContext(dashboard_url='10.254.20.41:8265', python_version='3.8.13', ray_version='2.1.0', ray_commit='23f34d948dae8de9b168667ab27e6cf940b3ae85', protocol_version='2022-10-05', _num_clients=1, _context_to_restore=)" - ] - }, - "execution_count": 6, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "import ray\n", "\n", @@ -189,7 +133,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": null, "id": "3436eb4a-217c-4109-a3c3-309fda7e2442", "metadata": {}, "outputs": [], @@ -213,72 +157,32 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": null, "id": "5cca1874-2be3-4631-ae48-9adfa45e3af3", "metadata": { - "scrolled": true, "tags": [] }, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2023-06-27 19:14:28,222\tDEBUG worker.py:640 -- Retaining 00ffffffffffffffffffffffffffffffffffffff0100000002000000\n", - "2023-06-27 19:14:28,222\tDEBUG worker.py:564 -- Scheduling task heavy_calculation 0 b'\\x00\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\x01\\x00\\x00\\x00\\x02\\x00\\x00\\x00'\n" - ] - } - ], + "outputs": [], "source": [ "ref = heavy_calculation.remote(3000)" ] }, { "cell_type": "code", - "execution_count": 9, + "execution_count": null, "id": "01172c29-e8bf-41ef-8db5-eccb07906111", "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2023-06-27 19:14:29,202\tDEBUG worker.py:640 -- Retaining 16310a0f0a45af5cffffffffffffffffffffffff0100000001000000\n", - "2023-06-27 19:14:31,224\tDEBUG worker.py:439 -- Internal retry for get [ClientObjectRef(16310a0f0a45af5cffffffffffffffffffffffff0100000001000000)]\n" - ] - }, - { - "data": { - "text/plain": [ - "1789.4644387076714" - ] - }, - "execution_count": 9, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "ray.get(ref)" ] }, { 
"cell_type": "code", - "execution_count": 10, + "execution_count": null, "id": "9e79b547-a457-4232-b77d-19147067b972", "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2023-06-27 19:14:33,161\tDEBUG dataclient.py:287 -- Got unawaited response connection_cleanup {\n", - "}\n", - "\n", - "2023-06-27 19:14:34,460\tDEBUG dataclient.py:278 -- Shutting down data channel.\n" - ] - } - ], + "outputs": [], "source": [ "ray.cancel(ref)\n", "ray.shutdown()" @@ -286,7 +190,7 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": null, "id": "2c198f1f-68bf-43ff-a148-02b5cb000ff2", "metadata": {}, "outputs": [], @@ -319,7 +223,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.17" + "version": "3.9.18" }, "vscode": { "interpreter": { diff --git a/demo-notebooks/additional-demos/mnist.py b/demo-notebooks/additional-demos/mnist.py new file mode 100644 index 00000000..6eb663dc --- /dev/null +++ b/demo-notebooks/additional-demos/mnist.py @@ -0,0 +1,160 @@ +# Copyright 2022 IBM, Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# In[] +import os + +import torch +from pytorch_lightning import LightningModule, Trainer +from pytorch_lightning.callbacks.progress import TQDMProgressBar +from pytorch_lightning.loggers import CSVLogger +from torch import nn +from torch.nn import functional as F +from torch.utils.data import DataLoader, random_split +from torchmetrics import Accuracy +from torchvision import transforms +from torchvision.datasets import MNIST + +PATH_DATASETS = os.environ.get("PATH_DATASETS", ".") +BATCH_SIZE = 256 if torch.cuda.is_available() else 64 +# %% + +print("prior to running the trainer") +print("MASTER_ADDR: is ", os.getenv("MASTER_ADDR")) +print("MASTER_PORT: is ", os.getenv("MASTER_PORT")) + + +class LitMNIST(LightningModule): + def __init__(self, data_dir=PATH_DATASETS, hidden_size=64, learning_rate=2e-4): + super().__init__() + + # Set our init args as class attributes + self.data_dir = data_dir + self.hidden_size = hidden_size + self.learning_rate = learning_rate + + # Hardcode some dataset specific attributes + self.num_classes = 10 + self.dims = (1, 28, 28) + channels, width, height = self.dims + self.transform = transforms.Compose( + [ + transforms.ToTensor(), + transforms.Normalize((0.1307,), (0.3081,)), + ] + ) + + # Define PyTorch model + self.model = nn.Sequential( + nn.Flatten(), + nn.Linear(channels * width * height, hidden_size), + nn.ReLU(), + nn.Dropout(0.1), + nn.Linear(hidden_size, hidden_size), + nn.ReLU(), + nn.Dropout(0.1), + nn.Linear(hidden_size, self.num_classes), + ) + + self.val_accuracy = Accuracy() + self.test_accuracy = Accuracy() + + def forward(self, x): + x = self.model(x) + return F.log_softmax(x, dim=1) + + def training_step(self, batch, batch_idx): + x, y = batch + logits = self(x) + loss = F.nll_loss(logits, y) + return loss + + def validation_step(self, batch, batch_idx): + x, y = batch + logits = self(x) + loss = F.nll_loss(logits, y) + preds = torch.argmax(logits, dim=1) + self.val_accuracy.update(preds, y) + + # Calling 
self.log will surface up scalars for you in TensorBoard + self.log("val_loss", loss, prog_bar=True) + self.log("val_acc", self.val_accuracy, prog_bar=True) + + def test_step(self, batch, batch_idx): + x, y = batch + logits = self(x) + loss = F.nll_loss(logits, y) + preds = torch.argmax(logits, dim=1) + self.test_accuracy.update(preds, y) + + # Calling self.log will surface up scalars for you in TensorBoard + self.log("test_loss", loss, prog_bar=True) + self.log("test_acc", self.test_accuracy, prog_bar=True) + + def configure_optimizers(self): + optimizer = torch.optim.Adam(self.parameters(), lr=self.learning_rate) + return optimizer + + #################### + # DATA RELATED HOOKS + #################### + + def prepare_data(self): + # download + print("Downloading MNIST dataset...") + MNIST(self.data_dir, train=True, download=True) + MNIST(self.data_dir, train=False, download=True) + + def setup(self, stage=None): + # Assign train/val datasets for use in dataloaders + if stage == "fit" or stage is None: + mnist_full = MNIST(self.data_dir, train=True, transform=self.transform) + self.mnist_train, self.mnist_val = random_split(mnist_full, [55000, 5000]) + + # Assign test dataset for use in dataloader(s) + if stage == "test" or stage is None: + self.mnist_test = MNIST( + self.data_dir, train=False, transform=self.transform + ) + + def train_dataloader(self): + return DataLoader(self.mnist_train, batch_size=BATCH_SIZE) + + def val_dataloader(self): + return DataLoader(self.mnist_val, batch_size=BATCH_SIZE) + + def test_dataloader(self): + return DataLoader(self.mnist_test, batch_size=BATCH_SIZE) + + +# Init DataLoader from MNIST Dataset + +model = LitMNIST() + +print("GROUP: ", int(os.environ.get("GROUP_WORLD_SIZE", 1))) +print("LOCAL: ", int(os.environ.get("LOCAL_WORLD_SIZE", 1))) + +# Initialize a trainer +trainer = Trainer( + accelerator="auto", + # devices=1 if torch.cuda.is_available() else None, # limiting got iPython runs + max_epochs=5, + 
callbacks=[TQDMProgressBar(refresh_rate=20)], + num_nodes=int(os.environ.get("GROUP_WORLD_SIZE", 1)), + devices=int(os.environ.get("LOCAL_WORLD_SIZE", 1)), + strategy="ddp", +) + +# Train the model ⚡ +trainer.fit(model) diff --git a/demo-notebooks/additional-demos/ray_job_client.ipynb b/demo-notebooks/additional-demos/ray_job_client.ipynb new file mode 100644 index 00000000..42d3faa0 --- /dev/null +++ b/demo-notebooks/additional-demos/ray_job_client.ipynb @@ -0,0 +1,299 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In this demo we will go over the basics of the RayJobClient in the SDK" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Import pieces from codeflare-sdk\n", + "from codeflare_sdk import Cluster, ClusterConfiguration, TokenAuthentication, RayJobClient" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Create authentication object for user permissions\n", + "# IF unused, SDK will automatically check for default kubeconfig, then in-cluster config\n", + "# KubeConfigFileAuthentication can also be used to specify kubeconfig path manually\n", + "auth_token = \"XXXXX\" # The auth_token is used later for the RayJobClient\n", + "auth = TokenAuthentication(\n", + " token = auth_token,\n", + " server = \"XXXXX\",\n", + " skip_tls=False\n", + ")\n", + "auth.login()" + ] + }, + { + "cell_type": "markdown", + "id": "18de2d65", + "metadata": {}, + "source": [ + "\n", + "NOTE: The default images used by the CodeFlare SDK for creating a RayCluster resource depend on the installed Python version:\n", + "\n", + "- For Python 3.11: 'quay.io/modh/ray:2.47.1-py311-cu121'\n", + "- For Python 3.12: 'quay.io/modh/ray:2.47.1-py312-cu128'\n", + "\n", + "If you prefer to use a custom Ray image that better suits your needs, you can specify it in the image field to override the default." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Create and configure our cluster object\n", + "# The SDK will try to find the name of your default local queue based on the annotation \"kueue.x-k8s.io/default-queue\": \"true\" unless you specify the local queue manually below\n", + "cluster = Cluster(ClusterConfiguration(\n", + " name='jobtest',\n", + " head_extended_resource_requests={'nvidia.com/gpu':0}, # For GPU enabled workloads set the head_extended_resource_requests and worker_extended_resource_requests\n", + " worker_extended_resource_requests={'nvidia.com/gpu':0},\n", + " num_workers=2,\n", + " worker_cpu_requests=1,\n", + " worker_cpu_limits=1,\n", + " worker_memory_requests=4,\n", + " worker_memory_limits=4,\n", + " # image=\"\", # Optional Field \n", + " write_to_file=False # When enabled Ray Cluster yaml files are written to /HOME/.codeflare/resources \n", + " # local_queue=\"local-queue-name\" # Specify the local queue manually\n", + "))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Bring up the cluster\n", + "cluster.apply()\n", + "cluster.wait_ready()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "cluster.details()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Ray Job Submission - Authorized Ray Cluster" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "* Submit a job using an authorized Ray dashboard and the Job Submission Client\n", + "* Provide an entrypoint command directed to your job script\n", + "* Set up your runtime environment" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Gather the dashboard URL\n", + "ray_dashboard = cluster.cluster_dashboard_uri()\n", + "\n", + "# Create the header for passing your 
bearer token\n", + "header = {\n", + " 'Authorization': f'Bearer {auth_token}'\n", + "}\n", + "\n", + "# Initialize the RayJobClient\n", + "client = RayJobClient(address=ray_dashboard, headers=header, verify=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Submit an example mnist job using the RayJobClient\n", + "submission_id = client.submit_job(\n", + " entrypoint=\"python mnist.py\",\n", + " runtime_env={\"working_dir\": \"./\",\"pip\": \"requirements.txt\"},\n", + ")\n", + "print(submission_id)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Get the job's logs\n", + "client.get_job_logs(submission_id)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Get the job's status\n", + "client.get_job_status(submission_id)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Get job related info\n", + "client.get_job_info(submission_id)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# List all existing jobs\n", + "client.list_jobs()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Iterate through the logs of a job \n", + "async for lines in client.tail_job_logs(submission_id):\n", + " print(lines, end=\"\") " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Delete a job\n", + "# Can run client.cancel_job(submission_id) first if job is still running\n", + "client.delete_job(submission_id)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Unauthorized Ray Cluster with the Ray Job Client" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, 
+ "outputs": [], + "source": [ + "\"\"\"\n", + "Initialise the RayJobClient with the Ray Dashboard\n", + "\"\"\"\n", + "ray_dashboard = cluster.cluster_dashboard_uri()\n", + "client = RayJobClient(address=ray_dashboard, verify=False)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Submit an example mnist job using the RayJobClient\n", + "submission_id = client.submit_job(\n", + " entrypoint=\"python mnist.py\",\n", + " runtime_env={\"working_dir\": \"./\",\"pip\": \"requirements.txt\"},\n", + ")\n", + "print(submission_id)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Stop the job \n", + "client.stop_job(submission_id)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Delete the job\n", + "client.delete_job(submission_id)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "cluster.down()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "auth.logout()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.18" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/demo-notebooks/additional-demos/remote_ray_job_client.ipynb b/demo-notebooks/additional-demos/remote_ray_job_client.ipynb new file mode 100644 index 00000000..b2be6826 --- /dev/null +++ b/demo-notebooks/additional-demos/remote_ray_job_client.ipynb @@ -0,0 +1,103 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# 
Submit a training job remotely to Ray Dashboard protected by oAuth.\n", + "This notebook will demonstrate how to submit Ray jobs to an existing Raycluster, using the CodeFlare SDK.\n", + "\n", + "### Requirements\n", + "* Ray Cluster running in OpenShift protected by oAuth.\n", + "* The Ray Dashboard URL for the Ray Cluster.\n", + "* An OpenShift authorization token with permissions to access the Route.\n", + "* A training job, defined in python, within the working directory.\n", + "* A requirements.txt or equivalent file containing any additional packages to install onto the Ray images." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Import dependencies from codeflare-sdk\n", + "from codeflare_sdk import RayJobClient" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Setup Authentication Configuration \n", + "auth_token = \"XXXX\" # Replace with the actual token\n", + "header = {\n", + " 'Authorization': f'Bearer {auth_token}'\n", + "}" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Gather the dashboard URL (provided by the creator of the RayCluster)\n", + "ray_dashboard = \"XXXX\" # Replace with the Ray dashboard URL" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "#Initialize the RayJobClient\n", + "client = RayJobClient(address=ray_dashboard, headers=header, verify=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Submit a job using the RayJobClient\n", + "entrypoint_command = \"python XXXX\" # Replace with the training script name\n", + "submission_id = client.submit_job(\n", + " entrypoint=entrypoint_command,\n", + " runtime_env={\"working_dir\": \"./\",\"pip\": \"requirements.txt\"},\n", + ")" + ] + }, + { + 
"cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Get the job's status\n", + "client.get_job_status(submission_id)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Get the job's logs\n", + "client.get_job_logs(submission_id)" + ] + } + ], + "metadata": { + "language_info": { + "name": "python" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/demo-notebooks/additional-demos/requirements.txt b/demo-notebooks/additional-demos/requirements.txt new file mode 100644 index 00000000..5f86ab53 --- /dev/null +++ b/demo-notebooks/additional-demos/requirements.txt @@ -0,0 +1,5 @@ +pytorch_lightning==1.9.5 +ray_lightning +torchmetrics==0.9.1 +torchvision==0.19.0 +minio diff --git a/demo-notebooks/guided-demos/0_basic_ray.ipynb b/demo-notebooks/guided-demos/0_basic_ray.ipynb index dfe06733..7bc69afa 100644 --- a/demo-notebooks/guided-demos/0_basic_ray.ipynb +++ b/demo-notebooks/guided-demos/0_basic_ray.ipynb @@ -5,7 +5,7 @@ "id": "8d4a42f6", "metadata": {}, "source": [ - "In this first notebook, we will go through the basics of using the SDK to:\n", + "In this notebook, we will go through the basics of using the SDK to:\n", " - Spin up a Ray cluster with our desired resources\n", " - View the status and specs of our Ray cluster\n", " - Take down the Ray cluster when finished" @@ -19,8 +19,7 @@ "outputs": [], "source": [ "# Import pieces from codeflare-sdk\n", - "from codeflare_sdk.cluster.cluster import Cluster, ClusterConfiguration\n", - "from codeflare_sdk.cluster.auth import TokenAuthentication" + "from codeflare_sdk import Cluster, ClusterConfiguration, TokenAuthentication" ] }, { @@ -46,7 +45,14 @@ "id": "bc27f84c", "metadata": {}, "source": [ - "Here, we want to define our cluster by specifying the resources we require for our batch workload. Below, we define our cluster object (which generates a corresponding AppWrapper)." 
+ "Here, we want to define our cluster by specifying the resources we require for our batch workload. Below, we define our cluster object (which generates a corresponding RayCluster).\n", + "\n", + "NOTE: The default images used by the CodeFlare SDK for creating a RayCluster resource depend on the installed Python version:\n", + "\n", + "- For Python 3.11: 'quay.io/modh/ray:2.47.1-py311-cu121'\n", + "- For Python 3.12: 'quay.io/modh/ray:2.47.1-py312-cu128'\n", + "\n", + "If you prefer to use a custom Ray image that better suits your needs, you can specify it in the image field to override the default." ] }, { @@ -56,18 +62,24 @@ "metadata": {}, "outputs": [], "source": [ - "# Create and configure our cluster object (and appwrapper)\n", + "# Create and configure our cluster object\n", + "# The SDK will try to find the name of your default local queue based on the annotation \"kueue.x-k8s.io/default-queue\": \"true\" unless you specify the local queue manually below\n", "cluster = Cluster(ClusterConfiguration(\n", - " name='raytest',\n", - " namespace='default',\n", + " name='raytest', \n", + " head_cpu_requests='500m',\n", + " head_cpu_limits='500m',\n", + " head_memory_requests=2,\n", + " head_memory_limits=2,\n", + " head_extended_resource_requests={'nvidia.com/gpu':0}, # For GPU enabled workloads set the head_extended_resource_requests and worker_extended_resource_requests\n", + " worker_extended_resource_requests={'nvidia.com/gpu':0},\n", " num_workers=2,\n", - " min_cpus=1,\n", - " max_cpus=1,\n", - " min_memory=4,\n", - " max_memory=4,\n", - " num_gpus=0,\n", - " image=\"quay.io/project-codeflare/ray:2.5.0-py38-cu116\", #current default\n", - " instascale=False\n", + " worker_cpu_requests='250m',\n", + " worker_cpu_limits=1,\n", + " worker_memory_requests=4,\n", + " worker_memory_limits=4,\n", + " # image=\"\", # Optional Field \n", + " write_to_file=False, # When enabled Ray Cluster yaml files are written to /HOME/.codeflare/resources \n", + " # 
local_queue=\"local-queue-name\" # Specify the local queue manually\n", "))" ] }, @@ -76,7 +88,7 @@ "id": "12eef53c", "metadata": {}, "source": [ - "Next, we want to bring our cluster up, so we call the `up()` function below to submit our cluster AppWrapper yaml onto the MCAD queue, and begin the process of obtaining our resource cluster." + "To create the Ray Cluster, we can click the `Cluster Up` button to submit our Ray Cluster onto the queue, and begin the process of creating a Ray Cluster resource. Alternatively, you can run the code cell below to do the same." ] }, { @@ -87,7 +99,7 @@ "outputs": [], "source": [ "# Bring up the cluster\n", - "cluster.up()" + "cluster.apply()" ] }, { @@ -191,7 +203,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.13" + "version": "3.9.18" }, "vscode": { "interpreter": { diff --git a/demo-notebooks/guided-demos/1_basic_instascale.ipynb b/demo-notebooks/guided-demos/1_basic_instascale.ipynb deleted file mode 100644 index d0faf5b9..00000000 --- a/demo-notebooks/guided-demos/1_basic_instascale.ipynb +++ /dev/null @@ -1,174 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "9865ee8c", - "metadata": {}, - "source": [ - "In this second notebook, we will go over the basics of using InstaScale to scale up/down necessary resources that are not currently available on your OpenShift Cluster (in cloud environments)." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b55bc3ea-4ce3-49bf-bb1f-e209de8ca47a", - "metadata": {}, - "outputs": [], - "source": [ - "# Import pieces from codeflare-sdk\n", - "from codeflare_sdk.cluster.cluster import Cluster, ClusterConfiguration\n", - "from codeflare_sdk.cluster.auth import TokenAuthentication" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "614daa0c", - "metadata": {}, - "outputs": [], - "source": [ - "# Create authentication object for user permissions\n", - "# IF unused, SDK will automatically check for default kubeconfig, then in-cluster config\n", - "# KubeConfigFileAuthentication can also be used to specify kubeconfig path manually\n", - "auth = TokenAuthentication(\n", - " token = \"XXXXX\",\n", - " server = \"XXXXX\",\n", - " skip_tls=False\n", - ")\n", - "auth.login()" - ] - }, - { - "cell_type": "markdown", - "id": "bc27f84c", - "metadata": {}, - "source": [ - "This time, we are working in a cloud environment, and our OpenShift cluster does not have the resources needed for our desired workloads. 
We will use InstaScale to dynamically scale-up guaranteed resources based on our request (that will also automatically scale-down when we are finished working):" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0f4bc870-091f-4e11-9642-cba145710159", - "metadata": {}, - "outputs": [], - "source": [ - "# Create and configure our cluster object (and appwrapper)\n", - "cluster = Cluster(ClusterConfiguration(\n", - " name='instascaletest',\n", - " namespace='default',\n", - " num_workers=2,\n", - " min_cpus=2,\n", - " max_cpus=2,\n", - " min_memory=8,\n", - " max_memory=8,\n", - " num_gpus=1,\n", - " instascale=True, # InstaScale now enabled, will scale OCP cluster to guarantee resource request\n", - " machine_types=[\"m5.xlarge\", \"g4dn.xlarge\"] # Head, worker AWS machine types desired\n", - "))" - ] - }, - { - "cell_type": "markdown", - "id": "12eef53c", - "metadata": {}, - "source": [ - "Same as last time, we will bring the cluster up, wait for it to be ready, and confirm that the specs are as-requested:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f0884bbc-c224-4ca0-98a0-02dfa09c2200", - "metadata": {}, - "outputs": [], - "source": [ - "# Bring up the cluster\n", - "cluster.up()\n", - "cluster.wait_ready()" - ] - }, - { - "cell_type": "markdown", - "id": "6abfe904", - "metadata": {}, - "source": [ - "While the resources are being scaled, we can also go into the console and take a look at the InstaScale logs, as well as the new machines/nodes spinning up.\n", - "\n", - "Once the cluster is ready, we can confirm the specs:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7fd45bc5-03c0-4ae5-9ec5-dd1c30f1a084", - "metadata": {}, - "outputs": [], - "source": [ - "cluster.details()" - ] - }, - { - "cell_type": "markdown", - "id": "5af8cd32", - "metadata": {}, - "source": [ - "Finally, we bring our resource cluster down and release/terminate the associated resources, bringing everything back to 
the way it was before our cluster was brought up." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5f36db0f-31f6-4373-9503-dc3c1c4c3f57", - "metadata": {}, - "outputs": [], - "source": [ - "cluster.down()" - ] - }, - { - "cell_type": "markdown", - "id": "c883caea", - "metadata": {}, - "source": [ - "Once again, we can look at the machines/nodes and see that everything has been successfully scaled down!" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0d41b90e", - "metadata": {}, - "outputs": [], - "source": [ - "auth.logout()" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.13" - }, - "vscode": { - "interpreter": { - "hash": "f9f85f796d01129d0dd105a088854619f454435301f6ffec2fea96ecbd9be4ac" - } - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/demo-notebooks/guided-demos/1_cluster_job_client.ipynb b/demo-notebooks/guided-demos/1_cluster_job_client.ipynb new file mode 100644 index 00000000..2f042a6d --- /dev/null +++ b/demo-notebooks/guided-demos/1_cluster_job_client.ipynb @@ -0,0 +1,251 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In this demo we will go over the basics of the Ray Job Submission Client in the SDK" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Import pieces from codeflare-sdk\n", + "from codeflare_sdk import Cluster, ClusterConfiguration, TokenAuthentication" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Create authentication object for user permissions\n", + "# IF unused, SDK 
will automatically check for default kubeconfig, then in-cluster config\n", + "# KubeConfigFileAuthentication can also be used to specify kubeconfig path manually\n", + "auth = TokenAuthentication(\n", + " token = \"XXXXX\",\n", + " server = \"XXXXX\",\n", + " skip_tls=False\n", + ")\n", + "auth.login()" + ] + }, + { + "cell_type": "markdown", + "id": "bc27f84c", + "metadata": {}, + "source": [ + "Here, we want to define our cluster by specifying the resources we require for our batch workload. Below, we define our cluster object (which generates a corresponding RayCluster).\n", + "\n", + "NOTE: The default images used by the CodeFlare SDK for creating a RayCluster resource depend on the installed Python version:\n", + "\n", + "- For Python 3.11: 'quay.io/modh/ray:2.47.1-py311-cu121'\n", + "- For Python 3.12: 'quay.io/modh/ray:2.47.1-py312-cu128'\n", + "\n", + "If you prefer to use a custom Ray image that better suits your needs, you can specify it in the image field to override the default." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Create and configure our cluster object\n", + "# The SDK will try to find the name of your default local queue based on the annotation \"kueue.x-k8s.io/default-queue\": \"true\" unless you specify the local queue manually below\n", + "cluster = Cluster(ClusterConfiguration(\n", + " name='jobtest',\n", + " head_cpu_requests=1,\n", + " head_cpu_limits=1,\n", + " head_memory_requests=4,\n", + " head_memory_limits=4,\n", + " head_extended_resource_requests={'nvidia.com/gpu':1}, # For GPU enabled workloads set the head_extended_resource_requests and worker_extended_resource_requests\n", + " worker_extended_resource_requests={'nvidia.com/gpu':1},\n", + " num_workers=2,\n", + " worker_cpu_requests='250m',\n", + " worker_cpu_limits=1,\n", + " worker_memory_requests=4,\n", + " worker_memory_limits=4,\n", + " # image=\"\", # Optional Field \n", + " write_to_file=False, # When enabled Ray Cluster yaml files are written to /HOME/.codeflare/resources \n", + " # local_queue=\"local-queue-name\" # Specify the local queue manually\n", + "))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To create the Ray Cluster, we can tick the `Wait for cluster?` checkbox and click the `Cluster Up` button to submit our Ray Cluster onto the queue, and begin the process of creating a Ray Cluster resource while waiting for the Ray Dashboard to be available. Alternatively, you can run the code cell below to do the same." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Bring up the cluster\n", + "cluster.apply()\n", + "cluster.wait_ready()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "cluster.details()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Ray Job Submission" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "* Initialise the Cluster Job Client \n", + "* Provide an entrypoint command directed to your job script\n", + "* Set up your runtime environment" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Initialize the Job Submission Client\n", + "\"\"\"\n", + "The SDK will automatically gather the dashboard address and authenticate using the Ray Job Submission Client\n", + "\"\"\"\n", + "client = cluster.job_client" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Submit an example mnist job using the Job Submission Client\n", + "submission_id = client.submit_job(\n", + " entrypoint=\"python mnist_fashion.py\",\n", + " runtime_env={\"working_dir\": \"./\",\"pip\": \"requirements.txt\"},\n", + ")\n", + "print(submission_id)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Get the job's logs\n", + "client.get_job_logs(submission_id)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Get the job's status\n", + "client.get_job_status(submission_id)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Get job related info\n", + "client.get_job_info(submission_id)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": 
[], + "source": [ + "# List all existing jobs\n", + "client.list_jobs()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Iterate through the logs of a job \n", + "async for lines in client.tail_job_logs(submission_id):\n", + " print(lines, end=\"\") " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Delete a job\n", + "# Can run client.stop_job(submission_id) first if job is still running\n", + "client.delete_job(submission_id)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "cluster.down()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "auth.logout()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.18" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/demo-notebooks/guided-demos/2_basic_interactive.ipynb b/demo-notebooks/guided-demos/2_basic_interactive.ipynb new file mode 100644 index 00000000..683ec236 --- /dev/null +++ b/demo-notebooks/guided-demos/2_basic_interactive.ipynb @@ -0,0 +1,363 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "bbc21043", + "metadata": {}, + "source": [ + "In this notebook, we will go over how to leverage the SDK to directly work interactively with a Ray Cluster during development." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b55bc3ea-4ce3-49bf-bb1f-e209de8ca47a", + "metadata": {}, + "outputs": [], + "source": [ + "# Import pieces from codeflare-sdk\n", + "from codeflare_sdk import Cluster, ClusterConfiguration, TokenAuthentication" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "614daa0c", + "metadata": {}, + "outputs": [], + "source": [ + "# Create authentication object for user permissions\n", + "# IF unused, SDK will automatically check for default kubeconfig, then in-cluster config\n", + "# KubeConfigFileAuthentication can also be used to specify kubeconfig path manually\n", + "auth = TokenAuthentication(\n", + " token = \"XXXXX\",\n", + " server = \"XXXXX\",\n", + " skip_tls=False\n", + ")\n", + "auth.login()" + ] + }, + { + "cell_type": "markdown", + "id": "bc27f84c", + "metadata": {}, + "source": [ + "Once again, let's start by running through the same cluster setup as before:\n", + "\n", + "NOTE: The default images used by the CodeFlare SDK for creating a RayCluster resource depend on the installed Python version:\n", + "\n", + "- For Python 3.11: 'quay.io/modh/ray:2.47.1-py311-cu121'\n", + "- For Python 3.12: 'quay.io/modh/ray:2.47.1-py312-cu128'\n", + "\n", + "If you prefer to use a custom Ray image that better suits your needs, you can specify it in the image field to override the default." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0f4bc870-091f-4e11-9642-cba145710159", + "metadata": {}, + "outputs": [], + "source": [ + "# Create and configure our cluster object\n", + "# The SDK will try to find the name of your default local queue based on the annotation \"kueue.x-k8s.io/default-queue\": \"true\" unless you specify the local queue manually below\n", + "cluster_name = \"interactivetest\"\n", + "cluster = Cluster(ClusterConfiguration(\n", + " name=cluster_name,\n", + " head_cpu_requests=1,\n", + " head_cpu_limits=1,\n", + " head_memory_requests=6,\n", + " head_memory_limits=6,\n", + " head_extended_resource_requests={'nvidia.com/gpu':1}, # For GPU enabled workloads set the head_extended_resource_requests and worker_extended_resource_requests\n", + " worker_extended_resource_requests={'nvidia.com/gpu':1},\n", + " num_workers=2,\n", + " worker_cpu_requests='250m',\n", + " worker_cpu_limits=1,\n", + " worker_memory_requests=4,\n", + " worker_memory_limits=6,\n", + " # image=\"\", # Optional Field \n", + " write_to_file=False, # When enabled Ray Cluster yaml files are written to /HOME/.codeflare/resources \n", + " # local_queue=\"local-queue-name\" # Specify the local queue manually\n", + "))" + ] + }, + { + "cell_type": "markdown", + "id": "6973247b", + "metadata": {}, + "source": [ + "To create the Ray Cluster, we can tick the `Wait for cluster?` checkbox and click the `Cluster Up` button to submit our Ray Cluster onto the queue, and begin the process of creating a Ray Cluster resource while waiting for the Ray Dashboard to be available. Alternatively, you can run the code cell below to do the same." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f0884bbc-c224-4ca0-98a0-02dfa09c2200", + "metadata": {}, + "outputs": [], + "source": [ + "# Bring up the cluster\n", + "cluster.apply()\n", + "cluster.wait_ready()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "df71c1ed", + "metadata": {}, + "outputs": [], + "source": [ + "cluster.details()" + ] + }, + { + "cell_type": "markdown", + "id": "33663f47", + "metadata": {}, + "source": [ + "This time we will demonstrate another potential method of use: working with the Ray cluster interactively.\n", + "\n", + "Using the SDK, we can get both the Ray cluster URI and dashboard URI:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c1719bca", + "metadata": {}, + "outputs": [], + "source": [ + "ray_dashboard_uri = cluster.cluster_dashboard_uri()\n", + "ray_cluster_uri = cluster.cluster_uri()\n", + "print(ray_dashboard_uri)\n", + "print(ray_cluster_uri)" + ] + }, + { + "cell_type": "markdown", + "id": "2a2aca6a", + "metadata": {}, + "source": [ + "Now we can connect directly to our Ray cluster via the Ray python client:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c9436436", + "metadata": {}, + "outputs": [], + "source": [ + "from codeflare_sdk import generate_cert\n", + "# Create required TLS cert and export the environment variables to enable TLS\n", + "generate_cert.generate_tls_cert(cluster_name, cluster.config.namespace)\n", + "generate_cert.export_env(cluster_name, cluster.config.namespace)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "300146dc", + "metadata": {}, + "outputs": [], + "source": [ + "# before proceeding make sure the cluster exists and the uri is not empty\n", + "assert ray_cluster_uri, \"Ray cluster needs to be started and set before proceeding\"\n", + "\n", + "import ray\n", + "\n", + "# reset the ray context in case there's already one. 
\n", + "ray.shutdown()\n", + "# establish connection to ray cluster\n", + "\n", + "# install additional libraries that will be required for model training\n", + "runtime_env = {\"pip\": [\"transformers==4.41.2\", \"datasets==2.17.0\", \"accelerate==0.31.0\", \"scikit-learn==1.5.0\"]}\n", + "# NOTE: This will work for in-cluster notebook servers (RHODS/ODH), but not for local machines\n", + "# To see how to connect from your laptop, go to demo-notebooks/additional-demos/local_interactive.ipynb\n", + "ray.init(address=ray_cluster_uri, runtime_env=runtime_env)\n", + "\n", + "print(\"Ray cluster is up and running: \", ray.is_initialized())" + ] + }, + { + "cell_type": "markdown", + "id": "9711030b", + "metadata": {}, + "source": [ + "Now that we are connected (and have passed in some package requirements), let's try writing some training code:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1b36e0d9", + "metadata": {}, + "outputs": [], + "source": [ + "@ray.remote\n", + "def train_fn():\n", + " import os\n", + " import numpy as np\n", + " from datasets import load_dataset, load_metric\n", + " import transformers\n", + " from transformers import (\n", + " Trainer,\n", + " TrainingArguments,\n", + " AutoTokenizer,\n", + " AutoModelForSequenceClassification,\n", + " )\n", + " import ray.train.huggingface.transformers\n", + " from ray.train import ScalingConfig\n", + " from ray.train.torch import TorchTrainer\n", + "\n", + " # When running in a multi-node cluster you will need persistent storage that is accessible across all worker nodes. 
\n", + " # See www.github.com/project-codeflare/codeflare-sdk/tree/main/docs/s3-compatible-storage.md for more information.\n", + " \n", + " def train_func():\n", + " # Datasets\n", + " dataset = load_dataset(\"imdb\")\n", + " tokenizer = AutoTokenizer.from_pretrained(\"distilbert-base-uncased\")\n", + "\n", + " def tokenize_function(examples):\n", + " return tokenizer(examples[\"text\"], padding=\"max_length\", truncation=True)\n", + "\n", + " small_train_dataset = (\n", + " dataset[\"train\"].select(range(100)).map(tokenize_function, batched=True)\n", + " )\n", + " small_eval_dataset = (\n", + " dataset[\"test\"].select(range(100)).map(tokenize_function, batched=True)\n", + " )\n", + "\n", + " # Model\n", + " model = AutoModelForSequenceClassification.from_pretrained(\n", + " \"distilbert-base-uncased\", num_labels=2\n", + " )\n", + "\n", + " def compute_metrics(eval_pred):\n", + " metric = load_metric(\"accuracy\")\n", + " logits, labels = eval_pred\n", + " predictions = np.argmax(logits, axis=-1)\n", + " return metric.compute(predictions=predictions, references=labels)\n", + "\n", + " # Hugging Face Trainer\n", + " training_args = TrainingArguments(\n", + " output_dir=\"test_trainer\",\n", + " evaluation_strategy=\"epoch\",\n", + " save_strategy=\"epoch\",\n", + " report_to=\"none\",\n", + " )\n", + "\n", + " trainer = Trainer(\n", + " model=model,\n", + " args=training_args,\n", + " train_dataset=small_train_dataset,\n", + " eval_dataset=small_eval_dataset,\n", + " compute_metrics=compute_metrics,\n", + " )\n", + "\n", + "\n", + " callback = ray.train.huggingface.transformers.RayTrainReportCallback()\n", + " trainer.add_callback(callback)\n", + "\n", + " trainer = ray.train.huggingface.transformers.prepare_trainer(trainer)\n", + "\n", + " trainer.train()\n", + "\n", + "\n", + " ray_trainer = TorchTrainer(\n", + " train_func,\n", + " scaling_config=ScalingConfig(\n", + " # num_workers = number of worker nodes with the ray head node included\n", + 
" num_workers=3,\n", + " use_gpu=True,\n", + " resources_per_worker={\n", + " \"CPU\": 1,\n", + " },\n", + " trainer_resources={\n", + " \"CPU\": 0,\n", + " }\n", + " )\n", + " # Configure persistent storage that is accessible across \n", + " # all worker nodes.\n", + " # Uncomment and update the RunConfig below to include your storage details.\n", + " # run_config=ray.train.RunConfig(storage_path=\"storage path\"),\n", + " )\n", + " result: ray.train.Result = ray_trainer.fit()" + ] + }, + { + "cell_type": "markdown", + "id": "d4d8fd65", + "metadata": {}, + "source": [ + "Once we want to test our code out, we can run the training function we defined above remotely on our Ray cluster:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5901d958", + "metadata": {}, + "outputs": [], + "source": [ + "#call the above cell as a remote ray function\n", + "ray.get(train_fn.remote())" + ] + }, + { + "cell_type": "markdown", + "id": "5af8cd32", + "metadata": {}, + "source": [ + "Once complete, we can bring our Ray cluster down and clean up:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5f36db0f-31f6-4373-9503-dc3c1c4c3f57", + "metadata": {}, + "outputs": [], + "source": [ + "cluster.down()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0d41b90e", + "metadata": {}, + "outputs": [], + "source": [ + "auth.logout()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.13" + }, + "vscode": { + "interpreter": { + "hash": "f9f85f796d01129d0dd105a088854619f454435301f6ffec2fea96ecbd9be4ac" + } + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git 
a/demo-notebooks/guided-demos/2_basic_jobs.ipynb b/demo-notebooks/guided-demos/2_basic_jobs.ipynb deleted file mode 100644 index da74f9e5..00000000 --- a/demo-notebooks/guided-demos/2_basic_jobs.ipynb +++ /dev/null @@ -1,308 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "464af595", - "metadata": {}, - "source": [ - "In this third notebook, we will go over the basics of submitting jobs via the SDK, either to a Ray cluster or directly to MCAD." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b55bc3ea-4ce3-49bf-bb1f-e209de8ca47a", - "metadata": {}, - "outputs": [], - "source": [ - "# Import pieces from codeflare-sdk\n", - "from codeflare_sdk.cluster.cluster import Cluster, ClusterConfiguration\n", - "from codeflare_sdk.cluster.auth import TokenAuthentication" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "614daa0c", - "metadata": {}, - "outputs": [], - "source": [ - "# Create authentication object for user permissions\n", - "# IF unused, SDK will automatically check for default kubeconfig, then in-cluster config\n", - "# KubeConfigFileAuthentication can also be used to specify kubeconfig path manually\n", - "auth = TokenAuthentication(\n", - " token = \"XXXXX\",\n", - " server = \"XXXXX\",\n", - " skip_tls=False\n", - ")\n", - "auth.login()" - ] - }, - { - "cell_type": "markdown", - "id": "bc27f84c", - "metadata": {}, - "source": [ - "Let's start by running through the same cluster setup as before:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0f4bc870-091f-4e11-9642-cba145710159", - "metadata": {}, - "outputs": [], - "source": [ - "# Create and configure our cluster object (and appwrapper)\n", - "cluster = Cluster(ClusterConfiguration(\n", - " name='jobtest',\n", - " namespace='default',\n", - " num_workers=2,\n", - " min_cpus=1,\n", - " max_cpus=1,\n", - " min_memory=4,\n", - " max_memory=4,\n", - " num_gpus=0,\n", - " instascale=False\n", - "))" - ] - }, - { - "cell_type": 
"code", - "execution_count": null, - "id": "f0884bbc-c224-4ca0-98a0-02dfa09c2200", - "metadata": {}, - "outputs": [], - "source": [ - "# Bring up the cluster\n", - "cluster.up()\n", - "cluster.wait_ready()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "df71c1ed", - "metadata": {}, - "outputs": [], - "source": [ - "cluster.details()" - ] - }, - { - "cell_type": "markdown", - "id": "33663f47", - "metadata": {}, - "source": [ - "This time, however, we are going to use the CodeFlare SDK to submit batch jobs via TorchX, either to the Ray cluster we have just brought up, or directly to MCAD." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "c7b4f232", - "metadata": {}, - "outputs": [], - "source": [ - "from codeflare_sdk.job.jobs import DDPJobDefinition" - ] - }, - { - "cell_type": "markdown", - "id": "83d77b74", - "metadata": {}, - "source": [ - "First, let's begin by submitting to Ray, training a basic NN on the MNIST dataset:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8c2c5138", - "metadata": {}, - "outputs": [], - "source": [ - "jobdef = DDPJobDefinition(\n", - " name=\"mnisttest\",\n", - " script=\"mnist.py\",\n", - " scheduler_args={\"requirements\": \"requirements.txt\"}\n", - ")\n", - "job = jobdef.submit(cluster)" - ] - }, - { - "cell_type": "markdown", - "id": "5b9ae53a", - "metadata": {}, - "source": [ - "Now we can take a look at the status of our submitted job, as well as retrieve the full logs:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6e36c3d9", - "metadata": {}, - "outputs": [], - "source": [ - "job.status()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "834cfb5c", - "metadata": {}, - "outputs": [], - "source": [ - "job.logs()" - ] - }, - { - "cell_type": "markdown", - "id": "c8267fb2", - "metadata": {}, - "source": [ - "You can also view organized logs, status, and other information directly through the Ray cluster's 
dashboard:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3f3861d0", - "metadata": {}, - "outputs": [], - "source": [ - "cluster.cluster_dashboard_uri()" - ] - }, - { - "cell_type": "markdown", - "id": "5af8cd32", - "metadata": {}, - "source": [ - "Once complete, we can bring our Ray cluster down and clean up:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5f36db0f-31f6-4373-9503-dc3c1c4c3f57", - "metadata": {}, - "outputs": [], - "source": [ - "cluster.down()" - ] - }, - { - "cell_type": "markdown", - "id": "31096641", - "metadata": {}, - "source": [ - "Now, an alternative option for job submission is to submit directly to MCAD, which will schedule pods to run the job with requested resources:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "496139cc", - "metadata": {}, - "outputs": [], - "source": [ - "jobdef = DDPJobDefinition(\n", - " name=\"mnistjob\",\n", - " script=\"mnist.py\",\n", - " scheduler_args={\"namespace\": \"default\"},\n", - " j=\"1x1\",\n", - " gpu=0,\n", - " cpu=1,\n", - " memMB=8000,\n", - " image=\"quay.io/project-codeflare/mnist-job-test:v0.0.1\"\n", - ")\n", - "job = jobdef.submit()" - ] - }, - { - "cell_type": "markdown", - "id": "0837e43b", - "metadata": {}, - "source": [ - "Once again, we can look at job status and logs:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3d18d42c", - "metadata": {}, - "outputs": [], - "source": [ - "job.status()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "36d7ea97", - "metadata": {}, - "outputs": [], - "source": [ - "job.logs()" - ] - }, - { - "cell_type": "markdown", - "id": "aebf376a", - "metadata": {}, - "source": [ - "This time, once the pods complete, we can clean them up alongside any other associated resources. 
The following command can also be used to delete jobs early for both Ray and MCAD submission:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ebbb0674", - "metadata": {}, - "outputs": [], - "source": [ - "job.cancel()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0d41b90e", - "metadata": {}, - "outputs": [], - "source": [ - "auth.logout()" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.13" - }, - "vscode": { - "interpreter": { - "hash": "f9f85f796d01129d0dd105a088854619f454435301f6ffec2fea96ecbd9be4ac" - } - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/demo-notebooks/guided-demos/3_widget_example.ipynb b/demo-notebooks/guided-demos/3_widget_example.ipynb new file mode 100644 index 00000000..8b70e1da --- /dev/null +++ b/demo-notebooks/guided-demos/3_widget_example.ipynb @@ -0,0 +1,133 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "8d4a42f6", + "metadata": {}, + "source": [ + "In this notebook, we will go through the basics of using the SDK to:\n", + " - Spin up a Ray cluster with our desired resources\n", + " - View the status and specs of our Ray cluster\n", + " - Take down the Ray cluster when finished" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b55bc3ea-4ce3-49bf-bb1f-e209de8ca47a", + "metadata": {}, + "outputs": [], + "source": [ + "# Import pieces from codeflare-sdk\n", + "from codeflare_sdk import Cluster, ClusterConfiguration, TokenAuthentication, view_clusters" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "614daa0c", + "metadata": {}, + "outputs": [], + "source": [ + "# Create authentication 
object for user permissions\n", + "# IF unused, SDK will automatically check for default kubeconfig, then in-cluster config\n", + "# KubeConfigFileAuthentication can also be used to specify kubeconfig path manually\n", + "auth = TokenAuthentication(\n", + " token = \"XXXXX\",\n", + " server = \"XXXXX\",\n", + " skip_tls=False\n", + ")\n", + "auth.login()" + ] + }, + { + "cell_type": "markdown", + "id": "bc27f84c", + "metadata": {}, + "source": [ + "Here, we want to define our cluster by specifying the resources we require for our batch workload. Below, we define our cluster object (which generates a corresponding RayCluster).\n", + "\n", + "NOTE: The default images used by the CodeFlare SDK for creating a RayCluster resource depend on the installed Python version:\n", + "\n", + "- For Python 3.11: 'quay.io/modh/ray:2.47.1-py311-cu121'\n", + "- For Python 3.12: 'quay.io/modh/ray:2.47.1-py312-cu128'\n", + "\n", + "If you prefer to use a custom Ray image that better suits your needs, you can specify it in the image field to override the default." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0f4bc870-091f-4e11-9642-cba145710159", + "metadata": {}, + "outputs": [], + "source": [ + "# Create and configure our cluster object\n", + "# The SDK will try to find the name of your default local queue based on the annotation \"kueue.x-k8s.io/default-queue\": \"true\" unless you specify the local queue manually below\n", + "cluster = Cluster(ClusterConfiguration(\n", + " name='widgettest',\n", + " head_cpu_requests='500m',\n", + " head_cpu_limits='500m',\n", + " head_memory_requests=2,\n", + " head_memory_limits=2,\n", + " head_extended_resource_requests={'nvidia.com/gpu':0}, # For GPU enabled workloads set the head_extended_resource_requests and worker_extended_resource_requests\n", + " worker_extended_resource_requests={'nvidia.com/gpu':0},\n", + " num_workers=2,\n", + " worker_cpu_requests='250m',\n", + " worker_cpu_limits=1,\n", + " worker_memory_requests=2,\n", + " worker_memory_limits=2,\n", + " # image=\"\", # Optional Field\n", + " write_to_file=False, # When enabled Ray Cluster yaml files are written to /HOME/.codeflare/resources\n", + " # local_queue=\"local-queue-name\" # Specify the local queue manually\n", + "))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3de6403c", + "metadata": {}, + "outputs": [], + "source": [ + "view_clusters()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2d8e6ce3", + "metadata": {}, + "outputs": [], + "source": [ + "cluster.status()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.18" + }, + "vscode": { + "interpreter": { + "hash": 
"f9f85f796d01129d0dd105a088854619f454435301f6ffec2fea96ecbd9be4ac" + } + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/demo-notebooks/guided-demos/4_gpt.ipynb b/demo-notebooks/guided-demos/4_gpt.ipynb deleted file mode 100644 index 0fdcec96..00000000 --- a/demo-notebooks/guided-demos/4_gpt.ipynb +++ /dev/null @@ -1,228 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "id": "b6c05b69-4ce8-45ef-82d3-bacb2491bee8", - "metadata": {}, - "outputs": [], - "source": [ - "# Import pieces from codeflare-sdk\n", - "from codeflare_sdk.cluster.cluster import Cluster, ClusterConfiguration\n", - "from codeflare_sdk.cluster.auth import TokenAuthentication" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "32f99bbd-9903-4d38-a4f2-223dec684ae2", - "metadata": {}, - "outputs": [], - "source": [ - "# Create authentication object for user permissions\n", - "# IF unused, SDK will automatically check for default kubeconfig, then in-cluster config\n", - "# KubeConfigFileAuthentication can also be used to specify kubeconfig path manually\n", - "auth = TokenAuthentication(\n", - " token = \"XXXXX\",\n", - " server = \"XXXXX\",\n", - " skip_tls=False\n", - ")\n", - "auth.login()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3f32119a-c4ee-4163-b103-d9ca3bddbdb5", - "metadata": {}, - "outputs": [], - "source": [ - "cluster = Cluster(ClusterConfiguration(\n", - " name='gptfttest',\n", - " namespace='default',\n", - " num_workers=2,\n", - " min_cpus=2,\n", - " max_cpus=2,\n", - " min_memory=8,\n", - " max_memory=8,\n", - " num_gpus=1,\n", - " instascale=True, #<---instascale enabled\n", - " machine_types=[\"m5.xlarge\", \"g4dn.xlarge\"],\n", - "))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "107c8277-3b3b-4238-a786-a391a662fd7c", - "metadata": {}, - "outputs": [], - "source": [ - "cluster.up()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": 
"730f66ce-adaa-4709-b9cf-22417847e059", - "metadata": {}, - "outputs": [], - "source": [ - "cluster.wait_ready()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "48fac218-2f22-428b-9228-137a4bb0e666", - "metadata": {}, - "outputs": [], - "source": [ - "cluster.details()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9ed5bd75-4230-4c7c-a9e2-0f247890e62a", - "metadata": {}, - "outputs": [], - "source": [ - "from codeflare_sdk.job.jobs import DDPJobDefinition" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "611d203a-35aa-4357-a748-1d01b022fcdb", - "metadata": {}, - "outputs": [], - "source": [ - "arg_list = [\n", - " \"--model_name_or_path\", \"gpt2\",\n", - " \"--dataset_name\", \"wikitext\",\n", - " \"--dataset_config_name\", \"wikitext-2-raw-v1\",\n", - " \"--per_device_train_batch_size\", \"2\",\n", - " \"--per_device_eval_batch_size\", \"2\",\n", - " \"--do_train\",\n", - " \"--do_eval\",\n", - " \"--output_dir\", \"/tmp/test-clm\",\n", - " \"--overwrite_output_dir\"\n", - "]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8ac7c34f-e227-44c2-a4b1-a57c853ac3a7", - "metadata": {}, - "outputs": [], - "source": [ - "jobdef = DDPJobDefinition(\n", - " name=\"gpttest\",\n", - " script=\"gpt_og.py\",\n", - " script_args=arg_list,\n", - " scheduler_args={\"requirements\": \"requirements_gpt.txt\"}\n", - ")\n", - "job = jobdef.submit(cluster)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1680d287-de46-45f8-b95a-02ba3c83912c", - "metadata": {}, - "outputs": [], - "source": [ - "job.status()" - ] - }, - { - "cell_type": "markdown", - "id": "80bc1961", - "metadata": {}, - "source": [ - "Retrieve raw log output at anytime with:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d25d6198-9941-47e8-857f-9811830cc854", - "metadata": {}, - "outputs": [], - "source": [ - "job.logs()" - ] - }, - { - "cell_type": "markdown", - "id": 
"876b96b5", - "metadata": {}, - "source": [ - "View live updates for status, logs, and other information with:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "58f8a2e8", - "metadata": {}, - "outputs": [], - "source": [ - "cluster.cluster_dashboard_uri()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d7c13eab", - "metadata": {}, - "outputs": [], - "source": [ - "job.status()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "beb1a6b9-d9b3-49b7-b036-09f1d3569b59", - "metadata": {}, - "outputs": [], - "source": [ - "cluster.down()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8398d977-db24-46d0-a7d2-b4e9197808d7", - "metadata": {}, - "outputs": [], - "source": [ - "auth.logout()" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.13" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/demo-notebooks/guided-demos/download_mnist_datasets.py b/demo-notebooks/guided-demos/download_mnist_datasets.py new file mode 100644 index 00000000..6493296f --- /dev/null +++ b/demo-notebooks/guided-demos/download_mnist_datasets.py @@ -0,0 +1,46 @@ +# Copyright 2022 IBM, Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import os +from torchvision.datasets import MNIST +from torchvision import transforms + + +def download_mnist_dataset(destination_dir): + # Ensure the destination directory exists + if not os.path.exists(destination_dir): + os.makedirs(destination_dir) + + # Define transformations + transform = transforms.Compose( + [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))] + ) + + # Download the training data + train_set = MNIST( + root=destination_dir, train=True, download=True, transform=transform + ) + + # Download the test data + test_set = MNIST( + root=destination_dir, train=False, download=True, transform=transform + ) + + print(f"MNIST dataset downloaded in {destination_dir}") + + +# Specify the directory where you +destination_dir = os.path.dirname(os.path.abspath(__file__)) + +download_mnist_dataset(destination_dir) diff --git a/demo-notebooks/guided-demos/gpt_og.py b/demo-notebooks/guided-demos/gpt_og.py deleted file mode 100644 index d69e41fc..00000000 --- a/demo-notebooks/guided-demos/gpt_og.py +++ /dev/null @@ -1,728 +0,0 @@ -#!/usr/bin/env python -# coding=utf-8 -# Copyright 2020 The HuggingFace Inc. team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -Fine-tuning the library models for causal language modeling (GPT, GPT-2, CTRL, ...) on a text file or a dataset. 
- -Here is the full list of checkpoints on the hub that can be fine-tuned by this script: -https://huggingface.co/models?filter=text-generation -""" -# You can also adapt this script on your own causal language modeling task. Pointers for this are left as comments. - -import subprocess - -subprocess.run(["pip", "uninstall", "protobuf"]) -subprocess.run( - [ - "pip", - "install", - "--upgrade", - "--target=/home/ray/workspace", - "-r", - "requirements.txt", - ] -) - -import logging -import math -import os -import sys -from dataclasses import dataclass, field -from itertools import chain -from typing import Optional - -import datasets -import evaluate -import torch -from datasets import load_dataset - -import transformers -from transformers import ( - CONFIG_MAPPING, - MODEL_FOR_CAUSAL_LM_MAPPING, - AutoConfig, - AutoModelForCausalLM, - AutoTokenizer, - HfArgumentParser, - Trainer, - TrainingArguments, - default_data_collator, - is_torch_tpu_available, - set_seed, -) -from transformers.testing_utils import CaptureLogger -from transformers.trainer_utils import get_last_checkpoint -from transformers.utils import check_min_version, send_example_telemetry -from transformers.utils.versions import require_version - - -# Will error if the minimal version of Transformers is not installed. Remove at your own risks. -# check_min_version("4.29.0.dev0") - -require_version( - "datasets>=1.8.0", - "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt", -) - -logger = logging.getLogger(__name__) - - -MODEL_CONFIG_CLASSES = list(MODEL_FOR_CAUSAL_LM_MAPPING.keys()) -MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) - - -@dataclass -class ModelArguments: - """ - Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch. 
- """ - - model_name_or_path: Optional[str] = field( - default=None, - metadata={ - "help": ( - "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch." - ) - }, - ) - model_type: Optional[str] = field( - default=None, - metadata={ - "help": "If training from scratch, pass a model type from the list: " - + ", ".join(MODEL_TYPES) - }, - ) - config_overrides: Optional[str] = field( - default=None, - metadata={ - "help": ( - "Override some existing default config settings when a model is trained from scratch. Example: " - "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index" - ) - }, - ) - config_name: Optional[str] = field( - default=None, - metadata={ - "help": "Pretrained config name or path if not the same as model_name" - }, - ) - tokenizer_name: Optional[str] = field( - default=None, - metadata={ - "help": "Pretrained tokenizer name or path if not the same as model_name" - }, - ) - cache_dir: Optional[str] = field( - default=None, - metadata={ - "help": "Where do you want to store the pretrained models downloaded from huggingface.co" - }, - ) - use_fast_tokenizer: bool = field( - default=True, - metadata={ - "help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not." - }, - ) - model_revision: str = field( - default="main", - metadata={ - "help": "The specific model version to use (can be a branch name, tag name or commit id)." - }, - ) - use_auth_token: bool = field( - default=False, - metadata={ - "help": ( - "Will use the token generated when running `huggingface-cli login` (necessary to use this script " - "with private models)." - ) - }, - ) - torch_dtype: Optional[str] = field( - default=None, - metadata={ - "help": ( - "Override the default `torch.dtype` and load the model under this dtype. If `auto` is passed, the " - "dtype will be automatically derived from the model's weights." 
- ), - "choices": ["auto", "bfloat16", "float16", "float32"], - }, - ) - low_cpu_mem_usage: bool = field( - default=False, - metadata={ - "help": ( - "It is an option to create the model as an empty shell, then only materialize its parameters when the pretrained weights are loaded." - "set True will benefit LLM loading time and RAM consumption." - ) - }, - ) - - def __post_init__(self): - if self.config_overrides is not None and ( - self.config_name is not None or self.model_name_or_path is not None - ): - raise ValueError( - "--config_overrides can't be used in combination with --config_name or --model_name_or_path" - ) - - -@dataclass -class DataTrainingArguments: - """ - Arguments pertaining to what data we are going to input our model for training and eval. - """ - - dataset_name: Optional[str] = field( - default=None, - metadata={"help": "The name of the dataset to use (via the datasets library)."}, - ) - dataset_config_name: Optional[str] = field( - default=None, - metadata={ - "help": "The configuration name of the dataset to use (via the datasets library)." - }, - ) - train_file: Optional[str] = field( - default=None, metadata={"help": "The input training data file (a text file)."} - ) - validation_file: Optional[str] = field( - default=None, - metadata={ - "help": "An optional input evaluation data file to evaluate the perplexity on (a text file)." - }, - ) - max_train_samples: Optional[int] = field( - default=None, - metadata={ - "help": ( - "For debugging purposes or quicker training, truncate the number of training examples to this " - "value if set." - ) - }, - ) - max_eval_samples: Optional[int] = field( - default=None, - metadata={ - "help": ( - "For debugging purposes or quicker training, truncate the number of evaluation examples to this " - "value if set." 
- ) - }, - ) - streaming: bool = field(default=False, metadata={"help": "Enable streaming mode"}) - block_size: Optional[int] = field( - default=None, - metadata={ - "help": ( - "Optional input sequence length after tokenization. " - "The training dataset will be truncated in block of this size for training. " - "Default to the model max input length for single sentence inputs (take into account special tokens)." - ) - }, - ) - overwrite_cache: bool = field( - default=False, - metadata={"help": "Overwrite the cached training and evaluation sets"}, - ) - validation_split_percentage: Optional[int] = field( - default=5, - metadata={ - "help": "The percentage of the train set used as validation set in case there's no validation split" - }, - ) - preprocessing_num_workers: Optional[int] = field( - default=None, - metadata={"help": "The number of processes to use for the preprocessing."}, - ) - keep_linebreaks: bool = field( - default=True, - metadata={"help": "Whether to keep line breaks when using TXT files or not."}, - ) - - def __post_init__(self): - if self.streaming: - require_version( - "datasets>=2.0.0", "The streaming feature requires `datasets>=2.0.0`" - ) - - if ( - self.dataset_name is None - and self.train_file is None - and self.validation_file is None - ): - raise ValueError( - "Need either a dataset name or a training/validation file." - ) - else: - if self.train_file is not None: - extension = self.train_file.split(".")[-1] - assert extension in [ - "csv", - "json", - "txt", - ], "`train_file` should be a csv, a json or a txt file." - if self.validation_file is not None: - extension = self.validation_file.split(".")[-1] - assert extension in [ - "csv", - "json", - "txt", - ], "`validation_file` should be a csv, a json or a txt file." - - -def main(): - # See all possible arguments in src/transformers/training_args.py - # or by passing the --help flag to this script. - # We now keep distinct sets of args, for a cleaner separation of concerns. 
- - parser = HfArgumentParser( - (ModelArguments, DataTrainingArguments, TrainingArguments) - ) - if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): - # If we pass only one argument to the script and it's the path to a json file, - # let's parse it to get our arguments. - model_args, data_args, training_args = parser.parse_json_file( - json_file=os.path.abspath(sys.argv[1]) - ) - else: - model_args, data_args, training_args = parser.parse_args_into_dataclasses() - - # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The - # information sent is the one passed as arguments along with your Python/PyTorch versions. - send_example_telemetry("run_clm", model_args, data_args) - - # Setup logging - logging.basicConfig( - format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", - datefmt="%m/%d/%Y %H:%M:%S", - handlers=[logging.StreamHandler(sys.stdout)], - ) - - if training_args.should_log: - # The default of training_args.log_level is passive, so we set log level at info here to have that default. - transformers.utils.logging.set_verbosity_info() - - log_level = training_args.get_process_log_level() - logger.setLevel(log_level) - datasets.utils.logging.set_verbosity(log_level) - transformers.utils.logging.set_verbosity(log_level) - transformers.utils.logging.enable_default_handler() - transformers.utils.logging.enable_explicit_format() - - # Log on each process the small summary: - logger.warning( - f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" - + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" - ) - logger.info(f"Training/evaluation parameters {training_args}") - - # Detecting last checkpoint. 
- last_checkpoint = None - if ( - os.path.isdir(training_args.output_dir) - and training_args.do_train - and not training_args.overwrite_output_dir - ): - last_checkpoint = get_last_checkpoint(training_args.output_dir) - if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: - raise ValueError( - f"Output directory ({training_args.output_dir}) already exists and is not empty. " - "Use --overwrite_output_dir to overcome." - ) - elif ( - last_checkpoint is not None and training_args.resume_from_checkpoint is None - ): - logger.info( - f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " - "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." - ) - - # Set seed before initializing model. - set_seed(training_args.seed) - - # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) - # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ - # (the dataset will be downloaded automatically from the datasets Hub). - # - # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called - # 'text' is found. You can easily tweak this behavior (see below). - # - # In distributed training, the load_dataset function guarantee that only one local process can concurrently - # download the dataset. - if data_args.dataset_name is not None: - # Downloading and loading a dataset from the hub. 
- raw_datasets = load_dataset( - data_args.dataset_name, - data_args.dataset_config_name, - cache_dir=model_args.cache_dir, - use_auth_token=True if model_args.use_auth_token else None, - streaming=data_args.streaming, - ) - if "validation" not in raw_datasets.keys(): - raw_datasets["validation"] = load_dataset( - data_args.dataset_name, - data_args.dataset_config_name, - split=f"train[:{data_args.validation_split_percentage}%]", - cache_dir=model_args.cache_dir, - use_auth_token=True if model_args.use_auth_token else None, - streaming=data_args.streaming, - ) - raw_datasets["train"] = load_dataset( - data_args.dataset_name, - data_args.dataset_config_name, - split=f"train[{data_args.validation_split_percentage}%:]", - cache_dir=model_args.cache_dir, - use_auth_token=True if model_args.use_auth_token else None, - streaming=data_args.streaming, - ) - else: - data_files = {} - dataset_args = {} - if data_args.train_file is not None: - data_files["train"] = data_args.train_file - if data_args.validation_file is not None: - data_files["validation"] = data_args.validation_file - extension = ( - data_args.train_file.split(".")[-1] - if data_args.train_file is not None - else data_args.validation_file.split(".")[-1] - ) - if extension == "txt": - extension = "text" - dataset_args["keep_linebreaks"] = data_args.keep_linebreaks - raw_datasets = load_dataset( - extension, - data_files=data_files, - cache_dir=model_args.cache_dir, - use_auth_token=True if model_args.use_auth_token else None, - **dataset_args, - ) - # If no validation data is there, validation_split_percentage will be used to divide the dataset. 
- if "validation" not in raw_datasets.keys(): - raw_datasets["validation"] = load_dataset( - extension, - data_files=data_files, - split=f"train[:{data_args.validation_split_percentage}%]", - cache_dir=model_args.cache_dir, - use_auth_token=True if model_args.use_auth_token else None, - **dataset_args, - ) - raw_datasets["train"] = load_dataset( - extension, - data_files=data_files, - split=f"train[{data_args.validation_split_percentage}%:]", - cache_dir=model_args.cache_dir, - use_auth_token=True if model_args.use_auth_token else None, - **dataset_args, - ) - - # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at - # https://huggingface.co/docs/datasets/loading_datasets.html. - - # Load pretrained model and tokenizer - # - # Distributed training: - # The .from_pretrained methods guarantee that only one local process can concurrently - # download model & vocab. - - config_kwargs = { - "cache_dir": model_args.cache_dir, - "revision": model_args.model_revision, - "use_auth_token": True if model_args.use_auth_token else None, - } - if model_args.config_name: - config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs) - elif model_args.model_name_or_path: - config = AutoConfig.from_pretrained( - model_args.model_name_or_path, **config_kwargs - ) - else: - config = CONFIG_MAPPING[model_args.model_type]() - logger.warning("You are instantiating a new config instance from scratch.") - if model_args.config_overrides is not None: - logger.info(f"Overriding config: {model_args.config_overrides}") - config.update_from_string(model_args.config_overrides) - logger.info(f"New config: {config}") - - tokenizer_kwargs = { - "cache_dir": model_args.cache_dir, - "use_fast": model_args.use_fast_tokenizer, - "revision": model_args.model_revision, - "use_auth_token": True if model_args.use_auth_token else None, - } - if model_args.tokenizer_name: - tokenizer = AutoTokenizer.from_pretrained( - 
model_args.tokenizer_name, **tokenizer_kwargs - ) - elif model_args.model_name_or_path: - tokenizer = AutoTokenizer.from_pretrained( - model_args.model_name_or_path, **tokenizer_kwargs - ) - else: - raise ValueError( - "You are instantiating a new tokenizer from scratch. This is not supported by this script." - "You can do it from another script, save it, and load it from here, using --tokenizer_name." - ) - - if model_args.model_name_or_path: - torch_dtype = ( - model_args.torch_dtype - if model_args.torch_dtype in ["auto", None] - else getattr(torch, model_args.torch_dtype) - ) - model = AutoModelForCausalLM.from_pretrained( - model_args.model_name_or_path, - from_tf=bool(".ckpt" in model_args.model_name_or_path), - config=config, - cache_dir=model_args.cache_dir, - revision=model_args.model_revision, - use_auth_token=True if model_args.use_auth_token else None, - torch_dtype=torch_dtype, - low_cpu_mem_usage=model_args.low_cpu_mem_usage, - ) - else: - model = AutoModelForCausalLM.from_config(config) - n_params = sum({p.data_ptr(): p.numel() for p in model.parameters()}.values()) - logger.info( - f"Training new model from scratch - Total size={n_params/2**20:.2f}M params" - ) - - # We resize the embeddings only when necessary to avoid index errors. If you are creating a model from scratch - # on a small vocab and want a smaller embedding size, remove this test. - embedding_size = model.get_input_embeddings().weight.shape[0] - if len(tokenizer) > embedding_size: - model.resize_token_embeddings(len(tokenizer)) - - # Preprocessing the datasets. - # First we tokenize all the texts. 
- if training_args.do_train: - column_names = list(raw_datasets["train"].features) - else: - column_names = list(raw_datasets["validation"].features) - text_column_name = "text" if "text" in column_names else column_names[0] - - # since this will be pickled to avoid _LazyModule error in Hasher force logger loading before tokenize_function - tok_logger = transformers.utils.logging.get_logger( - "transformers.tokenization_utils_base" - ) - - def tokenize_function(examples): - with CaptureLogger(tok_logger) as cl: - output = tokenizer(examples[text_column_name]) - # clm input could be much much longer than block_size - if "Token indices sequence length is longer than the" in cl.out: - tok_logger.warning( - "^^^^^^^^^^^^^^^^ Please ignore the warning above - this long input will be chunked into smaller bits" - " before being passed to the model." - ) - return output - - with training_args.main_process_first(desc="dataset map tokenization"): - if not data_args.streaming: - tokenized_datasets = raw_datasets.map( - tokenize_function, - batched=True, - num_proc=data_args.preprocessing_num_workers, - remove_columns=column_names, - load_from_cache_file=not data_args.overwrite_cache, - desc="Running tokenizer on dataset", - ) - else: - tokenized_datasets = raw_datasets.map( - tokenize_function, - batched=True, - remove_columns=column_names, - ) - - if data_args.block_size is None: - block_size = tokenizer.model_max_length - if block_size > 1024: - logger.warning( - "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value" - " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can" - " override this default with `--block_size xxx`." - ) - block_size = 1024 - else: - if data_args.block_size > tokenizer.model_max_length: - logger.warning( - f"The block_size passed ({data_args.block_size}) is larger than the maximum length for the model" - f"({tokenizer.model_max_length}). 
Using block_size={tokenizer.model_max_length}." - ) - block_size = min(data_args.block_size, tokenizer.model_max_length) - - # Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size. - def group_texts(examples): - # Concatenate all texts. - concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()} - total_length = len(concatenated_examples[list(examples.keys())[0]]) - # We drop the small remainder, we could add padding if the model supported it instead of this drop, you can - # customize this part to your needs. - if total_length >= block_size: - total_length = (total_length // block_size) * block_size - # Split by chunks of max_len. - result = { - k: [t[i : i + block_size] for i in range(0, total_length, block_size)] - for k, t in concatenated_examples.items() - } - result["labels"] = result["input_ids"].copy() - return result - - # Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a remainder - # for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value might be slower - # to preprocess. - # - # To speed up this part, we use multiprocessing. 
See the documentation of the map method for more information: - # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map - - with training_args.main_process_first(desc="grouping texts together"): - if not data_args.streaming: - lm_datasets = tokenized_datasets.map( - group_texts, - batched=True, - num_proc=data_args.preprocessing_num_workers, - load_from_cache_file=not data_args.overwrite_cache, - desc=f"Grouping texts in chunks of {block_size}", - ) - else: - lm_datasets = tokenized_datasets.map( - group_texts, - batched=True, - ) - - if training_args.do_train: - if "train" not in tokenized_datasets: - raise ValueError("--do_train requires a train dataset") - train_dataset = lm_datasets["train"] - if data_args.max_train_samples is not None: - max_train_samples = min(len(train_dataset), data_args.max_train_samples) - train_dataset = train_dataset.select(range(max_train_samples)) - - if training_args.do_eval: - if "validation" not in tokenized_datasets: - raise ValueError("--do_eval requires a validation dataset") - eval_dataset = lm_datasets["validation"] - if data_args.max_eval_samples is not None: - max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples) - eval_dataset = eval_dataset.select(range(max_eval_samples)) - - def preprocess_logits_for_metrics(logits, labels): - if isinstance(logits, tuple): - # Depending on the model and config, logits may contain extra tensors, - # like past_key_values, but logits always come first - logits = logits[0] - return logits.argmax(dim=-1) - - metric = evaluate.load("accuracy") - - def compute_metrics(eval_preds): - preds, labels = eval_preds - # preds have the same shape as the labels, after the argmax(-1) has been calculated - # by preprocess_logits_for_metrics but we need to shift the labels - labels = labels[:, 1:].reshape(-1) - preds = preds[:, :-1].reshape(-1) - return metric.compute(predictions=preds, references=labels) - - # Initialize our Trainer - trainer = 
Trainer( - model=model, - args=training_args, - train_dataset=train_dataset if training_args.do_train else None, - eval_dataset=eval_dataset if training_args.do_eval else None, - tokenizer=tokenizer, - # Data collator will default to DataCollatorWithPadding, so we change it. - data_collator=default_data_collator, - compute_metrics=compute_metrics - if training_args.do_eval and not is_torch_tpu_available() - else None, - preprocess_logits_for_metrics=preprocess_logits_for_metrics - if training_args.do_eval and not is_torch_tpu_available() - else None, - ) - - # Training - if training_args.do_train: - checkpoint = None - if training_args.resume_from_checkpoint is not None: - checkpoint = training_args.resume_from_checkpoint - elif last_checkpoint is not None: - checkpoint = last_checkpoint - train_result = trainer.train(resume_from_checkpoint=checkpoint) - trainer.save_model() # Saves the tokenizer too for easy upload - - metrics = train_result.metrics - - max_train_samples = ( - data_args.max_train_samples - if data_args.max_train_samples is not None - else len(train_dataset) - ) - metrics["train_samples"] = min(max_train_samples, len(train_dataset)) - - trainer.log_metrics("train", metrics) - trainer.save_metrics("train", metrics) - trainer.save_state() - - # Evaluation - if training_args.do_eval: - logger.info("*** Evaluate ***") - - metrics = trainer.evaluate() - - max_eval_samples = ( - data_args.max_eval_samples - if data_args.max_eval_samples is not None - else len(eval_dataset) - ) - metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset)) - try: - perplexity = math.exp(metrics["eval_loss"]) - except OverflowError: - perplexity = float("inf") - metrics["perplexity"] = perplexity - - trainer.log_metrics("eval", metrics) - trainer.save_metrics("eval", metrics) - - kwargs = { - "finetuned_from": model_args.model_name_or_path, - "tasks": "text-generation", - } - if data_args.dataset_name is not None: - kwargs["dataset_tags"] = data_args.dataset_name - 
if data_args.dataset_config_name is not None: - kwargs["dataset_args"] = data_args.dataset_config_name - kwargs[ - "dataset" - ] = f"{data_args.dataset_name} {data_args.dataset_config_name}" - else: - kwargs["dataset"] = data_args.dataset_name - - if training_args.push_to_hub: - trainer.push_to_hub(**kwargs) - else: - trainer.create_model_card(**kwargs) - - -def _mp_fn(index): - # For xla_spawn (TPUs) - main() - - -if __name__ == "__main__": - main() diff --git a/demo-notebooks/guided-demos/mnist_disconnected.py b/demo-notebooks/guided-demos/mnist_disconnected.py new file mode 100644 index 00000000..9fc72130 --- /dev/null +++ b/demo-notebooks/guided-demos/mnist_disconnected.py @@ -0,0 +1,164 @@ +# Copyright 2022 IBM, Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# In[] +import os + +import torch +from pytorch_lightning import LightningModule, Trainer +from pytorch_lightning.callbacks.progress import TQDMProgressBar +from pytorch_lightning.loggers import CSVLogger +from torch import nn +from torch.nn import functional as F +from torch.utils.data import DataLoader, random_split +from torchmetrics import Accuracy +from torchvision import transforms +from torchvision.datasets import MNIST + +PATH_DATASETS = os.environ.get("PATH_DATASETS", ".") +BATCH_SIZE = 256 if torch.cuda.is_available() else 64 +# %% + +local_minst_path = os.path.dirname(os.path.abspath(__file__)) + +print("prior to running the trainer") +print("MASTER_ADDR: is ", os.getenv("MASTER_ADDR")) +print("MASTER_PORT: is ", os.getenv("MASTER_PORT")) + + +class LitMNIST(LightningModule): + def __init__(self, data_dir=PATH_DATASETS, hidden_size=64, learning_rate=2e-4): + super().__init__() + + # Set our init args as class attributes + self.data_dir = data_dir + self.hidden_size = hidden_size + self.learning_rate = learning_rate + + # Hardcode some dataset specific attributes + self.num_classes = 10 + self.dims = (1, 28, 28) + channels, width, height = self.dims + self.transform = transforms.Compose( + [ + transforms.ToTensor(), + transforms.Normalize((0.1307,), (0.3081,)), + ] + ) + + # Define PyTorch model + self.model = nn.Sequential( + nn.Flatten(), + nn.Linear(channels * width * height, hidden_size), + nn.ReLU(), + nn.Dropout(0.1), + nn.Linear(hidden_size, hidden_size), + nn.ReLU(), + nn.Dropout(0.1), + nn.Linear(hidden_size, self.num_classes), + ) + + self.val_accuracy = Accuracy() + self.test_accuracy = Accuracy() + + def forward(self, x): + x = self.model(x) + return F.log_softmax(x, dim=1) + + def training_step(self, batch, batch_idx): + x, y = batch + logits = self(x) + loss = F.nll_loss(logits, y) + return loss + + def validation_step(self, batch, batch_idx): + x, y = batch + logits = self(x) + loss = F.nll_loss(logits, y) + preds = torch.argmax(logits, 
dim=1) + self.val_accuracy.update(preds, y) + + # Calling self.log will surface up scalars for you in TensorBoard + self.log("val_loss", loss, prog_bar=True) + self.log("val_acc", self.val_accuracy, prog_bar=True) + + def test_step(self, batch, batch_idx): + x, y = batch + logits = self(x) + loss = F.nll_loss(logits, y) + preds = torch.argmax(logits, dim=1) + self.test_accuracy.update(preds, y) + + # Calling self.log will surface up scalars for you in TensorBoard + self.log("test_loss", loss, prog_bar=True) + self.log("test_acc", self.test_accuracy, prog_bar=True) + + def configure_optimizers(self): + optimizer = torch.optim.Adam(self.parameters(), lr=self.learning_rate) + return optimizer + + #################### + # DATA RELATED HOOKS + #################### + + def prepare_data(self): + # download + print("Preparing MNIST dataset...") + MNIST(self.data_dir, train=True, download=False) + MNIST(self.data_dir, train=False, download=False) + + def setup(self, stage=None): + # Assign train/val datasets for use in dataloaders + if stage == "fit" or stage is None: + mnist_full = MNIST( + self.data_dir, train=True, transform=self.transform, download=False + ) + self.mnist_train, self.mnist_val = random_split(mnist_full, [55000, 5000]) + + # Assign test dataset for use in dataloader(s) + if stage == "test" or stage is None: + self.mnist_test = MNIST( + self.data_dir, train=False, transform=self.transform, download=False + ) + + def train_dataloader(self): + return DataLoader(self.mnist_train, batch_size=BATCH_SIZE) + + def val_dataloader(self): + return DataLoader(self.mnist_val, batch_size=BATCH_SIZE) + + def test_dataloader(self): + return DataLoader(self.mnist_test, batch_size=BATCH_SIZE) + + +# Init DataLoader from MNIST Dataset + +model = LitMNIST(data_dir=local_minst_path) + +print("GROUP: ", int(os.environ.get("GROUP_WORLD_SIZE", 1))) +print("LOCAL: ", int(os.environ.get("LOCAL_WORLD_SIZE", 1))) + +# Initialize a trainer +trainer = Trainer( + accelerator="auto", + 
# devices=1 if torch.cuda.is_available() else None, # limiting got iPython runs + max_epochs=5, + callbacks=[TQDMProgressBar(refresh_rate=20)], + num_nodes=int(os.environ.get("GROUP_WORLD_SIZE", 1)), + devices=int(os.environ.get("LOCAL_WORLD_SIZE", 1)), + strategy="ddp", +) + +# Train the model ⚡ +trainer.fit(model) diff --git a/demo-notebooks/guided-demos/mnist_fashion.py b/demo-notebooks/guided-demos/mnist_fashion.py new file mode 100644 index 00000000..ba5b2636 --- /dev/null +++ b/demo-notebooks/guided-demos/mnist_fashion.py @@ -0,0 +1,93 @@ +import torch +import torch.nn as nn +import ray +from torch.utils.data import DataLoader +from torchvision import datasets +from torchvision.transforms import ToTensor +from ray.train.torch import TorchTrainer +from ray.train import ScalingConfig + + +def get_dataset(): + return datasets.FashionMNIST( + root="/tmp/data", + train=True, + download=True, + transform=ToTensor(), + ) + + +class NeuralNetwork(nn.Module): + def __init__(self): + super().__init__() + self.flatten = nn.Flatten() + self.linear_relu_stack = nn.Sequential( + nn.Linear(28 * 28, 512), + nn.ReLU(), + nn.Linear(512, 512), + nn.ReLU(), + nn.Linear(512, 10), + ) + + def forward(self, inputs): + inputs = self.flatten(inputs) + logits = self.linear_relu_stack(inputs) + return logits + + +def get_dataset(): + return datasets.FashionMNIST( + root="/tmp/data", + train=True, + download=True, + transform=ToTensor(), + ) + + +def train_func_distributed(): + num_epochs = 3 + batch_size = 64 + + dataset = get_dataset() + dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True) + dataloader = ray.train.torch.prepare_data_loader(dataloader) + + model = NeuralNetwork() + model = ray.train.torch.prepare_model(model) + + criterion = nn.CrossEntropyLoss() + optimizer = torch.optim.SGD(model.parameters(), lr=0.01) + + for epoch in range(num_epochs): + if ray.train.get_context().get_world_size() > 1: + dataloader.sampler.set_epoch(epoch) + + for inputs, labels in 
dataloader: + optimizer.zero_grad() + pred = model(inputs) + loss = criterion(pred, labels) + loss.backward() + optimizer.step() + print(f"epoch: {epoch}, loss: {loss.item()}") + + +# For GPU Training, set `use_gpu` to True. +use_gpu = True + +# To learn more about configuring S3 compatible storage check out our docs -> https://github.com/project-codeflare/codeflare-sdk/blob/main/docs/s3-compatible-storage.md +trainer = TorchTrainer( + train_func_distributed, + scaling_config=ScalingConfig( + # num_workers = number of worker nodes with the ray head node included + num_workers=3, + use_gpu=use_gpu, + resources_per_worker={ + "CPU": 1, + }, + trainer_resources={ + "CPU": 0, + }, + ), +) + +results = trainer.fit() diff --git a/demo-notebooks/guided-demos/notebook-ex-outputs/0_basic_ray.ipynb b/demo-notebooks/guided-demos/notebook-ex-outputs/0_basic_ray.ipynb index be4e8a5e..49f7f687 100644 --- a/demo-notebooks/guided-demos/notebook-ex-outputs/0_basic_ray.ipynb +++ b/demo-notebooks/guided-demos/notebook-ex-outputs/0_basic_ray.ipynb @@ -5,7 +5,7 @@ "id": "8d4a42f6", "metadata": {}, "source": [ - "In this first notebook, we will go through the basics of using the SDK to:\n", + "In this notebook, we will go through the basics of using the SDK to:\n", " - Spin up a Ray cluster with our desired resources\n", " - View the status and specs of our Ray cluster\n", " - Take down the Ray cluster when finished" @@ -13,14 +13,13 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": null, "id": "b55bc3ea-4ce3-49bf-bb1f-e209de8ca47a", "metadata": {}, "outputs": [], "source": [ "# Import pieces from codeflare-sdk\n", - "from codeflare_sdk.cluster.cluster import Cluster, ClusterConfiguration\n", - "from codeflare_sdk.cluster.auth import TokenAuthentication" + "from codeflare_sdk import Cluster, ClusterConfiguration, TokenAuthentication" ] }, { @@ -46,36 +45,37 @@ "id": "bc27f84c", "metadata": {}, "source": [ - "Here, we want to define our cluster by 
specifying the resources we require for our batch workload. Below, we define our cluster object (which generates a corresponding AppWrapper)." + "Here, we want to define our cluster by specifying the resources we require for our batch workload. Below, we define our cluster object (which generates a corresponding RayCluster).\n", + "\n", + "NOTE: The default images used by the CodeFlare SDK for creating a RayCluster resource depend on the installed Python version:\n", + "\n", + "- For Python 3.11: 'quay.io/modh/ray:2.47.1-py311-cu121'\n", + "- For Python 3.12: 'quay.io/modh/ray:2.47.1-py312-cu128'\n", + "\n", + "If you prefer to use a custom Ray image that better suits your needs, you can specify it in the image field to override the default." ] }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, "id": "0f4bc870-091f-4e11-9642-cba145710159", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Written to: raytest.yaml\n" - ] - } - ], + "outputs": [], "source": [ - "# Create and configure our cluster object (and appwrapper)\n", + "# Create and configure our cluster object\n", + "# The SDK will try to find the name of your default local queue based on the annotation \"kueue.x-k8s.io/default-queue\": \"true\" unless you specify the local queue manually below\n", "cluster = Cluster(ClusterConfiguration(\n", " name='raytest',\n", - " namespace='default',\n", + " head_extended_resource_requests={'nvidia.com/gpu':0}, # For GPU enabled workloads set the head_extended_resource_requests and worker_extended_resource_requests\n", + " worker_extended_resource_requests={'nvidia.com/gpu':0},\n", " num_workers=2,\n", - " min_cpus=1,\n", - " max_cpus=1,\n", - " min_memory=4,\n", - " max_memory=4,\n", - " num_gpus=0,\n", - " image=\"quay.io/project-codeflare/ray:2.5.0-py38-cu116\", #current default\n", - " instascale=False\n", + " worker_cpu_requests=1,\n", + " worker_cpu_limits=1,\n", + " 
worker_memory_requests=4,\n", + " worker_memory_limits=4,\n", + " # image=\"\", # Optional Field \n", + " write_to_file=False, # When enabled Ray Cluster yaml files are written to /HOME/.codeflare/resources \n", + " # local_queue=\"local-queue-name\" # Specify the local queue manually\n", "))" ] }, @@ -84,18 +84,18 @@ "id": "12eef53c", "metadata": {}, "source": [ - "Next, we want to bring our cluster up, so we call the `up()` function below to submit our cluster AppWrapper yaml onto the MCAD queue, and begin the process of obtaining our resource cluster." + "Next, we want to bring our cluster up, so we call the `up()` function below to submit our Ray Cluster onto the queue, and begin the process of obtaining our resource cluster." ] }, { "cell_type": "code", - "execution_count": 3, + "execution_count": null, "id": "f0884bbc-c224-4ca0-98a0-02dfa09c2200", "metadata": {}, "outputs": [], "source": [ "# Bring up the cluster\n", - "cluster.up()" + "cluster.apply()" ] }, { @@ -108,125 +108,30 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": null, "id": "3c1b4311-2e61-44c9-8225-87c2db11363d", "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "
╭───────────────────────╮\n",
-       "│    🚀 Cluster Queue   │\n",
-       "│       Status 🚀       │\n",
-       "│ +---------+---------+ │\n",
-       "│ | Name    | Status  | │\n",
-       "│ +=========+=========+ │\n",
-       "│ | raytest | pending | │\n",
-       "│ |         |         | │\n",
-       "│ +---------+---------+ │\n",
-       "╰───────────────────────╯\n",
-       "
\n" - ], - "text/plain": [ - "╭───────────────────────╮\n", - "│ \u001b[3m \u001b[0m\u001b[1;3m 🚀 Cluster Queue\u001b[0m\u001b[3m \u001b[0m │\n", - "│ \u001b[3m \u001b[0m\u001b[1;3mStatus 🚀\u001b[0m\u001b[3m \u001b[0m │\n", - "│ +---------+---------+ │\n", - "│ |\u001b[1m \u001b[0m\u001b[1mName \u001b[0m\u001b[1m \u001b[0m|\u001b[1m \u001b[0m\u001b[1mStatus \u001b[0m\u001b[1m \u001b[0m| │\n", - "│ +=========+=========+ │\n", - "│ |\u001b[36m \u001b[0m\u001b[36mraytest\u001b[0m\u001b[36m \u001b[0m|\u001b[35m \u001b[0m\u001b[35mpending\u001b[0m\u001b[35m \u001b[0m| │\n", - "│ |\u001b[36m \u001b[0m\u001b[36m \u001b[0m\u001b[36m \u001b[0m|\u001b[35m \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m| │\n", - "│ +---------+---------+ │\n", - "╰───────────────────────╯\n" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/plain": [ - "(, False)" - ] - }, - "execution_count": 4, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "cluster.status()" ] }, { "cell_type": "code", - "execution_count": 5, + "execution_count": null, "id": "a99d5aff", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Waiting for requested resources to be set up...\n", - "Requested cluster up and running!\n" - ] - } - ], + "outputs": [], "source": [ "cluster.wait_ready()" ] }, { "cell_type": "code", - "execution_count": 6, + "execution_count": null, "id": "df71c1ed", "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "
                  🚀 CodeFlare Cluster Status 🚀                  \n",
-       "                                                                  \n",
-       " ╭──────────────────────────────────────────────────────────────╮ \n",
-       " │   Name                                                       │ \n",
-       " │   raytest                                        Active ✅   │ \n",
-       " │                                                              │ \n",
-       " │   URI: ray://raytest-head-svc.default.svc:10001              │ \n",
-       " │                                                              │ \n",
-       " │   Dashboard🔗                                                │ \n",
-       " │                                                              │ \n",
-       " ╰──────────────────────────────────────────────────────────────╯ \n",
-       "
\n" - ], - "text/plain": [ - "\u001b[3m \u001b[0m\u001b[1;3m 🚀 CodeFlare Cluster Status 🚀\u001b[0m\u001b[3m \u001b[0m\n", - "\u001b[1m \u001b[0m\u001b[1m \u001b[0m\u001b[1m \u001b[0m\n", - " ╭──────────────────────────────────────────────────────────────╮ \n", - " │ \u001b[1;37;42mName\u001b[0m │ \n", - " │ \u001b[1;4mraytest\u001b[0m Active ✅ │ \n", - " │ │ \n", - " │ \u001b[1mURI:\u001b[0m ray://raytest-head-svc.default.svc:10001 │ \n", - " │ │ \n", - " │ \u001b]8;id=630217;ray-dashboard-raytest-default.apps.meyceoz-07122023.psap.aws.rhperfscale.org\u001b\\\u001b[4;34mDashboard🔗\u001b[0m\u001b]8;;\u001b\\ │ \n", - " │ │ \n", - " ╰──────────────────────────────────────────────────────────────╯ \n" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/plain": [ - "(, True)" - ] - }, - "execution_count": 6, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "cluster.status()" ] @@ -241,68 +146,10 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": null, "id": "7fd45bc5-03c0-4ae5-9ec5-dd1c30f1a084", "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "
                  🚀 CodeFlare Cluster Details 🚀                  \n",
-       "                                                                   \n",
-       " ╭───────────────────────────────────────────────────────────────╮ \n",
-       " │   Name                                                        │ \n",
-       " │   raytest                                        Active ✅    │ \n",
-       " │                                                               │ \n",
-       " │   URI: ray://raytest-head-svc.default.svc:10001               │ \n",
-       " │                                                               │ \n",
-       " │   Dashboard🔗                                                 │ \n",
-       " │                                                               │ \n",
-       " │                       Cluster Resources                       │ \n",
-       " │   ╭── Workers ──╮  ╭───────── Worker specs(each) ─────────╮   │ \n",
-       " │   │  # Workers  │  │  Memory      CPU         GPU         │   │ \n",
-       " │   │             │  │                                      │   │ \n",
-       " │   │  2          │  │  4~4         1           0           │   │ \n",
-       " │   │             │  │                                      │   │ \n",
-       " │   ╰─────────────╯  ╰──────────────────────────────────────╯   │ \n",
-       " ╰───────────────────────────────────────────────────────────────╯ \n",
-       "
\n" - ], - "text/plain": [ - "\u001b[3m \u001b[0m\u001b[1;3m 🚀 CodeFlare Cluster Details 🚀\u001b[0m\u001b[3m \u001b[0m\n", - "\u001b[1m \u001b[0m\u001b[1m \u001b[0m\u001b[1m \u001b[0m\n", - " ╭───────────────────────────────────────────────────────────────╮ \n", - " │ \u001b[1;37;42mName\u001b[0m │ \n", - " │ \u001b[1;4mraytest\u001b[0m Active ✅ │ \n", - " │ │ \n", - " │ \u001b[1mURI:\u001b[0m ray://raytest-head-svc.default.svc:10001 │ \n", - " │ │ \n", - " │ \u001b]8;id=623965;http://ray-dashboard-raytest-default.apps.meyceoz-07122023.psap.aws.rhperfscale.org\u001b\\\u001b[4;34mDashboard🔗\u001b[0m\u001b]8;;\u001b\\ │ \n", - " │ │ \n", - " │ \u001b[3m Cluster Resources \u001b[0m │ \n", - " │ ╭── Workers ──╮ ╭───────── Worker specs(each) ─────────╮ │ \n", - " │ │ \u001b[1m \u001b[0m\u001b[1m# Workers\u001b[0m\u001b[1m \u001b[0m │ │ \u001b[1m \u001b[0m\u001b[1mMemory \u001b[0m\u001b[1m \u001b[0m\u001b[1m \u001b[0m\u001b[1mCPU \u001b[0m\u001b[1m \u001b[0m\u001b[1m \u001b[0m\u001b[1mGPU \u001b[0m\u001b[1m \u001b[0m │ │ \n", - " │ │ \u001b[35m \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m │ │ \u001b[36m \u001b[0m\u001b[36m \u001b[0m\u001b[36m \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m │ │ \n", - " │ │ \u001b[35m \u001b[0m\u001b[35m2 \u001b[0m\u001b[35m \u001b[0m │ │ \u001b[36m \u001b[0m\u001b[36m4~4 \u001b[0m\u001b[36m \u001b[0m\u001b[35m \u001b[0m\u001b[35m1 \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m\u001b[35m0 \u001b[0m\u001b[35m \u001b[0m │ │ \n", - " │ │ \u001b[35m \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m │ │ \u001b[36m \u001b[0m\u001b[36m \u001b[0m\u001b[36m \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m │ │ \n", - " │ ╰─────────────╯ ╰──────────────────────────────────────╯ │ \n", - " ╰───────────────────────────────────────────────────────────────╯ \n" - ] - }, - "metadata": 
{}, - "output_type": "display_data" - }, - { - "data": { - "text/plain": [ - "RayCluster(name='raytest', status=, workers=2, worker_mem_min=4, worker_mem_max=4, worker_cpu=1, worker_gpu=0, namespace='default', dashboard='http://ray-dashboard-raytest-default.apps.meyceoz-07122023.psap.aws.rhperfscale.org')" - ] - }, - "execution_count": 7, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "cluster.details()" ] @@ -317,7 +164,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": null, "id": "5f36db0f-31f6-4373-9503-dc3c1c4c3f57", "metadata": {}, "outputs": [], @@ -352,7 +199,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.17" + "version": "3.9.18" }, "vscode": { "interpreter": { diff --git a/demo-notebooks/guided-demos/notebook-ex-outputs/1_basic_instascale.ipynb b/demo-notebooks/guided-demos/notebook-ex-outputs/1_basic_instascale.ipynb deleted file mode 100644 index 97a2b382..00000000 --- a/demo-notebooks/guided-demos/notebook-ex-outputs/1_basic_instascale.ipynb +++ /dev/null @@ -1,249 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "9865ee8c", - "metadata": {}, - "source": [ - "In this second notebook, we will go over the basics of using InstaScale to scale up/down necessary resources that are not currently available on your OpenShift Cluster (in cloud environments)." 
- ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "b55bc3ea-4ce3-49bf-bb1f-e209de8ca47a", - "metadata": {}, - "outputs": [], - "source": [ - "# Import pieces from codeflare-sdk\n", - "from codeflare_sdk.cluster.cluster import Cluster, ClusterConfiguration\n", - "from codeflare_sdk.cluster.auth import TokenAuthentication" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "614daa0c", - "metadata": {}, - "outputs": [], - "source": [ - "# Create authentication object for user permissions\n", - "# IF unused, SDK will automatically check for default kubeconfig, then in-cluster config\n", - "# KubeConfigFileAuthentication can also be used to specify kubeconfig path manually\n", - "auth = TokenAuthentication(\n", - " token = \"XXXXX\",\n", - " server = \"XXXXX\",\n", - " skip_tls=False\n", - ")\n", - "auth.login()" - ] - }, - { - "cell_type": "markdown", - "id": "bc27f84c", - "metadata": {}, - "source": [ - "This time, we are working in a cloud environment, and our OpenShift cluster does not have the resources needed for our desired workloads. 
We will use InstaScale to dynamically scale-up guaranteed resources based on our request (that will also automatically scale-down when we are finished working):" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "0f4bc870-091f-4e11-9642-cba145710159", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Written to: instascaletest.yaml\n" - ] - } - ], - "source": [ - "# Create and configure our cluster object (and appwrapper)\n", - "cluster = Cluster(ClusterConfiguration(\n", - " name='instascaletest',\n", - " namespace='default',\n", - " num_workers=2,\n", - " min_cpus=2,\n", - " max_cpus=2,\n", - " min_memory=8,\n", - " max_memory=8,\n", - " num_gpus=1,\n", - " instascale=True, # InstaScale now enabled, will scale OCP cluster to guarantee resource request\n", - " machine_types=[\"m5.xlarge\", \"g4dn.xlarge\"] # Head, worker AWS machine types desired\n", - "))" - ] - }, - { - "cell_type": "markdown", - "id": "12eef53c", - "metadata": {}, - "source": [ - "Same as last time, we will bring the cluster up, wait for it to be ready, and confirm that the specs are as-requested:" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "f0884bbc-c224-4ca0-98a0-02dfa09c2200", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Waiting for requested resources to be set up...\n", - "Requested cluster up and running!\n" - ] - } - ], - "source": [ - "# Bring up the cluster\n", - "cluster.up()\n", - "cluster.wait_ready()" - ] - }, - { - "cell_type": "markdown", - "id": "6abfe904", - "metadata": {}, - "source": [ - "While the resources are being scaled, we can also go into the console and take a look at the InstaScale logs, as well as the new machines/nodes spinning up.\n", - "\n", - "Once the cluster is ready, we can confirm the specs:" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "7fd45bc5-03c0-4ae5-9ec5-dd1c30f1a084", - "metadata": 
{}, - "outputs": [ - { - "data": { - "text/html": [ - "
                     🚀 CodeFlare Cluster Details 🚀                     \n",
-       "                                                                         \n",
-       " ╭─────────────────────────────────────────────────────────────────────╮ \n",
-       " │   Name                                                              │ \n",
-       " │   instascaletest                                        Active ✅   │ \n",
-       " │                                                                     │ \n",
-       " │   URI: ray://instascaletest-head-svc.default.svc:10001              │ \n",
-       " │                                                                     │ \n",
-       " │   Dashboard🔗                                                       │ \n",
-       " │                                                                     │ \n",
-       " │                       Cluster Resources                             │ \n",
-       " │   ╭── Workers ──╮  ╭───────── Worker specs(each) ─────────╮         │ \n",
-       " │   │  # Workers  │  │  Memory      CPU         GPU         │         │ \n",
-       " │   │             │  │                                      │         │ \n",
-       " │   │  2          │  │  8~8         2           1           │         │ \n",
-       " │   │             │  │                                      │         │ \n",
-       " │   ╰─────────────╯  ╰──────────────────────────────────────╯         │ \n",
-       " ╰─────────────────────────────────────────────────────────────────────╯ \n",
-       "
\n" - ], - "text/plain": [ - "\u001b[3m \u001b[0m\u001b[1;3m 🚀 CodeFlare Cluster Details 🚀\u001b[0m\u001b[3m \u001b[0m\n", - "\u001b[1m \u001b[0m\u001b[1m \u001b[0m\u001b[1m \u001b[0m\n", - " ╭─────────────────────────────────────────────────────────────────────╮ \n", - " │ \u001b[1;37;42mName\u001b[0m │ \n", - " │ \u001b[1;4minstascaletest\u001b[0m Active ✅ │ \n", - " │ │ \n", - " │ \u001b[1mURI:\u001b[0m ray://instascaletest-head-svc.default.svc:10001 │ \n", - " │ │ \n", - " │ \u001b]8;id=65933;http://ray-dashboard-instascaletest-default.apps.meyceoz-07122023.psap.aws.rhperfscale.org\u001b\\\u001b[4;34mDashboard🔗\u001b[0m\u001b]8;;\u001b\\ │ \n", - " │ │ \n", - " │ \u001b[3m Cluster Resources \u001b[0m │ \n", - " │ ╭── Workers ──╮ ╭───────── Worker specs(each) ─────────╮ │ \n", - " │ │ \u001b[1m \u001b[0m\u001b[1m# Workers\u001b[0m\u001b[1m \u001b[0m │ │ \u001b[1m \u001b[0m\u001b[1mMemory \u001b[0m\u001b[1m \u001b[0m\u001b[1m \u001b[0m\u001b[1mCPU \u001b[0m\u001b[1m \u001b[0m\u001b[1m \u001b[0m\u001b[1mGPU \u001b[0m\u001b[1m \u001b[0m │ │ \n", - " │ │ \u001b[35m \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m │ │ \u001b[36m \u001b[0m\u001b[36m \u001b[0m\u001b[36m \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m │ │ \n", - " │ │ \u001b[35m \u001b[0m\u001b[35m2 \u001b[0m\u001b[35m \u001b[0m │ │ \u001b[36m \u001b[0m\u001b[36m8~8 \u001b[0m\u001b[36m \u001b[0m\u001b[35m \u001b[0m\u001b[35m2 \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m\u001b[35m1 \u001b[0m\u001b[35m \u001b[0m │ │ \n", - " │ │ \u001b[35m \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m │ │ \u001b[36m \u001b[0m\u001b[36m \u001b[0m\u001b[36m \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m │ │ \n", - " │ ╰─────────────╯ ╰──────────────────────────────────────╯ │ \n", - " 
╰─────────────────────────────────────────────────────────────────────╯ \n" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/plain": [ - "RayCluster(name='instascaletest', status=, workers=2, worker_mem_min=8, worker_mem_max=8, worker_cpu=2, worker_gpu=1, namespace='default', dashboard='http://ray-dashboard-instascaletest-default.apps.meyceoz-07122023.psap.aws.rhperfscale.org')" - ] - }, - "execution_count": 4, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "cluster.details()" - ] - }, - { - "cell_type": "markdown", - "id": "5af8cd32", - "metadata": {}, - "source": [ - "Finally, we bring our resource cluster down and release/terminate the associated resources, bringing everything back to the way it was before our cluster was brought up." - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "5f36db0f-31f6-4373-9503-dc3c1c4c3f57", - "metadata": {}, - "outputs": [], - "source": [ - "cluster.down()" - ] - }, - { - "cell_type": "markdown", - "id": "c883caea", - "metadata": {}, - "source": [ - "Once again, we can look at the machines/nodes and see that everything has been successfully scaled down!" 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0d41b90e", - "metadata": {}, - "outputs": [], - "source": [ - "auth.logout()" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.17" - }, - "vscode": { - "interpreter": { - "hash": "f9f85f796d01129d0dd105a088854619f454435301f6ffec2fea96ecbd9be4ac" - } - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/demo-notebooks/guided-demos/notebook-ex-outputs/1_cluster_job_client.ipynb b/demo-notebooks/guided-demos/notebook-ex-outputs/1_cluster_job_client.ipynb new file mode 100644 index 00000000..913fb919 --- /dev/null +++ b/demo-notebooks/guided-demos/notebook-ex-outputs/1_cluster_job_client.ipynb @@ -0,0 +1,240 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In this demo we will go over the basics of the Ray Job Submission Client in the SDK" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Import pieces from codeflare-sdk\n", + "from codeflare_sdk import Cluster, ClusterConfiguration, TokenAuthentication" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Create authentication object for user permissions\n", + "# IF unused, SDK will automatically check for default kubeconfig, then in-cluster config\n", + "# KubeConfigFileAuthentication can also be used to specify kubeconfig path manually\n", + "auth = TokenAuthentication(\n", + " token = \"XXXXX\",\n", + " server = \"XXXXX\",\n", + " skip_tls=False\n", + ")\n", + "auth.login()" + ] + }, + { + "cell_type": "markdown", + "id": "bc27f84c", + 
"metadata": {}, + "source": [ + "Here, we want to define our cluster by specifying the resources we require for our batch workload. Below, we define our cluster object (which generates a corresponding RayCluster).\n", + "\n", + "NOTE: The default images used by the CodeFlare SDK for creating a RayCluster resource depend on the installed Python version:\n", + "\n", + "- For Python 3.11: 'quay.io/modh/ray:2.47.1-py311-cu121'\n", + "- For Python 3.12: 'quay.io/modh/ray:2.47.1-py312-cu128'\n", + "\n", + "If you prefer to use a custom Ray image that better suits your needs, you can specify it in the image field to override the default." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Create and configure our cluster object\n", + "# The SDK will try to find the name of your default local queue based on the annotation \"kueue.x-k8s.io/default-queue\": \"true\" unless you specify the local queue manually below\n", + "cluster = Cluster(ClusterConfiguration(\n", + " name='jobtest',\n", + " head_extended_resource_requests={'nvidia.com/gpu':1}, # For GPU enabled workloads set the head_extended_resource_requests and worker_extended_resource_requests\n", + " worker_extended_resource_requests={'nvidia.com/gpu':1},\n", + " num_workers=2,\n", + " worker_cpu_requests=1,\n", + " worker_cpu_limits=1,\n", + " worker_memory_requests=4,\n", + " worker_memory_limits=4,\n", + " # image=\"\", # Optional Field \n", + " write_to_file=False, # When enabled Ray Cluster yaml files are written to /HOME/.codeflare/resources \n", + " # local_queue=\"local-queue-name\" # Specify the local queue manually\n", + "))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Bring up the cluster\n", + "cluster.apply()\n", + "cluster.wait_ready()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "cluster.details()" 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Ray Job Submission" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "* Initialise the Cluster Job Client \n", + "* Provide an entrypoint command directed to your job script\n", + "* Set up your runtime environment" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Initialize the Job Submission Client\n", + "\"\"\"\n", + "The SDK will automatically gather the dashboard address and authenticate using the Ray Job Submission Client\n", + "\"\"\"\n", + "client = cluster.job_client" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Submit an example mnist job using the Job Submission Client\n", + "submission_id = client.submit_job(\n", + " entrypoint=\"python mnist_fashion.py\",\n", + " runtime_env={\"working_dir\": \"./\",\"pip\": \"requirements.txt\"},\n", + ")\n", + "print(submission_id)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Get the job's logs\n", + "client.get_job_logs(submission_id)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Get the job's status\n", + "client.get_job_status(submission_id)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Get job related info\n", + "client.get_job_info(submission_id)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# List all existing jobs\n", + "client.list_jobs()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Iterate through the logs of a job \n", + "async for lines in client.tail_job_logs(submission_id):\n", + " print(lines, end=\"\") " + ] + 
}, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Delete a job\n", + "# Can run client.cancel_job(submission_id) first if job is still running\n", + "client.delete_job(submission_id)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "cluster.down()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "auth.logout()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.18" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/demo-notebooks/guided-demos/3_basic_interactive.ipynb b/demo-notebooks/guided-demos/notebook-ex-outputs/2_basic_interactive.ipynb similarity index 51% rename from demo-notebooks/guided-demos/3_basic_interactive.ipynb rename to demo-notebooks/guided-demos/notebook-ex-outputs/2_basic_interactive.ipynb index c8b2b1a0..9c816c53 100644 --- a/demo-notebooks/guided-demos/3_basic_interactive.ipynb +++ b/demo-notebooks/guided-demos/notebook-ex-outputs/2_basic_interactive.ipynb @@ -5,7 +5,7 @@ "id": "bbc21043", "metadata": {}, "source": [ - "In this fourth and final notebook, we will go over how to leverage the SDK to directly work interactively with a Ray cluster during development." + "In this notebook, we will go over how to leverage the SDK to directly work interactively with a Ray cluster during development." 
] }, { @@ -16,8 +16,7 @@ "outputs": [], "source": [ "# Import pieces from codeflare-sdk\n", - "from codeflare_sdk.cluster.cluster import Cluster, ClusterConfiguration\n", - "from codeflare_sdk.cluster.auth import TokenAuthentication" + "from codeflare_sdk import Cluster, ClusterConfiguration, TokenAuthentication" ] }, { @@ -43,7 +42,14 @@ "id": "bc27f84c", "metadata": {}, "source": [ - "Once again, let's start by running through the same cluster setup as before:" + "Once again, let's start by running through the same cluster setup as before:\n", + "\n", + "NOTE: The default images used by the CodeFlare SDK for creating a RayCluster resource depend on the installed Python version:\n", + "\n", + "- For Python 3.11: 'quay.io/modh/ray:2.47.1-py311-cu121'\n", + "- For Python 3.12: 'quay.io/modh/ray:2.47.1-py312-cu128'\n", + "\n", + "If you prefer to use a custom Ray image that better suits your needs, you can specify it in the image field to override the default." ] }, { @@ -53,19 +59,21 @@ "metadata": {}, "outputs": [], "source": [ - "# Create and configure our cluster object (and appwrapper)\n", + "# Create and configure our cluster object\n", + "# The SDK will try to find the name of your default local queue based on the annotation \"kueue.x-k8s.io/default-queue\": \"true\" unless you specify the local queue manually below\n", + "cluster_name = \"interactivetest\"\n", "cluster = Cluster(ClusterConfiguration(\n", - " name='interactivetest',\n", - " namespace='default',\n", + " name=cluster_name,\n", + " head_extended_resource_requests={'nvidia.com/gpu':1}, # For GPU enabled workloads set the head_extended_resource_requests and worker_extended_resource_requests\n", + " worker_extended_resource_requests={'nvidia.com/gpu':1},\n", " num_workers=2,\n", - " min_cpus=2,\n", - " max_cpus=2,\n", - " min_memory=8,\n", - " max_memory=8,\n", - " num_gpus=1,\n", - " instascale=True, #<---instascale enabled\n", - " machine_types=[\"m5.xlarge\", \"g4dn.xlarge\"]\n", - " \n", + " 
worker_cpu_requests=2,\n", + " worker_cpu_limits=2,\n", + " worker_memory_requests=8,\n", + " worker_memory_limits=8,\n", + " # image=\"\", # Optional Field \n", + " write_to_file=False, # When enabled Ray Cluster yaml files are written to /HOME/.codeflare/resources \n", + " # local_queue=\"local-queue-name\" # Specify the local queue manually\n", "))" ] }, @@ -77,7 +85,7 @@ "outputs": [], "source": [ "# Bring up the cluster\n", - "cluster.up()\n", + "cluster.apply()\n", "cluster.wait_ready()" ] }, @@ -122,6 +130,19 @@ "Now we can connect directly to our Ray cluster via the Ray python client:" ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "13eb52f6", + "metadata": {}, + "outputs": [], + "source": [ + "from codeflare_sdk import generate_cert\n", + "# Create required TLS cert and export the environment variables to enable TLS\n", + "generate_cert.generate_tls_cert(cluster_name, cluster.config.namespace)\n", + "generate_cert.export_env(cluster_name, cluster.config.namespace)" + ] + }, { "cell_type": "code", "execution_count": null, @@ -133,15 +154,13 @@ "assert ray_cluster_uri, \"Ray cluster needs to be started and set before proceeding\"\n", "\n", "import ray\n", - "from ray.air.config import ScalingConfig\n", "\n", "# reset the ray context in case there's already one. 
\n", "ray.shutdown()\n", "# establish connection to ray cluster\n", "\n", "#install additional libraries that will be required for model training\n", - "runtime_env = {\"pip\": [\"transformers\", \"datasets\", \"evaluate\", \"pyarrow<7.0.0\", \"accelerate\"]}\n", - "\n", + "runtime_env = {\"pip\": [\"transformers==4.41.2\", \"datasets==2.17.0\", \"accelerate==0.31.0\", \"scikit-learn==1.5.0\"]}\n", "# NOTE: This will work for in-cluster notebook servers (RHODS/ODH), but not for local machines\n", "# To see how to connect from your laptop, go to demo-notebooks/additional-demos/local_interactive.ipynb\n", "ray.init(address=ray_cluster_uri, runtime_env=runtime_env)\n", @@ -154,7 +173,7 @@ "id": "9711030b", "metadata": {}, "source": [ - "Now that we are connected (and have passed in some package requirements), let's try writing some training code for a DistilBERT transformer model via HuggingFace (using IMDB dataset):" + "Now that we are connected (and have passed in some package requirements), let's try writing some training code:" ] }, { @@ -166,66 +185,83 @@ "source": [ "@ray.remote\n", "def train_fn():\n", - " from datasets import load_dataset\n", - " import transformers\n", - " from transformers import AutoTokenizer, TrainingArguments\n", - " from transformers import AutoModelForSequenceClassification\n", + " import os\n", " import numpy as np\n", - " from datasets import load_metric\n", - " import ray\n", - " from ray import tune\n", - " from ray.train.huggingface import HuggingFaceTrainer\n", - "\n", - " dataset = load_dataset(\"imdb\")\n", - " tokenizer = AutoTokenizer.from_pretrained(\"distilbert-base-uncased\")\n", - "\n", - " def tokenize_function(examples):\n", - " return tokenizer(examples[\"text\"], padding=\"max_length\", truncation=True)\n", + " from datasets import load_dataset, load_metric\n", + " import transformers\n", + " from transformers import (\n", + " Trainer,\n", + " TrainingArguments,\n", + " AutoTokenizer,\n", + " 
AutoModelForSequenceClassification,\n", + " )\n", + " import ray.train.huggingface.transformers\n", + " from ray.train import ScalingConfig\n", + " from ray.train.torch import TorchTrainer\n", "\n", - " tokenized_datasets = dataset.map(tokenize_function, batched=True)\n", + " # When running in a multi-node cluster you will need persistent storage that is accessible across all worker nodes. \n", + " # See www.github.com/project-codeflare/codeflare-sdk/tree/main/docs/s3-compatible-storage.md for more information.\n", + " \n", + " def train_func():\n", + " # Datasets\n", + " dataset = load_dataset(\"imdb\")\n", + " tokenizer = AutoTokenizer.from_pretrained(\"distilbert-base-uncased\")\n", "\n", - " #using a fraction of dataset but you can run with the full dataset\n", - " small_train_dataset = tokenized_datasets[\"train\"].shuffle(seed=42).select(range(100))\n", - " small_eval_dataset = tokenized_datasets[\"test\"].shuffle(seed=42).select(range(100))\n", + " def tokenize_function(examples):\n", + " return tokenizer(examples[\"text\"], padding=\"max_length\", truncation=True)\n", "\n", - " print(f\"len of train {small_train_dataset} and test {small_eval_dataset}\")\n", + " small_train_dataset = (\n", + " dataset[\"train\"].select(range(100)).map(tokenize_function, batched=True)\n", + " )\n", + " small_eval_dataset = (\n", + " dataset[\"test\"].select(range(100)).map(tokenize_function, batched=True)\n", + " )\n", "\n", - " ray_train_ds = ray.data.from_huggingface(small_train_dataset)\n", - " ray_evaluation_ds = ray.data.from_huggingface(small_eval_dataset)\n", + " # Model\n", + " model = AutoModelForSequenceClassification.from_pretrained(\n", + " \"distilbert-base-uncased\", num_labels=2\n", + " )\n", "\n", - " def compute_metrics(eval_pred):\n", - " metric = load_metric(\"accuracy\")\n", - " logits, labels = eval_pred\n", - " predictions = np.argmax(logits, axis=-1)\n", - " return metric.compute(predictions=predictions, references=labels)\n", + " def 
compute_metrics(eval_pred):\n", + " metric = load_metric(\"accuracy\")\n", + " logits, labels = eval_pred\n", + " predictions = np.argmax(logits, axis=-1)\n", + " return metric.compute(predictions=predictions, references=labels)\n", "\n", - " def trainer_init_per_worker(train_dataset, eval_dataset, **config):\n", - " model = AutoModelForSequenceClassification.from_pretrained(\"distilbert-base-uncased\", num_labels=2)\n", + " # Hugging Face Trainer\n", + " training_args = TrainingArguments(\n", + " output_dir=\"test_trainer\",\n", + " evaluation_strategy=\"epoch\",\n", + " save_strategy=\"epoch\",\n", + " report_to=\"none\",\n", + " )\n", "\n", - " training_args = TrainingArguments(\"/tmp/hf_imdb/test\", eval_steps=1, disable_tqdm=True, \n", - " num_train_epochs=1, skip_memory_metrics=True,\n", - " learning_rate=2e-5,\n", - " per_device_train_batch_size=16,\n", - " per_device_eval_batch_size=16, \n", - " weight_decay=0.01,)\n", - " return transformers.Trainer(\n", + " trainer = Trainer(\n", " model=model,\n", " args=training_args,\n", - " train_dataset=train_dataset,\n", - " eval_dataset=eval_dataset,\n", - " compute_metrics=compute_metrics\n", + " train_dataset=small_train_dataset,\n", + " eval_dataset=small_eval_dataset,\n", + " compute_metrics=compute_metrics,\n", " )\n", "\n", - " scaling_config = ScalingConfig(num_workers=2, use_gpu=True) #num workers is the number of gpus\n", "\n", - " # we are using the ray native HuggingFaceTrainer, but you can swap out to use non ray Huggingface Trainer. Both have the same method signature. 
\n", - " # the ray native HFTrainer has built in support for scaling to multiple GPUs\n", - " trainer = HuggingFaceTrainer(\n", - " trainer_init_per_worker=trainer_init_per_worker,\n", - " scaling_config=scaling_config,\n", - " datasets={\"train\": ray_train_ds, \"evaluation\": ray_evaluation_ds},\n", + " callback = ray.train.huggingface.transformers.RayTrainReportCallback()\n", + " trainer.add_callback(callback)\n", + "\n", + " trainer = ray.train.huggingface.transformers.prepare_trainer(trainer)\n", + "\n", + " trainer.train()\n", + "\n", + "\n", + " ray_trainer = TorchTrainer(\n", + " train_func,\n", + " scaling_config=ScalingConfig(num_workers=3, use_gpu=True),\n", + " # Configure persistent storage that is accessible across \n", + " # all worker nodes.\n", + " # Uncomment and update the RunConfig below to include your storage details.\n", + " # run_config=ray.train.RunConfig(storage_path=\"storage path\"),\n", " )\n", - " result = trainer.fit()" + " result: ray.train.Result = ray_trainer.fit()" ] }, { @@ -292,7 +328,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.13" + "version": "3.8.17" }, "vscode": { "interpreter": { diff --git a/demo-notebooks/guided-demos/notebook-ex-outputs/2_basic_jobs.ipynb b/demo-notebooks/guided-demos/notebook-ex-outputs/2_basic_jobs.ipynb deleted file mode 100644 index 9c612267..00000000 --- a/demo-notebooks/guided-demos/notebook-ex-outputs/2_basic_jobs.ipynb +++ /dev/null @@ -1,519 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "464af595", - "metadata": {}, - "source": [ - "In this third notebook, we will go over the basics of submitting jobs via the SDK, either to a Ray cluster or directly to MCAD." 
- ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "b55bc3ea-4ce3-49bf-bb1f-e209de8ca47a", - "metadata": {}, - "outputs": [], - "source": [ - "# Import pieces from codeflare-sdk\n", - "from codeflare_sdk.cluster.cluster import Cluster, ClusterConfiguration\n", - "from codeflare_sdk.cluster.auth import TokenAuthentication" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "614daa0c", - "metadata": {}, - "outputs": [], - "source": [ - "# Create authentication object for user permissions\n", - "# IF unused, SDK will automatically check for default kubeconfig, then in-cluster config\n", - "# KubeConfigFileAuthentication can also be used to specify kubeconfig path manually\n", - "auth = TokenAuthentication(\n", - " token = \"XXXXX\",\n", - " server = \"XXXXX\",\n", - " skip_tls=False\n", - ")\n", - "auth.login()" - ] - }, - { - "cell_type": "markdown", - "id": "bc27f84c", - "metadata": {}, - "source": [ - "Let's start by running through the same cluster setup as before:" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "0f4bc870-091f-4e11-9642-cba145710159", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Written to: jobtest.yaml\n" - ] - } - ], - "source": [ - "# Create and configure our cluster object (and appwrapper)\n", - "cluster = Cluster(ClusterConfiguration(\n", - " name='jobtest',\n", - " namespace='default',\n", - " num_workers=2,\n", - " min_cpus=1,\n", - " max_cpus=1,\n", - " min_memory=4,\n", - " max_memory=4,\n", - " num_gpus=0,\n", - " instascale=False\n", - "))" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "f0884bbc-c224-4ca0-98a0-02dfa09c2200", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Waiting for requested resources to be set up...\n", - "Requested cluster up and running!\n" - ] - } - ], - "source": [ - "# Bring up the cluster\n", - "cluster.up()\n", - 
"cluster.wait_ready()" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "df71c1ed", - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "
                  🚀 CodeFlare Cluster Details 🚀                  \n",
-       "                                                                   \n",
-       " ╭───────────────────────────────────────────────────────────────╮ \n",
-       " │   Name                                                        │ \n",
-       " │   jobtest                                        Active ✅    │ \n",
-       " │                                                               │ \n",
-       " │   URI: ray://jobtest-head-svc.default.svc:10001               │ \n",
-       " │                                                               │ \n",
-       " │   Dashboard🔗                                                 │ \n",
-       " │                                                               │ \n",
-       " │                       Cluster Resources                       │ \n",
-       " │   ╭── Workers ──╮  ╭───────── Worker specs(each) ─────────╮   │ \n",
-       " │   │  # Workers  │  │  Memory      CPU         GPU         │   │ \n",
-       " │   │             │  │                                      │   │ \n",
-       " │   │  2          │  │  4~4         1           0           │   │ \n",
-       " │   │             │  │                                      │   │ \n",
-       " │   ╰─────────────╯  ╰──────────────────────────────────────╯   │ \n",
-       " ╰───────────────────────────────────────────────────────────────╯ \n",
-       "
\n" - ], - "text/plain": [ - "\u001b[3m \u001b[0m\u001b[1;3m 🚀 CodeFlare Cluster Details 🚀\u001b[0m\u001b[3m \u001b[0m\n", - "\u001b[1m \u001b[0m\u001b[1m \u001b[0m\u001b[1m \u001b[0m\n", - " ╭───────────────────────────────────────────────────────────────╮ \n", - " │ \u001b[1;37;42mName\u001b[0m │ \n", - " │ \u001b[1;4mjobtest\u001b[0m Active ✅ │ \n", - " │ │ \n", - " │ \u001b[1mURI:\u001b[0m ray://jobtest-head-svc.default.svc:10001 │ \n", - " │ │ \n", - " │ \u001b]8;id=366603;http://ray-dashboard-jobtest-default.apps.meyceoz-07122023.psap.aws.rhperfscale.org\u001b\\\u001b[4;34mDashboard🔗\u001b[0m\u001b]8;;\u001b\\ │ \n", - " │ │ \n", - " │ \u001b[3m Cluster Resources \u001b[0m │ \n", - " │ ╭── Workers ──╮ ╭───────── Worker specs(each) ─────────╮ │ \n", - " │ │ \u001b[1m \u001b[0m\u001b[1m# Workers\u001b[0m\u001b[1m \u001b[0m │ │ \u001b[1m \u001b[0m\u001b[1mMemory \u001b[0m\u001b[1m \u001b[0m\u001b[1m \u001b[0m\u001b[1mCPU \u001b[0m\u001b[1m \u001b[0m\u001b[1m \u001b[0m\u001b[1mGPU \u001b[0m\u001b[1m \u001b[0m │ │ \n", - " │ │ \u001b[35m \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m │ │ \u001b[36m \u001b[0m\u001b[36m \u001b[0m\u001b[36m \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m │ │ \n", - " │ │ \u001b[35m \u001b[0m\u001b[35m2 \u001b[0m\u001b[35m \u001b[0m │ │ \u001b[36m \u001b[0m\u001b[36m4~4 \u001b[0m\u001b[36m \u001b[0m\u001b[35m \u001b[0m\u001b[35m1 \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m\u001b[35m0 \u001b[0m\u001b[35m \u001b[0m │ │ \n", - " │ │ \u001b[35m \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m │ │ \u001b[36m \u001b[0m\u001b[36m \u001b[0m\u001b[36m \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m │ │ \n", - " │ ╰─────────────╯ ╰──────────────────────────────────────╯ │ \n", - " ╰───────────────────────────────────────────────────────────────╯ \n" - ] - }, - "metadata": 
{}, - "output_type": "display_data" - }, - { - "data": { - "text/plain": [ - "RayCluster(name='jobtest', status=, workers=2, worker_mem_min=4, worker_mem_max=4, worker_cpu=1, worker_gpu=0, namespace='default', dashboard='http://ray-dashboard-jobtest-default.apps.meyceoz-07122023.psap.aws.rhperfscale.org')" - ] - }, - "execution_count": 4, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "cluster.details()" - ] - }, - { - "cell_type": "markdown", - "id": "33663f47", - "metadata": {}, - "source": [ - "This time, however, we are going to use the CodeFlare SDK to submit batch jobs via TorchX, either to the Ray cluster we have just brought up, or directly to MCAD." - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "c7b4f232", - "metadata": {}, - "outputs": [], - "source": [ - "from codeflare_sdk.job.jobs import DDPJobDefinition" - ] - }, - { - "cell_type": "markdown", - "id": "83d77b74", - "metadata": {}, - "source": [ - "First, let's begin by submitting to Ray, training a basic NN on the MNIST dataset:" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "8c2c5138", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "The Ray scheduler does not support port mapping.\n" - ] - } - ], - "source": [ - "jobdef = DDPJobDefinition(\n", - " name=\"mnisttest\",\n", - " script=\"mnist.py\",\n", - " scheduler_args={\"requirements\": \"requirements.txt\"}\n", - ")\n", - "job = jobdef.submit(cluster)" - ] - }, - { - "cell_type": "markdown", - "id": "5b9ae53a", - "metadata": {}, - "source": [ - "Now we can take a look at the status of our submitted job, as well as retrieve the full logs:" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "id": "6e36c3d9", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "AppStatus:\n", - " msg: !!python/object/apply:ray.dashboard.modules.job.common.JobStatus\n", - " - RUNNING\n", - " num_restarts: -1\n", - " 
roles:\n", - " - replicas:\n", - " - hostname: \n", - " id: 0\n", - " role: ray\n", - " state: !!python/object/apply:torchx.specs.api.AppState\n", - " - 3\n", - " structured_error_msg: \n", - " role: ray\n", - " state: RUNNING (3)\n", - " structured_error_msg: \n", - " ui_url: null" - ] - }, - "execution_count": 9, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "job.status()" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "id": "834cfb5c", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "'[RayActor(name=\\'mnist\\', command=[\\'bash\\', \\'-c\\', \"torchrun --rdzv_backend static --rdzv_endpoint $TORCHX_RANK0_HOST:49782 --rdzv_id \\'mnisttest-bc7wx5t7hd0lcc\\' --nnodes 2 --nproc_per_node 1 --node_rank \\'0\\' --tee 3 --role \\'\\' mnist.py\"], env={\\'TORCHX_TRACKING_EXPERIMENT_NAME\\': \\'default-experiment\\', \\'LOGLEVEL\\': \\'WARNING\\', \\'TORCHX_JOB_ID\\': \\'ray://torchx/mnisttest-bc7wx5t7hd0lcc\\'}, num_cpus=1, num_gpus=0, min_replicas=2), RayActor(name=\\'mnist\\', command=[\\'bash\\', \\'-c\\', \"torchrun --rdzv_backend static --rdzv_endpoint $TORCHX_RANK0_HOST:49782 --rdzv_id \\'mnisttest-bc7wx5t7hd0lcc\\' --nnodes 2 --nproc_per_node 1 --node_rank \\'1\\' --tee 3 --role \\'\\' mnist.py\"], env={\\'TORCHX_TRACKING_EXPERIMENT_NAME\\': \\'default-experiment\\', \\'LOGLEVEL\\': \\'WARNING\\', \\'TORCHX_JOB_ID\\': \\'ray://torchx/mnisttest-bc7wx5t7hd0lcc\\'}, num_cpus=1, num_gpus=0, min_replicas=2)]\\n2023-08-09 08:05:59,434\\tINFO worker.py:1334 -- Using address 10.129.0.64:6379 set in the environment variable RAY_ADDRESS\\n2023-08-09 08:05:59,434\\tINFO worker.py:1452 -- Connecting to existing Ray cluster at address: 10.129.0.64:6379...\\n2023-08-09 08:05:59,477\\tINFO worker.py:1627 -- Connected to Ray cluster. 
View the dashboard at \\x1b[1m\\x1b[32mhttp://10.129.0.64:8265 \\x1b[39m\\x1b[22m\\nWaiting for minimum placement group to start.\\nSuccessfully created placement groups\\nrdzv_endpoint set to 10.129.0.64 for actor 811b83022b98b0411b06453c02000000\\nrdzv_endpoint set to 10.129.0.64 for actor 0ae7ff0d51deee52fd0b0ba802000000\\nSuccessfully placed command actors\\nEntering main loop, start executing the script on worker nodes\\nrunning ray.wait on [ObjectRef(e082c90ab8422b00811b83022b98b0411b06453c0200000001000000), ObjectRef(ce868e48e2fa9a940ae7ff0d51deee52fd0b0ba80200000001000000)]\\nrunning ray.wait on [ObjectRef(ce868e48e2fa9a940ae7ff0d51deee52fd0b0ba80200000001000000), ObjectRef(f81ec6ff838b16db811b83022b98b0411b06453c0200000001000000)]\\n'" - ] - }, - "execution_count": 10, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "job.logs()" - ] - }, - { - "cell_type": "markdown", - "id": "63f46b41", - "metadata": {}, - "source": [ - "You can also view organized logs, status, and other information directly through the Ray cluster's dashboard:" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "id": "78ca8ce0", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "'http://ray-dashboard-jobtest-default.apps.meyceoz-07122023.psap.aws.rhperfscale.org'" - ] - }, - "execution_count": 11, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "cluster.cluster_dashboard_uri()" - ] - }, - { - "cell_type": "markdown", - "id": "5af8cd32", - "metadata": {}, - "source": [ - "Once complete, we can bring our Ray cluster down and clean up:" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "id": "a7714885", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "AppStatus:\n", - " msg: !!python/object/apply:ray.dashboard.modules.job.common.JobStatus\n", - " - SUCCEEDED\n", - " num_restarts: -1\n", - " roles:\n", - " - replicas:\n", - " - hostname: \n", - " id: 0\n", - " role: 
ray\n", - " state: !!python/object/apply:torchx.specs.api.AppState\n", - " - 4\n", - " structured_error_msg: \n", - " role: ray\n", - " state: SUCCEEDED (4)\n", - " structured_error_msg: \n", - " ui_url: null" - ] - }, - "execution_count": 12, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "job.status()" - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "id": "5f36db0f-31f6-4373-9503-dc3c1c4c3f57", - "metadata": {}, - "outputs": [], - "source": [ - "cluster.down()" - ] - }, - { - "cell_type": "markdown", - "id": "31096641", - "metadata": {}, - "source": [ - "Now, an alternative option for job submission is to submit directly to MCAD, which will schedule pods to run the job with requested resources:" - ] - }, - { - "cell_type": "code", - "execution_count": 14, - "id": "496139cc", - "metadata": {}, - "outputs": [], - "source": [ - "jobdef = DDPJobDefinition(\n", - " name=\"mnistjob\",\n", - " script=\"mnist.py\",\n", - " scheduler_args={\"namespace\": \"default\"},\n", - " j=\"1x1\",\n", - " gpu=0,\n", - " cpu=1,\n", - " memMB=8000,\n", - " image=\"quay.io/project-codeflare/mnist-job-test:v0.0.1\"\n", - ")\n", - "job = jobdef.submit()" - ] - }, - { - "cell_type": "markdown", - "id": "0837e43b", - "metadata": {}, - "source": [ - "Once again, we can look at job status and logs:" - ] - }, - { - "cell_type": "code", - "execution_count": 15, - "id": "3d18d42c", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/opt/homebrew/lib/python3.8/site-packages/torchx/schedulers/kubernetes_mcad_scheduler.py:1105: UserWarning: Warning - MCAD does not report individual replica statuses, but overall task status. 
Replica id may not match status\n", - " warnings.warn(msg)\n" - ] - }, - { - "data": { - "text/plain": [ - "AppStatus:\n", - " msg: \n", - " num_restarts: -1\n", - " roles:\n", - " - replicas:\n", - " - hostname: ''\n", - " id: 0\n", - " role: mnist\n", - " state: !!python/object/apply:torchx.specs.api.AppState\n", - " - 2\n", - " structured_error_msg: \n", - " role: mnist\n", - " state: RUNNING (3)\n", - " structured_error_msg: \n", - " ui_url: null" - ] - }, - "execution_count": 15, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "job.status()" - ] - }, - { - "cell_type": "code", - "execution_count": 20, - "id": "36d7ea97", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "'2023-08-09T15:10:09.515811959Z [0]:GPU available: False, used: False\\n2023-08-09T15:10:09.515811959Z [0]:TPU available: False, using: 0 TPU cores\\n2023-08-09T15:10:09.515811959Z [0]:IPU available: False, using: 0 IPUs\\n2023-08-09T15:10:09.615986853Z [0]:\\n2023-08-09T15:10:09.615986853Z [0]: 0%| | 0/9912422 [00:00 🚀 CodeFlare Cluster Details 🚀 \n", - " \n", - " ╭──────────────────────────────────────────────────────────────────────╮ \n", - " │ Name │ \n", - " │ interactivetest Active ✅ │ \n", - " │ │ \n", - " │ URI: ray://interactivetest-head-svc.default.svc:10001 │ \n", - " │ │ \n", - " │ Dashboard🔗 │ \n", - " │ │ \n", - " │ Cluster Resources │ \n", - " │ ╭── Workers ──╮ ╭───────── Worker specs(each) ─────────╮ │ \n", - " │ │ # Workers │ │ Memory CPU GPU │ │ \n", - " │ │ │ │ │ │ \n", - " │ │ 2 │ │ 8~8 2 1 │ │ \n", - " │ │ │ │ │ │ \n", - " │ ╰─────────────╯ ╰──────────────────────────────────────╯ │ \n", - " ╰──────────────────────────────────────────────────────────────────────╯ \n", - "\n" - ], - "text/plain": [ - "\u001b[3m \u001b[0m\u001b[1;3m 🚀 CodeFlare Cluster Details 🚀\u001b[0m\u001b[3m \u001b[0m\n", - "\u001b[1m \u001b[0m\u001b[1m \u001b[0m\u001b[1m \u001b[0m\n", - " 
╭──────────────────────────────────────────────────────────────────────╮ \n", - " │ \u001b[1;37;42mName\u001b[0m │ \n", - " │ \u001b[1;4minteractivetest\u001b[0m Active ✅ │ \n", - " │ │ \n", - " │ \u001b[1mURI:\u001b[0m ray://interactivetest-head-svc.default.svc:10001 │ \n", - " │ │ \n", - " │ \u001b]8;id=970589;http://ray-dashboard-interactivetest-default.apps.meyceoz-07122023.psap.aws.rhperfscale.org\u001b\\\u001b[4;34mDashboard🔗\u001b[0m\u001b]8;;\u001b\\ │ \n", - " │ │ \n", - " │ \u001b[3m Cluster Resources \u001b[0m │ \n", - " │ ╭── Workers ──╮ ╭───────── Worker specs(each) ─────────╮ │ \n", - " │ │ \u001b[1m \u001b[0m\u001b[1m# Workers\u001b[0m\u001b[1m \u001b[0m │ │ \u001b[1m \u001b[0m\u001b[1mMemory \u001b[0m\u001b[1m \u001b[0m\u001b[1m \u001b[0m\u001b[1mCPU \u001b[0m\u001b[1m \u001b[0m\u001b[1m \u001b[0m\u001b[1mGPU \u001b[0m\u001b[1m \u001b[0m │ │ \n", - " │ │ \u001b[35m \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m │ │ \u001b[36m \u001b[0m\u001b[36m \u001b[0m\u001b[36m \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m │ │ \n", - " │ │ \u001b[35m \u001b[0m\u001b[35m2 \u001b[0m\u001b[35m \u001b[0m │ │ \u001b[36m \u001b[0m\u001b[36m8~8 \u001b[0m\u001b[36m \u001b[0m\u001b[35m \u001b[0m\u001b[35m2 \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m\u001b[35m1 \u001b[0m\u001b[35m \u001b[0m │ │ \n", - " │ │ \u001b[35m \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m │ │ \u001b[36m \u001b[0m\u001b[36m \u001b[0m\u001b[36m \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m │ │ \n", - " │ ╰─────────────╯ ╰──────────────────────────────────────╯ │ \n", - " ╰──────────────────────────────────────────────────────────────────────╯ \n" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/plain": [ - "RayCluster(name='interactivetest', status=, workers=2, worker_mem_min=8, 
worker_mem_max=8, worker_cpu=2, worker_gpu=1, namespace='default', dashboard='http://ray-dashboard-interactivetest-default.apps.meyceoz-07122023.psap.aws.rhperfscale.org')" - ] - }, - "execution_count": 4, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "cluster.details()" - ] - }, - { - "cell_type": "markdown", - "id": "33663f47", - "metadata": {}, - "source": [ - "This time we will demonstrate another potential method of use: working with the Ray cluster interactively.\n", - "\n", - "Using the SDK, we can get both the Ray cluster URI and dashboard URI:" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "c1719bca", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "http://ray-dashboard-interactivetest-default.apps.meyceoz-07122023.psap.aws.rhperfscale.org\n", - "ray://interactivetest-head-svc.default.svc:10001\n" - ] - } - ], - "source": [ - "ray_dashboard_uri = cluster.cluster_dashboard_uri()\n", - "ray_cluster_uri = cluster.cluster_uri()\n", - "print(ray_dashboard_uri)\n", - "print(ray_cluster_uri)" - ] - }, - { - "cell_type": "markdown", - "id": "2a2aca6a", - "metadata": {}, - "source": [ - "Now we can connect directly to our Ray cluster via the Ray python client:" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "300146dc", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Ray cluster is up and running: True\n" - ] - } - ], - "source": [ - "#before proceeding make sure the cluster exists and the uri is not empty\n", - "assert ray_cluster_uri, \"Ray cluster needs to be started and set before proceeding\"\n", - "\n", - "import ray\n", - "from ray.air.config import ScalingConfig\n", - "\n", - "# reset the ray context in case there's already one. 
\n", - "ray.shutdown()\n", - "# establish connection to ray cluster\n", - "\n", - "#install additional libraries that will be required for model training\n", - "runtime_env = {\"pip\": [\"transformers\", \"datasets\", \"evaluate\", \"pyarrow<7.0.0\", \"accelerate\"]}\n", - "\n", - "# NOTE: This will work for in-cluster notebook servers (RHODS/ODH), but not for local machines\n", - "# To see how to connect from your laptop, go to demo-notebooks/additional-demos/local_interactive.ipynb\n", - "ray.init(address=ray_cluster_uri, runtime_env=runtime_env)\n", - "\n", - "print(\"Ray cluster is up and running: \", ray.is_initialized())" - ] - }, - { - "cell_type": "markdown", - "id": "9711030b", - "metadata": {}, - "source": [ - "Now that we are connected (and have passed in some package requirements), let's try writing some training code for a DistilBERT transformer model via HuggingFace (using IMDB dataset):" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "id": "1b36e0d9", - "metadata": {}, - "outputs": [], - "source": [ - "@ray.remote\n", - "def train_fn():\n", - " from datasets import load_dataset\n", - " import transformers\n", - " from transformers import AutoTokenizer, TrainingArguments\n", - " from transformers import AutoModelForSequenceClassification\n", - " import numpy as np\n", - " from datasets import load_metric\n", - " import ray\n", - " from ray import tune\n", - " from ray.train.huggingface import HuggingFaceTrainer\n", - "\n", - " dataset = load_dataset(\"imdb\")\n", - " tokenizer = AutoTokenizer.from_pretrained(\"distilbert-base-uncased\")\n", - "\n", - " def tokenize_function(examples):\n", - " return tokenizer(examples[\"text\"], padding=\"max_length\", truncation=True)\n", - "\n", - " tokenized_datasets = dataset.map(tokenize_function, batched=True)\n", - "\n", - " #using a fraction of dataset but you can run with the full dataset\n", - " small_train_dataset = tokenized_datasets[\"train\"].shuffle(seed=42).select(range(100))\n", - " 
small_eval_dataset = tokenized_datasets[\"test\"].shuffle(seed=42).select(range(100))\n", - "\n", - " print(f\"len of train {small_train_dataset} and test {small_eval_dataset}\")\n", - "\n", - " ray_train_ds = ray.data.from_huggingface(small_train_dataset)\n", - " ray_evaluation_ds = ray.data.from_huggingface(small_eval_dataset)\n", - "\n", - " def compute_metrics(eval_pred):\n", - " metric = load_metric(\"accuracy\")\n", - " logits, labels = eval_pred\n", - " predictions = np.argmax(logits, axis=-1)\n", - " return metric.compute(predictions=predictions, references=labels)\n", - "\n", - " def trainer_init_per_worker(train_dataset, eval_dataset, **config):\n", - " model = AutoModelForSequenceClassification.from_pretrained(\"distilbert-base-uncased\", num_labels=2)\n", - "\n", - " training_args = TrainingArguments(\"/tmp/hf_imdb/test\", eval_steps=1, disable_tqdm=True, \n", - " num_train_epochs=1, skip_memory_metrics=True,\n", - " learning_rate=2e-5,\n", - " per_device_train_batch_size=16,\n", - " per_device_eval_batch_size=16, \n", - " weight_decay=0.01,)\n", - " return transformers.Trainer(\n", - " model=model,\n", - " args=training_args,\n", - " train_dataset=train_dataset,\n", - " eval_dataset=eval_dataset,\n", - " compute_metrics=compute_metrics\n", - " )\n", - "\n", - " scaling_config = ScalingConfig(num_workers=2, use_gpu=True) #num workers is the number of gpus\n", - "\n", - " # we are using the ray native HuggingFaceTrainer, but you can swap out to use non ray Huggingface Trainer. Both have the same method signature. 
\n", - " # the ray native HFTrainer has built in support for scaling to multiple GPUs\n", - " trainer = HuggingFaceTrainer(\n", - " trainer_init_per_worker=trainer_init_per_worker,\n", - " scaling_config=scaling_config,\n", - " datasets={\"train\": ray_train_ds, \"evaluation\": ray_evaluation_ds},\n", - " )\n", - " result = trainer.fit()" - ] - }, - { - "cell_type": "markdown", - "id": "d4d8fd65", - "metadata": {}, - "source": [ - "Once we want to test our code out, we can run the training function we defined above remotely on our Ray cluster:" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "id": "5901d958", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Downloading builder script: 100%|██████████| 4.31k/4.31k [00:00<00:00, 20.9MB/s]\n", - "Downloading metadata: 100%|██████████| 2.17k/2.17k [00:00<00:00, 14.1MB/s]\n", - "Downloading readme: 100%|██████████| 7.59k/7.59k [00:00<00:00, 22.9MB/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\u001b[2m\u001b[36m(train_fn pid=425)\u001b[0m Downloading and preparing dataset imdb/plain_text to /home/ray/.cache/huggingface/datasets/imdb/plain_text/1.0.0/d613c88cf8fa3bab83b4ded3713f1f74830d1100e171db75bbddb80b3345c9c0...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Downloading data: 0%| | 0.00/84.1M [00:00 AllToAllOperator[RandomizeBlockOrder]\n", - "\u001b[2m\u001b[36m(HuggingFaceTrainer pid=196, ip=10.130.4.19)\u001b[0m 2023-08-09 14:51:58,957\tINFO streaming_executor.py:92 -- Execution config: ExecutionOptions(resource_limits=ExecutionResources(cpu=None, gpu=None, object_store_memory=None), locality_with_output=False, preserve_order=False, actor_locality_enabled=True, verbose_progress=False)\n", - "\u001b[2m\u001b[36m(HuggingFaceTrainer pid=196, ip=10.130.4.19)\u001b[0m 2023-08-09 14:51:58,958\tINFO streaming_executor.py:94 -- Tip: For detailed progress reporting, run 
`ray.data.DataContext.get_current().execution_options.verbose_progress = True`\n", - "\u001b[2m\u001b[36m(HuggingFaceTrainer pid=196, ip=10.130.4.19)\u001b[0m 2023-08-09 14:51:58,969\tINFO streaming_executor.py:149 -- Shutting down .\n", - "\u001b[2m\u001b[36m(RayTrainWorker pid=235, ip=10.130.4.19)\u001b[0m 2023-08-09 14:51:58,912\tINFO config.py:86 -- Setting up process group for: env:// [rank=0, world_size=2]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\u001b[2m\u001b[36m(train_fn pid=425)\u001b[0m == Status ==\n", - "\u001b[2m\u001b[36m(train_fn pid=425)\u001b[0m Current time: 2023-08-09 14:52:01 (running for 00:00:10.18)\n", - "\u001b[2m\u001b[36m(train_fn pid=425)\u001b[0m Using FIFO scheduling algorithm.\n", - "\u001b[2m\u001b[36m(train_fn pid=425)\u001b[0m Logical resource usage: 1.0/6 CPUs, 2.0/2 GPUs\n", - "\u001b[2m\u001b[36m(train_fn pid=425)\u001b[0m Result logdir: /home/ray/ray_results/HuggingFaceTrainer_2023-08-09_14-51-51\n", - "\u001b[2m\u001b[36m(train_fn pid=425)\u001b[0m Number of trials: 1/1 (1 RUNNING)\n", - "\u001b[2m\u001b[36m(train_fn pid=425)\u001b[0m +--------------------------------+----------+-----------------+\n", - "\u001b[2m\u001b[36m(train_fn pid=425)\u001b[0m | Trial name | status | loc |\n", - "\u001b[2m\u001b[36m(train_fn pid=425)\u001b[0m |--------------------------------+----------+-----------------|\n", - "\u001b[2m\u001b[36m(train_fn pid=425)\u001b[0m | HuggingFaceTrainer_f2621_00000 | RUNNING | 10.130.4.19:196 |\n", - "\u001b[2m\u001b[36m(train_fn pid=425)\u001b[0m +--------------------------------+----------+-----------------+\n", - "\u001b[2m\u001b[36m(train_fn pid=425)\u001b[0m \n", - "\u001b[2m\u001b[36m(train_fn pid=425)\u001b[0m \n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\u001b[2m\u001b[36m(RayTrainWorker pid=235, ip=10.130.4.19)\u001b[0m 2023-08-09 14:52:01,262\tINFO streaming_executor.py:91 -- Executing DAG InputDataBuffer[Input] -> 
AllToAllOperator[RandomizeBlockOrder]\n", - "\u001b[2m\u001b[36m(RayTrainWorker pid=235, ip=10.130.4.19)\u001b[0m 2023-08-09 14:52:01,262\tINFO streaming_executor.py:92 -- Execution config: ExecutionOptions(resource_limits=ExecutionResources(cpu=None, gpu=None, object_store_memory=None), locality_with_output=False, preserve_order=False, actor_locality_enabled=True, verbose_progress=False)\n", - "\u001b[2m\u001b[36m(RayTrainWorker pid=235, ip=10.130.4.19)\u001b[0m 2023-08-09 14:52:01,262\tINFO streaming_executor.py:94 -- Tip: For detailed progress reporting, run `ray.data.DataContext.get_current().execution_options.verbose_progress = True`\n", - "\u001b[2m\u001b[36m(RayTrainWorker pid=235, ip=10.130.4.19)\u001b[0m 2023-08-09 14:52:01,274\tINFO streaming_executor.py:149 -- Shutting down .\n", - "\u001b[2m\u001b[36m(RayTrainWorker pid=232, ip=10.129.4.19)\u001b[0m 2023-08-09 14:52:01,252\tINFO streaming_executor.py:91 -- Executing DAG InputDataBuffer[Input] -> AllToAllOperator[RandomizeBlockOrder]\n", - "\u001b[2m\u001b[36m(RayTrainWorker pid=232, ip=10.129.4.19)\u001b[0m 2023-08-09 14:52:01,252\tINFO streaming_executor.py:92 -- Execution config: ExecutionOptions(resource_limits=ExecutionResources(cpu=None, gpu=None, object_store_memory=None), locality_with_output=False, preserve_order=False, actor_locality_enabled=True, verbose_progress=False)\n", - "\u001b[2m\u001b[36m(RayTrainWorker pid=232, ip=10.129.4.19)\u001b[0m 2023-08-09 14:52:01,252\tINFO streaming_executor.py:94 -- Tip: For detailed progress reporting, run `ray.data.DataContext.get_current().execution_options.verbose_progress = True`\n", - "\u001b[2m\u001b[36m(RayTrainWorker pid=232, ip=10.129.4.19)\u001b[0m 2023-08-09 14:52:01,263\tINFO streaming_executor.py:149 -- Shutting down .\n", - "Downloading (…)lve/main/config.json: 100%|██████████| 483/483 [00:00<00:00, 151kB/s]\n", - "Downloading (…)lve/main/config.json: 100%|██████████| 483/483 [00:00<00:00, 146kB/s]\n", - "Downloading model.safetensors: 0%| | 
0.00/268M [00:00 🚀 CodeFlare Cluster Details 🚀 \n", - " \n", - " ╭────────────────────────────────────────────────────────────────╮ \n", - " │ Name │ \n", - " │ gptfttest Active ✅ │ \n", - " │ │ \n", - " │ URI: ray://gptfttest-head-svc.default.svc:10001 │ \n", - " │ │ \n", - " │ Dashboard🔗 │ \n", - " │ │ \n", - " │ Cluster Resources │ \n", - " │ ╭── Workers ──╮ ╭───────── Worker specs(each) ─────────╮ │ \n", - " │ │ # Workers │ │ Memory CPU GPU │ │ \n", - " │ │ │ │ │ │ \n", - " │ │ 2 │ │ 8~8 2 1 │ │ \n", - " │ │ │ │ │ │ \n", - " │ ╰─────────────╯ ╰──────────────────────────────────────╯ │ \n", - " ╰────────────────────────────────────────────────────────────────╯ \n", - "\n" - ], - "text/plain": [ - "\u001b[3m \u001b[0m\u001b[1;3m 🚀 CodeFlare Cluster Details 🚀\u001b[0m\u001b[3m \u001b[0m\n", - "\u001b[1m \u001b[0m\u001b[1m \u001b[0m\u001b[1m \u001b[0m\n", - " ╭────────────────────────────────────────────────────────────────╮ \n", - " │ \u001b[1;37;42mName\u001b[0m │ \n", - " │ \u001b[1;4mgptfttest\u001b[0m Active ✅ │ \n", - " │ │ \n", - " │ \u001b[1mURI:\u001b[0m ray://gptfttest-head-svc.default.svc:10001 │ \n", - " │ │ \n", - " │ \u001b]8;id=476902;http://ray-dashboard-gptfttest-default.apps.meyceoz-07122023.psap.aws.rhperfscale.org\u001b\\\u001b[4;34mDashboard🔗\u001b[0m\u001b]8;;\u001b\\ │ \n", - " │ │ \n", - " │ \u001b[3m Cluster Resources \u001b[0m │ \n", - " │ ╭── Workers ──╮ ╭───────── Worker specs(each) ─────────╮ │ \n", - " │ │ \u001b[1m \u001b[0m\u001b[1m# Workers\u001b[0m\u001b[1m \u001b[0m │ │ \u001b[1m \u001b[0m\u001b[1mMemory \u001b[0m\u001b[1m \u001b[0m\u001b[1m \u001b[0m\u001b[1mCPU \u001b[0m\u001b[1m \u001b[0m\u001b[1m \u001b[0m\u001b[1mGPU \u001b[0m\u001b[1m \u001b[0m │ │ \n", - " │ │ \u001b[35m \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m │ │ \u001b[36m \u001b[0m\u001b[36m \u001b[0m\u001b[36m \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m │ │ \n", - " │ │ 
\u001b[35m \u001b[0m\u001b[35m2 \u001b[0m\u001b[35m \u001b[0m │ │ \u001b[36m \u001b[0m\u001b[36m8~8 \u001b[0m\u001b[36m \u001b[0m\u001b[35m \u001b[0m\u001b[35m2 \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m\u001b[35m1 \u001b[0m\u001b[35m \u001b[0m │ │ \n", - " │ │ \u001b[35m \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m │ │ \u001b[36m \u001b[0m\u001b[36m \u001b[0m\u001b[36m \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m\u001b[35m \u001b[0m │ │ \n", - " │ ╰─────────────╯ ╰──────────────────────────────────────╯ │ \n", - " ╰────────────────────────────────────────────────────────────────╯ \n" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/plain": [ - "RayCluster(name='gptfttest', status=, workers=2, worker_mem_min=8, worker_mem_max=8, worker_cpu=2, worker_gpu=1, namespace='default', dashboard='http://ray-dashboard-gptfttest-default.apps.meyceoz-07122023.psap.aws.rhperfscale.org')" - ] - }, - "execution_count": 5, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "cluster.details()" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "9ed5bd75-4230-4c7c-a9e2-0f247890e62a", - "metadata": {}, - "outputs": [], - "source": [ - "from codeflare_sdk.job.jobs import DDPJobDefinition" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "id": "611d203a-35aa-4357-a748-1d01b022fcdb", - "metadata": {}, - "outputs": [], - "source": [ - "arg_list = [\n", - " \"--model_name_or_path\", \"gpt2\",\n", - " \"--dataset_name\", \"wikitext\",\n", - " \"--dataset_config_name\", \"wikitext-2-raw-v1\",\n", - " \"--per_device_train_batch_size\", \"2\",\n", - " \"--per_device_eval_batch_size\", \"2\",\n", - " \"--do_train\",\n", - " \"--do_eval\",\n", - " \"--output_dir\", \"/tmp/test-clm\",\n", - " \"--overwrite_output_dir\"\n", - "]" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "id": 
"8ac7c34f-e227-44c2-a4b1-a57c853ac3a7", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "The Ray scheduler does not support port mapping.\n" - ] - } - ], - "source": [ - "jobdef = DDPJobDefinition(\n", - " name=\"gpttest\",\n", - " script=\"gpt_og.py\",\n", - " script_args=arg_list,\n", - " scheduler_args={\"requirements\": \"requirements_gpt.txt\"}\n", - ")\n", - "job = jobdef.submit(cluster)" - ] - }, - { - "cell_type": "code", - "execution_count": 16, - "id": "1680d287-de46-45f8-b95a-02ba3c83912c", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "AppStatus:\n", - " msg: !!python/object/apply:ray.dashboard.modules.job.common.JobStatus\n", - " - RUNNING\n", - " num_restarts: -1\n", - " roles:\n", - " - replicas:\n", - " - hostname: \n", - " id: 0\n", - " role: ray\n", - " state: !!python/object/apply:torchx.specs.api.AppState\n", - " - 3\n", - " structured_error_msg: \n", - " role: ray\n", - " state: RUNNING (3)\n", - " structured_error_msg: \n", - " ui_url: null" - ] - }, - "execution_count": 16, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "job.status()" - ] - }, - { - "cell_type": "markdown", - "id": "8222e884-7091-4b74-bdcf-565f25abed11", - "metadata": {}, - "source": [ - "Retrieve raw log output at anytime with:" - ] - }, - { - "cell_type": "code", - "execution_count": 21, - "id": "d25d6198-9941-47e8-857f-9811830cc854", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "'[RayActor(name=\\'gpt_og\\', command=[\\'bash\\', \\'-c\\', \"torchrun --rdzv_backend static --rdzv_endpoint $TORCHX_RANK0_HOST:49782 --rdzv_id \\'gpttest-tg69zmd1xf19l\\' --nnodes 2 --nproc_per_node 1 --node_rank \\'0\\' --tee 3 --role \\'\\' gpt_og.py --model_name_or_path gpt2 --dataset_name wikitext --dataset_config_name wikitext-2-raw-v1 --per_device_train_batch_size 2 --per_device_eval_batch_size 2 --do_train --do_eval --output_dir /tmp/test-clm 
--overwrite_output_dir\"], env={\\'TORCHX_TRACKING_EXPERIMENT_NAME\\': \\'default-experiment\\', \\'LOGLEVEL\\': \\'WARNING\\', \\'TORCHX_JOB_ID\\': \\'ray://torchx/gpttest-tg69zmd1xf19l\\'}, num_cpus=2, num_gpus=1, min_replicas=2), RayActor(name=\\'gpt_og\\', command=[\\'bash\\', \\'-c\\', \"torchrun --rdzv_backend static --rdzv_endpoint $TORCHX_RANK0_HOST:49782 --rdzv_id \\'gpttest-tg69zmd1xf19l\\' --nnodes 2 --nproc_per_node 1 --node_rank \\'1\\' --tee 3 --role \\'\\' gpt_og.py --model_name_or_path gpt2 --dataset_name wikitext --dataset_config_name wikitext-2-raw-v1 --per_device_train_batch_size 2 --per_device_eval_batch_size 2 --do_train --do_eval --output_dir /tmp/test-clm --overwrite_output_dir\"], env={\\'TORCHX_TRACKING_EXPERIMENT_NAME\\': \\'default-experiment\\', \\'LOGLEVEL\\': \\'WARNING\\', \\'TORCHX_JOB_ID\\': \\'ray://torchx/gpttest-tg69zmd1xf19l\\'}, num_cpus=2, num_gpus=1, min_replicas=2)]\\n2023-08-09 15:39:16,608\\tINFO worker.py:1334 -- Using address 10.129.6.9:6379 set in the environment variable RAY_ADDRESS\\n2023-08-09 15:39:16,608\\tINFO worker.py:1452 -- Connecting to existing Ray cluster at address: 10.129.6.9:6379...\\n2023-08-09 15:39:16,667\\tINFO worker.py:1627 -- Connected to Ray cluster. 
View the dashboard at \\x1b[1m\\x1b[32mhttp://10.129.6.9:8265 \\x1b[39m\\x1b[22m\\nWaiting for minimum placement group to start.\\nSuccessfully created placement groups\\nrdzv_endpoint set to 10.128.6.19 for actor b557571d0e8b5ce27f34a7e802000000\\nrdzv_endpoint set to 10.128.6.19 for actor 8826a9b5f730773c7c99241102000000\\nSuccessfully placed command actors\\nEntering main loop, start executing the script on worker nodes\\nrunning ray.wait on [ObjectRef(e082c90ab8422b00b557571d0e8b5ce27f34a7e80200000001000000), ObjectRef(ce868e48e2fa9a948826a9b5f730773c7c9924110200000001000000)]\\nrunning ray.wait on [ObjectRef(ce868e48e2fa9a948826a9b5f730773c7c9924110200000001000000), ObjectRef(f81ec6ff838b16dbb557571d0e8b5ce27f34a7e80200000001000000)]\\nrunning ray.wait on [ObjectRef(f81ec6ff838b16dbb557571d0e8b5ce27f34a7e80200000001000000), ObjectRef(32b0eec39cfa87ac8826a9b5f730773c7c9924110200000001000000)]\\n'" - ] - }, - "execution_count": 21, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "job.logs()" - ] - }, - { - "cell_type": "markdown", - "id": "92b0a8ec-c2ee-43ce-abd7-368c769abd57", - "metadata": {}, - "source": [ - "View live updates for status, logs, and other information with:" - ] - }, - { - "cell_type": "code", - "execution_count": 22, - "id": "36bfa974-d923-44d2-b079-4c42b66152f7", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "'http://ray-dashboard-gptfttest-default.apps.meyceoz-07122023.psap.aws.rhperfscale.org'" - ] - }, - "execution_count": 22, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "cluster.cluster_dashboard_uri()" - ] - }, - { - "cell_type": "code", - "execution_count": 24, - "id": "d7b27d0a-b0e3-421c-a3f5-d9db97890f7c", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "AppStatus:\n", - " msg: !!python/object/apply:ray.dashboard.modules.job.common.JobStatus\n", - " - SUCCEEDED\n", - " num_restarts: -1\n", - " roles:\n", - " - replicas:\n", - " - 
hostname: \n", - " id: 0\n", - " role: ray\n", - " state: !!python/object/apply:torchx.specs.api.AppState\n", - " - 4\n", - " structured_error_msg: \n", - " role: ray\n", - " state: SUCCEEDED (4)\n", - " structured_error_msg: \n", - " ui_url: null" - ] - }, - "execution_count": 24, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "job.status()" - ] - }, - { - "cell_type": "code", - "execution_count": 25, - "id": "beb1a6b9-d9b3-49b7-b036-09f1d3569b59", - "metadata": {}, - "outputs": [], - "source": [ - "cluster.down()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8398d977-db24-46d0-a7d2-b4e9197808d7", - "metadata": {}, - "outputs": [], - "source": [ - "auth.logout()" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.17" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/demo-notebooks/guided-demos/notebook-ex-outputs/gpt_og.py b/demo-notebooks/guided-demos/notebook-ex-outputs/gpt_og.py deleted file mode 100644 index d69e41fc..00000000 --- a/demo-notebooks/guided-demos/notebook-ex-outputs/gpt_og.py +++ /dev/null @@ -1,728 +0,0 @@ -#!/usr/bin/env python -# coding=utf-8 -# Copyright 2020 The HuggingFace Inc. team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -Fine-tuning the library models for causal language modeling (GPT, GPT-2, CTRL, ...) on a text file or a dataset. - -Here is the full list of checkpoints on the hub that can be fine-tuned by this script: -https://huggingface.co/models?filter=text-generation -""" -# You can also adapt this script on your own causal language modeling task. Pointers for this are left as comments. - -import subprocess - -subprocess.run(["pip", "uninstall", "protobuf"]) -subprocess.run( - [ - "pip", - "install", - "--upgrade", - "--target=/home/ray/workspace", - "-r", - "requirements.txt", - ] -) - -import logging -import math -import os -import sys -from dataclasses import dataclass, field -from itertools import chain -from typing import Optional - -import datasets -import evaluate -import torch -from datasets import load_dataset - -import transformers -from transformers import ( - CONFIG_MAPPING, - MODEL_FOR_CAUSAL_LM_MAPPING, - AutoConfig, - AutoModelForCausalLM, - AutoTokenizer, - HfArgumentParser, - Trainer, - TrainingArguments, - default_data_collator, - is_torch_tpu_available, - set_seed, -) -from transformers.testing_utils import CaptureLogger -from transformers.trainer_utils import get_last_checkpoint -from transformers.utils import check_min_version, send_example_telemetry -from transformers.utils.versions import require_version - - -# Will error if the minimal version of Transformers is not installed. Remove at your own risks. 
-# check_min_version("4.29.0.dev0") - -require_version( - "datasets>=1.8.0", - "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt", -) - -logger = logging.getLogger(__name__) - - -MODEL_CONFIG_CLASSES = list(MODEL_FOR_CAUSAL_LM_MAPPING.keys()) -MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) - - -@dataclass -class ModelArguments: - """ - Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch. - """ - - model_name_or_path: Optional[str] = field( - default=None, - metadata={ - "help": ( - "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch." - ) - }, - ) - model_type: Optional[str] = field( - default=None, - metadata={ - "help": "If training from scratch, pass a model type from the list: " - + ", ".join(MODEL_TYPES) - }, - ) - config_overrides: Optional[str] = field( - default=None, - metadata={ - "help": ( - "Override some existing default config settings when a model is trained from scratch. Example: " - "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index" - ) - }, - ) - config_name: Optional[str] = field( - default=None, - metadata={ - "help": "Pretrained config name or path if not the same as model_name" - }, - ) - tokenizer_name: Optional[str] = field( - default=None, - metadata={ - "help": "Pretrained tokenizer name or path if not the same as model_name" - }, - ) - cache_dir: Optional[str] = field( - default=None, - metadata={ - "help": "Where do you want to store the pretrained models downloaded from huggingface.co" - }, - ) - use_fast_tokenizer: bool = field( - default=True, - metadata={ - "help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not." - }, - ) - model_revision: str = field( - default="main", - metadata={ - "help": "The specific model version to use (can be a branch name, tag name or commit id)." 
- }, - ) - use_auth_token: bool = field( - default=False, - metadata={ - "help": ( - "Will use the token generated when running `huggingface-cli login` (necessary to use this script " - "with private models)." - ) - }, - ) - torch_dtype: Optional[str] = field( - default=None, - metadata={ - "help": ( - "Override the default `torch.dtype` and load the model under this dtype. If `auto` is passed, the " - "dtype will be automatically derived from the model's weights." - ), - "choices": ["auto", "bfloat16", "float16", "float32"], - }, - ) - low_cpu_mem_usage: bool = field( - default=False, - metadata={ - "help": ( - "It is an option to create the model as an empty shell, then only materialize its parameters when the pretrained weights are loaded." - "set True will benefit LLM loading time and RAM consumption." - ) - }, - ) - - def __post_init__(self): - if self.config_overrides is not None and ( - self.config_name is not None or self.model_name_or_path is not None - ): - raise ValueError( - "--config_overrides can't be used in combination with --config_name or --model_name_or_path" - ) - - -@dataclass -class DataTrainingArguments: - """ - Arguments pertaining to what data we are going to input our model for training and eval. - """ - - dataset_name: Optional[str] = field( - default=None, - metadata={"help": "The name of the dataset to use (via the datasets library)."}, - ) - dataset_config_name: Optional[str] = field( - default=None, - metadata={ - "help": "The configuration name of the dataset to use (via the datasets library)." - }, - ) - train_file: Optional[str] = field( - default=None, metadata={"help": "The input training data file (a text file)."} - ) - validation_file: Optional[str] = field( - default=None, - metadata={ - "help": "An optional input evaluation data file to evaluate the perplexity on (a text file)." 
- }, - ) - max_train_samples: Optional[int] = field( - default=None, - metadata={ - "help": ( - "For debugging purposes or quicker training, truncate the number of training examples to this " - "value if set." - ) - }, - ) - max_eval_samples: Optional[int] = field( - default=None, - metadata={ - "help": ( - "For debugging purposes or quicker training, truncate the number of evaluation examples to this " - "value if set." - ) - }, - ) - streaming: bool = field(default=False, metadata={"help": "Enable streaming mode"}) - block_size: Optional[int] = field( - default=None, - metadata={ - "help": ( - "Optional input sequence length after tokenization. " - "The training dataset will be truncated in block of this size for training. " - "Default to the model max input length for single sentence inputs (take into account special tokens)." - ) - }, - ) - overwrite_cache: bool = field( - default=False, - metadata={"help": "Overwrite the cached training and evaluation sets"}, - ) - validation_split_percentage: Optional[int] = field( - default=5, - metadata={ - "help": "The percentage of the train set used as validation set in case there's no validation split" - }, - ) - preprocessing_num_workers: Optional[int] = field( - default=None, - metadata={"help": "The number of processes to use for the preprocessing."}, - ) - keep_linebreaks: bool = field( - default=True, - metadata={"help": "Whether to keep line breaks when using TXT files or not."}, - ) - - def __post_init__(self): - if self.streaming: - require_version( - "datasets>=2.0.0", "The streaming feature requires `datasets>=2.0.0`" - ) - - if ( - self.dataset_name is None - and self.train_file is None - and self.validation_file is None - ): - raise ValueError( - "Need either a dataset name or a training/validation file." - ) - else: - if self.train_file is not None: - extension = self.train_file.split(".")[-1] - assert extension in [ - "csv", - "json", - "txt", - ], "`train_file` should be a csv, a json or a txt file." 
- if self.validation_file is not None: - extension = self.validation_file.split(".")[-1] - assert extension in [ - "csv", - "json", - "txt", - ], "`validation_file` should be a csv, a json or a txt file." - - -def main(): - # See all possible arguments in src/transformers/training_args.py - # or by passing the --help flag to this script. - # We now keep distinct sets of args, for a cleaner separation of concerns. - - parser = HfArgumentParser( - (ModelArguments, DataTrainingArguments, TrainingArguments) - ) - if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): - # If we pass only one argument to the script and it's the path to a json file, - # let's parse it to get our arguments. - model_args, data_args, training_args = parser.parse_json_file( - json_file=os.path.abspath(sys.argv[1]) - ) - else: - model_args, data_args, training_args = parser.parse_args_into_dataclasses() - - # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The - # information sent is the one passed as arguments along with your Python/PyTorch versions. - send_example_telemetry("run_clm", model_args, data_args) - - # Setup logging - logging.basicConfig( - format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", - datefmt="%m/%d/%Y %H:%M:%S", - handlers=[logging.StreamHandler(sys.stdout)], - ) - - if training_args.should_log: - # The default of training_args.log_level is passive, so we set log level at info here to have that default. 
- transformers.utils.logging.set_verbosity_info() - - log_level = training_args.get_process_log_level() - logger.setLevel(log_level) - datasets.utils.logging.set_verbosity(log_level) - transformers.utils.logging.set_verbosity(log_level) - transformers.utils.logging.enable_default_handler() - transformers.utils.logging.enable_explicit_format() - - # Log on each process the small summary: - logger.warning( - f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" - + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" - ) - logger.info(f"Training/evaluation parameters {training_args}") - - # Detecting last checkpoint. - last_checkpoint = None - if ( - os.path.isdir(training_args.output_dir) - and training_args.do_train - and not training_args.overwrite_output_dir - ): - last_checkpoint = get_last_checkpoint(training_args.output_dir) - if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: - raise ValueError( - f"Output directory ({training_args.output_dir}) already exists and is not empty. " - "Use --overwrite_output_dir to overcome." - ) - elif ( - last_checkpoint is not None and training_args.resume_from_checkpoint is None - ): - logger.info( - f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " - "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." - ) - - # Set seed before initializing model. - set_seed(training_args.seed) - - # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) - # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ - # (the dataset will be downloaded automatically from the datasets Hub). - # - # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called - # 'text' is found. 
You can easily tweak this behavior (see below). - # - # In distributed training, the load_dataset function guarantee that only one local process can concurrently - # download the dataset. - if data_args.dataset_name is not None: - # Downloading and loading a dataset from the hub. - raw_datasets = load_dataset( - data_args.dataset_name, - data_args.dataset_config_name, - cache_dir=model_args.cache_dir, - use_auth_token=True if model_args.use_auth_token else None, - streaming=data_args.streaming, - ) - if "validation" not in raw_datasets.keys(): - raw_datasets["validation"] = load_dataset( - data_args.dataset_name, - data_args.dataset_config_name, - split=f"train[:{data_args.validation_split_percentage}%]", - cache_dir=model_args.cache_dir, - use_auth_token=True if model_args.use_auth_token else None, - streaming=data_args.streaming, - ) - raw_datasets["train"] = load_dataset( - data_args.dataset_name, - data_args.dataset_config_name, - split=f"train[{data_args.validation_split_percentage}%:]", - cache_dir=model_args.cache_dir, - use_auth_token=True if model_args.use_auth_token else None, - streaming=data_args.streaming, - ) - else: - data_files = {} - dataset_args = {} - if data_args.train_file is not None: - data_files["train"] = data_args.train_file - if data_args.validation_file is not None: - data_files["validation"] = data_args.validation_file - extension = ( - data_args.train_file.split(".")[-1] - if data_args.train_file is not None - else data_args.validation_file.split(".")[-1] - ) - if extension == "txt": - extension = "text" - dataset_args["keep_linebreaks"] = data_args.keep_linebreaks - raw_datasets = load_dataset( - extension, - data_files=data_files, - cache_dir=model_args.cache_dir, - use_auth_token=True if model_args.use_auth_token else None, - **dataset_args, - ) - # If no validation data is there, validation_split_percentage will be used to divide the dataset. 
- if "validation" not in raw_datasets.keys(): - raw_datasets["validation"] = load_dataset( - extension, - data_files=data_files, - split=f"train[:{data_args.validation_split_percentage}%]", - cache_dir=model_args.cache_dir, - use_auth_token=True if model_args.use_auth_token else None, - **dataset_args, - ) - raw_datasets["train"] = load_dataset( - extension, - data_files=data_files, - split=f"train[{data_args.validation_split_percentage}%:]", - cache_dir=model_args.cache_dir, - use_auth_token=True if model_args.use_auth_token else None, - **dataset_args, - ) - - # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at - # https://huggingface.co/docs/datasets/loading_datasets.html. - - # Load pretrained model and tokenizer - # - # Distributed training: - # The .from_pretrained methods guarantee that only one local process can concurrently - # download model & vocab. - - config_kwargs = { - "cache_dir": model_args.cache_dir, - "revision": model_args.model_revision, - "use_auth_token": True if model_args.use_auth_token else None, - } - if model_args.config_name: - config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs) - elif model_args.model_name_or_path: - config = AutoConfig.from_pretrained( - model_args.model_name_or_path, **config_kwargs - ) - else: - config = CONFIG_MAPPING[model_args.model_type]() - logger.warning("You are instantiating a new config instance from scratch.") - if model_args.config_overrides is not None: - logger.info(f"Overriding config: {model_args.config_overrides}") - config.update_from_string(model_args.config_overrides) - logger.info(f"New config: {config}") - - tokenizer_kwargs = { - "cache_dir": model_args.cache_dir, - "use_fast": model_args.use_fast_tokenizer, - "revision": model_args.model_revision, - "use_auth_token": True if model_args.use_auth_token else None, - } - if model_args.tokenizer_name: - tokenizer = AutoTokenizer.from_pretrained( - 
model_args.tokenizer_name, **tokenizer_kwargs - ) - elif model_args.model_name_or_path: - tokenizer = AutoTokenizer.from_pretrained( - model_args.model_name_or_path, **tokenizer_kwargs - ) - else: - raise ValueError( - "You are instantiating a new tokenizer from scratch. This is not supported by this script." - "You can do it from another script, save it, and load it from here, using --tokenizer_name." - ) - - if model_args.model_name_or_path: - torch_dtype = ( - model_args.torch_dtype - if model_args.torch_dtype in ["auto", None] - else getattr(torch, model_args.torch_dtype) - ) - model = AutoModelForCausalLM.from_pretrained( - model_args.model_name_or_path, - from_tf=bool(".ckpt" in model_args.model_name_or_path), - config=config, - cache_dir=model_args.cache_dir, - revision=model_args.model_revision, - use_auth_token=True if model_args.use_auth_token else None, - torch_dtype=torch_dtype, - low_cpu_mem_usage=model_args.low_cpu_mem_usage, - ) - else: - model = AutoModelForCausalLM.from_config(config) - n_params = sum({p.data_ptr(): p.numel() for p in model.parameters()}.values()) - logger.info( - f"Training new model from scratch - Total size={n_params/2**20:.2f}M params" - ) - - # We resize the embeddings only when necessary to avoid index errors. If you are creating a model from scratch - # on a small vocab and want a smaller embedding size, remove this test. - embedding_size = model.get_input_embeddings().weight.shape[0] - if len(tokenizer) > embedding_size: - model.resize_token_embeddings(len(tokenizer)) - - # Preprocessing the datasets. - # First we tokenize all the texts. 
- if training_args.do_train: - column_names = list(raw_datasets["train"].features) - else: - column_names = list(raw_datasets["validation"].features) - text_column_name = "text" if "text" in column_names else column_names[0] - - # since this will be pickled to avoid _LazyModule error in Hasher force logger loading before tokenize_function - tok_logger = transformers.utils.logging.get_logger( - "transformers.tokenization_utils_base" - ) - - def tokenize_function(examples): - with CaptureLogger(tok_logger) as cl: - output = tokenizer(examples[text_column_name]) - # clm input could be much much longer than block_size - if "Token indices sequence length is longer than the" in cl.out: - tok_logger.warning( - "^^^^^^^^^^^^^^^^ Please ignore the warning above - this long input will be chunked into smaller bits" - " before being passed to the model." - ) - return output - - with training_args.main_process_first(desc="dataset map tokenization"): - if not data_args.streaming: - tokenized_datasets = raw_datasets.map( - tokenize_function, - batched=True, - num_proc=data_args.preprocessing_num_workers, - remove_columns=column_names, - load_from_cache_file=not data_args.overwrite_cache, - desc="Running tokenizer on dataset", - ) - else: - tokenized_datasets = raw_datasets.map( - tokenize_function, - batched=True, - remove_columns=column_names, - ) - - if data_args.block_size is None: - block_size = tokenizer.model_max_length - if block_size > 1024: - logger.warning( - "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value" - " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can" - " override this default with `--block_size xxx`." - ) - block_size = 1024 - else: - if data_args.block_size > tokenizer.model_max_length: - logger.warning( - f"The block_size passed ({data_args.block_size}) is larger than the maximum length for the model" - f"({tokenizer.model_max_length}). 
Using block_size={tokenizer.model_max_length}." - ) - block_size = min(data_args.block_size, tokenizer.model_max_length) - - # Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size. - def group_texts(examples): - # Concatenate all texts. - concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()} - total_length = len(concatenated_examples[list(examples.keys())[0]]) - # We drop the small remainder, we could add padding if the model supported it instead of this drop, you can - # customize this part to your needs. - if total_length >= block_size: - total_length = (total_length // block_size) * block_size - # Split by chunks of max_len. - result = { - k: [t[i : i + block_size] for i in range(0, total_length, block_size)] - for k, t in concatenated_examples.items() - } - result["labels"] = result["input_ids"].copy() - return result - - # Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a remainder - # for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value might be slower - # to preprocess. - # - # To speed up this part, we use multiprocessing. 
See the documentation of the map method for more information: - # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map - - with training_args.main_process_first(desc="grouping texts together"): - if not data_args.streaming: - lm_datasets = tokenized_datasets.map( - group_texts, - batched=True, - num_proc=data_args.preprocessing_num_workers, - load_from_cache_file=not data_args.overwrite_cache, - desc=f"Grouping texts in chunks of {block_size}", - ) - else: - lm_datasets = tokenized_datasets.map( - group_texts, - batched=True, - ) - - if training_args.do_train: - if "train" not in tokenized_datasets: - raise ValueError("--do_train requires a train dataset") - train_dataset = lm_datasets["train"] - if data_args.max_train_samples is not None: - max_train_samples = min(len(train_dataset), data_args.max_train_samples) - train_dataset = train_dataset.select(range(max_train_samples)) - - if training_args.do_eval: - if "validation" not in tokenized_datasets: - raise ValueError("--do_eval requires a validation dataset") - eval_dataset = lm_datasets["validation"] - if data_args.max_eval_samples is not None: - max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples) - eval_dataset = eval_dataset.select(range(max_eval_samples)) - - def preprocess_logits_for_metrics(logits, labels): - if isinstance(logits, tuple): - # Depending on the model and config, logits may contain extra tensors, - # like past_key_values, but logits always come first - logits = logits[0] - return logits.argmax(dim=-1) - - metric = evaluate.load("accuracy") - - def compute_metrics(eval_preds): - preds, labels = eval_preds - # preds have the same shape as the labels, after the argmax(-1) has been calculated - # by preprocess_logits_for_metrics but we need to shift the labels - labels = labels[:, 1:].reshape(-1) - preds = preds[:, :-1].reshape(-1) - return metric.compute(predictions=preds, references=labels) - - # Initialize our Trainer - trainer = 
Trainer( - model=model, - args=training_args, - train_dataset=train_dataset if training_args.do_train else None, - eval_dataset=eval_dataset if training_args.do_eval else None, - tokenizer=tokenizer, - # Data collator will default to DataCollatorWithPadding, so we change it. - data_collator=default_data_collator, - compute_metrics=compute_metrics - if training_args.do_eval and not is_torch_tpu_available() - else None, - preprocess_logits_for_metrics=preprocess_logits_for_metrics - if training_args.do_eval and not is_torch_tpu_available() - else None, - ) - - # Training - if training_args.do_train: - checkpoint = None - if training_args.resume_from_checkpoint is not None: - checkpoint = training_args.resume_from_checkpoint - elif last_checkpoint is not None: - checkpoint = last_checkpoint - train_result = trainer.train(resume_from_checkpoint=checkpoint) - trainer.save_model() # Saves the tokenizer too for easy upload - - metrics = train_result.metrics - - max_train_samples = ( - data_args.max_train_samples - if data_args.max_train_samples is not None - else len(train_dataset) - ) - metrics["train_samples"] = min(max_train_samples, len(train_dataset)) - - trainer.log_metrics("train", metrics) - trainer.save_metrics("train", metrics) - trainer.save_state() - - # Evaluation - if training_args.do_eval: - logger.info("*** Evaluate ***") - - metrics = trainer.evaluate() - - max_eval_samples = ( - data_args.max_eval_samples - if data_args.max_eval_samples is not None - else len(eval_dataset) - ) - metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset)) - try: - perplexity = math.exp(metrics["eval_loss"]) - except OverflowError: - perplexity = float("inf") - metrics["perplexity"] = perplexity - - trainer.log_metrics("eval", metrics) - trainer.save_metrics("eval", metrics) - - kwargs = { - "finetuned_from": model_args.model_name_or_path, - "tasks": "text-generation", - } - if data_args.dataset_name is not None: - kwargs["dataset_tags"] = data_args.dataset_name - 
if data_args.dataset_config_name is not None: - kwargs["dataset_args"] = data_args.dataset_config_name - kwargs[ - "dataset" - ] = f"{data_args.dataset_name} {data_args.dataset_config_name}" - else: - kwargs["dataset"] = data_args.dataset_name - - if training_args.push_to_hub: - trainer.push_to_hub(**kwargs) - else: - trainer.create_model_card(**kwargs) - - -def _mp_fn(index): - # For xla_spawn (TPUs) - main() - - -if __name__ == "__main__": - main() diff --git a/demo-notebooks/guided-demos/notebook-ex-outputs/gptfttest.yaml b/demo-notebooks/guided-demos/notebook-ex-outputs/gptfttest.yaml deleted file mode 100644 index 427491dc..00000000 --- a/demo-notebooks/guided-demos/notebook-ex-outputs/gptfttest.yaml +++ /dev/null @@ -1,193 +0,0 @@ -apiVersion: workload.codeflare.dev/v1beta1 -kind: AppWrapper -metadata: - labels: - orderedinstance: m5.xlarge_g4dn.xlarge - name: gptfttest - namespace: default -spec: - priority: 9 - resources: - GenericItems: - - custompodresources: - - limits: - cpu: 2 - memory: 8G - nvidia.com/gpu: 0 - replicas: 1 - requests: - cpu: 2 - memory: 8G - nvidia.com/gpu: 0 - - limits: - cpu: 2 - memory: 8G - nvidia.com/gpu: 1 - replicas: 2 - requests: - cpu: 2 - memory: 8G - nvidia.com/gpu: 1 - generictemplate: - apiVersion: ray.io/v1alpha1 - kind: RayCluster - metadata: - labels: - appwrapper.mcad.ibm.com: gptfttest - controller-tools.k8s.io: '1.0' - name: gptfttest - namespace: default - spec: - autoscalerOptions: - idleTimeoutSeconds: 60 - imagePullPolicy: Always - resources: - limits: - cpu: 500m - memory: 512Mi - requests: - cpu: 500m - memory: 512Mi - upscalingMode: Default - enableInTreeAutoscaling: false - headGroupSpec: - rayStartParams: - block: 'true' - dashboard-host: 0.0.0.0 - num-gpus: '0' - serviceType: ClusterIP - template: - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: gptfttest - operator: In - values: - - gptfttest - containers: - - env: - - 
name: MY_POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: RAY_USE_TLS - value: '0' - - name: RAY_TLS_SERVER_CERT - value: /home/ray/workspace/tls/server.crt - - name: RAY_TLS_SERVER_KEY - value: /home/ray/workspace/tls/server.key - - name: RAY_TLS_CA_CERT - value: /home/ray/workspace/tls/ca.crt - image: quay.io/project-codeflare/ray:2.5.0-py38-cu116 - imagePullPolicy: Always - lifecycle: - preStop: - exec: - command: - - /bin/sh - - -c - - ray stop - name: ray-head - ports: - - containerPort: 6379 - name: gcs - - containerPort: 8265 - name: dashboard - - containerPort: 10001 - name: client - resources: - limits: - cpu: 2 - memory: 8G - nvidia.com/gpu: 0 - requests: - cpu: 2 - memory: 8G - nvidia.com/gpu: 0 - imagePullSecrets: [] - rayVersion: 2.1.0 - workerGroupSpecs: - - groupName: small-group-gptfttest - maxReplicas: 2 - minReplicas: 2 - rayStartParams: - block: 'true' - num-gpus: '1' - replicas: 2 - template: - metadata: - annotations: - key: value - labels: - key: value - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: gptfttest - operator: In - values: - - gptfttest - containers: - - env: - - name: MY_POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: RAY_USE_TLS - value: '0' - - name: RAY_TLS_SERVER_CERT - value: /home/ray/workspace/tls/server.crt - - name: RAY_TLS_SERVER_KEY - value: /home/ray/workspace/tls/server.key - - name: RAY_TLS_CA_CERT - value: /home/ray/workspace/tls/ca.crt - image: quay.io/project-codeflare/ray:2.5.0-py38-cu116 - lifecycle: - preStop: - exec: - command: - - /bin/sh - - -c - - ray stop - name: machine-learning - resources: - limits: - cpu: 2 - memory: 8G - nvidia.com/gpu: 1 - requests: - cpu: 2 - memory: 8G - nvidia.com/gpu: 1 - imagePullSecrets: [] - initContainers: - - command: - - sh - - -c - - until nslookup $RAY_IP.$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace).svc.cluster.local; - do echo 
waiting for myservice; sleep 2; done - image: busybox:1.28 - name: init-myservice - replicas: 1 - - generictemplate: - apiVersion: route.openshift.io/v1 - kind: Route - metadata: - labels: - odh-ray-cluster-service: gptfttest-head-svc - name: ray-dashboard-gptfttest - namespace: default - spec: - port: - targetPort: dashboard - to: - kind: Service - name: gptfttest-head-svc - replicas: 1 - Items: [] diff --git a/demo-notebooks/guided-demos/notebook-ex-outputs/instascaletest.yaml b/demo-notebooks/guided-demos/notebook-ex-outputs/instascaletest.yaml deleted file mode 100644 index 58ccc1ae..00000000 --- a/demo-notebooks/guided-demos/notebook-ex-outputs/instascaletest.yaml +++ /dev/null @@ -1,193 +0,0 @@ -apiVersion: workload.codeflare.dev/v1beta1 -kind: AppWrapper -metadata: - labels: - orderedinstance: m5.xlarge_g4dn.xlarge - name: instascaletest - namespace: default -spec: - priority: 9 - resources: - GenericItems: - - custompodresources: - - limits: - cpu: 2 - memory: 8G - nvidia.com/gpu: 0 - replicas: 1 - requests: - cpu: 2 - memory: 8G - nvidia.com/gpu: 0 - - limits: - cpu: 2 - memory: 8G - nvidia.com/gpu: 1 - replicas: 2 - requests: - cpu: 2 - memory: 8G - nvidia.com/gpu: 1 - generictemplate: - apiVersion: ray.io/v1alpha1 - kind: RayCluster - metadata: - labels: - appwrapper.mcad.ibm.com: instascaletest - controller-tools.k8s.io: '1.0' - name: instascaletest - namespace: default - spec: - autoscalerOptions: - idleTimeoutSeconds: 60 - imagePullPolicy: Always - resources: - limits: - cpu: 500m - memory: 512Mi - requests: - cpu: 500m - memory: 512Mi - upscalingMode: Default - enableInTreeAutoscaling: false - headGroupSpec: - rayStartParams: - block: 'true' - dashboard-host: 0.0.0.0 - num-gpus: '0' - serviceType: ClusterIP - template: - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: instascaletest - operator: In - values: - - instascaletest - containers: - - env: - - name: 
MY_POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: RAY_USE_TLS - value: '0' - - name: RAY_TLS_SERVER_CERT - value: /home/ray/workspace/tls/server.crt - - name: RAY_TLS_SERVER_KEY - value: /home/ray/workspace/tls/server.key - - name: RAY_TLS_CA_CERT - value: /home/ray/workspace/tls/ca.crt - image: quay.io/project-codeflare/ray:2.5.0-py38-cu116 - imagePullPolicy: Always - lifecycle: - preStop: - exec: - command: - - /bin/sh - - -c - - ray stop - name: ray-head - ports: - - containerPort: 6379 - name: gcs - - containerPort: 8265 - name: dashboard - - containerPort: 10001 - name: client - resources: - limits: - cpu: 2 - memory: 8G - nvidia.com/gpu: 0 - requests: - cpu: 2 - memory: 8G - nvidia.com/gpu: 0 - imagePullSecrets: [] - rayVersion: 2.1.0 - workerGroupSpecs: - - groupName: small-group-instascaletest - maxReplicas: 2 - minReplicas: 2 - rayStartParams: - block: 'true' - num-gpus: '1' - replicas: 2 - template: - metadata: - annotations: - key: value - labels: - key: value - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: instascaletest - operator: In - values: - - instascaletest - containers: - - env: - - name: MY_POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: RAY_USE_TLS - value: '0' - - name: RAY_TLS_SERVER_CERT - value: /home/ray/workspace/tls/server.crt - - name: RAY_TLS_SERVER_KEY - value: /home/ray/workspace/tls/server.key - - name: RAY_TLS_CA_CERT - value: /home/ray/workspace/tls/ca.crt - image: quay.io/project-codeflare/ray:2.5.0-py38-cu116 - lifecycle: - preStop: - exec: - command: - - /bin/sh - - -c - - ray stop - name: machine-learning - resources: - limits: - cpu: 2 - memory: 8G - nvidia.com/gpu: 1 - requests: - cpu: 2 - memory: 8G - nvidia.com/gpu: 1 - imagePullSecrets: [] - initContainers: - - command: - - sh - - -c - - until nslookup $RAY_IP.$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace).svc.cluster.local; - 
do echo waiting for myservice; sleep 2; done - image: busybox:1.28 - name: init-myservice - replicas: 1 - - generictemplate: - apiVersion: route.openshift.io/v1 - kind: Route - metadata: - labels: - odh-ray-cluster-service: instascaletest-head-svc - name: ray-dashboard-instascaletest - namespace: default - spec: - port: - targetPort: dashboard - to: - kind: Service - name: instascaletest-head-svc - replicas: 1 - Items: [] diff --git a/demo-notebooks/guided-demos/notebook-ex-outputs/interactivetest.yaml b/demo-notebooks/guided-demos/notebook-ex-outputs/interactivetest.yaml index 83e74e93..443da33c 100644 --- a/demo-notebooks/guided-demos/notebook-ex-outputs/interactivetest.yaml +++ b/demo-notebooks/guided-demos/notebook-ex-outputs/interactivetest.yaml @@ -29,7 +29,7 @@ spec: memory: 8G nvidia.com/gpu: 1 generictemplate: - apiVersion: ray.io/v1alpha1 + apiVersion: ray.io/v1 kind: RayCluster metadata: labels: @@ -81,7 +81,7 @@ spec: value: /home/ray/workspace/tls/server.key - name: RAY_TLS_CA_CERT value: /home/ray/workspace/tls/ca.crt - image: quay.io/project-codeflare/ray:2.5.0-py38-cu116 + image: quay.io/modh/ray:2.47.1-py311-cu121 imagePullPolicy: Always lifecycle: preStop: @@ -108,7 +108,7 @@ spec: memory: 8G nvidia.com/gpu: 0 imagePullSecrets: [] - rayVersion: 2.1.0 + rayVersion: 2.47.1 workerGroupSpecs: - groupName: small-group-interactivetest maxReplicas: 2 @@ -147,7 +147,7 @@ spec: value: /home/ray/workspace/tls/server.key - name: RAY_TLS_CA_CERT value: /home/ray/workspace/tls/ca.crt - image: quay.io/project-codeflare/ray:2.5.0-py38-cu116 + image: quay.io/modh/ray:2.47.1-py311-cu121 lifecycle: preStop: exec: @@ -166,14 +166,6 @@ spec: memory: 8G nvidia.com/gpu: 1 imagePullSecrets: [] - initContainers: - - command: - - sh - - -c - - until nslookup $RAY_IP.$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace).svc.cluster.local; - do echo waiting for myservice; sleep 2; done - image: busybox:1.28 - name: init-myservice replicas: 1 - generictemplate: 
apiVersion: route.openshift.io/v1 diff --git a/demo-notebooks/guided-demos/notebook-ex-outputs/jobtest.yaml b/demo-notebooks/guided-demos/notebook-ex-outputs/jobtest.yaml index ef397681..5d5b0b0e 100644 --- a/demo-notebooks/guided-demos/notebook-ex-outputs/jobtest.yaml +++ b/demo-notebooks/guided-demos/notebook-ex-outputs/jobtest.yaml @@ -27,7 +27,7 @@ spec: memory: 4G nvidia.com/gpu: 0 generictemplate: - apiVersion: ray.io/v1alpha1 + apiVersion: ray.io/v1 kind: RayCluster metadata: labels: @@ -70,7 +70,7 @@ spec: value: /home/ray/workspace/tls/server.key - name: RAY_TLS_CA_CERT value: /home/ray/workspace/tls/ca.crt - image: quay.io/project-codeflare/ray:2.5.0-py38-cu116 + image: quay.io/modh/ray:2.47.1-py311-cu121 imagePullPolicy: Always lifecycle: preStop: @@ -97,7 +97,7 @@ spec: memory: 8G nvidia.com/gpu: 0 imagePullSecrets: [] - rayVersion: 2.1.0 + rayVersion: 2.47.1 workerGroupSpecs: - groupName: small-group-jobtest maxReplicas: 2 @@ -127,7 +127,7 @@ spec: value: /home/ray/workspace/tls/server.key - name: RAY_TLS_CA_CERT value: /home/ray/workspace/tls/ca.crt - image: quay.io/project-codeflare/ray:2.5.0-py38-cu116 + image: quay.io/modh/ray:2.47.1-py311-cu121 lifecycle: preStop: exec: @@ -146,14 +146,6 @@ spec: memory: 4G nvidia.com/gpu: 0 imagePullSecrets: [] - initContainers: - - command: - - sh - - -c - - until nslookup $RAY_IP.$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace).svc.cluster.local; - do echo waiting for myservice; sleep 2; done - image: busybox:1.28 - name: init-myservice replicas: 1 - generictemplate: apiVersion: route.openshift.io/v1 diff --git a/demo-notebooks/guided-demos/notebook-ex-outputs/raytest.yaml b/demo-notebooks/guided-demos/notebook-ex-outputs/raytest.yaml index 96d9b8af..81796687 100644 --- a/demo-notebooks/guided-demos/notebook-ex-outputs/raytest.yaml +++ b/demo-notebooks/guided-demos/notebook-ex-outputs/raytest.yaml @@ -27,7 +27,7 @@ spec: memory: 4G nvidia.com/gpu: 0 generictemplate: - apiVersion: ray.io/v1alpha1 + 
apiVersion: ray.io/v1 kind: RayCluster metadata: labels: @@ -70,7 +70,7 @@ spec: value: /home/ray/workspace/tls/server.key - name: RAY_TLS_CA_CERT value: /home/ray/workspace/tls/ca.crt - image: quay.io/project-codeflare/ray:2.5.0-py38-cu116 + image: quay.io/modh/ray:2.47.1-py311-cu121 imagePullPolicy: Always lifecycle: preStop: @@ -97,7 +97,7 @@ spec: memory: 8G nvidia.com/gpu: 0 imagePullSecrets: [] - rayVersion: 2.1.0 + rayVersion: 2.47.1 workerGroupSpecs: - groupName: small-group-raytest maxReplicas: 2 @@ -127,7 +127,7 @@ spec: value: /home/ray/workspace/tls/server.key - name: RAY_TLS_CA_CERT value: /home/ray/workspace/tls/ca.crt - image: quay.io/project-codeflare/ray:2.5.0-py38-cu116 + image: quay.io/modh/ray:2.47.1-py311-cu121 lifecycle: preStop: exec: @@ -146,14 +146,6 @@ spec: memory: 4G nvidia.com/gpu: 0 imagePullSecrets: [] - initContainers: - - command: - - sh - - -c - - until nslookup $RAY_IP.$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace).svc.cluster.local; - do echo waiting for myservice; sleep 2; done - image: busybox:1.28 - name: init-myservice replicas: 1 - generictemplate: apiVersion: route.openshift.io/v1 diff --git a/demo-notebooks/guided-demos/notebook-ex-outputs/requirements.txt b/demo-notebooks/guided-demos/notebook-ex-outputs/requirements.txt index 7266b064..c748bec2 100644 --- a/demo-notebooks/guided-demos/notebook-ex-outputs/requirements.txt +++ b/demo-notebooks/guided-demos/notebook-ex-outputs/requirements.txt @@ -1,4 +1,4 @@ -pytorch_lightning==1.5.10 +pytorch_lightning==2.4.0 ray_lightning -torchmetrics==0.9.1 -torchvision==0.12.0 +torchmetrics==1.6.0 +torchvision==0.20.1 diff --git a/demo-notebooks/guided-demos/notebook-ex-outputs/requirements_gpt.txt b/demo-notebooks/guided-demos/notebook-ex-outputs/requirements_gpt.txt deleted file mode 100644 index bd6c4f52..00000000 --- a/demo-notebooks/guided-demos/notebook-ex-outputs/requirements_gpt.txt +++ /dev/null @@ -1,8 +0,0 @@ -accelerate >= 0.12.0 -torch >= 1.3 -datasets >= 
1.8.0 -sentencepiece != 0.1.92 -evaluate -scikit-learn -transformers==4.28.1 -protobuf<=3.20.1,>=3.8.0 diff --git a/demo-notebooks/guided-demos/preview_nbs/0_basic_ray.ipynb b/demo-notebooks/guided-demos/preview_nbs/0_basic_ray.ipynb index dfe06733..49f7f687 100644 --- a/demo-notebooks/guided-demos/preview_nbs/0_basic_ray.ipynb +++ b/demo-notebooks/guided-demos/preview_nbs/0_basic_ray.ipynb @@ -5,7 +5,7 @@ "id": "8d4a42f6", "metadata": {}, "source": [ - "In this first notebook, we will go through the basics of using the SDK to:\n", + "In this notebook, we will go through the basics of using the SDK to:\n", " - Spin up a Ray cluster with our desired resources\n", " - View the status and specs of our Ray cluster\n", " - Take down the Ray cluster when finished" @@ -19,8 +19,7 @@ "outputs": [], "source": [ "# Import pieces from codeflare-sdk\n", - "from codeflare_sdk.cluster.cluster import Cluster, ClusterConfiguration\n", - "from codeflare_sdk.cluster.auth import TokenAuthentication" + "from codeflare_sdk import Cluster, ClusterConfiguration, TokenAuthentication" ] }, { @@ -46,7 +45,14 @@ "id": "bc27f84c", "metadata": {}, "source": [ - "Here, we want to define our cluster by specifying the resources we require for our batch workload. Below, we define our cluster object (which generates a corresponding AppWrapper)." + "Here, we want to define our cluster by specifying the resources we require for our batch workload. Below, we define our cluster object (which generates a corresponding RayCluster).\n", + "\n", + "NOTE: The default images used by the CodeFlare SDK for creating a RayCluster resource depend on the installed Python version:\n", + "\n", + "- For Python 3.11: 'quay.io/modh/ray:2.47.1-py311-cu121'\n", + "- For Python 3.12: 'quay.io/modh/ray:2.47.1-py312-cu128'\n", + "\n", + "If you prefer to use a custom Ray image that better suits your needs, you can specify it in the image field to override the default." 
] }, { @@ -56,18 +62,20 @@ "metadata": {}, "outputs": [], "source": [ - "# Create and configure our cluster object (and appwrapper)\n", + "# Create and configure our cluster object\n", + "# The SDK will try to find the name of your default local queue based on the annotation \"kueue.x-k8s.io/default-queue\": \"true\" unless you specify the local queue manually below\n", "cluster = Cluster(ClusterConfiguration(\n", " name='raytest',\n", - " namespace='default',\n", + " head_extended_resource_requests={'nvidia.com/gpu':0}, # For GPU enabled workloads set the head_extended_resource_requests and worker_extended_resource_requests\n", + " worker_extended_resource_requests={'nvidia.com/gpu':0},\n", " num_workers=2,\n", - " min_cpus=1,\n", - " max_cpus=1,\n", - " min_memory=4,\n", - " max_memory=4,\n", - " num_gpus=0,\n", - " image=\"quay.io/project-codeflare/ray:2.5.0-py38-cu116\", #current default\n", - " instascale=False\n", + " worker_cpu_requests=1,\n", + " worker_cpu_limits=1,\n", + " worker_memory_requests=4,\n", + " worker_memory_limits=4,\n", + " # image=\"\", # Optional Field \n", + " write_to_file=False, # When enabled Ray Cluster yaml files are written to /HOME/.codeflare/resources \n", + " # local_queue=\"local-queue-name\" # Specify the local queue manually\n", "))" ] }, @@ -76,7 +84,7 @@ "id": "12eef53c", "metadata": {}, "source": [ - "Next, we want to bring our cluster up, so we call the `up()` function below to submit our cluster AppWrapper yaml onto the MCAD queue, and begin the process of obtaining our resource cluster." + "Next, we want to bring our cluster up, so we call the `up()` function below to submit our Ray Cluster onto the queue, and begin the process of obtaining our resource cluster." 
] }, { @@ -87,7 +95,7 @@ "outputs": [], "source": [ "# Bring up the cluster\n", - "cluster.up()" + "cluster.apply()" ] }, { @@ -191,7 +199,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.13" + "version": "3.9.18" }, "vscode": { "interpreter": { diff --git a/demo-notebooks/guided-demos/preview_nbs/1_basic_instascale.ipynb b/demo-notebooks/guided-demos/preview_nbs/1_basic_instascale.ipynb deleted file mode 100644 index d0faf5b9..00000000 --- a/demo-notebooks/guided-demos/preview_nbs/1_basic_instascale.ipynb +++ /dev/null @@ -1,174 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "9865ee8c", - "metadata": {}, - "source": [ - "In this second notebook, we will go over the basics of using InstaScale to scale up/down necessary resources that are not currently available on your OpenShift Cluster (in cloud environments)." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b55bc3ea-4ce3-49bf-bb1f-e209de8ca47a", - "metadata": {}, - "outputs": [], - "source": [ - "# Import pieces from codeflare-sdk\n", - "from codeflare_sdk.cluster.cluster import Cluster, ClusterConfiguration\n", - "from codeflare_sdk.cluster.auth import TokenAuthentication" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "614daa0c", - "metadata": {}, - "outputs": [], - "source": [ - "# Create authentication object for user permissions\n", - "# IF unused, SDK will automatically check for default kubeconfig, then in-cluster config\n", - "# KubeConfigFileAuthentication can also be used to specify kubeconfig path manually\n", - "auth = TokenAuthentication(\n", - " token = \"XXXXX\",\n", - " server = \"XXXXX\",\n", - " skip_tls=False\n", - ")\n", - "auth.login()" - ] - }, - { - "cell_type": "markdown", - "id": "bc27f84c", - "metadata": {}, - "source": [ - "This time, we are working in a cloud environment, and our OpenShift cluster does not have the resources needed for our desired workloads. 
We will use InstaScale to dynamically scale-up guaranteed resources based on our request (that will also automatically scale-down when we are finished working):" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0f4bc870-091f-4e11-9642-cba145710159", - "metadata": {}, - "outputs": [], - "source": [ - "# Create and configure our cluster object (and appwrapper)\n", - "cluster = Cluster(ClusterConfiguration(\n", - " name='instascaletest',\n", - " namespace='default',\n", - " num_workers=2,\n", - " min_cpus=2,\n", - " max_cpus=2,\n", - " min_memory=8,\n", - " max_memory=8,\n", - " num_gpus=1,\n", - " instascale=True, # InstaScale now enabled, will scale OCP cluster to guarantee resource request\n", - " machine_types=[\"m5.xlarge\", \"g4dn.xlarge\"] # Head, worker AWS machine types desired\n", - "))" - ] - }, - { - "cell_type": "markdown", - "id": "12eef53c", - "metadata": {}, - "source": [ - "Same as last time, we will bring the cluster up, wait for it to be ready, and confirm that the specs are as-requested:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f0884bbc-c224-4ca0-98a0-02dfa09c2200", - "metadata": {}, - "outputs": [], - "source": [ - "# Bring up the cluster\n", - "cluster.up()\n", - "cluster.wait_ready()" - ] - }, - { - "cell_type": "markdown", - "id": "6abfe904", - "metadata": {}, - "source": [ - "While the resources are being scaled, we can also go into the console and take a look at the InstaScale logs, as well as the new machines/nodes spinning up.\n", - "\n", - "Once the cluster is ready, we can confirm the specs:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7fd45bc5-03c0-4ae5-9ec5-dd1c30f1a084", - "metadata": {}, - "outputs": [], - "source": [ - "cluster.details()" - ] - }, - { - "cell_type": "markdown", - "id": "5af8cd32", - "metadata": {}, - "source": [ - "Finally, we bring our resource cluster down and release/terminate the associated resources, bringing everything back to 
the way it was before our cluster was brought up." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5f36db0f-31f6-4373-9503-dc3c1c4c3f57", - "metadata": {}, - "outputs": [], - "source": [ - "cluster.down()" - ] - }, - { - "cell_type": "markdown", - "id": "c883caea", - "metadata": {}, - "source": [ - "Once again, we can look at the machines/nodes and see that everything has been successfully scaled down!" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0d41b90e", - "metadata": {}, - "outputs": [], - "source": [ - "auth.logout()" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.13" - }, - "vscode": { - "interpreter": { - "hash": "f9f85f796d01129d0dd105a088854619f454435301f6ffec2fea96ecbd9be4ac" - } - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/demo-notebooks/guided-demos/preview_nbs/1_cluster_job_client.ipynb b/demo-notebooks/guided-demos/preview_nbs/1_cluster_job_client.ipynb new file mode 100644 index 00000000..3c7b7876 --- /dev/null +++ b/demo-notebooks/guided-demos/preview_nbs/1_cluster_job_client.ipynb @@ -0,0 +1,240 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In this demo we will go over the basics of the Ray Job Submission Client in the SDK" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Import pieces from codeflare-sdk\n", + "from codeflare_sdk import Cluster, ClusterConfiguration, TokenAuthentication" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Create authentication object for user 
permissions\n", + "# IF unused, SDK will automatically check for default kubeconfig, then in-cluster config\n", + "# KubeConfigFileAuthentication can also be used to specify kubeconfig path manually\n", + "auth = TokenAuthentication(\n", + " token = \"XXXXX\",\n", + " server = \"XXXXX\",\n", + " skip_tls=False\n", + ")\n", + "auth.login()" + ] + }, + { + "cell_type": "markdown", + "id": "bc27f84c", + "metadata": {}, + "source": [ + "Here, we want to define our cluster by specifying the resources we require for our batch workload. Below, we define our cluster object (which generates a corresponding RayCluster).\n", + "\n", + "NOTE: The default images used by the CodeFlare SDK for creating a RayCluster resource depend on the installed Python version:\n", + "\n", + "- For Python 3.11: 'quay.io/modh/ray:2.47.1-py311-cu121'\n", + "- For Python 3.12: 'quay.io/modh/ray:2.47.1-py312-cu128'\n", + "\n", + "If you prefer to use a custom Ray image that better suits your needs, you can specify it in the image field to override the default." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Create and configure our cluster object\n", + "# The SDK will try to find the name of your default local queue based on the annotation \"kueue.x-k8s.io/default-queue\": \"true\" unless you specify the local queue manually below\n", + "cluster = Cluster(ClusterConfiguration(\n", + " name='jobtest',\n", + " head_extended_resource_requests={'nvidia.com/gpu':1}, # For GPU enabled workloads set the head_extended_resource_requests and worker_extended_resource_requests\n", + " worker_extended_resource_requests={'nvidia.com/gpu':1},\n", + " num_workers=2,\n", + " worker_cpu_requests=1,\n", + " worker_cpu_limits=1,\n", + " worker_memory_requests=4,\n", + " worker_memory_limits=4,\n", + " # image=\"\", # Optional Field \n", + " write_to_file=False, # When enabled Ray Cluster yaml files are written to /HOME/.codeflare/resources\n", + " # local_queue=\"local-queue-name\" # Specify the local queue manually\n", + "))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Bring up the cluster\n", + "cluster.apply()\n", + "cluster.wait_ready()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "cluster.details()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Ray Job Submission" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "* Initialise the Cluster Job Client \n", + "* Provide an entrypoint command directed to your job script\n", + "* Set up your runtime environment" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Initialize the Job Submission Client\n", + "\"\"\"\n", + "The SDK will automatically gather the dashboard address and authenticate using the Ray Job Submission Client\n", + "\"\"\"\n", + "client = 
cluster.job_client" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Submit an example mnist job using the Job Submission Client\n", + "submission_id = client.submit_job(\n", + " entrypoint=\"python mnist_fashion.py\",\n", + " runtime_env={\"working_dir\": \"./\",\"pip\": \"requirements.txt\"},\n", + ")\n", + "print(submission_id)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Get the job's logs\n", + "client.get_job_logs(submission_id)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Get the job's status\n", + "client.get_job_status(submission_id)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Get job related info\n", + "client.get_job_info(submission_id)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# List all existing jobs\n", + "client.list_jobs()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Iterate through the logs of a job \n", + "async for lines in client.tail_job_logs(submission_id):\n", + " print(lines, end=\"\") " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Delete a job\n", + "# Can run client.cancel_job(submission_id) first if job is still running\n", + "client.delete_job(submission_id)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "cluster.down()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "auth.logout()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + 
"language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.18" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/demo-notebooks/guided-demos/preview_nbs/3_basic_interactive.ipynb b/demo-notebooks/guided-demos/preview_nbs/2_basic_interactive.ipynb similarity index 51% rename from demo-notebooks/guided-demos/preview_nbs/3_basic_interactive.ipynb rename to demo-notebooks/guided-demos/preview_nbs/2_basic_interactive.ipynb index c8b2b1a0..1de3fc9c 100644 --- a/demo-notebooks/guided-demos/preview_nbs/3_basic_interactive.ipynb +++ b/demo-notebooks/guided-demos/preview_nbs/2_basic_interactive.ipynb @@ -5,7 +5,7 @@ "id": "bbc21043", "metadata": {}, "source": [ - "In this fourth and final notebook, we will go over how to leverage the SDK to directly work interactively with a Ray cluster during development." + "In this notebook, we will go over how to leverage the SDK to directly work interactively with a Ray Cluster during development." 
] }, { @@ -16,8 +16,7 @@ "outputs": [], "source": [ "# Import pieces from codeflare-sdk\n", - "from codeflare_sdk.cluster.cluster import Cluster, ClusterConfiguration\n", - "from codeflare_sdk.cluster.auth import TokenAuthentication" + "from codeflare_sdk import Cluster, ClusterConfiguration, TokenAuthentication" ] }, { @@ -43,7 +42,14 @@ "id": "bc27f84c", "metadata": {}, "source": [ - "Once again, let's start by running through the same cluster setup as before:" + "Once again, let's start by running through the same cluster setup as before:\n", + "\n", + "NOTE: The default images used by the CodeFlare SDK for creating a RayCluster resource depend on the installed Python version:\n", + "\n", + "- For Python 3.11: 'quay.io/modh/ray:2.47.1-py311-cu121'\n", + "- For Python 3.12: 'quay.io/modh/ray:2.47.1-py312-cu128'\n", + "\n", + "If you prefer to use a custom Ray image that better suits your needs, you can specify it in the image field to override the default." ] }, { @@ -53,19 +59,21 @@ "metadata": {}, "outputs": [], "source": [ - "# Create and configure our cluster object (and appwrapper)\n", + "# Create and configure our cluster object\n", + "# The SDK will try to find the name of your default local queue based on the annotation \"kueue.x-k8s.io/default-queue\": \"true\" unless you specify the local queue manually below\n", + "cluster_name = \"interactivetest\"\n", "cluster = Cluster(ClusterConfiguration(\n", - " name='interactivetest',\n", - " namespace='default',\n", + " name=cluster_name,\n", + " head_extended_resource_requests={'nvidia.com/gpu':1}, # For GPU enabled workloads set the head_extended_resource_requests and worker_extended_resource_requests\n", + " worker_extended_resource_requests={'nvidia.com/gpu':1},\n", " num_workers=2,\n", - " min_cpus=2,\n", - " max_cpus=2,\n", - " min_memory=8,\n", - " max_memory=8,\n", - " num_gpus=1,\n", - " instascale=True, #<---instascale enabled\n", - " machine_types=[\"m5.xlarge\", \"g4dn.xlarge\"]\n", - " \n", + " 
worker_cpu_requests=2,\n", + " worker_cpu_limits=2,\n", + " worker_memory_requests=8,\n", + " worker_memory_limits=8,\n", + " # image=\"\", # Optional Field \n", + " write_to_file=False, # When enabled Ray Cluster yaml files are written to /HOME/.codeflare/resources \n", + " # local_queue=\"local-queue-name\" # Specify the local queue manually\n", "))" ] }, @@ -77,7 +85,7 @@ "outputs": [], "source": [ "# Bring up the cluster\n", - "cluster.up()\n", + "cluster.apply()\n", "cluster.wait_ready()" ] }, @@ -122,6 +130,19 @@ "Now we can connect directly to our Ray cluster via the Ray python client:" ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "e5308271", + "metadata": {}, + "outputs": [], + "source": [ + "from codeflare_sdk import generate_cert\n", + "# Create required TLS cert and export the environment variables to enable TLS\n", + "generate_cert.generate_tls_cert(cluster_name, cluster.config.namespace)\n", + "generate_cert.export_env(cluster_name, cluster.config.namespace)" + ] + }, { "cell_type": "code", "execution_count": null, @@ -133,15 +154,13 @@ "assert ray_cluster_uri, \"Ray cluster needs to be started and set before proceeding\"\n", "\n", "import ray\n", - "from ray.air.config import ScalingConfig\n", "\n", "# reset the ray context in case there's already one. 
\n", "ray.shutdown()\n", "# establish connection to ray cluster\n", "\n", "#install additional libraries that will be required for model training\n", - "runtime_env = {\"pip\": [\"transformers\", \"datasets\", \"evaluate\", \"pyarrow<7.0.0\", \"accelerate\"]}\n", - "\n", + "runtime_env = {\"pip\": [\"transformers==4.41.2\", \"datasets==2.17.0\", \"accelerate==0.31.0\", \"scikit-learn==1.5.0\"]}\n", "# NOTE: This will work for in-cluster notebook servers (RHODS/ODH), but not for local machines\n", "# To see how to connect from your laptop, go to demo-notebooks/additional-demos/local_interactive.ipynb\n", "ray.init(address=ray_cluster_uri, runtime_env=runtime_env)\n", @@ -154,7 +173,7 @@ "id": "9711030b", "metadata": {}, "source": [ - "Now that we are connected (and have passed in some package requirements), let's try writing some training code for a DistilBERT transformer model via HuggingFace (using IMDB dataset):" + "Now that we are connected (and have passed in some package requirements), let's try writing some training code:" ] }, { @@ -166,66 +185,83 @@ "source": [ "@ray.remote\n", "def train_fn():\n", - " from datasets import load_dataset\n", - " import transformers\n", - " from transformers import AutoTokenizer, TrainingArguments\n", - " from transformers import AutoModelForSequenceClassification\n", + " import os\n", " import numpy as np\n", - " from datasets import load_metric\n", - " import ray\n", - " from ray import tune\n", - " from ray.train.huggingface import HuggingFaceTrainer\n", - "\n", - " dataset = load_dataset(\"imdb\")\n", - " tokenizer = AutoTokenizer.from_pretrained(\"distilbert-base-uncased\")\n", - "\n", - " def tokenize_function(examples):\n", - " return tokenizer(examples[\"text\"], padding=\"max_length\", truncation=True)\n", + " from datasets import load_dataset, load_metric\n", + " import transformers\n", + " from transformers import (\n", + " Trainer,\n", + " TrainingArguments,\n", + " AutoTokenizer,\n", + " 
AutoModelForSequenceClassification,\n", + " )\n", + " import ray.train.huggingface.transformers\n", + " from ray.train import ScalingConfig\n", + " from ray.train.torch import TorchTrainer\n", "\n", - " tokenized_datasets = dataset.map(tokenize_function, batched=True)\n", + " # When running in a multi-node cluster you will need persistent storage that is accessible across all worker nodes. \n", + " # See www.github.com/project-codeflare/codeflare-sdk/tree/main/docs/s3-compatible-storage.md for more information.\n", + " \n", + " def train_func():\n", + " # Datasets\n", + " dataset = load_dataset(\"imdb\")\n", + " tokenizer = AutoTokenizer.from_pretrained(\"distilbert-base-uncased\")\n", "\n", - " #using a fraction of dataset but you can run with the full dataset\n", - " small_train_dataset = tokenized_datasets[\"train\"].shuffle(seed=42).select(range(100))\n", - " small_eval_dataset = tokenized_datasets[\"test\"].shuffle(seed=42).select(range(100))\n", + " def tokenize_function(examples):\n", + " return tokenizer(examples[\"text\"], padding=\"max_length\", truncation=True)\n", "\n", - " print(f\"len of train {small_train_dataset} and test {small_eval_dataset}\")\n", + " small_train_dataset = (\n", + " dataset[\"train\"].select(range(100)).map(tokenize_function, batched=True)\n", + " )\n", + " small_eval_dataset = (\n", + " dataset[\"test\"].select(range(100)).map(tokenize_function, batched=True)\n", + " )\n", "\n", - " ray_train_ds = ray.data.from_huggingface(small_train_dataset)\n", - " ray_evaluation_ds = ray.data.from_huggingface(small_eval_dataset)\n", + " # Model\n", + " model = AutoModelForSequenceClassification.from_pretrained(\n", + " \"distilbert-base-uncased\", num_labels=2\n", + " )\n", "\n", - " def compute_metrics(eval_pred):\n", - " metric = load_metric(\"accuracy\")\n", - " logits, labels = eval_pred\n", - " predictions = np.argmax(logits, axis=-1)\n", - " return metric.compute(predictions=predictions, references=labels)\n", + " def 
compute_metrics(eval_pred):\n", + " metric = load_metric(\"accuracy\")\n", + " logits, labels = eval_pred\n", + " predictions = np.argmax(logits, axis=-1)\n", + " return metric.compute(predictions=predictions, references=labels)\n", "\n", - " def trainer_init_per_worker(train_dataset, eval_dataset, **config):\n", - " model = AutoModelForSequenceClassification.from_pretrained(\"distilbert-base-uncased\", num_labels=2)\n", + " # Hugging Face Trainer\n", + " training_args = TrainingArguments(\n", + " output_dir=\"test_trainer\",\n", + " evaluation_strategy=\"epoch\",\n", + " save_strategy=\"epoch\",\n", + " report_to=\"none\",\n", + " )\n", "\n", - " training_args = TrainingArguments(\"/tmp/hf_imdb/test\", eval_steps=1, disable_tqdm=True, \n", - " num_train_epochs=1, skip_memory_metrics=True,\n", - " learning_rate=2e-5,\n", - " per_device_train_batch_size=16,\n", - " per_device_eval_batch_size=16, \n", - " weight_decay=0.01,)\n", - " return transformers.Trainer(\n", + " trainer = Trainer(\n", " model=model,\n", " args=training_args,\n", - " train_dataset=train_dataset,\n", - " eval_dataset=eval_dataset,\n", - " compute_metrics=compute_metrics\n", + " train_dataset=small_train_dataset,\n", + " eval_dataset=small_eval_dataset,\n", + " compute_metrics=compute_metrics,\n", " )\n", "\n", - " scaling_config = ScalingConfig(num_workers=2, use_gpu=True) #num workers is the number of gpus\n", "\n", - " # we are using the ray native HuggingFaceTrainer, but you can swap out to use non ray Huggingface Trainer. Both have the same method signature. 
\n", - " # the ray native HFTrainer has built in support for scaling to multiple GPUs\n", - " trainer = HuggingFaceTrainer(\n", - " trainer_init_per_worker=trainer_init_per_worker,\n", - " scaling_config=scaling_config,\n", - " datasets={\"train\": ray_train_ds, \"evaluation\": ray_evaluation_ds},\n", + " callback = ray.train.huggingface.transformers.RayTrainReportCallback()\n", + " trainer.add_callback(callback)\n", + "\n", + " trainer = ray.train.huggingface.transformers.prepare_trainer(trainer)\n", + "\n", + " trainer.train()\n", + "\n", + "\n", + " ray_trainer = TorchTrainer(\n", + " train_func,\n", + " scaling_config=ScalingConfig(num_workers=3, use_gpu=True),\n", + " # Configure persistent storage that is accessible across \n", + " # all worker nodes.\n", + " # Uncomment and update the RunConfig below to include your storage details.\n", + " # run_config=ray.train.RunConfig(storage_path=\"storage path\"),\n", " )\n", - " result = trainer.fit()" + " result: ray.train.Result = ray_trainer.fit()" ] }, { @@ -292,7 +328,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.13" + "version": "3.9.18" }, "vscode": { "interpreter": { diff --git a/demo-notebooks/guided-demos/preview_nbs/2_basic_jobs.ipynb b/demo-notebooks/guided-demos/preview_nbs/2_basic_jobs.ipynb deleted file mode 100644 index 4ac4f00a..00000000 --- a/demo-notebooks/guided-demos/preview_nbs/2_basic_jobs.ipynb +++ /dev/null @@ -1,308 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "464af595", - "metadata": {}, - "source": [ - "In this third notebook, we will go over the basics of submitting jobs via the SDK, either to a Ray cluster or directly to MCAD." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b55bc3ea-4ce3-49bf-bb1f-e209de8ca47a", - "metadata": {}, - "outputs": [], - "source": [ - "# Import pieces from codeflare-sdk\n", - "from codeflare_sdk.cluster.cluster import Cluster, ClusterConfiguration\n", - "from codeflare_sdk.cluster.auth import TokenAuthentication" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "614daa0c", - "metadata": {}, - "outputs": [], - "source": [ - "# Create authentication object for user permissions\n", - "# IF unused, SDK will automatically check for default kubeconfig, then in-cluster config\n", - "# KubeConfigFileAuthentication can also be used to specify kubeconfig path manually\n", - "auth = TokenAuthentication(\n", - " token = \"XXXXX\",\n", - " server = \"XXXXX\",\n", - " skip_tls=False\n", - ")\n", - "auth.login()" - ] - }, - { - "cell_type": "markdown", - "id": "bc27f84c", - "metadata": {}, - "source": [ - "Let's start by running through the same cluster setup as before:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0f4bc870-091f-4e11-9642-cba145710159", - "metadata": {}, - "outputs": [], - "source": [ - "# Create and configure our cluster object (and appwrapper)\n", - "cluster = Cluster(ClusterConfiguration(\n", - " name='jobtest',\n", - " namespace='default',\n", - " num_workers=2,\n", - " min_cpus=1,\n", - " max_cpus=1,\n", - " min_memory=4,\n", - " max_memory=4,\n", - " num_gpus=0,\n", - " instascale=False\n", - "))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f0884bbc-c224-4ca0-98a0-02dfa09c2200", - "metadata": {}, - "outputs": [], - "source": [ - "# Bring up the cluster\n", - "cluster.up()\n", - "cluster.wait_ready()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "df71c1ed", - "metadata": {}, - "outputs": [], - "source": [ - "cluster.details()" - ] - }, - { - "cell_type": "markdown", - "id": "33663f47", - "metadata": {}, - "source": [ - "This 
time, however, we are going to use the CodeFlare SDK to submit batch jobs via TorchX, either to the Ray cluster we have just brought up, or directly to MCAD." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "c7b4f232", - "metadata": {}, - "outputs": [], - "source": [ - "from codeflare_sdk.job.jobs import DDPJobDefinition" - ] - }, - { - "cell_type": "markdown", - "id": "83d77b74", - "metadata": {}, - "source": [ - "First, let's begin by submitting to Ray, training a basic NN on the MNIST dataset:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8c2c5138", - "metadata": {}, - "outputs": [], - "source": [ - "jobdef = DDPJobDefinition(\n", - " name=\"mnisttest\",\n", - " script=\"mnist.py\",\n", - " scheduler_args={\"requirements\": \"requirements.txt\"}\n", - ")\n", - "job = jobdef.submit(cluster)" - ] - }, - { - "cell_type": "markdown", - "id": "5b9ae53a", - "metadata": {}, - "source": [ - "Now we can take a look at the status of our submitted job, as well as retrieve the full logs:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6e36c3d9", - "metadata": {}, - "outputs": [], - "source": [ - "job.status()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "834cfb5c", - "metadata": {}, - "outputs": [], - "source": [ - "job.logs()" - ] - }, - { - "cell_type": "markdown", - "id": "4067ef60", - "metadata": {}, - "source": [ - "You can also view organized logs, status, and other information directly through the Ray cluster's dashboard:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "19055243", - "metadata": {}, - "outputs": [], - "source": [ - "cluster.cluster_dashboard_uri()" - ] - }, - { - "cell_type": "markdown", - "id": "5af8cd32", - "metadata": {}, - "source": [ - "Once complete, we can bring our Ray cluster down and clean up:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5f36db0f-31f6-4373-9503-dc3c1c4c3f57", - "metadata": 
{}, - "outputs": [], - "source": [ - "cluster.down()" - ] - }, - { - "cell_type": "markdown", - "id": "31096641", - "metadata": {}, - "source": [ - "Now, an alternative option for job submission is to submit directly to MCAD, which will schedule pods to run the job with requested resources:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "496139cc", - "metadata": {}, - "outputs": [], - "source": [ - "jobdef = DDPJobDefinition(\n", - " name=\"mnistjob\",\n", - " script=\"mnist.py\",\n", - " scheduler_args={\"namespace\": \"default\"},\n", - " j=\"1x1\",\n", - " gpu=0,\n", - " cpu=1,\n", - " memMB=8000,\n", - " image=\"quay.io/project-codeflare/mnist-job-test:v0.0.1\"\n", - ")\n", - "job = jobdef.submit()" - ] - }, - { - "cell_type": "markdown", - "id": "0837e43b", - "metadata": {}, - "source": [ - "Once again, we can look at job status and logs:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3d18d42c", - "metadata": {}, - "outputs": [], - "source": [ - "job.status()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "36d7ea97", - "metadata": {}, - "outputs": [], - "source": [ - "job.logs()" - ] - }, - { - "cell_type": "markdown", - "id": "aebf376a", - "metadata": {}, - "source": [ - "This time, once the pods complete, we can clean them up alongside any other associated resources. 
The following command can also be used to delete jobs early for both Ray and MCAD submission:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ebbb0674", - "metadata": {}, - "outputs": [], - "source": [ - "job.cancel()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0d41b90e", - "metadata": {}, - "outputs": [], - "source": [ - "auth.logout()" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.13" - }, - "vscode": { - "interpreter": { - "hash": "f9f85f796d01129d0dd105a088854619f454435301f6ffec2fea96ecbd9be4ac" - } - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/demo-notebooks/guided-demos/preview_nbs/4_gpt.ipynb b/demo-notebooks/guided-demos/preview_nbs/4_gpt.ipynb deleted file mode 100644 index 455bb9aa..00000000 --- a/demo-notebooks/guided-demos/preview_nbs/4_gpt.ipynb +++ /dev/null @@ -1,228 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "id": "b6c05b69-4ce8-45ef-82d3-bacb2491bee8", - "metadata": {}, - "outputs": [], - "source": [ - "# Import pieces from codeflare-sdk\n", - "from codeflare_sdk.cluster.cluster import Cluster, ClusterConfiguration\n", - "from codeflare_sdk.cluster.auth import TokenAuthentication" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "32f99bbd-9903-4d38-a4f2-223dec684ae2", - "metadata": {}, - "outputs": [], - "source": [ - "# Create authentication object for user permissions\n", - "# IF unused, SDK will automatically check for default kubeconfig, then in-cluster config\n", - "# KubeConfigFileAuthentication can also be used to specify kubeconfig path manually\n", - "auth = 
TokenAuthentication(\n", - " token = \"XXXXX\",\n", - " server = \"XXXXX\",\n", - " skip_tls=False\n", - ")\n", - "auth.login()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3f32119a-c4ee-4163-b103-d9ca3bddbdb5", - "metadata": {}, - "outputs": [], - "source": [ - "cluster = Cluster(ClusterConfiguration(\n", - " name='gptfttest',\n", - " namespace='default',\n", - " num_workers=2,\n", - " min_cpus=2,\n", - " max_cpus=2,\n", - " min_memory=8,\n", - " max_memory=8,\n", - " num_gpus=1,\n", - " instascale=True, #<---instascale enabled\n", - " machine_types=[\"m5.xlarge\", \"g4dn.xlarge\"],\n", - "))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "107c8277-3b3b-4238-a786-a391a662fd7c", - "metadata": {}, - "outputs": [], - "source": [ - "cluster.up()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "730f66ce-adaa-4709-b9cf-22417847e059", - "metadata": {}, - "outputs": [], - "source": [ - "cluster.wait_ready()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "48fac218-2f22-428b-9228-137a4bb0e666", - "metadata": {}, - "outputs": [], - "source": [ - "cluster.details()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9ed5bd75-4230-4c7c-a9e2-0f247890e62a", - "metadata": {}, - "outputs": [], - "source": [ - "from codeflare_sdk.job.jobs import DDPJobDefinition" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "611d203a-35aa-4357-a748-1d01b022fcdb", - "metadata": {}, - "outputs": [], - "source": [ - "arg_list = [\n", - " \"--model_name_or_path\", \"gpt2\",\n", - " \"--dataset_name\", \"wikitext\",\n", - " \"--dataset_config_name\", \"wikitext-2-raw-v1\",\n", - " \"--per_device_train_batch_size\", \"2\",\n", - " \"--per_device_eval_batch_size\", \"2\",\n", - " \"--do_train\",\n", - " \"--do_eval\",\n", - " \"--output_dir\", \"/tmp/test-clm\",\n", - " \"--overwrite_output_dir\"\n", - "]" - ] - }, - { - "cell_type": "code", - 
"execution_count": null, - "id": "8ac7c34f-e227-44c2-a4b1-a57c853ac3a7", - "metadata": {}, - "outputs": [], - "source": [ - "jobdef = DDPJobDefinition(\n", - " name=\"gpttest\",\n", - " script=\"gpt_og.py\",\n", - " script_args=arg_list,\n", - " scheduler_args={\"requirements\": \"requirements_gpt.txt\"}\n", - ")\n", - "job = jobdef.submit(cluster)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1680d287-de46-45f8-b95a-02ba3c83912c", - "metadata": {}, - "outputs": [], - "source": [ - "job.status()" - ] - }, - { - "cell_type": "markdown", - "id": "d310e6a8", - "metadata": {}, - "source": [ - "Retrieve raw log output at anytime with:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d25d6198-9941-47e8-857f-9811830cc854", - "metadata": {}, - "outputs": [], - "source": [ - "job.logs()" - ] - }, - { - "cell_type": "markdown", - "id": "cb27d5e0", - "metadata": {}, - "source": [ - "View live updates for status, logs, and other information with:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d2c51953", - "metadata": {}, - "outputs": [], - "source": [ - "cluster.cluster_dashboard_uri()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b14bbde1", - "metadata": {}, - "outputs": [], - "source": [ - "job.status()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "beb1a6b9-d9b3-49b7-b036-09f1d3569b59", - "metadata": {}, - "outputs": [], - "source": [ - "cluster.down()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8398d977-db24-46d0-a7d2-b4e9197808d7", - "metadata": {}, - "outputs": [], - "source": [ - "auth.logout()" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": 
"python", - "pygments_lexer": "ipython3", - "version": "3.8.13" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/demo-notebooks/guided-demos/preview_nbs/gpt_og.py b/demo-notebooks/guided-demos/preview_nbs/gpt_og.py deleted file mode 100644 index d69e41fc..00000000 --- a/demo-notebooks/guided-demos/preview_nbs/gpt_og.py +++ /dev/null @@ -1,728 +0,0 @@ -#!/usr/bin/env python -# coding=utf-8 -# Copyright 2020 The HuggingFace Inc. team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -Fine-tuning the library models for causal language modeling (GPT, GPT-2, CTRL, ...) on a text file or a dataset. - -Here is the full list of checkpoints on the hub that can be fine-tuned by this script: -https://huggingface.co/models?filter=text-generation -""" -# You can also adapt this script on your own causal language modeling task. Pointers for this are left as comments. 
- -import subprocess - -subprocess.run(["pip", "uninstall", "protobuf"]) -subprocess.run( - [ - "pip", - "install", - "--upgrade", - "--target=/home/ray/workspace", - "-r", - "requirements.txt", - ] -) - -import logging -import math -import os -import sys -from dataclasses import dataclass, field -from itertools import chain -from typing import Optional - -import datasets -import evaluate -import torch -from datasets import load_dataset - -import transformers -from transformers import ( - CONFIG_MAPPING, - MODEL_FOR_CAUSAL_LM_MAPPING, - AutoConfig, - AutoModelForCausalLM, - AutoTokenizer, - HfArgumentParser, - Trainer, - TrainingArguments, - default_data_collator, - is_torch_tpu_available, - set_seed, -) -from transformers.testing_utils import CaptureLogger -from transformers.trainer_utils import get_last_checkpoint -from transformers.utils import check_min_version, send_example_telemetry -from transformers.utils.versions import require_version - - -# Will error if the minimal version of Transformers is not installed. Remove at your own risks. -# check_min_version("4.29.0.dev0") - -require_version( - "datasets>=1.8.0", - "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt", -) - -logger = logging.getLogger(__name__) - - -MODEL_CONFIG_CLASSES = list(MODEL_FOR_CAUSAL_LM_MAPPING.keys()) -MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) - - -@dataclass -class ModelArguments: - """ - Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch. - """ - - model_name_or_path: Optional[str] = field( - default=None, - metadata={ - "help": ( - "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch." 
- ) - }, - ) - model_type: Optional[str] = field( - default=None, - metadata={ - "help": "If training from scratch, pass a model type from the list: " - + ", ".join(MODEL_TYPES) - }, - ) - config_overrides: Optional[str] = field( - default=None, - metadata={ - "help": ( - "Override some existing default config settings when a model is trained from scratch. Example: " - "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index" - ) - }, - ) - config_name: Optional[str] = field( - default=None, - metadata={ - "help": "Pretrained config name or path if not the same as model_name" - }, - ) - tokenizer_name: Optional[str] = field( - default=None, - metadata={ - "help": "Pretrained tokenizer name or path if not the same as model_name" - }, - ) - cache_dir: Optional[str] = field( - default=None, - metadata={ - "help": "Where do you want to store the pretrained models downloaded from huggingface.co" - }, - ) - use_fast_tokenizer: bool = field( - default=True, - metadata={ - "help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not." - }, - ) - model_revision: str = field( - default="main", - metadata={ - "help": "The specific model version to use (can be a branch name, tag name or commit id)." - }, - ) - use_auth_token: bool = field( - default=False, - metadata={ - "help": ( - "Will use the token generated when running `huggingface-cli login` (necessary to use this script " - "with private models)." - ) - }, - ) - torch_dtype: Optional[str] = field( - default=None, - metadata={ - "help": ( - "Override the default `torch.dtype` and load the model under this dtype. If `auto` is passed, the " - "dtype will be automatically derived from the model's weights." 
- ), - "choices": ["auto", "bfloat16", "float16", "float32"], - }, - ) - low_cpu_mem_usage: bool = field( - default=False, - metadata={ - "help": ( - "It is an option to create the model as an empty shell, then only materialize its parameters when the pretrained weights are loaded." - "set True will benefit LLM loading time and RAM consumption." - ) - }, - ) - - def __post_init__(self): - if self.config_overrides is not None and ( - self.config_name is not None or self.model_name_or_path is not None - ): - raise ValueError( - "--config_overrides can't be used in combination with --config_name or --model_name_or_path" - ) - - -@dataclass -class DataTrainingArguments: - """ - Arguments pertaining to what data we are going to input our model for training and eval. - """ - - dataset_name: Optional[str] = field( - default=None, - metadata={"help": "The name of the dataset to use (via the datasets library)."}, - ) - dataset_config_name: Optional[str] = field( - default=None, - metadata={ - "help": "The configuration name of the dataset to use (via the datasets library)." - }, - ) - train_file: Optional[str] = field( - default=None, metadata={"help": "The input training data file (a text file)."} - ) - validation_file: Optional[str] = field( - default=None, - metadata={ - "help": "An optional input evaluation data file to evaluate the perplexity on (a text file)." - }, - ) - max_train_samples: Optional[int] = field( - default=None, - metadata={ - "help": ( - "For debugging purposes or quicker training, truncate the number of training examples to this " - "value if set." - ) - }, - ) - max_eval_samples: Optional[int] = field( - default=None, - metadata={ - "help": ( - "For debugging purposes or quicker training, truncate the number of evaluation examples to this " - "value if set." 
- ) - }, - ) - streaming: bool = field(default=False, metadata={"help": "Enable streaming mode"}) - block_size: Optional[int] = field( - default=None, - metadata={ - "help": ( - "Optional input sequence length after tokenization. " - "The training dataset will be truncated in block of this size for training. " - "Default to the model max input length for single sentence inputs (take into account special tokens)." - ) - }, - ) - overwrite_cache: bool = field( - default=False, - metadata={"help": "Overwrite the cached training and evaluation sets"}, - ) - validation_split_percentage: Optional[int] = field( - default=5, - metadata={ - "help": "The percentage of the train set used as validation set in case there's no validation split" - }, - ) - preprocessing_num_workers: Optional[int] = field( - default=None, - metadata={"help": "The number of processes to use for the preprocessing."}, - ) - keep_linebreaks: bool = field( - default=True, - metadata={"help": "Whether to keep line breaks when using TXT files or not."}, - ) - - def __post_init__(self): - if self.streaming: - require_version( - "datasets>=2.0.0", "The streaming feature requires `datasets>=2.0.0`" - ) - - if ( - self.dataset_name is None - and self.train_file is None - and self.validation_file is None - ): - raise ValueError( - "Need either a dataset name or a training/validation file." - ) - else: - if self.train_file is not None: - extension = self.train_file.split(".")[-1] - assert extension in [ - "csv", - "json", - "txt", - ], "`train_file` should be a csv, a json or a txt file." - if self.validation_file is not None: - extension = self.validation_file.split(".")[-1] - assert extension in [ - "csv", - "json", - "txt", - ], "`validation_file` should be a csv, a json or a txt file." - - -def main(): - # See all possible arguments in src/transformers/training_args.py - # or by passing the --help flag to this script. - # We now keep distinct sets of args, for a cleaner separation of concerns. 
- - parser = HfArgumentParser( - (ModelArguments, DataTrainingArguments, TrainingArguments) - ) - if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): - # If we pass only one argument to the script and it's the path to a json file, - # let's parse it to get our arguments. - model_args, data_args, training_args = parser.parse_json_file( - json_file=os.path.abspath(sys.argv[1]) - ) - else: - model_args, data_args, training_args = parser.parse_args_into_dataclasses() - - # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The - # information sent is the one passed as arguments along with your Python/PyTorch versions. - send_example_telemetry("run_clm", model_args, data_args) - - # Setup logging - logging.basicConfig( - format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", - datefmt="%m/%d/%Y %H:%M:%S", - handlers=[logging.StreamHandler(sys.stdout)], - ) - - if training_args.should_log: - # The default of training_args.log_level is passive, so we set log level at info here to have that default. - transformers.utils.logging.set_verbosity_info() - - log_level = training_args.get_process_log_level() - logger.setLevel(log_level) - datasets.utils.logging.set_verbosity(log_level) - transformers.utils.logging.set_verbosity(log_level) - transformers.utils.logging.enable_default_handler() - transformers.utils.logging.enable_explicit_format() - - # Log on each process the small summary: - logger.warning( - f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" - + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" - ) - logger.info(f"Training/evaluation parameters {training_args}") - - # Detecting last checkpoint. 
- last_checkpoint = None - if ( - os.path.isdir(training_args.output_dir) - and training_args.do_train - and not training_args.overwrite_output_dir - ): - last_checkpoint = get_last_checkpoint(training_args.output_dir) - if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: - raise ValueError( - f"Output directory ({training_args.output_dir}) already exists and is not empty. " - "Use --overwrite_output_dir to overcome." - ) - elif ( - last_checkpoint is not None and training_args.resume_from_checkpoint is None - ): - logger.info( - f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " - "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." - ) - - # Set seed before initializing model. - set_seed(training_args.seed) - - # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) - # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ - # (the dataset will be downloaded automatically from the datasets Hub). - # - # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called - # 'text' is found. You can easily tweak this behavior (see below). - # - # In distributed training, the load_dataset function guarantee that only one local process can concurrently - # download the dataset. - if data_args.dataset_name is not None: - # Downloading and loading a dataset from the hub. 
- raw_datasets = load_dataset( - data_args.dataset_name, - data_args.dataset_config_name, - cache_dir=model_args.cache_dir, - use_auth_token=True if model_args.use_auth_token else None, - streaming=data_args.streaming, - ) - if "validation" not in raw_datasets.keys(): - raw_datasets["validation"] = load_dataset( - data_args.dataset_name, - data_args.dataset_config_name, - split=f"train[:{data_args.validation_split_percentage}%]", - cache_dir=model_args.cache_dir, - use_auth_token=True if model_args.use_auth_token else None, - streaming=data_args.streaming, - ) - raw_datasets["train"] = load_dataset( - data_args.dataset_name, - data_args.dataset_config_name, - split=f"train[{data_args.validation_split_percentage}%:]", - cache_dir=model_args.cache_dir, - use_auth_token=True if model_args.use_auth_token else None, - streaming=data_args.streaming, - ) - else: - data_files = {} - dataset_args = {} - if data_args.train_file is not None: - data_files["train"] = data_args.train_file - if data_args.validation_file is not None: - data_files["validation"] = data_args.validation_file - extension = ( - data_args.train_file.split(".")[-1] - if data_args.train_file is not None - else data_args.validation_file.split(".")[-1] - ) - if extension == "txt": - extension = "text" - dataset_args["keep_linebreaks"] = data_args.keep_linebreaks - raw_datasets = load_dataset( - extension, - data_files=data_files, - cache_dir=model_args.cache_dir, - use_auth_token=True if model_args.use_auth_token else None, - **dataset_args, - ) - # If no validation data is there, validation_split_percentage will be used to divide the dataset. 
- if "validation" not in raw_datasets.keys(): - raw_datasets["validation"] = load_dataset( - extension, - data_files=data_files, - split=f"train[:{data_args.validation_split_percentage}%]", - cache_dir=model_args.cache_dir, - use_auth_token=True if model_args.use_auth_token else None, - **dataset_args, - ) - raw_datasets["train"] = load_dataset( - extension, - data_files=data_files, - split=f"train[{data_args.validation_split_percentage}%:]", - cache_dir=model_args.cache_dir, - use_auth_token=True if model_args.use_auth_token else None, - **dataset_args, - ) - - # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at - # https://huggingface.co/docs/datasets/loading_datasets.html. - - # Load pretrained model and tokenizer - # - # Distributed training: - # The .from_pretrained methods guarantee that only one local process can concurrently - # download model & vocab. - - config_kwargs = { - "cache_dir": model_args.cache_dir, - "revision": model_args.model_revision, - "use_auth_token": True if model_args.use_auth_token else None, - } - if model_args.config_name: - config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs) - elif model_args.model_name_or_path: - config = AutoConfig.from_pretrained( - model_args.model_name_or_path, **config_kwargs - ) - else: - config = CONFIG_MAPPING[model_args.model_type]() - logger.warning("You are instantiating a new config instance from scratch.") - if model_args.config_overrides is not None: - logger.info(f"Overriding config: {model_args.config_overrides}") - config.update_from_string(model_args.config_overrides) - logger.info(f"New config: {config}") - - tokenizer_kwargs = { - "cache_dir": model_args.cache_dir, - "use_fast": model_args.use_fast_tokenizer, - "revision": model_args.model_revision, - "use_auth_token": True if model_args.use_auth_token else None, - } - if model_args.tokenizer_name: - tokenizer = AutoTokenizer.from_pretrained( - 
model_args.tokenizer_name, **tokenizer_kwargs - ) - elif model_args.model_name_or_path: - tokenizer = AutoTokenizer.from_pretrained( - model_args.model_name_or_path, **tokenizer_kwargs - ) - else: - raise ValueError( - "You are instantiating a new tokenizer from scratch. This is not supported by this script." - "You can do it from another script, save it, and load it from here, using --tokenizer_name." - ) - - if model_args.model_name_or_path: - torch_dtype = ( - model_args.torch_dtype - if model_args.torch_dtype in ["auto", None] - else getattr(torch, model_args.torch_dtype) - ) - model = AutoModelForCausalLM.from_pretrained( - model_args.model_name_or_path, - from_tf=bool(".ckpt" in model_args.model_name_or_path), - config=config, - cache_dir=model_args.cache_dir, - revision=model_args.model_revision, - use_auth_token=True if model_args.use_auth_token else None, - torch_dtype=torch_dtype, - low_cpu_mem_usage=model_args.low_cpu_mem_usage, - ) - else: - model = AutoModelForCausalLM.from_config(config) - n_params = sum({p.data_ptr(): p.numel() for p in model.parameters()}.values()) - logger.info( - f"Training new model from scratch - Total size={n_params/2**20:.2f}M params" - ) - - # We resize the embeddings only when necessary to avoid index errors. If you are creating a model from scratch - # on a small vocab and want a smaller embedding size, remove this test. - embedding_size = model.get_input_embeddings().weight.shape[0] - if len(tokenizer) > embedding_size: - model.resize_token_embeddings(len(tokenizer)) - - # Preprocessing the datasets. - # First we tokenize all the texts. 
- if training_args.do_train: - column_names = list(raw_datasets["train"].features) - else: - column_names = list(raw_datasets["validation"].features) - text_column_name = "text" if "text" in column_names else column_names[0] - - # since this will be pickled to avoid _LazyModule error in Hasher force logger loading before tokenize_function - tok_logger = transformers.utils.logging.get_logger( - "transformers.tokenization_utils_base" - ) - - def tokenize_function(examples): - with CaptureLogger(tok_logger) as cl: - output = tokenizer(examples[text_column_name]) - # clm input could be much much longer than block_size - if "Token indices sequence length is longer than the" in cl.out: - tok_logger.warning( - "^^^^^^^^^^^^^^^^ Please ignore the warning above - this long input will be chunked into smaller bits" - " before being passed to the model." - ) - return output - - with training_args.main_process_first(desc="dataset map tokenization"): - if not data_args.streaming: - tokenized_datasets = raw_datasets.map( - tokenize_function, - batched=True, - num_proc=data_args.preprocessing_num_workers, - remove_columns=column_names, - load_from_cache_file=not data_args.overwrite_cache, - desc="Running tokenizer on dataset", - ) - else: - tokenized_datasets = raw_datasets.map( - tokenize_function, - batched=True, - remove_columns=column_names, - ) - - if data_args.block_size is None: - block_size = tokenizer.model_max_length - if block_size > 1024: - logger.warning( - "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value" - " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can" - " override this default with `--block_size xxx`." - ) - block_size = 1024 - else: - if data_args.block_size > tokenizer.model_max_length: - logger.warning( - f"The block_size passed ({data_args.block_size}) is larger than the maximum length for the model" - f"({tokenizer.model_max_length}). 
Using block_size={tokenizer.model_max_length}." - ) - block_size = min(data_args.block_size, tokenizer.model_max_length) - - # Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size. - def group_texts(examples): - # Concatenate all texts. - concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()} - total_length = len(concatenated_examples[list(examples.keys())[0]]) - # We drop the small remainder, we could add padding if the model supported it instead of this drop, you can - # customize this part to your needs. - if total_length >= block_size: - total_length = (total_length // block_size) * block_size - # Split by chunks of max_len. - result = { - k: [t[i : i + block_size] for i in range(0, total_length, block_size)] - for k, t in concatenated_examples.items() - } - result["labels"] = result["input_ids"].copy() - return result - - # Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a remainder - # for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value might be slower - # to preprocess. - # - # To speed up this part, we use multiprocessing. 
See the documentation of the map method for more information: - # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map - - with training_args.main_process_first(desc="grouping texts together"): - if not data_args.streaming: - lm_datasets = tokenized_datasets.map( - group_texts, - batched=True, - num_proc=data_args.preprocessing_num_workers, - load_from_cache_file=not data_args.overwrite_cache, - desc=f"Grouping texts in chunks of {block_size}", - ) - else: - lm_datasets = tokenized_datasets.map( - group_texts, - batched=True, - ) - - if training_args.do_train: - if "train" not in tokenized_datasets: - raise ValueError("--do_train requires a train dataset") - train_dataset = lm_datasets["train"] - if data_args.max_train_samples is not None: - max_train_samples = min(len(train_dataset), data_args.max_train_samples) - train_dataset = train_dataset.select(range(max_train_samples)) - - if training_args.do_eval: - if "validation" not in tokenized_datasets: - raise ValueError("--do_eval requires a validation dataset") - eval_dataset = lm_datasets["validation"] - if data_args.max_eval_samples is not None: - max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples) - eval_dataset = eval_dataset.select(range(max_eval_samples)) - - def preprocess_logits_for_metrics(logits, labels): - if isinstance(logits, tuple): - # Depending on the model and config, logits may contain extra tensors, - # like past_key_values, but logits always come first - logits = logits[0] - return logits.argmax(dim=-1) - - metric = evaluate.load("accuracy") - - def compute_metrics(eval_preds): - preds, labels = eval_preds - # preds have the same shape as the labels, after the argmax(-1) has been calculated - # by preprocess_logits_for_metrics but we need to shift the labels - labels = labels[:, 1:].reshape(-1) - preds = preds[:, :-1].reshape(-1) - return metric.compute(predictions=preds, references=labels) - - # Initialize our Trainer - trainer = 
Trainer( - model=model, - args=training_args, - train_dataset=train_dataset if training_args.do_train else None, - eval_dataset=eval_dataset if training_args.do_eval else None, - tokenizer=tokenizer, - # Data collator will default to DataCollatorWithPadding, so we change it. - data_collator=default_data_collator, - compute_metrics=compute_metrics - if training_args.do_eval and not is_torch_tpu_available() - else None, - preprocess_logits_for_metrics=preprocess_logits_for_metrics - if training_args.do_eval and not is_torch_tpu_available() - else None, - ) - - # Training - if training_args.do_train: - checkpoint = None - if training_args.resume_from_checkpoint is not None: - checkpoint = training_args.resume_from_checkpoint - elif last_checkpoint is not None: - checkpoint = last_checkpoint - train_result = trainer.train(resume_from_checkpoint=checkpoint) - trainer.save_model() # Saves the tokenizer too for easy upload - - metrics = train_result.metrics - - max_train_samples = ( - data_args.max_train_samples - if data_args.max_train_samples is not None - else len(train_dataset) - ) - metrics["train_samples"] = min(max_train_samples, len(train_dataset)) - - trainer.log_metrics("train", metrics) - trainer.save_metrics("train", metrics) - trainer.save_state() - - # Evaluation - if training_args.do_eval: - logger.info("*** Evaluate ***") - - metrics = trainer.evaluate() - - max_eval_samples = ( - data_args.max_eval_samples - if data_args.max_eval_samples is not None - else len(eval_dataset) - ) - metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset)) - try: - perplexity = math.exp(metrics["eval_loss"]) - except OverflowError: - perplexity = float("inf") - metrics["perplexity"] = perplexity - - trainer.log_metrics("eval", metrics) - trainer.save_metrics("eval", metrics) - - kwargs = { - "finetuned_from": model_args.model_name_or_path, - "tasks": "text-generation", - } - if data_args.dataset_name is not None: - kwargs["dataset_tags"] = data_args.dataset_name - 
if data_args.dataset_config_name is not None: - kwargs["dataset_args"] = data_args.dataset_config_name - kwargs[ - "dataset" - ] = f"{data_args.dataset_name} {data_args.dataset_config_name}" - else: - kwargs["dataset"] = data_args.dataset_name - - if training_args.push_to_hub: - trainer.push_to_hub(**kwargs) - else: - trainer.create_model_card(**kwargs) - - -def _mp_fn(index): - # For xla_spawn (TPUs) - main() - - -if __name__ == "__main__": - main() diff --git a/demo-notebooks/guided-demos/preview_nbs/requirements.txt b/demo-notebooks/guided-demos/preview_nbs/requirements.txt index 7266b064..c748bec2 100644 --- a/demo-notebooks/guided-demos/preview_nbs/requirements.txt +++ b/demo-notebooks/guided-demos/preview_nbs/requirements.txt @@ -1,4 +1,4 @@ -pytorch_lightning==1.5.10 +pytorch_lightning==2.4.0 ray_lightning -torchmetrics==0.9.1 -torchvision==0.12.0 +torchmetrics==1.6.0 +torchvision==0.20.1 diff --git a/demo-notebooks/guided-demos/preview_nbs/requirements_gpt.txt b/demo-notebooks/guided-demos/preview_nbs/requirements_gpt.txt deleted file mode 100644 index bd6c4f52..00000000 --- a/demo-notebooks/guided-demos/preview_nbs/requirements_gpt.txt +++ /dev/null @@ -1,8 +0,0 @@ -accelerate >= 0.12.0 -torch >= 1.3 -datasets >= 1.8.0 -sentencepiece != 0.1.92 -evaluate -scikit-learn -transformers==4.28.1 -protobuf<=3.20.1,>=3.8.0 diff --git a/demo-notebooks/guided-demos/requirements.txt b/demo-notebooks/guided-demos/requirements.txt index 7266b064..c748bec2 100644 --- a/demo-notebooks/guided-demos/requirements.txt +++ b/demo-notebooks/guided-demos/requirements.txt @@ -1,4 +1,4 @@ -pytorch_lightning==1.5.10 +pytorch_lightning==2.4.0 ray_lightning -torchmetrics==0.9.1 -torchvision==0.12.0 +torchmetrics==1.6.0 +torchvision==0.20.1 diff --git a/demo-notebooks/guided-demos/requirements_gpt.txt b/demo-notebooks/guided-demos/requirements_gpt.txt deleted file mode 100644 index bd6c4f52..00000000 --- a/demo-notebooks/guided-demos/requirements_gpt.txt +++ /dev/null @@ -1,8 +0,0 @@ 
-accelerate >= 0.12.0 -torch >= 1.3 -datasets >= 1.8.0 -sentencepiece != 0.1.92 -evaluate -scikit-learn -transformers==4.28.1 -protobuf<=3.20.1,>=3.8.0 diff --git a/docs/cluster/auth.html b/docs/cluster/auth.html deleted file mode 100644 index 0389fbf7..00000000 --- a/docs/cluster/auth.html +++ /dev/null @@ -1,723 +0,0 @@ - - - - - - -codeflare_sdk.cluster.auth API documentation - - - - - - - - - - - -
-
-
-

Module codeflare_sdk.cluster.auth

-
-
-

The auth sub-module contains the definitions for the Authentication objects, which represent -the methods by which a user can authenticate to their cluster(s). The abstract class, Authentication, -contains two required methods login() and logout(). Users can use one of the existing concrete classes to -authenticate to their cluster or add their own custom concrete classes here.

-
- -Expand source code - -
# Copyright 2022 IBM, Red Hat
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-The auth sub-module contains the definitions for the Authentication objects, which represent
-the methods by which a user can authenticate to their cluster(s). The abstract class, `Authentication`,
-contains two required methods `login()` and `logout()`. Users can use one of the existing concrete classes to
-authenticate to their cluster or add their own custom concrete classes here.
-"""
-
-import abc
-from kubernetes import client, config
-import os
-import urllib3
-from ..utils.kube_api_helpers import _kube_api_error_handling
-
-global api_client
-api_client = None
-global config_path
-config_path = None
-
-
-class Authentication(metaclass=abc.ABCMeta):
-    """
-    An abstract class that defines the necessary methods for authenticating to a remote environment.
-    Specifically, this class defines the need for a `login()` and a `logout()` function.
-    """
-
-    def login(self):
-        """
-        Method for logging in to a remote cluster.
-        """
-        pass
-
-    def logout(self):
-        """
-        Method for logging out of the remote cluster.
-        """
-        pass
-
-
-class KubeConfiguration(metaclass=abc.ABCMeta):
-    """
-    An abstract class that defines the method for loading a user defined config file using the `load_kube_config()` function
-    """
-
-    def load_kube_config(self):
-        """
-        Method for setting your Kubernetes configuration to a certain file
-        """
-        pass
-
-    def logout(self):
-        """
-        Method for logging out of the remote cluster
-        """
-        pass
-
-
-class TokenAuthentication(Authentication):
-    """
-    `TokenAuthentication` is a subclass of `Authentication`. It can be used to authenticate to a Kubernetes
-    cluster when the user has an API token and the API server address.
-    """
-
-    def __init__(
-        self,
-        token: str,
-        server: str,
-        skip_tls: bool = False,
-        ca_cert_path: str = None,
-    ):
-        """
-        Initialize a TokenAuthentication object that requires a value for `token`, the API Token
-        and `server`, the API server address for authenticating to a Kubernetes cluster.
-        """
-
-        self.token = token
-        self.server = server
-        self.skip_tls = skip_tls
-        self.ca_cert_path = ca_cert_path
-
-    def login(self) -> str:
-        """
-        This function is used to log in to a Kubernetes cluster using the user's API token and API server address.
-        Depending on the cluster, a user can choose to login in with `--insecure-skip-tls-verify` by setting `skip_tls`
-        to `True` or `--certificate-authority` by setting `skip_tls` to False and providing a path to a ca bundle with `ca_cert_path`.
-        """
-        global config_path
-        global api_client
-        try:
-            configuration = client.Configuration()
-            configuration.api_key_prefix["authorization"] = "Bearer"
-            configuration.host = self.server
-            configuration.api_key["authorization"] = self.token
-            if self.skip_tls == False and self.ca_cert_path == None:
-                configuration.verify_ssl = True
-            elif self.skip_tls == False:
-                configuration.ssl_ca_cert = self.ca_cert_path
-            else:
-                urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
-                print("Insecure request warnings have been disabled")
-                configuration.verify_ssl = False
-
-            api_client = client.ApiClient(configuration)
-            client.AuthenticationApi(api_client).get_api_group()
-            config_path = None
-            return "Logged into %s" % self.server
-        except client.ApiException:  # pragma: no cover
-            api_client = None
-            print("Authentication Error please provide the correct token + server")
-
-    def logout(self) -> str:
-        """
-        This function is used to logout of a Kubernetes cluster.
-        """
-        global config_path
-        config_path = None
-        global api_client
-        api_client = None
-        return "Successfully logged out of %s" % self.server
-
-
-class KubeConfigFileAuthentication(KubeConfiguration):
-    """
-    A class that defines the necessary methods for passing a user's own Kubernetes config file.
-    Specifically this class defines the `load_kube_config()` and `config_check()` functions.
-    """
-
-    def __init__(self, kube_config_path: str = None):
-        self.kube_config_path = kube_config_path
-
-    def load_kube_config(self):
-        """
-        Function for loading a user's own predefined Kubernetes config file.
-        """
-        global config_path
-        global api_client
-        try:
-            if self.kube_config_path == None:
-                return "Please specify a config file path"
-            config_path = self.kube_config_path
-            api_client = None
-            config.load_kube_config(config_path)
-            response = "Loaded user config file at path %s" % self.kube_config_path
-        except config.ConfigException:  # pragma: no cover
-            config_path = None
-            raise Exception("Please specify a config file path")
-        return response
-
-
-def config_check() -> str:
-    """
-    Function for loading the config file at the default config location ~/.kube/config if the user has not
-    specified their own config file or has logged in with their token and server.
-    """
-    global config_path
-    global api_client
-    home_directory = os.path.expanduser("~")
-    if config_path == None and api_client == None:
-        if os.path.isfile("%s/.kube/config" % home_directory):
-            try:
-                config.load_kube_config()
-            except Exception as e:  # pragma: no cover
-                _kube_api_error_handling(e)
-        elif "KUBERNETES_PORT" in os.environ:
-            try:
-                config.load_incluster_config()
-            except Exception as e:  # pragma: no cover
-                _kube_api_error_handling(e)
-        else:
-            raise PermissionError(
-                "Action not permitted, have you put in correct/up-to-date auth credentials?"
-            )
-
-    if config_path != None and api_client == None:
-        return config_path
-
-
-def api_config_handler() -> str:
-    """
-    This function is used to load the api client if the user has logged in
-    """
-    if api_client != None and config_path == None:
-        return api_client
-    else:
-        return None
-
-
-
-
-
-
-
-

Functions

-
-
-def api_config_handler() ‑> str -
-
-

This function is used to load the api client if the user has logged in

-
- -Expand source code - -
def api_config_handler() -> str:
-    """
-    This function is used to load the api client if the user has logged in
-    """
-    if api_client != None and config_path == None:
-        return api_client
-    else:
-        return None
-
-
-
-def config_check() ‑> str -
-
-

Function for loading the config file at the default config location ~/.kube/config if the user has not -specified their own config file or has logged in with their token and server.

-
- -Expand source code - -
def config_check() -> str:
-    """
-    Function for loading the config file at the default config location ~/.kube/config if the user has not
-    specified their own config file or has logged in with their token and server.
-    """
-    global config_path
-    global api_client
-    home_directory = os.path.expanduser("~")
-    if config_path == None and api_client == None:
-        if os.path.isfile("%s/.kube/config" % home_directory):
-            try:
-                config.load_kube_config()
-            except Exception as e:  # pragma: no cover
-                _kube_api_error_handling(e)
-        elif "KUBERNETES_PORT" in os.environ:
-            try:
-                config.load_incluster_config()
-            except Exception as e:  # pragma: no cover
-                _kube_api_error_handling(e)
-        else:
-            raise PermissionError(
-                "Action not permitted, have you put in correct/up-to-date auth credentials?"
-            )
-
-    if config_path != None and api_client == None:
-        return config_path
-
-
-
-
-
-

Classes

-
-
-class Authentication -
-
-

An abstract class that defines the necessary methods for authenticating to a remote environment. -Specifically, this class defines the need for a login() and a logout() function.

-
- -Expand source code - -
class Authentication(metaclass=abc.ABCMeta):
-    """
-    An abstract class that defines the necessary methods for authenticating to a remote environment.
-    Specifically, this class defines the need for a `login()` and a `logout()` function.
-    """
-
-    def login(self):
-        """
-        Method for logging in to a remote cluster.
-        """
-        pass
-
-    def logout(self):
-        """
-        Method for logging out of the remote cluster.
-        """
-        pass
-
-

Subclasses

- -

Methods

-
-
-def login(self) -
-
-

Method for logging in to a remote cluster.

-
- -Expand source code - -
def login(self):
-    """
-    Method for logging in to a remote cluster.
-    """
-    pass
-
-
-
-def logout(self) -
-
-

Method for logging out of the remote cluster.

-
- -Expand source code - -
def logout(self):
-    """
-    Method for logging out of the remote cluster.
-    """
-    pass
-
-
-
-
-
-class KubeConfigFileAuthentication -(kube_config_path: str = None) -
-
-

A class that defines the necessary methods for passing a user's own Kubernetes config file. -Specifically this class defines the load_kube_config() and config_check() functions.

-
- -Expand source code - -
class KubeConfigFileAuthentication(KubeConfiguration):
-    """
-    A class that defines the necessary methods for passing a user's own Kubernetes config file.
-    Specifically this class defines the `load_kube_config()` and `config_check()` functions.
-    """
-
-    def __init__(self, kube_config_path: str = None):
-        self.kube_config_path = kube_config_path
-
-    def load_kube_config(self):
-        """
-        Function for loading a user's own predefined Kubernetes config file.
-        """
-        global config_path
-        global api_client
-        try:
-            if self.kube_config_path == None:
-                return "Please specify a config file path"
-            config_path = self.kube_config_path
-            api_client = None
-            config.load_kube_config(config_path)
-            response = "Loaded user config file at path %s" % self.kube_config_path
-        except config.ConfigException:  # pragma: no cover
-            config_path = None
-            raise Exception("Please specify a config file path")
-        return response
-
-

Ancestors

- -

Methods

-
-
-def load_kube_config(self) -
-
-

Function for loading a user's own predefined Kubernetes config file.

-
- -Expand source code - -
def load_kube_config(self):
-    """
-    Function for loading a user's own predefined Kubernetes config file.
-    """
-    global config_path
-    global api_client
-    try:
-        if self.kube_config_path == None:
-            return "Please specify a config file path"
-        config_path = self.kube_config_path
-        api_client = None
-        config.load_kube_config(config_path)
-        response = "Loaded user config file at path %s" % self.kube_config_path
-    except config.ConfigException:  # pragma: no cover
-        config_path = None
-        raise Exception("Please specify a config file path")
-    return response
-
-
-
-

Inherited members

- -
-
-class KubeConfiguration -
-
-

An abstract class that defines the method for loading a user defined config file using the load_kube_config() function

-
- -Expand source code - -
class KubeConfiguration(metaclass=abc.ABCMeta):
-    """
-    An abstract class that defines the method for loading a user defined config file using the `load_kube_config()` function
-    """
-
-    def load_kube_config(self):
-        """
-        Method for setting your Kubernetes configuration to a certain file
-        """
-        pass
-
-    def logout(self):
-        """
-        Method for logging out of the remote cluster
-        """
-        pass
-
-

Subclasses

- -

Methods

-
-
-def load_kube_config(self) -
-
-

Method for setting your Kubernetes configuration to a certain file

-
- -Expand source code - -
def load_kube_config(self):
-    """
-    Method for setting your Kubernetes configuration to a certain file
-    """
-    pass
-
-
-
-def logout(self) -
-
-

Method for logging out of the remote cluster

-
- -Expand source code - -
def logout(self):
-    """
-    Method for logging out of the remote cluster
-    """
-    pass
-
-
-
-
-
-class TokenAuthentication -(token: str, server: str, skip_tls: bool = False, ca_cert_path: str = None) -
-
-

TokenAuthentication is a subclass of Authentication. It can be used to authenticate to a Kubernetes -cluster when the user has an API token and the API server address.

-

Initialize a TokenAuthentication object that requires a value for token, the API Token -and server, the API server address for authenticating to a Kubernetes cluster.

-
- -Expand source code - -
class TokenAuthentication(Authentication):
-    """
-    `TokenAuthentication` is a subclass of `Authentication`. It can be used to authenticate to a Kubernetes
-    cluster when the user has an API token and the API server address.
-    """
-
-    def __init__(
-        self,
-        token: str,
-        server: str,
-        skip_tls: bool = False,
-        ca_cert_path: str = None,
-    ):
-        """
-        Initialize a TokenAuthentication object that requires a value for `token`, the API Token
-        and `server`, the API server address for authenticating to a Kubernetes cluster.
-        """
-
-        self.token = token
-        self.server = server
-        self.skip_tls = skip_tls
-        self.ca_cert_path = ca_cert_path
-
-    def login(self) -> str:
-        """
-        This function is used to log in to a Kubernetes cluster using the user's API token and API server address.
-        Depending on the cluster, a user can choose to login in with `--insecure-skip-tls-verify` by setting `skip_tls`
-        to `True` or `--certificate-authority` by setting `skip_tls` to False and providing a path to a ca bundle with `ca_cert_path`.
-        """
-        global config_path
-        global api_client
-        try:
-            configuration = client.Configuration()
-            configuration.api_key_prefix["authorization"] = "Bearer"
-            configuration.host = self.server
-            configuration.api_key["authorization"] = self.token
-            if self.skip_tls == False and self.ca_cert_path == None:
-                configuration.verify_ssl = True
-            elif self.skip_tls == False:
-                configuration.ssl_ca_cert = self.ca_cert_path
-            else:
-                urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
-                print("Insecure request warnings have been disabled")
-                configuration.verify_ssl = False
-
-            api_client = client.ApiClient(configuration)
-            client.AuthenticationApi(api_client).get_api_group()
-            config_path = None
-            return "Logged into %s" % self.server
-        except client.ApiException:  # pragma: no cover
-            api_client = None
-            print("Authentication Error please provide the correct token + server")
-
-    def logout(self) -> str:
-        """
-        This function is used to logout of a Kubernetes cluster.
-        """
-        global config_path
-        config_path = None
-        global api_client
-        api_client = None
-        return "Successfully logged out of %s" % self.server
-
-

Ancestors

- -

Methods

-
-
-def login(self) ‑> str -
-
-

This function is used to log in to a Kubernetes cluster using the user's API token and API server address. -Depending on the cluster, a user can choose to login in with --insecure-skip-tls-verify by setting skip_tls -to True or --certificate-authority by setting skip_tls to False and providing a path to a ca bundle with ca_cert_path.

-
- -Expand source code - -
def login(self) -> str:
-    """
-    This function is used to log in to a Kubernetes cluster using the user's API token and API server address.
-    Depending on the cluster, a user can choose to login in with `--insecure-skip-tls-verify` by setting `skip_tls`
-    to `True` or `--certificate-authority` by setting `skip_tls` to False and providing a path to a ca bundle with `ca_cert_path`.
-    """
-    global config_path
-    global api_client
-    try:
-        configuration = client.Configuration()
-        configuration.api_key_prefix["authorization"] = "Bearer"
-        configuration.host = self.server
-        configuration.api_key["authorization"] = self.token
-        if self.skip_tls == False and self.ca_cert_path == None:
-            configuration.verify_ssl = True
-        elif self.skip_tls == False:
-            configuration.ssl_ca_cert = self.ca_cert_path
-        else:
-            urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
-            print("Insecure request warnings have been disabled")
-            configuration.verify_ssl = False
-
-        api_client = client.ApiClient(configuration)
-        client.AuthenticationApi(api_client).get_api_group()
-        config_path = None
-        return "Logged into %s" % self.server
-    except client.ApiException:  # pragma: no cover
-        api_client = None
-        print("Authentication Error please provide the correct token + server")
-
-
-
-def logout(self) ‑> str -
-
-

This function is used to logout of a Kubernetes cluster.

-
- -Expand source code - -
def logout(self) -> str:
-    """
-    This function is used to logout of a Kubernetes cluster.
-    """
-    global config_path
-    config_path = None
-    global api_client
-    api_client = None
-    return "Successfully logged out of %s" % self.server
-
-
-
-
-
-
-
- -
- - - diff --git a/docs/cluster/awload.html b/docs/cluster/awload.html deleted file mode 100644 index 57b407e8..00000000 --- a/docs/cluster/awload.html +++ /dev/null @@ -1,328 +0,0 @@ - - - - - - -codeflare_sdk.cluster.awload API documentation - - - - - - - - - - - -
-
-
-

Module codeflare_sdk.cluster.awload

-
-
-

The awload sub-module contains the definition of the AWManager object, which handles -submission and deletion of existing AppWrappers from a user's file system.

-
- -Expand source code - -
# Copyright 2022 IBM, Red Hat
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-The awload sub-module contains the definition of the AWManager object, which handles
-submission and deletion of existing AppWrappers from a user's file system.
-"""
-
-from os.path import isfile
-import errno
-import os
-import yaml
-
-from kubernetes import client, config
-from ..utils.kube_api_helpers import _kube_api_error_handling
-from .auth import config_check, api_config_handler
-
-
-class AWManager:
-    """
-    An object for submitting and removing existing AppWrapper yamls
-    to be added to the MCAD queue.
-    """
-
-    def __init__(self, filename: str) -> None:
-        """
-        Create the AppWrapper Manager object by passing in an
-        AppWrapper yaml file
-        """
-        if not isfile(filename):
-            raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), filename)
-        self.filename = filename
-        try:
-            with open(self.filename) as f:
-                self.awyaml = yaml.load(f, Loader=yaml.FullLoader)
-            assert self.awyaml["kind"] == "AppWrapper"
-            self.name = self.awyaml["metadata"]["name"]
-            self.namespace = self.awyaml["metadata"]["namespace"]
-        except:
-            raise ValueError(
-                f"{filename } is not a correctly formatted AppWrapper yaml"
-            )
-        self.submitted = False
-
-    def submit(self) -> None:
-        """
-        Attempts to create the AppWrapper custom resource using the yaml file
-        """
-        try:
-            config_check()
-            api_instance = client.CustomObjectsApi(api_config_handler())
-            api_instance.create_namespaced_custom_object(
-                group="workload.codeflare.dev",
-                version="v1beta1",
-                namespace=self.namespace,
-                plural="appwrappers",
-                body=self.awyaml,
-            )
-        except Exception as e:
-            return _kube_api_error_handling(e)
-
-        self.submitted = True
-        print(f"AppWrapper {self.filename} submitted!")
-
-    def remove(self) -> None:
-        """
-        Attempts to delete the AppWrapper custom resource matching the name in the yaml,
-        if submitted by this manager.
-        """
-        if not self.submitted:
-            print("AppWrapper not submitted by this manager yet, nothing to remove")
-            return
-
-        try:
-            config_check()
-            api_instance = client.CustomObjectsApi(api_config_handler())
-            api_instance.delete_namespaced_custom_object(
-                group="workload.codeflare.dev",
-                version="v1beta1",
-                namespace=self.namespace,
-                plural="appwrappers",
-                name=self.name,
-            )
-        except Exception as e:
-            return _kube_api_error_handling(e)
-
-        self.submitted = False
-        print(f"AppWrapper {self.name} removed!")
-
-
-
-
-
-
-
-
-
-

Classes

-
-
-class AWManager -(filename: str) -
-
-

An object for submitting and removing existing AppWrapper yamls -to be added to the MCAD queue.

-

Create the AppWrapper Manager object by passing in an -AppWrapper yaml file

-
- -Expand source code - -
class AWManager:
-    """
-    An object for submitting and removing existing AppWrapper yamls
-    to be added to the MCAD queue.
-    """
-
-    def __init__(self, filename: str) -> None:
-        """
-        Create the AppWrapper Manager object by passing in an
-        AppWrapper yaml file
-        """
-        if not isfile(filename):
-            raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), filename)
-        self.filename = filename
-        try:
-            with open(self.filename) as f:
-                self.awyaml = yaml.load(f, Loader=yaml.FullLoader)
-            assert self.awyaml["kind"] == "AppWrapper"
-            self.name = self.awyaml["metadata"]["name"]
-            self.namespace = self.awyaml["metadata"]["namespace"]
-        except:
-            raise ValueError(
-                f"{filename } is not a correctly formatted AppWrapper yaml"
-            )
-        self.submitted = False
-
-    def submit(self) -> None:
-        """
-        Attempts to create the AppWrapper custom resource using the yaml file
-        """
-        try:
-            config_check()
-            api_instance = client.CustomObjectsApi(api_config_handler())
-            api_instance.create_namespaced_custom_object(
-                group="workload.codeflare.dev",
-                version="v1beta1",
-                namespace=self.namespace,
-                plural="appwrappers",
-                body=self.awyaml,
-            )
-        except Exception as e:
-            return _kube_api_error_handling(e)
-
-        self.submitted = True
-        print(f"AppWrapper {self.filename} submitted!")
-
-    def remove(self) -> None:
-        """
-        Attempts to delete the AppWrapper custom resource matching the name in the yaml,
-        if submitted by this manager.
-        """
-        if not self.submitted:
-            print("AppWrapper not submitted by this manager yet, nothing to remove")
-            return
-
-        try:
-            config_check()
-            api_instance = client.CustomObjectsApi(api_config_handler())
-            api_instance.delete_namespaced_custom_object(
-                group="workload.codeflare.dev",
-                version="v1beta1",
-                namespace=self.namespace,
-                plural="appwrappers",
-                name=self.name,
-            )
-        except Exception as e:
-            return _kube_api_error_handling(e)
-
-        self.submitted = False
-        print(f"AppWrapper {self.name} removed!")
-
-

Methods

-
-
-def remove(self) ‑> None -
-
-

Attempts to delete the AppWrapper custom resource matching the name in the yaml, -if submitted by this manager.

-
- -Expand source code - -
def remove(self) -> None:
-    """
-    Attempts to delete the AppWrapper custom resource matching the name in the yaml,
-    if submitted by this manager.
-    """
-    if not self.submitted:
-        print("AppWrapper not submitted by this manager yet, nothing to remove")
-        return
-
-    try:
-        config_check()
-        api_instance = client.CustomObjectsApi(api_config_handler())
-        api_instance.delete_namespaced_custom_object(
-            group="workload.codeflare.dev",
-            version="v1beta1",
-            namespace=self.namespace,
-            plural="appwrappers",
-            name=self.name,
-        )
-    except Exception as e:
-        return _kube_api_error_handling(e)
-
-    self.submitted = False
-    print(f"AppWrapper {self.name} removed!")
-
-
-
-def submit(self) ‑> None -
-
-

Attempts to create the AppWrapper custom resource using the yaml file

-
- -Expand source code - -
def submit(self) -> None:
-    """
-    Attempts to create the AppWrapper custom resource using the yaml file
-    """
-    try:
-        config_check()
-        api_instance = client.CustomObjectsApi(api_config_handler())
-        api_instance.create_namespaced_custom_object(
-            group="workload.codeflare.dev",
-            version="v1beta1",
-            namespace=self.namespace,
-            plural="appwrappers",
-            body=self.awyaml,
-        )
-    except Exception as e:
-        return _kube_api_error_handling(e)
-
-    self.submitted = True
-    print(f"AppWrapper {self.filename} submitted!")
-
-
-
-
-
-
-
- -
- - - diff --git a/docs/cluster/cluster.html b/docs/cluster/cluster.html deleted file mode 100644 index dbb6cec5..00000000 --- a/docs/cluster/cluster.html +++ /dev/null @@ -1,1747 +0,0 @@ - - - - - - -codeflare_sdk.cluster.cluster API documentation - - - - - - - - - - - -
-
-
-

Module codeflare_sdk.cluster.cluster

-
-
-

The cluster sub-module contains the definition of the Cluster object, which represents -the resources requested by the user. It also contains functions for checking the -cluster setup queue, a list of all existing clusters, and the user's working namespace.

-
- -Expand source code - -
# Copyright 2022 IBM, Red Hat
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-The cluster sub-module contains the definition of the Cluster object, which represents
-the resources requested by the user. It also contains functions for checking the
-cluster setup queue, a list of all existing clusters, and the user's working namespace.
-"""
-
-from time import sleep
-from typing import List, Optional, Tuple, Dict
-
-from ray.job_submission import JobSubmissionClient
-
-from .auth import config_check, api_config_handler
-from ..utils import pretty_print
-from ..utils.generate_yaml import generate_appwrapper
-from ..utils.kube_api_helpers import _kube_api_error_handling
-from .config import ClusterConfiguration
-from .model import (
-    AppWrapper,
-    AppWrapperStatus,
-    CodeFlareClusterStatus,
-    RayCluster,
-    RayClusterStatus,
-)
-from kubernetes import client, config
-import yaml
-import os
-import requests
-
-
-class Cluster:
-    """
-    An object for requesting, bringing up, and taking down resources.
-    Can also be used for seeing the resource cluster status and details.
-
-    Note that currently, the underlying implementation is a Ray cluster.
-    """
-
-    torchx_scheduler = "ray"
-
-    def __init__(self, config: ClusterConfiguration):
-        """
-        Create the resource cluster object by passing in a ClusterConfiguration
-        (defined in the config sub-module). An AppWrapper will then be generated
-        based off of the configured resources to represent the desired cluster
-        request.
-        """
-        self.config = config
-        self.app_wrapper_yaml = self.create_app_wrapper()
-        self.app_wrapper_name = self.app_wrapper_yaml.split(".")[0]
-
-    def evaluate_dispatch_priority(self):
-        priority_class = self.config.dispatch_priority
-
-        try:
-            config_check()
-            api_instance = client.CustomObjectsApi(api_config_handler())
-            priority_classes = api_instance.list_cluster_custom_object(
-                group="scheduling.k8s.io",
-                version="v1",
-                plural="priorityclasses",
-            )
-        except Exception as e:  # pragma: no cover
-            return _kube_api_error_handling(e)
-
-        for pc in priority_classes["items"]:
-            if pc["metadata"]["name"] == priority_class:
-                return pc["value"]
-        print(f"Priority class {priority_class} is not available in the cluster")
-        return None
-
-    def create_app_wrapper(self):
-        """
-        Called upon cluster object creation, creates an AppWrapper yaml based on
-        the specifications of the ClusterConfiguration.
-        """
-
-        if self.config.namespace is None:
-            self.config.namespace = get_current_namespace()
-            if self.config.namespace is None:
-                print("Please specify with namespace=<your_current_namespace>")
-            elif type(self.config.namespace) is not str:
-                raise TypeError(
-                    f"Namespace {self.config.namespace} is of type {type(self.config.namespace)}. Check your Kubernetes Authentication."
-                )
-
-        # Before attempting to create the cluster AW, let's evaluate the ClusterConfig
-        if self.config.dispatch_priority:
-            priority_val = self.evaluate_dispatch_priority()
-            if priority_val == None:
-                raise ValueError(
-                    "Invalid Cluster Configuration, AppWrapper not generated"
-                )
-        else:
-            priority_val = None
-
-        name = self.config.name
-        namespace = self.config.namespace
-        min_cpu = self.config.min_cpus
-        max_cpu = self.config.max_cpus
-        min_memory = self.config.min_memory
-        max_memory = self.config.max_memory
-        gpu = self.config.num_gpus
-        workers = self.config.num_workers
-        template = self.config.template
-        image = self.config.image
-        instascale = self.config.instascale
-        instance_types = self.config.machine_types
-        env = self.config.envs
-        local_interactive = self.config.local_interactive
-        image_pull_secrets = self.config.image_pull_secrets
-        dispatch_priority = self.config.dispatch_priority
-        return generate_appwrapper(
-            name=name,
-            namespace=namespace,
-            min_cpu=min_cpu,
-            max_cpu=max_cpu,
-            min_memory=min_memory,
-            max_memory=max_memory,
-            gpu=gpu,
-            workers=workers,
-            template=template,
-            image=image,
-            instascale=instascale,
-            instance_types=instance_types,
-            env=env,
-            local_interactive=local_interactive,
-            image_pull_secrets=image_pull_secrets,
-            dispatch_priority=dispatch_priority,
-            priority_val=priority_val,
-        )
-
-    # creates a new cluster with the provided or default spec
-    def up(self):
-        """
-        Applies the AppWrapper yaml, pushing the resource request onto
-        the MCAD queue.
-        """
-        namespace = self.config.namespace
-        try:
-            config_check()
-            api_instance = client.CustomObjectsApi(api_config_handler())
-            with open(self.app_wrapper_yaml) as f:
-                aw = yaml.load(f, Loader=yaml.FullLoader)
-            api_instance.create_namespaced_custom_object(
-                group="workload.codeflare.dev",
-                version="v1beta1",
-                namespace=namespace,
-                plural="appwrappers",
-                body=aw,
-            )
-        except Exception as e:  # pragma: no cover
-            return _kube_api_error_handling(e)
-
-    def down(self):
-        """
-        Deletes the AppWrapper yaml, scaling-down and deleting all resources
-        associated with the cluster.
-        """
-        namespace = self.config.namespace
-        try:
-            config_check()
-            api_instance = client.CustomObjectsApi(api_config_handler())
-            api_instance.delete_namespaced_custom_object(
-                group="workload.codeflare.dev",
-                version="v1beta1",
-                namespace=namespace,
-                plural="appwrappers",
-                name=self.app_wrapper_name,
-            )
-        except Exception as e:  # pragma: no cover
-            return _kube_api_error_handling(e)
-
-    def status(
-        self, print_to_console: bool = True
-    ) -> Tuple[CodeFlareClusterStatus, bool]:
-        """
-        Returns the requested cluster's status, as well as whether or not
-        it is ready for use.
-        """
-        ready = False
-        status = CodeFlareClusterStatus.UNKNOWN
-        # check the app wrapper status
-        appwrapper = _app_wrapper_status(self.config.name, self.config.namespace)
-        if appwrapper:
-            if appwrapper.status in [
-                AppWrapperStatus.RUNNING,
-                AppWrapperStatus.COMPLETED,
-                AppWrapperStatus.RUNNING_HOLD_COMPLETION,
-            ]:
-                ready = False
-                status = CodeFlareClusterStatus.STARTING
-            elif appwrapper.status in [
-                AppWrapperStatus.FAILED,
-                AppWrapperStatus.DELETED,
-            ]:
-                ready = False
-                status = CodeFlareClusterStatus.FAILED  # should deleted be separate
-                return status, ready  # exit early, no need to check ray status
-            elif appwrapper.status in [
-                AppWrapperStatus.PENDING,
-                AppWrapperStatus.QUEUEING,
-            ]:
-                ready = False
-                if appwrapper.status == AppWrapperStatus.PENDING:
-                    status = CodeFlareClusterStatus.QUEUED
-                else:
-                    status = CodeFlareClusterStatus.QUEUEING
-                if print_to_console:
-                    pretty_print.print_app_wrappers_status([appwrapper])
-                return (
-                    status,
-                    ready,
-                )  # no need to check the ray status since still in queue
-
-        # check the ray cluster status
-        cluster = _ray_cluster_status(self.config.name, self.config.namespace)
-        if cluster and not cluster.status == RayClusterStatus.UNKNOWN:
-            if cluster.status == RayClusterStatus.READY:
-                ready = True
-                status = CodeFlareClusterStatus.READY
-            elif cluster.status in [
-                RayClusterStatus.UNHEALTHY,
-                RayClusterStatus.FAILED,
-            ]:
-                ready = False
-                status = CodeFlareClusterStatus.FAILED
-
-            if print_to_console:
-                # overriding the number of gpus with requested
-                cluster.worker_gpu = self.config.num_gpus
-                pretty_print.print_cluster_status(cluster)
-        elif print_to_console:
-            if status == CodeFlareClusterStatus.UNKNOWN:
-                pretty_print.print_no_resources_found()
-            else:
-                pretty_print.print_app_wrappers_status([appwrapper], starting=True)
-
-        return status, ready
-
-    def is_dashboard_ready(self) -> bool:
-        response = requests.get(self.cluster_dashboard_uri(), timeout=5)
-        if response.status_code == 200:
-            return True
-        else:
-            return False
-
-    def wait_ready(self, timeout: Optional[int] = None):
-        """
-        Waits for requested cluster to be ready, up to an optional timeout (s).
-        Checks every five seconds.
-        """
-        print("Waiting for requested resources to be set up...")
-        ready = False
-        dashboard_ready = False
-        status = None
-        time = 0
-        while not ready or not dashboard_ready:
-            status, ready = self.status(print_to_console=False)
-            dashboard_ready = self.is_dashboard_ready()
-            if status == CodeFlareClusterStatus.UNKNOWN:
-                print(
-                    "WARNING: Current cluster status is unknown, have you run cluster.up yet?"
-                )
-            if not ready or not dashboard_ready:
-                if timeout and time >= timeout:
-                    raise TimeoutError(f"wait() timed out after waiting {timeout}s")
-                sleep(5)
-                time += 5
-        print("Requested cluster and dashboard are up and running!")
-
-    def details(self, print_to_console: bool = True) -> RayCluster:
-        cluster = _copy_to_ray(self)
-        if print_to_console:
-            pretty_print.print_clusters([cluster])
-        return cluster
-
-    def cluster_uri(self) -> str:
-        """
-        Returns a string containing the cluster's URI.
-        """
-        return f"ray://{self.config.name}-head-svc.{self.config.namespace}.svc:10001"
-
-    def cluster_dashboard_uri(self) -> str:
-        """
-        Returns a string containing the cluster's dashboard URI.
-        """
-        try:
-            config_check()
-            api_instance = client.CustomObjectsApi(api_config_handler())
-            routes = api_instance.list_namespaced_custom_object(
-                group="route.openshift.io",
-                version="v1",
-                namespace=self.config.namespace,
-                plural="routes",
-            )
-        except Exception as e:  # pragma: no cover
-            return _kube_api_error_handling(e)
-
-        for route in routes["items"]:
-            if route["metadata"]["name"] == f"ray-dashboard-{self.config.name}":
-                protocol = "https" if route["spec"].get("tls") else "http"
-                return f"{protocol}://{route['spec']['host']}"
-        return "Dashboard route not available yet, have you run cluster.up()?"
-
-    def list_jobs(self) -> List:
-        """
-        This method accesses the head ray node in your cluster and lists the running jobs.
-        """
-        dashboard_route = self.cluster_dashboard_uri()
-        client = JobSubmissionClient(dashboard_route)
-        return client.list_jobs()
-
-    def job_status(self, job_id: str) -> str:
-        """
-        This method accesses the head ray node in your cluster and returns the job status for the provided job id.
-        """
-        dashboard_route = self.cluster_dashboard_uri()
-        client = JobSubmissionClient(dashboard_route)
-        return client.get_job_status(job_id)
-
-    def job_logs(self, job_id: str) -> str:
-        """
-        This method accesses the head ray node in your cluster and returns the logs for the provided job id.
-        """
-        dashboard_route = self.cluster_dashboard_uri()
-        client = JobSubmissionClient(dashboard_route)
-        return client.get_job_logs(job_id)
-
-    def torchx_config(
-        self, working_dir: str = None, requirements: str = None
-    ) -> Dict[str, str]:
-        dashboard_address = f"{self.cluster_dashboard_uri().lstrip('http://')}"
-        to_return = {
-            "cluster_name": self.config.name,
-            "dashboard_address": dashboard_address,
-        }
-        if working_dir:
-            to_return["working_dir"] = working_dir
-        if requirements:
-            to_return["requirements"] = requirements
-        return to_return
-
-    def from_k8_cluster_object(rc):
-        machine_types = (
-            rc["metadata"]["labels"]["orderedinstance"].split("_")
-            if "orderedinstance" in rc["metadata"]["labels"]
-            else []
-        )
-        local_interactive = (
-            "volumeMounts"
-            in rc["spec"]["workerGroupSpecs"][0]["template"]["spec"]["containers"][0]
-        )
-        cluster_config = ClusterConfiguration(
-            name=rc["metadata"]["name"],
-            namespace=rc["metadata"]["namespace"],
-            machine_types=machine_types,
-            num_workers=rc["spec"]["workerGroupSpecs"][0]["minReplicas"],
-            min_cpus=rc["spec"]["workerGroupSpecs"][0]["template"]["spec"][
-                "containers"
-            ][0]["resources"]["requests"]["cpu"],
-            max_cpus=rc["spec"]["workerGroupSpecs"][0]["template"]["spec"][
-                "containers"
-            ][0]["resources"]["limits"]["cpu"],
-            min_memory=int(
-                rc["spec"]["workerGroupSpecs"][0]["template"]["spec"]["containers"][0][
-                    "resources"
-                ]["requests"]["memory"][:-1]
-            ),
-            max_memory=int(
-                rc["spec"]["workerGroupSpecs"][0]["template"]["spec"]["containers"][0][
-                    "resources"
-                ]["limits"]["memory"][:-1]
-            ),
-            num_gpus=rc["spec"]["workerGroupSpecs"][0]["template"]["spec"][
-                "containers"
-            ][0]["resources"]["limits"]["nvidia.com/gpu"],
-            instascale=True if machine_types else False,
-            image=rc["spec"]["workerGroupSpecs"][0]["template"]["spec"]["containers"][
-                0
-            ]["image"],
-            local_interactive=local_interactive,
-        )
-        return Cluster(cluster_config)
-
-    def local_client_url(self):
-        if self.config.local_interactive == True:
-            ingress_domain = _get_ingress_domain()
-            return f"ray://rayclient-{self.config.name}-{self.config.namespace}.{ingress_domain}"
-        else:
-            return "None"
-
-
-def list_all_clusters(namespace: str, print_to_console: bool = True):
-    """
-    Returns (and prints by default) a list of all clusters in a given namespace.
-    """
-    clusters = _get_ray_clusters(namespace)
-    if print_to_console:
-        pretty_print.print_clusters(clusters)
-    return clusters
-
-
-def list_all_queued(namespace: str, print_to_console: bool = True):
-    """
-    Returns (and prints by default) a list of all currently queued-up AppWrappers
-    in a given namespace.
-    """
-    app_wrappers = _get_app_wrappers(
-        namespace, filter=[AppWrapperStatus.RUNNING, AppWrapperStatus.PENDING]
-    )
-    if print_to_console:
-        pretty_print.print_app_wrappers_status(app_wrappers)
-    return app_wrappers
-
-
-def get_current_namespace():  # pragma: no cover
-    if api_config_handler() != None:
-        if os.path.isfile("/var/run/secrets/kubernetes.io/serviceaccount/namespace"):
-            try:
-                file = open(
-                    "/var/run/secrets/kubernetes.io/serviceaccount/namespace", "r"
-                )
-                active_context = file.readline().strip("\n")
-                return active_context
-            except Exception as e:
-                print("Unable to find current namespace")
-                return None
-        else:
-            print("Unable to find current namespace")
-            return None
-    else:
-        try:
-            _, active_context = config.list_kube_config_contexts(config_check())
-        except Exception as e:
-            return _kube_api_error_handling(e)
-        try:
-            return active_context["context"]["namespace"]
-        except KeyError:
-            return None
-
-
-def get_cluster(cluster_name: str, namespace: str = "default"):
-    try:
-        config.load_kube_config()
-        api_instance = client.CustomObjectsApi()
-        rcs = api_instance.list_namespaced_custom_object(
-            group="ray.io",
-            version="v1alpha1",
-            namespace=namespace,
-            plural="rayclusters",
-        )
-    except Exception as e:
-        return _kube_api_error_handling(e)
-
-    for rc in rcs["items"]:
-        if rc["metadata"]["name"] == cluster_name:
-            return Cluster.from_k8_cluster_object(rc)
-    raise FileNotFoundError(
-        f"Cluster {cluster_name} is not found in {namespace} namespace"
-    )
-
-
-# private methods
-def _get_ingress_domain():
-    try:
-        config.load_kube_config()
-        api_client = client.CustomObjectsApi(api_config_handler())
-        ingress = api_client.get_cluster_custom_object(
-            "config.openshift.io", "v1", "ingresses", "cluster"
-        )
-    except Exception as e:  # pragma: no cover
-        return _kube_api_error_handling(e)
-    return ingress["spec"]["domain"]
-
-
-def _app_wrapper_status(name, namespace="default") -> Optional[AppWrapper]:
-    try:
-        config_check()
-        api_instance = client.CustomObjectsApi(api_config_handler())
-        aws = api_instance.list_namespaced_custom_object(
-            group="workload.codeflare.dev",
-            version="v1beta1",
-            namespace=namespace,
-            plural="appwrappers",
-        )
-    except Exception as e:  # pragma: no cover
-        return _kube_api_error_handling(e)
-
-    for aw in aws["items"]:
-        if aw["metadata"]["name"] == name:
-            return _map_to_app_wrapper(aw)
-    return None
-
-
-def _ray_cluster_status(name, namespace="default") -> Optional[RayCluster]:
-    try:
-        config_check()
-        api_instance = client.CustomObjectsApi(api_config_handler())
-        rcs = api_instance.list_namespaced_custom_object(
-            group="ray.io",
-            version="v1alpha1",
-            namespace=namespace,
-            plural="rayclusters",
-        )
-    except Exception as e:  # pragma: no cover
-        return _kube_api_error_handling(e)
-
-    for rc in rcs["items"]:
-        if rc["metadata"]["name"] == name:
-            return _map_to_ray_cluster(rc)
-    return None
-
-
-def _get_ray_clusters(namespace="default") -> List[RayCluster]:
-    list_of_clusters = []
-    try:
-        config_check()
-        api_instance = client.CustomObjectsApi(api_config_handler())
-        rcs = api_instance.list_namespaced_custom_object(
-            group="ray.io",
-            version="v1alpha1",
-            namespace=namespace,
-            plural="rayclusters",
-        )
-    except Exception as e:  # pragma: no cover
-        return _kube_api_error_handling(e)
-
-    for rc in rcs["items"]:
-        list_of_clusters.append(_map_to_ray_cluster(rc))
-    return list_of_clusters
-
-
-def _get_app_wrappers(
-    namespace="default", filter=List[AppWrapperStatus]
-) -> List[AppWrapper]:
-    list_of_app_wrappers = []
-
-    try:
-        config_check()
-        api_instance = client.CustomObjectsApi(api_config_handler())
-        aws = api_instance.list_namespaced_custom_object(
-            group="workload.codeflare.dev",
-            version="v1beta1",
-            namespace=namespace,
-            plural="appwrappers",
-        )
-    except Exception as e:  # pragma: no cover
-        return _kube_api_error_handling(e)
-
-    for item in aws["items"]:
-        app_wrapper = _map_to_app_wrapper(item)
-        if filter and app_wrapper.status in filter:
-            list_of_app_wrappers.append(app_wrapper)
-        else:
-            # Unsure what the purpose of the filter is
-            list_of_app_wrappers.append(app_wrapper)
-    return list_of_app_wrappers
-
-
-def _map_to_ray_cluster(rc) -> Optional[RayCluster]:
-    if "state" in rc["status"]:
-        status = RayClusterStatus(rc["status"]["state"].lower())
-    else:
-        status = RayClusterStatus.UNKNOWN
-
-    config_check()
-    api_instance = client.CustomObjectsApi(api_config_handler())
-    routes = api_instance.list_namespaced_custom_object(
-        group="route.openshift.io",
-        version="v1",
-        namespace=rc["metadata"]["namespace"],
-        plural="routes",
-    )
-    ray_route = None
-    for route in routes["items"]:
-        if route["metadata"]["name"] == f"ray-dashboard-{rc['metadata']['name']}":
-            protocol = "https" if route["spec"].get("tls") else "http"
-            ray_route = f"{protocol}://{route['spec']['host']}"
-
-    return RayCluster(
-        name=rc["metadata"]["name"],
-        status=status,
-        # for now we are not using autoscaling so same replicas is fine
-        workers=rc["spec"]["workerGroupSpecs"][0]["replicas"],
-        worker_mem_max=rc["spec"]["workerGroupSpecs"][0]["template"]["spec"][
-            "containers"
-        ][0]["resources"]["limits"]["memory"],
-        worker_mem_min=rc["spec"]["workerGroupSpecs"][0]["template"]["spec"][
-            "containers"
-        ][0]["resources"]["requests"]["memory"],
-        worker_cpu=rc["spec"]["workerGroupSpecs"][0]["template"]["spec"]["containers"][
-            0
-        ]["resources"]["limits"]["cpu"],
-        worker_gpu=0,  # hard to detect currently how many gpus, can override it with what the user asked for
-        namespace=rc["metadata"]["namespace"],
-        dashboard=ray_route,
-    )
-
-
-def _map_to_app_wrapper(aw) -> AppWrapper:
-    if "status" in aw and "canrun" in aw["status"]:
-        return AppWrapper(
-            name=aw["metadata"]["name"],
-            status=AppWrapperStatus(aw["status"]["state"].lower()),
-            can_run=aw["status"]["canrun"],
-            job_state=aw["status"]["queuejobstate"],
-        )
-    return AppWrapper(
-        name=aw["metadata"]["name"],
-        status=AppWrapperStatus("queueing"),
-        can_run=False,
-        job_state="Still adding to queue",
-    )
-
-
-def _copy_to_ray(cluster: Cluster) -> RayCluster:
-    ray = RayCluster(
-        name=cluster.config.name,
-        status=cluster.status(print_to_console=False)[0],
-        workers=cluster.config.num_workers,
-        worker_mem_min=cluster.config.min_memory,
-        worker_mem_max=cluster.config.max_memory,
-        worker_cpu=cluster.config.min_cpus,
-        worker_gpu=cluster.config.num_gpus,
-        namespace=cluster.config.namespace,
-        dashboard=cluster.cluster_dashboard_uri(),
-    )
-    if ray.status == CodeFlareClusterStatus.READY:
-        ray.status = RayClusterStatus.READY
-    return ray
-
-
-
-
-
-
-
-

Functions

-
-
-def get_cluster(cluster_name: str, namespace: str = 'default') -
-
-
-
- -Expand source code - -
def get_cluster(cluster_name: str, namespace: str = "default"):
-    try:
-        config.load_kube_config()
-        api_instance = client.CustomObjectsApi()
-        rcs = api_instance.list_namespaced_custom_object(
-            group="ray.io",
-            version="v1alpha1",
-            namespace=namespace,
-            plural="rayclusters",
-        )
-    except Exception as e:
-        return _kube_api_error_handling(e)
-
-    for rc in rcs["items"]:
-        if rc["metadata"]["name"] == cluster_name:
-            return Cluster.from_k8_cluster_object(rc)
-    raise FileNotFoundError(
-        f"Cluster {cluster_name} is not found in {namespace} namespace"
-    )
-
-
-
-def get_current_namespace() -
-
-
-
- -Expand source code - -
def get_current_namespace():  # pragma: no cover
-    if api_config_handler() != None:
-        if os.path.isfile("/var/run/secrets/kubernetes.io/serviceaccount/namespace"):
-            try:
-                file = open(
-                    "/var/run/secrets/kubernetes.io/serviceaccount/namespace", "r"
-                )
-                active_context = file.readline().strip("\n")
-                return active_context
-            except Exception as e:
-                print("Unable to find current namespace")
-                return None
-        else:
-            print("Unable to find current namespace")
-            return None
-    else:
-        try:
-            _, active_context = config.list_kube_config_contexts(config_check())
-        except Exception as e:
-            return _kube_api_error_handling(e)
-        try:
-            return active_context["context"]["namespace"]
-        except KeyError:
-            return None
-
-
-
-def list_all_clusters(namespace: str, print_to_console: bool = True) -
-
-

Returns (and prints by default) a list of all clusters in a given namespace.

-
- -Expand source code - -
def list_all_clusters(namespace: str, print_to_console: bool = True):
-    """
-    Returns (and prints by default) a list of all clusters in a given namespace.
-    """
-    clusters = _get_ray_clusters(namespace)
-    if print_to_console:
-        pretty_print.print_clusters(clusters)
-    return clusters
-
-
-
-def list_all_queued(namespace: str, print_to_console: bool = True) -
-
-

Returns (and prints by default) a list of all currently queued-up AppWrappers -in a given namespace.

-
- -Expand source code - -
def list_all_queued(namespace: str, print_to_console: bool = True):
-    """
-    Returns (and prints by default) a list of all currently queued-up AppWrappers
-    in a given namespace.
-    """
-    app_wrappers = _get_app_wrappers(
-        namespace, filter=[AppWrapperStatus.RUNNING, AppWrapperStatus.PENDING]
-    )
-    if print_to_console:
-        pretty_print.print_app_wrappers_status(app_wrappers)
-    return app_wrappers
-
-
-
-
-
-

Classes

-
-
-class Cluster -(config: ClusterConfiguration) -
-
-

An object for requesting, bringing up, and taking down resources. -Can also be used for seeing the resource cluster status and details.

-

Note that currently, the underlying implementation is a Ray cluster.

-

Create the resource cluster object by passing in a ClusterConfiguration -(defined in the config sub-module). An AppWrapper will then be generated -based off of the configured resources to represent the desired cluster -request.

-
- -Expand source code - -
class Cluster:
-    """
-    An object for requesting, bringing up, and taking down resources.
-    Can also be used for seeing the resource cluster status and details.
-
-    Note that currently, the underlying implementation is a Ray cluster.
-    """
-
-    torchx_scheduler = "ray"
-
-    def __init__(self, config: ClusterConfiguration):
-        """
-        Create the resource cluster object by passing in a ClusterConfiguration
-        (defined in the config sub-module). An AppWrapper will then be generated
-        based off of the configured resources to represent the desired cluster
-        request.
-        """
-        self.config = config
-        self.app_wrapper_yaml = self.create_app_wrapper()
-        self.app_wrapper_name = self.app_wrapper_yaml.split(".")[0]
-
-    def evaluate_dispatch_priority(self):
-        priority_class = self.config.dispatch_priority
-
-        try:
-            config_check()
-            api_instance = client.CustomObjectsApi(api_config_handler())
-            priority_classes = api_instance.list_cluster_custom_object(
-                group="scheduling.k8s.io",
-                version="v1",
-                plural="priorityclasses",
-            )
-        except Exception as e:  # pragma: no cover
-            return _kube_api_error_handling(e)
-
-        for pc in priority_classes["items"]:
-            if pc["metadata"]["name"] == priority_class:
-                return pc["value"]
-        print(f"Priority class {priority_class} is not available in the cluster")
-        return None
-
-    def create_app_wrapper(self):
-        """
-        Called upon cluster object creation, creates an AppWrapper yaml based on
-        the specifications of the ClusterConfiguration.
-        """
-
-        if self.config.namespace is None:
-            self.config.namespace = get_current_namespace()
-            if self.config.namespace is None:
-                print("Please specify with namespace=<your_current_namespace>")
-            elif type(self.config.namespace) is not str:
-                raise TypeError(
-                    f"Namespace {self.config.namespace} is of type {type(self.config.namespace)}. Check your Kubernetes Authentication."
-                )
-
-        # Before attempting to create the cluster AW, let's evaluate the ClusterConfig
-        if self.config.dispatch_priority:
-            priority_val = self.evaluate_dispatch_priority()
-            if priority_val == None:
-                raise ValueError(
-                    "Invalid Cluster Configuration, AppWrapper not generated"
-                )
-        else:
-            priority_val = None
-
-        name = self.config.name
-        namespace = self.config.namespace
-        min_cpu = self.config.min_cpus
-        max_cpu = self.config.max_cpus
-        min_memory = self.config.min_memory
-        max_memory = self.config.max_memory
-        gpu = self.config.num_gpus
-        workers = self.config.num_workers
-        template = self.config.template
-        image = self.config.image
-        instascale = self.config.instascale
-        instance_types = self.config.machine_types
-        env = self.config.envs
-        local_interactive = self.config.local_interactive
-        image_pull_secrets = self.config.image_pull_secrets
-        dispatch_priority = self.config.dispatch_priority
-        return generate_appwrapper(
-            name=name,
-            namespace=namespace,
-            min_cpu=min_cpu,
-            max_cpu=max_cpu,
-            min_memory=min_memory,
-            max_memory=max_memory,
-            gpu=gpu,
-            workers=workers,
-            template=template,
-            image=image,
-            instascale=instascale,
-            instance_types=instance_types,
-            env=env,
-            local_interactive=local_interactive,
-            image_pull_secrets=image_pull_secrets,
-            dispatch_priority=dispatch_priority,
-            priority_val=priority_val,
-        )
-
-    # creates a new cluster with the provided or default spec
-    def up(self):
-        """
-        Applies the AppWrapper yaml, pushing the resource request onto
-        the MCAD queue.
-        """
-        namespace = self.config.namespace
-        try:
-            config_check()
-            api_instance = client.CustomObjectsApi(api_config_handler())
-            with open(self.app_wrapper_yaml) as f:
-                aw = yaml.load(f, Loader=yaml.FullLoader)
-            api_instance.create_namespaced_custom_object(
-                group="workload.codeflare.dev",
-                version="v1beta1",
-                namespace=namespace,
-                plural="appwrappers",
-                body=aw,
-            )
-        except Exception as e:  # pragma: no cover
-            return _kube_api_error_handling(e)
-
-    def down(self):
-        """
-        Deletes the AppWrapper yaml, scaling-down and deleting all resources
-        associated with the cluster.
-        """
-        namespace = self.config.namespace
-        try:
-            config_check()
-            api_instance = client.CustomObjectsApi(api_config_handler())
-            api_instance.delete_namespaced_custom_object(
-                group="workload.codeflare.dev",
-                version="v1beta1",
-                namespace=namespace,
-                plural="appwrappers",
-                name=self.app_wrapper_name,
-            )
-        except Exception as e:  # pragma: no cover
-            return _kube_api_error_handling(e)
-
-    def status(
-        self, print_to_console: bool = True
-    ) -> Tuple[CodeFlareClusterStatus, bool]:
-        """
-        Returns the requested cluster's status, as well as whether or not
-        it is ready for use.
-        """
-        ready = False
-        status = CodeFlareClusterStatus.UNKNOWN
-        # check the app wrapper status
-        appwrapper = _app_wrapper_status(self.config.name, self.config.namespace)
-        if appwrapper:
-            if appwrapper.status in [
-                AppWrapperStatus.RUNNING,
-                AppWrapperStatus.COMPLETED,
-                AppWrapperStatus.RUNNING_HOLD_COMPLETION,
-            ]:
-                ready = False
-                status = CodeFlareClusterStatus.STARTING
-            elif appwrapper.status in [
-                AppWrapperStatus.FAILED,
-                AppWrapperStatus.DELETED,
-            ]:
-                ready = False
-                status = CodeFlareClusterStatus.FAILED  # should deleted be separate
-                return status, ready  # exit early, no need to check ray status
-            elif appwrapper.status in [
-                AppWrapperStatus.PENDING,
-                AppWrapperStatus.QUEUEING,
-            ]:
-                ready = False
-                if appwrapper.status == AppWrapperStatus.PENDING:
-                    status = CodeFlareClusterStatus.QUEUED
-                else:
-                    status = CodeFlareClusterStatus.QUEUEING
-                if print_to_console:
-                    pretty_print.print_app_wrappers_status([appwrapper])
-                return (
-                    status,
-                    ready,
-                )  # no need to check the ray status since still in queue
-
-        # check the ray cluster status
-        cluster = _ray_cluster_status(self.config.name, self.config.namespace)
-        if cluster and not cluster.status == RayClusterStatus.UNKNOWN:
-            if cluster.status == RayClusterStatus.READY:
-                ready = True
-                status = CodeFlareClusterStatus.READY
-            elif cluster.status in [
-                RayClusterStatus.UNHEALTHY,
-                RayClusterStatus.FAILED,
-            ]:
-                ready = False
-                status = CodeFlareClusterStatus.FAILED
-
-            if print_to_console:
-                # overriding the number of gpus with requested
-                cluster.worker_gpu = self.config.num_gpus
-                pretty_print.print_cluster_status(cluster)
-        elif print_to_console:
-            if status == CodeFlareClusterStatus.UNKNOWN:
-                pretty_print.print_no_resources_found()
-            else:
-                pretty_print.print_app_wrappers_status([appwrapper], starting=True)
-
-        return status, ready
-
-    def is_dashboard_ready(self) -> bool:
-        response = requests.get(self.cluster_dashboard_uri(), timeout=5)
-        if response.status_code == 200:
-            return True
-        else:
-            return False
-
-    def wait_ready(self, timeout: Optional[int] = None):
-        """
-        Waits for requested cluster to be ready, up to an optional timeout (s).
-        Checks every five seconds.
-        """
-        print("Waiting for requested resources to be set up...")
-        ready = False
-        dashboard_ready = False
-        status = None
-        time = 0
-        while not ready or not dashboard_ready:
-            status, ready = self.status(print_to_console=False)
-            dashboard_ready = self.is_dashboard_ready()
-            if status == CodeFlareClusterStatus.UNKNOWN:
-                print(
-                    "WARNING: Current cluster status is unknown, have you run cluster.up yet?"
-                )
-            if not ready or not dashboard_ready:
-                if timeout and time >= timeout:
-                    raise TimeoutError(f"wait() timed out after waiting {timeout}s")
-                sleep(5)
-                time += 5
-        print("Requested cluster and dashboard are up and running!")
-
-    def details(self, print_to_console: bool = True) -> RayCluster:
-        cluster = _copy_to_ray(self)
-        if print_to_console:
-            pretty_print.print_clusters([cluster])
-        return cluster
-
-    def cluster_uri(self) -> str:
-        """
-        Returns a string containing the cluster's URI.
-        """
-        return f"ray://{self.config.name}-head-svc.{self.config.namespace}.svc:10001"
-
-    def cluster_dashboard_uri(self) -> str:
-        """
-        Returns a string containing the cluster's dashboard URI.
-        """
-        try:
-            config_check()
-            api_instance = client.CustomObjectsApi(api_config_handler())
-            routes = api_instance.list_namespaced_custom_object(
-                group="route.openshift.io",
-                version="v1",
-                namespace=self.config.namespace,
-                plural="routes",
-            )
-        except Exception as e:  # pragma: no cover
-            return _kube_api_error_handling(e)
-
-        for route in routes["items"]:
-            if route["metadata"]["name"] == f"ray-dashboard-{self.config.name}":
-                protocol = "https" if route["spec"].get("tls") else "http"
-                return f"{protocol}://{route['spec']['host']}"
-        return "Dashboard route not available yet, have you run cluster.up()?"
-
-    def list_jobs(self) -> List:
-        """
-        This method accesses the head ray node in your cluster and lists the running jobs.
-        """
-        dashboard_route = self.cluster_dashboard_uri()
-        client = JobSubmissionClient(dashboard_route)
-        return client.list_jobs()
-
-    def job_status(self, job_id: str) -> str:
-        """
-        This method accesses the head ray node in your cluster and returns the job status for the provided job id.
-        """
-        dashboard_route = self.cluster_dashboard_uri()
-        client = JobSubmissionClient(dashboard_route)
-        return client.get_job_status(job_id)
-
-    def job_logs(self, job_id: str) -> str:
-        """
-        This method accesses the head ray node in your cluster and returns the logs for the provided job id.
-        """
-        dashboard_route = self.cluster_dashboard_uri()
-        client = JobSubmissionClient(dashboard_route)
-        return client.get_job_logs(job_id)
-
-    def torchx_config(
-        self, working_dir: str = None, requirements: str = None
-    ) -> Dict[str, str]:
-        dashboard_address = f"{self.cluster_dashboard_uri().lstrip('http://')}"
-        to_return = {
-            "cluster_name": self.config.name,
-            "dashboard_address": dashboard_address,
-        }
-        if working_dir:
-            to_return["working_dir"] = working_dir
-        if requirements:
-            to_return["requirements"] = requirements
-        return to_return
-
-    def from_k8_cluster_object(rc):
-        machine_types = (
-            rc["metadata"]["labels"]["orderedinstance"].split("_")
-            if "orderedinstance" in rc["metadata"]["labels"]
-            else []
-        )
-        local_interactive = (
-            "volumeMounts"
-            in rc["spec"]["workerGroupSpecs"][0]["template"]["spec"]["containers"][0]
-        )
-        cluster_config = ClusterConfiguration(
-            name=rc["metadata"]["name"],
-            namespace=rc["metadata"]["namespace"],
-            machine_types=machine_types,
-            num_workers=rc["spec"]["workerGroupSpecs"][0]["minReplicas"],
-            min_cpus=rc["spec"]["workerGroupSpecs"][0]["template"]["spec"][
-                "containers"
-            ][0]["resources"]["requests"]["cpu"],
-            max_cpus=rc["spec"]["workerGroupSpecs"][0]["template"]["spec"][
-                "containers"
-            ][0]["resources"]["limits"]["cpu"],
-            min_memory=int(
-                rc["spec"]["workerGroupSpecs"][0]["template"]["spec"]["containers"][0][
-                    "resources"
-                ]["requests"]["memory"][:-1]
-            ),
-            max_memory=int(
-                rc["spec"]["workerGroupSpecs"][0]["template"]["spec"]["containers"][0][
-                    "resources"
-                ]["limits"]["memory"][:-1]
-            ),
-            num_gpus=rc["spec"]["workerGroupSpecs"][0]["template"]["spec"][
-                "containers"
-            ][0]["resources"]["limits"]["nvidia.com/gpu"],
-            instascale=True if machine_types else False,
-            image=rc["spec"]["workerGroupSpecs"][0]["template"]["spec"]["containers"][
-                0
-            ]["image"],
-            local_interactive=local_interactive,
-        )
-        return Cluster(cluster_config)
-
-    def local_client_url(self):
-        if self.config.local_interactive == True:
-            ingress_domain = _get_ingress_domain()
-            return f"ray://rayclient-{self.config.name}-{self.config.namespace}.{ingress_domain}"
-        else:
-            return "None"
-
-

Class variables

-
-
var torchx_scheduler
-
-
-
-
-

Methods

-
-
-def cluster_dashboard_uri(self) ‑> str -
-
-

Returns a string containing the cluster's dashboard URI.

-
- -Expand source code - -
def cluster_dashboard_uri(self) -> str:
-    """
-    Returns a string containing the cluster's dashboard URI.
-    """
-    try:
-        config_check()
-        api_instance = client.CustomObjectsApi(api_config_handler())
-        routes = api_instance.list_namespaced_custom_object(
-            group="route.openshift.io",
-            version="v1",
-            namespace=self.config.namespace,
-            plural="routes",
-        )
-    except Exception as e:  # pragma: no cover
-        return _kube_api_error_handling(e)
-
-    for route in routes["items"]:
-        if route["metadata"]["name"] == f"ray-dashboard-{self.config.name}":
-            protocol = "https" if route["spec"].get("tls") else "http"
-            return f"{protocol}://{route['spec']['host']}"
-    return "Dashboard route not available yet, have you run cluster.up()?"
-
-
-
-def cluster_uri(self) ‑> str -
-
-

Returns a string containing the cluster's URI.

-
- -Expand source code - -
def cluster_uri(self) -> str:
-    """
-    Returns a string containing the cluster's URI.
-    """
-    return f"ray://{self.config.name}-head-svc.{self.config.namespace}.svc:10001"
-
-
-
-def create_app_wrapper(self) -
-
-

Called upon cluster object creation, creates an AppWrapper yaml based on -the specifications of the ClusterConfiguration.

-
- -Expand source code - -
def create_app_wrapper(self):
-    """
-    Called upon cluster object creation, creates an AppWrapper yaml based on
-    the specifications of the ClusterConfiguration.
-    """
-
-    if self.config.namespace is None:
-        self.config.namespace = get_current_namespace()
-        if self.config.namespace is None:
-            print("Please specify with namespace=<your_current_namespace>")
-        elif type(self.config.namespace) is not str:
-            raise TypeError(
-                f"Namespace {self.config.namespace} is of type {type(self.config.namespace)}. Check your Kubernetes Authentication."
-            )
-
-    # Before attempting to create the cluster AW, let's evaluate the ClusterConfig
-    if self.config.dispatch_priority:
-        priority_val = self.evaluate_dispatch_priority()
-        if priority_val == None:
-            raise ValueError(
-                "Invalid Cluster Configuration, AppWrapper not generated"
-            )
-    else:
-        priority_val = None
-
-    name = self.config.name
-    namespace = self.config.namespace
-    min_cpu = self.config.min_cpus
-    max_cpu = self.config.max_cpus
-    min_memory = self.config.min_memory
-    max_memory = self.config.max_memory
-    gpu = self.config.num_gpus
-    workers = self.config.num_workers
-    template = self.config.template
-    image = self.config.image
-    instascale = self.config.instascale
-    instance_types = self.config.machine_types
-    env = self.config.envs
-    local_interactive = self.config.local_interactive
-    image_pull_secrets = self.config.image_pull_secrets
-    dispatch_priority = self.config.dispatch_priority
-    return generate_appwrapper(
-        name=name,
-        namespace=namespace,
-        min_cpu=min_cpu,
-        max_cpu=max_cpu,
-        min_memory=min_memory,
-        max_memory=max_memory,
-        gpu=gpu,
-        workers=workers,
-        template=template,
-        image=image,
-        instascale=instascale,
-        instance_types=instance_types,
-        env=env,
-        local_interactive=local_interactive,
-        image_pull_secrets=image_pull_secrets,
-        dispatch_priority=dispatch_priority,
-        priority_val=priority_val,
-    )
-
-
-
-def details(self, print_to_console: bool = True) ‑> RayCluster -
-
-
-
- -Expand source code - -
def details(self, print_to_console: bool = True) -> RayCluster:
-    cluster = _copy_to_ray(self)
-    if print_to_console:
-        pretty_print.print_clusters([cluster])
-    return cluster
-
-
-
-def down(self) -
-
-

Deletes the AppWrapper yaml, scaling-down and deleting all resources -associated with the cluster.

-
- -Expand source code - -
def down(self):
-    """
-    Deletes the AppWrapper yaml, scaling-down and deleting all resources
-    associated with the cluster.
-    """
-    namespace = self.config.namespace
-    try:
-        config_check()
-        api_instance = client.CustomObjectsApi(api_config_handler())
-        api_instance.delete_namespaced_custom_object(
-            group="workload.codeflare.dev",
-            version="v1beta1",
-            namespace=namespace,
-            plural="appwrappers",
-            name=self.app_wrapper_name,
-        )
-    except Exception as e:  # pragma: no cover
-        return _kube_api_error_handling(e)
-
-
-
-def evaluate_dispatch_priority(self) -
-
-
-
- -Expand source code - -
def evaluate_dispatch_priority(self):
-    priority_class = self.config.dispatch_priority
-
-    try:
-        config_check()
-        api_instance = client.CustomObjectsApi(api_config_handler())
-        priority_classes = api_instance.list_cluster_custom_object(
-            group="scheduling.k8s.io",
-            version="v1",
-            plural="priorityclasses",
-        )
-    except Exception as e:  # pragma: no cover
-        return _kube_api_error_handling(e)
-
-    for pc in priority_classes["items"]:
-        if pc["metadata"]["name"] == priority_class:
-            return pc["value"]
-    print(f"Priority class {priority_class} is not available in the cluster")
-    return None
-
-
-
-def from_k8_cluster_object(rc) -
-
-
-
- -Expand source code - -
def from_k8_cluster_object(rc):
-    machine_types = (
-        rc["metadata"]["labels"]["orderedinstance"].split("_")
-        if "orderedinstance" in rc["metadata"]["labels"]
-        else []
-    )
-    local_interactive = (
-        "volumeMounts"
-        in rc["spec"]["workerGroupSpecs"][0]["template"]["spec"]["containers"][0]
-    )
-    cluster_config = ClusterConfiguration(
-        name=rc["metadata"]["name"],
-        namespace=rc["metadata"]["namespace"],
-        machine_types=machine_types,
-        num_workers=rc["spec"]["workerGroupSpecs"][0]["minReplicas"],
-        min_cpus=rc["spec"]["workerGroupSpecs"][0]["template"]["spec"][
-            "containers"
-        ][0]["resources"]["requests"]["cpu"],
-        max_cpus=rc["spec"]["workerGroupSpecs"][0]["template"]["spec"][
-            "containers"
-        ][0]["resources"]["limits"]["cpu"],
-        min_memory=int(
-            rc["spec"]["workerGroupSpecs"][0]["template"]["spec"]["containers"][0][
-                "resources"
-            ]["requests"]["memory"][:-1]
-        ),
-        max_memory=int(
-            rc["spec"]["workerGroupSpecs"][0]["template"]["spec"]["containers"][0][
-                "resources"
-            ]["limits"]["memory"][:-1]
-        ),
-        num_gpus=rc["spec"]["workerGroupSpecs"][0]["template"]["spec"][
-            "containers"
-        ][0]["resources"]["limits"]["nvidia.com/gpu"],
-        instascale=True if machine_types else False,
-        image=rc["spec"]["workerGroupSpecs"][0]["template"]["spec"]["containers"][
-            0
-        ]["image"],
-        local_interactive=local_interactive,
-    )
-    return Cluster(cluster_config)
-
-
-
-def is_dashboard_ready(self) ‑> bool -
-
-
-
- -Expand source code - -
def is_dashboard_ready(self) -> bool:
-    response = requests.get(self.cluster_dashboard_uri(), timeout=5)
-    if response.status_code == 200:
-        return True
-    else:
-        return False
-
-
-
-def job_logs(self, job_id: str) ‑> str -
-
-

This method accesses the head ray node in your cluster and returns the logs for the provided job id.

-
- -Expand source code - -
def job_logs(self, job_id: str) -> str:
-    """
-    This method accesses the head ray node in your cluster and returns the logs for the provided job id.
-    """
-    dashboard_route = self.cluster_dashboard_uri()
-    client = JobSubmissionClient(dashboard_route)
-    return client.get_job_logs(job_id)
-
-
-
-def job_status(self, job_id: str) ‑> str -
-
-

This method accesses the head ray node in your cluster and returns the job status for the provided job id.

-
- -Expand source code - -
def job_status(self, job_id: str) -> str:
-    """
-    This method accesses the head ray node in your cluster and returns the job status for the provided job id.
-    """
-    dashboard_route = self.cluster_dashboard_uri()
-    client = JobSubmissionClient(dashboard_route)
-    return client.get_job_status(job_id)
-
-
-
-def list_jobs(self) ‑> List[~T] -
-
-

This method accesses the head ray node in your cluster and lists the running jobs.

-
- -Expand source code - -
def list_jobs(self) -> List:
-    """
-    This method accesses the head ray node in your cluster and lists the running jobs.
-    """
-    dashboard_route = self.cluster_dashboard_uri()
-    client = JobSubmissionClient(dashboard_route)
-    return client.list_jobs()
-
-
-
-def local_client_url(self) -
-
-
-
- -Expand source code - -
def local_client_url(self):
-    if self.config.local_interactive == True:
-        ingress_domain = _get_ingress_domain()
-        return f"ray://rayclient-{self.config.name}-{self.config.namespace}.{ingress_domain}"
-    else:
-        return "None"
-
-
-
-def status(self, print_to_console: bool = True) ‑> Tuple[CodeFlareClusterStatus, bool] -
-
-

Returns the requested cluster's status, as well as whether or not -it is ready for use.

-
- -Expand source code - -
def status(
-    self, print_to_console: bool = True
-) -> Tuple[CodeFlareClusterStatus, bool]:
-    """
-    Returns the requested cluster's status, as well as whether or not
-    it is ready for use.
-    """
-    ready = False
-    status = CodeFlareClusterStatus.UNKNOWN
-    # check the app wrapper status
-    appwrapper = _app_wrapper_status(self.config.name, self.config.namespace)
-    if appwrapper:
-        if appwrapper.status in [
-            AppWrapperStatus.RUNNING,
-            AppWrapperStatus.COMPLETED,
-            AppWrapperStatus.RUNNING_HOLD_COMPLETION,
-        ]:
-            ready = False
-            status = CodeFlareClusterStatus.STARTING
-        elif appwrapper.status in [
-            AppWrapperStatus.FAILED,
-            AppWrapperStatus.DELETED,
-        ]:
-            ready = False
-            status = CodeFlareClusterStatus.FAILED  # should deleted be separate
-            return status, ready  # exit early, no need to check ray status
-        elif appwrapper.status in [
-            AppWrapperStatus.PENDING,
-            AppWrapperStatus.QUEUEING,
-        ]:
-            ready = False
-            if appwrapper.status == AppWrapperStatus.PENDING:
-                status = CodeFlareClusterStatus.QUEUED
-            else:
-                status = CodeFlareClusterStatus.QUEUEING
-            if print_to_console:
-                pretty_print.print_app_wrappers_status([appwrapper])
-            return (
-                status,
-                ready,
-            )  # no need to check the ray status since still in queue
-
-    # check the ray cluster status
-    cluster = _ray_cluster_status(self.config.name, self.config.namespace)
-    if cluster and not cluster.status == RayClusterStatus.UNKNOWN:
-        if cluster.status == RayClusterStatus.READY:
-            ready = True
-            status = CodeFlareClusterStatus.READY
-        elif cluster.status in [
-            RayClusterStatus.UNHEALTHY,
-            RayClusterStatus.FAILED,
-        ]:
-            ready = False
-            status = CodeFlareClusterStatus.FAILED
-
-        if print_to_console:
-            # overriding the number of gpus with requested
-            cluster.worker_gpu = self.config.num_gpus
-            pretty_print.print_cluster_status(cluster)
-    elif print_to_console:
-        if status == CodeFlareClusterStatus.UNKNOWN:
-            pretty_print.print_no_resources_found()
-        else:
-            pretty_print.print_app_wrappers_status([appwrapper], starting=True)
-
-    return status, ready
-
-
-
-def torchx_config(self, working_dir: str = None, requirements: str = None) ‑> Dict[str, str] -
-
-
-
- -Expand source code - -
def torchx_config(
-    self, working_dir: str = None, requirements: str = None
-) -> Dict[str, str]:
-    dashboard_address = f"{self.cluster_dashboard_uri().lstrip('http://')}"
-    to_return = {
-        "cluster_name": self.config.name,
-        "dashboard_address": dashboard_address,
-    }
-    if working_dir:
-        to_return["working_dir"] = working_dir
-    if requirements:
-        to_return["requirements"] = requirements
-    return to_return
-
-
-
-def up(self) -
-
-

Applies the AppWrapper yaml, pushing the resource request onto -the MCAD queue.

-
- -Expand source code - -
def up(self):
-    """
-    Applies the AppWrapper yaml, pushing the resource request onto
-    the MCAD queue.
-    """
-    namespace = self.config.namespace
-    try:
-        config_check()
-        api_instance = client.CustomObjectsApi(api_config_handler())
-        with open(self.app_wrapper_yaml) as f:
-            aw = yaml.load(f, Loader=yaml.FullLoader)
-        api_instance.create_namespaced_custom_object(
-            group="workload.codeflare.dev",
-            version="v1beta1",
-            namespace=namespace,
-            plural="appwrappers",
-            body=aw,
-        )
-    except Exception as e:  # pragma: no cover
-        return _kube_api_error_handling(e)
-
-
-
-def wait_ready(self, timeout: Optional[int] = None) -
-
-

Waits for requested cluster to be ready, up to an optional timeout (s). -Checks every five seconds.

-
- -Expand source code - -
def wait_ready(self, timeout: Optional[int] = None):
-    """
-    Waits for requested cluster to be ready, up to an optional timeout (s).
-    Checks every five seconds.
-    """
-    print("Waiting for requested resources to be set up...")
-    ready = False
-    dashboard_ready = False
-    status = None
-    time = 0
-    while not ready or not dashboard_ready:
-        status, ready = self.status(print_to_console=False)
-        dashboard_ready = self.is_dashboard_ready()
-        if status == CodeFlareClusterStatus.UNKNOWN:
-            print(
-                "WARNING: Current cluster status is unknown, have you run cluster.up yet?"
-            )
-        if not ready or not dashboard_ready:
-            if timeout and time >= timeout:
-                raise TimeoutError(f"wait() timed out after waiting {timeout}s")
-            sleep(5)
-            time += 5
-    print("Requested cluster and dashboard are up and running!")
-
-
-
-
-
-
-
- -
- - - diff --git a/docs/cluster/config.html b/docs/cluster/config.html deleted file mode 100644 index 0575c01c..00000000 --- a/docs/cluster/config.html +++ /dev/null @@ -1,248 +0,0 @@ - - - - - - -codeflare_sdk.cluster.config API documentation - - - - - - - - - - - -
-
-
-

Module codeflare_sdk.cluster.config

-
-
-

The config sub-module contains the definition of the ClusterConfiguration dataclass, -which is used to specify resource requirements and other details when creating a -Cluster object.

-
- -Expand source code - -
# Copyright 2022 IBM, Red Hat
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-The config sub-module contains the definition of the ClusterConfiguration dataclass,
-which is used to specify resource requirements and other details when creating a
-Cluster object.
-"""
-
-from dataclasses import dataclass, field
-import pathlib
-
-dir = pathlib.Path(__file__).parent.parent.resolve()
-
-
-@dataclass
-class ClusterConfiguration:
-    """
-    This dataclass is used to specify resource requirements and other details, and
-    is passed in as an argument when creating a Cluster object.
-    """
-
-    name: str
-    namespace: str = None
-    head_info: list = field(default_factory=list)
-    machine_types: list = field(default_factory=list)  # ["m4.xlarge", "g4dn.xlarge"]
-    min_cpus: int = 1
-    max_cpus: int = 1
-    num_workers: int = 1
-    min_memory: int = 2
-    max_memory: int = 2
-    num_gpus: int = 0
-    template: str = f"{dir}/templates/base-template.yaml"
-    instascale: bool = False
-    envs: dict = field(default_factory=dict)
-    image: str = "quay.io/project-codeflare/ray:2.5.0-py38-cu116"
-    local_interactive: bool = False
-    image_pull_secrets: list = field(default_factory=list)
-    dispatch_priority: str = None
-
-
-
-
-
-
-
-
-
-

Classes

-
-
-class ClusterConfiguration -(name: str, namespace: str = None, head_info: list = <factory>, machine_types: list = <factory>, min_cpus: int = 1, max_cpus: int = 1, num_workers: int = 1, min_memory: int = 2, max_memory: int = 2, num_gpus: int = 0, template: str = '/home/runner/work/codeflare-sdk/codeflare-sdk/src/codeflare_sdk/templates/base-template.yaml', instascale: bool = False, envs: dict = <factory>, image: str = 'quay.io/project-codeflare/ray:2.5.0-py38-cu116', local_interactive: bool = False, image_pull_secrets: list = <factory>, dispatch_priority: str = None) -
-
-

This dataclass is used to specify resource requirements and other details, and -is passed in as an argument when creating a Cluster object.

-
- -Expand source code - -
class ClusterConfiguration:
-    """
-    This dataclass is used to specify resource requirements and other details, and
-    is passed in as an argument when creating a Cluster object.
-    """
-
-    name: str
-    namespace: str = None
-    head_info: list = field(default_factory=list)
-    machine_types: list = field(default_factory=list)  # ["m4.xlarge", "g4dn.xlarge"]
-    min_cpus: int = 1
-    max_cpus: int = 1
-    num_workers: int = 1
-    min_memory: int = 2
-    max_memory: int = 2
-    num_gpus: int = 0
-    template: str = f"{dir}/templates/base-template.yaml"
-    instascale: bool = False
-    envs: dict = field(default_factory=dict)
-    image: str = "quay.io/project-codeflare/ray:2.5.0-py38-cu116"
-    local_interactive: bool = False
-    image_pull_secrets: list = field(default_factory=list)
-    dispatch_priority: str = None
-
-

Class variables

-
-
var dispatch_priority : str
-
-
-
-
var envs : dict
-
-
-
-
var head_info : list
-
-
-
-
var image : str
-
-
-
-
var image_pull_secrets : list
-
-
-
-
var instascale : bool
-
-
-
-
var local_interactive : bool
-
-
-
-
var machine_types : list
-
-
-
-
var max_cpus : int
-
-
-
-
var max_memory : int
-
-
-
-
var min_cpus : int
-
-
-
-
var min_memory : int
-
-
-
-
var name : str
-
-
-
-
var namespace : str
-
-
-
-
var num_gpus : int
-
-
-
-
var num_workers : int
-
-
-
-
var template : str
-
-
-
-
-
-
-
-
- -
- - - diff --git a/docs/cluster/index.html b/docs/cluster/index.html deleted file mode 100644 index a6027e6f..00000000 --- a/docs/cluster/index.html +++ /dev/null @@ -1,91 +0,0 @@ - - - - - - -codeflare_sdk.cluster API documentation - - - - - - - - - - - -
-
-
-

Module codeflare_sdk.cluster

-
-
-
-
-

Sub-modules

-
-
codeflare_sdk.cluster.auth
-
-

The auth sub-module contains the definitions for the Authentication objects, which represent -the methods by which a user can authenticate to their …

-
-
codeflare_sdk.cluster.awload
-
-

The awload sub-module contains the definition of the AWManager object, which handles -submission and deletion of existing AppWrappers from a user's …

-
-
codeflare_sdk.cluster.cluster
-
-

The cluster sub-module contains the definition of the Cluster object, which represents -the resources requested by the user. It also contains functions …

-
-
codeflare_sdk.cluster.config
-
-

The config sub-module contains the definition of the ClusterConfiguration dataclass, -which is used to specify resource requirements and other details …

-
-
codeflare_sdk.cluster.model
-
-

The model sub-module defines Enums containing information for Ray cluster -states and AppWrapper states, and CodeFlare cluster states, as well as -…

-
-
-
-
-
-
-
-
-
-
- -
- - - diff --git a/docs/cluster/model.html b/docs/cluster/model.html deleted file mode 100644 index 7d911255..00000000 --- a/docs/cluster/model.html +++ /dev/null @@ -1,478 +0,0 @@ - - - - - - -codeflare_sdk.cluster.model API documentation - - - - - - - - - - - -
-
-
-

Module codeflare_sdk.cluster.model

-
-
-

The model sub-module defines Enums containing information for Ray cluster -states and AppWrapper states, and CodeFlare cluster states, as well as -dataclasses to store information for Ray clusters and AppWrappers.

-
- -Expand source code - -
# Copyright 2022 IBM, Red Hat
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-The model sub-module defines Enums containing information for Ray cluster
-states and AppWrapper states, and CodeFlare cluster states, as well as
-dataclasses to store information for Ray clusters and AppWrappers.
-"""
-
-from dataclasses import dataclass
-from enum import Enum
-
-
-class RayClusterStatus(Enum):
-    """
-    Defines the possible reportable states of a Ray cluster.
-    """
-
-    # https://github.com/ray-project/kuberay/blob/master/ray-operator/apis/ray/v1alpha1/raycluster_types.go#L95
-    READY = "ready"
-    UNHEALTHY = "unhealthy"
-    FAILED = "failed"
-    UNKNOWN = "unknown"
-
-
-class AppWrapperStatus(Enum):
-    """
-    Defines the possible reportable states of an AppWrapper.
-    """
-
-    QUEUEING = "queueing"
-    PENDING = "pending"
-    RUNNING = "running"
-    FAILED = "failed"
-    DELETED = "deleted"
-    COMPLETED = "completed"
-    RUNNING_HOLD_COMPLETION = "runningholdcompletion"
-
-
-class CodeFlareClusterStatus(Enum):
-    """
-    Defines the possible reportable states of a Codeflare cluster.
-    """
-
-    READY = 1
-    STARTING = 2
-    QUEUED = 3
-    QUEUEING = 4
-    FAILED = 5
-    UNKNOWN = 6
-
-
-@dataclass
-class RayCluster:
-    """
-    For storing information about a Ray cluster.
-    """
-
-    name: str
-    status: RayClusterStatus
-    workers: int
-    worker_mem_min: str
-    worker_mem_max: str
-    worker_cpu: int
-    worker_gpu: int
-    namespace: str
-    dashboard: str
-
-
-@dataclass
-class AppWrapper:
-    """
-    For storing information about an AppWrapper.
-    """
-
-    name: str
-    status: AppWrapperStatus
-    can_run: bool
-    job_state: str
-
-
-
-
-
-
-
-
-
-

Classes

-
-
-class AppWrapper -(name: str, status: AppWrapperStatus, can_run: bool, job_state: str) -
-
-

For storing information about an AppWrapper.

-
- -Expand source code - -
class AppWrapper:
-    """
-    For storing information about an AppWrapper.
-    """
-
-    name: str
-    status: AppWrapperStatus
-    can_run: bool
-    job_state: str
-
-

Class variables

-
-
var can_run : bool
-
-
-
-
var job_state : str
-
-
-
-
var name : str
-
-
-
-
var statusAppWrapperStatus
-
-
-
-
-
-
-class AppWrapperStatus -(value, names=None, *, module=None, qualname=None, type=None, start=1) -
-
-

Defines the possible reportable states of an AppWrapper.

-
- -Expand source code - -
class AppWrapperStatus(Enum):
-    """
-    Defines the possible reportable states of an AppWrapper.
-    """
-
-    QUEUEING = "queueing"
-    PENDING = "pending"
-    RUNNING = "running"
-    FAILED = "failed"
-    DELETED = "deleted"
-    COMPLETED = "completed"
-    RUNNING_HOLD_COMPLETION = "runningholdcompletion"
-
-

Ancestors

-
    -
  • enum.Enum
  • -
-

Class variables

-
-
var COMPLETED
-
-
-
-
var DELETED
-
-
-
-
var FAILED
-
-
-
-
var PENDING
-
-
-
-
var QUEUEING
-
-
-
-
var RUNNING
-
-
-
-
var RUNNING_HOLD_COMPLETION
-
-
-
-
-
-
-class CodeFlareClusterStatus -(value, names=None, *, module=None, qualname=None, type=None, start=1) -
-
-

Defines the possible reportable states of a Codeflare cluster.

-
- -Expand source code - -
class CodeFlareClusterStatus(Enum):
-    """
-    Defines the possible reportable states of a Codeflare cluster.
-    """
-
-    READY = 1
-    STARTING = 2
-    QUEUED = 3
-    QUEUEING = 4
-    FAILED = 5
-    UNKNOWN = 6
-
-

Ancestors

-
    -
  • enum.Enum
  • -
-

Class variables

-
-
var FAILED
-
-
-
-
var QUEUED
-
-
-
-
var QUEUEING
-
-
-
-
var READY
-
-
-
-
var STARTING
-
-
-
-
var UNKNOWN
-
-
-
-
-
-
-class RayCluster -(name: str, status: RayClusterStatus, workers: int, worker_mem_min: str, worker_mem_max: str, worker_cpu: int, worker_gpu: int, namespace: str, dashboard: str) -
-
-

For storing information about a Ray cluster.

-
- -Expand source code - -
class RayCluster:
-    """
-    For storing information about a Ray cluster.
-    """
-
-    name: str
-    status: RayClusterStatus
-    workers: int
-    worker_mem_min: str
-    worker_mem_max: str
-    worker_cpu: int
-    worker_gpu: int
-    namespace: str
-    dashboard: str
-
-

Class variables

-
-
var dashboard : str
-
-
-
-
var name : str
-
-
-
-
var namespace : str
-
-
-
-
var statusRayClusterStatus
-
-
-
-
var worker_cpu : int
-
-
-
-
var worker_gpu : int
-
-
-
-
var worker_mem_max : str
-
-
-
-
var worker_mem_min : str
-
-
-
-
var workers : int
-
-
-
-
-
-
-class RayClusterStatus -(value, names=None, *, module=None, qualname=None, type=None, start=1) -
-
-

Defines the possible reportable states of a Ray cluster.

-
- -Expand source code - -
class RayClusterStatus(Enum):
-    """
-    Defines the possible reportable states of a Ray cluster.
-    """
-
-    # https://github.com/ray-project/kuberay/blob/master/ray-operator/apis/ray/v1alpha1/raycluster_types.go#L95
-    READY = "ready"
-    UNHEALTHY = "unhealthy"
-    FAILED = "failed"
-    UNKNOWN = "unknown"
-
-

Ancestors

-
    -
  • enum.Enum
  • -
-

Class variables

-
-
var FAILED
-
-
-
-
var READY
-
-
-
-
var UNHEALTHY
-
-
-
-
var UNKNOWN
-
-
-
-
-
-
-
-
- -
- - - diff --git a/docs/designs/CodeFlare-SDK-design-doc.md b/docs/designs/CodeFlare-SDK-design-doc.md new file mode 100644 index 00000000..aecdfd56 --- /dev/null +++ b/docs/designs/CodeFlare-SDK-design-doc.md @@ -0,0 +1,141 @@ +# CodeFlare SDK Design Document + +Author: [@varshaprasad96](https://github.com/varshaprasad96) + +## Introduction +This document outlines the design of the Project CodeFlare SDK, a Python SDK that facilitates interactions between users and the distributed workloads component of Red Hat OpenShift AI(RHOAI)/ OpenDataHub(ODH). Users, in this instance, are both data scientists and MLOps Engineers. The SDK provides a high-level abstraction for managing machine learning(ML) workflows, jobs and distributed computing resources. + +This document should be followed when adding new functionality along with being updated as the design evolves. + +## Objectives +1. Provide a Pythonic, user-friendly interface for interaction with distributed workloads components. +2. Integrate with Kueue for managing workload queues, quota management, job orchestration and scheduling. +3. Provide separate functionalities for data scientists/ MLops engineers and platform administrators. +4. Allow data scientists to manage ML workflows, workloads and jobs. +5. Leverage Ray and [Kubeflow][Kubeflow] for workloads and jobs. +6. Ensure extensibility to accommodate future integrations with other tools and frameworks. +7. Follow best practices in the codebase to make it easier for developers to maintain the project. + +## Architecture + +### CodeFlare stack components overview: +1. [CodeFlare SDK][codeflare_sdk_gh]: Primary user-facing component, offering a Python-native interface that bridges users with the underlying services in the stack. This SDK abstracts away much of the complexity, providing a unified and accessible method for submitting and managing the running of distributed AI workloads on Kubernetes clusters. + +2. 
[CodeFlare Operator][codeflare_operator_gh]: Manages the platform requirements for the running of the CodeFlare components: + 1. Ensuring necessary configurations are present on cluster to enable access to Ray dashboards (includes ingress and cert management). + 2. Validate and mutate Ray cluster/AppWrapper CRUD requests (admission policy). + 3. Creates Ray related Routes/Ingresses based on if the environment is OpenShift/Kubernetes. + +3. [AppWrapper][appwrapper_gh]: AppWrapper groups multiple Kubernetes resources into a single, logical workload for easier management. It is integrated with Kueue and accepts the k8s components to be created and managed through a template in the spec which is untyped in the form of RawExtension. + +4. [KubeRay][kuberay_gh]: Operator designed specifically for managing and orchestrating Ray clusters on Kubernetes. It automates the creation and lifecycle management of Ray clusters and jobs, ensuring that they can scale and operate efficiently on K8s. KubeRay also provides a Python client library, enabling developers and data scientists to interact with Ray components directly within their applications. + +5. [Kueue][kueue_gh]: Batch workload queuing and scheduling system designed to optimize resource allocation in K8s clusters. It ensures that Ray Clusters are only scheduled when sufficient resources are available, preventing resource contention and maximizing cluster efficiency. This component plays a critical role in balancing resource availability with the demand for distributed workloads, facilitating efficient job execution. + +At a high level, the interaction between a data scientist, CodeFlare SDK and the rest of the components are explained below: + +![Diagram-1](../images/codeflare_stack_arch.png) + +The role of the SDK in the model training and tuning stack remains consistent on the client side, regardless of any changes to the components it interacts with on the cluster. 
+ +![Diagram-2](../images/codeflare_sdk.png) + +#### Considerations while designing SDK: +1. Ease of use and integration: The SDK’s primary role is to abstract Kubernetes specifics. It should provide simple interfaces for interacting with any of the model training components on the server side. +2. Lightweight: The SDK runs client-side and should minimize resource consumption. It must prioritize responsiveness and user experience. For example, using a polling mechanism to fetch status instead of a constantly watching resources. +3. Extensibility: The SDK currently integrates well with the CodeFlare stack, which uses Ray and the TorchX (pytorch) distributed framework. In the future, components used for distributed training/tuning (as seen in figure [2]) should remain interchangeable. +4. Security: The SDK must ensure users see only the information they are authorized to view. It's the responsibility of the SDK to generate an authenticated request by attaching the right credentials/token during every API call. +5. Typed Object Creation: The client, to the extent possible should allow the creation of known, typed K8s resources. This prevents arbitrary payloads from reaching the server which could be a threat. +6. Release Versioning: The SDK adheres to the Semantic Versioning format and should follow the specifications outlined in the [SemVer standard][semver_std]. +7. Version Compatibility: The SDK must maintain compatibility between client and server versions. Backward compatibility should be ensured even if one side is upgraded. + +#### Codebase Modularization: +The CodeFlare-SDK should adopt a modular architecture to support the seamless addition and modification of components. The project structure must meet the following key requirements: + +1. Component Structure: The SDK should consist of clearly separated components to promote maintainability and scalability. 
For example: + - Distinct Python packages should be created for command modules, helpers, and utilities that interface with Kubernetes (OpenShift) clusters. + - Separate packages should be defined for framework-specific clusters, such as Ray. +2. Self-Contained Packages: Each Python package should function independently as a self-contained module, encapsulating dedicated functionality. This approach will simplify management, improve maintainability, and enable the SDK to evolve more efficiently. +3. Testing Framework: + - Unit tests should be integrated within each submodule, ensuring comprehensive coverage that directly correlates with the corresponding code. + - Integration and upgrade tests, which span multiple components, should be managed independently to validate both individual modules and the system’s end-to-end functionality. + +An example of a modular structure would be: +``` +codeflare_sdk/ +├── build/ +│ ├── requirements.txt # Dependencies required for building the project. +│ ├── Dockerfile # Docker configuration for containerizing the SDK. +│ └── image_stream.yaml # YAML file for building and deploying OpenShift image streams. +├── docs/ # Documentation files for guiding users and contributors. Includes design docs. +├── examples/ # Demonstrative examples showcasing SDK usage. +│ ├── guided_demos/ +│ └── additional_demos/ +├── src/ +│ └── codeflare_sdk/ +│ ├── common/ # Self-contained modules shared across SDK components. +│ │ ├── kubernetes_cluster/ # APIs for interacting with K8s clusters (e.g., authentication). +│ │ │ ├── __init__.py +│ │ │ ├── file.py +│ │ │ └── test_test.py # Unit tests contained in the same module. +│ │ ├── kueue/ # API interactions related to Kueue job queuing. +│ │ ├── widgets/ # Reusable UI components for visualization. +│ │ └── utils/ # General utility functions and helpers used across project. +│ ├── ray/ # Framework-specific APIs for Ray clusters. +│ │ ├── cluster/ # Cluster management utilities for Ray. 
+│ │ ├── client/ # Ray client interaction modules. +│ │ └── appwrapper/ +├── tests/ # End-to-end and upgrade testing modules. +│ ├── e2e/ +│ └── upgrade/ +├── poetry.lock # Project configuration and dependency management (Poetry). +└── pyproject.toml + +``` + +### Client side best practices: +1. The SDK should perform client-side validation of user inputs before sending them to the API server, to the extent possible. These validations should be limited to client-side checks, and the SDK is not responsible for performing validations that require server-side data. The SDK does not need to maintain the state of server-side objects. Examples of client-side validations include verifying user inputs, validating YAML schemas, and checking resource requests/limits based on available data. +2. Any call made to the server should have a client instance passed through it. By default, the SDK should use the same client for all operations unless otherwise specified. For example, the client used to list clusters should also be used to fetch cluster details. The codebase should be designed so that these operations are either methods of the same client (e.g., a k8s client, RayJob client, TrainingClient) or the client instance is passed as an argument to the methods. +3. Errors returned from the API server, to the extent possible, should be wrapped and handled explicitly. + +## Test Coverage: +The project should aim to maintain reasonable test coverage to ensure code quality. Wherever possible, new lines of code should be accompanied by corresponding test cases. Reviewers are encouraged to exercise discretion when merging pull requests (PRs) that result in a drop in test coverage percentage compared to the current state. To aid in tracking coverage, external tools like Codecov can be integrated with the repository, allowing for easier monitoring and ensuring that the codebase maintains a robust level of test coverage over time. 
+ +## Release: +A new version of CodeFlare SDK will be released once every three weeks. +For details on the release support matrix with other CodeFlare components, refer [here][codeflare_compatibility_matrix]. +RHOAI and ODH support matrix: CodeFlare SDK APIs fall under [Tier 2][RH_customer_API_support] support on `RHOAI` platform. This implies than an API would be support in for `n-1` major versions for a minimum of 9 months. + +## Security: +Currently, users must manually manage mTLS certificates and re-authenticate Ray clients when connecting to Ray clusters, though the CodeFlare SDK offers helpers to streamline this process (as seen [here][cert_generation]). In the future, integration with a service mesh like Istio will automate mTLS setup and certificate management via a sidecar proxy, eliminating the need for manual certificate handling and client re-authentication. + +### CVE Management: +We currently use two approaches for scanning CVEs: +1. Dependabot is enabled to automatically bump dependencies with CVE fixes in the upstream repository. +2. A downstream Snyk scanner is used to identify vulnerabilities. + +Depending on the severity of the issue, fixes are prioritized, and a new release with the necessary patches is tagged within the following three-week release window. +To streamline the CVE management process, it is recommended to automate security scans across multiple branches in the upstream repositories. This ensures that the SDK version used with each Notebook release remains free from vulnerabilities throughout its support cycle. + +## Documentation and Examples: +The following are the documentation sources for users which will need to remain updated: +1. Upstream documentation - includes `README` and examples in `docs/` folder. +2. Demo notebooks - The NBs which are to be tested and updated to ensure that they are up-to-date with the most recent changes. +3. 
For RHOAI/ODH customers - [Official documentation][DW_RHOAI_docs] will be updated after every release. + +The API documentation for the users should be clear, up-to-date with any changes, and if possible be generated automatically using appropriate tools to ensure accuracy and consistency. + +Implementation History: +- 2024-10-07: Initial revision + +[Kubeflow]: https://www.kubeflow.org +[codeflare_sdk_gh]: https://github.com/project-codeflare/codeflare-sdk +[codeflare_operator_gh]: https://github.com/project-codeflare/codeflare-operator +[appwrapper_gh]: https://github.com/project-codeflare/appwrapper +[kuberay_gh]: https://github.com/ray-project/kuberay +[kueue_gh]: https://github.com/kubernetes-sigs/kueue +[codeflare_compatibility_matrix]: https://github.com/project-codeflare/codeflare-operator?tab=readme-ov-file#codeflare-operator +[RH_customer_API_support]: https://access.redhat.com/articles/7047935 +[DW_RHOAI_docs]: https://docs.redhat.com/en/documentation/red_hat_openshift_ai_self-managed/2-latest/html-single/working_with_distributed_workloads/index +[cert_generation]: https://github.com/project-codeflare/codeflare-sdk/blob/main/src/codeflare_sdk/common/utils/generate_cert.py +[semver_std]: https://semver.org/#semantic-versioning-specification-semver diff --git a/CodeFlareSDK_Design_Doc.md b/docs/designs/History/CodeFlareSDK_Design_Doc.md similarity index 98% rename from CodeFlareSDK_Design_Doc.md rename to docs/designs/History/CodeFlareSDK_Design_Doc.md index 0274f65d..4992406b 100644 --- a/CodeFlareSDK_Design_Doc.md +++ b/docs/designs/History/CodeFlareSDK_Design_Doc.md @@ -1,3 +1,4 @@ +> 📄 **Note**: This is an older version of the document. The latest version is available [here](../CodeFlare-SDK-design-doc). 
# CodeFlare SDK Design Doc ## Context and Scope diff --git a/docs/generate-documentation.md b/docs/generate-documentation.md new file mode 100644 index 00000000..75b5c7c6 --- /dev/null +++ b/docs/generate-documentation.md @@ -0,0 +1,14 @@ +# Generate CodeFlare Documentation with Sphinx +The following is a short guide on how you can use Sphinx to auto-generate code documentation. Documentation for the latest SDK release can be found [here](https://project-codeflare.github.io/codeflare-sdk/index.html). + +1. Clone the CodeFlare SDK +``` bash +git clone https://github.com/project-codeflare/codeflare-sdk.git +``` +2. [Install Sphinx](https://www.sphinx-doc.org/en/master/usage/installation.html) +3. Run the below command to generate code documentation +``` bash +sphinx-apidoc -o docs/sphinx src/codeflare_sdk "**/*test_*" --force # Generates RST files +make html -C docs/sphinx # Builds HTML files +``` +4. You can access the docs locally at `docs/sphinx/_build/html/index.html` diff --git a/docs/images/codeflare_sdk.png b/docs/images/codeflare_sdk.png new file mode 100644 index 00000000..e33638d8 Binary files /dev/null and b/docs/images/codeflare_sdk.png differ diff --git a/docs/images/codeflare_stack_arch.png b/docs/images/codeflare_stack_arch.png new file mode 100644 index 00000000..1e177fec Binary files /dev/null and b/docs/images/codeflare_stack_arch.png differ diff --git a/docs/index.html b/docs/index.html deleted file mode 100644 index bd408f76..00000000 --- a/docs/index.html +++ /dev/null @@ -1,70 +0,0 @@ - - - - - - -codeflare_sdk API documentation - - - - - - - - - - - -
- - -
- - - diff --git a/docs/job/index.html b/docs/job/index.html deleted file mode 100644 index 2360deec..00000000 --- a/docs/job/index.html +++ /dev/null @@ -1,65 +0,0 @@ - - - - - - -codeflare_sdk.job API documentation - - - - - - - - - - - -
- - -
- - - diff --git a/docs/job/jobs.html b/docs/job/jobs.html deleted file mode 100644 index 96ea4744..00000000 --- a/docs/job/jobs.html +++ /dev/null @@ -1,596 +0,0 @@ - - - - - - -codeflare_sdk.job.jobs API documentation - - - - - - - - - - - -
-
-
-

Module codeflare_sdk.job.jobs

-
-
-
- -Expand source code - -
# Copyright 2023 IBM, Red Hat
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-import abc
-from typing import TYPE_CHECKING, Optional, Dict, List
-from pathlib import Path
-
-from torchx.components.dist import ddp
-from torchx.runner import get_runner
-from torchx.specs import AppHandle, parse_app_handle, AppDryRunInfo
-
-if TYPE_CHECKING:
-    from ..cluster.cluster import Cluster
-from ..cluster.cluster import get_current_namespace
-
-all_jobs: List["Job"] = []
-torchx_runner = get_runner()
-
-
-class JobDefinition(metaclass=abc.ABCMeta):
-    def _dry_run(self, cluster: "Cluster"):
-        pass
-
-    def submit(self, cluster: "Cluster"):
-        pass
-
-
-class Job(metaclass=abc.ABCMeta):
-    def status(self):
-        pass
-
-    def logs(self):
-        pass
-
-
-class DDPJobDefinition(JobDefinition):
-    def __init__(
-        self,
-        script: Optional[str] = None,
-        m: Optional[str] = None,
-        script_args: Optional[List[str]] = None,
-        name: Optional[str] = None,
-        cpu: Optional[int] = None,
-        gpu: Optional[int] = None,
-        memMB: Optional[int] = None,
-        h: Optional[str] = None,
-        j: Optional[str] = None,
-        env: Optional[Dict[str, str]] = None,
-        max_retries: int = 0,
-        mounts: Optional[List[str]] = None,
-        rdzv_port: int = 29500,
-        rdzv_backend: str = None,
-        scheduler_args: Optional[Dict[str, str]] = None,
-        image: Optional[str] = None,
-        workspace: Optional[str] = f"file://{Path.cwd()}",
-    ):
-        if bool(script) == bool(m):  # logical XOR
-            raise ValueError(
-                "Exactly one of the following arguments must be defined: [script, m]."
-            )
-        self.script = script
-        self.m = m
-        self.script_args: List[str] = script_args if script_args is not None else []
-        self.name = name
-        self.cpu = cpu
-        self.gpu = gpu
-        self.memMB = memMB
-        self.h = h
-        self.j = j
-        self.env: Dict[str, str] = env if env is not None else dict()
-        self.max_retries = max_retries
-        self.mounts: List[str] = mounts if mounts is not None else []
-        self.rdzv_port = rdzv_port
-        self.rdzv_backend = rdzv_backend
-        self.scheduler_args: Dict[str, str] = (
-            scheduler_args if scheduler_args is not None else dict()
-        )
-        self.image = image
-        self.workspace = workspace
-
-    def _dry_run(self, cluster: "Cluster"):
-        j = f"{cluster.config.num_workers}x{max(cluster.config.num_gpus, 1)}"  # # of proc. = # of gpus
-        return torchx_runner.dryrun(
-            app=ddp(
-                *self.script_args,
-                script=self.script,
-                m=self.m,
-                name=self.name,
-                h=self.h,
-                cpu=self.cpu if self.cpu is not None else cluster.config.max_cpus,
-                gpu=self.gpu if self.gpu is not None else cluster.config.num_gpus,
-                memMB=self.memMB
-                if self.memMB is not None
-                else cluster.config.max_memory * 1024,
-                j=self.j if self.j is not None else j,
-                env=self.env,
-                max_retries=self.max_retries,
-                rdzv_port=self.rdzv_port,
-                rdzv_backend=self.rdzv_backend
-                if self.rdzv_backend is not None
-                else "static",
-                mounts=self.mounts,
-            ),
-            scheduler=cluster.torchx_scheduler,
-            cfg=cluster.torchx_config(**self.scheduler_args),
-            workspace=self.workspace,
-        )
-
-    def _missing_spec(self, spec: str):
-        raise ValueError(f"Job definition missing arg: {spec}")
-
-    def _dry_run_no_cluster(self):
-        if self.scheduler_args is not None:
-            if self.scheduler_args.get("namespace") is None:
-                self.scheduler_args["namespace"] = get_current_namespace()
-        return torchx_runner.dryrun(
-            app=ddp(
-                *self.script_args,
-                script=self.script,
-                m=self.m,
-                name=self.name if self.name is not None else self._missing_spec("name"),
-                h=self.h,
-                cpu=self.cpu
-                if self.cpu is not None
-                else self._missing_spec("cpu (# cpus per worker)"),
-                gpu=self.gpu
-                if self.gpu is not None
-                else self._missing_spec("gpu (# gpus per worker)"),
-                memMB=self.memMB
-                if self.memMB is not None
-                else self._missing_spec("memMB (memory in MB)"),
-                j=self.j
-                if self.j is not None
-                else self._missing_spec(
-                    "j (`workers`x`procs`)"
-                ),  # # of proc. = # of gpus,
-                env=self.env,  # should this still exist?
-                max_retries=self.max_retries,
-                rdzv_port=self.rdzv_port,  # should this still exist?
-                rdzv_backend=self.rdzv_backend
-                if self.rdzv_backend is not None
-                else "c10d",
-                mounts=self.mounts,
-                image=self.image
-                if self.image is not None
-                else self._missing_spec("image"),
-            ),
-            scheduler="kubernetes_mcad",
-            cfg=self.scheduler_args,
-            workspace="",
-        )
-
-    def submit(self, cluster: "Cluster" = None) -> "Job":
-        return DDPJob(self, cluster)
-
-
-class DDPJob(Job):
-    def __init__(self, job_definition: "DDPJobDefinition", cluster: "Cluster" = None):
-        self.job_definition = job_definition
-        self.cluster = cluster
-        if self.cluster:
-            self._app_handle = torchx_runner.schedule(job_definition._dry_run(cluster))
-        else:
-            self._app_handle = torchx_runner.schedule(
-                job_definition._dry_run_no_cluster()
-            )
-        all_jobs.append(self)
-
-    def status(self) -> str:
-        return torchx_runner.status(self._app_handle)
-
-    def logs(self) -> str:
-        return "".join(torchx_runner.log_lines(self._app_handle, None))
-
-    def cancel(self):
-        torchx_runner.cancel(self._app_handle)
-
-
-
-
-
-
-
-
-
-

Classes

-
-
-class DDPJob -(job_definition: DDPJobDefinition, cluster: Cluster = None) -
-
-
-
- -Expand source code - -
class DDPJob(Job):
-    def __init__(self, job_definition: "DDPJobDefinition", cluster: "Cluster" = None):
-        self.job_definition = job_definition
-        self.cluster = cluster
-        if self.cluster:
-            self._app_handle = torchx_runner.schedule(job_definition._dry_run(cluster))
-        else:
-            self._app_handle = torchx_runner.schedule(
-                job_definition._dry_run_no_cluster()
-            )
-        all_jobs.append(self)
-
-    def status(self) -> str:
-        return torchx_runner.status(self._app_handle)
-
-    def logs(self) -> str:
-        return "".join(torchx_runner.log_lines(self._app_handle, None))
-
-    def cancel(self):
-        torchx_runner.cancel(self._app_handle)
-
-

Ancestors

- -

Methods

-
-
-def cancel(self) -
-
-
-
- -Expand source code - -
def cancel(self):
-    torchx_runner.cancel(self._app_handle)
-
-
-
-def logs(self) ‑> str -
-
-
-
- -Expand source code - -
def logs(self) -> str:
-    return "".join(torchx_runner.log_lines(self._app_handle, None))
-
-
-
-def status(self) ‑> str -
-
-
-
- -Expand source code - -
def status(self) -> str:
-    return torchx_runner.status(self._app_handle)
-
-
-
-
-
-class DDPJobDefinition -(script: Optional[str] = None, m: Optional[str] = None, script_args: Optional[List[str]] = None, name: Optional[str] = None, cpu: Optional[int] = None, gpu: Optional[int] = None, memMB: Optional[int] = None, h: Optional[str] = None, j: Optional[str] = None, env: Optional[Dict[str, str]] = None, max_retries: int = 0, mounts: Optional[List[str]] = None, rdzv_port: int = 29500, rdzv_backend: str = None, scheduler_args: Optional[Dict[str, str]] = None, image: Optional[str] = None, workspace: Optional[str] = 'file:///home/runner/work/codeflare-sdk/codeflare-sdk') -
-
-
-
- -Expand source code - -
class DDPJobDefinition(JobDefinition):
-    def __init__(
-        self,
-        script: Optional[str] = None,
-        m: Optional[str] = None,
-        script_args: Optional[List[str]] = None,
-        name: Optional[str] = None,
-        cpu: Optional[int] = None,
-        gpu: Optional[int] = None,
-        memMB: Optional[int] = None,
-        h: Optional[str] = None,
-        j: Optional[str] = None,
-        env: Optional[Dict[str, str]] = None,
-        max_retries: int = 0,
-        mounts: Optional[List[str]] = None,
-        rdzv_port: int = 29500,
-        rdzv_backend: str = None,
-        scheduler_args: Optional[Dict[str, str]] = None,
-        image: Optional[str] = None,
-        workspace: Optional[str] = f"file://{Path.cwd()}",
-    ):
-        if bool(script) == bool(m):  # logical XOR
-            raise ValueError(
-                "Exactly one of the following arguments must be defined: [script, m]."
-            )
-        self.script = script
-        self.m = m
-        self.script_args: List[str] = script_args if script_args is not None else []
-        self.name = name
-        self.cpu = cpu
-        self.gpu = gpu
-        self.memMB = memMB
-        self.h = h
-        self.j = j
-        self.env: Dict[str, str] = env if env is not None else dict()
-        self.max_retries = max_retries
-        self.mounts: List[str] = mounts if mounts is not None else []
-        self.rdzv_port = rdzv_port
-        self.rdzv_backend = rdzv_backend
-        self.scheduler_args: Dict[str, str] = (
-            scheduler_args if scheduler_args is not None else dict()
-        )
-        self.image = image
-        self.workspace = workspace
-
-    def _dry_run(self, cluster: "Cluster"):
-        j = f"{cluster.config.num_workers}x{max(cluster.config.num_gpus, 1)}"  # # of proc. = # of gpus
-        return torchx_runner.dryrun(
-            app=ddp(
-                *self.script_args,
-                script=self.script,
-                m=self.m,
-                name=self.name,
-                h=self.h,
-                cpu=self.cpu if self.cpu is not None else cluster.config.max_cpus,
-                gpu=self.gpu if self.gpu is not None else cluster.config.num_gpus,
-                memMB=self.memMB
-                if self.memMB is not None
-                else cluster.config.max_memory * 1024,
-                j=self.j if self.j is not None else j,
-                env=self.env,
-                max_retries=self.max_retries,
-                rdzv_port=self.rdzv_port,
-                rdzv_backend=self.rdzv_backend
-                if self.rdzv_backend is not None
-                else "static",
-                mounts=self.mounts,
-            ),
-            scheduler=cluster.torchx_scheduler,
-            cfg=cluster.torchx_config(**self.scheduler_args),
-            workspace=self.workspace,
-        )
-
-    def _missing_spec(self, spec: str):
-        raise ValueError(f"Job definition missing arg: {spec}")
-
-    def _dry_run_no_cluster(self):
-        if self.scheduler_args is not None:
-            if self.scheduler_args.get("namespace") is None:
-                self.scheduler_args["namespace"] = get_current_namespace()
-        return torchx_runner.dryrun(
-            app=ddp(
-                *self.script_args,
-                script=self.script,
-                m=self.m,
-                name=self.name if self.name is not None else self._missing_spec("name"),
-                h=self.h,
-                cpu=self.cpu
-                if self.cpu is not None
-                else self._missing_spec("cpu (# cpus per worker)"),
-                gpu=self.gpu
-                if self.gpu is not None
-                else self._missing_spec("gpu (# gpus per worker)"),
-                memMB=self.memMB
-                if self.memMB is not None
-                else self._missing_spec("memMB (memory in MB)"),
-                j=self.j
-                if self.j is not None
-                else self._missing_spec(
-                    "j (`workers`x`procs`)"
-                ),  # # of proc. = # of gpus,
-                env=self.env,  # should this still exist?
-                max_retries=self.max_retries,
-                rdzv_port=self.rdzv_port,  # should this still exist?
-                rdzv_backend=self.rdzv_backend
-                if self.rdzv_backend is not None
-                else "c10d",
-                mounts=self.mounts,
-                image=self.image
-                if self.image is not None
-                else self._missing_spec("image"),
-            ),
-            scheduler="kubernetes_mcad",
-            cfg=self.scheduler_args,
-            workspace="",
-        )
-
-    def submit(self, cluster: "Cluster" = None) -> "Job":
-        return DDPJob(self, cluster)
-
-

Ancestors

- -

Methods

-
-
-def submit(self, cluster: Cluster = None) ‑> Job -
-
-
-
- -Expand source code - -
def submit(self, cluster: "Cluster" = None) -> "Job":
-    return DDPJob(self, cluster)
-
-
-
-
-
-class Job -
-
-
-
- -Expand source code - -
class Job(metaclass=abc.ABCMeta):
-    def status(self):
-        pass
-
-    def logs(self):
-        pass
-
-

Subclasses

- -

Methods

-
-
-def logs(self) -
-
-
-
- -Expand source code - -
def logs(self):
-    pass
-
-
-
-def status(self) -
-
-
-
- -Expand source code - -
def status(self):
-    pass
-
-
-
-
-
-class JobDefinition -
-
-
-
- -Expand source code - -
class JobDefinition(metaclass=abc.ABCMeta):
-    def _dry_run(self, cluster: "Cluster"):
-        pass
-
-    def submit(self, cluster: "Cluster"):
-        pass
-
-

Subclasses

- -

Methods

-
-
-def submit(self, cluster: Cluster) -
-
-
-
- -Expand source code - -
def submit(self, cluster: "Cluster"):
-    pass
-
-
-
-
-
-
-
- -
- - - diff --git a/docs/sphinx/Makefile b/docs/sphinx/Makefile new file mode 100644 index 00000000..d4bb2cbb --- /dev/null +++ b/docs/sphinx/Makefile @@ -0,0 +1,20 @@ +# Minimal makefile for Sphinx documentation +# + +# You can set these variables from the command line, and also +# from the environment for the first two. +SPHINXOPTS ?= +SPHINXBUILD ?= sphinx-build +SOURCEDIR = . +BUILDDIR = _build + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). +%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/docs/sphinx/conf.py b/docs/sphinx/conf.py new file mode 100644 index 00000000..75f6f16f --- /dev/null +++ b/docs/sphinx/conf.py @@ -0,0 +1,38 @@ +# Configuration file for the Sphinx documentation builder. 
+# +# For the full list of built-in configuration values, see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html + +import os +import sys + +sys.path.insert(0, os.path.abspath("..")) + +# -- Project information ----------------------------------------------------- +# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information + +project = "CodeFlare SDK" +copyright = "2024, Project CodeFlare" +author = "Project CodeFlare" +release = "v0.21.1" + +# -- General configuration --------------------------------------------------- +# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration + +extensions = [ + "sphinx.ext.autodoc", + "sphinx.ext.todo", + "sphinx.ext.viewcode", + "sphinx.ext.autosummary", + "sphinx_rtd_theme", +] + +templates_path = ["_templates"] +exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] + + +# -- Options for HTML output ------------------------------------------------- +# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output + +html_theme = "sphinx_rtd_theme" +html_static_path = ["_static"] diff --git a/docs/sphinx/index.rst b/docs/sphinx/index.rst new file mode 100644 index 00000000..3c6fe876 --- /dev/null +++ b/docs/sphinx/index.rst @@ -0,0 +1,34 @@ +.. CodeFlare SDK documentation master file, created by + sphinx-quickstart on Thu Oct 10 11:27:58 2024. + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + +CodeFlare SDK documentation +=========================== + +The CodeFlare SDK is an intuitive, easy-to-use python interface for batch resource requesting, access, job submission, and observation. Simplifying the developer's life while enabling access to high-performance compute resources, either in the cloud or on-prem. + + +.. toctree:: + :maxdepth: 2 + :caption: Code Documentation: + + modules + +.. 
toctree:: + :maxdepth: 1 + :caption: User Documentation: + + user-docs/authentication + user-docs/cluster-configuration + user-docs/ray-cluster-interaction + user-docs/e2e + user-docs/s3-compatible-storage + user-docs/setup-kueue + user-docs/ui-widgets + +Quick Links +=========== +- `PyPi `__ +- `GitHub `__ +- `OpenShift AI Documentation `__ diff --git a/docs/sphinx/make.bat b/docs/sphinx/make.bat new file mode 100644 index 00000000..32bb2452 --- /dev/null +++ b/docs/sphinx/make.bat @@ -0,0 +1,35 @@ +@ECHO OFF + +pushd %~dp0 + +REM Command file for Sphinx documentation + +if "%SPHINXBUILD%" == "" ( + set SPHINXBUILD=sphinx-build +) +set SOURCEDIR=. +set BUILDDIR=_build + +%SPHINXBUILD% >NUL 2>NUL +if errorlevel 9009 ( + echo. + echo.The 'sphinx-build' command was not found. Make sure you have Sphinx + echo.installed, then set the SPHINXBUILD environment variable to point + echo.to the full path of the 'sphinx-build' executable. Alternatively you + echo.may add the Sphinx directory to PATH. + echo. + echo.If you don't have Sphinx installed, grab it from + echo.https://www.sphinx-doc.org/ + exit /b 1 +) + +if "%1" == "" goto help + +%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% +goto end + +:help +%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% + +:end +popd diff --git a/docs/sphinx/user-docs/authentication.rst b/docs/sphinx/user-docs/authentication.rst new file mode 100644 index 00000000..82441d56 --- /dev/null +++ b/docs/sphinx/user-docs/authentication.rst @@ -0,0 +1,66 @@ +Authentication via the CodeFlare SDK +==================================== + +Currently there are four ways of authenticating to your cluster via the +SDK. Authenticating with your cluster allows you to perform actions such +as creating Ray Clusters and Job Submission. + +Method 1 Token Authentication +----------------------------- + +This is how a typical user would authenticate to their cluster using +``TokenAuthentication``. 
+ +:: + + from codeflare_sdk import TokenAuthentication + + auth = TokenAuthentication( + token = "XXXXX", + server = "XXXXX", + skip_tls=False, + # ca_cert_path="/path/to/cert" + ) + auth.login() + # log out with auth.logout() + +Setting ``skip_tls=True`` allows interaction with an HTTPS server +bypassing the server certificate checks although this is not secure. You +can pass a custom certificate to ``TokenAuthentication`` by using +``ca_cert_path="/path/to/cert"`` when authenticating provided +``skip_tls=False``. Alternatively you can set the environment variable +``CF_SDK_CA_CERT_PATH`` to the path of your custom certificate. + +Method 2 Kubernetes Config File Authentication (Default location) +----------------------------------------------------------------- + +If a user has authenticated to their cluster by alternate means e.g. run +a login command like ``oc login --token= --server=`` +their kubernetes config file should have updated. If the user has not +specifically authenticated through the SDK by other means such as +``TokenAuthentication`` then the SDK will try to use their default +Kubernetes config file located at ``"$HOME/.kube/config"``. + +Method 3 Specifying a Kubernetes Config File +-------------------------------------------- + +A user can specify a config file via a different authentication class +``KubeConfigFileAuthentication`` for authenticating with the SDK. This +is what loading a custom config file would typically look like. + +:: + + from codeflare_sdk import KubeConfigFileAuthentication + + auth = KubeConfigFileAuthentication( + kube_config_path="/path/to/config", + ) + auth.load_kube_config() + # log out with auth.logout() + +Method 4 In-Cluster Authentication +---------------------------------- + +If a user does not authenticate by any of the means detailed above and +does not have a config file at ``"$HOME/.kube/config"`` the SDK will try +to authenticate with the in-cluster configuration file. 
diff --git a/docs/sphinx/user-docs/cluster-configuration.rst b/docs/sphinx/user-docs/cluster-configuration.rst new file mode 100644 index 00000000..f8212823 --- /dev/null +++ b/docs/sphinx/user-docs/cluster-configuration.rst @@ -0,0 +1,157 @@ +Ray Cluster Configuration +========================= + +To create Ray Clusters using the CodeFlare SDK a cluster configuration +needs to be created first. This is what a typical cluster configuration +would look like; Note: The values for CPU and Memory are at the minimum +requirements for creating the Ray Cluster. + +.. code:: python + + from codeflare_sdk import Cluster, ClusterConfiguration + + cluster = Cluster(ClusterConfiguration( + name='ray-example', # Mandatory Field + namespace='default', # Default None + head_cpu_requests=1, # Default 2 + head_cpu_limits=1, # Default 2 + head_memory_requests=1, # Default 8 + head_memory_limits=1, # Default 8 + head_extended_resource_requests={'nvidia.com/gpu':0}, # Default 0 + worker_extended_resource_requests={'nvidia.com/gpu':0}, # Default 0 + num_workers=1, # Default 1 + worker_cpu_requests=1, # Default 1 + worker_cpu_limits=1, # Default 1 + worker_memory_requests=2, # Default 2 + worker_memory_limits=2, # Default 2 + # image="", # Optional Field + labels={"exampleLabel": "example", "secondLabel": "example"}, + annotations={"key1":"value1", "key2":"value2"}, + volumes=[], # See Custom Volumes/Volume Mounts + volume_mounts=[], # See Custom Volumes/Volume Mounts + )) + +.. note:: + The default images used by the CodeFlare SDK for creating + a RayCluster resource depend on the installed Python version: + + - For Python 3.11: `quay.io/modh/ray:2.47.1-py311-cu121` + + If you prefer to use a custom Ray image that better suits your + needs, you can specify it in the image field to override the default. + If you are using ROCm compatible GPUs you + can use `quay.io/modh/ray:2.47.1-py311-rocm62`. You can also find + documentation on building a custom image + `here `__. 
+ +Ray Usage Statistics +------------------- + +By default, Ray usage statistics collection is **disabled** in Ray Clusters created with the Codeflare SDK. This prevents statistics from being captured and sent externally. If you want to enable usage statistics collection, you can simply set the ``enable_usage_stats`` parameter to ``True`` in your cluster configuration: + +.. code:: python + + from codeflare_sdk import Cluster, ClusterConfiguration + + cluster = Cluster(ClusterConfiguration( + name='ray-example', + namespace='default', + enable_usage_stats=True + )) + +This will automatically set the ``RAY_USAGE_STATS_ENABLED`` environment variable to ``1`` for all Ray pods in the cluster. If you do not set this parameter, usage statistics will remain disabled (``RAY_USAGE_STATS_ENABLED=0``). + +The ``labels={"exampleLabel": "example"}`` parameter can be used to +apply additional labels to the RayCluster resource. + +After creating their ``cluster``, a user can call ``cluster.up()`` and +``cluster.down()`` to respectively create or remove the Ray Cluster. + +Custom Volumes/Volume Mounts +---------------------------- +| To add custom Volumes and Volume Mounts to your Ray Cluster you need to create two lists ``volumes`` and ``volume_mounts``. The lists consist of ``V1Volume`` and ``V1VolumeMount`` objects respectively. +| Populating these parameters will create Volumes and Volume Mounts for the head and each worker pod. + +.. 
code:: python + + from kubernetes.client import V1Volume, V1VolumeMount, V1EmptyDirVolumeSource, V1ConfigMapVolumeSource, V1KeyToPath, V1SecretVolumeSource + # In this example we are using the Config Map, EmptyDir and Secret Volume types + volume_mounts_list = [ + V1VolumeMount( + mount_path="/home/ray/test1", + name = "test" + ), + V1VolumeMount( + mount_path = "/home/ray/test2", + name = "test2", + ), + V1VolumeMount( + mount_path = "/home/ray/test3", + name = "test3", + ) + ] + + volumes_list = [ + V1Volume( + name="test", + empty_dir=V1EmptyDirVolumeSource(size_limit="2Gi"), + ), + V1Volume( + name="test2", + config_map=V1ConfigMapVolumeSource( + name="test-config-map", + items=[V1KeyToPath(key="test", path="data.txt")] + ) + ), + V1Volume( + name="test3", + secret=V1SecretVolumeSource( + secret_name="test-secret" + ) + ) + ] + +| For more information on creating Volumes and Volume Mounts with Python check out the Python Kubernetes docs (`Volumes `__, `Volume Mounts `__). +| You can also find further information on Volumes and Volume Mounts by visiting the Kubernetes `documentation `__. + +GCS Fault Tolerance +------------------ +By default, the state of the Ray cluster is transient to the head Pod. Whatever triggers a restart of the head Pod results in losing that state, including Ray Cluster history. To make Ray cluster state persistent you can enable Global Control Service (GCS) fault tolerance with an external Redis storage. + +To configure GCS fault tolerance you need to set the following parameters: + +.. 
list-table:: + :header-rows: 1 + :widths: auto + + * - Parameter + - Description + * - ``enable_gcs_ft`` + - Boolean to enable GCS fault tolerance + * - ``redis_address`` + - Address of the external Redis service, ex: "redis:6379" + * - ``redis_password_secret`` + - Dictionary with 'name' and 'key' fields specifying the Kubernetes secret for Redis password + * - ``external_storage_namespace`` + - Custom storage namespace for GCS fault tolerance (by default, KubeRay sets it to the RayCluster's UID) + +Example configuration: + +.. code:: python + + from codeflare_sdk import Cluster, ClusterConfiguration + + cluster = Cluster(ClusterConfiguration( + name='ray-cluster-with-persistence', + num_workers=2, + enable_gcs_ft=True, + redis_address="redis:6379", + redis_password_secret={ + "name": "redis-password-secret", + "key": "password" + }, + # external_storage_namespace="my-custom-namespace" # Optional: Custom namespace for GCS data in Redis + )) + +.. note:: + You need to have a Redis instance deployed in your Kubernetes cluster before using this feature. diff --git a/docs/sphinx/user-docs/e2e.rst b/docs/sphinx/user-docs/e2e.rst new file mode 100644 index 00000000..6f3d1462 --- /dev/null +++ b/docs/sphinx/user-docs/e2e.rst @@ -0,0 +1,211 @@ +Running e2e tests locally +========================= + +Pre-requisites +^^^^^^^^^^^^^^ + +- We recommend using Python 3.11, along with Poetry. + +On KinD clusters +---------------- + +Pre-requisite for KinD clusters: please add in your local ``/etc/hosts`` +file ``127.0.0.1 kind``. This will map your localhost IP address to the +KinD cluster's hostname. This is already performed on `GitHub +Actions `__ + +If the system you run on contains NVidia GPU then you can enable the GPU +support in KinD, this will allow you to run also GPU tests. To enable +GPU on KinD follow `these +instructions `__. 
+ +- Setup Phase: + + - Pull the `codeflare-operator + repo `__ + and run the following make targets: + + :: + + make kind-e2e + export CLUSTER_HOSTNAME=kind + make setup-e2e + make deploy -e IMG=quay.io/project-codeflare/codeflare-operator:v1.3.0 + + For running tests locally on Kind cluster, we need to disable `rayDashboardOAuthEnabled` in `codeflare-operator-config` ConfigMap and then restart CodeFlare Operator + + - **(Optional)** - Create and add ``sdk-user`` with limited + permissions to the cluster to run through the e2e tests: + + :: + + # Get KinD certificates + docker cp kind-control-plane:/etc/kubernetes/pki/ca.crt . + docker cp kind-control-plane:/etc/kubernetes/pki/ca.key . + + # Generate certificates for new user + openssl genrsa -out user.key 2048 + openssl req -new -key user.key -out user.csr -subj '/CN=sdk-user/O=tenant' + openssl x509 -req -in user.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out user.crt -days 360 + + # Add generated certificated to KinD context + user_crt=$(base64 --wrap=0 user.crt) + user_key=$(base64 --wrap=0 user.key) + yq eval -i ".contexts += {\"context\": {\"cluster\": \"kind-kind\", \"user\": \"sdk-user\"}, \"name\": \"sdk-user\"}" $HOME/.kube/config + yq eval -i ".users += {\"name\": \"sdk-user\", \"user\": {\"client-certificate-data\": \"$user_crt\", \"client-key-data\": \"$user_key\"}}" $HOME/.kube/config + cat $HOME/.kube/config + + # Cleanup + rm ca.crt + rm ca.srl + rm ca.key + rm user.crt + rm user.key + rm user.csr + + # Add RBAC permissions to sdk-user + kubectl create clusterrole list-ingresses --verb=get,list --resource=ingresses + kubectl create clusterrolebinding sdk-user-list-ingresses --clusterrole=list-ingresses --user=sdk-user + kubectl create clusterrole appwrapper-creator --verb=get,list,create,delete,patch --resource=appwrappers + kubectl create clusterrolebinding sdk-user-appwrapper-creator --clusterrole=appwrapper-creator --user=sdk-user + kubectl create clusterrole namespace-creator 
--verb=get,list,create,delete,patch --resource=namespaces + kubectl create clusterrolebinding sdk-user-namespace-creator --clusterrole=namespace-creator --user=sdk-user + kubectl create clusterrole list-rayclusters --verb=get,list --resource=rayclusters + kubectl create clusterrolebinding sdk-user-list-rayclusters --clusterrole=list-rayclusters --user=sdk-user + kubectl config use-context sdk-user + + - Install the latest development version of kueue + + :: + + kubectl apply --server-side -k "github.com/opendatahub-io/kueue/config/rhoai?ref=dev" + +- Test Phase: + + - Once we have the codeflare-operator, kuberay-operator and kueue + running and ready, we can run the e2e test on the codeflare-sdk + repository: + + :: + + poetry install --with test,docs + poetry run pytest -v -s ./tests/e2e/mnist_raycluster_sdk_kind_test.py + + - If the cluster doesn't have NVidia GPU support then we need to + disable NVidia GPU tests by providing proper marker: + + :: + + poetry install --with test,docs + poetry run pytest -v -s ./tests/e2e/mnist_raycluster_sdk_kind_test.py -m 'kind and not nvidia_gpu' + +On OpenShift clusters +--------------------- + +- Setup Phase: + + - Pull the `codeflare-operator + repo `__ + and run the following make targets: + + :: + + + make setup-e2e + make deploy -e IMG=quay.io/project-codeflare/codeflare-operator:v1.3.0 + + - Install the latest development version of kueue + + :: + + kubectl apply --server-side -k "github.com/opendatahub-io/kueue/config/rhoai?ref=dev" + +If the system you run on contains NVidia GPU then you can enable the GPU +support on OpenShift, this will allow you to run also GPU tests. To +enable GPU on OpenShift follow `these +instructions `__. +Currently the SDK doesn't support tolerations, so e2e tests can't be +executed on nodes with taint (i.e. GPU taint). 
+ +- Test Phase: + + - Once we have the codeflare-operator, kuberay-operator and kueue + running and ready, we can run the e2e test on the codeflare-sdk + repository: + + :: + + poetry install --with test,docs + poetry run pytest -v -s ./tests/e2e/mnist_raycluster_sdk_test.py + + - To run the multiple tests based on the cluster environment, we can + run the e2e tests by marking -m with cluster environment (kind or + openshift) + + :: + + poetry run pytest -v -s ./tests/e2e -m openshift + + - By default tests configured with timeout of ``15 minutes``. If + necessary, we can override the timeout using ``--timeout`` option + + :: + + poetry run pytest -v -s ./tests/e2e -m openshift --timeout=1200 + +On OpenShift Disconnected clusters +---------------------------------- + +- In addition to setup phase mentioned above in case of Openshift + cluster, Disconnected environment requires following pre-requisites : + + - Mirror Image registry : + + - Image mirror registry is used to host set of container images + required locally for the applications and services. This + ensures to pull images without needing an external network + connection. It also ensures continuous operation and deployment + capabilities in a network-isolated environment. + + - PYPI Mirror Index : + + - When trying to install Python packages in a disconnected + environment, the pip command might fail because the connection + cannot install packages from external URLs. This issue can be + resolved by setting up PIP Mirror Index on separate endpoint in + same environment. + + - S3 compatible storage : + + - Some of our distributed training examples require an external + storage solution so that all nodes can access the same data in + disconnected environment (For example: common-datasets and + model files). + + - Minio S3 compatible storage type instance can be deployed in + disconnected environment using + ``/tests/e2e/minio_deployment.yaml`` or using support methods + in e2e test suite. 
+ + - The following are environment variables for configuring PIP + index URl for accessing the common-python packages required and + the S3 or Minio storage for your Ray Train script or + interactive session. + + :: + + export RAY_IMAGE=quay.io/project-codeflare/ray@sha256: (prefer image digest over image tag in disocnnected environment) + PIP_INDEX_URL=https:///root/pypi/+simple/ \ + PIP_TRUSTED_HOST= \ + AWS_DEFAULT_ENDPOINT= \ + AWS_ACCESS_KEY_ID= \ + AWS_SECRET_ACCESS_KEY= \ + AWS_STORAGE_BUCKET= + AWS_STORAGE_BUCKET_MNIST_DIR= + + .. note:: + When using the Python Minio client to connect to a minio + storage bucket, the ``AWS_DEFAULT_ENDPOINT`` environment + variable by default expects secure endpoint where user can use + endpoint url with https/http prefix for autodetection of + secure/insecure endpoint. diff --git a/docs/sphinx/user-docs/images/ui-buttons.png b/docs/sphinx/user-docs/images/ui-buttons.png new file mode 100644 index 00000000..a2749292 Binary files /dev/null and b/docs/sphinx/user-docs/images/ui-buttons.png differ diff --git a/docs/sphinx/user-docs/images/ui-view-clusters.png b/docs/sphinx/user-docs/images/ui-view-clusters.png new file mode 100644 index 00000000..f0b12338 Binary files /dev/null and b/docs/sphinx/user-docs/images/ui-view-clusters.png differ diff --git a/docs/sphinx/user-docs/ray-cluster-interaction.rst b/docs/sphinx/user-docs/ray-cluster-interaction.rst new file mode 100644 index 00000000..717f8067 --- /dev/null +++ b/docs/sphinx/user-docs/ray-cluster-interaction.rst @@ -0,0 +1,96 @@ +Ray Cluster Interaction +======================= + +The CodeFlare SDK offers multiple ways to interact with Ray Clusters +including the below methods. + +get_cluster() +------------- + +The ``get_cluster()`` function is used to initialise a ``Cluster`` +object from a pre-existing Ray Cluster/AppWrapper. 
Below is an example +of it's usage: + +:: + + from codeflare_sdk import get_cluster + cluster = get_cluster(cluster_name="raytest", namespace="example", is_appwrapper=False, write_to_file=False) + -> output: Yaml resources loaded for raytest + cluster.status() + -> output: + 🚀 CodeFlare Cluster Status 🚀 + ╭─────────────────────────────────────────────────────────────────╮ + │ Name │ + │ raytest Active ✅ │ + │ │ + │ URI: ray://raytest-head-svc.example.svc:10001 │ + │ │ + │ Dashboard🔗 │ + │ │ + ╰─────────────────────────────────────────────────────────────────╯ + (, True) + cluster.down() + cluster.up() # This function will create an exact copy of the retrieved Ray Cluster only if the Ray Cluster has been previously deleted. + +| These are the parameters the ``get_cluster()`` function accepts: +| ``cluster_name: str # Required`` -> The name of the Ray Cluster. +| ``namespace: str # Default: "default"`` -> The namespace of the Ray Cluster. +| ``is_appwrapper: bool # Default: False`` -> When set to +| ``True`` the function will attempt to retrieve an AppWrapper instead of a Ray Cluster. +| ``write_to_file: bool # Default: False`` -> When set to ``True`` the Ray Cluster/AppWrapper will be written to a file similar to how it is done in ``ClusterConfiguration``. + +list_all_queued() +----------------- + +| The ``list_all_queued()`` function returns (and prints by default) a list of all currently queued-up Ray Clusters in a given namespace. +| It accepts the following parameters: +| ``namespace: str # Required`` -> The namespace you want to retrieve the list from. +| ``print_to_console: bool # Default: True`` -> Allows the user to print the list to their console. +| ``appwrapper: bool # Default: False`` -> When set to ``True`` allows the user to list queued AppWrappers. + +list_all_clusters() +------------------- + +| The ``list_all_clusters()`` function will return a list of detailed descriptions of Ray Clusters to the console by default. 
+| It accepts the following parameters: +| ``namespace: str # Required`` -> The namespace you want to retrieve the list from. +| ``print_to_console: bool # Default: True`` -> A boolean that allows the user to print the list to their console. + +.. note:: + + The following methods require a ``Cluster`` object to be + initialized. See :doc:`./cluster-configuration` + +cluster.up() +------------ + +| The ``cluster.up()`` function creates a Ray Cluster in the given namespace. + +cluster.apply() +------------ + +| The ``cluster.apply()`` function applies a Ray Cluster in the given namespace. If the cluster already exists, it is updated. +| If it does not exist it is created. + +cluster.down() +-------------- + +| The ``cluster.down()`` function deletes the Ray Cluster in the given namespace. + +cluster.status() +---------------- + +| The ``cluster.status()`` function prints out the status of the Ray Cluster's state with a link to the Ray Dashboard. + +cluster.details() +----------------- + +| The ``cluster.details()`` function prints out a detailed description of the Ray Cluster's status, worker resources and a link to the Ray Dashboard. + +cluster.wait_ready() +-------------------- + +| The ``cluster.wait_ready()`` function waits for the requested cluster to be ready, up to an optional timeout and checks every 5 seconds. +| It accepts the following parameters: +| ``timeout: Optional[int] # Default: None`` -> Allows the user to define a timeout for the ``wait_ready()`` function. +| ``dashboard_check: bool # Default: True`` -> If enabled the ``wait_ready()`` function will wait until the Ray Dashboard is ready too. 
diff --git a/docs/sphinx/user-docs/s3-compatible-storage.rst b/docs/sphinx/user-docs/s3-compatible-storage.rst new file mode 100644 index 00000000..0ca2cc0d --- /dev/null +++ b/docs/sphinx/user-docs/s3-compatible-storage.rst @@ -0,0 +1,86 @@ +S3 compatible storage with Ray Train examples +============================================= + +Some of our distributed training examples require an external storage +solution so that all nodes can access the same data. The following are +examples for configuring S3 or Minio storage for your Ray Train script +or interactive session. + +S3 Bucket +--------- + +In your Python Script add the following environment variables: + +.. code:: python + + os.environ["AWS_ACCESS_KEY_ID"] = "XXXXXXXX" + os.environ["AWS_SECRET_ACCESS_KEY"] = "XXXXXXXX" + os.environ["AWS_DEFAULT_REGION"] = "XXXXXXXX" + +Alternatively you can specify these variables in your runtime +environment on Job Submission. + +.. code:: python + + submission_id = client.submit_job( + entrypoint=..., + runtime_env={ + "env_vars": { + "AWS_ACCESS_KEY_ID": os.environ.get('AWS_ACCESS_KEY_ID'), + "AWS_SECRET_ACCESS_KEY": os.environ.get('AWS_SECRET_ACCESS_KEY'), + "AWS_DEFAULT_REGION": os.environ.get('AWS_DEFAULT_REGION') + }, + } + ) + +In your Trainer configuration you can specify a ``run_config`` which +will utilise your external storage. + +.. code:: python + + trainer = TorchTrainer( + train_func_distributed, + scaling_config=scaling_config, + run_config = ray.train.RunConfig(storage_path="s3://BUCKET_NAME/SUB_PATH/", name="unique_run_name") + ) + +To learn more about Amazon S3 Storage you can find information +`here `__. + +Minio Bucket +------------ + +In your Python Script add the following function for configuring your +run_config: + +.. 
code:: python + + import s3fs + import pyarrow + + def get_minio_run_config(): + s3_fs = s3fs.S3FileSystem( + key = os.getenv('MINIO_ACCESS_KEY', "XXXXX"), + secret = os.getenv('MINIO_SECRET_ACCESS_KEY', "XXXXX"), + endpoint_url = os.getenv('MINIO_URL', "XXXXX") + ) + custom_fs = pyarrow.fs.PyFileSystem(pyarrow.fs.FSSpecHandler(s3_fs)) + run_config = ray.train.RunConfig(storage_path='training', storage_filesystem=custom_fs) + return run_config + +You can update the ``run_config`` to further suit your needs above. +Lastly the new ``run_config`` must be added to the Trainer: + +.. code:: python + + trainer = TorchTrainer( + train_func_distributed, + scaling_config=scaling_config, + run_config = get_minio_run_config() + ) + +To find more information on creating a Minio Bucket compatible with +RHOAI you can refer to this +`documentation `__. +Note: You must have ``s3fs`` and ``pyarrow`` installed in your +environment for this method. diff --git a/docs/sphinx/user-docs/setup-kueue.rst b/docs/sphinx/user-docs/setup-kueue.rst new file mode 100644 index 00000000..1f2bdc04 --- /dev/null +++ b/docs/sphinx/user-docs/setup-kueue.rst @@ -0,0 +1,108 @@ +Basic Kueue Resources configuration +=================================== + +Introduction: +------------- + +This document is designed for administrators who have Kueue installed on +their cluster. We will walk through the process of setting up essential +Kueue resources, namely Cluster Queue, Resource Flavor, and Local Queue. + +1. Resource Flavor: +------------------- + +Resource Flavors allow the cluster admin to reflect differing resource capabilities +of nodes within a clusters, such as CPU, memory, GPU, etc. These can then be assigned +to workloads to ensure they are executed on nodes with appropriate resources. + +The YAML configuration provided below creates an empty Resource Flavor +named default-flavor. It serves as a starting point and does not specify +any detailed resource characteristics. + +.. 
code:: yaml + + apiVersion: kueue.x-k8s.io/v1beta1 + kind: ResourceFlavor + metadata: + name: default-flavor + +For more detailed information on Resource Flavor configuration options, +refer to the Kueue documentation: `Resource Flavor +Configuration `__ + +2. Cluster Queue: +----------------- + +A Cluster Queue represents a shared queue across the entire cluster. It +allows the cluster admin to define global settings for workload +prioritization and resource allocation. + +When setting up a Cluster Queue in Kueue, it’s crucial that the resource +specifications match the actual capacities and operational requirements +of your cluster. The example provided outlines a basic setup; however, +each cluster may have different resource availabilities and needs. + +.. code:: yaml + + apiVersion: kueue.x-k8s.io/v1beta1 + kind: ClusterQueue + metadata: + name: "cluster-queue" + spec: + namespaceSelector: {} # match all. + resourceGroups: + - coveredResources: ["cpu", "memory", "pods", "nvidia.com/gpu"] + flavors: + - name: "default-flavor" + resources: + - name: "cpu" + nominalQuota: 9 + - name: "memory" + nominalQuota: 36Gi + - name: "pods" + nominalQuota: 5 + - name: "nvidia.com/gpu" + nominalQuota: '0' + +For more detailed information on Cluster Queue configuration options, +refer to the Kueue documentation: `Cluster Queue +Configuration `__ + +3. Local Queue (With Default Annotation): +----------------------------------------- + +A Local Queue represents a queue associated with a specific namespace +within the cluster. It allows namespace-level control over workload +prioritization and resource allocation. + +.. code:: yaml + + apiVersion: kueue.x-k8s.io/v1beta1 + kind: LocalQueue + metadata: + namespace: team-a + name: team-a-queue + annotations: + kueue.x-k8s.io/default-queue: "true" + spec: + clusterQueue: cluster-queue + +In the LocalQueue configuration provided above, the annotations field +specifies ``kueue.x-k8s.io/default-queue: "true"``. 
This annotation +indicates that the team-a-queue is designated as the default queue for +the team-a namespace. When this is set, any workloads submitted to the +team-a namespace without explicitly specifying a queue will +automatically be routed to the team-a-queue. + +For more detailed information on Local Queue configuration options, +refer to the Kueue documentation: `Local Queue +Configuration `__ + +Conclusion: +----------- + +By following the steps outlined in this document, the cluster admin can +successfully create the basic Kueue resources necessary for workload +management in the cluster. For more advanced configurations and +features, please refer to the comprehensive `Kueue +documentation `__. diff --git a/docs/sphinx/user-docs/ui-widgets.rst b/docs/sphinx/user-docs/ui-widgets.rst new file mode 100644 index 00000000..92335423 --- /dev/null +++ b/docs/sphinx/user-docs/ui-widgets.rst @@ -0,0 +1,57 @@ +Jupyter UI Widgets +================== + +Below are some examples of the Jupyter UI Widgets that are included in +the CodeFlare SDK. + +.. note:: + To use the widgets functionality you must be using the CodeFlare SDK in a Jupyter Notebook environment. + +Cluster Up/Down Buttons +----------------------- + +The Cluster Up/Down buttons appear after successfully initialising your +`ClusterConfiguration `__. +There are two buttons and a checkbox ``Cluster Up``, ``Cluster Down`` +and ``Wait for Cluster?`` which mimic the +`cluster.up() `__, +`cluster.down() `__ and +`cluster.wait_ready() `__ +functionality. + +After initialising their ``ClusterConfiguration`` a user can select the +``Wait for Cluster?`` checkbox then click the ``Cluster Up`` button to +create their Ray Cluster and wait until it is ready. The cluster can be +deleted by clicking the ``Cluster Down`` button. + +.. 
image:: images/ui-buttons.png + :alt: An image of the up/down ui buttons + +View Clusters UI Table +---------------------- + +The View Clusters UI Table allows a user to see a list of Ray Clusters +with information on their configuration including number of workers, CPU +requests and limits along with the clusters status. + +.. image:: images/ui-view-clusters.png + :alt: An image of the view clusters ui table + +Above is a list of two Ray Clusters ``raytest`` and ``raytest2`` each of +those headings is clickable and will update the table to view the +selected Cluster's information. There are four buttons under the table +``Cluster Down``, ``View Jobs``, ``Open Ray Dashboard``, and ``Refresh Data``. \* The +``Cluster Down`` button will delete the selected Cluster. \* The +``View Jobs`` button will try to open the Ray Dashboard's Jobs view in a +Web Browser. The link will also be printed to the console. \* The +``Open Ray Dashboard`` button will try to open the Ray Dashboard view in +a Web Browser. The link will also be printed to the console. \* The +``Refresh Data`` button will refresh the list of RayClusters, the spec, and +the status of the Ray Cluster. + +The UI Table can be viewed by calling the following function. + +.. code:: python + + from codeflare_sdk import view_clusters + view_clusters() # Accepts namespace parameter but will try to gather the namespace from the current context diff --git a/docs/utils/generate_cert.html b/docs/utils/generate_cert.html deleted file mode 100644 index b41846f9..00000000 --- a/docs/utils/generate_cert.html +++ /dev/null @@ -1,396 +0,0 @@ - - - - - - -codeflare_sdk.utils.generate_cert API documentation - - - - - - - - - - - -
-
-
-

Module codeflare_sdk.utils.generate_cert

-
-
-
- -Expand source code - -
# Copyright 2022 IBM, Red Hat
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import base64
-import os
-from cryptography.hazmat.primitives import serialization, hashes
-from cryptography.hazmat.primitives.asymmetric import rsa
-from cryptography import x509
-from cryptography.x509.oid import NameOID
-import datetime
-from ..cluster.auth import config_check, api_config_handler
-from kubernetes import client, config
-
-
-def generate_ca_cert(days: int = 30):
-    # Generate base64 encoded ca.key and ca.cert
-    # Similar to:
-    # openssl req -x509 -nodes -newkey rsa:2048 -keyout ca.key -days 1826 -out ca.crt -subj '/CN=root-ca'
-    # base64 -i ca.crt -i ca.key
-
-    private_key = rsa.generate_private_key(
-        public_exponent=65537,
-        key_size=2048,
-    )
-
-    key = base64.b64encode(
-        private_key.private_bytes(
-            serialization.Encoding.PEM,
-            serialization.PrivateFormat.PKCS8,
-            serialization.NoEncryption(),
-        )
-    ).decode("utf-8")
-
-    # Generate Certificate
-    one_day = datetime.timedelta(1, 0, 0)
-    public_key = private_key.public_key()
-    builder = (
-        x509.CertificateBuilder()
-        .subject_name(
-            x509.Name(
-                [
-                    x509.NameAttribute(NameOID.COMMON_NAME, "root-ca"),
-                ]
-            )
-        )
-        .issuer_name(
-            x509.Name(
-                [
-                    x509.NameAttribute(NameOID.COMMON_NAME, "root-ca"),
-                ]
-            )
-        )
-        .not_valid_before(datetime.datetime.today() - one_day)
-        .not_valid_after(datetime.datetime.today() + (one_day * days))
-        .serial_number(x509.random_serial_number())
-        .public_key(public_key)
-    )
-    certificate = base64.b64encode(
-        builder.sign(private_key=private_key, algorithm=hashes.SHA256()).public_bytes(
-            serialization.Encoding.PEM
-        )
-    ).decode("utf-8")
-    return key, certificate
-
-
-def generate_tls_cert(cluster_name, namespace, days=30):
-    # Create a folder tls-<cluster>-<namespace> and store three files: ca.crt, tls.crt, and tls.key
-    tls_dir = os.path.join(os.getcwd(), f"tls-{cluster_name}-{namespace}")
-    if not os.path.exists(tls_dir):
-        os.makedirs(tls_dir)
-
-    # Similar to:
-    # oc get secret ca-secret-<cluster-name> -o template='{{index .data "ca.key"}}'
-    # oc get secret ca-secret-<cluster-name> -o template='{{index .data "ca.crt"}}'|base64 -d > ${TLSDIR}/ca.crt
-    config_check()
-    v1 = client.CoreV1Api(api_config_handler())
-    secret = v1.read_namespaced_secret(f"ca-secret-{cluster_name}", namespace).data
-    ca_cert = secret.get("ca.crt")
-    ca_key = secret.get("ca.key")
-
-    with open(os.path.join(tls_dir, "ca.crt"), "w") as f:
-        f.write(base64.b64decode(ca_cert).decode("utf-8"))
-
-    # Generate tls.key and signed tls.cert locally for ray client
-    # Similar to running these commands:
-    # openssl req -nodes -newkey rsa:2048 -keyout ${TLSDIR}/tls.key -out ${TLSDIR}/tls.csr -subj '/CN=local'
-    # cat <<EOF >${TLSDIR}/domain.ext
-    # authorityKeyIdentifier=keyid,issuer
-    # basicConstraints=CA:FALSE
-    # subjectAltName = @alt_names
-    # [alt_names]
-    # DNS.1 = 127.0.0.1
-    # DNS.2 = localhost
-    # EOF
-    # openssl x509 -req -CA ${TLSDIR}/ca.crt -CAkey ${TLSDIR}/ca.key -in ${TLSDIR}/tls.csr -out ${TLSDIR}/tls.crt -days 365 -CAcreateserial -extfile ${TLSDIR}/domain.ext
-    key = rsa.generate_private_key(
-        public_exponent=65537,
-        key_size=2048,
-    )
-
-    tls_key = key.private_bytes(
-        serialization.Encoding.PEM,
-        serialization.PrivateFormat.PKCS8,
-        serialization.NoEncryption(),
-    )
-    with open(os.path.join(tls_dir, "tls.key"), "w") as f:
-        f.write(tls_key.decode("utf-8"))
-
-    one_day = datetime.timedelta(1, 0, 0)
-    tls_cert = (
-        x509.CertificateBuilder()
-        .issuer_name(
-            x509.Name(
-                [
-                    x509.NameAttribute(NameOID.COMMON_NAME, "root-ca"),
-                ]
-            )
-        )
-        .subject_name(
-            x509.Name(
-                [
-                    x509.NameAttribute(NameOID.COMMON_NAME, "local"),
-                ]
-            )
-        )
-        .public_key(key.public_key())
-        .not_valid_before(datetime.datetime.today() - one_day)
-        .not_valid_after(datetime.datetime.today() + (one_day * days))
-        .serial_number(x509.random_serial_number())
-        .add_extension(
-            x509.SubjectAlternativeName(
-                [x509.DNSName("localhost"), x509.DNSName("127.0.0.1")]
-            ),
-            False,
-        )
-        .sign(
-            serialization.load_pem_private_key(base64.b64decode(ca_key), None),
-            hashes.SHA256(),
-        )
-    )
-
-    with open(os.path.join(tls_dir, "tls.crt"), "w") as f:
-        f.write(tls_cert.public_bytes(serialization.Encoding.PEM).decode("utf-8"))
-
-
-def export_env(cluster_name, namespace):
-    tls_dir = os.path.join(os.getcwd(), f"tls-{cluster_name}-{namespace}")
-    os.environ["RAY_USE_TLS"] = "1"
-    os.environ["RAY_TLS_SERVER_CERT"] = os.path.join(tls_dir, "tls.crt")
-    os.environ["RAY_TLS_SERVER_KEY"] = os.path.join(tls_dir, "tls.key")
-    os.environ["RAY_TLS_CA_CERT"] = os.path.join(tls_dir, "ca.crt")
-
-
-
-
-
-
-
-

Functions

-
-
-def export_env(cluster_name, namespace) -
-
-
-
- -Expand source code - -
def export_env(cluster_name, namespace):
-    tls_dir = os.path.join(os.getcwd(), f"tls-{cluster_name}-{namespace}")
-    os.environ["RAY_USE_TLS"] = "1"
-    os.environ["RAY_TLS_SERVER_CERT"] = os.path.join(tls_dir, "tls.crt")
-    os.environ["RAY_TLS_SERVER_KEY"] = os.path.join(tls_dir, "tls.key")
-    os.environ["RAY_TLS_CA_CERT"] = os.path.join(tls_dir, "ca.crt")
-
-
-
-def generate_ca_cert(days: int = 30) -
-
-
-
- -Expand source code - -
def generate_ca_cert(days: int = 30):
-    # Generate base64 encoded ca.key and ca.cert
-    # Similar to:
-    # openssl req -x509 -nodes -newkey rsa:2048 -keyout ca.key -days 1826 -out ca.crt -subj '/CN=root-ca'
-    # base64 -i ca.crt -i ca.key
-
-    private_key = rsa.generate_private_key(
-        public_exponent=65537,
-        key_size=2048,
-    )
-
-    key = base64.b64encode(
-        private_key.private_bytes(
-            serialization.Encoding.PEM,
-            serialization.PrivateFormat.PKCS8,
-            serialization.NoEncryption(),
-        )
-    ).decode("utf-8")
-
-    # Generate Certificate
-    one_day = datetime.timedelta(1, 0, 0)
-    public_key = private_key.public_key()
-    builder = (
-        x509.CertificateBuilder()
-        .subject_name(
-            x509.Name(
-                [
-                    x509.NameAttribute(NameOID.COMMON_NAME, "root-ca"),
-                ]
-            )
-        )
-        .issuer_name(
-            x509.Name(
-                [
-                    x509.NameAttribute(NameOID.COMMON_NAME, "root-ca"),
-                ]
-            )
-        )
-        .not_valid_before(datetime.datetime.today() - one_day)
-        .not_valid_after(datetime.datetime.today() + (one_day * days))
-        .serial_number(x509.random_serial_number())
-        .public_key(public_key)
-    )
-    certificate = base64.b64encode(
-        builder.sign(private_key=private_key, algorithm=hashes.SHA256()).public_bytes(
-            serialization.Encoding.PEM
-        )
-    ).decode("utf-8")
-    return key, certificate
-
-
-
-def generate_tls_cert(cluster_name, namespace, days=30) -
-
-
-
- -Expand source code - -
def generate_tls_cert(cluster_name, namespace, days=30):
-    # Create a folder tls-<cluster>-<namespace> and store three files: ca.crt, tls.crt, and tls.key
-    tls_dir = os.path.join(os.getcwd(), f"tls-{cluster_name}-{namespace}")
-    if not os.path.exists(tls_dir):
-        os.makedirs(tls_dir)
-
-    # Similar to:
-    # oc get secret ca-secret-<cluster-name> -o template='{{index .data "ca.key"}}'
-    # oc get secret ca-secret-<cluster-name> -o template='{{index .data "ca.crt"}}'|base64 -d > ${TLSDIR}/ca.crt
-    config_check()
-    v1 = client.CoreV1Api(api_config_handler())
-    secret = v1.read_namespaced_secret(f"ca-secret-{cluster_name}", namespace).data
-    ca_cert = secret.get("ca.crt")
-    ca_key = secret.get("ca.key")
-
-    with open(os.path.join(tls_dir, "ca.crt"), "w") as f:
-        f.write(base64.b64decode(ca_cert).decode("utf-8"))
-
-    # Generate tls.key and signed tls.cert locally for ray client
-    # Similar to running these commands:
-    # openssl req -nodes -newkey rsa:2048 -keyout ${TLSDIR}/tls.key -out ${TLSDIR}/tls.csr -subj '/CN=local'
-    # cat <<EOF >${TLSDIR}/domain.ext
-    # authorityKeyIdentifier=keyid,issuer
-    # basicConstraints=CA:FALSE
-    # subjectAltName = @alt_names
-    # [alt_names]
-    # DNS.1 = 127.0.0.1
-    # DNS.2 = localhost
-    # EOF
-    # openssl x509 -req -CA ${TLSDIR}/ca.crt -CAkey ${TLSDIR}/ca.key -in ${TLSDIR}/tls.csr -out ${TLSDIR}/tls.crt -days 365 -CAcreateserial -extfile ${TLSDIR}/domain.ext
-    key = rsa.generate_private_key(
-        public_exponent=65537,
-        key_size=2048,
-    )
-
-    tls_key = key.private_bytes(
-        serialization.Encoding.PEM,
-        serialization.PrivateFormat.PKCS8,
-        serialization.NoEncryption(),
-    )
-    with open(os.path.join(tls_dir, "tls.key"), "w") as f:
-        f.write(tls_key.decode("utf-8"))
-
-    one_day = datetime.timedelta(1, 0, 0)
-    tls_cert = (
-        x509.CertificateBuilder()
-        .issuer_name(
-            x509.Name(
-                [
-                    x509.NameAttribute(NameOID.COMMON_NAME, "root-ca"),
-                ]
-            )
-        )
-        .subject_name(
-            x509.Name(
-                [
-                    x509.NameAttribute(NameOID.COMMON_NAME, "local"),
-                ]
-            )
-        )
-        .public_key(key.public_key())
-        .not_valid_before(datetime.datetime.today() - one_day)
-        .not_valid_after(datetime.datetime.today() + (one_day * days))
-        .serial_number(x509.random_serial_number())
-        .add_extension(
-            x509.SubjectAlternativeName(
-                [x509.DNSName("localhost"), x509.DNSName("127.0.0.1")]
-            ),
-            False,
-        )
-        .sign(
-            serialization.load_pem_private_key(base64.b64decode(ca_key), None),
-            hashes.SHA256(),
-        )
-    )
-
-    with open(os.path.join(tls_dir, "tls.crt"), "w") as f:
-        f.write(tls_cert.public_bytes(serialization.Encoding.PEM).decode("utf-8"))
-
-
-
-
-
-
-
- -
- - - diff --git a/docs/utils/generate_yaml.html b/docs/utils/generate_yaml.html deleted file mode 100644 index ea1e7302..00000000 --- a/docs/utils/generate_yaml.html +++ /dev/null @@ -1,1033 +0,0 @@ - - - - - - -codeflare_sdk.utils.generate_yaml API documentation - - - - - - - - - - - -
-
-
-

Module codeflare_sdk.utils.generate_yaml

-
-
-

This sub-module exists primarily to be used internally by the Cluster object -(in the cluster sub-module) for AppWrapper generation.

-
- -Expand source code - -
# Copyright 2022 IBM, Red Hat
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-This sub-module exists primarily to be used internally by the Cluster object
-(in the cluster sub-module) for AppWrapper generation.
-"""
-
-import yaml
-import sys
-import argparse
-import uuid
-from kubernetes import client, config
-from .kube_api_helpers import _kube_api_error_handling
-from ..cluster.auth import api_config_handler
-
-
-def read_template(template):
-    with open(template, "r") as stream:
-        try:
-            return yaml.safe_load(stream)
-        except yaml.YAMLError as exc:
-            print(exc)
-
-
-def gen_names(name):
-    if not name:
-        gen_id = str(uuid.uuid4())
-        appwrapper_name = "appwrapper-" + gen_id
-        cluster_name = "cluster-" + gen_id
-        return appwrapper_name, cluster_name
-    else:
-        return name, name
-
-
-def update_dashboard_route(route_item, cluster_name, namespace):
-    metadata = route_item.get("generictemplate", {}).get("metadata")
-    metadata["name"] = f"ray-dashboard-{cluster_name}"
-    metadata["namespace"] = namespace
-    metadata["labels"]["odh-ray-cluster-service"] = f"{cluster_name}-head-svc"
-    spec = route_item.get("generictemplate", {}).get("spec")
-    spec["to"]["name"] = f"{cluster_name}-head-svc"
-
-
-# ToDo: refactor the update_x_route() functions
-def update_rayclient_route(route_item, cluster_name, namespace):
-    metadata = route_item.get("generictemplate", {}).get("metadata")
-    metadata["name"] = f"rayclient-{cluster_name}"
-    metadata["namespace"] = namespace
-    metadata["labels"]["odh-ray-cluster-service"] = f"{cluster_name}-head-svc"
-    spec = route_item.get("generictemplate", {}).get("spec")
-    spec["to"]["name"] = f"{cluster_name}-head-svc"
-
-
-def update_names(yaml, item, appwrapper_name, cluster_name, namespace):
-    metadata = yaml.get("metadata")
-    metadata["name"] = appwrapper_name
-    metadata["namespace"] = namespace
-    lower_meta = item.get("generictemplate", {}).get("metadata")
-    lower_meta["labels"]["appwrapper.mcad.ibm.com"] = appwrapper_name
-    lower_meta["name"] = cluster_name
-    lower_meta["namespace"] = namespace
-
-
-def update_labels(yaml, instascale, instance_types):
-    metadata = yaml.get("metadata")
-    if instascale:
-        if not len(instance_types) > 0:
-            sys.exit(
-                "If instascale is set to true, must provide at least one instance type"
-            )
-        type_str = ""
-        for type in instance_types:
-            type_str += type + "_"
-        type_str = type_str[:-1]
-        metadata["labels"]["orderedinstance"] = type_str
-    else:
-        metadata.pop("labels")
-
-
-def update_priority(yaml, item, dispatch_priority, priority_val):
-    spec = yaml.get("spec")
-    if dispatch_priority is not None:
-        if priority_val:
-            spec["priority"] = priority_val
-        else:
-            raise ValueError(
-                "AW generation error: Priority value is None, while dispatch_priority is defined"
-            )
-        head = item.get("generictemplate").get("spec").get("headGroupSpec")
-        worker = item.get("generictemplate").get("spec").get("workerGroupSpecs")[0]
-        head["template"]["spec"]["priorityClassName"] = dispatch_priority
-        worker["template"]["spec"]["priorityClassName"] = dispatch_priority
-    else:
-        spec.pop("priority")
-
-
-def update_custompodresources(
-    item, min_cpu, max_cpu, min_memory, max_memory, gpu, workers
-):
-    if "custompodresources" in item.keys():
-        custompodresources = item.get("custompodresources")
-        for i in range(len(custompodresources)):
-            if i == 0:
-                # Leave head node resources as template default
-                continue
-            resource = custompodresources[i]
-            for k, v in resource.items():
-                if k == "replicas" and i == 1:
-                    resource[k] = workers
-                if k == "requests" or k == "limits":
-                    for spec, _ in v.items():
-                        if spec == "cpu":
-                            if k == "limits":
-                                resource[k][spec] = max_cpu
-                            else:
-                                resource[k][spec] = min_cpu
-                        if spec == "memory":
-                            if k == "limits":
-                                resource[k][spec] = str(max_memory) + "G"
-                            else:
-                                resource[k][spec] = str(min_memory) + "G"
-                        if spec == "nvidia.com/gpu":
-                            if i == 0:
-                                resource[k][spec] = 0
-                            else:
-                                resource[k][spec] = gpu
-    else:
-        sys.exit("Error: malformed template")
-
-
-def update_affinity(spec, appwrapper_name, instascale):
-    if instascale:
-        node_selector_terms = (
-            spec.get("affinity")
-            .get("nodeAffinity")
-            .get("requiredDuringSchedulingIgnoredDuringExecution")
-            .get("nodeSelectorTerms")
-        )
-        node_selector_terms[0]["matchExpressions"][0]["values"][0] = appwrapper_name
-        node_selector_terms[0]["matchExpressions"][0]["key"] = appwrapper_name
-    else:
-        spec.pop("affinity")
-
-
-def update_image(spec, image):
-    containers = spec.get("containers")
-    for container in containers:
-        container["image"] = image
-
-
-def update_image_pull_secrets(spec, image_pull_secrets):
-    template_secrets = spec.get("imagePullSecrets", [])
-    spec["imagePullSecrets"] = template_secrets + [
-        {"name": x} for x in image_pull_secrets
-    ]
-
-
-def update_env(spec, env):
-    containers = spec.get("containers")
-    for container in containers:
-        if env:
-            if "env" in container:
-                container["env"].extend(env)
-            else:
-                container["env"] = env
-
-
-def update_resources(spec, min_cpu, max_cpu, min_memory, max_memory, gpu):
-    container = spec.get("containers")
-    for resource in container:
-        requests = resource.get("resources").get("requests")
-        if requests is not None:
-            requests["cpu"] = min_cpu
-            requests["memory"] = str(min_memory) + "G"
-            requests["nvidia.com/gpu"] = gpu
-        limits = resource.get("resources").get("limits")
-        if limits is not None:
-            limits["cpu"] = max_cpu
-            limits["memory"] = str(max_memory) + "G"
-            limits["nvidia.com/gpu"] = gpu
-
-
-def update_nodes(
-    item,
-    appwrapper_name,
-    min_cpu,
-    max_cpu,
-    min_memory,
-    max_memory,
-    gpu,
-    workers,
-    image,
-    instascale,
-    env,
-    image_pull_secrets,
-):
-    if "generictemplate" in item.keys():
-        head = item.get("generictemplate").get("spec").get("headGroupSpec")
-        worker = item.get("generictemplate").get("spec").get("workerGroupSpecs")[0]
-
-        # Head counts as first worker
-        worker["replicas"] = workers
-        worker["minReplicas"] = workers
-        worker["maxReplicas"] = workers
-        worker["groupName"] = "small-group-" + appwrapper_name
-        worker["rayStartParams"]["num-gpus"] = str(int(gpu))
-
-        for comp in [head, worker]:
-            spec = comp.get("template").get("spec")
-            update_affinity(spec, appwrapper_name, instascale)
-            update_image_pull_secrets(spec, image_pull_secrets)
-            update_image(spec, image)
-            update_env(spec, env)
-            if comp == head:
-                # TODO: Eventually add head node configuration outside of template
-                continue
-            else:
-                update_resources(spec, min_cpu, max_cpu, min_memory, max_memory, gpu)
-
-
-def update_ca_secret(ca_secret_item, cluster_name, namespace):
-    from . import generate_cert
-
-    metadata = ca_secret_item.get("generictemplate", {}).get("metadata")
-    metadata["name"] = f"ca-secret-{cluster_name}"
-    metadata["namespace"] = namespace
-    metadata["labels"]["odh-ray-cluster-service"] = f"{cluster_name}-head-svc"
-    data = ca_secret_item.get("generictemplate", {}).get("data")
-    data["ca.key"], data["ca.crt"] = generate_cert.generate_ca_cert(365)
-
-
-def enable_local_interactive(resources, cluster_name, namespace):
-    rayclient_route_item = resources["resources"].get("GenericItems")[2]
-    ca_secret_item = resources["resources"].get("GenericItems")[3]
-    item = resources["resources"].get("GenericItems")[0]
-    update_rayclient_route(rayclient_route_item, cluster_name, namespace)
-    update_ca_secret(ca_secret_item, cluster_name, namespace)
-    # update_ca_secret_volumes
-    item["generictemplate"]["spec"]["headGroupSpec"]["template"]["spec"]["volumes"][0][
-        "secret"
-    ]["secretName"] = f"ca-secret-{cluster_name}"
-    item["generictemplate"]["spec"]["workerGroupSpecs"][0]["template"]["spec"][
-        "volumes"
-    ][0]["secret"]["secretName"] = f"ca-secret-{cluster_name}"
-    # update_tls_env
-    item["generictemplate"]["spec"]["headGroupSpec"]["template"]["spec"]["containers"][
-        0
-    ]["env"][1]["value"] = "1"
-    item["generictemplate"]["spec"]["workerGroupSpecs"][0]["template"]["spec"][
-        "containers"
-    ][0]["env"][1]["value"] = "1"
-    # update_init_container
-    command = item["generictemplate"]["spec"]["headGroupSpec"]["template"]["spec"][
-        "initContainers"
-    ][0].get("command")[2]
-
-    command = command.replace("deployment-name", cluster_name)
-    try:
-        config.load_kube_config()
-        api_client = client.CustomObjectsApi(api_config_handler())
-        ingress = api_client.get_cluster_custom_object(
-            "config.openshift.io", "v1", "ingresses", "cluster"
-        )
-    except Exception as e:  # pragma: no cover
-        return _kube_api_error_handling(e)
-    domain = ingress["spec"]["domain"]
-    command = command.replace("server-name", domain)
-
-    item["generictemplate"]["spec"]["headGroupSpec"]["template"]["spec"][
-        "initContainers"
-    ][0].get("command")[2] = command
-
-
-def disable_raycluster_tls(resources):
-    generic_template_spec = resources["GenericItems"][0]["generictemplate"]["spec"]
-
-    if "volumes" in generic_template_spec["headGroupSpec"]["template"]["spec"]:
-        del generic_template_spec["headGroupSpec"]["template"]["spec"]["volumes"]
-
-    if (
-        "volumeMounts"
-        in generic_template_spec["headGroupSpec"]["template"]["spec"]["containers"][0]
-    ):
-        del generic_template_spec["headGroupSpec"]["template"]["spec"]["containers"][0][
-            "volumeMounts"
-        ]
-
-    if "initContainers" in generic_template_spec["headGroupSpec"]["template"]["spec"]:
-        del generic_template_spec["headGroupSpec"]["template"]["spec"]["initContainers"]
-
-    if "volumes" in generic_template_spec["workerGroupSpecs"][0]["template"]["spec"]:
-        del generic_template_spec["workerGroupSpecs"][0]["template"]["spec"]["volumes"]
-
-    if (
-        "volumeMounts"
-        in generic_template_spec["workerGroupSpecs"][0]["template"]["spec"][
-            "containers"
-        ][0]
-    ):
-        del generic_template_spec["workerGroupSpecs"][0]["template"]["spec"][
-            "containers"
-        ][0]["volumeMounts"]
-
-    for i in range(
-        len(
-            generic_template_spec["workerGroupSpecs"][0]["template"]["spec"][
-                "initContainers"
-            ]
-        )
-    ):
-        if (
-            generic_template_spec["workerGroupSpecs"][0]["template"]["spec"][
-                "initContainers"
-            ][i]["name"]
-            == "create-cert"
-        ):
-            del generic_template_spec["workerGroupSpecs"][0]["template"]["spec"][
-                "initContainers"
-            ][i]
-
-    updated_items = []
-    for i in resources["GenericItems"][:]:
-        if "rayclient-deployment-name" in i["generictemplate"]["metadata"]["name"]:
-            continue
-        if "ca-secret-deployment-name" in i["generictemplate"]["metadata"]["name"]:
-            continue
-        updated_items.append(i)
-
-    resources["GenericItems"] = updated_items
-
-
-def write_user_appwrapper(user_yaml, output_file_name):
-    with open(output_file_name, "w") as outfile:
-        yaml.dump(user_yaml, outfile, default_flow_style=False)
-    print(f"Written to: {output_file_name}")
-
-
-def generate_appwrapper(
-    name: str,
-    namespace: str,
-    min_cpu: int,
-    max_cpu: int,
-    min_memory: int,
-    max_memory: int,
-    gpu: int,
-    workers: int,
-    template: str,
-    image: str,
-    instascale: bool,
-    instance_types: list,
-    env,
-    local_interactive: bool,
-    image_pull_secrets: list,
-    dispatch_priority: str,
-    priority_val: int,
-):
-    user_yaml = read_template(template)
-    appwrapper_name, cluster_name = gen_names(name)
-    resources = user_yaml.get("spec", "resources")
-    item = resources["resources"].get("GenericItems")[0]
-    route_item = resources["resources"].get("GenericItems")[1]
-    update_names(user_yaml, item, appwrapper_name, cluster_name, namespace)
-    update_labels(user_yaml, instascale, instance_types)
-    update_priority(user_yaml, item, dispatch_priority, priority_val)
-    update_custompodresources(
-        item, min_cpu, max_cpu, min_memory, max_memory, gpu, workers
-    )
-    update_nodes(
-        item,
-        appwrapper_name,
-        min_cpu,
-        max_cpu,
-        min_memory,
-        max_memory,
-        gpu,
-        workers,
-        image,
-        instascale,
-        env,
-        image_pull_secrets,
-    )
-    update_dashboard_route(route_item, cluster_name, namespace)
-    if local_interactive:
-        enable_local_interactive(resources, cluster_name, namespace)
-    else:
-        disable_raycluster_tls(resources["resources"])
-    outfile = appwrapper_name + ".yaml"
-    write_user_appwrapper(user_yaml, outfile)
-    return outfile
-
-
-
-
-
-
-
-

Functions

-
-
-def disable_raycluster_tls(resources) -
-
-
-
- -Expand source code - -
def disable_raycluster_tls(resources):
-    generic_template_spec = resources["GenericItems"][0]["generictemplate"]["spec"]
-
-    if "volumes" in generic_template_spec["headGroupSpec"]["template"]["spec"]:
-        del generic_template_spec["headGroupSpec"]["template"]["spec"]["volumes"]
-
-    if (
-        "volumeMounts"
-        in generic_template_spec["headGroupSpec"]["template"]["spec"]["containers"][0]
-    ):
-        del generic_template_spec["headGroupSpec"]["template"]["spec"]["containers"][0][
-            "volumeMounts"
-        ]
-
-    if "initContainers" in generic_template_spec["headGroupSpec"]["template"]["spec"]:
-        del generic_template_spec["headGroupSpec"]["template"]["spec"]["initContainers"]
-
-    if "volumes" in generic_template_spec["workerGroupSpecs"][0]["template"]["spec"]:
-        del generic_template_spec["workerGroupSpecs"][0]["template"]["spec"]["volumes"]
-
-    if (
-        "volumeMounts"
-        in generic_template_spec["workerGroupSpecs"][0]["template"]["spec"][
-            "containers"
-        ][0]
-    ):
-        del generic_template_spec["workerGroupSpecs"][0]["template"]["spec"][
-            "containers"
-        ][0]["volumeMounts"]
-
-    for i in range(
-        len(
-            generic_template_spec["workerGroupSpecs"][0]["template"]["spec"][
-                "initContainers"
-            ]
-        )
-    ):
-        if (
-            generic_template_spec["workerGroupSpecs"][0]["template"]["spec"][
-                "initContainers"
-            ][i]["name"]
-            == "create-cert"
-        ):
-            del generic_template_spec["workerGroupSpecs"][0]["template"]["spec"][
-                "initContainers"
-            ][i]
-
-    updated_items = []
-    for i in resources["GenericItems"][:]:
-        if "rayclient-deployment-name" in i["generictemplate"]["metadata"]["name"]:
-            continue
-        if "ca-secret-deployment-name" in i["generictemplate"]["metadata"]["name"]:
-            continue
-        updated_items.append(i)
-
-    resources["GenericItems"] = updated_items
-
-
-
-def enable_local_interactive(resources, cluster_name, namespace) -
-
-
-
- -Expand source code - -
def enable_local_interactive(resources, cluster_name, namespace):
-    rayclient_route_item = resources["resources"].get("GenericItems")[2]
-    ca_secret_item = resources["resources"].get("GenericItems")[3]
-    item = resources["resources"].get("GenericItems")[0]
-    update_rayclient_route(rayclient_route_item, cluster_name, namespace)
-    update_ca_secret(ca_secret_item, cluster_name, namespace)
-    # update_ca_secret_volumes
-    item["generictemplate"]["spec"]["headGroupSpec"]["template"]["spec"]["volumes"][0][
-        "secret"
-    ]["secretName"] = f"ca-secret-{cluster_name}"
-    item["generictemplate"]["spec"]["workerGroupSpecs"][0]["template"]["spec"][
-        "volumes"
-    ][0]["secret"]["secretName"] = f"ca-secret-{cluster_name}"
-    # update_tls_env
-    item["generictemplate"]["spec"]["headGroupSpec"]["template"]["spec"]["containers"][
-        0
-    ]["env"][1]["value"] = "1"
-    item["generictemplate"]["spec"]["workerGroupSpecs"][0]["template"]["spec"][
-        "containers"
-    ][0]["env"][1]["value"] = "1"
-    # update_init_container
-    command = item["generictemplate"]["spec"]["headGroupSpec"]["template"]["spec"][
-        "initContainers"
-    ][0].get("command")[2]
-
-    command = command.replace("deployment-name", cluster_name)
-    try:
-        config.load_kube_config()
-        api_client = client.CustomObjectsApi(api_config_handler())
-        ingress = api_client.get_cluster_custom_object(
-            "config.openshift.io", "v1", "ingresses", "cluster"
-        )
-    except Exception as e:  # pragma: no cover
-        return _kube_api_error_handling(e)
-    domain = ingress["spec"]["domain"]
-    command = command.replace("server-name", domain)
-
-    item["generictemplate"]["spec"]["headGroupSpec"]["template"]["spec"][
-        "initContainers"
-    ][0].get("command")[2] = command
-
-
-
-def gen_names(name) -
-
-
-
- -Expand source code - -
def gen_names(name):
-    if not name:
-        gen_id = str(uuid.uuid4())
-        appwrapper_name = "appwrapper-" + gen_id
-        cluster_name = "cluster-" + gen_id
-        return appwrapper_name, cluster_name
-    else:
-        return name, name
-
-
-
-def generate_appwrapper(name: str, namespace: str, min_cpu: int, max_cpu: int, min_memory: int, max_memory: int, gpu: int, workers: int, template: str, image: str, instascale: bool, instance_types: list, env, local_interactive: bool, image_pull_secrets: list, dispatch_priority: str, priority_val: int) -
-
-
-
- -Expand source code - -
def generate_appwrapper(
-    name: str,
-    namespace: str,
-    min_cpu: int,
-    max_cpu: int,
-    min_memory: int,
-    max_memory: int,
-    gpu: int,
-    workers: int,
-    template: str,
-    image: str,
-    instascale: bool,
-    instance_types: list,
-    env,
-    local_interactive: bool,
-    image_pull_secrets: list,
-    dispatch_priority: str,
-    priority_val: int,
-):
-    user_yaml = read_template(template)
-    appwrapper_name, cluster_name = gen_names(name)
-    resources = user_yaml.get("spec", "resources")
-    item = resources["resources"].get("GenericItems")[0]
-    route_item = resources["resources"].get("GenericItems")[1]
-    update_names(user_yaml, item, appwrapper_name, cluster_name, namespace)
-    update_labels(user_yaml, instascale, instance_types)
-    update_priority(user_yaml, item, dispatch_priority, priority_val)
-    update_custompodresources(
-        item, min_cpu, max_cpu, min_memory, max_memory, gpu, workers
-    )
-    update_nodes(
-        item,
-        appwrapper_name,
-        min_cpu,
-        max_cpu,
-        min_memory,
-        max_memory,
-        gpu,
-        workers,
-        image,
-        instascale,
-        env,
-        image_pull_secrets,
-    )
-    update_dashboard_route(route_item, cluster_name, namespace)
-    if local_interactive:
-        enable_local_interactive(resources, cluster_name, namespace)
-    else:
-        disable_raycluster_tls(resources["resources"])
-    outfile = appwrapper_name + ".yaml"
-    write_user_appwrapper(user_yaml, outfile)
-    return outfile
-
-
-
-def read_template(template) -
-
-
-
- -Expand source code - -
def read_template(template):
-    with open(template, "r") as stream:
-        try:
-            return yaml.safe_load(stream)
-        except yaml.YAMLError as exc:
-            print(exc)
-
-
-
-def update_affinity(spec, appwrapper_name, instascale) -
-
-
-
- -Expand source code - -
def update_affinity(spec, appwrapper_name, instascale):
-    if instascale:
-        node_selector_terms = (
-            spec.get("affinity")
-            .get("nodeAffinity")
-            .get("requiredDuringSchedulingIgnoredDuringExecution")
-            .get("nodeSelectorTerms")
-        )
-        node_selector_terms[0]["matchExpressions"][0]["values"][0] = appwrapper_name
-        node_selector_terms[0]["matchExpressions"][0]["key"] = appwrapper_name
-    else:
-        spec.pop("affinity")
-
-
-
-def update_ca_secret(ca_secret_item, cluster_name, namespace) -
-
-
-
- -Expand source code - -
def update_ca_secret(ca_secret_item, cluster_name, namespace):
-    from . import generate_cert
-
-    metadata = ca_secret_item.get("generictemplate", {}).get("metadata")
-    metadata["name"] = f"ca-secret-{cluster_name}"
-    metadata["namespace"] = namespace
-    metadata["labels"]["odh-ray-cluster-service"] = f"{cluster_name}-head-svc"
-    data = ca_secret_item.get("generictemplate", {}).get("data")
-    data["ca.key"], data["ca.crt"] = generate_cert.generate_ca_cert(365)
-
-
-
-def update_custompodresources(item, min_cpu, max_cpu, min_memory, max_memory, gpu, workers) -
-
-
-
- -Expand source code - -
def update_custompodresources(
-    item, min_cpu, max_cpu, min_memory, max_memory, gpu, workers
-):
-    if "custompodresources" in item.keys():
-        custompodresources = item.get("custompodresources")
-        for i in range(len(custompodresources)):
-            if i == 0:
-                # Leave head node resources as template default
-                continue
-            resource = custompodresources[i]
-            for k, v in resource.items():
-                if k == "replicas" and i == 1:
-                    resource[k] = workers
-                if k == "requests" or k == "limits":
-                    for spec, _ in v.items():
-                        if spec == "cpu":
-                            if k == "limits":
-                                resource[k][spec] = max_cpu
-                            else:
-                                resource[k][spec] = min_cpu
-                        if spec == "memory":
-                            if k == "limits":
-                                resource[k][spec] = str(max_memory) + "G"
-                            else:
-                                resource[k][spec] = str(min_memory) + "G"
-                        if spec == "nvidia.com/gpu":
-                            if i == 0:
-                                resource[k][spec] = 0
-                            else:
-                                resource[k][spec] = gpu
-    else:
-        sys.exit("Error: malformed template")
-
-
-
-def update_dashboard_route(route_item, cluster_name, namespace) -
-
-
-
- -Expand source code - -
def update_dashboard_route(route_item, cluster_name, namespace):
-    metadata = route_item.get("generictemplate", {}).get("metadata")
-    metadata["name"] = f"ray-dashboard-{cluster_name}"
-    metadata["namespace"] = namespace
-    metadata["labels"]["odh-ray-cluster-service"] = f"{cluster_name}-head-svc"
-    spec = route_item.get("generictemplate", {}).get("spec")
-    spec["to"]["name"] = f"{cluster_name}-head-svc"
-
-
-
-def update_env(spec, env) -
-
-
-
- -Expand source code - -
def update_env(spec, env):
-    containers = spec.get("containers")
-    for container in containers:
-        if env:
-            if "env" in container:
-                container["env"].extend(env)
-            else:
-                container["env"] = env
-
-
-
-def update_image(spec, image) -
-
-
-
- -Expand source code - -
def update_image(spec, image):
-    containers = spec.get("containers")
-    for container in containers:
-        container["image"] = image
-
-
-
-def update_image_pull_secrets(spec, image_pull_secrets) -
-
-
-
- -Expand source code - -
def update_image_pull_secrets(spec, image_pull_secrets):
-    template_secrets = spec.get("imagePullSecrets", [])
-    spec["imagePullSecrets"] = template_secrets + [
-        {"name": x} for x in image_pull_secrets
-    ]
-
-
-
-def update_labels(yaml, instascale, instance_types) -
-
-
-
- -Expand source code - -
def update_labels(yaml, instascale, instance_types):
-    metadata = yaml.get("metadata")
-    if instascale:
-        if not len(instance_types) > 0:
-            sys.exit(
-                "If instascale is set to true, must provide at least one instance type"
-            )
-        type_str = ""
-        for type in instance_types:
-            type_str += type + "_"
-        type_str = type_str[:-1]
-        metadata["labels"]["orderedinstance"] = type_str
-    else:
-        metadata.pop("labels")
-
-
-
-def update_names(yaml, item, appwrapper_name, cluster_name, namespace) -
-
-
-
- -Expand source code - -
def update_names(yaml, item, appwrapper_name, cluster_name, namespace):
-    metadata = yaml.get("metadata")
-    metadata["name"] = appwrapper_name
-    metadata["namespace"] = namespace
-    lower_meta = item.get("generictemplate", {}).get("metadata")
-    lower_meta["labels"]["appwrapper.mcad.ibm.com"] = appwrapper_name
-    lower_meta["name"] = cluster_name
-    lower_meta["namespace"] = namespace
-
-
-
-def update_nodes(item, appwrapper_name, min_cpu, max_cpu, min_memory, max_memory, gpu, workers, image, instascale, env, image_pull_secrets) -
-
-
-
- -Expand source code - -
def update_nodes(
-    item,
-    appwrapper_name,
-    min_cpu,
-    max_cpu,
-    min_memory,
-    max_memory,
-    gpu,
-    workers,
-    image,
-    instascale,
-    env,
-    image_pull_secrets,
-):
-    if "generictemplate" in item.keys():
-        head = item.get("generictemplate").get("spec").get("headGroupSpec")
-        worker = item.get("generictemplate").get("spec").get("workerGroupSpecs")[0]
-
-        # Head counts as first worker
-        worker["replicas"] = workers
-        worker["minReplicas"] = workers
-        worker["maxReplicas"] = workers
-        worker["groupName"] = "small-group-" + appwrapper_name
-        worker["rayStartParams"]["num-gpus"] = str(int(gpu))
-
-        for comp in [head, worker]:
-            spec = comp.get("template").get("spec")
-            update_affinity(spec, appwrapper_name, instascale)
-            update_image_pull_secrets(spec, image_pull_secrets)
-            update_image(spec, image)
-            update_env(spec, env)
-            if comp == head:
-                # TODO: Eventually add head node configuration outside of template
-                continue
-            else:
-                update_resources(spec, min_cpu, max_cpu, min_memory, max_memory, gpu)
-
-
-
-def update_priority(yaml, item, dispatch_priority, priority_val) -
-
-
-
- -Expand source code - -
def update_priority(yaml, item, dispatch_priority, priority_val):
-    spec = yaml.get("spec")
-    if dispatch_priority is not None:
-        if priority_val:
-            spec["priority"] = priority_val
-        else:
-            raise ValueError(
-                "AW generation error: Priority value is None, while dispatch_priority is defined"
-            )
-        head = item.get("generictemplate").get("spec").get("headGroupSpec")
-        worker = item.get("generictemplate").get("spec").get("workerGroupSpecs")[0]
-        head["template"]["spec"]["priorityClassName"] = dispatch_priority
-        worker["template"]["spec"]["priorityClassName"] = dispatch_priority
-    else:
-        spec.pop("priority")
-
-
-
-def update_rayclient_route(route_item, cluster_name, namespace) -
-
-
-
- -Expand source code - -
def update_rayclient_route(route_item, cluster_name, namespace):
-    metadata = route_item.get("generictemplate", {}).get("metadata")
-    metadata["name"] = f"rayclient-{cluster_name}"
-    metadata["namespace"] = namespace
-    metadata["labels"]["odh-ray-cluster-service"] = f"{cluster_name}-head-svc"
-    spec = route_item.get("generictemplate", {}).get("spec")
-    spec["to"]["name"] = f"{cluster_name}-head-svc"
-
-
-
-def update_resources(spec, min_cpu, max_cpu, min_memory, max_memory, gpu) -
-
-
-
- -Expand source code - -
def update_resources(spec, min_cpu, max_cpu, min_memory, max_memory, gpu):
-    container = spec.get("containers")
-    for resource in container:
-        requests = resource.get("resources").get("requests")
-        if requests is not None:
-            requests["cpu"] = min_cpu
-            requests["memory"] = str(min_memory) + "G"
-            requests["nvidia.com/gpu"] = gpu
-        limits = resource.get("resources").get("limits")
-        if limits is not None:
-            limits["cpu"] = max_cpu
-            limits["memory"] = str(max_memory) + "G"
-            limits["nvidia.com/gpu"] = gpu
-
-
-
-def write_user_appwrapper(user_yaml, output_file_name) -
-
-
-
- -Expand source code - -
def write_user_appwrapper(user_yaml, output_file_name):
-    with open(output_file_name, "w") as outfile:
-        yaml.dump(user_yaml, outfile, default_flow_style=False)
-    print(f"Written to: {output_file_name}")
-
-
-
-
-
-
-
- -
- - - diff --git a/docs/utils/index.html b/docs/utils/index.html deleted file mode 100644 index 1eb081d2..00000000 --- a/docs/utils/index.html +++ /dev/null @@ -1,83 +0,0 @@ - - - - - - -codeflare_sdk.utils API documentation - - - - - - - - - - - -
-
-
-

Module codeflare_sdk.utils

-
-
-
-
-

Sub-modules

-
-
codeflare_sdk.utils.generate_cert
-
-
-
-
codeflare_sdk.utils.generate_yaml
-
-

This sub-module exists primarily to be used internally by the Cluster object -(in the cluster sub-module) for AppWrapper generation.

-
-
codeflare_sdk.utils.kube_api_helpers
-
-

This sub-module exists primarily to be used internally for any Kubernetes -API error handling or wrapping.

-
-
codeflare_sdk.utils.pretty_print
-
-

This sub-module exists primarily to be used internally by the Cluster object -(in the cluster sub-module) for pretty-printing cluster status and details.

-
-
-
-
-
-
-
-
-
-
- -
- - - diff --git a/docs/utils/kube_api_helpers.html b/docs/utils/kube_api_helpers.html deleted file mode 100644 index 4c2ecb78..00000000 --- a/docs/utils/kube_api_helpers.html +++ /dev/null @@ -1,105 +0,0 @@ - - - - - - -codeflare_sdk.utils.kube_api_helpers API documentation - - - - - - - - - - - -
-
-
-

Module codeflare_sdk.utils.kube_api_helpers

-
-
-

This sub-module exists primarily to be used internally for any Kubernetes -API error handling or wrapping.

-
- -Expand source code - -
# Copyright 2022 IBM, Red Hat
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-This sub-module exists primarily to be used internally for any Kubernetes
-API error handling or wrapping.
-"""
-
-import executing
-from kubernetes import client, config
-
-
-# private methods
-def _kube_api_error_handling(e: Exception):  # pragma: no cover
-    perm_msg = (
-        "Action not permitted, have you put in correct/up-to-date auth credentials?"
-    )
-    nf_msg = "No instances found, nothing to be done."
-    exists_msg = "Resource with this name already exists."
-    if type(e) == config.ConfigException:
-        raise PermissionError(perm_msg)
-    if type(e) == executing.executing.NotOneValueFound:
-        print(nf_msg)
-        return
-    if type(e) == client.ApiException:
-        if e.reason == "Not Found":
-            print(nf_msg)
-            return
-        elif e.reason == "Unauthorized" or e.reason == "Forbidden":
-            raise PermissionError(perm_msg)
-        elif e.reason == "Conflict":
-            raise FileExistsError(exists_msg)
-    raise e
-
-
-
-
-
-
-
-
-
-
-
- -
- - - diff --git a/docs/utils/pretty_print.html b/docs/utils/pretty_print.html deleted file mode 100644 index 5ff38db1..00000000 --- a/docs/utils/pretty_print.html +++ /dev/null @@ -1,449 +0,0 @@ - - - - - - -codeflare_sdk.utils.pretty_print API documentation - - - - - - - - - - - -
-
-
-

Module codeflare_sdk.utils.pretty_print

-
-
-

This sub-module exists primarily to be used internally by the Cluster object -(in the cluster sub-module) for pretty-printing cluster status and details.

-
- -Expand source code - -
# Copyright 2022 IBM, Red Hat
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-This sub-module exists primarily to be used internally by the Cluster object
-(in the cluster sub-module) for pretty-printing cluster status and details.
-"""
-
-from rich import print
-from rich.table import Table
-from rich.console import Console
-from rich.layout import Layout
-from rich.panel import Panel
-from rich import box
-from typing import List
-from ..cluster.model import RayCluster, AppWrapper, RayClusterStatus
-
-
-def print_no_resources_found():
-    console = Console()
-    console.print(Panel("[red]No resources found, have you run cluster.up() yet?"))
-
-
-def print_app_wrappers_status(app_wrappers: List[AppWrapper], starting: bool = False):
-    if not app_wrappers:
-        print_no_resources_found()
-        return  # shortcircuit
-
-    console = Console()
-    table = Table(
-        box=box.ASCII_DOUBLE_HEAD,
-        title="[bold] :rocket: Cluster Queue Status :rocket:",
-    )
-    table.add_column("Name", style="cyan", no_wrap=True)
-    table.add_column("Status", style="magenta")
-
-    for app_wrapper in app_wrappers:
-        name = app_wrapper.name
-        status = app_wrapper.status.value
-        if starting:
-            status += " (starting)"
-        table.add_row(name, status)
-        table.add_row("")  # empty row for spacing
-
-    console.print(Panel.fit(table))
-
-
-def print_cluster_status(cluster: RayCluster):
-    "Pretty prints the status of a passed-in cluster"
-    if not cluster:
-        print_no_resources_found()
-        return
-
-    console = Console()
-    status = (
-        "Active :white_heavy_check_mark:"
-        if cluster.status == RayClusterStatus.READY
-        else "Inactive :x:"
-    )
-    name = cluster.name
-    dashboard = cluster.dashboard
-    # owned = bool(cluster["userOwned"])
-    owned = True
-
-    #'table0' to display the cluster name, status, url, and dashboard link
-    table0 = Table(box=None, show_header=False)
-    if owned:
-        table0.add_row("[white on green][bold]Name")
-    else:
-        table0.add_row("")
-    table0.add_row("[bold underline]" + name, status)
-    table0.add_row()
-    # fixme harcded to default for now
-    table0.add_row(
-        f"[bold]URI:[/bold] ray://{cluster.name}-head-svc.{cluster.namespace}.svc:10001"
-    )  # format that is used to generate the name of the service
-    table0.add_row()
-    table0.add_row(f"[link={dashboard} blue underline]Dashboard:link:[/link]")
-    table0.add_row("")  # empty row for spacing
-
-    # table4 to display table0 and table3, one below the other
-    table4 = Table(box=None, show_header=False)
-    table4.add_row(table0)
-
-    # Encompass all details of the cluster in a single panel
-    table5 = Table(box=None, title="[bold] :rocket: CodeFlare Cluster Status :rocket:")
-    table5.add_row(Panel.fit(table4))
-    console.print(table5)
-
-
-def print_clusters(clusters: List[RayCluster]):
-    if not clusters:
-        print_no_resources_found()
-        return  # shortcircuit
-
-    console = Console()
-    title_printed = False
-
-    for cluster in clusters:
-        status = (
-            "Active :white_heavy_check_mark:"
-            if cluster.status == RayClusterStatus.READY
-            else "Inactive :x:"
-        )
-        name = cluster.name
-        dashboard = cluster.dashboard
-        workers = str(cluster.workers)
-        memory = str(cluster.worker_mem_min) + "~" + str(cluster.worker_mem_max)
-        cpu = str(cluster.worker_cpu)
-        gpu = str(cluster.worker_gpu)
-        # owned = bool(cluster["userOwned"])
-        owned = True
-
-        #'table0' to display the cluster name, status, url, and dashboard link
-        table0 = Table(box=None, show_header=False)
-        if owned:
-            table0.add_row("[white on green][bold]Name")
-        else:
-            table0.add_row("")
-        table0.add_row("[bold underline]" + name, status)
-        table0.add_row()
-        # fixme harcded to default for now
-        table0.add_row(
-            f"[bold]URI:[/bold] ray://{cluster.name}-head-svc.{cluster.namespace}.svc:10001"
-        )  # format that is used to generate the name of the service
-        table0.add_row()
-        table0.add_row(f"[link={dashboard} blue underline]Dashboard:link:[/link]")
-        table0.add_row("")  # empty row for spacing
-
-        #'table1' to display the worker counts
-        table1 = Table(box=None)
-        table1.add_row()
-        table1.add_column("# Workers", style="magenta")
-        table1.add_row()
-        table1.add_row(workers)
-        table1.add_row()
-
-        #'table2' to display the worker resources
-        table2 = Table(box=None)
-        table2.add_column("Memory", style="cyan", no_wrap=True, min_width=10)
-        table2.add_column("CPU", style="magenta", min_width=10)
-        table2.add_column("GPU", style="magenta", min_width=10)
-        table2.add_row()
-        table2.add_row(memory, cpu, gpu)
-        table2.add_row()
-
-        # panels to encompass table1 and table2 into separate cards
-        panel_1 = Panel.fit(table1, title="Workers")
-        panel_2 = Panel.fit(table2, title="Worker specs(each)")
-
-        # table3 to display panel_1 and panel_2 side-by-side in a single row
-        table3 = Table(box=None, show_header=False, title="Cluster Resources")
-        table3.add_row(panel_1, panel_2)
-
-        # table4 to display table0 and table3, one below the other
-        table4 = Table(box=None, show_header=False)
-        table4.add_row(table0)
-        table4.add_row(table3)
-
-        # Encompass all details of the cluster in a single panel
-        if not title_printed:
-            # If first cluster in the list, then create a table with title "Codeflare clusters".
-            # This is done to ensure the title is center aligned on the cluster display tables, rather
-            # than being center aligned on the console/terminal if we simply use console.print(title)
-
-            table5 = Table(
-                box=None, title="[bold] :rocket: CodeFlare Cluster Details :rocket:"
-            )
-            table5.add_row(Panel.fit(table4))
-            console.print(table5)
-            title_printed = True
-        else:
-            console.print(Panel.fit(table4))
-
-
-
-
-
-
-
-

Functions

-
-
-def print_app_wrappers_status(app_wrappers: List[AppWrapper], starting: bool = False) -
-
-
-
- -Expand source code - -
def print_app_wrappers_status(app_wrappers: List[AppWrapper], starting: bool = False):
-    if not app_wrappers:
-        print_no_resources_found()
-        return  # shortcircuit
-
-    console = Console()
-    table = Table(
-        box=box.ASCII_DOUBLE_HEAD,
-        title="[bold] :rocket: Cluster Queue Status :rocket:",
-    )
-    table.add_column("Name", style="cyan", no_wrap=True)
-    table.add_column("Status", style="magenta")
-
-    for app_wrapper in app_wrappers:
-        name = app_wrapper.name
-        status = app_wrapper.status.value
-        if starting:
-            status += " (starting)"
-        table.add_row(name, status)
-        table.add_row("")  # empty row for spacing
-
-    console.print(Panel.fit(table))
-
-
-
-def print_cluster_status(cluster: RayCluster) -
-
-

Pretty prints the status of a passed-in cluster

-
- -Expand source code - -
def print_cluster_status(cluster: RayCluster):
-    "Pretty prints the status of a passed-in cluster"
-    if not cluster:
-        print_no_resources_found()
-        return
-
-    console = Console()
-    status = (
-        "Active :white_heavy_check_mark:"
-        if cluster.status == RayClusterStatus.READY
-        else "Inactive :x:"
-    )
-    name = cluster.name
-    dashboard = cluster.dashboard
-    # owned = bool(cluster["userOwned"])
-    owned = True
-
-    #'table0' to display the cluster name, status, url, and dashboard link
-    table0 = Table(box=None, show_header=False)
-    if owned:
-        table0.add_row("[white on green][bold]Name")
-    else:
-        table0.add_row("")
-    table0.add_row("[bold underline]" + name, status)
-    table0.add_row()
-    # fixme harcded to default for now
-    table0.add_row(
-        f"[bold]URI:[/bold] ray://{cluster.name}-head-svc.{cluster.namespace}.svc:10001"
-    )  # format that is used to generate the name of the service
-    table0.add_row()
-    table0.add_row(f"[link={dashboard} blue underline]Dashboard:link:[/link]")
-    table0.add_row("")  # empty row for spacing
-
-    # table4 to display table0 and table3, one below the other
-    table4 = Table(box=None, show_header=False)
-    table4.add_row(table0)
-
-    # Encompass all details of the cluster in a single panel
-    table5 = Table(box=None, title="[bold] :rocket: CodeFlare Cluster Status :rocket:")
-    table5.add_row(Panel.fit(table4))
-    console.print(table5)
-
-
-
-def print_clusters(clusters: List[RayCluster]) -
-
-
-
- -Expand source code - -
def print_clusters(clusters: List[RayCluster]):
-    if not clusters:
-        print_no_resources_found()
-        return  # shortcircuit
-
-    console = Console()
-    title_printed = False
-
-    for cluster in clusters:
-        status = (
-            "Active :white_heavy_check_mark:"
-            if cluster.status == RayClusterStatus.READY
-            else "Inactive :x:"
-        )
-        name = cluster.name
-        dashboard = cluster.dashboard
-        workers = str(cluster.workers)
-        memory = str(cluster.worker_mem_min) + "~" + str(cluster.worker_mem_max)
-        cpu = str(cluster.worker_cpu)
-        gpu = str(cluster.worker_gpu)
-        # owned = bool(cluster["userOwned"])
-        owned = True
-
-        #'table0' to display the cluster name, status, url, and dashboard link
-        table0 = Table(box=None, show_header=False)
-        if owned:
-            table0.add_row("[white on green][bold]Name")
-        else:
-            table0.add_row("")
-        table0.add_row("[bold underline]" + name, status)
-        table0.add_row()
-        # fixme harcded to default for now
-        table0.add_row(
-            f"[bold]URI:[/bold] ray://{cluster.name}-head-svc.{cluster.namespace}.svc:10001"
-        )  # format that is used to generate the name of the service
-        table0.add_row()
-        table0.add_row(f"[link={dashboard} blue underline]Dashboard:link:[/link]")
-        table0.add_row("")  # empty row for spacing
-
-        #'table1' to display the worker counts
-        table1 = Table(box=None)
-        table1.add_row()
-        table1.add_column("# Workers", style="magenta")
-        table1.add_row()
-        table1.add_row(workers)
-        table1.add_row()
-
-        #'table2' to display the worker resources
-        table2 = Table(box=None)
-        table2.add_column("Memory", style="cyan", no_wrap=True, min_width=10)
-        table2.add_column("CPU", style="magenta", min_width=10)
-        table2.add_column("GPU", style="magenta", min_width=10)
-        table2.add_row()
-        table2.add_row(memory, cpu, gpu)
-        table2.add_row()
-
-        # panels to encompass table1 and table2 into separate cards
-        panel_1 = Panel.fit(table1, title="Workers")
-        panel_2 = Panel.fit(table2, title="Worker specs(each)")
-
-        # table3 to display panel_1 and panel_2 side-by-side in a single row
-        table3 = Table(box=None, show_header=False, title="Cluster Resources")
-        table3.add_row(panel_1, panel_2)
-
-        # table4 to display table0 and table3, one below the other
-        table4 = Table(box=None, show_header=False)
-        table4.add_row(table0)
-        table4.add_row(table3)
-
-        # Encompass all details of the cluster in a single panel
-        if not title_printed:
-            # If first cluster in the list, then create a table with title "Codeflare clusters".
-            # This is done to ensure the title is center aligned on the cluster display tables, rather
-            # than being center aligned on the console/terminal if we simply use console.print(title)
-
-            table5 = Table(
-                box=None, title="[bold] :rocket: CodeFlare Cluster Details :rocket:"
-            )
-            table5.add_row(Panel.fit(table4))
-            console.print(table5)
-            title_printed = True
-        else:
-            console.print(Panel.fit(table4))
-
-
-
-def print_no_resources_found() -
-
-
-
- -Expand source code - -
def print_no_resources_found():
-    console = Console()
-    console.print(Panel("[red]No resources found, have you run cluster.up() yet?"))
-
-
-
-
-
-
-
- -
- - - diff --git a/poetry.lock b/poetry.lock new file mode 100644 index 00000000..49e45352 --- /dev/null +++ b/poetry.lock @@ -0,0 +1,4753 @@ +# This file is automatically @generated by Poetry 2.1.1 and should not be changed by hand. + +[[package]] +name = "aiohappyeyeballs" +version = "2.6.1" +description = "Happy Eyeballs for asyncio" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "aiohappyeyeballs-2.6.1-py3-none-any.whl", hash = "sha256:f349ba8f4b75cb25c99c5c2d84e997e485204d2902a9597802b0371f09331fb8"}, + {file = "aiohappyeyeballs-2.6.1.tar.gz", hash = "sha256:c3f9d0113123803ccadfdf3f0faa505bc78e6a72d1cc4806cbd719826e943558"}, +] + +[[package]] +name = "aiohttp" +version = "3.12.12" +description = "Async http client/server framework (asyncio)" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "aiohttp-3.12.12-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:6f25e9d274d6abbb15254f76f100c3984d6b9ad6e66263cc60a465dd5c7e48f5"}, + {file = "aiohttp-3.12.12-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b8ec3c1a1c13d24941b5b913607e57b9364e4c0ea69d5363181467492c4b2ba6"}, + {file = "aiohttp-3.12.12-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:81ef2f9253c327c211cb7b06ea2edd90e637cf21c347b894d540466b8d304e08"}, + {file = "aiohttp-3.12.12-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:28ded835c3663fd41c9ad44685811b11e34e6ac9a7516a30bfce13f6abba4496"}, + {file = "aiohttp-3.12.12-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:a4b78ccf254fc10605b263996949a94ca3f50e4f9100e05137d6583e266b711e"}, + {file = "aiohttp-3.12.12-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4f4a5af90d5232c41bb857568fe7d11ed84408653ec9da1ff999cc30258b9bd1"}, + {file = "aiohttp-3.12.12-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:ffa5205c2f53f1120e93fdf2eca41b0f6344db131bc421246ee82c1e1038a14a"}, + {file = "aiohttp-3.12.12-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f68301660f0d7a3eddfb84f959f78a8f9db98c76a49b5235508fa16edaad0f7c"}, + {file = "aiohttp-3.12.12-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:db874d3b0c92fdbb553751af9d2733b378c25cc83cd9dfba87f12fafd2dc9cd5"}, + {file = "aiohttp-3.12.12-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:5e53cf9c201b45838a2d07b1f2d5f7fec9666db7979240002ce64f9b8a1e0cf2"}, + {file = "aiohttp-3.12.12-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:8687cc5f32b4e328c233acd387d09a1b477007896b2f03c1c823a0fd05f63883"}, + {file = "aiohttp-3.12.12-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:5ee537ad29de716a3d8dc46c609908de0c25ffeebf93cd94a03d64cdc07d66d0"}, + {file = "aiohttp-3.12.12-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:411f821be5af6af11dc5bed6c6c1dc6b6b25b91737d968ec2756f9baa75e5f9b"}, + {file = "aiohttp-3.12.12-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:f90319d94cf5f9786773237f24bd235a7b5959089f1af8ec1154580a3434b503"}, + {file = "aiohttp-3.12.12-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:73b148e606f34e9d513c451fd65efe1091772659ca5703338a396a99f60108ff"}, + {file = "aiohttp-3.12.12-cp310-cp310-win32.whl", hash = "sha256:d40e7bfd577fdc8a92b72f35dfbdd3ec90f1bc8a72a42037fefe34d4eca2d4a1"}, + {file = "aiohttp-3.12.12-cp310-cp310-win_amd64.whl", hash = "sha256:65c7804a2343893d6dea9fce69811aea0a9ac47f68312cf2e3ee1668cd9a387f"}, + {file = "aiohttp-3.12.12-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:38823fe0d8bc059b3eaedb263fe427d887c7032e72b4ef92c472953285f0e658"}, + {file = "aiohttp-3.12.12-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:10237f2c34711215d04ed21da63852ce023608299554080a45c576215d9df81c"}, + {file = "aiohttp-3.12.12-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:563ec477c0dc6d56fc7f943a3475b5acdb399c7686c30f5a98ada24bb7562c7a"}, + {file = "aiohttp-3.12.12-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f3d05c46a61aca7c47df74afff818bc06a251ab95d95ff80b53665edfe1e0bdf"}, + {file = "aiohttp-3.12.12-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:277c882916759b4a6b6dc7e2ceb124aad071b3c6456487808d9ab13e1b448d57"}, + {file = "aiohttp-3.12.12-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:216abf74b324b0f4e67041dd4fb2819613909a825904f8a51701fbcd40c09cd7"}, + {file = "aiohttp-3.12.12-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:65d6cefad286459b68e7f867b9586a821fb7f121057b88f02f536ef570992329"}, + {file = "aiohttp-3.12.12-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:feaaaff61966b5f4b4eae0b79fc79427f49484e4cfa5ab7d138ecd933ab540a8"}, + {file = "aiohttp-3.12.12-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a05917780b7cad1755784b16cfaad806bc16029a93d15f063ca60185b7d9ba05"}, + {file = "aiohttp-3.12.12-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:082c5ec6d262c1b2ee01c63f4fb9152c17f11692bf16f0f100ad94a7a287d456"}, + {file = "aiohttp-3.12.12-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:b265a3a8b379b38696ac78bdef943bdc4f4a5d6bed1a3fb5c75c6bab1ecea422"}, + {file = "aiohttp-3.12.12-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:2e0f2e208914ecbc4b2a3b7b4daa759d0c587d9a0b451bb0835ac47fae7fa735"}, + {file = "aiohttp-3.12.12-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:9923b025845b72f64d167bca221113377c8ffabd0a351dc18fb839d401ee8e22"}, + {file = "aiohttp-3.12.12-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:1ebb213445900527831fecc70e185bf142fdfe5f2a691075f22d63c65ee3c35a"}, + {file = "aiohttp-3.12.12-cp311-cp311-musllinux_1_2_x86_64.whl", hash = 
"sha256:6fc369fb273a8328077d37798b77c1e65676709af5c182cb74bd169ca9defe81"}, + {file = "aiohttp-3.12.12-cp311-cp311-win32.whl", hash = "sha256:58ecd10fda6a44c311cd3742cfd2aea8c4c600338e9f27cb37434d9f5ca9ddaa"}, + {file = "aiohttp-3.12.12-cp311-cp311-win_amd64.whl", hash = "sha256:b0066e88f30be00badffb5ef8f2281532b9a9020863d873ae15f7c147770b6ec"}, + {file = "aiohttp-3.12.12-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:98451ce9ce229d092f278a74a7c2a06b3aa72984673c87796126d7ccade893e9"}, + {file = "aiohttp-3.12.12-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:adbac7286d89245e1aff42e948503fdc6edf6d5d65c8e305a67c40f6a8fb95f4"}, + {file = "aiohttp-3.12.12-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0728882115bfa85cbd8d0f664c8ccc0cfd5bd3789dd837596785450ae52fac31"}, + {file = "aiohttp-3.12.12-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6bf3b9d9e767f9d0e09fb1a31516410fc741a62cc08754578c40abc497d09540"}, + {file = "aiohttp-3.12.12-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:c944860e86b9f77a462321a440ccf6fa10f5719bb9d026f6b0b11307b1c96c7b"}, + {file = "aiohttp-3.12.12-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b1979e1f0c98c06fd0cd940988833b102fa3aa56751f6c40ffe85cabc51f6fd"}, + {file = "aiohttp-3.12.12-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:120b7dd084e96cfdad85acea2ce1e7708c70a26db913eabb8d7b417c728f5d84"}, + {file = "aiohttp-3.12.12-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e58f5ae79649ffa247081c2e8c85e31d29623cf2a3137dda985ae05c9478aae"}, + {file = "aiohttp-3.12.12-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9aa5f049e3e2745b0141f13e5a64e7c48b1a1427ed18bbb7957b348f282fee56"}, + {file = "aiohttp-3.12.12-cp312-cp312-musllinux_1_2_aarch64.whl", hash = 
"sha256:7163cc9cf3722d90f1822f8a38b211e3ae2fc651c63bb55449f03dc1b3ff1d44"}, + {file = "aiohttp-3.12.12-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:ef97c4d035b721de6607f3980fa3e4ef0ec3aca76474b5789b7fac286a8c4e23"}, + {file = "aiohttp-3.12.12-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:1c14448d6a86acadc3f7b2f4cc385d1fb390acb6f37dce27f86fe629410d92e3"}, + {file = "aiohttp-3.12.12-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:a1b6df6255cfc493454c79221183d64007dd5080bcda100db29b7ff181b8832c"}, + {file = "aiohttp-3.12.12-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:60fc7338dfb0626c2927bfbac4785de3ea2e2bbe3d328ba5f3ece123edda4977"}, + {file = "aiohttp-3.12.12-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:d2afc72207ef4c9d4ca9fcd00689a6a37ef2d625600c3d757b5c2b80c9d0cf9a"}, + {file = "aiohttp-3.12.12-cp312-cp312-win32.whl", hash = "sha256:8098a48f93b2cbcdb5778e7c9a0e0375363e40ad692348e6e65c3b70d593b27c"}, + {file = "aiohttp-3.12.12-cp312-cp312-win_amd64.whl", hash = "sha256:d1c1879b2e0fc337d7a1b63fe950553c2b9e93c071cf95928aeea1902d441403"}, + {file = "aiohttp-3.12.12-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ea5d604318234427929d486954e3199aded65f41593ac57aa0241ab93dda3d15"}, + {file = "aiohttp-3.12.12-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e03ff38250b8b572dce6fcd7b6fb6ee398bb8a59e6aa199009c5322d721df4fc"}, + {file = "aiohttp-3.12.12-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:71125b1fc2b6a94bccc63bbece620906a4dead336d2051f8af9cbf04480bc5af"}, + {file = "aiohttp-3.12.12-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:784a66f9f853a22c6b8c2bd0ff157f9b879700f468d6d72cfa99167df08c5c9c"}, + {file = "aiohttp-3.12.12-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:a5be0b58670b54301404bd1840e4902570a1c3be00358e2700919cb1ea73c438"}, + {file = "aiohttp-3.12.12-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:ce8f13566fc7bf5a728275b434bc3bdea87a7ed3ad5f734102b02ca59d9b510f"}, + {file = "aiohttp-3.12.12-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d736e57d1901683bc9be648aa308cb73e646252c74b4c639c35dcd401ed385ea"}, + {file = "aiohttp-3.12.12-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e2007eaa7aae9102f211c519d1ec196bd3cecb1944a095db19eeaf132b798738"}, + {file = "aiohttp-3.12.12-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2a813e61583cab6d5cdbaa34bc28863acdb92f9f46e11de1b3b9251a1e8238f6"}, + {file = "aiohttp-3.12.12-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:e408293aa910b0aea48b86a28eace41d497a85ba16c20f619f0c604597ef996c"}, + {file = "aiohttp-3.12.12-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:f3d31faf290f5a30acba46b388465b67c6dbe8655d183e9efe2f6a1d594e6d9d"}, + {file = "aiohttp-3.12.12-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:0b84731697325b023902aa643bd1726d999f5bc7854bc28b17ff410a81151d4b"}, + {file = "aiohttp-3.12.12-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:a324c6852b6e327811748446e56cc9bb6eaa58710557922183175816e82a4234"}, + {file = "aiohttp-3.12.12-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:22fd867fbd72612dcf670c90486dbcbaf702cb807fb0b42bc0b7a142a573574a"}, + {file = "aiohttp-3.12.12-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:3e092f1a970223794a4bf620a26c0e4e4e8e36bccae9b0b5da35e6d8ee598a03"}, + {file = "aiohttp-3.12.12-cp313-cp313-win32.whl", hash = "sha256:7f5f5eb8717ef8ba15ab35fcde5a70ad28bbdc34157595d1cddd888a985f5aae"}, + {file = "aiohttp-3.12.12-cp313-cp313-win_amd64.whl", hash = "sha256:ace2499bdd03c329c054dc4b47361f2b19d5aa470f7db5c7e0e989336761b33c"}, + {file = "aiohttp-3.12.12-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:0d0b1c27c05a7d39a50e946ec5f94c3af4ffadd33fa5f20705df42fb0a72ca14"}, + {file = "aiohttp-3.12.12-cp39-cp39-macosx_10_9_x86_64.whl", hash 
= "sha256:e5928847e6f7b7434921fbabf73fa5609d1f2bf4c25d9d4522b1fcc3b51995cb"}, + {file = "aiohttp-3.12.12-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:7678147c3c85a7ae61559b06411346272ed40a08f54bc05357079a63127c9718"}, + {file = "aiohttp-3.12.12-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f50057f36f2a1d8e750b273bb966bec9f69ee1e0a20725ae081610501f25d555"}, + {file = "aiohttp-3.12.12-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:e5e834f0f11ff5805d11f0f22b627c75eadfaf91377b457875e4e3affd0b924f"}, + {file = "aiohttp-3.12.12-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f94b2e2dea19d09745ef02ed483192260750f18731876a5c76f1c254b841443a"}, + {file = "aiohttp-3.12.12-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b434bfb49564dc1c318989a0ab1d3000d23e5cfd00d8295dc9d5a44324cdd42d"}, + {file = "aiohttp-3.12.12-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8ed76bc80177ddb7c5c93e1a6440b115ed2c92a3063420ac55206fd0832a6459"}, + {file = "aiohttp-3.12.12-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e1282a9acd378f2aed8dc79c01e702b1d5fd260ad083926a88ec7e987c4e0ade"}, + {file = "aiohttp-3.12.12-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:09a213c13fba321586edab1528b530799645b82bd64d79b779eb8d47ceea155a"}, + {file = "aiohttp-3.12.12-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:72eae16a9233561d315e72ae78ed9fc65ab3db0196e56cb2d329c755d694f137"}, + {file = "aiohttp-3.12.12-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:f25990c507dbbeefd5a6a17df32a4ace634f7b20a38211d1b9609410c7f67a24"}, + {file = "aiohttp-3.12.12-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:3a2aa255417c8ccf1b39359cd0a3d63ae3b5ced83958dbebc4d9113327c0536a"}, + {file = "aiohttp-3.12.12-cp39-cp39-musllinux_1_2_s390x.whl", hash = 
"sha256:a4c53b89b3f838e9c25f943d1257efff10b348cb56895f408ddbcb0ec953a2ad"}, + {file = "aiohttp-3.12.12-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:b5a49c2dcb32114455ad503e8354624d85ab311cbe032da03965882492a9cb98"}, + {file = "aiohttp-3.12.12-cp39-cp39-win32.whl", hash = "sha256:74fddc0ba8cea6b9c5bd732eb9d97853543586596b86391f8de5d4f6c2a0e068"}, + {file = "aiohttp-3.12.12-cp39-cp39-win_amd64.whl", hash = "sha256:ddf40ba4a1d0b4d232dc47d2b98ae7e937dcbc40bb5f2746bce0af490a64526f"}, + {file = "aiohttp-3.12.12.tar.gz", hash = "sha256:05875595d2483d96cb61fa9f64e75262d7ac6251a7e3c811d8e26f7d721760bd"}, +] + +[package.dependencies] +aiohappyeyeballs = ">=2.5.0" +aiosignal = ">=1.1.2" +attrs = ">=17.3.0" +frozenlist = ">=1.1.1" +multidict = ">=4.5,<7.0" +propcache = ">=0.2.0" +yarl = ">=1.17.0,<2.0" + +[package.extras] +speedups = ["Brotli ; platform_python_implementation == \"CPython\"", "aiodns (>=3.3.0)", "brotlicffi ; platform_python_implementation != \"CPython\""] + +[[package]] +name = "aiohttp-cors" +version = "0.8.1" +description = "CORS support for aiohttp" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "aiohttp_cors-0.8.1-py3-none-any.whl", hash = "sha256:3180cf304c5c712d626b9162b195b1db7ddf976a2a25172b35bb2448b890a80d"}, + {file = "aiohttp_cors-0.8.1.tar.gz", hash = "sha256:ccacf9cb84b64939ea15f859a146af1f662a6b1d68175754a07315e305fb1403"}, +] + +[package.dependencies] +aiohttp = ">=3.9" + +[[package]] +name = "aiosignal" +version = "1.3.2" +description = "aiosignal: a list of registered asynchronous callbacks" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "aiosignal-1.3.2-py2.py3-none-any.whl", hash = "sha256:45cde58e409a301715980c2b01d0c28bdde3770d8290b5eb2173759d9acb31a5"}, + {file = "aiosignal-1.3.2.tar.gz", hash = "sha256:a8c255c66fafb1e499c9351d0bf32ff2d8a0321595ebac3b93713656d2436f54"}, +] + +[package.dependencies] +frozenlist = ">=1.1.0" + +[[package]] +name = "alabaster" 
+version = "0.7.16" +description = "A light, configurable Sphinx theme" +optional = false +python-versions = ">=3.9" +groups = ["docs"] +files = [ + {file = "alabaster-0.7.16-py3-none-any.whl", hash = "sha256:b46733c07dce03ae4e150330b975c75737fa60f0a7c591b6c8bf4928a28e2c92"}, + {file = "alabaster-0.7.16.tar.gz", hash = "sha256:75a8b99c28a5dad50dd7f8ccdd447a121ddb3892da9e53d1ca5cca3106d58d65"}, +] + +[[package]] +name = "annotated-types" +version = "0.7.0" +description = "Reusable constraint types to use with typing.Annotated" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, + {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, +] + +[[package]] +name = "anyio" +version = "4.9.0" +description = "High level compatibility layer for multiple asynchronous event loop implementations" +optional = false +python-versions = ">=3.9" +groups = ["test"] +files = [ + {file = "anyio-4.9.0-py3-none-any.whl", hash = "sha256:9f76d541cad6e36af7beb62e978876f3b41e3e04f2c1fbf0884604c0a9c4d93c"}, + {file = "anyio-4.9.0.tar.gz", hash = "sha256:673c0c244e15788651a4ff38710fea9675823028a6f08a5eda409e0c9840a028"}, +] + +[package.dependencies] +idna = ">=2.8" +sniffio = ">=1.1" +typing_extensions = {version = ">=4.5", markers = "python_version < \"3.13\""} + +[package.extras] +doc = ["Sphinx (>=8.2,<9.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx_rtd_theme"] +test = ["anyio[trio]", "blockbuster (>=1.5.23)", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "trustme", "truststore (>=0.9.1) ; python_version >= \"3.10\"", "uvloop (>=0.21) ; platform_python_implementation == \"CPython\" and platform_system != \"Windows\" and python_version < \"3.14\""] +trio = ["trio (>=0.26.1)"] + 
+[[package]] +name = "appnope" +version = "0.1.4" +description = "Disable App Nap on macOS >= 10.9" +optional = false +python-versions = ">=3.6" +groups = ["test"] +markers = "platform_system == \"Darwin\"" +files = [ + {file = "appnope-0.1.4-py2.py3-none-any.whl", hash = "sha256:502575ee11cd7a28c0205f379b525beefebab9d161b7c964670864014ed7213c"}, + {file = "appnope-0.1.4.tar.gz", hash = "sha256:1de3860566df9caf38f01f86f65e0e13e379af54f9e4bee1e66b48f2efffd1ee"}, +] + +[[package]] +name = "argon2-cffi" +version = "25.1.0" +description = "Argon2 for Python" +optional = false +python-versions = ">=3.8" +groups = ["test"] +files = [ + {file = "argon2_cffi-25.1.0-py3-none-any.whl", hash = "sha256:fdc8b074db390fccb6eb4a3604ae7231f219aa669a2652e0f20e16ba513d5741"}, + {file = "argon2_cffi-25.1.0.tar.gz", hash = "sha256:694ae5cc8a42f4c4e2bf2ca0e64e51e23a040c6a517a85074683d3959e1346c1"}, +] + +[package.dependencies] +argon2-cffi-bindings = "*" + +[[package]] +name = "argon2-cffi-bindings" +version = "21.2.0" +description = "Low-level CFFI bindings for Argon2" +optional = false +python-versions = ">=3.6" +groups = ["test"] +files = [ + {file = "argon2-cffi-bindings-21.2.0.tar.gz", hash = "sha256:bb89ceffa6c791807d1305ceb77dbfacc5aa499891d2c55661c6459651fc39e3"}, + {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:ccb949252cb2ab3a08c02024acb77cfb179492d5701c7cbdbfd776124d4d2367"}, + {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9524464572e12979364b7d600abf96181d3541da11e23ddf565a32e70bd4dc0d"}, + {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b746dba803a79238e925d9046a63aa26bf86ab2a2fe74ce6b009a1c3f5c8f2ae"}, + {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:58ed19212051f49a523abb1dbe954337dc82d947fb6e5a0da60f7c8471a8476c"}, + {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:bd46088725ef7f58b5a1ef7ca06647ebaf0eb4baff7d1d0d177c6cc8744abd86"}, + {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-musllinux_1_1_i686.whl", hash = "sha256:8cd69c07dd875537a824deec19f978e0f2078fdda07fd5c42ac29668dda5f40f"}, + {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:f1152ac548bd5b8bcecfb0b0371f082037e47128653df2e8ba6e914d384f3c3e"}, + {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-win32.whl", hash = "sha256:603ca0aba86b1349b147cab91ae970c63118a0f30444d4bc80355937c950c082"}, + {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-win_amd64.whl", hash = "sha256:b2ef1c30440dbbcba7a5dc3e319408b59676e2e039e2ae11a8775ecf482b192f"}, + {file = "argon2_cffi_bindings-21.2.0-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:e415e3f62c8d124ee16018e491a009937f8cf7ebf5eb430ffc5de21b900dad93"}, + {file = "argon2_cffi_bindings-21.2.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:3e385d1c39c520c08b53d63300c3ecc28622f076f4c2b0e6d7e796e9f6502194"}, + {file = "argon2_cffi_bindings-21.2.0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2c3e3cc67fdb7d82c4718f19b4e7a87123caf8a93fde7e23cf66ac0337d3cb3f"}, + {file = "argon2_cffi_bindings-21.2.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6a22ad9800121b71099d0fb0a65323810a15f2e292f2ba450810a7316e128ee5"}, + {file = "argon2_cffi_bindings-21.2.0-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f9f8b450ed0547e3d473fdc8612083fd08dd2120d6ac8f73828df9b7d45bb351"}, + {file = "argon2_cffi_bindings-21.2.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:93f9bf70084f97245ba10ee36575f0c3f1e7d7724d67d8e5b08e61787c320ed7"}, + {file = 
"argon2_cffi_bindings-21.2.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:3b9ef65804859d335dc6b31582cad2c5166f0c3e7975f324d9ffaa34ee7e6583"}, + {file = "argon2_cffi_bindings-21.2.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d4966ef5848d820776f5f562a7d45fdd70c2f330c961d0d745b784034bd9f48d"}, + {file = "argon2_cffi_bindings-21.2.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:20ef543a89dee4db46a1a6e206cd015360e5a75822f76df533845c3cbaf72670"}, + {file = "argon2_cffi_bindings-21.2.0-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ed2937d286e2ad0cc79a7087d3c272832865f779430e0cc2b4f3718d3159b0cb"}, + {file = "argon2_cffi_bindings-21.2.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:5e00316dabdaea0b2dd82d141cc66889ced0cdcbfa599e8b471cf22c620c329a"}, +] + +[package.dependencies] +cffi = ">=1.0.1" + +[package.extras] +dev = ["cogapp", "pre-commit", "pytest", "wheel"] +tests = ["pytest"] + +[[package]] +name = "arrow" +version = "1.3.0" +description = "Better dates & times for Python" +optional = false +python-versions = ">=3.8" +groups = ["test"] +files = [ + {file = "arrow-1.3.0-py3-none-any.whl", hash = "sha256:c728b120ebc00eb84e01882a6f5e7927a53960aa990ce7dd2b10f39005a67f80"}, + {file = "arrow-1.3.0.tar.gz", hash = "sha256:d4540617648cb5f895730f1ad8c82a65f2dad0166f57b75f3ca54759c4d67a85"}, +] + +[package.dependencies] +python-dateutil = ">=2.7.0" +types-python-dateutil = ">=2.8.10" + +[package.extras] +doc = ["doc8", "sphinx (>=7.0.0)", "sphinx-autobuild", "sphinx-autodoc-typehints", "sphinx_rtd_theme (>=1.3.0)"] +test = ["dateparser (==1.*)", "pre-commit", "pytest", "pytest-cov", "pytest-mock", "pytz (==2021.1)", "simplejson (==3.*)"] + +[[package]] +name = "asttokens" +version = "3.0.0" +description = "Annotate AST trees with source code positions" +optional = false +python-versions = ">=3.8" +groups = ["main", "test"] 
+files = [ + {file = "asttokens-3.0.0-py3-none-any.whl", hash = "sha256:e3078351a059199dd5138cb1c706e6430c05eff2ff136af5eb4790f9d28932e2"}, + {file = "asttokens-3.0.0.tar.gz", hash = "sha256:0dcd8baa8d62b0c1d118b399b2ddba3c4aff271d0d7a9e0d4c1681c79035bbc7"}, +] + +[package.extras] +astroid = ["astroid (>=2,<4)"] +test = ["astroid (>=2,<4)", "pytest", "pytest-cov", "pytest-xdist"] + +[[package]] +name = "async-lru" +version = "2.0.5" +description = "Simple LRU cache for asyncio" +optional = false +python-versions = ">=3.9" +groups = ["test"] +files = [ + {file = "async_lru-2.0.5-py3-none-any.whl", hash = "sha256:ab95404d8d2605310d345932697371a5f40def0487c03d6d0ad9138de52c9943"}, + {file = "async_lru-2.0.5.tar.gz", hash = "sha256:481d52ccdd27275f42c43a928b4a50c3bfb2d67af4e78b170e3e0bb39c66e5bb"}, +] + +[[package]] +name = "attrs" +version = "25.3.0" +description = "Classes Without Boilerplate" +optional = false +python-versions = ">=3.8" +groups = ["main", "test"] +files = [ + {file = "attrs-25.3.0-py3-none-any.whl", hash = "sha256:427318ce031701fea540783410126f03899a97ffc6f61596ad581ac2e40e3bc3"}, + {file = "attrs-25.3.0.tar.gz", hash = "sha256:75d7cefc7fb576747b2c81b4442d4d4a1ce0900973527c011d1030fd3bf4af1b"}, +] + +[package.extras] +benchmark = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"] +cov = ["cloudpickle ; platform_python_implementation == \"CPython\"", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"] +dev = 
["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pre-commit-uv", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"] +docs = ["cogapp", "furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier"] +tests = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"] +tests-mypy = ["mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\""] + +[[package]] +name = "babel" +version = "2.17.0" +description = "Internationalization utilities" +optional = false +python-versions = ">=3.8" +groups = ["docs", "test"] +files = [ + {file = "babel-2.17.0-py3-none-any.whl", hash = "sha256:4d0b53093fdfb4b21c92b5213dba5a1b23885afa8383709427046b21c366e5f2"}, + {file = "babel-2.17.0.tar.gz", hash = "sha256:0c54cffb19f690cdcc52a3b50bcbf71e07a808d1c80d549f2459b9d2cf0afb9d"}, +] + +[package.extras] +dev = ["backports.zoneinfo ; python_version < \"3.9\"", "freezegun (>=1.0,<2.0)", "jinja2 (>=3.0)", "pytest (>=6.0)", "pytest-cov", "pytz", "setuptools", "tzdata ; sys_platform == \"win32\""] + +[[package]] +name = "bcrypt" +version = "4.3.0" +description = "Modern password hashing for your software and your servers" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "bcrypt-4.3.0-cp313-cp313t-macosx_10_12_universal2.whl", hash = 
"sha256:f01e060f14b6b57bbb72fc5b4a83ac21c443c9a2ee708e04a10e9192f90a6281"}, + {file = "bcrypt-4.3.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5eeac541cefd0bb887a371ef73c62c3cd78535e4887b310626036a7c0a817bb"}, + {file = "bcrypt-4.3.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:59e1aa0e2cd871b08ca146ed08445038f42ff75968c7ae50d2fdd7860ade2180"}, + {file = "bcrypt-4.3.0-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:0042b2e342e9ae3d2ed22727c1262f76cc4f345683b5c1715f0250cf4277294f"}, + {file = "bcrypt-4.3.0-cp313-cp313t-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:74a8d21a09f5e025a9a23e7c0fd2c7fe8e7503e4d356c0a2c1486ba010619f09"}, + {file = "bcrypt-4.3.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:0142b2cb84a009f8452c8c5a33ace5e3dfec4159e7735f5afe9a4d50a8ea722d"}, + {file = "bcrypt-4.3.0-cp313-cp313t-manylinux_2_34_aarch64.whl", hash = "sha256:12fa6ce40cde3f0b899729dbd7d5e8811cb892d31b6f7d0334a1f37748b789fd"}, + {file = "bcrypt-4.3.0-cp313-cp313t-manylinux_2_34_x86_64.whl", hash = "sha256:5bd3cca1f2aa5dbcf39e2aa13dd094ea181f48959e1071265de49cc2b82525af"}, + {file = "bcrypt-4.3.0-cp313-cp313t-musllinux_1_1_aarch64.whl", hash = "sha256:335a420cfd63fc5bc27308e929bee231c15c85cc4c496610ffb17923abf7f231"}, + {file = "bcrypt-4.3.0-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:0e30e5e67aed0187a1764911af023043b4542e70a7461ad20e837e94d23e1d6c"}, + {file = "bcrypt-4.3.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:3b8d62290ebefd49ee0b3ce7500f5dbdcf13b81402c05f6dafab9a1e1b27212f"}, + {file = "bcrypt-4.3.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:2ef6630e0ec01376f59a006dc72918b1bf436c3b571b80fa1968d775fa02fe7d"}, + {file = "bcrypt-4.3.0-cp313-cp313t-win32.whl", hash = "sha256:7a4be4cbf241afee43f1c3969b9103a41b40bcb3a3f467ab19f891d9bc4642e4"}, + {file = "bcrypt-4.3.0-cp313-cp313t-win_amd64.whl", hash = 
"sha256:5c1949bf259a388863ced887c7861da1df681cb2388645766c89fdfd9004c669"}, + {file = "bcrypt-4.3.0-cp38-abi3-macosx_10_12_universal2.whl", hash = "sha256:f81b0ed2639568bf14749112298f9e4e2b28853dab50a8b357e31798686a036d"}, + {file = "bcrypt-4.3.0-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:864f8f19adbe13b7de11ba15d85d4a428c7e2f344bac110f667676a0ff84924b"}, + {file = "bcrypt-4.3.0-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3e36506d001e93bffe59754397572f21bb5dc7c83f54454c990c74a468cd589e"}, + {file = "bcrypt-4.3.0-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:842d08d75d9fe9fb94b18b071090220697f9f184d4547179b60734846461ed59"}, + {file = "bcrypt-4.3.0-cp38-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:7c03296b85cb87db865d91da79bf63d5609284fc0cab9472fdd8367bbd830753"}, + {file = "bcrypt-4.3.0-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:62f26585e8b219cdc909b6a0069efc5e4267e25d4a3770a364ac58024f62a761"}, + {file = "bcrypt-4.3.0-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:beeefe437218a65322fbd0069eb437e7c98137e08f22c4660ac2dc795c31f8bb"}, + {file = "bcrypt-4.3.0-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:97eea7408db3a5bcce4a55d13245ab3fa566e23b4c67cd227062bb49e26c585d"}, + {file = "bcrypt-4.3.0-cp38-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:191354ebfe305e84f344c5964c7cd5f924a3bfc5d405c75ad07f232b6dffb49f"}, + {file = "bcrypt-4.3.0-cp38-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:41261d64150858eeb5ff43c753c4b216991e0ae16614a308a15d909503617732"}, + {file = "bcrypt-4.3.0-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:33752b1ba962ee793fa2b6321404bf20011fe45b9afd2a842139de3011898fef"}, + {file = "bcrypt-4.3.0-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:50e6e80a4bfd23a25f5c05b90167c19030cf9f87930f7cb2eacb99f45d1c3304"}, + {file = "bcrypt-4.3.0-cp38-abi3-win32.whl", hash = 
"sha256:67a561c4d9fb9465ec866177e7aebcad08fe23aaf6fbd692a6fab69088abfc51"}, + {file = "bcrypt-4.3.0-cp38-abi3-win_amd64.whl", hash = "sha256:584027857bc2843772114717a7490a37f68da563b3620f78a849bcb54dc11e62"}, + {file = "bcrypt-4.3.0-cp39-abi3-macosx_10_12_universal2.whl", hash = "sha256:0d3efb1157edebfd9128e4e46e2ac1a64e0c1fe46fb023158a407c7892b0f8c3"}, + {file = "bcrypt-4.3.0-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:08bacc884fd302b611226c01014eca277d48f0a05187666bca23aac0dad6fe24"}, + {file = "bcrypt-4.3.0-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6746e6fec103fcd509b96bacdfdaa2fbde9a553245dbada284435173a6f1aef"}, + {file = "bcrypt-4.3.0-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:afe327968aaf13fc143a56a3360cb27d4ad0345e34da12c7290f1b00b8fe9a8b"}, + {file = "bcrypt-4.3.0-cp39-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:d9af79d322e735b1fc33404b5765108ae0ff232d4b54666d46730f8ac1a43676"}, + {file = "bcrypt-4.3.0-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:f1e3ffa1365e8702dc48c8b360fef8d7afeca482809c5e45e653af82ccd088c1"}, + {file = "bcrypt-4.3.0-cp39-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:3004df1b323d10021fda07a813fd33e0fd57bef0e9a480bb143877f6cba996fe"}, + {file = "bcrypt-4.3.0-cp39-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:531457e5c839d8caea9b589a1bcfe3756b0547d7814e9ce3d437f17da75c32b0"}, + {file = "bcrypt-4.3.0-cp39-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:17a854d9a7a476a89dcef6c8bd119ad23e0f82557afbd2c442777a16408e614f"}, + {file = "bcrypt-4.3.0-cp39-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:6fb1fd3ab08c0cbc6826a2e0447610c6f09e983a281b919ed721ad32236b8b23"}, + {file = "bcrypt-4.3.0-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:e965a9c1e9a393b8005031ff52583cedc15b7884fce7deb8b0346388837d6cfe"}, + {file = "bcrypt-4.3.0-cp39-abi3-musllinux_1_2_x86_64.whl", hash = 
"sha256:79e70b8342a33b52b55d93b3a59223a844962bef479f6a0ea318ebbcadf71505"}, + {file = "bcrypt-4.3.0-cp39-abi3-win32.whl", hash = "sha256:b4d4e57f0a63fd0b358eb765063ff661328f69a04494427265950c71b992a39a"}, + {file = "bcrypt-4.3.0-cp39-abi3-win_amd64.whl", hash = "sha256:e53e074b120f2877a35cc6c736b8eb161377caae8925c17688bd46ba56daaa5b"}, + {file = "bcrypt-4.3.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c950d682f0952bafcceaf709761da0a32a942272fad381081b51096ffa46cea1"}, + {file = "bcrypt-4.3.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:107d53b5c67e0bbc3f03ebf5b030e0403d24dda980f8e244795335ba7b4a027d"}, + {file = "bcrypt-4.3.0-pp310-pypy310_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:b693dbb82b3c27a1604a3dff5bfc5418a7e6a781bb795288141e5f80cf3a3492"}, + {file = "bcrypt-4.3.0-pp310-pypy310_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:b6354d3760fcd31994a14c89659dee887f1351a06e5dac3c1142307172a79f90"}, + {file = "bcrypt-4.3.0-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:a839320bf27d474e52ef8cb16449bb2ce0ba03ca9f44daba6d93fa1d8828e48a"}, + {file = "bcrypt-4.3.0-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:bdc6a24e754a555d7316fa4774e64c6c3997d27ed2d1964d55920c7c227bc4ce"}, + {file = "bcrypt-4.3.0-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:55a935b8e9a1d2def0626c4269db3fcd26728cbff1e84f0341465c31c4ee56d8"}, + {file = "bcrypt-4.3.0-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:57967b7a28d855313a963aaea51bf6df89f833db4320da458e5b3c5ab6d4c938"}, + {file = "bcrypt-4.3.0.tar.gz", hash = "sha256:3a3fd2204178b6d2adcf09cb4f6426ffef54762577a7c9b54c159008cb288c18"}, +] + +[package.extras] +tests = ["pytest (>=3.2.1,!=3.3.0)"] +typecheck = ["mypy"] + +[[package]] +name = "beautifulsoup4" +version = "4.13.4" +description = "Screen-scraping library" +optional = false +python-versions = ">=3.7.0" +groups = ["test"] +files = [ + {file = 
"beautifulsoup4-4.13.4-py3-none-any.whl", hash = "sha256:9bbbb14bfde9d79f38b8cd5f8c7c85f4b8f2523190ebed90e950a8dea4cb1c4b"}, + {file = "beautifulsoup4-4.13.4.tar.gz", hash = "sha256:dbb3c4e1ceae6aefebdaf2423247260cd062430a410e38c66f2baa50a8437195"}, +] + +[package.dependencies] +soupsieve = ">1.2" +typing-extensions = ">=4.0.0" + +[package.extras] +cchardet = ["cchardet"] +chardet = ["chardet"] +charset-normalizer = ["charset-normalizer"] +html5lib = ["html5lib"] +lxml = ["lxml"] + +[[package]] +name = "bleach" +version = "6.2.0" +description = "An easy safelist-based HTML-sanitizing tool." +optional = false +python-versions = ">=3.9" +groups = ["test"] +files = [ + {file = "bleach-6.2.0-py3-none-any.whl", hash = "sha256:117d9c6097a7c3d22fd578fcd8d35ff1e125df6736f554da4e432fdd63f31e5e"}, + {file = "bleach-6.2.0.tar.gz", hash = "sha256:123e894118b8a599fd80d3ec1a6d4cc7ce4e5882b1317a7e1ba69b56e95f991f"}, +] + +[package.dependencies] +tinycss2 = {version = ">=1.1.0,<1.5", optional = true, markers = "extra == \"css\""} +webencodings = "*" + +[package.extras] +css = ["tinycss2 (>=1.1.0,<1.5)"] + +[[package]] +name = "cachetools" +version = "5.5.2" +description = "Extensible memoizing collections and decorators" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "cachetools-5.5.2-py3-none-any.whl", hash = "sha256:d26a22bcc62eb95c3beabd9f1ee5e820d3d2704fe2967cbe350e20c8ffcd3f0a"}, + {file = "cachetools-5.5.2.tar.gz", hash = "sha256:1a661caa9175d26759571b2e19580f9d6393969e5dfca11fdb1f947a23e640d4"}, +] + +[[package]] +name = "certifi" +version = "2025.4.26" +description = "Python package for providing Mozilla's CA Bundle." 
+optional = false +python-versions = ">=3.6" +groups = ["main", "docs", "test"] +files = [ + {file = "certifi-2025.4.26-py3-none-any.whl", hash = "sha256:30350364dfe371162649852c63336a15c70c6510c2ad5015b21c2345311805f3"}, + {file = "certifi-2025.4.26.tar.gz", hash = "sha256:0a816057ea3cdefcef70270d2c515e4506bbc954f417fa5ade2021213bb8f0c6"}, +] + +[[package]] +name = "cffi" +version = "1.17.1" +description = "Foreign Function Interface for Python calling C code." +optional = false +python-versions = ">=3.8" +groups = ["main", "test"] +files = [ + {file = "cffi-1.17.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14"}, + {file = "cffi-1.17.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:edae79245293e15384b51f88b00613ba9f7198016a5948b5dddf4917d4d26382"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45398b671ac6d70e67da8e4224a065cec6a93541bb7aebe1b198a61b58c7b702"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ad9413ccdeda48c5afdae7e4fa2192157e991ff761e7ab8fdd8926f40b160cc3"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5da5719280082ac6bd9aa7becb3938dc9f9cbd57fac7d2871717b1feb0902ab6"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bb1a08b8008b281856e5971307cc386a8e9c5b625ac297e853d36da6efe9c17"}, + {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:045d61c734659cc045141be4bae381a41d89b741f795af1dd018bfb532fd0df8"}, + {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:6883e737d7d9e4899a8a695e00ec36bd4e5e4f18fabe0aca0efe0a4b44cdb13e"}, + 
{file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6b8b4a92e1c65048ff98cfe1f735ef8f1ceb72e3d5f0c25fdb12087a23da22be"}, + {file = "cffi-1.17.1-cp310-cp310-win32.whl", hash = "sha256:c9c3d058ebabb74db66e431095118094d06abf53284d9c81f27300d0e0d8bc7c"}, + {file = "cffi-1.17.1-cp310-cp310-win_amd64.whl", hash = "sha256:0f048dcf80db46f0098ccac01132761580d28e28bc0f78ae0d58048063317e15"}, + {file = "cffi-1.17.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a45e3c6913c5b87b3ff120dcdc03f6131fa0065027d0ed7ee6190736a74cd401"}, + {file = "cffi-1.17.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:30c5e0cb5ae493c04c8b42916e52ca38079f1b235c2f8ae5f4527b963c401caf"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f75c7ab1f9e4aca5414ed4d8e5c0e303a34f4421f8a0d47a4d019ceff0ab6af4"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1ed2dd2972641495a3ec98445e09766f077aee98a1c896dcb4ad0d303628e41"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:46bf43160c1a35f7ec506d254e5c890f3c03648a4dbac12d624e4490a7046cd1"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a24ed04c8ffd54b0729c07cee15a81d964e6fee0e3d4d342a27b020d22959dc6"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:610faea79c43e44c71e1ec53a554553fa22321b65fae24889706c0a84d4ad86d"}, + {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:a9b15d491f3ad5d692e11f6b71f7857e7835eb677955c00cc0aefcd0669adaf6"}, + {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:de2ea4b5833625383e464549fec1bc395c1bdeeb5f25c4a3a82b5a8c756ec22f"}, + {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:fc48c783f9c87e60831201f2cce7f3b2e4846bf4d8728eabe54d60700b318a0b"}, + 
{file = "cffi-1.17.1-cp311-cp311-win32.whl", hash = "sha256:85a950a4ac9c359340d5963966e3e0a94a676bd6245a4b55bc43949eee26a655"}, + {file = "cffi-1.17.1-cp311-cp311-win_amd64.whl", hash = "sha256:caaf0640ef5f5517f49bc275eca1406b0ffa6aa184892812030f04c2abf589a0"}, + {file = "cffi-1.17.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4"}, + {file = "cffi-1.17.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93"}, + {file = "cffi-1.17.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3"}, + {file = "cffi-1.17.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8"}, + {file = "cffi-1.17.1-cp312-cp312-win32.whl", hash = "sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65"}, + {file = "cffi-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903"}, + {file = 
"cffi-1.17.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e"}, + {file = "cffi-1.17.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd"}, + {file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed"}, + {file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9"}, + {file = "cffi-1.17.1-cp313-cp313-win32.whl", hash = "sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d"}, + {file = "cffi-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a"}, + {file = "cffi-1.17.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:636062ea65bd0195bc012fea9321aca499c0504409f413dc88af450b57ffd03b"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:c7eac2ef9b63c79431bc4b25f1cd649d7f061a28808cbc6c47b534bd789ef964"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e221cf152cff04059d011ee126477f0d9588303eb57e88923578ace7baad17f9"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:31000ec67d4221a71bd3f67df918b1f88f676f1c3b535a7eb473255fdc0b83fc"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6f17be4345073b0a7b8ea599688f692ac3ef23ce28e5df79c04de519dbc4912c"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2b1fac190ae3ebfe37b979cc1ce69c81f4e4fe5746bb401dca63a9062cdaf1"}, + {file = "cffi-1.17.1-cp38-cp38-win32.whl", hash = "sha256:7596d6620d3fa590f677e9ee430df2958d2d6d6de2feeae5b20e82c00b76fbf8"}, + {file = "cffi-1.17.1-cp38-cp38-win_amd64.whl", hash = "sha256:78122be759c3f8a014ce010908ae03364d00a1f81ab5c7f4a7a5120607ea56e1"}, + {file = "cffi-1.17.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b2ab587605f4ba0bf81dc0cb08a41bd1c0a5906bd59243d56bad7668a6fc6c16"}, + {file = "cffi-1.17.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:28b16024becceed8c6dfbc75629e27788d8a3f9030691a1dbf9821a128b22c36"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1d599671f396c4723d016dbddb72fe8e0397082b0a77a4fab8028923bec050e8"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca74b8dbe6e8e8263c0ffd60277de77dcee6c837a3d0881d8c1ead7268c9e576"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f7f5baafcc48261359e14bcd6d9bff6d4b28d9103847c9e136694cb0501aef87"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98e3969bcff97cae1b2def8ba499ea3d6f31ddfdb7635374834cf89a1a08ecf0"}, + {file = 
"cffi-1.17.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cdf5ce3acdfd1661132f2a9c19cac174758dc2352bfe37d98aa7512c6b7178b3"}, + {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9755e4345d1ec879e3849e62222a18c7174d65a6a92d5b346b1863912168b595"}, + {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f1e22e8c4419538cb197e4dd60acc919d7696e5ef98ee4da4e01d3f8cfa4cc5a"}, + {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c03e868a0b3bc35839ba98e74211ed2b05d2119be4e8a0f224fba9384f1fe02e"}, + {file = "cffi-1.17.1-cp39-cp39-win32.whl", hash = "sha256:e31ae45bc2e29f6b2abd0de1cc3b9d5205aa847cafaecb8af1476a609a2f6eb7"}, + {file = "cffi-1.17.1-cp39-cp39-win_amd64.whl", hash = "sha256:d016c76bdd850f3c626af19b0542c9677ba156e4ee4fccfdd7848803533ef662"}, + {file = "cffi-1.17.1.tar.gz", hash = "sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824"}, +] + +[package.dependencies] +pycparser = "*" + +[[package]] +name = "charset-normalizer" +version = "3.4.2" +description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 
+optional = false +python-versions = ">=3.7" +groups = ["main", "docs", "test"] +files = [ + {file = "charset_normalizer-3.4.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7c48ed483eb946e6c04ccbe02c6b4d1d48e51944b6db70f697e089c193404941"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b2d318c11350e10662026ad0eb71bb51c7812fc8590825304ae0bdd4ac283acd"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9cbfacf36cb0ec2897ce0ebc5d08ca44213af24265bd56eca54bee7923c48fd6"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:18dd2e350387c87dabe711b86f83c9c78af772c748904d372ade190b5c7c9d4d"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8075c35cd58273fee266c58c0c9b670947c19df5fb98e7b66710e04ad4e9ff86"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5bf4545e3b962767e5c06fe1738f951f77d27967cb2caa64c28be7c4563e162c"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:7a6ab32f7210554a96cd9e33abe3ddd86732beeafc7a28e9955cdf22ffadbab0"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:b33de11b92e9f75a2b545d6e9b6f37e398d86c3e9e9653c4864eb7e89c5773ef"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:8755483f3c00d6c9a77f490c17e6ab0c8729e39e6390328e42521ef175380ae6"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:68a328e5f55ec37c57f19ebb1fdc56a248db2e3e9ad769919a58672958e8f366"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:21b2899062867b0e1fde9b724f8aecb1af14f2778d69aacd1a5a1853a597a5db"}, + {file = 
"charset_normalizer-3.4.2-cp310-cp310-win32.whl", hash = "sha256:e8082b26888e2f8b36a042a58307d5b917ef2b1cacab921ad3323ef91901c71a"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-win_amd64.whl", hash = "sha256:f69a27e45c43520f5487f27627059b64aaf160415589230992cec34c5e18a509"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:be1e352acbe3c78727a16a455126d9ff83ea2dfdcbc83148d2982305a04714c2"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa88ca0b1932e93f2d961bf3addbb2db902198dca337d88c89e1559e066e7645"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d524ba3f1581b35c03cb42beebab4a13e6cdad7b36246bd22541fa585a56cccd"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28a1005facc94196e1fb3e82a3d442a9d9110b8434fc1ded7a24a2983c9888d8"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fdb20a30fe1175ecabed17cbf7812f7b804b8a315a25f24678bcdf120a90077f"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0f5d9ed7f254402c9e7d35d2f5972c9bbea9040e99cd2861bd77dc68263277c7"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:efd387a49825780ff861998cd959767800d54f8308936b21025326de4b5a42b9"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:f0aa37f3c979cf2546b73e8222bbfa3dc07a641585340179d768068e3455e544"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:e70e990b2137b29dc5564715de1e12701815dacc1d056308e2b17e9095372a82"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_s390x.whl", hash = 
"sha256:0c8c57f84ccfc871a48a47321cfa49ae1df56cd1d965a09abe84066f6853b9c0"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:6b66f92b17849b85cad91259efc341dce9c1af48e2173bf38a85c6329f1033e5"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-win32.whl", hash = "sha256:daac4765328a919a805fa5e2720f3e94767abd632ae410a9062dff5412bae65a"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-win_amd64.whl", hash = "sha256:e53efc7c7cee4c1e70661e2e112ca46a575f90ed9ae3fef200f2a25e954f4b28"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0c29de6a1a95f24b9a1aa7aefd27d2487263f00dfd55a77719b530788f75cff7"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cddf7bd982eaa998934a91f69d182aec997c6c468898efe6679af88283b498d3"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fcbe676a55d7445b22c10967bceaaf0ee69407fbe0ece4d032b6eb8d4565982a"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d41c4d287cfc69060fa91cae9683eacffad989f1a10811995fa309df656ec214"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4e594135de17ab3866138f496755f302b72157d115086d100c3f19370839dd3a"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cf713fe9a71ef6fd5adf7a79670135081cd4431c2943864757f0fa3a65b1fafd"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a370b3e078e418187da8c3674eddb9d983ec09445c99a3a263c2011993522981"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:a955b438e62efdf7e0b7b52a64dc5c3396e2634baa62471768a64bc2adb73d5c"}, + {file = 
"charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:7222ffd5e4de8e57e03ce2cef95a4c43c98fcb72ad86909abdfc2c17d227fc1b"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:bee093bf902e1d8fc0ac143c88902c3dfc8941f7ea1d6a8dd2bcb786d33db03d"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:dedb8adb91d11846ee08bec4c8236c8549ac721c245678282dcb06b221aab59f"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-win32.whl", hash = "sha256:db4c7bf0e07fc3b7d89ac2a5880a6a8062056801b83ff56d8464b70f65482b6c"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-win_amd64.whl", hash = "sha256:5a9979887252a82fefd3d3ed2a8e3b937a7a809f65dcb1e068b090e165bbe99e"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:926ca93accd5d36ccdabd803392ddc3e03e6d4cd1cf17deff3b989ab8e9dbcf0"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eba9904b0f38a143592d9fc0e19e2df0fa2e41c3c3745554761c5f6447eedabf"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3fddb7e2c84ac87ac3a947cb4e66d143ca5863ef48e4a5ecb83bd48619e4634e"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98f862da73774290f251b9df8d11161b6cf25b599a66baf087c1ffe340e9bfd1"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c9379d65defcab82d07b2a9dfbfc2e95bc8fe0ebb1b176a3190230a3ef0e07c"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e635b87f01ebc977342e2697d05b56632f5f879a4f15955dfe8cef2448b51691"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = 
"sha256:1c95a1e2902a8b722868587c0e1184ad5c55631de5afc0eb96bc4b0d738092c0"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ef8de666d6179b009dce7bcb2ad4c4a779f113f12caf8dc77f0162c29d20490b"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:32fc0341d72e0f73f80acb0a2c94216bd704f4f0bce10aedea38f30502b271ff"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:289200a18fa698949d2b39c671c2cc7a24d44096784e76614899a7ccf2574b7b"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4a476b06fbcf359ad25d34a057b7219281286ae2477cc5ff5e3f70a246971148"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-win32.whl", hash = "sha256:aaeeb6a479c7667fbe1099af9617c83aaca22182d6cf8c53966491a0f1b7ffb7"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-win_amd64.whl", hash = "sha256:aa6af9e7d59f9c12b33ae4e9450619cf2488e2bbe9b44030905877f0b2324980"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1cad5f45b3146325bb38d6855642f6fd609c3f7cad4dbaf75549bf3b904d3184"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b2680962a4848b3c4f155dc2ee64505a9c57186d0d56b43123b17ca3de18f0fa"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:36b31da18b8890a76ec181c3cf44326bf2c48e36d393ca1b72b3f484113ea344"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f4074c5a429281bf056ddd4c5d3b740ebca4d43ffffe2ef4bf4d2d05114299da"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c9e36a97bee9b86ef9a1cf7bb96747eb7a15c2f22bdb5b516434b00f2a599f02"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_aarch64.whl", 
hash = "sha256:1b1bde144d98e446b056ef98e59c256e9294f6b74d7af6846bf5ffdafd687a7d"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:915f3849a011c1f593ab99092f3cecfcb4d65d8feb4a64cf1bf2d22074dc0ec4"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:fb707f3e15060adf5b7ada797624a6c6e0138e2a26baa089df64c68ee98e040f"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:25a23ea5c7edc53e0f29bae2c44fcb5a1aa10591aae107f2a2b2583a9c5cbc64"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:770cab594ecf99ae64c236bc9ee3439c3f46be49796e265ce0cc8bc17b10294f"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-win32.whl", hash = "sha256:6a0289e4589e8bdfef02a80478f1dfcb14f0ab696b5a00e1f4b8a14a307a3c58"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-win_amd64.whl", hash = "sha256:6fc1f5b51fa4cecaa18f2bd7a003f3dd039dd615cd69a2afd6d3b19aed6775f2"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:76af085e67e56c8816c3ccf256ebd136def2ed9654525348cfa744b6802b69eb"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e45ba65510e2647721e35323d6ef54c7974959f6081b58d4ef5d87c60c84919a"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:046595208aae0120559a67693ecc65dd75d46f7bf687f159127046628178dc45"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:75d10d37a47afee94919c4fab4c22b9bc2a8bf7d4f46f87363bcf0573f3ff4f5"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6333b3aa5a12c26b2a4d4e7335a28f1475e0e5e17d69d55141ee3cab736f66d1"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash 
= "sha256:e8323a9b031aa0393768b87f04b4164a40037fb2a3c11ac06a03ffecd3618027"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:24498ba8ed6c2e0b56d4acbf83f2d989720a93b41d712ebd4f4979660db4417b"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:844da2b5728b5ce0e32d863af26f32b5ce61bc4273a9c720a9f3aa9df73b1455"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:65c981bdbd3f57670af8b59777cbfae75364b483fa8a9f420f08094531d54a01"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:3c21d4fca343c805a52c0c78edc01e3477f6dd1ad7c47653241cf2a206d4fc58"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:dc7039885fa1baf9be153a0626e337aa7ec8bf96b0128605fb0d77788ddc1681"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-win32.whl", hash = "sha256:8272b73e1c5603666618805fe821edba66892e2870058c94c53147602eab29c7"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-win_amd64.whl", hash = "sha256:70f7172939fdf8790425ba31915bfbe8335030f05b9913d7ae00a87d4395620a"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:005fa3432484527f9732ebd315da8da8001593e2cf46a3d817669f062c3d9ed4"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e92fca20c46e9f5e1bb485887d074918b13543b1c2a1185e69bb8d17ab6236a7"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:50bf98d5e563b83cc29471fa114366e6806bc06bc7a25fd59641e41445327836"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:721c76e84fe669be19c5791da68232ca2e05ba5185575086e384352e2c309597"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:82d8fd25b7f4675d0c47cf95b594d4e7b158aca33b76aa63d07186e13c0e0ab7"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b3daeac64d5b371dea99714f08ffc2c208522ec6b06fbc7866a450dd446f5c0f"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:dccab8d5fa1ef9bfba0590ecf4d46df048d18ffe3eec01eeb73a42e0d9e7a8ba"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:aaf27faa992bfee0264dc1f03f4c75e9fcdda66a519db6b957a3f826e285cf12"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:eb30abc20df9ab0814b5a2524f23d75dcf83cde762c161917a2b4b7b55b1e518"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:c72fbbe68c6f32f251bdc08b8611c7b3060612236e960ef848e0a517ddbe76c5"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:982bb1e8b4ffda883b3d0a521e23abcd6fd17418f6d2c4118d257a10199c0ce3"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-win32.whl", hash = "sha256:43e0933a0eff183ee85833f341ec567c0980dae57c464d8a508e1b2ceb336471"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-win_amd64.whl", hash = "sha256:d11b54acf878eef558599658b0ffca78138c8c3655cf4f3a4a673c437e67732e"}, + {file = "charset_normalizer-3.4.2-py3-none-any.whl", hash = "sha256:7f56930ab0abd1c45cd15be65cc741c28b1c9a34876ce8c17a2fa107810c0af0"}, + {file = "charset_normalizer-3.4.2.tar.gz", hash = "sha256:5baececa9ecba31eff645232d59845c07aa030f0c81ee70184a90d35099a0e63"}, +] + +[[package]] +name = "click" +version = "8.2.1" +description = "Composable command line interface toolkit" +optional = false +python-versions = ">=3.10" +groups = ["main"] +files = [ + {file = "click-8.2.1-py3-none-any.whl", hash = "sha256:61a3265b914e850b85317d0b3109c7f8cd35a670f963866005d6ef1d5175a12b"}, + {file = "click-8.2.1.tar.gz", hash = 
"sha256:27c491cc05d968d271d5a1db13e3b5a184636d9d930f148c50b038f0d0646202"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "platform_system == \"Windows\""} + +[[package]] +name = "colorama" +version = "0.4.6" +description = "Cross-platform colored terminal text." +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +groups = ["main", "docs", "test"] +files = [ + {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, + {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, +] +markers = {main = "platform_system == \"Windows\" or sys_platform == \"win32\"", docs = "sys_platform == \"win32\"", test = "sys_platform == \"win32\""} + +[[package]] +name = "colorful" +version = "0.5.6" +description = "Terminal string styling done right, in Python." +optional = false +python-versions = "*" +groups = ["main"] +files = [ + {file = "colorful-0.5.6-py2.py3-none-any.whl", hash = "sha256:eab8c1c809f5025ad2b5238a50bd691e26850da8cac8f90d660ede6ea1af9f1e"}, + {file = "colorful-0.5.6.tar.gz", hash = "sha256:b56d5c01db1dac4898308ea889edcb113fbee3e6ec5df4bacffd61d5241b5b8d"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "platform_system == \"Windows\""} + +[[package]] +name = "comm" +version = "0.2.2" +description = "Jupyter Python Comm implementation, for usage in ipykernel, xeus-python etc." 
+optional = false +python-versions = ">=3.8" +groups = ["main", "test"] +files = [ + {file = "comm-0.2.2-py3-none-any.whl", hash = "sha256:e6fb86cb70ff661ee8c9c14e7d36d6de3b4066f1441be4063df9c5009f0a64d3"}, + {file = "comm-0.2.2.tar.gz", hash = "sha256:3fd7a84065306e07bea1773df6eb8282de51ba82f77c72f9c85716ab11fe980e"}, +] + +[package.dependencies] +traitlets = ">=4" + +[package.extras] +test = ["pytest"] + +[[package]] +name = "coverage" +version = "7.6.4" +description = "Code coverage measurement for Python" +optional = false +python-versions = ">=3.9" +groups = ["test"] +files = [ + {file = "coverage-7.6.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5f8ae553cba74085db385d489c7a792ad66f7f9ba2ee85bfa508aeb84cf0ba07"}, + {file = "coverage-7.6.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8165b796df0bd42e10527a3f493c592ba494f16ef3c8b531288e3d0d72c1f6f0"}, + {file = "coverage-7.6.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c7c8b95bf47db6d19096a5e052ffca0a05f335bc63cef281a6e8fe864d450a72"}, + {file = "coverage-7.6.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8ed9281d1b52628e81393f5eaee24a45cbd64965f41857559c2b7ff19385df51"}, + {file = "coverage-7.6.4-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0809082ee480bb8f7416507538243c8863ac74fd8a5d2485c46f0f7499f2b491"}, + {file = "coverage-7.6.4-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d541423cdd416b78626b55f123412fcf979d22a2c39fce251b350de38c15c15b"}, + {file = "coverage-7.6.4-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:58809e238a8a12a625c70450b48e8767cff9eb67c62e6154a642b21ddf79baea"}, + {file = "coverage-7.6.4-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:c9b8e184898ed014884ca84c70562b4a82cbc63b044d366fedc68bc2b2f3394a"}, + {file = "coverage-7.6.4-cp310-cp310-win32.whl", hash = 
"sha256:6bd818b7ea14bc6e1f06e241e8234508b21edf1b242d49831831a9450e2f35fa"}, + {file = "coverage-7.6.4-cp310-cp310-win_amd64.whl", hash = "sha256:06babbb8f4e74b063dbaeb74ad68dfce9186c595a15f11f5d5683f748fa1d172"}, + {file = "coverage-7.6.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:73d2b73584446e66ee633eaad1a56aad577c077f46c35ca3283cd687b7715b0b"}, + {file = "coverage-7.6.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:51b44306032045b383a7a8a2c13878de375117946d68dcb54308111f39775a25"}, + {file = "coverage-7.6.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0b3fb02fe73bed561fa12d279a417b432e5b50fe03e8d663d61b3d5990f29546"}, + {file = "coverage-7.6.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ed8fe9189d2beb6edc14d3ad19800626e1d9f2d975e436f84e19efb7fa19469b"}, + {file = "coverage-7.6.4-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b369ead6527d025a0fe7bd3864e46dbee3aa8f652d48df6174f8d0bac9e26e0e"}, + {file = "coverage-7.6.4-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:ade3ca1e5f0ff46b678b66201f7ff477e8fa11fb537f3b55c3f0568fbfe6e718"}, + {file = "coverage-7.6.4-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:27fb4a050aaf18772db513091c9c13f6cb94ed40eacdef8dad8411d92d9992db"}, + {file = "coverage-7.6.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:4f704f0998911abf728a7783799444fcbbe8261c4a6c166f667937ae6a8aa522"}, + {file = "coverage-7.6.4-cp311-cp311-win32.whl", hash = "sha256:29155cd511ee058e260db648b6182c419422a0d2e9a4fa44501898cf918866cf"}, + {file = "coverage-7.6.4-cp311-cp311-win_amd64.whl", hash = "sha256:8902dd6a30173d4ef09954bfcb24b5d7b5190cf14a43170e386979651e09ba19"}, + {file = "coverage-7.6.4-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:12394842a3a8affa3ba62b0d4ab7e9e210c5e366fbac3e8b2a68636fb19892c2"}, + {file = 
"coverage-7.6.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2b6b4c83d8e8ea79f27ab80778c19bc037759aea298da4b56621f4474ffeb117"}, + {file = "coverage-7.6.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1d5b8007f81b88696d06f7df0cb9af0d3b835fe0c8dbf489bad70b45f0e45613"}, + {file = "coverage-7.6.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b57b768feb866f44eeed9f46975f3d6406380275c5ddfe22f531a2bf187eda27"}, + {file = "coverage-7.6.4-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5915fcdec0e54ee229926868e9b08586376cae1f5faa9bbaf8faf3561b393d52"}, + {file = "coverage-7.6.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:0b58c672d14f16ed92a48db984612f5ce3836ae7d72cdd161001cc54512571f2"}, + {file = "coverage-7.6.4-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:2fdef0d83a2d08d69b1f2210a93c416d54e14d9eb398f6ab2f0a209433db19e1"}, + {file = "coverage-7.6.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:8cf717ee42012be8c0cb205dbbf18ffa9003c4cbf4ad078db47b95e10748eec5"}, + {file = "coverage-7.6.4-cp312-cp312-win32.whl", hash = "sha256:7bb92c539a624cf86296dd0c68cd5cc286c9eef2d0c3b8b192b604ce9de20a17"}, + {file = "coverage-7.6.4-cp312-cp312-win_amd64.whl", hash = "sha256:1032e178b76a4e2b5b32e19d0fd0abbce4b58e77a1ca695820d10e491fa32b08"}, + {file = "coverage-7.6.4-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:023bf8ee3ec6d35af9c1c6ccc1d18fa69afa1cb29eaac57cb064dbb262a517f9"}, + {file = "coverage-7.6.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:b0ac3d42cb51c4b12df9c5f0dd2f13a4f24f01943627120ec4d293c9181219ba"}, + {file = "coverage-7.6.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f8fe4984b431f8621ca53d9380901f62bfb54ff759a1348cd140490ada7b693c"}, + {file = 
"coverage-7.6.4-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5fbd612f8a091954a0c8dd4c0b571b973487277d26476f8480bfa4b2a65b5d06"}, + {file = "coverage-7.6.4-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dacbc52de979f2823a819571f2e3a350a7e36b8cb7484cdb1e289bceaf35305f"}, + {file = "coverage-7.6.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:dab4d16dfef34b185032580e2f2f89253d302facba093d5fa9dbe04f569c4f4b"}, + {file = "coverage-7.6.4-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:862264b12ebb65ad8d863d51f17758b1684560b66ab02770d4f0baf2ff75da21"}, + {file = "coverage-7.6.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:5beb1ee382ad32afe424097de57134175fea3faf847b9af002cc7895be4e2a5a"}, + {file = "coverage-7.6.4-cp313-cp313-win32.whl", hash = "sha256:bf20494da9653f6410213424f5f8ad0ed885e01f7e8e59811f572bdb20b8972e"}, + {file = "coverage-7.6.4-cp313-cp313-win_amd64.whl", hash = "sha256:182e6cd5c040cec0a1c8d415a87b67ed01193ed9ad458ee427741c7d8513d963"}, + {file = "coverage-7.6.4-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:a181e99301a0ae128493a24cfe5cfb5b488c4e0bf2f8702091473d033494d04f"}, + {file = "coverage-7.6.4-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:df57bdbeffe694e7842092c5e2e0bc80fff7f43379d465f932ef36f027179806"}, + {file = "coverage-7.6.4-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0bcd1069e710600e8e4cf27f65c90c7843fa8edfb4520fb0ccb88894cad08b11"}, + {file = "coverage-7.6.4-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:99b41d18e6b2a48ba949418db48159d7a2e81c5cc290fc934b7d2380515bd0e3"}, + {file = "coverage-7.6.4-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a6b1e54712ba3474f34b7ef7a41e65bd9037ad47916ccb1cc78769bae324c01a"}, + {file 
= "coverage-7.6.4-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:53d202fd109416ce011578f321460795abfe10bb901b883cafd9b3ef851bacfc"}, + {file = "coverage-7.6.4-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:c48167910a8f644671de9f2083a23630fbf7a1cb70ce939440cd3328e0919f70"}, + {file = "coverage-7.6.4-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:cc8ff50b50ce532de2fa7a7daae9dd12f0a699bfcd47f20945364e5c31799fef"}, + {file = "coverage-7.6.4-cp313-cp313t-win32.whl", hash = "sha256:b8d3a03d9bfcaf5b0141d07a88456bb6a4c3ce55c080712fec8418ef3610230e"}, + {file = "coverage-7.6.4-cp313-cp313t-win_amd64.whl", hash = "sha256:f3ddf056d3ebcf6ce47bdaf56142af51bb7fad09e4af310241e9db7a3a8022e1"}, + {file = "coverage-7.6.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9cb7fa111d21a6b55cbf633039f7bc2749e74932e3aa7cb7333f675a58a58bf3"}, + {file = "coverage-7.6.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:11a223a14e91a4693d2d0755c7a043db43d96a7450b4f356d506c2562c48642c"}, + {file = "coverage-7.6.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a413a096c4cbac202433c850ee43fa326d2e871b24554da8327b01632673a076"}, + {file = "coverage-7.6.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:00a1d69c112ff5149cabe60d2e2ee948752c975d95f1e1096742e6077affd376"}, + {file = "coverage-7.6.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1f76846299ba5c54d12c91d776d9605ae33f8ae2b9d1d3c3703cf2db1a67f2c0"}, + {file = "coverage-7.6.4-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:fe439416eb6380de434886b00c859304338f8b19f6f54811984f3420a2e03858"}, + {file = "coverage-7.6.4-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:0294ca37f1ba500667b1aef631e48d875ced93ad5e06fa665a3295bdd1d95111"}, + {file = "coverage-7.6.4-cp39-cp39-musllinux_1_2_x86_64.whl", hash = 
"sha256:6f01ba56b1c0e9d149f9ac85a2f999724895229eb36bd997b61e62999e9b0901"}, + {file = "coverage-7.6.4-cp39-cp39-win32.whl", hash = "sha256:bc66f0bf1d7730a17430a50163bb264ba9ded56739112368ba985ddaa9c3bd09"}, + {file = "coverage-7.6.4-cp39-cp39-win_amd64.whl", hash = "sha256:c481b47f6b5845064c65a7bc78bc0860e635a9b055af0df46fdf1c58cebf8e8f"}, + {file = "coverage-7.6.4-pp39.pp310-none-any.whl", hash = "sha256:3c65d37f3a9ebb703e710befdc489a38683a5b152242664b973a7b7b22348a4e"}, + {file = "coverage-7.6.4.tar.gz", hash = "sha256:29fc0f17b1d3fea332f8001d4558f8214af7f1d87a345f3a133c901d60347c73"}, +] + +[package.extras] +toml = ["tomli ; python_full_version <= \"3.11.0a6\""] + +[[package]] +name = "cryptography" +version = "43.0.3" +description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "cryptography-43.0.3-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:bf7a1932ac4176486eab36a19ed4c0492da5d97123f1406cf15e41b05e787d2e"}, + {file = "cryptography-43.0.3-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63efa177ff54aec6e1c0aefaa1a241232dcd37413835a9b674b6e3f0ae2bfd3e"}, + {file = "cryptography-43.0.3-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e1ce50266f4f70bf41a2c6dc4358afadae90e2a1e5342d3c08883df1675374f"}, + {file = "cryptography-43.0.3-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:443c4a81bb10daed9a8f334365fe52542771f25aedaf889fd323a853ce7377d6"}, + {file = "cryptography-43.0.3-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:74f57f24754fe349223792466a709f8e0c093205ff0dca557af51072ff47ab18"}, + {file = "cryptography-43.0.3-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:9762ea51a8fc2a88b70cf2995e5675b38d93bf36bd67d91721c309df184f49bd"}, + {file = "cryptography-43.0.3-cp37-abi3-musllinux_1_2_x86_64.whl", hash = 
"sha256:81ef806b1fef6b06dcebad789f988d3b37ccaee225695cf3e07648eee0fc6b73"}, + {file = "cryptography-43.0.3-cp37-abi3-win32.whl", hash = "sha256:cbeb489927bd7af4aa98d4b261af9a5bc025bd87f0e3547e11584be9e9427be2"}, + {file = "cryptography-43.0.3-cp37-abi3-win_amd64.whl", hash = "sha256:f46304d6f0c6ab8e52770addfa2fc41e6629495548862279641972b6215451cd"}, + {file = "cryptography-43.0.3-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:8ac43ae87929a5982f5948ceda07001ee5e83227fd69cf55b109144938d96984"}, + {file = "cryptography-43.0.3-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:846da004a5804145a5f441b8530b4bf35afbf7da70f82409f151695b127213d5"}, + {file = "cryptography-43.0.3-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0f996e7268af62598f2fc1204afa98a3b5712313a55c4c9d434aef49cadc91d4"}, + {file = "cryptography-43.0.3-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:f7b178f11ed3664fd0e995a47ed2b5ff0a12d893e41dd0494f406d1cf555cab7"}, + {file = "cryptography-43.0.3-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:c2e6fc39c4ab499049df3bdf567f768a723a5e8464816e8f009f121a5a9f4405"}, + {file = "cryptography-43.0.3-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:e1be4655c7ef6e1bbe6b5d0403526601323420bcf414598955968c9ef3eb7d16"}, + {file = "cryptography-43.0.3-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:df6b6c6d742395dd77a23ea3728ab62f98379eff8fb61be2744d4679ab678f73"}, + {file = "cryptography-43.0.3-cp39-abi3-win32.whl", hash = "sha256:d56e96520b1020449bbace2b78b603442e7e378a9b3bd68de65c782db1507995"}, + {file = "cryptography-43.0.3-cp39-abi3-win_amd64.whl", hash = "sha256:0c580952eef9bf68c4747774cde7ec1d85a6e61de97281f2dba83c7d2c806362"}, + {file = "cryptography-43.0.3-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:d03b5621a135bffecad2c73e9f4deb1a0f977b9a8ffe6f8e002bf6c9d07b918c"}, + {file = "cryptography-43.0.3-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = 
"sha256:a2a431ee15799d6db9fe80c82b055bae5a752bef645bba795e8e52687c69efe3"}, + {file = "cryptography-43.0.3-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:281c945d0e28c92ca5e5930664c1cefd85efe80e5c0d2bc58dd63383fda29f83"}, + {file = "cryptography-43.0.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:f18c716be16bc1fea8e95def49edf46b82fccaa88587a45f8dc0ff6ab5d8e0a7"}, + {file = "cryptography-43.0.3-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:4a02ded6cd4f0a5562a8887df8b3bd14e822a90f97ac5e544c162899bc467664"}, + {file = "cryptography-43.0.3-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:53a583b6637ab4c4e3591a15bc9db855b8d9dee9a669b550f311480acab6eb08"}, + {file = "cryptography-43.0.3-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:1ec0bcf7e17c0c5669d881b1cd38c4972fade441b27bda1051665faaa89bdcaa"}, + {file = "cryptography-43.0.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2ce6fae5bdad59577b44e4dfed356944fbf1d925269114c28be377692643b4ff"}, + {file = "cryptography-43.0.3.tar.gz", hash = "sha256:315b9001266a492a6ff443b61238f956b214dbec9910a081ba5b6646a055a805"}, +] + +[package.dependencies] +cffi = {version = ">=1.12", markers = "platform_python_implementation != \"PyPy\""} + +[package.extras] +docs = ["sphinx (>=5.3.0)", "sphinx-rtd-theme (>=1.1.1)"] +docstest = ["pyenchant (>=1.6.11)", "readme-renderer", "sphinxcontrib-spelling (>=4.0.1)"] +nox = ["nox"] +pep8test = ["check-sdist", "click", "mypy", "ruff"] +sdist = ["build"] +ssh = ["bcrypt (>=3.1.5)"] +test = ["certifi", "cryptography-vectors (==43.0.3)", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"] +test-randomorder = ["pytest-randomly"] + +[[package]] +name = "debugpy" +version = "1.8.14" +description = "An implementation of the Debug Adapter Protocol for Python" +optional = false +python-versions = ">=3.8" +groups = ["test"] +files = [ + {file = "debugpy-1.8.14-cp310-cp310-macosx_14_0_x86_64.whl", hash = 
"sha256:93fee753097e85623cab1c0e6a68c76308cd9f13ffdf44127e6fab4fbf024339"}, + {file = "debugpy-1.8.14-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d937d93ae4fa51cdc94d3e865f535f185d5f9748efb41d0d49e33bf3365bd79"}, + {file = "debugpy-1.8.14-cp310-cp310-win32.whl", hash = "sha256:c442f20577b38cc7a9aafecffe1094f78f07fb8423c3dddb384e6b8f49fd2987"}, + {file = "debugpy-1.8.14-cp310-cp310-win_amd64.whl", hash = "sha256:f117dedda6d969c5c9483e23f573b38f4e39412845c7bc487b6f2648df30fe84"}, + {file = "debugpy-1.8.14-cp311-cp311-macosx_14_0_universal2.whl", hash = "sha256:1b2ac8c13b2645e0b1eaf30e816404990fbdb168e193322be8f545e8c01644a9"}, + {file = "debugpy-1.8.14-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cf431c343a99384ac7eab2f763980724834f933a271e90496944195318c619e2"}, + {file = "debugpy-1.8.14-cp311-cp311-win32.whl", hash = "sha256:c99295c76161ad8d507b413cd33422d7c542889fbb73035889420ac1fad354f2"}, + {file = "debugpy-1.8.14-cp311-cp311-win_amd64.whl", hash = "sha256:7816acea4a46d7e4e50ad8d09d963a680ecc814ae31cdef3622eb05ccacf7b01"}, + {file = "debugpy-1.8.14-cp312-cp312-macosx_14_0_universal2.whl", hash = "sha256:8899c17920d089cfa23e6005ad9f22582fd86f144b23acb9feeda59e84405b84"}, + {file = "debugpy-1.8.14-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6bb5c0dcf80ad5dbc7b7d6eac484e2af34bdacdf81df09b6a3e62792b722826"}, + {file = "debugpy-1.8.14-cp312-cp312-win32.whl", hash = "sha256:281d44d248a0e1791ad0eafdbbd2912ff0de9eec48022a5bfbc332957487ed3f"}, + {file = "debugpy-1.8.14-cp312-cp312-win_amd64.whl", hash = "sha256:5aa56ef8538893e4502a7d79047fe39b1dae08d9ae257074c6464a7b290b806f"}, + {file = "debugpy-1.8.14-cp313-cp313-macosx_14_0_universal2.whl", hash = "sha256:329a15d0660ee09fec6786acdb6e0443d595f64f5d096fc3e3ccf09a4259033f"}, + {file = 
"debugpy-1.8.14-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0f920c7f9af409d90f5fd26e313e119d908b0dd2952c2393cd3247a462331f15"}, + {file = "debugpy-1.8.14-cp313-cp313-win32.whl", hash = "sha256:3784ec6e8600c66cbdd4ca2726c72d8ca781e94bce2f396cc606d458146f8f4e"}, + {file = "debugpy-1.8.14-cp313-cp313-win_amd64.whl", hash = "sha256:684eaf43c95a3ec39a96f1f5195a7ff3d4144e4a18d69bb66beeb1a6de605d6e"}, + {file = "debugpy-1.8.14-cp38-cp38-macosx_14_0_x86_64.whl", hash = "sha256:d5582bcbe42917bc6bbe5c12db1bffdf21f6bfc28d4554b738bf08d50dc0c8c3"}, + {file = "debugpy-1.8.14-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5349b7c3735b766a281873fbe32ca9cca343d4cc11ba4a743f84cb854339ff35"}, + {file = "debugpy-1.8.14-cp38-cp38-win32.whl", hash = "sha256:7118d462fe9724c887d355eef395fae68bc764fd862cdca94e70dcb9ade8a23d"}, + {file = "debugpy-1.8.14-cp38-cp38-win_amd64.whl", hash = "sha256:d235e4fa78af2de4e5609073972700523e372cf5601742449970110d565ca28c"}, + {file = "debugpy-1.8.14-cp39-cp39-macosx_14_0_x86_64.whl", hash = "sha256:413512d35ff52c2fb0fd2d65e69f373ffd24f0ecb1fac514c04a668599c5ce7f"}, + {file = "debugpy-1.8.14-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c9156f7524a0d70b7a7e22b2e311d8ba76a15496fb00730e46dcdeedb9e1eea"}, + {file = "debugpy-1.8.14-cp39-cp39-win32.whl", hash = "sha256:b44985f97cc3dd9d52c42eb59ee9d7ee0c4e7ecd62bca704891f997de4cef23d"}, + {file = "debugpy-1.8.14-cp39-cp39-win_amd64.whl", hash = "sha256:b1528cfee6c1b1c698eb10b6b096c598738a8238822d218173d21c3086de8123"}, + {file = "debugpy-1.8.14-py2.py3-none-any.whl", hash = "sha256:5cd9a579d553b6cb9759a7908a41988ee6280b961f24f63336835d9418216a20"}, + {file = "debugpy-1.8.14.tar.gz", hash = "sha256:7cd287184318416850aa8b60ac90105837bb1e59531898c07569d197d2ed5322"}, +] + +[[package]] +name = "decorator" +version 
= "5.2.1" +description = "Decorators for Humans" +optional = false +python-versions = ">=3.8" +groups = ["main", "test"] +files = [ + {file = "decorator-5.2.1-py3-none-any.whl", hash = "sha256:d316bb415a2d9e2d2b3abcc4084c6502fc09240e292cd76a76afc106a1c8e04a"}, + {file = "decorator-5.2.1.tar.gz", hash = "sha256:65f266143752f734b0a7cc83c46f4618af75b8c5911b00ccb61d0ac9b6da0360"}, +] + +[[package]] +name = "defusedxml" +version = "0.7.1" +description = "XML bomb protection for Python stdlib modules" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +groups = ["test"] +files = [ + {file = "defusedxml-0.7.1-py2.py3-none-any.whl", hash = "sha256:a352e7e428770286cc899e2542b6cdaedb2b4953ff269a210103ec58f6198a61"}, + {file = "defusedxml-0.7.1.tar.gz", hash = "sha256:1bb3032db185915b62d7c6209c5a8792be6a32ab2fedacc84e01b52c51aa3e69"}, +] + +[[package]] +name = "distlib" +version = "0.3.9" +description = "Distribution utilities" +optional = false +python-versions = "*" +groups = ["main"] +files = [ + {file = "distlib-0.3.9-py2.py3-none-any.whl", hash = "sha256:47f8c22fd27c27e25a65601af709b38e4f0a45ea4fc2e710f65755fa8caaaf87"}, + {file = "distlib-0.3.9.tar.gz", hash = "sha256:a60f20dea646b8a33f3e7772f74dc0b2d0772d2837ee1342a00645c81edf9403"}, +] + +[[package]] +name = "docutils" +version = "0.21.2" +description = "Docutils -- Python Documentation Utilities" +optional = false +python-versions = ">=3.9" +groups = ["docs"] +files = [ + {file = "docutils-0.21.2-py3-none-any.whl", hash = "sha256:dafca5b9e384f0e419294eb4d2ff9fa826435bf15f15b7bd45723e8ad76811b2"}, + {file = "docutils-0.21.2.tar.gz", hash = "sha256:3a6b18732edf182daa3cd12775bbb338cf5691468f91eeeb109deff6ebfa986f"}, +] + +[[package]] +name = "durationpy" +version = "0.10" +description = "Module for converting between datetime.timedelta and Go's Duration strings." 
+optional = false +python-versions = "*" +groups = ["main"] +files = [ + {file = "durationpy-0.10-py3-none-any.whl", hash = "sha256:3b41e1b601234296b4fb368338fdcd3e13e0b4fb5b67345948f4f2bf9868b286"}, + {file = "durationpy-0.10.tar.gz", hash = "sha256:1fa6893409a6e739c9c72334fc65cca1f355dbdd93405d30f726deb5bde42fba"}, +] + +[[package]] +name = "executing" +version = "1.2.0" +description = "Get the currently executing AST node of a frame, and other information" +optional = false +python-versions = "*" +groups = ["main", "test"] +files = [ + {file = "executing-1.2.0-py2.py3-none-any.whl", hash = "sha256:0314a69e37426e3608aada02473b4161d4caf5a4b244d1d0c48072b8fee7bacc"}, + {file = "executing-1.2.0.tar.gz", hash = "sha256:19da64c18d2d851112f09c287f8d3dbbdf725ab0e569077efb6cdcbd3497c107"}, +] + +[package.extras] +tests = ["asttokens", "littleutils", "pytest", "rich ; python_version >= \"3.11\""] + +[[package]] +name = "fastjsonschema" +version = "2.21.1" +description = "Fastest Python implementation of JSON schema" +optional = false +python-versions = "*" +groups = ["test"] +files = [ + {file = "fastjsonschema-2.21.1-py3-none-any.whl", hash = "sha256:c9e5b7e908310918cf494a434eeb31384dd84a98b57a30bcb1f535015b554667"}, + {file = "fastjsonschema-2.21.1.tar.gz", hash = "sha256:794d4f0a58f848961ba16af7b9c85a3e88cd360df008c59aac6fc5ae9323b5d4"}, +] + +[package.extras] +devel = ["colorama", "json-spec", "jsonschema", "pylint", "pytest", "pytest-benchmark", "pytest-cache", "validictory"] + +[[package]] +name = "filelock" +version = "3.18.0" +description = "A platform independent file lock." 
+optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "filelock-3.18.0-py3-none-any.whl", hash = "sha256:c401f4f8377c4464e6db25fff06205fd89bdd83b65eb0488ed1b160f780e21de"}, + {file = "filelock-3.18.0.tar.gz", hash = "sha256:adbc88eabb99d2fec8c9c1b229b171f18afa655400173ddc653d5d01501fb9f2"}, +] + +[package.extras] +docs = ["furo (>=2024.8.6)", "sphinx (>=8.1.3)", "sphinx-autodoc-typehints (>=3)"] +testing = ["covdefaults (>=2.3)", "coverage (>=7.6.10)", "diff-cover (>=9.2.1)", "pytest (>=8.3.4)", "pytest-asyncio (>=0.25.2)", "pytest-cov (>=6)", "pytest-mock (>=3.14)", "pytest-timeout (>=2.3.1)", "virtualenv (>=20.28.1)"] +typing = ["typing-extensions (>=4.12.2) ; python_version < \"3.11\""] + +[[package]] +name = "fqdn" +version = "1.5.1" +description = "Validates fully-qualified domain names against RFC 1123, so that they are acceptable to modern bowsers" +optional = false +python-versions = ">=2.7, !=3.0, !=3.1, !=3.2, !=3.3, !=3.4, <4" +groups = ["test"] +files = [ + {file = "fqdn-1.5.1-py3-none-any.whl", hash = "sha256:3a179af3761e4df6eb2e026ff9e1a3033d3587bf980a0b1b2e1e5d08d7358014"}, + {file = "fqdn-1.5.1.tar.gz", hash = "sha256:105ed3677e767fb5ca086a0c1f4bb66ebc3c100be518f0e0d755d9eae164d89f"}, +] + +[[package]] +name = "frozenlist" +version = "1.7.0" +description = "A list-like structure which implements collections.abc.MutableSequence" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "frozenlist-1.7.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:cc4df77d638aa2ed703b878dd093725b72a824c3c546c076e8fdf276f78ee84a"}, + {file = "frozenlist-1.7.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:716a9973a2cc963160394f701964fe25012600f3d311f60c790400b00e568b61"}, + {file = "frozenlist-1.7.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a0fd1bad056a3600047fb9462cff4c5322cebc59ebf5d0a3725e0ee78955001d"}, + {file = 
"frozenlist-1.7.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3789ebc19cb811163e70fe2bd354cea097254ce6e707ae42e56f45e31e96cb8e"}, + {file = "frozenlist-1.7.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:af369aa35ee34f132fcfad5be45fbfcde0e3a5f6a1ec0712857f286b7d20cca9"}, + {file = "frozenlist-1.7.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ac64b6478722eeb7a3313d494f8342ef3478dff539d17002f849101b212ef97c"}, + {file = "frozenlist-1.7.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f89f65d85774f1797239693cef07ad4c97fdd0639544bad9ac4b869782eb1981"}, + {file = "frozenlist-1.7.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1073557c941395fdfcfac13eb2456cb8aad89f9de27bae29fabca8e563b12615"}, + {file = "frozenlist-1.7.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ed8d2fa095aae4bdc7fdd80351009a48d286635edffee66bf865e37a9125c50"}, + {file = "frozenlist-1.7.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:24c34bea555fe42d9f928ba0a740c553088500377448febecaa82cc3e88aa1fa"}, + {file = "frozenlist-1.7.0-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:69cac419ac6a6baad202c85aaf467b65ac860ac2e7f2ac1686dc40dbb52f6577"}, + {file = "frozenlist-1.7.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:960d67d0611f4c87da7e2ae2eacf7ea81a5be967861e0c63cf205215afbfac59"}, + {file = "frozenlist-1.7.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:41be2964bd4b15bf575e5daee5a5ce7ed3115320fb3c2b71fca05582ffa4dc9e"}, + {file = "frozenlist-1.7.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:46d84d49e00c9429238a7ce02dc0be8f6d7cd0cd405abd1bebdc991bf27c15bd"}, + {file = "frozenlist-1.7.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = 
"sha256:15900082e886edb37480335d9d518cec978afc69ccbc30bd18610b7c1b22a718"}, + {file = "frozenlist-1.7.0-cp310-cp310-win32.whl", hash = "sha256:400ddd24ab4e55014bba442d917203c73b2846391dd42ca5e38ff52bb18c3c5e"}, + {file = "frozenlist-1.7.0-cp310-cp310-win_amd64.whl", hash = "sha256:6eb93efb8101ef39d32d50bce242c84bcbddb4f7e9febfa7b524532a239b4464"}, + {file = "frozenlist-1.7.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:aa51e147a66b2d74de1e6e2cf5921890de6b0f4820b257465101d7f37b49fb5a"}, + {file = "frozenlist-1.7.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:9b35db7ce1cd71d36ba24f80f0c9e7cff73a28d7a74e91fe83e23d27c7828750"}, + {file = "frozenlist-1.7.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:34a69a85e34ff37791e94542065c8416c1afbf820b68f720452f636d5fb990cd"}, + {file = "frozenlist-1.7.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4a646531fa8d82c87fe4bb2e596f23173caec9185bfbca5d583b4ccfb95183e2"}, + {file = "frozenlist-1.7.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:79b2ffbba483f4ed36a0f236ccb85fbb16e670c9238313709638167670ba235f"}, + {file = "frozenlist-1.7.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a26f205c9ca5829cbf82bb2a84b5c36f7184c4316617d7ef1b271a56720d6b30"}, + {file = "frozenlist-1.7.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bcacfad3185a623fa11ea0e0634aac7b691aa925d50a440f39b458e41c561d98"}, + {file = "frozenlist-1.7.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:72c1b0fe8fe451b34f12dce46445ddf14bd2a5bcad7e324987194dc8e3a74c86"}, + {file = "frozenlist-1.7.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:61d1a5baeaac6c0798ff6edfaeaa00e0e412d49946c53fae8d4b8e8b3566c4ae"}, + {file = "frozenlist-1.7.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = 
"sha256:7edf5c043c062462f09b6820de9854bf28cc6cc5b6714b383149745e287181a8"}, + {file = "frozenlist-1.7.0-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:d50ac7627b3a1bd2dcef6f9da89a772694ec04d9a61b66cf87f7d9446b4a0c31"}, + {file = "frozenlist-1.7.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:ce48b2fece5aeb45265bb7a58259f45027db0abff478e3077e12b05b17fb9da7"}, + {file = "frozenlist-1.7.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:fe2365ae915a1fafd982c146754e1de6ab3478def8a59c86e1f7242d794f97d5"}, + {file = "frozenlist-1.7.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:45a6f2fdbd10e074e8814eb98b05292f27bad7d1883afbe009d96abdcf3bc898"}, + {file = "frozenlist-1.7.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:21884e23cffabb157a9dd7e353779077bf5b8f9a58e9b262c6caad2ef5f80a56"}, + {file = "frozenlist-1.7.0-cp311-cp311-win32.whl", hash = "sha256:284d233a8953d7b24f9159b8a3496fc1ddc00f4db99c324bd5fb5f22d8698ea7"}, + {file = "frozenlist-1.7.0-cp311-cp311-win_amd64.whl", hash = "sha256:387cbfdcde2f2353f19c2f66bbb52406d06ed77519ac7ee21be0232147c2592d"}, + {file = "frozenlist-1.7.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:3dbf9952c4bb0e90e98aec1bd992b3318685005702656bc6f67c1a32b76787f2"}, + {file = "frozenlist-1.7.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:1f5906d3359300b8a9bb194239491122e6cf1444c2efb88865426f170c262cdb"}, + {file = "frozenlist-1.7.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3dabd5a8f84573c8d10d8859a50ea2dec01eea372031929871368c09fa103478"}, + {file = "frozenlist-1.7.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa57daa5917f1738064f302bf2626281a1cb01920c32f711fbc7bc36111058a8"}, + {file = "frozenlist-1.7.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:c193dda2b6d49f4c4398962810fa7d7c78f032bf45572b3e04dd5249dff27e08"}, + {file = 
"frozenlist-1.7.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bfe2b675cf0aaa6d61bf8fbffd3c274b3c9b7b1623beb3809df8a81399a4a9c4"}, + {file = "frozenlist-1.7.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8fc5d5cda37f62b262405cf9652cf0856839c4be8ee41be0afe8858f17f4c94b"}, + {file = "frozenlist-1.7.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b0d5ce521d1dd7d620198829b87ea002956e4319002ef0bc8d3e6d045cb4646e"}, + {file = "frozenlist-1.7.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:488d0a7d6a0008ca0db273c542098a0fa9e7dfaa7e57f70acef43f32b3f69dca"}, + {file = "frozenlist-1.7.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:15a7eaba63983d22c54d255b854e8108e7e5f3e89f647fc854bd77a237e767df"}, + {file = "frozenlist-1.7.0-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:1eaa7e9c6d15df825bf255649e05bd8a74b04a4d2baa1ae46d9c2d00b2ca2cb5"}, + {file = "frozenlist-1.7.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:e4389e06714cfa9d47ab87f784a7c5be91d3934cd6e9a7b85beef808297cc025"}, + {file = "frozenlist-1.7.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:73bd45e1488c40b63fe5a7df892baf9e2a4d4bb6409a2b3b78ac1c6236178e01"}, + {file = "frozenlist-1.7.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:99886d98e1643269760e5fe0df31e5ae7050788dd288947f7f007209b8c33f08"}, + {file = "frozenlist-1.7.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:290a172aae5a4c278c6da8a96222e6337744cd9c77313efe33d5670b9f65fc43"}, + {file = "frozenlist-1.7.0-cp312-cp312-win32.whl", hash = "sha256:426c7bc70e07cfebc178bc4c2bf2d861d720c4fff172181eeb4a4c41d4ca2ad3"}, + {file = "frozenlist-1.7.0-cp312-cp312-win_amd64.whl", hash = "sha256:563b72efe5da92e02eb68c59cb37205457c977aa7a449ed1b37e6939e5c47c6a"}, + {file = "frozenlist-1.7.0-cp313-cp313-macosx_10_13_universal2.whl", hash = 
"sha256:ee80eeda5e2a4e660651370ebffd1286542b67e268aa1ac8d6dbe973120ef7ee"}, + {file = "frozenlist-1.7.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:d1a81c85417b914139e3a9b995d4a1c84559afc839a93cf2cb7f15e6e5f6ed2d"}, + {file = "frozenlist-1.7.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:cbb65198a9132ebc334f237d7b0df163e4de83fb4f2bdfe46c1e654bdb0c5d43"}, + {file = "frozenlist-1.7.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dab46c723eeb2c255a64f9dc05b8dd601fde66d6b19cdb82b2e09cc6ff8d8b5d"}, + {file = "frozenlist-1.7.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:6aeac207a759d0dedd2e40745575ae32ab30926ff4fa49b1635def65806fddee"}, + {file = "frozenlist-1.7.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bd8c4e58ad14b4fa7802b8be49d47993182fdd4023393899632c88fd8cd994eb"}, + {file = "frozenlist-1.7.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:04fb24d104f425da3540ed83cbfc31388a586a7696142004c577fa61c6298c3f"}, + {file = "frozenlist-1.7.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6a5c505156368e4ea6b53b5ac23c92d7edc864537ff911d2fb24c140bb175e60"}, + {file = "frozenlist-1.7.0-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8bd7eb96a675f18aa5c553eb7ddc24a43c8c18f22e1f9925528128c052cdbe00"}, + {file = "frozenlist-1.7.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:05579bf020096fe05a764f1f84cd104a12f78eaab68842d036772dc6d4870b4b"}, + {file = "frozenlist-1.7.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:376b6222d114e97eeec13d46c486facd41d4f43bab626b7c3f6a8b4e81a5192c"}, + {file = "frozenlist-1.7.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:0aa7e176ebe115379b5b1c95b4096fb1c17cce0847402e227e712c27bdb5a949"}, + {file = 
"frozenlist-1.7.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:3fbba20e662b9c2130dc771e332a99eff5da078b2b2648153a40669a6d0e36ca"}, + {file = "frozenlist-1.7.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:f3f4410a0a601d349dd406b5713fec59b4cee7e71678d5b17edda7f4655a940b"}, + {file = "frozenlist-1.7.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e2cdfaaec6a2f9327bf43c933c0319a7c429058e8537c508964a133dffee412e"}, + {file = "frozenlist-1.7.0-cp313-cp313-win32.whl", hash = "sha256:5fc4df05a6591c7768459caba1b342d9ec23fa16195e744939ba5914596ae3e1"}, + {file = "frozenlist-1.7.0-cp313-cp313-win_amd64.whl", hash = "sha256:52109052b9791a3e6b5d1b65f4b909703984b770694d3eb64fad124c835d7cba"}, + {file = "frozenlist-1.7.0-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:a6f86e4193bb0e235ef6ce3dde5cbabed887e0b11f516ce8a0f4d3b33078ec2d"}, + {file = "frozenlist-1.7.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:82d664628865abeb32d90ae497fb93df398a69bb3434463d172b80fc25b0dd7d"}, + {file = "frozenlist-1.7.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:912a7e8375a1c9a68325a902f3953191b7b292aa3c3fb0d71a216221deca460b"}, + {file = "frozenlist-1.7.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9537c2777167488d539bc5de2ad262efc44388230e5118868e172dd4a552b146"}, + {file = "frozenlist-1.7.0-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:f34560fb1b4c3e30ba35fa9a13894ba39e5acfc5f60f57d8accde65f46cc5e74"}, + {file = "frozenlist-1.7.0-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:acd03d224b0175f5a850edc104ac19040d35419eddad04e7cf2d5986d98427f1"}, + {file = "frozenlist-1.7.0-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f2038310bc582f3d6a09b3816ab01737d60bf7b1ec70f5356b09e84fb7408ab1"}, + {file = 
"frozenlist-1.7.0-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b8c05e4c8e5f36e5e088caa1bf78a687528f83c043706640a92cb76cd6999384"}, + {file = "frozenlist-1.7.0-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:765bb588c86e47d0b68f23c1bee323d4b703218037765dcf3f25c838c6fecceb"}, + {file = "frozenlist-1.7.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:32dc2e08c67d86d0969714dd484fd60ff08ff81d1a1e40a77dd34a387e6ebc0c"}, + {file = "frozenlist-1.7.0-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:c0303e597eb5a5321b4de9c68e9845ac8f290d2ab3f3e2c864437d3c5a30cd65"}, + {file = "frozenlist-1.7.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:a47f2abb4e29b3a8d0b530f7c3598badc6b134562b1a5caee867f7c62fee51e3"}, + {file = "frozenlist-1.7.0-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:3d688126c242a6fabbd92e02633414d40f50bb6002fa4cf995a1d18051525657"}, + {file = "frozenlist-1.7.0-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:4e7e9652b3d367c7bd449a727dc79d5043f48b88d0cbfd4f9f1060cf2b414104"}, + {file = "frozenlist-1.7.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:1a85e345b4c43db8b842cab1feb41be5cc0b10a1830e6295b69d7310f99becaf"}, + {file = "frozenlist-1.7.0-cp313-cp313t-win32.whl", hash = "sha256:3a14027124ddb70dfcee5148979998066897e79f89f64b13328595c4bdf77c81"}, + {file = "frozenlist-1.7.0-cp313-cp313t-win_amd64.whl", hash = "sha256:3bf8010d71d4507775f658e9823210b7427be36625b387221642725b515dcf3e"}, + {file = "frozenlist-1.7.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:cea3dbd15aea1341ea2de490574a4a37ca080b2ae24e4b4f4b51b9057b4c3630"}, + {file = "frozenlist-1.7.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7d536ee086b23fecc36c2073c371572374ff50ef4db515e4e503925361c24f71"}, + {file = "frozenlist-1.7.0-cp39-cp39-macosx_11_0_arm64.whl", hash = 
"sha256:dfcebf56f703cb2e346315431699f00db126d158455e513bd14089d992101e44"}, + {file = "frozenlist-1.7.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:974c5336e61d6e7eb1ea5b929cb645e882aadab0095c5a6974a111e6479f8878"}, + {file = "frozenlist-1.7.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:c70db4a0ab5ab20878432c40563573229a7ed9241506181bba12f6b7d0dc41cb"}, + {file = "frozenlist-1.7.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1137b78384eebaf70560a36b7b229f752fb64d463d38d1304939984d5cb887b6"}, + {file = "frozenlist-1.7.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e793a9f01b3e8b5c0bc646fb59140ce0efcc580d22a3468d70766091beb81b35"}, + {file = "frozenlist-1.7.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:74739ba8e4e38221d2c5c03d90a7e542cb8ad681915f4ca8f68d04f810ee0a87"}, + {file = "frozenlist-1.7.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1e63344c4e929b1a01e29bc184bbb5fd82954869033765bfe8d65d09e336a677"}, + {file = "frozenlist-1.7.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:2ea2a7369eb76de2217a842f22087913cdf75f63cf1307b9024ab82dfb525938"}, + {file = "frozenlist-1.7.0-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:836b42f472a0e006e02499cef9352ce8097f33df43baaba3e0a28a964c26c7d2"}, + {file = "frozenlist-1.7.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:e22b9a99741294b2571667c07d9f8cceec07cb92aae5ccda39ea1b6052ed4319"}, + {file = "frozenlist-1.7.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:9a19e85cc503d958abe5218953df722748d87172f71b73cf3c9257a91b999890"}, + {file = "frozenlist-1.7.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:f22dac33bb3ee8fe3e013aa7b91dc12f60d61d05b7fe32191ffa84c3aafe77bd"}, + {file = "frozenlist-1.7.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash 
= "sha256:9ccec739a99e4ccf664ea0775149f2749b8a6418eb5b8384b4dc0a7d15d304cb"}, + {file = "frozenlist-1.7.0-cp39-cp39-win32.whl", hash = "sha256:b3950f11058310008a87757f3eee16a8e1ca97979833239439586857bc25482e"}, + {file = "frozenlist-1.7.0-cp39-cp39-win_amd64.whl", hash = "sha256:43a82fce6769c70f2f5a06248b614a7d268080a9d20f7457ef10ecee5af82b63"}, + {file = "frozenlist-1.7.0-py3-none-any.whl", hash = "sha256:9a5af342e34f7e97caf8c995864c7a396418ae2859cc6fdf1b1073020d516a7e"}, + {file = "frozenlist-1.7.0.tar.gz", hash = "sha256:2e310d81923c2437ea8670467121cc3e9b0f76d3043cc1d2331d56c7fb7a3a8f"}, +] + +[[package]] +name = "fsspec" +version = "2025.5.1" +description = "File-system specification" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "fsspec-2025.5.1-py3-none-any.whl", hash = "sha256:24d3a2e663d5fc735ab256263c4075f374a174c3410c0b25e5bd1970bceaa462"}, + {file = "fsspec-2025.5.1.tar.gz", hash = "sha256:2e55e47a540b91843b755e83ded97c6e897fa0942b11490113f09e9c443c2475"}, +] + +[package.extras] +abfs = ["adlfs"] +adl = ["adlfs"] +arrow = ["pyarrow (>=1)"] +dask = ["dask", "distributed"] +dev = ["pre-commit", "ruff"] +doc = ["numpydoc", "sphinx", "sphinx-design", "sphinx-rtd-theme", "yarl"] +dropbox = ["dropbox", "dropboxdrivefs", "requests"] +full = ["adlfs", "aiohttp (!=4.0.0a0,!=4.0.0a1)", "dask", "distributed", "dropbox", "dropboxdrivefs", "fusepy", "gcsfs", "libarchive-c", "ocifs", "panel", "paramiko", "pyarrow (>=1)", "pygit2", "requests", "s3fs", "smbprotocol", "tqdm"] +fuse = ["fusepy"] +gcs = ["gcsfs"] +git = ["pygit2"] +github = ["requests"] +gs = ["gcsfs"] +gui = ["panel"] +hdfs = ["pyarrow (>=1)"] +http = ["aiohttp (!=4.0.0a0,!=4.0.0a1)"] +libarchive = ["libarchive-c"] +oci = ["ocifs"] +s3 = ["s3fs"] +sftp = ["paramiko"] +smb = ["smbprotocol"] +ssh = ["paramiko"] +test = ["aiohttp (!=4.0.0a0,!=4.0.0a1)", "numpy", "pytest", "pytest-asyncio (!=0.22.0)", "pytest-benchmark", "pytest-cov", "pytest-mock", "pytest-recording", 
"pytest-rerunfailures", "requests"] +test-downstream = ["aiobotocore (>=2.5.4,<3.0.0)", "dask[dataframe,test]", "moto[server] (>4,<5)", "pytest-timeout", "xarray"] +test-full = ["adlfs", "aiohttp (!=4.0.0a0,!=4.0.0a1)", "cloudpickle", "dask", "distributed", "dropbox", "dropboxdrivefs", "fastparquet", "fusepy", "gcsfs", "jinja2", "kerchunk", "libarchive-c", "lz4", "notebook", "numpy", "ocifs", "pandas", "panel", "paramiko", "pyarrow", "pyarrow (>=1)", "pyftpdlib", "pygit2", "pytest", "pytest-asyncio (!=0.22.0)", "pytest-benchmark", "pytest-cov", "pytest-mock", "pytest-recording", "pytest-rerunfailures", "python-snappy", "requests", "smbprotocol", "tqdm", "urllib3", "zarr", "zstandard"] +tqdm = ["tqdm"] + +[[package]] +name = "google-api-core" +version = "2.25.0" +description = "Google API client core library" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "google_api_core-2.25.0-py3-none-any.whl", hash = "sha256:1db79d1281dcf9f3d10023283299ba38f3dc9f639ec41085968fd23e5bcf512e"}, + {file = "google_api_core-2.25.0.tar.gz", hash = "sha256:9b548e688702f82a34ed8409fb8a6961166f0b7795032f0be8f48308dff4333a"}, +] + +[package.dependencies] +google-auth = ">=2.14.1,<3.0.0" +googleapis-common-protos = ">=1.56.2,<2.0.0" +proto-plus = [ + {version = ">=1.22.3,<2.0.0"}, + {version = ">=1.25.0,<2.0.0", markers = "python_version >= \"3.13\""}, +] +protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<7.0.0" +requests = ">=2.18.0,<3.0.0" + +[package.extras] +async-rest = ["google-auth[aiohttp] (>=2.35.0,<3.0.0)"] +grpc = ["grpcio (>=1.33.2,<2.0.0)", "grpcio (>=1.49.1,<2.0.0) ; python_version >= \"3.11\"", "grpcio-status (>=1.33.2,<2.0.0)", "grpcio-status (>=1.49.1,<2.0.0) ; python_version >= \"3.11\""] +grpcgcp = ["grpcio-gcp (>=0.2.2,<1.0.0)"] +grpcio-gcp = ["grpcio-gcp (>=0.2.2,<1.0.0)"] + +[[package]] +name = "google-auth" 
+version = "2.40.3" +description = "Google Authentication Library" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "google_auth-2.40.3-py2.py3-none-any.whl", hash = "sha256:1370d4593e86213563547f97a92752fc658456fe4514c809544f330fed45a7ca"}, + {file = "google_auth-2.40.3.tar.gz", hash = "sha256:500c3a29adedeb36ea9cf24b8d10858e152f2412e3ca37829b3fa18e33d63b77"}, +] + +[package.dependencies] +cachetools = ">=2.0.0,<6.0" +pyasn1-modules = ">=0.2.1" +rsa = ">=3.1.4,<5" + +[package.extras] +aiohttp = ["aiohttp (>=3.6.2,<4.0.0)", "requests (>=2.20.0,<3.0.0)"] +enterprise-cert = ["cryptography", "pyopenssl"] +pyjwt = ["cryptography (<39.0.0) ; python_version < \"3.8\"", "cryptography (>=38.0.3)", "pyjwt (>=2.0)"] +pyopenssl = ["cryptography (<39.0.0) ; python_version < \"3.8\"", "cryptography (>=38.0.3)", "pyopenssl (>=20.0.0)"] +reauth = ["pyu2f (>=0.1.5)"] +requests = ["requests (>=2.20.0,<3.0.0)"] +testing = ["aiohttp (<3.10.0)", "aiohttp (>=3.6.2,<4.0.0)", "aioresponses", "cryptography (<39.0.0) ; python_version < \"3.8\"", "cryptography (>=38.0.3)", "flask", "freezegun", "grpcio", "mock", "oauth2client", "packaging", "pyjwt (>=2.0)", "pyopenssl (<24.3.0)", "pyopenssl (>=20.0.0)", "pytest", "pytest-asyncio", "pytest-cov", "pytest-localserver", "pyu2f (>=0.1.5)", "requests (>=2.20.0,<3.0.0)", "responses", "urllib3"] +urllib3 = ["packaging", "urllib3"] + +[[package]] +name = "googleapis-common-protos" +version = "1.70.0" +description = "Common protobufs used in Google APIs" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "googleapis_common_protos-1.70.0-py3-none-any.whl", hash = "sha256:b8bfcca8c25a2bb253e0e0b0adaf8c00773e5e6af6fd92397576680b807e0fd8"}, + {file = "googleapis_common_protos-1.70.0.tar.gz", hash = "sha256:0e1b44e0ea153e6594f9f394fef15193a68aaaea2d843f83e2742717ca753257"}, +] + +[package.dependencies] +protobuf = ">=3.20.2,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || 
>4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<7.0.0" + +[package.extras] +grpc = ["grpcio (>=1.44.0,<2.0.0)"] + +[[package]] +name = "grpcio" +version = "1.73.0" +description = "HTTP/2-based RPC framework" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "grpcio-1.73.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:d050197eeed50f858ef6c51ab09514856f957dba7b1f7812698260fc9cc417f6"}, + {file = "grpcio-1.73.0-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:ebb8d5f4b0200916fb292a964a4d41210de92aba9007e33d8551d85800ea16cb"}, + {file = "grpcio-1.73.0-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:c0811331b469e3f15dda5f90ab71bcd9681189a83944fd6dc908e2c9249041ef"}, + {file = "grpcio-1.73.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:12787c791c3993d0ea1cc8bf90393647e9a586066b3b322949365d2772ba965b"}, + {file = "grpcio-1.73.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2c17771e884fddf152f2a0df12478e8d02853e5b602a10a9a9f1f52fa02b1d32"}, + {file = "grpcio-1.73.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:275e23d4c428c26b51857bbd95fcb8e528783597207ec592571e4372b300a29f"}, + {file = "grpcio-1.73.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:9ffc972b530bf73ef0f948f799482a1bf12d9b6f33406a8e6387c0ca2098a833"}, + {file = "grpcio-1.73.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ebd8d269df64aff092b2cec5e015d8ae09c7e90888b5c35c24fdca719a2c9f35"}, + {file = "grpcio-1.73.0-cp310-cp310-win32.whl", hash = "sha256:072d8154b8f74300ed362c01d54af8b93200c1a9077aeaea79828d48598514f1"}, + {file = "grpcio-1.73.0-cp310-cp310-win_amd64.whl", hash = "sha256:ce953d9d2100e1078a76a9dc2b7338d5415924dc59c69a15bf6e734db8a0f1ca"}, + {file = "grpcio-1.73.0-cp311-cp311-linux_armv7l.whl", hash = "sha256:51036f641f171eebe5fa7aaca5abbd6150f0c338dab3a58f9111354240fe36ec"}, + {file = "grpcio-1.73.0-cp311-cp311-macosx_11_0_universal2.whl", hash = 
"sha256:d12bbb88381ea00bdd92c55aff3da3391fd85bc902c41275c8447b86f036ce0f"}, + {file = "grpcio-1.73.0-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:483c507c2328ed0e01bc1adb13d1eada05cc737ec301d8e5a8f4a90f387f1790"}, + {file = "grpcio-1.73.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c201a34aa960c962d0ce23fe5f423f97e9d4b518ad605eae6d0a82171809caaa"}, + {file = "grpcio-1.73.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:859f70c8e435e8e1fa060e04297c6818ffc81ca9ebd4940e180490958229a45a"}, + {file = "grpcio-1.73.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e2459a27c6886e7e687e4e407778425f3c6a971fa17a16420227bda39574d64b"}, + {file = "grpcio-1.73.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:e0084d4559ee3dbdcce9395e1bc90fdd0262529b32c417a39ecbc18da8074ac7"}, + {file = "grpcio-1.73.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:ef5fff73d5f724755693a464d444ee0a448c6cdfd3c1616a9223f736c622617d"}, + {file = "grpcio-1.73.0-cp311-cp311-win32.whl", hash = "sha256:965a16b71a8eeef91fc4df1dc40dc39c344887249174053814f8a8e18449c4c3"}, + {file = "grpcio-1.73.0-cp311-cp311-win_amd64.whl", hash = "sha256:b71a7b4483d1f753bbc11089ff0f6fa63b49c97a9cc20552cded3fcad466d23b"}, + {file = "grpcio-1.73.0-cp312-cp312-linux_armv7l.whl", hash = "sha256:fb9d7c27089d9ba3746f18d2109eb530ef2a37452d2ff50f5a6696cd39167d3b"}, + {file = "grpcio-1.73.0-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:128ba2ebdac41e41554d492b82c34586a90ebd0766f8ebd72160c0e3a57b9155"}, + {file = "grpcio-1.73.0-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:068ecc415f79408d57a7f146f54cdf9f0acb4b301a52a9e563973dc981e82f3d"}, + {file = "grpcio-1.73.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ddc1cfb2240f84d35d559ade18f69dcd4257dbaa5ba0de1a565d903aaab2968"}, + {file = "grpcio-1.73.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:e53007f70d9783f53b41b4cf38ed39a8e348011437e4c287eee7dd1d39d54b2f"}, + {file = "grpcio-1.73.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:4dd8d8d092efede7d6f48d695ba2592046acd04ccf421436dd7ed52677a9ad29"}, + {file = "grpcio-1.73.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:70176093d0a95b44d24baa9c034bb67bfe2b6b5f7ebc2836f4093c97010e17fd"}, + {file = "grpcio-1.73.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:085ebe876373ca095e24ced95c8f440495ed0b574c491f7f4f714ff794bbcd10"}, + {file = "grpcio-1.73.0-cp312-cp312-win32.whl", hash = "sha256:cfc556c1d6aef02c727ec7d0016827a73bfe67193e47c546f7cadd3ee6bf1a60"}, + {file = "grpcio-1.73.0-cp312-cp312-win_amd64.whl", hash = "sha256:bbf45d59d090bf69f1e4e1594832aaf40aa84b31659af3c5e2c3f6a35202791a"}, + {file = "grpcio-1.73.0-cp313-cp313-linux_armv7l.whl", hash = "sha256:da1d677018ef423202aca6d73a8d3b2cb245699eb7f50eb5f74cae15a8e1f724"}, + {file = "grpcio-1.73.0-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:36bf93f6a657f37c131d9dd2c391b867abf1426a86727c3575393e9e11dadb0d"}, + {file = "grpcio-1.73.0-cp313-cp313-manylinux_2_17_aarch64.whl", hash = "sha256:d84000367508ade791d90c2bafbd905574b5ced8056397027a77a215d601ba15"}, + {file = "grpcio-1.73.0-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c98ba1d928a178ce33f3425ff823318040a2b7ef875d30a0073565e5ceb058d9"}, + {file = "grpcio-1.73.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a73c72922dfd30b396a5f25bb3a4590195ee45ecde7ee068acb0892d2900cf07"}, + {file = "grpcio-1.73.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:10e8edc035724aba0346a432060fd192b42bd03675d083c01553cab071a28da5"}, + {file = "grpcio-1.73.0-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:f5cdc332b503c33b1643b12ea933582c7b081957c8bc2ea4cc4bc58054a09288"}, + {file = "grpcio-1.73.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:07ad7c57233c2109e4ac999cb9c2710c3b8e3f491a73b058b0ce431f31ed8145"}, + 
{file = "grpcio-1.73.0-cp313-cp313-win32.whl", hash = "sha256:0eb5df4f41ea10bda99a802b2a292d85be28958ede2a50f2beb8c7fc9a738419"}, + {file = "grpcio-1.73.0-cp313-cp313-win_amd64.whl", hash = "sha256:38cf518cc54cd0c47c9539cefa8888549fcc067db0b0c66a46535ca8032020c4"}, + {file = "grpcio-1.73.0-cp39-cp39-linux_armv7l.whl", hash = "sha256:1284850607901cfe1475852d808e5a102133461ec9380bc3fc9ebc0686ee8e32"}, + {file = "grpcio-1.73.0-cp39-cp39-macosx_11_0_universal2.whl", hash = "sha256:0e092a4b28eefb63eec00d09ef33291cd4c3a0875cde29aec4d11d74434d222c"}, + {file = "grpcio-1.73.0-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:33577fe7febffe8ebad458744cfee8914e0c10b09f0ff073a6b149a84df8ab8f"}, + {file = "grpcio-1.73.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:60813d8a16420d01fa0da1fc7ebfaaa49a7e5051b0337cd48f4f950eb249a08e"}, + {file = "grpcio-1.73.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2a9c957dc65e5d474378d7bcc557e9184576605d4b4539e8ead6e351d7ccce20"}, + {file = "grpcio-1.73.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:3902b71407d021163ea93c70c8531551f71ae742db15b66826cf8825707d2908"}, + {file = "grpcio-1.73.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:1dd7fa7276dcf061e2d5f9316604499eea06b1b23e34a9380572d74fe59915a8"}, + {file = "grpcio-1.73.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2d1510c4ea473110cb46a010555f2c1a279d1c256edb276e17fa571ba1e8927c"}, + {file = "grpcio-1.73.0-cp39-cp39-win32.whl", hash = "sha256:d0a1517b2005ba1235a1190b98509264bf72e231215dfeef8db9a5a92868789e"}, + {file = "grpcio-1.73.0-cp39-cp39-win_amd64.whl", hash = "sha256:6228f7eb6d9f785f38b589d49957fca5df3d5b5349e77d2d89b14e390165344c"}, + {file = "grpcio-1.73.0.tar.gz", hash = "sha256:3af4c30918a7f0d39de500d11255f8d9da4f30e94a2033e70fe2a720e184bd8e"}, +] + +[package.extras] +protobuf = ["grpcio-tools (>=1.73.0)"] + +[[package]] +name = "h11" +version = "0.16.0" +description = "A pure-Python, 
bring-your-own-I/O implementation of HTTP/1.1" +optional = false +python-versions = ">=3.8" +groups = ["test"] +files = [ + {file = "h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86"}, + {file = "h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1"}, +] + +[[package]] +name = "httpcore" +version = "1.0.9" +description = "A minimal low-level HTTP client." +optional = false +python-versions = ">=3.8" +groups = ["test"] +files = [ + {file = "httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55"}, + {file = "httpcore-1.0.9.tar.gz", hash = "sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8"}, +] + +[package.dependencies] +certifi = "*" +h11 = ">=0.16" + +[package.extras] +asyncio = ["anyio (>=4.0,<5.0)"] +http2 = ["h2 (>=3,<5)"] +socks = ["socksio (==1.*)"] +trio = ["trio (>=0.22.0,<1.0)"] + +[[package]] +name = "httpx" +version = "0.28.1" +description = "The next generation HTTP client." 
+optional = false +python-versions = ">=3.8" +groups = ["test"] +files = [ + {file = "httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad"}, + {file = "httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc"}, +] + +[package.dependencies] +anyio = "*" +certifi = "*" +httpcore = "==1.*" +idna = "*" + +[package.extras] +brotli = ["brotli ; platform_python_implementation == \"CPython\"", "brotlicffi ; platform_python_implementation != \"CPython\""] +cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"] +http2 = ["h2 (>=3,<5)"] +socks = ["socksio (==1.*)"] +zstd = ["zstandard (>=0.18.0)"] + +[[package]] +name = "idna" +version = "3.10" +description = "Internationalized Domain Names in Applications (IDNA)" +optional = false +python-versions = ">=3.6" +groups = ["main", "docs", "test"] +files = [ + {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"}, + {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"}, +] + +[package.extras] +all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"] + +[[package]] +name = "imagesize" +version = "1.4.1" +description = "Getting image size from png/jpeg/jpeg2000/gif file" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +groups = ["docs"] +files = [ + {file = "imagesize-1.4.1-py2.py3-none-any.whl", hash = "sha256:0d8d18d08f840c19d0ee7ca1fd82490fdc3729b7ac93f49870406ddde8ef8d8b"}, + {file = "imagesize-1.4.1.tar.gz", hash = "sha256:69150444affb9cb0d5cc5a92b3676f0b2fb7cd9ae39e947a5e11a36b4497cd4a"}, +] + +[[package]] +name = "importlib-metadata" +version = "8.7.0" +description = "Read metadata from Python packages" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "importlib_metadata-8.7.0-py3-none-any.whl", hash 
= "sha256:e5dd1551894c77868a30651cef00984d50e1002d06942a7101d34870c5f02afd"}, + {file = "importlib_metadata-8.7.0.tar.gz", hash = "sha256:d13b81ad223b890aa16c5471f2ac3056cf76c5f10f82d6f9292f0b415f389000"}, +] + +[package.dependencies] +zipp = ">=3.20" + +[package.extras] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\""] +cover = ["pytest-cov"] +doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +enabler = ["pytest-enabler (>=2.2)"] +perf = ["ipython"] +test = ["flufl.flake8", "importlib_resources (>=1.3) ; python_version < \"3.9\"", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-perf (>=0.9.2)"] +type = ["pytest-mypy"] + +[[package]] +name = "iniconfig" +version = "2.1.0" +description = "brain-dead simple config-ini parsing" +optional = false +python-versions = ">=3.8" +groups = ["test"] +files = [ + {file = "iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760"}, + {file = "iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7"}, +] + +[[package]] +name = "ipykernel" +version = "6.29.5" +description = "IPython Kernel for Jupyter" +optional = false +python-versions = ">=3.8" +groups = ["test"] +files = [ + {file = "ipykernel-6.29.5-py3-none-any.whl", hash = "sha256:afdb66ba5aa354b09b91379bac28ae4afebbb30e8b39510c9690afb7a10421b5"}, + {file = "ipykernel-6.29.5.tar.gz", hash = "sha256:f093a22c4a40f8828f8e330a9c297cb93dcab13bd9678ded6de8e5cf81c56215"}, +] + +[package.dependencies] +appnope = {version = "*", markers = "platform_system == \"Darwin\""} +comm = ">=0.1.1" +debugpy = ">=1.6.5" +ipython = ">=7.23.1" +jupyter-client = ">=6.1.12" +jupyter-core = ">=4.12,<5.0.dev0 || >=5.1.dev0" +matplotlib-inline = ">=0.1" +nest-asyncio = "*" +packaging = "*" +psutil = "*" +pyzmq = ">=24" +tornado = ">=6.1" +traitlets = 
">=5.4.0" + +[package.extras] +cov = ["coverage[toml]", "curio", "matplotlib", "pytest-cov", "trio"] +docs = ["myst-parser", "pydata-sphinx-theme", "sphinx", "sphinx-autodoc-typehints", "sphinxcontrib-github-alt", "sphinxcontrib-spelling", "trio"] +pyqt5 = ["pyqt5"] +pyside6 = ["pyside6"] +test = ["flaky", "ipyparallel", "pre-commit", "pytest (>=7.0)", "pytest-asyncio (>=0.23.5)", "pytest-cov", "pytest-timeout"] + +[[package]] +name = "ipython" +version = "9.3.0" +description = "IPython: Productive Interactive Computing" +optional = false +python-versions = ">=3.11" +groups = ["main", "test"] +files = [ + {file = "ipython-9.3.0-py3-none-any.whl", hash = "sha256:1a0b6dd9221a1f5dddf725b57ac0cb6fddc7b5f470576231ae9162b9b3455a04"}, + {file = "ipython-9.3.0.tar.gz", hash = "sha256:79eb896f9f23f50ad16c3bc205f686f6e030ad246cc309c6279a242b14afe9d8"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "sys_platform == \"win32\""} +decorator = "*" +ipython-pygments-lexers = "*" +jedi = ">=0.16" +matplotlib-inline = "*" +pexpect = {version = ">4.3", markers = "sys_platform != \"win32\" and sys_platform != \"emscripten\""} +prompt_toolkit = ">=3.0.41,<3.1.0" +pygments = ">=2.4.0" +stack_data = "*" +traitlets = ">=5.13.0" +typing_extensions = {version = ">=4.6", markers = "python_version < \"3.12\""} + +[package.extras] +all = ["ipython[doc,matplotlib,test,test-extra]"] +black = ["black"] +doc = ["docrepr", "exceptiongroup", "intersphinx_registry", "ipykernel", "ipython[test]", "matplotlib", "setuptools (>=18.5)", "sphinx (>=1.3)", "sphinx-rtd-theme", "sphinx_toml (==0.0.4)", "typing_extensions"] +matplotlib = ["matplotlib"] +test = ["packaging", "pytest", "pytest-asyncio (<0.22)", "testpath"] +test-extra = ["curio", "ipykernel", "ipython[test]", "jupyter_ai", "matplotlib (!=3.2.0)", "nbclient", "nbformat", "numpy (>=1.23)", "pandas", "trio"] + +[[package]] +name = "ipython-pygments-lexers" +version = "1.1.1" +description = "Defines a variety of Pygments lexers 
for highlighting IPython code." +optional = false +python-versions = ">=3.8" +groups = ["main", "test"] +files = [ + {file = "ipython_pygments_lexers-1.1.1-py3-none-any.whl", hash = "sha256:a9462224a505ade19a605f71f8fa63c2048833ce50abc86768a0d81d876dc81c"}, + {file = "ipython_pygments_lexers-1.1.1.tar.gz", hash = "sha256:09c0138009e56b6854f9535736f4171d855c8c08a563a0dcd8022f78355c7e81"}, +] + +[package.dependencies] +pygments = "*" + +[[package]] +name = "ipywidgets" +version = "8.1.2" +description = "Jupyter interactive widgets" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "ipywidgets-8.1.2-py3-none-any.whl", hash = "sha256:bbe43850d79fb5e906b14801d6c01402857996864d1e5b6fa62dd2ee35559f60"}, + {file = "ipywidgets-8.1.2.tar.gz", hash = "sha256:d0b9b41e49bae926a866e613a39b0f0097745d2b9f1f3dd406641b4a57ec42c9"}, +] + +[package.dependencies] +comm = ">=0.1.3" +ipython = ">=6.1.0" +jupyterlab-widgets = ">=3.0.10,<3.1.0" +traitlets = ">=4.3.1" +widgetsnbextension = ">=4.0.10,<4.1.0" + +[package.extras] +test = ["ipykernel", "jsonschema", "pytest (>=3.6.0)", "pytest-cov", "pytz"] + +[[package]] +name = "isoduration" +version = "20.11.0" +description = "Operations with ISO 8601 durations" +optional = false +python-versions = ">=3.7" +groups = ["test"] +files = [ + {file = "isoduration-20.11.0-py3-none-any.whl", hash = "sha256:b2904c2a4228c3d44f409c8ae8e2370eb21a26f7ac2ec5446df141dde3452042"}, + {file = "isoduration-20.11.0.tar.gz", hash = "sha256:ac2f9015137935279eac671f94f89eb00584f940f5dc49462a0c4ee692ba1bd9"}, +] + +[package.dependencies] +arrow = ">=0.15.0" + +[[package]] +name = "jedi" +version = "0.19.2" +description = "An autocompletion tool for Python that can be used for text editors." 
+optional = false +python-versions = ">=3.6" +groups = ["main", "test"] +files = [ + {file = "jedi-0.19.2-py2.py3-none-any.whl", hash = "sha256:a8ef22bde8490f57fe5c7681a3c83cb58874daf72b4784de3cce5b6ef6edb5b9"}, + {file = "jedi-0.19.2.tar.gz", hash = "sha256:4770dc3de41bde3966b02eb84fbcf557fb33cce26ad23da12c742fb50ecb11f0"}, +] + +[package.dependencies] +parso = ">=0.8.4,<0.9.0" + +[package.extras] +docs = ["Jinja2 (==2.11.3)", "MarkupSafe (==1.1.1)", "Pygments (==2.8.1)", "alabaster (==0.7.12)", "babel (==2.9.1)", "chardet (==4.0.0)", "commonmark (==0.8.1)", "docutils (==0.17.1)", "future (==0.18.2)", "idna (==2.10)", "imagesize (==1.2.0)", "mock (==1.0.1)", "packaging (==20.9)", "pyparsing (==2.4.7)", "pytz (==2021.1)", "readthedocs-sphinx-ext (==2.1.4)", "recommonmark (==0.5.0)", "requests (==2.25.1)", "six (==1.15.0)", "snowballstemmer (==2.1.0)", "sphinx (==1.8.5)", "sphinx-rtd-theme (==0.4.3)", "sphinxcontrib-serializinghtml (==1.1.4)", "sphinxcontrib-websupport (==1.2.4)", "urllib3 (==1.26.4)"] +qa = ["flake8 (==5.0.4)", "mypy (==0.971)", "types-setuptools (==67.2.0.1)"] +testing = ["Django", "attrs", "colorama", "docopt", "pytest (<9.0.0)"] + +[[package]] +name = "jinja2" +version = "3.1.6" +description = "A very fast and expressive template engine." +optional = false +python-versions = ">=3.7" +groups = ["docs", "test"] +files = [ + {file = "jinja2-3.1.6-py3-none-any.whl", hash = "sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67"}, + {file = "jinja2-3.1.6.tar.gz", hash = "sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d"}, +] + +[package.dependencies] +MarkupSafe = ">=2.0" + +[package.extras] +i18n = ["Babel (>=2.7)"] + +[[package]] +name = "json5" +version = "0.12.0" +description = "A Python implementation of the JSON5 data format." 
+optional = false +python-versions = ">=3.8.0" +groups = ["test"] +files = [ + {file = "json5-0.12.0-py3-none-any.whl", hash = "sha256:6d37aa6c08b0609f16e1ec5ff94697e2cbbfbad5ac112afa05794da9ab7810db"}, + {file = "json5-0.12.0.tar.gz", hash = "sha256:0b4b6ff56801a1c7dc817b0241bca4ce474a0e6a163bfef3fc594d3fd263ff3a"}, +] + +[package.extras] +dev = ["build (==1.2.2.post1)", "coverage (==7.5.4) ; python_version < \"3.9\"", "coverage (==7.8.0) ; python_version >= \"3.9\"", "mypy (==1.14.1) ; python_version < \"3.9\"", "mypy (==1.15.0) ; python_version >= \"3.9\"", "pip (==25.0.1)", "pylint (==3.2.7) ; python_version < \"3.9\"", "pylint (==3.3.6) ; python_version >= \"3.9\"", "ruff (==0.11.2)", "twine (==6.1.0)", "uv (==0.6.11)"] + +[[package]] +name = "jsonpointer" +version = "3.0.0" +description = "Identify specific nodes in a JSON document (RFC 6901)" +optional = false +python-versions = ">=3.7" +groups = ["test"] +files = [ + {file = "jsonpointer-3.0.0-py2.py3-none-any.whl", hash = "sha256:13e088adc14fca8b6aa8177c044e12701e6ad4b28ff10e65f2267a90109c9942"}, + {file = "jsonpointer-3.0.0.tar.gz", hash = "sha256:2b2d729f2091522d61c3b31f82e11870f60b68f43fbc705cb76bf4b832af59ef"}, +] + +[[package]] +name = "jsonschema" +version = "4.24.0" +description = "An implementation of JSON Schema validation for Python" +optional = false +python-versions = ">=3.9" +groups = ["main", "test"] +files = [ + {file = "jsonschema-4.24.0-py3-none-any.whl", hash = "sha256:a462455f19f5faf404a7902952b6f0e3ce868f3ee09a359b05eca6673bd8412d"}, + {file = "jsonschema-4.24.0.tar.gz", hash = "sha256:0b4e8069eb12aedfa881333004bccaec24ecef5a8a6a4b6df142b2cc9599d196"}, +] + +[package.dependencies] +attrs = ">=22.2.0" +fqdn = {version = "*", optional = true, markers = "extra == \"format-nongpl\""} +idna = {version = "*", optional = true, markers = "extra == \"format-nongpl\""} +isoduration = {version = "*", optional = true, markers = "extra == \"format-nongpl\""} +jsonpointer = {version = ">1.13", 
optional = true, markers = "extra == \"format-nongpl\""} +jsonschema-specifications = ">=2023.03.6" +referencing = ">=0.28.4" +rfc3339-validator = {version = "*", optional = true, markers = "extra == \"format-nongpl\""} +rfc3986-validator = {version = ">0.1.0", optional = true, markers = "extra == \"format-nongpl\""} +rpds-py = ">=0.7.1" +uri-template = {version = "*", optional = true, markers = "extra == \"format-nongpl\""} +webcolors = {version = ">=24.6.0", optional = true, markers = "extra == \"format-nongpl\""} + +[package.extras] +format = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3987", "uri-template", "webcolors (>=1.11)"] +format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "uri-template", "webcolors (>=24.6.0)"] + +[[package]] +name = "jsonschema-specifications" +version = "2025.4.1" +description = "The JSON Schema meta-schemas and vocabularies, exposed as a Registry" +optional = false +python-versions = ">=3.9" +groups = ["main", "test"] +files = [ + {file = "jsonschema_specifications-2025.4.1-py3-none-any.whl", hash = "sha256:4653bffbd6584f7de83a67e0d620ef16900b390ddc7939d56684d6c81e33f1af"}, + {file = "jsonschema_specifications-2025.4.1.tar.gz", hash = "sha256:630159c9f4dbea161a6a2205c3011cc4f18ff381b189fff48bb39b9bf26ae608"}, +] + +[package.dependencies] +referencing = ">=0.31.0" + +[[package]] +name = "jupyter-client" +version = "8.6.3" +description = "Jupyter protocol implementation and client libraries" +optional = false +python-versions = ">=3.8" +groups = ["test"] +files = [ + {file = "jupyter_client-8.6.3-py3-none-any.whl", hash = "sha256:e8a19cc986cc45905ac3362915f410f3af85424b4c0905e94fa5f2cb08e8f23f"}, + {file = "jupyter_client-8.6.3.tar.gz", hash = "sha256:35b3a0947c4a6e9d589eb97d7d4cd5e90f910ee73101611f01283732bd6d9419"}, +] + +[package.dependencies] +jupyter-core = ">=4.12,<5.0.dev0 || >=5.1.dev0" +python-dateutil = ">=2.8.2" +pyzmq 
= ">=23.0" +tornado = ">=6.2" +traitlets = ">=5.3" + +[package.extras] +docs = ["ipykernel", "myst-parser", "pydata-sphinx-theme", "sphinx (>=4)", "sphinx-autodoc-typehints", "sphinxcontrib-github-alt", "sphinxcontrib-spelling"] +test = ["coverage", "ipykernel (>=6.14)", "mypy", "paramiko ; sys_platform == \"win32\"", "pre-commit", "pytest (<8.2.0)", "pytest-cov", "pytest-jupyter[client] (>=0.4.1)", "pytest-timeout"] + +[[package]] +name = "jupyter-core" +version = "5.8.1" +description = "Jupyter core package. A base package on which Jupyter projects rely." +optional = false +python-versions = ">=3.8" +groups = ["test"] +files = [ + {file = "jupyter_core-5.8.1-py3-none-any.whl", hash = "sha256:c28d268fc90fb53f1338ded2eb410704c5449a358406e8a948b75706e24863d0"}, + {file = "jupyter_core-5.8.1.tar.gz", hash = "sha256:0a5f9706f70e64786b75acba995988915ebd4601c8a52e534a40b51c95f59941"}, +] + +[package.dependencies] +platformdirs = ">=2.5" +pywin32 = {version = ">=300", markers = "sys_platform == \"win32\" and platform_python_implementation != \"PyPy\""} +traitlets = ">=5.3" + +[package.extras] +docs = ["intersphinx-registry", "myst-parser", "pydata-sphinx-theme", "sphinx-autodoc-typehints", "sphinxcontrib-spelling", "traitlets"] +test = ["ipykernel", "pre-commit", "pytest (<9)", "pytest-cov", "pytest-timeout"] + +[[package]] +name = "jupyter-events" +version = "0.12.0" +description = "Jupyter Event System library" +optional = false +python-versions = ">=3.9" +groups = ["test"] +files = [ + {file = "jupyter_events-0.12.0-py3-none-any.whl", hash = "sha256:6464b2fa5ad10451c3d35fabc75eab39556ae1e2853ad0c0cc31b656731a97fb"}, + {file = "jupyter_events-0.12.0.tar.gz", hash = "sha256:fc3fce98865f6784c9cd0a56a20644fc6098f21c8c33834a8d9fe383c17e554b"}, +] + +[package.dependencies] +jsonschema = {version = ">=4.18.0", extras = ["format-nongpl"]} +packaging = "*" +python-json-logger = ">=2.0.4" +pyyaml = ">=5.3" +referencing = "*" +rfc3339-validator = "*" +rfc3986-validator = 
">=0.1.1" +traitlets = ">=5.3" + +[package.extras] +cli = ["click", "rich"] +docs = ["jupyterlite-sphinx", "myst-parser", "pydata-sphinx-theme (>=0.16)", "sphinx (>=8)", "sphinxcontrib-spelling"] +test = ["click", "pre-commit", "pytest (>=7.0)", "pytest-asyncio (>=0.19.0)", "pytest-console-scripts", "rich"] + +[[package]] +name = "jupyter-lsp" +version = "2.2.5" +description = "Multi-Language Server WebSocket proxy for Jupyter Notebook/Lab server" +optional = false +python-versions = ">=3.8" +groups = ["test"] +files = [ + {file = "jupyter-lsp-2.2.5.tar.gz", hash = "sha256:793147a05ad446f809fd53ef1cd19a9f5256fd0a2d6b7ce943a982cb4f545001"}, + {file = "jupyter_lsp-2.2.5-py3-none-any.whl", hash = "sha256:45fbddbd505f3fbfb0b6cb2f1bc5e15e83ab7c79cd6e89416b248cb3c00c11da"}, +] + +[package.dependencies] +jupyter-server = ">=1.1.2" + +[[package]] +name = "jupyter-server" +version = "2.16.0" +description = "The backend—i.e. core services, APIs, and REST endpoints—to Jupyter web applications." +optional = false +python-versions = ">=3.9" +groups = ["test"] +files = [ + {file = "jupyter_server-2.16.0-py3-none-any.whl", hash = "sha256:3d8db5be3bc64403b1c65b400a1d7f4647a5ce743f3b20dbdefe8ddb7b55af9e"}, + {file = "jupyter_server-2.16.0.tar.gz", hash = "sha256:65d4b44fdf2dcbbdfe0aa1ace4a842d4aaf746a2b7b168134d5aaed35621b7f6"}, +] + +[package.dependencies] +anyio = ">=3.1.0" +argon2-cffi = ">=21.1" +jinja2 = ">=3.0.3" +jupyter-client = ">=7.4.4" +jupyter-core = ">=4.12,<5.0.dev0 || >=5.1.dev0" +jupyter-events = ">=0.11.0" +jupyter-server-terminals = ">=0.4.4" +nbconvert = ">=6.4.4" +nbformat = ">=5.3.0" +overrides = ">=5.0" +packaging = ">=22.0" +prometheus-client = ">=0.9" +pywinpty = {version = ">=2.0.1", markers = "os_name == \"nt\""} +pyzmq = ">=24" +send2trash = ">=1.8.2" +terminado = ">=0.8.3" +tornado = ">=6.2.0" +traitlets = ">=5.6.0" +websocket-client = ">=1.7" + +[package.extras] +docs = ["ipykernel", "jinja2", "jupyter-client", "myst-parser", "nbformat", 
"prometheus-client", "pydata-sphinx-theme", "send2trash", "sphinx-autodoc-typehints", "sphinxcontrib-github-alt", "sphinxcontrib-openapi (>=0.8.0)", "sphinxcontrib-spelling", "sphinxemoji", "tornado", "typing-extensions"] +test = ["flaky", "ipykernel", "pre-commit", "pytest (>=7.0,<9)", "pytest-console-scripts", "pytest-jupyter[server] (>=0.7)", "pytest-timeout", "requests"] + +[[package]] +name = "jupyter-server-terminals" +version = "0.5.3" +description = "A Jupyter Server Extension Providing Terminals." +optional = false +python-versions = ">=3.8" +groups = ["test"] +files = [ + {file = "jupyter_server_terminals-0.5.3-py3-none-any.whl", hash = "sha256:41ee0d7dc0ebf2809c668e0fc726dfaf258fcd3e769568996ca731b6194ae9aa"}, + {file = "jupyter_server_terminals-0.5.3.tar.gz", hash = "sha256:5ae0295167220e9ace0edcfdb212afd2b01ee8d179fe6f23c899590e9b8a5269"}, +] + +[package.dependencies] +pywinpty = {version = ">=2.0.3", markers = "os_name == \"nt\""} +terminado = ">=0.8.3" + +[package.extras] +docs = ["jinja2", "jupyter-server", "mistune (<4.0)", "myst-parser", "nbformat", "packaging", "pydata-sphinx-theme", "sphinxcontrib-github-alt", "sphinxcontrib-openapi", "sphinxcontrib-spelling", "sphinxemoji", "tornado"] +test = ["jupyter-server (>=2.0.0)", "pytest (>=7.0)", "pytest-jupyter[server] (>=0.5.3)", "pytest-timeout"] + +[[package]] +name = "jupyterlab" +version = "4.3.1" +description = "JupyterLab computational environment" +optional = false +python-versions = ">=3.8" +groups = ["test"] +files = [ + {file = "jupyterlab-4.3.1-py3-none-any.whl", hash = "sha256:2d9a1c305bc748e277819a17a5d5e22452e533e835f4237b2f30f3b0e491e01f"}, + {file = "jupyterlab-4.3.1.tar.gz", hash = "sha256:a4a338327556443521731d82f2a6ccf926df478914ca029616621704d47c3c65"}, +] + +[package.dependencies] +async-lru = ">=1.0.0" +httpx = ">=0.25.0" +ipykernel = ">=6.5.0" +jinja2 = ">=3.0.3" +jupyter-core = "*" +jupyter-lsp = ">=2.0.0" +jupyter-server = ">=2.4.0,<3" +jupyterlab-server = ">=2.27.1,<3" 
+notebook-shim = ">=0.2" +packaging = "*" +setuptools = ">=40.1.0" +tornado = ">=6.2.0" +traitlets = "*" + +[package.extras] +dev = ["build", "bump2version", "coverage", "hatch", "pre-commit", "pytest-cov", "ruff (==0.6.9)"] +docs = ["jsx-lexer", "myst-parser", "pydata-sphinx-theme (>=0.13.0)", "pytest", "pytest-check-links", "pytest-jupyter", "sphinx (>=1.8,<8.1.0)", "sphinx-copybutton"] +docs-screenshots = ["altair (==5.4.1)", "ipython (==8.16.1)", "ipywidgets (==8.1.5)", "jupyterlab-geojson (==3.4.0)", "jupyterlab-language-pack-zh-cn (==4.2.post3)", "matplotlib (==3.9.2)", "nbconvert (>=7.0.0)", "pandas (==2.2.3)", "scipy (==1.14.1)", "vega-datasets (==0.9.0)"] +test = ["coverage", "pytest (>=7.0)", "pytest-check-links (>=0.7)", "pytest-console-scripts", "pytest-cov", "pytest-jupyter (>=0.5.3)", "pytest-timeout", "pytest-tornasync", "requests", "requests-cache", "virtualenv"] +upgrade-extension = ["copier (>=9,<10)", "jinja2-time (<0.3)", "pydantic (<3.0)", "pyyaml-include (<3.0)", "tomli-w (<2.0)"] + +[[package]] +name = "jupyterlab-pygments" +version = "0.3.0" +description = "Pygments theme using JupyterLab CSS variables" +optional = false +python-versions = ">=3.8" +groups = ["test"] +files = [ + {file = "jupyterlab_pygments-0.3.0-py3-none-any.whl", hash = "sha256:841a89020971da1d8693f1a99997aefc5dc424bb1b251fd6322462a1b8842780"}, + {file = "jupyterlab_pygments-0.3.0.tar.gz", hash = "sha256:721aca4d9029252b11cfa9d185e5b5af4d54772bb8072f9b7036f4170054d35d"}, +] + +[[package]] +name = "jupyterlab-server" +version = "2.27.3" +description = "A set of server components for JupyterLab and JupyterLab like applications." 
+optional = false +python-versions = ">=3.8" +groups = ["test"] +files = [ + {file = "jupyterlab_server-2.27.3-py3-none-any.whl", hash = "sha256:e697488f66c3db49df675158a77b3b017520d772c6e1548c7d9bcc5df7944ee4"}, + {file = "jupyterlab_server-2.27.3.tar.gz", hash = "sha256:eb36caca59e74471988f0ae25c77945610b887f777255aa21f8065def9e51ed4"}, +] + +[package.dependencies] +babel = ">=2.10" +jinja2 = ">=3.0.3" +json5 = ">=0.9.0" +jsonschema = ">=4.18.0" +jupyter-server = ">=1.21,<3" +packaging = ">=21.3" +requests = ">=2.31" + +[package.extras] +docs = ["autodoc-traits", "jinja2 (<3.2.0)", "mistune (<4)", "myst-parser", "pydata-sphinx-theme", "sphinx", "sphinx-copybutton", "sphinxcontrib-openapi (>0.8)"] +openapi = ["openapi-core (>=0.18.0,<0.19.0)", "ruamel-yaml"] +test = ["hatch", "ipykernel", "openapi-core (>=0.18.0,<0.19.0)", "openapi-spec-validator (>=0.6.0,<0.8.0)", "pytest (>=7.0,<8)", "pytest-console-scripts", "pytest-cov", "pytest-jupyter[server] (>=0.6.2)", "pytest-timeout", "requests-mock", "ruamel-yaml", "sphinxcontrib-spelling", "strict-rfc3339", "werkzeug"] + +[[package]] +name = "jupyterlab-widgets" +version = "3.0.15" +description = "Jupyter interactive widgets for JupyterLab" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "jupyterlab_widgets-3.0.15-py3-none-any.whl", hash = "sha256:d59023d7d7ef71400d51e6fee9a88867f6e65e10a4201605d2d7f3e8f012a31c"}, + {file = "jupyterlab_widgets-3.0.15.tar.gz", hash = "sha256:2920888a0c2922351a9202817957a68c07d99673504d6cd37345299e971bb08b"}, +] + +[[package]] +name = "kubernetes" +version = "33.1.0" +description = "Kubernetes python client" +optional = false +python-versions = ">=3.6" +groups = ["main"] +files = [ + {file = "kubernetes-33.1.0-py2.py3-none-any.whl", hash = "sha256:544de42b24b64287f7e0aa9513c93cb503f7f40eea39b20f66810011a86eabc5"}, + {file = "kubernetes-33.1.0.tar.gz", hash = "sha256:f64d829843a54c251061a8e7a14523b521f2dc5c896cf6d65ccf348648a88993"}, +] + 
+[package.dependencies] +certifi = ">=14.05.14" +durationpy = ">=0.7" +google-auth = ">=1.0.1" +oauthlib = ">=3.2.2" +python-dateutil = ">=2.5.3" +pyyaml = ">=5.4.1" +requests = "*" +requests-oauthlib = "*" +six = ">=1.9.0" +urllib3 = ">=1.24.2" +websocket-client = ">=0.32.0,<0.40.0 || >0.40.0,<0.41.dev0 || >=0.43.dev0" + +[package.extras] +adal = ["adal (>=1.0.2)"] + +[[package]] +name = "markdown-it-py" +version = "3.0.0" +description = "Python port of markdown-it. Markdown parsing, done right!" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb"}, + {file = "markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1"}, +] + +[package.dependencies] +mdurl = ">=0.1,<1.0" + +[package.extras] +benchmarking = ["psutil", "pytest", "pytest-benchmark"] +code-style = ["pre-commit (>=3.0,<4.0)"] +compare = ["commonmark (>=0.9,<1.0)", "markdown (>=3.4,<4.0)", "mistletoe (>=1.0,<2.0)", "mistune (>=2.0,<3.0)", "panflute (>=2.3,<3.0)"] +linkify = ["linkify-it-py (>=1,<3)"] +plugins = ["mdit-py-plugins"] +profiling = ["gprof2dot"] +rtd = ["jupyter_sphinx", "mdit-py-plugins", "myst-parser", "pyyaml", "sphinx", "sphinx-copybutton", "sphinx-design", "sphinx_book_theme"] +testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"] + +[[package]] +name = "markupsafe" +version = "3.0.2" +description = "Safely add untrusted strings to HTML/XML markup." 
+optional = false +python-versions = ">=3.9" +groups = ["docs", "test"] +files = [ + {file = "MarkupSafe-3.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7e94c425039cde14257288fd61dcfb01963e658efbc0ff54f5306b06054700f8"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9e2d922824181480953426608b81967de705c3cef4d1af983af849d7bd619158"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:38a9ef736c01fccdd6600705b09dc574584b89bea478200c5fbf112a6b0d5579"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bbcb445fa71794da8f178f0f6d66789a28d7319071af7a496d4d507ed566270d"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:57cb5a3cf367aeb1d316576250f65edec5bb3be939e9247ae594b4bcbc317dfb"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:3809ede931876f5b2ec92eef964286840ed3540dadf803dd570c3b7e13141a3b"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e07c3764494e3776c602c1e78e298937c3315ccc9043ead7e685b7f2b8d47b3c"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b424c77b206d63d500bcb69fa55ed8d0e6a3774056bdc4839fc9298a7edca171"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-win32.whl", hash = "sha256:fcabf5ff6eea076f859677f5f0b6b5c1a51e70a376b0579e0eadef8db48c6b50"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:6af100e168aa82a50e186c82875a5893c5597a0c1ccdb0d8b40240b1f28b969a"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9025b4018f3a1314059769c7bf15441064b2207cb3f065e6ea1e7359cb46db9d"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:93335ca3812df2f366e80509ae119189886b0f3c2b81325d39efdb84a1e2ae93"}, + {file = 
"MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2cb8438c3cbb25e220c2ab33bb226559e7afb3baec11c4f218ffa7308603c832"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a123e330ef0853c6e822384873bef7507557d8e4a082961e1defa947aa59ba84"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e084f686b92e5b83186b07e8a17fc09e38fff551f3602b249881fec658d3eca"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d8213e09c917a951de9d09ecee036d5c7d36cb6cb7dbaece4c71a60d79fb9798"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:5b02fb34468b6aaa40dfc198d813a641e3a63b98c2b05a16b9f80b7ec314185e"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:0bff5e0ae4ef2e1ae4fdf2dfd5b76c75e5c2fa4132d05fc1b0dabcd20c7e28c4"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-win32.whl", hash = "sha256:6c89876f41da747c8d3677a2b540fb32ef5715f97b66eeb0c6b66f5e3ef6f59d"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:70a87b411535ccad5ef2f1df5136506a10775d267e197e4cf531ced10537bd6b"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:9778bd8ab0a994ebf6f84c2b949e65736d5575320a17ae8984a77fab08db94cf"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:846ade7b71e3536c4e56b386c2a47adf5741d2d8b94ec9dc3e92e5e1ee1e2225"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1c99d261bd2d5f6b59325c92c73df481e05e57f19837bdca8413b9eac4bd8028"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e17c96c14e19278594aa4841ec148115f9c7615a47382ecb6b82bd8fea3ab0c8"}, + {file = 
"MarkupSafe-3.0.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:88416bd1e65dcea10bc7569faacb2c20ce071dd1f87539ca2ab364bf6231393c"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2181e67807fc2fa785d0592dc2d6206c019b9502410671cc905d132a92866557"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:52305740fe773d09cffb16f8ed0427942901f00adedac82ec8b67752f58a1b22"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ad10d3ded218f1039f11a75f8091880239651b52e9bb592ca27de44eed242a48"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-win32.whl", hash = "sha256:0f4ca02bea9a23221c0182836703cbf8930c5e9454bacce27e767509fa286a30"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:8e06879fc22a25ca47312fbe7c8264eb0b662f6db27cb2d3bbbc74b1df4b9b87"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ba9527cdd4c926ed0760bc301f6728ef34d841f405abf9d4f959c478421e4efd"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f8b3d067f2e40fe93e1ccdd6b2e1d16c43140e76f02fb1319a05cf2b79d99430"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:569511d3b58c8791ab4c2e1285575265991e6d8f8700c7be0e88f86cb0672094"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15ab75ef81add55874e7ab7055e9c397312385bd9ced94920f2802310c930396"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f3818cb119498c0678015754eba762e0d61e5b52d34c8b13d770f0719f7b1d79"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:cdb82a876c47801bb54a690c5ae105a46b392ac6099881cdfb9f6e95e4014c6a"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_i686.whl", hash = 
"sha256:cabc348d87e913db6ab4aa100f01b08f481097838bdddf7c7a84b7575b7309ca"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:444dcda765c8a838eaae23112db52f1efaf750daddb2d9ca300bcae1039adc5c"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-win32.whl", hash = "sha256:bcf3e58998965654fdaff38e58584d8937aa3096ab5354d493c77d1fdd66d7a1"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:e6a2a455bd412959b57a172ce6328d2dd1f01cb2135efda2e4576e8a23fa3b0f"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:b5a6b3ada725cea8a5e634536b1b01c30bcdcd7f9c6fff4151548d5bf6b3a36c"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:a904af0a6162c73e3edcb969eeeb53a63ceeb5d8cf642fade7d39e7963a22ddb"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa4e5faecf353ed117801a068ebab7b7e09ffb6e1d5e412dc852e0da018126c"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0ef13eaeee5b615fb07c9a7dadb38eac06a0608b41570d8ade51c56539e509d"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d16a81a06776313e817c951135cf7340a3e91e8c1ff2fac444cfd75fffa04afe"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:6381026f158fdb7c72a168278597a5e3a5222e83ea18f543112b2662a9b699c5"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:3d79d162e7be8f996986c064d1c7c817f6df3a77fe3d6859f6f9e7be4b8c213a"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:131a3c7689c85f5ad20f9f6fb1b866f402c445b220c19fe4308c0b147ccd2ad9"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-win32.whl", hash = "sha256:ba8062ed2cf21c07a9e295d5b8a2a5ce678b913b45fdf68c32d95d6c1291e0b6"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-win_amd64.whl", 
hash = "sha256:e444a31f8db13eb18ada366ab3cf45fd4b31e4db1236a4448f68778c1d1a5a2f"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:eaa0a10b7f72326f1372a713e73c3f739b524b3af41feb43e4921cb529f5929a"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:48032821bbdf20f5799ff537c7ac3d1fba0ba032cfc06194faffa8cda8b560ff"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a9d3f5f0901fdec14d8d2f66ef7d035f2157240a433441719ac9a3fba440b13"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:88b49a3b9ff31e19998750c38e030fc7bb937398b1f78cfa599aaef92d693144"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cfad01eed2c2e0c01fd0ecd2ef42c492f7f93902e39a42fc9ee1692961443a29"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:1225beacc926f536dc82e45f8a4d68502949dc67eea90eab715dea3a21c1b5f0"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:3169b1eefae027567d1ce6ee7cae382c57fe26e82775f460f0b2778beaad66c0"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:eb7972a85c54febfb25b5c4b4f3af4dcc731994c7da0d8a0b4a6eb0640e1d178"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-win32.whl", hash = "sha256:8c4e8c3ce11e1f92f6536ff07154f9d49677ebaaafc32db9db4620bc11ed480f"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:6e296a513ca3d94054c2c881cc913116e90fd030ad1c656b3869762b754f5f8a"}, + {file = "markupsafe-3.0.2.tar.gz", hash = "sha256:ee55d3edf80167e48ea11a923c7386f4669df67d7994554387f84e7d8b0a2bf0"}, +] + +[[package]] +name = "matplotlib-inline" +version = "0.1.7" +description = "Inline Matplotlib backend for Jupyter" +optional = false +python-versions = ">=3.8" +groups = ["main", "test"] +files = [ + {file = 
"matplotlib_inline-0.1.7-py3-none-any.whl", hash = "sha256:df192d39a4ff8f21b1895d72e6a13f5fcc5099f00fa84384e0ea28c2cc0653ca"}, + {file = "matplotlib_inline-0.1.7.tar.gz", hash = "sha256:8423b23ec666be3d16e16b60bdd8ac4e86e840ebd1dd11a30b9f117f2fa0ab90"}, +] + +[package.dependencies] +traitlets = "*" + +[[package]] +name = "mdurl" +version = "0.1.2" +description = "Markdown URL utilities" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8"}, + {file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"}, +] + +[[package]] +name = "mistune" +version = "3.1.3" +description = "A sane and fast Markdown parser with useful plugins and renderers" +optional = false +python-versions = ">=3.8" +groups = ["test"] +files = [ + {file = "mistune-3.1.3-py3-none-any.whl", hash = "sha256:1a32314113cff28aa6432e99e522677c8587fd83e3d51c29b82a52409c842bd9"}, + {file = "mistune-3.1.3.tar.gz", hash = "sha256:a7035c21782b2becb6be62f8f25d3df81ccb4d6fa477a6525b15af06539f02a0"}, +] + +[[package]] +name = "msgpack" +version = "1.1.0" +description = "MessagePack serializer" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "msgpack-1.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7ad442d527a7e358a469faf43fda45aaf4ac3249c8310a82f0ccff9164e5dccd"}, + {file = "msgpack-1.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:74bed8f63f8f14d75eec75cf3d04ad581da6b914001b474a5d3cd3372c8cc27d"}, + {file = "msgpack-1.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:914571a2a5b4e7606997e169f64ce53a8b1e06f2cf2c3a7273aa106236d43dd5"}, + {file = "msgpack-1.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c921af52214dcbb75e6bdf6a661b23c3e6417f00c603dd2070bccb5c3ef499f5"}, + {file = 
"msgpack-1.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d8ce0b22b890be5d252de90d0e0d119f363012027cf256185fc3d474c44b1b9e"}, + {file = "msgpack-1.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:73322a6cc57fcee3c0c57c4463d828e9428275fb85a27aa2aa1a92fdc42afd7b"}, + {file = "msgpack-1.1.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:e1f3c3d21f7cf67bcf2da8e494d30a75e4cf60041d98b3f79875afb5b96f3a3f"}, + {file = "msgpack-1.1.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:64fc9068d701233effd61b19efb1485587560b66fe57b3e50d29c5d78e7fef68"}, + {file = "msgpack-1.1.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:42f754515e0f683f9c79210a5d1cad631ec3d06cea5172214d2176a42e67e19b"}, + {file = "msgpack-1.1.0-cp310-cp310-win32.whl", hash = "sha256:3df7e6b05571b3814361e8464f9304c42d2196808e0119f55d0d3e62cd5ea044"}, + {file = "msgpack-1.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:685ec345eefc757a7c8af44a3032734a739f8c45d1b0ac45efc5d8977aa4720f"}, + {file = "msgpack-1.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:3d364a55082fb2a7416f6c63ae383fbd903adb5a6cf78c5b96cc6316dc1cedc7"}, + {file = "msgpack-1.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:79ec007767b9b56860e0372085f8504db5d06bd6a327a335449508bbee9648fa"}, + {file = "msgpack-1.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6ad622bf7756d5a497d5b6836e7fc3752e2dd6f4c648e24b1803f6048596f701"}, + {file = "msgpack-1.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e59bca908d9ca0de3dc8684f21ebf9a690fe47b6be93236eb40b99af28b6ea6"}, + {file = "msgpack-1.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e1da8f11a3dd397f0a32c76165cf0c4eb95b31013a94f6ecc0b280c05c91b59"}, + {file = "msgpack-1.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:452aff037287acb1d70a804ffd022b21fa2bb7c46bee884dbc864cc9024128a0"}, + {file = "msgpack-1.1.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8da4bf6d54ceed70e8861f833f83ce0814a2b72102e890cbdfe4b34764cdd66e"}, + {file = "msgpack-1.1.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:41c991beebf175faf352fb940bf2af9ad1fb77fd25f38d9142053914947cdbf6"}, + {file = "msgpack-1.1.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a52a1f3a5af7ba1c9ace055b659189f6c669cf3657095b50f9602af3a3ba0fe5"}, + {file = "msgpack-1.1.0-cp311-cp311-win32.whl", hash = "sha256:58638690ebd0a06427c5fe1a227bb6b8b9fdc2bd07701bec13c2335c82131a88"}, + {file = "msgpack-1.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:fd2906780f25c8ed5d7b323379f6138524ba793428db5d0e9d226d3fa6aa1788"}, + {file = "msgpack-1.1.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:d46cf9e3705ea9485687aa4001a76e44748b609d260af21c4ceea7f2212a501d"}, + {file = "msgpack-1.1.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5dbad74103df937e1325cc4bfeaf57713be0b4f15e1c2da43ccdd836393e2ea2"}, + {file = "msgpack-1.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:58dfc47f8b102da61e8949708b3eafc3504509a5728f8b4ddef84bd9e16ad420"}, + {file = "msgpack-1.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4676e5be1b472909b2ee6356ff425ebedf5142427842aa06b4dfd5117d1ca8a2"}, + {file = "msgpack-1.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:17fb65dd0bec285907f68b15734a993ad3fc94332b5bb21b0435846228de1f39"}, + {file = "msgpack-1.1.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a51abd48c6d8ac89e0cfd4fe177c61481aca2d5e7ba42044fd218cfd8ea9899f"}, + {file = "msgpack-1.1.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2137773500afa5494a61b1208619e3871f75f27b03bcfca7b3a7023284140247"}, + {file = "msgpack-1.1.0-cp312-cp312-musllinux_1_2_i686.whl", hash = 
"sha256:398b713459fea610861c8a7b62a6fec1882759f308ae0795b5413ff6a160cf3c"}, + {file = "msgpack-1.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:06f5fd2f6bb2a7914922d935d3b8bb4a7fff3a9a91cfce6d06c13bc42bec975b"}, + {file = "msgpack-1.1.0-cp312-cp312-win32.whl", hash = "sha256:ad33e8400e4ec17ba782f7b9cf868977d867ed784a1f5f2ab46e7ba53b6e1e1b"}, + {file = "msgpack-1.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:115a7af8ee9e8cddc10f87636767857e7e3717b7a2e97379dc2054712693e90f"}, + {file = "msgpack-1.1.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:071603e2f0771c45ad9bc65719291c568d4edf120b44eb36324dcb02a13bfddf"}, + {file = "msgpack-1.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:0f92a83b84e7c0749e3f12821949d79485971f087604178026085f60ce109330"}, + {file = "msgpack-1.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:4a1964df7b81285d00a84da4e70cb1383f2e665e0f1f2a7027e683956d04b734"}, + {file = "msgpack-1.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:59caf6a4ed0d164055ccff8fe31eddc0ebc07cf7326a2aaa0dbf7a4001cd823e"}, + {file = "msgpack-1.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0907e1a7119b337971a689153665764adc34e89175f9a34793307d9def08e6ca"}, + {file = "msgpack-1.1.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:65553c9b6da8166e819a6aa90ad15288599b340f91d18f60b2061f402b9a4915"}, + {file = "msgpack-1.1.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:7a946a8992941fea80ed4beae6bff74ffd7ee129a90b4dd5cf9c476a30e9708d"}, + {file = "msgpack-1.1.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:4b51405e36e075193bc051315dbf29168d6141ae2500ba8cd80a522964e31434"}, + {file = "msgpack-1.1.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:b4c01941fd2ff87c2a934ee6055bda4ed353a7846b8d4f341c428109e9fcde8c"}, + {file = "msgpack-1.1.0-cp313-cp313-win32.whl", hash = 
"sha256:7c9a35ce2c2573bada929e0b7b3576de647b0defbd25f5139dcdaba0ae35a4cc"}, + {file = "msgpack-1.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:bce7d9e614a04d0883af0b3d4d501171fbfca038f12c77fa838d9f198147a23f"}, + {file = "msgpack-1.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c40ffa9a15d74e05ba1fe2681ea33b9caffd886675412612d93ab17b58ea2fec"}, + {file = "msgpack-1.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f1ba6136e650898082d9d5a5217d5906d1e138024f836ff48691784bbe1adf96"}, + {file = "msgpack-1.1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e0856a2b7e8dcb874be44fea031d22e5b3a19121be92a1e098f46068a11b0870"}, + {file = "msgpack-1.1.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:471e27a5787a2e3f974ba023f9e265a8c7cfd373632247deb225617e3100a3c7"}, + {file = "msgpack-1.1.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:646afc8102935a388ffc3914b336d22d1c2d6209c773f3eb5dd4d6d3b6f8c1cb"}, + {file = "msgpack-1.1.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:13599f8829cfbe0158f6456374e9eea9f44eee08076291771d8ae93eda56607f"}, + {file = "msgpack-1.1.0-cp38-cp38-win32.whl", hash = "sha256:8a84efb768fb968381e525eeeb3d92857e4985aacc39f3c47ffd00eb4509315b"}, + {file = "msgpack-1.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:879a7b7b0ad82481c52d3c7eb99bf6f0645dbdec5134a4bddbd16f3506947feb"}, + {file = "msgpack-1.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:53258eeb7a80fc46f62fd59c876957a2d0e15e6449a9e71842b6d24419d88ca1"}, + {file = "msgpack-1.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7e7b853bbc44fb03fbdba34feb4bd414322180135e2cb5164f20ce1c9795ee48"}, + {file = "msgpack-1.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f3e9b4936df53b970513eac1758f3882c88658a220b58dcc1e39606dccaaf01c"}, + {file = "msgpack-1.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:46c34e99110762a76e3911fc923222472c9d681f1094096ac4102c18319e6468"}, + {file = "msgpack-1.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8a706d1e74dd3dea05cb54580d9bd8b2880e9264856ce5068027eed09680aa74"}, + {file = "msgpack-1.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:534480ee5690ab3cbed89d4c8971a5c631b69a8c0883ecfea96c19118510c846"}, + {file = "msgpack-1.1.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:8cf9e8c3a2153934a23ac160cc4cba0ec035f6867c8013cc6077a79823370346"}, + {file = "msgpack-1.1.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:3180065ec2abbe13a4ad37688b61b99d7f9e012a535b930e0e683ad6bc30155b"}, + {file = "msgpack-1.1.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:c5a91481a3cc573ac8c0d9aace09345d989dc4a0202b7fcb312c88c26d4e71a8"}, + {file = "msgpack-1.1.0-cp39-cp39-win32.whl", hash = "sha256:f80bc7d47f76089633763f952e67f8214cb7b3ee6bfa489b3cb6a84cfac114cd"}, + {file = "msgpack-1.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:4d1b7ff2d6146e16e8bd665ac726a89c74163ef8cd39fa8c1087d4e52d3a2325"}, + {file = "msgpack-1.1.0.tar.gz", hash = "sha256:dd432ccc2c72b914e4cb77afce64aab761c1137cc698be3984eee260bcb2896e"}, +] + +[[package]] +name = "multidict" +version = "6.4.4" +description = "multidict implementation" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "multidict-6.4.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:8adee3ac041145ffe4488ea73fa0a622b464cc25340d98be76924d0cda8545ff"}, + {file = "multidict-6.4.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b61e98c3e2a861035aaccd207da585bdcacef65fe01d7a0d07478efac005e028"}, + {file = "multidict-6.4.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:75493f28dbadecdbb59130e74fe935288813301a8554dc32f0c631b6bdcdf8b0"}, + {file = "multidict-6.4.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:4ffc3c6a37e048b5395ee235e4a2a0d639c2349dffa32d9367a42fc20d399772"}, + {file = "multidict-6.4.4-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:87cb72263946b301570b0f63855569a24ee8758aaae2cd182aae7d95fbc92ca7"}, + {file = "multidict-6.4.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9bbf7bd39822fd07e3609b6b4467af4c404dd2b88ee314837ad1830a7f4a8299"}, + {file = "multidict-6.4.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d1f7cbd4f1f44ddf5fd86a8675b7679176eae770f2fc88115d6dddb6cefb59bc"}, + {file = "multidict-6.4.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bb5ac9e5bfce0e6282e7f59ff7b7b9a74aa8e5c60d38186a4637f5aa764046ad"}, + {file = "multidict-6.4.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4efc31dfef8c4eeb95b6b17d799eedad88c4902daba39ce637e23a17ea078915"}, + {file = "multidict-6.4.4-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:9fcad2945b1b91c29ef2b4050f590bfcb68d8ac8e0995a74e659aa57e8d78e01"}, + {file = "multidict-6.4.4-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:d877447e7368c7320832acb7159557e49b21ea10ffeb135c1077dbbc0816b598"}, + {file = "multidict-6.4.4-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:33a12ebac9f380714c298cbfd3e5b9c0c4e89c75fe612ae496512ee51028915f"}, + {file = "multidict-6.4.4-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:0f14ea68d29b43a9bf37953881b1e3eb75b2739e896ba4a6aa4ad4c5b9ffa145"}, + {file = "multidict-6.4.4-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:0327ad2c747a6600e4797d115d3c38a220fdb28e54983abe8964fd17e95ae83c"}, + {file = "multidict-6.4.4-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:d1a20707492db9719a05fc62ee215fd2c29b22b47c1b1ba347f9abc831e26683"}, + {file = "multidict-6.4.4-cp310-cp310-win32.whl", hash = 
"sha256:d83f18315b9fca5db2452d1881ef20f79593c4aa824095b62cb280019ef7aa3d"}, + {file = "multidict-6.4.4-cp310-cp310-win_amd64.whl", hash = "sha256:9c17341ee04545fd962ae07330cb5a39977294c883485c8d74634669b1f7fe04"}, + {file = "multidict-6.4.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:4f5f29794ac0e73d2a06ac03fd18870adc0135a9d384f4a306a951188ed02f95"}, + {file = "multidict-6.4.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c04157266344158ebd57b7120d9b0b35812285d26d0e78193e17ef57bfe2979a"}, + {file = "multidict-6.4.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:bb61ffd3ab8310d93427e460f565322c44ef12769f51f77277b4abad7b6f7223"}, + {file = "multidict-6.4.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5e0ba18a9afd495f17c351d08ebbc4284e9c9f7971d715f196b79636a4d0de44"}, + {file = "multidict-6.4.4-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:9faf1b1dcaadf9f900d23a0e6d6c8eadd6a95795a0e57fcca73acce0eb912065"}, + {file = "multidict-6.4.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a4d1cb1327c6082c4fce4e2a438483390964c02213bc6b8d782cf782c9b1471f"}, + {file = "multidict-6.4.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:941f1bec2f5dbd51feeb40aea654c2747f811ab01bdd3422a48a4e4576b7d76a"}, + {file = "multidict-6.4.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e5f8a146184da7ea12910a4cec51ef85e44f6268467fb489c3caf0cd512f29c2"}, + {file = "multidict-6.4.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:232b7237e57ec3c09be97206bfb83a0aa1c5d7d377faa019c68a210fa35831f1"}, + {file = "multidict-6.4.4-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:55ae0721c1513e5e3210bca4fc98456b980b0c2c016679d3d723119b6b202c42"}, + {file = "multidict-6.4.4-cp311-cp311-musllinux_1_2_armv7l.whl", hash = 
"sha256:51d662c072579f63137919d7bb8fc250655ce79f00c82ecf11cab678f335062e"}, + {file = "multidict-6.4.4-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:0e05c39962baa0bb19a6b210e9b1422c35c093b651d64246b6c2e1a7e242d9fd"}, + {file = "multidict-6.4.4-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:d5b1cc3ab8c31d9ebf0faa6e3540fb91257590da330ffe6d2393d4208e638925"}, + {file = "multidict-6.4.4-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:93ec84488a384cd7b8a29c2c7f467137d8a73f6fe38bb810ecf29d1ade011a7c"}, + {file = "multidict-6.4.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b308402608493638763abc95f9dc0030bbd6ac6aff784512e8ac3da73a88af08"}, + {file = "multidict-6.4.4-cp311-cp311-win32.whl", hash = "sha256:343892a27d1a04d6ae455ecece12904d242d299ada01633d94c4f431d68a8c49"}, + {file = "multidict-6.4.4-cp311-cp311-win_amd64.whl", hash = "sha256:73484a94f55359780c0f458bbd3c39cb9cf9c182552177d2136e828269dee529"}, + {file = "multidict-6.4.4-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:dc388f75a1c00000824bf28b7633e40854f4127ede80512b44c3cfeeea1839a2"}, + {file = "multidict-6.4.4-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:98af87593a666f739d9dba5d0ae86e01b0e1a9cfcd2e30d2d361fbbbd1a9162d"}, + {file = "multidict-6.4.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:aff4cafea2d120327d55eadd6b7f1136a8e5a0ecf6fb3b6863e8aca32cd8e50a"}, + {file = "multidict-6.4.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:169c4ba7858176b797fe551d6e99040c531c775d2d57b31bcf4de6d7a669847f"}, + {file = "multidict-6.4.4-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:b9eb4c59c54421a32b3273d4239865cb14ead53a606db066d7130ac80cc8ec93"}, + {file = "multidict-6.4.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7cf3bd54c56aa16fdb40028d545eaa8d051402b61533c21e84046e05513d5780"}, + {file = 
"multidict-6.4.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f682c42003c7264134bfe886376299db4cc0c6cd06a3295b41b347044bcb5482"}, + {file = "multidict-6.4.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a920f9cf2abdf6e493c519492d892c362007f113c94da4c239ae88429835bad1"}, + {file = "multidict-6.4.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:530d86827a2df6504526106b4c104ba19044594f8722d3e87714e847c74a0275"}, + {file = "multidict-6.4.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ecde56ea2439b96ed8a8d826b50c57364612ddac0438c39e473fafad7ae1c23b"}, + {file = "multidict-6.4.4-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:dc8c9736d8574b560634775ac0def6bdc1661fc63fa27ffdfc7264c565bcb4f2"}, + {file = "multidict-6.4.4-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:7f3d3b3c34867579ea47cbd6c1f2ce23fbfd20a273b6f9e3177e256584f1eacc"}, + {file = "multidict-6.4.4-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:87a728af265e08f96b6318ebe3c0f68b9335131f461efab2fc64cc84a44aa6ed"}, + {file = "multidict-6.4.4-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:9f193eeda1857f8e8d3079a4abd258f42ef4a4bc87388452ed1e1c4d2b0c8740"}, + {file = "multidict-6.4.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:be06e73c06415199200e9a2324a11252a3d62030319919cde5e6950ffeccf72e"}, + {file = "multidict-6.4.4-cp312-cp312-win32.whl", hash = "sha256:622f26ea6a7e19b7c48dd9228071f571b2fbbd57a8cd71c061e848f281550e6b"}, + {file = "multidict-6.4.4-cp312-cp312-win_amd64.whl", hash = "sha256:5e2bcda30d5009996ff439e02a9f2b5c3d64a20151d34898c000a6281faa3781"}, + {file = "multidict-6.4.4-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:82ffabefc8d84c2742ad19c37f02cde5ec2a1ee172d19944d380f920a340e4b9"}, + {file = "multidict-6.4.4-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:6a2f58a66fe2c22615ad26156354005391e26a2f3721c3621504cd87c1ea87bf"}, + 
{file = "multidict-6.4.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5883d6ee0fd9d8a48e9174df47540b7545909841ac82354c7ae4cbe9952603bd"}, + {file = "multidict-6.4.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9abcf56a9511653fa1d052bfc55fbe53dbee8f34e68bd6a5a038731b0ca42d15"}, + {file = "multidict-6.4.4-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:6ed5ae5605d4ad5a049fad2a28bb7193400700ce2f4ae484ab702d1e3749c3f9"}, + {file = "multidict-6.4.4-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bbfcb60396f9bcfa63e017a180c3105b8c123a63e9d1428a36544e7d37ca9e20"}, + {file = "multidict-6.4.4-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b0f1987787f5f1e2076b59692352ab29a955b09ccc433c1f6b8e8e18666f608b"}, + {file = "multidict-6.4.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1d0121ccce8c812047d8d43d691a1ad7641f72c4f730474878a5aeae1b8ead8c"}, + {file = "multidict-6.4.4-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:83ec4967114295b8afd120a8eec579920c882831a3e4c3331d591a8e5bfbbc0f"}, + {file = "multidict-6.4.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:995f985e2e268deaf17867801b859a282e0448633f1310e3704b30616d269d69"}, + {file = "multidict-6.4.4-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:d832c608f94b9f92a0ec8b7e949be7792a642b6e535fcf32f3e28fab69eeb046"}, + {file = "multidict-6.4.4-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:d21c1212171cf7da703c5b0b7a0e85be23b720818aef502ad187d627316d5645"}, + {file = "multidict-6.4.4-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:cbebaa076aaecad3d4bb4c008ecc73b09274c952cf6a1b78ccfd689e51f5a5b0"}, + {file = "multidict-6.4.4-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:c93a6fb06cc8e5d3628b2b5fda215a5db01e8f08fc15fadd65662d9b857acbe4"}, + {file = 
"multidict-6.4.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8cd8f81f1310182362fb0c7898145ea9c9b08a71081c5963b40ee3e3cac589b1"}, + {file = "multidict-6.4.4-cp313-cp313-win32.whl", hash = "sha256:3e9f1cd61a0ab857154205fb0b1f3d3ace88d27ebd1409ab7af5096e409614cd"}, + {file = "multidict-6.4.4-cp313-cp313-win_amd64.whl", hash = "sha256:8ffb40b74400e4455785c2fa37eba434269149ec525fc8329858c862e4b35373"}, + {file = "multidict-6.4.4-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:6a602151dbf177be2450ef38966f4be3467d41a86c6a845070d12e17c858a156"}, + {file = "multidict-6.4.4-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:0d2b9712211b860d123815a80b859075d86a4d54787e247d7fbee9db6832cf1c"}, + {file = "multidict-6.4.4-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:d2fa86af59f8fc1972e121ade052145f6da22758f6996a197d69bb52f8204e7e"}, + {file = "multidict-6.4.4-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50855d03e9e4d66eab6947ba688ffb714616f985838077bc4b490e769e48da51"}, + {file = "multidict-6.4.4-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:5bce06b83be23225be1905dcdb6b789064fae92499fbc458f59a8c0e68718601"}, + {file = "multidict-6.4.4-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:66ed0731f8e5dfd8369a883b6e564aca085fb9289aacabd9decd70568b9a30de"}, + {file = "multidict-6.4.4-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:329ae97fc2f56f44d91bc47fe0972b1f52d21c4b7a2ac97040da02577e2daca2"}, + {file = "multidict-6.4.4-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c27e5dcf520923d6474d98b96749e6805f7677e93aaaf62656005b8643f907ab"}, + {file = "multidict-6.4.4-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:058cc59b9e9b143cc56715e59e22941a5d868c322242278d28123a5d09cdf6b0"}, + {file = 
"multidict-6.4.4-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:69133376bc9a03f8c47343d33f91f74a99c339e8b58cea90433d8e24bb298031"}, + {file = "multidict-6.4.4-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:d6b15c55721b1b115c5ba178c77104123745b1417527ad9641a4c5e2047450f0"}, + {file = "multidict-6.4.4-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:a887b77f51d3d41e6e1a63cf3bc7ddf24de5939d9ff69441387dfefa58ac2e26"}, + {file = "multidict-6.4.4-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:632a3bf8f1787f7ef7d3c2f68a7bde5be2f702906f8b5842ad6da9d974d0aab3"}, + {file = "multidict-6.4.4-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:a145c550900deb7540973c5cdb183b0d24bed6b80bf7bddf33ed8f569082535e"}, + {file = "multidict-6.4.4-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:cc5d83c6619ca5c9672cb78b39ed8542f1975a803dee2cda114ff73cbb076edd"}, + {file = "multidict-6.4.4-cp313-cp313t-win32.whl", hash = "sha256:3312f63261b9df49be9d57aaa6abf53a6ad96d93b24f9cc16cf979956355ce6e"}, + {file = "multidict-6.4.4-cp313-cp313t-win_amd64.whl", hash = "sha256:ba852168d814b2c73333073e1c7116d9395bea69575a01b0b3c89d2d5a87c8fb"}, + {file = "multidict-6.4.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:603f39bd1cf85705c6c1ba59644b480dfe495e6ee2b877908de93322705ad7cf"}, + {file = "multidict-6.4.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fc60f91c02e11dfbe3ff4e1219c085695c339af72d1641800fe6075b91850c8f"}, + {file = "multidict-6.4.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:496bcf01c76a70a31c3d746fd39383aad8d685ce6331e4c709e9af4ced5fa221"}, + {file = "multidict-6.4.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4219390fb5bf8e548e77b428bb36a21d9382960db5321b74d9d9987148074d6b"}, + {file = "multidict-6.4.4-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:3ef4e9096ff86dfdcbd4a78253090ba13b1d183daa11b973e842465d94ae1772"}, + {file = 
"multidict-6.4.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:49a29d7133b1fc214e818bbe025a77cc6025ed9a4f407d2850373ddde07fd04a"}, + {file = "multidict-6.4.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e32053d6d3a8b0dfe49fde05b496731a0e6099a4df92154641c00aa76786aef5"}, + {file = "multidict-6.4.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8cc403092a49509e8ef2d2fd636a8ecefc4698cc57bbe894606b14579bc2a955"}, + {file = "multidict-6.4.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5363f9b2a7f3910e5c87d8b1855c478c05a2dc559ac57308117424dfaad6805c"}, + {file = "multidict-6.4.4-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:2e543a40e4946cf70a88a3be87837a3ae0aebd9058ba49e91cacb0b2cd631e2b"}, + {file = "multidict-6.4.4-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:60d849912350da557fe7de20aa8cf394aada6980d0052cc829eeda4a0db1c1db"}, + {file = "multidict-6.4.4-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:19d08b4f22eae45bb018b9f06e2838c1e4b853c67628ef8ae126d99de0da6395"}, + {file = "multidict-6.4.4-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:d693307856d1ef08041e8b6ff01d5b4618715007d288490ce2c7e29013c12b9a"}, + {file = "multidict-6.4.4-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:fad6daaed41021934917f4fb03ca2db8d8a4d79bf89b17ebe77228eb6710c003"}, + {file = "multidict-6.4.4-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:c10d17371bff801af0daf8b073c30b6cf14215784dc08cd5c43ab5b7b8029bbc"}, + {file = "multidict-6.4.4-cp39-cp39-win32.whl", hash = "sha256:7e23f2f841fcb3ebd4724a40032d32e0892fbba4143e43d2a9e7695c5e50e6bd"}, + {file = "multidict-6.4.4-cp39-cp39-win_amd64.whl", hash = "sha256:4d7b50b673ffb4ff4366e7ab43cf1f0aef4bd3608735c5fbdf0bdb6f690da411"}, + {file = "multidict-6.4.4-py3-none-any.whl", hash = "sha256:bd4557071b561a8b3b6075c3ce93cf9bfb6182cb241805c3d66ced3b75eff4ac"}, + {file = 
"multidict-6.4.4.tar.gz", hash = "sha256:69ee9e6ba214b5245031b76233dd95408a0fd57fdb019ddcc1ead4790932a8e8"}, +] + +[[package]] +name = "nbclient" +version = "0.10.2" +description = "A client library for executing notebooks. Formerly nbconvert's ExecutePreprocessor." +optional = false +python-versions = ">=3.9.0" +groups = ["test"] +files = [ + {file = "nbclient-0.10.2-py3-none-any.whl", hash = "sha256:4ffee11e788b4a27fabeb7955547e4318a5298f34342a4bfd01f2e1faaeadc3d"}, + {file = "nbclient-0.10.2.tar.gz", hash = "sha256:90b7fc6b810630db87a6d0c2250b1f0ab4cf4d3c27a299b0cde78a4ed3fd9193"}, +] + +[package.dependencies] +jupyter-client = ">=6.1.12" +jupyter-core = ">=4.12,<5.0.dev0 || >=5.1.dev0" +nbformat = ">=5.1" +traitlets = ">=5.4" + +[package.extras] +dev = ["pre-commit"] +docs = ["autodoc-traits", "flaky", "ipykernel (>=6.19.3)", "ipython", "ipywidgets", "mock", "moto", "myst-parser", "nbconvert (>=7.1.0)", "pytest (>=7.0,<8)", "pytest-asyncio", "pytest-cov (>=4.0)", "sphinx (>=1.7)", "sphinx-book-theme", "sphinxcontrib-spelling", "testpath", "xmltodict"] +test = ["flaky", "ipykernel (>=6.19.3)", "ipython", "ipywidgets", "nbconvert (>=7.1.0)", "pytest (>=7.0,<8)", "pytest-asyncio", "pytest-cov (>=4.0)", "testpath", "xmltodict"] + +[[package]] +name = "nbconvert" +version = "7.16.6" +description = "Converting Jupyter Notebooks (.ipynb files) to other formats. Output formats include asciidoc, html, latex, markdown, pdf, py, rst, script. nbconvert can be used both as a Python library (`import nbconvert`) or as a command line tool (invoked as `jupyter nbconvert ...`)." 
+optional = false +python-versions = ">=3.8" +groups = ["test"] +files = [ + {file = "nbconvert-7.16.6-py3-none-any.whl", hash = "sha256:1375a7b67e0c2883678c48e506dc320febb57685e5ee67faa51b18a90f3a712b"}, + {file = "nbconvert-7.16.6.tar.gz", hash = "sha256:576a7e37c6480da7b8465eefa66c17844243816ce1ccc372633c6b71c3c0f582"}, +] + +[package.dependencies] +beautifulsoup4 = "*" +bleach = {version = "!=5.0.0", extras = ["css"]} +defusedxml = "*" +jinja2 = ">=3.0" +jupyter-core = ">=4.7" +jupyterlab-pygments = "*" +markupsafe = ">=2.0" +mistune = ">=2.0.3,<4" +nbclient = ">=0.5.0" +nbformat = ">=5.7" +packaging = "*" +pandocfilters = ">=1.4.1" +pygments = ">=2.4.1" +traitlets = ">=5.1" + +[package.extras] +all = ["flaky", "ipykernel", "ipython", "ipywidgets (>=7.5)", "myst-parser", "nbsphinx (>=0.2.12)", "playwright", "pydata-sphinx-theme", "pyqtwebengine (>=5.15)", "pytest (>=7)", "sphinx (==5.0.2)", "sphinxcontrib-spelling", "tornado (>=6.1)"] +docs = ["ipykernel", "ipython", "myst-parser", "nbsphinx (>=0.2.12)", "pydata-sphinx-theme", "sphinx (==5.0.2)", "sphinxcontrib-spelling"] +qtpdf = ["pyqtwebengine (>=5.15)"] +qtpng = ["pyqtwebengine (>=5.15)"] +serve = ["tornado (>=6.1)"] +test = ["flaky", "ipykernel", "ipywidgets (>=7.5)", "pytest (>=7)"] +webpdf = ["playwright"] + +[[package]] +name = "nbformat" +version = "5.10.4" +description = "The Jupyter Notebook format" +optional = false +python-versions = ">=3.8" +groups = ["test"] +files = [ + {file = "nbformat-5.10.4-py3-none-any.whl", hash = "sha256:3b48d6c8fbca4b299bf3982ea7db1af21580e4fec269ad087b9e81588891200b"}, + {file = "nbformat-5.10.4.tar.gz", hash = "sha256:322168b14f937a5d11362988ecac2a4952d3d8e3a2cbeb2319584631226d5b3a"}, +] + +[package.dependencies] +fastjsonschema = ">=2.15" +jsonschema = ">=2.6" +jupyter-core = ">=4.12,<5.0.dev0 || >=5.1.dev0" +traitlets = ">=5.1" + +[package.extras] +docs = ["myst-parser", "pydata-sphinx-theme", "sphinx", "sphinxcontrib-github-alt", "sphinxcontrib-spelling"] +test = 
["pep440", "pre-commit", "pytest", "testpath"] + +[[package]] +name = "nest-asyncio" +version = "1.6.0" +description = "Patch asyncio to allow nested event loops" +optional = false +python-versions = ">=3.5" +groups = ["test"] +files = [ + {file = "nest_asyncio-1.6.0-py3-none-any.whl", hash = "sha256:87af6efd6b5e897c81050477ef65c62e2b2f35d51703cae01aff2905b1852e1c"}, + {file = "nest_asyncio-1.6.0.tar.gz", hash = "sha256:6f172d5449aca15afd6c646851f4e31e02c598d553a667e38cafa997cfec55fe"}, +] + +[[package]] +name = "notebook-shim" +version = "0.2.4" +description = "A shim layer for notebook traits and config" +optional = false +python-versions = ">=3.7" +groups = ["test"] +files = [ + {file = "notebook_shim-0.2.4-py3-none-any.whl", hash = "sha256:411a5be4e9dc882a074ccbcae671eda64cceb068767e9a3419096986560e1cef"}, + {file = "notebook_shim-0.2.4.tar.gz", hash = "sha256:b4b2cfa1b65d98307ca24361f5b30fe785b53c3fd07b7a47e89acb5e6ac638cb"}, +] + +[package.dependencies] +jupyter-server = ">=1.8,<3" + +[package.extras] +test = ["pytest", "pytest-console-scripts", "pytest-jupyter", "pytest-tornasync"] + +[[package]] +name = "numpy" +version = "2.3.0" +description = "Fundamental package for array computing in Python" +optional = false +python-versions = ">=3.11" +groups = ["main"] +files = [ + {file = "numpy-2.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c3c9fdde0fa18afa1099d6257eb82890ea4f3102847e692193b54e00312a9ae9"}, + {file = "numpy-2.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:46d16f72c2192da7b83984aa5455baee640e33a9f1e61e656f29adf55e406c2b"}, + {file = "numpy-2.3.0-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:a0be278be9307c4ab06b788f2a077f05e180aea817b3e41cebbd5aaf7bd85ed3"}, + {file = "numpy-2.3.0-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:99224862d1412d2562248d4710126355d3a8db7672170a39d6909ac47687a8a4"}, + {file = "numpy-2.3.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = 
"sha256:2393a914db64b0ead0ab80c962e42d09d5f385802006a6c87835acb1f58adb96"}, + {file = "numpy-2.3.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:7729c8008d55e80784bd113787ce876ca117185c579c0d626f59b87d433ea779"}, + {file = "numpy-2.3.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:06d4fb37a8d383b769281714897420c5cc3545c79dc427df57fc9b852ee0bf58"}, + {file = "numpy-2.3.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:c39ec392b5db5088259c68250e342612db82dc80ce044cf16496cf14cf6bc6f8"}, + {file = "numpy-2.3.0-cp311-cp311-win32.whl", hash = "sha256:ee9d3ee70d62827bc91f3ea5eee33153212c41f639918550ac0475e3588da59f"}, + {file = "numpy-2.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:43c55b6a860b0eb44d42341438b03513cf3879cb3617afb749ad49307e164edd"}, + {file = "numpy-2.3.0-cp311-cp311-win_arm64.whl", hash = "sha256:2e6a1409eee0cb0316cb64640a49a49ca44deb1a537e6b1121dc7c458a1299a8"}, + {file = "numpy-2.3.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:389b85335838155a9076e9ad7f8fdba0827496ec2d2dc32ce69ce7898bde03ba"}, + {file = "numpy-2.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:9498f60cd6bb8238d8eaf468a3d5bb031d34cd12556af53510f05fcf581c1b7e"}, + {file = "numpy-2.3.0-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:622a65d40d8eb427d8e722fd410ac3ad4958002f109230bc714fa551044ebae2"}, + {file = "numpy-2.3.0-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:b9446d9d8505aadadb686d51d838f2b6688c9e85636a0c3abaeb55ed54756459"}, + {file = "numpy-2.3.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:50080245365d75137a2bf46151e975de63146ae6d79f7e6bd5c0e85c9931d06a"}, + {file = "numpy-2.3.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:c24bb4113c66936eeaa0dc1e47c74770453d34f46ee07ae4efd853a2ed1ad10a"}, + {file = "numpy-2.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:4d8d294287fdf685281e671886c6dcdf0291a7c19db3e5cb4178d07ccf6ecc67"}, + {file = "numpy-2.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = 
"sha256:6295f81f093b7f5769d1728a6bd8bf7466de2adfa771ede944ce6711382b89dc"}, + {file = "numpy-2.3.0-cp312-cp312-win32.whl", hash = "sha256:e6648078bdd974ef5d15cecc31b0c410e2e24178a6e10bf511e0557eed0f2570"}, + {file = "numpy-2.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:0898c67a58cdaaf29994bc0e2c65230fd4de0ac40afaf1584ed0b02cd74c6fdd"}, + {file = "numpy-2.3.0-cp312-cp312-win_arm64.whl", hash = "sha256:bd8df082b6c4695753ad6193018c05aac465d634834dca47a3ae06d4bb22d9ea"}, + {file = "numpy-2.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:5754ab5595bfa2c2387d241296e0381c21f44a4b90a776c3c1d39eede13a746a"}, + {file = "numpy-2.3.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d11fa02f77752d8099573d64e5fe33de3229b6632036ec08f7080f46b6649959"}, + {file = "numpy-2.3.0-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:aba48d17e87688a765ab1cd557882052f238e2f36545dfa8e29e6a91aef77afe"}, + {file = "numpy-2.3.0-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:4dc58865623023b63b10d52f18abaac3729346a7a46a778381e0e3af4b7f3beb"}, + {file = "numpy-2.3.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:df470d376f54e052c76517393fa443758fefcdd634645bc9c1f84eafc67087f0"}, + {file = "numpy-2.3.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:87717eb24d4a8a64683b7a4e91ace04e2f5c7c77872f823f02a94feee186168f"}, + {file = "numpy-2.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:d8fa264d56882b59dcb5ea4d6ab6f31d0c58a57b41aec605848b6eb2ef4a43e8"}, + {file = "numpy-2.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e651756066a0eaf900916497e20e02fe1ae544187cb0fe88de981671ee7f6270"}, + {file = "numpy-2.3.0-cp313-cp313-win32.whl", hash = "sha256:e43c3cce3b6ae5f94696669ff2a6eafd9a6b9332008bafa4117af70f4b88be6f"}, + {file = "numpy-2.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:81ae0bf2564cf475f94be4a27ef7bcf8af0c3e28da46770fc904da9abd5279b5"}, + {file = "numpy-2.3.0-cp313-cp313-win_arm64.whl", hash = 
"sha256:c8738baa52505fa6e82778580b23f945e3578412554d937093eac9205e845e6e"}, + {file = "numpy-2.3.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:39b27d8b38942a647f048b675f134dd5a567f95bfff481f9109ec308515c51d8"}, + {file = "numpy-2.3.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:0eba4a1ea88f9a6f30f56fdafdeb8da3774349eacddab9581a21234b8535d3d3"}, + {file = "numpy-2.3.0-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:b0f1f11d0a1da54927436505a5a7670b154eac27f5672afc389661013dfe3d4f"}, + {file = "numpy-2.3.0-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:690d0a5b60a47e1f9dcec7b77750a4854c0d690e9058b7bef3106e3ae9117808"}, + {file = "numpy-2.3.0-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:8b51ead2b258284458e570942137155978583e407babc22e3d0ed7af33ce06f8"}, + {file = "numpy-2.3.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:aaf81c7b82c73bd9b45e79cfb9476cb9c29e937494bfe9092c26aece812818ad"}, + {file = "numpy-2.3.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:f420033a20b4f6a2a11f585f93c843ac40686a7c3fa514060a97d9de93e5e72b"}, + {file = "numpy-2.3.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:d344ca32ab482bcf8735d8f95091ad081f97120546f3d250240868430ce52555"}, + {file = "numpy-2.3.0-cp313-cp313t-win32.whl", hash = "sha256:48a2e8eaf76364c32a1feaa60d6925eaf32ed7a040183b807e02674305beef61"}, + {file = "numpy-2.3.0-cp313-cp313t-win_amd64.whl", hash = "sha256:ba17f93a94e503551f154de210e4d50c5e3ee20f7e7a1b5f6ce3f22d419b93bb"}, + {file = "numpy-2.3.0-cp313-cp313t-win_arm64.whl", hash = "sha256:f14e016d9409680959691c109be98c436c6249eaf7f118b424679793607b5944"}, + {file = "numpy-2.3.0-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:80b46117c7359de8167cc00a2c7d823bdd505e8c7727ae0871025a86d668283b"}, + {file = "numpy-2.3.0-pp311-pypy311_pp73-macosx_14_0_arm64.whl", hash = "sha256:5814a0f43e70c061f47abd5857d120179609ddc32a613138cbb6c4e9e2dbdda5"}, + {file = 
"numpy-2.3.0-pp311-pypy311_pp73-macosx_14_0_x86_64.whl", hash = "sha256:ef6c1e88fd6b81ac6d215ed71dc8cd027e54d4bf1d2682d362449097156267a2"}, + {file = "numpy-2.3.0-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:33a5a12a45bb82d9997e2c0b12adae97507ad7c347546190a18ff14c28bbca12"}, + {file = "numpy-2.3.0-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:54dfc8681c1906d239e95ab1508d0a533c4a9505e52ee2d71a5472b04437ef97"}, + {file = "numpy-2.3.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:e017a8a251ff4d18d71f139e28bdc7c31edba7a507f72b1414ed902cbe48c74d"}, + {file = "numpy-2.3.0.tar.gz", hash = "sha256:581f87f9e9e9db2cba2141400e160e9dd644ee248788d6f90636eeb8fd9260a6"}, +] + +[[package]] +name = "oauthlib" +version = "3.2.2" +description = "A generic, spec-compliant, thorough implementation of the OAuth request-signing logic" +optional = false +python-versions = ">=3.6" +groups = ["main"] +files = [ + {file = "oauthlib-3.2.2-py3-none-any.whl", hash = "sha256:8139f29aac13e25d502680e9e19963e83f16838d48a0d71c287fe40e7067fbca"}, + {file = "oauthlib-3.2.2.tar.gz", hash = "sha256:9859c40929662bec5d64f34d01c99e093149682a3f38915dc0655d5a633dd918"}, +] + +[package.extras] +rsa = ["cryptography (>=3.0.0)"] +signals = ["blinker (>=1.4.0)"] +signedtoken = ["cryptography (>=3.0.0)", "pyjwt (>=2.0.0,<3)"] + +[[package]] +name = "opencensus" +version = "0.11.4" +description = "A stats collection and distributed tracing framework" +optional = false +python-versions = "*" +groups = ["main"] +files = [ + {file = "opencensus-0.11.4-py2.py3-none-any.whl", hash = "sha256:a18487ce68bc19900336e0ff4655c5a116daf10c1b3685ece8d971bddad6a864"}, + {file = "opencensus-0.11.4.tar.gz", hash = "sha256:cbef87d8b8773064ab60e5c2a1ced58bbaa38a6d052c41aec224958ce544eff2"}, +] + +[package.dependencies] +google-api-core = {version = ">=1.0.0,<3.0.0", markers = "python_version >= \"3.6\""} +opencensus-context = ">=0.1.3" +six = ">=1.16,<2.0" + +[[package]] +name = 
"opencensus-context" +version = "0.1.3" +description = "OpenCensus Runtime Context" +optional = false +python-versions = "*" +groups = ["main"] +files = [ + {file = "opencensus-context-0.1.3.tar.gz", hash = "sha256:a03108c3c10d8c80bb5ddf5c8a1f033161fa61972a9917f9b9b3a18517f0088c"}, + {file = "opencensus_context-0.1.3-py2.py3-none-any.whl", hash = "sha256:073bb0590007af276853009fac7e4bab1d523c3f03baf4cb4511ca38967c6039"}, +] + +[[package]] +name = "openshift-client" +version = "1.0.18" +description = "OpenShift python client" +optional = false +python-versions = "*" +groups = ["main"] +files = [ + {file = "openshift-client-1.0.18.tar.gz", hash = "sha256:be3979440cfd96788146a3a1650dabe939d4d516eea0b39f87e66d2ab39495b1"}, + {file = "openshift_client-1.0.18-py2.py3-none-any.whl", hash = "sha256:d8a84080307ccd9556f6c62a3707a3e6507baedee36fa425754f67db9ded528b"}, +] + +[package.dependencies] +paramiko = "*" +pyyaml = "*" +six = "*" + +[[package]] +name = "opentelemetry-api" +version = "1.34.1" +description = "OpenTelemetry Python API" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "opentelemetry_api-1.34.1-py3-none-any.whl", hash = "sha256:b7df4cb0830d5a6c29ad0c0691dbae874d8daefa934b8b1d642de48323d32a8c"}, + {file = "opentelemetry_api-1.34.1.tar.gz", hash = "sha256:64f0bd06d42824843731d05beea88d4d4b6ae59f9fe347ff7dfa2cc14233bbb3"}, +] + +[package.dependencies] +importlib-metadata = ">=6.0,<8.8.0" +typing-extensions = ">=4.5.0" + +[[package]] +name = "opentelemetry-exporter-prometheus" +version = "0.55b1" +description = "Prometheus Metric Exporter for OpenTelemetry" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "opentelemetry_exporter_prometheus-0.55b1-py3-none-any.whl", hash = "sha256:f364fbbff9e5de37a112ff104d1185fb1d7e2046c5ab5911e5afebc7ab3ddf0e"}, + {file = "opentelemetry_exporter_prometheus-0.55b1.tar.gz", hash = "sha256:d13ec0b22bf394113ff1ada5da98133a4b051779b803dae183188e26c4bd9ee0"}, +] 
+ +[package.dependencies] +opentelemetry-api = ">=1.12,<2.0" +opentelemetry-sdk = ">=1.34.1,<1.35.0" +prometheus-client = ">=0.5.0,<1.0.0" + +[[package]] +name = "opentelemetry-proto" +version = "1.11.1" +description = "OpenTelemetry Python Proto" +optional = false +python-versions = ">=3.6" +groups = ["main"] +files = [ + {file = "opentelemetry-proto-1.11.1.tar.gz", hash = "sha256:5df0ec69510a9e2414c0410d91a698ded5a04d3dd37f7d2a3e119e3c42a30647"}, + {file = "opentelemetry_proto-1.11.1-py3-none-any.whl", hash = "sha256:4d4663123b4777823aa533f478c6cef3ecbcf696d8dc6ac7fd6a90f37a01eafd"}, +] + +[package.dependencies] +protobuf = ">=3.13.0" + +[[package]] +name = "opentelemetry-sdk" +version = "1.34.1" +description = "OpenTelemetry Python SDK" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "opentelemetry_sdk-1.34.1-py3-none-any.whl", hash = "sha256:308effad4059562f1d92163c61c8141df649da24ce361827812c40abb2a1e96e"}, + {file = "opentelemetry_sdk-1.34.1.tar.gz", hash = "sha256:8091db0d763fcd6098d4781bbc80ff0971f94e260739aa6afe6fd379cdf3aa4d"}, +] + +[package.dependencies] +opentelemetry-api = "1.34.1" +opentelemetry-semantic-conventions = "0.55b1" +typing-extensions = ">=4.5.0" + +[[package]] +name = "opentelemetry-semantic-conventions" +version = "0.55b1" +description = "OpenTelemetry Semantic Conventions" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "opentelemetry_semantic_conventions-0.55b1-py3-none-any.whl", hash = "sha256:5da81dfdf7d52e3d37f8fe88d5e771e191de924cfff5f550ab0b8f7b2409baed"}, + {file = "opentelemetry_semantic_conventions-0.55b1.tar.gz", hash = "sha256:ef95b1f009159c28d7a7849f5cbc71c4c34c845bb514d66adfdf1b3fff3598b3"}, +] + +[package.dependencies] +opentelemetry-api = "1.34.1" +typing-extensions = ">=4.5.0" + +[[package]] +name = "overrides" +version = "7.7.0" +description = "A decorator to automatically detect mismatch when overriding a method." 
+optional = false +python-versions = ">=3.6" +groups = ["test"] +files = [ + {file = "overrides-7.7.0-py3-none-any.whl", hash = "sha256:c7ed9d062f78b8e4c1a7b70bd8796b35ead4d9f510227ef9c5dc7626c60d7e49"}, + {file = "overrides-7.7.0.tar.gz", hash = "sha256:55158fa3d93b98cc75299b1e67078ad9003ca27945c76162c1c0766d6f91820a"}, +] + +[[package]] +name = "packaging" +version = "25.0" +description = "Core utilities for Python packages" +optional = false +python-versions = ">=3.8" +groups = ["main", "docs", "test"] +files = [ + {file = "packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484"}, + {file = "packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f"}, +] + +[[package]] +name = "pandas" +version = "2.3.0" +description = "Powerful data structures for data analysis, time series, and statistics" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "pandas-2.3.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:625466edd01d43b75b1883a64d859168e4556261a5035b32f9d743b67ef44634"}, + {file = "pandas-2.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a6872d695c896f00df46b71648eea332279ef4077a409e2fe94220208b6bb675"}, + {file = "pandas-2.3.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f4dd97c19bd06bc557ad787a15b6489d2614ddaab5d104a0310eb314c724b2d2"}, + {file = "pandas-2.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:034abd6f3db8b9880aaee98f4f5d4dbec7c4829938463ec046517220b2f8574e"}, + {file = "pandas-2.3.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:23c2b2dc5213810208ca0b80b8666670eb4660bbfd9d45f58592cc4ddcfd62e1"}, + {file = "pandas-2.3.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:39ff73ec07be5e90330cc6ff5705c651ace83374189dcdcb46e6ff54b4a72cd6"}, + {file = "pandas-2.3.0-cp310-cp310-win_amd64.whl", hash = 
"sha256:40cecc4ea5abd2921682b57532baea5588cc5f80f0231c624056b146887274d2"}, + {file = "pandas-2.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:8adff9f138fc614347ff33812046787f7d43b3cef7c0f0171b3340cae333f6ca"}, + {file = "pandas-2.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e5f08eb9a445d07720776df6e641975665c9ea12c9d8a331e0f6890f2dcd76ef"}, + {file = "pandas-2.3.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fa35c266c8cd1a67d75971a1912b185b492d257092bdd2709bbdebe574ed228d"}, + {file = "pandas-2.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:14a0cc77b0f089d2d2ffe3007db58f170dae9b9f54e569b299db871a3ab5bf46"}, + {file = "pandas-2.3.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c06f6f144ad0a1bf84699aeea7eff6068ca5c63ceb404798198af7eb86082e33"}, + {file = "pandas-2.3.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ed16339bc354a73e0a609df36d256672c7d296f3f767ac07257801aa064ff73c"}, + {file = "pandas-2.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:fa07e138b3f6c04addfeaf56cc7fdb96c3b68a3fe5e5401251f231fce40a0d7a"}, + {file = "pandas-2.3.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:2eb4728a18dcd2908c7fccf74a982e241b467d178724545a48d0caf534b38ebf"}, + {file = "pandas-2.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b9d8c3187be7479ea5c3d30c32a5d73d62a621166675063b2edd21bc47614027"}, + {file = "pandas-2.3.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9ff730713d4c4f2f1c860e36c005c7cefc1c7c80c21c0688fd605aa43c9fcf09"}, + {file = "pandas-2.3.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba24af48643b12ffe49b27065d3babd52702d95ab70f50e1b34f71ca703e2c0d"}, + {file = "pandas-2.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:404d681c698e3c8a40a61d0cd9412cc7364ab9a9cc6e144ae2992e11a2e77a20"}, + {file = "pandas-2.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = 
"sha256:6021910b086b3ca756755e86ddc64e0ddafd5e58e076c72cb1585162e5ad259b"}, + {file = "pandas-2.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:094e271a15b579650ebf4c5155c05dcd2a14fd4fdd72cf4854b2f7ad31ea30be"}, + {file = "pandas-2.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:2c7e2fc25f89a49a11599ec1e76821322439d90820108309bf42130d2f36c983"}, + {file = "pandas-2.3.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:c6da97aeb6a6d233fb6b17986234cc723b396b50a3c6804776351994f2a658fd"}, + {file = "pandas-2.3.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb32dc743b52467d488e7a7c8039b821da2826a9ba4f85b89ea95274f863280f"}, + {file = "pandas-2.3.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:213cd63c43263dbb522c1f8a7c9d072e25900f6975596f883f4bebd77295d4f3"}, + {file = "pandas-2.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:1d2b33e68d0ce64e26a4acc2e72d747292084f4e8db4c847c6f5f6cbe56ed6d8"}, + {file = "pandas-2.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:430a63bae10b5086995db1b02694996336e5a8ac9a96b4200572b413dfdfccb9"}, + {file = "pandas-2.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:4930255e28ff5545e2ca404637bcc56f031893142773b3468dc021c6c32a1390"}, + {file = "pandas-2.3.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:f925f1ef673b4bd0271b1809b72b3270384f2b7d9d14a189b12b7fc02574d575"}, + {file = "pandas-2.3.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:e78ad363ddb873a631e92a3c063ade1ecfb34cae71e9a2be6ad100f875ac1042"}, + {file = "pandas-2.3.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:951805d146922aed8357e4cc5671b8b0b9be1027f0619cea132a9f3f65f2f09c"}, + {file = "pandas-2.3.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1a881bc1309f3fce34696d07b00f13335c41f5f5a8770a33b09ebe23261cfc67"}, + {file = "pandas-2.3.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = 
"sha256:e1991bbb96f4050b09b5f811253c4f3cf05ee89a589379aa36cd623f21a31d6f"}, + {file = "pandas-2.3.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:bb3be958022198531eb7ec2008cfc78c5b1eed51af8600c6c5d9160d89d8d249"}, + {file = "pandas-2.3.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9efc0acbbffb5236fbdf0409c04edce96bec4bdaa649d49985427bd1ec73e085"}, + {file = "pandas-2.3.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:75651c14fde635e680496148a8526b328e09fe0572d9ae9b638648c46a544ba3"}, + {file = "pandas-2.3.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf5be867a0541a9fb47a4be0c5790a4bccd5b77b92f0a59eeec9375fafc2aa14"}, + {file = "pandas-2.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:84141f722d45d0c2a89544dd29d35b3abfc13d2250ed7e68394eda7564bd6324"}, + {file = "pandas-2.3.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:f95a2aef32614ed86216d3c450ab12a4e82084e8102e355707a1d96e33d51c34"}, + {file = "pandas-2.3.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:e0f51973ba93a9f97185049326d75b942b9aeb472bec616a129806facb129ebb"}, + {file = "pandas-2.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:b198687ca9c8529662213538a9bb1e60fa0bf0f6af89292eb68fea28743fcd5a"}, + {file = "pandas-2.3.0.tar.gz", hash = "sha256:34600ab34ebf1131a7613a260a61dbe8b62c188ec0ea4c296da7c9a06b004133"}, +] + +[package.dependencies] +numpy = [ + {version = ">=1.23.2", markers = "python_version == \"3.11\""}, + {version = ">=1.26.0", markers = "python_version >= \"3.12\""}, +] +python-dateutil = ">=2.8.2" +pytz = ">=2020.1" +tzdata = ">=2022.7" + +[package.extras] +all = ["PyQt5 (>=5.15.9)", "SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "adbc-driver-sqlite (>=0.8.0)", "beautifulsoup4 (>=4.11.2)", "bottleneck (>=1.3.6)", "dataframe-api-compat (>=0.1.7)", "fastparquet (>=2022.12.0)", "fsspec (>=2022.11.0)", "gcsfs (>=2022.11.0)", "html5lib (>=1.1)", "hypothesis (>=6.46.1)", "jinja2 (>=3.1.2)", "lxml 
(>=4.9.2)", "matplotlib (>=3.6.3)", "numba (>=0.56.4)", "numexpr (>=2.8.4)", "odfpy (>=1.4.1)", "openpyxl (>=3.1.0)", "pandas-gbq (>=0.19.0)", "psycopg2 (>=2.9.6)", "pyarrow (>=10.0.1)", "pymysql (>=1.0.2)", "pyreadstat (>=1.2.0)", "pytest (>=7.3.2)", "pytest-xdist (>=2.2.0)", "python-calamine (>=0.1.7)", "pyxlsb (>=1.0.10)", "qtpy (>=2.3.0)", "s3fs (>=2022.11.0)", "scipy (>=1.10.0)", "tables (>=3.8.0)", "tabulate (>=0.9.0)", "xarray (>=2022.12.0)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.5)", "zstandard (>=0.19.0)"] +aws = ["s3fs (>=2022.11.0)"] +clipboard = ["PyQt5 (>=5.15.9)", "qtpy (>=2.3.0)"] +compression = ["zstandard (>=0.19.0)"] +computation = ["scipy (>=1.10.0)", "xarray (>=2022.12.0)"] +consortium-standard = ["dataframe-api-compat (>=0.1.7)"] +excel = ["odfpy (>=1.4.1)", "openpyxl (>=3.1.0)", "python-calamine (>=0.1.7)", "pyxlsb (>=1.0.10)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.5)"] +feather = ["pyarrow (>=10.0.1)"] +fss = ["fsspec (>=2022.11.0)"] +gcp = ["gcsfs (>=2022.11.0)", "pandas-gbq (>=0.19.0)"] +hdf5 = ["tables (>=3.8.0)"] +html = ["beautifulsoup4 (>=4.11.2)", "html5lib (>=1.1)", "lxml (>=4.9.2)"] +mysql = ["SQLAlchemy (>=2.0.0)", "pymysql (>=1.0.2)"] +output-formatting = ["jinja2 (>=3.1.2)", "tabulate (>=0.9.0)"] +parquet = ["pyarrow (>=10.0.1)"] +performance = ["bottleneck (>=1.3.6)", "numba (>=0.56.4)", "numexpr (>=2.8.4)"] +plot = ["matplotlib (>=3.6.3)"] +postgresql = ["SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "psycopg2 (>=2.9.6)"] +pyarrow = ["pyarrow (>=10.0.1)"] +spss = ["pyreadstat (>=1.2.0)"] +sql-other = ["SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "adbc-driver-sqlite (>=0.8.0)"] +test = ["hypothesis (>=6.46.1)", "pytest (>=7.3.2)", "pytest-xdist (>=2.2.0)"] +xml = ["lxml (>=4.9.2)"] + +[[package]] +name = "pandocfilters" +version = "1.5.1" +description = "Utilities for writing pandoc filters in python" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +groups = ["test"] 
+files = [ + {file = "pandocfilters-1.5.1-py2.py3-none-any.whl", hash = "sha256:93be382804a9cdb0a7267585f157e5d1731bbe5545a85b268d6f5fe6232de2bc"}, + {file = "pandocfilters-1.5.1.tar.gz", hash = "sha256:002b4a555ee4ebc03f8b66307e287fa492e4a77b4ea14d3f934328297bb4939e"}, +] + +[[package]] +name = "paramiko" +version = "3.5.1" +description = "SSH2 protocol library" +optional = false +python-versions = ">=3.6" +groups = ["main"] +files = [ + {file = "paramiko-3.5.1-py3-none-any.whl", hash = "sha256:43b9a0501fc2b5e70680388d9346cf252cfb7d00b0667c39e80eb43a408b8f61"}, + {file = "paramiko-3.5.1.tar.gz", hash = "sha256:b2c665bc45b2b215bd7d7f039901b14b067da00f3a11e6640995fd58f2664822"}, +] + +[package.dependencies] +bcrypt = ">=3.2" +cryptography = ">=3.3" +pynacl = ">=1.5" + +[package.extras] +all = ["gssapi (>=1.4.1) ; platform_system != \"Windows\"", "invoke (>=2.0)", "pyasn1 (>=0.1.7)", "pywin32 (>=2.1.8) ; platform_system == \"Windows\""] +gssapi = ["gssapi (>=1.4.1) ; platform_system != \"Windows\"", "pyasn1 (>=0.1.7)", "pywin32 (>=2.1.8) ; platform_system == \"Windows\""] +invoke = ["invoke (>=2.0)"] + +[[package]] +name = "parso" +version = "0.8.4" +description = "A Python Parser" +optional = false +python-versions = ">=3.6" +groups = ["main", "test"] +files = [ + {file = "parso-0.8.4-py2.py3-none-any.whl", hash = "sha256:a418670a20291dacd2dddc80c377c5c3791378ee1e8d12bffc35420643d43f18"}, + {file = "parso-0.8.4.tar.gz", hash = "sha256:eb3a7b58240fb99099a345571deecc0f9540ea5f4dd2fe14c2a99d6b281ab92d"}, +] + +[package.extras] +qa = ["flake8 (==5.0.4)", "mypy (==0.971)", "types-setuptools (==67.2.0.1)"] +testing = ["docopt", "pytest"] + +[[package]] +name = "pexpect" +version = "4.9.0" +description = "Pexpect allows easy control of interactive console applications." 
+optional = false +python-versions = "*" +groups = ["main", "test"] +markers = "sys_platform != \"win32\" and sys_platform != \"emscripten\"" +files = [ + {file = "pexpect-4.9.0-py2.py3-none-any.whl", hash = "sha256:7236d1e080e4936be2dc3e326cec0af72acf9212a7e1d060210e70a47e253523"}, + {file = "pexpect-4.9.0.tar.gz", hash = "sha256:ee7d41123f3c9911050ea2c2dac107568dc43b2d3b0c7557a33212c398ead30f"}, +] + +[package.dependencies] +ptyprocess = ">=0.5" + +[[package]] +name = "platformdirs" +version = "4.3.8" +description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." +optional = false +python-versions = ">=3.9" +groups = ["main", "test"] +files = [ + {file = "platformdirs-4.3.8-py3-none-any.whl", hash = "sha256:ff7059bb7eb1179e2685604f4aaf157cfd9535242bd23742eadc3c13542139b4"}, + {file = "platformdirs-4.3.8.tar.gz", hash = "sha256:3d512d96e16bcb959a814c9f348431070822a6496326a4be0911c40b5a74c2bc"}, +] + +[package.extras] +docs = ["furo (>=2024.8.6)", "proselint (>=0.14)", "sphinx (>=8.1.3)", "sphinx-autodoc-typehints (>=3)"] +test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=8.3.4)", "pytest-cov (>=6)", "pytest-mock (>=3.14)"] +type = ["mypy (>=1.14.1)"] + +[[package]] +name = "pluggy" +version = "1.6.0" +description = "plugin and hook calling mechanisms for python" +optional = false +python-versions = ">=3.9" +groups = ["test"] +files = [ + {file = "pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746"}, + {file = "pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3"}, +] + +[package.extras] +dev = ["pre-commit", "tox"] +testing = ["coverage", "pytest", "pytest-benchmark"] + +[[package]] +name = "prometheus-client" +version = "0.22.1" +description = "Python client for the Prometheus monitoring system." 
+optional = false +python-versions = ">=3.9" +groups = ["main", "test"] +files = [ + {file = "prometheus_client-0.22.1-py3-none-any.whl", hash = "sha256:cca895342e308174341b2cbf99a56bef291fbc0ef7b9e5412a0f26d653ba7094"}, + {file = "prometheus_client-0.22.1.tar.gz", hash = "sha256:190f1331e783cf21eb60bca559354e0a4d4378facecf78f5428c39b675d20d28"}, +] + +[package.extras] +twisted = ["twisted"] + +[[package]] +name = "prompt-toolkit" +version = "3.0.51" +description = "Library for building powerful interactive command lines in Python" +optional = false +python-versions = ">=3.8" +groups = ["main", "test"] +files = [ + {file = "prompt_toolkit-3.0.51-py3-none-any.whl", hash = "sha256:52742911fde84e2d423e2f9a4cf1de7d7ac4e51958f648d9540e0fb8db077b07"}, + {file = "prompt_toolkit-3.0.51.tar.gz", hash = "sha256:931a162e3b27fc90c86f1b48bb1fb2c528c2761475e57c9c06de13311c7b54ed"}, +] + +[package.dependencies] +wcwidth = "*" + +[[package]] +name = "propcache" +version = "0.3.2" +description = "Accelerated property cache" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "propcache-0.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:22d9962a358aedbb7a2e36187ff273adeaab9743373a272976d2e348d08c7770"}, + {file = "propcache-0.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0d0fda578d1dc3f77b6b5a5dce3b9ad69a8250a891760a548df850a5e8da87f3"}, + {file = "propcache-0.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3def3da3ac3ce41562d85db655d18ebac740cb3fa4367f11a52b3da9d03a5cc3"}, + {file = "propcache-0.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9bec58347a5a6cebf239daba9bda37dffec5b8d2ce004d9fe4edef3d2815137e"}, + {file = "propcache-0.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:55ffda449a507e9fbd4aca1a7d9aa6753b07d6166140e5a18d2ac9bc49eac220"}, + {file = "propcache-0.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:64a67fb39229a8a8491dd42f864e5e263155e729c2e7ff723d6e25f596b1e8cb"}, + {file = "propcache-0.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9da1cf97b92b51253d5b68cf5a2b9e0dafca095e36b7f2da335e27dc6172a614"}, + {file = "propcache-0.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5f559e127134b07425134b4065be45b166183fdcb433cb6c24c8e4149056ad50"}, + {file = "propcache-0.3.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:aff2e4e06435d61f11a428360a932138d0ec288b0a31dd9bd78d200bd4a2b339"}, + {file = "propcache-0.3.2-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:4927842833830942a5d0a56e6f4839bc484785b8e1ce8d287359794818633ba0"}, + {file = "propcache-0.3.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:6107ddd08b02654a30fb8ad7a132021759d750a82578b94cd55ee2772b6ebea2"}, + {file = "propcache-0.3.2-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:70bd8b9cd6b519e12859c99f3fc9a93f375ebd22a50296c3a295028bea73b9e7"}, + {file = "propcache-0.3.2-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:2183111651d710d3097338dd1893fcf09c9f54e27ff1a8795495a16a469cc90b"}, + {file = "propcache-0.3.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:fb075ad271405dcad8e2a7ffc9a750a3bf70e533bd86e89f0603e607b93aa64c"}, + {file = "propcache-0.3.2-cp310-cp310-win32.whl", hash = "sha256:404d70768080d3d3bdb41d0771037da19d8340d50b08e104ca0e7f9ce55fce70"}, + {file = "propcache-0.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:7435d766f978b4ede777002e6b3b6641dd229cd1da8d3d3106a45770365f9ad9"}, + {file = "propcache-0.3.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:0b8d2f607bd8f80ddc04088bc2a037fdd17884a6fcadc47a96e334d72f3717be"}, + {file = "propcache-0.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:06766d8f34733416e2e34f46fea488ad5d60726bb9481d3cddf89a6fa2d9603f"}, + {file = "propcache-0.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:a2dc1f4a1df4fecf4e6f68013575ff4af84ef6f478fe5344317a65d38a8e6dc9"}, + {file = "propcache-0.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:be29c4f4810c5789cf10ddf6af80b041c724e629fa51e308a7a0fb19ed1ef7bf"}, + {file = "propcache-0.3.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:59d61f6970ecbd8ff2e9360304d5c8876a6abd4530cb752c06586849ac8a9dc9"}, + {file = "propcache-0.3.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:62180e0b8dbb6b004baec00a7983e4cc52f5ada9cd11f48c3528d8cfa7b96a66"}, + {file = "propcache-0.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c144ca294a204c470f18cf4c9d78887810d04a3e2fbb30eea903575a779159df"}, + {file = "propcache-0.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c5c2a784234c28854878d68978265617aa6dc0780e53d44b4d67f3651a17a9a2"}, + {file = "propcache-0.3.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:5745bc7acdafa978ca1642891b82c19238eadc78ba2aaa293c6863b304e552d7"}, + {file = "propcache-0.3.2-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:c0075bf773d66fa8c9d41f66cc132ecc75e5bb9dd7cce3cfd14adc5ca184cb95"}, + {file = "propcache-0.3.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:5f57aa0847730daceff0497f417c9de353c575d8da3579162cc74ac294c5369e"}, + {file = "propcache-0.3.2-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:eef914c014bf72d18efb55619447e0aecd5fb7c2e3fa7441e2e5d6099bddff7e"}, + {file = "propcache-0.3.2-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:2a4092e8549031e82facf3decdbc0883755d5bbcc62d3aea9d9e185549936dcf"}, + {file = "propcache-0.3.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:85871b050f174bc0bfb437efbdb68aaf860611953ed12418e4361bc9c392749e"}, + {file = "propcache-0.3.2-cp311-cp311-win32.whl", hash = "sha256:36c8d9b673ec57900c3554264e630d45980fd302458e4ac801802a7fd2ef7897"}, + {file 
= "propcache-0.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:e53af8cb6a781b02d2ea079b5b853ba9430fcbe18a8e3ce647d5982a3ff69f39"}, + {file = "propcache-0.3.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:8de106b6c84506b31c27168582cd3cb3000a6412c16df14a8628e5871ff83c10"}, + {file = "propcache-0.3.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:28710b0d3975117239c76600ea351934ac7b5ff56e60953474342608dbbb6154"}, + {file = "propcache-0.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce26862344bdf836650ed2487c3d724b00fbfec4233a1013f597b78c1cb73615"}, + {file = "propcache-0.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bca54bd347a253af2cf4544bbec232ab982f4868de0dd684246b67a51bc6b1db"}, + {file = "propcache-0.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:55780d5e9a2ddc59711d727226bb1ba83a22dd32f64ee15594b9392b1f544eb1"}, + {file = "propcache-0.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:035e631be25d6975ed87ab23153db6a73426a48db688070d925aa27e996fe93c"}, + {file = "propcache-0.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ee6f22b6eaa39297c751d0e80c0d3a454f112f5c6481214fcf4c092074cecd67"}, + {file = "propcache-0.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7ca3aee1aa955438c4dba34fc20a9f390e4c79967257d830f137bd5a8a32ed3b"}, + {file = "propcache-0.3.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:7a4f30862869fa2b68380d677cc1c5fcf1e0f2b9ea0cf665812895c75d0ca3b8"}, + {file = "propcache-0.3.2-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:b77ec3c257d7816d9f3700013639db7491a434644c906a2578a11daf13176251"}, + {file = "propcache-0.3.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:cab90ac9d3f14b2d5050928483d3d3b8fb6b4018893fc75710e6aa361ecb2474"}, + {file = "propcache-0.3.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = 
"sha256:0b504d29f3c47cf6b9e936c1852246c83d450e8e063d50562115a6be6d3a2535"}, + {file = "propcache-0.3.2-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:ce2ac2675a6aa41ddb2a0c9cbff53780a617ac3d43e620f8fd77ba1c84dcfc06"}, + {file = "propcache-0.3.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:62b4239611205294cc433845b914131b2a1f03500ff3c1ed093ed216b82621e1"}, + {file = "propcache-0.3.2-cp312-cp312-win32.whl", hash = "sha256:df4a81b9b53449ebc90cc4deefb052c1dd934ba85012aa912c7ea7b7e38b60c1"}, + {file = "propcache-0.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:7046e79b989d7fe457bb755844019e10f693752d169076138abf17f31380800c"}, + {file = "propcache-0.3.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ca592ed634a73ca002967458187109265e980422116c0a107cf93d81f95af945"}, + {file = "propcache-0.3.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:9ecb0aad4020e275652ba3975740f241bd12a61f1a784df044cf7477a02bc252"}, + {file = "propcache-0.3.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7f08f1cc28bd2eade7a8a3d2954ccc673bb02062e3e7da09bc75d843386b342f"}, + {file = "propcache-0.3.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d1a342c834734edb4be5ecb1e9fb48cb64b1e2320fccbd8c54bf8da8f2a84c33"}, + {file = "propcache-0.3.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8a544caaae1ac73f1fecfae70ded3e93728831affebd017d53449e3ac052ac1e"}, + {file = "propcache-0.3.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:310d11aa44635298397db47a3ebce7db99a4cc4b9bbdfcf6c98a60c8d5261cf1"}, + {file = "propcache-0.3.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c1396592321ac83157ac03a2023aa6cc4a3cc3cfdecb71090054c09e5a7cce3"}, + {file = "propcache-0.3.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8cabf5b5902272565e78197edb682017d21cf3b550ba0460ee473753f28d23c1"}, + {file = 
"propcache-0.3.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0a2f2235ac46a7aa25bdeb03a9e7060f6ecbd213b1f9101c43b3090ffb971ef6"}, + {file = "propcache-0.3.2-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:92b69e12e34869a6970fd2f3da91669899994b47c98f5d430b781c26f1d9f387"}, + {file = "propcache-0.3.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:54e02207c79968ebbdffc169591009f4474dde3b4679e16634d34c9363ff56b4"}, + {file = "propcache-0.3.2-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:4adfb44cb588001f68c5466579d3f1157ca07f7504fc91ec87862e2b8e556b88"}, + {file = "propcache-0.3.2-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:fd3e6019dc1261cd0291ee8919dd91fbab7b169bb76aeef6c716833a3f65d206"}, + {file = "propcache-0.3.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4c181cad81158d71c41a2bce88edce078458e2dd5ffee7eddd6b05da85079f43"}, + {file = "propcache-0.3.2-cp313-cp313-win32.whl", hash = "sha256:8a08154613f2249519e549de2330cf8e2071c2887309a7b07fb56098f5170a02"}, + {file = "propcache-0.3.2-cp313-cp313-win_amd64.whl", hash = "sha256:e41671f1594fc4ab0a6dec1351864713cb3a279910ae8b58f884a88a0a632c05"}, + {file = "propcache-0.3.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:9a3cf035bbaf035f109987d9d55dc90e4b0e36e04bbbb95af3055ef17194057b"}, + {file = "propcache-0.3.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:156c03d07dc1323d8dacaa221fbe028c5c70d16709cdd63502778e6c3ccca1b0"}, + {file = "propcache-0.3.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:74413c0ba02ba86f55cf60d18daab219f7e531620c15f1e23d95563f505efe7e"}, + {file = "propcache-0.3.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f066b437bb3fa39c58ff97ab2ca351db465157d68ed0440abecb21715eb24b28"}, + {file = "propcache-0.3.2-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f1304b085c83067914721e7e9d9917d41ad87696bf70f0bc7dee450e9c71ad0a"}, + {file = 
"propcache-0.3.2-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ab50cef01b372763a13333b4e54021bdcb291fc9a8e2ccb9c2df98be51bcde6c"}, + {file = "propcache-0.3.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fad3b2a085ec259ad2c2842666b2a0a49dea8463579c606426128925af1ed725"}, + {file = "propcache-0.3.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:261fa020c1c14deafd54c76b014956e2f86991af198c51139faf41c4d5e83892"}, + {file = "propcache-0.3.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:46d7f8aa79c927e5f987ee3a80205c987717d3659f035c85cf0c3680526bdb44"}, + {file = "propcache-0.3.2-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:6d8f3f0eebf73e3c0ff0e7853f68be638b4043c65a70517bb575eff54edd8dbe"}, + {file = "propcache-0.3.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:03c89c1b14a5452cf15403e291c0ccd7751d5b9736ecb2c5bab977ad6c5bcd81"}, + {file = "propcache-0.3.2-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:0cc17efde71e12bbaad086d679ce575268d70bc123a5a71ea7ad76f70ba30bba"}, + {file = "propcache-0.3.2-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:acdf05d00696bc0447e278bb53cb04ca72354e562cf88ea6f9107df8e7fd9770"}, + {file = "propcache-0.3.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:4445542398bd0b5d32df908031cb1b30d43ac848e20470a878b770ec2dcc6330"}, + {file = "propcache-0.3.2-cp313-cp313t-win32.whl", hash = "sha256:f86e5d7cd03afb3a1db8e9f9f6eff15794e79e791350ac48a8c924e6f439f394"}, + {file = "propcache-0.3.2-cp313-cp313t-win_amd64.whl", hash = "sha256:9704bedf6e7cbe3c65eca4379a9b53ee6a83749f047808cbb5044d40d7d72198"}, + {file = "propcache-0.3.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:a7fad897f14d92086d6b03fdd2eb844777b0c4d7ec5e3bac0fbae2ab0602bbe5"}, + {file = "propcache-0.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = 
"sha256:1f43837d4ca000243fd7fd6301947d7cb93360d03cd08369969450cc6b2ce3b4"}, + {file = "propcache-0.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:261df2e9474a5949c46e962065d88eb9b96ce0f2bd30e9d3136bcde84befd8f2"}, + {file = "propcache-0.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e514326b79e51f0a177daab1052bc164d9d9e54133797a3a58d24c9c87a3fe6d"}, + {file = "propcache-0.3.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d4a996adb6904f85894570301939afeee65f072b4fd265ed7e569e8d9058e4ec"}, + {file = "propcache-0.3.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:76cace5d6b2a54e55b137669b30f31aa15977eeed390c7cbfb1dafa8dfe9a701"}, + {file = "propcache-0.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:31248e44b81d59d6addbb182c4720f90b44e1efdc19f58112a3c3a1615fb47ef"}, + {file = "propcache-0.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:abb7fa19dbf88d3857363e0493b999b8011eea856b846305d8c0512dfdf8fbb1"}, + {file = "propcache-0.3.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:d81ac3ae39d38588ad0549e321e6f773a4e7cc68e7751524a22885d5bbadf886"}, + {file = "propcache-0.3.2-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:cc2782eb0f7a16462285b6f8394bbbd0e1ee5f928034e941ffc444012224171b"}, + {file = "propcache-0.3.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:db429c19a6c7e8a1c320e6a13c99799450f411b02251fb1b75e6217cf4a14fcb"}, + {file = "propcache-0.3.2-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:21d8759141a9e00a681d35a1f160892a36fb6caa715ba0b832f7747da48fb6ea"}, + {file = "propcache-0.3.2-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:2ca6d378f09adb13837614ad2754fa8afaee330254f404299611bce41a8438cb"}, + {file = "propcache-0.3.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:34a624af06c048946709f4278b4176470073deda88d91342665d95f7c6270fbe"}, + {file = 
"propcache-0.3.2-cp39-cp39-win32.whl", hash = "sha256:4ba3fef1c30f306b1c274ce0b8baaa2c3cdd91f645c48f06394068f37d3837a1"}, + {file = "propcache-0.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:7a2368eed65fc69a7a7a40b27f22e85e7627b74216f0846b04ba5c116e191ec9"}, + {file = "propcache-0.3.2-py3-none-any.whl", hash = "sha256:98f1ec44fb675f5052cccc8e609c46ed23a35a1cfd18545ad4e29002d858a43f"}, + {file = "propcache-0.3.2.tar.gz", hash = "sha256:20d7d62e4e7ef05f221e0db2856b979540686342e7dd9973b815599c7057e168"}, +] + +[[package]] +name = "proto-plus" +version = "1.26.1" +description = "Beautiful, Pythonic protocol buffers" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "proto_plus-1.26.1-py3-none-any.whl", hash = "sha256:13285478c2dcf2abb829db158e1047e2f1e8d63a077d94263c2b88b043c75a66"}, + {file = "proto_plus-1.26.1.tar.gz", hash = "sha256:21a515a4c4c0088a773899e23c7bbade3d18f9c66c73edd4c7ee3816bc96a012"}, +] + +[package.dependencies] +protobuf = ">=3.19.0,<7.0.0" + +[package.extras] +testing = ["google-api-core (>=1.31.5)"] + +[[package]] +name = "protobuf" +version = "6.31.1" +description = "" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "protobuf-6.31.1-cp310-abi3-win32.whl", hash = "sha256:7fa17d5a29c2e04b7d90e5e32388b8bfd0e7107cd8e616feef7ed3fa6bdab5c9"}, + {file = "protobuf-6.31.1-cp310-abi3-win_amd64.whl", hash = "sha256:426f59d2964864a1a366254fa703b8632dcec0790d8862d30034d8245e1cd447"}, + {file = "protobuf-6.31.1-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:6f1227473dc43d44ed644425268eb7c2e488ae245d51c6866d19fe158e207402"}, + {file = "protobuf-6.31.1-cp39-abi3-manylinux2014_aarch64.whl", hash = "sha256:a40fc12b84c154884d7d4c4ebd675d5b3b5283e155f324049ae396b95ddebc39"}, + {file = "protobuf-6.31.1-cp39-abi3-manylinux2014_x86_64.whl", hash = "sha256:4ee898bf66f7a8b0bd21bce523814e6fbd8c6add948045ce958b73af7e8878c6"}, + {file = "protobuf-6.31.1-cp39-cp39-win32.whl", hash = 
"sha256:0414e3aa5a5f3ff423828e1e6a6e907d6c65c1d5b7e6e975793d5590bdeecc16"}, + {file = "protobuf-6.31.1-cp39-cp39-win_amd64.whl", hash = "sha256:8764cf4587791e7564051b35524b72844f845ad0bb011704c3736cce762d8fe9"}, + {file = "protobuf-6.31.1-py3-none-any.whl", hash = "sha256:720a6c7e6b77288b85063569baae8536671b39f15cc22037ec7045658d80489e"}, + {file = "protobuf-6.31.1.tar.gz", hash = "sha256:d8cac4c982f0b957a4dc73a80e2ea24fab08e679c0de9deb835f4a12d69aca9a"}, +] + +[[package]] +name = "psutil" +version = "7.0.0" +description = "Cross-platform lib for process and system monitoring in Python. NOTE: the syntax of this script MUST be kept compatible with Python 2.7." +optional = false +python-versions = ">=3.6" +groups = ["test"] +files = [ + {file = "psutil-7.0.0-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:101d71dc322e3cffd7cea0650b09b3d08b8e7c4109dd6809fe452dfd00e58b25"}, + {file = "psutil-7.0.0-cp36-abi3-macosx_11_0_arm64.whl", hash = "sha256:39db632f6bb862eeccf56660871433e111b6ea58f2caea825571951d4b6aa3da"}, + {file = "psutil-7.0.0-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1fcee592b4c6f146991ca55919ea3d1f8926497a713ed7faaf8225e174581e91"}, + {file = "psutil-7.0.0-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4b1388a4f6875d7e2aff5c4ca1cc16c545ed41dd8bb596cefea80111db353a34"}, + {file = "psutil-7.0.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5f098451abc2828f7dc6b58d44b532b22f2088f4999a937557b603ce72b1993"}, + {file = "psutil-7.0.0-cp36-cp36m-win32.whl", hash = "sha256:84df4eb63e16849689f76b1ffcb36db7b8de703d1bc1fe41773db487621b6c17"}, + {file = "psutil-7.0.0-cp36-cp36m-win_amd64.whl", hash = "sha256:1e744154a6580bc968a0195fd25e80432d3afec619daf145b9e5ba16cc1d688e"}, + {file = "psutil-7.0.0-cp37-abi3-win32.whl", hash = "sha256:ba3fcef7523064a6c9da440fc4d6bd07da93ac726b5733c29027d7dc95b39d99"}, + 
{file = "psutil-7.0.0-cp37-abi3-win_amd64.whl", hash = "sha256:4cf3d4eb1aa9b348dec30105c55cd9b7d4629285735a102beb4441e38db90553"}, + {file = "psutil-7.0.0.tar.gz", hash = "sha256:7be9c3eba38beccb6495ea33afd982a44074b78f28c434a1f51cc07fd315c456"}, +] + +[package.extras] +dev = ["abi3audit", "black (==24.10.0)", "check-manifest", "coverage", "packaging", "pylint", "pyperf", "pypinfo", "pytest", "pytest-cov", "pytest-xdist", "requests", "rstcheck", "ruff", "setuptools", "sphinx", "sphinx_rtd_theme", "toml-sort", "twine", "virtualenv", "vulture", "wheel"] +test = ["pytest", "pytest-xdist", "setuptools"] + +[[package]] +name = "ptyprocess" +version = "0.7.0" +description = "Run a subprocess in a pseudo terminal" +optional = false +python-versions = "*" +groups = ["main", "test"] +files = [ + {file = "ptyprocess-0.7.0-py2.py3-none-any.whl", hash = "sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35"}, + {file = "ptyprocess-0.7.0.tar.gz", hash = "sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220"}, +] +markers = {main = "sys_platform != \"win32\" and sys_platform != \"emscripten\"", test = "sys_platform != \"win32\" and sys_platform != \"emscripten\" or os_name != \"nt\""} + +[[package]] +name = "pure-eval" +version = "0.2.3" +description = "Safely evaluate AST nodes without side effects" +optional = false +python-versions = "*" +groups = ["main", "test"] +files = [ + {file = "pure_eval-0.2.3-py3-none-any.whl", hash = "sha256:1db8e35b67b3d218d818ae653e27f06c3aa420901fa7b081ca98cbedc874e0d0"}, + {file = "pure_eval-0.2.3.tar.gz", hash = "sha256:5f4e983f40564c576c7c8635ae88db5956bb2229d7e9237d03b3c0b0190eaf42"}, +] + +[package.extras] +tests = ["pytest"] + +[[package]] +name = "py-spy" +version = "0.4.0" +description = "Sampling profiler for Python programs" +optional = false +python-versions = "*" +groups = ["main"] +files = [ + {file = "py_spy-0.4.0-py2.py3-none-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", 
hash = "sha256:f2cf3f7130e7d780471faa5957441d3b4e0ec39a79b2c00f4c33d494f7728428"}, + {file = "py_spy-0.4.0-py2.py3-none-macosx_11_0_arm64.whl", hash = "sha256:47cdda4c34d9b6cb01f3aaeceb2e88faf57da880207fe72ff6ff97e9bb6cc8a9"}, + {file = "py_spy-0.4.0-py2.py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eee3d0bde85ca5cf4f01f012d461180ca76c24835a96f7b5c4ded64eb6a008ab"}, + {file = "py_spy-0.4.0-py2.py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c5f06ffce4c9c98b7fc9f5e67e5e7db591173f1351837633f3f23d9378b1d18a"}, + {file = "py_spy-0.4.0-py2.py3-none-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:87573e64dbfdfc89ba2e0f5e2f525aa84e0299c7eb6454b47ea335fde583a7a0"}, + {file = "py_spy-0.4.0-py2.py3-none-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:8bf2f3702cef367a489faa45177b41a6c31b2a3e5bd78c978d44e29340152f5a"}, + {file = "py_spy-0.4.0-py2.py3-none-win_amd64.whl", hash = "sha256:77d8f637ade38367d944874776f45b703b7ac5938b1f7be8891f3a5876ddbb96"}, + {file = "py_spy-0.4.0.tar.gz", hash = "sha256:806602ce7972782cc9c1e383f339bfc27bfb822d42485e6a3e0530ae5040e1f0"}, +] + +[[package]] +name = "pyarrow" +version = "17.0.0" +description = "Python library for Apache Arrow" +optional = false +python-versions = ">=3.8" +groups = ["main"] +markers = "sys_platform == \"darwin\" and platform_machine == \"x86_64\"" +files = [ + {file = "pyarrow-17.0.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:a5c8b238d47e48812ee577ee20c9a2779e6a5904f1708ae240f53ecbee7c9f07"}, + {file = "pyarrow-17.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:db023dc4c6cae1015de9e198d41250688383c3f9af8f565370ab2b4cb5f62655"}, + {file = "pyarrow-17.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da1e060b3876faa11cee287839f9cc7cdc00649f475714b8680a05fd9071d545"}, + {file = "pyarrow-17.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:75c06d4624c0ad6674364bb46ef38c3132768139ddec1c56582dbac54f2663e2"}, + {file = "pyarrow-17.0.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:fa3c246cc58cb5a4a5cb407a18f193354ea47dd0648194e6265bd24177982fe8"}, + {file = "pyarrow-17.0.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:f7ae2de664e0b158d1607699a16a488de3d008ba99b3a7aa5de1cbc13574d047"}, + {file = "pyarrow-17.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:5984f416552eea15fd9cee03da53542bf4cddaef5afecefb9aa8d1010c335087"}, + {file = "pyarrow-17.0.0-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:1c8856e2ef09eb87ecf937104aacfa0708f22dfeb039c363ec99735190ffb977"}, + {file = "pyarrow-17.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2e19f569567efcbbd42084e87f948778eb371d308e137a0f97afe19bb860ccb3"}, + {file = "pyarrow-17.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b244dc8e08a23b3e352899a006a26ae7b4d0da7bb636872fa8f5884e70acf15"}, + {file = "pyarrow-17.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b72e87fe3e1db343995562f7fff8aee354b55ee83d13afba65400c178ab2597"}, + {file = "pyarrow-17.0.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:dc5c31c37409dfbc5d014047817cb4ccd8c1ea25d19576acf1a001fe07f5b420"}, + {file = "pyarrow-17.0.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:e3343cb1e88bc2ea605986d4b94948716edc7a8d14afd4e2c097232f729758b4"}, + {file = "pyarrow-17.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:a27532c38f3de9eb3e90ecab63dfda948a8ca859a66e3a47f5f42d1e403c4d03"}, + {file = "pyarrow-17.0.0-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:9b8a823cea605221e61f34859dcc03207e52e409ccf6354634143e23af7c8d22"}, + {file = "pyarrow-17.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f1e70de6cb5790a50b01d2b686d54aaf73da01266850b05e3af2a1bc89e16053"}, + {file = "pyarrow-17.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:0071ce35788c6f9077ff9ecba4858108eebe2ea5a3f7cf2cf55ebc1dbc6ee24a"}, + {file = "pyarrow-17.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:757074882f844411fcca735e39aae74248a1531367a7c80799b4266390ae51cc"}, + {file = "pyarrow-17.0.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:9ba11c4f16976e89146781a83833df7f82077cdab7dc6232c897789343f7891a"}, + {file = "pyarrow-17.0.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:b0c6ac301093b42d34410b187bba560b17c0330f64907bfa4f7f7f2444b0cf9b"}, + {file = "pyarrow-17.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:392bc9feabc647338e6c89267635e111d71edad5fcffba204425a7c8d13610d7"}, + {file = "pyarrow-17.0.0-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:af5ff82a04b2171415f1410cff7ebb79861afc5dae50be73ce06d6e870615204"}, + {file = "pyarrow-17.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:edca18eaca89cd6382dfbcff3dd2d87633433043650c07375d095cd3517561d8"}, + {file = "pyarrow-17.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7c7916bff914ac5d4a8fe25b7a25e432ff921e72f6f2b7547d1e325c1ad9d155"}, + {file = "pyarrow-17.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f553ca691b9e94b202ff741bdd40f6ccb70cdd5fbf65c187af132f1317de6145"}, + {file = "pyarrow-17.0.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:0cdb0e627c86c373205a2f94a510ac4376fdc523f8bb36beab2e7f204416163c"}, + {file = "pyarrow-17.0.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:d7d192305d9d8bc9082d10f361fc70a73590a4c65cf31c3e6926cd72b76bc35c"}, + {file = "pyarrow-17.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:02dae06ce212d8b3244dd3e7d12d9c4d3046945a5933d28026598e9dbbda1fca"}, + {file = "pyarrow-17.0.0-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:13d7a460b412f31e4c0efa1148e1d29bdf18ad1411eb6757d38f8fbdcc8645fb"}, + {file = "pyarrow-17.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = 
"sha256:9b564a51fbccfab5a04a80453e5ac6c9954a9c5ef2890d1bcf63741909c3f8df"}, + {file = "pyarrow-17.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:32503827abbc5aadedfa235f5ece8c4f8f8b0a3cf01066bc8d29de7539532687"}, + {file = "pyarrow-17.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a155acc7f154b9ffcc85497509bcd0d43efb80d6f733b0dc3bb14e281f131c8b"}, + {file = "pyarrow-17.0.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:dec8d129254d0188a49f8a1fc99e0560dc1b85f60af729f47de4046015f9b0a5"}, + {file = "pyarrow-17.0.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:a48ddf5c3c6a6c505904545c25a4ae13646ae1f8ba703c4df4a1bfe4f4006bda"}, + {file = "pyarrow-17.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:42bf93249a083aca230ba7e2786c5f673507fa97bbd9725a1e2754715151a204"}, + {file = "pyarrow-17.0.0.tar.gz", hash = "sha256:4beca9521ed2c0921c1023e68d097d0299b62c362639ea315572a58f3f50fd28"}, +] + +[package.dependencies] +numpy = ">=1.16.6" + +[package.extras] +test = ["cffi", "hypothesis", "pandas", "pytest", "pytz"] + +[[package]] +name = "pyarrow" +version = "20.0.0" +description = "Python library for Apache Arrow" +optional = false +python-versions = ">=3.9" +groups = ["main"] +markers = "sys_platform != \"darwin\" or platform_machine != \"x86_64\"" +files = [ + {file = "pyarrow-20.0.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:c7dd06fd7d7b410ca5dc839cc9d485d2bc4ae5240851bcd45d85105cc90a47d7"}, + {file = "pyarrow-20.0.0-cp310-cp310-macosx_12_0_x86_64.whl", hash = "sha256:d5382de8dc34c943249b01c19110783d0d64b207167c728461add1ecc2db88e4"}, + {file = "pyarrow-20.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6415a0d0174487456ddc9beaead703d0ded5966129fa4fd3114d76b5d1c5ceae"}, + {file = "pyarrow-20.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15aa1b3b2587e74328a730457068dc6c89e6dcbf438d4369f572af9d320a25ee"}, + {file = 
"pyarrow-20.0.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:5605919fbe67a7948c1f03b9f3727d82846c053cd2ce9303ace791855923fd20"}, + {file = "pyarrow-20.0.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:a5704f29a74b81673d266e5ec1fe376f060627c2e42c5c7651288ed4b0db29e9"}, + {file = "pyarrow-20.0.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:00138f79ee1b5aca81e2bdedb91e3739b987245e11fa3c826f9e57c5d102fb75"}, + {file = "pyarrow-20.0.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f2d67ac28f57a362f1a2c1e6fa98bfe2f03230f7e15927aecd067433b1e70ce8"}, + {file = "pyarrow-20.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:4a8b029a07956b8d7bd742ffca25374dd3f634b35e46cc7a7c3fa4c75b297191"}, + {file = "pyarrow-20.0.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:24ca380585444cb2a31324c546a9a56abbe87e26069189e14bdba19c86c049f0"}, + {file = "pyarrow-20.0.0-cp311-cp311-macosx_12_0_x86_64.whl", hash = "sha256:95b330059ddfdc591a3225f2d272123be26c8fa76e8c9ee1a77aad507361cfdb"}, + {file = "pyarrow-20.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f0fb1041267e9968c6d0d2ce3ff92e3928b243e2b6d11eeb84d9ac547308232"}, + {file = "pyarrow-20.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8ff87cc837601532cc8242d2f7e09b4e02404de1b797aee747dd4ba4bd6313f"}, + {file = "pyarrow-20.0.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:7a3a5dcf54286e6141d5114522cf31dd67a9e7c9133d150799f30ee302a7a1ab"}, + {file = "pyarrow-20.0.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:a6ad3e7758ecf559900261a4df985662df54fb7fdb55e8e3b3aa99b23d526b62"}, + {file = "pyarrow-20.0.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:6bb830757103a6cb300a04610e08d9636f0cd223d32f388418ea893a3e655f1c"}, + {file = "pyarrow-20.0.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:96e37f0766ecb4514a899d9a3554fadda770fb57ddf42b63d80f14bc20aa7db3"}, + {file = 
"pyarrow-20.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:3346babb516f4b6fd790da99b98bed9708e3f02e734c84971faccb20736848dc"}, + {file = "pyarrow-20.0.0-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:75a51a5b0eef32727a247707d4755322cb970be7e935172b6a3a9f9ae98404ba"}, + {file = "pyarrow-20.0.0-cp312-cp312-macosx_12_0_x86_64.whl", hash = "sha256:211d5e84cecc640c7a3ab900f930aaff5cd2702177e0d562d426fb7c4f737781"}, + {file = "pyarrow-20.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ba3cf4182828be7a896cbd232aa8dd6a31bd1f9e32776cc3796c012855e1199"}, + {file = "pyarrow-20.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2c3a01f313ffe27ac4126f4c2e5ea0f36a5fc6ab51f8726cf41fee4b256680bd"}, + {file = "pyarrow-20.0.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:a2791f69ad72addd33510fec7bb14ee06c2a448e06b649e264c094c5b5f7ce28"}, + {file = "pyarrow-20.0.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:4250e28a22302ce8692d3a0e8ec9d9dde54ec00d237cff4dfa9c1fbf79e472a8"}, + {file = "pyarrow-20.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:89e030dc58fc760e4010148e6ff164d2f44441490280ef1e97a542375e41058e"}, + {file = "pyarrow-20.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:6102b4864d77102dbbb72965618e204e550135a940c2534711d5ffa787df2a5a"}, + {file = "pyarrow-20.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:96d6a0a37d9c98be08f5ed6a10831d88d52cac7b13f5287f1e0f625a0de8062b"}, + {file = "pyarrow-20.0.0-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:a15532e77b94c61efadde86d10957950392999503b3616b2ffcef7621a002893"}, + {file = "pyarrow-20.0.0-cp313-cp313-macosx_12_0_x86_64.whl", hash = "sha256:dd43f58037443af715f34f1322c782ec463a3c8a94a85fdb2d987ceb5658e061"}, + {file = "pyarrow-20.0.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa0d288143a8585806e3cc7c39566407aab646fb9ece164609dac1cfff45f6ae"}, + {file = 
"pyarrow-20.0.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b6953f0114f8d6f3d905d98e987d0924dabce59c3cda380bdfaa25a6201563b4"}, + {file = "pyarrow-20.0.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:991f85b48a8a5e839b2128590ce07611fae48a904cae6cab1f089c5955b57eb5"}, + {file = "pyarrow-20.0.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:97c8dc984ed09cb07d618d57d8d4b67a5100a30c3818c2fb0b04599f0da2de7b"}, + {file = "pyarrow-20.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:9b71daf534f4745818f96c214dbc1e6124d7daf059167330b610fc69b6f3d3e3"}, + {file = "pyarrow-20.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e8b88758f9303fa5a83d6c90e176714b2fd3852e776fc2d7e42a22dd6c2fb368"}, + {file = "pyarrow-20.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:30b3051b7975801c1e1d387e17c588d8ab05ced9b1e14eec57915f79869b5031"}, + {file = "pyarrow-20.0.0-cp313-cp313t-macosx_12_0_arm64.whl", hash = "sha256:ca151afa4f9b7bc45bcc791eb9a89e90a9eb2772767d0b1e5389609c7d03db63"}, + {file = "pyarrow-20.0.0-cp313-cp313t-macosx_12_0_x86_64.whl", hash = "sha256:4680f01ecd86e0dd63e39eb5cd59ef9ff24a9d166db328679e36c108dc993d4c"}, + {file = "pyarrow-20.0.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7f4c8534e2ff059765647aa69b75d6543f9fef59e2cd4c6d18015192565d2b70"}, + {file = "pyarrow-20.0.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3e1f8a47f4b4ae4c69c4d702cfbdfe4d41e18e5c7ef6f1bb1c50918c1e81c57b"}, + {file = "pyarrow-20.0.0-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:a1f60dc14658efaa927f8214734f6a01a806d7690be4b3232ba526836d216122"}, + {file = "pyarrow-20.0.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:204a846dca751428991346976b914d6d2a82ae5b8316a6ed99789ebf976551e6"}, + {file = "pyarrow-20.0.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:f3b117b922af5e4c6b9a9115825726cac7d8b1421c37c2b5e24fbacc8930612c"}, + {file = 
"pyarrow-20.0.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:e724a3fd23ae5b9c010e7be857f4405ed5e679db5c93e66204db1a69f733936a"}, + {file = "pyarrow-20.0.0-cp313-cp313t-win_amd64.whl", hash = "sha256:82f1ee5133bd8f49d31be1299dc07f585136679666b502540db854968576faf9"}, + {file = "pyarrow-20.0.0-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:1bcbe471ef3349be7714261dea28fe280db574f9d0f77eeccc195a2d161fd861"}, + {file = "pyarrow-20.0.0-cp39-cp39-macosx_12_0_x86_64.whl", hash = "sha256:a18a14baef7d7ae49247e75641fd8bcbb39f44ed49a9fc4ec2f65d5031aa3b96"}, + {file = "pyarrow-20.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb497649e505dc36542d0e68eca1a3c94ecbe9799cb67b578b55f2441a247fbc"}, + {file = "pyarrow-20.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:11529a2283cb1f6271d7c23e4a8f9f8b7fd173f7360776b668e509d712a02eec"}, + {file = "pyarrow-20.0.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:6fc1499ed3b4b57ee4e090e1cea6eb3584793fe3d1b4297bbf53f09b434991a5"}, + {file = "pyarrow-20.0.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:db53390eaf8a4dab4dbd6d93c85c5cf002db24902dbff0ca7d988beb5c9dd15b"}, + {file = "pyarrow-20.0.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:851c6a8260ad387caf82d2bbf54759130534723e37083111d4ed481cb253cc0d"}, + {file = "pyarrow-20.0.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:e22f80b97a271f0a7d9cd07394a7d348f80d3ac63ed7cc38b6d1b696ab3b2619"}, + {file = "pyarrow-20.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:9965a050048ab02409fb7cbbefeedba04d3d67f2cc899eff505cc084345959ca"}, + {file = "pyarrow-20.0.0.tar.gz", hash = "sha256:febc4a913592573c8d5805091a6c2b5064c8bd6e002131f01061797d91c783c1"}, +] + +[package.extras] +test = ["cffi", "hypothesis", "pandas", "pytest", "pytz"] + +[[package]] +name = "pyasn1" +version = "0.6.1" +description = "Pure-Python implementation of ASN.1 types and DER/BER/CER codecs (X.208)" +optional = false 
+python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "pyasn1-0.6.1-py3-none-any.whl", hash = "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629"}, + {file = "pyasn1-0.6.1.tar.gz", hash = "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034"}, +] + +[[package]] +name = "pyasn1-modules" +version = "0.4.2" +description = "A collection of ASN.1-based protocols modules" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "pyasn1_modules-0.4.2-py3-none-any.whl", hash = "sha256:29253a9207ce32b64c3ac6600edc75368f98473906e8fd1043bd6b5b1de2c14a"}, + {file = "pyasn1_modules-0.4.2.tar.gz", hash = "sha256:677091de870a80aae844b1ca6134f54652fa2c8c5a52aa396440ac3106e941e6"}, +] + +[package.dependencies] +pyasn1 = ">=0.6.1,<0.7.0" + +[[package]] +name = "pycparser" +version = "2.22" +description = "C parser in Python" +optional = false +python-versions = ">=3.8" +groups = ["main", "test"] +files = [ + {file = "pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc"}, + {file = "pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6"}, +] + +[[package]] +name = "pydantic" +version = "2.11.7" +description = "Data validation using Python type hints" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "pydantic-2.11.7-py3-none-any.whl", hash = "sha256:dde5df002701f6de26248661f6835bbe296a47bf73990135c7d07ce741b9623b"}, + {file = "pydantic-2.11.7.tar.gz", hash = "sha256:d989c3c6cb79469287b1569f7447a17848c998458d49ebe294e975b9baf0f0db"}, +] + +[package.dependencies] +annotated-types = ">=0.6.0" +pydantic-core = "2.33.2" +typing-extensions = ">=4.12.2" +typing-inspection = ">=0.4.0" + +[package.extras] +email = ["email-validator (>=2.0.0)"] +timezone = ["tzdata ; python_version >= \"3.9\" and platform_system == \"Windows\""] + +[[package]] +name = "pydantic-core" 
+version = "2.33.2" +description = "Core functionality for Pydantic validation and serialization" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "pydantic_core-2.33.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2b3d326aaef0c0399d9afffeb6367d5e26ddc24d351dbc9c636840ac355dc5d8"}, + {file = "pydantic_core-2.33.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0e5b2671f05ba48b94cb90ce55d8bdcaaedb8ba00cc5359f6810fc918713983d"}, + {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0069c9acc3f3981b9ff4cdfaf088e98d83440a4c7ea1bc07460af3d4dc22e72d"}, + {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d53b22f2032c42eaaf025f7c40c2e3b94568ae077a606f006d206a463bc69572"}, + {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0405262705a123b7ce9f0b92f123334d67b70fd1f20a9372b907ce1080c7ba02"}, + {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4b25d91e288e2c4e0662b8038a28c6a07eaac3e196cfc4ff69de4ea3db992a1b"}, + {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6bdfe4b3789761f3bcb4b1ddf33355a71079858958e3a552f16d5af19768fef2"}, + {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:efec8db3266b76ef9607c2c4c419bdb06bf335ae433b80816089ea7585816f6a"}, + {file = "pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:031c57d67ca86902726e0fae2214ce6770bbe2f710dc33063187a68744a5ecac"}, + {file = "pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:f8de619080e944347f5f20de29a975c2d815d9ddd8be9b9b7268e2e3ef68605a"}, + {file = "pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:73662edf539e72a9440129f231ed3757faab89630d291b784ca99237fb94db2b"}, + {file = 
"pydantic_core-2.33.2-cp310-cp310-win32.whl", hash = "sha256:0a39979dcbb70998b0e505fb1556a1d550a0781463ce84ebf915ba293ccb7e22"}, + {file = "pydantic_core-2.33.2-cp310-cp310-win_amd64.whl", hash = "sha256:b0379a2b24882fef529ec3b4987cb5d003b9cda32256024e6fe1586ac45fc640"}, + {file = "pydantic_core-2.33.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:4c5b0a576fb381edd6d27f0a85915c6daf2f8138dc5c267a57c08a62900758c7"}, + {file = "pydantic_core-2.33.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e799c050df38a639db758c617ec771fd8fb7a5f8eaaa4b27b101f266b216a246"}, + {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dc46a01bf8d62f227d5ecee74178ffc448ff4e5197c756331f71efcc66dc980f"}, + {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a144d4f717285c6d9234a66778059f33a89096dfb9b39117663fd8413d582dcc"}, + {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:73cf6373c21bc80b2e0dc88444f41ae60b2f070ed02095754eb5a01df12256de"}, + {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3dc625f4aa79713512d1976fe9f0bc99f706a9dee21dfd1810b4bbbf228d0e8a"}, + {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:881b21b5549499972441da4758d662aeea93f1923f953e9cbaff14b8b9565aef"}, + {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bdc25f3681f7b78572699569514036afe3c243bc3059d3942624e936ec93450e"}, + {file = "pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:fe5b32187cbc0c862ee201ad66c30cf218e5ed468ec8dc1cf49dec66e160cc4d"}, + {file = "pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:bc7aee6f634a6f4a95676fcb5d6559a2c2a390330098dba5e5a5f28a2e4ada30"}, + {file = 
"pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:235f45e5dbcccf6bd99f9f472858849f73d11120d76ea8707115415f8e5ebebf"}, + {file = "pydantic_core-2.33.2-cp311-cp311-win32.whl", hash = "sha256:6368900c2d3ef09b69cb0b913f9f8263b03786e5b2a387706c5afb66800efd51"}, + {file = "pydantic_core-2.33.2-cp311-cp311-win_amd64.whl", hash = "sha256:1e063337ef9e9820c77acc768546325ebe04ee38b08703244c1309cccc4f1bab"}, + {file = "pydantic_core-2.33.2-cp311-cp311-win_arm64.whl", hash = "sha256:6b99022f1d19bc32a4c2a0d544fc9a76e3be90f0b3f4af413f87d38749300e65"}, + {file = "pydantic_core-2.33.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:a7ec89dc587667f22b6a0b6579c249fca9026ce7c333fc142ba42411fa243cdc"}, + {file = "pydantic_core-2.33.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3c6db6e52c6d70aa0d00d45cdb9b40f0433b96380071ea80b09277dba021ddf7"}, + {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e61206137cbc65e6d5256e1166f88331d3b6238e082d9f74613b9b765fb9025"}, + {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eb8c529b2819c37140eb51b914153063d27ed88e3bdc31b71198a198e921e011"}, + {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c52b02ad8b4e2cf14ca7b3d918f3eb0ee91e63b3167c32591e57c4317e134f8f"}, + {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:96081f1605125ba0855dfda83f6f3df5ec90c61195421ba72223de35ccfb2f88"}, + {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f57a69461af2a5fa6e6bbd7a5f60d3b7e6cebb687f55106933188e79ad155c1"}, + {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:572c7e6c8bb4774d2ac88929e3d1f12bc45714ae5ee6d9a788a9fb35e60bb04b"}, + {file = "pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash 
= "sha256:db4b41f9bd95fbe5acd76d89920336ba96f03e149097365afe1cb092fceb89a1"}, + {file = "pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:fa854f5cf7e33842a892e5c73f45327760bc7bc516339fda888c75ae60edaeb6"}, + {file = "pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:5f483cfb75ff703095c59e365360cb73e00185e01aaea067cd19acffd2ab20ea"}, + {file = "pydantic_core-2.33.2-cp312-cp312-win32.whl", hash = "sha256:9cb1da0f5a471435a7bc7e439b8a728e8b61e59784b2af70d7c169f8dd8ae290"}, + {file = "pydantic_core-2.33.2-cp312-cp312-win_amd64.whl", hash = "sha256:f941635f2a3d96b2973e867144fde513665c87f13fe0e193c158ac51bfaaa7b2"}, + {file = "pydantic_core-2.33.2-cp312-cp312-win_arm64.whl", hash = "sha256:cca3868ddfaccfbc4bfb1d608e2ccaaebe0ae628e1416aeb9c4d88c001bb45ab"}, + {file = "pydantic_core-2.33.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:1082dd3e2d7109ad8b7da48e1d4710c8d06c253cbc4a27c1cff4fbcaa97a9e3f"}, + {file = "pydantic_core-2.33.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f517ca031dfc037a9c07e748cefd8d96235088b83b4f4ba8939105d20fa1dcd6"}, + {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a9f2c9dd19656823cb8250b0724ee9c60a82f3cdf68a080979d13092a3b0fef"}, + {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2b0a451c263b01acebe51895bfb0e1cc842a5c666efe06cdf13846c7418caa9a"}, + {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ea40a64d23faa25e62a70ad163571c0b342b8bf66d5fa612ac0dec4f069d916"}, + {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0fb2d542b4d66f9470e8065c5469ec676978d625a8b7a363f07d9a501a9cb36a"}, + {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdac5d6ffa1b5a83bca06ffe7583f5576555e6c8b3a91fbd25ea7780f825f7d"}, + 
{file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:04a1a413977ab517154eebb2d326da71638271477d6ad87a769102f7c2488c56"}, + {file = "pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:c8e7af2f4e0194c22b5b37205bfb293d166a7344a5b0d0eaccebc376546d77d5"}, + {file = "pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:5c92edd15cd58b3c2d34873597a1e20f13094f59cf88068adb18947df5455b4e"}, + {file = "pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:65132b7b4a1c0beded5e057324b7e16e10910c106d43675d9bd87d4f38dde162"}, + {file = "pydantic_core-2.33.2-cp313-cp313-win32.whl", hash = "sha256:52fb90784e0a242bb96ec53f42196a17278855b0f31ac7c3cc6f5c1ec4811849"}, + {file = "pydantic_core-2.33.2-cp313-cp313-win_amd64.whl", hash = "sha256:c083a3bdd5a93dfe480f1125926afcdbf2917ae714bdb80b36d34318b2bec5d9"}, + {file = "pydantic_core-2.33.2-cp313-cp313-win_arm64.whl", hash = "sha256:e80b087132752f6b3d714f041ccf74403799d3b23a72722ea2e6ba2e892555b9"}, + {file = "pydantic_core-2.33.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:61c18fba8e5e9db3ab908620af374db0ac1baa69f0f32df4f61ae23f15e586ac"}, + {file = "pydantic_core-2.33.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95237e53bb015f67b63c91af7518a62a8660376a6a0db19b89acc77a4d6199f5"}, + {file = "pydantic_core-2.33.2-cp313-cp313t-win_amd64.whl", hash = "sha256:c2fc0a768ef76c15ab9238afa6da7f69895bb5d1ee83aeea2e3509af4472d0b9"}, + {file = "pydantic_core-2.33.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:a2b911a5b90e0374d03813674bf0a5fbbb7741570dcd4b4e85a2e48d17def29d"}, + {file = "pydantic_core-2.33.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6fa6dfc3e4d1f734a34710f391ae822e0a8eb8559a85c6979e14e65ee6ba2954"}, + {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:c54c939ee22dc8e2d545da79fc5381f1c020d6d3141d3bd747eab59164dc89fb"}, + {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:53a57d2ed685940a504248187d5685e49eb5eef0f696853647bf37c418c538f7"}, + {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:09fb9dd6571aacd023fe6aaca316bd01cf60ab27240d7eb39ebd66a3a15293b4"}, + {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0e6116757f7959a712db11f3e9c0a99ade00a5bbedae83cb801985aa154f071b"}, + {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d55ab81c57b8ff8548c3e4947f119551253f4e3787a7bbc0b6b3ca47498a9d3"}, + {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c20c462aa4434b33a2661701b861604913f912254e441ab8d78d30485736115a"}, + {file = "pydantic_core-2.33.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:44857c3227d3fb5e753d5fe4a3420d6376fa594b07b621e220cd93703fe21782"}, + {file = "pydantic_core-2.33.2-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:eb9b459ca4df0e5c87deb59d37377461a538852765293f9e6ee834f0435a93b9"}, + {file = "pydantic_core-2.33.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9fcd347d2cc5c23b06de6d3b7b8275be558a0c90549495c699e379a80bf8379e"}, + {file = "pydantic_core-2.33.2-cp39-cp39-win32.whl", hash = "sha256:83aa99b1285bc8f038941ddf598501a86f1536789740991d7d8756e34f1e74d9"}, + {file = "pydantic_core-2.33.2-cp39-cp39-win_amd64.whl", hash = "sha256:f481959862f57f29601ccced557cc2e817bce7533ab8e01a797a48b49c9692b3"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5c4aa4e82353f65e548c476b37e64189783aa5384903bfea4f41580f255fddfa"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d946c8bf0d5c24bf4fe333af284c59a19358aa3ec18cb3dc4370080da1e8ad29"}, + {file 
= "pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:87b31b6846e361ef83fedb187bb5b4372d0da3f7e28d85415efa92d6125d6e6d"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa9d91b338f2df0508606f7009fde642391425189bba6d8c653afd80fd6bb64e"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2058a32994f1fde4ca0480ab9d1e75a0e8c87c22b53a3ae66554f9af78f2fe8c"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:0e03262ab796d986f978f79c943fc5f620381be7287148b8010b4097f79a39ec"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:1a8695a8d00c73e50bff9dfda4d540b7dee29ff9b8053e38380426a85ef10052"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:fa754d1850735a0b0e03bcffd9d4b4343eb417e47196e4485d9cca326073a42c"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:a11c8d26a50bfab49002947d3d237abe4d9e4b5bdc8846a63537b6488e197808"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:dd14041875d09cc0f9308e37a6f8b65f5585cf2598a53aa0123df8b129d481f8"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:d87c561733f66531dced0da6e864f44ebf89a8fba55f31407b00c2f7f9449593"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2f82865531efd18d6e07a04a17331af02cb7a651583c418df8266f17a63c6612"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bfb5112df54209d820d7bf9317c7a6c9025ea52e49f46b6a2060104bba37de7"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:64632ff9d614e5eecfb495796ad51b0ed98c453e447a76bcbeeb69615079fc7e"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:f889f7a40498cc077332c7ab6b4608d296d852182211787d4f3ee377aaae66e8"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:de4b83bb311557e439b9e186f733f6c645b9417c84e2eb8203f3f820a4b988bf"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:82f68293f055f51b51ea42fafc74b6aad03e70e191799430b90c13d643059ebb"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:329467cecfb529c925cf2bbd4d60d2c509bc2fb52a20c1045bf09bb70971a9c1"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:87acbfcf8e90ca885206e98359d7dca4bcbb35abdc0ff66672a293e1d7a19101"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:7f92c15cd1e97d4b12acd1cc9004fa092578acfa57b67ad5e43a197175d01a64"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d3f26877a748dc4251cfcfda9dfb5f13fcb034f5308388066bcfe9031b63ae7d"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dac89aea9af8cd672fa7b510e7b8c33b0bba9a43186680550ccf23020f32d535"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:970919794d126ba8645f3837ab6046fb4e72bbc057b3709144066204c19a455d"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:3eb3fe62804e8f859c49ed20a8451342de53ed764150cb14ca71357c765dc2a6"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:3abcd9392a36025e3bd55f9bd38d908bd17962cc49bc6da8e7e96285336e2bca"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = 
"sha256:3a1c81334778f9e3af2f8aeb7a960736e5cab1dfebfb26aabca09afd2906c039"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2807668ba86cb38c6817ad9bc66215ab8584d1d304030ce4f0887336f28a5e27"}, + {file = "pydantic_core-2.33.2.tar.gz", hash = "sha256:7cb8bc3605c29176e1b105350d2e6474142d7c1bd1d9327c4a9bdb46bf827acc"}, +] + +[package.dependencies] +typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" + +[[package]] +name = "pygments" +version = "2.19.1" +description = "Pygments is a syntax highlighting package written in Python." +optional = false +python-versions = ">=3.8" +groups = ["main", "docs", "test"] +files = [ + {file = "pygments-2.19.1-py3-none-any.whl", hash = "sha256:9ea1544ad55cecf4b8242fab6dd35a93bbce657034b0611ee383099054ab6d8c"}, + {file = "pygments-2.19.1.tar.gz", hash = "sha256:61c16d2a8576dc0649d9f39e089b5f02bcd27fba10d8fb4dcc28173f7a45151f"}, +] + +[package.extras] +windows-terminal = ["colorama (>=0.4.6)"] + +[[package]] +name = "pynacl" +version = "1.5.0" +description = "Python binding to the Networking and Cryptography (NaCl) library" +optional = false +python-versions = ">=3.6" +groups = ["main"] +files = [ + {file = "PyNaCl-1.5.0-cp36-abi3-macosx_10_10_universal2.whl", hash = "sha256:401002a4aaa07c9414132aaed7f6836ff98f59277a234704ff66878c2ee4a0d1"}, + {file = "PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:52cb72a79269189d4e0dc537556f4740f7f0a9ec41c1322598799b0bdad4ef92"}, + {file = "PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a36d4a9dda1f19ce6e03c9a784a2921a4b726b02e1c736600ca9c22029474394"}, + {file = "PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:0c84947a22519e013607c9be43706dd42513f9e6ae5d39d3613ca1e142fba44d"}, + {file = "PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:06b8f6fa7f5de8d5d2f7573fe8c863c051225a27b61e6860fd047b1775807858"}, + {file = "PyNaCl-1.5.0-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:a422368fc821589c228f4c49438a368831cb5bbc0eab5ebe1d7fac9dded6567b"}, + {file = "PyNaCl-1.5.0-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:61f642bf2378713e2c2e1de73444a3778e5f0a38be6fee0fe532fe30060282ff"}, + {file = "PyNaCl-1.5.0-cp36-abi3-win32.whl", hash = "sha256:e46dae94e34b085175f8abb3b0aaa7da40767865ac82c928eeb9e57e1ea8a543"}, + {file = "PyNaCl-1.5.0-cp36-abi3-win_amd64.whl", hash = "sha256:20f42270d27e1b6a29f54032090b972d97f0a1b0948cc52392041ef7831fee93"}, + {file = "PyNaCl-1.5.0.tar.gz", hash = "sha256:8ac7448f09ab85811607bdd21ec2464495ac8b7c66d146bf545b0f08fb9220ba"}, +] + +[package.dependencies] +cffi = ">=1.4.1" + +[package.extras] +docs = ["sphinx (>=1.6.5)", "sphinx-rtd-theme"] +tests = ["hypothesis (>=3.27.0)", "pytest (>=3.2.1,!=3.3.0)"] + +[[package]] +name = "pytest" +version = "7.4.0" +description = "pytest: simple powerful testing with Python" +optional = false +python-versions = ">=3.7" +groups = ["test"] +files = [ + {file = "pytest-7.4.0-py3-none-any.whl", hash = "sha256:78bf16451a2eb8c7a2ea98e32dc119fd2aa758f1d5d66dbf0a59d69a3969df32"}, + {file = "pytest-7.4.0.tar.gz", hash = "sha256:b4bf8c45bd59934ed84001ad51e11b4ee40d40a1229d2c79f9c592b0a3f6bd8a"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "sys_platform == \"win32\""} +iniconfig = "*" +packaging = "*" +pluggy = ">=0.12,<2.0" + +[package.extras] +testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] + +[[package]] +name = "pytest-mock" +version = "3.11.1" +description = "Thin-wrapper around the mock package for easier use with pytest" +optional = false +python-versions = ">=3.7" +groups = ["test"] +files = [ + {file = "pytest-mock-3.11.1.tar.gz", hash = 
"sha256:7f6b125602ac6d743e523ae0bfa71e1a697a2f5534064528c6ff84c2f7c2fc7f"}, + {file = "pytest_mock-3.11.1-py3-none-any.whl", hash = "sha256:21c279fff83d70763b05f8874cc9cfb3fcacd6d354247a976f9529d19f9acf39"}, +] + +[package.dependencies] +pytest = ">=5.0" + +[package.extras] +dev = ["pre-commit", "pytest-asyncio", "tox"] + +[[package]] +name = "pytest-timeout" +version = "2.3.1" +description = "pytest plugin to abort hanging tests" +optional = false +python-versions = ">=3.7" +groups = ["test"] +files = [ + {file = "pytest-timeout-2.3.1.tar.gz", hash = "sha256:12397729125c6ecbdaca01035b9e5239d4db97352320af155b3f5de1ba5165d9"}, + {file = "pytest_timeout-2.3.1-py3-none-any.whl", hash = "sha256:68188cb703edfc6a18fad98dc25a3c61e9f24d644b0b70f33af545219fc7813e"}, +] + +[package.dependencies] +pytest = ">=7.0.0" + +[[package]] +name = "python-dateutil" +version = "2.9.0.post0" +description = "Extensions to the standard Python datetime module" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +groups = ["main", "test"] +files = [ + {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, + {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, +] + +[package.dependencies] +six = ">=1.5" + +[[package]] +name = "python-json-logger" +version = "3.3.0" +description = "JSON Log Formatter for the Python Logging Package" +optional = false +python-versions = ">=3.8" +groups = ["test"] +files = [ + {file = "python_json_logger-3.3.0-py3-none-any.whl", hash = "sha256:dd980fae8cffb24c13caf6e158d3d61c0d6d22342f932cb6e9deedab3d35eec7"}, + {file = "python_json_logger-3.3.0.tar.gz", hash = "sha256:12b7e74b17775e7d565129296105bbe3910842d9d0eb083fc83a6a617aa8df84"}, +] + +[package.extras] +dev = ["backports.zoneinfo ; python_version < \"3.9\"", "black", "build", "freezegun", "mdx_truly_sane_lists", "mike", 
"mkdocs", "mkdocs-awesome-pages-plugin", "mkdocs-gen-files", "mkdocs-literate-nav", "mkdocs-material (>=8.5)", "mkdocstrings[python]", "msgspec ; implementation_name != \"pypy\"", "mypy", "orjson ; implementation_name != \"pypy\"", "pylint", "pytest", "tzdata", "validate-pyproject[all]"] + +[[package]] +name = "pytz" +version = "2025.2" +description = "World timezone definitions, modern and historical" +optional = false +python-versions = "*" +groups = ["main"] +files = [ + {file = "pytz-2025.2-py2.py3-none-any.whl", hash = "sha256:5ddf76296dd8c44c26eb8f4b6f35488f3ccbf6fbbd7adee0b7262d43f0ec2f00"}, + {file = "pytz-2025.2.tar.gz", hash = "sha256:360b9e3dbb49a209c21ad61809c7fb453643e048b38924c765813546746e81c3"}, +] + +[[package]] +name = "pywin32" +version = "310" +description = "Python for Window Extensions" +optional = false +python-versions = "*" +groups = ["test"] +markers = "sys_platform == \"win32\" and platform_python_implementation != \"PyPy\"" +files = [ + {file = "pywin32-310-cp310-cp310-win32.whl", hash = "sha256:6dd97011efc8bf51d6793a82292419eba2c71cf8e7250cfac03bba284454abc1"}, + {file = "pywin32-310-cp310-cp310-win_amd64.whl", hash = "sha256:c3e78706e4229b915a0821941a84e7ef420bf2b77e08c9dae3c76fd03fd2ae3d"}, + {file = "pywin32-310-cp310-cp310-win_arm64.whl", hash = "sha256:33babed0cf0c92a6f94cc6cc13546ab24ee13e3e800e61ed87609ab91e4c8213"}, + {file = "pywin32-310-cp311-cp311-win32.whl", hash = "sha256:1e765f9564e83011a63321bb9d27ec456a0ed90d3732c4b2e312b855365ed8bd"}, + {file = "pywin32-310-cp311-cp311-win_amd64.whl", hash = "sha256:126298077a9d7c95c53823934f000599f66ec9296b09167810eb24875f32689c"}, + {file = "pywin32-310-cp311-cp311-win_arm64.whl", hash = "sha256:19ec5fc9b1d51c4350be7bb00760ffce46e6c95eaf2f0b2f1150657b1a43c582"}, + {file = "pywin32-310-cp312-cp312-win32.whl", hash = "sha256:8a75a5cc3893e83a108c05d82198880704c44bbaee4d06e442e471d3c9ea4f3d"}, + {file = "pywin32-310-cp312-cp312-win_amd64.whl", hash = 
"sha256:bf5c397c9a9a19a6f62f3fb821fbf36cac08f03770056711f765ec1503972060"}, + {file = "pywin32-310-cp312-cp312-win_arm64.whl", hash = "sha256:2349cc906eae872d0663d4d6290d13b90621eaf78964bb1578632ff20e152966"}, + {file = "pywin32-310-cp313-cp313-win32.whl", hash = "sha256:5d241a659c496ada3253cd01cfaa779b048e90ce4b2b38cd44168ad555ce74ab"}, + {file = "pywin32-310-cp313-cp313-win_amd64.whl", hash = "sha256:667827eb3a90208ddbdcc9e860c81bde63a135710e21e4cb3348968e4bd5249e"}, + {file = "pywin32-310-cp313-cp313-win_arm64.whl", hash = "sha256:e308f831de771482b7cf692a1f308f8fca701b2d8f9dde6cc440c7da17e47b33"}, + {file = "pywin32-310-cp38-cp38-win32.whl", hash = "sha256:0867beb8addefa2e3979d4084352e4ac6e991ca45373390775f7084cc0209b9c"}, + {file = "pywin32-310-cp38-cp38-win_amd64.whl", hash = "sha256:30f0a9b3138fb5e07eb4973b7077e1883f558e40c578c6925acc7a94c34eaa36"}, + {file = "pywin32-310-cp39-cp39-win32.whl", hash = "sha256:851c8d927af0d879221e616ae1f66145253537bbdd321a77e8ef701b443a9a1a"}, + {file = "pywin32-310-cp39-cp39-win_amd64.whl", hash = "sha256:96867217335559ac619f00ad70e513c0fcf84b8a3af9fc2bba3b59b97da70475"}, +] + +[[package]] +name = "pywinpty" +version = "2.0.15" +description = "Pseudo terminal support for Windows from Python." 
+optional = false +python-versions = ">=3.9" +groups = ["test"] +markers = "os_name == \"nt\"" +files = [ + {file = "pywinpty-2.0.15-cp310-cp310-win_amd64.whl", hash = "sha256:8e7f5de756a615a38b96cd86fa3cd65f901ce54ce147a3179c45907fa11b4c4e"}, + {file = "pywinpty-2.0.15-cp311-cp311-win_amd64.whl", hash = "sha256:9a6bcec2df2707aaa9d08b86071970ee32c5026e10bcc3cc5f6f391d85baf7ca"}, + {file = "pywinpty-2.0.15-cp312-cp312-win_amd64.whl", hash = "sha256:83a8f20b430bbc5d8957249f875341a60219a4e971580f2ba694fbfb54a45ebc"}, + {file = "pywinpty-2.0.15-cp313-cp313-win_amd64.whl", hash = "sha256:ab5920877dd632c124b4ed17bc6dd6ef3b9f86cd492b963ffdb1a67b85b0f408"}, + {file = "pywinpty-2.0.15-cp313-cp313t-win_amd64.whl", hash = "sha256:a4560ad8c01e537708d2790dbe7da7d986791de805d89dd0d3697ca59e9e4901"}, + {file = "pywinpty-2.0.15-cp39-cp39-win_amd64.whl", hash = "sha256:d261cd88fcd358cfb48a7ca0700db3e1c088c9c10403c9ebc0d8a8b57aa6a117"}, + {file = "pywinpty-2.0.15.tar.gz", hash = "sha256:312cf39153a8736c617d45ce8b6ad6cd2107de121df91c455b10ce6bba7a39b2"}, +] + +[[package]] +name = "pyyaml" +version = "6.0.2" +description = "YAML parser and emitter for Python" +optional = false +python-versions = ">=3.8" +groups = ["main", "test"] +files = [ + {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"}, + {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68"}, + {file = "PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99"}, + {file = "PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e"}, + {file = "PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5"}, + {file = "PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44"}, + {file = 
"PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b"}, + {file = "PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4"}, + {file = "PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484"}, + {file = 
"PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652"}, + {file = "PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183"}, + {file = "PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563"}, + {file = "PyYAML-6.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083"}, + {file = "PyYAML-6.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706"}, + {file = "PyYAML-6.0.2-cp38-cp38-win32.whl", hash = "sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a"}, + {file = "PyYAML-6.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = 
"sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725"}, + {file = "PyYAML-6.0.2-cp39-cp39-win32.whl", hash = "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631"}, + {file = "PyYAML-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8"}, + {file = "pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e"}, +] + +[[package]] +name = "pyzmq" +version = "26.4.0" +description = "Python bindings for 0MQ" +optional = false +python-versions = ">=3.8" +groups = ["test"] +files = [ + {file = "pyzmq-26.4.0-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:0329bdf83e170ac133f44a233fc651f6ed66ef8e66693b5af7d54f45d1ef5918"}, + {file = "pyzmq-26.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:398a825d2dea96227cf6460ce0a174cf7657d6f6827807d4d1ae9d0f9ae64315"}, + {file = "pyzmq-26.4.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6d52d62edc96787f5c1dfa6c6ccff9b581cfae5a70d94ec4c8da157656c73b5b"}, + {file = "pyzmq-26.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:1410c3a3705db68d11eb2424d75894d41cff2f64d948ffe245dd97a9debfebf4"}, + {file = "pyzmq-26.4.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:7dacb06a9c83b007cc01e8e5277f94c95c453c5851aac5e83efe93e72226353f"}, + {file = "pyzmq-26.4.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:6bab961c8c9b3a4dc94d26e9b2cdf84de9918931d01d6ff38c721a83ab3c0ef5"}, + {file = "pyzmq-26.4.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:7a5c09413b924d96af2aa8b57e76b9b0058284d60e2fc3730ce0f979031d162a"}, + {file = "pyzmq-26.4.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7d489ac234d38e57f458fdbd12a996bfe990ac028feaf6f3c1e81ff766513d3b"}, + {file = "pyzmq-26.4.0-cp310-cp310-win32.whl", hash = "sha256:dea1c8db78fb1b4b7dc9f8e213d0af3fc8ecd2c51a1d5a3ca1cde1bda034a980"}, + {file = "pyzmq-26.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:fa59e1f5a224b5e04dc6c101d7186058efa68288c2d714aa12d27603ae93318b"}, + {file = "pyzmq-26.4.0-cp310-cp310-win_arm64.whl", hash = "sha256:a651fe2f447672f4a815e22e74630b6b1ec3a1ab670c95e5e5e28dcd4e69bbb5"}, + {file = "pyzmq-26.4.0-cp311-cp311-macosx_10_15_universal2.whl", hash = "sha256:bfcf82644c9b45ddd7cd2a041f3ff8dce4a0904429b74d73a439e8cab1bd9e54"}, + {file = "pyzmq-26.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e9bcae3979b2654d5289d3490742378b2f3ce804b0b5fd42036074e2bf35b030"}, + {file = "pyzmq-26.4.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ccdff8ac4246b6fb60dcf3982dfaeeff5dd04f36051fe0632748fc0aa0679c01"}, + {file = "pyzmq-26.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4550af385b442dc2d55ab7717837812799d3674cb12f9a3aa897611839c18e9e"}, + {file = "pyzmq-26.4.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:2f9f7ffe9db1187a253fca95191854b3fda24696f086e8789d1d449308a34b88"}, + {file = "pyzmq-26.4.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = 
"sha256:3709c9ff7ba61589b7372923fd82b99a81932b592a5c7f1a24147c91da9a68d6"}, + {file = "pyzmq-26.4.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:f8f3c30fb2d26ae5ce36b59768ba60fb72507ea9efc72f8f69fa088450cff1df"}, + {file = "pyzmq-26.4.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:382a4a48c8080e273427fc692037e3f7d2851959ffe40864f2db32646eeb3cef"}, + {file = "pyzmq-26.4.0-cp311-cp311-win32.whl", hash = "sha256:d56aad0517d4c09e3b4f15adebba8f6372c5102c27742a5bdbfc74a7dceb8fca"}, + {file = "pyzmq-26.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:963977ac8baed7058c1e126014f3fe58b3773f45c78cce7af5c26c09b6823896"}, + {file = "pyzmq-26.4.0-cp311-cp311-win_arm64.whl", hash = "sha256:c0c8e8cadc81e44cc5088fcd53b9b3b4ce9344815f6c4a03aec653509296fae3"}, + {file = "pyzmq-26.4.0-cp312-cp312-macosx_10_15_universal2.whl", hash = "sha256:5227cb8da4b6f68acfd48d20c588197fd67745c278827d5238c707daf579227b"}, + {file = "pyzmq-26.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e1c07a7fa7f7ba86554a2b1bef198c9fed570c08ee062fd2fd6a4dcacd45f905"}, + {file = "pyzmq-26.4.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ae775fa83f52f52de73183f7ef5395186f7105d5ed65b1ae65ba27cb1260de2b"}, + {file = "pyzmq-26.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:66c760d0226ebd52f1e6b644a9e839b5db1e107a23f2fcd46ec0569a4fdd4e63"}, + {file = "pyzmq-26.4.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:ef8c6ecc1d520debc147173eaa3765d53f06cd8dbe7bd377064cdbc53ab456f5"}, + {file = "pyzmq-26.4.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:3150ef4084e163dec29ae667b10d96aad309b668fac6810c9e8c27cf543d6e0b"}, + {file = "pyzmq-26.4.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:4448c9e55bf8329fa1dcedd32f661bf611214fa70c8e02fee4347bc589d39a84"}, + {file = "pyzmq-26.4.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = 
"sha256:e07dde3647afb084d985310d067a3efa6efad0621ee10826f2cb2f9a31b89d2f"}, + {file = "pyzmq-26.4.0-cp312-cp312-win32.whl", hash = "sha256:ba034a32ecf9af72adfa5ee383ad0fd4f4e38cdb62b13624278ef768fe5b5b44"}, + {file = "pyzmq-26.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:056a97aab4064f526ecb32f4343917a4022a5d9efb6b9df990ff72e1879e40be"}, + {file = "pyzmq-26.4.0-cp312-cp312-win_arm64.whl", hash = "sha256:2f23c750e485ce1eb639dbd576d27d168595908aa2d60b149e2d9e34c9df40e0"}, + {file = "pyzmq-26.4.0-cp313-cp313-macosx_10_15_universal2.whl", hash = "sha256:c43fac689880f5174d6fc864857d1247fe5cfa22b09ed058a344ca92bf5301e3"}, + {file = "pyzmq-26.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:902aca7eba477657c5fb81c808318460328758e8367ecdd1964b6330c73cae43"}, + {file = "pyzmq-26.4.0-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e5e48a830bfd152fe17fbdeaf99ac5271aa4122521bf0d275b6b24e52ef35eb6"}, + {file = "pyzmq-26.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:31be2b6de98c824c06f5574331f805707c667dc8f60cb18580b7de078479891e"}, + {file = "pyzmq-26.4.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:6332452034be001bbf3206ac59c0d2a7713de5f25bb38b06519fc6967b7cf771"}, + {file = "pyzmq-26.4.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:da8c0f5dd352136853e6a09b1b986ee5278dfddfebd30515e16eae425c872b30"}, + {file = "pyzmq-26.4.0-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:f4ccc1a0a2c9806dda2a2dd118a3b7b681e448f3bb354056cad44a65169f6d86"}, + {file = "pyzmq-26.4.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:1c0b5fceadbab461578daf8d1dcc918ebe7ddd2952f748cf30c7cf2de5d51101"}, + {file = "pyzmq-26.4.0-cp313-cp313-win32.whl", hash = "sha256:28e2b0ff5ba4b3dd11062d905682bad33385cfa3cc03e81abd7f0822263e6637"}, + {file = "pyzmq-26.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:23ecc9d241004c10e8b4f49d12ac064cd7000e1643343944a10df98e57bc544b"}, + {file = 
"pyzmq-26.4.0-cp313-cp313-win_arm64.whl", hash = "sha256:1edb0385c7f025045d6e0f759d4d3afe43c17a3d898914ec6582e6f464203c08"}, + {file = "pyzmq-26.4.0-cp313-cp313t-macosx_10_15_universal2.whl", hash = "sha256:93a29e882b2ba1db86ba5dd5e88e18e0ac6b627026c5cfbec9983422011b82d4"}, + {file = "pyzmq-26.4.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb45684f276f57110bb89e4300c00f1233ca631f08f5f42528a5c408a79efc4a"}, + {file = "pyzmq-26.4.0-cp313-cp313t-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f72073e75260cb301aad4258ad6150fa7f57c719b3f498cb91e31df16784d89b"}, + {file = "pyzmq-26.4.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:be37e24b13026cfedd233bcbbccd8c0bcd2fdd186216094d095f60076201538d"}, + {file = "pyzmq-26.4.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:237b283044934d26f1eeff4075f751b05d2f3ed42a257fc44386d00df6a270cf"}, + {file = "pyzmq-26.4.0-cp313-cp313t-musllinux_1_1_aarch64.whl", hash = "sha256:b30f862f6768b17040929a68432c8a8be77780317f45a353cb17e423127d250c"}, + {file = "pyzmq-26.4.0-cp313-cp313t-musllinux_1_1_i686.whl", hash = "sha256:c80fcd3504232f13617c6ab501124d373e4895424e65de8b72042333316f64a8"}, + {file = "pyzmq-26.4.0-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:26a2a7451606b87f67cdeca2c2789d86f605da08b4bd616b1a9981605ca3a364"}, + {file = "pyzmq-26.4.0-cp38-cp38-macosx_10_15_universal2.whl", hash = "sha256:831cc53bf6068d46d942af52fa8b0b9d128fb39bcf1f80d468dc9a3ae1da5bfb"}, + {file = "pyzmq-26.4.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:51d18be6193c25bd229524cfac21e39887c8d5e0217b1857998dfbef57c070a4"}, + {file = "pyzmq-26.4.0-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:445c97854204119ae2232503585ebb4fa7517142f71092cb129e5ee547957a1f"}, + {file = "pyzmq-26.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:807b8f4ad3e6084412c0f3df0613269f552110fa6fb91743e3e306223dbf11a6"}, + {file = "pyzmq-26.4.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:c01d109dd675ac47fa15c0a79d256878d898f90bc10589f808b62d021d2e653c"}, + {file = "pyzmq-26.4.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:0a294026e28679a8dd64c922e59411cb586dad307661b4d8a5c49e7bbca37621"}, + {file = "pyzmq-26.4.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:22c8dd677274af8dfb1efd05006d6f68fb2f054b17066e308ae20cb3f61028cf"}, + {file = "pyzmq-26.4.0-cp38-cp38-win32.whl", hash = "sha256:14fc678b696bc42c14e2d7f86ac4e97889d5e6b94d366ebcb637a768d2ad01af"}, + {file = "pyzmq-26.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:d1ef0a536662bbbdc8525f7e2ef19e74123ec9c4578e0582ecd41aedc414a169"}, + {file = "pyzmq-26.4.0-cp39-cp39-macosx_10_15_universal2.whl", hash = "sha256:a88643de8abd000ce99ca72056a1a2ae15881ee365ecb24dd1d9111e43d57842"}, + {file = "pyzmq-26.4.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:0a744ce209ecb557406fb928f3c8c55ce79b16c3eeb682da38ef5059a9af0848"}, + {file = "pyzmq-26.4.0-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:9434540f333332224ecb02ee6278b6c6f11ea1266b48526e73c903119b2f420f"}, + {file = "pyzmq-26.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e6c6f0a23e55cd38d27d4c89add963294ea091ebcb104d7fdab0f093bc5abb1c"}, + {file = "pyzmq-26.4.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:6145df55dc2309f6ef72d70576dcd5aabb0fd373311613fe85a5e547c722b780"}, + {file = "pyzmq-26.4.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:2ea81823840ef8c56e5d2f9918e4d571236294fea4d1842b302aebffb9e40997"}, + {file = "pyzmq-26.4.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:cc2abc385dc37835445abe206524fbc0c9e3fce87631dfaa90918a1ba8f425eb"}, + {file = "pyzmq-26.4.0-cp39-cp39-win32.whl", hash = "sha256:41a2508fe7bed4c76b4cf55aacfb8733926f59d440d9ae2b81ee8220633b4d12"}, + {file = 
"pyzmq-26.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:d4000e8255d6cbce38982e5622ebb90823f3409b7ffe8aeae4337ef7d6d2612a"}, + {file = "pyzmq-26.4.0-cp39-cp39-win_arm64.whl", hash = "sha256:b4f6919d9c120488246bdc2a2f96662fa80d67b35bd6d66218f457e722b3ff64"}, + {file = "pyzmq-26.4.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:98d948288ce893a2edc5ec3c438fe8de2daa5bbbd6e2e865ec5f966e237084ba"}, + {file = "pyzmq-26.4.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a9f34f5c9e0203ece706a1003f1492a56c06c0632d86cb77bcfe77b56aacf27b"}, + {file = "pyzmq-26.4.0-pp310-pypy310_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:80c9b48aef586ff8b698359ce22f9508937c799cc1d2c9c2f7c95996f2300c94"}, + {file = "pyzmq-26.4.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3f2a5b74009fd50b53b26f65daff23e9853e79aa86e0aa08a53a7628d92d44a"}, + {file = "pyzmq-26.4.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:61c5f93d7622d84cb3092d7f6398ffc77654c346545313a3737e266fc11a3beb"}, + {file = "pyzmq-26.4.0-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:4478b14cb54a805088299c25a79f27eaf530564a7a4f72bf432a040042b554eb"}, + {file = "pyzmq-26.4.0-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8a28ac29c60e4ba84b5f58605ace8ad495414a724fe7aceb7cf06cd0598d04e1"}, + {file = "pyzmq-26.4.0-pp311-pypy311_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:43b03c1ceea27c6520124f4fb2ba9c647409b9abdf9a62388117148a90419494"}, + {file = "pyzmq-26.4.0-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7731abd23a782851426d4e37deb2057bf9410848a4459b5ede4fe89342e687a9"}, + {file = "pyzmq-26.4.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:a222ad02fbe80166b0526c038776e8042cd4e5f0dec1489a006a1df47e9040e0"}, + {file = "pyzmq-26.4.0-pp38-pypy38_pp73-macosx_10_15_x86_64.whl", hash = 
"sha256:91c3ffaea475ec8bb1a32d77ebc441dcdd13cd3c4c284a6672b92a0f5ade1917"}, + {file = "pyzmq-26.4.0-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:d9a78a52668bf5c9e7b0da36aa5760a9fc3680144e1445d68e98df78a25082ed"}, + {file = "pyzmq-26.4.0-pp38-pypy38_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:b70cab356ff8c860118b89dc86cd910c73ce2127eb986dada4fbac399ef644cf"}, + {file = "pyzmq-26.4.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:acae207d4387780838192326b32d373bb286da0b299e733860e96f80728eb0af"}, + {file = "pyzmq-26.4.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:f928eafd15794aa4be75463d537348b35503c1e014c5b663f206504ec1a90fe4"}, + {file = "pyzmq-26.4.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:552b0d2e39987733e1e9e948a0ced6ff75e0ea39ab1a1db2fc36eb60fd8760db"}, + {file = "pyzmq-26.4.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd670a8aa843f2ee637039bbd412e0d7294a5e588e1ecc9ad98b0cdc050259a4"}, + {file = "pyzmq-26.4.0-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d367b7b775a0e1e54a59a2ba3ed4d5e0a31566af97cc9154e34262777dab95ed"}, + {file = "pyzmq-26.4.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8112af16c406e4a93df2caef49f884f4c2bb2b558b0b5577ef0b2465d15c1abc"}, + {file = "pyzmq-26.4.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:c76c298683f82669cab0b6da59071f55238c039738297c69f187a542c6d40099"}, + {file = "pyzmq-26.4.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:49b6ca2e625b46f499fb081aaf7819a177f41eeb555acb05758aa97f4f95d147"}, + {file = "pyzmq-26.4.0.tar.gz", hash = "sha256:4bd13f85f80962f91a651a7356fe0472791a5f7a92f227822b5acf44795c626d"}, +] + +[package.dependencies] +cffi = {version = "*", markers = "implementation_name == \"pypy\""} + +[[package]] +name = "ray" +version = "2.47.1" +description = "Ray provides a simple, 
universal API for building distributed applications." +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "ray-2.47.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:36a30930e8d265e708df96f37f6f1f5484f4b97090d505912f992e045a69d310"}, + {file = "ray-2.47.1-cp310-cp310-macosx_12_0_x86_64.whl", hash = "sha256:7c03a1e366d3a868a55f8c2f728f5ce35ac85ddf093ac81d0c1a35bf1c25c377"}, + {file = "ray-2.47.1-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:6fc7df8657b8df684b77c2d1b643137ad745aa1c12ade34743f06cca79003df0"}, + {file = "ray-2.47.1-cp310-cp310-manylinux2014_x86_64.whl", hash = "sha256:84a96b4720175a0000521a48eb7aa915f3b419bb5cd6172d8dee005c3f23b813"}, + {file = "ray-2.47.1-cp310-cp310-win_amd64.whl", hash = "sha256:44900a1a72cb3bfb331db160a8975737c25945a97f376c70e72ccf35adf3b744"}, + {file = "ray-2.47.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a640d447e0e6cf63f85b9220c883ec02bb2b8e40a9c1d84efa012795c769ba68"}, + {file = "ray-2.47.1-cp311-cp311-macosx_12_0_x86_64.whl", hash = "sha256:feeba1e715cfd8737d3adcd2018d0cdabb7c6084fa4b093e638e6c7d42f3c956"}, + {file = "ray-2.47.1-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:db5ff652e9035f03c65e1742a706b76519f6e8a6744cc005396053ac8766fc46"}, + {file = "ray-2.47.1-cp311-cp311-manylinux2014_x86_64.whl", hash = "sha256:48961229614b2b56a535be510c8abc76e99a9aa7fa195b5c949bd0c6c69af40a"}, + {file = "ray-2.47.1-cp311-cp311-win_amd64.whl", hash = "sha256:bd1cba64070db06bbf79c0e075cdc4529193e2d0b19564f4f057b4193b29e912"}, + {file = "ray-2.47.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:322049c4546cf67e5efdad90c371c5508acbb193e5aaaf4038103c6c5ce1f578"}, + {file = "ray-2.47.1-cp312-cp312-macosx_12_0_x86_64.whl", hash = "sha256:e6d9c78e53ac89cabbc4056aecfec53c506c692e3132af9dae941d6180ef462f"}, + {file = "ray-2.47.1-cp312-cp312-manylinux2014_aarch64.whl", hash = "sha256:cd4e7eb475487364b5209963b17cefedcb7fbd3a816fdb6def7ea533ebd72424"}, + {file = 
"ray-2.47.1-cp312-cp312-manylinux2014_x86_64.whl", hash = "sha256:3eaeaeec3bbe2ca6493e530c30473d84b8580a7ac3256bb9183d8c63def5a92f"}, + {file = "ray-2.47.1-cp312-cp312-win_amd64.whl", hash = "sha256:601f23ba89918b7b3ffebf967328f7bdb605deaf8c103aad7820dc2722fe450c"}, + {file = "ray-2.47.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:8cd625d469ce15391e5f1f44ddf8dd30b2380f917603fa0172661229acb0011f"}, + {file = "ray-2.47.1-cp313-cp313-macosx_12_0_x86_64.whl", hash = "sha256:e578929f58b3f0c59c7544a96d864e26278238b755d13cd19ae798070c848e57"}, + {file = "ray-2.47.1-cp313-cp313-manylinux2014_aarch64.whl", hash = "sha256:d6ed6d182e25d6f77179dc77bc97a749c81765b13cb671a46db3203029389663"}, + {file = "ray-2.47.1-cp313-cp313-manylinux2014_x86_64.whl", hash = "sha256:252a471e8afb918b105cdbffb4cbebb0143baad75a06c8ffcde27ac317579ccb"}, + {file = "ray-2.47.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c21720f283a3df360ddec002a592ddfbaf520faf4cb1b86562a7b7c196ad96a0"}, + {file = "ray-2.47.1-cp39-cp39-macosx_12_0_x86_64.whl", hash = "sha256:6c7b4abe112c4d698243e30023bcbffe2c2c9a68416b95a6a0d50f9ca5725545"}, + {file = "ray-2.47.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:21f2689c1bbc688f9cd31a18bae2c9582027e91b508073849441167bb5077816"}, + {file = "ray-2.47.1-cp39-cp39-manylinux2014_x86_64.whl", hash = "sha256:106817f80087d21d24e63f6e56ea5ab7c387a25105eb65e6b783551f569534ea"}, + {file = "ray-2.47.1-cp39-cp39-win_amd64.whl", hash = "sha256:dee02ba9b8cd45c4eadc457183f6d80f1701b85f966d02cdacd5b11867cb7375"}, +] + +[package.dependencies] +aiohttp = {version = ">=3.7", optional = true, markers = "extra == \"default\""} +aiohttp-cors = {version = "*", optional = true, markers = "extra == \"default\""} +click = ">=7.0" +colorful = {version = "*", optional = true, markers = "extra == \"default\""} +filelock = "*" +fsspec = {version = "*", optional = true, markers = "extra == \"data\""} +grpcio = {version = ">=1.42.0", optional = true, markers = "python_version >= 
\"3.10\" and extra == \"default\""} +jsonschema = "*" +msgpack = ">=1.0.0,<2.0.0" +numpy = {version = ">=1.20", optional = true, markers = "extra == \"data\""} +opencensus = {version = "*", optional = true, markers = "extra == \"default\""} +opentelemetry-exporter-prometheus = {version = "*", optional = true, markers = "extra == \"default\""} +opentelemetry-proto = {version = "*", optional = true, markers = "extra == \"default\""} +opentelemetry-sdk = {version = "*", optional = true, markers = "extra == \"default\""} +packaging = "*" +pandas = {version = ">=1.3", optional = true, markers = "extra == \"data\""} +prometheus-client = {version = ">=0.7.1", optional = true, markers = "extra == \"default\""} +protobuf = ">=3.15.3,<3.19.5 || >3.19.5" +py-spy = [ + {version = ">=0.2.0", optional = true, markers = "python_version < \"3.12\" and extra == \"default\""}, + {version = ">=0.4.0", optional = true, markers = "python_version >= \"3.12\" and extra == \"default\""}, +] +pyarrow = [ + {version = ">=9.0.0,<18", optional = true, markers = "sys_platform == \"darwin\" and platform_machine == \"x86_64\" and extra == \"data\""}, + {version = ">=9.0.0", optional = true, markers = "(sys_platform != \"darwin\" or platform_machine != \"x86_64\") and extra == \"data\""}, +] +pydantic = {version = "<2.0.dev0 || >=2.5.dev0,<3", optional = true, markers = "extra == \"default\""} +pyyaml = "*" +requests = "*" +smart-open = {version = "*", optional = true, markers = "extra == \"default\""} +virtualenv = {version = ">=20.0.24,<20.21.1 || >20.21.1", optional = true, markers = "extra == \"default\""} + +[package.extras] +adag = ["cupy-cuda12x ; sys_platform != \"darwin\""] +air = ["aiohttp (>=3.7)", "aiohttp-cors", "colorful", "fastapi", "fsspec", "grpcio (>=1.32.0) ; python_version < \"3.10\"", "grpcio (>=1.42.0) ; python_version >= \"3.10\"", "numpy (>=1.20)", "opencensus", "opentelemetry-exporter-prometheus", "opentelemetry-proto", "opentelemetry-sdk", "pandas", "pandas (>=1.3)", 
"prometheus-client (>=0.7.1)", "py-spy (>=0.2.0) ; python_version < \"3.12\"", "py-spy (>=0.4.0) ; python_version >= \"3.12\"", "pyarrow (<18) ; sys_platform == \"darwin\" and platform_machine == \"x86_64\"", "pyarrow (>=9.0.0)", "pydantic (<2.0.dev0 || >=2.5.dev0,<3)", "requests", "smart-open", "starlette", "tensorboardX (>=1.9)", "uvicorn[standard]", "virtualenv (>=20.0.24,!=20.21.1)", "watchfiles"] +all = ["aiohttp (>=3.7)", "aiohttp-cors", "colorful", "cupy-cuda12x ; sys_platform != \"darwin\"", "dm-tree", "fastapi", "fsspec", "grpcio", "grpcio (!=1.56.0) ; sys_platform == \"darwin\"", "grpcio (>=1.32.0) ; python_version < \"3.10\"", "grpcio (>=1.42.0) ; python_version >= \"3.10\"", "gymnasium (==1.0.0)", "lz4", "memray ; sys_platform != \"win32\"", "numpy (>=1.20)", "opencensus", "opentelemetry-api", "opentelemetry-exporter-otlp", "opentelemetry-exporter-prometheus", "opentelemetry-proto", "opentelemetry-sdk", "ormsgpack (==1.7.0)", "pandas", "pandas (>=1.3)", "prometheus-client (>=0.7.1)", "py-spy (>=0.2.0) ; python_version < \"3.12\"", "py-spy (>=0.4.0) ; python_version >= \"3.12\"", "pyOpenSSL", "pyarrow (<18) ; sys_platform == \"darwin\" and platform_machine == \"x86_64\"", "pyarrow (>=9.0.0)", "pydantic (<2.0.dev0 || >=2.5.dev0,<3)", "pyyaml", "requests", "scipy", "smart-open", "starlette", "tensorboardX (>=1.9)", "uvicorn[standard]", "virtualenv (>=20.0.24,!=20.21.1)", "watchfiles"] +all-cpp = ["aiohttp (>=3.7)", "aiohttp-cors", "colorful", "cupy-cuda12x ; sys_platform != \"darwin\"", "dm-tree", "fastapi", "fsspec", "grpcio", "grpcio (!=1.56.0) ; sys_platform == \"darwin\"", "grpcio (>=1.32.0) ; python_version < \"3.10\"", "grpcio (>=1.42.0) ; python_version >= \"3.10\"", "gymnasium (==1.0.0)", "lz4", "memray ; sys_platform != \"win32\"", "numpy (>=1.20)", "opencensus", "opentelemetry-api", "opentelemetry-exporter-otlp", "opentelemetry-exporter-prometheus", "opentelemetry-proto", "opentelemetry-sdk", "ormsgpack (==1.7.0)", "pandas", "pandas (>=1.3)", 
"prometheus-client (>=0.7.1)", "py-spy (>=0.2.0) ; python_version < \"3.12\"", "py-spy (>=0.4.0) ; python_version >= \"3.12\"", "pyOpenSSL", "pyarrow (<18) ; sys_platform == \"darwin\" and platform_machine == \"x86_64\"", "pyarrow (>=9.0.0)", "pydantic (<2.0.dev0 || >=2.5.dev0,<3)", "pyyaml", "ray-cpp (==2.47.1)", "requests", "scipy", "smart-open", "starlette", "tensorboardX (>=1.9)", "uvicorn[standard]", "virtualenv (>=20.0.24,!=20.21.1)", "watchfiles"] +cgraph = ["cupy-cuda12x ; sys_platform != \"darwin\""] +client = ["grpcio", "grpcio (!=1.56.0) ; sys_platform == \"darwin\""] +cpp = ["ray-cpp (==2.47.1)"] +data = ["fsspec", "numpy (>=1.20)", "pandas (>=1.3)", "pyarrow (<18) ; sys_platform == \"darwin\" and platform_machine == \"x86_64\"", "pyarrow (>=9.0.0)"] +default = ["aiohttp (>=3.7)", "aiohttp-cors", "colorful", "grpcio (>=1.32.0) ; python_version < \"3.10\"", "grpcio (>=1.42.0) ; python_version >= \"3.10\"", "opencensus", "opentelemetry-exporter-prometheus", "opentelemetry-proto", "opentelemetry-sdk", "prometheus-client (>=0.7.1)", "py-spy (>=0.2.0) ; python_version < \"3.12\"", "py-spy (>=0.4.0) ; python_version >= \"3.12\"", "pydantic (<2.0.dev0 || >=2.5.dev0,<3)", "requests", "smart-open", "virtualenv (>=20.0.24,!=20.21.1)"] +llm = ["aiohttp (>=3.7)", "aiohttp-cors", "async-timeout ; python_version < \"3.11\"", "colorful", "fastapi", "fsspec", "grpcio (>=1.32.0) ; python_version < \"3.10\"", "grpcio (>=1.42.0) ; python_version >= \"3.10\"", "jsonref (>=1.1.0)", "jsonschema", "ninja", "numpy (>=1.20)", "opencensus", "opentelemetry-exporter-prometheus", "opentelemetry-proto", "opentelemetry-sdk", "pandas (>=1.3)", "prometheus-client (>=0.7.1)", "py-spy (>=0.2.0) ; python_version < \"3.12\"", "py-spy (>=0.4.0) ; python_version >= \"3.12\"", "pyarrow (<18) ; sys_platform == \"darwin\" and platform_machine == \"x86_64\"", "pyarrow (>=9.0.0)", "pydantic (<2.0.dev0 || >=2.5.dev0,<3)", "requests", "smart-open", "starlette", "typer", "uvicorn[standard]", 
"virtualenv (>=20.0.24,!=20.21.1)", "vllm (>=0.8.5)", "watchfiles"] +observability = ["memray ; sys_platform != \"win32\"", "opentelemetry-api", "opentelemetry-exporter-otlp", "opentelemetry-sdk"] +rllib = ["dm-tree", "fsspec", "gymnasium (==1.0.0)", "lz4", "ormsgpack (==1.7.0)", "pandas", "pyarrow (<18) ; sys_platform == \"darwin\" and platform_machine == \"x86_64\"", "pyarrow (>=9.0.0)", "pyyaml", "requests", "scipy", "tensorboardX (>=1.9)"] +serve = ["aiohttp (>=3.7)", "aiohttp-cors", "colorful", "fastapi", "grpcio (>=1.32.0) ; python_version < \"3.10\"", "grpcio (>=1.42.0) ; python_version >= \"3.10\"", "opencensus", "opentelemetry-exporter-prometheus", "opentelemetry-proto", "opentelemetry-sdk", "prometheus-client (>=0.7.1)", "py-spy (>=0.2.0) ; python_version < \"3.12\"", "py-spy (>=0.4.0) ; python_version >= \"3.12\"", "pydantic (<2.0.dev0 || >=2.5.dev0,<3)", "requests", "smart-open", "starlette", "uvicorn[standard]", "virtualenv (>=20.0.24,!=20.21.1)", "watchfiles"] +serve-grpc = ["aiohttp (>=3.7)", "aiohttp-cors", "colorful", "fastapi", "grpcio (>=1.32.0) ; python_version < \"3.10\"", "grpcio (>=1.42.0) ; python_version >= \"3.10\"", "opencensus", "opentelemetry-exporter-prometheus", "opentelemetry-proto", "opentelemetry-sdk", "prometheus-client (>=0.7.1)", "py-spy (>=0.2.0) ; python_version < \"3.12\"", "py-spy (>=0.4.0) ; python_version >= \"3.12\"", "pyOpenSSL", "pydantic (<2.0.dev0 || >=2.5.dev0,<3)", "requests", "smart-open", "starlette", "uvicorn[standard]", "virtualenv (>=20.0.24,!=20.21.1)", "watchfiles"] +train = ["fsspec", "pandas", "pyarrow (<18) ; sys_platform == \"darwin\" and platform_machine == \"x86_64\"", "pyarrow (>=9.0.0)", "pydantic (<2.0.dev0 || >=2.5.dev0,<3)", "requests", "tensorboardX (>=1.9)"] +tune = ["fsspec", "pandas", "pyarrow (<18) ; sys_platform == \"darwin\" and platform_machine == \"x86_64\"", "pyarrow (>=9.0.0)", "requests", "tensorboardX (>=1.9)"] + +[[package]] +name = "referencing" +version = "0.36.2" +description = 
"JSON Referencing + Python" +optional = false +python-versions = ">=3.9" +groups = ["main", "test"] +files = [ + {file = "referencing-0.36.2-py3-none-any.whl", hash = "sha256:e8699adbbf8b5c7de96d8ffa0eb5c158b3beafce084968e2ea8bb08c6794dcd0"}, + {file = "referencing-0.36.2.tar.gz", hash = "sha256:df2e89862cd09deabbdba16944cc3f10feb6b3e6f18e902f7cc25609a34775aa"}, +] + +[package.dependencies] +attrs = ">=22.2.0" +rpds-py = ">=0.7.0" +typing-extensions = {version = ">=4.4.0", markers = "python_version < \"3.13\""} + +[[package]] +name = "requests" +version = "2.32.4" +description = "Python HTTP for Humans." +optional = false +python-versions = ">=3.8" +groups = ["main", "docs", "test"] +files = [ + {file = "requests-2.32.4-py3-none-any.whl", hash = "sha256:27babd3cda2a6d50b30443204ee89830707d396671944c998b5975b031ac2b2c"}, + {file = "requests-2.32.4.tar.gz", hash = "sha256:27d0316682c8a29834d3264820024b62a36942083d52caf2f14c0591336d3422"}, +] + +[package.dependencies] +certifi = ">=2017.4.17" +charset_normalizer = ">=2,<4" +idna = ">=2.5,<4" +urllib3 = ">=1.21.1,<3" + +[package.extras] +socks = ["PySocks (>=1.5.6,!=1.5.7)"] +use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] + +[[package]] +name = "requests-oauthlib" +version = "2.0.0" +description = "OAuthlib authentication support for Requests." 
+optional = false +python-versions = ">=3.4" +groups = ["main"] +files = [ + {file = "requests-oauthlib-2.0.0.tar.gz", hash = "sha256:b3dffaebd884d8cd778494369603a9e7b58d29111bf6b41bdc2dcd87203af4e9"}, + {file = "requests_oauthlib-2.0.0-py2.py3-none-any.whl", hash = "sha256:7dd8a5c40426b779b0868c404bdef9768deccf22749cde15852df527e6269b36"}, +] + +[package.dependencies] +oauthlib = ">=3.0.0" +requests = ">=2.0.0" + +[package.extras] +rsa = ["oauthlib[signedtoken] (>=3.0.0)"] + +[[package]] +name = "rfc3339-validator" +version = "0.1.4" +description = "A pure python RFC3339 validator" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +groups = ["test"] +files = [ + {file = "rfc3339_validator-0.1.4-py2.py3-none-any.whl", hash = "sha256:24f6ec1eda14ef823da9e36ec7113124b39c04d50a4d3d3a3c2859577e7791fa"}, + {file = "rfc3339_validator-0.1.4.tar.gz", hash = "sha256:138a2abdf93304ad60530167e51d2dfb9549521a836871b88d7f4695d0022f6b"}, +] + +[package.dependencies] +six = "*" + +[[package]] +name = "rfc3986-validator" +version = "0.1.1" +description = "Pure python rfc3986 validator" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +groups = ["test"] +files = [ + {file = "rfc3986_validator-0.1.1-py2.py3-none-any.whl", hash = "sha256:2f235c432ef459970b4306369336b9d5dbdda31b510ca1e327636e01f528bfa9"}, + {file = "rfc3986_validator-0.1.1.tar.gz", hash = "sha256:3d44bde7921b3b9ec3ae4e3adca370438eccebc676456449b145d533b240d055"}, +] + +[[package]] +name = "rich" +version = "13.9.4" +description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" +optional = false +python-versions = ">=3.8.0" +groups = ["main"] +files = [ + {file = "rich-13.9.4-py3-none-any.whl", hash = "sha256:6049d5e6ec054bf2779ab3358186963bac2ea89175919d699e378b99738c2a90"}, + {file = "rich-13.9.4.tar.gz", hash = "sha256:439594978a49a09530cff7ebc4b5c7103ef57baf48d5ea3184f21d9a2befa098"}, 
+] + +[package.dependencies] +markdown-it-py = ">=2.2.0" +pygments = ">=2.13.0,<3.0.0" + +[package.extras] +jupyter = ["ipywidgets (>=7.5.1,<9)"] + +[[package]] +name = "rpds-py" +version = "0.25.1" +description = "Python bindings to Rust's persistent data structures (rpds)" +optional = false +python-versions = ">=3.9" +groups = ["main", "test"] +files = [ + {file = "rpds_py-0.25.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:f4ad628b5174d5315761b67f212774a32f5bad5e61396d38108bd801c0a8f5d9"}, + {file = "rpds_py-0.25.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8c742af695f7525e559c16f1562cf2323db0e3f0fbdcabdf6865b095256b2d40"}, + {file = "rpds_py-0.25.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:605ffe7769e24b1800b4d024d24034405d9404f0bc2f55b6db3362cd34145a6f"}, + {file = "rpds_py-0.25.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ccc6f3ddef93243538be76f8e47045b4aad7a66a212cd3a0f23e34469473d36b"}, + {file = "rpds_py-0.25.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f70316f760174ca04492b5ab01be631a8ae30cadab1d1081035136ba12738cfa"}, + {file = "rpds_py-0.25.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e1dafef8df605fdb46edcc0bf1573dea0d6d7b01ba87f85cd04dc855b2b4479e"}, + {file = "rpds_py-0.25.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0701942049095741a8aeb298a31b203e735d1c61f4423511d2b1a41dcd8a16da"}, + {file = "rpds_py-0.25.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e87798852ae0b37c88babb7f7bbbb3e3fecc562a1c340195b44c7e24d403e380"}, + {file = "rpds_py-0.25.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:3bcce0edc1488906c2d4c75c94c70a0417e83920dd4c88fec1078c94843a6ce9"}, + {file = "rpds_py-0.25.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e2f6a2347d3440ae789505693a02836383426249d5293541cd712e07e7aecf54"}, + {file = 
"rpds_py-0.25.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:4fd52d3455a0aa997734f3835cbc4c9f32571345143960e7d7ebfe7b5fbfa3b2"}, + {file = "rpds_py-0.25.1-cp310-cp310-win32.whl", hash = "sha256:3f0b1798cae2bbbc9b9db44ee068c556d4737911ad53a4e5093d09d04b3bbc24"}, + {file = "rpds_py-0.25.1-cp310-cp310-win_amd64.whl", hash = "sha256:3ebd879ab996537fc510a2be58c59915b5dd63bccb06d1ef514fee787e05984a"}, + {file = "rpds_py-0.25.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:5f048bbf18b1f9120685c6d6bb70cc1a52c8cc11bdd04e643d28d3be0baf666d"}, + {file = "rpds_py-0.25.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4fbb0dbba559959fcb5d0735a0f87cdbca9e95dac87982e9b95c0f8f7ad10255"}, + {file = "rpds_py-0.25.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d4ca54b9cf9d80b4016a67a0193ebe0bcf29f6b0a96f09db942087e294d3d4c2"}, + {file = "rpds_py-0.25.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1ee3e26eb83d39b886d2cb6e06ea701bba82ef30a0de044d34626ede51ec98b0"}, + {file = "rpds_py-0.25.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:89706d0683c73a26f76a5315d893c051324d771196ae8b13e6ffa1ffaf5e574f"}, + {file = "rpds_py-0.25.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c2013ee878c76269c7b557a9a9c042335d732e89d482606990b70a839635feb7"}, + {file = "rpds_py-0.25.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45e484db65e5380804afbec784522de84fa95e6bb92ef1bd3325d33d13efaebd"}, + {file = "rpds_py-0.25.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:48d64155d02127c249695abb87d39f0faf410733428d499867606be138161d65"}, + {file = "rpds_py-0.25.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:048893e902132fd6548a2e661fb38bf4896a89eea95ac5816cf443524a85556f"}, + {file = "rpds_py-0.25.1-cp311-cp311-musllinux_1_2_i686.whl", hash = 
"sha256:0317177b1e8691ab5879f4f33f4b6dc55ad3b344399e23df2e499de7b10a548d"}, + {file = "rpds_py-0.25.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:bffcf57826d77a4151962bf1701374e0fc87f536e56ec46f1abdd6a903354042"}, + {file = "rpds_py-0.25.1-cp311-cp311-win32.whl", hash = "sha256:cda776f1967cb304816173b30994faaf2fd5bcb37e73118a47964a02c348e1bc"}, + {file = "rpds_py-0.25.1-cp311-cp311-win_amd64.whl", hash = "sha256:dc3c1ff0abc91444cd20ec643d0f805df9a3661fcacf9c95000329f3ddf268a4"}, + {file = "rpds_py-0.25.1-cp311-cp311-win_arm64.whl", hash = "sha256:5a3ddb74b0985c4387719fc536faced33cadf2172769540c62e2a94b7b9be1c4"}, + {file = "rpds_py-0.25.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:b5ffe453cde61f73fea9430223c81d29e2fbf412a6073951102146c84e19e34c"}, + {file = "rpds_py-0.25.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:115874ae5e2fdcfc16b2aedc95b5eef4aebe91b28e7e21951eda8a5dc0d3461b"}, + {file = "rpds_py-0.25.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a714bf6e5e81b0e570d01f56e0c89c6375101b8463999ead3a93a5d2a4af91fa"}, + {file = "rpds_py-0.25.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:35634369325906bcd01577da4c19e3b9541a15e99f31e91a02d010816b49bfda"}, + {file = "rpds_py-0.25.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d4cb2b3ddc16710548801c6fcc0cfcdeeff9dafbc983f77265877793f2660309"}, + {file = "rpds_py-0.25.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9ceca1cf097ed77e1a51f1dbc8d174d10cb5931c188a4505ff9f3e119dfe519b"}, + {file = "rpds_py-0.25.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2c2cd1a4b0c2b8c5e31ffff50d09f39906fe351389ba143c195566056c13a7ea"}, + {file = "rpds_py-0.25.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1de336a4b164c9188cb23f3703adb74a7623ab32d20090d0e9bf499a2203ad65"}, + {file = 
"rpds_py-0.25.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:9fca84a15333e925dd59ce01da0ffe2ffe0d6e5d29a9eeba2148916d1824948c"}, + {file = "rpds_py-0.25.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:88ec04afe0c59fa64e2f6ea0dd9657e04fc83e38de90f6de201954b4d4eb59bd"}, + {file = "rpds_py-0.25.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:a8bd2f19e312ce3e1d2c635618e8a8d8132892bb746a7cf74780a489f0f6cdcb"}, + {file = "rpds_py-0.25.1-cp312-cp312-win32.whl", hash = "sha256:e5e2f7280d8d0d3ef06f3ec1b4fd598d386cc6f0721e54f09109a8132182fbfe"}, + {file = "rpds_py-0.25.1-cp312-cp312-win_amd64.whl", hash = "sha256:db58483f71c5db67d643857404da360dce3573031586034b7d59f245144cc192"}, + {file = "rpds_py-0.25.1-cp312-cp312-win_arm64.whl", hash = "sha256:6d50841c425d16faf3206ddbba44c21aa3310a0cebc3c1cdfc3e3f4f9f6f5728"}, + {file = "rpds_py-0.25.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:659d87430a8c8c704d52d094f5ba6fa72ef13b4d385b7e542a08fc240cb4a559"}, + {file = "rpds_py-0.25.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:68f6f060f0bbdfb0245267da014d3a6da9be127fe3e8cc4a68c6f833f8a23bb1"}, + {file = "rpds_py-0.25.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:083a9513a33e0b92cf6e7a6366036c6bb43ea595332c1ab5c8ae329e4bcc0a9c"}, + {file = "rpds_py-0.25.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:816568614ecb22b18a010c7a12559c19f6fe993526af88e95a76d5a60b8b75fb"}, + {file = "rpds_py-0.25.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3c6564c0947a7f52e4792983f8e6cf9bac140438ebf81f527a21d944f2fd0a40"}, + {file = "rpds_py-0.25.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5c4a128527fe415d73cf1f70a9a688d06130d5810be69f3b553bf7b45e8acf79"}, + {file = "rpds_py-0.25.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a49e1d7a4978ed554f095430b89ecc23f42014a50ac385eb0c4d163ce213c325"}, + {file = 
"rpds_py-0.25.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d74ec9bc0e2feb81d3f16946b005748119c0f52a153f6db6a29e8cd68636f295"}, + {file = "rpds_py-0.25.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:3af5b4cc10fa41e5bc64e5c198a1b2d2864337f8fcbb9a67e747e34002ce812b"}, + {file = "rpds_py-0.25.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:79dc317a5f1c51fd9c6a0c4f48209c6b8526d0524a6904fc1076476e79b00f98"}, + {file = "rpds_py-0.25.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:1521031351865e0181bc585147624d66b3b00a84109b57fcb7a779c3ec3772cd"}, + {file = "rpds_py-0.25.1-cp313-cp313-win32.whl", hash = "sha256:5d473be2b13600b93a5675d78f59e63b51b1ba2d0476893415dfbb5477e65b31"}, + {file = "rpds_py-0.25.1-cp313-cp313-win_amd64.whl", hash = "sha256:a7b74e92a3b212390bdce1d93da9f6488c3878c1d434c5e751cbc202c5e09500"}, + {file = "rpds_py-0.25.1-cp313-cp313-win_arm64.whl", hash = "sha256:dd326a81afe332ede08eb39ab75b301d5676802cdffd3a8f287a5f0b694dc3f5"}, + {file = "rpds_py-0.25.1-cp313-cp313t-macosx_10_12_x86_64.whl", hash = "sha256:a58d1ed49a94d4183483a3ce0af22f20318d4a1434acee255d683ad90bf78129"}, + {file = "rpds_py-0.25.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:f251bf23deb8332823aef1da169d5d89fa84c89f67bdfb566c49dea1fccfd50d"}, + {file = "rpds_py-0.25.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8dbd586bfa270c1103ece2109314dd423df1fa3d9719928b5d09e4840cec0d72"}, + {file = "rpds_py-0.25.1-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6d273f136e912aa101a9274c3145dcbddbe4bac560e77e6d5b3c9f6e0ed06d34"}, + {file = "rpds_py-0.25.1-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:666fa7b1bd0a3810a7f18f6d3a25ccd8866291fbbc3c9b912b917a6715874bb9"}, + {file = "rpds_py-0.25.1-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:921954d7fbf3fccc7de8f717799304b14b6d9a45bbeec5a8d7408ccbf531faf5"}, + {file = 
"rpds_py-0.25.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3d86373ff19ca0441ebeb696ef64cb58b8b5cbacffcda5a0ec2f3911732a194"}, + {file = "rpds_py-0.25.1-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c8980cde3bb8575e7c956a530f2c217c1d6aac453474bf3ea0f9c89868b531b6"}, + {file = "rpds_py-0.25.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:8eb8c84ecea987a2523e057c0d950bcb3f789696c0499290b8d7b3107a719d78"}, + {file = "rpds_py-0.25.1-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:e43a005671a9ed5a650f3bc39e4dbccd6d4326b24fb5ea8be5f3a43a6f576c72"}, + {file = "rpds_py-0.25.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:58f77c60956501a4a627749a6dcb78dac522f249dd96b5c9f1c6af29bfacfb66"}, + {file = "rpds_py-0.25.1-cp313-cp313t-win32.whl", hash = "sha256:2cb9e5b5e26fc02c8a4345048cd9998c2aca7c2712bd1b36da0c72ee969a3523"}, + {file = "rpds_py-0.25.1-cp313-cp313t-win_amd64.whl", hash = "sha256:401ca1c4a20cc0510d3435d89c069fe0a9ae2ee6495135ac46bdd49ec0495763"}, + {file = "rpds_py-0.25.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:ce4c8e485a3c59593f1a6f683cf0ea5ab1c1dc94d11eea5619e4fb5228b40fbd"}, + {file = "rpds_py-0.25.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d8222acdb51a22929c3b2ddb236b69c59c72af4019d2cba961e2f9add9b6e634"}, + {file = "rpds_py-0.25.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4593c4eae9b27d22df41cde518b4b9e4464d139e4322e2127daa9b5b981b76be"}, + {file = "rpds_py-0.25.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bd035756830c712b64725a76327ce80e82ed12ebab361d3a1cdc0f51ea21acb0"}, + {file = "rpds_py-0.25.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:114a07e85f32b125404f28f2ed0ba431685151c037a26032b213c882f26eb908"}, + {file = "rpds_py-0.25.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:dec21e02e6cc932538b5203d3a8bd6aa1480c98c4914cb88eea064ecdbc6396a"}, + {file = "rpds_py-0.25.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:09eab132f41bf792c7a0ea1578e55df3f3e7f61888e340779b06050a9a3f16e9"}, + {file = "rpds_py-0.25.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c98f126c4fc697b84c423e387337d5b07e4a61e9feac494362a59fd7a2d9ed80"}, + {file = "rpds_py-0.25.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:0e6a327af8ebf6baba1c10fadd04964c1965d375d318f4435d5f3f9651550f4a"}, + {file = "rpds_py-0.25.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:bc120d1132cff853ff617754196d0ac0ae63befe7c8498bd67731ba368abe451"}, + {file = "rpds_py-0.25.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:140f61d9bed7839446bdd44852e30195c8e520f81329b4201ceead4d64eb3a9f"}, + {file = "rpds_py-0.25.1-cp39-cp39-win32.whl", hash = "sha256:9c006f3aadeda131b438c3092124bd196b66312f0caa5823ef09585a669cf449"}, + {file = "rpds_py-0.25.1-cp39-cp39-win_amd64.whl", hash = "sha256:a61d0b2c7c9a0ae45732a77844917b427ff16ad5464b4d4f5e4adb955f582890"}, + {file = "rpds_py-0.25.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:b24bf3cd93d5b6ecfbedec73b15f143596c88ee249fa98cefa9a9dc9d92c6f28"}, + {file = "rpds_py-0.25.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:0eb90e94f43e5085623932b68840b6f379f26db7b5c2e6bcef3179bd83c9330f"}, + {file = "rpds_py-0.25.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d50e4864498a9ab639d6d8854b25e80642bd362ff104312d9770b05d66e5fb13"}, + {file = "rpds_py-0.25.1-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7c9409b47ba0650544b0bb3c188243b83654dfe55dcc173a86832314e1a6a35d"}, + {file = "rpds_py-0.25.1-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:796ad874c89127c91970652a4ee8b00d56368b7e00d3477f4415fe78164c8000"}, + {file = 
"rpds_py-0.25.1-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:85608eb70a659bf4c1142b2781083d4b7c0c4e2c90eff11856a9754e965b2540"}, + {file = "rpds_py-0.25.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c4feb9211d15d9160bc85fa72fed46432cdc143eb9cf6d5ca377335a921ac37b"}, + {file = "rpds_py-0.25.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ccfa689b9246c48947d31dd9d8b16d89a0ecc8e0e26ea5253068efb6c542b76e"}, + {file = "rpds_py-0.25.1-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:3c5b317ecbd8226887994852e85de562f7177add602514d4ac40f87de3ae45a8"}, + {file = "rpds_py-0.25.1-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:454601988aab2c6e8fd49e7634c65476b2b919647626208e376afcd22019eeb8"}, + {file = "rpds_py-0.25.1-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:1c0c434a53714358532d13539272db75a5ed9df75a4a090a753ac7173ec14e11"}, + {file = "rpds_py-0.25.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:f73ce1512e04fbe2bc97836e89830d6b4314c171587a99688082d090f934d20a"}, + {file = "rpds_py-0.25.1-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:ee86d81551ec68a5c25373c5643d343150cc54672b5e9a0cafc93c1870a53954"}, + {file = "rpds_py-0.25.1-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:89c24300cd4a8e4a51e55c31a8ff3918e6651b241ee8876a42cc2b2a078533ba"}, + {file = "rpds_py-0.25.1-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:771c16060ff4e79584dc48902a91ba79fd93eade3aa3a12d6d2a4aadaf7d542b"}, + {file = "rpds_py-0.25.1-pp311-pypy311_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:785ffacd0ee61c3e60bdfde93baa6d7c10d86f15655bd706c89da08068dc5038"}, + {file = "rpds_py-0.25.1-pp311-pypy311_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2a40046a529cc15cef88ac5ab589f83f739e2d332cb4d7399072242400ed68c9"}, + {file = 
"rpds_py-0.25.1-pp311-pypy311_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:85fc223d9c76cabe5d0bff82214459189720dc135db45f9f66aa7cffbf9ff6c1"}, + {file = "rpds_py-0.25.1-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b0be9965f93c222fb9b4cc254235b3b2b215796c03ef5ee64f995b1b69af0762"}, + {file = "rpds_py-0.25.1-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8378fa4a940f3fb509c081e06cb7f7f2adae8cf46ef258b0e0ed7519facd573e"}, + {file = "rpds_py-0.25.1-pp311-pypy311_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:33358883a4490287e67a2c391dfaea4d9359860281db3292b6886bf0be3d8692"}, + {file = "rpds_py-0.25.1-pp311-pypy311_pp73-musllinux_1_2_i686.whl", hash = "sha256:1d1fadd539298e70cac2f2cb36f5b8a65f742b9b9f1014dd4ea1f7785e2470bf"}, + {file = "rpds_py-0.25.1-pp311-pypy311_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:9a46c2fb2545e21181445515960006e85d22025bd2fe6db23e76daec6eb689fe"}, + {file = "rpds_py-0.25.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:50f2c501a89c9a5f4e454b126193c5495b9fb441a75b298c60591d8a2eb92e1b"}, + {file = "rpds_py-0.25.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:7d779b325cc8238227c47fbc53964c8cc9a941d5dbae87aa007a1f08f2f77b23"}, + {file = "rpds_py-0.25.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:036ded36bedb727beeabc16dc1dad7cb154b3fa444e936a03b67a86dc6a5066e"}, + {file = "rpds_py-0.25.1-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:245550f5a1ac98504147cba96ffec8fabc22b610742e9150138e5d60774686d7"}, + {file = "rpds_py-0.25.1-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ff7c23ba0a88cb7b104281a99476cccadf29de2a0ef5ce864959a52675b1ca83"}, + {file = "rpds_py-0.25.1-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e37caa8cdb3b7cf24786451a0bdb853f6347b8b92005eeb64225ae1db54d1c2b"}, + {file = 
"rpds_py-0.25.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f2f48ab00181600ee266a095fe815134eb456163f7d6699f525dee471f312cf"}, + {file = "rpds_py-0.25.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9e5fc7484fa7dce57e25063b0ec9638ff02a908304f861d81ea49273e43838c1"}, + {file = "rpds_py-0.25.1-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:d3c10228d6cf6fe2b63d2e7985e94f6916fa46940df46b70449e9ff9297bd3d1"}, + {file = "rpds_py-0.25.1-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:5d9e40f32745db28c1ef7aad23f6fc458dc1e29945bd6781060f0d15628b8ddf"}, + {file = "rpds_py-0.25.1-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:35a8d1a24b5936b35c5003313bc177403d8bdef0f8b24f28b1c4a255f94ea992"}, + {file = "rpds_py-0.25.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:6099263f526efff9cf3883dfef505518730f7a7a93049b1d90d42e50a22b4793"}, + {file = "rpds_py-0.25.1.tar.gz", hash = "sha256:8960b6dac09b62dac26e75d7e2c4a22efb835d827a7278c34f72b2b84fa160e3"}, +] + +[[package]] +name = "rsa" +version = "4.9.1" +description = "Pure-Python RSA implementation" +optional = false +python-versions = "<4,>=3.6" +groups = ["main"] +files = [ + {file = "rsa-4.9.1-py3-none-any.whl", hash = "sha256:68635866661c6836b8d39430f97a996acbd61bfa49406748ea243539fe239762"}, + {file = "rsa-4.9.1.tar.gz", hash = "sha256:e7bdbfdb5497da4c07dfd35530e1a902659db6ff241e39d9953cad06ebd0ae75"}, +] + +[package.dependencies] +pyasn1 = ">=0.1.3" + +[[package]] +name = "send2trash" +version = "1.8.3" +description = "Send file to trash natively under Mac OS X, Windows and Linux" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" +groups = ["test"] +files = [ + {file = "Send2Trash-1.8.3-py3-none-any.whl", hash = "sha256:0c31227e0bd08961c7665474a3d1ef7193929fedda4233843689baa056be46c9"}, + {file = "Send2Trash-1.8.3.tar.gz", hash = 
"sha256:b18e7a3966d99871aefeb00cfbcfdced55ce4871194810fc71f4aa484b953abf"}, +] + +[package.extras] +nativelib = ["pyobjc-framework-Cocoa ; sys_platform == \"darwin\"", "pywin32 ; sys_platform == \"win32\""] +objc = ["pyobjc-framework-Cocoa ; sys_platform == \"darwin\""] +win32 = ["pywin32 ; sys_platform == \"win32\""] + +[[package]] +name = "setuptools" +version = "80.9.0" +description = "Easily download, build, install, upgrade, and uninstall Python packages" +optional = false +python-versions = ">=3.9" +groups = ["test"] +files = [ + {file = "setuptools-80.9.0-py3-none-any.whl", hash = "sha256:062d34222ad13e0cc312a4c02d73f059e86a4acbfbdea8f8f76b28c99f306922"}, + {file = "setuptools-80.9.0.tar.gz", hash = "sha256:f36b47402ecde768dbfafc46e8e4207b4360c654f1f3bb84475f0a28628fb19c"}, +] + +[package.extras] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\"", "ruff (>=0.8.0) ; sys_platform != \"cygwin\""] +core = ["importlib_metadata (>=6) ; python_version < \"3.10\"", "jaraco.functools (>=4)", "jaraco.text (>=3.7)", "more_itertools", "more_itertools (>=8.8)", "packaging (>=24.2)", "platformdirs (>=4.2.2)", "tomli (>=2.0.1) ; python_version < \"3.11\"", "wheel (>=0.43.0)"] +cover = ["pytest-cov"] +doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier", "towncrier (<24.7)"] +enabler = ["pytest-enabler (>=2.2)"] +test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21) ; python_version >= \"3.9\" and sys_platform != \"cygwin\"", "jaraco.envs (>=2.2)", "jaraco.path (>=3.7.2)", "jaraco.test (>=5.5)", "packaging (>=24.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-home (>=0.5)", "pytest-perf ; 
sys_platform != \"cygwin\"", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel (>=0.44.0)"] +type = ["importlib_metadata (>=7.0.2) ; python_version < \"3.10\"", "jaraco.develop (>=7.21) ; sys_platform != \"cygwin\"", "mypy (==1.14.*)", "pytest-mypy"] + +[[package]] +name = "six" +version = "1.17.0" +description = "Python 2 and 3 compatibility utilities" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +groups = ["main", "test"] +files = [ + {file = "six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274"}, + {file = "six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81"}, +] + +[[package]] +name = "smart-open" +version = "7.1.0" +description = "Utils for streaming large files (S3, HDFS, GCS, Azure Blob Storage, gzip, bz2...)" +optional = false +python-versions = "<4.0,>=3.7" +groups = ["main"] +files = [ + {file = "smart_open-7.1.0-py3-none-any.whl", hash = "sha256:4b8489bb6058196258bafe901730c7db0dcf4f083f316e97269c66f45502055b"}, + {file = "smart_open-7.1.0.tar.gz", hash = "sha256:a4f09f84f0f6d3637c6543aca7b5487438877a21360e7368ccf1f704789752ba"}, +] + +[package.dependencies] +wrapt = "*" + +[package.extras] +all = ["azure-common", "azure-core", "azure-storage-blob", "boto3", "google-cloud-storage (>=2.6.0)", "paramiko", "requests", "zstandard"] +azure = ["azure-common", "azure-core", "azure-storage-blob"] +gcs = ["google-cloud-storage (>=2.6.0)"] +http = ["requests"] +s3 = ["boto3"] +ssh = ["paramiko"] +test = ["awscli", "azure-common", "azure-core", "azure-storage-blob", "boto3", "google-cloud-storage (>=2.6.0)", "moto[server]", "numpy", "paramiko", "pyopenssl", "pytest", "pytest-benchmark", "pytest-rerunfailures", "requests", "responses", "zstandard"] +webhdfs = ["requests"] +zst = ["zstandard"] + +[[package]] +name = "sniffio" +version = "1.3.1" +description = "Sniff out 
which async library your code is running under" +optional = false +python-versions = ">=3.7" +groups = ["test"] +files = [ + {file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"}, + {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, +] + +[[package]] +name = "snowballstemmer" +version = "3.0.1" +description = "This package provides 32 stemmers for 30 languages generated from Snowball algorithms." +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*" +groups = ["docs"] +files = [ + {file = "snowballstemmer-3.0.1-py3-none-any.whl", hash = "sha256:6cd7b3897da8d6c9ffb968a6781fa6532dce9c3618a4b127d920dab764a19064"}, + {file = "snowballstemmer-3.0.1.tar.gz", hash = "sha256:6d5eeeec8e9f84d4d56b847692bacf79bc2c8e90c7f80ca4444ff8b6f2e52895"}, +] + +[[package]] +name = "soupsieve" +version = "2.7" +description = "A modern CSS selector implementation for Beautiful Soup." 
+optional = false +python-versions = ">=3.8" +groups = ["test"] +files = [ + {file = "soupsieve-2.7-py3-none-any.whl", hash = "sha256:6e60cc5c1ffaf1cebcc12e8188320b72071e922c2e897f737cadce79ad5d30c4"}, + {file = "soupsieve-2.7.tar.gz", hash = "sha256:ad282f9b6926286d2ead4750552c8a6142bc4c783fd66b0293547c8fe6ae126a"}, +] + +[[package]] +name = "sphinx" +version = "7.4.7" +description = "Python documentation generator" +optional = false +python-versions = ">=3.9" +groups = ["docs"] +files = [ + {file = "sphinx-7.4.7-py3-none-any.whl", hash = "sha256:c2419e2135d11f1951cd994d6eb18a1835bd8fdd8429f9ca375dc1f3281bd239"}, + {file = "sphinx-7.4.7.tar.gz", hash = "sha256:242f92a7ea7e6c5b406fdc2615413890ba9f699114a9c09192d7dfead2ee9cfe"}, +] + +[package.dependencies] +alabaster = ">=0.7.14,<0.8.0" +babel = ">=2.13" +colorama = {version = ">=0.4.6", markers = "sys_platform == \"win32\""} +docutils = ">=0.20,<0.22" +imagesize = ">=1.3" +Jinja2 = ">=3.1" +packaging = ">=23.0" +Pygments = ">=2.17" +requests = ">=2.30.0" +snowballstemmer = ">=2.2" +sphinxcontrib-applehelp = "*" +sphinxcontrib-devhelp = "*" +sphinxcontrib-htmlhelp = ">=2.0.0" +sphinxcontrib-jsmath = "*" +sphinxcontrib-qthelp = "*" +sphinxcontrib-serializinghtml = ">=1.1.9" + +[package.extras] +docs = ["sphinxcontrib-websupport"] +lint = ["flake8 (>=6.0)", "importlib-metadata (>=6.0)", "mypy (==1.10.1)", "pytest (>=6.0)", "ruff (==0.5.2)", "sphinx-lint (>=0.9)", "tomli (>=2)", "types-docutils (==0.21.0.20240711)", "types-requests (>=2.30.0)"] +test = ["cython (>=3.0)", "defusedxml (>=0.7.1)", "pytest (>=8.0)", "setuptools (>=70.0)", "typing_extensions (>=4.9)"] + +[[package]] +name = "sphinx-rtd-theme" +version = "3.0.1" +description = "Read the Docs theme for Sphinx" +optional = false +python-versions = ">=3.8" +groups = ["docs"] +files = [ + {file = "sphinx_rtd_theme-3.0.1-py2.py3-none-any.whl", hash = "sha256:921c0ece75e90633ee876bd7b148cfaad136b481907ad154ac3669b6fc957916"}, + {file = 
"sphinx_rtd_theme-3.0.1.tar.gz", hash = "sha256:a4c5745d1b06dfcb80b7704fe532eb765b44065a8fad9851e4258c8804140703"}, +] + +[package.dependencies] +docutils = ">0.18,<0.22" +sphinx = ">=6,<9" +sphinxcontrib-jquery = ">=4,<5" + +[package.extras] +dev = ["bump2version", "transifex-client", "twine", "wheel"] + +[[package]] +name = "sphinxcontrib-applehelp" +version = "2.0.0" +description = "sphinxcontrib-applehelp is a Sphinx extension which outputs Apple help books" +optional = false +python-versions = ">=3.9" +groups = ["docs"] +files = [ + {file = "sphinxcontrib_applehelp-2.0.0-py3-none-any.whl", hash = "sha256:4cd3f0ec4ac5dd9c17ec65e9ab272c9b867ea77425228e68ecf08d6b28ddbdb5"}, + {file = "sphinxcontrib_applehelp-2.0.0.tar.gz", hash = "sha256:2f29ef331735ce958efa4734873f084941970894c6090408b079c61b2e1c06d1"}, +] + +[package.extras] +lint = ["mypy", "ruff (==0.5.5)", "types-docutils"] +standalone = ["Sphinx (>=5)"] +test = ["pytest"] + +[[package]] +name = "sphinxcontrib-devhelp" +version = "2.0.0" +description = "sphinxcontrib-devhelp is a sphinx extension which outputs Devhelp documents" +optional = false +python-versions = ">=3.9" +groups = ["docs"] +files = [ + {file = "sphinxcontrib_devhelp-2.0.0-py3-none-any.whl", hash = "sha256:aefb8b83854e4b0998877524d1029fd3e6879210422ee3780459e28a1f03a8a2"}, + {file = "sphinxcontrib_devhelp-2.0.0.tar.gz", hash = "sha256:411f5d96d445d1d73bb5d52133377b4248ec79db5c793ce7dbe59e074b4dd1ad"}, +] + +[package.extras] +lint = ["mypy", "ruff (==0.5.5)", "types-docutils"] +standalone = ["Sphinx (>=5)"] +test = ["pytest"] + +[[package]] +name = "sphinxcontrib-htmlhelp" +version = "2.1.0" +description = "sphinxcontrib-htmlhelp is a sphinx extension which renders HTML help files" +optional = false +python-versions = ">=3.9" +groups = ["docs"] +files = [ + {file = "sphinxcontrib_htmlhelp-2.1.0-py3-none-any.whl", hash = "sha256:166759820b47002d22914d64a075ce08f4c46818e17cfc9470a9786b759b19f8"}, + {file = 
"sphinxcontrib_htmlhelp-2.1.0.tar.gz", hash = "sha256:c9e2916ace8aad64cc13a0d233ee22317f2b9025b9cf3295249fa985cc7082e9"}, +] + +[package.extras] +lint = ["mypy", "ruff (==0.5.5)", "types-docutils"] +standalone = ["Sphinx (>=5)"] +test = ["html5lib", "pytest"] + +[[package]] +name = "sphinxcontrib-jquery" +version = "4.1" +description = "Extension to include jQuery on newer Sphinx releases" +optional = false +python-versions = ">=2.7" +groups = ["docs"] +files = [ + {file = "sphinxcontrib-jquery-4.1.tar.gz", hash = "sha256:1620739f04e36a2c779f1a131a2dfd49b2fd07351bf1968ced074365933abc7a"}, + {file = "sphinxcontrib_jquery-4.1-py2.py3-none-any.whl", hash = "sha256:f936030d7d0147dd026a4f2b5a57343d233f1fc7b363f68b3d4f1cb0993878ae"}, +] + +[package.dependencies] +Sphinx = ">=1.8" + +[[package]] +name = "sphinxcontrib-jsmath" +version = "1.0.1" +description = "A sphinx extension which renders display math in HTML via JavaScript" +optional = false +python-versions = ">=3.5" +groups = ["docs"] +files = [ + {file = "sphinxcontrib-jsmath-1.0.1.tar.gz", hash = "sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8"}, + {file = "sphinxcontrib_jsmath-1.0.1-py2.py3-none-any.whl", hash = "sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178"}, +] + +[package.extras] +test = ["flake8", "mypy", "pytest"] + +[[package]] +name = "sphinxcontrib-qthelp" +version = "2.0.0" +description = "sphinxcontrib-qthelp is a sphinx extension which outputs QtHelp documents" +optional = false +python-versions = ">=3.9" +groups = ["docs"] +files = [ + {file = "sphinxcontrib_qthelp-2.0.0-py3-none-any.whl", hash = "sha256:b18a828cdba941ccd6ee8445dbe72ffa3ef8cbe7505d8cd1fa0d42d3f2d5f3eb"}, + {file = "sphinxcontrib_qthelp-2.0.0.tar.gz", hash = "sha256:4fe7d0ac8fc171045be623aba3e2a8f613f8682731f9153bb2e40ece16b9bbab"}, +] + +[package.extras] +lint = ["mypy", "ruff (==0.5.5)", "types-docutils"] +standalone = ["Sphinx (>=5)"] +test = ["defusedxml (>=0.7.1)", "pytest"] + 
+[[package]] +name = "sphinxcontrib-serializinghtml" +version = "2.0.0" +description = "sphinxcontrib-serializinghtml is a sphinx extension which outputs \"serialized\" HTML files (json and pickle)" +optional = false +python-versions = ">=3.9" +groups = ["docs"] +files = [ + {file = "sphinxcontrib_serializinghtml-2.0.0-py3-none-any.whl", hash = "sha256:6e2cb0eef194e10c27ec0023bfeb25badbbb5868244cf5bc5bdc04e4464bf331"}, + {file = "sphinxcontrib_serializinghtml-2.0.0.tar.gz", hash = "sha256:e9d912827f872c029017a53f0ef2180b327c3f7fd23c87229f7a8e8b70031d4d"}, +] + +[package.extras] +lint = ["mypy", "ruff (==0.5.5)", "types-docutils"] +standalone = ["Sphinx (>=5)"] +test = ["pytest"] + +[[package]] +name = "stack-data" +version = "0.6.3" +description = "Extract data from python stack frames and tracebacks for informative displays" +optional = false +python-versions = "*" +groups = ["main", "test"] +files = [ + {file = "stack_data-0.6.3-py3-none-any.whl", hash = "sha256:d5558e0c25a4cb0853cddad3d77da9891a08cb85dd9f9f91b9f8cd66e511e695"}, + {file = "stack_data-0.6.3.tar.gz", hash = "sha256:836a778de4fec4dcd1dcd89ed8abff8a221f58308462e1c4aa2a3cf30148f0b9"}, +] + +[package.dependencies] +asttokens = ">=2.1.0" +executing = ">=1.2.0" +pure-eval = "*" + +[package.extras] +tests = ["cython", "littleutils", "pygments", "pytest", "typeguard"] + +[[package]] +name = "terminado" +version = "0.18.1" +description = "Tornado websocket backend for the Xterm.js Javascript terminal emulator library." 
+optional = false +python-versions = ">=3.8" +groups = ["test"] +files = [ + {file = "terminado-0.18.1-py3-none-any.whl", hash = "sha256:a4468e1b37bb318f8a86514f65814e1afc977cf29b3992a4500d9dd305dcceb0"}, + {file = "terminado-0.18.1.tar.gz", hash = "sha256:de09f2c4b85de4765f7714688fff57d3e75bad1f909b589fde880460c753fd2e"}, +] + +[package.dependencies] +ptyprocess = {version = "*", markers = "os_name != \"nt\""} +pywinpty = {version = ">=1.1.0", markers = "os_name == \"nt\""} +tornado = ">=6.1.0" + +[package.extras] +docs = ["myst-parser", "pydata-sphinx-theme", "sphinx"] +test = ["pre-commit", "pytest (>=7.0)", "pytest-timeout"] +typing = ["mypy (>=1.6,<2.0)", "traitlets (>=5.11.1)"] + +[[package]] +name = "tinycss2" +version = "1.4.0" +description = "A tiny CSS parser" +optional = false +python-versions = ">=3.8" +groups = ["test"] +files = [ + {file = "tinycss2-1.4.0-py3-none-any.whl", hash = "sha256:3a49cf47b7675da0b15d0c6e1df8df4ebd96e9394bb905a5775adb0d884c5289"}, + {file = "tinycss2-1.4.0.tar.gz", hash = "sha256:10c0972f6fc0fbee87c3edb76549357415e94548c1ae10ebccdea16fb404a9b7"}, +] + +[package.dependencies] +webencodings = ">=0.4" + +[package.extras] +doc = ["sphinx", "sphinx_rtd_theme"] +test = ["pytest", "ruff"] + +[[package]] +name = "tornado" +version = "6.5.1" +description = "Tornado is a Python web framework and asynchronous networking library, originally developed at FriendFeed." 
+optional = false +python-versions = ">=3.9" +groups = ["test"] +files = [ + {file = "tornado-6.5.1-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:d50065ba7fd11d3bd41bcad0825227cc9a95154bad83239357094c36708001f7"}, + {file = "tornado-6.5.1-cp39-abi3-macosx_10_9_x86_64.whl", hash = "sha256:9e9ca370f717997cb85606d074b0e5b247282cf5e2e1611568b8821afe0342d6"}, + {file = "tornado-6.5.1-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b77e9dfa7ed69754a54c89d82ef746398be82f749df69c4d3abe75c4d1ff4888"}, + {file = "tornado-6.5.1-cp39-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:253b76040ee3bab8bcf7ba9feb136436a3787208717a1fb9f2c16b744fba7331"}, + {file = "tornado-6.5.1-cp39-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:308473f4cc5a76227157cdf904de33ac268af770b2c5f05ca6c1161d82fdd95e"}, + {file = "tornado-6.5.1-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:caec6314ce8a81cf69bd89909f4b633b9f523834dc1a352021775d45e51d9401"}, + {file = "tornado-6.5.1-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:13ce6e3396c24e2808774741331638ee6c2f50b114b97a55c5b442df65fd9692"}, + {file = "tornado-6.5.1-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:5cae6145f4cdf5ab24744526cc0f55a17d76f02c98f4cff9daa08ae9a217448a"}, + {file = "tornado-6.5.1-cp39-abi3-win32.whl", hash = "sha256:e0a36e1bc684dca10b1aa75a31df8bdfed656831489bc1e6a6ebed05dc1ec365"}, + {file = "tornado-6.5.1-cp39-abi3-win_amd64.whl", hash = "sha256:908e7d64567cecd4c2b458075589a775063453aeb1d2a1853eedb806922f568b"}, + {file = "tornado-6.5.1-cp39-abi3-win_arm64.whl", hash = "sha256:02420a0eb7bf617257b9935e2b754d1b63897525d8a289c9d65690d580b4dcf7"}, + {file = "tornado-6.5.1.tar.gz", hash = "sha256:84ceece391e8eb9b2b95578db65e920d2a61070260594819589609ba9bc6308c"}, +] + +[[package]] +name = "traitlets" +version = "5.14.3" +description = "Traitlets Python configuration system" 
+optional = false +python-versions = ">=3.8" +groups = ["main", "test"] +files = [ + {file = "traitlets-5.14.3-py3-none-any.whl", hash = "sha256:b74e89e397b1ed28cc831db7aea759ba6640cb3de13090ca145426688ff1ac4f"}, + {file = "traitlets-5.14.3.tar.gz", hash = "sha256:9ed0579d3502c94b4b3732ac120375cda96f923114522847de4b3bb98b96b6b7"}, +] + +[package.extras] +docs = ["myst-parser", "pydata-sphinx-theme", "sphinx"] +test = ["argcomplete (>=3.0.3)", "mypy (>=1.7.0)", "pre-commit", "pytest (>=7.0,<8.2)", "pytest-mock", "pytest-mypy-testing"] + +[[package]] +name = "types-python-dateutil" +version = "2.9.0.20250516" +description = "Typing stubs for python-dateutil" +optional = false +python-versions = ">=3.9" +groups = ["test"] +files = [ + {file = "types_python_dateutil-2.9.0.20250516-py3-none-any.whl", hash = "sha256:2b2b3f57f9c6a61fba26a9c0ffb9ea5681c9b83e69cd897c6b5f668d9c0cab93"}, + {file = "types_python_dateutil-2.9.0.20250516.tar.gz", hash = "sha256:13e80d6c9c47df23ad773d54b2826bd52dbbb41be87c3f339381c1700ad21ee5"}, +] + +[[package]] +name = "typing-extensions" +version = "4.14.0" +description = "Backported and Experimental Type Hints for Python 3.9+" +optional = false +python-versions = ">=3.9" +groups = ["main", "test"] +files = [ + {file = "typing_extensions-4.14.0-py3-none-any.whl", hash = "sha256:a1514509136dd0b477638fc68d6a91497af5076466ad0fa6c338e44e359944af"}, + {file = "typing_extensions-4.14.0.tar.gz", hash = "sha256:8676b788e32f02ab42d9e7c61324048ae4c6d844a399eebace3d4979d75ceef4"}, +] + +[[package]] +name = "typing-inspection" +version = "0.4.1" +description = "Runtime typing introspection tools" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "typing_inspection-0.4.1-py3-none-any.whl", hash = "sha256:389055682238f53b04f7badcb49b989835495a96700ced5dab2d8feae4b26f51"}, + {file = "typing_inspection-0.4.1.tar.gz", hash = "sha256:6ae134cc0203c33377d43188d4064e9b357dba58cff3185f22924610e70a9d28"}, +] + 
+[package.dependencies] +typing-extensions = ">=4.12.0" + +[[package]] +name = "tzdata" +version = "2025.2" +description = "Provider of IANA time zone data" +optional = false +python-versions = ">=2" +groups = ["main"] +files = [ + {file = "tzdata-2025.2-py2.py3-none-any.whl", hash = "sha256:1a403fada01ff9221ca8044d701868fa132215d84beb92242d9acd2147f667a8"}, + {file = "tzdata-2025.2.tar.gz", hash = "sha256:b60a638fcc0daffadf82fe0f57e53d06bdec2f36c4df66280ae79bce6bd6f2b9"}, +] + +[[package]] +name = "uri-template" +version = "1.3.0" +description = "RFC 6570 URI Template Processor" +optional = false +python-versions = ">=3.7" +groups = ["test"] +files = [ + {file = "uri-template-1.3.0.tar.gz", hash = "sha256:0e00f8eb65e18c7de20d595a14336e9f337ead580c70934141624b6d1ffdacc7"}, + {file = "uri_template-1.3.0-py3-none-any.whl", hash = "sha256:a44a133ea12d44a0c0f06d7d42a52d71282e77e2f937d8abd5655b8d56fc1363"}, +] + +[package.extras] +dev = ["flake8", "flake8-annotations", "flake8-bandit", "flake8-bugbear", "flake8-commas", "flake8-comprehensions", "flake8-continuation", "flake8-datetimez", "flake8-docstrings", "flake8-import-order", "flake8-literal", "flake8-modern-annotations", "flake8-noqa", "flake8-pyproject", "flake8-requirements", "flake8-typechecking-import", "flake8-use-fstring", "mypy", "pep8-naming", "types-PyYAML"] + +[[package]] +name = "urllib3" +version = "2.4.0" +description = "HTTP library with thread-safe connection pooling, file post, and more." 
+optional = false +python-versions = ">=3.9" +groups = ["main", "docs", "test"] +files = [ + {file = "urllib3-2.4.0-py3-none-any.whl", hash = "sha256:4e16665048960a0900c702d4a66415956a584919c03361cac9f1df5c5dd7e813"}, + {file = "urllib3-2.4.0.tar.gz", hash = "sha256:414bc6535b787febd7567804cc015fee39daab8ad86268f1310a9250697de466"}, +] + +[package.extras] +brotli = ["brotli (>=1.0.9) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\""] +h2 = ["h2 (>=4,<5)"] +socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] +zstd = ["zstandard (>=0.18.0)"] + +[[package]] +name = "virtualenv" +version = "20.31.2" +description = "Virtual Python Environment builder" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "virtualenv-20.31.2-py3-none-any.whl", hash = "sha256:36efd0d9650ee985f0cad72065001e66d49a6f24eb44d98980f630686243cf11"}, + {file = "virtualenv-20.31.2.tar.gz", hash = "sha256:e10c0a9d02835e592521be48b332b6caee6887f332c111aa79a09b9e79efc2af"}, +] + +[package.dependencies] +distlib = ">=0.3.7,<1" +filelock = ">=3.12.2,<4" +platformdirs = ">=3.9.1,<5" + +[package.extras] +docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.2,!=7.3)", "sphinx-argparse (>=0.4)", "sphinxcontrib-towncrier (>=0.2.1a0)", "towncrier (>=23.6)"] +test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess (>=1)", "flaky (>=3.7)", "packaging (>=23.1)", "pytest (>=7.4)", "pytest-env (>=0.8.2)", "pytest-freezer (>=0.4.8) ; platform_python_implementation == \"PyPy\" or platform_python_implementation == \"GraalVM\" or platform_python_implementation == \"CPython\" and sys_platform == \"win32\" and python_version >= \"3.13\"", "pytest-mock (>=3.11.1)", "pytest-randomly (>=3.12)", "pytest-timeout (>=2.1)", "setuptools (>=68)", "time-machine (>=2.10) ; platform_python_implementation == \"CPython\""] + +[[package]] +name = "wcwidth" +version = "0.2.13" +description = "Measures the 
displayed width of unicode strings in a terminal" +optional = false +python-versions = "*" +groups = ["main", "test"] +files = [ + {file = "wcwidth-0.2.13-py2.py3-none-any.whl", hash = "sha256:3da69048e4540d84af32131829ff948f1e022c1c6bdb8d6102117aac784f6859"}, + {file = "wcwidth-0.2.13.tar.gz", hash = "sha256:72ea0c06399eb286d978fdedb6923a9eb47e1c486ce63e9b4e64fc18303972b5"}, +] + +[[package]] +name = "webcolors" +version = "24.11.1" +description = "A library for working with the color formats defined by HTML and CSS." +optional = false +python-versions = ">=3.9" +groups = ["test"] +files = [ + {file = "webcolors-24.11.1-py3-none-any.whl", hash = "sha256:515291393b4cdf0eb19c155749a096f779f7d909f7cceea072791cb9095b92e9"}, + {file = "webcolors-24.11.1.tar.gz", hash = "sha256:ecb3d768f32202af770477b8b65f318fa4f566c22948673a977b00d589dd80f6"}, +] + +[[package]] +name = "webencodings" +version = "0.5.1" +description = "Character encoding aliases for legacy web content" +optional = false +python-versions = "*" +groups = ["test"] +files = [ + {file = "webencodings-0.5.1-py2.py3-none-any.whl", hash = "sha256:a0af1213f3c2226497a97e2b3aa01a7e4bee4f403f95be16fc9acd2947514a78"}, + {file = "webencodings-0.5.1.tar.gz", hash = "sha256:b36a1c245f2d304965eb4e0a82848379241dc04b865afcc4aab16748587e1923"}, +] + +[[package]] +name = "websocket-client" +version = "1.8.0" +description = "WebSocket client for Python with low level API options" +optional = false +python-versions = ">=3.8" +groups = ["main", "test"] +files = [ + {file = "websocket_client-1.8.0-py3-none-any.whl", hash = "sha256:17b44cc997f5c498e809b22cdf2d9c7a9e71c02c8cc2b6c56e7c2d1239bfa526"}, + {file = "websocket_client-1.8.0.tar.gz", hash = "sha256:3239df9f44da632f96012472805d40a23281a991027ce11d2f45a6f24ac4c3da"}, +] + +[package.extras] +docs = ["Sphinx (>=6.0)", "myst-parser (>=2.0.0)", "sphinx-rtd-theme (>=1.1.0)"] +optional = ["python-socks", "wsaccel"] +test = ["websockets"] + +[[package]] +name = 
"widgetsnbextension" +version = "4.0.14" +description = "Jupyter interactive widgets for Jupyter Notebook" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "widgetsnbextension-4.0.14-py3-none-any.whl", hash = "sha256:4875a9eaf72fbf5079dc372a51a9f268fc38d46f767cbf85c43a36da5cb9b575"}, + {file = "widgetsnbextension-4.0.14.tar.gz", hash = "sha256:a3629b04e3edb893212df862038c7232f62973373869db5084aed739b437b5af"}, +] + +[[package]] +name = "wrapt" +version = "1.17.2" +description = "Module for decorators, wrappers and monkey patching." +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "wrapt-1.17.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3d57c572081fed831ad2d26fd430d565b76aa277ed1d30ff4d40670b1c0dd984"}, + {file = "wrapt-1.17.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b5e251054542ae57ac7f3fba5d10bfff615b6c2fb09abeb37d2f1463f841ae22"}, + {file = "wrapt-1.17.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:80dd7db6a7cb57ffbc279c4394246414ec99537ae81ffd702443335a61dbf3a7"}, + {file = "wrapt-1.17.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a6e821770cf99cc586d33833b2ff32faebdbe886bd6322395606cf55153246c"}, + {file = "wrapt-1.17.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b60fb58b90c6d63779cb0c0c54eeb38941bae3ecf7a73c764c52c88c2dcb9d72"}, + {file = "wrapt-1.17.2-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b870b5df5b71d8c3359d21be8f0d6c485fa0ebdb6477dda51a1ea54a9b558061"}, + {file = "wrapt-1.17.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:4011d137b9955791f9084749cba9a367c68d50ab8d11d64c50ba1688c9b457f2"}, + {file = "wrapt-1.17.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:1473400e5b2733e58b396a04eb7f35f541e1fb976d0c0724d0223dd607e0f74c"}, + {file = 
"wrapt-1.17.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:3cedbfa9c940fdad3e6e941db7138e26ce8aad38ab5fe9dcfadfed9db7a54e62"}, + {file = "wrapt-1.17.2-cp310-cp310-win32.whl", hash = "sha256:582530701bff1dec6779efa00c516496968edd851fba224fbd86e46cc6b73563"}, + {file = "wrapt-1.17.2-cp310-cp310-win_amd64.whl", hash = "sha256:58705da316756681ad3c9c73fd15499aa4d8c69f9fd38dc8a35e06c12468582f"}, + {file = "wrapt-1.17.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ff04ef6eec3eee8a5efef2401495967a916feaa353643defcc03fc74fe213b58"}, + {file = "wrapt-1.17.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4db983e7bca53819efdbd64590ee96c9213894272c776966ca6306b73e4affda"}, + {file = "wrapt-1.17.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9abc77a4ce4c6f2a3168ff34b1da9b0f311a8f1cfd694ec96b0603dff1c79438"}, + {file = "wrapt-1.17.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0b929ac182f5ace000d459c59c2c9c33047e20e935f8e39371fa6e3b85d56f4a"}, + {file = "wrapt-1.17.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f09b286faeff3c750a879d336fb6d8713206fc97af3adc14def0cdd349df6000"}, + {file = "wrapt-1.17.2-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1a7ed2d9d039bd41e889f6fb9364554052ca21ce823580f6a07c4ec245c1f5d6"}, + {file = "wrapt-1.17.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:129a150f5c445165ff941fc02ee27df65940fcb8a22a61828b1853c98763a64b"}, + {file = "wrapt-1.17.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:1fb5699e4464afe5c7e65fa51d4f99e0b2eadcc176e4aa33600a3df7801d6662"}, + {file = "wrapt-1.17.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:9a2bce789a5ea90e51a02dfcc39e31b7f1e662bc3317979aa7e5538e3a034f72"}, + {file = "wrapt-1.17.2-cp311-cp311-win32.whl", hash = "sha256:4afd5814270fdf6380616b321fd31435a462019d834f83c8611a0ce7484c7317"}, + {file = 
"wrapt-1.17.2-cp311-cp311-win_amd64.whl", hash = "sha256:acc130bc0375999da18e3d19e5a86403667ac0c4042a094fefb7eec8ebac7cf3"}, + {file = "wrapt-1.17.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:d5e2439eecc762cd85e7bd37161d4714aa03a33c5ba884e26c81559817ca0925"}, + {file = "wrapt-1.17.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:3fc7cb4c1c744f8c05cd5f9438a3caa6ab94ce8344e952d7c45a8ed59dd88392"}, + {file = "wrapt-1.17.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8fdbdb757d5390f7c675e558fd3186d590973244fab0c5fe63d373ade3e99d40"}, + {file = "wrapt-1.17.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5bb1d0dbf99411f3d871deb6faa9aabb9d4e744d67dcaaa05399af89d847a91d"}, + {file = "wrapt-1.17.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d18a4865f46b8579d44e4fe1e2bcbc6472ad83d98e22a26c963d46e4c125ef0b"}, + {file = "wrapt-1.17.2-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc570b5f14a79734437cb7b0500376b6b791153314986074486e0b0fa8d71d98"}, + {file = "wrapt-1.17.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6d9187b01bebc3875bac9b087948a2bccefe464a7d8f627cf6e48b1bbae30f82"}, + {file = "wrapt-1.17.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:9e8659775f1adf02eb1e6f109751268e493c73716ca5761f8acb695e52a756ae"}, + {file = "wrapt-1.17.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e8b2816ebef96d83657b56306152a93909a83f23994f4b30ad4573b00bd11bb9"}, + {file = "wrapt-1.17.2-cp312-cp312-win32.whl", hash = "sha256:468090021f391fe0056ad3e807e3d9034e0fd01adcd3bdfba977b6fdf4213ea9"}, + {file = "wrapt-1.17.2-cp312-cp312-win_amd64.whl", hash = "sha256:ec89ed91f2fa8e3f52ae53cd3cf640d6feff92ba90d62236a81e4e563ac0e991"}, + {file = "wrapt-1.17.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:6ed6ffac43aecfe6d86ec5b74b06a5be33d5bb9243d055141e8cabb12aa08125"}, + {file = 
"wrapt-1.17.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:35621ae4c00e056adb0009f8e86e28eb4a41a4bfa8f9bfa9fca7d343fe94f998"}, + {file = "wrapt-1.17.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a604bf7a053f8362d27eb9fefd2097f82600b856d5abe996d623babd067b1ab5"}, + {file = "wrapt-1.17.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5cbabee4f083b6b4cd282f5b817a867cf0b1028c54d445b7ec7cfe6505057cf8"}, + {file = "wrapt-1.17.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:49703ce2ddc220df165bd2962f8e03b84c89fee2d65e1c24a7defff6f988f4d6"}, + {file = "wrapt-1.17.2-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8112e52c5822fc4253f3901b676c55ddf288614dc7011634e2719718eaa187dc"}, + {file = "wrapt-1.17.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:9fee687dce376205d9a494e9c121e27183b2a3df18037f89d69bd7b35bcf59e2"}, + {file = "wrapt-1.17.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:18983c537e04d11cf027fbb60a1e8dfd5190e2b60cc27bc0808e653e7b218d1b"}, + {file = "wrapt-1.17.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:703919b1633412ab54bcf920ab388735832fdcb9f9a00ae49387f0fe67dad504"}, + {file = "wrapt-1.17.2-cp313-cp313-win32.whl", hash = "sha256:abbb9e76177c35d4e8568e58650aa6926040d6a9f6f03435b7a522bf1c487f9a"}, + {file = "wrapt-1.17.2-cp313-cp313-win_amd64.whl", hash = "sha256:69606d7bb691b50a4240ce6b22ebb319c1cfb164e5f6569835058196e0f3a845"}, + {file = "wrapt-1.17.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:4a721d3c943dae44f8e243b380cb645a709ba5bd35d3ad27bc2ed947e9c68192"}, + {file = "wrapt-1.17.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:766d8bbefcb9e00c3ac3b000d9acc51f1b399513f44d77dfe0eb026ad7c9a19b"}, + {file = "wrapt-1.17.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:e496a8ce2c256da1eb98bd15803a79bee00fc351f5dfb9ea82594a3f058309e0"}, + 
{file = "wrapt-1.17.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40d615e4fe22f4ad3528448c193b218e077656ca9ccb22ce2cb20db730f8d306"}, + {file = "wrapt-1.17.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a5aaeff38654462bc4b09023918b7f21790efb807f54c000a39d41d69cf552cb"}, + {file = "wrapt-1.17.2-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a7d15bbd2bc99e92e39f49a04653062ee6085c0e18b3b7512a4f2fe91f2d681"}, + {file = "wrapt-1.17.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:e3890b508a23299083e065f435a492b5435eba6e304a7114d2f919d400888cc6"}, + {file = "wrapt-1.17.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:8c8b293cd65ad716d13d8dd3624e42e5a19cc2a2f1acc74b30c2c13f15cb61a6"}, + {file = "wrapt-1.17.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:4c82b8785d98cdd9fed4cac84d765d234ed3251bd6afe34cb7ac523cb93e8b4f"}, + {file = "wrapt-1.17.2-cp313-cp313t-win32.whl", hash = "sha256:13e6afb7fe71fe7485a4550a8844cc9ffbe263c0f1a1eea569bc7091d4898555"}, + {file = "wrapt-1.17.2-cp313-cp313t-win_amd64.whl", hash = "sha256:eaf675418ed6b3b31c7a989fd007fa7c3be66ce14e5c3b27336383604c9da85c"}, + {file = "wrapt-1.17.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5c803c401ea1c1c18de70a06a6f79fcc9c5acfc79133e9869e730ad7f8ad8ef9"}, + {file = "wrapt-1.17.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f917c1180fdb8623c2b75a99192f4025e412597c50b2ac870f156de8fb101119"}, + {file = "wrapt-1.17.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ecc840861360ba9d176d413a5489b9a0aff6d6303d7e733e2c4623cfa26904a6"}, + {file = "wrapt-1.17.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb87745b2e6dc56361bfde481d5a378dc314b252a98d7dd19a651a3fa58f24a9"}, + {file = "wrapt-1.17.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", 
hash = "sha256:58455b79ec2661c3600e65c0a716955adc2410f7383755d537584b0de41b1d8a"}, + {file = "wrapt-1.17.2-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b4e42a40a5e164cbfdb7b386c966a588b1047558a990981ace551ed7e12ca9c2"}, + {file = "wrapt-1.17.2-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:91bd7d1773e64019f9288b7a5101f3ae50d3d8e6b1de7edee9c2ccc1d32f0c0a"}, + {file = "wrapt-1.17.2-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:bb90fb8bda722a1b9d48ac1e6c38f923ea757b3baf8ebd0c82e09c5c1a0e7a04"}, + {file = "wrapt-1.17.2-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:08e7ce672e35efa54c5024936e559469436f8b8096253404faeb54d2a878416f"}, + {file = "wrapt-1.17.2-cp38-cp38-win32.whl", hash = "sha256:410a92fefd2e0e10d26210e1dfb4a876ddaf8439ef60d6434f21ef8d87efc5b7"}, + {file = "wrapt-1.17.2-cp38-cp38-win_amd64.whl", hash = "sha256:95c658736ec15602da0ed73f312d410117723914a5c91a14ee4cdd72f1d790b3"}, + {file = "wrapt-1.17.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:99039fa9e6306880572915728d7f6c24a86ec57b0a83f6b2491e1d8ab0235b9a"}, + {file = "wrapt-1.17.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2696993ee1eebd20b8e4ee4356483c4cb696066ddc24bd70bcbb80fa56ff9061"}, + {file = "wrapt-1.17.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:612dff5db80beef9e649c6d803a8d50c409082f1fedc9dbcdfde2983b2025b82"}, + {file = "wrapt-1.17.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:62c2caa1585c82b3f7a7ab56afef7b3602021d6da34fbc1cf234ff139fed3cd9"}, + {file = "wrapt-1.17.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c958bcfd59bacc2d0249dcfe575e71da54f9dcf4a8bdf89c4cb9a68a1170d73f"}, + {file = "wrapt-1.17.2-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc78a84e2dfbc27afe4b2bd7c80c8db9bca75cc5b85df52bfe634596a1da846b"}, + {file = 
"wrapt-1.17.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:ba0f0eb61ef00ea10e00eb53a9129501f52385c44853dbd6c4ad3f403603083f"}, + {file = "wrapt-1.17.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:1e1fe0e6ab7775fd842bc39e86f6dcfc4507ab0ffe206093e76d61cde37225c8"}, + {file = "wrapt-1.17.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:c86563182421896d73858e08e1db93afdd2b947a70064b813d515d66549e15f9"}, + {file = "wrapt-1.17.2-cp39-cp39-win32.whl", hash = "sha256:f393cda562f79828f38a819f4788641ac7c4085f30f1ce1a68672baa686482bb"}, + {file = "wrapt-1.17.2-cp39-cp39-win_amd64.whl", hash = "sha256:36ccae62f64235cf8ddb682073a60519426fdd4725524ae38874adf72b5f2aeb"}, + {file = "wrapt-1.17.2-py3-none-any.whl", hash = "sha256:b18f2d1533a71f069c7f82d524a52599053d4c7166e9dd374ae2136b7f40f7c8"}, + {file = "wrapt-1.17.2.tar.gz", hash = "sha256:41388e9d4d1522446fe79d3213196bd9e3b301a336965b9e27ca2788ebd122f3"}, +] + +[[package]] +name = "yarl" +version = "1.20.1" +description = "Yet another URL library" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "yarl-1.20.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:6032e6da6abd41e4acda34d75a816012717000fa6839f37124a47fcefc49bec4"}, + {file = "yarl-1.20.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2c7b34d804b8cf9b214f05015c4fee2ebe7ed05cf581e7192c06555c71f4446a"}, + {file = "yarl-1.20.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0c869f2651cc77465f6cd01d938d91a11d9ea5d798738c1dc077f3de0b5e5fed"}, + {file = "yarl-1.20.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:62915e6688eb4d180d93840cda4110995ad50c459bf931b8b3775b37c264af1e"}, + {file = "yarl-1.20.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:41ebd28167bc6af8abb97fec1a399f412eec5fd61a3ccbe2305a18b84fb4ca73"}, + {file = "yarl-1.20.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:21242b4288a6d56f04ea193adde174b7e347ac46ce6bc84989ff7c1b1ecea84e"}, + {file = "yarl-1.20.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bea21cdae6c7eb02ba02a475f37463abfe0a01f5d7200121b03e605d6a0439f8"}, + {file = "yarl-1.20.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1f8a891e4a22a89f5dde7862994485e19db246b70bb288d3ce73a34422e55b23"}, + {file = "yarl-1.20.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dd803820d44c8853a109a34e3660e5a61beae12970da479cf44aa2954019bf70"}, + {file = "yarl-1.20.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:b982fa7f74c80d5c0c7b5b38f908971e513380a10fecea528091405f519b9ebb"}, + {file = "yarl-1.20.1-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:33f29ecfe0330c570d997bcf1afd304377f2e48f61447f37e846a6058a4d33b2"}, + {file = "yarl-1.20.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:835ab2cfc74d5eb4a6a528c57f05688099da41cf4957cf08cad38647e4a83b30"}, + {file = "yarl-1.20.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:46b5e0ccf1943a9a6e766b2c2b8c732c55b34e28be57d8daa2b3c1d1d4009309"}, + {file = "yarl-1.20.1-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:df47c55f7d74127d1b11251fe6397d84afdde0d53b90bedb46a23c0e534f9d24"}, + {file = "yarl-1.20.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:76d12524d05841276b0e22573f28d5fbcb67589836772ae9244d90dd7d66aa13"}, + {file = "yarl-1.20.1-cp310-cp310-win32.whl", hash = "sha256:6c4fbf6b02d70e512d7ade4b1f998f237137f1417ab07ec06358ea04f69134f8"}, + {file = "yarl-1.20.1-cp310-cp310-win_amd64.whl", hash = "sha256:aef6c4d69554d44b7f9d923245f8ad9a707d971e6209d51279196d8e8fe1ae16"}, + {file = "yarl-1.20.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:47ee6188fea634bdfaeb2cc420f5b3b17332e6225ce88149a17c413c77ff269e"}, + {file = "yarl-1.20.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = 
"sha256:d0f6500f69e8402d513e5eedb77a4e1818691e8f45e6b687147963514d84b44b"}, + {file = "yarl-1.20.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7a8900a42fcdaad568de58887c7b2f602962356908eedb7628eaf6021a6e435b"}, + {file = "yarl-1.20.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bad6d131fda8ef508b36be3ece16d0902e80b88ea7200f030a0f6c11d9e508d4"}, + {file = "yarl-1.20.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:df018d92fe22aaebb679a7f89fe0c0f368ec497e3dda6cb81a567610f04501f1"}, + {file = "yarl-1.20.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8f969afbb0a9b63c18d0feecf0db09d164b7a44a053e78a7d05f5df163e43833"}, + {file = "yarl-1.20.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:812303eb4aa98e302886ccda58d6b099e3576b1b9276161469c25803a8db277d"}, + {file = "yarl-1.20.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98c4a7d166635147924aa0bf9bfe8d8abad6fffa6102de9c99ea04a1376f91e8"}, + {file = "yarl-1.20.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:12e768f966538e81e6e7550f9086a6236b16e26cd964cf4df35349970f3551cf"}, + {file = "yarl-1.20.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:fe41919b9d899661c5c28a8b4b0acf704510b88f27f0934ac7a7bebdd8938d5e"}, + {file = "yarl-1.20.1-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:8601bc010d1d7780592f3fc1bdc6c72e2b6466ea34569778422943e1a1f3c389"}, + {file = "yarl-1.20.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:daadbdc1f2a9033a2399c42646fbd46da7992e868a5fe9513860122d7fe7a73f"}, + {file = "yarl-1.20.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:03aa1e041727cb438ca762628109ef1333498b122e4c76dd858d186a37cec845"}, + {file = "yarl-1.20.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:642980ef5e0fa1de5fa96d905c7e00cb2c47cb468bfcac5a18c58e27dbf8d8d1"}, 
+ {file = "yarl-1.20.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:86971e2795584fe8c002356d3b97ef6c61862720eeff03db2a7c86b678d85b3e"}, + {file = "yarl-1.20.1-cp311-cp311-win32.whl", hash = "sha256:597f40615b8d25812f14562699e287f0dcc035d25eb74da72cae043bb884d773"}, + {file = "yarl-1.20.1-cp311-cp311-win_amd64.whl", hash = "sha256:26ef53a9e726e61e9cd1cda6b478f17e350fb5800b4bd1cd9fe81c4d91cfeb2e"}, + {file = "yarl-1.20.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:bdcc4cd244e58593a4379fe60fdee5ac0331f8eb70320a24d591a3be197b94a9"}, + {file = "yarl-1.20.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:b29a2c385a5f5b9c7d9347e5812b6f7ab267193c62d282a540b4fc528c8a9d2a"}, + {file = "yarl-1.20.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1112ae8154186dfe2de4732197f59c05a83dc814849a5ced892b708033f40dc2"}, + {file = "yarl-1.20.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:90bbd29c4fe234233f7fa2b9b121fb63c321830e5d05b45153a2ca68f7d310ee"}, + {file = "yarl-1.20.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:680e19c7ce3710ac4cd964e90dad99bf9b5029372ba0c7cbfcd55e54d90ea819"}, + {file = "yarl-1.20.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4a979218c1fdb4246a05efc2cc23859d47c89af463a90b99b7c56094daf25a16"}, + {file = "yarl-1.20.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:255b468adf57b4a7b65d8aad5b5138dce6a0752c139965711bdcb81bc370e1b6"}, + {file = "yarl-1.20.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a97d67108e79cfe22e2b430d80d7571ae57d19f17cda8bb967057ca8a7bf5bfd"}, + {file = "yarl-1.20.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8570d998db4ddbfb9a590b185a0a33dbf8aafb831d07a5257b4ec9948df9cb0a"}, + {file = "yarl-1.20.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = 
"sha256:97c75596019baae7c71ccf1d8cc4738bc08134060d0adfcbe5642f778d1dca38"}, + {file = "yarl-1.20.1-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:1c48912653e63aef91ff988c5432832692ac5a1d8f0fb8a33091520b5bbe19ef"}, + {file = "yarl-1.20.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:4c3ae28f3ae1563c50f3d37f064ddb1511ecc1d5584e88c6b7c63cf7702a6d5f"}, + {file = "yarl-1.20.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c5e9642f27036283550f5f57dc6156c51084b458570b9d0d96100c8bebb186a8"}, + {file = "yarl-1.20.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:2c26b0c49220d5799f7b22c6838409ee9bc58ee5c95361a4d7831f03cc225b5a"}, + {file = "yarl-1.20.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:564ab3d517e3d01c408c67f2e5247aad4019dcf1969982aba3974b4093279004"}, + {file = "yarl-1.20.1-cp312-cp312-win32.whl", hash = "sha256:daea0d313868da1cf2fac6b2d3a25c6e3a9e879483244be38c8e6a41f1d876a5"}, + {file = "yarl-1.20.1-cp312-cp312-win_amd64.whl", hash = "sha256:48ea7d7f9be0487339828a4de0360d7ce0efc06524a48e1810f945c45b813698"}, + {file = "yarl-1.20.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:0b5ff0fbb7c9f1b1b5ab53330acbfc5247893069e7716840c8e7d5bb7355038a"}, + {file = "yarl-1.20.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:14f326acd845c2b2e2eb38fb1346c94f7f3b01a4f5c788f8144f9b630bfff9a3"}, + {file = "yarl-1.20.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f60e4ad5db23f0b96e49c018596707c3ae89f5d0bd97f0ad3684bcbad899f1e7"}, + {file = "yarl-1.20.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:49bdd1b8e00ce57e68ba51916e4bb04461746e794e7c4d4bbc42ba2f18297691"}, + {file = "yarl-1.20.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:66252d780b45189975abfed839616e8fd2dbacbdc262105ad7742c6ae58f3e31"}, + {file = "yarl-1.20.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:59174e7332f5d153d8f7452a102b103e2e74035ad085f404df2e40e663a22b28"}, + {file = "yarl-1.20.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e3968ec7d92a0c0f9ac34d5ecfd03869ec0cab0697c91a45db3fbbd95fe1b653"}, + {file = "yarl-1.20.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d1a4fbb50e14396ba3d375f68bfe02215d8e7bc3ec49da8341fe3157f59d2ff5"}, + {file = "yarl-1.20.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:11a62c839c3a8eac2410e951301309426f368388ff2f33799052787035793b02"}, + {file = "yarl-1.20.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:041eaa14f73ff5a8986b4388ac6bb43a77f2ea09bf1913df7a35d4646db69e53"}, + {file = "yarl-1.20.1-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:377fae2fef158e8fd9d60b4c8751387b8d1fb121d3d0b8e9b0be07d1b41e83dc"}, + {file = "yarl-1.20.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:1c92f4390e407513f619d49319023664643d3339bd5e5a56a3bebe01bc67ec04"}, + {file = "yarl-1.20.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:d25ddcf954df1754ab0f86bb696af765c5bfaba39b74095f27eececa049ef9a4"}, + {file = "yarl-1.20.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:909313577e9619dcff8c31a0ea2aa0a2a828341d92673015456b3ae492e7317b"}, + {file = "yarl-1.20.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:793fd0580cb9664548c6b83c63b43c477212c0260891ddf86809e1c06c8b08f1"}, + {file = "yarl-1.20.1-cp313-cp313-win32.whl", hash = "sha256:468f6e40285de5a5b3c44981ca3a319a4b208ccc07d526b20b12aeedcfa654b7"}, + {file = "yarl-1.20.1-cp313-cp313-win_amd64.whl", hash = "sha256:495b4ef2fea40596bfc0affe3837411d6aa3371abcf31aac0ccc4bdd64d4ef5c"}, + {file = "yarl-1.20.1-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:f60233b98423aab21d249a30eb27c389c14929f47be8430efa7dbd91493a729d"}, + {file = "yarl-1.20.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = 
"sha256:6f3eff4cc3f03d650d8755c6eefc844edde99d641d0dcf4da3ab27141a5f8ddf"}, + {file = "yarl-1.20.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:69ff8439d8ba832d6bed88af2c2b3445977eba9a4588b787b32945871c2444e3"}, + {file = "yarl-1.20.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3cf34efa60eb81dd2645a2e13e00bb98b76c35ab5061a3989c7a70f78c85006d"}, + {file = "yarl-1.20.1-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:8e0fe9364ad0fddab2688ce72cb7a8e61ea42eff3c7caeeb83874a5d479c896c"}, + {file = "yarl-1.20.1-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8f64fbf81878ba914562c672024089e3401974a39767747691c65080a67b18c1"}, + {file = "yarl-1.20.1-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f6342d643bf9a1de97e512e45e4b9560a043347e779a173250824f8b254bd5ce"}, + {file = "yarl-1.20.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:56dac5f452ed25eef0f6e3c6a066c6ab68971d96a9fb441791cad0efba6140d3"}, + {file = "yarl-1.20.1-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c7d7f497126d65e2cad8dc5f97d34c27b19199b6414a40cb36b52f41b79014be"}, + {file = "yarl-1.20.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:67e708dfb8e78d8a19169818eeb5c7a80717562de9051bf2413aca8e3696bf16"}, + {file = "yarl-1.20.1-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:595c07bc79af2494365cc96ddeb772f76272364ef7c80fb892ef9d0649586513"}, + {file = "yarl-1.20.1-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:7bdd2f80f4a7df852ab9ab49484a4dee8030023aa536df41f2d922fd57bf023f"}, + {file = "yarl-1.20.1-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:c03bfebc4ae8d862f853a9757199677ab74ec25424d0ebd68a0027e9c639a390"}, + {file = "yarl-1.20.1-cp313-cp313t-musllinux_1_2_s390x.whl", hash = 
"sha256:344d1103e9c1523f32a5ed704d576172d2cabed3122ea90b1d4e11fe17c66458"}, + {file = "yarl-1.20.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:88cab98aa4e13e1ade8c141daeedd300a4603b7132819c484841bb7af3edce9e"}, + {file = "yarl-1.20.1-cp313-cp313t-win32.whl", hash = "sha256:b121ff6a7cbd4abc28985b6028235491941b9fe8fe226e6fdc539c977ea1739d"}, + {file = "yarl-1.20.1-cp313-cp313t-win_amd64.whl", hash = "sha256:541d050a355bbbc27e55d906bc91cb6fe42f96c01413dd0f4ed5a5240513874f"}, + {file = "yarl-1.20.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:e42ba79e2efb6845ebab49c7bf20306c4edf74a0b20fc6b2ccdd1a219d12fad3"}, + {file = "yarl-1.20.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:41493b9b7c312ac448b7f0a42a089dffe1d6e6e981a2d76205801a023ed26a2b"}, + {file = "yarl-1.20.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f5a5928ff5eb13408c62a968ac90d43f8322fd56d87008b8f9dabf3c0f6ee983"}, + {file = "yarl-1.20.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:30c41ad5d717b3961b2dd785593b67d386b73feca30522048d37298fee981805"}, + {file = "yarl-1.20.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:59febc3969b0781682b469d4aca1a5cab7505a4f7b85acf6db01fa500fa3f6ba"}, + {file = "yarl-1.20.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d2b6fb3622b7e5bf7a6e5b679a69326b4279e805ed1699d749739a61d242449e"}, + {file = "yarl-1.20.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:749d73611db8d26a6281086f859ea7ec08f9c4c56cec864e52028c8b328db723"}, + {file = "yarl-1.20.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9427925776096e664c39e131447aa20ec738bdd77c049c48ea5200db2237e000"}, + {file = "yarl-1.20.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ff70f32aa316393eaf8222d518ce9118148eddb8a53073c2403863b41033eed5"}, + {file = 
"yarl-1.20.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:c7ddf7a09f38667aea38801da8b8d6bfe81df767d9dfc8c88eb45827b195cd1c"}, + {file = "yarl-1.20.1-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:57edc88517d7fc62b174fcfb2e939fbc486a68315d648d7e74d07fac42cec240"}, + {file = "yarl-1.20.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:dab096ce479d5894d62c26ff4f699ec9072269d514b4edd630a393223f45a0ee"}, + {file = "yarl-1.20.1-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:14a85f3bd2d7bb255be7183e5d7d6e70add151a98edf56a770d6140f5d5f4010"}, + {file = "yarl-1.20.1-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:2c89b5c792685dd9cd3fa9761c1b9f46fc240c2a3265483acc1565769996a3f8"}, + {file = "yarl-1.20.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:69e9b141de5511021942a6866990aea6d111c9042235de90e08f94cf972ca03d"}, + {file = "yarl-1.20.1-cp39-cp39-win32.whl", hash = "sha256:b5f307337819cdfdbb40193cad84978a029f847b0a357fbe49f712063cfc4f06"}, + {file = "yarl-1.20.1-cp39-cp39-win_amd64.whl", hash = "sha256:eae7bfe2069f9c1c5b05fc7fe5d612e5bbc089a39309904ee8b829e322dcad00"}, + {file = "yarl-1.20.1-py3-none-any.whl", hash = "sha256:83b8eb083fe4683c6115795d9fc1cfaf2cbbefb19b3a1cb68f6527460f483a77"}, + {file = "yarl-1.20.1.tar.gz", hash = "sha256:d017a4997ee50c91fd5466cef416231bb82177b93b029906cefc542ce14c35ac"}, +] + +[package.dependencies] +idna = ">=2.0" +multidict = ">=4.0" +propcache = ">=0.2.1" + +[[package]] +name = "zipp" +version = "3.23.0" +description = "Backport of pathlib-compatible object wrapper for zip files" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "zipp-3.23.0-py3-none-any.whl", hash = "sha256:071652d6115ed432f5ce1d34c336c0adfd6a884660d1e9712a256d3d3bd4b14e"}, + {file = "zipp-3.23.0.tar.gz", hash = "sha256:a07157588a12518c9d4034df3fbbee09c814741a33ff63c05fa29d26a2404166"}, +] + +[package.extras] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\""] +cover 
= ["pytest-cov"] +doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +enabler = ["pytest-enabler (>=2.2)"] +test = ["big-O", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more_itertools", "pytest (>=6,!=8.1.*)", "pytest-ignore-flaky"] +type = ["pytest-mypy"] + +[metadata] +lock-version = "2.1" +python-versions = "^3.11" +content-hash = "6720576cf9ff57c7bb15b97e268bb414218f6a053e7e0a5bdd45d022c0847111" diff --git a/pyproject.toml b/pyproject.toml index 52c237d2..71610478 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,10 @@ +[project] +name = "codeflare-sdk" +version = "0.31.1" + [tool.poetry] name = "codeflare-sdk" -version = "0.0.0-dev" +version = "0.31.1" description = "Python SDK for codeflare client" license = "Apache-2.0" @@ -20,29 +24,47 @@ homepage = "https://github.com/project-codeflare/codeflare-sdk" keywords = ['codeflare', 'python', 'sdk', 'client', 'batch', 'scale'] [tool.poetry.dependencies] -python = "^3.7" +python = "^3.11" openshift-client = "1.0.18" -rich = "^12.5" -ray = {version = "2.5.0", extras = ["default"]} -kubernetes = ">= 25.3.0, < 27" -codeflare-torchx = "0.6.0.dev1" -cryptography = "40.0.2" +rich = ">=12.5,<14.0" +ray = {version = "2.47.1", extras = ["data", "default"]} +kubernetes = ">= 27.2.0" +cryptography = "43.0.3" executing = "1.2.0" -pydantic = "< 2" +pydantic = ">= 2.10.6" +ipywidgets = "8.1.2" [tool.poetry.group.docs] optional = true [tool.poetry.group.docs.dependencies] -pdoc3 = "0.10.0" +sphinx = "7.4.7" +sphinx-rtd-theme = "3.0.1" + +[tool.poetry.group.test] +optional = true [tool.poetry.group.test.dependencies] pytest = "7.4.0" -coverage = "7.2.7" +coverage = "7.6.4" pytest-mock = "3.11.1" +pytest-timeout = "2.3.1" +jupyterlab = "4.3.1" [tool.pytest.ini_options] filterwarnings = [ "ignore::DeprecationWarning:pkg_resources", "ignore:pkg_resources is deprecated as an API:DeprecationWarning", ] +markers = [ + "kind", + 
"openshift", + "nvidia_gpu" +] +addopts = "--timeout=900" +testpaths = ["src/codeflare_sdk"] +collect_ignore = ["src/codeflare_sdk/common/utils/unit_test_support.py"] + +[build-system] +requires = ["poetry-core>=1.6.0"] +build-backend = "poetry.core.masonry.api" diff --git a/requirements-dev.txt b/requirements-dev.txt deleted file mode 100644 index 986bc512..00000000 --- a/requirements-dev.txt +++ /dev/null @@ -1,6 +0,0 @@ -pre-commit -poetry -pytest -pytest-mock -coverage -black==22.3.0 diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index c62afb19..00000000 --- a/requirements.txt +++ /dev/null @@ -1,8 +0,0 @@ -openshift-client==1.0.18 -rich==12.5.1 -ray[default]==2.5.0 -kubernetes>=25.3.0,<27 -codeflare-torchx==0.6.0.dev1 -pydantic<2 # 2.0+ broke ray[default] see detail: https://github.com/ray-project/ray/pull/37000 -cryptography==40.0.2 -executing==1.2.0 diff --git a/src/codeflare_sdk.egg-info/PKG-INFO b/src/codeflare_sdk.egg-info/PKG-INFO deleted file mode 100644 index c4061c62..00000000 --- a/src/codeflare_sdk.egg-info/PKG-INFO +++ /dev/null @@ -1,4 +0,0 @@ -Metadata-Version: 2.1 -Name: codeflare-sdk -Version: 0.0.0 -License-File: LICENSE diff --git a/src/codeflare_sdk.egg-info/SOURCES.txt b/src/codeflare_sdk.egg-info/SOURCES.txt deleted file mode 100644 index cfea1dbf..00000000 --- a/src/codeflare_sdk.egg-info/SOURCES.txt +++ /dev/null @@ -1,21 +0,0 @@ -LICENSE -README.md -pyproject.toml -src/codeflare_sdk/__init__.py -src/codeflare_sdk.egg-info/PKG-INFO -src/codeflare_sdk.egg-info/SOURCES.txt -src/codeflare_sdk.egg-info/dependency_links.txt -src/codeflare_sdk.egg-info/top_level.txt -src/codeflare_sdk/cluster/__init__.py -src/codeflare_sdk/cluster/auth.py -src/codeflare_sdk/cluster/awload.py -src/codeflare_sdk/cluster/cluster.py -src/codeflare_sdk/cluster/config.py -src/codeflare_sdk/cluster/model.py -src/codeflare_sdk/job/__init__.py -src/codeflare_sdk/job/jobs.py -src/codeflare_sdk/utils/__init__.py 
-src/codeflare_sdk/utils/generate_cert.py -src/codeflare_sdk/utils/generate_yaml.py -src/codeflare_sdk/utils/kube_api_helpers.py -src/codeflare_sdk/utils/pretty_print.py diff --git a/src/codeflare_sdk.egg-info/dependency_links.txt b/src/codeflare_sdk.egg-info/dependency_links.txt deleted file mode 100644 index e69de29b..00000000 diff --git a/src/codeflare_sdk.egg-info/top_level.txt b/src/codeflare_sdk.egg-info/top_level.txt deleted file mode 100644 index 633675b3..00000000 --- a/src/codeflare_sdk.egg-info/top_level.txt +++ /dev/null @@ -1 +0,0 @@ -codeflare_sdk diff --git a/src/codeflare_sdk/__init__.py b/src/codeflare_sdk/__init__.py index e69de29b..9ab5c745 100644 --- a/src/codeflare_sdk/__init__.py +++ b/src/codeflare_sdk/__init__.py @@ -0,0 +1,37 @@ +from .ray import ( + Cluster, + ClusterConfiguration, + RayClusterStatus, + CodeFlareClusterStatus, + RayCluster, + get_cluster, + list_all_queued, + list_all_clusters, + AWManager, + AppWrapperStatus, + RayJobClient, +) + +from .common.widgets import view_clusters + +from .common import ( + Authentication, + KubeConfiguration, + TokenAuthentication, + KubeConfigFileAuthentication, +) + +from .common.kueue import ( + list_local_queues, +) + +from .common.utils import generate_cert +from .common.utils.demos import copy_demo_nbs + +from importlib.metadata import version, PackageNotFoundError + +try: + __version__ = version("codeflare-sdk") # use metadata associated with built package + +except PackageNotFoundError: + __version__ = "v0.0.0" diff --git a/src/codeflare_sdk/cluster/cluster.py b/src/codeflare_sdk/cluster/cluster.py deleted file mode 100644 index afad28ba..00000000 --- a/src/codeflare_sdk/cluster/cluster.py +++ /dev/null @@ -1,644 +0,0 @@ -# Copyright 2022 IBM, Red Hat -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -The cluster sub-module contains the definition of the Cluster object, which represents -the resources requested by the user. It also contains functions for checking the -cluster setup queue, a list of all existing clusters, and the user's working namespace. -""" - -from time import sleep -from typing import List, Optional, Tuple, Dict - -from ray.job_submission import JobSubmissionClient - -from .auth import config_check, api_config_handler -from ..utils import pretty_print -from ..utils.generate_yaml import generate_appwrapper -from ..utils.kube_api_helpers import _kube_api_error_handling -from .config import ClusterConfiguration -from .model import ( - AppWrapper, - AppWrapperStatus, - CodeFlareClusterStatus, - RayCluster, - RayClusterStatus, -) -from kubernetes import client, config -import yaml -import os -import requests - - -class Cluster: - """ - An object for requesting, bringing up, and taking down resources. - Can also be used for seeing the resource cluster status and details. - - Note that currently, the underlying implementation is a Ray cluster. - """ - - torchx_scheduler = "ray" - - def __init__(self, config: ClusterConfiguration): - """ - Create the resource cluster object by passing in a ClusterConfiguration - (defined in the config sub-module). An AppWrapper will then be generated - based off of the configured resources to represent the desired cluster - request. 
- """ - self.config = config - self.app_wrapper_yaml = self.create_app_wrapper() - self.app_wrapper_name = self.app_wrapper_yaml.split(".")[0] - - def evaluate_dispatch_priority(self): - priority_class = self.config.dispatch_priority - - try: - config_check() - api_instance = client.CustomObjectsApi(api_config_handler()) - priority_classes = api_instance.list_cluster_custom_object( - group="scheduling.k8s.io", - version="v1", - plural="priorityclasses", - ) - except Exception as e: # pragma: no cover - return _kube_api_error_handling(e) - - for pc in priority_classes["items"]: - if pc["metadata"]["name"] == priority_class: - return pc["value"] - print(f"Priority class {priority_class} is not available in the cluster") - return None - - def create_app_wrapper(self): - """ - Called upon cluster object creation, creates an AppWrapper yaml based on - the specifications of the ClusterConfiguration. - """ - - if self.config.namespace is None: - self.config.namespace = get_current_namespace() - if self.config.namespace is None: - print("Please specify with namespace=") - elif type(self.config.namespace) is not str: - raise TypeError( - f"Namespace {self.config.namespace} is of type {type(self.config.namespace)}. Check your Kubernetes Authentication." 
- ) - - # Before attempting to create the cluster AW, let's evaluate the ClusterConfig - if self.config.dispatch_priority: - priority_val = self.evaluate_dispatch_priority() - if priority_val == None: - raise ValueError( - "Invalid Cluster Configuration, AppWrapper not generated" - ) - else: - priority_val = None - - name = self.config.name - namespace = self.config.namespace - min_cpu = self.config.min_cpus - max_cpu = self.config.max_cpus - min_memory = self.config.min_memory - max_memory = self.config.max_memory - gpu = self.config.num_gpus - workers = self.config.num_workers - template = self.config.template - image = self.config.image - instascale = self.config.instascale - instance_types = self.config.machine_types - env = self.config.envs - local_interactive = self.config.local_interactive - image_pull_secrets = self.config.image_pull_secrets - dispatch_priority = self.config.dispatch_priority - return generate_appwrapper( - name=name, - namespace=namespace, - min_cpu=min_cpu, - max_cpu=max_cpu, - min_memory=min_memory, - max_memory=max_memory, - gpu=gpu, - workers=workers, - template=template, - image=image, - instascale=instascale, - instance_types=instance_types, - env=env, - local_interactive=local_interactive, - image_pull_secrets=image_pull_secrets, - dispatch_priority=dispatch_priority, - priority_val=priority_val, - ) - - # creates a new cluster with the provided or default spec - def up(self): - """ - Applies the AppWrapper yaml, pushing the resource request onto - the MCAD queue. 
- """ - namespace = self.config.namespace - try: - config_check() - api_instance = client.CustomObjectsApi(api_config_handler()) - with open(self.app_wrapper_yaml) as f: - aw = yaml.load(f, Loader=yaml.FullLoader) - api_instance.create_namespaced_custom_object( - group="workload.codeflare.dev", - version="v1beta1", - namespace=namespace, - plural="appwrappers", - body=aw, - ) - except Exception as e: # pragma: no cover - return _kube_api_error_handling(e) - - def down(self): - """ - Deletes the AppWrapper yaml, scaling-down and deleting all resources - associated with the cluster. - """ - namespace = self.config.namespace - try: - config_check() - api_instance = client.CustomObjectsApi(api_config_handler()) - api_instance.delete_namespaced_custom_object( - group="workload.codeflare.dev", - version="v1beta1", - namespace=namespace, - plural="appwrappers", - name=self.app_wrapper_name, - ) - except Exception as e: # pragma: no cover - return _kube_api_error_handling(e) - - def status( - self, print_to_console: bool = True - ) -> Tuple[CodeFlareClusterStatus, bool]: - """ - Returns the requested cluster's status, as well as whether or not - it is ready for use. 
- """ - ready = False - status = CodeFlareClusterStatus.UNKNOWN - # check the app wrapper status - appwrapper = _app_wrapper_status(self.config.name, self.config.namespace) - if appwrapper: - if appwrapper.status in [ - AppWrapperStatus.RUNNING, - AppWrapperStatus.COMPLETED, - AppWrapperStatus.RUNNING_HOLD_COMPLETION, - ]: - ready = False - status = CodeFlareClusterStatus.STARTING - elif appwrapper.status in [ - AppWrapperStatus.FAILED, - AppWrapperStatus.DELETED, - ]: - ready = False - status = CodeFlareClusterStatus.FAILED # should deleted be separate - return status, ready # exit early, no need to check ray status - elif appwrapper.status in [ - AppWrapperStatus.PENDING, - AppWrapperStatus.QUEUEING, - ]: - ready = False - if appwrapper.status == AppWrapperStatus.PENDING: - status = CodeFlareClusterStatus.QUEUED - else: - status = CodeFlareClusterStatus.QUEUEING - if print_to_console: - pretty_print.print_app_wrappers_status([appwrapper]) - return ( - status, - ready, - ) # no need to check the ray status since still in queue - - # check the ray cluster status - cluster = _ray_cluster_status(self.config.name, self.config.namespace) - if cluster and not cluster.status == RayClusterStatus.UNKNOWN: - if cluster.status == RayClusterStatus.READY: - ready = True - status = CodeFlareClusterStatus.READY - elif cluster.status in [ - RayClusterStatus.UNHEALTHY, - RayClusterStatus.FAILED, - ]: - ready = False - status = CodeFlareClusterStatus.FAILED - - if print_to_console: - # overriding the number of gpus with requested - cluster.worker_gpu = self.config.num_gpus - pretty_print.print_cluster_status(cluster) - elif print_to_console: - if status == CodeFlareClusterStatus.UNKNOWN: - pretty_print.print_no_resources_found() - else: - pretty_print.print_app_wrappers_status([appwrapper], starting=True) - - return status, ready - - def is_dashboard_ready(self) -> bool: - response = requests.get(self.cluster_dashboard_uri(), timeout=5) - if response.status_code == 200: - return 
True - else: - return False - - def wait_ready(self, timeout: Optional[int] = None): - """ - Waits for requested cluster to be ready, up to an optional timeout (s). - Checks every five seconds. - """ - print("Waiting for requested resources to be set up...") - ready = False - dashboard_ready = False - status = None - time = 0 - while not ready or not dashboard_ready: - status, ready = self.status(print_to_console=False) - dashboard_ready = self.is_dashboard_ready() - if status == CodeFlareClusterStatus.UNKNOWN: - print( - "WARNING: Current cluster status is unknown, have you run cluster.up yet?" - ) - if not ready or not dashboard_ready: - if timeout and time >= timeout: - raise TimeoutError(f"wait() timed out after waiting {timeout}s") - sleep(5) - time += 5 - print("Requested cluster and dashboard are up and running!") - - def details(self, print_to_console: bool = True) -> RayCluster: - cluster = _copy_to_ray(self) - if print_to_console: - pretty_print.print_clusters([cluster]) - return cluster - - def cluster_uri(self) -> str: - """ - Returns a string containing the cluster's URI. - """ - return f"ray://{self.config.name}-head-svc.{self.config.namespace}.svc:10001" - - def cluster_dashboard_uri(self) -> str: - """ - Returns a string containing the cluster's dashboard URI. - """ - try: - config_check() - api_instance = client.CustomObjectsApi(api_config_handler()) - routes = api_instance.list_namespaced_custom_object( - group="route.openshift.io", - version="v1", - namespace=self.config.namespace, - plural="routes", - ) - except Exception as e: # pragma: no cover - return _kube_api_error_handling(e) - - for route in routes["items"]: - if route["metadata"]["name"] == f"ray-dashboard-{self.config.name}": - protocol = "https" if route["spec"].get("tls") else "http" - return f"{protocol}://{route['spec']['host']}" - return "Dashboard route not available yet, have you run cluster.up()?" 
- - def list_jobs(self) -> List: - """ - This method accesses the head ray node in your cluster and lists the running jobs. - """ - dashboard_route = self.cluster_dashboard_uri() - client = JobSubmissionClient(dashboard_route) - return client.list_jobs() - - def job_status(self, job_id: str) -> str: - """ - This method accesses the head ray node in your cluster and returns the job status for the provided job id. - """ - dashboard_route = self.cluster_dashboard_uri() - client = JobSubmissionClient(dashboard_route) - return client.get_job_status(job_id) - - def job_logs(self, job_id: str) -> str: - """ - This method accesses the head ray node in your cluster and returns the logs for the provided job id. - """ - dashboard_route = self.cluster_dashboard_uri() - client = JobSubmissionClient(dashboard_route) - return client.get_job_logs(job_id) - - def torchx_config( - self, working_dir: str = None, requirements: str = None - ) -> Dict[str, str]: - dashboard_address = f"{self.cluster_dashboard_uri().lstrip('http://')}" - to_return = { - "cluster_name": self.config.name, - "dashboard_address": dashboard_address, - } - if working_dir: - to_return["working_dir"] = working_dir - if requirements: - to_return["requirements"] = requirements - return to_return - - def from_k8_cluster_object(rc): - machine_types = ( - rc["metadata"]["labels"]["orderedinstance"].split("_") - if "orderedinstance" in rc["metadata"]["labels"] - else [] - ) - local_interactive = ( - "volumeMounts" - in rc["spec"]["workerGroupSpecs"][0]["template"]["spec"]["containers"][0] - ) - cluster_config = ClusterConfiguration( - name=rc["metadata"]["name"], - namespace=rc["metadata"]["namespace"], - machine_types=machine_types, - num_workers=rc["spec"]["workerGroupSpecs"][0]["minReplicas"], - min_cpus=rc["spec"]["workerGroupSpecs"][0]["template"]["spec"][ - "containers" - ][0]["resources"]["requests"]["cpu"], - max_cpus=rc["spec"]["workerGroupSpecs"][0]["template"]["spec"][ - "containers" - 
][0]["resources"]["limits"]["cpu"], - min_memory=int( - rc["spec"]["workerGroupSpecs"][0]["template"]["spec"]["containers"][0][ - "resources" - ]["requests"]["memory"][:-1] - ), - max_memory=int( - rc["spec"]["workerGroupSpecs"][0]["template"]["spec"]["containers"][0][ - "resources" - ]["limits"]["memory"][:-1] - ), - num_gpus=rc["spec"]["workerGroupSpecs"][0]["template"]["spec"][ - "containers" - ][0]["resources"]["limits"]["nvidia.com/gpu"], - instascale=True if machine_types else False, - image=rc["spec"]["workerGroupSpecs"][0]["template"]["spec"]["containers"][ - 0 - ]["image"], - local_interactive=local_interactive, - ) - return Cluster(cluster_config) - - def local_client_url(self): - if self.config.local_interactive == True: - ingress_domain = _get_ingress_domain() - return f"ray://rayclient-{self.config.name}-{self.config.namespace}.{ingress_domain}" - else: - return "None" - - -def list_all_clusters(namespace: str, print_to_console: bool = True): - """ - Returns (and prints by default) a list of all clusters in a given namespace. - """ - clusters = _get_ray_clusters(namespace) - if print_to_console: - pretty_print.print_clusters(clusters) - return clusters - - -def list_all_queued(namespace: str, print_to_console: bool = True): - """ - Returns (and prints by default) a list of all currently queued-up AppWrappers - in a given namespace. 
- """ - app_wrappers = _get_app_wrappers( - namespace, filter=[AppWrapperStatus.RUNNING, AppWrapperStatus.PENDING] - ) - if print_to_console: - pretty_print.print_app_wrappers_status(app_wrappers) - return app_wrappers - - -def get_current_namespace(): # pragma: no cover - if api_config_handler() != None: - if os.path.isfile("/var/run/secrets/kubernetes.io/serviceaccount/namespace"): - try: - file = open( - "/var/run/secrets/kubernetes.io/serviceaccount/namespace", "r" - ) - active_context = file.readline().strip("\n") - return active_context - except Exception as e: - print("Unable to find current namespace") - return None - else: - print("Unable to find current namespace") - return None - else: - try: - _, active_context = config.list_kube_config_contexts(config_check()) - except Exception as e: - return _kube_api_error_handling(e) - try: - return active_context["context"]["namespace"] - except KeyError: - return None - - -def get_cluster(cluster_name: str, namespace: str = "default"): - try: - config.load_kube_config() - api_instance = client.CustomObjectsApi() - rcs = api_instance.list_namespaced_custom_object( - group="ray.io", - version="v1alpha1", - namespace=namespace, - plural="rayclusters", - ) - except Exception as e: - return _kube_api_error_handling(e) - - for rc in rcs["items"]: - if rc["metadata"]["name"] == cluster_name: - return Cluster.from_k8_cluster_object(rc) - raise FileNotFoundError( - f"Cluster {cluster_name} is not found in {namespace} namespace" - ) - - -# private methods -def _get_ingress_domain(): - try: - config.load_kube_config() - api_client = client.CustomObjectsApi(api_config_handler()) - ingress = api_client.get_cluster_custom_object( - "config.openshift.io", "v1", "ingresses", "cluster" - ) - except Exception as e: # pragma: no cover - return _kube_api_error_handling(e) - return ingress["spec"]["domain"] - - -def _app_wrapper_status(name, namespace="default") -> Optional[AppWrapper]: - try: - config_check() - api_instance = 
client.CustomObjectsApi(api_config_handler()) - aws = api_instance.list_namespaced_custom_object( - group="workload.codeflare.dev", - version="v1beta1", - namespace=namespace, - plural="appwrappers", - ) - except Exception as e: # pragma: no cover - return _kube_api_error_handling(e) - - for aw in aws["items"]: - if aw["metadata"]["name"] == name: - return _map_to_app_wrapper(aw) - return None - - -def _ray_cluster_status(name, namespace="default") -> Optional[RayCluster]: - try: - config_check() - api_instance = client.CustomObjectsApi(api_config_handler()) - rcs = api_instance.list_namespaced_custom_object( - group="ray.io", - version="v1alpha1", - namespace=namespace, - plural="rayclusters", - ) - except Exception as e: # pragma: no cover - return _kube_api_error_handling(e) - - for rc in rcs["items"]: - if rc["metadata"]["name"] == name: - return _map_to_ray_cluster(rc) - return None - - -def _get_ray_clusters(namespace="default") -> List[RayCluster]: - list_of_clusters = [] - try: - config_check() - api_instance = client.CustomObjectsApi(api_config_handler()) - rcs = api_instance.list_namespaced_custom_object( - group="ray.io", - version="v1alpha1", - namespace=namespace, - plural="rayclusters", - ) - except Exception as e: # pragma: no cover - return _kube_api_error_handling(e) - - for rc in rcs["items"]: - list_of_clusters.append(_map_to_ray_cluster(rc)) - return list_of_clusters - - -def _get_app_wrappers( - namespace="default", filter=List[AppWrapperStatus] -) -> List[AppWrapper]: - list_of_app_wrappers = [] - - try: - config_check() - api_instance = client.CustomObjectsApi(api_config_handler()) - aws = api_instance.list_namespaced_custom_object( - group="workload.codeflare.dev", - version="v1beta1", - namespace=namespace, - plural="appwrappers", - ) - except Exception as e: # pragma: no cover - return _kube_api_error_handling(e) - - for item in aws["items"]: - app_wrapper = _map_to_app_wrapper(item) - if filter and app_wrapper.status in filter: - 
list_of_app_wrappers.append(app_wrapper) - else: - # Unsure what the purpose of the filter is - list_of_app_wrappers.append(app_wrapper) - return list_of_app_wrappers - - -def _map_to_ray_cluster(rc) -> Optional[RayCluster]: - if "state" in rc["status"]: - status = RayClusterStatus(rc["status"]["state"].lower()) - else: - status = RayClusterStatus.UNKNOWN - - config_check() - api_instance = client.CustomObjectsApi(api_config_handler()) - routes = api_instance.list_namespaced_custom_object( - group="route.openshift.io", - version="v1", - namespace=rc["metadata"]["namespace"], - plural="routes", - ) - ray_route = None - for route in routes["items"]: - if route["metadata"]["name"] == f"ray-dashboard-{rc['metadata']['name']}": - protocol = "https" if route["spec"].get("tls") else "http" - ray_route = f"{protocol}://{route['spec']['host']}" - - return RayCluster( - name=rc["metadata"]["name"], - status=status, - # for now we are not using autoscaling so same replicas is fine - workers=rc["spec"]["workerGroupSpecs"][0]["replicas"], - worker_mem_max=rc["spec"]["workerGroupSpecs"][0]["template"]["spec"][ - "containers" - ][0]["resources"]["limits"]["memory"], - worker_mem_min=rc["spec"]["workerGroupSpecs"][0]["template"]["spec"][ - "containers" - ][0]["resources"]["requests"]["memory"], - worker_cpu=rc["spec"]["workerGroupSpecs"][0]["template"]["spec"]["containers"][ - 0 - ]["resources"]["limits"]["cpu"], - worker_gpu=0, # hard to detect currently how many gpus, can override it with what the user asked for - namespace=rc["metadata"]["namespace"], - dashboard=ray_route, - ) - - -def _map_to_app_wrapper(aw) -> AppWrapper: - if "status" in aw and "canrun" in aw["status"]: - return AppWrapper( - name=aw["metadata"]["name"], - status=AppWrapperStatus(aw["status"]["state"].lower()), - can_run=aw["status"]["canrun"], - job_state=aw["status"]["queuejobstate"], - ) - return AppWrapper( - name=aw["metadata"]["name"], - status=AppWrapperStatus("queueing"), - can_run=False, - 
job_state="Still adding to queue", - ) - - -def _copy_to_ray(cluster: Cluster) -> RayCluster: - ray = RayCluster( - name=cluster.config.name, - status=cluster.status(print_to_console=False)[0], - workers=cluster.config.num_workers, - worker_mem_min=cluster.config.min_memory, - worker_mem_max=cluster.config.max_memory, - worker_cpu=cluster.config.min_cpus, - worker_gpu=cluster.config.num_gpus, - namespace=cluster.config.namespace, - dashboard=cluster.cluster_dashboard_uri(), - ) - if ray.status == CodeFlareClusterStatus.READY: - ray.status = RayClusterStatus.READY - return ray diff --git a/src/codeflare_sdk/cluster/config.py b/src/codeflare_sdk/cluster/config.py deleted file mode 100644 index cb935e79..00000000 --- a/src/codeflare_sdk/cluster/config.py +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright 2022 IBM, Red Hat -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -The config sub-module contains the definition of the ClusterConfiguration dataclass, -which is used to specify resource requirements and other details when creating a -Cluster object. -""" - -from dataclasses import dataclass, field -import pathlib - -dir = pathlib.Path(__file__).parent.parent.resolve() - - -@dataclass -class ClusterConfiguration: - """ - This dataclass is used to specify resource requirements and other details, and - is passed in as an argument when creating a Cluster object. 
- """ - - name: str - namespace: str = None - head_info: list = field(default_factory=list) - machine_types: list = field(default_factory=list) # ["m4.xlarge", "g4dn.xlarge"] - min_cpus: int = 1 - max_cpus: int = 1 - num_workers: int = 1 - min_memory: int = 2 - max_memory: int = 2 - num_gpus: int = 0 - template: str = f"{dir}/templates/base-template.yaml" - instascale: bool = False - envs: dict = field(default_factory=dict) - image: str = "quay.io/project-codeflare/ray:2.5.0-py38-cu116" - local_interactive: bool = False - image_pull_secrets: list = field(default_factory=list) - dispatch_priority: str = None diff --git a/src/codeflare_sdk/common/__init__.py b/src/codeflare_sdk/common/__init__.py new file mode 100644 index 00000000..c8bd2d5c --- /dev/null +++ b/src/codeflare_sdk/common/__init__.py @@ -0,0 +1,8 @@ +# Importing everything from the kubernetes_cluster module +from .kubernetes_cluster import ( + Authentication, + KubeConfiguration, + TokenAuthentication, + KubeConfigFileAuthentication, + _kube_api_error_handling, +) diff --git a/src/codeflare_sdk/common/kubernetes_cluster/__init__.py b/src/codeflare_sdk/common/kubernetes_cluster/__init__.py new file mode 100644 index 00000000..beac4d99 --- /dev/null +++ b/src/codeflare_sdk/common/kubernetes_cluster/__init__.py @@ -0,0 +1,10 @@ +from .auth import ( + Authentication, + KubeConfiguration, + TokenAuthentication, + KubeConfigFileAuthentication, + config_check, + get_api_client, +) + +from .kube_api_helpers import _kube_api_error_handling diff --git a/src/codeflare_sdk/cluster/auth.py b/src/codeflare_sdk/common/kubernetes_cluster/auth.py similarity index 73% rename from src/codeflare_sdk/cluster/auth.py rename to src/codeflare_sdk/common/kubernetes_cluster/auth.py index eb739136..db105afc 100644 --- a/src/codeflare_sdk/cluster/auth.py +++ b/src/codeflare_sdk/common/kubernetes_cluster/auth.py @@ -23,13 +23,17 @@ from kubernetes import client, config import os import urllib3 -from ..utils.kube_api_helpers import 
_kube_api_error_handling +from .kube_api_helpers import _kube_api_error_handling + +from typing import Optional global api_client api_client = None global config_path config_path = None +WORKBENCH_CA_CERT_PATH = "/etc/pki/tls/custom-certs/ca-bundle.crt" + class Authentication(metaclass=abc.ABCMeta): """ @@ -89,7 +93,7 @@ def __init__( self.token = token self.server = server self.skip_tls = skip_tls - self.ca_cert_path = ca_cert_path + self.ca_cert_path = _gen_ca_cert_path(ca_cert_path) def login(self) -> str: """ @@ -104,22 +108,21 @@ def login(self) -> str: configuration.api_key_prefix["authorization"] = "Bearer" configuration.host = self.server configuration.api_key["authorization"] = self.token - if self.skip_tls == False and self.ca_cert_path == None: - configuration.verify_ssl = True - elif self.skip_tls == False: - configuration.ssl_ca_cert = self.ca_cert_path - else: + + if self.skip_tls: urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) print("Insecure request warnings have been disabled") configuration.verify_ssl = False api_client = client.ApiClient(configuration) + if not self.skip_tls: + _client_with_cert(api_client, self.ca_cert_path) + client.AuthenticationApi(api_client).get_api_group() config_path = None return "Logged into %s" % self.server - except client.ApiException: # pragma: no cover - api_client = None - print("Authentication Error please provide the correct token + server") + except client.ApiException as e: + _kube_api_error_handling(e) def logout(self) -> str: """ @@ -162,8 +165,20 @@ def load_kube_config(self): def config_check() -> str: """ - Function for loading the config file at the default config location ~/.kube/config if the user has not - specified their own config file or has logged in with their token and server. + Check and load the Kubernetes config from the default location. + + This function checks if a Kubernetes config file exists at the default path + (`~/.kube/config`). 
If none is provided, it tries to load in-cluster config. + If the `config_path` global variable is set by an external module (e.g., `auth.py`), + this path will be used directly. + + Returns: + str: + The loaded config path if successful. + + Raises: + PermissionError: + If no valid credentials or config file is found. """ global config_path global api_client @@ -188,11 +203,42 @@ def config_check() -> str: return config_path -def api_config_handler() -> str: +def _client_with_cert(client: client.ApiClient, ca_cert_path: Optional[str] = None): + client.configuration.verify_ssl = True + cert_path = _gen_ca_cert_path(ca_cert_path) + if cert_path is None: + client.configuration.ssl_ca_cert = None + elif os.path.isfile(cert_path): + client.configuration.ssl_ca_cert = cert_path + else: + raise FileNotFoundError(f"Certificate file not found at {cert_path}") + + +def _gen_ca_cert_path(ca_cert_path: Optional[str]): + """Gets the path to the default CA certificate file either through env config or default path""" + if ca_cert_path is not None: + return ca_cert_path + elif "CF_SDK_CA_CERT_PATH" in os.environ: + return os.environ.get("CF_SDK_CA_CERT_PATH") + elif os.path.exists(WORKBENCH_CA_CERT_PATH): + return WORKBENCH_CA_CERT_PATH + else: + return None + + +def get_api_client() -> client.ApiClient: """ - This function is used to load the api client if the user has logged in + Retrieve the Kubernetes API client with the default configuration. + + This function returns the current API client instance if already loaded, + or creates a new API client with the default configuration. + + Returns: + client.ApiClient: + The Kubernetes API client object. 
""" - if api_client != None and config_path == None: + if api_client != None: return api_client - else: - return None + to_return = client.ApiClient() + _client_with_cert(to_return) + return to_return diff --git a/src/codeflare_sdk/common/kubernetes_cluster/kube_api_helpers.py b/src/codeflare_sdk/common/kubernetes_cluster/kube_api_helpers.py new file mode 100644 index 00000000..17a0c248 --- /dev/null +++ b/src/codeflare_sdk/common/kubernetes_cluster/kube_api_helpers.py @@ -0,0 +1,63 @@ +# Copyright 2022 IBM, Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +This sub-module exists primarily to be used internally for any Kubernetes +API error handling or wrapping. +""" + +import executing +from kubernetes import client, config + +ERROR_MESSAGES = { + "Not Found": "The requested resource could not be located.\n" + "Please verify the resource name and namespace.", + "Unauthorized": "Access to the API is unauthorized.\n" + "Check your credentials or permissions.", + "Forbidden": "Access denied to the Kubernetes resource.\n" + "Ensure your role has sufficient permissions for this operation.", + "Conflict": "A conflict occurred with the RayCluster resource.\n" + "Only one RayCluster with the same name is allowed. 
" + "Please delete or rename the existing RayCluster before creating a new one with the desired name.", +} + + +# private methods +def _kube_api_error_handling( + e: Exception, print_error: bool = True +): # pragma: no cover + def print_message(message: str): + if print_error: + print(message) + + if isinstance(e, client.ApiException): + # Retrieve message based on reason, defaulting if reason is not known + message = ERROR_MESSAGES.get( + e.reason, f"Unexpected API error encountered (Reason: {e.reason})" + ) + full_message = f"{message}\nResponse: {e.body}" + print_message(full_message) + + elif isinstance(e, config.ConfigException): + message = "Configuration error: Unable to load Kubernetes configuration. Verify the config file path and format." + print_message(message) + + elif isinstance(e, executing.executing.NotOneValueFound): + message = "Execution error: Expected exactly one value in the operation but found none or multiple." + print_message(message) + + else: + message = f"Unexpected error:\n{str(e)}" + print_message(message) + raise e diff --git a/src/codeflare_sdk/common/kubernetes_cluster/test_auth.py b/src/codeflare_sdk/common/kubernetes_cluster/test_auth.py new file mode 100644 index 00000000..be9e90f5 --- /dev/null +++ b/src/codeflare_sdk/common/kubernetes_cluster/test_auth.py @@ -0,0 +1,162 @@ +# Copyright 2024 IBM, Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from codeflare_sdk.common.kubernetes_cluster import ( + Authentication, + KubeConfigFileAuthentication, + TokenAuthentication, + config_check, +) +from kubernetes import client, config +import os +from pathlib import Path +import pytest + +parent = Path(__file__).resolve().parents[4] # project directory + + +def test_token_auth_creation(): + token_auth = TokenAuthentication(token="token", server="server") + assert token_auth.token == "token" + assert token_auth.server == "server" + assert token_auth.skip_tls == False + assert token_auth.ca_cert_path == None + + token_auth = TokenAuthentication(token="token", server="server", skip_tls=True) + assert token_auth.token == "token" + assert token_auth.server == "server" + assert token_auth.skip_tls == True + assert token_auth.ca_cert_path == None + + os.environ["CF_SDK_CA_CERT_PATH"] = "/etc/pki/tls/custom-certs/ca-bundle.crt" + token_auth = TokenAuthentication(token="token", server="server", skip_tls=False) + assert token_auth.token == "token" + assert token_auth.server == "server" + assert token_auth.skip_tls == False + assert token_auth.ca_cert_path == "/etc/pki/tls/custom-certs/ca-bundle.crt" + os.environ.pop("CF_SDK_CA_CERT_PATH") + + token_auth = TokenAuthentication( + token="token", + server="server", + skip_tls=False, + ca_cert_path=f"{parent}/tests/auth-test.crt", + ) + assert token_auth.token == "token" + assert token_auth.server == "server" + assert token_auth.skip_tls == False + assert token_auth.ca_cert_path == f"{parent}/tests/auth-test.crt" + + +def test_token_auth_login_logout(mocker): + mocker.patch.object(client, "ApiClient") + + token_auth = TokenAuthentication( + token="testtoken", server="testserver:6443", skip_tls=False, ca_cert_path=None + ) + assert token_auth.login() == ("Logged into testserver:6443") + assert token_auth.logout() == ("Successfully logged out of testserver:6443") + + +def test_token_auth_login_tls(mocker): + mocker.patch.object(client, "ApiClient") + + token_auth = 
TokenAuthentication( + token="testtoken", server="testserver:6443", skip_tls=True, ca_cert_path=None + ) + assert token_auth.login() == ("Logged into testserver:6443") + token_auth = TokenAuthentication( + token="testtoken", server="testserver:6443", skip_tls=False, ca_cert_path=None + ) + assert token_auth.login() == ("Logged into testserver:6443") + token_auth = TokenAuthentication( + token="testtoken", + server="testserver:6443", + skip_tls=False, + ca_cert_path=f"{parent}/tests/auth-test.crt", + ) + assert token_auth.login() == ("Logged into testserver:6443") + + os.environ["CF_SDK_CA_CERT_PATH"] = f"{parent}/tests/auth-test.crt" + token_auth = TokenAuthentication( + token="testtoken", + server="testserver:6443", + skip_tls=False, + ) + assert token_auth.login() == ("Logged into testserver:6443") + + +def test_config_check_no_config_file(mocker): + mocker.patch("os.path.expanduser", return_value="/mock/home/directory") + mocker.patch("os.path.isfile", return_value=False) + mocker.patch("codeflare_sdk.common.kubernetes_cluster.auth.config_path", None) + mocker.patch("codeflare_sdk.common.kubernetes_cluster.auth.api_client", None) + + with pytest.raises(PermissionError): + config_check() + + +def test_config_check_with_incluster_config(mocker): + mocker.patch("os.path.expanduser", return_value="/mock/home/directory") + mocker.patch("os.path.isfile", return_value=False) + mocker.patch.dict(os.environ, {"KUBERNETES_PORT": "number"}) + mocker.patch("kubernetes.config.load_incluster_config", side_effect=None) + mocker.patch("codeflare_sdk.common.kubernetes_cluster.auth.config_path", None) + mocker.patch("codeflare_sdk.common.kubernetes_cluster.auth.api_client", None) + + result = config_check() + assert result == None + + +def test_config_check_with_existing_config_file(mocker): + mocker.patch("os.path.expanduser", return_value="/mock/home/directory") + mocker.patch("os.path.isfile", return_value=True) + mocker.patch("kubernetes.config.load_kube_config", 
side_effect=None) + mocker.patch("codeflare_sdk.common.kubernetes_cluster.auth.config_path", None) + mocker.patch("codeflare_sdk.common.kubernetes_cluster.auth.api_client", None) + + result = config_check() + assert result == None + + +def test_config_check_with_config_path_and_no_api_client(mocker): + mocker.patch( + "codeflare_sdk.common.kubernetes_cluster.auth.config_path", "/mock/config/path" + ) + mocker.patch("codeflare_sdk.common.kubernetes_cluster.auth.api_client", None) + result = config_check() + assert result == "/mock/config/path" + + +def test_load_kube_config(mocker): + mocker.patch.object(config, "load_kube_config") + kube_config_auth = KubeConfigFileAuthentication( + kube_config_path="/path/to/your/config" + ) + response = kube_config_auth.load_kube_config() + + assert ( + response + == "Loaded user config file at path %s" % kube_config_auth.kube_config_path + ) + + kube_config_auth = KubeConfigFileAuthentication(kube_config_path=None) + response = kube_config_auth.load_kube_config() + assert response == "Please specify a config file path" + + +def test_auth_coverage(): + abstract = Authentication() + abstract.login() + abstract.logout() diff --git a/src/codeflare_sdk/common/kueue/__init__.py b/src/codeflare_sdk/common/kueue/__init__.py new file mode 100644 index 00000000..c9c641c1 --- /dev/null +++ b/src/codeflare_sdk/common/kueue/__init__.py @@ -0,0 +1,6 @@ +from .kueue import ( + get_default_kueue_name, + local_queue_exists, + add_queue_label, + list_local_queues, +) diff --git a/src/codeflare_sdk/common/kueue/kueue.py b/src/codeflare_sdk/common/kueue/kueue.py new file mode 100644 index 00000000..00f3364a --- /dev/null +++ b/src/codeflare_sdk/common/kueue/kueue.py @@ -0,0 +1,174 @@ +# Copyright 2024 IBM, Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Optional, List +from codeflare_sdk.common import _kube_api_error_handling +from codeflare_sdk.common.kubernetes_cluster.auth import config_check, get_api_client +from kubernetes import client +from kubernetes.client.exceptions import ApiException + + +def get_default_kueue_name(namespace: str) -> Optional[str]: + """ + Retrieves the default Kueue name from the provided namespace. + + This function attempts to fetch the local queues in the given namespace and checks if any of them is annotated + as the default queue. If found, the name of the default queue is returned. + + The default queue is marked with the annotation "kueue.x-k8s.io/default-queue" set to "true." + + Args: + namespace (str): + The Kubernetes namespace where the local queues are located. + + Returns: + Optional[str]: + The name of the default queue if it exists, otherwise None. 
+ """ + try: + config_check() + api_instance = client.CustomObjectsApi(get_api_client()) + local_queues = api_instance.list_namespaced_custom_object( + group="kueue.x-k8s.io", + version="v1beta1", + namespace=namespace, + plural="localqueues", + ) + except ApiException as e: # pragma: no cover + if e.status == 404 or e.status == 403: + return + else: + return _kube_api_error_handling(e) + for lq in local_queues["items"]: + if ( + "annotations" in lq["metadata"] + and "kueue.x-k8s.io/default-queue" in lq["metadata"]["annotations"] + and lq["metadata"]["annotations"]["kueue.x-k8s.io/default-queue"].lower() + == "true" + ): + return lq["metadata"]["name"] + + +def list_local_queues( + namespace: Optional[str] = None, flavors: Optional[List[str]] = None +) -> List[dict]: + """ + This function lists all local queues in the namespace provided. + + If no namespace is provided, it will use the current namespace. If flavors is provided, it will only return local + queues that support all the flavors provided. + + Note: + Depending on the version of the local queue API, the available flavors may not be present in the response. + + Args: + namespace (str, optional): + The namespace to list local queues from. Defaults to None. + flavors (List[str], optional): + The flavors to filter local queues by. Defaults to None. 
+ Returns: + List[dict]: + A list of dictionaries containing the name of the local queue and the available flavors + """ + from ...ray.cluster.cluster import get_current_namespace + + if namespace is None: # pragma: no cover + namespace = get_current_namespace() + try: + config_check() + api_instance = client.CustomObjectsApi(get_api_client()) + local_queues = api_instance.list_namespaced_custom_object( + group="kueue.x-k8s.io", + version="v1beta1", + namespace=namespace, + plural="localqueues", + ) + except ApiException as e: # pragma: no cover + return _kube_api_error_handling(e) + to_return = [] + for lq in local_queues["items"]: + item = {"name": lq["metadata"]["name"]} + if "flavors" in lq["status"]: + item["flavors"] = [f["name"] for f in lq["status"]["flavors"]] + if flavors is not None and not set(flavors).issubset(set(item["flavors"])): + continue + elif flavors is not None: + continue # NOTE: may be indicative old local queue API and might be worth while raising or warning here + to_return.append(item) + return to_return + + +def local_queue_exists(namespace: str, local_queue_name: str) -> bool: + """ + Checks if a local queue with the provided name exists in the given namespace. + + This function queries the local queues in the specified namespace and verifies if any queue matches the given name. + + Args: + namespace (str): + The namespace where the local queues are located. + local_queue_name (str): + The name of the local queue to check for existence. + + Returns: + bool: + True if the local queue exists, False otherwise. 
+ """ + try: + config_check() + api_instance = client.CustomObjectsApi(get_api_client()) + local_queues = api_instance.list_namespaced_custom_object( + group="kueue.x-k8s.io", + version="v1beta1", + namespace=namespace, + plural="localqueues", + ) + except Exception as e: # pragma: no cover + return _kube_api_error_handling(e) + # check if local queue with the name provided in cluster config exists + for lq in local_queues["items"]: + if lq["metadata"]["name"] == local_queue_name: + return True + return False + + +def add_queue_label(item: dict, namespace: str, local_queue: Optional[str]): + """ + Adds a local queue name label to the provided item. + + If the local queue is not provided, the default local queue for the namespace is used. The function validates if the + local queue exists, and if it does, the local queue name label is added to the resource metadata. + + Args: + item (dict): + The resource where the label will be added. + namespace (str): + The namespace of the local queue. + local_queue (str, optional): + The name of the local queue to use. Defaults to None. + + Raises: + ValueError: + If the provided or default local queue does not exist in the namespace. + """ + lq_name = local_queue or get_default_kueue_name(namespace) + if lq_name == None: + return + elif not local_queue_exists(namespace, lq_name): + raise ValueError( + "local_queue provided does not exist or is not in this namespace. 
Please provide the correct local_queue name in Cluster Configuration" + ) + if not "labels" in item["metadata"]: + item["metadata"]["labels"] = {} + item["metadata"]["labels"].update({"kueue.x-k8s.io/queue-name": lq_name}) diff --git a/src/codeflare_sdk/common/kueue/test_kueue.py b/src/codeflare_sdk/common/kueue/test_kueue.py new file mode 100644 index 00000000..bbc54e9e --- /dev/null +++ b/src/codeflare_sdk/common/kueue/test_kueue.py @@ -0,0 +1,298 @@ +# Copyright 2024 IBM, Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from ..utils.unit_test_support import ( + get_local_queue, + create_cluster_config, + get_template_variables, + apply_template, +) +from unittest.mock import patch +from codeflare_sdk.ray.cluster.cluster import Cluster, ClusterConfiguration +import yaml +import os +import filecmp +from pathlib import Path +from .kueue import list_local_queues, local_queue_exists, add_queue_label + +parent = Path(__file__).resolve().parents[4] # project directory +aw_dir = os.path.expanduser("~/.codeflare/resources/") + + +def test_none_local_queue(mocker): + mocker.patch("kubernetes.client.CustomObjectsApi.list_namespaced_custom_object") + config = ClusterConfiguration(name="unit-test-aw-kueue", namespace="ns") + config.name = "unit-test-aw-kueue" + config.local_queue = None + + cluster = Cluster(config) + assert cluster.config.local_queue == None + + +def test_cluster_creation_no_aw_local_queue(mocker): + # With written resources + # Create Ray Cluster with local queue specified + mocker.patch("kubernetes.client.ApisApi.get_api_versions") + mocker.patch( + "kubernetes.client.CustomObjectsApi.get_cluster_custom_object", + return_value={"spec": {"domain": "apps.cluster.awsroute.org"}}, + ) + mocker.patch( + "kubernetes.client.CustomObjectsApi.list_namespaced_custom_object", + return_value=get_local_queue("kueue.x-k8s.io", "v1beta1", "ns", "localqueues"), + ) + config = create_cluster_config() + config.name = "unit-test-cluster-kueue" + config.write_to_file = True + config.local_queue = "local-queue-default" + cluster = Cluster(config) + assert cluster.resource_yaml == f"{aw_dir}unit-test-cluster-kueue.yaml" + expected_rc = apply_template( + f"{parent}/tests/test_cluster_yamls/kueue/ray_cluster_kueue.yaml", + get_template_variables(), + ) + + with open(f"{aw_dir}unit-test-cluster-kueue.yaml", "r") as f: + cluster_kueue = yaml.load(f, Loader=yaml.FullLoader) + assert cluster_kueue == expected_rc + + # With resources loaded in memory, no Local Queue specified. 
+ config = create_cluster_config() + config.name = "unit-test-cluster-kueue" + config.write_to_file = False + cluster = Cluster(config) + assert cluster.resource_yaml == expected_rc + + +def test_aw_creation_local_queue(mocker): + mocker.patch("kubernetes.client.ApisApi.get_api_versions") + mocker.patch( + "kubernetes.client.CustomObjectsApi.get_cluster_custom_object", + return_value={"spec": {"domain": "apps.cluster.awsroute.org"}}, + ) + mocker.patch( + "kubernetes.client.CustomObjectsApi.list_namespaced_custom_object", + return_value=get_local_queue("kueue.x-k8s.io", "v1beta1", "ns", "localqueues"), + ) + config = create_cluster_config() + config.name = "unit-test-aw-kueue" + config.appwrapper = True + config.write_to_file = True + config.local_queue = "local-queue-default" + cluster = Cluster(config) + assert cluster.resource_yaml == f"{aw_dir}unit-test-aw-kueue.yaml" + expected_rc = apply_template( + f"{parent}/tests/test_cluster_yamls/kueue/aw_kueue.yaml", + get_template_variables(), + ) + + with open(f"{aw_dir}unit-test-aw-kueue.yaml", "r") as f: + aw_kueue = yaml.load(f, Loader=yaml.FullLoader) + assert aw_kueue == expected_rc + + # With resources loaded in memory, no Local Queue specified. 
+ config = create_cluster_config() + config.name = "unit-test-aw-kueue" + config.appwrapper = True + config.write_to_file = False + cluster = Cluster(config) + + assert cluster.resource_yaml == expected_rc + + +def test_get_local_queue_exists_fail(mocker): + mocker.patch("kubernetes.client.ApisApi.get_api_versions") + mocker.patch( + "kubernetes.client.CustomObjectsApi.get_cluster_custom_object", + return_value={"spec": {"domain": "apps.cluster.awsroute.org"}}, + ) + mocker.patch( + "kubernetes.client.CustomObjectsApi.list_namespaced_custom_object", + return_value=get_local_queue("kueue.x-k8s.io", "v1beta1", "ns", "localqueues"), + ) + config = create_cluster_config() + config.name = "unit-test-aw-kueue" + config.appwrapper = True + config.write_to_file = True + config.local_queue = "local_queue_doesn't_exist" + try: + Cluster(config) + except ValueError as e: + assert ( + str(e) + == "local_queue provided does not exist or is not in this namespace. Please provide the correct local_queue name in Cluster Configuration" + ) + + +def test_list_local_queues(mocker): + mocker.patch("kubernetes.client.ApisApi.get_api_versions") + mocker.patch( + "kubernetes.client.CustomObjectsApi.list_namespaced_custom_object", + return_value={ + "items": [ + { + "metadata": {"name": "lq1"}, + "status": {"flavors": [{"name": "default"}]}, + }, + { + "metadata": {"name": "lq2"}, + "status": { + "flavors": [{"name": "otherflavor"}, {"name": "default"}] + }, + }, + ] + }, + ) + lqs = list_local_queues("ns") + assert lqs == [ + {"name": "lq1", "flavors": ["default"]}, + {"name": "lq2", "flavors": ["otherflavor", "default"]}, + ] + lqs = list_local_queues("ns", flavors=["otherflavor"]) + assert lqs == [{"name": "lq2", "flavors": ["otherflavor", "default"]}] + mocker.patch( + "kubernetes.client.CustomObjectsApi.list_namespaced_custom_object", + return_value={ + "items": [ + { + "metadata": {"name": "lq1"}, + "status": {}, + }, + ] + }, + ) + lqs = list_local_queues("ns", flavors=["default"]) 
+ assert lqs == [] + + +def test_local_queue_exists_found(mocker): + # Mock Kubernetes client and list_namespaced_custom_object method + mocker.patch("kubernetes.config.load_kube_config", return_value="ignore") + mock_api_instance = mocker.Mock() + mocker.patch("kubernetes.client.CustomObjectsApi", return_value=mock_api_instance) + mocker.patch("codeflare_sdk.ray.cluster.cluster.config_check") + + # Mock return value for list_namespaced_custom_object + mock_api_instance.list_namespaced_custom_object.return_value = { + "items": [ + {"metadata": {"name": "existing-queue"}}, + {"metadata": {"name": "another-queue"}}, + ] + } + + # Call the function + namespace = "test-namespace" + local_queue_name = "existing-queue" + result = local_queue_exists(namespace, local_queue_name) + + # Assertions + assert result is True + mock_api_instance.list_namespaced_custom_object.assert_called_once_with( + group="kueue.x-k8s.io", + version="v1beta1", + namespace=namespace, + plural="localqueues", + ) + + +def test_local_queue_exists_not_found(mocker): + # Mock Kubernetes client and list_namespaced_custom_object method + mocker.patch("kubernetes.config.load_kube_config", return_value="ignore") + mock_api_instance = mocker.Mock() + mocker.patch("kubernetes.client.CustomObjectsApi", return_value=mock_api_instance) + mocker.patch("codeflare_sdk.ray.cluster.cluster.config_check") + + # Mock return value for list_namespaced_custom_object + mock_api_instance.list_namespaced_custom_object.return_value = { + "items": [ + {"metadata": {"name": "another-queue"}}, + {"metadata": {"name": "different-queue"}}, + ] + } + + # Call the function + namespace = "test-namespace" + local_queue_name = "non-existent-queue" + result = local_queue_exists(namespace, local_queue_name) + + # Assertions + assert result is False + mock_api_instance.list_namespaced_custom_object.assert_called_once_with( + group="kueue.x-k8s.io", + version="v1beta1", + namespace=namespace, + plural="localqueues", + ) + + +import 
pytest +from unittest import mock # If you're also using mocker from pytest-mock + + +def test_add_queue_label_with_valid_local_queue(mocker): + # Mock the kubernetes.client.CustomObjectsApi and its response + mock_api_instance = mocker.patch("kubernetes.client.CustomObjectsApi") + mock_api_instance.return_value.list_namespaced_custom_object.return_value = { + "items": [ + {"metadata": {"name": "valid-queue"}}, + ] + } + + # Mock other dependencies + mocker.patch("codeflare_sdk.common.kueue.local_queue_exists", return_value=True) + mocker.patch( + "codeflare_sdk.common.kueue.get_default_kueue_name", + return_value="default-queue", + ) + + # Define input item and parameters + item = {"metadata": {}} + namespace = "test-namespace" + local_queue = "valid-queue" + + # Call the function + add_queue_label(item, namespace, local_queue) + + # Assert that the label is added to the item + assert item["metadata"]["labels"] == {"kueue.x-k8s.io/queue-name": "valid-queue"} + + +def test_add_queue_label_with_invalid_local_queue(mocker): + # Mock the kubernetes.client.CustomObjectsApi and its response + mock_api_instance = mocker.patch("kubernetes.client.CustomObjectsApi") + mock_api_instance.return_value.list_namespaced_custom_object.return_value = { + "items": [ + {"metadata": {"name": "valid-queue"}}, + ] + } + + # Mock the local_queue_exists function to return False + mocker.patch("codeflare_sdk.common.kueue.local_queue_exists", return_value=False) + + # Define input item and parameters + item = {"metadata": {}} + namespace = "test-namespace" + local_queue = "invalid-queue" + + # Call the function and expect a ValueError + with pytest.raises( + ValueError, + match="local_queue provided does not exist or is not in this namespace", + ): + add_queue_label(item, namespace, local_queue) + + +# Make sure to always keep this function last +def test_cleanup(): + os.remove(f"{aw_dir}unit-test-cluster-kueue.yaml") + os.remove(f"{aw_dir}unit-test-aw-kueue.yaml") diff --git 
a/src/codeflare_sdk/cluster/__init__.py b/src/codeflare_sdk/common/utils/__init__.py similarity index 100% rename from src/codeflare_sdk/cluster/__init__.py rename to src/codeflare_sdk/common/utils/__init__.py diff --git a/src/codeflare_sdk/common/utils/constants.py b/src/codeflare_sdk/common/utils/constants.py new file mode 100644 index 00000000..fcd064d6 --- /dev/null +++ b/src/codeflare_sdk/common/utils/constants.py @@ -0,0 +1,14 @@ +RAY_VERSION = "2.47.1" +""" +The below are used to define the default runtime image for the Ray Cluster. +* For python 3.11:ray:2.47.1-py311-cu121 +* For python 3.12:ray:2.47.1-py312-cu128 +""" +CUDA_PY311_RUNTIME_IMAGE = "quay.io/modh/ray@sha256:6d076aeb38ab3c34a6a2ef0f58dc667089aa15826fa08a73273c629333e12f1e" +CUDA_PY312_RUNTIME_IMAGE = "quay.io/modh/ray@sha256:fb6f207de63e442c67bb48955cf0584f3704281faf17b90419cfa274fdec63c5" + +# Centralized image selection +SUPPORTED_PYTHON_VERSIONS = { + "3.11": CUDA_PY311_RUNTIME_IMAGE, + "3.12": CUDA_PY312_RUNTIME_IMAGE, +} diff --git a/src/codeflare_sdk/common/utils/demos.py b/src/codeflare_sdk/common/utils/demos.py new file mode 100644 index 00000000..5c546fe9 --- /dev/null +++ b/src/codeflare_sdk/common/utils/demos.py @@ -0,0 +1,31 @@ +import pathlib +import shutil + +package_dir = pathlib.Path(__file__).parent.parent.parent.resolve() +demo_dir = f"{package_dir}/demo-notebooks" + + +def copy_demo_nbs(dir: str = "./demo-notebooks", overwrite: bool = False): + """ + Copy the demo notebooks from the package to the current working directory + + overwrite=True will overwrite any files that exactly match files written by copy_demo_nbs in the target directory. + Any files that exist in the directory that don't match these values will remain untouched. + + Args: + dir (str): + The directory to copy the demo notebooks to. Defaults to "./demo-notebooks". + overwrite (bool): + Whether to overwrite files in the directory if it already exists. Defaults to False. 
+ + Raises: + FileExistsError: + If the directory already exists. + """ + # does dir exist already? + if overwrite is False and pathlib.Path(dir).exists(): + raise FileExistsError( + f"Directory {dir} already exists. Please remove it or provide a different location." + ) + + shutil.copytree(demo_dir, dir, dirs_exist_ok=True) diff --git a/src/codeflare_sdk/utils/generate_cert.py b/src/codeflare_sdk/common/utils/generate_cert.py similarity index 60% rename from src/codeflare_sdk/utils/generate_cert.py rename to src/codeflare_sdk/common/utils/generate_cert.py index 04b04d3e..7c072da0 100644 --- a/src/codeflare_sdk/utils/generate_cert.py +++ b/src/codeflare_sdk/common/utils/generate_cert.py @@ -19,15 +19,29 @@ from cryptography import x509 from cryptography.x509.oid import NameOID import datetime -from ..cluster.auth import config_check, api_config_handler -from kubernetes import client, config +from ..kubernetes_cluster.auth import ( + config_check, + get_api_client, +) +from kubernetes import client +from .. import _kube_api_error_handling def generate_ca_cert(days: int = 30): - # Generate base64 encoded ca.key and ca.cert - # Similar to: - # openssl req -x509 -nodes -newkey rsa:2048 -keyout ca.key -days 1826 -out ca.crt -subj '/CN=root-ca' - # base64 -i ca.crt -i ca.key + """ + Generates a self-signed CA certificate and private key, encoded in base64 format. + + Similar to: + openssl req -x509 -nodes -newkey rsa:2048 -keyout ca.key -days 1826 -out ca.crt -subj '/CN=root-ca' + + Args: + days (int): + The number of days for which the CA certificate will be valid. Default is 30. + + Returns: + Tuple[str, str]: + A tuple containing the base64-encoded private key and CA certificate. 
+ """ private_key = rsa.generate_private_key( public_exponent=65537, @@ -74,8 +88,64 @@ def generate_ca_cert(days: int = 30): return key, certificate +def get_secret_name(cluster_name, namespace, api_instance): + """ + Retrieves the name of the Kubernetes secret containing the CA certificate for the given Ray cluster. + + Args: + cluster_name (str): + The name of the Ray cluster. + namespace (str): + The Kubernetes namespace where the Ray cluster is located. + api_instance (client.CoreV1Api): + An instance of the Kubernetes CoreV1Api. + + Returns: + str: + The name of the Kubernetes secret containing the CA certificate. + + Raises: + KeyError: + If no secret matching the cluster name is found. + """ + label_selector = f"ray.openshift.ai/cluster-name={cluster_name}" + try: + secrets = api_instance.list_namespaced_secret( + namespace, label_selector=label_selector + ) + for secret in secrets.items: + if ( + f"{cluster_name}-ca-secret-" in secret.metadata.name + ): # Oauth secret share the same label this conditional is to make things more specific + return secret.metadata.name + else: + continue + raise KeyError(f"Unable to gather secret name for {cluster_name}") + except Exception as e: # pragma: no cover + return _kube_api_error_handling(e) + + def generate_tls_cert(cluster_name, namespace, days=30): - # Create a folder tls-- and store three files: ca.crt, tls.crt, and tls.key + """ + Generates a TLS certificate and key for a Ray cluster, saving them locally along with the CA certificate. + + Args: + cluster_name (str): + The name of the Ray cluster. + namespace (str): + The Kubernetes namespace where the Ray cluster is located. + days (int): + The number of days for which the TLS certificate will be valid. Default is 30. + + Files Created: + - ca.crt: The CA certificate. + - tls.crt: The TLS certificate signed by the CA. + - tls.key: The private key for the TLS certificate. + + Raises: + Exception: + If an error occurs while retrieving the CA secret. 
+ """ tls_dir = os.path.join(os.getcwd(), f"tls-{cluster_name}-{namespace}") if not os.path.exists(tls_dir): os.makedirs(tls_dir) @@ -84,8 +154,12 @@ def generate_tls_cert(cluster_name, namespace, days=30): # oc get secret ca-secret- -o template='{{index .data "ca.key"}}' # oc get secret ca-secret- -o template='{{index .data "ca.crt"}}'|base64 -d > ${TLSDIR}/ca.crt config_check() - v1 = client.CoreV1Api(api_config_handler()) - secret = v1.read_namespaced_secret(f"ca-secret-{cluster_name}", namespace).data + v1 = client.CoreV1Api(get_api_client()) + + # Secrets have a suffix appended to the end so we must list them and gather the secret that includes cluster_name-ca-secret- + secret_name = get_secret_name(cluster_name, namespace, v1) + secret = v1.read_namespaced_secret(secret_name, namespace).data + ca_cert = secret.get("ca.crt") ca_key = secret.get("ca.key") @@ -155,6 +229,21 @@ def generate_tls_cert(cluster_name, namespace, days=30): def export_env(cluster_name, namespace): + """ + Sets environment variables to configure TLS for a Ray cluster. + + Args: + cluster_name (str): + The name of the Ray cluster. + namespace (str): + The Kubernetes namespace where the Ray cluster is located. + + Environment Variables Set: + - RAY_USE_TLS: Enables TLS for Ray. + - RAY_TLS_SERVER_CERT: Path to the TLS server certificate. + - RAY_TLS_SERVER_KEY: Path to the TLS server private key. + - RAY_TLS_CA_CERT: Path to the CA certificate. 
+ """ tls_dir = os.path.join(os.getcwd(), f"tls-{cluster_name}-{namespace}") os.environ["RAY_USE_TLS"] = "1" os.environ["RAY_TLS_SERVER_CERT"] = os.path.join(tls_dir, "tls.crt") diff --git a/src/codeflare_sdk/common/utils/test_generate_cert.py b/src/codeflare_sdk/common/utils/test_generate_cert.py new file mode 100644 index 00000000..b4439c20 --- /dev/null +++ b/src/codeflare_sdk/common/utils/test_generate_cert.py @@ -0,0 +1,114 @@ +# Copyright 2024 IBM, Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import base64 + +from cryptography.hazmat.primitives.serialization import ( + Encoding, + PublicFormat, + load_pem_private_key, +) +from cryptography.x509 import load_pem_x509_certificate +import os +from codeflare_sdk.common.utils.generate_cert import ( + export_env, + generate_ca_cert, + generate_tls_cert, +) +from kubernetes import client + + +def test_generate_ca_cert(): + """ + test the function codeflare_sdk.common.utils.generate_ca_cert generates the correct outputs + """ + key, certificate = generate_ca_cert() + cert = load_pem_x509_certificate(base64.b64decode(certificate)) + private_pub_key_bytes = ( + load_pem_private_key(base64.b64decode(key), password=None) + .public_key() + .public_bytes(Encoding.PEM, PublicFormat.SubjectPublicKeyInfo) + ) + cert_pub_key_bytes = cert.public_key().public_bytes( + Encoding.PEM, PublicFormat.SubjectPublicKeyInfo + ) + assert type(key) == str + assert type(certificate) == str + # Veirfy ca.cert is self signed + assert cert.verify_directly_issued_by(cert) == None + # Verify cert has the public key bytes from the private key + assert cert_pub_key_bytes == private_pub_key_bytes + + +def secret_ca_retreival(secret_name, namespace): + ca_private_key_bytes, ca_cert = generate_ca_cert() + data = {"ca.crt": ca_cert, "ca.key": ca_private_key_bytes} + assert secret_name == "ca-secret-cluster" + assert namespace == "namespace" + return client.models.V1Secret(data=data) + + +def test_generate_tls_cert(mocker): + """ + test the function codeflare_sdk.common.utils.generate_ca_cert generates the correct outputs + """ + mocker.patch("kubernetes.config.load_kube_config", return_value="ignore") + mocker.patch( + "codeflare_sdk.common.utils.generate_cert.get_secret_name", + return_value="ca-secret-cluster", + ) + mocker.patch( + "kubernetes.client.CoreV1Api.read_namespaced_secret", + side_effect=secret_ca_retreival, + ) + + generate_tls_cert("cluster", "namespace") + assert os.path.exists("tls-cluster-namespace") + assert 
os.path.exists(os.path.join("tls-cluster-namespace", "ca.crt")) + assert os.path.exists(os.path.join("tls-cluster-namespace", "tls.crt")) + assert os.path.exists(os.path.join("tls-cluster-namespace", "tls.key")) + + # verify the that the signed tls.crt is issued by the ca_cert (root cert) + with open(os.path.join("tls-cluster-namespace", "tls.crt"), "r") as f: + tls_cert = load_pem_x509_certificate(f.read().encode("utf-8")) + with open(os.path.join("tls-cluster-namespace", "ca.crt"), "r") as f: + root_cert = load_pem_x509_certificate(f.read().encode("utf-8")) + assert tls_cert.verify_directly_issued_by(root_cert) == None + + +def test_export_env(): + """ + test the function codeflare_sdk.common.utils.generate_ca_cert.export_ev generates the correct outputs + """ + tls_dir = "cluster" + ns = "namespace" + export_env(tls_dir, ns) + assert os.environ["RAY_USE_TLS"] == "1" + assert os.environ["RAY_TLS_SERVER_CERT"] == os.path.join( + os.getcwd(), f"tls-{tls_dir}-{ns}", "tls.crt" + ) + assert os.environ["RAY_TLS_SERVER_KEY"] == os.path.join( + os.getcwd(), f"tls-{tls_dir}-{ns}", "tls.key" + ) + assert os.environ["RAY_TLS_CA_CERT"] == os.path.join( + os.getcwd(), f"tls-{tls_dir}-{ns}", "ca.crt" + ) + + +# Make sure to always keep this function last +def test_cleanup(): + os.remove("tls-cluster-namespace/ca.crt") + os.remove("tls-cluster-namespace/tls.crt") + os.remove("tls-cluster-namespace/tls.key") + os.rmdir("tls-cluster-namespace") diff --git a/src/codeflare_sdk/common/utils/unit_test_support.py b/src/codeflare_sdk/common/utils/unit_test_support.py new file mode 100644 index 00000000..653e818c --- /dev/null +++ b/src/codeflare_sdk/common/utils/unit_test_support.py @@ -0,0 +1,566 @@ +# Copyright 2024 IBM, Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import string +import sys +from codeflare_sdk.common.utils import constants +from codeflare_sdk.common.utils.utils import get_ray_image_for_python_version +from codeflare_sdk.ray.cluster.cluster import ( + Cluster, + ClusterConfiguration, +) +import os +import yaml +from pathlib import Path +from kubernetes import client +from kubernetes.client import V1Toleration +from unittest.mock import patch + +parent = Path(__file__).resolve().parents[4] # project directory +aw_dir = os.path.expanduser("~/.codeflare/resources/") + + +def create_cluster_config(num_workers=2, write_to_file=False): + config = ClusterConfiguration( + name="unit-test-cluster", + namespace="ns", + num_workers=num_workers, + worker_cpu_requests=3, + worker_cpu_limits=4, + worker_memory_requests=5, + worker_memory_limits=6, + appwrapper=True, + write_to_file=write_to_file, + ) + return config + + +def create_cluster(mocker, num_workers=2, write_to_file=False): + cluster = Cluster(create_cluster_config(num_workers, write_to_file)) + return cluster + + +def patch_cluster_with_dynamic_client(mocker, cluster, dynamic_client=None): + mocker.patch.object(cluster, "get_dynamic_client", return_value=dynamic_client) + mocker.patch.object(cluster, "down", return_value=None) + mocker.patch.object(cluster, "config_check", return_value=None) + # mocker.patch.object(cluster, "_throw_for_no_raycluster", return_value=None) + + +def create_cluster_wrong_type(): + config = ClusterConfiguration( + name="unit-test-cluster", + namespace="ns", + num_workers=True, + worker_cpu_requests=[], + worker_cpu_limits=4, 
+ worker_memory_requests=5, + worker_memory_limits=6, + worker_extended_resource_requests={"nvidia.com/gpu": 7}, + appwrapper=True, + image_pull_secrets=["unit-test-pull-secret"], + image=constants.CUDA_PY312_RUNTIME_IMAGE, + write_to_file=True, + labels={1: 1}, + ) + return config + + +def get_package_and_version(package_name, requirements_file_path): + with open(requirements_file_path, "r") as file: + for line in file: + if line.strip().startswith(f"{package_name}=="): + return line.strip() + return None + + +def get_local_queue(group, version, namespace, plural): + assert group == "kueue.x-k8s.io" + assert version == "v1beta1" + assert namespace == "ns" + assert plural == "localqueues" + local_queues = { + "apiVersion": "kueue.x-k8s.io/v1beta1", + "items": [ + { + "apiVersion": "kueue.x-k8s.io/v1beta1", + "kind": "LocalQueue", + "metadata": { + "annotations": {"kueue.x-k8s.io/default-queue": "true"}, + "name": "local-queue-default", + "namespace": "ns", + }, + "spec": {"clusterQueue": "cluster-queue"}, + }, + { + "apiVersion": "kueue.x-k8s.io/v1beta1", + "kind": "LocalQueue", + "metadata": { + "name": "team-a-queue", + "namespace": "ns", + }, + "spec": {"clusterQueue": "team-a-queue"}, + }, + ], + "kind": "LocalQueueList", + "metadata": {"continue": "", "resourceVersion": "2266811"}, + } + return local_queues + + +def arg_check_aw_apply_effect(group, version, namespace, plural, body, *args): + assert group == "workload.codeflare.dev" + assert version == "v1beta2" + assert namespace == "ns" + assert plural == "appwrappers" + with open(f"{aw_dir}test.yaml") as f: + aw = yaml.load(f, Loader=yaml.FullLoader) + assert body == aw + assert args == tuple() + + +def arg_check_aw_del_effect(group, version, namespace, plural, name, *args): + assert group == "workload.codeflare.dev" + assert version == "v1beta2" + assert namespace == "ns" + assert plural == "appwrappers" + assert name == "test" + assert args == tuple() + + +def get_cluster_object(file_a, file_b): + with 
open(file_a) as f: + cluster_a = yaml.load(f, Loader=yaml.FullLoader) + with open(file_b) as f: + cluster_b = yaml.load(f, Loader=yaml.FullLoader) + + return cluster_a, cluster_b + + +def get_ray_obj(group, version, namespace, plural): + # To be used for mocking list_namespaced_custom_object for Ray Clusters + rc_a = apply_template( + f"{parent}/tests/test_cluster_yamls/support_clusters/test-rc-a.yaml", + get_template_variables(), + ) + rc_b = apply_template( + f"{parent}/tests/test_cluster_yamls/support_clusters/test-rc-b.yaml", + get_template_variables(), + ) + + rc_list = {"items": [rc_a, rc_b]} + return rc_list + + +def get_ray_obj_with_status(group, version, namespace, plural): + # To be used for mocking list_namespaced_custom_object for Ray Clusters with statuses + rc_a = apply_template( + f"{parent}/tests/test_cluster_yamls/support_clusters/test-rc-a.yaml", + get_template_variables(), + ) + rc_b = apply_template( + f"{parent}/tests/test_cluster_yamls/support_clusters/test-rc-b.yaml", + get_template_variables(), + ) + + rc_a.update( + { + "status": { + "desiredWorkerReplicas": 1, + "endpoints": { + "client": "10001", + "dashboard": "8265", + "gcs": "6379", + "metrics": "8080", + }, + "head": {"serviceIP": "172.30.179.88"}, + "lastUpdateTime": "2024-03-05T09:55:37Z", + "maxWorkerReplicas": 1, + "minWorkerReplicas": 1, + "observedGeneration": 1, + "state": "ready", + }, + } + ) + rc_b.update( + { + "status": { + "availableWorkerReplicas": 2, + "desiredWorkerReplicas": 1, + "endpoints": { + "client": "10001", + "dashboard": "8265", + "gcs": "6379", + }, + "lastUpdateTime": "2023-02-22T16:26:16Z", + "maxWorkerReplicas": 1, + "minWorkerReplicas": 1, + "state": "suspended", + } + } + ) + + rc_list = {"items": [rc_a, rc_b]} + return rc_list + + +def get_aw_obj(group, version, namespace, plural): + # To be used for mocking list_namespaced_custom_object for AppWrappers + aw_a = apply_template( + f"{parent}/tests/test_cluster_yamls/support_clusters/test-aw-a.yaml", + 
get_template_variables(), + ) + aw_b = apply_template( + f"{parent}/tests/test_cluster_yamls/support_clusters/test-aw-b.yaml", + get_template_variables(), + ) + + aw_list = {"items": [aw_a, aw_b]} + return aw_list + + +def get_aw_obj_with_status(group, version, namespace, plural): + # To be used for mocking list_namespaced_custom_object for AppWrappers with statuses + aw_a = apply_template( + f"{parent}/tests/test_cluster_yamls/support_clusters/test-aw-a.yaml", + get_template_variables(), + ) + aw_b = apply_template( + f"{parent}/tests/test_cluster_yamls/support_clusters/test-aw-b.yaml", + get_template_variables(), + ) + + aw_a.update( + { + "status": { + "phase": "Running", + }, + } + ) + aw_b.update( + { + "status": { + "phase": "Suspended", + }, + } + ) + + aw_list = {"items": [aw_a, aw_b]} + return aw_list + + +def get_named_aw(group, version, namespace, plural, name): + aws = get_aw_obj("workload.codeflare.dev", "v1beta2", "ns", "appwrappers") + return aws["items"][0] + + +def arg_check_del_effect(group, version, namespace, plural, name, *args): + assert namespace == "ns" + assert args == tuple() + if plural == "appwrappers": + assert group == "workload.codeflare.dev" + assert version == "v1beta2" + assert name == "unit-test-cluster" + elif plural == "rayclusters": + assert group == "ray.io" + assert version == "v1" + assert name == "unit-test-cluster-ray" + elif plural == "ingresses": + assert group == "networking.k8s.io" + assert version == "v1" + assert name == "ray-dashboard-unit-test-cluster-ray" + + +def apply_template(yaml_file_path, variables): + with open(yaml_file_path, "r") as file: + yaml_content = file.read() + + # Create a Template instance and substitute the variables + template = string.Template(yaml_content) + filled_yaml = template.substitute(variables) + + # Now load the filled YAML into a Python object + return yaml.load(filled_yaml, Loader=yaml.FullLoader) + + +def get_expected_image(): + # Use centralized image selection logic (fallback 
to 3.12 for test consistency) + return get_ray_image_for_python_version(warn_on_unsupported=True) + + +def get_template_variables(): + return { + "image": get_expected_image(), + } + + +def arg_check_apply_effect(group, version, namespace, plural, body, *args): + assert namespace == "ns" + assert args == tuple() + if plural == "appwrappers": + assert group == "workload.codeflare.dev" + assert version == "v1beta2" + elif plural == "rayclusters": + assert group == "ray.io" + assert version == "v1" + elif plural == "ingresses": + assert group == "networking.k8s.io" + assert version == "v1" + elif plural == "routes": + assert group == "route.openshift.io" + assert version == "v1" + else: + assert 1 == 0 + + +def get_obj_none(group, version, namespace, plural): + return {"items": []} + + +def route_list_retrieval(group, version, namespace, plural): + assert group == "route.openshift.io" + assert version == "v1" + assert namespace == "ns" + assert plural == "routes" + return { + "kind": "RouteList", + "apiVersion": "route.openshift.io/v1", + "metadata": {"resourceVersion": "6072398"}, + "items": [ + { + "metadata": { + "name": "ray-dashboard-quicktest", + "namespace": "ns", + }, + "spec": { + "host": "ray-dashboard-quicktest-opendatahub.apps.cluster.awsroute.org", + "to": { + "kind": "Service", + "name": "quicktest-head-svc", + "weight": 100, + }, + "port": {"targetPort": "dashboard"}, + "tls": {"termination": "edge"}, + }, + }, + { + "metadata": { + "name": "rayclient-quicktest", + "namespace": "ns", + }, + "spec": { + "host": "rayclient-quicktest-opendatahub.apps.cluster.awsroute.org", + "to": { + "kind": "Service", + "name": "quicktest-head-svc", + "weight": 100, + }, + "port": {"targetPort": "client"}, + "tls": {"termination": "passthrough"}, + }, + }, + ], + } + + +def ingress_retrieval( + cluster_name="unit-test-cluster", client_ing: bool = False, annotations: dict = None +): + dashboard_ingress = mocked_ingress(8265, cluster_name, annotations) + if client_ing: + 
client_ingress = mocked_ingress( + 10001, cluster_name=cluster_name, annotations=annotations + ) + mock_ingress_list = client.V1IngressList( + items=[client_ingress, dashboard_ingress] + ) + else: + mock_ingress_list = client.V1IngressList(items=[dashboard_ingress]) + + return mock_ingress_list + + +def mocked_ingress(port, cluster_name="unit-test-cluster", annotations: dict = None): + labels = {"ingress-owner": cluster_name} + if port == 10001: + name = f"rayclient-{cluster_name}" + else: + name = f"ray-dashboard-{cluster_name}" + mock_ingress = client.V1Ingress( + metadata=client.V1ObjectMeta( + name=name, + annotations=annotations, + labels=labels, + owner_references=[ + client.V1OwnerReference( + api_version="v1", kind="Ingress", name=cluster_name, uid="unique-id" + ) + ], + ), + spec=client.V1IngressSpec( + rules=[ + client.V1IngressRule( + host=f"{name}-ns.apps.cluster.awsroute.org", + http=client.V1HTTPIngressRuleValue( + paths=[ + client.V1HTTPIngressPath( + path_type="Prefix", + path="/", + backend=client.V1IngressBackend( + service=client.V1IngressServiceBackend( + name="head-svc-test", + port=client.V1ServiceBackendPort(number=port), + ) + ), + ) + ] + ), + ) + ], + ), + ) + return mock_ingress + + +# Global dictionary to maintain state in the mock +cluster_state = {} + + +# The mock side_effect function for server_side_apply +def mock_server_side_apply(resource, body=None, name=None, namespace=None, **kwargs): + # Simulate the behavior of server_side_apply: + # Update a mock state that represents the cluster's current configuration. + # Stores the state in a global dictionary for simplicity. 
+ + global cluster_state + + if not resource or not body or not name or not namespace: + raise ValueError("Missing required parameters for server_side_apply") + + # Extract worker count from the body if it exists + try: + worker_count = ( + body["spec"]["workerGroupSpecs"][0]["replicas"] + if "spec" in body and "workerGroupSpecs" in body["spec"] + else None + ) + except KeyError: + worker_count = None + + # Apply changes to the cluster_state mock + cluster_state[name] = { + "namespace": namespace, + "worker_count": worker_count, + "body": body, + } + + # Return a response that mimics the behavior of a successful apply + return { + "status": "success", + "applied": True, + "name": name, + "namespace": namespace, + "worker_count": worker_count, + } + + +@patch.dict("os.environ", {"NB_PREFIX": "test-prefix"}) +def create_cluster_all_config_params(mocker, cluster_name, is_appwrapper) -> Cluster: + mocker.patch( + "kubernetes.client.CustomObjectsApi.list_namespaced_custom_object", + return_value=get_local_queue("kueue.x-k8s.io", "v1beta1", "ns", "localqueues"), + ) + volumes, volume_mounts = get_example_extended_storage_opts() + + config = ClusterConfiguration( + name=cluster_name, + namespace="ns", + head_cpu_requests=4, + head_cpu_limits=8, + head_memory_requests=12, + head_memory_limits=16, + head_extended_resource_requests={"nvidia.com/gpu": 1, "intel.com/gpu": 2}, + head_tolerations=[ + V1Toleration( + key="key1", operator="Equal", value="value1", effect="NoSchedule" + ) + ], + worker_cpu_requests=4, + worker_cpu_limits=8, + worker_tolerations=[ + V1Toleration( + key="key2", operator="Equal", value="value2", effect="NoSchedule" + ) + ], + num_workers=10, + worker_memory_requests=12, + worker_memory_limits=16, + appwrapper=is_appwrapper, + envs={"key1": "value1", "key2": "value2"}, + image="example/ray:tag", + image_pull_secrets=["secret1", "secret2"], + write_to_file=True, + verify_tls=True, + labels={"key1": "value1", "key2": "value2"}, + 
worker_extended_resource_requests={"nvidia.com/gpu": 1}, + extended_resource_mapping={"example.com/gpu": "GPU", "intel.com/gpu": "TPU"}, + overwrite_default_resource_mapping=True, + local_queue="local-queue-default", + annotations={ + "key1": "value1", + "key2": "value2", + }, + volumes=volumes, + volume_mounts=volume_mounts, + ) + return Cluster(config) + + +def get_example_extended_storage_opts(): + from kubernetes.client import ( + V1Volume, + V1VolumeMount, + V1EmptyDirVolumeSource, + V1ConfigMapVolumeSource, + V1KeyToPath, + V1SecretVolumeSource, + ) + + volume_mounts = [ + V1VolumeMount(mount_path="/home/ray/test1", name="test"), + V1VolumeMount( + mount_path="/home/ray/test2", + name="test2", + ), + V1VolumeMount( + mount_path="/home/ray/test2", + name="test3", + ), + ] + + volumes = [ + V1Volume( + name="test", + empty_dir=V1EmptyDirVolumeSource(size_limit="500Gi"), + ), + V1Volume( + name="test2", + config_map=V1ConfigMapVolumeSource( + name="config-map-test", + items=[V1KeyToPath(key="test", path="/home/ray/test2/data.txt")], + ), + ), + V1Volume(name="test3", secret=V1SecretVolumeSource(secret_name="test-secret")), + ] + return volumes, volume_mounts diff --git a/src/codeflare_sdk/common/utils/utils.py b/src/codeflare_sdk/common/utils/utils.py new file mode 100644 index 00000000..f876e924 --- /dev/null +++ b/src/codeflare_sdk/common/utils/utils.py @@ -0,0 +1,46 @@ +# Copyright 2025 IBM, Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import sys + +from codeflare_sdk.common.utils.constants import ( + SUPPORTED_PYTHON_VERSIONS, + CUDA_PY312_RUNTIME_IMAGE, +) + + +def get_ray_image_for_python_version(python_version=None, warn_on_unsupported=True): + """ + Get the appropriate Ray image for a given Python version. + If no version is provided, uses the current runtime Python version. + This prevents us needing to hard code image versions for tests. + + Args: + python_version: Python version string (e.g. "3.11"). If None, detects current version. + warn_on_unsupported: If True, warns and returns None for unsupported versions. + If False, silently falls back to Python 3.12 image. + """ + if python_version is None: + python_version = f"{sys.version_info.major}.{sys.version_info.minor}" + + if python_version in SUPPORTED_PYTHON_VERSIONS: + return SUPPORTED_PYTHON_VERSIONS[python_version] + elif warn_on_unsupported: + import warnings + + warnings.warn( + f"No default Ray image defined for {python_version}. Please provide your own image or use one of the following python versions: {', '.join(SUPPORTED_PYTHON_VERSIONS.keys())}." + ) + return None + else: + return CUDA_PY312_RUNTIME_IMAGE diff --git a/src/codeflare_sdk/common/widgets/__init__.py b/src/codeflare_sdk/common/widgets/__init__.py new file mode 100644 index 00000000..60be4fcd --- /dev/null +++ b/src/codeflare_sdk/common/widgets/__init__.py @@ -0,0 +1,3 @@ +from .widgets import ( + view_clusters, +) diff --git a/src/codeflare_sdk/common/widgets/test_widgets.py b/src/codeflare_sdk/common/widgets/test_widgets.py new file mode 100644 index 00000000..f88d8eb2 --- /dev/null +++ b/src/codeflare_sdk/common/widgets/test_widgets.py @@ -0,0 +1,481 @@ +# Copyright 2024 IBM, Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import codeflare_sdk.common.widgets.widgets as cf_widgets +import pandas as pd +from unittest.mock import MagicMock, patch +from ..utils.unit_test_support import get_local_queue, create_cluster_config +from codeflare_sdk.ray.cluster.cluster import Cluster +from codeflare_sdk.ray.cluster.status import ( + RayCluster, + RayClusterStatus, +) +import pytest +from kubernetes import client + + +@patch.dict( + "os.environ", {"JPY_SESSION_NAME": "example-test"} +) # Mock Jupyter environment variable +def test_cluster_apply_down_buttons(mocker): + mocker.patch("kubernetes.client.ApisApi.get_api_versions") + mocker.patch( + "kubernetes.client.CustomObjectsApi.get_cluster_custom_object", + return_value={"spec": {"domain": "apps.cluster.awsroute.org"}}, + ) + mocker.patch( + "kubernetes.client.CustomObjectsApi.list_namespaced_custom_object", + return_value=get_local_queue("kueue.x-k8s.io", "v1beta1", "ns", "localqueues"), + ) + cluster = Cluster(create_cluster_config()) + + with patch("ipywidgets.Button") as MockButton, patch( + "ipywidgets.Checkbox" + ) as MockCheckbox, patch("ipywidgets.Output"), patch("ipywidgets.HBox"), patch( + "ipywidgets.VBox" + ), patch.object( + cluster, "apply" + ) as mock_apply, patch.object( + cluster, "down" + ) as mock_down, patch.object( + cluster, "wait_ready" + ) as mock_wait_ready: + # Create mock button & CheckBox instances + mock_apply_button = MagicMock() + mock_down_button = MagicMock() + mock_wait_ready_check_box = MagicMock() + + # Ensure the mock Button class returns the mock button instances in sequence + 
MockCheckbox.side_effect = [mock_wait_ready_check_box] + MockButton.side_effect = [mock_apply_button, mock_down_button] + + # Call the method under test + cf_widgets.cluster_apply_down_buttons(cluster) + + # Simulate checkbox being checked or unchecked + mock_wait_ready_check_box.value = True # Simulate checkbox being checked + + # Simulate the button clicks by calling the mock on_click handlers + mock_apply_button.on_click.call_args[0][0]( + None + ) # Simulate clicking "Cluster Apply" + mock_down_button.on_click.call_args[0][0]( + None + ) # Simulate clicking "Cluster Down" + + # Check if the `apply` and `down` methods were called + mock_wait_ready.assert_called_once() + mock_apply.assert_called_once() + mock_down.assert_called_once() + + +@patch.dict("os.environ", {}, clear=True) # Mock environment with no variables +def test_is_notebook_false(): + assert cf_widgets.is_notebook() is False + + +@patch.dict( + "os.environ", {"JPY_SESSION_NAME": "example-test"} +) # Mock Jupyter environment variable +def test_is_notebook_true(): + assert cf_widgets.is_notebook() is True + + +def test_view_clusters(mocker, capsys): + # If is not a notebook environment, a warning should be raised + with pytest.warns( + UserWarning, + match="view_clusters can only be used in a Jupyter Notebook environment.", + ): + result = cf_widgets.view_clusters("default") + + # Assert the function returns None when not in a notebook environment + assert result is None + + # Prepare to run view_clusters when notebook environment is detected + mocker.patch("codeflare_sdk.common.widgets.widgets.is_notebook", return_value=True) + mock_get_current_namespace = mocker.patch( + "codeflare_sdk.ray.cluster.cluster.get_current_namespace", + return_value="default", + ) + namespace = mock_get_current_namespace.return_value + + # Assert the function returns None when no clusters are found + mock_fetch_cluster_data = mocker.patch( + "codeflare_sdk.common.widgets.widgets._fetch_cluster_data", + 
return_value=pd.DataFrame(), + ) + result = cf_widgets.view_clusters() + captured = capsys.readouterr() + assert mock_fetch_cluster_data.return_value.empty + assert "No clusters found in the default namespace." in captured.out + assert result is None + + # Prepare to run view_clusters with a test DataFrame + mock_fetch_cluster_data = mocker.patch( + "codeflare_sdk.common.widgets.widgets._fetch_cluster_data", + return_value=pd.DataFrame( + { + "Name": ["test-cluster"], + "Namespace": ["default"], + "Num Workers": ["1"], + "Head GPUs": ["0"], + "Worker GPUs": ["0"], + "Head CPU Req~Lim": ["1~1"], + "Head Memory Req~Lim": ["1Gi~1Gi"], + "Worker CPU Req~Lim": ["1~1"], + "Worker Memory Req~Lim": ["1Gi~1Gi"], + "status": ['Ready ✓'], + } + ), + ) + # Create a RayClusterManagerWidgets instance + ray_cluster_manager_instance = cf_widgets.RayClusterManagerWidgets( + ray_clusters_df=mock_fetch_cluster_data.return_value, namespace=namespace + ) + # Patch the constructor of RayClusterManagerWidgets to return our initialized instance + mock_constructor = mocker.patch( + "codeflare_sdk.common.widgets.widgets.RayClusterManagerWidgets", + return_value=ray_cluster_manager_instance, + ) + + # Use a spy to track calls to display_widgets without replacing it + spy_display_widgets = mocker.spy(ray_cluster_manager_instance, "display_widgets") + + cf_widgets.view_clusters() + + mock_constructor.assert_called_once_with( + ray_clusters_df=mock_fetch_cluster_data.return_value, namespace=namespace + ) + + spy_display_widgets.assert_called_once() + + +def test_delete_cluster(mocker, capsys): + name = "test-cluster" + namespace = "default" + + mocker.patch("kubernetes.config.load_kube_config", return_value="ignore") + mocker.patch("kubernetes.client.ApisApi.get_api_versions") + + mock_ray_cluster = MagicMock() + mocker.patch( + "kubernetes.client.CustomObjectsApi.get_namespaced_custom_object", + side_effect=[ + mock_ray_cluster, + client.ApiException(status=404), + 
client.ApiException(status=404), + mock_ray_cluster, + ], + ) + + # In this scenario, the RayCluster exists and the AppWrapper does not. + mocker.patch( + "codeflare_sdk.ray.cluster.cluster._check_aw_exists", return_value=False + ) + mock_delete_rc = mocker.patch( + "kubernetes.client.CustomObjectsApi.delete_namespaced_custom_object" + ) + cf_widgets._delete_cluster(name, namespace) + + mock_delete_rc.assert_called_once_with( + group="ray.io", + version="v1", + namespace=namespace, + plural="rayclusters", + name=name, + ) + + # In this scenario, the AppWrapper exists and the RayCluster does not + mocker.patch( + "codeflare_sdk.ray.cluster.cluster._check_aw_exists", return_value=True + ) + mock_delete_aw = mocker.patch( + "kubernetes.client.CustomObjectsApi.delete_namespaced_custom_object" + ) + cf_widgets._delete_cluster(name, namespace) + + mock_delete_aw.assert_called_once_with( + group="workload.codeflare.dev", + version="v1beta2", + namespace=namespace, + plural="appwrappers", + name=name, + ) + + # In this scenario, the deletion of the resource times out. + with pytest.raises( + TimeoutError, match=f"Timeout waiting for {name} to be deleted." 
+ ): + cf_widgets._delete_cluster(name, namespace, 1) + + +def test_ray_cluster_manager_widgets_init(mocker, capsys): + namespace = "default" + mocker.patch("kubernetes.config.load_kube_config", return_value="ignore") + mocker.patch( + "kubernetes.client.CustomObjectsApi.list_namespaced_custom_object", + return_value=get_local_queue("kueue.x-k8s.io", "v1beta1", "ns", "localqueues"), + ) + test_ray_clusters_df = pd.DataFrame( + { + "Name": ["test-cluster-1", "test-cluster-2"], + "Namespace": [namespace, namespace], + "Num Workers": ["1", "2"], + "Head GPUs": ["0", "0"], + "Worker GPUs": ["0", "0"], + "Head CPU Req~Lim": ["1~1", "1~1"], + "Head Memory Req~Lim": ["1Gi~1Gi", "1Gi~1Gi"], + "Worker CPU Req~Lim": ["1~1", "1~1"], + "Worker Memory Req~Lim": ["1Gi~1Gi", "1Gi~1Gi"], + "status": [ + 'Ready ✓', + 'Ready ✓', + ], + } + ) + mock_fetch_cluster_data = mocker.patch( + "codeflare_sdk.common.widgets.widgets._fetch_cluster_data", + return_value=test_ray_clusters_df, + ) + mocker.patch( + "codeflare_sdk.ray.cluster.cluster.get_current_namespace", + return_value=namespace, + ) + mock_delete_cluster = mocker.patch( + "codeflare_sdk.common.widgets.widgets._delete_cluster" + ) + + # # Mock ToggleButtons + mock_toggle_buttons = mocker.patch("ipywidgets.ToggleButtons") + mock_button = mocker.patch("ipywidgets.Button") + mock_output = mocker.patch("ipywidgets.Output") + + # Initialize the RayClusterManagerWidgets instance + ray_cluster_manager_instance = cf_widgets.RayClusterManagerWidgets( + ray_clusters_df=test_ray_clusters_df, namespace=namespace + ) + + # Assertions for DataFrame and attributes + assert ray_cluster_manager_instance.ray_clusters_df.equals( + test_ray_clusters_df + ), "ray_clusters_df attribute does not match the input DataFrame" + assert ( + ray_cluster_manager_instance.namespace == namespace + ), f"Expected namespace to be '{namespace}', but got '{ray_cluster_manager_instance.namespace}'" + assert ( + 
ray_cluster_manager_instance.classification_widget.options + == test_ray_clusters_df["Name"].tolist() + ), "classification_widget options do not match the input DataFrame" + + # Assertions for widgets + mock_toggle_buttons.assert_called_once_with( + options=test_ray_clusters_df["Name"].tolist(), + value=test_ray_clusters_df["Name"].tolist()[0], + description="Select an existing cluster:", + ) + assert ( + ray_cluster_manager_instance.classification_widget + == mock_toggle_buttons.return_value + ), "classification_widget is not set correctly" + assert ( + ray_cluster_manager_instance.delete_button == mock_button.return_value + ), "delete_button is not set correctly" + assert ( + ray_cluster_manager_instance.list_jobs_button == mock_button.return_value + ), "list_jobs_button is not set correctly" + assert ( + ray_cluster_manager_instance.ray_dashboard_button == mock_button.return_value + ), "ray_dashboard_button is not set correctly" + assert ( + ray_cluster_manager_instance.refresh_data_button == mock_button.return_value + ), "refresh_data_button is not set correctly" + assert ( + ray_cluster_manager_instance.raycluster_data_output == mock_output.return_value + ), "raycluster_data_output is not set correctly" + assert ( + ray_cluster_manager_instance.user_output == mock_output.return_value + ), "user_output is not set correctly" + assert ( + ray_cluster_manager_instance.url_output == mock_output.return_value + ), "url_output is not set correctly" + + ### Test button click events + mock_delete_button = MagicMock() + mock_list_jobs_button = MagicMock() + mock_ray_dashboard_button = MagicMock() + mock_refresh_dataframe_button = MagicMock() + + mock_javascript = mocker.patch("codeflare_sdk.common.widgets.widgets.Javascript") + ray_cluster_manager_instance.url_output = MagicMock() + + mock_dashboard_uri = mocker.patch( + "codeflare_sdk.ray.cluster.cluster.Cluster.cluster_dashboard_uri", + return_value="https://ray-dashboard-test-cluster-1-ns.apps.cluster.awsroute.org", + 
) + + # Simulate clicking the list jobs button + ray_cluster_manager_instance.classification_widget.value = "test-cluster-1" + ray_cluster_manager_instance._on_list_jobs_button_click(mock_list_jobs_button) + + captured = capsys.readouterr() + assert ( + f"Opening Ray Jobs Dashboard for test-cluster-1 cluster:\n{mock_dashboard_uri.return_value}/#/jobs" + in captured.out + ) + mock_javascript.assert_called_with( + f'window.open("{mock_dashboard_uri.return_value}/#/jobs", "_blank");' + ) + + # Simulate clicking the refresh data button + ray_cluster_manager_instance._on_refresh_data_button_click( + mock_refresh_dataframe_button + ) + mock_fetch_cluster_data.assert_called_with(namespace) + + # Simulate clicking the Ray dashboard button + ray_cluster_manager_instance.classification_widget.value = "test-cluster-1" + ray_cluster_manager_instance._on_ray_dashboard_button_click( + mock_ray_dashboard_button + ) + + captured = capsys.readouterr() + assert ( + f"Opening Ray Dashboard for test-cluster-1 cluster:\n{mock_dashboard_uri.return_value}" + in captured.out + ) + mock_javascript.assert_called_with( + f'window.open("{mock_dashboard_uri.return_value}", "_blank");' + ) + + # Simulate clicking the delete button + ray_cluster_manager_instance.classification_widget.value = "test-cluster-1" + ray_cluster_manager_instance._on_delete_button_click(mock_delete_button) + mock_delete_cluster.assert_called_with("test-cluster-1", namespace) + + mock_fetch_cluster_data.return_value = pd.DataFrame() + ray_cluster_manager_instance.classification_widget.value = "test-cluster-2" + ray_cluster_manager_instance._on_delete_button_click(mock_delete_button) + mock_delete_cluster.assert_called_with("test-cluster-2", namespace) + + # Assert on deletion that the dataframe is empty + assert ( + ray_cluster_manager_instance.ray_clusters_df.empty + ), "Expected DataFrame to be empty after deletion" + + captured = capsys.readouterr() + assert ( + f"Cluster test-cluster-1 in the {namespace} namespace 
was deleted successfully." + in captured.out + ) + + +def test_fetch_cluster_data(mocker): + # Return empty dataframe when no clusters are found + mocker.patch("codeflare_sdk.ray.cluster.cluster.list_all_clusters", return_value=[]) + df = cf_widgets._fetch_cluster_data(namespace="default") + assert df.empty + + # Create mock RayCluster objects + mock_raycluster1 = MagicMock(spec=RayCluster) + mock_raycluster1.name = "test-cluster-1" + mock_raycluster1.namespace = "default" + mock_raycluster1.num_workers = 1 + mock_raycluster1.head_extended_resources = {"nvidia.com/gpu": "1"} + mock_raycluster1.worker_extended_resources = {"nvidia.com/gpu": "2"} + mock_raycluster1.head_cpu_requests = "500m" + mock_raycluster1.head_cpu_limits = "1000m" + mock_raycluster1.head_mem_requests = "1Gi" + mock_raycluster1.head_mem_limits = "2Gi" + mock_raycluster1.worker_cpu_requests = "1000m" + mock_raycluster1.worker_cpu_limits = "2000m" + mock_raycluster1.worker_mem_requests = "2Gi" + mock_raycluster1.worker_mem_limits = "4Gi" + mock_raycluster1.status = MagicMock() + mock_raycluster1.status.name = "READY" + mock_raycluster1.status = RayClusterStatus.READY + + mock_raycluster2 = MagicMock(spec=RayCluster) + mock_raycluster2.name = "test-cluster-2" + mock_raycluster2.namespace = "default" + mock_raycluster2.num_workers = 2 + mock_raycluster2.head_extended_resources = {} + mock_raycluster2.worker_extended_resources = {} + mock_raycluster2.head_cpu_requests = None + mock_raycluster2.head_cpu_limits = None + mock_raycluster2.head_mem_requests = None + mock_raycluster2.head_mem_limits = None + mock_raycluster2.worker_cpu_requests = None + mock_raycluster2.worker_cpu_limits = None + mock_raycluster2.worker_mem_requests = None + mock_raycluster2.worker_mem_limits = None + mock_raycluster2.status = MagicMock() + mock_raycluster2.status.name = "SUSPENDED" + mock_raycluster2.status = RayClusterStatus.SUSPENDED + + with patch( + "codeflare_sdk.ray.cluster.cluster.list_all_clusters", + 
return_value=[mock_raycluster1, mock_raycluster2], + ): + # Call the function under test + df = cf_widgets._fetch_cluster_data(namespace="default") + + # Expected DataFrame + expected_data = { + "Name": ["test-cluster-1", "test-cluster-2"], + "Namespace": ["default", "default"], + "Num Workers": [1, 2], + "Head GPUs": ["nvidia.com/gpu: 1", "0"], + "Worker GPUs": ["nvidia.com/gpu: 2", "0"], + "Head CPU Req~Lim": ["500m~1000m", "0~0"], + "Head Memory Req~Lim": ["1Gi~2Gi", "0~0"], + "Worker CPU Req~Lim": ["1000m~2000m", "0~0"], + "Worker Memory Req~Lim": ["2Gi~4Gi", "0~0"], + "status": [ + 'Ready ✓', + 'Suspended ❄️', + ], + } + + expected_df = pd.DataFrame(expected_data) + + # Assert that the DataFrame matches expected + pd.testing.assert_frame_equal( + df.reset_index(drop=True), expected_df.reset_index(drop=True) + ) + + +def test_format_status(): + # Test each possible status + test_cases = [ + (RayClusterStatus.READY, 'Ready ✓'), + ( + RayClusterStatus.SUSPENDED, + 'Suspended ❄️', + ), + (RayClusterStatus.FAILED, 'Failed ✗'), + (RayClusterStatus.UNHEALTHY, 'Unhealthy'), + (RayClusterStatus.UNKNOWN, 'Unknown'), + ] + + for status, expected_output in test_cases: + assert ( + cf_widgets._format_status(status) == expected_output + ), f"Failed for status: {status}" + + # Test an unrecognized status + unrecognized_status = "NotAStatus" + assert ( + cf_widgets._format_status(unrecognized_status) == "NotAStatus" + ), "Failed for unrecognized status" diff --git a/src/codeflare_sdk/common/widgets/widgets.py b/src/codeflare_sdk/common/widgets/widgets.py new file mode 100644 index 00000000..36d896e8 --- /dev/null +++ b/src/codeflare_sdk/common/widgets/widgets.py @@ -0,0 +1,539 @@ +# Copyright 2024 IBM, Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +The widgets sub-module contains the ui widgets created using the ipywidgets package. +""" +import contextlib +import io +import os +import warnings +import time +import codeflare_sdk +from kubernetes import client +from kubernetes.client.rest import ApiException +import ipywidgets as widgets +from IPython.display import display, HTML, Javascript +import pandas as pd +from ...ray.cluster.config import ClusterConfiguration +from ...ray.cluster.status import RayClusterStatus +from ..kubernetes_cluster import _kube_api_error_handling +from ..kubernetes_cluster.auth import ( + config_check, + get_api_client, +) + + +class RayClusterManagerWidgets: + """ + The RayClusterManagerWidgets class is responsible for initialising the ToggleButtons, Button, and Output widgets. + It also handles the user interactions and displays the cluster details. + Used when calling the view_clusters function. 
+ """ + + def __init__(self, ray_clusters_df: pd.DataFrame, namespace: str = None): + from ...ray.cluster.cluster import get_current_namespace + + # Data + self.ray_clusters_df = ray_clusters_df + self.namespace = get_current_namespace() if not namespace else namespace + self.raycluster_data_output = widgets.Output() + self.user_output = widgets.Output() + self.url_output = widgets.Output() + + # Widgets + self.classification_widget = widgets.ToggleButtons( + options=ray_clusters_df["Name"].tolist(), + value=ray_clusters_df["Name"].tolist()[0], + description="Select an existing cluster:", + ) + self.delete_button = widgets.Button( + description="Delete Cluster", + icon="trash", + tooltip="Delete the selected cluster", + ) + self.list_jobs_button = widgets.Button( + description="View Jobs", + icon="suitcase", + tooltip="Open the Ray Job Dashboard", + ) + self.ray_dashboard_button = widgets.Button( + description="Open Ray Dashboard", + icon="dashboard", + tooltip="Open the Ray Dashboard in a new tab", + layout=widgets.Layout(width="auto"), + ) + self.refresh_data_button = widgets.Button( + description="Refresh Data", + icon="refresh", + tooltip="Refresh the list of Ray Clusters", + layout=widgets.Layout(width="auto", left="1em"), + ) + + # Set up interactions + self._initialize_callbacks() + self._trigger_initial_display() + + def _initialize_callbacks(self): + """ + Called upon RayClusterManagerWidgets initialisation. + Sets up event handlers and callbacks for UI interactions. 
+ """ + # Observe cluster selection + self.classification_widget.observe( + lambda selection_change: self._on_cluster_click(selection_change), + names="value", + ) + # Set up button clicks + self.delete_button.on_click(lambda b: self._on_delete_button_click(b)) + self.list_jobs_button.on_click(lambda b: self._on_list_jobs_button_click(b)) + self.ray_dashboard_button.on_click( + lambda b: self._on_ray_dashboard_button_click(b) + ) + self.refresh_data_button.on_click( + lambda b: self._on_refresh_data_button_click(b) + ) + + def _trigger_initial_display(self): + """ + Called upon RayClusterManagerWidgets initialisation. + Triggers an initial display update with the current cluster value. + """ + # Trigger display with initial cluster value + initial_value = self.classification_widget.value + self._on_cluster_click({"new": initial_value}) + + def _on_cluster_click(self, selection_change): + """ + _on_cluster_click handles the event when a cluster is selected from the toggle buttons, updating the output with cluster details. + """ + new_value = selection_change["new"] + self.classification_widget.value = new_value + self._refresh_dataframe() + + def _on_delete_button_click(self, b): + """ + _on_delete_button_click handles the event when the Delete Button is clicked, deleting the selected cluster. + """ + cluster_name = self.classification_widget.value + + _delete_cluster(cluster_name, self.namespace) + + with self.user_output: + self.user_output.clear_output() + print( + f"Cluster {cluster_name} in the {self.namespace} namespace was deleted successfully." 
+            )
+
+        # Refresh the dataframe
+        self._refresh_dataframe()
+
+    def _on_list_jobs_button_click(self, b):
+        """
+        _on_list_jobs_button_click handles the event when the View Jobs button is clicked, opening the Ray Jobs Dashboard in a new tab
+        """
+        from codeflare_sdk import Cluster
+
+        cluster_name = self.classification_widget.value
+
+        # Suppress from Cluster Object initialisation widgets and outputs
+        with widgets.Output(), contextlib.redirect_stdout(
+            io.StringIO()
+        ), contextlib.redirect_stderr(io.StringIO()):
+            cluster = Cluster(ClusterConfiguration(cluster_name, self.namespace))
+            dashboard_url = cluster.cluster_dashboard_uri()
+
+        with self.user_output:
+            self.user_output.clear_output()
+            print(
+                f"Opening Ray Jobs Dashboard for {cluster_name} cluster:\n{dashboard_url}/#/jobs"
+            )
+        with self.url_output:
+            display(Javascript(f'window.open("{dashboard_url}/#/jobs", "_blank");'))
+
+    def _on_ray_dashboard_button_click(self, b):
+        """
+        _on_ray_dashboard_button_click handles the event when the Open Ray Dashboard button is clicked, opening the Ray Dashboard in a new tab
+        """
+        from codeflare_sdk import Cluster
+
+        cluster_name = self.classification_widget.value
+
+        # Suppress from Cluster Object initialisation widgets and outputs
+        with widgets.Output(), contextlib.redirect_stdout(
+            io.StringIO()
+        ), contextlib.redirect_stderr(io.StringIO()):
+            cluster = Cluster(ClusterConfiguration(cluster_name, self.namespace))
+            dashboard_url = cluster.cluster_dashboard_uri()
+
+        with self.user_output:
+            self.user_output.clear_output()
+            print(f"Opening Ray Dashboard for {cluster_name} cluster:\n{dashboard_url}")
+        with self.url_output:
+            display(Javascript(f'window.open("{dashboard_url}", "_blank");'))
+
+    def _on_refresh_data_button_click(self, b):
+        """
+        _on_refresh_data_button_click handles the event when the Refresh Data button is clicked, refreshing the list of Ray Clusters.
+        """
+        self.refresh_data_button.disabled = True
+        self._refresh_dataframe()
+        self.refresh_data_button.disabled = False
+
+    def _refresh_dataframe(self):
+        """
+        _refresh_dataframe function refreshes the list of Ray Clusters.
+        """
+        self.ray_clusters_df = _fetch_cluster_data(self.namespace)
+        if self.ray_clusters_df.empty:
+            self.classification_widget.close()
+            self.delete_button.close()
+            self.list_jobs_button.close()
+            self.ray_dashboard_button.close()
+            self.refresh_data_button.close()
+            with self.raycluster_data_output:
+                self.raycluster_data_output.clear_output()
+                print(f"No clusters found in the {self.namespace} namespace.")
+        else:
+            # Store the current selection if it still exists (Was not previously deleted).
+            selected_cluster = (
+                self.classification_widget.value
+                if self.classification_widget.value
+                in self.ray_clusters_df["Name"].tolist()
+                else None
+            )
+
+            # Update list of Ray Clusters.
+            self.classification_widget.options = self.ray_clusters_df["Name"].tolist()
+
+            # If the selected cluster exists, preserve the selection to remain viewing the currently selected cluster.
+            # If it does not exist, default to the first available cluster.
+            if selected_cluster:
+                self.classification_widget.value = selected_cluster
+            else:
+                self.classification_widget.value = self.ray_clusters_df["Name"].iloc[0]
+
+            # Update the output with the current Ray Cluster details.
+            self._display_cluster_details()
+
+    def _display_cluster_details(self):
+        """
+        _display_cluster_details function displays the selected cluster details in the output widget.
+ """ + self.raycluster_data_output.clear_output() + selected_cluster = self.ray_clusters_df[ + self.ray_clusters_df["Name"] == self.classification_widget.value + ] + with self.raycluster_data_output: + display( + HTML( + selected_cluster[ + [ + "Name", + "Namespace", + "Num Workers", + "Head GPUs", + "Head CPU Req~Lim", + "Head Memory Req~Lim", + "Worker GPUs", + "Worker CPU Req~Lim", + "Worker Memory Req~Lim", + "status", + ] + ].to_html(escape=False, index=False, border=2) + ) + ) + + def display_widgets(self): + display(widgets.VBox([self.classification_widget, self.raycluster_data_output])) + display( + widgets.HBox( + [ + self.delete_button, + self.list_jobs_button, + self.ray_dashboard_button, + self.refresh_data_button, + ] + ), + self.url_output, + self.user_output, + ) + + +def cluster_apply_down_buttons( + cluster: "codeflare_sdk.ray.cluster.cluster.Cluster", +) -> widgets.Button: + """ + The cluster_apply_down_buttons function returns two button widgets for a create and delete button. + The function uses the appwrapper bool to distinguish between resource type for the tool tip. 
+ """ + resource = "Ray Cluster" + if cluster.config.appwrapper: + resource = "AppWrapper" + + apply_button = widgets.Button( + description="Cluster Apply", + tooltip=f"Create the {resource}", + icon="play", + ) + + delete_button = widgets.Button( + description="Cluster Down", + tooltip=f"Delete the {resource}", + icon="trash", + ) + + wait_ready_check = _wait_ready_check_box() + output = widgets.Output() + + # Display the buttons in an HBox wrapped in a VBox which includes the wait_ready Checkbox + button_display = widgets.HBox([apply_button, delete_button]) + display(widgets.VBox([button_display, wait_ready_check]), output) + + def on_apply_button_clicked(b): # Handle the apply button click event + with output: + output.clear_output() + cluster.apply() + + # If the wait_ready Checkbox is clicked(value == True) trigger the wait_ready function + if wait_ready_check.value: + cluster.wait_ready() + + def on_down_button_clicked(b): # Handle the down button click event + with output: + output.clear_output() + cluster.down() + + apply_button.on_click(on_apply_button_clicked) + delete_button.on_click(on_down_button_clicked) + + +def _wait_ready_check_box(): + """ + The wait_ready_check_box function will return a checkbox widget used for waiting for the resource to be in the state READY. + """ + wait_ready_check_box = widgets.Checkbox( + False, + description="Wait for Cluster?", + ) + return wait_ready_check_box + + +def is_notebook() -> bool: + """ + The is_notebook function checks if Jupyter Notebook environment variables exist in the given environment and return True/False based on that. + """ + if ( + "PYDEVD_IPYTHON_COMPATIBLE_DEBUGGING" in os.environ + or "JPY_SESSION_NAME" in os.environ + ): # If running Jupyter NBs in VsCode or RHOAI/ODH display UI buttons + return True + else: + return False + + +def view_clusters(namespace: str = None): + """ + view_clusters function will display existing clusters with their specs, and handle user interactions. 
+ """ + if not is_notebook(): + warnings.warn( + "view_clusters can only be used in a Jupyter Notebook environment." + ) + return # Exit function if not in Jupyter Notebook + + from ...ray.cluster.cluster import get_current_namespace + + if not namespace: + namespace = get_current_namespace() + + ray_clusters_df = _fetch_cluster_data(namespace) + if ray_clusters_df.empty: + print(f"No clusters found in the {namespace} namespace.") + return + + # Initialize the RayClusterManagerWidgets class + ray_cluster_manager = RayClusterManagerWidgets( + ray_clusters_df=ray_clusters_df, namespace=namespace + ) + + # Display the UI components + ray_cluster_manager.display_widgets() + + +def _delete_cluster( + cluster_name: str, + namespace: str, + timeout: int = 5, + interval: int = 1, +): + """ + _delete_cluster function deletes the cluster with the given name and namespace. + It optionally waits for the cluster to be deleted. + """ + from ...ray.cluster.cluster import _check_aw_exists + + try: + config_check() + api_instance = client.CustomObjectsApi(get_api_client()) + + if _check_aw_exists(cluster_name, namespace): + api_instance.delete_namespaced_custom_object( + group="workload.codeflare.dev", + version="v1beta2", + namespace=namespace, + plural="appwrappers", + name=cluster_name, + ) + group = "workload.codeflare.dev" + version = "v1beta2" + plural = "appwrappers" + else: + api_instance.delete_namespaced_custom_object( + group="ray.io", + version="v1", + namespace=namespace, + plural="rayclusters", + name=cluster_name, + ) + group = "ray.io" + version = "v1" + plural = "rayclusters" + + # Wait for the resource to be deleted + while timeout > 0: + try: + api_instance.get_namespaced_custom_object( + group=group, + version=version, + namespace=namespace, + plural=plural, + name=cluster_name, + ) + # Retry if resource still exists + time.sleep(interval) + timeout -= interval + if timeout <= 0: + raise TimeoutError( + f"Timeout waiting for {cluster_name} to be deleted." 
+ ) + except ApiException as e: + # Resource is deleted + if e.status == 404: + break + except Exception as e: # pragma: no cover + return _kube_api_error_handling(e) + + +def _fetch_cluster_data(namespace): + """ + _fetch_cluster_data function fetches all clusters and their spec in a given namespace and returns a DataFrame. + """ + from ...ray.cluster.cluster import list_all_clusters + + rayclusters = list_all_clusters(namespace, False) + if not rayclusters: + return pd.DataFrame() + names = [item.name for item in rayclusters] + namespaces = [item.namespace for item in rayclusters] + num_workers = [item.num_workers for item in rayclusters] + head_extended_resources = [ + ( + f"{list(item.head_extended_resources.keys())[0]}: {list(item.head_extended_resources.values())[0]}" + if item.head_extended_resources + else "0" + ) + for item in rayclusters + ] + worker_extended_resources = [ + ( + f"{list(item.worker_extended_resources.keys())[0]}: {list(item.worker_extended_resources.values())[0]}" + if item.worker_extended_resources + else "0" + ) + for item in rayclusters + ] + head_cpu_requests = [ + item.head_cpu_requests if item.head_cpu_requests else 0 for item in rayclusters + ] + head_cpu_limits = [ + item.head_cpu_limits if item.head_cpu_limits else 0 for item in rayclusters + ] + head_cpu_rl = [ + f"{requests}~{limits}" + for requests, limits in zip(head_cpu_requests, head_cpu_limits) + ] + head_mem_requests = [ + item.head_mem_requests if item.head_mem_requests else 0 for item in rayclusters + ] + head_mem_limits = [ + item.head_mem_limits if item.head_mem_limits else 0 for item in rayclusters + ] + head_mem_rl = [ + f"{requests}~{limits}" + for requests, limits in zip(head_mem_requests, head_mem_limits) + ] + worker_cpu_requests = [ + item.worker_cpu_requests if item.worker_cpu_requests else 0 + for item in rayclusters + ] + worker_cpu_limits = [ + item.worker_cpu_limits if item.worker_cpu_limits else 0 for item in rayclusters + ] + worker_cpu_rl = [ + 
f"{requests}~{limits}" + for requests, limits in zip(worker_cpu_requests, worker_cpu_limits) + ] + worker_mem_requests = [ + item.worker_mem_requests if item.worker_mem_requests else 0 + for item in rayclusters + ] + worker_mem_limits = [ + item.worker_mem_limits if item.worker_mem_limits else 0 for item in rayclusters + ] + worker_mem_rl = [ + f"{requests}~{limits}" + for requests, limits in zip(worker_mem_requests, worker_mem_limits) + ] + status = [item.status.name for item in rayclusters] + + status = [_format_status(item.status) for item in rayclusters] + + data = { + "Name": names, + "Namespace": namespaces, + "Num Workers": num_workers, + "Head GPUs": head_extended_resources, + "Worker GPUs": worker_extended_resources, + "Head CPU Req~Lim": head_cpu_rl, + "Head Memory Req~Lim": head_mem_rl, + "Worker CPU Req~Lim": worker_cpu_rl, + "Worker Memory Req~Lim": worker_mem_rl, + "status": status, + } + return pd.DataFrame(data) + + +def _format_status(status): + """ + _format_status function formats the status enum. + """ + status_map = { + RayClusterStatus.READY: 'Ready ✓', + RayClusterStatus.SUSPENDED: 'Suspended ❄️', + RayClusterStatus.FAILED: 'Failed ✗', + RayClusterStatus.UNHEALTHY: 'Unhealthy', + RayClusterStatus.UNKNOWN: 'Unknown', + } + return status_map.get(status, status) diff --git a/src/codeflare_sdk/job/jobs.py b/src/codeflare_sdk/job/jobs.py deleted file mode 100644 index b9bb9cdc..00000000 --- a/src/codeflare_sdk/job/jobs.py +++ /dev/null @@ -1,188 +0,0 @@ -# Copyright 2023 IBM, Red Hat -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - - -import abc -from typing import TYPE_CHECKING, Optional, Dict, List -from pathlib import Path - -from torchx.components.dist import ddp -from torchx.runner import get_runner -from torchx.specs import AppHandle, parse_app_handle, AppDryRunInfo - -if TYPE_CHECKING: - from ..cluster.cluster import Cluster -from ..cluster.cluster import get_current_namespace - -all_jobs: List["Job"] = [] -torchx_runner = get_runner() - - -class JobDefinition(metaclass=abc.ABCMeta): - def _dry_run(self, cluster: "Cluster"): - pass - - def submit(self, cluster: "Cluster"): - pass - - -class Job(metaclass=abc.ABCMeta): - def status(self): - pass - - def logs(self): - pass - - -class DDPJobDefinition(JobDefinition): - def __init__( - self, - script: Optional[str] = None, - m: Optional[str] = None, - script_args: Optional[List[str]] = None, - name: Optional[str] = None, - cpu: Optional[int] = None, - gpu: Optional[int] = None, - memMB: Optional[int] = None, - h: Optional[str] = None, - j: Optional[str] = None, - env: Optional[Dict[str, str]] = None, - max_retries: int = 0, - mounts: Optional[List[str]] = None, - rdzv_port: int = 29500, - rdzv_backend: str = None, - scheduler_args: Optional[Dict[str, str]] = None, - image: Optional[str] = None, - workspace: Optional[str] = f"file://{Path.cwd()}", - ): - if bool(script) == bool(m): # logical XOR - raise ValueError( - "Exactly one of the following arguments must be defined: [script, m]." 
- ) - self.script = script - self.m = m - self.script_args: List[str] = script_args if script_args is not None else [] - self.name = name - self.cpu = cpu - self.gpu = gpu - self.memMB = memMB - self.h = h - self.j = j - self.env: Dict[str, str] = env if env is not None else dict() - self.max_retries = max_retries - self.mounts: List[str] = mounts if mounts is not None else [] - self.rdzv_port = rdzv_port - self.rdzv_backend = rdzv_backend - self.scheduler_args: Dict[str, str] = ( - scheduler_args if scheduler_args is not None else dict() - ) - self.image = image - self.workspace = workspace - - def _dry_run(self, cluster: "Cluster"): - j = f"{cluster.config.num_workers}x{max(cluster.config.num_gpus, 1)}" # # of proc. = # of gpus - return torchx_runner.dryrun( - app=ddp( - *self.script_args, - script=self.script, - m=self.m, - name=self.name, - h=self.h, - cpu=self.cpu if self.cpu is not None else cluster.config.max_cpus, - gpu=self.gpu if self.gpu is not None else cluster.config.num_gpus, - memMB=self.memMB - if self.memMB is not None - else cluster.config.max_memory * 1024, - j=self.j if self.j is not None else j, - env=self.env, - max_retries=self.max_retries, - rdzv_port=self.rdzv_port, - rdzv_backend=self.rdzv_backend - if self.rdzv_backend is not None - else "static", - mounts=self.mounts, - ), - scheduler=cluster.torchx_scheduler, - cfg=cluster.torchx_config(**self.scheduler_args), - workspace=self.workspace, - ) - - def _missing_spec(self, spec: str): - raise ValueError(f"Job definition missing arg: {spec}") - - def _dry_run_no_cluster(self): - if self.scheduler_args is not None: - if self.scheduler_args.get("namespace") is None: - self.scheduler_args["namespace"] = get_current_namespace() - return torchx_runner.dryrun( - app=ddp( - *self.script_args, - script=self.script, - m=self.m, - name=self.name if self.name is not None else self._missing_spec("name"), - h=self.h, - cpu=self.cpu - if self.cpu is not None - else self._missing_spec("cpu (# cpus per 
worker)"), - gpu=self.gpu - if self.gpu is not None - else self._missing_spec("gpu (# gpus per worker)"), - memMB=self.memMB - if self.memMB is not None - else self._missing_spec("memMB (memory in MB)"), - j=self.j - if self.j is not None - else self._missing_spec( - "j (`workers`x`procs`)" - ), # # of proc. = # of gpus, - env=self.env, # should this still exist? - max_retries=self.max_retries, - rdzv_port=self.rdzv_port, # should this still exist? - rdzv_backend=self.rdzv_backend - if self.rdzv_backend is not None - else "c10d", - mounts=self.mounts, - image=self.image - if self.image is not None - else self._missing_spec("image"), - ), - scheduler="kubernetes_mcad", - cfg=self.scheduler_args, - workspace="", - ) - - def submit(self, cluster: "Cluster" = None) -> "Job": - return DDPJob(self, cluster) - - -class DDPJob(Job): - def __init__(self, job_definition: "DDPJobDefinition", cluster: "Cluster" = None): - self.job_definition = job_definition - self.cluster = cluster - if self.cluster: - self._app_handle = torchx_runner.schedule(job_definition._dry_run(cluster)) - else: - self._app_handle = torchx_runner.schedule( - job_definition._dry_run_no_cluster() - ) - all_jobs.append(self) - - def status(self) -> str: - return torchx_runner.status(self._app_handle) - - def logs(self) -> str: - return "".join(torchx_runner.log_lines(self._app_handle, None)) - - def cancel(self): - torchx_runner.cancel(self._app_handle) diff --git a/src/codeflare_sdk/ray/__init__.py b/src/codeflare_sdk/ray/__init__.py new file mode 100644 index 00000000..ab55cc82 --- /dev/null +++ b/src/codeflare_sdk/ray/__init__.py @@ -0,0 +1,16 @@ +from .appwrapper import AppWrapper, AppWrapperStatus, AWManager + +from .client import ( + RayJobClient, +) + +from .cluster import ( + Cluster, + ClusterConfiguration, + get_cluster, + list_all_queued, + list_all_clusters, + RayClusterStatus, + CodeFlareClusterStatus, + RayCluster, +) diff --git a/src/codeflare_sdk/ray/appwrapper/__init__.py 
b/src/codeflare_sdk/ray/appwrapper/__init__.py new file mode 100644 index 00000000..537fdf8a --- /dev/null +++ b/src/codeflare_sdk/ray/appwrapper/__init__.py @@ -0,0 +1,6 @@ +from .awload import AWManager + +from .status import ( + AppWrapperStatus, + AppWrapper, +) diff --git a/src/codeflare_sdk/cluster/awload.py b/src/codeflare_sdk/ray/appwrapper/awload.py similarity index 87% rename from src/codeflare_sdk/cluster/awload.py rename to src/codeflare_sdk/ray/appwrapper/awload.py index 97d138d5..02794f3d 100644 --- a/src/codeflare_sdk/cluster/awload.py +++ b/src/codeflare_sdk/ray/appwrapper/awload.py @@ -22,15 +22,18 @@ import os import yaml -from kubernetes import client, config -from ..utils.kube_api_helpers import _kube_api_error_handling -from .auth import config_check, api_config_handler +from kubernetes import client +from ...common import _kube_api_error_handling +from ...common.kubernetes_cluster.auth import ( + config_check, + get_api_client, +) class AWManager: """ An object for submitting and removing existing AppWrapper yamls - to be added to the MCAD queue. + to be added to the Kueue localqueue. 
""" def __init__(self, filename: str) -> None: @@ -59,10 +62,10 @@ def submit(self) -> None: """ try: config_check() - api_instance = client.CustomObjectsApi(api_config_handler()) + api_instance = client.CustomObjectsApi(get_api_client()) api_instance.create_namespaced_custom_object( group="workload.codeflare.dev", - version="v1beta1", + version="v1beta2", namespace=self.namespace, plural="appwrappers", body=self.awyaml, @@ -84,10 +87,10 @@ def remove(self) -> None: try: config_check() - api_instance = client.CustomObjectsApi(api_config_handler()) + api_instance = client.CustomObjectsApi(get_api_client()) api_instance.delete_namespaced_custom_object( group="workload.codeflare.dev", - version="v1beta1", + version="v1beta2", namespace=self.namespace, plural="appwrappers", name=self.name, diff --git a/src/codeflare_sdk/ray/appwrapper/status.py b/src/codeflare_sdk/ray/appwrapper/status.py new file mode 100644 index 00000000..79fe0fd2 --- /dev/null +++ b/src/codeflare_sdk/ray/appwrapper/status.py @@ -0,0 +1,46 @@ +# Copyright 2024 IBM, Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +The status sub-module defines Enums containing information for +AppWrapper states, as well as dataclasses to store information for AppWrappers. +""" + +from dataclasses import dataclass +from enum import Enum + + +class AppWrapperStatus(Enum): + """ + Defines the possible reportable phases of an AppWrapper. 
+ """ + + SUSPENDED = "suspended" + RESUMING = "resuming" + RUNNING = "running" + RESETTING = "resetting" + SUSPENDING = "suspending" + SUCCEEDED = "succeeded" + FAILED = "failed" + TERMINATING = "terminating" + + +@dataclass +class AppWrapper: + """ + For storing information about an AppWrapper. + """ + + name: str + status: AppWrapperStatus diff --git a/src/codeflare_sdk/ray/appwrapper/test_awload.py b/src/codeflare_sdk/ray/appwrapper/test_awload.py new file mode 100644 index 00000000..3f45e1a5 --- /dev/null +++ b/src/codeflare_sdk/ray/appwrapper/test_awload.py @@ -0,0 +1,93 @@ +# Copyright 2024 IBM, Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from codeflare_sdk.common.utils.unit_test_support import ( + apply_template, + arg_check_aw_apply_effect, + arg_check_aw_del_effect, + get_template_variables, +) +from codeflare_sdk.ray.appwrapper import AWManager +from codeflare_sdk.ray.cluster import Cluster, ClusterConfiguration +import os +from pathlib import Path + +parent = Path(__file__).resolve().parents[4] # project directory +aw_dir = os.path.expanduser("~/.codeflare/resources/") + + +def test_AWManager_creation(mocker): + mocker.patch("kubernetes.client.ApisApi.get_api_versions") + mocker.patch("kubernetes.client.CustomObjectsApi.list_namespaced_custom_object") + # Create test.yaml + Cluster( + ClusterConfiguration( + name="test", + namespace="ns", + write_to_file=True, + appwrapper=True, + ) + ) + + testaw = AWManager(f"{aw_dir}test.yaml") + assert testaw.name == "test" + assert testaw.namespace == "ns" + assert testaw.submitted == False + try: + testaw = AWManager("fake") + except Exception as e: + assert type(e) == FileNotFoundError + assert str(e) == "[Errno 2] No such file or directory: 'fake'" + try: + testaw = apply_template( + AWManager( + f"{parent}/tests/test_cluster_yamls/appwrapper/test-case-bad.yaml" + ), + get_template_variables(), + ) + except Exception as e: + assert type(e) == ValueError + assert ( + str(e) + == f"{parent}/tests/test_cluster_yamls/appwrapper/test-case-bad.yaml is not a correctly formatted AppWrapper yaml" + ) + + +def test_AWManager_submit_remove(mocker, capsys): + mocker.patch("kubernetes.client.ApisApi.get_api_versions") + testaw = AWManager(f"{aw_dir}test.yaml") + testaw.remove() + captured = capsys.readouterr() + assert ( + captured.out + == "AppWrapper not submitted by this manager yet, nothing to remove\n" + ) + assert testaw.submitted == False + mocker.patch("kubernetes.config.load_kube_config", return_value="ignore") + mocker.patch( + "kubernetes.client.CustomObjectsApi.create_namespaced_custom_object", + side_effect=arg_check_aw_apply_effect, + ) + 
mocker.patch( + "kubernetes.client.CustomObjectsApi.delete_namespaced_custom_object", + side_effect=arg_check_aw_del_effect, + ) + testaw.submit() + assert testaw.submitted == True + testaw.remove() + assert testaw.submitted == False + + +# Make sure to always keep this function last +def test_cleanup(): + os.remove(f"{aw_dir}test.yaml") diff --git a/src/codeflare_sdk/ray/appwrapper/test_status.py b/src/codeflare_sdk/ray/appwrapper/test_status.py new file mode 100644 index 00000000..a3fcf870 --- /dev/null +++ b/src/codeflare_sdk/ray/appwrapper/test_status.py @@ -0,0 +1,105 @@ +# Copyright 2024 IBM, Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from codeflare_sdk.ray.cluster.cluster import ( + _app_wrapper_status, + Cluster, + ClusterConfiguration, +) +from codeflare_sdk.ray.appwrapper import AppWrapper, AppWrapperStatus +from codeflare_sdk.ray.cluster.status import CodeFlareClusterStatus +from codeflare_sdk.common.utils.unit_test_support import get_local_queue +import os + +aw_dir = os.path.expanduser("~/.codeflare/resources/") + + +def test_cluster_status(mocker): + mocker.patch("kubernetes.client.ApisApi.get_api_versions") + mocker.patch("kubernetes.config.load_kube_config", return_value="ignore") + mocker.patch( + "kubernetes.client.CustomObjectsApi.list_namespaced_custom_object", + return_value=get_local_queue("kueue.x-k8s.io", "v1beta1", "ns", "localqueues"), + ) + fake_aw = AppWrapper("test", AppWrapperStatus.FAILED) + + cf = Cluster( + ClusterConfiguration( + name="test", + namespace="ns", + write_to_file=True, + appwrapper=True, + local_queue="local-queue-default", + ) + ) + mocker.patch( + "codeflare_sdk.ray.cluster.cluster._app_wrapper_status", return_value=None + ) + mocker.patch( + "codeflare_sdk.ray.cluster.cluster._ray_cluster_status", return_value=None + ) + status, ready = cf.status() + assert status == CodeFlareClusterStatus.UNKNOWN + assert ready == False + + mocker.patch( + "codeflare_sdk.ray.cluster.cluster._app_wrapper_status", return_value=fake_aw + ) + status, ready = cf.status() + assert status == CodeFlareClusterStatus.FAILED + assert ready == False + + fake_aw.status = AppWrapperStatus.SUSPENDED + status, ready = cf.status() + assert status == CodeFlareClusterStatus.QUEUED + assert ready == False + + fake_aw.status = AppWrapperStatus.RESUMING + status, ready = cf.status() + assert status == CodeFlareClusterStatus.STARTING + assert ready == False + + fake_aw.status = AppWrapperStatus.RESETTING + status, ready = cf.status() + assert status == CodeFlareClusterStatus.STARTING + assert ready == False + + fake_aw.status = AppWrapperStatus.RUNNING + status, ready = cf.status() + 
assert status == CodeFlareClusterStatus.UNKNOWN + assert ready == False + + +def aw_status_fields(group, version, namespace, plural, *args): + assert group == "workload.codeflare.dev" + assert version == "v1beta2" + assert namespace == "test-ns" + assert plural == "appwrappers" + assert args == tuple() + return {"items": []} + + +def test_aw_status(mocker): + mocker.patch("kubernetes.config.load_kube_config", return_value="ignore") + mocker.patch( + "kubernetes.client.CustomObjectsApi.list_namespaced_custom_object", + side_effect=aw_status_fields, + ) + aw = _app_wrapper_status("test-aw", "test-ns") + assert aw == None + + +# Make sure to always keep this function last +def test_cleanup(): + os.remove(f"{aw_dir}test.yaml") diff --git a/src/codeflare_sdk/ray/client/__init__.py b/src/codeflare_sdk/ray/client/__init__.py new file mode 100644 index 00000000..f230eb77 --- /dev/null +++ b/src/codeflare_sdk/ray/client/__init__.py @@ -0,0 +1 @@ +from .ray_jobs import RayJobClient diff --git a/src/codeflare_sdk/ray/client/ray_jobs.py b/src/codeflare_sdk/ray/client/ray_jobs.py new file mode 100644 index 00000000..2c0ceee0 --- /dev/null +++ b/src/codeflare_sdk/ray/client/ray_jobs.py @@ -0,0 +1,238 @@ +# Copyright 2022 IBM, Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +The ray_jobs sub-module contains methods needed to submit jobs and connect to Ray Clusters that were not created by CodeFlare. +The SDK acts as a wrapper for the Ray Job Submission Client. 
+""" + +from ray.job_submission import JobSubmissionClient +from ray.dashboard.modules.job.pydantic_models import JobDetails +from typing import Iterator, Optional, Dict, Any, Union, List + + +class RayJobClient: + """ + A wrapper class for the Ray Job Submission Client, used for interacting with Ray clusters to manage job + submissions, deletions, and other job-related information. + + Args: + address (Optional[str]): + The Ray cluster's address, which may be either the Ray Client address, HTTP address + of the dashboard server on the head node, or "auto" / "localhost:" for a local cluster. + This is overridden by the RAY_ADDRESS environment variable if set. + create_cluster_if_needed (bool): + If True, a new cluster will be created if not already running at the + specified address. By default, Ray requires an existing cluster. + cookies (Optional[Dict[str, Any]]): + HTTP cookies to send with requests to the job server. + metadata (Optional[Dict[str, Any]]): + Global metadata to store with all jobs, merged with job-specific + metadata during job submission. + headers (Optional[Dict[str, Any]]): + HTTP headers to send with requests to the job server, can be used for + authentication. + verify (Optional[Union[str, bool]]): + If True, verifies the server's TLS certificate. Can also be a path + to trusted certificates. Default is True. 
+ """ + + def __init__( + self, + address: Optional[str] = None, + create_cluster_if_needed: bool = False, + cookies: Optional[Dict[str, Any]] = None, + metadata: Optional[Dict[str, Any]] = None, + headers: Optional[Dict[str, Any]] = None, + verify: Optional[Union[str, bool]] = True, + ): + self.rayJobClient = JobSubmissionClient( + address=address, + create_cluster_if_needed=create_cluster_if_needed, + cookies=cookies, + metadata=metadata, + headers=headers, + verify=verify, + ) + + def submit_job( + self, + entrypoint: str, + job_id: Optional[str] = None, + runtime_env: Optional[Dict[str, Any]] = None, + metadata: Optional[Dict[str, str]] = None, + submission_id: Optional[str] = None, + entrypoint_num_cpus: Optional[Union[int, float]] = None, + entrypoint_num_gpus: Optional[Union[int, float]] = None, + entrypoint_memory: Optional[int] = None, + entrypoint_resources: Optional[Dict[str, float]] = None, + ) -> str: + """ + Submits a job to the Ray cluster with specified resources and returns the job ID. + + Args: + entrypoint (str): + The command to execute for this job. + job_id (Optional[str]): + Deprecated, use `submission_id`. A unique job identifier. + runtime_env (Optional[Dict[str, Any]]): + The runtime environment for this job. + metadata (Optional[Dict[str, str]]): + Metadata associated with the job, merged with global metadata. + submission_id (Optional[str]): + Unique ID for the job submission. + entrypoint_num_cpus (Optional[Union[int, float]]): + The quantity of CPU cores to reserve for the execution of the entrypoint command, + separately from any tasks or actors launched by it. Defaults to 0. + entrypoint_num_gpus (Optional[Union[int, float]]): + The quantity of GPUs to reserve for the execution of the entrypoint command, + separately from any tasks or actors launched by it. Defaults to 0. 
+ entrypoint_memory (Optional[int]): + The quantity of memory to reserve for the execution of the entrypoint command, + separately from any tasks or actors launched by it. Defaults to 0. + entrypoint_resources (Optional[Dict[str, float]]): + The quantity of custom resources to reserve for the execution of the entrypoint command, + separately from any tasks or actors launched by it. + + Returns: + str: + The unique identifier for the submitted job. + """ + return self.rayJobClient.submit_job( + entrypoint=entrypoint, + job_id=job_id, + runtime_env=runtime_env, + metadata=metadata, + submission_id=submission_id, + entrypoint_num_cpus=entrypoint_num_cpus, + entrypoint_num_gpus=entrypoint_num_gpus, + entrypoint_memory=entrypoint_memory, + entrypoint_resources=entrypoint_resources, + ) + + def delete_job(self, job_id: str) -> (bool, str): + """ + Deletes a job by job ID. + + Args: + job_id (str): + The unique identifier of the job to delete. + + Returns: + tuple(bool, str): + A tuple with deletion status and a message. + """ + deletion_status = self.rayJobClient.delete_job(job_id=job_id) + + if deletion_status: + message = f"Successfully deleted Job {job_id}" + else: + message = f"Failed to delete Job {job_id}" + + return deletion_status, message + + def get_address(self) -> str: + """ + Retrieves the address of the connected Ray cluster. + + Returns: + str: + The Ray cluster's address. + """ + return self.rayJobClient.get_address() + + def get_job_info(self, job_id: str): + """ + Fetches information about a job by job ID. + + Args: + job_id (str): + The unique identifier of the job. + + Returns: + JobInfo: + Information about the job's status, progress, and other details. + """ + return self.rayJobClient.get_job_info(job_id=job_id) + + def get_job_logs(self, job_id: str) -> str: + """ + Retrieves the logs for a specific job by job ID. + + Args: + job_id (str): + The unique identifier of the job. + + Returns: + str: + Logs output from the job. 
+ """ + return self.rayJobClient.get_job_logs(job_id=job_id) + + def get_job_status(self, job_id: str) -> str: + """ + Fetches the current status of a job by job ID. + + Args: + job_id (str): + The unique identifier of the job. + + Returns: + str: + The job's status. + """ + return self.rayJobClient.get_job_status(job_id=job_id) + + def list_jobs(self) -> List[JobDetails]: + """ + Lists all current jobs in the Ray cluster. + + Returns: + List[JobDetails]: + A list of job details for each current job in the cluster. + """ + return self.rayJobClient.list_jobs() + + def stop_job(self, job_id: str) -> (bool, str): + """ + Stops a running job by job ID. + + Args: + job_id (str): + The unique identifier of the job to stop. + + Returns: + tuple(bool, str): + A tuple with the stop status and a message. + """ + stop_job_status = self.rayJobClient.stop_job(job_id=job_id) + if stop_job_status: + message = f"Successfully stopped Job {job_id}" + else: + message = f"Failed to stop Job, {job_id} could have already completed." + return stop_job_status, message + + def tail_job_logs(self, job_id: str) -> Iterator[str]: + """ + Continuously streams the logs of a job. + + Args: + job_id (str): + The unique identifier of the job. + + Returns: + Iterator[str]: + An iterator that yields log entries in real-time. + """ + return self.rayJobClient.tail_job_logs(job_id=job_id) diff --git a/src/codeflare_sdk/ray/client/test_ray_jobs.py b/src/codeflare_sdk/ray/client/test_ray_jobs.py new file mode 100644 index 00000000..cbb27aa7 --- /dev/null +++ b/src/codeflare_sdk/ray/client/test_ray_jobs.py @@ -0,0 +1,173 @@ +# Copyright 2024 IBM, Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from ray.job_submission import JobSubmissionClient +from codeflare_sdk.ray.client.ray_jobs import RayJobClient +from codeflare_sdk.common.utils.unit_test_support import get_package_and_version +import pytest + + +# rjc == RayJobClient +@pytest.fixture +def ray_job_client(mocker): + # Creating a fixture to instantiate RayJobClient with a mocked JobSubmissionClient + mocker.patch.object(JobSubmissionClient, "__init__", return_value=None) + return RayJobClient( + "https://ray-dashboard-unit-test-cluster-ns.apps.cluster.awsroute.org" + ) + + +def test_rjc_submit_job(ray_job_client, mocker): + mocked_submit_job = mocker.patch.object( + JobSubmissionClient, "submit_job", return_value="mocked_submission_id" + ) + submission_id = ray_job_client.submit_job(entrypoint={"pip": ["numpy"]}) + + mocked_submit_job.assert_called_once_with( + entrypoint={"pip": ["numpy"]}, + job_id=None, + runtime_env=None, + metadata=None, + submission_id=None, + entrypoint_num_cpus=None, + entrypoint_num_gpus=None, + entrypoint_memory=None, + entrypoint_resources=None, + ) + + assert submission_id == "mocked_submission_id" + + +def test_rjc_delete_job(ray_job_client, mocker): + # Case return True + mocked_delete_job_True = mocker.patch.object( + JobSubmissionClient, "delete_job", return_value=True + ) + result = ray_job_client.delete_job(job_id="mocked_job_id") + + mocked_delete_job_True.assert_called_once_with(job_id="mocked_job_id") + assert result == (True, "Successfully deleted Job mocked_job_id") + + # Case return False + mocked_delete_job_False = mocker.patch.object( + 
JobSubmissionClient, "delete_job", return_value=(False) + ) + result = ray_job_client.delete_job(job_id="mocked_job_id") + + mocked_delete_job_False.assert_called_once_with(job_id="mocked_job_id") + assert result == (False, "Failed to delete Job mocked_job_id") + + +def test_rjc_stop_job(ray_job_client, mocker): + # Case return True + mocked_stop_job_True = mocker.patch.object( + JobSubmissionClient, "stop_job", return_value=(True) + ) + result = ray_job_client.stop_job(job_id="mocked_job_id") + + mocked_stop_job_True.assert_called_once_with(job_id="mocked_job_id") + assert result == (True, "Successfully stopped Job mocked_job_id") + + # Case return False + mocked_stop_job_False = mocker.patch.object( + JobSubmissionClient, "stop_job", return_value=(False) + ) + result = ray_job_client.stop_job(job_id="mocked_job_id") + + mocked_stop_job_False.assert_called_once_with(job_id="mocked_job_id") + assert result == ( + False, + "Failed to stop Job, mocked_job_id could have already completed.", + ) + + +def test_rjc_address(ray_job_client, mocker): + mocked_rjc_address = mocker.patch.object( + JobSubmissionClient, + "get_address", + return_value="https://ray-dashboard-unit-test-cluster-ns.apps.cluster.awsroute.org", + ) + address = ray_job_client.get_address() + + mocked_rjc_address.assert_called_once() + assert ( + address + == "https://ray-dashboard-unit-test-cluster-ns.apps.cluster.awsroute.org" + ) + + +def test_rjc_get_job_logs(ray_job_client, mocker): + mocked_rjc_get_job_logs = mocker.patch.object( + JobSubmissionClient, "get_job_logs", return_value="Logs" + ) + logs = ray_job_client.get_job_logs(job_id="mocked_job_id") + + mocked_rjc_get_job_logs.assert_called_once_with(job_id="mocked_job_id") + assert logs == "Logs" + + +def test_rjc_get_job_info(ray_job_client, mocker): + job_details_example = "JobDetails(type=, job_id=None, submission_id='mocked_submission_id', driver_info=None, status=, entrypoint='python test.py', message='Job has not started yet. 
It may be waiting for the runtime environment to be set up.', error_type=None, start_time=1701271760641, end_time=None, metadata={}, runtime_env={'working_dir': 'gcs://_ray_pkg_67de6f0e60d43b19.zip', 'pip': {'packages': ['numpy'], 'pip_check': False}, '_ray_commit': 'b4bba4717f5ba04ee25580fe8f88eed63ef0c5dc'}, driver_agent_http_address=None, driver_node_id=None)" + mocked_rjc_get_job_info = mocker.patch.object( + JobSubmissionClient, "get_job_info", return_value=job_details_example + ) + job_details = ray_job_client.get_job_info(job_id="mocked_job_id") + + mocked_rjc_get_job_info.assert_called_once_with(job_id="mocked_job_id") + assert job_details == job_details_example + + +def test_rjc_get_job_status(ray_job_client, mocker): + job_status_example = "" + mocked_rjc_get_job_status = mocker.patch.object( + JobSubmissionClient, "get_job_status", return_value=job_status_example + ) + job_status = ray_job_client.get_job_status(job_id="mocked_job_id") + + mocked_rjc_get_job_status.assert_called_once_with(job_id="mocked_job_id") + assert job_status == job_status_example + + +def test_rjc_tail_job_logs(ray_job_client, mocker): + logs_example = [ + "Job started...", + "Processing input data...", + "Finalizing results...", + "Job completed successfully.", + ] + mocked_rjc_tail_job_logs = mocker.patch.object( + JobSubmissionClient, "tail_job_logs", return_value=logs_example + ) + job_tail_job_logs = ray_job_client.tail_job_logs(job_id="mocked_job_id") + + mocked_rjc_tail_job_logs.assert_called_once_with(job_id="mocked_job_id") + assert job_tail_job_logs == logs_example + + +def test_rjc_list_jobs(ray_job_client, mocker): + requirements_path = "tests/e2e/mnist_pip_requirements.txt" + pytorch_lightning = get_package_and_version("pytorch_lightning", requirements_path) + torchmetrics = get_package_and_version("torchmetrics", requirements_path) + torchvision = get_package_and_version("torchvision", requirements_path) + jobs_list = [ + f"JobDetails(type=, job_id=None, 
submission_id='raysubmit_4k2NYS1YbRXYPZCM', driver_info=None, status=, entrypoint='python mnist.py', message='Job finished successfully.', error_type=None, start_time=1701352132585, end_time=1701352192002, metadata={{}}, runtime_env={{'working_dir': 'gcs://_ray_pkg_6200b93a110e8033.zip', 'pip': {{'packages': ['{pytorch_lightning}', 'ray_lightning', '{torchmetrics}', '{torchvision}'], 'pip_check': False}}, '_ray_commit': 'b4bba4717f5ba04ee25580fe8f88eed63ef0c5dc'}}, driver_agent_http_address='http://10.131.0.18:52365', driver_node_id='9fb515995f5fb13ad4db239ceea378333bebf0a2d45b6aa09d02e691')", + f"JobDetails(type=, job_id=None, submission_id='raysubmit_iRuwU8vdkbUZZGvT', driver_info=None, status=, entrypoint='python mnist.py', message='Job was intentionally stopped.', error_type=None, start_time=1701353096163, end_time=1701353097733, metadata={{}}, runtime_env={{'working_dir': 'gcs://_ray_pkg_6200b93a110e8033.zip', 'pip': {{'packages': ['{pytorch_lightning}', 'ray_lightning', '{torchmetrics}', '{torchvision}'], 'pip_check': False}}, '_ray_commit': 'b4bba4717f5ba04ee25580fe8f88eed63ef0c5dc'}}, driver_agent_http_address='http://10.131.0.18:52365', driver_node_id='9fb515995f5fb13ad4db239ceea378333bebf0a2d45b6aa09d02e691')", + ] + mocked_rjc_list_jobs = mocker.patch.object( + JobSubmissionClient, "list_jobs", return_value=jobs_list + ) + job_list_jobs = ray_job_client.list_jobs() + + mocked_rjc_list_jobs.assert_called_once() + assert job_list_jobs == jobs_list diff --git a/src/codeflare_sdk/ray/cluster/__init__.py b/src/codeflare_sdk/ray/cluster/__init__.py new file mode 100644 index 00000000..bf32459b --- /dev/null +++ b/src/codeflare_sdk/ray/cluster/__init__.py @@ -0,0 +1,13 @@ +from .status import ( + RayClusterStatus, + CodeFlareClusterStatus, + RayCluster, +) + +from .cluster import ( + Cluster, + ClusterConfiguration, + get_cluster, + list_all_queued, + list_all_clusters, +) diff --git a/src/codeflare_sdk/ray/cluster/build_ray_cluster.py 
b/src/codeflare_sdk/ray/cluster/build_ray_cluster.py new file mode 100644 index 00000000..e8b68919 --- /dev/null +++ b/src/codeflare_sdk/ray/cluster/build_ray_cluster.py @@ -0,0 +1,618 @@ +# Copyright 2024 IBM, Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" + This sub-module exists primarily to be used internally by the Cluster object + (in the cluster sub-module) for RayCluster/AppWrapper generation. +""" +from typing import List, Union, Tuple, Dict +from ...common import _kube_api_error_handling +from ...common.kubernetes_cluster import get_api_client, config_check +from kubernetes.client.exceptions import ApiException +from ...common.utils.constants import RAY_VERSION +from ...common.utils.utils import get_ray_image_for_python_version +import codeflare_sdk +import os + +from kubernetes import client +from kubernetes.client import ( + V1ObjectMeta, + V1KeyToPath, + V1ConfigMapVolumeSource, + V1Volume, + V1VolumeMount, + V1ResourceRequirements, + V1Container, + V1ContainerPort, + V1Lifecycle, + V1ExecAction, + V1LifecycleHandler, + V1EnvVar, + V1PodTemplateSpec, + V1PodSpec, + V1LocalObjectReference, + V1Toleration, +) + +import yaml +import uuid +import sys +import warnings +import json + +from codeflare_sdk.common.utils import constants + +FORBIDDEN_CUSTOM_RESOURCE_TYPES = ["GPU", "CPU", "memory"] +VOLUME_MOUNTS = [ + V1VolumeMount( + mount_path="/etc/pki/tls/certs/odh-trusted-ca-bundle.crt", + name="odh-trusted-ca-cert", + 
sub_path="odh-trusted-ca-bundle.crt", + ), + V1VolumeMount( + mount_path="/etc/ssl/certs/odh-trusted-ca-bundle.crt", + name="odh-trusted-ca-cert", + sub_path="odh-trusted-ca-bundle.crt", + ), + V1VolumeMount( + mount_path="/etc/pki/tls/certs/odh-ca-bundle.crt", + name="odh-ca-cert", + sub_path="odh-ca-bundle.crt", + ), + V1VolumeMount( + mount_path="/etc/ssl/certs/odh-ca-bundle.crt", + name="odh-ca-cert", + sub_path="odh-ca-bundle.crt", + ), +] + +VOLUMES = [ + V1Volume( + name="odh-trusted-ca-cert", + config_map=V1ConfigMapVolumeSource( + name="odh-trusted-ca-bundle", + items=[V1KeyToPath(key="ca-bundle.crt", path="odh-trusted-ca-bundle.crt")], + optional=True, + ), + ), + V1Volume( + name="odh-ca-cert", + config_map=V1ConfigMapVolumeSource( + name="odh-trusted-ca-bundle", + items=[V1KeyToPath(key="odh-ca-bundle.crt", path="odh-ca-bundle.crt")], + optional=True, + ), + ), +] + +# Use centralized mapping from constants (so that we only have to update constants.py) +SUPPORTED_PYTHON_VERSIONS = constants.SUPPORTED_PYTHON_VERSIONS + + +# RayCluster/AppWrapper builder function +def build_ray_cluster(cluster: "codeflare_sdk.ray.cluster.Cluster"): + """build_ray_cluster is used for creating a Ray Cluster/AppWrapper dict + + The resource is a dict template which uses Kubernetes Objects for creating metadata, resource requests, + specs and containers. The result is sanitised and returned either as a dict or written as a yaml file. 
+ """ + + # GPU related variables + head_gpu_count, worker_gpu_count = head_worker_gpu_count_from_cluster(cluster) + head_resources, worker_resources = head_worker_extended_resources_from_cluster( + cluster + ) + head_resources = json.dumps(head_resources).replace('"', '\\"') + head_resources = f'"{head_resources}"' + worker_resources = json.dumps(worker_resources).replace('"', '\\"') + worker_resources = f'"{worker_resources}"' + + # Create the Ray Cluster using the V1RayCluster Object + resource = { + "apiVersion": "ray.io/v1", + "kind": "RayCluster", + "metadata": get_metadata(cluster), + "spec": { + "rayVersion": RAY_VERSION, + "enableInTreeAutoscaling": False, + "autoscalerOptions": { + "upscalingMode": "Default", + "idleTimeoutSeconds": 60, + "resources": get_resources("500m", "500m", "512Mi", "512Mi"), + }, + "headGroupSpec": { + "serviceType": "ClusterIP", + "enableIngress": False, + "rayStartParams": { + "dashboard-host": "0.0.0.0", + "block": "true", + "num-gpus": str(head_gpu_count), + "resources": head_resources, + }, + "template": V1PodTemplateSpec( + metadata=V1ObjectMeta(cluster.config.annotations) + if cluster.config.annotations + else None, + spec=get_pod_spec( + cluster, + [get_head_container_spec(cluster)], + cluster.config.head_tolerations, + ), + ), + }, + "workerGroupSpecs": [ + { + "replicas": cluster.config.num_workers, + "minReplicas": cluster.config.num_workers, + "maxReplicas": cluster.config.num_workers, + "groupName": f"small-group-{cluster.config.name}", + "rayStartParams": { + "block": "true", + "num-gpus": str(worker_gpu_count), + "resources": worker_resources, + }, + "template": V1PodTemplateSpec( + metadata=V1ObjectMeta(cluster.config.annotations) + if cluster.config.annotations + else None, + spec=get_pod_spec( + cluster, + [get_worker_container_spec(cluster)], + cluster.config.worker_tolerations, + ), + ), + } + ], + }, + } + + if cluster.config.enable_gcs_ft: + if not cluster.config.redis_address: + raise ValueError( + 
"redis_address must be provided when enable_gcs_ft is True" + ) + + gcs_ft_options = {"redisAddress": cluster.config.redis_address} + + if cluster.config.external_storage_namespace: + gcs_ft_options[ + "externalStorageNamespace" + ] = cluster.config.external_storage_namespace + + if cluster.config.redis_password_secret: + gcs_ft_options["redisPassword"] = { + "valueFrom": { + "secretKeyRef": { + "name": cluster.config.redis_password_secret["name"], + "key": cluster.config.redis_password_secret["key"], + } + } + } + + resource["spec"]["gcsFaultToleranceOptions"] = gcs_ft_options + + config_check() + k8s_client = get_api_client() or client.ApiClient() + + if cluster.config.appwrapper: + # Wrap the Ray Cluster in an AppWrapper + appwrapper_name, _ = gen_names(cluster.config.name) + resource = wrap_cluster(cluster, appwrapper_name, resource) + + resource = k8s_client.sanitize_for_serialization(resource) + + # write_to_file functionality + if cluster.config.write_to_file: + return write_to_file(cluster, resource) # Writes the file and returns its name + else: + print(f"Yaml resources loaded for {cluster.config.name}") + return resource # Returns the Resource as a dict + + +# Metadata related functions +def get_metadata(cluster: "codeflare_sdk.ray.cluster.Cluster"): + """ + The get_metadata() function builds and returns a V1ObjectMeta Object using cluster configuration parameters + """ + object_meta = V1ObjectMeta( + name=cluster.config.name, + namespace=cluster.config.namespace, + labels=get_labels(cluster), + ) + + # Get the NB annotation if it exists - could be useful in future for a "annotations" parameter. + annotations = with_nb_annotations(cluster.config.annotations) + if annotations != {}: + object_meta.annotations = annotations # As annotations are not a guarantee they are appended to the metadata after creation. 
def get_metadata(cluster: "codeflare_sdk.ray.cluster.Cluster"):
    """
    Build the V1ObjectMeta for the RayCluster (name, namespace, labels) from
    the cluster configuration.
    """
    object_meta = V1ObjectMeta(
        name=cluster.config.name,
        namespace=cluster.config.namespace,
        labels=get_labels(cluster),
    )

    # Annotations are not guaranteed, so they are only attached to the
    # metadata when non-empty (user-set annotations plus the notebook-prefix
    # annotation, if running in a notebook).
    annotations = with_nb_annotations(cluster.config.annotations)
    if annotations != {}:
        object_meta.annotations = annotations

    return object_meta


def get_labels(cluster: "codeflare_sdk.ray.cluster.Cluster"):
    """
    Build the label dict for the RayCluster: the base controller label, any
    user-defined labels, and — when the cluster is not wrapped in an
    AppWrapper — the Kueue local-queue label.
    """
    labels = {
        "controller-tools.k8s.io": "1.0",
    }
    if cluster.config.labels != {}:
        labels.update(cluster.config.labels)

    # AppWrapper-wrapped clusters carry the queue label on the wrapper instead.
    if cluster.config.appwrapper is False:
        add_queue_label(cluster, labels)

    return labels


def with_nb_annotations(annotations: dict):
    """
    Add the notebook managed-by annotation (taken from the NB_PREFIX env var,
    set when the SDK runs inside a notebook) and return the annotations dict.

    Fix: previously a None `annotations` raised AttributeError when NB_PREFIX
    was set; None now yields a fresh dict. For a real dict input the update
    remains IN PLACE, since get_metadata passes cluster.config.annotations and
    later template construction reads the same object.
    """
    result = annotations if annotations is not None else {}

    nb_prefix = os.environ.get("NB_PREFIX")
    if nb_prefix:
        result.update({"app.kubernetes.io/managed-by": nb_prefix})

    return result


def update_image(image) -> str:
    """
    Return the configured image or, when none is specified, the preset Ray
    image matching the running Python version (delegates to the centralized
    get_ray_image_for_python_version helper, which warns on unsupported
    versions).
    """
    if not image:
        image = get_ray_image_for_python_version(warn_on_unsupported=True)
    return image


def get_pod_spec(
    cluster: "codeflare_sdk.ray.cluster.Cluster",
    containers: List,
    tolerations: "List[V1Toleration]",
) -> "V1PodSpec":
    """
    Build a V1PodSpec for the head/worker group: containers, the default +
    user-provided volumes, optional tolerations and image pull secrets.
    """
    pod_spec = V1PodSpec(
        containers=containers,
        volumes=generate_custom_storage(cluster.config.volumes, VOLUMES),
        tolerations=tolerations or None,
    )

    if cluster.config.image_pull_secrets != []:
        pod_spec.image_pull_secrets = generate_image_pull_secrets(cluster)

    return pod_spec


def generate_image_pull_secrets(cluster: "codeflare_sdk.ray.cluster.Cluster"):
    """
    Convert the configured image pull secret names into a list of
    V1LocalObjectReference objects.
    """
    return [
        V1LocalObjectReference(name=pull_secret)
        for pull_secret in cluster.config.image_pull_secrets
    ]


def get_head_container_spec(
    cluster: "codeflare_sdk.ray.cluster.Cluster",
):
    """
    Build the V1Container for the Ray head node: image, standard Ray ports
    (gcs/dashboard/client), graceful `ray stop` pre-stop hook, user-defined
    resource requests/limits, and the default + user volume mounts.
    """
    head_container = V1Container(
        name="ray-head",
        image=update_image(cluster.config.image),
        image_pull_policy="Always",
        ports=[
            V1ContainerPort(name="gcs", container_port=6379),
            V1ContainerPort(name="dashboard", container_port=8265),
            V1ContainerPort(name="client", container_port=10001),
        ],
        lifecycle=V1Lifecycle(
            pre_stop=V1LifecycleHandler(
                _exec=V1ExecAction(["/bin/sh", "-c", "ray stop"])
            )
        ),
        resources=get_resources(
            cluster.config.head_cpu_requests,
            cluster.config.head_cpu_limits,
            cluster.config.head_memory_requests,
            cluster.config.head_memory_limits,
            cluster.config.head_extended_resource_requests,
        ),
        volume_mounts=generate_custom_storage(
            cluster.config.volume_mounts, VOLUME_MOUNTS
        ),
    )
    # Only attach an env list when the user configured variables.
    if cluster.config.envs != {}:
        head_container.env = generate_env_vars(cluster)

    return head_container
cluster.config.head_extended_resource_requests, + ), + volume_mounts=generate_custom_storage( + cluster.config.volume_mounts, VOLUME_MOUNTS + ), + ) + if cluster.config.envs != {}: + head_container.env = generate_env_vars(cluster) + + return head_container + + +def generate_env_vars(cluster: "codeflare_sdk.ray.cluster.Cluster"): + """ + The generate_env_vars() builds and returns a V1EnvVar object which is populated by user specified environment variables + """ + envs = [] + for key, value in cluster.config.envs.items(): + env_var = V1EnvVar(name=key, value=value) + envs.append(env_var) + + return envs + + +def get_worker_container_spec( + cluster: "codeflare_sdk.ray.cluster.Cluster", +): + """ + The get_worker_container_spec() function builds and returns a V1Container object including user defined resource requests/limits + """ + worker_container = V1Container( + name="machine-learning", + image=update_image(cluster.config.image), + image_pull_policy="Always", + lifecycle=V1Lifecycle( + pre_stop=V1LifecycleHandler( + _exec=V1ExecAction(["/bin/sh", "-c", "ray stop"]) + ) + ), + resources=get_resources( + cluster.config.worker_cpu_requests, + cluster.config.worker_cpu_limits, + cluster.config.worker_memory_requests, + cluster.config.worker_memory_limits, + cluster.config.worker_extended_resource_requests, + ), + volume_mounts=generate_custom_storage( + cluster.config.volume_mounts, VOLUME_MOUNTS + ), + ) + + if cluster.config.envs != {}: + worker_container.env = generate_env_vars(cluster) + + return worker_container + + +def get_resources( + cpu_requests: Union[int, str], + cpu_limits: Union[int, str], + memory_requests: Union[int, str], + memory_limits: Union[int, str], + custom_extended_resource_requests: Dict[str, int] = None, +): + """ + The get_resources() function generates a V1ResourceRequirements object for cpu/memory request/limits and GPU resources + """ + resource_requirements = V1ResourceRequirements( + requests={"cpu": cpu_requests, "memory": 
def get_resources(
    cpu_requests: Union[int, str],
    cpu_limits: Union[int, str],
    memory_requests: Union[int, str],
    memory_limits: Union[int, str],
    custom_extended_resource_requests: Dict[str, int] = None,
):
    """
    Build a V1ResourceRequirements for cpu/memory requests and limits; any
    custom extended resources (e.g. GPUs) are mirrored into both requests and
    limits, as Kubernetes requires for extended resources.
    """
    resource_requirements = V1ResourceRequirements(
        requests={"cpu": cpu_requests, "memory": memory_requests},
        limits={"cpu": cpu_limits, "memory": memory_limits},
    )

    if custom_extended_resource_requests is not None:
        for resource_name, quantity in custom_extended_resource_requests.items():
            resource_requirements.limits[resource_name] = quantity
            resource_requirements.requests[resource_name] = quantity

    return resource_requirements


# GPU related functions
def head_worker_gpu_count_from_cluster(
    cluster: "codeflare_sdk.ray.cluster.Cluster",
) -> Tuple[int, int]:
    """
    Return the total requested GPU counts for the head and worker groups,
    summing every extended-resource request whose mapped resource type is
    "GPU" (the mapping is user-configurable via extended_resource_mapping).
    """
    head_gpus = 0
    worker_gpus = 0
    for k, quantity in cluster.config.head_extended_resource_requests.items():
        if cluster.config.extended_resource_mapping[k] == "GPU":
            head_gpus += int(quantity)
    for k, quantity in cluster.config.worker_extended_resource_requests.items():
        if cluster.config.extended_resource_mapping[k] == "GPU":
            worker_gpus += int(quantity)

    return head_gpus, worker_gpus


def head_worker_extended_resources_from_cluster(
    cluster: "codeflare_sdk.ray.cluster.Cluster",
) -> Tuple[dict, dict]:
    """
    Return two dicts (head, worker) mapping extended-resource type -> total
    requested quantity, skipping the types Ray handles natively
    (FORBIDDEN_CUSTOM_RESOURCE_TYPES: GPU/CPU/memory).

    Fix: quantities are explicitly ACCUMULATED when several config keys map to
    the same resource type — the previous form left the `+` fold-in as a
    dangling no-op expression after the assignment, discarding prior counts.
    """
    head_extended_resources: dict = {}
    worker_extended_resources: dict = {}

    for k, quantity in cluster.config.head_extended_resource_requests.items():
        resource_type = cluster.config.extended_resource_mapping[k]
        if resource_type in FORBIDDEN_CUSTOM_RESOURCE_TYPES:
            continue
        head_extended_resources[resource_type] = (
            head_extended_resources.get(resource_type, 0) + quantity
        )

    for k, quantity in cluster.config.worker_extended_resource_requests.items():
        resource_type = cluster.config.extended_resource_mapping[k]
        if resource_type in FORBIDDEN_CUSTOM_RESOURCE_TYPES:
            continue
        worker_extended_resources[resource_type] = (
            worker_extended_resources.get(resource_type, 0) + quantity
        )

    return head_extended_resources, worker_extended_resources


# Local Queue related functions
def add_queue_label(cluster: "codeflare_sdk.ray.cluster.Cluster", labels: dict):
    """
    Add the Kueue local-queue label to ``labels`` when a queue name is
    resolvable (the user-specified local_queue, or the namespace default found
    by get_default_local_queue). A non-existent named queue only prints a
    warning — a ValueError here would pre-empt the validating admission
    policy's own report.
    """
    lq_name = cluster.config.local_queue or get_default_local_queue(cluster, labels)
    if lq_name is None:
        return
    elif not local_queue_exists(cluster):
        print(
            "local_queue provided does not exist or is not in this namespace. Please provide the correct local_queue name in Cluster Configuration"
        )
        return
    labels.update({"kueue.x-k8s.io/queue-name": lq_name})


def local_queue_exists(cluster: "codeflare_sdk.ray.cluster.Cluster"):
    """
    Return True when cluster.config.local_queue names a Kueue LocalQueue in
    the target namespace; Kubernetes API errors are routed through the common
    error handler.
    """
    try:
        config_check()
        api_instance = client.CustomObjectsApi(get_api_client())
        local_queues = api_instance.list_namespaced_custom_object(
            group="kueue.x-k8s.io",
            version="v1beta1",
            namespace=cluster.config.namespace,
            plural="localqueues",
        )
    except Exception as e:  # pragma: no cover
        return _kube_api_error_handling(e)

    return any(
        lq["metadata"]["name"] == cluster.config.local_queue
        for lq in local_queues["items"]
    )
def get_default_local_queue(cluster: "codeflare_sdk.ray.cluster.Cluster", labels: dict):
    """
    Find a LocalQueue annotated as the namespace default
    (kueue.x-k8s.io/default-queue: "true") and, when found, add its queue-name
    label to ``labels`` in place. Always returns None for the caller's
    name-resolution chain; 404/403 from the list call is treated as "Kueue not
    available" and silently ignored.
    """
    try:
        # Try to get the default local queue if it exists and append the label list
        config_check()
        api_instance = client.CustomObjectsApi(get_api_client())
        local_queues = api_instance.list_namespaced_custom_object(
            group="kueue.x-k8s.io",
            version="v1beta1",
            namespace=cluster.config.namespace,
            plural="localqueues",
        )
    except ApiException as e:  # pragma: no cover
        if e.status == 404 or e.status == 403:
            return
        else:
            return _kube_api_error_handling(e)

    for lq in local_queues["items"]:
        if (
            "annotations" in lq["metadata"]
            and "kueue.x-k8s.io/default-queue" in lq["metadata"]["annotations"]
            and lq["metadata"]["annotations"]["kueue.x-k8s.io/default-queue"].lower()
            == "true"
        ):
            labels.update({"kueue.x-k8s.io/queue-name": lq["metadata"]["name"]})


# AppWrapper related functions
def wrap_cluster(
    cluster: "codeflare_sdk.ray.cluster.Cluster",
    appwrapper_name: str,
    ray_cluster_yaml: dict,
):
    """
    Wrap the pre-built RayCluster dict in an AppWrapper resource. The Kueue
    queue label is placed on the wrapper (not the inner RayCluster) so the
    AppWrapper is what gets admitted.
    """
    wrapping = {
        "apiVersion": "workload.codeflare.dev/v1beta2",
        "kind": "AppWrapper",
        "metadata": {"name": appwrapper_name, "namespace": cluster.config.namespace},
        "spec": {"components": [{"template": ray_cluster_yaml}]},
    }
    # Add local queue label if it is necessary
    labels = {}
    add_queue_label(cluster, labels)
    if labels != {}:
        wrapping["metadata"]["labels"] = labels

    return wrapping


# Etc.
def generate_custom_storage(provided_storage: list, default_storage: list):
    """
    Merge user-provided volumes/volume mounts with the defaults and return a
    new list: the provided entries followed by the default entries (just the
    defaults when nothing was provided).

    Fix: an empty ``provided_storage`` previously returned the shared
    ``default_storage`` list object itself (the module-level VOLUMES /
    VOLUME_MOUNTS), letting callers mutate the defaults; a copy is returned in
    every case now.
    """
    storage_list = provided_storage.copy()

    if not storage_list:
        return default_storage.copy()

    storage_list.extend(default_storage)
    return storage_list


def write_to_file(cluster: "codeflare_sdk.ray.cluster.Cluster", resource: dict):
    """
    Write the built RayCluster/AppWrapper dict as a yaml file under
    ~/.codeflare/resources/<cluster-name>.yaml and return the file path.
    """
    directory_path = os.path.expanduser("~/.codeflare/resources/")
    output_file_name = os.path.join(directory_path, cluster.config.name + ".yaml")

    # exist_ok avoids the check-then-create race of the previous
    # os.path.exists()/os.makedirs() pair.
    os.makedirs(os.path.dirname(output_file_name), exist_ok=True)

    with open(output_file_name, "w") as outfile:
        yaml.dump(resource, outfile, default_flow_style=False)

    print(f"Written to: {output_file_name}")
    return output_file_name


def gen_names(name):
    """
    Return (appwrapper_name, cluster_name): the user-provided name for both,
    or a shared uuid4-derived pair when no name was given.
    """
    if not name:
        gen_id = str(uuid.uuid4())
        return "appwrapper-" + gen_id, "cluster-" + gen_id
    return name, name
+# See the License for the specific language governing permissions and +# limitations under the License. + +""" +The cluster sub-module contains the definition of the Cluster object, which represents +the resources requested by the user. It also contains functions for checking the +cluster setup queue, a list of all existing clusters, and the user's working namespace. +""" + +from time import sleep +from typing import List, Optional, Tuple, Dict + +from ray.job_submission import JobSubmissionClient + +from ...common.kubernetes_cluster.auth import ( + config_check, + get_api_client, +) +from . import pretty_print +from .build_ray_cluster import build_ray_cluster, head_worker_gpu_count_from_cluster +from .build_ray_cluster import write_to_file as write_cluster_to_file +from ...common import _kube_api_error_handling + +from .config import ClusterConfiguration +from .status import ( + CodeFlareClusterStatus, + RayCluster, + RayClusterStatus, +) +from ..appwrapper import ( + AppWrapper, + AppWrapperStatus, +) +from ...common.widgets.widgets import ( + cluster_apply_down_buttons, + is_notebook, +) +from kubernetes import client +import yaml +import os +import requests + +from kubernetes import config +from kubernetes.dynamic import DynamicClient +from kubernetes import client as k8s_client +from kubernetes.client.rest import ApiException + +from kubernetes.client.rest import ApiException +import warnings + +CF_SDK_FIELD_MANAGER = "codeflare-sdk" + + +class Cluster: + """ + An object for requesting, bringing up, and taking down resources. + Can also be used for seeing the resource cluster status and details. + + Note that currently, the underlying implementation is a Ray cluster. + """ + + def __init__(self, config: ClusterConfiguration): + """ + Create the resource cluster object by passing in a ClusterConfiguration + (defined in the config sub-module). 
An AppWrapper will then be generated
+        based off of the configured resources to represent the desired cluster
+        request.
+        """
+        self.config = config
+        self._job_submission_client = None
+        if self.config is None:
+            warnings.warn(
+                "Please provide a ClusterConfiguration to initialise the Cluster object"
+            )
+            return
+        else:
+            self.resource_yaml = self.create_resource()
+
+        if is_notebook():
+            cluster_apply_down_buttons(self)
+
+    def get_dynamic_client(self):  # pragma: no cover
+        return DynamicClient(get_api_client())
+
+    def config_check(self):
+        return config_check()
+
+    @property
+    def _client_headers(self):
+        k8_client = get_api_client()
+        return {
+            "Authorization": k8_client.configuration.get_api_key_with_prefix(
+                "authorization"
+            )
+        }
+
+    @property
+    def _client_verify_tls(self):
+        return _is_openshift_cluster() and self.config.verify_tls
+
+    @property
+    def job_client(self):
+        k8client = get_api_client()
+        if self._job_submission_client:
+            return self._job_submission_client
+        if _is_openshift_cluster():
+            self._job_submission_client = JobSubmissionClient(
+                self.cluster_dashboard_uri(),
+                headers=self._client_headers,
+                verify=self._client_verify_tls,
+            )
+        else:
+            self._job_submission_client = JobSubmissionClient(
+                self.cluster_dashboard_uri()
+            )
+        return self._job_submission_client
+
+    def create_resource(self):
+        """
+        Called upon cluster object creation, creates an AppWrapper yaml based on
+        the specifications of the ClusterConfiguration.
+        """
+        if self.config.namespace is None:
+            self.config.namespace = get_current_namespace()
+            if self.config.namespace is None:
+                print("Please specify with namespace=")
+            elif type(self.config.namespace) is not str:
+                raise TypeError(
+                    f"Namespace {self.config.namespace} is of type {type(self.config.namespace)}. Check your Kubernetes Authentication."
+ ) + return build_ray_cluster(self) + + # creates a new cluster with the provided or default spec + def up(self): + """ + Applies the Cluster yaml, pushing the resource request onto + the Kueue localqueue. + """ + print( + "WARNING: The up() function is planned for deprecation in favor of apply()." + ) + # check if RayCluster CustomResourceDefinition exists if not throw RuntimeError + self._throw_for_no_raycluster() + namespace = self.config.namespace + + try: + config_check() + api_instance = client.CustomObjectsApi(get_api_client()) + if self.config.appwrapper: + if self.config.write_to_file: + with open(self.resource_yaml) as f: + aw = yaml.load(f, Loader=yaml.FullLoader) + api_instance.create_namespaced_custom_object( + group="workload.codeflare.dev", + version="v1beta2", + namespace=namespace, + plural="appwrappers", + body=aw, + ) + else: + api_instance.create_namespaced_custom_object( + group="workload.codeflare.dev", + version="v1beta2", + namespace=namespace, + plural="appwrappers", + body=self.resource_yaml, + ) + print(f"AppWrapper: '{self.config.name}' has successfully been created") + else: + self._component_resources_up(namespace, api_instance) + print( + f"Ray Cluster: '{self.config.name}' has successfully been created" + ) + except Exception as e: # pragma: no cover + if e.status == 422: + print( + "WARNING: RayCluster creation rejected due to invalid Kueue configuration. Please contact your administrator." + ) + else: + print( + "WARNING: Failed to create RayCluster due to unexpected error. Please contact your administrator." + ) + return _kube_api_error_handling(e) + + # Applies a new cluster with the provided or default spec + def apply(self, force=False): + """ + Applies the Cluster yaml using server-side apply. + If 'force' is set to True, conflicts will be forced. 
+ """ + # check if RayCluster CustomResourceDefinition exists if not throw RuntimeError + self._throw_for_no_raycluster() + namespace = self.config.namespace + name = self.config.name + try: + self.config_check() + api_instance = client.CustomObjectsApi(get_api_client()) + crds = self.get_dynamic_client().resources + if self.config.appwrapper: + api_version = "workload.codeflare.dev/v1beta2" + api_instance = crds.get(api_version=api_version, kind="AppWrapper") + # defaulting body to resource_yaml + body = self.resource_yaml + if self.config.write_to_file: + # if write_to_file is True, load the file from AppWrapper yaml and update body + with open(self.resource_yaml) as f: + aw = yaml.load(f, Loader=yaml.FullLoader) + body = aw + api_instance.server_side_apply( + field_manager=CF_SDK_FIELD_MANAGER, + group="workload.codeflare.dev", + version="v1beta2", + namespace=namespace, + plural="appwrappers", + body=body, + force_conflicts=force, + ) + print( + f"AppWrapper: '{name}' configuration has successfully been applied. For optimal resource management, you should delete this Ray Cluster when no longer in use." + ) + else: + api_version = "ray.io/v1" + api_instance = crds.get(api_version=api_version, kind="RayCluster") + self._component_resources_apply( + namespace=namespace, api_instance=api_instance + ) + print( + f"Ray Cluster: '{name}' has successfully been applied. For optimal resource management, you should delete this Ray Cluster when no longer in use." + ) + except AttributeError as e: + raise RuntimeError(f"Failed to initialize DynamicClient: {e}") + except Exception as e: # pragma: no cover + if e.status == 422: + print( + "WARNING: RayCluster creation rejected due to invalid Kueue configuration. Please contact your administrator." + ) + else: + print( + "WARNING: Failed to create RayCluster due to unexpected error. Please contact your administrator." 
+ ) + return _kube_api_error_handling(e) + + def _throw_for_no_raycluster(self): + api_instance = client.CustomObjectsApi(get_api_client()) + try: + api_instance.list_namespaced_custom_object( + group="ray.io", + version="v1", + namespace=self.config.namespace, + plural="rayclusters", + ) + except ApiException as e: + if e.status == 404: + raise RuntimeError( + "RayCluster CustomResourceDefinition unavailable contact your administrator." + ) + else: + raise RuntimeError( + "Failed to get RayCluster CustomResourceDefinition: " + str(e) + ) + + def down(self): + """ + Deletes the AppWrapper yaml, scaling-down and deleting all resources + associated with the cluster. + """ + namespace = self.config.namespace + resource_name = self.config.name + self._throw_for_no_raycluster() + try: + self.config_check() + api_instance = client.CustomObjectsApi(get_api_client()) + if self.config.appwrapper: + api_instance.delete_namespaced_custom_object( + group="workload.codeflare.dev", + version="v1beta2", + namespace=namespace, + plural="appwrappers", + name=resource_name, + ) + print(f"AppWrapper: '{resource_name}' has successfully been deleted") + else: + _delete_resources(resource_name, namespace, api_instance) + print( + f"Ray Cluster: '{self.config.name}' has successfully been deleted" + ) + except Exception as e: # pragma: no cover + return _kube_api_error_handling(e) + + def status( + self, print_to_console: bool = True + ) -> Tuple[CodeFlareClusterStatus, bool]: + """ + Returns the requested cluster's status, as well as whether or not + it is ready for use. 
+ """ + ready = False + status = CodeFlareClusterStatus.UNKNOWN + if self.config.appwrapper: + # check the app wrapper status + appwrapper = _app_wrapper_status(self.config.name, self.config.namespace) + if appwrapper: + if appwrapper.status in [ + AppWrapperStatus.RESUMING, + AppWrapperStatus.RESETTING, + ]: + ready = False + status = CodeFlareClusterStatus.STARTING + elif appwrapper.status in [ + AppWrapperStatus.FAILED, + ]: + ready = False + status = CodeFlareClusterStatus.FAILED # should deleted be separate + return status, ready # exit early, no need to check ray status + elif appwrapper.status in [ + AppWrapperStatus.SUSPENDED, + AppWrapperStatus.SUSPENDING, + ]: + ready = False + if appwrapper.status == AppWrapperStatus.SUSPENDED: + status = CodeFlareClusterStatus.QUEUED + else: + status = CodeFlareClusterStatus.QUEUEING + if print_to_console: + pretty_print.print_app_wrappers_status([appwrapper]) + return ( + status, + ready, + ) # no need to check the ray status since still in queue + + # check the ray cluster status + cluster = _ray_cluster_status(self.config.name, self.config.namespace) + if cluster: + if cluster.status == RayClusterStatus.SUSPENDED: + ready = False + status = CodeFlareClusterStatus.SUSPENDED + if cluster.status == RayClusterStatus.UNKNOWN: + ready = False + status = CodeFlareClusterStatus.STARTING + if cluster.status == RayClusterStatus.READY: + ready = True + status = CodeFlareClusterStatus.READY + elif cluster.status in [ + RayClusterStatus.UNHEALTHY, + RayClusterStatus.FAILED, + ]: + ready = False + status = CodeFlareClusterStatus.FAILED + + if print_to_console: + # overriding the number of gpus with requested + _, cluster.worker_gpu = head_worker_gpu_count_from_cluster(self) + pretty_print.print_cluster_status(cluster) + elif print_to_console: + if status == CodeFlareClusterStatus.UNKNOWN: + pretty_print.print_no_resources_found() + else: + pretty_print.print_app_wrappers_status([appwrapper], starting=True) + + return status, ready 
+ + def is_dashboard_ready(self) -> bool: + """ + Checks if the cluster's dashboard is ready and accessible. + + This method attempts to send a GET request to the cluster dashboard URI. + If the request is successful (HTTP status code 200), it returns True. + If an SSL error occurs, it returns False, indicating the dashboard is not ready. + + Returns: + bool: + True if the dashboard is ready, False otherwise. + """ + try: + response = requests.get( + self.cluster_dashboard_uri(), + headers=self._client_headers, + timeout=5, + verify=self._client_verify_tls, + ) + except requests.exceptions.SSLError: # pragma no cover + # SSL exception occurs when oauth ingress has been created but cluster is not up + return False + if response.status_code == 200: + return True + else: + return False + + def wait_ready(self, timeout: Optional[int] = None, dashboard_check: bool = True): + """ + Waits for the requested cluster to be ready, up to an optional timeout. + + This method checks the status of the cluster every five seconds until it is + ready or the timeout is reached. If dashboard_check is enabled, it will also + check for the readiness of the dashboard. + + Args: + timeout (Optional[int]): + The maximum time to wait for the cluster to be ready in seconds. If None, waits indefinitely. + dashboard_check (bool): + Flag to determine if the dashboard readiness should + be checked. Defaults to True. + + Raises: + TimeoutError: + If the timeout is reached before the cluster or dashboard is ready. + """ + print("Waiting for requested resources to be set up...") + time = 0 + while True: + if timeout and time >= timeout: + raise TimeoutError( + f"wait() timed out after waiting {timeout}s for cluster to be ready" + ) + status, ready = self.status(print_to_console=False) + if status == CodeFlareClusterStatus.UNKNOWN: + print( + "WARNING: Current cluster status is unknown, have you run cluster.up yet?" 
+ ) + if ready: + break + sleep(5) + time += 5 + print("Requested cluster is up and running!") + + while dashboard_check: + if timeout and time >= timeout: + raise TimeoutError( + f"wait() timed out after waiting {timeout}s for dashboard to be ready" + ) + if self.is_dashboard_ready(): + print("Dashboard is ready!") + break + sleep(5) + time += 5 + + def details(self, print_to_console: bool = True) -> RayCluster: + """ + Retrieves details about the Ray Cluster. + + This method returns a copy of the Ray Cluster information and optionally prints + the details to the console. + + Args: + print_to_console (bool): + Flag to determine if the cluster details should be + printed to the console. Defaults to True. + + Returns: + RayCluster: + A copy of the Ray Cluster details. + """ + cluster = _copy_to_ray(self) + if print_to_console: + pretty_print.print_clusters([cluster]) + return cluster + + def cluster_uri(self) -> str: + """ + Returns a string containing the cluster's URI. + """ + return f"ray://{self.config.name}-head-svc.{self.config.namespace}.svc:10001" + + def cluster_dashboard_uri(self) -> str: + """ + Returns a string containing the cluster's dashboard URI. 
+ """ + config_check() + if _is_openshift_cluster(): + try: + api_instance = client.CustomObjectsApi(get_api_client()) + routes = api_instance.list_namespaced_custom_object( + group="route.openshift.io", + version="v1", + namespace=self.config.namespace, + plural="routes", + ) + except Exception as e: # pragma: no cover + return _kube_api_error_handling(e) + + for route in routes["items"]: + if route["metadata"][ + "name" + ] == f"ray-dashboard-{self.config.name}" or route["metadata"][ + "name" + ].startswith( + f"{self.config.name}-ingress" + ): + protocol = "https" if route["spec"].get("tls") else "http" + return f"{protocol}://{route['spec']['host']}" + else: + try: + api_instance = client.NetworkingV1Api(get_api_client()) + ingresses = api_instance.list_namespaced_ingress(self.config.namespace) + except Exception as e: # pragma no cover + return _kube_api_error_handling(e) + + for ingress in ingresses.items: + annotations = ingress.metadata.annotations + protocol = "http" + if ( + ingress.metadata.name == f"ray-dashboard-{self.config.name}" + or ingress.metadata.name.startswith(f"{self.config.name}-ingress") + ): + if annotations == None: + protocol = "http" + elif "route.openshift.io/termination" in annotations: + protocol = "https" + return f"{protocol}://{ingress.spec.rules[0].host}" + return "Dashboard not available yet, have you run cluster.up()?" + + def list_jobs(self) -> List: + """ + This method accesses the head ray node in your cluster and lists the running jobs. + """ + return self.job_client.list_jobs() + + def job_status(self, job_id: str) -> str: + """ + This method accesses the head ray node in your cluster and returns the job status for the provided job id. + """ + return self.job_client.get_job_status(job_id) + + def job_logs(self, job_id: str) -> str: + """ + This method accesses the head ray node in your cluster and returns the logs for the provided job id. 
+ """ + return self.job_client.get_job_logs(job_id) + + @staticmethod + def _head_worker_extended_resources_from_rc_dict(rc: Dict) -> Tuple[dict, dict]: + head_extended_resources, worker_extended_resources = {}, {} + for resource in rc["spec"]["workerGroupSpecs"][0]["template"]["spec"][ + "containers" + ][0]["resources"]["limits"].keys(): + if resource in ["memory", "cpu"]: + continue + worker_extended_resources[resource] = rc["spec"]["workerGroupSpecs"][0][ + "template" + ]["spec"]["containers"][0]["resources"]["limits"][resource] + + for resource in rc["spec"]["headGroupSpec"]["template"]["spec"]["containers"][ + 0 + ]["resources"]["limits"].keys(): + if resource in ["memory", "cpu"]: + continue + head_extended_resources[resource] = rc["spec"]["headGroupSpec"]["template"][ + "spec" + ]["containers"][0]["resources"]["limits"][resource] + + return head_extended_resources, worker_extended_resources + + def local_client_url(self): + """ + Constructs the URL for the local Ray client. + + Returns: + str: + The Ray client URL based on the ingress domain. 
+ """ + ingress_domain = _get_ingress_domain(self) + return f"ray://{ingress_domain}" + + def _component_resources_up( + self, namespace: str, api_instance: client.CustomObjectsApi + ): + if self.config.write_to_file: + with open(self.resource_yaml) as f: + ray_cluster = yaml.safe_load(f) + _create_resources(ray_cluster, namespace, api_instance) + else: + _create_resources(self.resource_yaml, namespace, api_instance) + + def _component_resources_apply( + self, namespace: str, api_instance: client.CustomObjectsApi + ): + if self.config.write_to_file: + with open(self.resource_yaml) as f: + ray_cluster = yaml.safe_load(f) + _apply_ray_cluster(ray_cluster, namespace, api_instance) + else: + _apply_ray_cluster(self.resource_yaml, namespace, api_instance) + + def _component_resources_down( + self, namespace: str, api_instance: client.CustomObjectsApi + ): + cluster_name = self.config.name + if self.config.write_to_file: + with open(self.resource_yaml) as f: + yamls = yaml.load_all(f, Loader=yaml.FullLoader) + _delete_resources(yamls, namespace, api_instance, cluster_name) + else: + yamls = yaml.safe_load_all(self.resource_yaml) + _delete_resources(yamls, namespace, api_instance, cluster_name) + + +def list_all_clusters(namespace: str, print_to_console: bool = True): + """ + Returns (and prints by default) a list of all clusters in a given namespace. + """ + clusters = _get_ray_clusters(namespace) + if print_to_console: + pretty_print.print_clusters(clusters) + return clusters + + +def list_all_queued( + namespace: str, print_to_console: bool = True, appwrapper: bool = False +): + """ + Returns (and prints by default) a list of all currently queued-up Ray Clusters + in a given namespace. 
+ """ + if appwrapper: + resources = _get_app_wrappers(namespace, filter=[AppWrapperStatus.SUSPENDED]) + if print_to_console: + pretty_print.print_app_wrappers_status(resources) + else: + resources = _get_ray_clusters( + namespace, filter=[RayClusterStatus.READY, RayClusterStatus.SUSPENDED] + ) + if print_to_console: + pretty_print.print_ray_clusters_status(resources) + return resources + + +def get_current_namespace(): # pragma: no cover + """ + Retrieves the current Kubernetes namespace. + + Returns: + str: + The current namespace or None if not found. + """ + if os.path.isfile("/var/run/secrets/kubernetes.io/serviceaccount/namespace"): + try: + file = open("/var/run/secrets/kubernetes.io/serviceaccount/namespace", "r") + active_context = file.readline().strip("\n") + return active_context + except Exception as e: + print("Unable to find current namespace") + print("trying to gather from current context") + try: + _, active_context = config.list_kube_config_contexts(config_check()) + except Exception as e: + return _kube_api_error_handling(e) + try: + return active_context["context"]["namespace"] + except KeyError: + return None + + +def get_cluster( + cluster_name: str, + namespace: str = "default", + verify_tls: bool = True, + write_to_file: bool = False, +): + """ + Retrieves an existing Ray Cluster or AppWrapper as a Cluster object. + + This function fetches an existing Ray Cluster or AppWrapper from the Kubernetes cluster and returns + it as a `Cluster` object, including its YAML configuration under `Cluster.resource_yaml`. + + Args: + cluster_name (str): + The name of the Ray Cluster or AppWrapper. + namespace (str, optional): + The Kubernetes namespace where the Ray Cluster or AppWrapper is located. Default is "default". + verify_tls (bool, optional): + Whether to verify TLS when connecting to the cluster. Default is True. + write_to_file (bool, optional): + If True, writes the resource configuration to a YAML file. Default is False. 
+
+    Returns:
+        Cluster:
+            A Cluster object representing the retrieved Ray Cluster or AppWrapper.
+
+    Raises:
+        Exception:
+            If the Ray Cluster or AppWrapper cannot be found or does not exist.
+    """
+    config_check()
+    api_instance = client.CustomObjectsApi(get_api_client())
+    # Check/Get the AppWrapper if it exists
+    is_appwrapper = _check_aw_exists(cluster_name, namespace)
+    if is_appwrapper:
+        try:
+            resource = api_instance.get_namespaced_custom_object(
+                group="workload.codeflare.dev",
+                version="v1beta2",
+                namespace=namespace,
+                plural="appwrappers",
+                name=cluster_name,
+            )
+            resource_extraction = resource["spec"]["components"][0]["template"]
+        except Exception as e:
+            return _kube_api_error_handling(e)
+    else:
+        # Get the Ray Cluster
+        try:
+            resource = api_instance.get_namespaced_custom_object(
+                group="ray.io",
+                version="v1",
+                namespace=namespace,
+                plural="rayclusters",
+                name=cluster_name,
+            )
+            resource_extraction = resource
+        except Exception as e:
+            return _kube_api_error_handling(e)
+
+    (
+        head_extended_resources,
+        worker_extended_resources,
+    ) = Cluster._head_worker_extended_resources_from_rc_dict(resource_extraction)
+    # Create a Cluster Configuration with just the necessary provided parameters
+    cluster_config = ClusterConfiguration(
+        name=cluster_name,
+        namespace=namespace,
+        verify_tls=verify_tls,
+        write_to_file=write_to_file,
+        appwrapper=is_appwrapper,
+        head_cpu_limits=resource_extraction["spec"]["headGroupSpec"]["template"][
+            "spec"
+        ]["containers"][0]["resources"]["limits"]["cpu"],
+        head_cpu_requests=resource_extraction["spec"]["headGroupSpec"]["template"][
+            "spec"
+        ]["containers"][0]["resources"]["requests"]["cpu"],
+        head_memory_limits=resource_extraction["spec"]["headGroupSpec"]["template"][
+            "spec"
+        ]["containers"][0]["resources"]["limits"]["memory"],
+        head_memory_requests=resource_extraction["spec"]["headGroupSpec"]["template"][
+            "spec"
+        ]["containers"][0]["resources"]["requests"]["memory"],
+        num_workers=resource_extraction["spec"]["workerGroupSpecs"][0]["minReplicas"],
+        worker_cpu_limits=resource_extraction["spec"]["workerGroupSpecs"][0][
+            "template"
+        ]["spec"]["containers"][0]["resources"]["limits"]["cpu"],
+        worker_cpu_requests=resource_extraction["spec"]["workerGroupSpecs"][0][
+            "template"
+        ]["spec"]["containers"][0]["resources"]["requests"]["cpu"],
+        worker_memory_limits=resource_extraction["spec"]["workerGroupSpecs"][0][
+            "template"
+        ]["spec"]["containers"][0]["resources"]["limits"]["memory"],
+        worker_memory_requests=resource_extraction["spec"]["workerGroupSpecs"][0][
+            "template"
+        ]["spec"]["containers"][0]["resources"]["requests"]["memory"],
+        head_extended_resource_requests=head_extended_resources,
+        worker_extended_resource_requests=worker_extended_resources,
+    )
+    # Ignore the warning here for the lack of a ClusterConfiguration
+    with warnings.catch_warnings():
+        warnings.filterwarnings(
+            "ignore",
+            message="Please provide a ClusterConfiguration to initialise the Cluster object",
+        )
+        cluster = Cluster(None)
+    cluster.config = cluster_config
+
+    # Remove auto-generated fields like creationTimestamp, uid and etc.
+ remove_autogenerated_fields(resource) + + if write_to_file: + cluster.resource_yaml = write_cluster_to_file(cluster, resource) + else: + # Update the Cluster's resource_yaml to reflect the retrieved Ray Cluster/AppWrapper + cluster.resource_yaml = resource + print(f"Yaml resources loaded for {cluster.config.name}") + + return cluster + + +def remove_autogenerated_fields(resource): + """Recursively remove autogenerated fields from a dictionary.""" + if isinstance(resource, dict): + for key in list(resource.keys()): + if key in [ + "creationTimestamp", + "resourceVersion", + "uid", + "selfLink", + "managedFields", + "finalizers", + "generation", + "status", + "suspend", + "workload.codeflare.dev/user", # AppWrapper field + "workload.codeflare.dev/userid", # AppWrapper field + "podSetInfos", # AppWrapper field + ]: + del resource[key] + else: + remove_autogenerated_fields(resource[key]) + elif isinstance(resource, list): + for item in resource: + remove_autogenerated_fields(item) + + +# private methods +def _delete_resources(name: str, namespace: str, api_instance: client.CustomObjectsApi): + api_instance.delete_namespaced_custom_object( + group="ray.io", + version="v1", + namespace=namespace, + plural="rayclusters", + name=name, + ) + + +def _create_resources(yamls, namespace: str, api_instance: client.CustomObjectsApi): + api_instance.create_namespaced_custom_object( + group="ray.io", + version="v1", + namespace=namespace, + plural="rayclusters", + body=yamls, + ) + + +def _apply_ray_cluster( + yamls, namespace: str, api_instance: client.CustomObjectsApi, force=False +): + api_instance.server_side_apply( + field_manager=CF_SDK_FIELD_MANAGER, + group="ray.io", + version="v1", + namespace=namespace, + plural="rayclusters", + body=yamls, + force_conflicts=force, # Allow forcing conflicts if needed + ) + + +def _check_aw_exists(name: str, namespace: str) -> bool: + try: + config_check() + api_instance = client.CustomObjectsApi(get_api_client()) + aws = 
api_instance.list_namespaced_custom_object( + group="workload.codeflare.dev", + version="v1beta2", + namespace=namespace, + plural="appwrappers", + ) + except Exception as e: # pragma: no cover + return _kube_api_error_handling(e, print_error=False) + for aw in aws["items"]: + if aw["metadata"]["name"] == name: + return True + return False + + +# Cant test this until get_current_namespace is fixed and placed in this function over using `self` +def _get_ingress_domain(self): # pragma: no cover + config_check() + + if self.config.namespace != None: + namespace = self.config.namespace + else: + namespace = get_current_namespace() + domain = None + + if _is_openshift_cluster(): + try: + api_instance = client.CustomObjectsApi(get_api_client()) + + routes = api_instance.list_namespaced_custom_object( + group="route.openshift.io", + version="v1", + namespace=namespace, + plural="routes", + ) + except Exception as e: # pragma: no cover + return _kube_api_error_handling(e) + + for route in routes["items"]: + if ( + route["spec"]["port"]["targetPort"] == "client" + or route["spec"]["port"]["targetPort"] == 10001 + ): + domain = route["spec"]["host"] + else: + try: + api_client = client.NetworkingV1Api(get_api_client()) + ingresses = api_client.list_namespaced_ingress(namespace) + except Exception as e: # pragma: no cover + return _kube_api_error_handling(e) + + for ingress in ingresses.items: + if ingress.spec.rules[0].http.paths[0].backend.service.port.number == 10001: + domain = ingress.spec.rules[0].host + return domain + + +def _app_wrapper_status(name, namespace="default") -> Optional[AppWrapper]: + try: + config_check() + api_instance = client.CustomObjectsApi(get_api_client()) + aws = api_instance.list_namespaced_custom_object( + group="workload.codeflare.dev", + version="v1beta2", + namespace=namespace, + plural="appwrappers", + ) + except Exception as e: # pragma: no cover + return _kube_api_error_handling(e) + + for aw in aws["items"]: + if aw["metadata"]["name"] 
== name: + return _map_to_app_wrapper(aw) + return None + + +def _ray_cluster_status(name, namespace="default") -> Optional[RayCluster]: + try: + config_check() + api_instance = client.CustomObjectsApi(get_api_client()) + rcs = api_instance.list_namespaced_custom_object( + group="ray.io", + version="v1", + namespace=namespace, + plural="rayclusters", + ) + except Exception as e: # pragma: no cover + return _kube_api_error_handling(e) + + for rc in rcs["items"]: + if rc["metadata"]["name"] == name: + return _map_to_ray_cluster(rc) + return None + + +def _get_ray_clusters( + namespace="default", filter: Optional[List[RayClusterStatus]] = None +) -> List[RayCluster]: + list_of_clusters = [] + try: + config_check() + api_instance = client.CustomObjectsApi(get_api_client()) + rcs = api_instance.list_namespaced_custom_object( + group="ray.io", + version="v1", + namespace=namespace, + plural="rayclusters", + ) + except Exception as e: # pragma: no cover + return _kube_api_error_handling(e) + + # Get a list of RCs with the filter if it is passed to the function + if filter is not None: + for rc in rcs["items"]: + ray_cluster = _map_to_ray_cluster(rc) + if filter and ray_cluster.status in filter: + list_of_clusters.append(ray_cluster) + else: + for rc in rcs["items"]: + list_of_clusters.append(_map_to_ray_cluster(rc)) + return list_of_clusters + + +def _get_app_wrappers( + namespace="default", filter=List[AppWrapperStatus] +) -> List[AppWrapper]: + list_of_app_wrappers = [] + + try: + config_check() + api_instance = client.CustomObjectsApi(get_api_client()) + aws = api_instance.list_namespaced_custom_object( + group="workload.codeflare.dev", + version="v1beta2", + namespace=namespace, + plural="appwrappers", + ) + except Exception as e: # pragma: no cover + return _kube_api_error_handling(e) + + for item in aws["items"]: + app_wrapper = _map_to_app_wrapper(item) + if filter and app_wrapper.status in filter: + list_of_app_wrappers.append(app_wrapper) + else: + # Unsure what 
the purpose of the filter is + list_of_app_wrappers.append(app_wrapper) + return list_of_app_wrappers + + +def _map_to_ray_cluster(rc) -> Optional[RayCluster]: + if "status" in rc and "state" in rc["status"]: + status = RayClusterStatus(rc["status"]["state"].lower()) + else: + status = RayClusterStatus.UNKNOWN + config_check() + dashboard_url = None + if _is_openshift_cluster(): + try: + api_instance = client.CustomObjectsApi(get_api_client()) + routes = api_instance.list_namespaced_custom_object( + group="route.openshift.io", + version="v1", + namespace=rc["metadata"]["namespace"], + plural="routes", + ) + except Exception as e: # pragma: no cover + return _kube_api_error_handling(e) + + for route in routes["items"]: + rc_name = rc["metadata"]["name"] + if route["metadata"]["name"] == f"ray-dashboard-{rc_name}" or route[ + "metadata" + ]["name"].startswith(f"{rc_name}-ingress"): + protocol = "https" if route["spec"].get("tls") else "http" + dashboard_url = f"{protocol}://{route['spec']['host']}" + else: + try: + api_instance = client.NetworkingV1Api(get_api_client()) + ingresses = api_instance.list_namespaced_ingress( + rc["metadata"]["namespace"] + ) + except Exception as e: # pragma no cover + return _kube_api_error_handling(e) + for ingress in ingresses.items: + annotations = ingress.metadata.annotations + protocol = "http" + if ( + ingress.metadata.name == f"ray-dashboard-{rc['metadata']['name']}" + or ingress.metadata.name.startswith(f"{rc['metadata']['name']}-ingress") + ): + if annotations == None: + protocol = "http" + elif "route.openshift.io/termination" in annotations: + protocol = "https" + dashboard_url = f"{protocol}://{ingress.spec.rules[0].host}" + + ( + head_extended_resources, + worker_extended_resources, + ) = Cluster._head_worker_extended_resources_from_rc_dict(rc) + + return RayCluster( + name=rc["metadata"]["name"], + status=status, + # for now we are not using autoscaling so same replicas is fine + 
num_workers=rc["spec"]["workerGroupSpecs"][0]["replicas"], + worker_mem_limits=rc["spec"]["workerGroupSpecs"][0]["template"]["spec"][ + "containers" + ][0]["resources"]["limits"]["memory"], + worker_mem_requests=rc["spec"]["workerGroupSpecs"][0]["template"]["spec"][ + "containers" + ][0]["resources"]["requests"]["memory"], + worker_cpu_requests=rc["spec"]["workerGroupSpecs"][0]["template"]["spec"][ + "containers" + ][0]["resources"]["requests"]["cpu"], + worker_cpu_limits=rc["spec"]["workerGroupSpecs"][0]["template"]["spec"][ + "containers" + ][0]["resources"]["limits"]["cpu"], + worker_extended_resources=worker_extended_resources, + namespace=rc["metadata"]["namespace"], + head_cpu_requests=rc["spec"]["headGroupSpec"]["template"]["spec"]["containers"][ + 0 + ]["resources"]["requests"]["cpu"], + head_cpu_limits=rc["spec"]["headGroupSpec"]["template"]["spec"]["containers"][ + 0 + ]["resources"]["limits"]["cpu"], + head_mem_requests=rc["spec"]["headGroupSpec"]["template"]["spec"]["containers"][ + 0 + ]["resources"]["requests"]["memory"], + head_mem_limits=rc["spec"]["headGroupSpec"]["template"]["spec"]["containers"][ + 0 + ]["resources"]["limits"]["memory"], + head_extended_resources=head_extended_resources, + dashboard=dashboard_url, + ) + + +def _map_to_app_wrapper(aw) -> AppWrapper: + if "status" in aw: + return AppWrapper( + name=aw["metadata"]["name"], + status=AppWrapperStatus(aw["status"]["phase"].lower()), + ) + return AppWrapper( + name=aw["metadata"]["name"], + status=AppWrapperStatus("suspended"), + ) + + +def _copy_to_ray(cluster: Cluster) -> RayCluster: + ray = RayCluster( + name=cluster.config.name, + status=cluster.status(print_to_console=False)[0], + num_workers=cluster.config.num_workers, + worker_mem_requests=cluster.config.worker_memory_requests, + worker_mem_limits=cluster.config.worker_memory_limits, + worker_cpu_requests=cluster.config.worker_cpu_requests, + worker_cpu_limits=cluster.config.worker_cpu_limits, + 
worker_extended_resources=cluster.config.worker_extended_resource_requests, + namespace=cluster.config.namespace, + dashboard=cluster.cluster_dashboard_uri(), + head_mem_requests=cluster.config.head_memory_requests, + head_mem_limits=cluster.config.head_memory_limits, + head_cpu_requests=cluster.config.head_cpu_requests, + head_cpu_limits=cluster.config.head_cpu_limits, + head_extended_resources=cluster.config.head_extended_resource_requests, + ) + if ray.status == CodeFlareClusterStatus.READY: + ray.status = RayClusterStatus.READY + return ray + + +# Check if the routes api exists +def _is_openshift_cluster(): + try: + config_check() + for api in client.ApisApi(get_api_client()).get_api_versions().groups: + for v in api.versions: + if "route.openshift.io/v1" in v.group_version: + return True + else: + return False + except Exception as e: # pragma: no cover + return _kube_api_error_handling(e) diff --git a/src/codeflare_sdk/ray/cluster/config.py b/src/codeflare_sdk/ray/cluster/config.py new file mode 100644 index 00000000..dc61de2a --- /dev/null +++ b/src/codeflare_sdk/ray/cluster/config.py @@ -0,0 +1,267 @@ +# Copyright 2022 IBM, Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +The config sub-module contains the definition of the ClusterConfiguration dataclass, +which is used to specify resource requirements and other details when creating a +Cluster object. 
+""" + +import pathlib +import warnings +from dataclasses import dataclass, field, fields +from typing import Dict, List, Optional, Union, get_args, get_origin +from kubernetes.client import V1Toleration, V1Volume, V1VolumeMount + +dir = pathlib.Path(__file__).parent.parent.resolve() + +# https://docs.ray.io/en/latest/ray-core/scheduling/accelerators.html +DEFAULT_RESOURCE_MAPPING = { + "nvidia.com/gpu": "GPU", + "intel.com/gpu": "GPU", + "amd.com/gpu": "GPU", + "aws.amazon.com/neuroncore": "neuron_cores", + "google.com/tpu": "TPU", + "habana.ai/gaudi": "HPU", + "huawei.com/Ascend910": "NPU", + "huawei.com/Ascend310": "NPU", +} + + +@dataclass +class ClusterConfiguration: + """ + This dataclass is used to specify resource requirements and other details, and + is passed in as an argument when creating a Cluster object. + + Args: + name: + The name of the cluster. + namespace: + The namespace in which the cluster should be created. + head_extended_resource_requests: + A dictionary of extended resource requests for the head node. ex: {"nvidia.com/gpu": 1} + head_tolerations: + List of tolerations for head nodes. + num_workers: + The number of workers to create. + worker_tolerations: + List of tolerations for worker nodes. + appwrapper: + A boolean indicating whether to use an AppWrapper. + envs: + A dictionary of environment variables to set for the cluster. + image: + The image to use for the cluster. + image_pull_secrets: + A list of image pull secrets to use for the cluster. + write_to_file: + A boolean indicating whether to write the cluster configuration to a file. + verify_tls: + A boolean indicating whether to verify TLS when connecting to the cluster. + labels: + A dictionary of labels to apply to the cluster. + worker_extended_resource_requests: + A dictionary of extended resource requests for each worker. 
ex: {"nvidia.com/gpu": 1} + extended_resource_mapping: + A dictionary of custom resource mappings to map extended resource requests to RayCluster resource names + overwrite_default_resource_mapping: + A boolean indicating whether to overwrite the default resource mapping. + annotations: + A dictionary of annotations to apply to the cluster. + volumes: + A list of V1Volume objects to add to the Cluster + volume_mounts: + A list of V1VolumeMount objects to add to the Cluster + enable_gcs_ft: + A boolean indicating whether to enable GCS fault tolerance. + enable_usage_stats: + A boolean indicating whether to capture and send Ray usage stats externally. + redis_address: + The address of the Redis server to use for GCS fault tolerance, required when enable_gcs_ft is True. + redis_password_secret: + Kubernetes secret reference containing Redis password. ex: {"name": "secret-name", "key": "password-key"} + external_storage_namespace: + The storage namespace to use for GCS fault tolerance. By default, KubeRay sets it to the UID of RayCluster. 
+ """ + + name: str + namespace: Optional[str] = None + head_cpu_requests: Union[int, str] = 2 + head_cpu_limits: Union[int, str] = 2 + head_memory_requests: Union[int, str] = 8 + head_memory_limits: Union[int, str] = 8 + head_extended_resource_requests: Dict[str, Union[str, int]] = field( + default_factory=dict + ) + head_tolerations: Optional[List[V1Toleration]] = None + worker_cpu_requests: Union[int, str] = 1 + worker_cpu_limits: Union[int, str] = 1 + num_workers: int = 1 + worker_memory_requests: Union[int, str] = 2 + worker_memory_limits: Union[int, str] = 2 + worker_tolerations: Optional[List[V1Toleration]] = None + appwrapper: bool = False + envs: Dict[str, str] = field(default_factory=dict) + image: str = "" + image_pull_secrets: List[str] = field(default_factory=list) + write_to_file: bool = False + verify_tls: bool = True + labels: Dict[str, str] = field(default_factory=dict) + worker_extended_resource_requests: Dict[str, Union[str, int]] = field( + default_factory=dict + ) + extended_resource_mapping: Dict[str, str] = field(default_factory=dict) + overwrite_default_resource_mapping: bool = False + local_queue: Optional[str] = None + annotations: Dict[str, str] = field(default_factory=dict) + volumes: list[V1Volume] = field(default_factory=list) + volume_mounts: list[V1VolumeMount] = field(default_factory=list) + enable_gcs_ft: bool = False + enable_usage_stats: bool = False + redis_address: Optional[str] = None + redis_password_secret: Optional[Dict[str, str]] = None + external_storage_namespace: Optional[str] = None + + def __post_init__(self): + if not self.verify_tls: + print( + "Warning: TLS verification has been disabled - Endpoint checks will be bypassed" + ) + + if self.enable_usage_stats: + self.envs["RAY_USAGE_STATS_ENABLED"] = "1" + else: + self.envs["RAY_USAGE_STATS_ENABLED"] = "0" + + if self.enable_gcs_ft: + if not self.redis_address: + raise ValueError( + "redis_address must be provided when enable_gcs_ft is True" + ) + + if 
self.redis_password_secret and not isinstance( + self.redis_password_secret, dict + ): + raise ValueError( + "redis_password_secret must be a dictionary with 'name' and 'key' fields" + ) + + if self.redis_password_secret and ( + "name" not in self.redis_password_secret + or "key" not in self.redis_password_secret + ): + raise ValueError( + "redis_password_secret must contain both 'name' and 'key' fields" + ) + + self._validate_types() + self._memory_to_string() + self._str_mem_no_unit_add_GB() + self._combine_extended_resource_mapping() + self._validate_extended_resource_requests(self.head_extended_resource_requests) + self._validate_extended_resource_requests( + self.worker_extended_resource_requests + ) + + def _combine_extended_resource_mapping(self): + if overwritten := set(self.extended_resource_mapping.keys()).intersection( + DEFAULT_RESOURCE_MAPPING.keys() + ): + if self.overwrite_default_resource_mapping: + warnings.warn( + f"Overwriting default resource mapping for {overwritten}", + UserWarning, + ) + else: + raise ValueError( + f"Resource mapping already exists for {overwritten}, set overwrite_default_resource_mapping to True to overwrite" + ) + self.extended_resource_mapping = { + **DEFAULT_RESOURCE_MAPPING, + **self.extended_resource_mapping, + } + + def _validate_extended_resource_requests(self, extended_resources: Dict[str, int]): + for k in extended_resources.keys(): + if k not in self.extended_resource_mapping.keys(): + raise ValueError( + f"extended resource '{k}' not found in extended_resource_mapping, available resources are {list(self.extended_resource_mapping.keys())}, to add more supported resources use extended_resource_mapping. i.e. 
extended_resource_mapping = {{'{k}': 'FOO_BAR'}}" + ) + + def _str_mem_no_unit_add_GB(self): + if ( + isinstance(self.worker_memory_requests, str) + and self.worker_memory_requests.isdecimal() + ): + self.worker_memory_requests = f"{self.worker_memory_requests}G" + if ( + isinstance(self.worker_memory_limits, str) + and self.worker_memory_limits.isdecimal() + ): + self.worker_memory_limits = f"{self.worker_memory_limits}G" + + def _memory_to_string(self): + if isinstance(self.head_memory_requests, int): + self.head_memory_requests = f"{self.head_memory_requests}G" + if isinstance(self.head_memory_limits, int): + self.head_memory_limits = f"{self.head_memory_limits}G" + if isinstance(self.worker_memory_requests, int): + self.worker_memory_requests = f"{self.worker_memory_requests}G" + if isinstance(self.worker_memory_limits, int): + self.worker_memory_limits = f"{self.worker_memory_limits}G" + + def _validate_types(self): + """Validate the types of all fields in the ClusterConfiguration dataclass.""" + errors = [] + for field_info in fields(self): + value = getattr(self, field_info.name) + expected_type = field_info.type + if not self._is_type(value, expected_type): + errors.append(f"'{field_info.name}' should be of type {expected_type}.") + + if errors: + raise TypeError("Type validation failed:\n" + "\n".join(errors)) + + @staticmethod + def _is_type(value, expected_type): + """Check if the value matches the expected type.""" + + def check_type(value, expected_type): + origin_type = get_origin(expected_type) + args = get_args(expected_type) + if origin_type is Union: + return any(check_type(value, union_type) for union_type in args) + if origin_type is list: + if value is not None: + return all(check_type(elem, args[0]) for elem in (value or [])) + else: + return True + if origin_type is dict: + if value is not None: + return all( + check_type(k, args[0]) and check_type(v, args[1]) + for k, v in value.items() + ) + else: + return True + if origin_type is tuple: + 
return all(check_type(elem, etype) for elem, etype in zip(value, args)) + if expected_type is int: + return isinstance(value, int) and not isinstance(value, bool) + if expected_type is bool: + return isinstance(value, bool) + return isinstance(value, expected_type) + + return check_type(value, expected_type) diff --git a/src/codeflare_sdk/utils/pretty_print.py b/src/codeflare_sdk/ray/cluster/pretty_print.py similarity index 84% rename from src/codeflare_sdk/utils/pretty_print.py rename to src/codeflare_sdk/ray/cluster/pretty_print.py index ca371182..883f14ad 100644 --- a/src/codeflare_sdk/utils/pretty_print.py +++ b/src/codeflare_sdk/ray/cluster/pretty_print.py @@ -24,7 +24,8 @@ from rich.panel import Panel from rich import box from typing import List -from ..cluster.model import RayCluster, AppWrapper, RayClusterStatus +from .status import RayCluster, RayClusterStatus +from ..appwrapper.status import AppWrapper def print_no_resources_found(): @@ -56,6 +57,30 @@ def print_app_wrappers_status(app_wrappers: List[AppWrapper], starting: bool = F console.print(Panel.fit(table)) +def print_ray_clusters_status(app_wrappers: List[AppWrapper], starting: bool = False): + if not app_wrappers: + print_no_resources_found() + return # shortcircuit + + console = Console() + table = Table( + box=box.ASCII_DOUBLE_HEAD, + title="[bold] :rocket: Cluster Queue Status :rocket:", + ) + table.add_column("Name", style="cyan", no_wrap=True) + table.add_column("Status", style="magenta") + + for app_wrapper in app_wrappers: + name = app_wrapper.name + status = app_wrapper.status.value + if starting: + status += " (starting)" + table.add_row(name, status) + table.add_row("") # empty row for spacing + + console.print(Panel.fit(table)) + + def print_cluster_status(cluster: RayCluster): "Pretty prints the status of a passed-in cluster" if not cluster: @@ -70,15 +95,11 @@ def print_cluster_status(cluster: RayCluster): ) name = cluster.name dashboard = cluster.dashboard - # owned = 
bool(cluster["userOwned"]) - owned = True #'table0' to display the cluster name, status, url, and dashboard link table0 = Table(box=None, show_header=False) - if owned: - table0.add_row("[white on green][bold]Name") - else: - table0.add_row("") + + table0.add_row("[white on green][bold]Name") table0.add_row("[bold underline]" + name, status) table0.add_row() # fixme harcded to default for now @@ -115,19 +136,15 @@ def print_clusters(clusters: List[RayCluster]): ) name = cluster.name dashboard = cluster.dashboard - workers = str(cluster.workers) - memory = str(cluster.worker_mem_min) + "~" + str(cluster.worker_mem_max) - cpu = str(cluster.worker_cpu) - gpu = str(cluster.worker_gpu) - # owned = bool(cluster["userOwned"]) - owned = True + workers = str(cluster.num_workers) + memory = f"{cluster.worker_mem_requests}~{cluster.worker_mem_limits}" + cpu = f"{cluster.worker_cpu_requests}~{cluster.worker_cpu_limits}" + gpu = str(cluster.worker_extended_resources.get("nvidia.com/gpu", 0)) #'table0' to display the cluster name, status, url, and dashboard link table0 = Table(box=None, show_header=False) - if owned: - table0.add_row("[white on green][bold]Name") - else: - table0.add_row("") + + table0.add_row("[white on green][bold]Name") table0.add_row("[bold underline]" + name, status) table0.add_row() # fixme harcded to default for now diff --git a/src/codeflare_sdk/cluster/model.py b/src/codeflare_sdk/ray/cluster/status.py similarity index 57% rename from src/codeflare_sdk/cluster/model.py rename to src/codeflare_sdk/ray/cluster/status.py index 639cc734..136ae302 100644 --- a/src/codeflare_sdk/cluster/model.py +++ b/src/codeflare_sdk/ray/cluster/status.py @@ -1,4 +1,4 @@ -# Copyright 2022 IBM, Red Hat +# Copyright 2024 IBM, Red Hat # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,13 +13,15 @@ # limitations under the License. 
""" -The model sub-module defines Enums containing information for Ray cluster -states and AppWrapper states, and CodeFlare cluster states, as well as -dataclasses to store information for Ray clusters and AppWrappers. +The status sub-module defines Enums containing information for Ray cluster +states states, and CodeFlare cluster states, as well as +dataclasses to store information for Ray clusters. """ -from dataclasses import dataclass +from dataclasses import dataclass, field from enum import Enum +import typing +from typing import Union class RayClusterStatus(Enum): @@ -27,25 +29,12 @@ class RayClusterStatus(Enum): Defines the possible reportable states of a Ray cluster. """ - # https://github.com/ray-project/kuberay/blob/master/ray-operator/apis/ray/v1alpha1/raycluster_types.go#L95 + # https://github.com/ray-project/kuberay/blob/master/ray-operator/apis/ray/v1/raycluster_types.go#L112-L117 READY = "ready" UNHEALTHY = "unhealthy" FAILED = "failed" UNKNOWN = "unknown" - - -class AppWrapperStatus(Enum): - """ - Defines the possible reportable states of an AppWrapper. 
- """ - - QUEUEING = "queueing" - PENDING = "pending" - RUNNING = "running" - FAILED = "failed" - DELETED = "deleted" - COMPLETED = "completed" - RUNNING_HOLD_COMPLETION = "runningholdcompletion" + SUSPENDED = "suspended" class CodeFlareClusterStatus(Enum): @@ -59,6 +48,7 @@ class CodeFlareClusterStatus(Enum): QUEUEING = 4 FAILED = 5 UNKNOWN = 6 + SUSPENDED = 7 @dataclass @@ -69,22 +59,16 @@ class RayCluster: name: str status: RayClusterStatus - workers: int - worker_mem_min: str - worker_mem_max: str - worker_cpu: int - worker_gpu: int + head_cpu_requests: int + head_cpu_limits: int + head_mem_requests: str + head_mem_limits: str + num_workers: int + worker_mem_requests: str + worker_mem_limits: str + worker_cpu_requests: Union[int, str] + worker_cpu_limits: Union[int, str] namespace: str dashboard: str - - -@dataclass -class AppWrapper: - """ - For storing information about an AppWrapper. - """ - - name: str - status: AppWrapperStatus - can_run: bool - job_state: str + worker_extended_resources: typing.Dict[str, int] = field(default_factory=dict) + head_extended_resources: typing.Dict[str, int] = field(default_factory=dict) diff --git a/src/codeflare_sdk/ray/cluster/test_build_ray_cluster.py b/src/codeflare_sdk/ray/cluster/test_build_ray_cluster.py new file mode 100644 index 00000000..f970d945 --- /dev/null +++ b/src/codeflare_sdk/ray/cluster/test_build_ray_cluster.py @@ -0,0 +1,110 @@ +# Copyright 2024 IBM, Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from collections import namedtuple +import sys +from .build_ray_cluster import gen_names, update_image, build_ray_cluster +import uuid +from codeflare_sdk.ray.cluster.cluster import ClusterConfiguration, Cluster + + +def test_gen_names_with_name(mocker): + mocker.patch.object( + uuid, "uuid4", return_value=uuid.UUID("00000000-0000-0000-0000-000000000001") + ) + name = "myname" + appwrapper_name, cluster_name = gen_names(name) + assert appwrapper_name == name + assert cluster_name == name + + +def test_gen_names_without_name(mocker): + mocker.patch.object( + uuid, "uuid4", return_value=uuid.UUID("00000000-0000-0000-0000-000000000001") + ) + appwrapper_name, cluster_name = gen_names(None) + assert appwrapper_name.startswith("appwrapper-") + assert cluster_name.startswith("cluster-") + + +def test_update_image_without_supported_python_version(mocker): + # Mock SUPPORTED_PYTHON_VERSIONS + mocker.patch.dict( + "codeflare_sdk.ray.cluster.build_ray_cluster.SUPPORTED_PYTHON_VERSIONS", + { + "3.11": "ray-py3.11", + "3.12": "ray-py3.12", + }, + ) + + # Create a namedtuple to mock sys.version_info + VersionInfo = namedtuple( + "version_info", ["major", "minor", "micro", "releaselevel", "serial"] + ) + mocker.patch.object(sys, "version_info", VersionInfo(3, 8, 0, "final", 0)) + + # Mock warnings.warn to check if it gets called + warn_mock = mocker.patch("warnings.warn") + + # Call the update_image function with no image provided + image = update_image(None) + + # Assert that the warning was called with the expected message + warn_mock.assert_called_once_with( + "No default Ray image defined for 3.8. Please provide your own image or use one of the following python versions: 3.11, 3.12." 
+ ) + + # Assert that no image was set since the Python version is not supported + assert image is None + + +def test_build_ray_cluster_with_gcs_ft(mocker): + mocker.patch("kubernetes.config.load_kube_config", return_value="ignore") + mocker.patch("kubernetes.client.CustomObjectsApi.list_namespaced_custom_object") + + cluster = Cluster( + ClusterConfiguration( + name="test", + namespace="ns", + enable_gcs_ft=True, + redis_address="redis:6379", + redis_password_secret={"name": "redis-password-secret", "key": "password"}, + external_storage_namespace="new-ns", + ) + ) + + mocker.patch("codeflare_sdk.ray.cluster.build_ray_cluster.config_check") + mocker.patch( + "codeflare_sdk.ray.cluster.build_ray_cluster.get_api_client", return_value=None + ) + mocker.patch( + "codeflare_sdk.ray.cluster.build_ray_cluster.update_image", return_value=None + ) + + resource = build_ray_cluster(cluster) + + assert "spec" in resource + assert "gcsFaultToleranceOptions" in resource["spec"] + + gcs_ft_options = resource["spec"]["gcsFaultToleranceOptions"] + + assert gcs_ft_options["redisAddress"] == "redis:6379" + assert gcs_ft_options["externalStorageNamespace"] == "new-ns" + assert ( + gcs_ft_options["redisPassword"]["valueFrom"]["secretKeyRef"]["name"] + == "redis-password-secret" + ) + assert ( + gcs_ft_options["redisPassword"]["valueFrom"]["secretKeyRef"]["key"] + == "password" + ) diff --git a/src/codeflare_sdk/ray/cluster/test_cluster.py b/src/codeflare_sdk/ray/cluster/test_cluster.py new file mode 100644 index 00000000..ce684607 --- /dev/null +++ b/src/codeflare_sdk/ray/cluster/test_cluster.py @@ -0,0 +1,762 @@ +# Copyright 2024 IBM, Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from codeflare_sdk.ray.cluster.cluster import ( + Cluster, + ClusterConfiguration, + get_cluster, + list_all_queued, +) +from codeflare_sdk.common.utils.unit_test_support import ( + create_cluster, + arg_check_del_effect, + ingress_retrieval, + arg_check_apply_effect, + get_local_queue, + create_cluster_config, + get_ray_obj, + get_obj_none, + get_ray_obj_with_status, + get_aw_obj_with_status, + patch_cluster_with_dynamic_client, + route_list_retrieval, +) +from codeflare_sdk.ray.cluster.cluster import _is_openshift_cluster +from pathlib import Path +from unittest.mock import MagicMock +from kubernetes import client +import yaml +import filecmp +import os + +parent = Path(__file__).resolve().parents[4] # project directory +expected_clusters_dir = f"{parent}/tests/test_cluster_yamls" +aw_dir = os.path.expanduser("~/.codeflare/resources/") + + +def test_cluster_up_down(mocker): + mocker.patch("kubernetes.client.ApisApi.get_api_versions") + mocker.patch("kubernetes.config.load_kube_config", return_value="ignore") + mocker.patch("codeflare_sdk.ray.cluster.cluster.Cluster._throw_for_no_raycluster") + mocker.patch( + "kubernetes.client.CustomObjectsApi.get_cluster_custom_object", + return_value={"spec": {"domain": ""}}, + ) + mocker.patch( + "kubernetes.client.CustomObjectsApi.create_namespaced_custom_object", + side_effect=arg_check_apply_effect, + ) + mocker.patch( + "kubernetes.client.CustomObjectsApi.delete_namespaced_custom_object", + side_effect=arg_check_del_effect, + ) + mocker.patch( + "kubernetes.client.CustomObjectsApi.list_cluster_custom_object", + 
return_value={"items": []}, + ) + mocker.patch( + "kubernetes.client.CustomObjectsApi.list_namespaced_custom_object", + return_value=get_local_queue("kueue.x-k8s.io", "v1beta1", "ns", "localqueues"), + ) + cluster = create_cluster(mocker) + cluster.up() + cluster.down() + + +def test_cluster_apply_scale_up_scale_down(mocker): + mocker.patch("kubernetes.client.ApisApi.get_api_versions") + mocker.patch("kubernetes.config.load_kube_config", return_value="ignore") + mock_dynamic_client = mocker.Mock() + mocker.patch( + "kubernetes.dynamic.DynamicClient.resources", new_callable=mocker.PropertyMock + ) + mocker.patch( + "codeflare_sdk.ray.cluster.cluster.Cluster.create_resource", + return_value="./tests/test_cluster_yamls/ray/default-ray-cluster.yaml", + ) + mocker.patch("kubernetes.config.load_kube_config", return_value="ignore") + mocker.patch( + "kubernetes.client.CustomObjectsApi.get_cluster_custom_object", + return_value={"spec": {"domain": "apps.cluster.awsroute.org"}}, + ) + + # Initialize test + initial_num_workers = 1 + scaled_up_num_workers = 2 + + # Step 1: Create cluster with initial workers + cluster = create_cluster(mocker, initial_num_workers) + patch_cluster_with_dynamic_client(mocker, cluster, mock_dynamic_client) + mocker.patch( + "kubernetes.client.CustomObjectsApi.list_namespaced_custom_object", + return_value=get_obj_none("ray.io", "v1", "ns", "rayclusters"), + ) + cluster.apply() + + # Step 2: Scale up the cluster + cluster = create_cluster(mocker, scaled_up_num_workers) + patch_cluster_with_dynamic_client(mocker, cluster, mock_dynamic_client) + cluster.apply() + + # Step 3: Scale down the cluster + cluster = create_cluster(mocker, initial_num_workers) + patch_cluster_with_dynamic_client(mocker, cluster, mock_dynamic_client) + cluster.apply() + + # Tear down + cluster.down() + + +def test_cluster_apply_with_file(mocker): + mocker.patch("kubernetes.client.ApisApi.get_api_versions") + mocker.patch("kubernetes.config.load_kube_config", 
return_value="ignore") + mock_dynamic_client = mocker.Mock() + mocker.patch("codeflare_sdk.ray.cluster.cluster.Cluster._throw_for_no_raycluster") + mocker.patch( + "kubernetes.dynamic.DynamicClient.resources", new_callable=mocker.PropertyMock + ) + mocker.patch( + "codeflare_sdk.ray.cluster.cluster.Cluster.create_resource", + return_value="./tests/test_cluster_yamls/ray/default-ray-cluster.yaml", + ) + mocker.patch("kubernetes.config.load_kube_config", return_value="ignore") + mocker.patch( + "kubernetes.client.CustomObjectsApi.get_cluster_custom_object", + return_value={"spec": {"domain": "apps.cluster.awsroute.org"}}, + ) + + # Step 1: Create cluster with initial workers + cluster = create_cluster(mocker, 1, write_to_file=True) + patch_cluster_with_dynamic_client(mocker, cluster, mock_dynamic_client) + mocker.patch( + "kubernetes.client.CustomObjectsApi.list_namespaced_custom_object", + return_value=get_obj_none("ray.io", "v1", "ns", "rayclusters"), + ) + cluster.apply() + # Tear down + cluster.down() + + +def test_cluster_apply_with_appwrapper(mocker): + # Mock Kubernetes client and dynamic client methods + mocker.patch("kubernetes.client.ApisApi.get_api_versions") + mocker.patch("kubernetes.config.load_kube_config", return_value="ignore") + mocker.patch( + "codeflare_sdk.ray.cluster.cluster._check_aw_exists", + return_value=True, + ) + mock_dynamic_client = mocker.Mock() + mocker.patch("codeflare_sdk.ray.cluster.cluster.Cluster._throw_for_no_raycluster") + mocker.patch( + "kubernetes.dynamic.DynamicClient.resources", new_callable=mocker.PropertyMock + ) + mocker.patch( + "codeflare_sdk.ray.cluster.cluster.Cluster.create_resource", + return_value="./tests/test_cluster_yamls/ray/default-ray-cluster.yaml", + ) + mocker.patch("kubernetes.config.load_kube_config", return_value="ignore") + + # Create a cluster configuration with appwrapper set to False + cluster = create_cluster(mocker, 1, write_to_file=False) + patch_cluster_with_dynamic_client(mocker, cluster, 
mock_dynamic_client) + + # Mock listing RayCluster to simulate it doesn't exist + mocker.patch( + "kubernetes.client.CustomObjectsApi.list_namespaced_custom_object", + return_value=get_obj_none("ray.io", "v1", "ns", "rayclusters"), + ) + # Call the apply method + cluster.apply() + + # Assertions + print("Cluster applied without AppWrapper.") + + +def test_cluster_apply_without_appwrapper_write_to_file(mocker): + # Mock Kubernetes client and dynamic client methods + mocker.patch("kubernetes.client.ApisApi.get_api_versions") + mocker.patch("kubernetes.config.load_kube_config", return_value="ignore") + mocker.patch( + "codeflare_sdk.ray.cluster.cluster._check_aw_exists", + return_value=True, + ) + mock_dynamic_client = mocker.Mock() + mocker.patch("codeflare_sdk.ray.cluster.cluster.Cluster._throw_for_no_raycluster") + mocker.patch( + "kubernetes.dynamic.DynamicClient.resources", new_callable=mocker.PropertyMock + ) + mocker.patch( + "codeflare_sdk.ray.cluster.cluster.Cluster.create_resource", + return_value="./tests/test_cluster_yamls/ray/default-ray-cluster.yaml", + ) + mocker.patch("kubernetes.config.load_kube_config", return_value="ignore") + + # Create a cluster configuration with appwrapper set to False + cluster = create_cluster(mocker, 1, write_to_file=True) + patch_cluster_with_dynamic_client(mocker, cluster, mock_dynamic_client) + cluster.config.appwrapper = False + + # Mock listing RayCluster to simulate it doesn't exist + mocker.patch( + "kubernetes.client.CustomObjectsApi.list_namespaced_custom_object", + return_value=get_obj_none("ray.io", "v1", "ns", "rayclusters"), + ) + # Call the apply method + cluster.apply() + + # Assertions + print("Cluster applied without AppWrapper.") + + +def test_cluster_apply_without_appwrapper(mocker): + # Mock Kubernetes client and dynamic client methods + mocker.patch("kubernetes.client.ApisApi.get_api_versions") + mocker.patch("kubernetes.config.load_kube_config", return_value="ignore") + mock_dynamic_client = 
mocker.Mock() + mocker.patch("codeflare_sdk.ray.cluster.cluster.Cluster._throw_for_no_raycluster") + mocker.patch( + "kubernetes.dynamic.DynamicClient.resources", new_callable=mocker.PropertyMock + ) + mocker.patch( + "codeflare_sdk.ray.cluster.cluster.Cluster.create_resource", + return_value="./tests/test_cluster_yamls/ray/default-ray-cluster.yaml", + ) + mocker.patch("kubernetes.config.load_kube_config", return_value="ignore") + + # Create a cluster configuration with appwrapper set to False + cluster = create_cluster(mocker, 1, write_to_file=False) + cluster.config.appwrapper = None + patch_cluster_with_dynamic_client(mocker, cluster, mock_dynamic_client) + + # Mock listing RayCluster to simulate it doesn't exist + mocker.patch( + "kubernetes.client.CustomObjectsApi.list_namespaced_custom_object", + return_value=get_obj_none("ray.io", "v1", "ns", "rayclusters"), + ) + + # Call the apply method + cluster.apply() + + # Assertions + print("Cluster applied without AppWrapper.") + + +def test_cluster_up_down_no_mcad(mocker): + mocker.patch("codeflare_sdk.ray.cluster.cluster.Cluster._throw_for_no_raycluster") + mocker.patch("kubernetes.config.load_kube_config", return_value="ignore") + mocker.patch("kubernetes.client.ApisApi.get_api_versions") + mocker.patch( + "kubernetes.client.CustomObjectsApi.list_namespaced_custom_object", + return_value=get_local_queue("kueue.x-k8s.io", "v1beta1", "ns", "localqueues"), + ) + mocker.patch( + "kubernetes.client.CustomObjectsApi.create_namespaced_custom_object", + side_effect=arg_check_apply_effect, + ) + mocker.patch( + "kubernetes.client.CustomObjectsApi.delete_namespaced_custom_object", + side_effect=arg_check_del_effect, + ) + mocker.patch( + "kubernetes.client.CoreV1Api.create_namespaced_secret", + ) + mocker.patch( + "kubernetes.client.CoreV1Api.delete_namespaced_secret", + ) + mocker.patch( + "kubernetes.client.CustomObjectsApi.list_cluster_custom_object", + return_value={"items": []}, + ) + config = create_cluster_config() 
+ config.name = "unit-test-cluster-ray" + config.appwrapper = False + cluster = Cluster(config) + cluster.up() + cluster.down() + + +def test_cluster_uris(mocker): + mocker.patch("kubernetes.client.ApisApi.get_api_versions") + mocker.patch("kubernetes.config.load_kube_config", return_value="ignore") + mocker.patch( + "codeflare_sdk.ray.cluster.cluster._get_ingress_domain", + return_value="apps.cluster.awsroute.org", + ) + mocker.patch( + "kubernetes.client.CustomObjectsApi.list_namespaced_custom_object", + return_value=get_local_queue("kueue.x-k8s.io", "v1beta1", "ns", "localqueues"), + ) + cluster = create_cluster(mocker) + mocker.patch( + "kubernetes.client.NetworkingV1Api.list_namespaced_ingress", + return_value=ingress_retrieval( + cluster_name="unit-test-cluster", + annotations={"route.openshift.io/termination": "passthrough"}, + ), + ) + assert ( + cluster.cluster_dashboard_uri() + == "https://ray-dashboard-unit-test-cluster-ns.apps.cluster.awsroute.org" + ) + mocker.patch( + "kubernetes.client.NetworkingV1Api.list_namespaced_ingress", + return_value=ingress_retrieval(), + ) + assert cluster.cluster_uri() == "ray://unit-test-cluster-head-svc.ns.svc:10001" + assert ( + cluster.cluster_dashboard_uri() + == "http://ray-dashboard-unit-test-cluster-ns.apps.cluster.awsroute.org" + ) + cluster.config.name = "fake" + mocker.patch( + "kubernetes.client.NetworkingV1Api.list_namespaced_ingress", + ) + assert ( + cluster.cluster_dashboard_uri() + == "Dashboard not available yet, have you run cluster.up()?" 
+ ) + + mocker.patch( + "codeflare_sdk.ray.cluster.cluster._is_openshift_cluster", return_value=True + ) + mocker.patch( + "kubernetes.client.CustomObjectsApi.list_namespaced_custom_object", + return_value={ + "items": [ + { + "metadata": { + "name": "ray-dashboard-unit-test-cluster", + }, + "spec": { + "host": "ray-dashboard-unit-test-cluster-ns.apps.cluster.awsroute.org", + "tls": {}, # Indicating HTTPS + }, + } + ] + }, + ) + cluster = create_cluster(mocker) + assert ( + cluster.cluster_dashboard_uri() + == "http://ray-dashboard-unit-test-cluster-ns.apps.cluster.awsroute.org" + ) + mocker.patch( + "kubernetes.client.CustomObjectsApi.list_namespaced_custom_object", + return_value={ + "items": [ + { + "metadata": { + "name": "ray-dashboard-unit-test-cluster", + }, + "spec": { + "host": "ray-dashboard-unit-test-cluster-ns.apps.cluster.awsroute.org", + "tls": {"termination": "passthrough"}, # Indicating HTTPS + }, + } + ] + }, + ) + cluster = create_cluster(mocker) + assert ( + cluster.cluster_dashboard_uri() + == "https://ray-dashboard-unit-test-cluster-ns.apps.cluster.awsroute.org" + ) + + +def test_ray_job_wrapping(mocker): + import ray + + def ray_addr(self, *args): + return self._address + + mocker.patch("kubernetes.client.ApisApi.get_api_versions") + mocker.patch( + "kubernetes.client.CustomObjectsApi.list_namespaced_custom_object", + return_value=get_local_queue("kueue.x-k8s.io", "v1beta1", "ns", "localqueues"), + ) + cluster = create_cluster(mocker) + mocker.patch( + "ray.job_submission.JobSubmissionClient._check_connection_and_version_with_url", + return_value="None", + ) + mock_res = mocker.patch.object( + ray.job_submission.JobSubmissionClient, "list_jobs", autospec=True + ) + mock_res.side_effect = ray_addr + mocker.patch( + "kubernetes.client.CustomObjectsApi.get_cluster_custom_object", + return_value={"spec": {"domain": ""}}, + ) + mocker.patch( + "kubernetes.client.NetworkingV1Api.list_namespaced_ingress", + return_value=ingress_retrieval(), + ) + 
assert cluster.list_jobs() == cluster.cluster_dashboard_uri() + + mock_res = mocker.patch.object( + ray.job_submission.JobSubmissionClient, "get_job_status", autospec=True + ) + mock_res.side_effect = ray_addr + assert cluster.job_status("fake_id") == cluster.cluster_dashboard_uri() + + mock_res = mocker.patch.object( + ray.job_submission.JobSubmissionClient, "get_job_logs", autospec=True + ) + mock_res.side_effect = ray_addr + assert cluster.job_logs("fake_id") == cluster.cluster_dashboard_uri() + + +def test_local_client_url(mocker): + mocker.patch( + "kubernetes.client.CustomObjectsApi.get_cluster_custom_object", + return_value={"spec": {"domain": ""}}, + ) + mocker.patch( + "codeflare_sdk.ray.cluster.cluster._get_ingress_domain", + return_value="rayclient-unit-test-cluster-localinter-ns.apps.cluster.awsroute.org", + ) + mocker.patch( + "codeflare_sdk.ray.cluster.cluster.Cluster.create_resource", + return_value="unit-test-cluster-localinter.yaml", + ) + + cluster_config = ClusterConfiguration( + name="unit-test-cluster-localinter", + namespace="ns", + ) + cluster = Cluster(cluster_config) + assert ( + cluster.local_client_url() + == "ray://rayclient-unit-test-cluster-localinter-ns.apps.cluster.awsroute.org" + ) + + +""" +get_cluster tests +""" + + +def test_get_cluster_no_appwrapper(mocker): + """ + This test uses the "test all params" unit test file as a comparison + """ + mocker.patch("kubernetes.client.ApisApi.get_api_versions") + mocker.patch("kubernetes.config.load_kube_config", return_value="ignore") + mocker.patch( + "codeflare_sdk.ray.cluster.cluster._check_aw_exists", + return_value=False, + ) + + with open(f"{expected_clusters_dir}/ray/unit-test-all-params.yaml") as f: + expected_rc = yaml.load(f, Loader=yaml.FullLoader) + mocker.patch( + "kubernetes.client.CustomObjectsApi.get_namespaced_custom_object", + return_value=expected_rc, + ) + get_cluster("test-all-params", "ns", write_to_file=True) + + with open(f"{aw_dir}test-all-params.yaml") as f: + 
generated_rc = yaml.load(f, Loader=yaml.FullLoader) + assert generated_rc == expected_rc + + +def test_get_cluster_with_appwrapper(mocker): + mocker.patch("kubernetes.client.ApisApi.get_api_versions") + mocker.patch("kubernetes.config.load_kube_config", return_value="ignore") + mocker.patch( + "codeflare_sdk.ray.cluster.cluster._check_aw_exists", + return_value=True, + ) + + with open(f"{expected_clusters_dir}/appwrapper/unit-test-all-params.yaml") as f: + expected_aw = yaml.load(f, Loader=yaml.FullLoader) + mocker.patch( + "kubernetes.client.CustomObjectsApi.get_namespaced_custom_object", + return_value=expected_aw, + ) + get_cluster("aw-all-params", "ns", write_to_file=True) + + with open(f"{aw_dir}aw-all-params.yaml") as f: + generated_aw = yaml.load(f, Loader=yaml.FullLoader) + assert generated_aw == expected_aw + + +def test_wait_ready(mocker, capsys): + from codeflare_sdk.ray.cluster.status import CodeFlareClusterStatus + + mocker.patch("kubernetes.client.ApisApi.get_api_versions") + mocker.patch( + "kubernetes.client.NetworkingV1Api.list_namespaced_ingress", + return_value=ingress_retrieval(), + ) + mocker.patch("kubernetes.config.load_kube_config", return_value="ignore") + mocker.patch( + "codeflare_sdk.ray.cluster.cluster._app_wrapper_status", return_value=None + ) + mocker.patch( + "codeflare_sdk.ray.cluster.cluster._ray_cluster_status", return_value=None + ) + mocker.patch.object( + client.CustomObjectsApi, + "list_namespaced_custom_object", + return_value={ + "items": [ + { + "metadata": {"name": "ray-dashboard-test"}, + "spec": {"host": "mocked-host"}, + } + ] + }, + ) + mock_response = mocker.Mock() + mock_response.status_code = 200 + mocker.patch("requests.get", return_value=mock_response) + cf = Cluster( + ClusterConfiguration( + name="test", + namespace="ns", + write_to_file=False, + appwrapper=True, + ) + ) + try: + cf.wait_ready(timeout=5) + assert 1 == 0 + except Exception as e: + assert type(e) == TimeoutError + + captured = capsys.readouterr() 
+ assert ( + "WARNING: Current cluster status is unknown, have you run cluster.up yet?" + in captured.out + ) + mocker.patch( + "codeflare_sdk.ray.cluster.cluster.Cluster.status", + return_value=(True, CodeFlareClusterStatus.READY), + ) + cf.wait_ready() + captured = capsys.readouterr() + assert ( + captured.out + == "Waiting for requested resources to be set up...\nRequested cluster is up and running!\nDashboard is ready!\n" + ) + cf.wait_ready(dashboard_check=False) + captured = capsys.readouterr() + assert ( + captured.out + == "Waiting for requested resources to be set up...\nRequested cluster is up and running!\n" + ) + + +def test_list_queue_appwrappers(mocker, capsys): + mocker.patch("kubernetes.config.load_kube_config", return_value="ignore") + mocker.patch( + "kubernetes.client.CustomObjectsApi.list_namespaced_custom_object", + return_value=get_obj_none( + "workload.codeflare.dev", "v1beta2", "ns", "appwrappers" + ), + ) + list_all_queued("ns", appwrapper=True) + captured = capsys.readouterr() + assert captured.out == ( + "╭──────────────────────────────────────────────────────────────────────────────╮\n" + "│ No resources found, have you run cluster.up() yet? 
│\n" + "╰──────────────────────────────────────────────────────────────────────────────╯\n" + ) + mocker.patch( + "kubernetes.client.CustomObjectsApi.list_namespaced_custom_object", + return_value=get_aw_obj_with_status( + "workload.codeflare.dev", "v1beta2", "ns", "appwrappers" + ), + ) + list_all_queued("ns", appwrapper=True) + captured = capsys.readouterr() + print(captured.out) + assert captured.out == ( + "╭────────────────────────────────╮\n" + "│ 🚀 Cluster Queue Status 🚀 │\n" + "│ +----------------+-----------+ │\n" + "│ | Name | Status | │\n" + "│ +================+===========+ │\n" + "│ | test-cluster-a | running | │\n" + "│ | | | │\n" + "│ | test-cluster-b | suspended | │\n" + "│ | | | │\n" + "│ +----------------+-----------+ │\n" + "╰────────────────────────────────╯\n" + ) + + +def test_list_queue_rayclusters(mocker, capsys): + mocker.patch("kubernetes.config.load_kube_config", return_value="ignore") + mock_api = MagicMock() + mock_api.get_api_versions.return_value.groups = [ + MagicMock(versions=[MagicMock(group_version="route.openshift.io/v1")]) + ] + mocker.patch("kubernetes.client.ApisApi", return_value=mock_api) + + assert _is_openshift_cluster() == True + mocker.patch( + "kubernetes.client.CustomObjectsApi.list_namespaced_custom_object", + return_value=get_obj_none("ray.io", "v1", "ns", "rayclusters"), + ) + + list_all_queued("ns") + captured = capsys.readouterr() + assert captured.out == ( + "╭──────────────────────────────────────────────────────────────────────────────╮\n" + "│ No resources found, have you run cluster.up() yet? 
│\n" + "╰──────────────────────────────────────────────────────────────────────────────╯\n" + ) + mocker.patch( + "kubernetes.client.CustomObjectsApi.list_namespaced_custom_object", + return_value=get_ray_obj_with_status("ray.io", "v1", "ns", "rayclusters"), + ) + + list_all_queued("ns") + captured = capsys.readouterr() + # print(captured.out) -> useful for updating the test + assert captured.out == ( + "╭────────────────────────────────╮\n" + "│ 🚀 Cluster Queue Status 🚀 │\n" + "│ +----------------+-----------+ │\n" + "│ | Name | Status | │\n" + "│ +================+===========+ │\n" + "│ | test-cluster-a | ready | │\n" + "│ | | | │\n" + "│ | test-rc-b | suspended | │\n" + "│ | | | │\n" + "│ +----------------+-----------+ │\n" + "╰────────────────────────────────╯\n" + ) + + +def test_list_clusters(mocker, capsys): + from codeflare_sdk.ray.cluster.cluster import list_all_clusters + + mocker.patch("kubernetes.config.load_kube_config", return_value="ignore") + mocker.patch("kubernetes.client.ApisApi.get_api_versions") + mocker.patch( + "kubernetes.client.CustomObjectsApi.list_namespaced_custom_object", + side_effect=get_obj_none, + ) + mocker.patch( + "kubernetes.client.NetworkingV1Api.list_namespaced_ingress", + ) + list_all_clusters("ns") + captured = capsys.readouterr() + assert captured.out == ( + "╭──────────────────────────────────────────────────────────────────────────────╮\n" + "│ No resources found, have you run cluster.up() yet? 
│\n" + "╰──────────────────────────────────────────────────────────────────────────────╯\n" + ) + mocker.patch( + "kubernetes.client.CustomObjectsApi.list_namespaced_custom_object", + side_effect=get_ray_obj, + ) + list_all_clusters("ns") + captured = capsys.readouterr() + # print(captured.out) -> useful for updating the test + assert captured.out == ( + " 🚀 CodeFlare Cluster Details 🚀 \n" + " \n" + " ╭──────────────────────────────────────────────────────────────────╮ \n" + " │ Name │ \n" + " │ test-cluster-a Inactive ❌ │ \n" + " │ │ \n" + " │ URI: ray://test-cluster-a-head-svc.ns.svc:10001 │ \n" + " │ │ \n" + " │ Dashboard🔗 │ \n" + " │ │ \n" + " │ Cluster Resources │ \n" + " │ ╭── Workers ──╮ ╭───────── Worker specs(each) ─────────╮ │ \n" + " │ │ # Workers │ │ Memory CPU GPU │ │ \n" + " │ │ │ │ │ │ \n" + " │ │ 1 │ │ 2G~2G 1~1 0 │ │ \n" + " │ │ │ │ │ │ \n" + " │ ╰─────────────╯ ╰──────────────────────────────────────╯ │ \n" + " ╰──────────────────────────────────────────────────────────────────╯ \n" + "╭───────────────────────────────────────────────────────────────╮\n" + "│ Name │\n" + "│ test-rc-b Inactive ❌ │\n" + "│ │\n" + "│ URI: ray://test-rc-b-head-svc.ns.svc:10001 │\n" + "│ │\n" + "│ Dashboard🔗 │\n" + "│ │\n" + "│ Cluster Resources │\n" + "│ ╭── Workers ──╮ ╭───────── Worker specs(each) ─────────╮ │\n" + "│ │ # Workers │ │ Memory CPU GPU │ │\n" + "│ │ │ │ │ │\n" + "│ │ 1 │ │ 2G~2G 1~1 0 │ │\n" + "│ │ │ │ │ │\n" + "│ ╰─────────────╯ ╰──────────────────────────────────────╯ │\n" + "╰───────────────────────────────────────────────────────────────╯\n" + ) + + +def test_map_to_ray_cluster(mocker): + from codeflare_sdk.ray.cluster.cluster import _map_to_ray_cluster + + mocker.patch("kubernetes.config.load_kube_config") + + mocker.patch( + "codeflare_sdk.ray.cluster.cluster._is_openshift_cluster", return_value=True + ) + + mock_api_client = mocker.MagicMock(spec=client.ApiClient) + mocker.patch( + "codeflare_sdk.common.kubernetes_cluster.auth.get_api_client", + 
return_value=mock_api_client, + ) + + mock_routes = { + "items": [ + { + "apiVersion": "route.openshift.io/v1", + "kind": "Route", + "metadata": { + "name": "ray-dashboard-test-cluster-a", + "namespace": "ns", + }, + "spec": {"host": "ray-dashboard-test-cluster-a"}, + }, + ] + } + + def custom_side_effect(group, version, namespace, plural, **kwargs): + if plural == "routes": + return mock_routes + elif plural == "rayclusters": + return get_ray_obj("ray.io", "v1", "ns", "rayclusters") + + mocker.patch( + "kubernetes.client.CustomObjectsApi.list_namespaced_custom_object", + side_effect=custom_side_effect, + ) + + rc = get_ray_obj("ray.io", "v1", "ns", "rayclusters")["items"][0] + rc_name = rc["metadata"]["name"] + rc_dashboard = f"http://ray-dashboard-{rc_name}" + + result = _map_to_ray_cluster(rc) + + assert result is not None + assert result.dashboard == rc_dashboard + + +# Make sure to always keep this function last +def test_cleanup(): + os.remove(f"{aw_dir}test-all-params.yaml") + os.remove(f"{aw_dir}aw-all-params.yaml") diff --git a/src/codeflare_sdk/ray/cluster/test_config.py b/src/codeflare_sdk/ray/cluster/test_config.py new file mode 100644 index 00000000..e405bc5b --- /dev/null +++ b/src/codeflare_sdk/ray/cluster/test_config.py @@ -0,0 +1,232 @@ +# Copyright 2024 IBM, Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from codeflare_sdk.common.utils.unit_test_support import ( + apply_template, + get_example_extended_storage_opts, + create_cluster_wrong_type, + create_cluster_all_config_params, + get_template_variables, +) +from codeflare_sdk.ray.cluster.cluster import ClusterConfiguration, Cluster +from pathlib import Path +import filecmp +import pytest +import os +import yaml + +parent = Path(__file__).resolve().parents[4] # project directory +expected_clusters_dir = f"{parent}/tests/test_cluster_yamls" +aw_dir = os.path.expanduser("~/.codeflare/resources/") + + +def test_default_cluster_creation(mocker): + # Create a Ray Cluster using the default config variables + mocker.patch("kubernetes.client.ApisApi.get_api_versions") + mocker.patch("kubernetes.client.CustomObjectsApi.list_namespaced_custom_object") + + cluster = Cluster(ClusterConfiguration(name="default-cluster", namespace="ns")) + + expected_rc = apply_template( + f"{expected_clusters_dir}/ray/default-ray-cluster.yaml", + get_template_variables(), + ) + + assert cluster.resource_yaml == expected_rc + + +def test_default_appwrapper_creation(mocker): + # Create an AppWrapper using the default config variables + mocker.patch("kubernetes.client.ApisApi.get_api_versions") + mocker.patch("kubernetes.client.CustomObjectsApi.list_namespaced_custom_object") + + cluster = Cluster( + ClusterConfiguration(name="default-appwrapper", namespace="ns", appwrapper=True) + ) + + expected_aw = apply_template( + f"{expected_clusters_dir}/ray/default-appwrapper.yaml", get_template_variables() + ) + assert cluster.resource_yaml == expected_aw + + +@pytest.mark.filterwarnings("ignore::UserWarning") +def test_config_creation_all_parameters(mocker): + from codeflare_sdk.ray.cluster.config import DEFAULT_RESOURCE_MAPPING + + expected_extended_resource_mapping = DEFAULT_RESOURCE_MAPPING + expected_extended_resource_mapping.update({"example.com/gpu": "GPU"}) + expected_extended_resource_mapping["intel.com/gpu"] = "TPU" + volumes, volume_mounts 
= get_example_extended_storage_opts() + + cluster = create_cluster_all_config_params(mocker, "test-all-params", False) + assert cluster.config.name == "test-all-params" and cluster.config.namespace == "ns" + assert cluster.config.head_cpu_requests == 4 + assert cluster.config.head_cpu_limits == 8 + assert cluster.config.head_memory_requests == "12G" + assert cluster.config.head_memory_limits == "16G" + assert cluster.config.head_extended_resource_requests == { + "nvidia.com/gpu": 1, + "intel.com/gpu": 2, + } + assert cluster.config.worker_cpu_requests == 4 + assert cluster.config.worker_cpu_limits == 8 + assert cluster.config.num_workers == 10 + assert cluster.config.worker_memory_requests == "12G" + assert cluster.config.worker_memory_limits == "16G" + assert cluster.config.appwrapper == False + assert cluster.config.envs == { + "key1": "value1", + "key2": "value2", + "RAY_USAGE_STATS_ENABLED": "0", + } + assert cluster.config.image == "example/ray:tag" + assert cluster.config.image_pull_secrets == ["secret1", "secret2"] + assert cluster.config.write_to_file == True + assert cluster.config.verify_tls == True + assert cluster.config.labels == {"key1": "value1", "key2": "value2"} + assert cluster.config.worker_extended_resource_requests == {"nvidia.com/gpu": 1} + assert ( + cluster.config.extended_resource_mapping == expected_extended_resource_mapping + ) + assert cluster.config.overwrite_default_resource_mapping == True + assert cluster.config.local_queue == "local-queue-default" + assert cluster.config.annotations == { + "app.kubernetes.io/managed-by": "test-prefix", + "key1": "value1", + "key2": "value2", + } + assert cluster.config.volumes == volumes + assert cluster.config.volume_mounts == volume_mounts + + assert filecmp.cmp( + f"{aw_dir}test-all-params.yaml", + f"{expected_clusters_dir}/ray/unit-test-all-params.yaml", + shallow=True, + ) + + +@pytest.mark.filterwarnings("ignore::UserWarning") +def test_all_config_params_aw(mocker): + 
create_cluster_all_config_params(mocker, "aw-all-params", True) + + assert filecmp.cmp( + f"{aw_dir}aw-all-params.yaml", + f"{expected_clusters_dir}/appwrapper/unit-test-all-params.yaml", + shallow=True, + ) + + +def test_config_creation_wrong_type(): + with pytest.raises(TypeError) as error_info: + create_cluster_wrong_type() + + assert len(str(error_info.value).splitlines()) == 4 + + +def test_gcs_fault_tolerance_config_validation(): + config = ClusterConfiguration( + name="test", + namespace="ns", + enable_gcs_ft=True, + redis_address="redis:6379", + redis_password_secret={"name": "redis-password-secret", "key": "password"}, + external_storage_namespace="new-ns", + ) + + assert config.enable_gcs_ft is True + assert config.redis_address == "redis:6379" + assert config.redis_password_secret == { + "name": "redis-password-secret", + "key": "password", + } + assert config.external_storage_namespace == "new-ns" + + try: + ClusterConfiguration(name="test", namespace="ns", enable_gcs_ft=True) + except ValueError as e: + assert str(e) in "redis_address must be provided when enable_gcs_ft is True" + + try: + ClusterConfiguration( + name="test", + namespace="ns", + enable_gcs_ft=True, + redis_address="redis:6379", + redis_password_secret={"secret"}, + ) + except ValueError as e: + assert ( + str(e) + in "redis_password_secret must be a dictionary with 'name' and 'key' fields" + ) + + try: + ClusterConfiguration( + name="test", + namespace="ns", + enable_gcs_ft=True, + redis_address="redis:6379", + redis_password_secret={"wrong": "format"}, + ) + except ValueError as e: + assert ( + str(e) in "redis_password_secret must contain both 'name' and 'key' fields" + ) + + +def test_ray_usage_stats_default(mocker): + mocker.patch("kubernetes.client.ApisApi.get_api_versions") + mocker.patch("kubernetes.client.CustomObjectsApi.list_namespaced_custom_object") + + cluster = Cluster( + ClusterConfiguration(name="default-usage-stats-cluster", namespace="ns") + ) + + # Verify that usage 
stats are disabled by default + assert cluster.config.envs["RAY_USAGE_STATS_ENABLED"] == "0" + + # Check that the environment variable is set in the YAML + head_container = cluster.resource_yaml["spec"]["headGroupSpec"]["template"]["spec"][ + "containers" + ][0] + env_vars = {env["name"]: env["value"] for env in head_container["env"]} + assert env_vars["RAY_USAGE_STATS_ENABLED"] == "0" + + +def test_ray_usage_stats_enabled(mocker): + mocker.patch("kubernetes.client.ApisApi.get_api_versions") + mocker.patch("kubernetes.client.CustomObjectsApi.list_namespaced_custom_object") + + cluster = Cluster( + ClusterConfiguration( + name="usage-stats-enabled-cluster", + namespace="ns", + enable_usage_stats=True, + ) + ) + + assert cluster.config.envs["RAY_USAGE_STATS_ENABLED"] == "1" + + head_container = cluster.resource_yaml["spec"]["headGroupSpec"]["template"]["spec"][ + "containers" + ][0] + env_vars = {env["name"]: env["value"] for env in head_container["env"]} + assert env_vars["RAY_USAGE_STATS_ENABLED"] == "1" + + +# Make sure to always keep this function last +def test_cleanup(): + os.remove(f"{aw_dir}test-all-params.yaml") + os.remove(f"{aw_dir}aw-all-params.yaml") diff --git a/src/codeflare_sdk/ray/cluster/test_pretty_print.py b/src/codeflare_sdk/ray/cluster/test_pretty_print.py new file mode 100644 index 00000000..329a1354 --- /dev/null +++ b/src/codeflare_sdk/ray/cluster/test_pretty_print.py @@ -0,0 +1,209 @@ +# Copyright 2024 IBM, Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from codeflare_sdk.ray.cluster.pretty_print import ( + print_app_wrappers_status, + print_cluster_status, + print_clusters, + print_no_resources_found, +) +from codeflare_sdk.ray.appwrapper.status import AppWrapperStatus, AppWrapper +from codeflare_sdk.ray.cluster.status import ( + RayCluster, + RayClusterStatus, + CodeFlareClusterStatus, +) +from codeflare_sdk.ray.cluster.cluster import ( + Cluster, + ClusterConfiguration, + _copy_to_ray, +) +from codeflare_sdk.common.utils.unit_test_support import get_local_queue + + +def test_print_no_resources(capsys): + try: + print_no_resources_found() + except Exception: + assert 1 == 0 + captured = capsys.readouterr() + assert captured.out == ( + "╭──────────────────────────────────────────────────────────────────────────────╮\n" + "│ No resources found, have you run cluster.up() yet? │\n" + "╰──────────────────────────────────────────────────────────────────────────────╯\n" + ) + + +def test_print_appwrappers(capsys): + aw1 = AppWrapper( + name="awtest1", + status=AppWrapperStatus.SUSPENDED, + ) + aw2 = AppWrapper( + name="awtest2", + status=AppWrapperStatus.RUNNING, + ) + try: + print_app_wrappers_status([aw1, aw2]) + except Exception: + assert 1 == 0 + captured = capsys.readouterr() + assert captured.out == ( + "╭─────────────────────────╮\n" + "│ 🚀 Cluster Queue │\n" + "│ Status 🚀 │\n" + "│ +---------+-----------+ │\n" + "│ | Name | Status | │\n" + "│ +=========+===========+ │\n" + "│ | awtest1 | suspended | │\n" + "│ | | | │\n" + "│ | awtest2 | running | │\n" + "│ | | | │\n" + "│ +---------+-----------+ │\n" + "╰─────────────────────────╯\n" + ) + + +def test_ray_details(mocker, capsys): + mocker.patch("kubernetes.client.ApisApi.get_api_versions") + ray1 = RayCluster( + name="raytest1", + status=RayClusterStatus.READY, + num_workers=1, + worker_mem_requests="2G", + worker_mem_limits="2G", + worker_cpu_requests=1, 
+ worker_cpu_limits=1, + namespace="ns", + dashboard="fake-uri", + head_cpu_requests=2, + head_cpu_limits=2, + head_mem_requests=8, + head_mem_limits=8, + ) + mocker.patch( + "codeflare_sdk.ray.cluster.cluster.Cluster.status", + return_value=(False, CodeFlareClusterStatus.UNKNOWN), + ) + mocker.patch( + "codeflare_sdk.ray.cluster.cluster.Cluster.cluster_dashboard_uri", + return_value="", + ) + mocker.patch( + "kubernetes.client.CustomObjectsApi.list_namespaced_custom_object", + return_value=get_local_queue("kueue.x-k8s.io", "v1beta1", "ns", "localqueues"), + ) + cf = Cluster( + ClusterConfiguration( + name="raytest2", + namespace="ns", + appwrapper=True, + local_queue="local-queue-default", + ) + ) + captured = capsys.readouterr() + ray2 = _copy_to_ray(cf) + details = cf.details() + assert details == ray2 + assert ray2.name == "raytest2" + assert ray1.namespace == ray2.namespace + assert ray1.num_workers == ray2.num_workers + assert ray1.worker_mem_requests == ray2.worker_mem_requests + assert ray1.worker_mem_limits == ray2.worker_mem_limits + assert ray1.worker_cpu_requests == ray2.worker_cpu_requests + assert ray1.worker_cpu_limits == ray2.worker_cpu_limits + assert ray1.worker_extended_resources == ray2.worker_extended_resources + try: + print_clusters([ray1, ray2]) + print_cluster_status(ray1) + print_cluster_status(ray2) + except Exception: + assert 0 == 1 + captured = capsys.readouterr() + assert captured.out == ( + " 🚀 CodeFlare Cluster Details 🚀 \n" + " \n" + " ╭───────────────────────────────────────────────────────────────╮ \n" + " │ Name │ \n" + " │ raytest2 Inactive ❌ │ \n" + " │ │ \n" + " │ URI: ray://raytest2-head-svc.ns.svc:10001 │ \n" + " │ │ \n" + " │ Dashboard🔗 │ \n" + " │ │ \n" + " │ Cluster Resources │ \n" + " │ ╭── Workers ──╮ ╭───────── Worker specs(each) ─────────╮ │ \n" + " │ │ # Workers │ │ Memory CPU GPU │ │ \n" + " │ │ │ │ │ │ \n" + " │ │ 1 │ │ 2G~2G 1~1 0 │ │ \n" + " │ │ │ │ │ │ \n" + " │ ╰─────────────╯ 
╰──────────────────────────────────────╯ │ \n" + " ╰───────────────────────────────────────────────────────────────╯ \n" + " 🚀 CodeFlare Cluster Details 🚀 \n" + " \n" + " ╭───────────────────────────────────────────────────────────────╮ \n" + " │ Name │ \n" + " │ raytest1 Active ✅ │ \n" + " │ │ \n" + " │ URI: ray://raytest1-head-svc.ns.svc:10001 │ \n" + " │ │ \n" + " │ Dashboard🔗 │ \n" + " │ │ \n" + " │ Cluster Resources │ \n" + " │ ╭── Workers ──╮ ╭───────── Worker specs(each) ─────────╮ │ \n" + " │ │ # Workers │ │ Memory CPU GPU │ │ \n" + " │ │ │ │ │ │ \n" + " │ │ 1 │ │ 2G~2G 1~1 0 │ │ \n" + " │ │ │ │ │ │ \n" + " │ ╰─────────────╯ ╰──────────────────────────────────────╯ │ \n" + " ╰───────────────────────────────────────────────────────────────╯ \n" + "╭───────────────────────────────────────────────────────────────╮\n" + "│ Name │\n" + "│ raytest2 Inactive ❌ │\n" + "│ │\n" + "│ URI: ray://raytest2-head-svc.ns.svc:10001 │\n" + "│ │\n" + "│ Dashboard🔗 │\n" + "│ │\n" + "│ Cluster Resources │\n" + "│ ╭── Workers ──╮ ╭───────── Worker specs(each) ─────────╮ │\n" + "│ │ # Workers │ │ Memory CPU GPU │ │\n" + "│ │ │ │ │ │\n" + "│ │ 1 │ │ 2G~2G 1~1 0 │ │\n" + "│ │ │ │ │ │\n" + "│ ╰─────────────╯ ╰──────────────────────────────────────╯ │\n" + "╰───────────────────────────────────────────────────────────────╯\n" + " 🚀 CodeFlare Cluster Status 🚀 \n" + " \n" + " ╭──────────────────────────────────────────────────────────╮ \n" + " │ Name │ \n" + " │ raytest1 Active ✅ │ \n" + " │ │ \n" + " │ URI: ray://raytest1-head-svc.ns.svc:10001 │ \n" + " │ │ \n" + " │ Dashboard🔗 │ \n" + " │ │ \n" + " ╰──────────────────────────────────────────────────────────╯ \n" + " 🚀 CodeFlare Cluster Status 🚀 \n" + " \n" + " ╭────────────────────────────────────────────────────────────╮ \n" + " │ Name │ \n" + " │ raytest2 Inactive ❌ │ \n" + " │ │ \n" + " │ URI: ray://raytest2-head-svc.ns.svc:10001 │ \n" + " │ │ \n" + " │ Dashboard🔗 │ \n" + " │ │ \n" + " 
╰────────────────────────────────────────────────────────────╯ \n" + ) diff --git a/src/codeflare_sdk/ray/cluster/test_status.py b/src/codeflare_sdk/ray/cluster/test_status.py new file mode 100644 index 00000000..27eda49e --- /dev/null +++ b/src/codeflare_sdk/ray/cluster/test_status.py @@ -0,0 +1,117 @@ +# Copyright 2024 IBM, Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from codeflare_sdk.ray.cluster.cluster import ( + Cluster, + ClusterConfiguration, + _ray_cluster_status, +) +from codeflare_sdk.ray.cluster.status import ( + CodeFlareClusterStatus, + RayClusterStatus, + RayCluster, +) +import os +from ...common.utils.unit_test_support import get_local_queue + +aw_dir = os.path.expanduser("~/.codeflare/resources/") + + +def test_cluster_status(mocker): + mocker.patch("kubernetes.client.ApisApi.get_api_versions") + mocker.patch("kubernetes.config.load_kube_config", return_value="ignore") + + fake_ray = RayCluster( + name="test", + status=RayClusterStatus.UNKNOWN, + num_workers=1, + worker_mem_requests=2, + worker_mem_limits=2, + worker_cpu_requests=1, + worker_cpu_limits=1, + namespace="ns", + dashboard="fake-uri", + head_cpu_requests=2, + head_cpu_limits=2, + head_mem_requests=8, + head_mem_limits=8, + ) + + mocker.patch( + "kubernetes.client.CustomObjectsApi.list_namespaced_custom_object", + return_value=get_local_queue("kueue.x-k8s.io", "v1beta1", "ns", "localqueues"), + ) + + cf = Cluster( + ClusterConfiguration( + name="test", + namespace="ns", + 
write_to_file=True, + appwrapper=False, + local_queue="local-queue-default", + ) + ) + mocker.patch( + "codeflare_sdk.ray.cluster.cluster._ray_cluster_status", return_value=None + ) + status, ready = cf.status() + assert status == CodeFlareClusterStatus.UNKNOWN + assert ready == False + + mocker.patch( + "codeflare_sdk.ray.cluster.cluster._ray_cluster_status", return_value=fake_ray + ) + + status, ready = cf.status() + assert status == CodeFlareClusterStatus.STARTING + assert ready == False + + fake_ray.status = RayClusterStatus.FAILED + status, ready = cf.status() + assert status == CodeFlareClusterStatus.FAILED + assert ready == False + + fake_ray.status = RayClusterStatus.UNHEALTHY + status, ready = cf.status() + assert status == CodeFlareClusterStatus.FAILED + assert ready == False + + fake_ray.status = RayClusterStatus.READY + status, ready = cf.status() + assert status == CodeFlareClusterStatus.READY + assert ready == True + + +def rc_status_fields(group, version, namespace, plural, *args): + assert group == "ray.io" + assert version == "v1" + assert namespace == "test-ns" + assert plural == "rayclusters" + assert args == tuple() + return {"items": []} + + +def test_rc_status(mocker): + mocker.patch("kubernetes.config.load_kube_config", return_value="ignore") + mocker.patch( + "kubernetes.client.CustomObjectsApi.list_namespaced_custom_object", + side_effect=rc_status_fields, + ) + rc = _ray_cluster_status("test-rc", "test-ns") + assert rc == None + + +# Make sure to always keep this function last +def test_cleanup(): + os.remove(f"{aw_dir}test.yaml") diff --git a/src/codeflare_sdk/templates/base-template.yaml b/src/codeflare_sdk/templates/base-template.yaml deleted file mode 100644 index c2dddb6d..00000000 --- a/src/codeflare_sdk/templates/base-template.yaml +++ /dev/null @@ -1,335 +0,0 @@ -apiVersion: workload.codeflare.dev/v1beta1 -kind: AppWrapper -metadata: - name: aw-kuberay - namespace: default - #new addition - labels: - orderedinstance: 
"m4.xlarge_g4dn.xlarge" -spec: - priority: 9 - resources: - Items: [] - GenericItems: - - replicas: 1 - #new addition - custompodresources: - - replicas: 1 - requests: - cpu: 2 - memory: 8G - nvidia.com/gpu: 0 - limits: - cpu: 2 - memory: 8G - nvidia.com/gpu: 0 - - replicas: 3 - requests: - cpu: 2 - memory: 12G - nvidia.com/gpu: 1 - limits: - cpu: 2 - memory: 12G - nvidia.com/gpu: 1 - generictemplate: - # This config demonstrates KubeRay's Ray autoscaler integration. - # The resource requests and limits in this config are too small for production! - # For an example with more realistic resource configuration, see - # ray-cluster.autoscaler.large.yaml. - apiVersion: ray.io/v1alpha1 - kind: RayCluster - metadata: - labels: - appwrapper.mcad.ibm.com: "aw-kuberay" - controller-tools.k8s.io: "1.0" - # A unique identifier for the head node and workers of this cluster. - name: kuberay-cluster - # finalizers: - # - kubernetes - spec: - # The version of Ray you are using. Make sure all Ray containers are running this version of Ray. - rayVersion: '2.5.0' - # If enableInTreeAutoscaling is true, the autoscaler sidecar will be added to the Ray head pod. - # Ray autoscaler integration is supported only for Ray versions >= 1.11.0 - # Ray autoscaler integration is Beta with KubeRay >= 0.3.0 and Ray >= 2.0.0. - enableInTreeAutoscaling: false - # autoscalerOptions is an OPTIONAL field specifying configuration overrides for the Ray autoscaler. - # The example configuration shown below below represents the DEFAULT values. - # (You may delete autoscalerOptions if the defaults are suitable.) - autoscalerOptions: - # upscalingMode is "Default" or "Aggressive." - # Conservative: Upscaling is rate-limited; the number of pending worker pods is at most the size of the Ray cluster. - # Default: Upscaling is not rate-limited. - # Aggressive: An alias for Default; upscaling is not rate-limited. 
- upscalingMode: Default - # idleTimeoutSeconds is the number of seconds to wait before scaling down a worker pod which is not using Ray resources. - idleTimeoutSeconds: 60 - # image optionally overrides the autoscaler's container image. - # If instance.spec.rayVersion is at least "2.0.0", the autoscaler will default to the same image as - # the ray container. For older Ray versions, the autoscaler will default to using the Ray 2.0.0 image. - ## image: "my-repo/my-custom-autoscaler-image:tag" - # imagePullPolicy optionally overrides the autoscaler container's image pull policy. - imagePullPolicy: Always - # resources specifies optional resource request and limit overrides for the autoscaler container. - # For large Ray clusters, we recommend monitoring container resource usage to determine if overriding the defaults is required. - resources: - limits: - cpu: "500m" - memory: "512Mi" - requests: - cpu: "500m" - memory: "512Mi" - ######################headGroupSpec################################# - # head group template and specs, (perhaps 'group' is not needed in the name) - headGroupSpec: - # Kubernetes Service Type, valid values are 'ClusterIP', 'NodePort' and 'LoadBalancer' - serviceType: ClusterIP - # logical group name, for this called head-group, also can be functional - # pod type head or worker - # rayNodeType: head # Not needed since it is under the headgroup - # the following params are used to complete the ray start: ray start --head --block ... - rayStartParams: - # Flag "no-monitor" will be automatically set when autoscaling is enabled. - dashboard-host: '0.0.0.0' - block: 'true' - # num-cpus: '1' # can be auto-completed from the limits - # Use `resources` to optionally specify custom resource annotations for the Ray node. - # The value of `resources` is a string-integer mapping. 
- # Currently, `resources` must be provided in the specific format demonstrated below: - # resources: '"{\"Custom1\": 1, \"Custom2\": 5}"' - num-gpus: '0' - #pod template - template: - spec: - #new addition - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: aw-kuberay - operator: In - values: - - "aw-kuberay" - containers: - # The Ray head pod - - env: - - name: MY_POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: RAY_USE_TLS - value: "0" - - name: RAY_TLS_SERVER_CERT - value: /home/ray/workspace/tls/server.crt - - name: RAY_TLS_SERVER_KEY - value: /home/ray/workspace/tls/server.key - - name: RAY_TLS_CA_CERT - value: /home/ray/workspace/tls/ca.crt - name: ray-head - image: rayproject/ray:latest - imagePullPolicy: Always - ports: - - containerPort: 6379 - name: gcs - - containerPort: 8265 - name: dashboard - - containerPort: 10001 - name: client - lifecycle: - preStop: - exec: - command: ["/bin/sh","-c","ray stop"] - resources: - limits: - cpu: 2 - memory: "8G" - nvidia.com/gpu: 0 - requests: - cpu: 2 - memory: "8G" - nvidia.com/gpu: 0 - volumeMounts: - - name: ca-vol - mountPath: "/home/ray/workspace/ca" - readOnly: true - - name: server-cert - mountPath: "/home/ray/workspace/tls" - readOnly: true - initContainers: - - command: - - sh - - -c - - cd /home/ray/workspace/tls && openssl req -nodes -newkey rsa:2048 -keyout server.key -out server.csr -subj '/CN=ray-head' && printf "authorityKeyIdentifier=keyid,issuer\nbasicConstraints=CA:FALSE\nsubjectAltName = @alt_names\n[alt_names]\nDNS.1 = 127.0.0.1\nDNS.2 = localhost\nDNS.3 = ${FQ_RAY_IP}\nDNS.4 = $(awk 'END{print $1}' /etc/hosts)\nDNS.5 = rayclient-deployment-name-$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace).server-name">./domain.ext && cp /home/ray/workspace/ca/* . 
&& openssl x509 -req -CA ca.crt -CAkey ca.key -in server.csr -out server.crt -days 365 -CAcreateserial -extfile domain.ext - image: rayproject/ray:2.5.0 - name: create-cert - # securityContext: - # runAsUser: 1000 - # runAsGroup: 1000 - volumeMounts: - - name: ca-vol - mountPath: "/home/ray/workspace/ca" - readOnly: true - - name: server-cert - mountPath: "/home/ray/workspace/tls" - readOnly: false - volumes: - - name: ca-vol - secret: - secretName: ca-secret-deployment-name - optional: false - - name: server-cert - emptyDir: {} - workerGroupSpecs: - # the pod replicas in this group typed worker - - replicas: 3 - minReplicas: 3 - maxReplicas: 3 - # logical group name, for this called small-group, also can be functional - groupName: small-group - # if worker pods need to be added, we can simply increment the replicas - # if worker pods need to be removed, we decrement the replicas, and populate the podsToDelete list - # the operator will remove pods from the list until the number of replicas is satisfied - # when a pod is confirmed to be deleted, its name will be removed from the list below - #scaleStrategy: - # workersToDelete: - # - raycluster-complete-worker-small-group-bdtwh - # - raycluster-complete-worker-small-group-hv457 - # - raycluster-complete-worker-small-group-k8tj7 - # the following params are used to complete the ray start: ray start --block ... 
- rayStartParams: - block: 'true' - num-gpus: 1 - #pod template - template: - metadata: - labels: - key: value - # annotations for pod - annotations: - key: value - # finalizers: - # - kubernetes - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: aw-kuberay - operator: In - values: - - "aw-kuberay" - initContainers: - # the env var $RAY_IP is set by the operator if missing, with the value of the head service name - - name: init-myservice - image: busybox:1.28 - command: ['sh', '-c', "until nslookup $RAY_IP.$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace).svc.cluster.local; do echo waiting for myservice; sleep 2; done"] - - name: create-cert - image: rayproject/ray:2.5.0 - command: - - sh - - -c - - cd /home/ray/workspace/tls && openssl req -nodes -newkey rsa:2048 -keyout server.key -out server.csr -subj '/CN=ray-head' && printf "authorityKeyIdentifier=keyid,issuer\nbasicConstraints=CA:FALSE\nsubjectAltName = @alt_names\n[alt_names]\nDNS.1 = 127.0.0.1\nDNS.2 = localhost\nDNS.3 = ${FQ_RAY_IP}\nDNS.4 = $(awk 'END{print $1}' /etc/hosts)">./domain.ext && cp /home/ray/workspace/ca/* . && openssl x509 -req -CA ca.crt -CAkey ca.key -in server.csr -out server.crt -days 365 -CAcreateserial -extfile domain.ext - # securityContext: - # runAsUser: 1000 - # runAsGroup: 1000 - volumeMounts: - - name: ca-vol - mountPath: "/home/ray/workspace/ca" - readOnly: true - - name: server-cert - mountPath: "/home/ray/workspace/tls" - readOnly: false - containers: - - name: machine-learning # must consist of lower case alphanumeric characters or '-', and must start and end with an alphanumeric character (e.g. 
'my-name', or '123-abc' - image: rayproject/ray:latest - env: - - name: MY_POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: RAY_USE_TLS - value: "0" - - name: RAY_TLS_SERVER_CERT - value: /home/ray/workspace/tls/server.crt - - name: RAY_TLS_SERVER_KEY - value: /home/ray/workspace/tls/server.key - - name: RAY_TLS_CA_CERT - value: /home/ray/workspace/tls/ca.crt - # environment variables to set in the container.Optional. - # Refer to https://kubernetes.io/docs/tasks/inject-data-application/define-environment-variable-container/ - lifecycle: - preStop: - exec: - command: ["/bin/sh","-c","ray stop"] - resources: - limits: - cpu: "2" - memory: "12G" - nvidia.com/gpu: "1" - requests: - cpu: "2" - memory: "12G" - nvidia.com/gpu: "1" - volumeMounts: - - name: ca-vol - mountPath: "/home/ray/workspace/ca" - readOnly: true - - name: server-cert - mountPath: "/home/ray/workspace/tls" - readOnly: true - volumes: - - name: ca-vol - secret: - secretName: ca-secret-deployment-name - optional: false - - name: server-cert - emptyDir: {} - - replicas: 1 - generictemplate: - kind: Route - apiVersion: route.openshift.io/v1 - metadata: - name: ray-dashboard-deployment-name - namespace: default - labels: - # allows me to return name of service that Ray operator creates - odh-ray-cluster-service: deployment-name-head-svc - spec: - to: - kind: Service - name: deployment-name-head-svc - port: - targetPort: dashboard - - replicas: 1 - generictemplate: - apiVersion: route.openshift.io/v1 - kind: Route - metadata: - name: rayclient-deployment-name - namespace: default - labels: - # allows me to return name of service that Ray operator creates - odh-ray-cluster-service: deployment-name-head-svc - spec: - port: - targetPort: client - tls: - termination: passthrough - to: - kind: Service - name: deployment-name-head-svc - - replicas: 1 - generictemplate: - apiVersion: v1 - data: - ca.crt: generated_crt - ca.key: generated_key - kind: Secret - metadata: - name: 
ca-secret-deployment-name - labels: - # allows me to return name of service that Ray operator creates - odh-ray-cluster-service: deployment-name-head-svc diff --git a/src/codeflare_sdk/utils/generate_yaml.py b/src/codeflare_sdk/utils/generate_yaml.py deleted file mode 100755 index f128ef8b..00000000 --- a/src/codeflare_sdk/utils/generate_yaml.py +++ /dev/null @@ -1,401 +0,0 @@ -# Copyright 2022 IBM, Red Hat -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -This sub-module exists primarily to be used internally by the Cluster object -(in the cluster sub-module) for AppWrapper generation. 
-""" - -import yaml -import sys -import argparse -import uuid -from kubernetes import client, config -from .kube_api_helpers import _kube_api_error_handling -from ..cluster.auth import api_config_handler - - -def read_template(template): - with open(template, "r") as stream: - try: - return yaml.safe_load(stream) - except yaml.YAMLError as exc: - print(exc) - - -def gen_names(name): - if not name: - gen_id = str(uuid.uuid4()) - appwrapper_name = "appwrapper-" + gen_id - cluster_name = "cluster-" + gen_id - return appwrapper_name, cluster_name - else: - return name, name - - -def update_dashboard_route(route_item, cluster_name, namespace): - metadata = route_item.get("generictemplate", {}).get("metadata") - metadata["name"] = f"ray-dashboard-{cluster_name}" - metadata["namespace"] = namespace - metadata["labels"]["odh-ray-cluster-service"] = f"{cluster_name}-head-svc" - spec = route_item.get("generictemplate", {}).get("spec") - spec["to"]["name"] = f"{cluster_name}-head-svc" - - -# ToDo: refactor the update_x_route() functions -def update_rayclient_route(route_item, cluster_name, namespace): - metadata = route_item.get("generictemplate", {}).get("metadata") - metadata["name"] = f"rayclient-{cluster_name}" - metadata["namespace"] = namespace - metadata["labels"]["odh-ray-cluster-service"] = f"{cluster_name}-head-svc" - spec = route_item.get("generictemplate", {}).get("spec") - spec["to"]["name"] = f"{cluster_name}-head-svc" - - -def update_names(yaml, item, appwrapper_name, cluster_name, namespace): - metadata = yaml.get("metadata") - metadata["name"] = appwrapper_name - metadata["namespace"] = namespace - lower_meta = item.get("generictemplate", {}).get("metadata") - lower_meta["labels"]["appwrapper.mcad.ibm.com"] = appwrapper_name - lower_meta["name"] = cluster_name - lower_meta["namespace"] = namespace - - -def update_labels(yaml, instascale, instance_types): - metadata = yaml.get("metadata") - if instascale: - if not len(instance_types) > 0: - sys.exit( - "If 
instascale is set to true, must provide at least one instance type" - ) - type_str = "" - for type in instance_types: - type_str += type + "_" - type_str = type_str[:-1] - metadata["labels"]["orderedinstance"] = type_str - else: - metadata.pop("labels") - - -def update_priority(yaml, item, dispatch_priority, priority_val): - spec = yaml.get("spec") - if dispatch_priority is not None: - if priority_val: - spec["priority"] = priority_val - else: - raise ValueError( - "AW generation error: Priority value is None, while dispatch_priority is defined" - ) - head = item.get("generictemplate").get("spec").get("headGroupSpec") - worker = item.get("generictemplate").get("spec").get("workerGroupSpecs")[0] - head["template"]["spec"]["priorityClassName"] = dispatch_priority - worker["template"]["spec"]["priorityClassName"] = dispatch_priority - else: - spec.pop("priority") - - -def update_custompodresources( - item, min_cpu, max_cpu, min_memory, max_memory, gpu, workers -): - if "custompodresources" in item.keys(): - custompodresources = item.get("custompodresources") - for i in range(len(custompodresources)): - if i == 0: - # Leave head node resources as template default - continue - resource = custompodresources[i] - for k, v in resource.items(): - if k == "replicas" and i == 1: - resource[k] = workers - if k == "requests" or k == "limits": - for spec, _ in v.items(): - if spec == "cpu": - if k == "limits": - resource[k][spec] = max_cpu - else: - resource[k][spec] = min_cpu - if spec == "memory": - if k == "limits": - resource[k][spec] = str(max_memory) + "G" - else: - resource[k][spec] = str(min_memory) + "G" - if spec == "nvidia.com/gpu": - if i == 0: - resource[k][spec] = 0 - else: - resource[k][spec] = gpu - else: - sys.exit("Error: malformed template") - - -def update_affinity(spec, appwrapper_name, instascale): - if instascale: - node_selector_terms = ( - spec.get("affinity") - .get("nodeAffinity") - .get("requiredDuringSchedulingIgnoredDuringExecution") - 
.get("nodeSelectorTerms") - ) - node_selector_terms[0]["matchExpressions"][0]["values"][0] = appwrapper_name - node_selector_terms[0]["matchExpressions"][0]["key"] = appwrapper_name - else: - spec.pop("affinity") - - -def update_image(spec, image): - containers = spec.get("containers") - for container in containers: - container["image"] = image - - -def update_image_pull_secrets(spec, image_pull_secrets): - template_secrets = spec.get("imagePullSecrets", []) - spec["imagePullSecrets"] = template_secrets + [ - {"name": x} for x in image_pull_secrets - ] - - -def update_env(spec, env): - containers = spec.get("containers") - for container in containers: - if env: - if "env" in container: - container["env"].extend(env) - else: - container["env"] = env - - -def update_resources(spec, min_cpu, max_cpu, min_memory, max_memory, gpu): - container = spec.get("containers") - for resource in container: - requests = resource.get("resources").get("requests") - if requests is not None: - requests["cpu"] = min_cpu - requests["memory"] = str(min_memory) + "G" - requests["nvidia.com/gpu"] = gpu - limits = resource.get("resources").get("limits") - if limits is not None: - limits["cpu"] = max_cpu - limits["memory"] = str(max_memory) + "G" - limits["nvidia.com/gpu"] = gpu - - -def update_nodes( - item, - appwrapper_name, - min_cpu, - max_cpu, - min_memory, - max_memory, - gpu, - workers, - image, - instascale, - env, - image_pull_secrets, -): - if "generictemplate" in item.keys(): - head = item.get("generictemplate").get("spec").get("headGroupSpec") - worker = item.get("generictemplate").get("spec").get("workerGroupSpecs")[0] - - # Head counts as first worker - worker["replicas"] = workers - worker["minReplicas"] = workers - worker["maxReplicas"] = workers - worker["groupName"] = "small-group-" + appwrapper_name - worker["rayStartParams"]["num-gpus"] = str(int(gpu)) - - for comp in [head, worker]: - spec = comp.get("template").get("spec") - update_affinity(spec, appwrapper_name, 
instascale) - update_image_pull_secrets(spec, image_pull_secrets) - update_image(spec, image) - update_env(spec, env) - if comp == head: - # TODO: Eventually add head node configuration outside of template - continue - else: - update_resources(spec, min_cpu, max_cpu, min_memory, max_memory, gpu) - - -def update_ca_secret(ca_secret_item, cluster_name, namespace): - from . import generate_cert - - metadata = ca_secret_item.get("generictemplate", {}).get("metadata") - metadata["name"] = f"ca-secret-{cluster_name}" - metadata["namespace"] = namespace - metadata["labels"]["odh-ray-cluster-service"] = f"{cluster_name}-head-svc" - data = ca_secret_item.get("generictemplate", {}).get("data") - data["ca.key"], data["ca.crt"] = generate_cert.generate_ca_cert(365) - - -def enable_local_interactive(resources, cluster_name, namespace): - rayclient_route_item = resources["resources"].get("GenericItems")[2] - ca_secret_item = resources["resources"].get("GenericItems")[3] - item = resources["resources"].get("GenericItems")[0] - update_rayclient_route(rayclient_route_item, cluster_name, namespace) - update_ca_secret(ca_secret_item, cluster_name, namespace) - # update_ca_secret_volumes - item["generictemplate"]["spec"]["headGroupSpec"]["template"]["spec"]["volumes"][0][ - "secret" - ]["secretName"] = f"ca-secret-{cluster_name}" - item["generictemplate"]["spec"]["workerGroupSpecs"][0]["template"]["spec"][ - "volumes" - ][0]["secret"]["secretName"] = f"ca-secret-{cluster_name}" - # update_tls_env - item["generictemplate"]["spec"]["headGroupSpec"]["template"]["spec"]["containers"][ - 0 - ]["env"][1]["value"] = "1" - item["generictemplate"]["spec"]["workerGroupSpecs"][0]["template"]["spec"][ - "containers" - ][0]["env"][1]["value"] = "1" - # update_init_container - command = item["generictemplate"]["spec"]["headGroupSpec"]["template"]["spec"][ - "initContainers" - ][0].get("command")[2] - - command = command.replace("deployment-name", cluster_name) - try: - config.load_kube_config() - 
api_client = client.CustomObjectsApi(api_config_handler()) - ingress = api_client.get_cluster_custom_object( - "config.openshift.io", "v1", "ingresses", "cluster" - ) - except Exception as e: # pragma: no cover - return _kube_api_error_handling(e) - domain = ingress["spec"]["domain"] - command = command.replace("server-name", domain) - - item["generictemplate"]["spec"]["headGroupSpec"]["template"]["spec"][ - "initContainers" - ][0].get("command")[2] = command - - -def disable_raycluster_tls(resources): - generic_template_spec = resources["GenericItems"][0]["generictemplate"]["spec"] - - if "volumes" in generic_template_spec["headGroupSpec"]["template"]["spec"]: - del generic_template_spec["headGroupSpec"]["template"]["spec"]["volumes"] - - if ( - "volumeMounts" - in generic_template_spec["headGroupSpec"]["template"]["spec"]["containers"][0] - ): - del generic_template_spec["headGroupSpec"]["template"]["spec"]["containers"][0][ - "volumeMounts" - ] - - if "initContainers" in generic_template_spec["headGroupSpec"]["template"]["spec"]: - del generic_template_spec["headGroupSpec"]["template"]["spec"]["initContainers"] - - if "volumes" in generic_template_spec["workerGroupSpecs"][0]["template"]["spec"]: - del generic_template_spec["workerGroupSpecs"][0]["template"]["spec"]["volumes"] - - if ( - "volumeMounts" - in generic_template_spec["workerGroupSpecs"][0]["template"]["spec"][ - "containers" - ][0] - ): - del generic_template_spec["workerGroupSpecs"][0]["template"]["spec"][ - "containers" - ][0]["volumeMounts"] - - for i in range( - len( - generic_template_spec["workerGroupSpecs"][0]["template"]["spec"][ - "initContainers" - ] - ) - ): - if ( - generic_template_spec["workerGroupSpecs"][0]["template"]["spec"][ - "initContainers" - ][i]["name"] - == "create-cert" - ): - del generic_template_spec["workerGroupSpecs"][0]["template"]["spec"][ - "initContainers" - ][i] - - updated_items = [] - for i in resources["GenericItems"][:]: - if "rayclient-deployment-name" in 
i["generictemplate"]["metadata"]["name"]: - continue - if "ca-secret-deployment-name" in i["generictemplate"]["metadata"]["name"]: - continue - updated_items.append(i) - - resources["GenericItems"] = updated_items - - -def write_user_appwrapper(user_yaml, output_file_name): - with open(output_file_name, "w") as outfile: - yaml.dump(user_yaml, outfile, default_flow_style=False) - print(f"Written to: {output_file_name}") - - -def generate_appwrapper( - name: str, - namespace: str, - min_cpu: int, - max_cpu: int, - min_memory: int, - max_memory: int, - gpu: int, - workers: int, - template: str, - image: str, - instascale: bool, - instance_types: list, - env, - local_interactive: bool, - image_pull_secrets: list, - dispatch_priority: str, - priority_val: int, -): - user_yaml = read_template(template) - appwrapper_name, cluster_name = gen_names(name) - resources = user_yaml.get("spec", "resources") - item = resources["resources"].get("GenericItems")[0] - route_item = resources["resources"].get("GenericItems")[1] - update_names(user_yaml, item, appwrapper_name, cluster_name, namespace) - update_labels(user_yaml, instascale, instance_types) - update_priority(user_yaml, item, dispatch_priority, priority_val) - update_custompodresources( - item, min_cpu, max_cpu, min_memory, max_memory, gpu, workers - ) - update_nodes( - item, - appwrapper_name, - min_cpu, - max_cpu, - min_memory, - max_memory, - gpu, - workers, - image, - instascale, - env, - image_pull_secrets, - ) - update_dashboard_route(route_item, cluster_name, namespace) - if local_interactive: - enable_local_interactive(resources, cluster_name, namespace) - else: - disable_raycluster_tls(resources["resources"]) - outfile = appwrapper_name + ".yaml" - write_user_appwrapper(user_yaml, outfile) - return outfile diff --git a/src/codeflare_sdk/utils/kube_api_helpers.py b/src/codeflare_sdk/utils/kube_api_helpers.py deleted file mode 100644 index 58358a05..00000000 --- a/src/codeflare_sdk/utils/kube_api_helpers.py +++ 
/dev/null @@ -1,44 +0,0 @@ -# Copyright 2022 IBM, Red Hat -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -This sub-module exists primarily to be used internally for any Kubernetes -API error handling or wrapping. -""" - -import executing -from kubernetes import client, config - - -# private methods -def _kube_api_error_handling(e: Exception): # pragma: no cover - perm_msg = ( - "Action not permitted, have you put in correct/up-to-date auth credentials?" - ) - nf_msg = "No instances found, nothing to be done." - exists_msg = "Resource with this name already exists." 
- if type(e) == config.ConfigException: - raise PermissionError(perm_msg) - if type(e) == executing.executing.NotOneValueFound: - print(nf_msg) - return - if type(e) == client.ApiException: - if e.reason == "Not Found": - print(nf_msg) - return - elif e.reason == "Unauthorized" or e.reason == "Forbidden": - raise PermissionError(perm_msg) - elif e.reason == "Conflict": - raise FileExistsError(exists_msg) - raise e diff --git a/src/codeflare_sdk/job/__init__.py b/tests/__init__.py similarity index 100% rename from src/codeflare_sdk/job/__init__.py rename to tests/__init__.py diff --git a/tests/auth-test.crt b/tests/auth-test.crt new file mode 100644 index 00000000..f470c632 --- /dev/null +++ b/tests/auth-test.crt @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDOTCCAiGgAwIBAgIUENjaZDrvhc5uV3j7GI8deZJwc+YwDQYJKoZIhvcNAQEL +BQAwRTELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM +GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDAeFw0yNDA1MTMxMTE1NDZaFw0yNTA1 +MTMxMTE1NDZaMEUxCzAJBgNVBAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEw +HwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQwggEiMA0GCSqGSIb3DQEB +AQUAA4IBDwAwggEKAoIBAQDEYYk81jvPijZXXeI9cByf5EIbOVaBTH7I51J9EKG5 +Y/KRXI43WgvVEiZ3jP8LJnSD79WhBiL6TgadQZje5ndroRYDM9vyqz1OUZapnOO+ +yzl01y/qSsH8Kn88eLAzkE9HSu4QN9PuJtySyksjDFQJ6kjyE8ZHUSorur0FlLLf +IToFgTuaIPDYjvFRchOCfZ7sV/MF7LxqFfFnaWOYvH41ZdvqJiRcVsMi+mYs9/I/ +I72IMXwVnQDVnK8H84ntEmHNN6NoVuMKla0So4/wKcHJSCgS3axLI2Ka2aaaJo9K +l2cn21NOyodF+DaSFy7qaGRXxoTQ2k9tUrSvxkBJvRmBAgMBAAGjITAfMB0GA1Ud +DgQWBBRTK8mO5XMcmR+Xg/PVNFnvz4eubDANBgkqhkiG9w0BAQsFAAOCAQEAlZva +6ws3zRff7u0tWT2JJaE1uPqsuAdHtVvEyAMp2QvYfyrgADTroUTaSU4p6ppX/t7v +ynHhuzR6UOVkuY0/CH1P3UUGrEPNOXT8i2BDwL+j4y2K2aRN8zU0Nu/IVePBhu+4 +Jdt+3P7/MuwiCON5JukgxUYlQKhVhzFj7GOd2+Ca+fh8Siq3tkWDSN54+90fgylQ ++74Yfya1NVabpzLqP3Isqu2XQhEVaBFvj8Yu0h83e3D8LeQToC3mVMF4yy5BZ9Ty +K66YGlGQgszWEUFPEdsB8Dj/iJMhkWXuyc3u/w0s3t7rXeMYYgr+xrEeK+g0oyB5 +xeZuMjd567Znmu5oMw== +-----END CERTIFICATE----- diff --git a/tests/demo_test.py b/tests/demo_test.py deleted 
file mode 100644 index 65324af0..00000000 --- a/tests/demo_test.py +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright 2022 IBM, Red Hat -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -These were the old tests used during initial demo building, and they will soon be fully deprecated. -""" - -from codeflare_sdk.cluster.cluster import ( - list_all_clusters, - list_all_queued, - _app_wrapper_status, -) -from codeflare_sdk.cluster.cluster import Cluster, ClusterConfiguration - -import time - -# FIXME - These tests currently assume OC logged in, and not self-contained unit/funcitonal tests - - -def test_cluster_up(): - cluster = Cluster(ClusterConfiguration(name="raycluster-autoscaler")) - cluster.up() - time.sleep(15) - - -def test_list_clusters(): - clusters = list_all_clusters() - - -def test_cluster_status(): - cluster = Cluster(ClusterConfiguration(name="raycluster-autoscaler")) - cluster.status() - - -def test_app_wrapper_status(): - print(_app_wrapper_status("raycluster-autoscaler")) - - -def test_cluster_down(): - cluster = Cluster(ClusterConfiguration(name="raycluster-autoscaler")) - cluster.down() - - -def test_no_resources_found(): - from codeflare_sdk.utils import pretty_print - - pretty_print.print_no_resources_found() - - -def test_list_app_wrappers(): - app_wrappers = list_all_queued() diff --git a/tests/e2e/cluster_apply_kind_test.py b/tests/e2e/cluster_apply_kind_test.py new file mode 100644 index 00000000..398bf73b --- /dev/null +++ 
b/tests/e2e/cluster_apply_kind_test.py @@ -0,0 +1,156 @@ +from codeflare_sdk import Cluster, ClusterConfiguration +import pytest +from kubernetes import client + +from support import ( + initialize_kubernetes_client, + create_namespace, + delete_namespace, + get_ray_cluster, +) + + +@pytest.mark.kind +class TestRayClusterApply: + def setup_method(self): + initialize_kubernetes_client(self) + + def teardown_method(self): + delete_namespace(self) + + def test_cluster_apply(self): + self.setup_method() + create_namespace(self) + + cluster_name = "test-cluster-apply" + namespace = self.namespace + + # Initial configuration with 1 worker + initial_config = ClusterConfiguration( + name=cluster_name, + namespace=namespace, + num_workers=1, + head_cpu_requests="500m", + head_cpu_limits="1", + head_memory_requests="1Gi", + head_memory_limits="2Gi", + worker_cpu_requests="500m", + worker_cpu_limits="1", + worker_memory_requests="1Gi", + worker_memory_limits="2Gi", + write_to_file=True, + verify_tls=False, + ) + + # Create the cluster + cluster = Cluster(initial_config) + cluster.apply() + + # Wait for the cluster to be ready + cluster.wait_ready() + status = cluster.status() + assert status["ready"], f"Cluster {cluster_name} is not ready: {status}" + + # Verify the cluster is created + ray_cluster = get_ray_cluster(cluster_name, namespace) + assert ray_cluster is not None, "Cluster was not created successfully" + assert ( + ray_cluster["spec"]["workerGroupSpecs"][0]["replicas"] == 1 + ), "Initial worker count does not match" + + # Update configuration with 3 workers + updated_config = ClusterConfiguration( + name=cluster_name, + namespace=namespace, + num_workers=2, + head_cpu_requests="500m", + head_cpu_limits="1", + head_memory_requests="1Gi", + head_memory_limits="2Gi", + worker_cpu_requests="500m", + worker_cpu_limits="1", + worker_memory_requests="1Gi", + worker_memory_limits="2Gi", + write_to_file=True, + verify_tls=False, + ) + + # Apply the updated configuration + 
cluster.config = updated_config + cluster.apply() + + # Wait for the updated cluster to be ready + cluster.wait_ready() + updated_status = cluster.status() + assert updated_status[ + "ready" + ], f"Cluster {cluster_name} is not ready after update: {updated_status}" + + # Verify the cluster is updated + updated_ray_cluster = get_ray_cluster(cluster_name, namespace) + assert ( + updated_ray_cluster["spec"]["workerGroupSpecs"][0]["replicas"] == 2 + ), "Worker count was not updated" + + # Clean up + cluster.down() + ray_cluster = get_ray_cluster(cluster_name, namespace) + assert ray_cluster is None, "Cluster was not deleted successfully" + + def test_apply_invalid_update(self): + self.setup_method() + create_namespace(self) + + cluster_name = "test-cluster-apply-invalid" + namespace = self.namespace + + # Initial configuration + initial_config = ClusterConfiguration( + name=cluster_name, + namespace=namespace, + num_workers=1, + head_cpu_requests="500m", + head_cpu_limits="1", + head_memory_requests="1Gi", + head_memory_limits="2Gi", + worker_cpu_requests="500m", + worker_cpu_limits="1", + worker_memory_requests="1Gi", + worker_memory_limits="2Gi", + write_to_file=True, + verify_tls=False, + ) + + # Create the cluster + cluster = Cluster(initial_config) + cluster.apply() + + # Wait for the cluster to be ready + cluster.wait_ready() + status = cluster.status() + assert status["ready"], f"Cluster {cluster_name} is not ready: {status}" + + # Update with an invalid configuration (e.g., immutable field change) + invalid_config = ClusterConfiguration( + name=cluster_name, + namespace=namespace, + num_workers=2, + head_cpu_requests="1", + head_cpu_limits="2", # Changing CPU limits (immutable) + head_memory_requests="1Gi", + head_memory_limits="2Gi", + worker_cpu_requests="500m", + worker_cpu_limits="1", + worker_memory_requests="1Gi", + worker_memory_limits="2Gi", + write_to_file=True, + verify_tls=False, + ) + + # Try to apply the invalid configuration and expect failure + 
cluster.config = invalid_config + with pytest.raises(RuntimeError, match="Immutable fields detected"): + cluster.apply() + + # Clean up + cluster.down() diff --git a/tests/e2e/heterogeneous_clusters_kind_test.py b/tests/e2e/heterogeneous_clusters_kind_test.py new file mode 100644 index 00000000..fb650176 --- /dev/null +++ b/tests/e2e/heterogeneous_clusters_kind_test.py @@ -0,0 +1,74 @@ +from time import sleep +import time +from codeflare_sdk import ( + Cluster, + ClusterConfiguration, +) + +from codeflare_sdk.common.kueue.kueue import list_local_queues + +import pytest + +from support import * + + +@pytest.mark.skip(reason="Skipping heterogenous cluster kind test") +@pytest.mark.kind +class TestHeterogeneousClustersKind: + def setup_method(self): + initialize_kubernetes_client(self) + + def teardown_method(self): + delete_namespace(self) + delete_kueue_resources(self) + + @pytest.mark.nvidia_gpu + def test_heterogeneous_clusters(self): + create_namespace(self) + create_kueue_resources(self, 2, with_labels=True, with_tolerations=True) + self.run_heterogeneous_clusters() + + def run_heterogeneous_clusters( + self, gpu_resource_name="nvidia.com/gpu", number_of_gpus=0 + ): + for flavor in self.resource_flavors: + node_labels = ( + get_flavor_spec(self, flavor).get("spec", {}).get("nodeLabels", {}) + ) + expected_nodes = get_nodes_by_label(self, node_labels) + + print(f"Expected nodes: {expected_nodes}") + cluster_name = f"test-ray-cluster-li-{flavor[-5:]}" + queues = list_local_queues(namespace=self.namespace, flavors=[flavor]) + queue_name = queues[0]["name"] if queues else None + print(f"Using flavor: {flavor}, Queue: {queue_name}") + cluster = Cluster( + ClusterConfiguration( + name=cluster_name, + namespace=self.namespace, + num_workers=1, + head_cpu_requests="500m", + head_cpu_limits="500m", + head_memory_requests=2, + head_memory_limits=2, + worker_cpu_requests="500m", + worker_cpu_limits=1, + worker_memory_requests=1, + worker_memory_limits=4, + 
worker_extended_resource_requests={ + gpu_resource_name: number_of_gpus + }, + write_to_file=True, + verify_tls=False, + local_queue=queue_name, + ) + ) + cluster.apply() + sleep(5) + node_name = get_pod_node(self, self.namespace, cluster_name) + print(f"Cluster {cluster_name}-{flavor} is running on node: {node_name}") + sleep(5) + assert ( + node_name in expected_nodes + ), f"Node {node_name} is not in the expected nodes for flavor {flavor}." + cluster.down() diff --git a/tests/e2e/heterogeneous_clusters_oauth_test.py b/tests/e2e/heterogeneous_clusters_oauth_test.py new file mode 100644 index 00000000..0fbe4df3 --- /dev/null +++ b/tests/e2e/heterogeneous_clusters_oauth_test.py @@ -0,0 +1,77 @@ +from time import sleep +import time +from codeflare_sdk import ( + Cluster, + ClusterConfiguration, + TokenAuthentication, +) + +from codeflare_sdk.common.kueue.kueue import list_local_queues + +import pytest + +from support import * + + +@pytest.mark.openshift +class TestHeterogeneousClustersOauth: + def setup_method(self): + initialize_kubernetes_client(self) + + def teardown_method(self): + delete_namespace(self) + delete_kueue_resources(self) + + def test_heterogeneous_clusters(self): + create_namespace(self) + create_kueue_resources(self, 2, with_labels=True, with_tolerations=True) + self.run_heterogeneous_clusters() + + def run_heterogeneous_clusters( + self, gpu_resource_name="nvidia.com/gpu", number_of_gpus=0 + ): + ray_image = get_ray_image() + + auth = TokenAuthentication( + token=run_oc_command(["whoami", "--show-token=true"]), + server=run_oc_command(["whoami", "--show-server=true"]), + skip_tls=True, + ) + auth.login() + + for flavor in self.resource_flavors: + node_labels = ( + get_flavor_spec(self, flavor).get("spec", {}).get("nodeLabels", {}) + ) + expected_nodes = get_nodes_by_label(self, node_labels) + + print(f"Expected nodes: {expected_nodes}") + cluster_name = f"test-ray-cluster-li-{flavor[-5:]}" + queues = list_local_queues(namespace=self.namespace, 
flavors=[flavor]) + queue_name = queues[0]["name"] if queues else None + print(f"Using flavor: {flavor}, Queue: {queue_name}") + cluster = Cluster( + ClusterConfiguration( + namespace=self.namespace, + name=cluster_name, + num_workers=1, + head_cpu_requests=1, + head_cpu_limits=1, + worker_cpu_requests=1, + worker_cpu_limits=1, + worker_memory_requests=1, + worker_memory_limits=4, + image=ray_image, + verify_tls=False, + local_queue=queue_name, + ) + ) + cluster.apply() + sleep(5) + node_name = get_pod_node(self, self.namespace, cluster_name) + print(f"Cluster {cluster_name}-{flavor} is running on node: {node_name}") + sleep(5) + assert ( + node_name in expected_nodes + ), f"Node {node_name} is not in the expected nodes for flavor {flavor}." + cluster.down() diff --git a/tests/e2e/install-codeflare-sdk.sh b/tests/e2e/install-codeflare-sdk.sh new file mode 100644 index 00000000..8ec5e1e6 --- /dev/null +++ b/tests/e2e/install-codeflare-sdk.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +cd .. + +# Install Poetry and configure virtualenvs +pip install poetry +poetry config virtualenvs.create false + +cd codeflare-sdk + +# Lock dependencies and install them +poetry lock +poetry install --with test,docs + +# Return to the workdir +cd .. 
+cd workdir diff --git a/tests/e2e/local_interactive_sdk_kind_test.py b/tests/e2e/local_interactive_sdk_kind_test.py new file mode 100644 index 00000000..1dd8a2e0 --- /dev/null +++ b/tests/e2e/local_interactive_sdk_kind_test.py @@ -0,0 +1,126 @@ +from codeflare_sdk import ( + Cluster, + ClusterConfiguration, + generate_cert, +) + +import pytest +import ray +import math +import subprocess + +from support import * + + +@pytest.mark.kind +class TestRayLocalInteractiveKind: + def setup_method(self): + initialize_kubernetes_client(self) + self.port_forward_process = None + + def cleanup_port_forward(self): + if self.port_forward_process: + self.port_forward_process.terminate() + self.port_forward_process.wait(timeout=10) + self.port_forward_process = None + + def teardown_method(self): + self.cleanup_port_forward() + delete_namespace(self) + delete_kueue_resources(self) + + def test_local_interactives(self): + self.setup_method() + create_namespace(self) + create_kueue_resources(self) + self.run_local_interactives() + + @pytest.mark.nvidia_gpu + def test_local_interactives_nvidia_gpu(self): + self.setup_method() + create_namespace(self) + create_kueue_resources(self) + self.run_local_interactives(number_of_gpus=1) + + def run_local_interactives( + self, gpu_resource_name="nvidia.com/gpu", number_of_gpus=0 + ): + cluster_name = "test-ray-cluster-li" + + ray.shutdown() + + cluster = Cluster( + ClusterConfiguration( + name=cluster_name, + namespace=self.namespace, + num_workers=1, + head_cpu_requests="500m", + head_cpu_limits="500m", + worker_cpu_requests="500m", + worker_cpu_limits=1, + worker_memory_requests=1, + worker_memory_limits=4, + worker_extended_resource_requests={gpu_resource_name: number_of_gpus}, + verify_tls=False, + ) + ) + + cluster.apply() + + cluster.wait_ready() + cluster.status() + + generate_cert.generate_tls_cert(cluster_name, self.namespace) + generate_cert.export_env(cluster_name, self.namespace) + + print(cluster.local_client_url()) + + 
@ray.remote(num_gpus=number_of_gpus / 2) + def heavy_calculation_part(num_iterations): + result = 0.0 + for i in range(num_iterations): + for j in range(num_iterations): + for k in range(num_iterations): + result += math.sin(i) * math.cos(j) * math.tan(k) + return result + + @ray.remote(num_gpus=number_of_gpus / 2) + def heavy_calculation(num_iterations): + results = ray.get( + [heavy_calculation_part.remote(num_iterations // 30) for _ in range(30)] + ) + return sum(results) + + # Attempt to port forward + try: + local_port = "20001" + ray_client_port = "10001" + + port_forward_cmd = [ + "kubectl", + "port-forward", + "-n", + self.namespace, + f"svc/{cluster_name}-head-svc", + f"{local_port}:{ray_client_port}", + ] + self.port_forward_process = subprocess.Popen( + port_forward_cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL + ) + + client_url = f"ray://localhost:{local_port}" + cluster.status() + + ray.init(address=client_url, logging_level="INFO") + + ref = heavy_calculation.remote(3000) + result = ray.get(ref) + assert ( + result == 1789.4644387076728 + ) # Updated result after moving to Python 3.12 (0.0000000000008% difference to old assertion) + ray.cancel(ref) + ray.shutdown() + + cluster.down() + finally: + self.cleanup_port_forward() diff --git a/tests/e2e/local_interactive_sdk_oauth_test.py b/tests/e2e/local_interactive_sdk_oauth_test.py new file mode 100644 index 00000000..8be0bf9c --- /dev/null +++ b/tests/e2e/local_interactive_sdk_oauth_test.py @@ -0,0 +1,90 @@ +from codeflare_sdk import ( + Cluster, + ClusterConfiguration, + TokenAuthentication, + generate_cert, +) + +import math +import pytest +import ray + +from support import * + + +@pytest.mark.openshift +class TestRayLocalInteractiveOauth: + def setup_method(self): + initialize_kubernetes_client(self) + + def teardown_method(self): + delete_namespace(self) + delete_kueue_resources(self) + + def test_local_interactives(self): + self.setup_method() + create_namespace(self) + 
create_kueue_resources(self) + self.run_local_interactives() + + def run_local_interactives(self): + ray_image = get_ray_image() + + auth = TokenAuthentication( + token=run_oc_command(["whoami", "--show-token=true"]), + server=run_oc_command(["whoami", "--show-server=true"]), + skip_tls=True, + ) + auth.login() + + cluster_name = "test-ray-cluster-li" + + cluster = Cluster( + ClusterConfiguration( + namespace=self.namespace, + name=cluster_name, + num_workers=1, + head_memory_requests=6, + head_memory_limits=8, + head_cpu_requests=1, + head_cpu_limits=1, + worker_cpu_requests=1, + worker_cpu_limits=1, + worker_memory_requests=1, + worker_memory_limits=4, + image=ray_image, + verify_tls=False, + ) + ) + cluster.apply() + cluster.wait_ready() + + generate_cert.generate_tls_cert(cluster_name, self.namespace) + generate_cert.export_env(cluster_name, self.namespace) + + ray.shutdown() + ray.init(address=cluster.local_client_url(), logging_level="DEBUG") + + @ray.remote + def heavy_calculation_part(num_iterations): + result = 0.0 + for i in range(num_iterations): + for j in range(num_iterations): + for k in range(num_iterations): + result += math.sin(i) * math.cos(j) * math.tan(k) + return result + + @ray.remote + def heavy_calculation(num_iterations): + results = ray.get( + [heavy_calculation_part.remote(num_iterations // 30) for _ in range(30)] + ) + return sum(results) + + ref = heavy_calculation.remote(3000) + result = ray.get(ref) + assert result == 1789.4644387076714 + ray.cancel(ref) + ray.shutdown() + + cluster.down() diff --git a/tests/e2e/minio_deployment.yaml b/tests/e2e/minio_deployment.yaml new file mode 100644 index 00000000..b2cdc54a --- /dev/null +++ b/tests/e2e/minio_deployment.yaml @@ -0,0 +1,128 @@ +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: minio-pvc +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi + volumeMode: Filesystem +--- +kind: Secret +apiVersion: v1 +metadata: + name: minio-secret 
+stringData: + # change the username and password to your own values. + # ensure that the user is at least 3 characters long and the password at least 8 + minio_root_user: minio + minio_root_password: minio123 +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: minio +spec: + replicas: 1 + selector: + matchLabels: + app: minio + template: + metadata: + creationTimestamp: null + labels: + app: minio + spec: + volumes: + - name: data + persistentVolumeClaim: + claimName: minio-pvc + containers: + - resources: + limits: + cpu: 250m + memory: 1Gi + requests: + cpu: 20m + memory: 100Mi + readinessProbe: + tcpSocket: + port: 9000 + initialDelaySeconds: 5 + timeoutSeconds: 1 + periodSeconds: 5 + successThreshold: 1 + failureThreshold: 3 + terminationMessagePath: /dev/termination-log + name: minio + livenessProbe: + tcpSocket: + port: 9000 + initialDelaySeconds: 30 + timeoutSeconds: 1 + periodSeconds: 5 + successThreshold: 1 + failureThreshold: 3 + env: + - name: MINIO_ROOT_USER + valueFrom: + secretKeyRef: + name: minio-secret + key: minio_root_user + - name: MINIO_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: minio-secret + key: minio_root_password + ports: + - containerPort: 9000 + protocol: TCP + - containerPort: 9090 + protocol: TCP + imagePullPolicy: IfNotPresent + volumeMounts: + - name: data + mountPath: /data + subPath: minio + terminationMessagePolicy: File + image: quay.io/minio/minio:RELEASE.2024-06-22T05-26-45Z + args: + - server + - /data + - --console-address + - :9090 + restartPolicy: Always + terminationGracePeriodSeconds: 30 + dnsPolicy: ClusterFirst + securityContext: {} + schedulerName: default-scheduler + strategy: + type: Recreate + revisionHistoryLimit: 10 + progressDeadlineSeconds: 600 +--- +kind: Service +apiVersion: v1 +metadata: + name: minio-service +spec: + ipFamilies: + - IPv4 + ports: + - name: api + protocol: TCP + port: 9000 + targetPort: 9000 + - name: ui + protocol: TCP + port: 9090 + targetPort: 9090 + 
internalTrafficPolicy: Cluster + type: ClusterIP + ipFamilyPolicy: SingleStack + sessionAffinity: None + selector: + app: minio diff --git a/tests/e2e/mnist.py b/tests/e2e/mnist.py new file mode 100644 index 00000000..143a6b6c --- /dev/null +++ b/tests/e2e/mnist.py @@ -0,0 +1,256 @@ +# Copyright 2022 IBM, Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os + +import torch +import requests +from pytorch_lightning import LightningModule, Trainer +from pytorch_lightning.callbacks.progress import TQDMProgressBar +from torch import nn +from torch.nn import functional as F +from torch.utils.data import DataLoader, random_split, RandomSampler +from torchmetrics import Accuracy +from torchvision import transforms +from torchvision.datasets import MNIST +import gzip +import shutil +from minio import Minio + + +PATH_DATASETS = os.environ.get("PATH_DATASETS", ".") +BATCH_SIZE = 256 if torch.cuda.is_available() else 64 + +local_mnist_path = os.path.dirname(os.path.abspath(__file__)) +# %% + +print("prior to running the trainer") +print("MASTER_ADDR: is ", os.getenv("MASTER_ADDR")) +print("MASTER_PORT: is ", os.getenv("MASTER_PORT")) + +print("ACCELERATOR: is ", os.getenv("ACCELERATOR")) +ACCELERATOR = os.getenv("ACCELERATOR") + +STORAGE_BUCKET_EXISTS = "AWS_DEFAULT_ENDPOINT" in os.environ +print("STORAGE_BUCKET_EXISTS: ", STORAGE_BUCKET_EXISTS) + +print( + f'Storage_Bucket_Default_Endpoint : is {os.environ.get("AWS_DEFAULT_ENDPOINT")}' + if "AWS_DEFAULT_ENDPOINT" 
in os.environ + else "" +) +print( + f'Storage_Bucket_Name : is {os.environ.get("AWS_STORAGE_BUCKET")}' + if "AWS_STORAGE_BUCKET" in os.environ + else "" +) +print( + f'Storage_Bucket_Mnist_Directory : is {os.environ.get("AWS_STORAGE_BUCKET_MNIST_DIR")}' + if "AWS_STORAGE_BUCKET_MNIST_DIR" in os.environ + else "" +) + + +class LitMNIST(LightningModule): + def __init__(self, data_dir=PATH_DATASETS, hidden_size=64, learning_rate=2e-4): + super().__init__() + + # Set our init args as class attributes + self.data_dir = data_dir + self.hidden_size = hidden_size + self.learning_rate = learning_rate + + # Hardcode some dataset specific attributes + self.num_classes = 10 + self.dims = (1, 28, 28) + channels, width, height = self.dims + self.transform = transforms.Compose( + [ + transforms.ToTensor(), + transforms.Normalize((0.1307,), (0.3081,)), + ] + ) + + # Define PyTorch model + self.model = nn.Sequential( + nn.Flatten(), + nn.Linear(channels * width * height, hidden_size), + nn.ReLU(), + nn.Dropout(0.1), + nn.Linear(hidden_size, hidden_size), + nn.ReLU(), + nn.Dropout(0.1), + nn.Linear(hidden_size, self.num_classes), + ) + + self.val_accuracy = Accuracy() + self.test_accuracy = Accuracy() + + def forward(self, x): + x = self.model(x) + return F.log_softmax(x, dim=1) + + def training_step(self, batch, batch_idx): + x, y = batch + logits = self(x) + loss = F.nll_loss(logits, y) + return loss + + def validation_step(self, batch, batch_idx): + x, y = batch + logits = self(x) + loss = F.nll_loss(logits, y) + preds = torch.argmax(logits, dim=1) + self.val_accuracy.update(preds, y) + + # Calling self.log will surface up scalars for you in TensorBoard + self.log("val_loss", loss, prog_bar=True) + self.log("val_acc", self.val_accuracy, prog_bar=True) + + def test_step(self, batch, batch_idx): + x, y = batch + logits = self(x) + loss = F.nll_loss(logits, y) + preds = torch.argmax(logits, dim=1) + self.test_accuracy.update(preds, y) + + # Calling self.log will surface up scalars 
for you in TensorBoard + self.log("test_loss", loss, prog_bar=True) + self.log("test_acc", self.test_accuracy, prog_bar=True) + + def configure_optimizers(self): + optimizer = torch.optim.Adam(self.parameters(), lr=self.learning_rate) + return optimizer + + #################### + # DATA RELATED HOOKS + #################### + + def prepare_data(self): + # download + print("Downloading MNIST dataset...") + + if ( + STORAGE_BUCKET_EXISTS + and os.environ.get("AWS_DEFAULT_ENDPOINT") != "" + and os.environ.get("AWS_DEFAULT_ENDPOINT") != None + ): + print("Using storage bucket to download datasets...") + + dataset_dir = os.path.join(self.data_dir, "MNIST/raw") + endpoint = os.environ.get("AWS_DEFAULT_ENDPOINT") + access_key = os.environ.get("AWS_ACCESS_KEY_ID") + secret_key = os.environ.get("AWS_SECRET_ACCESS_KEY") + bucket_name = os.environ.get("AWS_STORAGE_BUCKET") + + # remove prefix if specified in storage bucket endpoint url + secure = True + if endpoint.startswith("https://"): + endpoint = endpoint[len("https://") :] + elif endpoint.startswith("http://"): + endpoint = endpoint[len("http://") :] + secure = False + + client = Minio( + endpoint, + access_key=access_key, + secret_key=secret_key, + cert_check=False, + secure=secure, + ) + + if not os.path.exists(dataset_dir): + os.makedirs(dataset_dir) + else: + print(f"Directory '{dataset_dir}' already exists") + + # To download datasets from storage bucket's specific directory, use prefix to provide directory name + prefix = os.environ.get("AWS_STORAGE_BUCKET_MNIST_DIR") + # download all files from prefix folder of storage bucket recursively + for item in client.list_objects(bucket_name, prefix=prefix, recursive=True): + file_name = item.object_name[len(prefix) + 1 :] + dataset_file_path = os.path.join(dataset_dir, file_name) + if not os.path.exists(dataset_file_path): + client.fget_object(bucket_name, item.object_name, dataset_file_path) + else: + print(f"File-path '{dataset_file_path}' already exists") + # Unzip 
files + with gzip.open(dataset_file_path, "rb") as f_in: + with open(dataset_file_path.split(".")[:-1][0], "wb") as f_out: + shutil.copyfileobj(f_in, f_out) + # delete zip file + os.remove(dataset_file_path) + unzipped_filepath = dataset_file_path.split(".")[0] + if os.path.exists(unzipped_filepath): + print( + f"Unzipped and saved dataset file to path - {unzipped_filepath}" + ) + download_datasets = False + + else: + print("Using default MNIST mirror reference to download datasets...") + download_datasets = True + + MNIST(self.data_dir, train=True, download=download_datasets) + MNIST(self.data_dir, train=False, download=download_datasets) + + def setup(self, stage=None): + # Assign train/val datasets for use in dataloaders + if stage == "fit" or stage is None: + mnist_full = MNIST( + self.data_dir, train=True, transform=self.transform, download=False + ) + self.mnist_train, self.mnist_val = random_split(mnist_full, [55000, 5000]) + + # Assign test dataset for use in dataloader(s) + if stage == "test" or stage is None: + self.mnist_test = MNIST( + self.data_dir, train=False, transform=self.transform, download=False + ) + + def train_dataloader(self): + return DataLoader( + self.mnist_train, + batch_size=BATCH_SIZE, + sampler=RandomSampler(self.mnist_train, num_samples=1000), + ) + + def val_dataloader(self): + return DataLoader(self.mnist_val, batch_size=BATCH_SIZE) + + def test_dataloader(self): + return DataLoader(self.mnist_test, batch_size=BATCH_SIZE) + + +# Init DataLoader from MNIST Dataset + +model = LitMNIST(data_dir=local_mnist_path) + +print("GROUP: ", int(os.environ.get("GROUP_WORLD_SIZE", 1))) +print("LOCAL: ", int(os.environ.get("LOCAL_WORLD_SIZE", 1))) + +# Initialize a trainer +trainer = Trainer( + accelerator=ACCELERATOR, + # devices=1 if torch.cuda.is_available() else None, # limiting got iPython runs + max_epochs=3, + callbacks=[TQDMProgressBar(refresh_rate=20)], + num_nodes=int(os.environ.get("GROUP_WORLD_SIZE", 1)), + 
devices=int(os.environ.get("LOCAL_WORLD_SIZE", 1)), + replace_sampler_ddp=False, + strategy="ddp", +) + +# Train the model ⚡ +trainer.fit(model) diff --git a/tests/e2e/mnist_pip_requirements.txt b/tests/e2e/mnist_pip_requirements.txt new file mode 100644 index 00000000..60811f18 --- /dev/null +++ b/tests/e2e/mnist_pip_requirements.txt @@ -0,0 +1,4 @@ +pytorch_lightning==1.9.5 +torchmetrics==0.9.1 +torchvision==0.20.1 +minio diff --git a/tests/e2e/mnist_raycluster_sdk_aw_kind_test.py b/tests/e2e/mnist_raycluster_sdk_aw_kind_test.py new file mode 100644 index 00000000..5d06214c --- /dev/null +++ b/tests/e2e/mnist_raycluster_sdk_aw_kind_test.py @@ -0,0 +1,116 @@ +import requests + +from time import sleep + +from codeflare_sdk import Cluster, ClusterConfiguration +from codeflare_sdk.ray.client import RayJobClient + +import pytest + +from support import * + +# This test creates an AppWrapper containing a Ray Cluster and covers the Ray Job submission functionality on Kind Cluster + + +@pytest.mark.kind +class TestRayClusterSDKAppWrapperKind: + def setup_method(self): + initialize_kubernetes_client(self) + + def teardown_method(self): + delete_namespace(self) + delete_kueue_resources(self) + + def test_mnist_ray_cluster_sdk_kind(self): + self.setup_method() + create_namespace(self) + create_kueue_resources(self) + self.run_mnist_raycluster_sdk_kind(accelerator="cpu") + + @pytest.mark.nvidia_gpu + def test_mnist_ray_cluster_sdk_kind_nvidia_gpu(self): + self.setup_method() + create_namespace(self) + create_kueue_resources(self) + self.run_mnist_raycluster_sdk_kind(accelerator="gpu", number_of_gpus=1) + + def run_mnist_raycluster_sdk_kind( + self, accelerator, gpu_resource_name="nvidia.com/gpu", number_of_gpus=0 + ): + cluster = Cluster( + ClusterConfiguration( + name="mnist", + namespace=self.namespace, + num_workers=1, + head_cpu_requests="500m", + head_cpu_limits="500m", + worker_cpu_requests="500m", + worker_cpu_limits=1, + worker_memory_requests=1, + 
worker_memory_limits=4, + worker_extended_resource_requests={gpu_resource_name: number_of_gpus}, + write_to_file=True, + verify_tls=False, + appwrapper=True, + ) + ) + + cluster.apply() + + cluster.status() + + cluster.wait_ready() + + cluster.status() + + cluster.details() + + self.assert_jobsubmit_withoutlogin_kind(cluster, accelerator, number_of_gpus) + assert_get_cluster_and_jobsubmit( + self, "mnist", accelerator="gpu", number_of_gpus=1 + ) + + # Assertions + + def assert_jobsubmit_withoutlogin_kind(self, cluster, accelerator, number_of_gpus): + ray_dashboard = cluster.cluster_dashboard_uri() + client = RayJobClient(address=ray_dashboard, verify=False) + + submission_id = client.submit_job( + entrypoint="python mnist.py", + runtime_env={ + "working_dir": "./tests/e2e/", + "pip": "./tests/e2e/mnist_pip_requirements.txt", + "env_vars": get_setup_env_variables(ACCELERATOR=accelerator), + }, + entrypoint_num_gpus=number_of_gpus, + ) + print(f"Submitted job with ID: {submission_id}") + done = False + time = 0 + timeout = 900 + while not done: + status = client.get_job_status(submission_id) + if status.is_terminal(): + break + if not done: + print(status) + if timeout and time >= timeout: + raise TimeoutError(f"job has timed out after waiting {timeout}s") + sleep(5) + time += 5 + + logs = client.get_job_logs(submission_id) + print(logs) + + self.assert_job_completion(status) + + client.delete_job(submission_id) + + def assert_job_completion(self, status): + if status == "SUCCEEDED": + print(f"Job has completed: '{status}'") + assert True + else: + print(f"Job has completed: '{status}'") + assert False diff --git a/tests/e2e/mnist_raycluster_sdk_kind_test.py b/tests/e2e/mnist_raycluster_sdk_kind_test.py new file mode 100644 index 00000000..4ba728cf --- /dev/null +++ b/tests/e2e/mnist_raycluster_sdk_kind_test.py @@ -0,0 +1,116 @@ +import requests + +from time import sleep + +from codeflare_sdk import Cluster, ClusterConfiguration +from codeflare_sdk.ray.client import 
RayJobClient + +import pytest + +from support import * + +# This test creates a Ray Cluster and covers the Ray Job submission functionality on Kind Cluster + + +@pytest.mark.kind +class TestRayClusterSDKKind: + def setup_method(self): + initialize_kubernetes_client(self) + + def teardown_method(self): + delete_namespace(self) + delete_kueue_resources(self) + + def test_mnist_ray_cluster_sdk_kind(self): + self.setup_method() + create_namespace(self) + create_kueue_resources(self) + self.run_mnist_raycluster_sdk_kind(accelerator="cpu") + + @pytest.mark.nvidia_gpu + def test_mnist_ray_cluster_sdk_kind_nvidia_gpu(self): + self.setup_method() + create_namespace(self) + create_kueue_resources(self) + self.run_mnist_raycluster_sdk_kind(accelerator="gpu", number_of_gpus=1) + + def run_mnist_raycluster_sdk_kind( + self, accelerator, gpu_resource_name="nvidia.com/gpu", number_of_gpus=0 + ): + cluster = Cluster( + ClusterConfiguration( + name="mnist", + namespace=self.namespace, + num_workers=1, + head_cpu_requests="500m", + head_cpu_limits="500m", + worker_cpu_requests="500m", + worker_cpu_limits=1, + worker_memory_requests=1, + worker_memory_limits=4, + worker_extended_resource_requests={gpu_resource_name: number_of_gpus}, + write_to_file=True, + verify_tls=False, + ) + ) + + cluster.apply() + + cluster.status() + + cluster.wait_ready() + + cluster.status() + + cluster.details() + + self.assert_jobsubmit_withoutlogin_kind(cluster, accelerator, number_of_gpus) + + assert_get_cluster_and_jobsubmit( + self, "mnist", accelerator="gpu", number_of_gpus=1 + ) + + # Assertions + + def assert_jobsubmit_withoutlogin_kind(self, cluster, accelerator, number_of_gpus): + ray_dashboard = cluster.cluster_dashboard_uri() + client = RayJobClient(address=ray_dashboard, verify=False) + + submission_id = client.submit_job( + entrypoint="python mnist.py", + runtime_env={ + "working_dir": "./tests/e2e/", + "pip": "./tests/e2e/mnist_pip_requirements.txt", + "env_vars": 
get_setup_env_variables(ACCELERATOR=accelerator), + }, + entrypoint_num_gpus=number_of_gpus, + ) + print(f"Submitted job with ID: {submission_id}") + done = False + time = 0 + timeout = 900 + while not done: + status = client.get_job_status(submission_id) + if status.is_terminal(): + break + if not done: + print(status) + if timeout and time >= timeout: + raise TimeoutError(f"job has timed out after waiting {timeout}s") + sleep(5) + time += 5 + + logs = client.get_job_logs(submission_id) + print(logs) + + self.assert_job_completion(status) + + client.delete_job(submission_id) + + def assert_job_completion(self, status): + if status == "SUCCEEDED": + print(f"Job has completed: '{status}'") + assert True + else: + print(f"Job has completed: '{status}'") + assert False diff --git a/tests/e2e/mnist_raycluster_sdk_oauth_test.py b/tests/e2e/mnist_raycluster_sdk_oauth_test.py new file mode 100644 index 00000000..18447d74 --- /dev/null +++ b/tests/e2e/mnist_raycluster_sdk_oauth_test.py @@ -0,0 +1,144 @@ +import requests + +from time import sleep + +from codeflare_sdk import ( + Cluster, + ClusterConfiguration, + TokenAuthentication, +) +from codeflare_sdk.ray.client import RayJobClient + +import pytest + +from support import * + +# This test creates a Ray Cluster and covers the Ray Job submission with authentication and without authentication functionality on Openshift Cluster + + +@pytest.mark.openshift +class TestRayClusterSDKOauth: + def setup_method(self): + initialize_kubernetes_client(self) + + def teardown_method(self): + delete_namespace(self) + delete_kueue_resources(self) + + def test_mnist_ray_cluster_sdk_auth(self): + self.setup_method() + create_namespace(self) + create_kueue_resources(self) + self.run_mnist_raycluster_sdk_oauth() + + def run_mnist_raycluster_sdk_oauth(self): + ray_image = get_ray_image() + + auth = TokenAuthentication( + token=run_oc_command(["whoami", "--show-token=true"]), + server=run_oc_command(["whoami", "--show-server=true"]), + 
skip_tls=True, + ) + auth.login() + + cluster = Cluster( + ClusterConfiguration( + name="mnist", + namespace=self.namespace, + num_workers=1, + head_memory_requests=6, + head_memory_limits=8, + worker_cpu_requests=1, + worker_cpu_limits=1, + worker_memory_requests=6, + worker_memory_limits=8, + image=ray_image, + write_to_file=True, + verify_tls=False, + ) + ) + + cluster.apply() + + cluster.status() + + cluster.wait_ready() + + cluster.status() + + cluster.details() + + self.assert_jobsubmit_withoutLogin(cluster) + self.assert_jobsubmit_withlogin(cluster) + assert_get_cluster_and_jobsubmit(self, "mnist") + + # Assertions + + def assert_jobsubmit_withoutLogin(self, cluster): + dashboard_url = cluster.cluster_dashboard_uri() + jobdata = { + "entrypoint": "python mnist.py", + "runtime_env": { + "working_dir": "./tests/e2e/", + "pip": "./tests/e2e/mnist_pip_requirements.txt", + "env_vars": get_setup_env_variables(), + }, + } + try: + response = requests.post( + dashboard_url + "/api/jobs/", verify=False, json=jobdata + ) + if response.status_code == 403: + assert True + else: + response.raise_for_status() + assert False + + except Exception as e: + print(f"An unexpected error occurred. 
Error: {e}") + assert False + + def assert_jobsubmit_withlogin(self, cluster): + auth_token = run_oc_command(["whoami", "--show-token=true"]) + ray_dashboard = cluster.cluster_dashboard_uri() + header = {"Authorization": f"Bearer {auth_token}"} + client = RayJobClient(address=ray_dashboard, headers=header, verify=False) + + submission_id = client.submit_job( + entrypoint="python mnist.py", + runtime_env={ + "working_dir": "./tests/e2e/", + "pip": "./tests/e2e/mnist_pip_requirements.txt", + "env_vars": get_setup_env_variables(), + }, + entrypoint_num_cpus=1, + ) + print(f"Submitted job with ID: {submission_id}") + done = False + time = 0 + timeout = 900 + while not done: + status = client.get_job_status(submission_id) + if status.is_terminal(): + break + if not done: + print(status) + if timeout and time >= timeout: + raise TimeoutError(f"job has timed out after waiting {timeout}s") + sleep(5) + time += 5 + + logs = client.get_job_logs(submission_id) + print(logs) + + self.assert_job_completion(status) + + client.delete_job(submission_id) + + def assert_job_completion(self, status): + if status == "SUCCEEDED": + print(f"Job has completed: '{status}'") + assert True + else: + print(f"Job has completed: '{status}'") + assert False diff --git a/tests/e2e/mnist_rayjob.py b/tests/e2e/mnist_rayjob.py new file mode 100644 index 00000000..bf47e6da --- /dev/null +++ b/tests/e2e/mnist_rayjob.py @@ -0,0 +1,51 @@ +import sys + +from time import sleep + +from support import * + +from codeflare_sdk.ray.cluster.cluster import get_cluster +from codeflare_sdk.ray.client import RayJobClient + +namespace = sys.argv[1] + +cluster = get_cluster("mnist", namespace) + +cluster.details() + +auth_token = run_oc_command(["whoami", "--show-token=true"]) +ray_dashboard = cluster.cluster_dashboard_uri() +header = {"Authorization": f"Bearer {auth_token}"} +client = RayJobClient(address=ray_dashboard, headers=header, verify=True) + +# Submit the job +submission_id = client.submit_job( + 
entrypoint="python mnist.py", + runtime_env={"working_dir": "/", "pip": "requirements.txt"}, +) +print(f"Submitted job with ID: {submission_id}") +done = False +time = 0 +timeout = 900 +while not done: + status = client.get_job_status(submission_id) + if status.is_terminal(): + break + if not done: + print(status) + if timeout and time >= timeout: + raise TimeoutError(f"job has timed out after waiting {timeout}s") + sleep(5) + time += 5 + +logs = client.get_job_logs(submission_id) +print(logs) + +client.delete_job(submission_id) +cluster.down() + + +if not status == "SUCCEEDED": + exit(1) +else: + exit(0) diff --git a/tests/e2e/mnist_sleep.py b/tests/e2e/mnist_sleep.py new file mode 100644 index 00000000..628dc74c --- /dev/null +++ b/tests/e2e/mnist_sleep.py @@ -0,0 +1,72 @@ +# Copyright 2022 IBM, Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import time +import torch +import torch.nn as nn +from torch.utils.data import DataLoader +from torchvision import datasets, transforms + + +# Define a simple neural network +class NeuralNetwork(nn.Module): + def __init__(self): + super(NeuralNetwork, self).__init__() + self.flatten = nn.Flatten() + self.linear_relu_stack = nn.Sequential( + nn.Linear(28 * 28, 512), + nn.ReLU(), + nn.Linear(512, 512), + nn.ReLU(), + nn.Linear(512, 10), + ) + + def forward(self, x): + x = self.flatten(x) + logits = self.linear_relu_stack(x) + return logits + + +# Define the training function +def train(): + # Sleeping for 24 hours for upgrade test scenario + print("Sleeping for 24 hours before starting the training for upgrade testing...") + time.sleep(24 * 60 * 60) + + # Load dataset + transform = transforms.Compose([transforms.ToTensor()]) + train_dataset = datasets.FashionMNIST( + root="./data", train=True, download=True, transform=transform + ) + train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True) + + # Initialize the neural network, loss function, and optimizer + model = NeuralNetwork() + criterion = nn.CrossEntropyLoss() + optimizer = torch.optim.SGD(model.parameters(), lr=0.01) + + # Train the model + num_epochs = 3 + for epoch in range(num_epochs): + for inputs, labels in train_loader: + optimizer.zero_grad() + outputs = model(inputs) + loss = criterion(outputs, labels) + loss.backward() + optimizer.step() + print(f"Epoch [{epoch+1}/{num_epochs}], Loss: {loss.item():.4f}") + + +if __name__ == "__main__": + train() diff --git a/tests/e2e/start_ray_cluster.py b/tests/e2e/start_ray_cluster.py new file mode 100644 index 00000000..bc7f531f --- /dev/null +++ b/tests/e2e/start_ray_cluster.py @@ -0,0 +1,37 @@ +import sys +import os + +from time import sleep + +from codeflare_sdk.ray.cluster.cluster import Cluster, ClusterConfiguration + +namespace = sys.argv[1] +ray_image = os.getenv("RAY_IMAGE") + +cluster = Cluster( + ClusterConfiguration( + name="mnist", + 
namespace=namespace, + num_workers=1, + head_cpu_requests="500m", + head_cpu_limits="500m", + head_memory_requests=2, + head_memory_limits=2, + worker_cpu_requests="500m", + worker_cpu_limits=1, + worker_memory_requests=1, + worker_memory_limits=2, + image=ray_image, + appwrapper=True, + ) +) + +cluster.apply() + +cluster.status() + +cluster.wait_ready() + +cluster.status() + +cluster.details() diff --git a/tests/e2e/support.py b/tests/e2e/support.py new file mode 100644 index 00000000..fe9261a2 --- /dev/null +++ b/tests/e2e/support.py @@ -0,0 +1,411 @@ +import os +import random +import string +import subprocess +from codeflare_sdk import get_cluster +from kubernetes import client, config +from codeflare_sdk.common.kubernetes_cluster.kube_api_helpers import ( + _kube_api_error_handling, +) +from codeflare_sdk.common.utils import constants +from codeflare_sdk.common.utils.utils import get_ray_image_for_python_version + + +def get_ray_cluster(cluster_name, namespace): + api = client.CustomObjectsApi() + try: + return api.get_namespaced_custom_object( + group="ray.io", + version="v1", + namespace=namespace, + plural="rayclusters", + name=cluster_name, + ) + except client.exceptions.ApiException as e: + if e.status == 404: + return None + raise + + +def get_ray_image(): + return os.getenv( + "RAY_IMAGE", + get_ray_image_for_python_version(warn_on_unsupported=False), + ) + + +def get_setup_env_variables(**kwargs): + env_vars = dict() + + # Use input parameters provided for this function as environment variables + for key, value in kwargs.items(): + env_vars[str(key)] = value + + # Use specified pip index url instead of default(https://pypi.org/simple) if related environment variables exists + if ( + "PIP_INDEX_URL" in os.environ + and os.environ.get("PIP_INDEX_URL") != None + and os.environ.get("PIP_INDEX_URL") != "" + ): + env_vars["PIP_INDEX_URL"] = os.environ.get("PIP_INDEX_URL") + env_vars["PIP_TRUSTED_HOST"] = os.environ.get("PIP_TRUSTED_HOST") + else: + 
env_vars["PIP_INDEX_URL"] = "https://pypi.org/simple/" + env_vars["PIP_TRUSTED_HOST"] = "pypi.org" + + # Use specified storage bucket reference from which to download datasets + if ( + "AWS_DEFAULT_ENDPOINT" in os.environ + and os.environ.get("AWS_DEFAULT_ENDPOINT") != None + and os.environ.get("AWS_DEFAULT_ENDPOINT") != "" + ): + env_vars["AWS_DEFAULT_ENDPOINT"] = os.environ.get("AWS_DEFAULT_ENDPOINT") + env_vars["AWS_ACCESS_KEY_ID"] = os.environ.get("AWS_ACCESS_KEY_ID") + env_vars["AWS_SECRET_ACCESS_KEY"] = os.environ.get("AWS_SECRET_ACCESS_KEY") + env_vars["AWS_STORAGE_BUCKET"] = os.environ.get("AWS_STORAGE_BUCKET") + env_vars["AWS_STORAGE_BUCKET_MNIST_DIR"] = os.environ.get( + "AWS_STORAGE_BUCKET_MNIST_DIR" + ) + return env_vars + + +def random_choice(): + alphabet = string.ascii_lowercase + string.digits + return "".join(random.choices(alphabet, k=5)) + + +def create_namespace(self): + try: + self.namespace = f"test-ns-{random_choice()}" + namespace_body = client.V1Namespace( + metadata=client.V1ObjectMeta(name=self.namespace) + ) + self.api_instance.create_namespace(namespace_body) + except Exception as e: + return RuntimeError(e) + + +def create_new_resource_flavor(self, num_flavors, with_labels, with_tolerations): + self.resource_flavors = [] + for i in range(num_flavors): + default = i < 1 + resource_flavor = f"test-resource-flavor-{random_choice()}" + create_resource_flavor( + self, resource_flavor, default, with_labels, with_tolerations + ) + self.resource_flavors.append(resource_flavor) + + +def create_new_cluster_queue(self, num_queues): + self.cluster_queues = [] + for i in range(num_queues): + cluster_queue_name = f"test-cluster-queue-{random_choice()}" + create_cluster_queue(self, cluster_queue_name, self.resource_flavors[i]) + self.cluster_queues.append(cluster_queue_name) + + +def create_new_local_queue(self, num_queues): + self.local_queues = [] + for i in range(num_queues): + is_default = i == 0 + local_queue_name = 
f"test-local-queue-{random_choice()}" + create_local_queue(self, self.cluster_queues[i], local_queue_name, is_default) + self.local_queues.append(local_queue_name) + + +def create_namespace_with_name(self, namespace_name): + self.namespace = namespace_name + try: + namespace_body = client.V1Namespace( + metadata=client.V1ObjectMeta(name=self.namespace) + ) + self.api_instance.create_namespace(namespace_body) + except Exception as e: + return _kube_api_error_handling(e) + + +def delete_namespace(self): + if hasattr(self, "namespace"): + self.api_instance.delete_namespace(self.namespace) + + +def initialize_kubernetes_client(self): + config.load_kube_config() + # Initialize Kubernetes client + self.api_instance = client.CoreV1Api() + self.custom_api = client.CustomObjectsApi(self.api_instance.api_client) + + +def run_oc_command(args): + try: + result = subprocess.run( + ["oc"] + args, capture_output=True, text=True, check=True + ) + return result.stdout.strip() + except subprocess.CalledProcessError as e: + print(f"Error executing 'oc {' '.join(args)}': {e}") + return None + + +def create_cluster_queue(self, cluster_queue, flavor): + cluster_queue_json = { + "apiVersion": "kueue.x-k8s.io/v1beta1", + "kind": "ClusterQueue", + "metadata": {"name": cluster_queue}, + "spec": { + "namespaceSelector": {}, + "resourceGroups": [ + { + "coveredResources": ["cpu", "memory", "nvidia.com/gpu"], + "flavors": [ + { + "name": flavor, + "resources": [ + {"name": "cpu", "nominalQuota": 9}, + {"name": "memory", "nominalQuota": "36Gi"}, + {"name": "nvidia.com/gpu", "nominalQuota": 1}, + ], + }, + ], + } + ], + }, + } + + try: + # Check if cluster-queue exists + self.custom_api.get_cluster_custom_object( + group="kueue.x-k8s.io", + plural="clusterqueues", + version="v1beta1", + name=cluster_queue, + ) + print(f"'{cluster_queue}' already exists") + except: + # create cluster-queue + self.custom_api.create_cluster_custom_object( + group="kueue.x-k8s.io", + plural="clusterqueues", + 
version="v1beta1", + body=cluster_queue_json, + ) + print(f"'{cluster_queue}' created") + + self.cluster_queue = cluster_queue + + +def create_resource_flavor( + self, flavor, default=True, with_labels=False, with_tolerations=False +): + worker_label, worker_value = os.getenv("WORKER_LABEL", "worker-1=true").split("=") + control_label, control_value = os.getenv( + "CONTROL_LABEL", "ingress-ready=true" + ).split("=") + toleration_key = os.getenv( + "TOLERATION_KEY", "node-role.kubernetes.io/control-plane" + ) + + node_labels = {} + if with_labels: + node_labels = ( + {worker_label: worker_value} if default else {control_label: control_value} + ) + + resource_flavor_json = { + "apiVersion": "kueue.x-k8s.io/v1beta1", + "kind": "ResourceFlavor", + "metadata": {"name": flavor}, + "spec": { + "nodeLabels": node_labels, + **( + { + "tolerations": [ + { + "key": toleration_key, + "operator": "Exists", + "effect": "NoSchedule", + } + ] + } + if with_tolerations + else {} + ), + }, + } + + try: + # Check if resource flavor exists + self.custom_api.get_cluster_custom_object( + group="kueue.x-k8s.io", + plural="resourceflavors", + version="v1beta1", + name=flavor, + ) + print(f"'{flavor}' already exists") + except: + # create kueue resource flavor + self.custom_api.create_cluster_custom_object( + group="kueue.x-k8s.io", + plural="resourceflavors", + version="v1beta1", + body=resource_flavor_json, + ) + print(f"'{flavor}' created!") + + self.resource_flavor = flavor + + +def create_local_queue(self, cluster_queue, local_queue, is_default=True): + local_queue_json = { + "apiVersion": "kueue.x-k8s.io/v1beta1", + "kind": "LocalQueue", + "metadata": { + "namespace": self.namespace, + "name": local_queue, + "annotations": {"kueue.x-k8s.io/default-queue": str(is_default).lower()}, + }, + "spec": {"clusterQueue": cluster_queue}, + } + + try: + # Check if local-queue exists in given namespace + self.custom_api.get_namespaced_custom_object( + group="kueue.x-k8s.io", + 
namespace=self.namespace, + plural="localqueues", + version="v1beta1", + name=local_queue, + ) + print(f"'{local_queue}' already exists in namespace '{self.namespace}'") + except: + # create local-queue + self.custom_api.create_namespaced_custom_object( + group="kueue.x-k8s.io", + namespace=self.namespace, + plural="localqueues", + version="v1beta1", + body=local_queue_json, + ) + print(f"'{local_queue}' created in namespace '{self.namespace}'") + + self.local_queue = local_queue + + +def create_kueue_resources( + self, resource_ammount=1, with_labels=False, with_tolerations=False +): + print("creating Kueue resources ...") + create_new_resource_flavor(self, resource_ammount, with_labels, with_tolerations) + create_new_cluster_queue(self, resource_ammount) + create_new_local_queue(self, resource_ammount) + + +def delete_kueue_resources(self): + # Delete if given cluster-queue exists + for cq in self.cluster_queues: + try: + self.custom_api.delete_cluster_custom_object( + group="kueue.x-k8s.io", + plural="clusterqueues", + version="v1beta1", + name=cq, + ) + print(f"\n'{cq}' cluster-queue deleted") + except Exception as e: + print(f"\nError deleting cluster-queue '{cq}' : {e}") + + # Delete if given resource-flavor exists + for flavor in self.resource_flavors: + try: + self.custom_api.delete_cluster_custom_object( + group="kueue.x-k8s.io", + plural="resourceflavors", + version="v1beta1", + name=flavor, + ) + print(f"'{flavor}' resource-flavor deleted") + except Exception as e: + print(f"\nError deleting resource-flavor '{flavor}': {e}") + + +def get_pod_node(self, namespace, name): + label_selector = f"ray.io/cluster={name}" + pods = self.api_instance.list_namespaced_pod( + namespace, label_selector=label_selector + ) + if not pods.items: + raise ValueError( + f"Unable to retrieve node name for pod '{name}' in namespace '{namespace}'" + ) + pod = pods.items[0] + node_name = pod.spec.node_name + if node_name is None: + raise ValueError( + f"No node selected for pod 
'{name}' in namespace '{namespace}'" + ) + return node_name + + +def get_flavor_spec(self, flavor_name): + try: + flavor = self.custom_api.get_cluster_custom_object( + group="kueue.x-k8s.io", + version="v1beta1", + plural="resourceflavors", + name=flavor_name, + ) + return flavor + except client.exceptions.ApiException as e: + if e.status == 404: + print(f"ResourceFlavor '{flavor_name}' not found.") + else: + print(f"Error retrieving ResourceFlavor '{flavor_name}': {e}") + raise + + +def get_nodes_by_label(self, node_labels): + label_selector = ",".join(f"{k}={v}" for k, v in node_labels.items()) + nodes = self.api_instance.list_node(label_selector=label_selector) + return [node.metadata.name for node in nodes.items] + + +def assert_get_cluster_and_jobsubmit( + self, cluster_name, accelerator=None, number_of_gpus=None +): + # Retrieve the cluster + cluster = get_cluster(cluster_name, self.namespace, False) + + cluster.details() + + # Initialize the job client + client = cluster.job_client + + # Submit a job and get the submission ID + env_vars = ( + get_setup_env_variables(ACCELERATOR=accelerator) + if accelerator + else get_setup_env_variables() + ) + submission_id = client.submit_job( + entrypoint="python mnist.py", + runtime_env={ + "working_dir": "./tests/e2e/", + "pip": "./tests/e2e/mnist_pip_requirements.txt", + "env_vars": env_vars, + }, + entrypoint_num_cpus=1 if number_of_gpus is None else None, + entrypoint_num_gpus=number_of_gpus, + ) + print(f"Submitted job with ID: {submission_id}") + + # Fetch the list of jobs and validate + job_list = client.list_jobs() + print(f"List of Jobs: {job_list}") + + # Validate the number of jobs in the list + assert len(job_list) == 1 + + # Validate the submission ID matches + assert job_list[0].submission_id == submission_id + + cluster.down() diff --git a/tests/func_test.py b/tests/func_test.py deleted file mode 100644 index 6b5799c3..00000000 --- a/tests/func_test.py +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright 2022 IBM, 
Red Hat -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from pathlib import Path -import sys - -parent = Path(__file__).resolve().parents[1] -sys.path.append(str(parent) + "/src") - -# COMING SOON! diff --git a/tests/test-case-bad.yaml b/tests/test-case-bad.yaml deleted file mode 100644 index 358f756e..00000000 --- a/tests/test-case-bad.yaml +++ /dev/null @@ -1,175 +0,0 @@ -apiVersion: workload.codeflare.dev/v1beta1 -kind: AppsWrapper -metadata: - labels: - orderedinstance: cpu.small_gpu.large - nam: unit-test-cluster - namspace: ns -spec: - priority: 9 - resources: - GenericItems: - - custompodresources: - - limits: - cpu: 2 - memory: 8G - nvidia.com/gpu: 0 - replicas: 1 - requests: - cpu: 2 - memory: 8G - nvidia.com/gpu: 0 - - limits: - cpu: 4 - memory: 6G - nvidia.com/gpu: 7 - replicas: 2 - requests: - cpu: 3 - memory: 5G - nvidia.com/gpu: 7 - generictemplate: - apiVersion: ray.io/v1alpha1 - kind: RayCluster - metadata: - labels: - appwrapper.mcad.ibm.com: unit-test-cluster - controller-tools.k8s.io: '1.0' - name: unit-test-cluster - namespace: ns - spec: - autoscalerOptions: - idleTimeoutSeconds: 60 - imagePullPolicy: Always - resources: - limits: - cpu: 500m - memory: 512Mi - requests: - cpu: 500m - memory: 512Mi - upscalingMode: Default - enableInTreeAutoscaling: false - headGroupSpec: - rayStartParams: - block: 'true' - dashboard-host: 0.0.0.0 - num-gpus: '0' - serviceType: ClusterIP - template: - spec: - affinity: - nodeAffinity: - 
requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: unit-test-cluster - operator: In - values: - - unit-test-cluster - containers: - - env: - - name: MY_POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - image: quay.io/project-codeflare/ray:2.5.0-py38-cu116 - imagePullPolicy: Always - lifecycle: - preStop: - exec: - command: - - /bin/sh - - -c - - ray stop - name: ray-head - ports: - - containerPort: 6379 - name: gcs - - containerPort: 8265 - name: dashboard - - containerPort: 10001 - name: client - resources: - limits: - cpu: 2 - memory: 8G - nvidia.com/gpu: 0 - requests: - cpu: 2 - memory: 8G - nvidia.com/gpu: 0 - rayVersion: 1.12.0 - workerGroupSpecs: - - groupName: small-group-unit-test-cluster - maxReplicas: 2 - minReplicas: 2 - rayStartParams: - block: 'true' - num-gpus: '7' - replicas: 2 - template: - metadata: - annotations: - key: value - labels: - key: value - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: unit-test-cluster - operator: In - values: - - unit-test-cluster - containers: - - env: - - name: MY_POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - image: quay.io/project-codeflare/ray:2.5.0-py38-cu116 - lifecycle: - preStop: - exec: - command: - - /bin/sh - - -c - - ray stop - name: machine-learning - resources: - limits: - cpu: 4 - memory: 6G - nvidia.com/gpu: 7 - requests: - cpu: 3 - memory: 5G - nvidia.com/gpu: 7 - initContainers: - - command: - - sh - - -c - - until nslookup $RAY_IP.$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace).svc.cluster.local; - do echo waiting for myservice; sleep 2; done - image: busybox:1.28 - name: init-myservice - replicas: 1 - - generictemplate: - apiVersion: route.openshift.io/v1 - kind: Route - metadata: - labels: - odh-ray-cluster-service: unit-test-cluster-head-svc - name: ray-dashboard-unit-test-cluster - namespace: ns - spec: - port: - 
targetPort: dashboard - to: - kind: Service - name: unit-test-cluster-head-svc - replicas: 1 - Items: [] diff --git a/tests/test-case-prio.yaml b/tests/test-case-prio.yaml deleted file mode 100644 index aadfad7d..00000000 --- a/tests/test-case-prio.yaml +++ /dev/null @@ -1,197 +0,0 @@ -apiVersion: workload.codeflare.dev/v1beta1 -kind: AppWrapper -metadata: - labels: - orderedinstance: cpu.small_gpu.large - name: prio-test-cluster - namespace: ns -spec: - priority: 10 - resources: - GenericItems: - - custompodresources: - - limits: - cpu: 2 - memory: 8G - nvidia.com/gpu: 0 - replicas: 1 - requests: - cpu: 2 - memory: 8G - nvidia.com/gpu: 0 - - limits: - cpu: 4 - memory: 6G - nvidia.com/gpu: 7 - replicas: 2 - requests: - cpu: 3 - memory: 5G - nvidia.com/gpu: 7 - generictemplate: - apiVersion: ray.io/v1alpha1 - kind: RayCluster - metadata: - labels: - appwrapper.mcad.ibm.com: prio-test-cluster - controller-tools.k8s.io: '1.0' - name: prio-test-cluster - namespace: ns - spec: - autoscalerOptions: - idleTimeoutSeconds: 60 - imagePullPolicy: Always - resources: - limits: - cpu: 500m - memory: 512Mi - requests: - cpu: 500m - memory: 512Mi - upscalingMode: Default - enableInTreeAutoscaling: false - headGroupSpec: - rayStartParams: - block: 'true' - dashboard-host: 0.0.0.0 - num-gpus: '0' - serviceType: ClusterIP - template: - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: prio-test-cluster - operator: In - values: - - prio-test-cluster - containers: - - env: - - name: MY_POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: RAY_USE_TLS - value: '0' - - name: RAY_TLS_SERVER_CERT - value: /home/ray/workspace/tls/server.crt - - name: RAY_TLS_SERVER_KEY - value: /home/ray/workspace/tls/server.key - - name: RAY_TLS_CA_CERT - value: /home/ray/workspace/tls/ca.crt - image: quay.io/project-codeflare/ray:2.5.0-py38-cu116 - imagePullPolicy: Always - lifecycle: - preStop: - exec: - 
command: - - /bin/sh - - -c - - ray stop - name: ray-head - ports: - - containerPort: 6379 - name: gcs - - containerPort: 8265 - name: dashboard - - containerPort: 10001 - name: client - resources: - limits: - cpu: 2 - memory: 8G - nvidia.com/gpu: 0 - requests: - cpu: 2 - memory: 8G - nvidia.com/gpu: 0 - imagePullSecrets: - - name: unit-test-pull-secret - priorityClassName: default - rayVersion: 2.5.0 - workerGroupSpecs: - - groupName: small-group-prio-test-cluster - maxReplicas: 2 - minReplicas: 2 - rayStartParams: - block: 'true' - num-gpus: '7' - replicas: 2 - template: - metadata: - annotations: - key: value - labels: - key: value - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: prio-test-cluster - operator: In - values: - - prio-test-cluster - containers: - - env: - - name: MY_POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: RAY_USE_TLS - value: '0' - - name: RAY_TLS_SERVER_CERT - value: /home/ray/workspace/tls/server.crt - - name: RAY_TLS_SERVER_KEY - value: /home/ray/workspace/tls/server.key - - name: RAY_TLS_CA_CERT - value: /home/ray/workspace/tls/ca.crt - image: quay.io/project-codeflare/ray:2.5.0-py38-cu116 - lifecycle: - preStop: - exec: - command: - - /bin/sh - - -c - - ray stop - name: machine-learning - resources: - limits: - cpu: 4 - memory: 6G - nvidia.com/gpu: 7 - requests: - cpu: 3 - memory: 5G - nvidia.com/gpu: 7 - imagePullSecrets: - - name: unit-test-pull-secret - initContainers: - - command: - - sh - - -c - - until nslookup $RAY_IP.$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace).svc.cluster.local; - do echo waiting for myservice; sleep 2; done - image: busybox:1.28 - name: init-myservice - priorityClassName: default - replicas: 1 - - generictemplate: - apiVersion: route.openshift.io/v1 - kind: Route - metadata: - labels: - odh-ray-cluster-service: prio-test-cluster-head-svc - name: ray-dashboard-prio-test-cluster - 
namespace: ns - spec: - port: - targetPort: dashboard - to: - kind: Service - name: prio-test-cluster-head-svc - replicas: 1 - Items: [] diff --git a/tests/test-case.yaml b/tests/test-case.yaml deleted file mode 100644 index adaf03db..00000000 --- a/tests/test-case.yaml +++ /dev/null @@ -1,194 +0,0 @@ -apiVersion: workload.codeflare.dev/v1beta1 -kind: AppWrapper -metadata: - labels: - orderedinstance: cpu.small_gpu.large - name: unit-test-cluster - namespace: ns -spec: - resources: - GenericItems: - - custompodresources: - - limits: - cpu: 2 - memory: 8G - nvidia.com/gpu: 0 - replicas: 1 - requests: - cpu: 2 - memory: 8G - nvidia.com/gpu: 0 - - limits: - cpu: 4 - memory: 6G - nvidia.com/gpu: 7 - replicas: 2 - requests: - cpu: 3 - memory: 5G - nvidia.com/gpu: 7 - generictemplate: - apiVersion: ray.io/v1alpha1 - kind: RayCluster - metadata: - labels: - appwrapper.mcad.ibm.com: unit-test-cluster - controller-tools.k8s.io: '1.0' - name: unit-test-cluster - namespace: ns - spec: - autoscalerOptions: - idleTimeoutSeconds: 60 - imagePullPolicy: Always - resources: - limits: - cpu: 500m - memory: 512Mi - requests: - cpu: 500m - memory: 512Mi - upscalingMode: Default - enableInTreeAutoscaling: false - headGroupSpec: - rayStartParams: - block: 'true' - dashboard-host: 0.0.0.0 - num-gpus: '0' - serviceType: ClusterIP - template: - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: unit-test-cluster - operator: In - values: - - unit-test-cluster - containers: - - env: - - name: MY_POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: RAY_USE_TLS - value: '0' - - name: RAY_TLS_SERVER_CERT - value: /home/ray/workspace/tls/server.crt - - name: RAY_TLS_SERVER_KEY - value: /home/ray/workspace/tls/server.key - - name: RAY_TLS_CA_CERT - value: /home/ray/workspace/tls/ca.crt - image: quay.io/project-codeflare/ray:2.5.0-py38-cu116 - imagePullPolicy: Always - lifecycle: - preStop: - exec: 
- command: - - /bin/sh - - -c - - ray stop - name: ray-head - ports: - - containerPort: 6379 - name: gcs - - containerPort: 8265 - name: dashboard - - containerPort: 10001 - name: client - resources: - limits: - cpu: 2 - memory: 8G - nvidia.com/gpu: 0 - requests: - cpu: 2 - memory: 8G - nvidia.com/gpu: 0 - imagePullSecrets: - - name: unit-test-pull-secret - rayVersion: 2.5.0 - workerGroupSpecs: - - groupName: small-group-unit-test-cluster - maxReplicas: 2 - minReplicas: 2 - rayStartParams: - block: 'true' - num-gpus: '7' - replicas: 2 - template: - metadata: - annotations: - key: value - labels: - key: value - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: unit-test-cluster - operator: In - values: - - unit-test-cluster - containers: - - env: - - name: MY_POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: RAY_USE_TLS - value: '0' - - name: RAY_TLS_SERVER_CERT - value: /home/ray/workspace/tls/server.crt - - name: RAY_TLS_SERVER_KEY - value: /home/ray/workspace/tls/server.key - - name: RAY_TLS_CA_CERT - value: /home/ray/workspace/tls/ca.crt - image: quay.io/project-codeflare/ray:2.5.0-py38-cu116 - lifecycle: - preStop: - exec: - command: - - /bin/sh - - -c - - ray stop - name: machine-learning - resources: - limits: - cpu: 4 - memory: 6G - nvidia.com/gpu: 7 - requests: - cpu: 3 - memory: 5G - nvidia.com/gpu: 7 - imagePullSecrets: - - name: unit-test-pull-secret - initContainers: - - command: - - sh - - -c - - until nslookup $RAY_IP.$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace).svc.cluster.local; - do echo waiting for myservice; sleep 2; done - image: busybox:1.28 - name: init-myservice - replicas: 1 - - generictemplate: - apiVersion: route.openshift.io/v1 - kind: Route - metadata: - labels: - odh-ray-cluster-service: unit-test-cluster-head-svc - name: ray-dashboard-unit-test-cluster - namespace: ns - spec: - port: - targetPort: dashboard - to: - 
kind: Service - name: unit-test-cluster-head-svc - replicas: 1 - Items: [] diff --git a/tests/test_cluster_yamls/appwrapper/test-case-bad.yaml b/tests/test_cluster_yamls/appwrapper/test-case-bad.yaml new file mode 100644 index 00000000..a5915820 --- /dev/null +++ b/tests/test_cluster_yamls/appwrapper/test-case-bad.yaml @@ -0,0 +1,109 @@ +apiVersion: workload.codeflare.dev/v1beta2 +kind: AppsWrapper +metadata: + labels: + orderedinstance: cpu.small_gpu.large + nam: unit-test-cluster + namspace: ns +spec: + components: + - template: + apiVersion: ray.io/v1 + kind: RayCluster + metadata: + labels: + controller-tools.k8s.io: '1.0' + name: unit-test-cluster + namespace: ns + spec: + autoscalerOptions: + idleTimeoutSeconds: 60 + resources: + limits: + cpu: 500m + memory: 512Mi + requests: + cpu: 500m + memory: 512Mi + upscalingMode: Default + enableInTreeAutoscaling: false + headGroupSpec: + rayStartParams: + block: 'true' + dashboard-host: 0.0.0.0 + num-gpus: '0' + resources: '"{}"' + serviceType: ClusterIP + template: + spec: + containers: + - env: + - name: MY_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + image: "${image}" + imagePullPolicy: IfNotPresent + lifecycle: + preStop: + exec: + command: + - /bin/sh + - -c + - ray stop + name: ray-head + ports: + - containerPort: 6379 + name: gcs + - containerPort: 8265 + name: dashboard + - containerPort: 10001 + name: client + resources: + limits: + cpu: 2 + memory: 8G + requests: + cpu: 2 + memory: 8G + rayVersion: 2.47.1 + workerGroupSpecs: + - groupName: small-group-unit-test-cluster + maxReplicas: 2 + minReplicas: 2 + rayStartParams: + block: 'true' + num-gpus: '7' + resources: '"{}"' + replicas: 2 + template: + metadata: + annotations: + key: value + labels: + key: value + spec: + containers: + - env: + - name: MY_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + image: "${image}" + lifecycle: + preStop: + exec: + command: + - /bin/sh + - -c + - ray stop + name: machine-learning + resources: + 
limits: + cpu: 4 + memory: 6G + nvidia.com/gpu: 7 + requests: + cpu: 3 + memory: 5G + nvidia.com/gpu: 7 diff --git a/tests/test_cluster_yamls/appwrapper/unit-test-all-params.yaml b/tests/test_cluster_yamls/appwrapper/unit-test-all-params.yaml new file mode 100644 index 00000000..aa097dd9 --- /dev/null +++ b/tests/test_cluster_yamls/appwrapper/unit-test-all-params.yaml @@ -0,0 +1,238 @@ +apiVersion: workload.codeflare.dev/v1beta2 +kind: AppWrapper +metadata: + labels: + kueue.x-k8s.io/queue-name: local-queue-default + name: aw-all-params + namespace: ns +spec: + components: + - template: + apiVersion: ray.io/v1 + kind: RayCluster + metadata: + annotations: + app.kubernetes.io/managed-by: test-prefix + key1: value1 + key2: value2 + labels: + controller-tools.k8s.io: '1.0' + key1: value1 + key2: value2 + name: aw-all-params + namespace: ns + spec: + autoscalerOptions: + idleTimeoutSeconds: 60 + resources: + limits: + cpu: 500m + memory: 512Mi + requests: + cpu: 500m + memory: 512Mi + upscalingMode: Default + enableInTreeAutoscaling: false + headGroupSpec: + enableIngress: false + rayStartParams: + block: 'true' + dashboard-host: 0.0.0.0 + num-gpus: '1' + resources: '"{\"TPU\": 2}"' + serviceType: ClusterIP + template: + metadata: + annotations: + app.kubernetes.io/managed-by: test-prefix + key1: value1 + key2: value2 + spec: + containers: + - env: + - name: key1 + value: value1 + - name: key2 + value: value2 + - name: RAY_USAGE_STATS_ENABLED + value: '0' + image: example/ray:tag + imagePullPolicy: Always + lifecycle: + preStop: + exec: + command: + - /bin/sh + - -c + - ray stop + name: ray-head + ports: + - containerPort: 6379 + name: gcs + - containerPort: 8265 + name: dashboard + - containerPort: 10001 + name: client + resources: + limits: + cpu: 8 + intel.com/gpu: 2 + memory: 16G + nvidia.com/gpu: 1 + requests: + cpu: 4 + intel.com/gpu: 2 + memory: 12G + nvidia.com/gpu: 1 + volumeMounts: + - mountPath: /home/ray/test1 + name: test + - mountPath: /home/ray/test2 + 
name: test2 + - mountPath: /home/ray/test2 + name: test3 + - mountPath: /etc/pki/tls/certs/odh-trusted-ca-bundle.crt + name: odh-trusted-ca-cert + subPath: odh-trusted-ca-bundle.crt + - mountPath: /etc/ssl/certs/odh-trusted-ca-bundle.crt + name: odh-trusted-ca-cert + subPath: odh-trusted-ca-bundle.crt + - mountPath: /etc/pki/tls/certs/odh-ca-bundle.crt + name: odh-ca-cert + subPath: odh-ca-bundle.crt + - mountPath: /etc/ssl/certs/odh-ca-bundle.crt + name: odh-ca-cert + subPath: odh-ca-bundle.crt + imagePullSecrets: + - name: secret1 + - name: secret2 + tolerations: + - effect: NoSchedule + key: key1 + operator: Equal + value: value1 + volumes: + - emptyDir: + sizeLimit: 500Gi + name: test + - configMap: + items: + - key: test + path: /home/ray/test2/data.txt + name: config-map-test + name: test2 + - name: test3 + secret: + secretName: test-secret + - configMap: + items: + - key: ca-bundle.crt + path: odh-trusted-ca-bundle.crt + name: odh-trusted-ca-bundle + optional: true + name: odh-trusted-ca-cert + - configMap: + items: + - key: odh-ca-bundle.crt + path: odh-ca-bundle.crt + name: odh-trusted-ca-bundle + optional: true + name: odh-ca-cert + rayVersion: 2.47.1 + workerGroupSpecs: + - groupName: small-group-aw-all-params + maxReplicas: 10 + minReplicas: 10 + rayStartParams: + block: 'true' + num-gpus: '1' + resources: '"{}"' + replicas: 10 + template: + metadata: + annotations: + app.kubernetes.io/managed-by: test-prefix + key1: value1 + key2: value2 + spec: + containers: + - env: + - name: key1 + value: value1 + - name: key2 + value: value2 + - name: RAY_USAGE_STATS_ENABLED + value: '0' + image: example/ray:tag + imagePullPolicy: Always + lifecycle: + preStop: + exec: + command: + - /bin/sh + - -c + - ray stop + name: machine-learning + resources: + limits: + cpu: 8 + memory: 16G + nvidia.com/gpu: 1 + requests: + cpu: 4 + memory: 12G + nvidia.com/gpu: 1 + volumeMounts: + - mountPath: /home/ray/test1 + name: test + - mountPath: /home/ray/test2 + name: test2 + - 
mountPath: /home/ray/test2 + name: test3 + - mountPath: /etc/pki/tls/certs/odh-trusted-ca-bundle.crt + name: odh-trusted-ca-cert + subPath: odh-trusted-ca-bundle.crt + - mountPath: /etc/ssl/certs/odh-trusted-ca-bundle.crt + name: odh-trusted-ca-cert + subPath: odh-trusted-ca-bundle.crt + - mountPath: /etc/pki/tls/certs/odh-ca-bundle.crt + name: odh-ca-cert + subPath: odh-ca-bundle.crt + - mountPath: /etc/ssl/certs/odh-ca-bundle.crt + name: odh-ca-cert + subPath: odh-ca-bundle.crt + imagePullSecrets: + - name: secret1 + - name: secret2 + tolerations: + - effect: NoSchedule + key: key2 + operator: Equal + value: value2 + volumes: + - emptyDir: + sizeLimit: 500Gi + name: test + - configMap: + items: + - key: test + path: /home/ray/test2/data.txt + name: config-map-test + name: test2 + - name: test3 + secret: + secretName: test-secret + - configMap: + items: + - key: ca-bundle.crt + path: odh-trusted-ca-bundle.crt + name: odh-trusted-ca-bundle + optional: true + name: odh-trusted-ca-cert + - configMap: + items: + - key: odh-ca-bundle.crt + path: odh-ca-bundle.crt + name: odh-trusted-ca-bundle + optional: true + name: odh-ca-cert diff --git a/tests/test_cluster_yamls/kueue/aw_kueue.yaml b/tests/test_cluster_yamls/kueue/aw_kueue.yaml new file mode 100644 index 00000000..f5f16406 --- /dev/null +++ b/tests/test_cluster_yamls/kueue/aw_kueue.yaml @@ -0,0 +1,156 @@ +apiVersion: workload.codeflare.dev/v1beta2 +kind: AppWrapper +metadata: + labels: + kueue.x-k8s.io/queue-name: local-queue-default + name: unit-test-aw-kueue + namespace: ns +spec: + components: + - template: + apiVersion: ray.io/v1 + kind: RayCluster + metadata: + labels: + controller-tools.k8s.io: '1.0' + name: unit-test-aw-kueue + namespace: ns + spec: + autoscalerOptions: + idleTimeoutSeconds: 60 + resources: + limits: + cpu: 500m + memory: 512Mi + requests: + cpu: 500m + memory: 512Mi + upscalingMode: Default + enableInTreeAutoscaling: false + headGroupSpec: + enableIngress: false + rayStartParams: + block: 
'true' + dashboard-host: 0.0.0.0 + num-gpus: '0' + resources: '"{}"' + serviceType: ClusterIP + template: + spec: + containers: + - image: "${image}" + imagePullPolicy: Always + lifecycle: + preStop: + exec: + command: + - /bin/sh + - -c + - ray stop + name: ray-head + ports: + - containerPort: 6379 + name: gcs + - containerPort: 8265 + name: dashboard + - containerPort: 10001 + name: client + resources: + limits: + cpu: 2 + memory: 8G + requests: + cpu: 2 + memory: 8G + volumeMounts: + - mountPath: /etc/pki/tls/certs/odh-trusted-ca-bundle.crt + name: odh-trusted-ca-cert + subPath: odh-trusted-ca-bundle.crt + - mountPath: /etc/ssl/certs/odh-trusted-ca-bundle.crt + name: odh-trusted-ca-cert + subPath: odh-trusted-ca-bundle.crt + - mountPath: /etc/pki/tls/certs/odh-ca-bundle.crt + name: odh-ca-cert + subPath: odh-ca-bundle.crt + - mountPath: /etc/ssl/certs/odh-ca-bundle.crt + name: odh-ca-cert + subPath: odh-ca-bundle.crt + env: + - name: RAY_USAGE_STATS_ENABLED + value: '0' + volumes: + - configMap: + items: + - key: ca-bundle.crt + path: odh-trusted-ca-bundle.crt + name: odh-trusted-ca-bundle + optional: true + name: odh-trusted-ca-cert + - configMap: + items: + - key: odh-ca-bundle.crt + path: odh-ca-bundle.crt + name: odh-trusted-ca-bundle + optional: true + name: odh-ca-cert + rayVersion: 2.47.1 + workerGroupSpecs: + - groupName: small-group-unit-test-aw-kueue + maxReplicas: 2 + minReplicas: 2 + rayStartParams: + block: 'true' + num-gpus: '0' + resources: '"{}"' + replicas: 2 + template: + spec: + containers: + - image: "${image}" + imagePullPolicy: Always + lifecycle: + preStop: + exec: + command: + - /bin/sh + - -c + - ray stop + name: machine-learning + resources: + limits: + cpu: 4 + memory: 6G + requests: + cpu: 3 + memory: 5G + volumeMounts: + - mountPath: /etc/pki/tls/certs/odh-trusted-ca-bundle.crt + name: odh-trusted-ca-cert + subPath: odh-trusted-ca-bundle.crt + - mountPath: /etc/ssl/certs/odh-trusted-ca-bundle.crt + name: odh-trusted-ca-cert + 
subPath: odh-trusted-ca-bundle.crt + - mountPath: /etc/pki/tls/certs/odh-ca-bundle.crt + name: odh-ca-cert + subPath: odh-ca-bundle.crt + - mountPath: /etc/ssl/certs/odh-ca-bundle.crt + name: odh-ca-cert + subPath: odh-ca-bundle.crt + env: + - name: RAY_USAGE_STATS_ENABLED + value: '0' + volumes: + - configMap: + items: + - key: ca-bundle.crt + path: odh-trusted-ca-bundle.crt + name: odh-trusted-ca-bundle + optional: true + name: odh-trusted-ca-cert + - configMap: + items: + - key: odh-ca-bundle.crt + path: odh-ca-bundle.crt + name: odh-trusted-ca-bundle + optional: true + name: odh-ca-cert diff --git a/tests/test_cluster_yamls/kueue/ray_cluster_kueue.yaml b/tests/test_cluster_yamls/kueue/ray_cluster_kueue.yaml new file mode 100644 index 00000000..d6db4f4a --- /dev/null +++ b/tests/test_cluster_yamls/kueue/ray_cluster_kueue.yaml @@ -0,0 +1,156 @@ +apiVersion: workload.codeflare.dev/v1beta2 +kind: AppWrapper +metadata: + labels: + kueue.x-k8s.io/queue-name: local-queue-default + name: unit-test-cluster-kueue + namespace: ns +spec: + components: + - template: + apiVersion: ray.io/v1 + kind: RayCluster + metadata: + labels: + controller-tools.k8s.io: '1.0' + name: unit-test-cluster-kueue + namespace: ns + spec: + autoscalerOptions: + idleTimeoutSeconds: 60 + resources: + limits: + cpu: 500m + memory: 512Mi + requests: + cpu: 500m + memory: 512Mi + upscalingMode: Default + enableInTreeAutoscaling: false + headGroupSpec: + enableIngress: false + rayStartParams: + block: 'true' + dashboard-host: 0.0.0.0 + num-gpus: '0' + resources: '"{}"' + serviceType: ClusterIP + template: + spec: + containers: + - image: "${image}" + imagePullPolicy: Always + lifecycle: + preStop: + exec: + command: + - /bin/sh + - -c + - ray stop + name: ray-head + ports: + - containerPort: 6379 + name: gcs + - containerPort: 8265 + name: dashboard + - containerPort: 10001 + name: client + resources: + limits: + cpu: 2 + memory: 8G + requests: + cpu: 2 + memory: 8G + volumeMounts: + - mountPath: 
/etc/pki/tls/certs/odh-trusted-ca-bundle.crt + name: odh-trusted-ca-cert + subPath: odh-trusted-ca-bundle.crt + - mountPath: /etc/ssl/certs/odh-trusted-ca-bundle.crt + name: odh-trusted-ca-cert + subPath: odh-trusted-ca-bundle.crt + - mountPath: /etc/pki/tls/certs/odh-ca-bundle.crt + name: odh-ca-cert + subPath: odh-ca-bundle.crt + - mountPath: /etc/ssl/certs/odh-ca-bundle.crt + name: odh-ca-cert + subPath: odh-ca-bundle.crt + env: + - name: RAY_USAGE_STATS_ENABLED + value: '0' + volumes: + - configMap: + items: + - key: ca-bundle.crt + path: odh-trusted-ca-bundle.crt + name: odh-trusted-ca-bundle + optional: true + name: odh-trusted-ca-cert + - configMap: + items: + - key: odh-ca-bundle.crt + path: odh-ca-bundle.crt + name: odh-trusted-ca-bundle + optional: true + name: odh-ca-cert + rayVersion: 2.47.1 + workerGroupSpecs: + - groupName: small-group-unit-test-cluster-kueue + maxReplicas: 2 + minReplicas: 2 + rayStartParams: + block: 'true' + num-gpus: '0' + resources: '"{}"' + replicas: 2 + template: + spec: + containers: + - image: "${image}" + imagePullPolicy: Always + lifecycle: + preStop: + exec: + command: + - /bin/sh + - -c + - ray stop + name: machine-learning + resources: + limits: + cpu: 4 + memory: 6G + requests: + cpu: 3 + memory: 5G + volumeMounts: + - mountPath: /etc/pki/tls/certs/odh-trusted-ca-bundle.crt + name: odh-trusted-ca-cert + subPath: odh-trusted-ca-bundle.crt + - mountPath: /etc/ssl/certs/odh-trusted-ca-bundle.crt + name: odh-trusted-ca-cert + subPath: odh-trusted-ca-bundle.crt + - mountPath: /etc/pki/tls/certs/odh-ca-bundle.crt + name: odh-ca-cert + subPath: odh-ca-bundle.crt + - mountPath: /etc/ssl/certs/odh-ca-bundle.crt + name: odh-ca-cert + subPath: odh-ca-bundle.crt + env: + - name: RAY_USAGE_STATS_ENABLED + value: '0' + volumes: + - configMap: + items: + - key: ca-bundle.crt + path: odh-trusted-ca-bundle.crt + name: odh-trusted-ca-bundle + optional: true + name: odh-trusted-ca-cert + - configMap: + items: + - key: odh-ca-bundle.crt + 
path: odh-ca-bundle.crt + name: odh-trusted-ca-bundle + optional: true + name: odh-ca-cert diff --git a/tests/test_cluster_yamls/ray/default-appwrapper.yaml b/tests/test_cluster_yamls/ray/default-appwrapper.yaml new file mode 100644 index 00000000..27828163 --- /dev/null +++ b/tests/test_cluster_yamls/ray/default-appwrapper.yaml @@ -0,0 +1,154 @@ +apiVersion: workload.codeflare.dev/v1beta2 +kind: AppWrapper +metadata: + name: default-appwrapper + namespace: ns +spec: + components: + - template: + apiVersion: ray.io/v1 + kind: RayCluster + metadata: + labels: + controller-tools.k8s.io: '1.0' + name: default-appwrapper + namespace: ns + spec: + autoscalerOptions: + idleTimeoutSeconds: 60 + resources: + limits: + cpu: 500m + memory: 512Mi + requests: + cpu: 500m + memory: 512Mi + upscalingMode: Default + enableInTreeAutoscaling: false + headGroupSpec: + enableIngress: false + rayStartParams: + block: 'true' + dashboard-host: 0.0.0.0 + num-gpus: '0' + resources: '"{}"' + serviceType: ClusterIP + template: + spec: + containers: + - image: "${image}" + imagePullPolicy: Always + lifecycle: + preStop: + exec: + command: + - /bin/sh + - -c + - ray stop + name: ray-head + ports: + - containerPort: 6379 + name: gcs + - containerPort: 8265 + name: dashboard + - containerPort: 10001 + name: client + env: + - name: RAY_USAGE_STATS_ENABLED + value: '0' + resources: + limits: + cpu: 2 + memory: 8G + requests: + cpu: 2 + memory: 8G + volumeMounts: + - mountPath: /etc/pki/tls/certs/odh-trusted-ca-bundle.crt + name: odh-trusted-ca-cert + subPath: odh-trusted-ca-bundle.crt + - mountPath: /etc/ssl/certs/odh-trusted-ca-bundle.crt + name: odh-trusted-ca-cert + subPath: odh-trusted-ca-bundle.crt + - mountPath: /etc/pki/tls/certs/odh-ca-bundle.crt + name: odh-ca-cert + subPath: odh-ca-bundle.crt + - mountPath: /etc/ssl/certs/odh-ca-bundle.crt + name: odh-ca-cert + subPath: odh-ca-bundle.crt + volumes: + - configMap: + items: + - key: ca-bundle.crt + path: odh-trusted-ca-bundle.crt + name: 
odh-trusted-ca-bundle + optional: true + name: odh-trusted-ca-cert + - configMap: + items: + - key: odh-ca-bundle.crt + path: odh-ca-bundle.crt + name: odh-trusted-ca-bundle + optional: true + name: odh-ca-cert + rayVersion: 2.47.1 + workerGroupSpecs: + - groupName: small-group-default-appwrapper + maxReplicas: 1 + minReplicas: 1 + rayStartParams: + block: 'true' + num-gpus: '0' + resources: '"{}"' + replicas: 1 + template: + spec: + containers: + - image: "${image}" + imagePullPolicy: Always + lifecycle: + preStop: + exec: + command: + - /bin/sh + - -c + - ray stop + name: machine-learning + env: + - name: RAY_USAGE_STATS_ENABLED + value: '0' + resources: + limits: + cpu: 1 + memory: 2G + requests: + cpu: 1 + memory: 2G + volumeMounts: + - mountPath: /etc/pki/tls/certs/odh-trusted-ca-bundle.crt + name: odh-trusted-ca-cert + subPath: odh-trusted-ca-bundle.crt + - mountPath: /etc/ssl/certs/odh-trusted-ca-bundle.crt + name: odh-trusted-ca-cert + subPath: odh-trusted-ca-bundle.crt + - mountPath: /etc/pki/tls/certs/odh-ca-bundle.crt + name: odh-ca-cert + subPath: odh-ca-bundle.crt + - mountPath: /etc/ssl/certs/odh-ca-bundle.crt + name: odh-ca-cert + subPath: odh-ca-bundle.crt + volumes: + - configMap: + items: + - key: ca-bundle.crt + path: odh-trusted-ca-bundle.crt + name: odh-trusted-ca-bundle + optional: true + name: odh-trusted-ca-cert + - configMap: + items: + - key: odh-ca-bundle.crt + path: odh-ca-bundle.crt + name: odh-trusted-ca-bundle + optional: true + name: odh-ca-cert diff --git a/tests/test_cluster_yamls/ray/default-ray-cluster.yaml b/tests/test_cluster_yamls/ray/default-ray-cluster.yaml new file mode 100644 index 00000000..d2e6b05a --- /dev/null +++ b/tests/test_cluster_yamls/ray/default-ray-cluster.yaml @@ -0,0 +1,146 @@ +apiVersion: ray.io/v1 +kind: RayCluster +metadata: + labels: + controller-tools.k8s.io: '1.0' + name: default-cluster + namespace: ns +spec: + autoscalerOptions: + idleTimeoutSeconds: 60 + resources: + limits: + cpu: 500m + memory: 
512Mi + requests: + cpu: 500m + memory: 512Mi + upscalingMode: Default + enableInTreeAutoscaling: false + headGroupSpec: + enableIngress: false + rayStartParams: + block: 'true' + dashboard-host: 0.0.0.0 + num-gpus: '0' + resources: '"{}"' + serviceType: ClusterIP + template: + spec: + containers: + - image: "${image}" + imagePullPolicy: Always + lifecycle: + preStop: + exec: + command: + - /bin/sh + - -c + - ray stop + name: ray-head + ports: + - containerPort: 6379 + name: gcs + - containerPort: 8265 + name: dashboard + - containerPort: 10001 + name: client + env: + - name: RAY_USAGE_STATS_ENABLED + value: '0' + resources: + limits: + cpu: 2 + memory: 8G + requests: + cpu: 2 + memory: 8G + volumeMounts: + - mountPath: /etc/pki/tls/certs/odh-trusted-ca-bundle.crt + name: odh-trusted-ca-cert + subPath: odh-trusted-ca-bundle.crt + - mountPath: /etc/ssl/certs/odh-trusted-ca-bundle.crt + name: odh-trusted-ca-cert + subPath: odh-trusted-ca-bundle.crt + - mountPath: /etc/pki/tls/certs/odh-ca-bundle.crt + name: odh-ca-cert + subPath: odh-ca-bundle.crt + - mountPath: /etc/ssl/certs/odh-ca-bundle.crt + name: odh-ca-cert + subPath: odh-ca-bundle.crt + volumes: + - configMap: + items: + - key: ca-bundle.crt + path: odh-trusted-ca-bundle.crt + name: odh-trusted-ca-bundle + optional: true + name: odh-trusted-ca-cert + - configMap: + items: + - key: odh-ca-bundle.crt + path: odh-ca-bundle.crt + name: odh-trusted-ca-bundle + optional: true + name: odh-ca-cert + rayVersion: 2.47.1 + workerGroupSpecs: + - groupName: small-group-default-cluster + maxReplicas: 1 + minReplicas: 1 + rayStartParams: + block: 'true' + num-gpus: '0' + resources: '"{}"' + replicas: 1 + template: + spec: + containers: + - image: "${image}" + imagePullPolicy: Always + lifecycle: + preStop: + exec: + command: + - /bin/sh + - -c + - ray stop + name: machine-learning + resources: + limits: + cpu: 1 + memory: 2G + requests: + cpu: 1 + memory: 2G + env: + - name: RAY_USAGE_STATS_ENABLED + value: '0' + 
volumeMounts: + - mountPath: /etc/pki/tls/certs/odh-trusted-ca-bundle.crt + name: odh-trusted-ca-cert + subPath: odh-trusted-ca-bundle.crt + - mountPath: /etc/ssl/certs/odh-trusted-ca-bundle.crt + name: odh-trusted-ca-cert + subPath: odh-trusted-ca-bundle.crt + - mountPath: /etc/pki/tls/certs/odh-ca-bundle.crt + name: odh-ca-cert + subPath: odh-ca-bundle.crt + - mountPath: /etc/ssl/certs/odh-ca-bundle.crt + name: odh-ca-cert + subPath: odh-ca-bundle.crt + volumes: + - configMap: + items: + - key: ca-bundle.crt + path: odh-trusted-ca-bundle.crt + name: odh-trusted-ca-bundle + optional: true + name: odh-trusted-ca-cert + - configMap: + items: + - key: odh-ca-bundle.crt + path: odh-ca-bundle.crt + name: odh-trusted-ca-bundle + optional: true + name: odh-ca-cert diff --git a/tests/test_cluster_yamls/ray/unit-test-all-params.yaml b/tests/test_cluster_yamls/ray/unit-test-all-params.yaml new file mode 100644 index 00000000..ee0878c1 --- /dev/null +++ b/tests/test_cluster_yamls/ray/unit-test-all-params.yaml @@ -0,0 +1,229 @@ +apiVersion: ray.io/v1 +kind: RayCluster +metadata: + annotations: + app.kubernetes.io/managed-by: test-prefix + key1: value1 + key2: value2 + labels: + controller-tools.k8s.io: '1.0' + key1: value1 + key2: value2 + kueue.x-k8s.io/queue-name: local-queue-default + name: test-all-params + namespace: ns +spec: + autoscalerOptions: + idleTimeoutSeconds: 60 + resources: + limits: + cpu: 500m + memory: 512Mi + requests: + cpu: 500m + memory: 512Mi + upscalingMode: Default + enableInTreeAutoscaling: false + headGroupSpec: + enableIngress: false + rayStartParams: + block: 'true' + dashboard-host: 0.0.0.0 + num-gpus: '1' + resources: '"{\"TPU\": 2}"' + serviceType: ClusterIP + template: + metadata: + annotations: + app.kubernetes.io/managed-by: test-prefix + key1: value1 + key2: value2 + spec: + containers: + - env: + - name: key1 + value: value1 + - name: key2 + value: value2 + - name: RAY_USAGE_STATS_ENABLED + value: '0' + image: example/ray:tag + 
imagePullPolicy: Always + lifecycle: + preStop: + exec: + command: + - /bin/sh + - -c + - ray stop + name: ray-head + ports: + - containerPort: 6379 + name: gcs + - containerPort: 8265 + name: dashboard + - containerPort: 10001 + name: client + resources: + limits: + cpu: 8 + intel.com/gpu: 2 + memory: 16G + nvidia.com/gpu: 1 + requests: + cpu: 4 + intel.com/gpu: 2 + memory: 12G + nvidia.com/gpu: 1 + volumeMounts: + - mountPath: /home/ray/test1 + name: test + - mountPath: /home/ray/test2 + name: test2 + - mountPath: /home/ray/test2 + name: test3 + - mountPath: /etc/pki/tls/certs/odh-trusted-ca-bundle.crt + name: odh-trusted-ca-cert + subPath: odh-trusted-ca-bundle.crt + - mountPath: /etc/ssl/certs/odh-trusted-ca-bundle.crt + name: odh-trusted-ca-cert + subPath: odh-trusted-ca-bundle.crt + - mountPath: /etc/pki/tls/certs/odh-ca-bundle.crt + name: odh-ca-cert + subPath: odh-ca-bundle.crt + - mountPath: /etc/ssl/certs/odh-ca-bundle.crt + name: odh-ca-cert + subPath: odh-ca-bundle.crt + imagePullSecrets: + - name: secret1 + - name: secret2 + tolerations: + - effect: NoSchedule + key: key1 + operator: Equal + value: value1 + volumes: + - emptyDir: + sizeLimit: 500Gi + name: test + - configMap: + items: + - key: test + path: /home/ray/test2/data.txt + name: config-map-test + name: test2 + - name: test3 + secret: + secretName: test-secret + - configMap: + items: + - key: ca-bundle.crt + path: odh-trusted-ca-bundle.crt + name: odh-trusted-ca-bundle + optional: true + name: odh-trusted-ca-cert + - configMap: + items: + - key: odh-ca-bundle.crt + path: odh-ca-bundle.crt + name: odh-trusted-ca-bundle + optional: true + name: odh-ca-cert + rayVersion: 2.47.1 + workerGroupSpecs: + - groupName: small-group-test-all-params + maxReplicas: 10 + minReplicas: 10 + rayStartParams: + block: 'true' + num-gpus: '1' + resources: '"{}"' + replicas: 10 + template: + metadata: + annotations: + app.kubernetes.io/managed-by: test-prefix + key1: value1 + key2: value2 + spec: + containers: + - 
env: + - name: key1 + value: value1 + - name: key2 + value: value2 + - name: RAY_USAGE_STATS_ENABLED + value: '0' + image: example/ray:tag + imagePullPolicy: Always + lifecycle: + preStop: + exec: + command: + - /bin/sh + - -c + - ray stop + name: machine-learning + resources: + limits: + cpu: 8 + memory: 16G + nvidia.com/gpu: 1 + requests: + cpu: 4 + memory: 12G + nvidia.com/gpu: 1 + volumeMounts: + - mountPath: /home/ray/test1 + name: test + - mountPath: /home/ray/test2 + name: test2 + - mountPath: /home/ray/test2 + name: test3 + - mountPath: /etc/pki/tls/certs/odh-trusted-ca-bundle.crt + name: odh-trusted-ca-cert + subPath: odh-trusted-ca-bundle.crt + - mountPath: /etc/ssl/certs/odh-trusted-ca-bundle.crt + name: odh-trusted-ca-cert + subPath: odh-trusted-ca-bundle.crt + - mountPath: /etc/pki/tls/certs/odh-ca-bundle.crt + name: odh-ca-cert + subPath: odh-ca-bundle.crt + - mountPath: /etc/ssl/certs/odh-ca-bundle.crt + name: odh-ca-cert + subPath: odh-ca-bundle.crt + imagePullSecrets: + - name: secret1 + - name: secret2 + tolerations: + - effect: NoSchedule + key: key2 + operator: Equal + value: value2 + volumes: + - emptyDir: + sizeLimit: 500Gi + name: test + - configMap: + items: + - key: test + path: /home/ray/test2/data.txt + name: config-map-test + name: test2 + - name: test3 + secret: + secretName: test-secret + - configMap: + items: + - key: ca-bundle.crt + path: odh-trusted-ca-bundle.crt + name: odh-trusted-ca-bundle + optional: true + name: odh-trusted-ca-cert + - configMap: + items: + - key: odh-ca-bundle.crt + path: odh-ca-bundle.crt + name: odh-trusted-ca-bundle + optional: true + name: odh-ca-cert diff --git a/tests/test_cluster_yamls/support_clusters/test-aw-a.yaml b/tests/test_cluster_yamls/support_clusters/test-aw-a.yaml new file mode 100644 index 00000000..49f2c38c --- /dev/null +++ b/tests/test_cluster_yamls/support_clusters/test-aw-a.yaml @@ -0,0 +1,156 @@ +apiVersion: workload.codeflare.dev/v1beta2 +kind: AppWrapper +metadata: + labels: + 
kueue.x-k8s.io/queue-name: local_default_queue + name: test-cluster-a + namespace: ns +spec: + components: + - template: + apiVersion: ray.io/v1 + kind: RayCluster + metadata: + labels: + controller-tools.k8s.io: '1.0' + name: test-cluster-a + namespace: ns + spec: + autoscalerOptions: + idleTimeoutSeconds: 60 + resources: + limits: + cpu: 500m + memory: 512Mi + requests: + cpu: 500m + memory: 512Mi + upscalingMode: Default + enableInTreeAutoscaling: false + headGroupSpec: + enableIngress: false + rayStartParams: + block: 'true' + dashboard-host: 0.0.0.0 + num-gpus: '0' + resources: '"{}"' + serviceType: ClusterIP + template: + spec: + containers: + - image: "${image}" + imagePullPolicy: IfNotPresent + lifecycle: + preStop: + exec: + command: + - /bin/sh + - -c + - ray stop + name: ray-head + ports: + - containerPort: 6379 + name: gcs + - containerPort: 8265 + name: dashboard + - containerPort: 10001 + name: client + resources: + limits: + cpu: 2 + memory: 8G + requests: + cpu: 2 + memory: 8G + volumeMounts: + - mountPath: /etc/pki/tls/certs/odh-trusted-ca-bundle.crt + name: odh-trusted-ca-cert + subPath: odh-trusted-ca-bundle.crt + - mountPath: /etc/ssl/certs/odh-trusted-ca-bundle.crt + name: odh-trusted-ca-cert + subPath: odh-trusted-ca-bundle.crt + - mountPath: /etc/pki/tls/certs/odh-ca-bundle.crt + name: odh-ca-cert + subPath: odh-ca-bundle.crt + - mountPath: /etc/ssl/certs/odh-ca-bundle.crt + name: odh-ca-cert + subPath: odh-ca-bundle.crt + imagePullSecrets: [] + volumes: + - configMap: + items: + - key: ca-bundle.crt + path: odh-trusted-ca-bundle.crt + name: odh-trusted-ca-bundle + optional: true + name: odh-trusted-ca-cert + - configMap: + items: + - key: odh-ca-bundle.crt + path: odh-ca-bundle.crt + name: odh-trusted-ca-bundle + optional: true + name: odh-ca-cert + rayVersion: 2.47.1 + workerGroupSpecs: + - groupName: small-group-test-cluster-a + maxReplicas: 1 + minReplicas: 1 + rayStartParams: + block: 'true' + num-gpus: '0' + resources: '"{}"' + 
replicas: 1 + template: + metadata: + annotations: + key: value + labels: + key: value + spec: + containers: + - image: "${image}" + lifecycle: + preStop: + exec: + command: + - /bin/sh + - -c + - ray stop + name: machine-learning + resources: + limits: + cpu: 1 + memory: 2G + requests: + cpu: 1 + memory: 2G + volumeMounts: + - mountPath: /etc/pki/tls/certs/odh-trusted-ca-bundle.crt + name: odh-trusted-ca-cert + subPath: odh-trusted-ca-bundle.crt + - mountPath: /etc/ssl/certs/odh-trusted-ca-bundle.crt + name: odh-trusted-ca-cert + subPath: odh-trusted-ca-bundle.crt + - mountPath: /etc/pki/tls/certs/odh-ca-bundle.crt + name: odh-ca-cert + subPath: odh-ca-bundle.crt + - mountPath: /etc/ssl/certs/odh-ca-bundle.crt + name: odh-ca-cert + subPath: odh-ca-bundle.crt + imagePullSecrets: [] + volumes: + - configMap: + items: + - key: ca-bundle.crt + path: odh-trusted-ca-bundle.crt + name: odh-trusted-ca-bundle + optional: true + name: odh-trusted-ca-cert + - configMap: + items: + - key: odh-ca-bundle.crt + path: odh-ca-bundle.crt + name: odh-trusted-ca-bundle + optional: true + name: odh-ca-cert diff --git a/tests/test_cluster_yamls/support_clusters/test-aw-b.yaml b/tests/test_cluster_yamls/support_clusters/test-aw-b.yaml new file mode 100644 index 00000000..aa6fad9c --- /dev/null +++ b/tests/test_cluster_yamls/support_clusters/test-aw-b.yaml @@ -0,0 +1,156 @@ +apiVersion: workload.codeflare.dev/v1beta2 +kind: AppWrapper +metadata: + labels: + kueue.x-k8s.io/queue-name: local_default_queue + name: test-cluster-b + namespace: ns +spec: + components: + - template: + apiVersion: ray.io/v1 + kind: RayCluster + metadata: + labels: + controller-tools.k8s.io: '1.0' + name: test-cluster-b + namespace: ns + spec: + autoscalerOptions: + idleTimeoutSeconds: 60 + resources: + limits: + cpu: 500m + memory: 512Mi + requests: + cpu: 500m + memory: 512Mi + upscalingMode: Default + enableInTreeAutoscaling: false + headGroupSpec: + enableIngress: false + rayStartParams: + block: 'true' + 
dashboard-host: 0.0.0.0 + num-gpus: '0' + resources: '"{}"' + serviceType: ClusterIP + template: + spec: + containers: + - image: "${image}" + imagePullPolicy: IfNotPresent + lifecycle: + preStop: + exec: + command: + - /bin/sh + - -c + - ray stop + name: ray-head + ports: + - containerPort: 6379 + name: gcs + - containerPort: 8265 + name: dashboard + - containerPort: 10001 + name: client + resources: + limits: + cpu: 2 + memory: 8G + requests: + cpu: 2 + memory: 8G + volumeMounts: + - mountPath: /etc/pki/tls/certs/odh-trusted-ca-bundle.crt + name: odh-trusted-ca-cert + subPath: odh-trusted-ca-bundle.crt + - mountPath: /etc/ssl/certs/odh-trusted-ca-bundle.crt + name: odh-trusted-ca-cert + subPath: odh-trusted-ca-bundle.crt + - mountPath: /etc/pki/tls/certs/odh-ca-bundle.crt + name: odh-ca-cert + subPath: odh-ca-bundle.crt + - mountPath: /etc/ssl/certs/odh-ca-bundle.crt + name: odh-ca-cert + subPath: odh-ca-bundle.crt + imagePullSecrets: [] + volumes: + - configMap: + items: + - key: ca-bundle.crt + path: odh-trusted-ca-bundle.crt + name: odh-trusted-ca-bundle + optional: true + name: odh-trusted-ca-cert + - configMap: + items: + - key: odh-ca-bundle.crt + path: odh-ca-bundle.crt + name: odh-trusted-ca-bundle + optional: true + name: odh-ca-cert + rayVersion: 2.47.1 + workerGroupSpecs: + - groupName: small-group-test-cluster-b + maxReplicas: 1 + minReplicas: 1 + rayStartParams: + block: 'true' + num-gpus: '0' + resources: '"{}"' + replicas: 1 + template: + metadata: + annotations: + key: value + labels: + key: value + spec: + containers: + - image: "${image}" + lifecycle: + preStop: + exec: + command: + - /bin/sh + - -c + - ray stop + name: machine-learning + resources: + limits: + cpu: 1 + memory: 2G + requests: + cpu: 1 + memory: 2G + volumeMounts: + - mountPath: /etc/pki/tls/certs/odh-trusted-ca-bundle.crt + name: odh-trusted-ca-cert + subPath: odh-trusted-ca-bundle.crt + - mountPath: /etc/ssl/certs/odh-trusted-ca-bundle.crt + name: odh-trusted-ca-cert + subPath: 
odh-trusted-ca-bundle.crt + - mountPath: /etc/pki/tls/certs/odh-ca-bundle.crt + name: odh-ca-cert + subPath: odh-ca-bundle.crt + - mountPath: /etc/ssl/certs/odh-ca-bundle.crt + name: odh-ca-cert + subPath: odh-ca-bundle.crt + imagePullSecrets: [] + volumes: + - configMap: + items: + - key: ca-bundle.crt + path: odh-trusted-ca-bundle.crt + name: odh-trusted-ca-bundle + optional: true + name: odh-trusted-ca-cert + - configMap: + items: + - key: odh-ca-bundle.crt + path: odh-ca-bundle.crt + name: odh-trusted-ca-bundle + optional: true + name: odh-ca-cert diff --git a/tests/test_cluster_yamls/support_clusters/test-rc-a.yaml b/tests/test_cluster_yamls/support_clusters/test-rc-a.yaml new file mode 100644 index 00000000..2bb13995 --- /dev/null +++ b/tests/test_cluster_yamls/support_clusters/test-rc-a.yaml @@ -0,0 +1,147 @@ +apiVersion: ray.io/v1 +kind: RayCluster +metadata: + labels: + controller-tools.k8s.io: '1.0' + kueue.x-k8s.io/queue-name: local_default_queue + name: test-cluster-a + namespace: ns +spec: + autoscalerOptions: + idleTimeoutSeconds: 60 + resources: + limits: + cpu: 500m + memory: 512Mi + requests: + cpu: 500m + memory: 512Mi + upscalingMode: Default + enableInTreeAutoscaling: false + headGroupSpec: + enableIngress: false + rayStartParams: + block: 'true' + dashboard-host: 0.0.0.0 + num-gpus: '0' + resources: '"{}"' + serviceType: ClusterIP + template: + spec: + containers: + - image: "${image}" + imagePullPolicy: IfNotPresent + lifecycle: + preStop: + exec: + command: + - /bin/sh + - -c + - ray stop + name: ray-head + ports: + - containerPort: 6379 + name: gcs + - containerPort: 8265 + name: dashboard + - containerPort: 10001 + name: client + resources: + limits: + cpu: 2 + memory: 8G + requests: + cpu: 2 + memory: 8G + volumeMounts: + - mountPath: /etc/pki/tls/certs/odh-trusted-ca-bundle.crt + name: odh-trusted-ca-cert + subPath: odh-trusted-ca-bundle.crt + - mountPath: /etc/ssl/certs/odh-trusted-ca-bundle.crt + name: odh-trusted-ca-cert + subPath: 
odh-trusted-ca-bundle.crt + - mountPath: /etc/pki/tls/certs/odh-ca-bundle.crt + name: odh-ca-cert + subPath: odh-ca-bundle.crt + - mountPath: /etc/ssl/certs/odh-ca-bundle.crt + name: odh-ca-cert + subPath: odh-ca-bundle.crt + imagePullSecrets: [] + volumes: + - configMap: + items: + - key: ca-bundle.crt + path: odh-trusted-ca-bundle.crt + name: odh-trusted-ca-bundle + optional: true + name: odh-trusted-ca-cert + - configMap: + items: + - key: odh-ca-bundle.crt + path: odh-ca-bundle.crt + name: odh-trusted-ca-bundle + optional: true + name: odh-ca-cert + rayVersion: 2.47.1 + workerGroupSpecs: + - groupName: small-group-test-cluster-a + maxReplicas: 1 + minReplicas: 1 + rayStartParams: + block: 'true' + num-gpus: '0' + resources: '"{}"' + replicas: 1 + template: + metadata: + annotations: + key: value + labels: + key: value + spec: + containers: + - image: "${image}" + lifecycle: + preStop: + exec: + command: + - /bin/sh + - -c + - ray stop + name: machine-learning + resources: + limits: + cpu: 1 + memory: 2G + requests: + cpu: 1 + memory: 2G + volumeMounts: + - mountPath: /etc/pki/tls/certs/odh-trusted-ca-bundle.crt + name: odh-trusted-ca-cert + subPath: odh-trusted-ca-bundle.crt + - mountPath: /etc/ssl/certs/odh-trusted-ca-bundle.crt + name: odh-trusted-ca-cert + subPath: odh-trusted-ca-bundle.crt + - mountPath: /etc/pki/tls/certs/odh-ca-bundle.crt + name: odh-ca-cert + subPath: odh-ca-bundle.crt + - mountPath: /etc/ssl/certs/odh-ca-bundle.crt + name: odh-ca-cert + subPath: odh-ca-bundle.crt + imagePullSecrets: [] + volumes: + - configMap: + items: + - key: ca-bundle.crt + path: odh-trusted-ca-bundle.crt + name: odh-trusted-ca-bundle + optional: true + name: odh-trusted-ca-cert + - configMap: + items: + - key: odh-ca-bundle.crt + path: odh-ca-bundle.crt + name: odh-trusted-ca-bundle + optional: true + name: odh-ca-cert diff --git a/tests/test_cluster_yamls/support_clusters/test-rc-b.yaml b/tests/test_cluster_yamls/support_clusters/test-rc-b.yaml new file mode 
100644 index 00000000..70f1d5bf --- /dev/null +++ b/tests/test_cluster_yamls/support_clusters/test-rc-b.yaml @@ -0,0 +1,147 @@ +apiVersion: ray.io/v1 +kind: RayCluster +metadata: + labels: + controller-tools.k8s.io: '1.0' + kueue.x-k8s.io/queue-name: local_default_queue + name: test-rc-b + namespace: ns +spec: + autoscalerOptions: + idleTimeoutSeconds: 60 + resources: + limits: + cpu: 500m + memory: 512Mi + requests: + cpu: 500m + memory: 512Mi + upscalingMode: Default + enableInTreeAutoscaling: false + headGroupSpec: + enableIngress: false + rayStartParams: + block: 'true' + dashboard-host: 0.0.0.0 + num-gpus: '0' + resources: '"{}"' + serviceType: ClusterIP + template: + spec: + containers: + - image: "${image}" + imagePullPolicy: IfNotPresent + lifecycle: + preStop: + exec: + command: + - /bin/sh + - -c + - ray stop + name: ray-head + ports: + - containerPort: 6379 + name: gcs + - containerPort: 8265 + name: dashboard + - containerPort: 10001 + name: client + resources: + limits: + cpu: 2 + memory: 8G + requests: + cpu: 2 + memory: 8G + volumeMounts: + - mountPath: /etc/pki/tls/certs/odh-trusted-ca-bundle.crt + name: odh-trusted-ca-cert + subPath: odh-trusted-ca-bundle.crt + - mountPath: /etc/ssl/certs/odh-trusted-ca-bundle.crt + name: odh-trusted-ca-cert + subPath: odh-trusted-ca-bundle.crt + - mountPath: /etc/pki/tls/certs/odh-ca-bundle.crt + name: odh-ca-cert + subPath: odh-ca-bundle.crt + - mountPath: /etc/ssl/certs/odh-ca-bundle.crt + name: odh-ca-cert + subPath: odh-ca-bundle.crt + imagePullSecrets: [] + volumes: + - configMap: + items: + - key: ca-bundle.crt + path: odh-trusted-ca-bundle.crt + name: odh-trusted-ca-bundle + optional: true + name: odh-trusted-ca-cert + - configMap: + items: + - key: odh-ca-bundle.crt + path: odh-ca-bundle.crt + name: odh-trusted-ca-bundle + optional: true + name: odh-ca-cert + rayVersion: 2.47.1 + workerGroupSpecs: + - groupName: small-group-test-rc-b + maxReplicas: 1 + minReplicas: 1 + rayStartParams: + block: 'true' + 
num-gpus: '0' + resources: '"{}"' + replicas: 1 + template: + metadata: + annotations: + key: value + labels: + key: value + spec: + containers: + - image: "${image}" + lifecycle: + preStop: + exec: + command: + - /bin/sh + - -c + - ray stop + name: machine-learning + resources: + limits: + cpu: 1 + memory: 2G + requests: + cpu: 1 + memory: 2G + volumeMounts: + - mountPath: /etc/pki/tls/certs/odh-trusted-ca-bundle.crt + name: odh-trusted-ca-cert + subPath: odh-trusted-ca-bundle.crt + - mountPath: /etc/ssl/certs/odh-trusted-ca-bundle.crt + name: odh-trusted-ca-cert + subPath: odh-trusted-ca-bundle.crt + - mountPath: /etc/pki/tls/certs/odh-ca-bundle.crt + name: odh-ca-cert + subPath: odh-ca-bundle.crt + - mountPath: /etc/ssl/certs/odh-ca-bundle.crt + name: odh-ca-cert + subPath: odh-ca-bundle.crt + imagePullSecrets: [] + volumes: + - configMap: + items: + - key: ca-bundle.crt + path: odh-trusted-ca-bundle.crt + name: odh-trusted-ca-bundle + optional: true + name: odh-trusted-ca-cert + - configMap: + items: + - key: odh-ca-bundle.crt + path: odh-ca-bundle.crt + name: odh-trusted-ca-bundle + optional: true + name: odh-ca-cert diff --git a/tests/unit_test.py b/tests/unit_test.py deleted file mode 100644 index b046b1f1..00000000 --- a/tests/unit_test.py +++ /dev/null @@ -1,2294 +0,0 @@ -# Copyright 2022 IBM, Red Hat -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from pathlib import Path -import sys -import filecmp -import os -import re - -parent = Path(__file__).resolve().parents[1] -sys.path.append(str(parent) + "/src") - -from kubernetes import client, config -from codeflare_sdk.cluster.awload import AWManager -from codeflare_sdk.cluster.cluster import ( - Cluster, - ClusterConfiguration, - list_all_clusters, - list_all_queued, - _copy_to_ray, - get_cluster, - _app_wrapper_status, - _ray_cluster_status, -) -from codeflare_sdk.cluster.auth import ( - TokenAuthentication, - Authentication, - KubeConfigFileAuthentication, - config_check, - api_config_handler, -) -from codeflare_sdk.utils.pretty_print import ( - print_no_resources_found, - print_app_wrappers_status, - print_cluster_status, - print_clusters, -) -from codeflare_sdk.cluster.model import ( - AppWrapper, - RayCluster, - AppWrapperStatus, - RayClusterStatus, - CodeFlareClusterStatus, -) -from codeflare_sdk.job.jobs import ( - JobDefinition, - Job, - DDPJobDefinition, - DDPJob, - torchx_runner, -) -from codeflare_sdk.utils.generate_cert import ( - generate_ca_cert, - generate_tls_cert, - export_env, -) - -from unit_test_support import ( - createClusterWithConfig, - createTestDDP, - createDDPJob_no_cluster, - createClusterConfig, - createDDPJob_with_cluster, -) - -import openshift -from openshift.selector import Selector -import ray -from torchx.specs import AppDryRunInfo, AppDef -from torchx.runner import get_runner, Runner -from torchx.schedulers.ray_scheduler import RayJob -from torchx.schedulers.kubernetes_mcad_scheduler import KubernetesMCADJob -import pytest -import yaml - - -# For mocking openshift client results -fake_res = openshift.Result("fake") - - -def arg_side_effect(*args): - fake_res.high_level_operation = args - return fake_res - - -def att_side_effect(self): - return self.high_level_operation - - -def test_token_auth_creation(): - try: - token_auth = TokenAuthentication(token="token", server="server") - assert token_auth.token == "token" - 
assert token_auth.server == "server" - assert token_auth.skip_tls == False - assert token_auth.ca_cert_path == None - - token_auth = TokenAuthentication(token="token", server="server", skip_tls=True) - assert token_auth.token == "token" - assert token_auth.server == "server" - assert token_auth.skip_tls == True - assert token_auth.ca_cert_path == None - - token_auth = TokenAuthentication(token="token", server="server", skip_tls=False) - assert token_auth.token == "token" - assert token_auth.server == "server" - assert token_auth.skip_tls == False - assert token_auth.ca_cert_path == None - - token_auth = TokenAuthentication( - token="token", server="server", skip_tls=False, ca_cert_path="path/to/cert" - ) - assert token_auth.token == "token" - assert token_auth.server == "server" - assert token_auth.skip_tls == False - assert token_auth.ca_cert_path == "path/to/cert" - - except Exception: - assert 0 == 1 - - -def test_token_auth_login_logout(mocker): - mocker.patch.object(client, "ApiClient") - - token_auth = TokenAuthentication( - token="testtoken", server="testserver:6443", skip_tls=False, ca_cert_path=None - ) - assert token_auth.login() == ("Logged into testserver:6443") - assert token_auth.logout() == ("Successfully logged out of testserver:6443") - - -def test_token_auth_login_tls(mocker): - mocker.patch.object(client, "ApiClient") - - token_auth = TokenAuthentication( - token="testtoken", server="testserver:6443", skip_tls=True, ca_cert_path=None - ) - assert token_auth.login() == ("Logged into testserver:6443") - token_auth = TokenAuthentication( - token="testtoken", server="testserver:6443", skip_tls=False, ca_cert_path=None - ) - assert token_auth.login() == ("Logged into testserver:6443") - token_auth = TokenAuthentication( - token="testtoken", - server="testserver:6443", - skip_tls=False, - ca_cert_path="path/to/cert", - ) - assert token_auth.login() == ("Logged into testserver:6443") - - -def test_config_check_no_config_file(mocker): - 
mocker.patch("os.path.expanduser", return_value="/mock/home/directory") - mocker.patch("os.path.isfile", return_value=False) - mocker.patch("codeflare_sdk.cluster.auth.config_path", None) - mocker.patch("codeflare_sdk.cluster.auth.api_client", None) - - with pytest.raises(PermissionError) as e: - config_check() - - -def test_config_check_with_incluster_config(mocker): - mocker.patch("os.path.expanduser", return_value="/mock/home/directory") - mocker.patch("os.path.isfile", return_value=False) - mocker.patch.dict(os.environ, {"KUBERNETES_PORT": "number"}) - mocker.patch("kubernetes.config.load_incluster_config", side_effect=None) - mocker.patch("codeflare_sdk.cluster.auth.config_path", None) - mocker.patch("codeflare_sdk.cluster.auth.api_client", None) - - result = config_check() - assert result == None - - -def test_config_check_with_existing_config_file(mocker): - mocker.patch("os.path.expanduser", return_value="/mock/home/directory") - mocker.patch("os.path.isfile", return_value=True) - mocker.patch("kubernetes.config.load_kube_config", side_effect=None) - mocker.patch("codeflare_sdk.cluster.auth.config_path", None) - mocker.patch("codeflare_sdk.cluster.auth.api_client", None) - - result = config_check() - assert result == None - - -def test_config_check_with_config_path_and_no_api_client(mocker): - mocker.patch("codeflare_sdk.cluster.auth.config_path", "/mock/config/path") - mocker.patch("codeflare_sdk.cluster.auth.api_client", None) - result = config_check() - assert result == "/mock/config/path" - - -def test_load_kube_config(mocker): - mocker.patch.object(config, "load_kube_config") - kube_config_auth = KubeConfigFileAuthentication( - kube_config_path="/path/to/your/config" - ) - response = kube_config_auth.load_kube_config() - - assert ( - response - == "Loaded user config file at path %s" % kube_config_auth.kube_config_path - ) - - kube_config_auth = KubeConfigFileAuthentication(kube_config_path=None) - response = kube_config_auth.load_kube_config() - 
assert response == "Please specify a config file path" - - -def test_auth_coverage(): - abstract = Authentication() - abstract.login() - abstract.logout() - - -def test_config_creation(): - config = createClusterConfig() - - assert config.name == "unit-test-cluster" and config.namespace == "ns" - assert config.num_workers == 2 - assert config.min_cpus == 3 and config.max_cpus == 4 - assert config.min_memory == 5 and config.max_memory == 6 - assert config.num_gpus == 7 - assert config.image == "quay.io/project-codeflare/ray:2.5.0-py38-cu116" - assert config.template == f"{parent}/src/codeflare_sdk/templates/base-template.yaml" - assert config.instascale - assert config.machine_types == ["cpu.small", "gpu.large"] - assert config.image_pull_secrets == ["unit-test-pull-secret"] - assert config.dispatch_priority == None - - -def test_cluster_creation(): - cluster = createClusterWithConfig() - assert cluster.app_wrapper_yaml == "unit-test-cluster.yaml" - assert cluster.app_wrapper_name == "unit-test-cluster" - assert filecmp.cmp( - "unit-test-cluster.yaml", f"{parent}/tests/test-case.yaml", shallow=True - ) - - -def test_cluster_creation_priority(mocker): - mocker.patch("kubernetes.config.load_kube_config", return_value="ignore") - mocker.patch( - "kubernetes.client.CustomObjectsApi.list_cluster_custom_object", - return_value={"items": [{"metadata": {"name": "default"}, "value": 10}]}, - ) - config = createClusterConfig() - config.name = "prio-test-cluster" - config.dispatch_priority = "default" - cluster = Cluster(config) - assert cluster.app_wrapper_yaml == "prio-test-cluster.yaml" - assert cluster.app_wrapper_name == "prio-test-cluster" - assert filecmp.cmp( - "prio-test-cluster.yaml", f"{parent}/tests/test-case-prio.yaml", shallow=True - ) - - -def test_default_cluster_creation(mocker): - mocker.patch( - "codeflare_sdk.cluster.cluster.get_current_namespace", - return_value="opendatahub", - ) - default_config = ClusterConfiguration( - name="unit-test-default-cluster", 
- ) - cluster = Cluster(default_config) - - assert cluster.app_wrapper_yaml == "unit-test-default-cluster.yaml" - assert cluster.app_wrapper_name == "unit-test-default-cluster" - assert cluster.config.namespace == "opendatahub" - - -def arg_check_apply_effect(group, version, namespace, plural, body, *args): - assert group == "workload.codeflare.dev" - assert version == "v1beta1" - assert namespace == "ns" - assert plural == "appwrappers" - with open("unit-test-cluster.yaml") as f: - aw = yaml.load(f, Loader=yaml.FullLoader) - assert body == aw - assert args == tuple() - - -def arg_check_del_effect(group, version, namespace, plural, name, *args): - assert group == "workload.codeflare.dev" - assert version == "v1beta1" - assert namespace == "ns" - assert plural == "appwrappers" - assert name == "unit-test-cluster" - assert args == tuple() - - -def test_cluster_up_down(mocker): - mocker.patch("kubernetes.config.load_kube_config", return_value="ignore") - mocker.patch( - "kubernetes.client.CustomObjectsApi.create_namespaced_custom_object", - side_effect=arg_check_apply_effect, - ) - mocker.patch( - "kubernetes.client.CustomObjectsApi.delete_namespaced_custom_object", - side_effect=arg_check_del_effect, - ) - mocker.patch( - "kubernetes.client.CustomObjectsApi.list_cluster_custom_object", - return_value={"items": []}, - ) - cluster = cluster = createClusterWithConfig() - cluster.up() - cluster.down() - - -def aw_status_fields(group, version, namespace, plural, *args): - assert group == "workload.codeflare.dev" - assert version == "v1beta1" - assert namespace == "test-ns" - assert plural == "appwrappers" - assert args == tuple() - return {"items": []} - - -def test_aw_status(mocker): - mocker.patch("kubernetes.config.load_kube_config", return_value="ignore") - mocker.patch( - "kubernetes.client.CustomObjectsApi.list_namespaced_custom_object", - side_effect=aw_status_fields, - ) - aw = _app_wrapper_status("test-aw", "test-ns") - assert aw == None - - -def 
rc_status_fields(group, version, namespace, plural, *args): - assert group == "ray.io" - assert version == "v1alpha1" - assert namespace == "test-ns" - assert plural == "rayclusters" - assert args == tuple() - return {"items": []} - - -def test_rc_status(mocker): - mocker.patch("kubernetes.config.load_kube_config", return_value="ignore") - mocker.patch( - "kubernetes.client.CustomObjectsApi.list_namespaced_custom_object", - side_effect=rc_status_fields, - ) - rc = _ray_cluster_status("test-rc", "test-ns") - assert rc == None - - -def uri_retreival(group, version, namespace, plural, *args): - assert group == "route.openshift.io" - assert version == "v1" - assert namespace == "ns" - assert plural == "routes" - assert args == tuple() - return { - "items": [ - { - "metadata": {"name": "ray-dashboard-unit-test-cluster"}, - "spec": { - "host": "ray-dashboard-unit-test-cluster-ns.apps.cluster.awsroute.org" - }, - } - ] - } - - -def test_cluster_uris(mocker): - mocker.patch("kubernetes.config.load_kube_config", return_value="ignore") - mocker.patch( - "kubernetes.client.CustomObjectsApi.list_namespaced_custom_object", - side_effect=uri_retreival, - ) - - cluster = cluster = createClusterWithConfig() - assert cluster.cluster_uri() == "ray://unit-test-cluster-head-svc.ns.svc:10001" - assert ( - cluster.cluster_dashboard_uri() - == "http://ray-dashboard-unit-test-cluster-ns.apps.cluster.awsroute.org" - ) - cluster.config.name = "fake" - assert ( - cluster.cluster_dashboard_uri() - == "Dashboard route not available yet, have you run cluster.up()?" 
- ) - - -def test_local_client_url(mocker): - mocker.patch( - "kubernetes.client.CustomObjectsApi.get_cluster_custom_object", - return_value={"spec": {"domain": ""}}, - ) - mocker.patch( - "codeflare_sdk.cluster.cluster._get_ingress_domain", - return_value="apps.cluster.awsroute.org", - ) - mocker.patch( - "codeflare_sdk.cluster.cluster.Cluster.create_app_wrapper", - return_value="unit-test-cluster-localinter.yaml", - ) - - cluster_config = ClusterConfiguration( - name="unit-test-cluster-localinter", namespace="ns", local_interactive=True - ) - cluster = Cluster(cluster_config) - assert ( - cluster.local_client_url() - == "ray://rayclient-unit-test-cluster-localinter-ns.apps.cluster.awsroute.org" - ) - - -def ray_addr(self, *args): - return self._address - - -def test_ray_job_wrapping(mocker): - mocker.patch("kubernetes.config.load_kube_config", return_value="ignore") - mocker.patch( - "kubernetes.client.CustomObjectsApi.list_namespaced_custom_object", - side_effect=uri_retreival, - ) - cluster = cluster = createClusterWithConfig() - - mocker.patch( - "ray.job_submission.JobSubmissionClient._check_connection_and_version_with_url", - return_value="None", - ) - mock_res = mocker.patch.object( - ray.job_submission.JobSubmissionClient, "list_jobs", autospec=True - ) - mock_res.side_effect = ray_addr - assert cluster.list_jobs() == cluster.cluster_dashboard_uri() - - mock_res = mocker.patch.object( - ray.job_submission.JobSubmissionClient, "get_job_status", autospec=True - ) - mock_res.side_effect = ray_addr - assert cluster.job_status("fake_id") == cluster.cluster_dashboard_uri() - - mock_res = mocker.patch.object( - ray.job_submission.JobSubmissionClient, "get_job_logs", autospec=True - ) - mock_res.side_effect = ray_addr - assert cluster.job_logs("fake_id") == cluster.cluster_dashboard_uri() - - -def test_print_no_resources(capsys): - try: - print_no_resources_found() - except: - assert 1 == 0 - captured = capsys.readouterr() - assert captured.out == ( - 
"╭──────────────────────────────────────────────────────────────────────────────╮\n" - "│ No resources found, have you run cluster.up() yet? │\n" - "╰──────────────────────────────────────────────────────────────────────────────╯\n" - ) - - -def test_print_no_cluster(capsys): - try: - print_cluster_status(None) - except: - assert 1 == 0 - captured = capsys.readouterr() - assert captured.out == ( - "╭──────────────────────────────────────────────────────────────────────────────╮\n" - "│ No resources found, have you run cluster.up() yet? │\n" - "╰──────────────────────────────────────────────────────────────────────────────╯\n" - ) - - -def test_print_appwrappers(capsys): - aw1 = AppWrapper( - name="awtest1", - status=AppWrapperStatus.PENDING, - can_run=False, - job_state="queue-state", - ) - aw2 = AppWrapper( - name="awtest2", - status=AppWrapperStatus.RUNNING, - can_run=False, - job_state="queue-state", - ) - try: - print_app_wrappers_status([aw1, aw2]) - except: - assert 1 == 0 - captured = capsys.readouterr() - assert captured.out == ( - "╭───────────────────────╮\n" - "│ 🚀 Cluster Queue │\n" - "│ Status 🚀 │\n" - "│ +---------+---------+ │\n" - "│ | Name | Status | │\n" - "│ +=========+=========+ │\n" - "│ | awtest1 | pending | │\n" - "│ | | | │\n" - "│ | awtest2 | running | │\n" - "│ | | | │\n" - "│ +---------+---------+ │\n" - "╰───────────────────────╯\n" - ) - - -def test_ray_details(mocker, capsys): - ray1 = RayCluster( - name="raytest1", - status=RayClusterStatus.READY, - workers=1, - worker_mem_min=2, - worker_mem_max=2, - worker_cpu=1, - worker_gpu=0, - namespace="ns", - dashboard="fake-uri", - ) - mocker.patch( - "codeflare_sdk.cluster.cluster.Cluster.status", - return_value=(False, CodeFlareClusterStatus.UNKNOWN), - ) - mocker.patch( - "codeflare_sdk.cluster.cluster.Cluster.cluster_dashboard_uri", - return_value="", - ) - cf = Cluster(ClusterConfiguration(name="raytest2", namespace="ns")) - captured = capsys.readouterr() - ray2 = _copy_to_ray(cf) - 
details = cf.details() - assert details == ray2 - assert ray2.name == "raytest2" - assert ray1.namespace == ray2.namespace - assert ray1.workers == ray2.workers - assert ray1.worker_mem_min == ray2.worker_mem_min - assert ray1.worker_mem_max == ray2.worker_mem_max - assert ray1.worker_cpu == ray2.worker_cpu - assert ray1.worker_gpu == ray2.worker_gpu - try: - print_clusters([ray1, ray2]) - print_cluster_status(ray1) - print_cluster_status(ray2) - except: - assert 0 == 1 - captured = capsys.readouterr() - assert captured.out == ( - " 🚀 CodeFlare Cluster Details 🚀 \n" - " \n" - " ╭───────────────────────────────────────────────────────────────╮ \n" - " │ Name │ \n" - " │ raytest2 Inactive ❌ │ \n" - " │ │ \n" - " │ URI: ray://raytest2-head-svc.ns.svc:10001 │ \n" - " │ │ \n" - " │ Dashboard🔗 │ \n" - " │ │ \n" - " │ Cluster Resources │ \n" - " │ ╭── Workers ──╮ ╭───────── Worker specs(each) ─────────╮ │ \n" - " │ │ # Workers │ │ Memory CPU GPU │ │ \n" - " │ │ │ │ │ │ \n" - " │ │ 1 │ │ 2~2 1 0 │ │ \n" - " │ │ │ │ │ │ \n" - " │ ╰─────────────╯ ╰──────────────────────────────────────╯ │ \n" - " ╰───────────────────────────────────────────────────────────────╯ \n" - " 🚀 CodeFlare Cluster Details 🚀 \n" - " \n" - " ╭───────────────────────────────────────────────────────────────╮ \n" - " │ Name │ \n" - " │ raytest1 Active ✅ │ \n" - " │ │ \n" - " │ URI: ray://raytest1-head-svc.ns.svc:10001 │ \n" - " │ │ \n" - " │ Dashboard🔗 │ \n" - " │ │ \n" - " │ Cluster Resources │ \n" - " │ ╭── Workers ──╮ ╭───────── Worker specs(each) ─────────╮ │ \n" - " │ │ # Workers │ │ Memory CPU GPU │ │ \n" - " │ │ │ │ │ │ \n" - " │ │ 1 │ │ 2~2 1 0 │ │ \n" - " │ │ │ │ │ │ \n" - " │ ╰─────────────╯ ╰──────────────────────────────────────╯ │ \n" - " ╰───────────────────────────────────────────────────────────────╯ \n" - "╭───────────────────────────────────────────────────────────────╮\n" - "│ Name │\n" - "│ raytest2 Inactive ❌ │\n" - "│ │\n" - "│ URI: ray://raytest2-head-svc.ns.svc:10001 │\n" - "│ │\n" 
- "│ Dashboard🔗 │\n" - "│ │\n" - "│ Cluster Resources │\n" - "│ ╭── Workers ──╮ ╭───────── Worker specs(each) ─────────╮ │\n" - "│ │ # Workers │ │ Memory CPU GPU │ │\n" - "│ │ │ │ │ │\n" - "│ │ 1 │ │ 2~2 1 0 │ │\n" - "│ │ │ │ │ │\n" - "│ ╰─────────────╯ ╰──────────────────────────────────────╯ │\n" - "╰───────────────────────────────────────────────────────────────╯\n" - " 🚀 CodeFlare Cluster Status 🚀 \n" - " \n" - " ╭──────────────────────────────────────────────────────────╮ \n" - " │ Name │ \n" - " │ raytest1 Active ✅ │ \n" - " │ │ \n" - " │ URI: ray://raytest1-head-svc.ns.svc:10001 │ \n" - " │ │ \n" - " │ Dashboard🔗 │ \n" - " │ │ \n" - " ╰──────────────────────────────────────────────────────────╯ \n" - " 🚀 CodeFlare Cluster Status 🚀 \n" - " \n" - " ╭────────────────────────────────────────────────────────────╮ \n" - " │ Name │ \n" - " │ raytest2 Inactive ❌ │ \n" - " │ │ \n" - " │ URI: ray://raytest2-head-svc.ns.svc:10001 │ \n" - " │ │ \n" - " │ Dashboard🔗 │ \n" - " │ │ \n" - " ╰────────────────────────────────────────────────────────────╯ \n" - ) - - -def act_side_effect_list(self): - print([self]) - self.out = str(self.high_level_operation) - return [self] - - -def get_obj_none(group, version, namespace, plural): - return {"items": []} - - -def get_ray_obj(group, version, namespace, plural, cls=None): - api_obj = { - "items": [ - { - "apiVersion": "ray.io/v1alpha1", - "kind": "RayCluster", - "metadata": { - "creationTimestamp": "2023-02-22T16:26:07Z", - "generation": 1, - "labels": { - "appwrapper.mcad.ibm.com": "quicktest", - "controller-tools.k8s.io": "1.0", - "resourceName": "quicktest", - "orderedinstance": "m4.xlarge_g4dn.xlarge", - }, - "managedFields": [ - { - "apiVersion": "ray.io/v1alpha1", - "fieldsType": "FieldsV1", - "fieldsV1": { - "f:metadata": { - "f:labels": { - ".": {}, - "f:appwrapper.mcad.ibm.com": {}, - "f:controller-tools.k8s.io": {}, - "f:resourceName": {}, - }, - "f:ownerReferences": { - ".": {}, - 
'k:{"uid":"6334fc1b-471e-4876-8e7b-0b2277679235"}': {}, - }, - }, - "f:spec": { - ".": {}, - "f:autoscalerOptions": { - ".": {}, - "f:idleTimeoutSeconds": {}, - "f:imagePullPolicy": {}, - "f:resources": { - ".": {}, - "f:limits": { - ".": {}, - "f:cpu": {}, - "f:memory": {}, - }, - "f:requests": { - ".": {}, - "f:cpu": {}, - "f:memory": {}, - }, - }, - "f:upscalingMode": {}, - }, - "f:enableInTreeAutoscaling": {}, - "f:headGroupSpec": { - ".": {}, - "f:rayStartParams": { - ".": {}, - "f:block": {}, - "f:dashboard-host": {}, - "f:num-gpus": {}, - }, - "f:serviceType": {}, - "f:template": { - ".": {}, - "f:spec": {".": {}, "f:containers": {}}, - }, - }, - "f:rayVersion": {}, - "f:workerGroupSpecs": {}, - }, - }, - "manager": "mcad-controller", - "operation": "Update", - "time": "2023-02-22T16:26:07Z", - }, - { - "apiVersion": "ray.io/v1alpha1", - "fieldsType": "FieldsV1", - "fieldsV1": { - "f:status": { - ".": {}, - "f:availableWorkerReplicas": {}, - "f:desiredWorkerReplicas": {}, - "f:endpoints": { - ".": {}, - "f:client": {}, - "f:dashboard": {}, - "f:gcs": {}, - }, - "f:lastUpdateTime": {}, - "f:maxWorkerReplicas": {}, - "f:minWorkerReplicas": {}, - "f:state": {}, - } - }, - "manager": "manager", - "operation": "Update", - "subresource": "status", - "time": "2023-02-22T16:26:16Z", - }, - ], - "name": "quicktest", - "namespace": "ns", - "ownerReferences": [ - { - "apiVersion": "workload.codeflare.dev/v1beta1", - "blockOwnerDeletion": True, - "controller": True, - "kind": "AppWrapper", - "name": "quicktest", - "uid": "6334fc1b-471e-4876-8e7b-0b2277679235", - } - ], - "resourceVersion": "9482407", - "uid": "44d45d1f-26c8-43e7-841f-831dbd8c1285", - }, - "spec": { - "autoscalerOptions": { - "idleTimeoutSeconds": 60, - "imagePullPolicy": "Always", - "resources": { - "limits": {"cpu": "500m", "memory": "512Mi"}, - "requests": {"cpu": "500m", "memory": "512Mi"}, - }, - "upscalingMode": "Default", - }, - "enableInTreeAutoscaling": False, - "headGroupSpec": { - 
"rayStartParams": { - "block": "true", - "dashboard-host": "0.0.0.0", - "num-gpus": "0", - }, - "serviceType": "ClusterIP", - "template": { - "spec": { - "containers": [ - { - "image": "ghcr.io/foundation-model-stack/base:ray2.1.0-py38-gpu-pytorch1.12.0cu116-20221213-193103", - "imagePullPolicy": "Always", - "lifecycle": { - "preStop": { - "exec": { - "command": [ - "/bin/sh", - "-c", - "ray stop", - ] - } - } - }, - "name": "ray-head", - "ports": [ - { - "containerPort": 6379, - "name": "gcs", - "protocol": "TCP", - }, - { - "containerPort": 8265, - "name": "dashboard", - "protocol": "TCP", - }, - { - "containerPort": 10001, - "name": "client", - "protocol": "TCP", - }, - ], - "resources": { - "limits": { - "cpu": 2, - "memory": "8G", - "nvidia.com/gpu": 0, - }, - "requests": { - "cpu": 2, - "memory": "8G", - "nvidia.com/gpu": 0, - }, - }, - } - ] - } - }, - }, - "rayVersion": "1.12.0", - "workerGroupSpecs": [ - { - "groupName": "small-group-quicktest", - "maxReplicas": 1, - "minReplicas": 1, - "rayStartParams": {"block": "true", "num-gpus": "0"}, - "replicas": 1, - "template": { - "metadata": { - "annotations": {"key": "value"}, - "labels": {"key": "value"}, - }, - "spec": { - "containers": [ - { - "env": [ - { - "name": "MY_POD_IP", - "valueFrom": { - "fieldRef": { - "fieldPath": "status.podIP" - } - }, - } - ], - "image": "ghcr.io/foundation-model-stack/base:ray2.1.0-py38-gpu-pytorch1.12.0cu116-20221213-193103", - "lifecycle": { - "preStop": { - "exec": { - "command": [ - "/bin/sh", - "-c", - "ray stop", - ] - } - } - }, - "name": "machine-learning", - "resources": { - "limits": { - "cpu": 1, - "memory": "2G", - "nvidia.com/gpu": 0, - }, - "requests": { - "cpu": 1, - "memory": "2G", - "nvidia.com/gpu": 0, - }, - }, - } - ], - "initContainers": [ - { - "command": [ - "sh", - "-c", - "until nslookup $RAY_IP.$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace).svc.cluster.local; do echo waiting for myservice; sleep 2; done", - ], - "image": 
"busybox:1.28", - "name": "init-myservice", - } - ], - }, - }, - } - ], - }, - "status": { - "availableWorkerReplicas": 2, - "desiredWorkerReplicas": 1, - "endpoints": { - "client": "10001", - "dashboard": "8265", - "gcs": "6379", - }, - "lastUpdateTime": "2023-02-22T16:26:16Z", - "maxWorkerReplicas": 1, - "minWorkerReplicas": 1, - "state": "ready", - }, - } - ] - } - return api_obj - - -def get_aw_obj(group, version, namespace, plural): - api_obj1 = { - "items": [ - { - "apiVersion": "workload.codeflare.dev/v1beta1", - "kind": "AppWrapper", - "metadata": { - "annotations": { - "kubectl.kubernetes.io/last-applied-configuration": '{"apiVersion":"codeflare.dev/v1beta1","kind":"AppWrapper","metadata":{"annotations":{},"name":"quicktest1","namespace":"ns"},"spec":{"priority":9,"resources":{"GenericItems":[{"custompodresources":[{"limits":{"cpu":2,"memory":"8G","nvidia.com/gpu":0},"replicas":1,"requests":{"cpu":2,"memory":"8G","nvidia.com/gpu":0}},{"limits":{"cpu":1,"memory":"2G","nvidia.com/gpu":0},"replicas":1,"requests":{"cpu":1,"memory":"2G","nvidia.com/gpu":0}}],"generictemplate":{"apiVersion":"ray.io/v1alpha1","kind":"RayCluster","metadata":{"labels":{"appwrapper.codeflare.dev":"quicktest1","controller-tools.k8s.io":"1.0"},"name":"quicktest1","namespace":"ns"},"spec":{"autoscalerOptions":{"idleTimeoutSeconds":60,"imagePullPolicy":"Always","resources":{"limits":{"cpu":"500m","memory":"512Mi"},"requests":{"cpu":"500m","memory":"512Mi"}},"upscalingMode":"Default"},"enableInTreeAutoscaling":false,"headGroupSpec":{"rayStartParams":{"block":"true","dashboard-host":"0.0.0.0","num-gpus":"0"},"serviceType":"ClusterIP","template":{"spec":{"containers":[{"image":"ghcr.io/foundation-model-stack/base:ray2.1.0-py38-gpu-pytorch1.12.0cu116-20221213-193103","imagePullPolicy":"Always","lifecycle":{"preStop":{"exec":{"command":["/bin/sh","-c","ray 
stop"]}}},"name":"ray-head","ports":[{"containerPort":6379,"name":"gcs"},{"containerPort":8265,"name":"dashboard"},{"containerPort":10001,"name":"client"}],"resources":{"limits":{"cpu":2,"memory":"8G","nvidia.com/gpu":0},"requests":{"cpu":2,"memory":"8G","nvidia.com/gpu":0}}}]}}},"rayVersion":"1.12.0","workerGroupSpecs":[{"groupName":"small-group-quicktest","maxReplicas":1,"minReplicas":1,"rayStartParams":{"block":"true","num-gpus":"0"},"replicas":1,"template":{"metadata":{"annotations":{"key":"value"},"labels":{"key":"value"}},"spec":{"containers":[{"env":[{"name":"MY_POD_IP","valueFrom":{"fieldRef":{"fieldPath":"status.podIP"}}}],"image":"ghcr.io/foundation-model-stack/base:ray2.1.0-py38-gpu-pytorch1.12.0cu116-20221213-193103","lifecycle":{"preStop":{"exec":{"command":["/bin/sh","-c","ray stop"]}}},"name":"machine-learning","resources":{"limits":{"cpu":1,"memory":"2G","nvidia.com/gpu":0},"requests":{"cpu":1,"memory":"2G","nvidia.com/gpu":0}}}],"initContainers":[{"command":["sh","-c","until nslookup $RAY_IP.$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace).svc.cluster.local; do echo waiting for myservice; sleep 2; done"],"image":"busybox:1.28","name":"init-myservice"}]}}}]}},"replicas":1},{"generictemplate":{"apiVersion":"route.openshift.io/v1","kind":"Route","metadata":{"labels":{"odh-ray-cluster-service":"quicktest-head-svc"},"name":"ray-dashboard-quicktest","namespace":"default"},"spec":{"port":{"targetPort":"dashboard"},"to":{"kind":"Service","name":"quicktest-head-svc"}}},"replica":1}],"Items":[]}}}\n' - }, - "creationTimestamp": "2023-02-22T16:26:07Z", - "generation": 4, - "managedFields": [ - { - "apiVersion": "workload.codeflare.dev/v1beta1", - "fieldsType": "FieldsV1", - "fieldsV1": { - "f:spec": { - "f:resources": { - "f:GenericItems": {}, - "f:metadata": {}, - }, - "f:schedulingSpec": {}, - "f:service": {".": {}, "f:spec": {}}, - }, - "f:status": { - ".": {}, - "f:canrun": {}, - "f:conditions": {}, - "f:controllerfirsttimestamp": {}, - 
"f:filterignore": {}, - "f:queuejobstate": {}, - "f:sender": {}, - "f:state": {}, - "f:systempriority": {}, - }, - }, - "manager": "Go-http-client", - "operation": "Update", - "time": "2023-02-22T16:26:07Z", - }, - { - "apiVersion": "workload.codeflare.dev/v1beta1", - "fieldsType": "FieldsV1", - "fieldsV1": { - "f:metadata": { - "f:annotations": { - ".": {}, - "f:kubectl.kubernetes.io/last-applied-configuration": {}, - } - }, - "f:spec": { - ".": {}, - "f:priority": {}, - "f:resources": {".": {}, "f:Items": {}}, - }, - }, - "manager": "kubectl-client-side-apply", - "operation": "Update", - "time": "2023-02-22T16:26:07Z", - }, - ], - "name": "quicktest1", - "namespace": "ns", - "resourceVersion": "9482384", - "uid": "6334fc1b-471e-4876-8e7b-0b2277679235", - }, - "spec": { - "priority": 9, - "resources": { - "GenericItems": [ - { - "allocated": 0, - "custompodresources": [ - { - "limits": { - "cpu": "2", - "memory": "8G", - "nvidia.com/gpu": "0", - }, - "replicas": 1, - "requests": { - "cpu": "2", - "memory": "8G", - "nvidia.com/gpu": "0", - }, - }, - { - "limits": { - "cpu": "1", - "memory": "2G", - "nvidia.com/gpu": "0", - }, - "replicas": 1, - "requests": { - "cpu": "1", - "memory": "2G", - "nvidia.com/gpu": "0", - }, - }, - ], - "generictemplate": { - "apiVersion": "ray.io/v1alpha1", - "kind": "RayCluster", - "metadata": { - "labels": { - "appwrapper.mcad.ibm.com": "quicktest1", - "controller-tools.k8s.io": "1.0", - }, - "name": "quicktest1", - "namespace": "ns", - }, - "spec": { - "autoscalerOptions": { - "idleTimeoutSeconds": 60, - "imagePullPolicy": "Always", - "resources": { - "limits": { - "cpu": "500m", - "memory": "512Mi", - }, - "requests": { - "cpu": "500m", - "memory": "512Mi", - }, - }, - "upscalingMode": "Default", - }, - "enableInTreeAutoscaling": False, - "headGroupSpec": { - "rayStartParams": { - "block": "true", - "dashboard-host": "0.0.0.0", - "num-gpus": "0", - }, - "serviceType": "ClusterIP", - "template": { - "spec": { - "containers": [ - { - 
"image": "ghcr.io/foundation-model-stack/base:ray2.1.0-py38-gpu-pytorch1.12.0cu116-20221213-193103", - "imagePullPolicy": "Always", - "lifecycle": { - "preStop": { - "exec": { - "command": [ - "/bin/sh", - "-c", - "ray stop", - ] - } - } - }, - "name": "ray-head", - "ports": [ - { - "containerPort": 6379, - "name": "gcs", - }, - { - "containerPort": 8265, - "name": "dashboard", - }, - { - "containerPort": 10001, - "name": "client", - }, - ], - "resources": { - "limits": { - "cpu": 2, - "memory": "8G", - "nvidia.com/gpu": 0, - }, - "requests": { - "cpu": 2, - "memory": "8G", - "nvidia.com/gpu": 0, - }, - }, - } - ] - } - }, - }, - "rayVersion": "1.12.0", - "workerGroupSpecs": [ - { - "groupName": "small-group-quicktest", - "maxReplicas": 1, - "minReplicas": 1, - "rayStartParams": { - "block": "true", - "num-gpus": "0", - }, - "replicas": 1, - "template": { - "metadata": { - "annotations": {"key": "value"}, - "labels": {"key": "value"}, - }, - "spec": { - "containers": [ - { - "env": [ - { - "name": "MY_POD_IP", - "valueFrom": { - "fieldRef": { - "fieldPath": "status.podIP" - } - }, - } - ], - "image": "ghcr.io/foundation-model-stack/base:ray2.1.0-py38-gpu-pytorch1.12.0cu116-20221213-193103", - "lifecycle": { - "preStop": { - "exec": { - "command": [ - "/bin/sh", - "-c", - "ray stop", - ] - } - } - }, - "name": "machine-learning", - "resources": { - "limits": { - "cpu": 1, - "memory": "2G", - "nvidia.com/gpu": 0, - }, - "requests": { - "cpu": 1, - "memory": "2G", - "nvidia.com/gpu": 0, - }, - }, - } - ], - "initContainers": [ - { - "command": [ - "sh", - "-c", - "until nslookup $RAY_IP.$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace).svc.cluster.local; do echo waiting for myservice; sleep 2; done", - ], - "image": "busybox:1.28", - "name": "init-myservice", - } - ], - }, - }, - } - ], - }, - }, - "metadata": {}, - "priority": 0, - "priorityslope": 0, - "replicas": 1, - }, - { - "allocated": 0, - "generictemplate": { - "apiVersion": 
"route.openshift.io/v1", - "kind": "Route", - "metadata": { - "labels": { - "odh-ray-cluster-service": "quicktest-head-svc" - }, - "name": "ray-dashboard-quicktest", - "namespace": "default", - }, - "spec": { - "port": {"targetPort": "dashboard"}, - "to": { - "kind": "Service", - "name": "quicktest-head-svc", - }, - }, - }, - "metadata": {}, - "priority": 0, - "priorityslope": 0, - }, - ], - "Items": [], - "metadata": {}, - }, - "schedulingSpec": {}, - "service": {"spec": {}}, - }, - "status": { - "canrun": True, - "conditions": [ - { - "lastTransitionMicroTime": "2023-02-22T16:26:07.559447Z", - "lastUpdateMicroTime": "2023-02-22T16:26:07.559447Z", - "status": "True", - "type": "Init", - }, - { - "lastTransitionMicroTime": "2023-02-22T16:26:07.559551Z", - "lastUpdateMicroTime": "2023-02-22T16:26:07.559551Z", - "reason": "AwaitingHeadOfLine", - "status": "True", - "type": "Queueing", - }, - { - "lastTransitionMicroTime": "2023-02-22T16:26:13.220564Z", - "lastUpdateMicroTime": "2023-02-22T16:26:13.220564Z", - "reason": "AppWrapperRunnable", - "status": "True", - "type": "Dispatched", - }, - ], - "controllerfirsttimestamp": "2023-02-22T16:26:07.559447Z", - "filterignore": True, - "queuejobstate": "Dispatched", - "sender": "before manageQueueJob - afterEtcdDispatching", - "state": "Running", - "systempriority": 9, - }, - }, - { - "apiVersion": "workload.codeflare.dev/v1beta1", - "kind": "AppWrapper", - "metadata": { - "annotations": { - "kubectl.kubernetes.io/last-applied-configuration": 
'{"apiVersion":"codeflare.dev/v1beta1","kind":"AppWrapper","metadata":{"annotations":{},"name":"quicktest2","namespace":"ns"},"spec":{"priority":9,"resources":{"GenericItems":[{"custompodresources":[{"limits":{"cpu":2,"memory":"8G","nvidia.com/gpu":0},"replicas":1,"requests":{"cpu":2,"memory":"8G","nvidia.com/gpu":0}},{"limits":{"cpu":1,"memory":"2G","nvidia.com/gpu":0},"replicas":1,"requests":{"cpu":1,"memory":"2G","nvidia.com/gpu":0}}],"generictemplate":{"apiVersion":"ray.io/v1alpha1","kind":"RayCluster","metadata":{"labels":{"appwrapper.codeflare.dev":"quicktest2","controller-tools.k8s.io":"1.0"},"name":"quicktest2","namespace":"ns"},"spec":{"autoscalerOptions":{"idleTimeoutSeconds":60,"imagePullPolicy":"Always","resources":{"limits":{"cpu":"500m","memory":"512Mi"},"requests":{"cpu":"500m","memory":"512Mi"}},"upscalingMode":"Default"},"enableInTreeAutoscaling":false,"headGroupSpec":{"rayStartParams":{"block":"true","dashboard-host":"0.0.0.0","num-gpus":"0"},"serviceType":"ClusterIP","template":{"spec":{"containers":[{"image":"ghcr.io/foundation-model-stack/base:ray2.1.0-py38-gpu-pytorch1.12.0cu116-20221213-193103","imagePullPolicy":"Always","lifecycle":{"preStop":{"exec":{"command":["/bin/sh","-c","ray 
stop"]}}},"name":"ray-head","ports":[{"containerPort":6379,"name":"gcs"},{"containerPort":8265,"name":"dashboard"},{"containerPort":10001,"name":"client"}],"resources":{"limits":{"cpu":2,"memory":"8G","nvidia.com/gpu":0},"requests":{"cpu":2,"memory":"8G","nvidia.com/gpu":0}}}]}}},"rayVersion":"1.12.0","workerGroupSpecs":[{"groupName":"small-group-quicktest","maxReplicas":1,"minReplicas":1,"rayStartParams":{"block":"true","num-gpus":"0"},"replicas":1,"template":{"metadata":{"annotations":{"key":"value"},"labels":{"key":"value"}},"spec":{"containers":[{"env":[{"name":"MY_POD_IP","valueFrom":{"fieldRef":{"fieldPath":"status.podIP"}}}],"image":"ghcr.io/foundation-model-stack/base:ray2.1.0-py38-gpu-pytorch1.12.0cu116-20221213-193103","lifecycle":{"preStop":{"exec":{"command":["/bin/sh","-c","ray stop"]}}},"name":"machine-learning","resources":{"limits":{"cpu":1,"memory":"2G","nvidia.com/gpu":0},"requests":{"cpu":1,"memory":"2G","nvidia.com/gpu":0}}}],"initContainers":[{"command":["sh","-c","until nslookup $RAY_IP.$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace).svc.cluster.local; do echo waiting for myservice; sleep 2; done"],"image":"busybox:1.28","name":"init-myservice"}]}}}]}},"replicas":1},{"generictemplate":{"apiVersion":"route.openshift.io/v1","kind":"Route","metadata":{"labels":{"odh-ray-cluster-service":"quicktest-head-svc"},"name":"ray-dashboard-quicktest","namespace":"default"},"spec":{"port":{"targetPort":"dashboard"},"to":{"kind":"Service","name":"quicktest-head-svc"}}},"replica":1}],"Items":[]}}}\n' - }, - "creationTimestamp": "2023-02-22T16:26:07Z", - "generation": 4, - "managedFields": [ - { - "apiVersion": "workload.codeflare.dev/v1beta1", - "fieldsType": "FieldsV1", - "fieldsV1": { - "f:spec": { - "f:resources": { - "f:GenericItems": {}, - "f:metadata": {}, - }, - "f:schedulingSpec": {}, - "f:service": {".": {}, "f:spec": {}}, - }, - "f:status": { - ".": {}, - "f:canrun": {}, - "f:conditions": {}, - "f:controllerfirsttimestamp": {}, - 
"f:filterignore": {}, - "f:queuejobstate": {}, - "f:sender": {}, - "f:state": {}, - "f:systempriority": {}, - }, - }, - "manager": "Go-http-client", - "operation": "Update", - "time": "2023-02-22T16:26:07Z", - }, - { - "apiVersion": "workload.codeflare.dev/v1beta1", - "fieldsType": "FieldsV1", - "fieldsV1": { - "f:metadata": { - "f:annotations": { - ".": {}, - "f:kubectl.kubernetes.io/last-applied-configuration": {}, - } - }, - "f:spec": { - ".": {}, - "f:priority": {}, - "f:resources": {".": {}, "f:Items": {}}, - }, - }, - "manager": "kubectl-client-side-apply", - "operation": "Update", - "time": "2023-02-22T16:26:07Z", - }, - ], - "name": "quicktest2", - "namespace": "ns", - "resourceVersion": "9482384", - "uid": "6334fc1b-471e-4876-8e7b-0b2277679235", - }, - "spec": { - "priority": 9, - "resources": { - "GenericItems": [ - { - "allocated": 0, - "custompodresources": [ - { - "limits": { - "cpu": "2", - "memory": "8G", - "nvidia.com/gpu": "0", - }, - "replicas": 1, - "requests": { - "cpu": "2", - "memory": "8G", - "nvidia.com/gpu": "0", - }, - }, - { - "limits": { - "cpu": "1", - "memory": "2G", - "nvidia.com/gpu": "0", - }, - "replicas": 1, - "requests": { - "cpu": "1", - "memory": "2G", - "nvidia.com/gpu": "0", - }, - }, - ], - "generictemplate": { - "apiVersion": "ray.io/v1alpha1", - "kind": "RayCluster", - "metadata": { - "labels": { - "appwrapper.mcad.ibm.com": "quicktest2", - "controller-tools.k8s.io": "1.0", - }, - "name": "quicktest2", - "namespace": "ns", - }, - "spec": { - "autoscalerOptions": { - "idleTimeoutSeconds": 60, - "imagePullPolicy": "Always", - "resources": { - "limits": { - "cpu": "500m", - "memory": "512Mi", - }, - "requests": { - "cpu": "500m", - "memory": "512Mi", - }, - }, - "upscalingMode": "Default", - }, - "enableInTreeAutoscaling": False, - "headGroupSpec": { - "rayStartParams": { - "block": "true", - "dashboard-host": "0.0.0.0", - "num-gpus": "0", - }, - "serviceType": "ClusterIP", - "template": { - "spec": { - "containers": [ - { - 
"image": "ghcr.io/foundation-model-stack/base:ray2.1.0-py38-gpu-pytorch1.12.0cu116-20221213-193103", - "imagePullPolicy": "Always", - "lifecycle": { - "preStop": { - "exec": { - "command": [ - "/bin/sh", - "-c", - "ray stop", - ] - } - } - }, - "name": "ray-head", - "ports": [ - { - "containerPort": 6379, - "name": "gcs", - }, - { - "containerPort": 8265, - "name": "dashboard", - }, - { - "containerPort": 10001, - "name": "client", - }, - ], - "resources": { - "limits": { - "cpu": 2, - "memory": "8G", - "nvidia.com/gpu": 0, - }, - "requests": { - "cpu": 2, - "memory": "8G", - "nvidia.com/gpu": 0, - }, - }, - } - ] - } - }, - }, - "rayVersion": "1.12.0", - "workerGroupSpecs": [ - { - "groupName": "small-group-quicktest", - "maxReplicas": 1, - "minReplicas": 1, - "rayStartParams": { - "block": "true", - "num-gpus": "0", - }, - "replicas": 1, - "template": { - "metadata": { - "annotations": {"key": "value"}, - "labels": {"key": "value"}, - }, - "spec": { - "containers": [ - { - "env": [ - { - "name": "MY_POD_IP", - "valueFrom": { - "fieldRef": { - "fieldPath": "status.podIP" - } - }, - } - ], - "image": "ghcr.io/foundation-model-stack/base:ray2.1.0-py38-gpu-pytorch1.12.0cu116-20221213-193103", - "lifecycle": { - "preStop": { - "exec": { - "command": [ - "/bin/sh", - "-c", - "ray stop", - ] - } - } - }, - "name": "machine-learning", - "resources": { - "limits": { - "cpu": 1, - "memory": "2G", - "nvidia.com/gpu": 0, - }, - "requests": { - "cpu": 1, - "memory": "2G", - "nvidia.com/gpu": 0, - }, - }, - } - ], - "initContainers": [ - { - "command": [ - "sh", - "-c", - "until nslookup $RAY_IP.$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace).svc.cluster.local; do echo waiting for myservice; sleep 2; done", - ], - "image": "busybox:1.28", - "name": "init-myservice", - } - ], - }, - }, - } - ], - }, - }, - "metadata": {}, - "priority": 0, - "priorityslope": 0, - "replicas": 1, - }, - { - "allocated": 0, - "generictemplate": { - "apiVersion": 
"route.openshift.io/v1", - "kind": "Route", - "metadata": { - "labels": { - "odh-ray-cluster-service": "quicktest-head-svc" - }, - "name": "ray-dashboard-quicktest", - "namespace": "default", - }, - "spec": { - "port": {"targetPort": "dashboard"}, - "to": { - "kind": "Service", - "name": "quicktest-head-svc", - }, - }, - }, - "metadata": {}, - "priority": 0, - "priorityslope": 0, - }, - ], - "Items": [], - "metadata": {}, - }, - "schedulingSpec": {}, - "service": {"spec": {}}, - }, - "status": { - "canrun": True, - "conditions": [ - { - "lastTransitionMicroTime": "2023-02-22T16:26:07.559447Z", - "lastUpdateMicroTime": "2023-02-22T16:26:07.559447Z", - "status": "True", - "type": "Init", - }, - { - "lastTransitionMicroTime": "2023-02-22T16:26:07.559551Z", - "lastUpdateMicroTime": "2023-02-22T16:26:07.559551Z", - "reason": "AwaitingHeadOfLine", - "status": "True", - "type": "Queueing", - }, - { - "lastTransitionMicroTime": "2023-02-22T16:26:13.220564Z", - "lastUpdateMicroTime": "2023-02-22T16:26:13.220564Z", - "reason": "AppWrapperRunnable", - "status": "True", - "type": "Dispatched", - }, - ], - "controllerfirsttimestamp": "2023-02-22T16:26:07.559447Z", - "filterignore": True, - "queuejobstate": "Dispatched", - "sender": "before manageQueueJob - afterEtcdDispatching", - "state": "Pending", - "systempriority": 9, - }, - }, - ] - } - return api_obj1 - - -def test_get_cluster(mocker): - mocker.patch("kubernetes.config.load_kube_config", return_value="ignore") - mocker.patch( - "kubernetes.client.CustomObjectsApi.list_namespaced_custom_object", - side_effect=get_ray_obj, - ) - cluster = get_cluster("quicktest") - cluster_config = cluster.config - assert cluster_config.name == "quicktest" and cluster_config.namespace == "ns" - assert ( - "m4.xlarge" in cluster_config.machine_types - and "g4dn.xlarge" in cluster_config.machine_types - ) - assert cluster_config.min_cpus == 1 and cluster_config.max_cpus == 1 - assert cluster_config.min_memory == 2 and 
cluster_config.max_memory == 2 - assert cluster_config.num_gpus == 0 - assert cluster_config.instascale - assert ( - cluster_config.image - == "ghcr.io/foundation-model-stack/base:ray2.1.0-py38-gpu-pytorch1.12.0cu116-20221213-193103" - ) - assert cluster_config.num_workers == 1 - - -def test_list_clusters(mocker, capsys): - mocker.patch("kubernetes.config.load_kube_config", return_value="ignore") - mocker.patch( - "kubernetes.client.CustomObjectsApi.list_namespaced_custom_object", - side_effect=get_obj_none, - ) - list_all_clusters("ns") - captured = capsys.readouterr() - assert captured.out == ( - "╭──────────────────────────────────────────────────────────────────────────────╮\n" - "│ No resources found, have you run cluster.up() yet? │\n" - "╰──────────────────────────────────────────────────────────────────────────────╯\n" - ) - mocker.patch( - "kubernetes.client.CustomObjectsApi.list_namespaced_custom_object", - side_effect=get_ray_obj, - ) - list_all_clusters("ns") - captured = capsys.readouterr() - assert captured.out == ( - " 🚀 CodeFlare Cluster Details 🚀 \n" - " \n" - " ╭───────────────────────────────────────────────────────────────╮ \n" - " │ Name │ \n" - " │ quicktest Active ✅ │ \n" - " │ │ \n" - " │ URI: ray://quicktest-head-svc.ns.svc:10001 │ \n" - " │ │ \n" - " │ Dashboard🔗 │ \n" - " │ │ \n" - " │ Cluster Resources │ \n" - " │ ╭── Workers ──╮ ╭───────── Worker specs(each) ─────────╮ │ \n" - " │ │ # Workers │ │ Memory CPU GPU │ │ \n" - " │ │ │ │ │ │ \n" - " │ │ 1 │ │ 2G~2G 1 0 │ │ \n" - " │ │ │ │ │ │ \n" - " │ ╰─────────────╯ ╰──────────────────────────────────────╯ │ \n" - " ╰───────────────────────────────────────────────────────────────╯ \n" - ) - - -def test_list_queue(mocker, capsys): - mocker.patch("kubernetes.config.load_kube_config", return_value="ignore") - mocker.patch( - "kubernetes.client.CustomObjectsApi.list_namespaced_custom_object", - side_effect=get_obj_none, - ) - list_all_queued("ns") - captured = capsys.readouterr() - assert 
captured.out == ( - "╭──────────────────────────────────────────────────────────────────────────────╮\n" - "│ No resources found, have you run cluster.up() yet? │\n" - "╰──────────────────────────────────────────────────────────────────────────────╯\n" - ) - mocker.patch( - "kubernetes.client.CustomObjectsApi.list_namespaced_custom_object", - side_effect=get_aw_obj, - ) - list_all_queued("ns") - captured = capsys.readouterr() - assert captured.out == ( - "╭──────────────────────────╮\n" - "│ 🚀 Cluster Queue Status │\n" - "│ 🚀 │\n" - "│ +------------+---------+ │\n" - "│ | Name | Status | │\n" - "│ +============+=========+ │\n" - "│ | quicktest1 | running | │\n" - "│ | | | │\n" - "│ | quicktest2 | pending | │\n" - "│ | | | │\n" - "│ +------------+---------+ │\n" - "╰──────────────────────────╯\n" - ) - - -def test_cluster_status(mocker): - mocker.patch("kubernetes.config.load_kube_config", return_value="ignore") - fake_aw = AppWrapper( - "test", AppWrapperStatus.FAILED, can_run=True, job_state="unused" - ) - fake_ray = RayCluster( - name="test", - status=RayClusterStatus.UNKNOWN, - workers=1, - worker_mem_min=2, - worker_mem_max=2, - worker_cpu=1, - worker_gpu=0, - namespace="ns", - dashboard="fake-uri", - ) - cf = Cluster(ClusterConfiguration(name="test", namespace="ns")) - mocker.patch("codeflare_sdk.cluster.cluster._app_wrapper_status", return_value=None) - mocker.patch("codeflare_sdk.cluster.cluster._ray_cluster_status", return_value=None) - status, ready = cf.status() - assert status == CodeFlareClusterStatus.UNKNOWN - assert ready == False - - mocker.patch( - "codeflare_sdk.cluster.cluster._app_wrapper_status", return_value=fake_aw - ) - status, ready = cf.status() - assert status == CodeFlareClusterStatus.FAILED - assert ready == False - - fake_aw.status = AppWrapperStatus.DELETED - status, ready = cf.status() - assert status == CodeFlareClusterStatus.FAILED - assert ready == False - - fake_aw.status = AppWrapperStatus.PENDING - status, ready = cf.status() - 
assert status == CodeFlareClusterStatus.QUEUED - assert ready == False - - fake_aw.status = AppWrapperStatus.COMPLETED - status, ready = cf.status() - assert status == CodeFlareClusterStatus.STARTING - assert ready == False - - fake_aw.status = AppWrapperStatus.RUNNING_HOLD_COMPLETION - status, ready = cf.status() - assert status == CodeFlareClusterStatus.STARTING - assert ready == False - - fake_aw.status = AppWrapperStatus.RUNNING - status, ready = cf.status() - assert status == CodeFlareClusterStatus.STARTING - assert ready == False - - mocker.patch( - "codeflare_sdk.cluster.cluster._ray_cluster_status", return_value=fake_ray - ) - - status, ready = cf.status() - assert status == CodeFlareClusterStatus.STARTING - assert ready == False - - fake_ray.status = RayClusterStatus.FAILED - status, ready = cf.status() - assert status == CodeFlareClusterStatus.FAILED - assert ready == False - - fake_ray.status = RayClusterStatus.UNHEALTHY - status, ready = cf.status() - assert status == CodeFlareClusterStatus.FAILED - assert ready == False - - fake_ray.status = RayClusterStatus.READY - status, ready = cf.status() - assert status == CodeFlareClusterStatus.READY - assert ready == True - - -def test_wait_ready(mocker, capsys): - mocker.patch("kubernetes.config.load_kube_config", return_value="ignore") - mocker.patch("codeflare_sdk.cluster.cluster._app_wrapper_status", return_value=None) - mocker.patch("codeflare_sdk.cluster.cluster._ray_cluster_status", return_value=None) - mocker.patch.object( - client.CustomObjectsApi, - "list_namespaced_custom_object", - return_value={ - "items": [ - { - "metadata": {"name": "ray-dashboard-test"}, - "spec": {"host": "mocked-host"}, - } - ] - }, - ) - mock_response = mocker.Mock() - mock_response.status_code = 200 - mocker.patch("requests.get", return_value=mock_response) - cf = Cluster(ClusterConfiguration(name="test", namespace="ns")) - try: - cf.wait_ready(timeout=5) - assert 1 == 0 - except Exception as e: - assert type(e) == 
TimeoutError - - captured = capsys.readouterr() - assert ( - "WARNING: Current cluster status is unknown, have you run cluster.up yet?" - in captured.out - ) - mocker.patch( - "codeflare_sdk.cluster.cluster.Cluster.status", - return_value=(True, CodeFlareClusterStatus.READY), - ) - cf.wait_ready() - captured = capsys.readouterr() - assert ( - captured.out - == "Waiting for requested resources to be set up...\nRequested cluster and dashboard are up and running!\n" - ) - - -def test_jobdefinition_coverage(): - abstract = JobDefinition() - cluster = createClusterWithConfig() - abstract._dry_run(cluster) - abstract.submit(cluster) - - -def test_job_coverage(): - abstract = Job() - abstract.status() - abstract.logs() - - -def test_DDPJobDefinition_creation(): - ddp = createTestDDP() - assert ddp.script == "test.py" - assert ddp.m == None - assert ddp.script_args == ["test"] - assert ddp.name == "test" - assert ddp.cpu == 1 - assert ddp.gpu == 0 - assert ddp.memMB == 1024 - assert ddp.h == None - assert ddp.j == "2x1" - assert ddp.env == {"test": "test"} - assert ddp.max_retries == 0 - assert ddp.mounts == [] - assert ddp.rdzv_port == 29500 - assert ddp.scheduler_args == {"requirements": "test"} - - -def test_DDPJobDefinition_dry_run(mocker): - """ - Test that the dry run method returns the correct type: AppDryRunInfo, - that the attributes of the returned object are of the correct type, - and that the values from cluster and job definition are correctly passed. 
- """ - mocker.patch("kubernetes.config.load_kube_config", return_value="ignore") - mocker.patch( - "codeflare_sdk.cluster.cluster.Cluster.cluster_dashboard_uri", - return_value="", - ) - ddp = createTestDDP() - cluster = createClusterWithConfig() - ddp_job = ddp._dry_run(cluster) - assert type(ddp_job) == AppDryRunInfo - assert ddp_job._fmt is not None - assert type(ddp_job.request) == RayJob - assert type(ddp_job._app) == AppDef - assert type(ddp_job._cfg) == type(dict()) - assert type(ddp_job._scheduler) == type(str()) - - assert ddp_job.request.app_id.startswith("test") - assert ddp_job.request.cluster_name == "unit-test-cluster" - assert ddp_job.request.requirements == "test" - - assert ddp_job._app.roles[0].resource.cpu == 1 - assert ddp_job._app.roles[0].resource.gpu == 0 - assert ddp_job._app.roles[0].resource.memMB == 1024 - - assert ddp_job._cfg["cluster_name"] == "unit-test-cluster" - assert ddp_job._cfg["requirements"] == "test" - - assert ddp_job._scheduler == "ray" - - -def test_DDPJobDefinition_dry_run_no_cluster(mocker): - """ - Test that the dry run method returns the correct type: AppDryRunInfo, - that the attributes of the returned object are of the correct type, - and that the values from cluster and job definition are correctly passed. 
- """ - - mocker.patch( - "codeflare_sdk.job.jobs.get_current_namespace", - return_value="opendatahub", - ) - - ddp = createTestDDP() - ddp.image = "fake-image" - ddp_job = ddp._dry_run_no_cluster() - assert type(ddp_job) == AppDryRunInfo - assert ddp_job._fmt is not None - assert type(ddp_job.request) == KubernetesMCADJob - assert type(ddp_job._app) == AppDef - assert type(ddp_job._cfg) == type(dict()) - assert type(ddp_job._scheduler) == type(str()) - - assert ( - ddp_job.request.resource["spec"]["resources"]["GenericItems"][0][ - "generictemplate" - ] - .spec.containers[0] - .image - == "fake-image" - ) - - assert ddp_job._app.roles[0].resource.cpu == 1 - assert ddp_job._app.roles[0].resource.gpu == 0 - assert ddp_job._app.roles[0].resource.memMB == 1024 - - assert ddp_job._cfg["requirements"] == "test" - - assert ddp_job._scheduler == "kubernetes_mcad" - - -def test_DDPJobDefinition_dry_run_no_resource_args(mocker): - """ - Test that the dry run correctly gets resources from the cluster object - when the job definition does not specify resources. 
- """ - mocker.patch( - "codeflare_sdk.cluster.cluster.Cluster.cluster_dashboard_uri", - return_value="", - ) - cluster = createClusterWithConfig() - ddp = DDPJobDefinition( - script="test.py", - m=None, - script_args=["test"], - name="test", - h=None, - env={"test": "test"}, - max_retries=0, - mounts=[], - rdzv_port=29500, - scheduler_args={"requirements": "test"}, - ) - ddp_job = ddp._dry_run(cluster) - - assert ddp_job._app.roles[0].resource.cpu == cluster.config.max_cpus - assert ddp_job._app.roles[0].resource.gpu == cluster.config.num_gpus - assert ddp_job._app.roles[0].resource.memMB == cluster.config.max_memory * 1024 - assert ( - parse_j(ddp_job._app.roles[0].args[1]) - == f"{cluster.config.num_workers}x{cluster.config.num_gpus}" - ) - - -def test_DDPJobDefinition_dry_run_no_cluster_no_resource_args(mocker): - """ - Test that the dry run method returns the correct type: AppDryRunInfo, - that the attributes of the returned object are of the correct type, - and that the values from cluster and job definition are correctly passed. 
- """ - - mocker.patch( - "codeflare_sdk.job.jobs.get_current_namespace", - return_value="opendatahub", - ) - - ddp = createTestDDP() - try: - ddp._dry_run_no_cluster() - assert 0 == 1 - except ValueError as e: - assert str(e) == "Job definition missing arg: image" - ddp.image = "fake-image" - ddp.name = None - try: - ddp._dry_run_no_cluster() - assert 0 == 1 - except ValueError as e: - assert str(e) == "Job definition missing arg: name" - ddp.name = "fake" - ddp.cpu = None - try: - ddp._dry_run_no_cluster() - assert 0 == 1 - except ValueError as e: - assert str(e) == "Job definition missing arg: cpu (# cpus per worker)" - ddp.cpu = 1 - ddp.gpu = None - try: - ddp._dry_run_no_cluster() - assert 0 == 1 - except ValueError as e: - assert str(e) == "Job definition missing arg: gpu (# gpus per worker)" - ddp.gpu = 1 - ddp.memMB = None - try: - ddp._dry_run_no_cluster() - assert 0 == 1 - except ValueError as e: - assert str(e) == "Job definition missing arg: memMB (memory in MB)" - ddp.memMB = 1 - ddp.j = None - try: - ddp._dry_run_no_cluster() - assert 0 == 1 - except ValueError as e: - assert str(e) == "Job definition missing arg: j (`workers`x`procs`)" - - -def test_DDPJobDefinition_submit(mocker): - """ - Tests that the submit method returns the correct type: DDPJob - And that the attributes of the returned object are of the correct type - """ - mocker.patch( - "codeflare_sdk.cluster.cluster.Cluster.cluster_dashboard_uri", - return_value="fake-dashboard-uri", - ) - ddp_def = createTestDDP() - cluster = createClusterWithConfig() - mocker.patch( - "codeflare_sdk.job.jobs.get_current_namespace", - side_effect="opendatahub", - ) - mocker.patch( - "codeflare_sdk.job.jobs.torchx_runner.schedule", - return_value="fake-dashboard-url", - ) # a fake app_handle - ddp_job = ddp_def.submit(cluster) - assert type(ddp_job) == DDPJob - assert type(ddp_job.job_definition) == DDPJobDefinition - assert type(ddp_job.cluster) == Cluster - assert type(ddp_job._app_handle) == str - assert 
ddp_job._app_handle == "fake-dashboard-url" - - ddp_def.image = "fake-image" - ddp_job = ddp_def.submit() - assert type(ddp_job) == DDPJob - assert type(ddp_job.job_definition) == DDPJobDefinition - assert ddp_job.cluster == None - assert type(ddp_job._app_handle) == str - assert ddp_job._app_handle == "fake-dashboard-url" - - -def test_DDPJob_creation(mocker): - mocker.patch( - "codeflare_sdk.cluster.cluster.Cluster.cluster_dashboard_uri", - return_value="fake-dashboard-uri", - ) - ddp_def = createTestDDP() - cluster = createClusterWithConfig() - mocker.patch( - "codeflare_sdk.job.jobs.torchx_runner.schedule", - return_value="fake-dashboard-url", - ) # a fake app_handle - ddp_job = createDDPJob_with_cluster(ddp_def, cluster) - assert type(ddp_job) == DDPJob - assert type(ddp_job.job_definition) == DDPJobDefinition - assert type(ddp_job.cluster) == Cluster - assert type(ddp_job._app_handle) == str - assert ddp_job._app_handle == "fake-dashboard-url" - _, args, kwargs = torchx_runner.schedule.mock_calls[0] - assert type(args[0]) == AppDryRunInfo - job_info = args[0] - assert type(job_info.request) == RayJob - assert type(job_info._app) == AppDef - assert type(job_info._cfg) == type(dict()) - assert type(job_info._scheduler) == type(str()) - - -def test_DDPJob_creation_no_cluster(mocker): - ddp_def = createTestDDP() - ddp_def.image = "fake-image" - mocker.patch( - "codeflare_sdk.job.jobs.get_current_namespace", - side_effect="opendatahub", - ) - mocker.patch( - "codeflare_sdk.job.jobs.torchx_runner.schedule", - return_value="fake-app-handle", - ) # a fake app_handle - ddp_job = createDDPJob_no_cluster(ddp_def, None) - assert type(ddp_job) == DDPJob - assert type(ddp_job.job_definition) == DDPJobDefinition - assert ddp_job.cluster == None - assert type(ddp_job._app_handle) == str - assert ddp_job._app_handle == "fake-app-handle" - _, args, kwargs = torchx_runner.schedule.mock_calls[0] - assert type(args[0]) == AppDryRunInfo - job_info = args[0] - assert 
type(job_info.request) == KubernetesMCADJob - assert type(job_info._app) == AppDef - assert type(job_info._cfg) == type(dict()) - assert type(job_info._scheduler) == type(str()) - - -def test_DDPJob_status(mocker): - # Setup the neccesary mock patches - test_DDPJob_creation(mocker) - ddp_def = createTestDDP() - cluster = createClusterWithConfig() - ddp_job = createDDPJob_with_cluster(ddp_def, cluster) - mocker.patch( - "codeflare_sdk.job.jobs.torchx_runner.status", return_value="fake-status" - ) - assert ddp_job.status() == "fake-status" - _, args, kwargs = torchx_runner.status.mock_calls[0] - assert args[0] == "fake-dashboard-url" - - -def test_DDPJob_logs(mocker): - # Setup the neccesary mock patches - test_DDPJob_creation(mocker) - ddp_def = createTestDDP() - cluster = createClusterWithConfig() - ddp_job = createDDPJob_with_cluster(ddp_def, cluster) - mocker.patch( - "codeflare_sdk.job.jobs.torchx_runner.log_lines", return_value="fake-logs" - ) - assert ddp_job.logs() == "fake-logs" - _, args, kwargs = torchx_runner.log_lines.mock_calls[0] - assert args[0] == "fake-dashboard-url" - - -def arg_check_side_effect(*args): - assert args[0] == "fake-app-handle" - - -def test_DDPJob_cancel(mocker): - # Setup the neccesary mock patches - test_DDPJob_creation_no_cluster(mocker) - ddp_def = createTestDDP() - ddp_def.image = "fake-image" - ddp_job = createDDPJob_no_cluster(ddp_def, None) - mocker.patch( - "openshift.get_project_name", - return_value="opendatahub", - ) - mocker.patch( - "codeflare_sdk.job.jobs.torchx_runner.cancel", side_effect=arg_check_side_effect - ) - ddp_job.cancel() - - -def parse_j(cmd): - pattern = r"--nnodes\s+\d+\s+--nproc_per_node\s+\d+" - match = re.search(pattern, cmd) - if match: - substring = match.group(0) - else: - return None - args = substring.split() - worker = args[1] - gpu = args[3] - return f"{worker}x{gpu}" - - -def test_AWManager_creation(): - testaw = AWManager("test.yaml") - assert testaw.name == "test" - assert testaw.namespace 
== "ns" - assert testaw.submitted == False - try: - testaw = AWManager("fake") - except Exception as e: - assert type(e) == FileNotFoundError - assert str(e) == "[Errno 2] No such file or directory: 'fake'" - try: - testaw = AWManager("tests/test-case-bad.yaml") - except Exception as e: - assert type(e) == ValueError - assert ( - str(e) - == "tests/test-case-bad.yaml is not a correctly formatted AppWrapper yaml" - ) - - -def arg_check_aw_apply_effect(group, version, namespace, plural, body, *args): - assert group == "workload.codeflare.dev" - assert version == "v1beta1" - assert namespace == "ns" - assert plural == "appwrappers" - with open("test.yaml") as f: - aw = yaml.load(f, Loader=yaml.FullLoader) - assert body == aw - assert args == tuple() - - -def arg_check_aw_del_effect(group, version, namespace, plural, name, *args): - assert group == "workload.codeflare.dev" - assert version == "v1beta1" - assert namespace == "ns" - assert plural == "appwrappers" - assert name == "test" - assert args == tuple() - - -def test_AWManager_submit_remove(mocker, capsys): - testaw = AWManager("test.yaml") - testaw.remove() - captured = capsys.readouterr() - assert ( - captured.out - == "AppWrapper not submitted by this manager yet, nothing to remove\n" - ) - assert testaw.submitted == False - mocker.patch("kubernetes.config.load_kube_config", return_value="ignore") - mocker.patch( - "kubernetes.client.CustomObjectsApi.create_namespaced_custom_object", - side_effect=arg_check_aw_apply_effect, - ) - mocker.patch( - "kubernetes.client.CustomObjectsApi.delete_namespaced_custom_object", - side_effect=arg_check_aw_del_effect, - ) - testaw.submit() - assert testaw.submitted == True - testaw.remove() - assert testaw.submitted == False - - -from cryptography.x509 import load_pem_x509_certificate -import base64 -from cryptography.hazmat.primitives.serialization import ( - load_pem_private_key, - Encoding, - PublicFormat, -) - - -def test_generate_ca_cert(): - """ - test the function 
codeflare_sdk.utils.generate_ca_cert generates the correct outputs - """ - key, certificate = generate_ca_cert() - cert = load_pem_x509_certificate(base64.b64decode(certificate)) - private_pub_key_bytes = ( - load_pem_private_key(base64.b64decode(key), password=None) - .public_key() - .public_bytes(Encoding.PEM, PublicFormat.SubjectPublicKeyInfo) - ) - cert_pub_key_bytes = cert.public_key().public_bytes( - Encoding.PEM, PublicFormat.SubjectPublicKeyInfo - ) - assert type(key) == str - assert type(certificate) == str - # Veirfy ca.cert is self signed - assert cert.verify_directly_issued_by(cert) == None - # Verify cert has the public key bytes from the private key - assert cert_pub_key_bytes == private_pub_key_bytes - - -def secret_ca_retreival(secret_name, namespace): - ca_private_key_bytes, ca_cert = generate_ca_cert() - data = {"ca.crt": ca_cert, "ca.key": ca_private_key_bytes} - assert secret_name == "ca-secret-cluster" - assert namespace == "namespace" - return client.models.V1Secret(data=data) - - -def test_generate_tls_cert(mocker): - """ - test the function codeflare_sdk.utils.generate_ca_cert generates the correct outputs - """ - mocker.patch("kubernetes.config.load_kube_config", return_value="ignore") - mocker.patch( - "kubernetes.client.CoreV1Api.read_namespaced_secret", - side_effect=secret_ca_retreival, - ) - - generate_tls_cert("cluster", "namespace") - assert os.path.exists("tls-cluster-namespace") - assert os.path.exists(os.path.join("tls-cluster-namespace", "ca.crt")) - assert os.path.exists(os.path.join("tls-cluster-namespace", "tls.crt")) - assert os.path.exists(os.path.join("tls-cluster-namespace", "tls.key")) - - # verify the that the signed tls.crt is issued by the ca_cert (root cert) - with open(os.path.join("tls-cluster-namespace", "tls.crt"), "r") as f: - tls_cert = load_pem_x509_certificate(f.read().encode("utf-8")) - with open(os.path.join("tls-cluster-namespace", "ca.crt"), "r") as f: - root_cert = 
load_pem_x509_certificate(f.read().encode("utf-8")) - assert tls_cert.verify_directly_issued_by(root_cert) == None - - -def test_export_env(): - """ - test the function codeflare_sdk.utils.export_ev generates the correct outputs - """ - tls_dir = "cluster" - ns = "namespace" - export_env(tls_dir, ns) - assert os.environ["RAY_USE_TLS"] == "1" - assert os.environ["RAY_TLS_SERVER_CERT"] == os.path.join( - os.getcwd(), f"tls-{tls_dir}-{ns}", "tls.crt" - ) - assert os.environ["RAY_TLS_SERVER_KEY"] == os.path.join( - os.getcwd(), f"tls-{tls_dir}-{ns}", "tls.key" - ) - assert os.environ["RAY_TLS_CA_CERT"] == os.path.join( - os.getcwd(), f"tls-{tls_dir}-{ns}", "ca.crt" - ) - - -# Make sure to always keep this function last -def test_cleanup(): - os.remove("unit-test-cluster.yaml") - os.remove("prio-test-cluster.yaml") - os.remove("unit-test-default-cluster.yaml") - os.remove("test.yaml") - os.remove("raytest2.yaml") - os.remove("quicktest.yaml") - os.remove("tls-cluster-namespace/ca.crt") - os.remove("tls-cluster-namespace/tls.crt") - os.remove("tls-cluster-namespace/tls.key") - os.rmdir("tls-cluster-namespace") diff --git a/tests/unit_test_support.py b/tests/unit_test_support.py deleted file mode 100644 index a4ea056a..00000000 --- a/tests/unit_test_support.py +++ /dev/null @@ -1,59 +0,0 @@ -from codeflare_sdk.job.jobs import ( - DDPJobDefinition, - DDPJob, -) - -from codeflare_sdk.cluster.cluster import ( - Cluster, - ClusterConfiguration, -) - - -def createTestDDP(): - ddp = DDPJobDefinition( - script="test.py", - m=None, - script_args=["test"], - name="test", - cpu=1, - gpu=0, - memMB=1024, - h=None, - j="2x1", - env={"test": "test"}, - max_retries=0, - mounts=[], - rdzv_port=29500, - scheduler_args={"requirements": "test"}, - ) - return ddp - - -def createDDPJob_no_cluster(ddp_def, cluster): - return DDPJob(ddp_def, cluster) - - -def createClusterConfig(): - config = ClusterConfiguration( - name="unit-test-cluster", - namespace="ns", - num_workers=2, - min_cpus=3, - 
max_cpus=4, - min_memory=5, - max_memory=6, - num_gpus=7, - instascale=True, - machine_types=["cpu.small", "gpu.large"], - image_pull_secrets=["unit-test-pull-secret"], - ) - return config - - -def createClusterWithConfig(): - cluster = Cluster(createClusterConfig()) - return cluster - - -def createDDPJob_with_cluster(ddp_def, cluster=createClusterWithConfig()): - return DDPJob(ddp_def, cluster) diff --git a/src/codeflare_sdk/utils/__init__.py b/tests/upgrade/__init__.py similarity index 100% rename from src/codeflare_sdk/utils/__init__.py rename to tests/upgrade/__init__.py diff --git a/tests/upgrade/raycluster_sdk_upgrade_sleep_test.py b/tests/upgrade/raycluster_sdk_upgrade_sleep_test.py new file mode 100644 index 00000000..793853d0 --- /dev/null +++ b/tests/upgrade/raycluster_sdk_upgrade_sleep_test.py @@ -0,0 +1,177 @@ +import requests +from time import sleep + +from codeflare_sdk import ( + Cluster, + ClusterConfiguration, + TokenAuthentication, + get_cluster, +) +from codeflare_sdk.ray.client import RayJobClient + +from tests.e2e.support import * + + +from codeflare_sdk.common import _kube_api_error_handling + +namespace = "test-ns-rayupgrade-sleep" +# Global variables for kueue resources +cluster_queue = "cluster-queue-mnist" +flavor = "default-flavor-mnist" +local_queue = "local-queue-mnist" + + +# Creates a Ray cluster , submit RayJob mnist script long running +class TestSetupSleepRayJob: + def setup_method(self): + initialize_kubernetes_client(self) + create_namespace_with_name(self, namespace) + try: + create_cluster_queue(self, cluster_queue, flavor) + create_resource_flavor(self, flavor) + create_local_queue(self, cluster_queue, local_queue) + except Exception as e: + delete_namespace(self) + delete_kueue_resources(self) + return _kube_api_error_handling(e) + + def test_mnist_ray_cluster_sdk_auth(self): + self.run_mnist_raycluster_sdk_oauth() + + def run_mnist_raycluster_sdk_oauth(self): + ray_image = get_ray_image() + + auth = TokenAuthentication( + 
token=run_oc_command(["whoami", "--show-token=true"]), + server=run_oc_command(["whoami", "--show-server=true"]), + skip_tls=True, + ) + auth.login() + + cluster = Cluster( + ClusterConfiguration( + name="mnist", + namespace=self.namespace, + num_workers=1, + head_cpu_requests=1, + head_cpu_limits=1, + head_memory_requests=4, + head_memory_limits=4, + worker_cpu_requests=1, + worker_cpu_limits=1, + worker_memory_requests=4, + worker_memory_limits=4, + image=ray_image, + write_to_file=True, + verify_tls=False, + ) + ) + + try: + cluster.up() + cluster.status() + # wait for raycluster to be Ready + cluster.wait_ready() + cluster.status() + # Check cluster details + cluster.details() + # Assert the cluster status is READY + _, ready = cluster.status() + assert ready + submission_id = self.assert_jobsubmit() + print(f"Job submitted successfully, job submission id: ", submission_id) + + except Exception as e: + print(f"An unexpected error occurred. Error: ", e) + delete_namespace(self) + delete_kueue_resources(self) + assert False, "Cluster is not ready!" 
+ + def assert_jobsubmit(self): + auth_token = run_oc_command(["whoami", "--show-token=true"]) + cluster = get_cluster("mnist", namespace) + cluster.details() + ray_dashboard = cluster.cluster_dashboard_uri() + header = {"Authorization": f"Bearer {auth_token}"} + client = RayJobClient(address=ray_dashboard, headers=header, verify=False) + + # Submit the job + submission_id = client.submit_job( + entrypoint="python mnist_sleep.py", + runtime_env={ + "working_dir": "./tests/e2e/", + "pip": {"packages": ["torchvision==0.12.0"], "pip_check": False}, + "env_vars": get_setup_env_variables(), + }, + ) + print(f"Submitted job with ID: {submission_id}") + done = False + time = 0 + timeout = 180 + while not done: + status = client.get_job_status(submission_id) + if status.is_terminal(): + break + if status == "RUNNING": + print(f"Job is Running: '{status}'") + assert True + break + if not done: + print(status) + if timeout and time >= timeout: + raise TimeoutError(f"job has timed out after waiting {timeout}s") + sleep(5) + time += 5 + + logs = client.get_job_logs(submission_id) + print(logs) + return submission_id + + +class TestVerifySleepRayJobRunning: + def setup_method(self): + initialize_kubernetes_client(self) + auth = TokenAuthentication( + token=run_oc_command(["whoami", "--show-token=true"]), + server=run_oc_command(["whoami", "--show-server=true"]), + skip_tls=True, + ) + auth.login() + self.namespace = namespace + self.cluster = get_cluster("mnist", self.namespace) + self.cluster_queue = cluster_queue + self.resource_flavor = flavor + if not self.cluster: + raise RuntimeError("TestSetupSleepRayJob needs to be run before this test") + + def teardown_method(self): + delete_namespace(self) + delete_kueue_resources(self) + + def test_mnist_job_running(self): + client = self.get_ray_job_client(self.cluster) + self.assertJobExists(client, 1) + self.assertJobRunning(client) + self.cluster.down() + + def get_ray_job_client(self, cluster): + auth_token = 
run_oc_command(["whoami", "--show-token=true"]) + ray_dashboard = cluster.cluster_dashboard_uri() + header = {"Authorization": f"Bearer {auth_token}"} + return RayJobClient(address=ray_dashboard, headers=header, verify=False) + + # Assertions + def assertJobExists(self, client, expectedJobsSize): + job_list = client.list_jobs() + assert len(job_list) == expectedJobsSize + + def assertJobRunning(self, client): + job_list = client.list_jobs() + submission_id = job_list[0].submission_id + status = client.get_job_status(submission_id) + if status == "RUNNING": + print(f"Job is Running: '{status}'") + assert True + else: + print(f"Job is not in Running state: '{status}'") + assert False diff --git a/tests/upgrade/raycluster_sdk_upgrade_test.py b/tests/upgrade/raycluster_sdk_upgrade_test.py new file mode 100644 index 00000000..7a6d583e --- /dev/null +++ b/tests/upgrade/raycluster_sdk_upgrade_test.py @@ -0,0 +1,157 @@ +import requests +from time import sleep + +from codeflare_sdk import Cluster, ClusterConfiguration, TokenAuthentication +from codeflare_sdk.ray.client import RayJobClient + +from tests.e2e.support import * +from codeflare_sdk.ray.cluster.cluster import get_cluster + +from codeflare_sdk.common import _kube_api_error_handling + +namespace = "test-ns-rayupgrade" +# Global variables for kueue resources +cluster_queue = "cluster-queue-mnist" +flavor = "default-flavor-mnist" +local_queue = "local-queue-mnist" + + +# Creates a Ray cluster +class TestMNISTRayClusterUp: + def setup_method(self): + initialize_kubernetes_client(self) + create_namespace_with_name(self, namespace) + try: + create_cluster_queue(self, cluster_queue, flavor) + create_resource_flavor(self, flavor) + create_local_queue(self, cluster_queue, local_queue) + except Exception as e: + delete_namespace(self) + delete_kueue_resources(self) + return _kube_api_error_handling(e) + + def test_mnist_ray_cluster_sdk_auth(self): + self.run_mnist_raycluster_sdk_oauth() + + def 
run_mnist_raycluster_sdk_oauth(self): + ray_image = get_ray_image() + + auth = TokenAuthentication( + token=run_oc_command(["whoami", "--show-token=true"]), + server=run_oc_command(["whoami", "--show-server=true"]), + skip_tls=True, + ) + auth.login() + + cluster = Cluster( + ClusterConfiguration( + name="mnist", + namespace=self.namespace, + num_workers=1, + head_cpu_requests=1, + head_cpu_limits=1, + head_memory_requests=6, + head_memory_limits=8, + worker_cpu_requests=1, + worker_cpu_limits=1, + worker_memory_requests=6, + worker_memory_limits=8, + image=ray_image, + write_to_file=True, + verify_tls=False, + ) + ) + + try: + cluster.up() + cluster.status() + # wait for raycluster to be Ready + cluster.wait_ready() + cluster.status() + # Check cluster details + cluster.details() + # Assert the cluster status is READY + _, ready = cluster.status() + assert ready + + except Exception as e: + print(f"An unexpected error occurred. Error: ", e) + delete_namespace(self) + assert False, "Cluster is not ready!" + + +class TestMnistJobSubmit: + def setup_method(self): + initialize_kubernetes_client(self) + auth = TokenAuthentication( + token=run_oc_command(["whoami", "--show-token=true"]), + server=run_oc_command(["whoami", "--show-server=true"]), + skip_tls=True, + ) + auth.login() + self.namespace = namespace + self.cluster = get_cluster("mnist", self.namespace) + if not self.cluster: + raise RuntimeError("TestRayClusterUp needs to be run before this test") + + def test_mnist_job_submission(self): + self.assert_jobsubmit_withoutLogin(self.cluster) + self.assert_jobsubmit_withlogin(self.cluster) + + # Assertions + def assert_jobsubmit_withoutLogin(self, cluster): + dashboard_url = cluster.cluster_dashboard_uri() + try: + RayJobClient(address=dashboard_url, verify=False) + assert False + except Exception as e: + if e.response.status_code == 403: + assert True + else: + print(f"An unexpected error occurred. 
Error: {e}") + assert False + + def assert_jobsubmit_withlogin(self, cluster): + auth_token = run_oc_command(["whoami", "--show-token=true"]) + ray_dashboard = cluster.cluster_dashboard_uri() + header = {"Authorization": f"Bearer {auth_token}"} + client = RayJobClient(address=ray_dashboard, headers=header, verify=False) + + # Submit the job + submission_id = client.submit_job( + entrypoint="python mnist.py", + runtime_env={ + "working_dir": "./tests/e2e/", + "pip": "./tests/e2e/mnist_pip_requirements.txt", + "env_vars": get_setup_env_variables(), + }, + ) + print(f"Submitted job with ID: {submission_id}") + done = False + time = 0 + timeout = 900 + while not done: + status = client.get_job_status(submission_id) + if status.is_terminal(): + break + if not done: + print(status) + if timeout and time >= timeout: + raise TimeoutError(f"job has timed out after waiting {timeout}s") + sleep(5) + time += 5 + + logs = client.get_job_logs(submission_id) + print(logs) + + self.assert_job_completion(status) + + client.delete_job(submission_id) + + def assert_job_completion(self, status): + if status == "SUCCEEDED": + print(f"Job has completed: '{status}'") + assert True + else: + print(f"Job has completed: '{status}'") + assert False diff --git a/ui-tests/.yarnrc b/ui-tests/.yarnrc new file mode 100644 index 00000000..0f81e58d --- /dev/null +++ b/ui-tests/.yarnrc @@ -0,0 +1,4 @@ +disable-self-update-check true +ignore-optional true +network-timeout "300000" +registry "https://registry.npmjs.org/" diff --git a/ui-tests/jupyter_server_config.py b/ui-tests/jupyter_server_config.py new file mode 100644 index 00000000..e7983fe7 --- /dev/null +++ b/ui-tests/jupyter_server_config.py @@ -0,0 +1,6 @@ +from jupyterlab.galata import configure_jupyter_server + +configure_jupyter_server(c) + +# Uncomment to set server log level to debug level +# c.ServerApp.log_level = "DEBUG" diff --git a/ui-tests/package.json b/ui-tests/package.json new file mode 100644 index 00000000..41c47066 --- 
/dev/null +++ b/ui-tests/package.json @@ -0,0 +1,22 @@ +{ + "name": "@jupyter-widgets/ui-tests", + "private": true, + "version": "0.1.0", + "description": "ipywidgets UI Tests", + "scripts": { + "start": "jupyter lab --config ./jupyter_server_config.py", + "start:detached": "jlpm start&", + "test": "npx playwright test", + "test:debug": "PWDEBUG=1 npx playwright test", + "test:report": "http-server ./playwright-report -a localhost -o", + "test:update": "npx playwright test --update-snapshots", + "deduplicate": "jlpm && yarn-deduplicate -s fewer --fail" + }, + "author": "Project Jupyter", + "license": "BSD-3-Clause", + "devDependencies": { + "@jupyterlab/galata": "^5.3.0", + "@playwright/test": "^1.49.0", + "yarn-deduplicate": "^6.0.1" + } +} diff --git a/ui-tests/playwright.config.js b/ui-tests/playwright.config.js new file mode 100644 index 00000000..1ba51f15 --- /dev/null +++ b/ui-tests/playwright.config.js @@ -0,0 +1,13 @@ +const baseConfig = require('@jupyterlab/galata/lib/playwright-config'); + +module.exports = { + ...baseConfig, + timeout: 600000, + webServer: { + command: 'yarn start', + url: 'http://localhost:8888/lab', + timeout: 120 * 1000, + reuseExistingServer: !process.env.CI, + }, + retries: 0, +}; diff --git a/ui-tests/tests/widget_notebook_example.test.ts b/ui-tests/tests/widget_notebook_example.test.ts new file mode 100644 index 00000000..7707f70b --- /dev/null +++ b/ui-tests/tests/widget_notebook_example.test.ts @@ -0,0 +1,201 @@ +// Copyright 2024 IBM, Red Hat +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +import { test } from "@jupyterlab/galata"; +import { expect } from "@playwright/test"; +import * as path from "path"; + +test.describe("Visual Regression", () => { + test.beforeEach(async ({ page, tmpPath }) => { + await page.contents.uploadDirectory( + path.resolve(__dirname, "../../demo-notebooks/guided-demos"), + tmpPath + ); + await page.filebrowser.openDirectory(tmpPath); + }); + + test("Run notebook, capture cell outputs, and test widgets", async ({ + page, + tmpPath, + }) => { + const notebook = "3_widget_example.ipynb"; + const namespace = 'default'; + await page.notebook.openByPath(`${tmpPath}/${notebook}`); + await page.notebook.activate(notebook); + + // Hide the cell toolbar before capturing the screenshots + await page.addStyleTag({ content: '.jp-cell-toolbar { display: none !important; }' }); + // Hide the file explorer + await page.keyboard.press('Control+Shift+F'); + + const captures: (Buffer | null)[] = []; // Array to store cell screenshots + const cellCount = await page.notebook.getCellCount(); + console.log(`Cell count: ${cellCount}`); + + // Run all cells and capture their screenshots + await page.notebook.runCellByCell({ + onAfterCellRun: async (cellIndex: number) => { + const cell = await page.notebook.getCellOutput(cellIndex); + if (cell && (await cell.isVisible())) { + captures[cellIndex] = await cell.screenshot(); // Save the screenshot by cell index + } + }, + }); + + await page.notebook.save(); + + // Ensure that each cell's screenshot is captured + for (let i = 0; i < cellCount; i++) { + const image = `widgets-cell-${i}.png`; + + if (captures[i]) { + expect.soft(captures[i]).toMatchSnapshot(image); // Compare pre-existing capture + continue; + } + } + + // At this point, all cells have been ran, and their screenshots have been captured. + // We now interact with the widgets in the notebook. 
+ const applyDownWidgetCellIndex = 3; // 4 on OpenShift + + await waitForWidget(page, applyDownWidgetCellIndex, 'input[type="checkbox"]'); + await waitForWidget(page, applyDownWidgetCellIndex, 'button:has-text("Cluster Down")'); + await waitForWidget(page, applyDownWidgetCellIndex, 'button:has-text("Cluster Apply")'); + + await interactWithWidget(page, applyDownWidgetCellIndex, 'input[type="checkbox"]', async (checkbox) => { + await checkbox.click(); + const isChecked = await checkbox.isChecked(); + expect(isChecked).toBe(true); + }); + + await interactWithWidget(page, applyDownWidgetCellIndex, 'button:has-text("Cluster Down")', async (button) => { + await button.click(); + const clusterDownMessage = await page.waitForSelector('text=The requested resource could not be located.', { timeout: 5000 }); + expect(await clusterDownMessage.innerText()).toContain('The requested resource could not be located.'); + }); + + await interactWithWidget(page, applyDownWidgetCellIndex, 'button:has-text("Cluster Apply")', async (button) => { + await button.click(); + + const successMessage = await page.waitForSelector('text=Ray Cluster: \'widgettest\' has successfully been created', { timeout: 10000 }); + expect(successMessage).not.toBeNull(); + + const resourcesMessage = await page.waitForSelector('text=Waiting for requested resources to be set up...'); + expect(resourcesMessage).not.toBeNull(); + + const upAndRunningMessage = await page.waitForSelector('text=Requested cluster is up and running!'); + expect(upAndRunningMessage).not.toBeNull(); + + const dashboardReadyMessage = await page.waitForSelector('text=Dashboard is ready!'); + expect(dashboardReadyMessage).not.toBeNull(); + }); + + await runPreviousCell(page, cellCount, '(, True)'); + + await interactWithWidget(page, applyDownWidgetCellIndex, 'button:has-text("Cluster Down")', async (button) => { + await button.click(); + const clusterDownMessage = await page.waitForSelector('text=Ray Cluster: \'widgettest\' has successfully 
been deleted', { timeout: 5000 }); + expect(clusterDownMessage).not.toBeNull(); + }); + + await runPreviousCell(page, cellCount, '(, False)'); + + // Replace text in ClusterConfiguration to run a new RayCluster + const cell = page.getByText('widgettest').first(); + await cell.fill('"widgettest-1"'); + await page.notebook.runCell(cellCount - 3, true); // Run ClusterConfiguration cell + + await interactWithWidget(page, applyDownWidgetCellIndex, 'button:has-text("Cluster Apply")', async (button) => { + await button.click(); + const successMessage = await page.waitForSelector('text=Ray Cluster: \'widgettest-1\' has successfully been created', { timeout: 10000 }); + expect(successMessage).not.toBeNull(); + }); + + const viewClustersCellIndex = 4; // 5 on OpenShift + await page.notebook.runCell(cellCount - 2, true); + + // Wait until the RayCluster status in the table updates to "Ready" + await interactWithWidget(page, viewClustersCellIndex, 'button:has-text("Refresh Data")', async (button) => { + let clusterReady = false; + const maxRefreshRetries = 24; // 24 retries * 5 seconds = 120 seconds + let numRefreshRetries = 0; + while (!clusterReady && numRefreshRetries < maxRefreshRetries) { + await button.click(); + try { + await page.waitForSelector('text=Ready ✓', { timeout: 5000 }); + clusterReady = true; + } + catch (e) { + console.log(`Cluster not ready yet. 
Retrying...`); + numRefreshRetries++; + } + } + expect(clusterReady).toBe(true); + }); + + await interactWithWidget(page, viewClustersCellIndex, 'button:has-text("Open Ray Dashboard")', async (button) => { + await button.click(); + const successMessage = await page.waitForSelector('text=Opening Ray Dashboard for widgettest-1 cluster', { timeout: 5000 }); + expect(successMessage).not.toBeNull(); + }); + + await interactWithWidget(page, viewClustersCellIndex, 'button:has-text("View Jobs")', async (button) => { + await button.click(); + const successMessage = await page.waitForSelector('text=Opening Ray Jobs Dashboard for widgettest-1 cluster', { timeout: 5000 }); + expect(successMessage).not.toBeNull(); + }); + + await interactWithWidget(page, viewClustersCellIndex, 'button:has-text("Delete Cluster")', async (button) => { + await button.click(); + + const noClustersMessage = await page.waitForSelector(`text=No clusters found in the ${namespace} namespace.`, { timeout: 5000 }); + expect(noClustersMessage).not.toBeNull(); + const successMessage = await page.waitForSelector(`text=Cluster widgettest-1 in the ${namespace} namespace was deleted successfully.`, { timeout: 5000 }); + expect(successMessage).not.toBeNull(); + }); + + await runPreviousCell(page, cellCount, '(, False)'); + }); +}); + +async function waitForWidget(page, cellIndex: number, widgetSelector: string, timeout = 5000) { + const widgetCell = await page.notebook.getCellOutput(cellIndex); + + if (widgetCell) { + await widgetCell.waitForSelector(widgetSelector, { timeout }); + } +} + +async function interactWithWidget(page, cellIndex: number, widgetSelector: string, action: (widget) => Promise) { + const widgetCell = await page.notebook.getCellOutput(cellIndex); + + if (widgetCell) { + const widget = await widgetCell.$(widgetSelector); + if (widget) { + await action(widget); + } + } +} + +async function runPreviousCell(page, cellCount, expectedMessage) { + const runSuccess = await 
page.notebook.runCell(cellCount - 1); expect(runSuccess).toBe(true); + const lastCellOutput = await page.notebook.getCellOutput(cellCount - 1); + const newOutput = await lastCellOutput.evaluate((output) => output.textContent); + + if (expectedMessage) { + expect(newOutput).toContain(expectedMessage); + } + + return lastCellOutput; +} diff --git a/ui-tests/tests/widget_notebook_example.test.ts-snapshots/widgets-cell-0-linux.png b/ui-tests/tests/widget_notebook_example.test.ts-snapshots/widgets-cell-0-linux.png new file mode 100644 index 00000000..2d3aa180 Binary files /dev/null and b/ui-tests/tests/widget_notebook_example.test.ts-snapshots/widgets-cell-0-linux.png differ diff --git a/ui-tests/tests/widget_notebook_example.test.ts-snapshots/widgets-cell-2-linux.png b/ui-tests/tests/widget_notebook_example.test.ts-snapshots/widgets-cell-2-linux.png new file mode 100644 index 00000000..babe9dcb Binary files /dev/null and b/ui-tests/tests/widget_notebook_example.test.ts-snapshots/widgets-cell-2-linux.png differ diff --git a/ui-tests/tests/widget_notebook_example.test.ts-snapshots/widgets-cell-3-linux.png b/ui-tests/tests/widget_notebook_example.test.ts-snapshots/widgets-cell-3-linux.png new file mode 100644 index 00000000..1454d17c Binary files /dev/null and b/ui-tests/tests/widget_notebook_example.test.ts-snapshots/widgets-cell-3-linux.png differ diff --git a/ui-tests/tests/widget_notebook_example.test.ts-snapshots/widgets-cell-4-linux.png b/ui-tests/tests/widget_notebook_example.test.ts-snapshots/widgets-cell-4-linux.png new file mode 100644 index 00000000..bfa203a2 Binary files /dev/null and b/ui-tests/tests/widget_notebook_example.test.ts-snapshots/widgets-cell-4-linux.png differ diff --git a/ui-tests/tests/widget_notebook_example.test.ts-snapshots/widgets-cell-5-linux.png b/ui-tests/tests/widget_notebook_example.test.ts-snapshots/widgets-cell-5-linux.png new file mode 100644 index 00000000..c3529853 Binary files /dev/null and 
b/ui-tests/tests/widget_notebook_example.test.ts-snapshots/widgets-cell-5-linux.png differ diff --git a/ui-tests/yarn.lock b/ui-tests/yarn.lock new file mode 100644 index 00000000..bf9629eb --- /dev/null +++ b/ui-tests/yarn.lock @@ -0,0 +1,2831 @@ +# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY. +# yarn lockfile v1 + + +"@codemirror/autocomplete@^6.0.0", "@codemirror/autocomplete@^6.3.2", "@codemirror/autocomplete@^6.7.1": + version "6.18.0" + resolved "https://registry.npmjs.org/@codemirror/autocomplete/-/autocomplete-6.18.0.tgz#5f39b05daca04c95e990b70024144df47b2aa635" + integrity sha512-5DbOvBbY4qW5l57cjDsmmpDh3/TeK1vXfTHa+BUMrRzdWdcxKZ4U4V7vQaTtOpApNU4kLS4FQ6cINtLg245LXA== + dependencies: + "@codemirror/language" "^6.0.0" + "@codemirror/state" "^6.0.0" + "@codemirror/view" "^6.17.0" + "@lezer/common" "^1.0.0" + +"@codemirror/autocomplete@^6.16.0": + version "6.18.2" + resolved "https://registry.npmjs.org/@codemirror/autocomplete/-/autocomplete-6.18.2.tgz#bf3f15f1bf0fdfa3b4fac560e419adae1ece8a94" + integrity sha512-wJGylKtMFR/Ds6Gh01+OovXE/pncPiKZNNBKuC39pKnH+XK5d9+WsNqcrdxPjFPFTigRBqse0rfxw9UxrfyhPg== + dependencies: + "@codemirror/language" "^6.0.0" + "@codemirror/state" "^6.0.0" + "@codemirror/view" "^6.17.0" + "@lezer/common" "^1.0.0" + +"@codemirror/commands@^6.5.0": + version "6.7.1" + resolved "https://registry.npmjs.org/@codemirror/commands/-/commands-6.7.1.tgz#04561e95bc0779eaa49efd63e916c4efb3bbf6d6" + integrity sha512-llTrboQYw5H4THfhN4U3qCnSZ1SOJ60ohhz+SzU0ADGtwlc533DtklQP0vSFaQuCPDn3BPpOd1GbbnUtwNjsrw== + dependencies: + "@codemirror/language" "^6.0.0" + "@codemirror/state" "^6.4.0" + "@codemirror/view" "^6.27.0" + "@lezer/common" "^1.1.0" + +"@codemirror/lang-cpp@^6.0.2": + version "6.0.2" + resolved "https://registry.npmjs.org/@codemirror/lang-cpp/-/lang-cpp-6.0.2.tgz#076c98340c3beabde016d7d83e08eebe17254ef9" + integrity sha512-6oYEYUKHvrnacXxWxYa6t4puTlbN3dgV662BDfSH8+MfjQjVmP697/KYTDOqpxgerkvoNm7q5wlFMBeX8ZMocg== + dependencies: 
+ "@codemirror/language" "^6.0.0" + "@lezer/cpp" "^1.0.0" + +"@codemirror/lang-css@^6.0.0", "@codemirror/lang-css@^6.2.1": + version "6.3.0" + resolved "https://registry.npmjs.org/@codemirror/lang-css/-/lang-css-6.3.0.tgz#607628559f2471b385c6070ec795072a55cffc0b" + integrity sha512-CyR4rUNG9OYcXDZwMPvJdtb6PHbBDKUc/6Na2BIwZ6dKab1JQqKa4di+RNRY9Myn7JB81vayKwJeQ7jEdmNVDA== + dependencies: + "@codemirror/autocomplete" "^6.0.0" + "@codemirror/language" "^6.0.0" + "@codemirror/state" "^6.0.0" + "@lezer/common" "^1.0.2" + "@lezer/css" "^1.1.7" + +"@codemirror/lang-html@^6.0.0", "@codemirror/lang-html@^6.4.9": + version "6.4.9" + resolved "https://registry.npmjs.org/@codemirror/lang-html/-/lang-html-6.4.9.tgz#d586f2cc9c341391ae07d1d7c545990dfa069727" + integrity sha512-aQv37pIMSlueybId/2PVSP6NPnmurFDVmZwzc7jszd2KAF8qd4VBbvNYPXWQq90WIARjsdVkPbw29pszmHws3Q== + dependencies: + "@codemirror/autocomplete" "^6.0.0" + "@codemirror/lang-css" "^6.0.0" + "@codemirror/lang-javascript" "^6.0.0" + "@codemirror/language" "^6.4.0" + "@codemirror/state" "^6.0.0" + "@codemirror/view" "^6.17.0" + "@lezer/common" "^1.0.0" + "@lezer/css" "^1.1.0" + "@lezer/html" "^1.3.0" + +"@codemirror/lang-java@^6.0.1": + version "6.0.1" + resolved "https://registry.npmjs.org/@codemirror/lang-java/-/lang-java-6.0.1.tgz#03bd06334da7c8feb9dff6db01ac6d85bd2e48bb" + integrity sha512-OOnmhH67h97jHzCuFaIEspbmsT98fNdhVhmA3zCxW0cn7l8rChDhZtwiwJ/JOKXgfm4J+ELxQihxaI7bj7mJRg== + dependencies: + "@codemirror/language" "^6.0.0" + "@lezer/java" "^1.0.0" + +"@codemirror/lang-javascript@^6.0.0", "@codemirror/lang-javascript@^6.2.2": + version "6.2.2" + resolved "https://registry.npmjs.org/@codemirror/lang-javascript/-/lang-javascript-6.2.2.tgz#7141090b22994bef85bcc5608a3bc1257f2db2ad" + integrity sha512-VGQfY+FCc285AhWuwjYxQyUQcYurWlxdKYT4bqwr3Twnd5wP5WSeu52t4tvvuWmljT4EmgEgZCqSieokhtY8hg== + dependencies: + "@codemirror/autocomplete" "^6.0.0" + "@codemirror/language" "^6.6.0" + "@codemirror/lint" "^6.0.0" + 
"@codemirror/state" "^6.0.0" + "@codemirror/view" "^6.17.0" + "@lezer/common" "^1.0.0" + "@lezer/javascript" "^1.0.0" + +"@codemirror/lang-json@^6.0.1": + version "6.0.1" + resolved "https://registry.npmjs.org/@codemirror/lang-json/-/lang-json-6.0.1.tgz#0a0be701a5619c4b0f8991f9b5e95fe33f462330" + integrity sha512-+T1flHdgpqDDlJZ2Lkil/rLiRy684WMLc74xUnjJH48GQdfJo/pudlTRreZmKwzP8/tGdKf83wlbAdOCzlJOGQ== + dependencies: + "@codemirror/language" "^6.0.0" + "@lezer/json" "^1.0.0" + +"@codemirror/lang-markdown@^6.2.5": + version "6.3.0" + resolved "https://registry.npmjs.org/@codemirror/lang-markdown/-/lang-markdown-6.3.0.tgz#949f8803332441705ed6def34c565f2166479538" + integrity sha512-lYrI8SdL/vhd0w0aHIEvIRLRecLF7MiiRfzXFZY94dFwHqC9HtgxgagJ8fyYNBldijGatf9wkms60d8SrAj6Nw== + dependencies: + "@codemirror/autocomplete" "^6.7.1" + "@codemirror/lang-html" "^6.0.0" + "@codemirror/language" "^6.3.0" + "@codemirror/state" "^6.0.0" + "@codemirror/view" "^6.0.0" + "@lezer/common" "^1.2.1" + "@lezer/markdown" "^1.0.0" + +"@codemirror/lang-php@^6.0.1": + version "6.0.1" + resolved "https://registry.npmjs.org/@codemirror/lang-php/-/lang-php-6.0.1.tgz#fa34cc75562178325861a5731f79bd621f57ffaa" + integrity sha512-ublojMdw/PNWa7qdN5TMsjmqkNuTBD3k6ndZ4Z0S25SBAiweFGyY68AS3xNcIOlb6DDFDvKlinLQ40vSLqf8xA== + dependencies: + "@codemirror/lang-html" "^6.0.0" + "@codemirror/language" "^6.0.0" + "@codemirror/state" "^6.0.0" + "@lezer/common" "^1.0.0" + "@lezer/php" "^1.0.0" + +"@codemirror/lang-python@^6.1.6": + version "6.1.6" + resolved "https://registry.npmjs.org/@codemirror/lang-python/-/lang-python-6.1.6.tgz#0c55e7e2dfa85b68be93b9692e5d3f76f284bbb2" + integrity sha512-ai+01WfZhWqM92UqjnvorkxosZ2aq2u28kHvr+N3gu012XqY2CThD67JPMHnGceRfXPDBmn1HnyqowdpF57bNg== + dependencies: + "@codemirror/autocomplete" "^6.3.2" + "@codemirror/language" "^6.8.0" + "@codemirror/state" "^6.0.0" + "@lezer/common" "^1.2.1" + "@lezer/python" "^1.1.4" + +"@codemirror/lang-rust@^6.0.1": + version "6.0.1" + resolved 
"https://registry.npmjs.org/@codemirror/lang-rust/-/lang-rust-6.0.1.tgz#d6829fc7baa39a15bcd174a41a9e0a1bf7cf6ba8" + integrity sha512-344EMWFBzWArHWdZn/NcgkwMvZIWUR1GEBdwG8FEp++6o6vT6KL9V7vGs2ONsKxxFUPXKI0SPcWhyYyl2zPYxQ== + dependencies: + "@codemirror/language" "^6.0.0" + "@lezer/rust" "^1.0.0" + +"@codemirror/lang-sql@^6.6.4": + version "6.8.0" + resolved "https://registry.npmjs.org/@codemirror/lang-sql/-/lang-sql-6.8.0.tgz#1ae68ad49f378605ff88a4cc428ba667ce056068" + integrity sha512-aGLmY4OwGqN3TdSx3h6QeA1NrvaYtF7kkoWR/+W7/JzB0gQtJ+VJxewlnE3+VImhA4WVlhmkJr109PefOOhjLg== + dependencies: + "@codemirror/autocomplete" "^6.0.0" + "@codemirror/language" "^6.0.0" + "@codemirror/state" "^6.0.0" + "@lezer/common" "^1.2.0" + "@lezer/highlight" "^1.0.0" + "@lezer/lr" "^1.0.0" + +"@codemirror/lang-wast@^6.0.2": + version "6.0.2" + resolved "https://registry.npmjs.org/@codemirror/lang-wast/-/lang-wast-6.0.2.tgz#d2b14175e5e80d7878cbbb29e20ec90dc12d3a2b" + integrity sha512-Imi2KTpVGm7TKuUkqyJ5NRmeFWF7aMpNiwHnLQe0x9kmrxElndyH0K6H/gXtWwY6UshMRAhpENsgfpSwsgmC6Q== + dependencies: + "@codemirror/language" "^6.0.0" + "@lezer/common" "^1.2.0" + "@lezer/highlight" "^1.0.0" + "@lezer/lr" "^1.0.0" + +"@codemirror/lang-xml@^6.1.0": + version "6.1.0" + resolved "https://registry.npmjs.org/@codemirror/lang-xml/-/lang-xml-6.1.0.tgz#e3e786e1a89fdc9520efe75c1d6d3de1c40eb91c" + integrity sha512-3z0blhicHLfwi2UgkZYRPioSgVTo9PV5GP5ducFH6FaHy0IAJRg+ixj5gTR1gnT/glAIC8xv4w2VL1LoZfs+Jg== + dependencies: + "@codemirror/autocomplete" "^6.0.0" + "@codemirror/language" "^6.4.0" + "@codemirror/state" "^6.0.0" + "@codemirror/view" "^6.0.0" + "@lezer/common" "^1.0.0" + "@lezer/xml" "^1.0.0" + +"@codemirror/language@^6.0.0", "@codemirror/language@^6.10.1", "@codemirror/language@^6.3.0", "@codemirror/language@^6.4.0", "@codemirror/language@^6.6.0", "@codemirror/language@^6.8.0": + version "6.10.2" + resolved 
"https://registry.npmjs.org/@codemirror/language/-/language-6.10.2.tgz#4056dc219619627ffe995832eeb09cea6060be61" + integrity sha512-kgbTYTo0Au6dCSc/TFy7fK3fpJmgHDv1sG1KNQKJXVi+xBTEeBPY/M30YXiU6mMXeH+YIDLsbrT4ZwNRdtF+SA== + dependencies: + "@codemirror/state" "^6.0.0" + "@codemirror/view" "^6.23.0" + "@lezer/common" "^1.1.0" + "@lezer/highlight" "^1.0.0" + "@lezer/lr" "^1.0.0" + style-mod "^4.0.0" + +"@codemirror/legacy-modes@^6.4.0": + version "6.4.1" + resolved "https://registry.npmjs.org/@codemirror/legacy-modes/-/legacy-modes-6.4.1.tgz#fae7b03cad1beada637fd3c12c568a3a7f63fe89" + integrity sha512-vdg3XY7OAs5uLDx2Iw+cGfnwtd7kM+Et/eMsqAGTfT/JKiVBQZXosTzjEbWAi/FrY6DcQIz8mQjBozFHZEUWQA== + dependencies: + "@codemirror/language" "^6.0.0" + +"@codemirror/lint@^6.0.0": + version "6.8.1" + resolved "https://registry.npmjs.org/@codemirror/lint/-/lint-6.8.1.tgz#6427848815baaf68c08e98c7673b804d3d8c0e7f" + integrity sha512-IZ0Y7S4/bpaunwggW2jYqwLuHj0QtESf5xcROewY6+lDNwZ/NzvR4t+vpYgg9m7V8UXLPYqG+lu3DF470E5Oxg== + dependencies: + "@codemirror/state" "^6.0.0" + "@codemirror/view" "^6.0.0" + crelt "^1.0.5" + +"@codemirror/search@^6.5.6": + version "6.5.6" + resolved "https://registry.npmjs.org/@codemirror/search/-/search-6.5.6.tgz#8f858b9e678d675869112e475f082d1e8488db93" + integrity sha512-rpMgcsh7o0GuCDUXKPvww+muLA1pDJaFrpq/CCHtpQJYz8xopu4D1hPcKRoDD0YlF8gZaqTNIRa4VRBWyhyy7Q== + dependencies: + "@codemirror/state" "^6.0.0" + "@codemirror/view" "^6.0.0" + crelt "^1.0.5" + +"@codemirror/state@^6.0.0", "@codemirror/state@^6.4.0", "@codemirror/state@^6.4.1": + version "6.4.1" + resolved "https://registry.npmjs.org/@codemirror/state/-/state-6.4.1.tgz#da57143695c056d9a3c38705ed34136e2b68171b" + integrity sha512-QkEyUiLhsJoZkbumGZlswmAhA7CBU02Wrz7zvH4SrcifbsqwlXShVXg65f3v/ts57W3dqyamEriMhij1Z3Zz4A== + +"@codemirror/view@^6.0.0", "@codemirror/view@^6.17.0", "@codemirror/view@^6.23.0", "@codemirror/view@^6.27.0": + version "6.33.0" + resolved 
"https://registry.npmjs.org/@codemirror/view/-/view-6.33.0.tgz#51e270410fc3af92a6e38798e80ebf8add7dc3ec" + integrity sha512-AroaR3BvnjRW8fiZBalAaK+ZzB5usGgI014YKElYZvQdNH5ZIidHlO+cyf/2rWzyBFRkvG6VhiXeAEbC53P2YQ== + dependencies: + "@codemirror/state" "^6.4.0" + style-mod "^4.1.0" + w3c-keyname "^2.2.4" + +"@codemirror/view@^6.26.3": + version "6.34.1" + resolved "https://registry.npmjs.org/@codemirror/view/-/view-6.34.1.tgz#b17ed29c563e4adc60086233f2d3e7197e2dc33e" + integrity sha512-t1zK/l9UiRqwUNPm+pdIT0qzJlzuVckbTEMVNFhfWkGiBQClstzg+78vedCvLSX0xJEZ6lwZbPpnljL7L6iwMQ== + dependencies: + "@codemirror/state" "^6.4.0" + style-mod "^4.1.0" + w3c-keyname "^2.2.4" + +"@fortawesome/fontawesome-free@^5.12.0": + version "5.15.4" + resolved "https://registry.npmjs.org/@fortawesome/fontawesome-free/-/fontawesome-free-5.15.4.tgz#ecda5712b61ac852c760d8b3c79c96adca5554e5" + integrity sha512-eYm8vijH/hpzr/6/1CJ/V/Eb1xQFW2nnUKArb3z+yUWv7HTwj6M7SP957oMjfZjAHU6qpoNc2wQvIxBLWYa/Jg== + +"@jupyter/react-components@^0.16.6": + version "0.16.7" + resolved "https://registry.npmjs.org/@jupyter/react-components/-/react-components-0.16.7.tgz#94926647a3578409c65d69d5b44c86cb0ca8ceab" + integrity sha512-BKIPkJ9V011uhtdq1xBOu2M3up59CqsRbDS4aq8XhnHR4pwqfRV6k6irE5YBOETCoIwWZZ5RZO+cJcZ3DcsT5A== + dependencies: + "@jupyter/web-components" "^0.16.7" + react ">=17.0.0 <19.0.0" + +"@jupyter/web-components@^0.16.6", "@jupyter/web-components@^0.16.7": + version "0.16.7" + resolved "https://registry.npmjs.org/@jupyter/web-components/-/web-components-0.16.7.tgz#cd347c4a1dcda9597ef405f94e27bfcfe920d1b6" + integrity sha512-1a8awgvvP9J9pCV5vBRuQxdBk29764qiMJsJYEndrWH3cB/FlaO+sZIBm4OTf56Eqdgl8R3/ZSLM1+3mgXOkPg== + dependencies: + "@microsoft/fast-colors" "^5.3.1" + "@microsoft/fast-element" "^1.12.0" + "@microsoft/fast-foundation" "^2.49.4" + "@microsoft/fast-web-utilities" "^5.4.1" + +"@jupyter/ydoc@^3.0.0": + version "3.0.0" + resolved 
"https://registry.npmjs.org/@jupyter/ydoc/-/ydoc-3.0.0.tgz#36fcc4790723644713e35df32916961e4c9428d4" + integrity sha512-oWTSBPifD81I1oRNyKkMJF14FzNvBpJxiYHXaC1XeFXk67KNiqDepjVpYJ1E2QYThZhZGGtdNc6TC1XCQAJVKA== + dependencies: + "@jupyterlab/nbformat" "^3.0.0 || ^4.0.0-alpha.21 || ^4.0.0" + "@lumino/coreutils" "^1.11.0 || ^2.0.0" + "@lumino/disposable" "^1.10.0 || ^2.0.0" + "@lumino/signaling" "^1.10.0 || ^2.0.0" + y-protocols "^1.0.5" + yjs "^13.5.40" + +"@jupyterlab/application@^4.3.0": + version "4.3.0" + resolved "https://registry.npmjs.org/@jupyterlab/application/-/application-4.3.0.tgz#c33e8f836974fbe4b3dd9f41c46ea3c8c8ce5b60" + integrity sha512-tH34PGe/cKDDcGgUfjtzVnvJRQCn9KRZtmXUHKYVWT4cJ4lgtiTWPPxPZv4vJi6bUFZG3tv9nfrPt1sUlPjAhg== + dependencies: + "@fortawesome/fontawesome-free" "^5.12.0" + "@jupyterlab/apputils" "^4.4.0" + "@jupyterlab/coreutils" "^6.3.0" + "@jupyterlab/docregistry" "^4.3.0" + "@jupyterlab/rendermime" "^4.3.0" + "@jupyterlab/rendermime-interfaces" "^3.11.0" + "@jupyterlab/services" "^7.3.0" + "@jupyterlab/statedb" "^4.3.0" + "@jupyterlab/translation" "^4.3.0" + "@jupyterlab/ui-components" "^4.3.0" + "@lumino/algorithm" "^2.0.2" + "@lumino/application" "^2.4.1" + "@lumino/commands" "^2.3.1" + "@lumino/coreutils" "^2.2.0" + "@lumino/disposable" "^2.1.3" + "@lumino/messaging" "^2.0.2" + "@lumino/polling" "^2.1.3" + "@lumino/properties" "^2.0.2" + "@lumino/signaling" "^2.1.3" + "@lumino/widgets" "^2.5.0" + +"@jupyterlab/apputils@^4.4.0": + version "4.4.0" + resolved "https://registry.npmjs.org/@jupyterlab/apputils/-/apputils-4.4.0.tgz#3c9d58c7ffd4fab5de067ab88b0f67139930d200" + integrity sha512-Qlt36C9AVCyOx1O30KZR0Q1cVbquxsbD+ZhxFdDKaoSlax0vsRrOR42FDLXqvRGO5MCFHe/KtNjTRHise6o/ww== + dependencies: + "@jupyterlab/coreutils" "^6.3.0" + "@jupyterlab/observables" "^5.3.0" + "@jupyterlab/rendermime-interfaces" "^3.11.0" + "@jupyterlab/services" "^7.3.0" + "@jupyterlab/settingregistry" "^4.3.0" + "@jupyterlab/statedb" "^4.3.0" + 
"@jupyterlab/statusbar" "^4.3.0" + "@jupyterlab/translation" "^4.3.0" + "@jupyterlab/ui-components" "^4.3.0" + "@lumino/algorithm" "^2.0.2" + "@lumino/commands" "^2.3.1" + "@lumino/coreutils" "^2.2.0" + "@lumino/disposable" "^2.1.3" + "@lumino/domutils" "^2.0.2" + "@lumino/messaging" "^2.0.2" + "@lumino/signaling" "^2.1.3" + "@lumino/virtualdom" "^2.0.2" + "@lumino/widgets" "^2.5.0" + "@types/react" "^18.0.26" + react "^18.2.0" + sanitize-html "~2.12.1" + +"@jupyterlab/attachments@^4.3.0": + version "4.3.0" + resolved "https://registry.npmjs.org/@jupyterlab/attachments/-/attachments-4.3.0.tgz#41818a3a1f8fa9107f9e8d6ac1b632ab6dddaa21" + integrity sha512-xToxNy3qKbIl1SVAnedAUBcjvFy7CXM1lMkf5jAR/hGGy+ac4dthYXTLiefaUi4UvpBLbeHfeY/Ya8UuWfSPkg== + dependencies: + "@jupyterlab/nbformat" "^4.3.0" + "@jupyterlab/observables" "^5.3.0" + "@jupyterlab/rendermime" "^4.3.0" + "@jupyterlab/rendermime-interfaces" "^3.11.0" + "@lumino/disposable" "^2.1.3" + "@lumino/signaling" "^2.1.3" + +"@jupyterlab/cells@^4.3.0": + version "4.3.0" + resolved "https://registry.npmjs.org/@jupyterlab/cells/-/cells-4.3.0.tgz#8079c5d532891aa4a2b3ef260a8046a766fb1d99" + integrity sha512-IxTwICkGzp/S18TNoC+81sRGcPtlTVWZ7G44lTXTYKW+YrWRlO/bIyoNmWlc7Xu5tRitfmzqJFw80V/enMqG6A== + dependencies: + "@codemirror/state" "^6.4.1" + "@codemirror/view" "^6.26.3" + "@jupyter/ydoc" "^3.0.0" + "@jupyterlab/apputils" "^4.4.0" + "@jupyterlab/attachments" "^4.3.0" + "@jupyterlab/codeeditor" "^4.3.0" + "@jupyterlab/codemirror" "^4.3.0" + "@jupyterlab/coreutils" "^6.3.0" + "@jupyterlab/documentsearch" "^4.3.0" + "@jupyterlab/filebrowser" "^4.3.0" + "@jupyterlab/nbformat" "^4.3.0" + "@jupyterlab/observables" "^5.3.0" + "@jupyterlab/outputarea" "^4.3.0" + "@jupyterlab/rendermime" "^4.3.0" + "@jupyterlab/services" "^7.3.0" + "@jupyterlab/toc" "^6.3.0" + "@jupyterlab/translation" "^4.3.0" + "@jupyterlab/ui-components" "^4.3.0" + "@lumino/algorithm" "^2.0.2" + "@lumino/coreutils" "^2.2.0" + "@lumino/domutils" "^2.0.2" + 
"@lumino/dragdrop" "^2.1.5" + "@lumino/messaging" "^2.0.2" + "@lumino/polling" "^2.1.3" + "@lumino/signaling" "^2.1.3" + "@lumino/virtualdom" "^2.0.2" + "@lumino/widgets" "^2.5.0" + react "^18.2.0" + +"@jupyterlab/codeeditor@^4.3.0": + version "4.3.0" + resolved "https://registry.npmjs.org/@jupyterlab/codeeditor/-/codeeditor-4.3.0.tgz#1856dc4e0a8b5f08700b438cf5c11596fe156a45" + integrity sha512-eV0lxowI2CFalnqKL62kWV7/EekLfaQ4RjjrQJ8C+pz4/QNgj7oM/oaYd8YVM4rRa+TqGFQOzXoDm3Wk4Ely6g== + dependencies: + "@codemirror/state" "^6.4.1" + "@jupyter/ydoc" "^3.0.0" + "@jupyterlab/apputils" "^4.4.0" + "@jupyterlab/coreutils" "^6.3.0" + "@jupyterlab/nbformat" "^4.3.0" + "@jupyterlab/observables" "^5.3.0" + "@jupyterlab/statusbar" "^4.3.0" + "@jupyterlab/translation" "^4.3.0" + "@jupyterlab/ui-components" "^4.3.0" + "@lumino/coreutils" "^2.2.0" + "@lumino/disposable" "^2.1.3" + "@lumino/dragdrop" "^2.1.5" + "@lumino/messaging" "^2.0.2" + "@lumino/signaling" "^2.1.3" + "@lumino/widgets" "^2.5.0" + react "^18.2.0" + +"@jupyterlab/codemirror@^4.3.0": + version "4.3.0" + resolved "https://registry.npmjs.org/@jupyterlab/codemirror/-/codemirror-4.3.0.tgz#312be380cfe39dcd9f76527935314523c5f80e34" + integrity sha512-8cVyybFe3g8Z5A6Hz8p5Xsp0rqaQyZBypxqW952hDrMJ6jqWERBtQQ0Jwjqjz+TdABaloX/wIbG+oYFbfrfxKA== + dependencies: + "@codemirror/autocomplete" "^6.16.0" + "@codemirror/commands" "^6.5.0" + "@codemirror/lang-cpp" "^6.0.2" + "@codemirror/lang-css" "^6.2.1" + "@codemirror/lang-html" "^6.4.9" + "@codemirror/lang-java" "^6.0.1" + "@codemirror/lang-javascript" "^6.2.2" + "@codemirror/lang-json" "^6.0.1" + "@codemirror/lang-markdown" "^6.2.5" + "@codemirror/lang-php" "^6.0.1" + "@codemirror/lang-python" "^6.1.6" + "@codemirror/lang-rust" "^6.0.1" + "@codemirror/lang-sql" "^6.6.4" + "@codemirror/lang-wast" "^6.0.2" + "@codemirror/lang-xml" "^6.1.0" + "@codemirror/language" "^6.10.1" + "@codemirror/legacy-modes" "^6.4.0" + "@codemirror/search" "^6.5.6" + "@codemirror/state" "^6.4.1" + 
"@codemirror/view" "^6.26.3" + "@jupyter/ydoc" "^3.0.0" + "@jupyterlab/codeeditor" "^4.3.0" + "@jupyterlab/coreutils" "^6.3.0" + "@jupyterlab/documentsearch" "^4.3.0" + "@jupyterlab/nbformat" "^4.3.0" + "@jupyterlab/translation" "^4.3.0" + "@lezer/common" "^1.2.1" + "@lezer/generator" "^1.7.0" + "@lezer/highlight" "^1.2.0" + "@lezer/markdown" "^1.3.0" + "@lumino/coreutils" "^2.2.0" + "@lumino/disposable" "^2.1.3" + "@lumino/signaling" "^2.1.3" + yjs "^13.5.40" + +"@jupyterlab/console@^4.3.0": + version "4.3.0" + resolved "https://registry.npmjs.org/@jupyterlab/console/-/console-4.3.0.tgz#5e425dcd2c6d8d50791410bdd429cc852e2b00bc" + integrity sha512-Ph82d61psolGDNxRwO+88deP8agjK86+sh417J6HKxYzat8X/5HklRyxLTkztQYXUQUd7FobeQ+UH1ezCk9RwA== + dependencies: + "@jupyter/ydoc" "^3.0.0" + "@jupyterlab/apputils" "^4.4.0" + "@jupyterlab/cells" "^4.3.0" + "@jupyterlab/codeeditor" "^4.3.0" + "@jupyterlab/coreutils" "^6.3.0" + "@jupyterlab/nbformat" "^4.3.0" + "@jupyterlab/observables" "^5.3.0" + "@jupyterlab/rendermime" "^4.3.0" + "@jupyterlab/services" "^7.3.0" + "@jupyterlab/translation" "^4.3.0" + "@jupyterlab/ui-components" "^4.3.0" + "@lumino/coreutils" "^2.2.0" + "@lumino/disposable" "^2.1.3" + "@lumino/dragdrop" "^2.1.5" + "@lumino/messaging" "^2.0.2" + "@lumino/signaling" "^2.1.3" + "@lumino/widgets" "^2.5.0" + +"@jupyterlab/coreutils@^6.3.0": + version "6.3.0" + resolved "https://registry.npmjs.org/@jupyterlab/coreutils/-/coreutils-6.3.0.tgz#592b7aa640d82aac4be42bb5e95394d40e6bd159" + integrity sha512-zsoMx18JXfVEvMR4OVb+GR/AirXYEUBveySoY6/Z4Kv6vLZh2ZC+JZKgnlpPvql7D7Aa7tCUbSJdV33+fYELIQ== + dependencies: + "@lumino/coreutils" "^2.2.0" + "@lumino/disposable" "^2.1.3" + "@lumino/signaling" "^2.1.3" + minimist "~1.2.0" + path-browserify "^1.0.0" + url-parse "~1.5.4" + +"@jupyterlab/debugger@^4.3.0": + version "4.3.0" + resolved "https://registry.npmjs.org/@jupyterlab/debugger/-/debugger-4.3.0.tgz#7730a102652c26e4519f99194618e34ba8377022" + integrity 
sha512-mNpcm0NufsWxdCe4hnvm0WTBOW1lIe1sJyO/uos7JSdXUFtknYmtoEobVuf2y/goT6X5guvekMl22UIZQBVgXw== + dependencies: + "@codemirror/state" "^6.4.1" + "@codemirror/view" "^6.26.3" + "@jupyter/react-components" "^0.16.6" + "@jupyter/ydoc" "^3.0.0" + "@jupyterlab/application" "^4.3.0" + "@jupyterlab/apputils" "^4.4.0" + "@jupyterlab/cells" "^4.3.0" + "@jupyterlab/codeeditor" "^4.3.0" + "@jupyterlab/codemirror" "^4.3.0" + "@jupyterlab/console" "^4.3.0" + "@jupyterlab/coreutils" "^6.3.0" + "@jupyterlab/docregistry" "^4.3.0" + "@jupyterlab/fileeditor" "^4.3.0" + "@jupyterlab/notebook" "^4.3.0" + "@jupyterlab/observables" "^5.3.0" + "@jupyterlab/rendermime" "^4.3.0" + "@jupyterlab/services" "^7.3.0" + "@jupyterlab/translation" "^4.3.0" + "@jupyterlab/ui-components" "^4.3.0" + "@lumino/algorithm" "^2.0.2" + "@lumino/commands" "^2.3.1" + "@lumino/coreutils" "^2.2.0" + "@lumino/datagrid" "^2.4.1" + "@lumino/disposable" "^2.1.3" + "@lumino/messaging" "^2.0.2" + "@lumino/polling" "^2.1.3" + "@lumino/signaling" "^2.1.3" + "@lumino/widgets" "^2.5.0" + "@vscode/debugprotocol" "^1.51.0" + react "^18.2.0" + +"@jupyterlab/docmanager@^4.3.0": + version "4.3.0" + resolved "https://registry.npmjs.org/@jupyterlab/docmanager/-/docmanager-4.3.0.tgz#256588d7f4d1ff632f89103bbd75b65a8a2dd009" + integrity sha512-ptdvLpD5i6CPTxHeL+Q8Yih36+59jOdDPZgwfb3TC5Atx8SdsWhm2I0HA8G/j+Xq/2Grd5L3kwCkvQTG332Nxg== + dependencies: + "@jupyterlab/apputils" "^4.4.0" + "@jupyterlab/coreutils" "^6.3.0" + "@jupyterlab/docregistry" "^4.3.0" + "@jupyterlab/services" "^7.3.0" + "@jupyterlab/statedb" "^4.3.0" + "@jupyterlab/statusbar" "^4.3.0" + "@jupyterlab/translation" "^4.3.0" + "@jupyterlab/ui-components" "^4.3.0" + "@lumino/algorithm" "^2.0.2" + "@lumino/coreutils" "^2.2.0" + "@lumino/disposable" "^2.1.3" + "@lumino/messaging" "^2.0.2" + "@lumino/polling" "^2.1.3" + "@lumino/properties" "^2.0.2" + "@lumino/signaling" "^2.1.3" + "@lumino/widgets" "^2.5.0" + react "^18.2.0" + +"@jupyterlab/docregistry@^4.3.0": + 
version "4.3.0" + resolved "https://registry.npmjs.org/@jupyterlab/docregistry/-/docregistry-4.3.0.tgz#34a393c95aa48830b0cdab0ee4c16850830cf87c" + integrity sha512-FNJ7WNUDQrm0Fde+GqZqWUf7TYky2cznb7r3D31Anpbp5wggyDJqfqNmJ1EAehxFr/UxKDmDg4u3Zw1YjTLJGg== + dependencies: + "@jupyter/ydoc" "^3.0.0" + "@jupyterlab/apputils" "^4.4.0" + "@jupyterlab/codeeditor" "^4.3.0" + "@jupyterlab/coreutils" "^6.3.0" + "@jupyterlab/observables" "^5.3.0" + "@jupyterlab/rendermime" "^4.3.0" + "@jupyterlab/rendermime-interfaces" "^3.11.0" + "@jupyterlab/services" "^7.3.0" + "@jupyterlab/translation" "^4.3.0" + "@jupyterlab/ui-components" "^4.3.0" + "@lumino/algorithm" "^2.0.2" + "@lumino/coreutils" "^2.2.0" + "@lumino/disposable" "^2.1.3" + "@lumino/messaging" "^2.0.2" + "@lumino/properties" "^2.0.2" + "@lumino/signaling" "^2.1.3" + "@lumino/widgets" "^2.5.0" + react "^18.2.0" + +"@jupyterlab/documentsearch@^4.3.0": + version "4.3.0" + resolved "https://registry.npmjs.org/@jupyterlab/documentsearch/-/documentsearch-4.3.0.tgz#42b4c1d60dc3306f37833d78baeb9b35152ed6c3" + integrity sha512-nNMc+ldAYy4XeliHAzMdCIrt9lBVCnHvgtvkwV6zI91ve1YPXr/ak5Fsy9GHOXMGODkCLqAjo6uBIiQLO+g9eQ== + dependencies: + "@jupyterlab/apputils" "^4.4.0" + "@jupyterlab/translation" "^4.3.0" + "@jupyterlab/ui-components" "^4.3.0" + "@lumino/commands" "^2.3.1" + "@lumino/coreutils" "^2.2.0" + "@lumino/disposable" "^2.1.3" + "@lumino/messaging" "^2.0.2" + "@lumino/polling" "^2.1.3" + "@lumino/signaling" "^2.1.3" + "@lumino/widgets" "^2.5.0" + react "^18.2.0" + +"@jupyterlab/filebrowser@^4.3.0": + version "4.3.0" + resolved "https://registry.npmjs.org/@jupyterlab/filebrowser/-/filebrowser-4.3.0.tgz#b9e91995d582c7367abb32579c1da1fa74ef2056" + integrity sha512-6lm77YO+Z78WZiJTURTUniduI2XpBkLHsSiWkw+3AaESxGxDyAAxk4YJftlZe1AYaMi6Oh6Kl4MmW7XuN2HCQg== + dependencies: + "@jupyterlab/apputils" "^4.4.0" + "@jupyterlab/coreutils" "^6.3.0" + "@jupyterlab/docmanager" "^4.3.0" + "@jupyterlab/docregistry" "^4.3.0" + "@jupyterlab/services" 
"^7.3.0" + "@jupyterlab/statedb" "^4.3.0" + "@jupyterlab/statusbar" "^4.3.0" + "@jupyterlab/translation" "^4.3.0" + "@jupyterlab/ui-components" "^4.3.0" + "@lumino/algorithm" "^2.0.2" + "@lumino/coreutils" "^2.2.0" + "@lumino/disposable" "^2.1.3" + "@lumino/domutils" "^2.0.2" + "@lumino/dragdrop" "^2.1.5" + "@lumino/messaging" "^2.0.2" + "@lumino/polling" "^2.1.3" + "@lumino/signaling" "^2.1.3" + "@lumino/virtualdom" "^2.0.2" + "@lumino/widgets" "^2.5.0" + react "^18.2.0" + +"@jupyterlab/fileeditor@^4.3.0": + version "4.3.0" + resolved "https://registry.npmjs.org/@jupyterlab/fileeditor/-/fileeditor-4.3.0.tgz#b6ab4ffef899a0d14f59b840f1fa24642abe7ac2" + integrity sha512-WmWFGDxxXuNc3Jc/dNEChscOC1URZvR/YNE4bPNsZAZxXEzLhm538ELT5gWPHjUwlw4ZV+J//+w3avmQJJ+p9Q== + dependencies: + "@jupyter/ydoc" "^3.0.0" + "@jupyterlab/apputils" "^4.4.0" + "@jupyterlab/codeeditor" "^4.3.0" + "@jupyterlab/codemirror" "^4.3.0" + "@jupyterlab/coreutils" "^6.3.0" + "@jupyterlab/docregistry" "^4.3.0" + "@jupyterlab/documentsearch" "^4.3.0" + "@jupyterlab/lsp" "^4.3.0" + "@jupyterlab/statusbar" "^4.3.0" + "@jupyterlab/toc" "^6.3.0" + "@jupyterlab/translation" "^4.3.0" + "@jupyterlab/ui-components" "^4.3.0" + "@lumino/commands" "^2.3.1" + "@lumino/coreutils" "^2.2.0" + "@lumino/messaging" "^2.0.2" + "@lumino/widgets" "^2.5.0" + react "^18.2.0" + regexp-match-indices "^1.0.2" + +"@jupyterlab/galata@^5.3.0": + version "5.3.0" + resolved "https://registry.npmjs.org/@jupyterlab/galata/-/galata-5.3.0.tgz#b9e0e731689a691911790c216410bff733a465d8" + integrity sha512-27F2XGkZwX3uuD3P0SQbJ5iOx/DZS+VRUO73VrS6YStNH6qw70De5jnTdPxZ5CnJw10lFaQbEboZ9g4798gEYg== + dependencies: + "@jupyterlab/application" "^4.3.0" + "@jupyterlab/apputils" "^4.4.0" + "@jupyterlab/coreutils" "^6.3.0" + "@jupyterlab/debugger" "^4.3.0" + "@jupyterlab/docmanager" "^4.3.0" + "@jupyterlab/nbformat" "^4.3.0" + "@jupyterlab/notebook" "^4.3.0" + "@jupyterlab/services" "^7.3.0" + "@jupyterlab/settingregistry" "^4.3.0" + 
"@lumino/coreutils" "^2.2.0" + "@playwright/test" "^1.48.0" + "@stdlib/stats" "~0.0.13" + fs-extra "^10.1.0" + json5 "^2.2.3" + path "~0.12.7" + systeminformation "^5.8.6" + vega "^5.20.0" + vega-lite "^5.6.1" + vega-statistics "^1.7.9" + +"@jupyterlab/lsp@^4.3.0": + version "4.3.0" + resolved "https://registry.npmjs.org/@jupyterlab/lsp/-/lsp-4.3.0.tgz#b53ba93442ae4def24525e8c325c805887f950a9" + integrity sha512-mN7kR5MWKM+wnrXyDZOTnRe2xmlGjNb94V6ALKmFckKgGiFjAGBJkfrUjY0DkLjJWAzlaT8tjrxNDNyKdx9qwQ== + dependencies: + "@jupyterlab/apputils" "^4.4.0" + "@jupyterlab/codeeditor" "^4.3.0" + "@jupyterlab/codemirror" "^4.3.0" + "@jupyterlab/coreutils" "^6.3.0" + "@jupyterlab/docregistry" "^4.3.0" + "@jupyterlab/services" "^7.3.0" + "@jupyterlab/translation" "^4.3.0" + "@lumino/coreutils" "^2.2.0" + "@lumino/disposable" "^2.1.3" + "@lumino/signaling" "^2.1.3" + "@lumino/widgets" "^2.5.0" + lodash.mergewith "^4.6.1" + vscode-jsonrpc "^6.0.0" + vscode-languageserver-protocol "^3.17.0" + vscode-ws-jsonrpc "~1.0.2" + +"@jupyterlab/nbformat@^3.0.0 || ^4.0.0-alpha.21 || ^4.0.0", "@jupyterlab/nbformat@^4.3.0": + version "4.3.0" + resolved "https://registry.npmjs.org/@jupyterlab/nbformat/-/nbformat-4.3.0.tgz#8581a59e7b088876d741df3b1598931677e0bce8" + integrity sha512-7XfYrCN3eF00tJq3Z+fJd+d9AmoJIRvXEcjmcwRdddUkb44jVEKxZ9LGCRZ0m4QPDCMticyrqbXQpVMJIrNDeg== + dependencies: + "@lumino/coreutils" "^2.2.0" + +"@jupyterlab/notebook@^4.3.0": + version "4.3.0" + resolved "https://registry.npmjs.org/@jupyterlab/notebook/-/notebook-4.3.0.tgz#03292ac51ae8ca85e938a936aa56361145371d31" + integrity sha512-2/nM9a9R9zrgBLg+k4hn8PeImx7RjUYTKW3OCVPwxxAh38RlTc98A8G9phKuLeMMWYXJZwObOPt0dOEI+j7bJA== + dependencies: + "@jupyter/ydoc" "^3.0.0" + "@jupyterlab/apputils" "^4.4.0" + "@jupyterlab/cells" "^4.3.0" + "@jupyterlab/codeeditor" "^4.3.0" + "@jupyterlab/codemirror" "^4.3.0" + "@jupyterlab/coreutils" "^6.3.0" + "@jupyterlab/docregistry" "^4.3.0" + "@jupyterlab/documentsearch" "^4.3.0" + 
"@jupyterlab/lsp" "^4.3.0" + "@jupyterlab/nbformat" "^4.3.0" + "@jupyterlab/observables" "^5.3.0" + "@jupyterlab/rendermime" "^4.3.0" + "@jupyterlab/services" "^7.3.0" + "@jupyterlab/settingregistry" "^4.3.0" + "@jupyterlab/statusbar" "^4.3.0" + "@jupyterlab/toc" "^6.3.0" + "@jupyterlab/translation" "^4.3.0" + "@jupyterlab/ui-components" "^4.3.0" + "@lumino/algorithm" "^2.0.2" + "@lumino/coreutils" "^2.2.0" + "@lumino/disposable" "^2.1.3" + "@lumino/domutils" "^2.0.2" + "@lumino/dragdrop" "^2.1.5" + "@lumino/messaging" "^2.0.2" + "@lumino/polling" "^2.1.3" + "@lumino/properties" "^2.0.2" + "@lumino/signaling" "^2.1.3" + "@lumino/virtualdom" "^2.0.2" + "@lumino/widgets" "^2.5.0" + react "^18.2.0" + +"@jupyterlab/observables@^5.3.0": + version "5.3.0" + resolved "https://registry.npmjs.org/@jupyterlab/observables/-/observables-5.3.0.tgz#1941ba061070594a3fb61680415e5f8575c9db23" + integrity sha512-MasQvRzHcu+ROzyLUwH8X9Qpv/A8M3SXkqnozzi6Ttcx7kVZsbRTvkSR5KQWESsY1FEu0r450e5VtjSy1QM9sw== + dependencies: + "@lumino/algorithm" "^2.0.2" + "@lumino/coreutils" "^2.2.0" + "@lumino/disposable" "^2.1.3" + "@lumino/messaging" "^2.0.2" + "@lumino/signaling" "^2.1.3" + +"@jupyterlab/outputarea@^4.3.0": + version "4.3.0" + resolved "https://registry.npmjs.org/@jupyterlab/outputarea/-/outputarea-4.3.0.tgz#ea94d50ae90d144eb27a9c3564af87a975b46c6d" + integrity sha512-3TslhNnnaxtGdZ3j/G7YOOVCosH1fIKceDIByjccir7f6Z9mMz23BDffoSx25vmq0FsVJB+FPsYBHeyfTfrvtQ== + dependencies: + "@jupyterlab/apputils" "^4.4.0" + "@jupyterlab/nbformat" "^4.3.0" + "@jupyterlab/observables" "^5.3.0" + "@jupyterlab/rendermime" "^4.3.0" + "@jupyterlab/rendermime-interfaces" "^3.11.0" + "@jupyterlab/services" "^7.3.0" + "@jupyterlab/translation" "^4.3.0" + "@lumino/algorithm" "^2.0.2" + "@lumino/coreutils" "^2.2.0" + "@lumino/disposable" "^2.1.3" + "@lumino/messaging" "^2.0.2" + "@lumino/properties" "^2.0.2" + "@lumino/signaling" "^2.1.3" + "@lumino/widgets" "^2.5.0" + +"@jupyterlab/rendermime-interfaces@^3.11.0": 
+ version "3.11.0" + resolved "https://registry.npmjs.org/@jupyterlab/rendermime-interfaces/-/rendermime-interfaces-3.11.0.tgz#170b263124b5e3cdaab8f12e7dc6a7b85347ee65" + integrity sha512-Fn+H4iCBQow6IG+hWn7JzQQRQN4ePQz7gVBZ1s+dRvONnsOfAjs/Zgmbs1+0mZ/MQe23AKQoqV/Yqq7jbn3GhQ== + dependencies: + "@lumino/coreutils" "^1.11.0 || ^2.2.0" + "@lumino/widgets" "^1.37.2 || ^2.5.0" + +"@jupyterlab/rendermime@^4.3.0": + version "4.3.0" + resolved "https://registry.npmjs.org/@jupyterlab/rendermime/-/rendermime-4.3.0.tgz#de1fe8fb350600193eff9dc9e215d537e41a4df9" + integrity sha512-D+S0RQxNZGCcpzBKXcUkEafS8HIaFOinqyzFQ0f6RMM9mbHRUoIRWG5ZBdpqK7FALQnOqKnz1Zyzawy2WkEIqg== + dependencies: + "@jupyterlab/apputils" "^4.4.0" + "@jupyterlab/coreutils" "^6.3.0" + "@jupyterlab/nbformat" "^4.3.0" + "@jupyterlab/observables" "^5.3.0" + "@jupyterlab/rendermime-interfaces" "^3.11.0" + "@jupyterlab/services" "^7.3.0" + "@jupyterlab/translation" "^4.3.0" + "@lumino/coreutils" "^2.2.0" + "@lumino/messaging" "^2.0.2" + "@lumino/signaling" "^2.1.3" + "@lumino/widgets" "^2.5.0" + lodash.escape "^4.0.1" + +"@jupyterlab/services@^7.3.0": + version "7.3.0" + resolved "https://registry.npmjs.org/@jupyterlab/services/-/services-7.3.0.tgz#c1cc3f0c0427ae55c2f6ca64317f0eb73c17476a" + integrity sha512-u9GWFMTEUJvDszz98tIUpaBOsUGSybQjwv+263obtCjaceezy87SReIsQefoI1Dh8SGfngGW7IOvThM/LNoYGw== + dependencies: + "@jupyter/ydoc" "^3.0.0" + "@jupyterlab/coreutils" "^6.3.0" + "@jupyterlab/nbformat" "^4.3.0" + "@jupyterlab/settingregistry" "^4.3.0" + "@jupyterlab/statedb" "^4.3.0" + "@lumino/coreutils" "^2.2.0" + "@lumino/disposable" "^2.1.3" + "@lumino/polling" "^2.1.3" + "@lumino/properties" "^2.0.2" + "@lumino/signaling" "^2.1.3" + ws "^8.11.0" + +"@jupyterlab/settingregistry@^4.3.0": + version "4.3.0" + resolved "https://registry.npmjs.org/@jupyterlab/settingregistry/-/settingregistry-4.3.0.tgz#46d9c506b43b56f4eb8f2819114e180224024b50" + integrity 
sha512-+/1IOaANMI35CVO67yTKAo0cVau04MH0QFeJUv9DtY88CU50O6vtfh9+gPGQnl/dTwgHZNiZbfapyLXvLuASig== + dependencies: + "@jupyterlab/nbformat" "^4.3.0" + "@jupyterlab/statedb" "^4.3.0" + "@lumino/commands" "^2.3.1" + "@lumino/coreutils" "^2.2.0" + "@lumino/disposable" "^2.1.3" + "@lumino/signaling" "^2.1.3" + "@rjsf/utils" "^5.13.4" + ajv "^8.12.0" + json5 "^2.2.3" + +"@jupyterlab/statedb@^4.3.0": + version "4.3.0" + resolved "https://registry.npmjs.org/@jupyterlab/statedb/-/statedb-4.3.0.tgz#0242a0fb58ddd7cc464af4a3df277ae118aa68a3" + integrity sha512-NyME5GIHmTwV2MLIqtxV9hMxKa0v9AjAasN6xtDqhlyFwsPd4kI1dUAlYjxJ9Cbcc+z5K3/XNoFZyErOe/JQPQ== + dependencies: + "@lumino/commands" "^2.3.1" + "@lumino/coreutils" "^2.2.0" + "@lumino/disposable" "^2.1.3" + "@lumino/properties" "^2.0.2" + "@lumino/signaling" "^2.1.3" + +"@jupyterlab/statusbar@^4.3.0": + version "4.3.0" + resolved "https://registry.npmjs.org/@jupyterlab/statusbar/-/statusbar-4.3.0.tgz#1482d69fe471dc61fc568de6f58ac6536904fb3a" + integrity sha512-ds8NB5MacSyzSYFGxTBtdxQ8RbfcUeCOSR8PLMqZ+DQuqAlyMAuzqNOGDHLmNVChSnzB97wLUeRWVkGj74PD4g== + dependencies: + "@jupyterlab/ui-components" "^4.3.0" + "@lumino/algorithm" "^2.0.2" + "@lumino/coreutils" "^2.2.0" + "@lumino/disposable" "^2.1.3" + "@lumino/messaging" "^2.0.2" + "@lumino/signaling" "^2.1.3" + "@lumino/widgets" "^2.5.0" + react "^18.2.0" + +"@jupyterlab/toc@^6.3.0": + version "6.3.0" + resolved "https://registry.npmjs.org/@jupyterlab/toc/-/toc-6.3.0.tgz#18174d803fa00c27b503d355f5fbe5adf5d2210c" + integrity sha512-NjZqFllvpS4BlQzOfDa+YpQqeU1h8EQqaH9fvnfv48csUZ02P7ffvKcyBjzAcMpfhEfaXxIS1TqWPGC6jv+2hA== + dependencies: + "@jupyter/react-components" "^0.16.6" + "@jupyterlab/apputils" "^4.4.0" + "@jupyterlab/coreutils" "^6.3.0" + "@jupyterlab/docregistry" "^4.3.0" + "@jupyterlab/observables" "^5.3.0" + "@jupyterlab/rendermime" "^4.3.0" + "@jupyterlab/rendermime-interfaces" "^3.11.0" + "@jupyterlab/translation" "^4.3.0" + "@jupyterlab/ui-components" "^4.3.0" + 
"@lumino/coreutils" "^2.2.0" + "@lumino/disposable" "^2.1.3" + "@lumino/messaging" "^2.0.2" + "@lumino/signaling" "^2.1.3" + "@lumino/widgets" "^2.5.0" + react "^18.2.0" + +"@jupyterlab/translation@^4.3.0": + version "4.3.0" + resolved "https://registry.npmjs.org/@jupyterlab/translation/-/translation-4.3.0.tgz#426756c8e63b70f4643848b0ce3eb384979159b4" + integrity sha512-um8rbFXs4S1AtzHqtgnh80ttJ50mid1dVwgD+gz+M3tFGqG9ZvTWLfWOeIOCzSGyv0dtYtuWUf9M6S1S9jBNfw== + dependencies: + "@jupyterlab/coreutils" "^6.3.0" + "@jupyterlab/rendermime-interfaces" "^3.11.0" + "@jupyterlab/services" "^7.3.0" + "@jupyterlab/statedb" "^4.3.0" + "@lumino/coreutils" "^2.2.0" + +"@jupyterlab/ui-components@^4.3.0": + version "4.3.0" + resolved "https://registry.npmjs.org/@jupyterlab/ui-components/-/ui-components-4.3.0.tgz#925284ebdf121940de7ef8db3691cb68c6131c79" + integrity sha512-P0axMQ61aqgypTherhbVMgj47iWYhZgDKNj7YeJTQgiWotWYNmF2AHiiE7H2orIklgzCZh0OQKvcCQLo2cQW1A== + dependencies: + "@jupyter/react-components" "^0.16.6" + "@jupyter/web-components" "^0.16.6" + "@jupyterlab/coreutils" "^6.3.0" + "@jupyterlab/observables" "^5.3.0" + "@jupyterlab/rendermime-interfaces" "^3.11.0" + "@jupyterlab/translation" "^4.3.0" + "@lumino/algorithm" "^2.0.2" + "@lumino/commands" "^2.3.1" + "@lumino/coreutils" "^2.2.0" + "@lumino/disposable" "^2.1.3" + "@lumino/messaging" "^2.0.2" + "@lumino/polling" "^2.1.3" + "@lumino/properties" "^2.0.2" + "@lumino/signaling" "^2.1.3" + "@lumino/virtualdom" "^2.0.2" + "@lumino/widgets" "^2.5.0" + "@rjsf/core" "^5.13.4" + "@rjsf/utils" "^5.13.4" + react "^18.2.0" + react-dom "^18.2.0" + typestyle "^2.0.4" + +"@lezer/common@^1.0.0", "@lezer/common@^1.0.2", "@lezer/common@^1.1.0", "@lezer/common@^1.2.0", "@lezer/common@^1.2.1": + version "1.2.1" + resolved "https://registry.npmjs.org/@lezer/common/-/common-1.2.1.tgz#198b278b7869668e1bebbe687586e12a42731049" + integrity sha512-yemX0ZD2xS/73llMZIK6KplkjIjf2EvAHcinDi/TfJ9hS25G0388+ClHt6/3but0oOxinTcQHJLDXh6w1crzFQ== + 
+"@lezer/cpp@^1.0.0": + version "1.1.2" + resolved "https://registry.npmjs.org/@lezer/cpp/-/cpp-1.1.2.tgz#1db93b09e011e8a7a08c347c9d5b7749971253bf" + integrity sha512-macwKtyeUO0EW86r3xWQCzOV9/CF8imJLpJlPv3sDY57cPGeUZ8gXWOWNlJr52TVByMV3PayFQCA5SHEERDmVQ== + dependencies: + "@lezer/common" "^1.2.0" + "@lezer/highlight" "^1.0.0" + "@lezer/lr" "^1.0.0" + +"@lezer/css@^1.1.0", "@lezer/css@^1.1.7": + version "1.1.9" + resolved "https://registry.npmjs.org/@lezer/css/-/css-1.1.9.tgz#404563d361422c5a1fe917295f1527ee94845ed1" + integrity sha512-TYwgljcDv+YrV0MZFFvYFQHCfGgbPMR6nuqLabBdmZoFH3EP1gvw8t0vae326Ne3PszQkbXfVBjCnf3ZVCr0bA== + dependencies: + "@lezer/common" "^1.2.0" + "@lezer/highlight" "^1.0.0" + "@lezer/lr" "^1.0.0" + +"@lezer/generator@^1.7.0": + version "1.7.1" + resolved "https://registry.npmjs.org/@lezer/generator/-/generator-1.7.1.tgz#90c1a9de2fb4d5a714216fa659058c7859accaab" + integrity sha512-MgPJN9Si+ccxzXl3OAmCeZuUKw4XiPl4y664FX/hnnyG9CTqUPq65N3/VGPA2jD23D7QgMTtNqflta+cPN+5mQ== + dependencies: + "@lezer/common" "^1.1.0" + "@lezer/lr" "^1.3.0" + +"@lezer/highlight@^1.0.0", "@lezer/highlight@^1.1.3", "@lezer/highlight@^1.2.0": + version "1.2.1" + resolved "https://registry.npmjs.org/@lezer/highlight/-/highlight-1.2.1.tgz#596fa8f9aeb58a608be0a563e960c373cbf23f8b" + integrity sha512-Z5duk4RN/3zuVO7Jq0pGLJ3qynpxUVsh7IbUbGj88+uV2ApSAn6kWg2au3iJb+0Zi7kKtqffIESgNcRXWZWmSA== + dependencies: + "@lezer/common" "^1.0.0" + +"@lezer/html@^1.3.0": + version "1.3.10" + resolved "https://registry.npmjs.org/@lezer/html/-/html-1.3.10.tgz#1be9a029a6fe835c823b20a98a449a630416b2af" + integrity sha512-dqpT8nISx/p9Do3AchvYGV3qYc4/rKr3IBZxlHmpIKam56P47RSHkSF5f13Vu9hebS1jM0HmtJIwLbWz1VIY6w== + dependencies: + "@lezer/common" "^1.2.0" + "@lezer/highlight" "^1.0.0" + "@lezer/lr" "^1.0.0" + +"@lezer/java@^1.0.0": + version "1.1.2" + resolved "https://registry.npmjs.org/@lezer/java/-/java-1.1.2.tgz#01a6ffefa9a692ac6cd492f8b924009edcb903d7" + integrity 
sha512-3j8X70JvYf0BZt8iSRLXLkt0Ry1hVUgH6wT32yBxH/Xi55nW2VMhc1Az4SKwu4YGSmxCm1fsqDDcHTuFjC8pmg== + dependencies: + "@lezer/common" "^1.2.0" + "@lezer/highlight" "^1.0.0" + "@lezer/lr" "^1.0.0" + +"@lezer/javascript@^1.0.0": + version "1.4.17" + resolved "https://registry.npmjs.org/@lezer/javascript/-/javascript-1.4.17.tgz#8456e369f960c328b9e823342d0c72d704238c31" + integrity sha512-bYW4ctpyGK+JMumDApeUzuIezX01H76R1foD6LcRX224FWfyYit/HYxiPGDjXXe/wQWASjCvVGoukTH68+0HIA== + dependencies: + "@lezer/common" "^1.2.0" + "@lezer/highlight" "^1.1.3" + "@lezer/lr" "^1.3.0" + +"@lezer/json@^1.0.0": + version "1.0.2" + resolved "https://registry.npmjs.org/@lezer/json/-/json-1.0.2.tgz#bdc849e174113e2d9a569a5e6fb1a27e2f703eaf" + integrity sha512-xHT2P4S5eeCYECyKNPhr4cbEL9tc8w83SPwRC373o9uEdrvGKTZoJVAGxpOsZckMlEh9W23Pc72ew918RWQOBQ== + dependencies: + "@lezer/common" "^1.2.0" + "@lezer/highlight" "^1.0.0" + "@lezer/lr" "^1.0.0" + +"@lezer/lr@^1.0.0", "@lezer/lr@^1.1.0", "@lezer/lr@^1.3.0": + version "1.4.2" + resolved "https://registry.npmjs.org/@lezer/lr/-/lr-1.4.2.tgz#931ea3dea8e9de84e90781001dae30dea9ff1727" + integrity sha512-pu0K1jCIdnQ12aWNaAVU5bzi7Bd1w54J3ECgANPmYLtQKP0HBj2cE/5coBD66MT10xbtIuUr7tg0Shbsvk0mDA== + dependencies: + "@lezer/common" "^1.0.0" + +"@lezer/markdown@^1.0.0": + version "1.3.1" + resolved "https://registry.npmjs.org/@lezer/markdown/-/markdown-1.3.1.tgz#2193012296927f383102d7a2b035f323759315b3" + integrity sha512-DGlzU/i8DC8k0uz1F+jeePrkATl0jWakauTzftMQOcbaMkHbNSRki/4E2tOzJWsVpoKYhe7iTJ03aepdwVUXUA== + dependencies: + "@lezer/common" "^1.0.0" + "@lezer/highlight" "^1.0.0" + +"@lezer/markdown@^1.3.0": + version "1.3.2" + resolved "https://registry.npmjs.org/@lezer/markdown/-/markdown-1.3.2.tgz#9d648b2a6cb47523f3d7ab494eee8c7be4f1ea9e" + integrity sha512-Wu7B6VnrKTbBEohqa63h5vxXjiC4pO5ZQJ/TDbhJxPQaaIoRD/6UVDhSDtVsCwVZV12vvN9KxuLL3ATMnlG0oQ== + dependencies: + "@lezer/common" "^1.0.0" + "@lezer/highlight" "^1.0.0" + +"@lezer/php@^1.0.0": + version "1.0.2" + 
resolved "https://registry.npmjs.org/@lezer/php/-/php-1.0.2.tgz#7c291631fc1e7f7efe99977522bc48bdc732658a" + integrity sha512-GN7BnqtGRpFyeoKSEqxvGvhJQiI4zkgmYnDk/JIyc7H7Ifc1tkPnUn/R2R8meH3h/aBf5rzjvU8ZQoyiNDtDrA== + dependencies: + "@lezer/common" "^1.2.0" + "@lezer/highlight" "^1.0.0" + "@lezer/lr" "^1.1.0" + +"@lezer/python@^1.1.4": + version "1.1.14" + resolved "https://registry.npmjs.org/@lezer/python/-/python-1.1.14.tgz#a0887086fb7645cd09ada38ed748ca1d968e6363" + integrity sha512-ykDOb2Ti24n76PJsSa4ZoDF0zH12BSw1LGfQXCYJhJyOGiFTfGaX0Du66Ze72R+u/P35U+O6I9m8TFXov1JzsA== + dependencies: + "@lezer/common" "^1.2.0" + "@lezer/highlight" "^1.0.0" + "@lezer/lr" "^1.0.0" + +"@lezer/rust@^1.0.0": + version "1.0.2" + resolved "https://registry.npmjs.org/@lezer/rust/-/rust-1.0.2.tgz#cc9a75605d67182a0e799ac40b1965a61dcc6ef0" + integrity sha512-Lz5sIPBdF2FUXcWeCu1//ojFAZqzTQNRga0aYv6dYXqJqPfMdCAI0NzajWUd4Xijj1IKJLtjoXRPMvTKWBcqKg== + dependencies: + "@lezer/common" "^1.2.0" + "@lezer/highlight" "^1.0.0" + "@lezer/lr" "^1.0.0" + +"@lezer/xml@^1.0.0": + version "1.0.5" + resolved "https://registry.npmjs.org/@lezer/xml/-/xml-1.0.5.tgz#4bb7fd3e527f41b78372477aa753f035b41c3846" + integrity sha512-VFouqOzmUWfIg+tfmpcdV33ewtK+NSwd4ngSe1aG7HFb4BN0ExyY1b8msp+ndFrnlG4V4iC8yXacjFtrwERnaw== + dependencies: + "@lezer/common" "^1.2.0" + "@lezer/highlight" "^1.0.0" + "@lezer/lr" "^1.0.0" + +"@lumino/algorithm@^2.0.2": + version "2.0.2" + resolved "https://registry.npmjs.org/@lumino/algorithm/-/algorithm-2.0.2.tgz#d211da98c92be0271afde96b949982e29178ae48" + integrity sha512-cI8yJ2+QK1yM5ZRU3Kuaw9fJ/64JEDZEwWWp7+U0cd/mvcZ44BGdJJ29w+tIet1QXxPAvnsUleWyQ5qm4qUouA== + +"@lumino/application@^2.4.1": + version "2.4.1" + resolved "https://registry.npmjs.org/@lumino/application/-/application-2.4.1.tgz#6d312a4f8f8e14e61d1b784339f552f4a065f1ee" + integrity sha512-XdCAlNajcsGdK6ep+s6QC70EY+uBnP3kDiWthFLl3EMkvkYwmjOPzIPGlwLEd9Hu0XCO+1Vd2PlpeTnxw5D3/g== + dependencies: + "@lumino/commands" "^2.3.1" + 
"@lumino/coreutils" "^2.2.0" + "@lumino/widgets" "^2.5.0" + +"@lumino/collections@^2.0.2": + version "2.0.2" + resolved "https://registry.npmjs.org/@lumino/collections/-/collections-2.0.2.tgz#c790d8d4555d5dd349ecc817c8bd9e65b7f21c64" + integrity sha512-o0QmfV1D3WhAeA8GI1/YmEPaK89JtHVa764rQ5T0LdbDEwUtUDbjavHs1E/+y66tNTXz9RUJ4D2rcSb9tysYsg== + dependencies: + "@lumino/algorithm" "^2.0.2" + +"@lumino/commands@^2.3.1": + version "2.3.1" + resolved "https://registry.npmjs.org/@lumino/commands/-/commands-2.3.1.tgz#4ab5ec6521fefd3a9ff7ae0983c645483b9ecd07" + integrity sha512-DpX1kkE4PhILpvK1T4ZnaFb6UP4+YTkdZifvN3nbiomD64O2CTd+wcWIBpZMgy6MMgbVgrE8dzHxHk1EsKxNxw== + dependencies: + "@lumino/algorithm" "^2.0.2" + "@lumino/coreutils" "^2.2.0" + "@lumino/disposable" "^2.1.3" + "@lumino/domutils" "^2.0.2" + "@lumino/keyboard" "^2.0.2" + "@lumino/signaling" "^2.1.3" + "@lumino/virtualdom" "^2.0.2" + +"@lumino/coreutils@^1.11.0 || ^2.0.0", "@lumino/coreutils@^1.11.0 || ^2.2.0", "@lumino/coreutils@^2.2.0": + version "2.2.0" + resolved "https://registry.npmjs.org/@lumino/coreutils/-/coreutils-2.2.0.tgz#3f9d5c36f2513f067b2563c7ad3b33f43905a4e2" + integrity sha512-x5wnQ/GjWBayJ6vXVaUi6+Q6ETDdcUiH9eSfpRZFbgMQyyM6pi6baKqJBK2CHkCc/YbAEl6ipApTgm3KOJ/I3g== + dependencies: + "@lumino/algorithm" "^2.0.2" + +"@lumino/datagrid@^2.4.1": + version "2.4.1" + resolved "https://registry.npmjs.org/@lumino/datagrid/-/datagrid-2.4.1.tgz#6624d170d2695a707fc92d7364a08514778f2a4b" + integrity sha512-9sJg8UU/hqcKDqO5Rd0Blm2JYKT9nyAK/kuPYeaQc4ZLvtgn4SoOenNaShLDr3Wp54quBM8npAlk1mWG+yYC2g== + dependencies: + "@lumino/algorithm" "^2.0.2" + "@lumino/coreutils" "^2.2.0" + "@lumino/disposable" "^2.1.3" + "@lumino/domutils" "^2.0.2" + "@lumino/dragdrop" "^2.1.5" + "@lumino/keyboard" "^2.0.2" + "@lumino/messaging" "^2.0.2" + "@lumino/signaling" "^2.1.3" + "@lumino/widgets" "^2.5.0" + +"@lumino/disposable@^1.10.0 || ^2.0.0", "@lumino/disposable@^2.1.3": + version "2.1.3" + resolved 
"https://registry.npmjs.org/@lumino/disposable/-/disposable-2.1.3.tgz#cd2b11d82896eb654c2a528c9ff79a85ccf88d74" + integrity sha512-k5KXy/+T3UItiWHY4WwQawnsJnGo3aNtP5CTRKqo4+tbTNuhc3rTSvygJlNKIbEfIZXW2EWYnwfFDozkYx95eA== + dependencies: + "@lumino/signaling" "^2.1.3" + +"@lumino/domutils@^2.0.2": + version "2.0.2" + resolved "https://registry.npmjs.org/@lumino/domutils/-/domutils-2.0.2.tgz#b05973a179db2da71239976f25194f65c0f8eb98" + integrity sha512-2Kp6YHaMNI1rKB0PrALvOsZBHPy2EvVVAvJLWjlCm8MpWOVETjFp0MA9QpMubT9I76aKbaI5s1o1NJyZ8Y99pQ== + +"@lumino/dragdrop@^2.1.5": + version "2.1.5" + resolved "https://registry.npmjs.org/@lumino/dragdrop/-/dragdrop-2.1.5.tgz#2c178ac3e7520551f08ffb4f31521d87940dcce1" + integrity sha512-zqwR4GakrQBKZOW6S5pj2nfrQDurOErAoe9x3HS3BKLa1AzWA+t9PD5NESOKd81NqXFHjiMirSyFkTUs6pw+uA== + dependencies: + "@lumino/coreutils" "^2.2.0" + "@lumino/disposable" "^2.1.3" + +"@lumino/keyboard@^2.0.2": + version "2.0.2" + resolved "https://registry.npmjs.org/@lumino/keyboard/-/keyboard-2.0.2.tgz#8ff5e360b8960716f45b742845bea6f3c5f44760" + integrity sha512-icRUpvswDaFjqmAJNbQRb/aTu6Iugo6Y2oC08TiIwhQtLS9W+Ee9VofdqvbPSvCm6DkyP+DCWMuA3KXZ4V4g4g== + +"@lumino/messaging@^2.0.2": + version "2.0.2" + resolved "https://registry.npmjs.org/@lumino/messaging/-/messaging-2.0.2.tgz#133b94d1fa1c67ad60e622a32acaf682faf05aaa" + integrity sha512-2sUF07cYA0f3mDil41Eh5sfBk0aGAH/mOh1I4+vyRUsKyBqp4WTUtpJFd8xVJGAntygxwnebIygkIaXXTIQvxA== + dependencies: + "@lumino/algorithm" "^2.0.2" + "@lumino/collections" "^2.0.2" + +"@lumino/polling@^2.1.3": + version "2.1.3" + resolved "https://registry.npmjs.org/@lumino/polling/-/polling-2.1.3.tgz#395fcfba3ada12439d5cc9592b68a89d781a54ed" + integrity sha512-WEZk96ddK6eHEhdDkFUAAA40EOLit86QVbqQqnbPmhdGwFogek26Kq9b1U273LJeirv95zXCATOJAkjRyb7D+w== + dependencies: + "@lumino/coreutils" "^2.2.0" + "@lumino/disposable" "^2.1.3" + "@lumino/signaling" "^2.1.3" + +"@lumino/properties@^2.0.2": + version "2.0.2" + resolved 
"https://registry.npmjs.org/@lumino/properties/-/properties-2.0.2.tgz#39213876b06a917eae72e8ea38b71daa1c699682" + integrity sha512-b312oA3Bh97WFK8efXejYmC3DVJmvzJk72LQB7H3fXhfqS5jUWvL7MSnNmgcQvGzl9fIhDWDWjhtSTi0KGYYBg== + +"@lumino/signaling@^1.10.0 || ^2.0.0", "@lumino/signaling@^2.1.3": + version "2.1.3" + resolved "https://registry.npmjs.org/@lumino/signaling/-/signaling-2.1.3.tgz#612419e6948ce77c00328f9eff5ecd995c960a70" + integrity sha512-9Wd4iMk8F1i6pYjy65bqKuPlzQMicyL9xy1/ccS20kovPcfD074waneL/7BVe+3M8i+fGa3x2qjbWrBzOdTdNw== + dependencies: + "@lumino/algorithm" "^2.0.2" + "@lumino/coreutils" "^2.2.0" + +"@lumino/virtualdom@^2.0.2": + version "2.0.2" + resolved "https://registry.npmjs.org/@lumino/virtualdom/-/virtualdom-2.0.2.tgz#927c9803ebd31cd97ca2c599ec9a5d509afb3b2d" + integrity sha512-HYZThOtZSoknjdXA102xpy5CiXtTFCVz45EXdWeYLx3NhuEwuAIX93QBBIhupalmtFlRg1yhdDNV40HxJ4kcXg== + dependencies: + "@lumino/algorithm" "^2.0.2" + +"@lumino/widgets@^1.37.2 || ^2.5.0", "@lumino/widgets@^2.5.0": + version "2.5.0" + resolved "https://registry.npmjs.org/@lumino/widgets/-/widgets-2.5.0.tgz#7e37d86dbbc4eed1f85aa199b9fffa4919aa1e3e" + integrity sha512-RSRpc6aIEiuw79jqWUHYWXLJ2GBy7vhwuqgo94UVzg6oeh3XBECX0OvXGjK2k7N2BhmRrIs9bXky7Dm861S6mQ== + dependencies: + "@lumino/algorithm" "^2.0.2" + "@lumino/commands" "^2.3.1" + "@lumino/coreutils" "^2.2.0" + "@lumino/disposable" "^2.1.3" + "@lumino/domutils" "^2.0.2" + "@lumino/dragdrop" "^2.1.5" + "@lumino/keyboard" "^2.0.2" + "@lumino/messaging" "^2.0.2" + "@lumino/properties" "^2.0.2" + "@lumino/signaling" "^2.1.3" + "@lumino/virtualdom" "^2.0.2" + +"@microsoft/fast-colors@^5.3.1": + version "5.3.1" + resolved "https://registry.npmjs.org/@microsoft/fast-colors/-/fast-colors-5.3.1.tgz#defc59874176e42316be7e6d24c31885ead8ca56" + integrity sha512-72RZXVfCbwQzvo5sXXkuLXLT7rMeYaSf5r/6ewQiv/trBtqpWRm4DEH2EilHw/iWTBKOXs1qZNQndgUMa5n4LA== + +"@microsoft/fast-element@^1.12.0", "@microsoft/fast-element@^1.13.0": + version "1.13.0" + 
resolved "https://registry.npmjs.org/@microsoft/fast-element/-/fast-element-1.13.0.tgz#d390ff13697064a48dc6ad6bb332a5f5489f73f8" + integrity sha512-iFhzKbbD0cFRo9cEzLS3Tdo9BYuatdxmCEKCpZs1Cro/93zNMpZ/Y9/Z7SknmW6fhDZbpBvtO8lLh9TFEcNVAQ== + +"@microsoft/fast-foundation@^2.49.4": + version "2.49.6" + resolved "https://registry.npmjs.org/@microsoft/fast-foundation/-/fast-foundation-2.49.6.tgz#0bdee7d28dcf93918075618359b083a676d2891c" + integrity sha512-DZVr+J/NIoskFC1Y6xnAowrMkdbf2d5o7UyWK6gW5AiQ6S386Ql8dw4KcC4kHaeE1yL2CKvweE79cj6ZhJhTvA== + dependencies: + "@microsoft/fast-element" "^1.13.0" + "@microsoft/fast-web-utilities" "^5.4.1" + tabbable "^5.2.0" + tslib "^1.13.0" + +"@microsoft/fast-web-utilities@^5.4.1": + version "5.4.1" + resolved "https://registry.npmjs.org/@microsoft/fast-web-utilities/-/fast-web-utilities-5.4.1.tgz#8e3082ee2ff2b5467f17e7cb1fb01b0e4906b71f" + integrity sha512-ReWYncndjV3c8D8iq9tp7NcFNc1vbVHvcBFPME2nNFKNbS1XCesYZGlIlf3ot5EmuOXPlrzUHOWzQ2vFpIkqDg== + dependencies: + exenv-es6 "^1.1.1" + +"@playwright/test@^1.48.0", "@playwright/test@^1.49.0": + version "1.49.0" + resolved "https://registry.npmjs.org/@playwright/test/-/test-1.49.0.tgz#74227385b58317ee076b86b56d0e1e1b25cff01e" + integrity sha512-DMulbwQURa8rNIQrf94+jPJQ4FmOVdpE5ZppRNvWVjvhC+6sOeo28r8MgIpQRYouXRtt/FCCXU7zn20jnHR4Qw== + dependencies: + playwright "1.49.0" + +"@rjsf/core@^5.13.4": + version "5.21.0" + resolved "https://registry.npmjs.org/@rjsf/core/-/core-5.21.0.tgz#35c3b2303345dc5e616d18061e5797e8b9ff1b31" + integrity sha512-G8eROGeHVerBRcXyHKSrNeY0C3YeeLINLwZOl4XeDPA0hHxrrM6/kRhZ2l07t96LX/vZmqrNqrMCbWokvxQ/uw== + dependencies: + lodash "^4.17.21" + lodash-es "^4.17.21" + markdown-to-jsx "^7.4.1" + nanoid "^3.3.7" + prop-types "^15.8.1" + +"@rjsf/utils@^5.13.4": + version "5.21.0" + resolved "https://registry.npmjs.org/@rjsf/utils/-/utils-5.21.0.tgz#5626f7e8ad4b8015eacfe1017183230c00a5fe9b" + integrity 
sha512-rpYylVRHv7m9HG49vBxo6pRP5vXIoiZrdCKISE5+0CMO9bI1R6wF+5m1SZH4AL4C4sbcwA5vApK12slpzNXwTA== + dependencies: + fast-equals "^5.0.1" + json-schema-merge-allof "^0.8.1" + jsonpointer "^5.0.1" + lodash "^4.17.21" + lodash-es "^4.17.21" + react-is "^18.2.0" + +"@stdlib/array@^0.0.x": + version "0.0.12" + resolved "https://registry.npmjs.org/@stdlib/array/-/array-0.0.12.tgz#12f40ab95bb36d424cdad991f29fc3cb491ee29e" + integrity sha512-nDksiuvRC1dSTHrf5yOGQmlRwAzSKV8MdFQwFSvLbZGGhi5Y4hExqea5HloLgNVouVs8lnAFi2oubSM4Mc7YAg== + dependencies: + "@stdlib/assert" "^0.0.x" + "@stdlib/blas" "^0.0.x" + "@stdlib/complex" "^0.0.x" + "@stdlib/constants" "^0.0.x" + "@stdlib/math" "^0.0.x" + "@stdlib/symbol" "^0.0.x" + "@stdlib/types" "^0.0.x" + "@stdlib/utils" "^0.0.x" + +"@stdlib/assert@^0.0.x": + version "0.0.12" + resolved "https://registry.npmjs.org/@stdlib/assert/-/assert-0.0.12.tgz#1648c9016e5041291f55a6464abcc4069c5103ce" + integrity sha512-38FxFf+ZoQZbdc+m09UsWtaCmzd/2e7im0JOaaFYE7icmRfm+4KiE9BRvBT4tIn7ioLB2f9PsBicKjIsf+tY1w== + dependencies: + "@stdlib/array" "^0.0.x" + "@stdlib/cli" "^0.0.x" + "@stdlib/complex" "^0.0.x" + "@stdlib/constants" "^0.0.x" + "@stdlib/fs" "^0.0.x" + "@stdlib/math" "^0.0.x" + "@stdlib/ndarray" "^0.0.x" + "@stdlib/number" "^0.0.x" + "@stdlib/os" "^0.0.x" + "@stdlib/process" "^0.0.x" + "@stdlib/regexp" "^0.0.x" + "@stdlib/streams" "^0.0.x" + "@stdlib/string" "^0.0.x" + "@stdlib/symbol" "^0.0.x" + "@stdlib/types" "^0.0.x" + "@stdlib/utils" "^0.0.x" + +"@stdlib/bigint@^0.0.x": + version "0.0.11" + resolved "https://registry.npmjs.org/@stdlib/bigint/-/bigint-0.0.11.tgz#c416a1d727001c55f4897e6424124199d638f2fd" + integrity sha512-uz0aYDLABAYyqxaCSHYbUt0yPkXYUCR7TrVvHN+UUD3i8FZ02ZKcLO+faKisDyxKEoSFTNtn3Ro8Ir5ebOlVXQ== + dependencies: + "@stdlib/utils" "^0.0.x" + +"@stdlib/blas@^0.0.x": + version "0.0.12" + resolved "https://registry.npmjs.org/@stdlib/blas/-/blas-0.0.12.tgz#7e93e42b4621fc6903bf63264f045047333536c2" + integrity 
sha512-nWY749bWceuoWQ7gz977blCwR7lyQ/rsIXVO4b600h+NFpeA2i/ea7MYC680utIbeu2cnDWHdglBPoK535VAzA== + dependencies: + "@stdlib/array" "^0.0.x" + "@stdlib/assert" "^0.0.x" + "@stdlib/math" "^0.0.x" + "@stdlib/number" "^0.0.x" + "@stdlib/types" "^0.0.x" + "@stdlib/utils" "^0.0.x" + +"@stdlib/buffer@^0.0.x": + version "0.0.11" + resolved "https://registry.npmjs.org/@stdlib/buffer/-/buffer-0.0.11.tgz#6137b00845e6c905181cc7ebfae9f7e47c01b0ce" + integrity sha512-Jeie5eDDa1tVuRcuU+cBXI/oOXSmMxUUccZpqXzgYe0IO8QSNtNxv9mUTzJk/m5wH+lmLoDvNxzPpOH9TODjJg== + dependencies: + "@stdlib/array" "^0.0.x" + "@stdlib/assert" "^0.0.x" + "@stdlib/process" "^0.0.x" + "@stdlib/types" "^0.0.x" + "@stdlib/utils" "^0.0.x" + +"@stdlib/cli@^0.0.x": + version "0.0.10" + resolved "https://registry.npmjs.org/@stdlib/cli/-/cli-0.0.10.tgz#28e2fbe6865d7f5cd15b7dc5846c99bd3b91674f" + integrity sha512-OITGaxG46kwK799+NuOd/+ccosJ9koVuQBC610DDJv0ZJf8mD7sbjGXrmue9C4EOh8MP7Vm/6HN14BojX8oTCg== + dependencies: + "@stdlib/utils" "^0.0.x" + minimist "^1.2.0" + +"@stdlib/complex@^0.0.x": + version "0.0.12" + resolved "https://registry.npmjs.org/@stdlib/complex/-/complex-0.0.12.tgz#3afbc190cd0a9b37fc7c6e508c3aa9fda9106944" + integrity sha512-UbZBdaUxT2G+lsTIrVlRZwx2IRY6GXnVILggeejsIVxHSuK+oTyapfetcAv0FJFLP+Rrr+ZzrN4b9G3hBw6NHA== + dependencies: + "@stdlib/array" "^0.0.x" + "@stdlib/assert" "^0.0.x" + "@stdlib/types" "^0.0.x" + "@stdlib/utils" "^0.0.x" + +"@stdlib/constants@^0.0.x": + version "0.0.11" + resolved "https://registry.npmjs.org/@stdlib/constants/-/constants-0.0.11.tgz#78cd56d6c2982b30264843c3d75bde7125e90cd2" + integrity sha512-cWKy0L9hXHUQTvFzdPkTvZnn/5Pjv7H4UwY0WC1rLt+A5CxFDJKjvnIi9ypSzJS3CAiGl1ZaHCdadoqXhNdkUg== + dependencies: + "@stdlib/array" "^0.0.x" + "@stdlib/assert" "^0.0.x" + "@stdlib/number" "^0.0.x" + "@stdlib/utils" "^0.0.x" + +"@stdlib/fs@^0.0.x": + version "0.0.12" + resolved "https://registry.npmjs.org/@stdlib/fs/-/fs-0.0.12.tgz#662365fd5846a51f075724b4f2888ae88441b70d" + integrity 
sha512-zcDLbt39EEM3M3wJW6luChS53B8T+TMJkjs2526UpKJ71O0/0adR57cI7PfCpkMd33d05uM7GM+leEj4eks4Cw== + dependencies: + "@stdlib/array" "^0.0.x" + "@stdlib/assert" "^0.0.x" + "@stdlib/cli" "^0.0.x" + "@stdlib/math" "^0.0.x" + "@stdlib/process" "^0.0.x" + "@stdlib/string" "^0.0.x" + "@stdlib/utils" "^0.0.x" + debug "^2.6.9" + +"@stdlib/math@^0.0.x": + version "0.0.11" + resolved "https://registry.npmjs.org/@stdlib/math/-/math-0.0.11.tgz#eb6638bc03a20fbd6727dd5b977ee0170bda4649" + integrity sha512-qI78sR1QqGjHj8k/aAqkZ51Su2fyBvaR/jMKQqcB/ML8bpYpf+QGlGvTty5Qdru/wpqds4kVFOVbWGcNFIV2+Q== + dependencies: + "@stdlib/assert" "^0.0.x" + "@stdlib/constants" "^0.0.x" + "@stdlib/ndarray" "^0.0.x" + "@stdlib/number" "^0.0.x" + "@stdlib/strided" "^0.0.x" + "@stdlib/symbol" "^0.0.x" + "@stdlib/types" "^0.0.x" + "@stdlib/utils" "^0.0.x" + debug "^2.6.9" + +"@stdlib/ndarray@^0.0.x": + version "0.0.13" + resolved "https://registry.npmjs.org/@stdlib/ndarray/-/ndarray-0.0.13.tgz#2e8fc645e10f56a645a0ab81598808c0e8f43b82" + integrity sha512-Z+U9KJP4U2HWrLtuAXSPvhNetAdqaNLMcliR6S/fz+VPlFDeymRK7omRFMgVQ+1zcAvIgKZGJxpLC3vjiPUYEw== + dependencies: + "@stdlib/array" "^0.0.x" + "@stdlib/assert" "^0.0.x" + "@stdlib/bigint" "^0.0.x" + "@stdlib/buffer" "^0.0.x" + "@stdlib/complex" "^0.0.x" + "@stdlib/constants" "^0.0.x" + "@stdlib/math" "^0.0.x" + "@stdlib/number" "^0.0.x" + "@stdlib/string" "^0.0.x" + "@stdlib/types" "^0.0.x" + "@stdlib/utils" "^0.0.x" + +"@stdlib/nlp@^0.0.x": + version "0.0.11" + resolved "https://registry.npmjs.org/@stdlib/nlp/-/nlp-0.0.11.tgz#532ec0f7267b8d639e4c20c6de864e8de8a09054" + integrity sha512-D9avYWANm0Db2W7RpzdSdi5GxRYALGAqUrNnRnnKIO6sMEfr/DvONoAbWruda4QyvSC+0MJNwcEn7+PHhRwYhw== + dependencies: + "@stdlib/array" "^0.0.x" + "@stdlib/assert" "^0.0.x" + "@stdlib/math" "^0.0.x" + "@stdlib/random" "^0.0.x" + "@stdlib/string" "^0.0.x" + "@stdlib/utils" "^0.0.x" + +"@stdlib/number@^0.0.x": + version "0.0.10" + resolved 
"https://registry.npmjs.org/@stdlib/number/-/number-0.0.10.tgz#4030ad8fc3fac19a9afb415c443cee6deea0e65c" + integrity sha512-RyfoP9MlnX4kccvg8qv7vYQPbLdzfS1Mnp/prGOoWhvMG3pyBwFAan34kwFb5IS/zHC3W5EmrgXCV2QWyLg/Kg== + dependencies: + "@stdlib/array" "^0.0.x" + "@stdlib/assert" "^0.0.x" + "@stdlib/constants" "^0.0.x" + "@stdlib/math" "^0.0.x" + "@stdlib/os" "^0.0.x" + "@stdlib/string" "^0.0.x" + "@stdlib/types" "^0.0.x" + "@stdlib/utils" "^0.0.x" + +"@stdlib/os@^0.0.x": + version "0.0.12" + resolved "https://registry.npmjs.org/@stdlib/os/-/os-0.0.12.tgz#08bbf013c62a7153099fa9cbac086ca1349a4677" + integrity sha512-O7lklZ/9XEzoCmYvzjPh7jrFWkbpOSHGI71ve3dkSvBy5tyiSL3TtivfKsIC+9ZxuEJZ3d3lIjc9e+yz4HVbqQ== + dependencies: + "@stdlib/assert" "^0.0.x" + "@stdlib/cli" "^0.0.x" + "@stdlib/fs" "^0.0.x" + "@stdlib/process" "^0.0.x" + "@stdlib/utils" "^0.0.x" + +"@stdlib/process@^0.0.x": + version "0.0.12" + resolved "https://registry.npmjs.org/@stdlib/process/-/process-0.0.12.tgz#123325079d89a32f4212f72fb694f8fe3614cf18" + integrity sha512-P0X0TMvkissBE1Wr877Avi2/AxmP7X5Toa6GatHbpJdDg6jQmN4SgPd+NZNp98YtZUyk478c8XSIzMr1krQ20g== + dependencies: + "@stdlib/assert" "^0.0.x" + "@stdlib/buffer" "^0.0.x" + "@stdlib/cli" "^0.0.x" + "@stdlib/fs" "^0.0.x" + "@stdlib/streams" "^0.0.x" + "@stdlib/string" "^0.0.x" + "@stdlib/utils" "^0.0.x" + +"@stdlib/random@^0.0.x": + version "0.0.12" + resolved "https://registry.npmjs.org/@stdlib/random/-/random-0.0.12.tgz#e819c3abd602ed5559ba800dba751e49c633ff85" + integrity sha512-c5yND4Ahnm9Jx0I+jsKhn4Yrz10D53ALSrIe3PG1qIz3kNFcIPnmvCuNGd+3V4ch4Mbrez55Y8z/ZC5RJh4vJQ== + dependencies: + "@stdlib/array" "^0.0.x" + "@stdlib/assert" "^0.0.x" + "@stdlib/blas" "^0.0.x" + "@stdlib/buffer" "^0.0.x" + "@stdlib/cli" "^0.0.x" + "@stdlib/constants" "^0.0.x" + "@stdlib/fs" "^0.0.x" + "@stdlib/math" "^0.0.x" + "@stdlib/process" "^0.0.x" + "@stdlib/stats" "^0.0.x" + "@stdlib/streams" "^0.0.x" + "@stdlib/symbol" "^0.0.x" + "@stdlib/types" "^0.0.x" + "@stdlib/utils" 
"^0.0.x" + debug "^2.6.9" + readable-stream "^2.1.4" + +"@stdlib/regexp@^0.0.x": + version "0.0.13" + resolved "https://registry.npmjs.org/@stdlib/regexp/-/regexp-0.0.13.tgz#80b98361dc7a441b47bc3fa964bb0c826759e971" + integrity sha512-3JT5ZIoq/1nXY+dY+QtkU8/m7oWDeekyItEEXMx9c/AOf0ph8fmvTUGMDNfUq0RetcznFe3b66kFz6Zt4XHviA== + dependencies: + "@stdlib/assert" "^0.0.x" + "@stdlib/utils" "^0.0.x" + +"@stdlib/stats@^0.0.x", "@stdlib/stats@~0.0.13": + version "0.0.13" + resolved "https://registry.npmjs.org/@stdlib/stats/-/stats-0.0.13.tgz#87c973f385379d794707c7b5196a173dba8b07e1" + integrity sha512-hm+t32dKbx/L7+7WlQ1o4NDEzV0J4QSnwFBCsIMIAO8+VPxTZ4FxyNERl4oKlS3hZZe4AVKjoOVhBDtgEWrS4g== + dependencies: + "@stdlib/array" "^0.0.x" + "@stdlib/assert" "^0.0.x" + "@stdlib/blas" "^0.0.x" + "@stdlib/constants" "^0.0.x" + "@stdlib/math" "^0.0.x" + "@stdlib/ndarray" "^0.0.x" + "@stdlib/random" "^0.0.x" + "@stdlib/string" "^0.0.x" + "@stdlib/symbol" "^0.0.x" + "@stdlib/types" "^0.0.x" + "@stdlib/utils" "^0.0.x" + +"@stdlib/streams@^0.0.x": + version "0.0.12" + resolved "https://registry.npmjs.org/@stdlib/streams/-/streams-0.0.12.tgz#07f5ceae5852590afad8e1cb7ce94174becc8739" + integrity sha512-YLUlXwjJNknHp92IkJUdvn5jEQjDckpawKhDLLCoxyh3h5V+w/8+61SH7TMTfKx5lBxKJ8vvtchZh90mIJOAjQ== + dependencies: + "@stdlib/assert" "^0.0.x" + "@stdlib/buffer" "^0.0.x" + "@stdlib/cli" "^0.0.x" + "@stdlib/fs" "^0.0.x" + "@stdlib/types" "^0.0.x" + "@stdlib/utils" "^0.0.x" + debug "^2.6.9" + readable-stream "^2.1.4" + +"@stdlib/strided@^0.0.x": + version "0.0.12" + resolved "https://registry.npmjs.org/@stdlib/strided/-/strided-0.0.12.tgz#86ac48e660cb7f64a45cf07e80cbbfe58be21ae1" + integrity sha512-1NINP+Y7IJht34iri/bYLY7TVxrip51f6Z3qWxGHUCH33kvk5H5QqV+RsmFEGbbyoGtdeHrT2O+xA+7R2e3SNg== + dependencies: + "@stdlib/assert" "^0.0.x" + "@stdlib/math" "^0.0.x" + "@stdlib/ndarray" "^0.0.x" + "@stdlib/types" "^0.0.x" + "@stdlib/utils" "^0.0.x" + +"@stdlib/string@^0.0.x": + version "0.0.14" + resolved 
"https://registry.npmjs.org/@stdlib/string/-/string-0.0.14.tgz#4feea4f9089ab72428eebb65fe7b93d90a7f34f4" + integrity sha512-1ClvUTPysens7GZz3WsrkFYIFs8qDmnXkyAd3zMvTXgRpy7hqrv6nNzLMQj8BHv5cBWaWPOXYd/cZ+JyMnZNQQ== + dependencies: + "@stdlib/assert" "^0.0.x" + "@stdlib/cli" "^0.0.x" + "@stdlib/constants" "^0.0.x" + "@stdlib/fs" "^0.0.x" + "@stdlib/math" "^0.0.x" + "@stdlib/nlp" "^0.0.x" + "@stdlib/process" "^0.0.x" + "@stdlib/regexp" "^0.0.x" + "@stdlib/streams" "^0.0.x" + "@stdlib/types" "^0.0.x" + "@stdlib/utils" "^0.0.x" + +"@stdlib/symbol@^0.0.x": + version "0.0.12" + resolved "https://registry.npmjs.org/@stdlib/symbol/-/symbol-0.0.12.tgz#b9f396b0bf269c2985bb7fe99810a8e26d7288c3" + integrity sha512-2IDhpzWVGeLHgsvIsX12RXvf78r7xBkc4QLoRUv3k7Cp61BisR1Ym1p0Tq9PbxT8fknlvLToh9n5RpmESi2d4w== + dependencies: + "@stdlib/assert" "^0.0.x" + "@stdlib/utils" "^0.0.x" + +"@stdlib/time@^0.0.x": + version "0.0.14" + resolved "https://registry.npmjs.org/@stdlib/time/-/time-0.0.14.tgz#ea6daa438b1d3b019b99f5091117ee4bcef55d60" + integrity sha512-1gMFCQTabMVIgww+k4g8HHHIhyy1tIlvwT8mC0BHW7Q7TzDAgobwL0bvor+lwvCb5LlDAvNQEpaRgVT99QWGeQ== + dependencies: + "@stdlib/assert" "^0.0.x" + "@stdlib/cli" "^0.0.x" + "@stdlib/constants" "^0.0.x" + "@stdlib/fs" "^0.0.x" + "@stdlib/math" "^0.0.x" + "@stdlib/string" "^0.0.x" + "@stdlib/utils" "^0.0.x" + +"@stdlib/types@^0.0.x": + version "0.0.14" + resolved "https://registry.npmjs.org/@stdlib/types/-/types-0.0.14.tgz#02d3aab7a9bfaeb86e34ab749772ea22f7b2f7e0" + integrity sha512-AP3EI9/il/xkwUazcoY+SbjtxHRrheXgSbWZdEGD+rWpEgj6n2i63hp6hTOpAB5NipE0tJwinQlDGOuQ1lCaCw== + +"@stdlib/utils@^0.0.x": + version "0.0.12" + resolved "https://registry.npmjs.org/@stdlib/utils/-/utils-0.0.12.tgz#670de5a7b253f04f11a4cba38f790e82393bcb46" + integrity sha512-+JhFpl6l7RSq/xGnbWRQ5dAL90h9ONj8MViqlb7teBZFtePZLMwoRA1wssypFcJ8SFMRWQn7lPmpYVUkGwRSOg== + dependencies: + "@stdlib/array" "^0.0.x" + "@stdlib/assert" "^0.0.x" + "@stdlib/blas" "^0.0.x" + "@stdlib/buffer" 
"^0.0.x" + "@stdlib/cli" "^0.0.x" + "@stdlib/constants" "^0.0.x" + "@stdlib/fs" "^0.0.x" + "@stdlib/math" "^0.0.x" + "@stdlib/os" "^0.0.x" + "@stdlib/process" "^0.0.x" + "@stdlib/random" "^0.0.x" + "@stdlib/regexp" "^0.0.x" + "@stdlib/streams" "^0.0.x" + "@stdlib/string" "^0.0.x" + "@stdlib/symbol" "^0.0.x" + "@stdlib/time" "^0.0.x" + "@stdlib/types" "^0.0.x" + debug "^2.6.9" + +"@types/estree@^1.0.0": + version "1.0.5" + resolved "https://registry.npmjs.org/@types/estree/-/estree-1.0.5.tgz#a6ce3e556e00fd9895dd872dd172ad0d4bd687f4" + integrity sha512-/kYRxGDLWzHOB7q+wtSUQlFrtcdUccpfy+X+9iMBpHK8QLLhx2wIPYuS5DYtR9Wa/YlZAbIovy7qVdB1Aq6Lyw== + +"@types/geojson@7946.0.4": + version "7946.0.4" + resolved "https://registry.npmjs.org/@types/geojson/-/geojson-7946.0.4.tgz#4e049756383c3f055dd8f3d24e63fb543e98eb07" + integrity sha512-MHmwBtCb7OCv1DSivz2UNJXPGU/1btAWRKlqJ2saEhVJkpkvqHMMaOpKg0v4sAbDWSQekHGvPVMM8nQ+Jen03Q== + +"@types/prop-types@*": + version "15.7.12" + resolved "https://registry.npmjs.org/@types/prop-types/-/prop-types-15.7.12.tgz#12bb1e2be27293c1406acb6af1c3f3a1481d98c6" + integrity sha512-5zvhXYtRNRluoE/jAp4GVsSduVUzNWKkOZrCDBWYtE7biZywwdC2AcEzg+cSMLFRfVgeAFqpfNabiPjxFddV1Q== + +"@types/react@^18.0.26": + version "18.3.5" + resolved "https://registry.npmjs.org/@types/react/-/react-18.3.5.tgz#5f524c2ad2089c0ff372bbdabc77ca2c4dbadf8f" + integrity sha512-WeqMfGJLGuLCqHGYRGHxnKrXcTitc6L/nBUWfWPcTarG3t9PsquqUMuVeXZeca+mglY4Vo5GZjCi0A3Or2lnxA== + dependencies: + "@types/prop-types" "*" + csstype "^3.0.2" + +"@vscode/debugprotocol@^1.51.0": + version "1.67.0" + resolved "https://registry.npmjs.org/@vscode/debugprotocol/-/debugprotocol-1.67.0.tgz#cbeef6f9e8e4b5e9a30468faa6f42c96e4d42040" + integrity sha512-vTn5JwZ+LQy2QqT/wUD8Rlrb+7eLo5fsiKIxD5i0BZIuvdRbxTTfGU7+47PsorMrfBzozngIrocKCKS3OVnYyw== + +"@yarnpkg/lockfile@^1.1.0": + version "1.1.0" + resolved "https://registry.npmjs.org/@yarnpkg/lockfile/-/lockfile-1.1.0.tgz#e77a97fbd345b76d83245edcd17d393b1b41fb31" + 
integrity sha512-GpSwvyXOcOOlV70vbnzjj4fW5xW/FdUF6nQEt1ENy7m4ZCczi1+/buVUPAqmGfqznsORNFzUMjctTIp8a9tuCQ== + +ajv@^8.12.0: + version "8.17.1" + resolved "https://registry.npmjs.org/ajv/-/ajv-8.17.1.tgz#37d9a5c776af6bc92d7f4f9510eba4c0a60d11a6" + integrity sha512-B/gBuNg5SiMTrPkC+A2+cW0RszwxYmn6VYxB/inlBStS5nx6xHIt/ehKRhIMhqusl7a8LjQoZnjCs5vhwxOQ1g== + dependencies: + fast-deep-equal "^3.1.3" + fast-uri "^3.0.1" + json-schema-traverse "^1.0.0" + require-from-string "^2.0.2" + +ansi-regex@^5.0.1: + version "5.0.1" + resolved "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz#082cb2c89c9fe8659a311a53bd6a4dc5301db304" + integrity sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ== + +ansi-styles@^4.0.0: + version "4.3.0" + resolved "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz#edd803628ae71c04c85ae7a0906edad34b648937" + integrity sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg== + dependencies: + color-convert "^2.0.1" + +cliui@^8.0.1: + version "8.0.1" + resolved "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz#0c04b075db02cbfe60dc8e6cf2f5486b1a3608aa" + integrity sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ== + dependencies: + string-width "^4.2.0" + strip-ansi "^6.0.1" + wrap-ansi "^7.0.0" + +color-convert@^2.0.1: + version "2.0.1" + resolved "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz#72d3a68d598c9bdb3af2ad1e84f21d896abd4de3" + integrity sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ== + dependencies: + color-name "~1.1.4" + +color-name@~1.1.4: + version "1.1.4" + resolved "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz#c2a09a87acbde69543de6f63fa3995c826c536a2" + integrity sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA== + +commander@2: + version "2.20.3" + resolved 
"https://registry.npmjs.org/commander/-/commander-2.20.3.tgz#fd485e84c03eb4881c20722ba48035e8531aeb33" + integrity sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ== + +commander@7: + version "7.2.0" + resolved "https://registry.npmjs.org/commander/-/commander-7.2.0.tgz#a36cb57d0b501ce108e4d20559a150a391d97ab7" + integrity sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw== + +commander@^10.0.1: + version "10.0.1" + resolved "https://registry.npmjs.org/commander/-/commander-10.0.1.tgz#881ee46b4f77d1c1dccc5823433aa39b022cbe06" + integrity sha512-y4Mg2tXshplEbSGzx7amzPwKKOCGuoSRP/CjEdwwk0FOGlUbq6lKuoyDZTNZkmxHdJtp54hdfY/JUrdL7Xfdug== + +compute-gcd@^1.2.1: + version "1.2.1" + resolved "https://registry.npmjs.org/compute-gcd/-/compute-gcd-1.2.1.tgz#34d639f3825625e1357ce81f0e456a6249d8c77f" + integrity sha512-TwMbxBNz0l71+8Sc4czv13h4kEqnchV9igQZBi6QUaz09dnz13juGnnaWWJTRsP3brxOoxeB4SA2WELLw1hCtg== + dependencies: + validate.io-array "^1.0.3" + validate.io-function "^1.0.2" + validate.io-integer-array "^1.0.0" + +compute-lcm@^1.1.2: + version "1.1.2" + resolved "https://registry.npmjs.org/compute-lcm/-/compute-lcm-1.1.2.tgz#9107c66b9dca28cefb22b4ab4545caac4034af23" + integrity sha512-OFNPdQAXnQhDSKioX8/XYT6sdUlXwpeMjfd6ApxMJfyZ4GxmLR1xvMERctlYhlHwIiz6CSpBc2+qYKjHGZw4TQ== + dependencies: + compute-gcd "^1.2.1" + validate.io-array "^1.0.3" + validate.io-function "^1.0.2" + validate.io-integer-array "^1.0.0" + +core-util-is@~1.0.0: + version "1.0.3" + resolved "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.3.tgz#a6042d3634c2b27e9328f837b965fac83808db85" + integrity sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ== + +crelt@^1.0.5: + version "1.0.6" + resolved "https://registry.npmjs.org/crelt/-/crelt-1.0.6.tgz#7cc898ea74e190fb6ef9dae57f8f81cf7302df72" + integrity 
sha512-VQ2MBenTq1fWZUH9DJNGti7kKv6EeAuYr3cLwxUWhIu1baTaXh4Ib5W2CqHVqib4/MqbYGJqiL3Zb8GJZr3l4g== + +csstype@3.0.10: + version "3.0.10" + resolved "https://registry.npmjs.org/csstype/-/csstype-3.0.10.tgz#2ad3a7bed70f35b965707c092e5f30b327c290e5" + integrity sha512-2u44ZG2OcNUO9HDp/Jl8C07x6pU/eTR3ncV91SiK3dhG9TWvRVsCoJw14Ckx5DgWkzGA3waZWO3d7pgqpUI/XA== + +csstype@^3.0.2: + version "3.1.3" + resolved "https://registry.npmjs.org/csstype/-/csstype-3.1.3.tgz#d80ff294d114fb0e6ac500fbf85b60137d7eff81" + integrity sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw== + +"d3-array@1 - 3", "d3-array@2 - 3", "d3-array@2.10.0 - 3", "d3-array@2.5.0 - 3", d3-array@3.2.4, d3-array@^3.2.2: + version "3.2.4" + resolved "https://registry.npmjs.org/d3-array/-/d3-array-3.2.4.tgz#15fec33b237f97ac5d7c986dc77da273a8ed0bb5" + integrity sha512-tdQAmyA18i4J7wprpYq8ClcxZy3SC31QMeByyCFyRt7BVHdREQZ5lpzoe5mFEYZUWe+oq8HBvk9JjpibyEV4Jg== + dependencies: + internmap "1 - 2" + +"d3-color@1 - 3", d3-color@^3.1.0: + version "3.1.0" + resolved "https://registry.npmjs.org/d3-color/-/d3-color-3.1.0.tgz#395b2833dfac71507f12ac2f7af23bf819de24e2" + integrity sha512-zg/chbXyeBtMQ1LbD/WSoW2DpC3I0mpmPdW+ynRTj/x2DAWYrIY7qeZIHidozwV24m4iavr15lNwIwLxRmOxhA== + +d3-delaunay@^6.0.2: + version "6.0.4" + resolved "https://registry.npmjs.org/d3-delaunay/-/d3-delaunay-6.0.4.tgz#98169038733a0a5babbeda55054f795bb9e4a58b" + integrity sha512-mdjtIZ1XLAM8bm/hx3WwjfHt6Sggek7qH043O8KEjDXN40xi3vx/6pYSVTwLjEgiXQTbvaouWKynLBiUZ6SK6A== + dependencies: + delaunator "5" + +"d3-dispatch@1 - 3": + version "3.0.1" + resolved "https://registry.npmjs.org/d3-dispatch/-/d3-dispatch-3.0.1.tgz#5fc75284e9c2375c36c839411a0cf550cbfc4d5e" + integrity sha512-rzUyPU/S7rwUflMyLc1ETDeBj0NRuHKKAcvukozwhshr6g6c5d8zh4c2gQjY2bZ0dXeGLWc1PF174P2tVvKhfg== + +d3-dsv@^3.0.1: + version "3.0.1" + resolved "https://registry.npmjs.org/d3-dsv/-/d3-dsv-3.0.1.tgz#c63af978f4d6a0d084a52a673922be2160789b73" + integrity 
sha512-UG6OvdI5afDIFP9w4G0mNq50dSOsXHJaRE8arAS5o9ApWnIElp8GZw1Dun8vP8OyHOZ/QJUKUJwxiiCCnUwm+Q== + dependencies: + commander "7" + iconv-lite "0.6" + rw "1" + +d3-force@^3.0.0: + version "3.0.0" + resolved "https://registry.npmjs.org/d3-force/-/d3-force-3.0.0.tgz#3e2ba1a61e70888fe3d9194e30d6d14eece155c4" + integrity sha512-zxV/SsA+U4yte8051P4ECydjD/S+qeYtnaIyAs9tgHCqfguma/aAQDjo85A9Z6EKhBirHRJHXIgJUlffT4wdLg== + dependencies: + d3-dispatch "1 - 3" + d3-quadtree "1 - 3" + d3-timer "1 - 3" + +"d3-format@1 - 3", d3-format@^3.1.0: + version "3.1.0" + resolved "https://registry.npmjs.org/d3-format/-/d3-format-3.1.0.tgz#9260e23a28ea5cb109e93b21a06e24e2ebd55641" + integrity sha512-YyUI6AEuY/Wpt8KWLgZHsIU86atmikuoOmCfommt0LYHiQSPjvX2AcFc38PX0CBpr2RCyZhjex+NS/LPOv6YqA== + +d3-geo-projection@^4.0.0: + version "4.0.0" + resolved "https://registry.npmjs.org/d3-geo-projection/-/d3-geo-projection-4.0.0.tgz#dc229e5ead78d31869a4e87cf1f45bd2716c48ca" + integrity sha512-p0bK60CEzph1iqmnxut7d/1kyTmm3UWtPlwdkM31AU+LW+BXazd5zJdoCn7VFxNCHXRngPHRnsNn5uGjLRGndg== + dependencies: + commander "7" + d3-array "1 - 3" + d3-geo "1.12.0 - 3" + +"d3-geo@1.12.0 - 3", d3-geo@^3.1.0: + version "3.1.1" + resolved "https://registry.npmjs.org/d3-geo/-/d3-geo-3.1.1.tgz#6027cf51246f9b2ebd64f99e01dc7c3364033a4d" + integrity sha512-637ln3gXKXOwhalDzinUgY83KzNWZRKbYubaG+fGVuc/dxO64RRljtCTnf5ecMyE1RIdtqpkVcq0IbtU2S8j2Q== + dependencies: + d3-array "2.5.0 - 3" + +d3-hierarchy@^3.1.2: + version "3.1.2" + resolved "https://registry.npmjs.org/d3-hierarchy/-/d3-hierarchy-3.1.2.tgz#b01cd42c1eed3d46db77a5966cf726f8c09160c6" + integrity sha512-FX/9frcub54beBdugHjDCdikxThEqjnR93Qt7PvQTOHxyiNCAlvMrHhclk3cD5VeAaq9fxmfRp+CnWw9rEMBuA== + +"d3-interpolate@1 - 3", "d3-interpolate@1.2.0 - 3", d3-interpolate@^3.0.1: + version "3.0.1" + resolved "https://registry.npmjs.org/d3-interpolate/-/d3-interpolate-3.0.1.tgz#3c47aa5b32c5b3dfb56ef3fd4342078a632b400d" + integrity 
sha512-3bYs1rOD33uo8aqJfKP3JWPAibgw8Zm2+L9vBKEHJ2Rg+viTR7o5Mmv5mZcieN+FRYaAOWX5SJATX6k1PWz72g== + dependencies: + d3-color "1 - 3" + +d3-path@^3.1.0: + version "3.1.0" + resolved "https://registry.npmjs.org/d3-path/-/d3-path-3.1.0.tgz#22df939032fb5a71ae8b1800d61ddb7851c42526" + integrity sha512-p3KP5HCf/bvjBSSKuXid6Zqijx7wIfNW+J/maPs+iwR35at5JCbLUT0LzF1cnjbCHWhqzQTIN2Jpe8pRebIEFQ== + +"d3-quadtree@1 - 3": + version "3.0.1" + resolved "https://registry.npmjs.org/d3-quadtree/-/d3-quadtree-3.0.1.tgz#6dca3e8be2b393c9a9d514dabbd80a92deef1a4f" + integrity sha512-04xDrxQTDTCFwP5H6hRhsRcb9xxv2RzkcsygFzmkSIOJy3PeRJP7sNk3VRIbKXcog561P9oU0/rVH6vDROAgUw== + +d3-scale-chromatic@^3.1.0: + version "3.1.0" + resolved "https://registry.npmjs.org/d3-scale-chromatic/-/d3-scale-chromatic-3.1.0.tgz#34c39da298b23c20e02f1a4b239bd0f22e7f1314" + integrity sha512-A3s5PWiZ9YCXFye1o246KoscMWqf8BsD9eRiJ3He7C9OBaxKhAd5TFCdEx/7VbKtxxTsu//1mMJFrEt572cEyQ== + dependencies: + d3-color "1 - 3" + d3-interpolate "1 - 3" + +d3-scale@^4.0.2: + version "4.0.2" + resolved "https://registry.npmjs.org/d3-scale/-/d3-scale-4.0.2.tgz#82b38e8e8ff7080764f8dcec77bd4be393689396" + integrity sha512-GZW464g1SH7ag3Y7hXjf8RoUuAFIqklOAq3MRl4OaWabTFJY9PN/E1YklhXLh+OQ3fM9yS2nOkCoS+WLZ6kvxQ== + dependencies: + d3-array "2.10.0 - 3" + d3-format "1 - 3" + d3-interpolate "1.2.0 - 3" + d3-time "2.1.1 - 3" + d3-time-format "2 - 4" + +d3-shape@^3.2.0: + version "3.2.0" + resolved "https://registry.npmjs.org/d3-shape/-/d3-shape-3.2.0.tgz#a1a839cbd9ba45f28674c69d7f855bcf91dfc6a5" + integrity sha512-SaLBuwGm3MOViRq2ABk3eLoxwZELpH6zhl3FbAoJ7Vm1gofKx6El1Ib5z23NUEhF9AsGl7y+dzLe5Cw2AArGTA== + dependencies: + d3-path "^3.1.0" + +"d3-time-format@2 - 4", d3-time-format@^4.1.0: + version "4.1.0" + resolved "https://registry.npmjs.org/d3-time-format/-/d3-time-format-4.1.0.tgz#7ab5257a5041d11ecb4fe70a5c7d16a195bb408a" + integrity sha512-dJxPBlzC7NugB2PDLwo9Q8JiTR3M3e4/XANkreKSUxF8vvXKqm1Yfq4Q5dl8budlunRVlUUaDUgFt7eA8D6NLg== + dependencies: 
+ d3-time "1 - 3" + +"d3-time@1 - 3", "d3-time@2.1.1 - 3", d3-time@^3.1.0: + version "3.1.0" + resolved "https://registry.npmjs.org/d3-time/-/d3-time-3.1.0.tgz#9310db56e992e3c0175e1ef385e545e48a9bb5c7" + integrity sha512-VqKjzBLejbSMT4IgbmVgDjpkYrNWUYJnbCGo874u7MMKIWsILRX+OpX/gTk8MqjpT1A/c6HY2dCA77ZN0lkQ2Q== + dependencies: + d3-array "2 - 3" + +"d3-timer@1 - 3", d3-timer@^3.0.1: + version "3.0.1" + resolved "https://registry.npmjs.org/d3-timer/-/d3-timer-3.0.1.tgz#6284d2a2708285b1abb7e201eda4380af35e63b0" + integrity sha512-ndfJ/JxxMd3nw31uyKoY2naivF+r29V+Lc0svZxe1JvvIRmi8hUsrMvdOwgS1o6uBHmiz91geQ0ylPP0aj1VUA== + +debug@^2.6.9: + version "2.6.9" + resolved "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz#5d128515df134ff327e90a4c93f4e077a536341f" + integrity sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA== + dependencies: + ms "2.0.0" + +deepmerge@^4.2.2: + version "4.3.1" + resolved "https://registry.npmjs.org/deepmerge/-/deepmerge-4.3.1.tgz#44b5f2147cd3b00d4b56137685966f26fd25dd4a" + integrity sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A== + +delaunator@5: + version "5.0.1" + resolved "https://registry.npmjs.org/delaunator/-/delaunator-5.0.1.tgz#39032b08053923e924d6094fe2cde1a99cc51278" + integrity sha512-8nvh+XBe96aCESrGOqMp/84b13H9cdKbG5P2ejQCh4d4sK9RL4371qou9drQjMhvnPmhWl5hnmqbEE0fXr9Xnw== + dependencies: + robust-predicates "^3.0.2" + +dom-serializer@^2.0.0: + version "2.0.0" + resolved "https://registry.npmjs.org/dom-serializer/-/dom-serializer-2.0.0.tgz#e41b802e1eedf9f6cae183ce5e622d789d7d8e53" + integrity sha512-wIkAryiqt/nV5EQKqQpo3SToSOV9J0DnbJqwK7Wv/Trc92zIAYZ4FlMu+JPFW1DfGFt81ZTCGgDEabffXeLyJg== + dependencies: + domelementtype "^2.3.0" + domhandler "^5.0.2" + entities "^4.2.0" + +domelementtype@^2.3.0: + version "2.3.0" + resolved "https://registry.npmjs.org/domelementtype/-/domelementtype-2.3.0.tgz#5c45e8e869952626331d7aab326d01daf65d589d" + integrity 
sha512-OLETBj6w0OsagBwdXnPdN0cnMfF9opN69co+7ZrbfPGrdpPVNBUj02spi6B1N7wChLQiPn4CSH/zJvXw56gmHw== + +domhandler@^5.0.2, domhandler@^5.0.3: + version "5.0.3" + resolved "https://registry.npmjs.org/domhandler/-/domhandler-5.0.3.tgz#cc385f7f751f1d1fc650c21374804254538c7d31" + integrity sha512-cgwlv/1iFQiFnU96XXgROh8xTeetsnJiDsTc7TYCLFd9+/WNkIqPTxiM/8pSd8VIrhXGTf1Ny1q1hquVqDJB5w== + dependencies: + domelementtype "^2.3.0" + +domutils@^3.0.1: + version "3.1.0" + resolved "https://registry.npmjs.org/domutils/-/domutils-3.1.0.tgz#c47f551278d3dc4b0b1ab8cbb42d751a6f0d824e" + integrity sha512-H78uMmQtI2AhgDJjWeQmHwJJ2bLPD3GMmO7Zja/ZZh84wkm+4ut+IUnUdRa8uCGX88DiVx1j6FRe1XfxEgjEZA== + dependencies: + dom-serializer "^2.0.0" + domelementtype "^2.3.0" + domhandler "^5.0.3" + +emoji-regex@^8.0.0: + version "8.0.0" + resolved "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz#e818fd69ce5ccfcb404594f842963bf53164cc37" + integrity sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A== + +entities@^4.2.0, entities@^4.4.0: + version "4.5.0" + resolved "https://registry.npmjs.org/entities/-/entities-4.5.0.tgz#5d268ea5e7113ec74c4d033b79ea5a35a488fb48" + integrity sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw== + +escalade@^3.1.1: + version "3.2.0" + resolved "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz#011a3f69856ba189dffa7dc8fcce99d2a87903e5" + integrity sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA== + +escape-string-regexp@^4.0.0: + version "4.0.0" + resolved "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz#14ba83a5d373e3d311e5afca29cf5bfad965bf34" + integrity sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA== + +exenv-es6@^1.1.1: + version "1.1.1" + resolved 
"https://registry.npmjs.org/exenv-es6/-/exenv-es6-1.1.1.tgz#80b7a8c5af24d53331f755bac07e84abb1f6de67" + integrity sha512-vlVu3N8d6yEMpMsEm+7sUBAI81aqYYuEvfK0jNqmdb/OPXzzH7QWDDnVjMvDSY47JdHEqx/dfC/q8WkfoTmpGQ== + +fast-deep-equal@^3.1.3: + version "3.1.3" + resolved "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz#3a7d56b559d6cbc3eb512325244e619a65c6c525" + integrity sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q== + +fast-equals@^5.0.1: + version "5.0.1" + resolved "https://registry.npmjs.org/fast-equals/-/fast-equals-5.0.1.tgz#a4eefe3c5d1c0d021aeed0bc10ba5e0c12ee405d" + integrity sha512-WF1Wi8PwwSY7/6Kx0vKXtw8RwuSGoM1bvDaJbu7MxDlR1vovZjIAKrnzyrThgAjm6JDTu0fVgWXDlMGspodfoQ== + +fast-uri@^3.0.1: + version "3.0.1" + resolved "https://registry.npmjs.org/fast-uri/-/fast-uri-3.0.1.tgz#cddd2eecfc83a71c1be2cc2ef2061331be8a7134" + integrity sha512-MWipKbbYiYI0UC7cl8m/i/IWTqfC8YXsqjzybjddLsFjStroQzsHXkc73JutMvBiXmOvapk+axIl79ig5t55Bw== + +free-style@3.1.0: + version "3.1.0" + resolved "https://registry.npmjs.org/free-style/-/free-style-3.1.0.tgz#4e2996029534e6b1731611d843437b9e2f473f08" + integrity sha512-vJujYSIyT30iDoaoeigNAxX4yB1RUrh+N2ZMhIElMr3BvCuGXOw7XNJMEEJkDUeamK2Rnb/IKFGKRKlTWIGRWA== + +fs-extra@^10.1.0: + version "10.1.0" + resolved "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz#02873cfbc4084dde127eaa5f9905eef2325d1abf" + integrity sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ== + dependencies: + graceful-fs "^4.2.0" + jsonfile "^6.0.1" + universalify "^2.0.0" + +fsevents@2.3.2: + version "2.3.2" + resolved "https://registry.npmjs.org/fsevents/-/fsevents-2.3.2.tgz#8a526f78b8fdf4623b709e0b975c52c24c02fd1a" + integrity sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA== + +get-caller-file@^2.0.5: + version "2.0.5" + resolved 
"https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz#4f94412a82db32f36e3b0b9741f8a97feb031f7e" + integrity sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg== + +graceful-fs@^4.1.6, graceful-fs@^4.2.0: + version "4.2.11" + resolved "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz#4183e4e8bf08bb6e05bbb2f7d2e0c8f712ca40e3" + integrity sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ== + +htmlparser2@^8.0.0: + version "8.0.2" + resolved "https://registry.npmjs.org/htmlparser2/-/htmlparser2-8.0.2.tgz#f002151705b383e62433b5cf466f5b716edaec21" + integrity sha512-GYdjWKDkbRLkZ5geuHs5NY1puJ+PXwP7+fHPRz06Eirsb9ugf6d8kkXav6ADhcODhFFPMIXyxkxSuMf3D6NCFA== + dependencies: + domelementtype "^2.3.0" + domhandler "^5.0.3" + domutils "^3.0.1" + entities "^4.4.0" + +iconv-lite@0.6: + version "0.6.3" + resolved "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.6.3.tgz#a52f80bf38da1952eb5c681790719871a1a72501" + integrity sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw== + dependencies: + safer-buffer ">= 2.1.2 < 3.0.0" + +inherits@2.0.3: + version "2.0.3" + resolved "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz#633c2c83e3da42a502f52466022480f4208261de" + integrity sha512-x00IRNXNy63jwGkJmzPigoySHbaqpNuzKbBOmzK+g2OdZpQ9w+sxCN+VSB3ja7IAge2OP2qpfxTjeNcyjmW1uw== + +inherits@~2.0.3: + version "2.0.4" + resolved "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz#0fa2c64f932917c3433a0ded55363aae37416b7c" + integrity sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ== + +"internmap@1 - 2": + version "2.0.3" + resolved "https://registry.npmjs.org/internmap/-/internmap-2.0.3.tgz#6685f23755e43c524e251d29cbc97248e3061009" + integrity sha512-5Hh7Y1wQbvY5ooGgPbDaL5iYLAPzMTUrjMulskHLH6wnv/A+1q5rgEaiuqEjB+oxGXIVZs1FF+R/KPN3ZSQYYg== + +is-fullwidth-code-point@^3.0.0: 
+ version "3.0.0" + resolved "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz#f116f8064fe90b3f7844a38997c0b75051269f1d" + integrity sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg== + +is-plain-object@^5.0.0: + version "5.0.0" + resolved "https://registry.npmjs.org/is-plain-object/-/is-plain-object-5.0.0.tgz#4427f50ab3429e9025ea7d52e9043a9ef4159344" + integrity sha512-VRSzKkbMm5jMDoKLbltAkFQ5Qr7VDiTFGXxYFXXowVj387GeGNOCsOH6Msy00SGZ3Fp84b1Naa1psqgcCIEP5Q== + +isarray@~1.0.0: + version "1.0.0" + resolved "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz#bb935d48582cba168c06834957a54a3e07124f11" + integrity sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ== + +isomorphic.js@^0.2.4: + version "0.2.5" + resolved "https://registry.npmjs.org/isomorphic.js/-/isomorphic.js-0.2.5.tgz#13eecf36f2dba53e85d355e11bf9d4208c6f7f88" + integrity sha512-PIeMbHqMt4DnUP3MA/Flc0HElYjMXArsw1qwJZcm9sqR8mq3l8NYizFMty0pWwE/tzIGH3EKK5+jes5mAr85yw== + +"js-tokens@^3.0.0 || ^4.0.0": + version "4.0.0" + resolved "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz#19203fb59991df98e3a287050d4647cdeaf32499" + integrity sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ== + +json-schema-compare@^0.2.2: + version "0.2.2" + resolved "https://registry.npmjs.org/json-schema-compare/-/json-schema-compare-0.2.2.tgz#dd601508335a90c7f4cfadb6b2e397225c908e56" + integrity sha512-c4WYmDKyJXhs7WWvAWm3uIYnfyWFoIp+JEoX34rctVvEkMYCPGhXtvmFFXiffBbxfZsvQ0RNnV5H7GvDF5HCqQ== + dependencies: + lodash "^4.17.4" + +json-schema-merge-allof@^0.8.1: + version "0.8.1" + resolved "https://registry.npmjs.org/json-schema-merge-allof/-/json-schema-merge-allof-0.8.1.tgz#ed2828cdd958616ff74f932830a26291789eaaf2" + integrity sha512-CTUKmIlPJbsWfzRRnOXz+0MjIqvnleIXwFTzz+t9T86HnYX/Rozria6ZVGLktAU9e+NygNljveP+yxqtQp/Q4w== + dependencies: + compute-lcm 
"^1.1.2" + json-schema-compare "^0.2.2" + lodash "^4.17.20" + +json-schema-traverse@^1.0.0: + version "1.0.0" + resolved "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz#ae7bcb3656ab77a73ba5c49bf654f38e6b6860e2" + integrity sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug== + +json-stringify-pretty-compact@~3.0.0: + version "3.0.0" + resolved "https://registry.npmjs.org/json-stringify-pretty-compact/-/json-stringify-pretty-compact-3.0.0.tgz#f71ef9d82ef16483a407869556588e91b681d9ab" + integrity sha512-Rc2suX5meI0S3bfdZuA7JMFBGkJ875ApfVyq2WHELjBiiG22My/l7/8zPpH/CfFVQHuVLd8NLR0nv6vi0BYYKA== + +json5@^2.2.3: + version "2.2.3" + resolved "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz#78cd6f1a19bdc12b73db5ad0c61efd66c1e29283" + integrity sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg== + +jsonfile@^6.0.1: + version "6.1.0" + resolved "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz#bc55b2634793c679ec6403094eb13698a6ec0aae" + integrity sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ== + dependencies: + universalify "^2.0.0" + optionalDependencies: + graceful-fs "^4.1.6" + +jsonpointer@^5.0.1: + version "5.0.1" + resolved "https://registry.npmjs.org/jsonpointer/-/jsonpointer-5.0.1.tgz#2110e0af0900fd37467b5907ecd13a7884a1b559" + integrity sha512-p/nXbhSEcu3pZRdkW1OfJhpsVtW1gd4Wa1fnQc9YLiTfAjn0312eMKimbdIQzuZl9aa9xUGaRlP9T/CJE/ditQ== + +lib0@^0.2.85, lib0@^0.2.86: + version "0.2.97" + resolved "https://registry.npmjs.org/lib0/-/lib0-0.2.97.tgz#a68d7c88577ac1910cdbe5204bac070f07c8e0b4" + integrity sha512-Q4d1ekgvufi9FiHkkL46AhecfNjznSL9MRNoJRQ76gBHS9OqU2ArfQK0FvBpuxgWeJeNI0LVgAYMIpsGeX4gYg== + dependencies: + isomorphic.js "^0.2.4" + +lodash-es@^4.17.21: + version "4.17.21" + resolved "https://registry.npmjs.org/lodash-es/-/lodash-es-4.17.21.tgz#43e626c46e6591b7750beb2b50117390c609e3ee" + 
integrity sha512-mKnC+QJ9pWVzv+C4/U3rRsHapFfHvQFoFB92e52xeyGMcX6/OlIl78je1u8vePzYZSkkogMPJ2yjxxsb89cxyw== + +lodash.escape@^4.0.1: + version "4.0.1" + resolved "https://registry.npmjs.org/lodash.escape/-/lodash.escape-4.0.1.tgz#c9044690c21e04294beaa517712fded1fa88de98" + integrity sha512-nXEOnb/jK9g0DYMr1/Xvq6l5xMD7GDG55+GSYIYmS0G4tBk/hURD4JR9WCavs04t33WmJx9kCyp9vJ+mr4BOUw== + +lodash.mergewith@^4.6.1: + version "4.6.2" + resolved "https://registry.npmjs.org/lodash.mergewith/-/lodash.mergewith-4.6.2.tgz#617121f89ac55f59047c7aec1ccd6654c6590f55" + integrity sha512-GK3g5RPZWTRSeLSpgP8Xhra+pnjBC56q9FZYe1d5RN3TJ35dbkGy3YqBSMbyCrlbi+CM9Z3Jk5yTL7RCsqboyQ== + +lodash@^4.17.20, lodash@^4.17.21, lodash@^4.17.4: + version "4.17.21" + resolved "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz#679591c564c3bffaae8454cf0b3df370c3d6911c" + integrity sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg== + +loose-envify@^1.1.0, loose-envify@^1.4.0: + version "1.4.0" + resolved "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz#71ee51fa7be4caec1a63839f7e682d8132d30caf" + integrity sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q== + dependencies: + js-tokens "^3.0.0 || ^4.0.0" + +markdown-to-jsx@^7.4.1: + version "7.5.0" + resolved "https://registry.npmjs.org/markdown-to-jsx/-/markdown-to-jsx-7.5.0.tgz#42ece0c71e842560a7d8bd9f81e7a34515c72150" + integrity sha512-RrBNcMHiFPcz/iqIj0n3wclzHXjwS7mzjBNWecKKVhNTIxQepIix6Il/wZCn2Cg5Y1ow2Qi84+eJrryFRWBEWw== + +minimist@^1.2.0, minimist@~1.2.0: + version "1.2.8" + resolved "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz#c1a464e7693302e082a075cee0c057741ac4772c" + integrity sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA== + +ms@2.0.0: + version "2.0.0" + resolved "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz#5608aeadfc00be6c2901df5f9861788de0d597c8" + integrity 
sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A== + +nanoid@^3.3.7: + version "3.3.8" + resolved "https://registry.npmjs.org/nanoid/-/nanoid-3.3.8.tgz#b1be3030bee36aaff18bacb375e5cce521684baf" + integrity sha512-WNLf5Sd8oZxOm+TzppcYk8gVOgP+l58xNy58D0nbUnOxOWRWvlcCV4kUF7ltmI6PsrLl/BgKEyS4mqsGChFN0w== + +node-fetch@^2.6.7: + version "2.7.0" + resolved "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz#d0f0fa6e3e2dc1d27efcd8ad99d550bda94d187d" + integrity sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A== + dependencies: + whatwg-url "^5.0.0" + +object-assign@^4.1.1: + version "4.1.1" + resolved "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz#2109adc7965887cfc05cbbd442cac8bfbb360863" + integrity sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg== + +parse-srcset@^1.0.2: + version "1.0.2" + resolved "https://registry.npmjs.org/parse-srcset/-/parse-srcset-1.0.2.tgz#f2bd221f6cc970a938d88556abc589caaaa2bde1" + integrity sha512-/2qh0lav6CmI15FzA3i/2Bzk2zCgQhGMkvhOhKNcBVQ1ldgpbfiNTVslmooUmWJcADi1f1kIeynbDRVzNlfR6Q== + +path-browserify@^1.0.0: + version "1.0.1" + resolved "https://registry.npmjs.org/path-browserify/-/path-browserify-1.0.1.tgz#d98454a9c3753d5790860f16f68867b9e46be1fd" + integrity sha512-b7uo2UCUOYZcnF/3ID0lulOJi/bafxa1xPe7ZPsammBSpjSWQkjNxlt635YGS2MiR9GjvuXCtz2emr3jbsz98g== + +path@~0.12.7: + version "0.12.7" + resolved "https://registry.npmjs.org/path/-/path-0.12.7.tgz#d4dc2a506c4ce2197eb481ebfcd5b36c0140b10f" + integrity sha512-aXXC6s+1w7otVF9UletFkFcDsJeO7lSZBPUQhtb5O0xJe8LtYhj/GxldoL09bBj9+ZmE2hNoHqQSFMN5fikh4Q== + dependencies: + process "^0.11.1" + util "^0.10.3" + +picocolors@^1.0.1: + version "1.1.0" + resolved "https://registry.npmjs.org/picocolors/-/picocolors-1.1.0.tgz#5358b76a78cde483ba5cef6a9dc9671440b27d59" + integrity 
sha512-TQ92mBOW0l3LeMeyLV6mzy/kWr8lkd/hp3mTg7wYK7zJhuBStmGMBG0BdeDZS/dZx1IukaX6Bk11zcln25o1Aw== + +playwright-core@1.49.0: + version "1.49.0" + resolved "https://registry.npmjs.org/playwright-core/-/playwright-core-1.49.0.tgz#8e69ffed3f41855b854982f3632f2922c890afcb" + integrity sha512-R+3KKTQF3npy5GTiKH/T+kdhoJfJojjHESR1YEWhYuEKRVfVaxH3+4+GvXE5xyCngCxhxnykk0Vlah9v8fs3jA== + +playwright@1.49.0: + version "1.49.0" + resolved "https://registry.npmjs.org/playwright/-/playwright-1.49.0.tgz#df6b9e05423377a99658202844a294a8afb95d0a" + integrity sha512-eKpmys0UFDnfNb3vfsf8Vx2LEOtflgRebl0Im2eQQnYMA4Aqd+Zw8bEOB+7ZKvN76901mRnqdsiOGKxzVTbi7A== + dependencies: + playwright-core "1.49.0" + optionalDependencies: + fsevents "2.3.2" + +postcss@^8.3.11: + version "8.4.45" + resolved "https://registry.npmjs.org/postcss/-/postcss-8.4.45.tgz#538d13d89a16ef71edbf75d895284ae06b79e603" + integrity sha512-7KTLTdzdZZYscUc65XmjFiB73vBhBfbPztCYdUNvlaso9PrzjzcmjqBPR0lNGkcVlcO4BjiO5rK/qNz+XAen1Q== + dependencies: + nanoid "^3.3.7" + picocolors "^1.0.1" + source-map-js "^1.2.0" + +process-nextick-args@~2.0.0: + version "2.0.1" + resolved "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz#7820d9b16120cc55ca9ae7792680ae7dba6d7fe2" + integrity sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag== + +process@^0.11.1: + version "0.11.10" + resolved "https://registry.npmjs.org/process/-/process-0.11.10.tgz#7332300e840161bda3e69a1d1d91a7d4bc16f182" + integrity sha512-cdGef/drWFoydD1JsMzuFf8100nZl+GT+yacc2bEced5f9Rjk4z+WtFUTBu9PhOi9j/jfmBPu0mMEY4wIdAF8A== + +prop-types@^15.8.1: + version "15.8.1" + resolved "https://registry.npmjs.org/prop-types/-/prop-types-15.8.1.tgz#67d87bf1a694f48435cf332c24af10214a3140b5" + integrity sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg== + dependencies: + loose-envify "^1.4.0" + object-assign "^4.1.1" + react-is "^16.13.1" + +querystringify@^2.1.1: + 
version "2.2.0" + resolved "https://registry.npmjs.org/querystringify/-/querystringify-2.2.0.tgz#3345941b4153cb9d082d8eee4cda2016a9aef7f6" + integrity sha512-FIqgj2EUvTa7R50u0rGsyTftzjYmv/a3hO345bZNrqabNqjtgiDMgmo4mkUjd+nzU5oF3dClKqFIPUKybUyqoQ== + +react-dom@^18.2.0: + version "18.3.1" + resolved "https://registry.npmjs.org/react-dom/-/react-dom-18.3.1.tgz#c2265d79511b57d479b3dd3fdfa51536494c5cb4" + integrity sha512-5m4nQKp+rZRb09LNH59GM4BxTh9251/ylbKIbpe7TpGxfJ+9kv6BLkLBXIjjspbgbnIBNqlI23tRnTWT0snUIw== + dependencies: + loose-envify "^1.1.0" + scheduler "^0.23.2" + +react-is@^16.13.1: + version "16.13.1" + resolved "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz#789729a4dc36de2999dc156dd6c1d9c18cea56a4" + integrity sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ== + +react-is@^18.2.0: + version "18.3.1" + resolved "https://registry.npmjs.org/react-is/-/react-is-18.3.1.tgz#e83557dc12eae63a99e003a46388b1dcbb44db7e" + integrity sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg== + +"react@>=17.0.0 <19.0.0", react@^18.2.0: + version "18.3.1" + resolved "https://registry.npmjs.org/react/-/react-18.3.1.tgz#49ab892009c53933625bd16b2533fc754cab2891" + integrity sha512-wS+hAgJShR0KhEvPJArfuPVN1+Hz1t0Y6n5jLrGQbkb4urgPE/0Rve+1kMB1v/oWgHgm4WIcV+i7F2pTVj+2iQ== + dependencies: + loose-envify "^1.1.0" + +readable-stream@^2.1.4: + version "2.3.8" + resolved "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.8.tgz#91125e8042bba1b9887f49345f6277027ce8be9b" + integrity sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA== + dependencies: + core-util-is "~1.0.0" + inherits "~2.0.3" + isarray "~1.0.0" + process-nextick-args "~2.0.0" + safe-buffer "~5.1.1" + string_decoder "~1.1.1" + util-deprecate "~1.0.1" + +regexp-match-indices@^1.0.2: + version "1.0.2" + resolved 
"https://registry.npmjs.org/regexp-match-indices/-/regexp-match-indices-1.0.2.tgz#cf20054a6f7d5b3e116a701a7b00f82889d10da6" + integrity sha512-DwZuAkt8NF5mKwGGER1EGh2PRqyvhRhhLviH+R8y8dIuaQROlUfXjt4s9ZTXstIsSkptf06BSvwcEmmfheJJWQ== + dependencies: + regexp-tree "^0.1.11" + +regexp-tree@^0.1.11: + version "0.1.27" + resolved "https://registry.npmjs.org/regexp-tree/-/regexp-tree-0.1.27.tgz#2198f0ef54518ffa743fe74d983b56ffd631b6cd" + integrity sha512-iETxpjK6YoRWJG5o6hXLwvjYAoW+FEZn9os0PD/b6AP6xQwsa/Y7lCVgIixBbUPMfhu+i2LtdeAqVTgGlQarfA== + +require-directory@^2.1.1: + version "2.1.1" + resolved "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz#8c64ad5fd30dab1c976e2344ffe7f792a6a6df42" + integrity sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q== + +require-from-string@^2.0.2: + version "2.0.2" + resolved "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz#89a7fdd938261267318eafe14f9c32e598c36909" + integrity sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw== + +requires-port@^1.0.0: + version "1.0.0" + resolved "https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz#925d2601d39ac485e091cf0da5c6e694dc3dcaff" + integrity sha512-KigOCHcocU3XODJxsu8i/j8T9tzT4adHiecwORRQ0ZZFcp7ahwXuRU1m+yuO90C5ZUyGeGfocHDI14M3L3yDAQ== + +robust-predicates@^3.0.2: + version "3.0.2" + resolved "https://registry.npmjs.org/robust-predicates/-/robust-predicates-3.0.2.tgz#d5b28528c4824d20fc48df1928d41d9efa1ad771" + integrity sha512-IXgzBWvWQwE6PrDI05OvmXUIruQTcoMDzRsOd5CDvHCVLcLHMTSYvOK5Cm46kWqlV3yAbuSpBZdJ5oP5OUoStg== + +rw@1: + version "1.3.3" + resolved "https://registry.npmjs.org/rw/-/rw-1.3.3.tgz#3f862dfa91ab766b14885ef4d01124bfda074fb4" + integrity sha512-PdhdWy89SiZogBLaw42zdeqtRJ//zFd2PgQavcICDUgJT5oW10QCRKbJ6bg4r0/UY2M6BWd5tkxuGFRvCkgfHQ== + +safe-buffer@~5.1.0, safe-buffer@~5.1.1: + version "5.1.2" + resolved 
"https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz#991ec69d296e0313747d59bdfd2b745c35f8828d" + integrity sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g== + +"safer-buffer@>= 2.1.2 < 3.0.0": + version "2.1.2" + resolved "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz#44fa161b0187b9549dd84bb91802f9bd8385cd6a" + integrity sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg== + +sanitize-html@~2.12.1: + version "2.12.1" + resolved "https://registry.npmjs.org/sanitize-html/-/sanitize-html-2.12.1.tgz#280a0f5c37305222921f6f9d605be1f6558914c7" + integrity sha512-Plh+JAn0UVDpBRP/xEjsk+xDCoOvMBwQUf/K+/cBAVuTbtX8bj2VB7S1sL1dssVpykqp0/KPSesHrqXtokVBpA== + dependencies: + deepmerge "^4.2.2" + escape-string-regexp "^4.0.0" + htmlparser2 "^8.0.0" + is-plain-object "^5.0.0" + parse-srcset "^1.0.2" + postcss "^8.3.11" + +scheduler@^0.23.2: + version "0.23.2" + resolved "https://registry.npmjs.org/scheduler/-/scheduler-0.23.2.tgz#414ba64a3b282892e944cf2108ecc078d115cdc3" + integrity sha512-UOShsPwz7NrMUqhR6t0hWjFduvOzbtv7toDH1/hIrfRNIDBnnBWd0CwJTGvTpngVlmwGCdP9/Zl/tVrDqcuYzQ== + dependencies: + loose-envify "^1.1.0" + +semver@^7.5.0: + version "7.6.3" + resolved "https://registry.npmjs.org/semver/-/semver-7.6.3.tgz#980f7b5550bc175fb4dc09403085627f9eb33143" + integrity sha512-oVekP1cKtI+CTDvHWYFUcMtsK/00wmAEfyqKfNdARm8u1wNVhSgaX7A8d4UuIlUI5e84iEwOhs7ZPYRmzU9U6A== + +source-map-js@^1.2.0: + version "1.2.1" + resolved "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz#1ce5650fddd87abc099eda37dcff024c2667ae46" + integrity sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA== + +string-width@^4.1.0, string-width@^4.2.0, string-width@^4.2.3: + version "4.2.3" + resolved "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz#269c7117d27b05ad2e536830a8ec895ef9c6d010" + integrity 
sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g== + dependencies: + emoji-regex "^8.0.0" + is-fullwidth-code-point "^3.0.0" + strip-ansi "^6.0.1" + +string_decoder@~1.1.1: + version "1.1.1" + resolved "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz#9cf1611ba62685d7030ae9e4ba34149c3af03fc8" + integrity sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg== + dependencies: + safe-buffer "~5.1.0" + +strip-ansi@^6.0.0, strip-ansi@^6.0.1: + version "6.0.1" + resolved "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz#9e26c63d30f53443e9489495b2105d37b67a85d9" + integrity sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A== + dependencies: + ansi-regex "^5.0.1" + +style-mod@^4.0.0, style-mod@^4.1.0: + version "4.1.2" + resolved "https://registry.npmjs.org/style-mod/-/style-mod-4.1.2.tgz#ca238a1ad4786520f7515a8539d5a63691d7bf67" + integrity sha512-wnD1HyVqpJUI2+eKZ+eo1UwghftP6yuFheBqqe+bWCotBjC2K1YnteJILRMs3SM4V/0dLEW1SC27MWP5y+mwmw== + +systeminformation@^5.8.6: + version "5.23.5" + resolved "https://registry.npmjs.org/systeminformation/-/systeminformation-5.23.5.tgz#bf7544be42c42f7d14a81b02aa0365e9ca1f5266" + integrity sha512-PEpJwhRYxZgBCAlWZhWIgfMTjXLqfcaZ1pJsJn9snWNfBW/Z1YQg1mbIUSWrEV3ErAHF7l/OoVLQeaZDlPzkpA== + +tabbable@^5.2.0: + version "5.3.3" + resolved "https://registry.npmjs.org/tabbable/-/tabbable-5.3.3.tgz#aac0ff88c73b22d6c3c5a50b1586310006b47fbf" + integrity sha512-QD9qKY3StfbZqWOPLp0++pOrAVb/HbUi5xCc8cUo4XjP19808oaMiDzn0leBY5mCespIBM0CIZePzZjgzR83kA== + +topojson-client@^3.1.0: + version "3.1.0" + resolved "https://registry.npmjs.org/topojson-client/-/topojson-client-3.1.0.tgz#22e8b1ed08a2b922feeb4af6f53b6ef09a467b99" + integrity sha512-605uxS6bcYxGXw9qi62XyrV6Q3xwbndjachmNxu8HWTtVPxZfEJN9fd/SZS1Q54Sn2y0TMyMxFj/cJINqGHrKw== + dependencies: + commander "2" + +tr46@~0.0.3: + version "0.0.3" + 
resolved "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz#8184fd347dac9cdc185992f3a6622e14b9d9ab6a" + integrity sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw== + +tslib@^1.13.0: + version "1.14.1" + resolved "https://registry.npmjs.org/tslib/-/tslib-1.14.1.tgz#cf2d38bdc34a134bcaf1091c41f6619e2f672d00" + integrity sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg== + +tslib@^2.5.0: + version "2.7.0" + resolved "https://registry.npmjs.org/tslib/-/tslib-2.7.0.tgz#d9b40c5c40ab59e8738f297df3087bf1a2690c01" + integrity sha512-gLXCKdN1/j47AiHiOkJN69hJmcbGTHI0ImLmbYLHykhgeN0jVGola9yVjFgzCUklsZQMW55o+dW7IXv3RCXDzA== + +tslib@~2.6.3: + version "2.6.3" + resolved "https://registry.npmjs.org/tslib/-/tslib-2.6.3.tgz#0438f810ad7a9edcde7a241c3d80db693c8cbfe0" + integrity sha512-xNvxJEOUiWPGhUuUdQgAJPKOOJfGnIyKySOc09XkKsgdUV/3E2zvwZYdejjmRgPCgcym1juLH3226yA7sEFJKQ== + +typestyle@^2.0.4: + version "2.4.0" + resolved "https://registry.npmjs.org/typestyle/-/typestyle-2.4.0.tgz#df5bae6ff15093f5ce51f0caac5ef79428f64e78" + integrity sha512-/d1BL6Qi+YlMLEydnUEB8KL/CAjAN8cyt3/UyGnOyBrWf7bLGcR/6yhmsaUstO2IcYwZfagjE7AIzuI2vUW9mg== + dependencies: + csstype "3.0.10" + free-style "3.1.0" + +universalify@^2.0.0: + version "2.0.1" + resolved "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz#168efc2180964e6386d061e094df61afe239b18d" + integrity sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw== + +url-parse@~1.5.4: + version "1.5.10" + resolved "https://registry.npmjs.org/url-parse/-/url-parse-1.5.10.tgz#9d3c2f736c1d75dd3bd2be507dcc111f1e2ea9c1" + integrity sha512-WypcfiRhfeUP9vvF0j6rw0J3hrWrw6iZv3+22h6iRMJ/8z1Tj6XfLP4DsUix5MhMPnXpiHDoKyoZ/bdCkwBCiQ== + dependencies: + querystringify "^2.1.1" + requires-port "^1.0.0" + +util-deprecate@~1.0.1: + version "1.0.2" + resolved 
"https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz#450d4dc9fa70de732762fbd2d4a28981419a0ccf" + integrity sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw== + +util@^0.10.3: + version "0.10.4" + resolved "https://registry.npmjs.org/util/-/util-0.10.4.tgz#3aa0125bfe668a4672de58857d3ace27ecb76901" + integrity sha512-0Pm9hTQ3se5ll1XihRic3FDIku70C+iHUdT/W926rSgHV5QgXsYbKZN8MSC3tJtSkhuROzvsQjAaFENRXr+19A== + dependencies: + inherits "2.0.3" + +validate.io-array@^1.0.3: + version "1.0.6" + resolved "https://registry.npmjs.org/validate.io-array/-/validate.io-array-1.0.6.tgz#5b5a2cafd8f8b85abb2f886ba153f2d93a27774d" + integrity sha512-DeOy7CnPEziggrOO5CZhVKJw6S3Yi7e9e65R1Nl/RTN1vTQKnzjfvks0/8kQ40FP/dsjRAOd4hxmJ7uLa6vxkg== + +validate.io-function@^1.0.2: + version "1.0.2" + resolved "https://registry.npmjs.org/validate.io-function/-/validate.io-function-1.0.2.tgz#343a19802ed3b1968269c780e558e93411c0bad7" + integrity sha512-LlFybRJEriSuBnUhQyG5bwglhh50EpTL2ul23MPIuR1odjO7XaMLFV8vHGwp7AZciFxtYOeiSCT5st+XSPONiQ== + +validate.io-integer-array@^1.0.0: + version "1.0.0" + resolved "https://registry.npmjs.org/validate.io-integer-array/-/validate.io-integer-array-1.0.0.tgz#2cabde033293a6bcbe063feafe91eaf46b13a089" + integrity sha512-mTrMk/1ytQHtCY0oNO3dztafHYyGU88KL+jRxWuzfOmQb+4qqnWmI+gykvGp8usKZOM0H7keJHEbRaFiYA0VrA== + dependencies: + validate.io-array "^1.0.3" + validate.io-integer "^1.0.4" + +validate.io-integer@^1.0.4: + version "1.0.5" + resolved "https://registry.npmjs.org/validate.io-integer/-/validate.io-integer-1.0.5.tgz#168496480b95be2247ec443f2233de4f89878068" + integrity sha512-22izsYSLojN/P6bppBqhgUDjCkr5RY2jd+N2a3DCAUey8ydvrZ/OkGvFPR7qfOpwR2LC5p4Ngzxz36g5Vgr/hQ== + dependencies: + validate.io-number "^1.0.3" + +validate.io-number@^1.0.3: + version "1.0.3" + resolved "https://registry.npmjs.org/validate.io-number/-/validate.io-number-1.0.3.tgz#f63ffeda248bf28a67a8d48e0e3b461a1665baf8" + integrity 
sha512-kRAyotcbNaSYoDnXvb4MHg/0a1egJdLwS6oJ38TJY7aw9n93Fl/3blIXdyYvPOp55CNxywooG/3BcrwNrBpcSg== + +vega-canvas@^1.2.7: + version "1.2.7" + resolved "https://registry.npmjs.org/vega-canvas/-/vega-canvas-1.2.7.tgz#cf62169518f5dcd91d24ad352998c2248f8974fb" + integrity sha512-OkJ9CACVcN9R5Pi9uF6MZBF06pO6qFpDYHWSKBJsdHP5o724KrsgR6UvbnXFH82FdsiTOff/HqjuaG8C7FL+9Q== + +vega-crossfilter@~4.1.2: + version "4.1.2" + resolved "https://registry.npmjs.org/vega-crossfilter/-/vega-crossfilter-4.1.2.tgz#810281c279b3592310f12814bc61206dd42ca61d" + integrity sha512-J7KVEXkpfRJBfRvwLxn5vNCzQCNkrnzmDvkvwhuiwT4gPm5sk7MK5TuUP8GCl/iKYw+kWeVXEtrVHwWtug+bcQ== + dependencies: + d3-array "^3.2.2" + vega-dataflow "^5.7.6" + vega-util "^1.17.2" + +vega-dataflow@^5.7.6, vega-dataflow@~5.7.6: + version "5.7.6" + resolved "https://registry.npmjs.org/vega-dataflow/-/vega-dataflow-5.7.6.tgz#21dfad9120cb18d9aeaed578658670839d1adc95" + integrity sha512-9Md8+5iUC1MVKPKDyZ7pCEHk6I9am+DgaMzZqo/27O/KI4f23/WQXPyuI8jbNmc/mkm340P0TKREmzL5M7+2Dg== + dependencies: + vega-format "^1.1.2" + vega-loader "^4.5.2" + vega-util "^1.17.2" + +vega-encode@~4.10.1: + version "4.10.1" + resolved "https://registry.npmjs.org/vega-encode/-/vega-encode-4.10.1.tgz#1656e20396db99c414f495704ef3d9cff99631df" + integrity sha512-d25nVKZDrg109rC65M8uxE+7iUrTxktaqgK4fU3XZBgpWlh1K4UbU5nDag7kiHVVN4tKqwgd+synEotra9TiVQ== + dependencies: + d3-array "^3.2.2" + d3-interpolate "^3.0.1" + vega-dataflow "^5.7.6" + vega-scale "^7.4.1" + vega-util "^1.17.2" + +vega-event-selector@^3.0.1, vega-event-selector@~3.0.1: + version "3.0.1" + resolved "https://registry.npmjs.org/vega-event-selector/-/vega-event-selector-3.0.1.tgz#b99e92147b338158f8079d81b28b2e7199c2e259" + integrity sha512-K5zd7s5tjr1LiOOkjGpcVls8GsH/f2CWCrWcpKy74gTCp+llCdwz0Enqo013ZlGaRNjfgD/o1caJRt3GSaec4A== + +vega-expression@^5.0.1, vega-expression@^5.1.1, vega-expression@~5.1.1: + version "5.1.1" + resolved 
"https://registry.npmjs.org/vega-expression/-/vega-expression-5.1.1.tgz#9b2d287a1f34d990577c9798ae68ec88453815ef" + integrity sha512-zv9L1Hm0KHE9M7mldHyz8sXbGu3KmC0Cdk7qfHkcTNS75Jpsem6jkbu6ZAwx5cNUeW91AxUQOu77r4mygq2wUQ== + dependencies: + "@types/estree" "^1.0.0" + vega-util "^1.17.2" + +vega-force@~4.2.1: + version "4.2.1" + resolved "https://registry.npmjs.org/vega-force/-/vega-force-4.2.1.tgz#bdce6ec8572867b4ff2fb7e09d2894798c5358ec" + integrity sha512-2BcuuqFr77vcCyKfcpedNFeYMxi+XEFCrlgLWNx7YV0PI8pdP5y/yPkzyuE9Tb894+KkRAvfQHZRAshcnFNcMw== + dependencies: + d3-force "^3.0.0" + vega-dataflow "^5.7.6" + vega-util "^1.17.2" + +vega-format@^1.1.2, vega-format@~1.1.2: + version "1.1.2" + resolved "https://registry.npmjs.org/vega-format/-/vega-format-1.1.2.tgz#d344ba8a2680144e92127459c149a4181e9e7f84" + integrity sha512-0kUfAj0dg0U6GcEY0Kp6LiSTCZ8l8jl1qVdQyToMyKmtZg/q56qsiJQZy3WWRr1MtWkTIZL71xSJXgjwjeUaAw== + dependencies: + d3-array "^3.2.2" + d3-format "^3.1.0" + d3-time-format "^4.1.0" + vega-time "^2.1.2" + vega-util "^1.17.2" + +vega-functions@^5.15.0, vega-functions@~5.15.0: + version "5.15.0" + resolved "https://registry.npmjs.org/vega-functions/-/vega-functions-5.15.0.tgz#a7905e1dd6457efe265dbf954cbc0a5721c484b0" + integrity sha512-pCqmm5efd+3M65jrJGxEy3UGuRksmK6DnWijoSNocnxdCBxez+yqUUVX9o2pN8VxMe3648vZnR9/Vk5CXqRvIQ== + dependencies: + d3-array "^3.2.2" + d3-color "^3.1.0" + d3-geo "^3.1.0" + vega-dataflow "^5.7.6" + vega-expression "^5.1.1" + vega-scale "^7.4.1" + vega-scenegraph "^4.13.0" + vega-selections "^5.4.2" + vega-statistics "^1.9.0" + vega-time "^2.1.2" + vega-util "^1.17.2" + +vega-geo@~4.4.2: + version "4.4.2" + resolved "https://registry.npmjs.org/vega-geo/-/vega-geo-4.4.2.tgz#da4a08ee39c9488bfc4fe6493779f584dd8bb412" + integrity sha512-unuV/UxUHf6UJu6GYxMZonC3SZlMfFXYLOkgEsRSvmsMPt3+CVv8FmG88dXNRUJUrdROrJepgecqx0jOwMSnGA== + dependencies: + d3-array "^3.2.2" + d3-color "^3.1.0" + d3-geo "^3.1.0" + vega-canvas "^1.2.7" + vega-dataflow "^5.7.6" 
+ vega-projection "^1.6.1" + vega-statistics "^1.9.0" + vega-util "^1.17.2" + +vega-hierarchy@~4.1.2: + version "4.1.2" + resolved "https://registry.npmjs.org/vega-hierarchy/-/vega-hierarchy-4.1.2.tgz#e42938c42527b392b110b1e3bf89eaa456dba1b8" + integrity sha512-m+xDtT5092YPSnV0rdTLW+AWmoCb+A54JQ66MUJwiDBpKxvfKnTiQeuiWDU2YudjUoXZN9EBOcI6QHF8H2Lu2A== + dependencies: + d3-hierarchy "^3.1.2" + vega-dataflow "^5.7.6" + vega-util "^1.17.2" + +vega-label@~1.3.0: + version "1.3.0" + resolved "https://registry.npmjs.org/vega-label/-/vega-label-1.3.0.tgz#21b3e5ef40e63f51ac987a449d183068c4961503" + integrity sha512-EfSFSCWAwVPsklM5g0gUEuohALgryuGC/SKMmsOH7dYT/bywmLBZhLVbrE+IHJAUauoGrMhYw1mqnXL/0giJBg== + dependencies: + vega-canvas "^1.2.7" + vega-dataflow "^5.7.6" + vega-scenegraph "^4.13.0" + vega-util "^1.17.2" + +vega-lite@^5.6.1: + version "5.21.0" + resolved "https://registry.npmjs.org/vega-lite/-/vega-lite-5.21.0.tgz#21ce8b905a02ba364b7b1d7ef471497ba3e12e93" + integrity sha512-hNxM9nuMqpI1vkUOhEx6ewEf23WWLmJxSFJ4TA86AW43ixJyqcLV+iSCO0NipuVTE0rlDcc2e8joSewWyOlEwA== + dependencies: + json-stringify-pretty-compact "~3.0.0" + tslib "~2.6.3" + vega-event-selector "~3.0.1" + vega-expression "~5.1.1" + vega-util "~1.17.2" + yargs "~17.7.2" + +vega-loader@^4.5.2, vega-loader@~4.5.2: + version "4.5.2" + resolved "https://registry.npmjs.org/vega-loader/-/vega-loader-4.5.2.tgz#7212f093c397b153f69f7e6cfef47817c17c5c01" + integrity sha512-ktIdGz3DRIS3XfTP9lJ6oMT5cKwC86nQkjUbXZbOtwXQFVNE2xVWBuH13GP6FKUZxg5hJCMtb5v/e/fwTvhKsQ== + dependencies: + d3-dsv "^3.0.1" + node-fetch "^2.6.7" + topojson-client "^3.1.0" + vega-format "^1.1.2" + vega-util "^1.17.2" + +vega-parser@~6.4.0: + version "6.4.0" + resolved "https://registry.npmjs.org/vega-parser/-/vega-parser-6.4.0.tgz#6a12f07f0f9178492a17842efe7e1f51a2d36bed" + integrity sha512-/hFIJs0yITxfvLIfhhcpUrcbKvu4UZYoMGmly5PSsbgo60oAsVQW8ZbX2Ji3iNFqZJh1ifoX/P0j+9wep1OISw== + dependencies: + vega-dataflow "^5.7.6" + vega-event-selector 
"^3.0.1" + vega-functions "^5.15.0" + vega-scale "^7.4.1" + vega-util "^1.17.2" + +vega-projection@^1.6.1, vega-projection@~1.6.1: + version "1.6.1" + resolved "https://registry.npmjs.org/vega-projection/-/vega-projection-1.6.1.tgz#da687abc60f4a93bb888385beb23e0a1000f8b57" + integrity sha512-sqfnAAHumU7MWU1tQN3b6HNgKGF3legek0uLHhjLKcDJQxEc7kwcD18txFz2ffQks6d5j+AUhBiq4GARWf0DEQ== + dependencies: + d3-geo "^3.1.0" + d3-geo-projection "^4.0.0" + vega-scale "^7.4.1" + +vega-regression@~1.3.0: + version "1.3.0" + resolved "https://registry.npmjs.org/vega-regression/-/vega-regression-1.3.0.tgz#3e68e234fa9460041fac082c6a3469c896d436a8" + integrity sha512-gxOQfmV7Ft/MYKpXDEo09WZyBuKOBqxqDRWay9KtfGq/E0Y4vbTPsWLv2cB1ToPJdKE6XSN6Re9tCIw5M/yMUg== + dependencies: + d3-array "^3.2.2" + vega-dataflow "^5.7.6" + vega-statistics "^1.9.0" + vega-util "^1.17.2" + +vega-runtime@^6.2.0, vega-runtime@~6.2.0: + version "6.2.0" + resolved "https://registry.npmjs.org/vega-runtime/-/vega-runtime-6.2.0.tgz#10f435089fff11d8e1b49cb0cbab8041731e6f06" + integrity sha512-30UXbujWjKNd5aeP+oeHuwFmzuyVYlBj4aDy9+AjfWLECu8wJt4K01vwegcaGPdCWcPLVIv4Oa9Lob4mcXn5KQ== + dependencies: + vega-dataflow "^5.7.6" + vega-util "^1.17.2" + +vega-scale@^7.4.1, vega-scale@~7.4.1: + version "7.4.1" + resolved "https://registry.npmjs.org/vega-scale/-/vega-scale-7.4.1.tgz#2dcd3e39ebb00269b03a8be86e44c7b48c67442a" + integrity sha512-dArA28DbV/M92O2QvswnzCmQ4bq9WwLKUoyhqFYWCltmDwkmvX7yhqiFLFMWPItIm7mi4Qyoygby6r4DKd1X2A== + dependencies: + d3-array "^3.2.2" + d3-interpolate "^3.0.1" + d3-scale "^4.0.2" + d3-scale-chromatic "^3.1.0" + vega-time "^2.1.2" + vega-util "^1.17.2" + +vega-scenegraph@^4.13.0, vega-scenegraph@~4.13.0: + version "4.13.0" + resolved "https://registry.npmjs.org/vega-scenegraph/-/vega-scenegraph-4.13.0.tgz#c4fa5c82773f6244a9ca8b01a44e380adf03fabd" + integrity sha512-nfl45XtuqB5CxyIZJ+bbJ+dofzosPCRlmF+eUQo+0J23NkNXsTzur+1krJDSdhcw0SOYs4sbYRoMz1cpuOM4+Q== + dependencies: + d3-path "^3.1.0" + d3-shape 
"^3.2.0" + vega-canvas "^1.2.7" + vega-loader "^4.5.2" + vega-scale "^7.4.1" + vega-util "^1.17.2" + +vega-selections@^5.4.2: + version "5.4.2" + resolved "https://registry.npmjs.org/vega-selections/-/vega-selections-5.4.2.tgz#cb4f41f5d4c0ee924ebf131b8dbd43e7885bcad4" + integrity sha512-99FUhYmg0jOJr2/K4TcEURmJRkuibrCDc8KBUX7qcQEITzrZ5R6a4QE+sarCvbb3hi8aA9GV2oyST6MQeA9mgQ== + dependencies: + d3-array "3.2.4" + vega-expression "^5.0.1" + vega-util "^1.17.1" + +vega-statistics@^1.7.9, vega-statistics@^1.9.0, vega-statistics@~1.9.0: + version "1.9.0" + resolved "https://registry.npmjs.org/vega-statistics/-/vega-statistics-1.9.0.tgz#7d6139cea496b22d60decfa6abd73346f70206f9" + integrity sha512-GAqS7mkatpXcMCQKWtFu1eMUKLUymjInU0O8kXshWaQrVWjPIO2lllZ1VNhdgE0qGj4oOIRRS11kzuijLshGXQ== + dependencies: + d3-array "^3.2.2" + +vega-time@^2.1.2, vega-time@~2.1.2: + version "2.1.2" + resolved "https://registry.npmjs.org/vega-time/-/vega-time-2.1.2.tgz#0c414e74780613d6d3234fb97f19b50c0ebd9f49" + integrity sha512-6rXc6JdDt8MnCRy6UzUCsa6EeFycPDmvioMddLfKw38OYCV8pRQC5nw44gyddOwXgUTJLiCtn/sp53P0iA542A== + dependencies: + d3-array "^3.2.2" + d3-time "^3.1.0" + vega-util "^1.17.2" + +vega-transforms@~4.12.0: + version "4.12.0" + resolved "https://registry.npmjs.org/vega-transforms/-/vega-transforms-4.12.0.tgz#6a69e0b67934b0c0a40a6f607fdb543bf749955e" + integrity sha512-bh/2Qbj85O70mjfLRgPKAsABArgSUP0k+GjmaY54zukIRxoGxKju+85nigeX/aR/INpEqNWif+5lL+NvmyWA5w== + dependencies: + d3-array "^3.2.2" + vega-dataflow "^5.7.6" + vega-statistics "^1.9.0" + vega-time "^2.1.2" + vega-util "^1.17.2" + +vega-typings@~1.3.1: + version "1.3.1" + resolved "https://registry.npmjs.org/vega-typings/-/vega-typings-1.3.1.tgz#025a6031505794b44d9b6e2c49d4551b8918d4ae" + integrity sha512-j9Sdgmvowz09jkMgTFGVfiv7ycuRP/TQkdHRPXIYwt3RDgPQn7inyFcJ8C8ABFt4MiMWdjOwbneF6KWW8TRXIw== + dependencies: + "@types/geojson" "7946.0.4" + vega-event-selector "^3.0.1" + vega-expression "^5.1.1" + vega-util "^1.17.2" + 
+vega-util@^1.17.1, vega-util@^1.17.2, vega-util@~1.17.2: + version "1.17.2" + resolved "https://registry.npmjs.org/vega-util/-/vega-util-1.17.2.tgz#f69aa09fd5d6110c19c4a0f0af9e35945b99987d" + integrity sha512-omNmGiZBdjm/jnHjZlywyYqafscDdHaELHx1q96n5UOz/FlO9JO99P4B3jZg391EFG8dqhWjQilSf2JH6F1mIw== + +vega-view-transforms@~4.6.0: + version "4.6.0" + resolved "https://registry.npmjs.org/vega-view-transforms/-/vega-view-transforms-4.6.0.tgz#829d56ca3c8116b0dded4ec0502f4ac70253de9a" + integrity sha512-z3z66aJTA3ZRo4oBY4iBXnn+A4KqBGZT/UrlKDbm+7Ec+Ip+hK2tF8Kmhp/WNcMsDZoUWFqLJgR2VgOgvJk9RA== + dependencies: + vega-dataflow "^5.7.6" + vega-scenegraph "^4.13.0" + vega-util "^1.17.2" + +vega-view@~5.13.0: + version "5.13.0" + resolved "https://registry.npmjs.org/vega-view/-/vega-view-5.13.0.tgz#8ea96da9fcdf42fe7c0e95fe6258933477524745" + integrity sha512-ZPAAQ3iYz6YrQjJoDT+0bcxJkXt9PKF5v4OO7Omw8PFhkIv++jFXeKlQTW1bBtyQ92dkdGGHv5lYY67Djqjf3A== + dependencies: + d3-array "^3.2.2" + d3-timer "^3.0.1" + vega-dataflow "^5.7.6" + vega-format "^1.1.2" + vega-functions "^5.15.0" + vega-runtime "^6.2.0" + vega-scenegraph "^4.13.0" + vega-util "^1.17.2" + +vega-voronoi@~4.2.3: + version "4.2.3" + resolved "https://registry.npmjs.org/vega-voronoi/-/vega-voronoi-4.2.3.tgz#54c4bb96b9b94c3fa0160bee24695dcb9d583fe1" + integrity sha512-aYYYM+3UGqwsOx+TkVtF1IZfguy0H7AN79dR8H0nONRIc+vhk/lbnlkgwY2nSzEu0EZ4b5wZxeGoDBEVmdDEcg== + dependencies: + d3-delaunay "^6.0.2" + vega-dataflow "^5.7.6" + vega-util "^1.17.2" + +vega-wordcloud@~4.1.5: + version "4.1.5" + resolved "https://registry.npmjs.org/vega-wordcloud/-/vega-wordcloud-4.1.5.tgz#789c9e67225c77f3f35a6fc052beec1c2bdc8b5e" + integrity sha512-p+qXU3cb9VeWzJ/HEdax0TX2mqDJcSbrCIfo2d/EalOXGkvfSLKobsmMQ8DxPbtVp0uhnpvfCGDyMJw+AzcI2A== + dependencies: + vega-canvas "^1.2.7" + vega-dataflow "^5.7.6" + vega-scale "^7.4.1" + vega-statistics "^1.9.0" + vega-util "^1.17.2" + +vega@^5.20.0: + version "5.30.0" + resolved 
"https://registry.npmjs.org/vega/-/vega-5.30.0.tgz#d12350c829878b481453ab28ce10855a954df06d" + integrity sha512-ZGoC8LdfEUV0LlXIuz7hup9jxuQYhSaWek2M7r9dEHAPbPrzSQvKXZ0BbsJbrarM100TGRpTVN/l1AFxCwDkWw== + dependencies: + vega-crossfilter "~4.1.2" + vega-dataflow "~5.7.6" + vega-encode "~4.10.1" + vega-event-selector "~3.0.1" + vega-expression "~5.1.1" + vega-force "~4.2.1" + vega-format "~1.1.2" + vega-functions "~5.15.0" + vega-geo "~4.4.2" + vega-hierarchy "~4.1.2" + vega-label "~1.3.0" + vega-loader "~4.5.2" + vega-parser "~6.4.0" + vega-projection "~1.6.1" + vega-regression "~1.3.0" + vega-runtime "~6.2.0" + vega-scale "~7.4.1" + vega-scenegraph "~4.13.0" + vega-statistics "~1.9.0" + vega-time "~2.1.2" + vega-transforms "~4.12.0" + vega-typings "~1.3.1" + vega-util "~1.17.2" + vega-view "~5.13.0" + vega-view-transforms "~4.6.0" + vega-voronoi "~4.2.3" + vega-wordcloud "~4.1.5" + +vscode-jsonrpc@8.2.0: + version "8.2.0" + resolved "https://registry.npmjs.org/vscode-jsonrpc/-/vscode-jsonrpc-8.2.0.tgz#f43dfa35fb51e763d17cd94dcca0c9458f35abf9" + integrity sha512-C+r0eKJUIfiDIfwJhria30+TYWPtuHJXHtI7J0YlOmKAo7ogxP20T0zxB7HZQIFhIyvoBPwWskjxrvAtfjyZfA== + +vscode-jsonrpc@^6.0.0: + version "6.0.0" + resolved "https://registry.npmjs.org/vscode-jsonrpc/-/vscode-jsonrpc-6.0.0.tgz#108bdb09b4400705176b957ceca9e0880e9b6d4e" + integrity sha512-wnJA4BnEjOSyFMvjZdpiOwhSq9uDoK8e/kpRJDTaMYzwlkrhG1fwDIZI94CLsLzlCK5cIbMMtFlJlfR57Lavmg== + +vscode-jsonrpc@^8.0.2: + version "8.2.1" + resolved "https://registry.npmjs.org/vscode-jsonrpc/-/vscode-jsonrpc-8.2.1.tgz#a322cc0f1d97f794ffd9c4cd2a898a0bde097f34" + integrity sha512-kdjOSJ2lLIn7r1rtrMbbNCHjyMPfRnowdKjBQ+mGq6NAW5QY2bEZC/khaC5OR8svbbjvLEaIXkOq45e2X9BIbQ== + +vscode-languageserver-protocol@^3.17.0: + version "3.17.5" + resolved "https://registry.npmjs.org/vscode-languageserver-protocol/-/vscode-languageserver-protocol-3.17.5.tgz#864a8b8f390835572f4e13bd9f8313d0e3ac4bea" + integrity 
sha512-mb1bvRJN8SVznADSGWM9u/b07H7Ecg0I3OgXDuLdn307rl/J3A9YD6/eYOssqhecL27hK1IPZAsaqh00i/Jljg== + dependencies: + vscode-jsonrpc "8.2.0" + vscode-languageserver-types "3.17.5" + +vscode-languageserver-types@3.17.5: + version "3.17.5" + resolved "https://registry.npmjs.org/vscode-languageserver-types/-/vscode-languageserver-types-3.17.5.tgz#3273676f0cf2eab40b3f44d085acbb7f08a39d8a" + integrity sha512-Ld1VelNuX9pdF39h2Hgaeb5hEZM2Z3jUrrMgWQAu82jMtZp7p3vJT3BzToKtZI7NgQssZje5o0zryOrhQvzQAg== + +vscode-ws-jsonrpc@~1.0.2: + version "1.0.2" + resolved "https://registry.npmjs.org/vscode-ws-jsonrpc/-/vscode-ws-jsonrpc-1.0.2.tgz#ead2efd66293f331ccc220222ae1aeca4bb5b2c1" + integrity sha512-09OpRC0RcqZs4DleJRgs+R+7gQkwb4tgvsL43lzVZwW4N5NO3H/9sLNeKPBt83k7WyA8qBZjrzM6X7tKFpFrjQ== + dependencies: + vscode-jsonrpc "^8.0.2" + +w3c-keyname@^2.2.4: + version "2.2.8" + resolved "https://registry.npmjs.org/w3c-keyname/-/w3c-keyname-2.2.8.tgz#7b17c8c6883d4e8b86ac8aba79d39e880f8869c5" + integrity sha512-dpojBhNsCNN7T82Tm7k26A6G9ML3NkhDsnw9n/eoxSRlVBB4CEtIQ/KTCLI2Fwf3ataSXRhYFkQi3SlnFwPvPQ== + +webidl-conversions@^3.0.0: + version "3.0.1" + resolved "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz#24534275e2a7bc6be7bc86611cc16ae0a5654871" + integrity sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ== + +whatwg-url@^5.0.0: + version "5.0.0" + resolved "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz#966454e8765462e37644d3626f6742ce8b70965d" + integrity sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw== + dependencies: + tr46 "~0.0.3" + webidl-conversions "^3.0.0" + +wrap-ansi@^7.0.0: + version "7.0.0" + resolved "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz#67e145cff510a6a6984bdf1152911d69d2eb9e43" + integrity sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q== + dependencies: + ansi-styles "^4.0.0" + 
string-width "^4.1.0" + strip-ansi "^6.0.0" + +ws@^8.11.0: + version "8.18.0" + resolved "https://registry.npmjs.org/ws/-/ws-8.18.0.tgz#0d7505a6eafe2b0e712d232b42279f53bc289bbc" + integrity sha512-8VbfWfHLbbwu3+N6OKsOMpBdT4kXPDDB9cJk2bJ6mh9ucxdlnNvH1e+roYkKmN9Nxw2yjz7VzeO9oOz2zJ04Pw== + +y-protocols@^1.0.5: + version "1.0.6" + resolved "https://registry.npmjs.org/y-protocols/-/y-protocols-1.0.6.tgz#66dad8a95752623443e8e28c0e923682d2c0d495" + integrity sha512-vHRF2L6iT3rwj1jub/K5tYcTT/mEYDUppgNPXwp8fmLpui9f7Yeq3OEtTLVF012j39QnV+KEQpNqoN7CWU7Y9Q== + dependencies: + lib0 "^0.2.85" + +y18n@^5.0.5: + version "5.0.8" + resolved "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz#7f4934d0f7ca8c56f95314939ddcd2dd91ce1d55" + integrity sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA== + +yargs-parser@^21.1.1: + version "21.1.1" + resolved "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz#9096bceebf990d21bb31fa9516e0ede294a77d35" + integrity sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw== + +yargs@~17.7.2: + version "17.7.2" + resolved "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz#991df39aca675a192b816e1e0363f9d75d2aa269" + integrity sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w== + dependencies: + cliui "^8.0.1" + escalade "^3.1.1" + get-caller-file "^2.0.5" + require-directory "^2.1.1" + string-width "^4.2.3" + y18n "^5.0.5" + yargs-parser "^21.1.1" + +yarn-deduplicate@^6.0.1: + version "6.0.2" + resolved "https://registry.npmjs.org/yarn-deduplicate/-/yarn-deduplicate-6.0.2.tgz#63498d2d4c3a8567e992a994ce0ab51aa5681f2e" + integrity sha512-Efx4XEj82BgbRJe5gvQbZmEO7pU5DgHgxohYZp98/+GwPqdU90RXtzvHirb7hGlde0sQqk5G3J3Woyjai8hVqA== + dependencies: + "@yarnpkg/lockfile" "^1.1.0" + commander "^10.0.1" + semver "^7.5.0" + tslib "^2.5.0" + +yjs@^13.5.40: + version "13.6.19" + resolved 
"https://registry.npmjs.org/yjs/-/yjs-13.6.19.tgz#66999f41254ab65be8c8e71bd767d124ad600909" + integrity sha512-GNKw4mEUn5yWU2QPHRx8jppxmCm9KzbBhB4qJLUJFiiYD0g/tDVgXQ7aPkyh01YO28kbs2J/BEbWBagjuWyejw== + dependencies: + lib0 "^0.2.86"