diff --git a/.github/workflows/codeflash.yml b/.github/workflows/codeflash.yml
index b61c58830da3..5512597d2b89 100644
--- a/.github/workflows/codeflash.yml
+++ b/.github/workflows/codeflash.yml
@@ -3,7 +3,7 @@ name: Codeflash
on:
pull_request:
paths:
- - "src/backend/base/langflow/**"
+ - "src/packages/base/langflow/**"
workflow_dispatch:
concurrency:
@@ -31,7 +31,7 @@ jobs:
prune-cache: false
- run: uv sync
- name: Run Codeflash Optimizer
- working-directory: ./src/backend/base
+ working-directory: ./src/packages/base
continue-on-error: true
run: uv run codeflash
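Every file below repeats the same two renames (src/backend/base → src/packages/base, src/lfx → src/packages/core), so a repo-wide sweep for stragglers is cheap insurance; a minimal check, assuming it is run from the repository root:

```bash
# Surface any reference to the pre-move layout that the diff missed.
# git grep exits 1 when nothing matches, i.e. when the rename is complete.
git grep -n -e 'src/backend/base' -e 'src/lfx' || echo "no stale paths found"
```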
diff --git a/.github/workflows/cross-platform-test.yml b/.github/workflows/cross-platform-test.yml
index c6303f5b0da9..bb6660d19c81 100644
--- a/.github/workflows/cross-platform-test.yml
+++ b/.github/workflows/cross-platform-test.yml
@@ -50,16 +50,16 @@ jobs:
run: make build_langflow_base args="--wheel"
- name: Move base package to correct directory
run: |
- # Base package builds to dist/ but should be in src/backend/base/dist/
- mkdir -p src/backend/base/dist
- mv dist/langflow_base*.whl src/backend/base/dist/
+ # Base package builds to dist/ but should be in src/packages/base/dist/
+ mkdir -p src/packages/base/dist
+ mv dist/langflow_base*.whl src/packages/base/dist/
- name: Build main package
run: make build_langflow args="--wheel"
- name: Upload base artifact
uses: actions/upload-artifact@v4
with:
name: adhoc-dist-base
- path: src/backend/base/dist
+ path: src/packages/base/dist
- name: Upload main artifact
uses: actions/upload-artifact@v4
with:
diff --git a/.github/workflows/docker-build.yml b/.github/workflows/docker-build.yml
index f92adb6c4926..7cd9ed697055 100644
--- a/.github/workflows/docker-build.yml
+++ b/.github/workflows/docker-build.yml
@@ -201,7 +201,7 @@ jobs:
- name: Install the project
run: |
if [[ "${{ inputs.release_type }}" == "base" || "${{ inputs.release_type }}" == "nightly-base" ]]; then
- uv sync --directory src/backend/base --no-dev --no-sources
+ uv sync --directory src/packages/base --no-dev --no-sources
else
uv sync --no-dev --no-sources
fi
diff --git a/.github/workflows/nightly_build.yml b/.github/workflows/nightly_build.yml
index 14eeac84a3b2..e021d039faf0 100644
--- a/.github/workflows/nightly_build.yml
+++ b/.github/workflows/nightly_build.yml
@@ -88,10 +88,10 @@ jobs:
uv run ./scripts/ci/update_pyproject_combined.py main $MAIN_TAG $BASE_TAG $LFX_TAG
uv lock
- cd src/backend/base && uv lock && cd ../../..
- cd src/lfx && uv lock && cd ../..
+ cd src/packages/base && uv lock && cd ../../..
+ cd src/packages/core && uv lock && cd ../../..
- git add pyproject.toml src/backend/base/pyproject.toml src/lfx/pyproject.toml uv.lock src/backend/base/uv.lock
+ git add pyproject.toml src/packages/base/pyproject.toml src/packages/core/pyproject.toml uv.lock src/packages/base/uv.lock
git commit -m "Update version and project name"
echo "Tagging main with $MAIN_TAG"
@@ -114,7 +114,7 @@ jobs:
- name: Retrieve Base Tag
id: retrieve_base_tag
- working-directory: src/backend/base
+ working-directory: src/packages/base
run: |
# If the main tag already exists, we need to retrieve the base version from the main tag codebase.
version=$(uv tree | grep 'langflow-base' | awk '{print $3}' | head -n 1)
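The relocked step above has to keep each `cd ../../..` in sync with directory depth by hand; a sketch using subshells (same commands as the diff, not part of it) keeps the `git add` that follows anchored at the repository root regardless of how deep the packages sit:

```bash
# Each lock runs in a subshell, so the parent shell (and the git add
# that follows) never leaves the repository root.
(cd src/packages/base && uv lock)
(cd src/packages/core && uv lock)
git add pyproject.toml src/packages/base/pyproject.toml \
        src/packages/core/pyproject.toml uv.lock src/packages/base/uv.lock
```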
diff --git a/.github/workflows/release-lfx.yml b/.github/workflows/release-lfx.yml
index 3ccff33baf5c..64d7e285ea6f 100644
--- a/.github/workflows/release-lfx.yml
+++ b/.github/workflows/release-lfx.yml
@@ -58,7 +58,7 @@ jobs:
- name: Check version
id: check
run: |
- cd src/lfx
+ cd src/packages/core
# Use uv tree to get package info, consistent with nightly workflow
name=$(uv tree | grep 'lfx' | head -n 1 | awk '{print $1}')
version=$(uv tree | grep 'lfx' | head -n 1 | awk '{print $2}')
@@ -107,12 +107,12 @@ jobs:
- name: Run LFX tests
run: |
- cd src/lfx
+ cd src/packages/core
make test
- name: Test CLI installation
run: |
- cd src/lfx
+ cd src/packages/core
uv pip install .
uv run lfx --help
uv run lfx run --help
@@ -142,7 +142,7 @@ jobs:
- name: Verify Version
id: check-version
run: |
- cd src/lfx
+ cd src/packages/core
# Use uv tree to get package info, consistent with nightly workflow
name=$(uv tree | grep 'lfx' | head -n 1 | awk '{print $1}')
version=$(uv tree | grep 'lfx' | head -n 1 | awk '{print $2}')
@@ -166,20 +166,20 @@ jobs:
- name: Build distribution
run: |
- cd src/lfx
+ cd src/packages/core
rm -rf dist/
uv build --wheel --out-dir dist
- name: Check build artifacts
run: |
- cd src/lfx
+ cd src/packages/core
ls -la dist/
# Verify wheel contents
unzip -l dist/*.whl | grep -E "(lfx/__main__.py|lfx/cli/run.py|lfx/cli/commands.py)"
- name: Test installation from wheel
run: |
- cd src/lfx
+ cd src/packages/core
uv pip install dist/*.whl --force-reinstall
uv run lfx --help
echo "LFX CLI test completed successfully"
@@ -188,7 +188,7 @@ jobs:
uses: actions/upload-artifact@v4
with:
name: lfx-dist
- path: src/lfx/dist/
+ path: src/packages/core/dist/
retention-days: 5
- name: Publish to PyPI
@@ -196,7 +196,7 @@ jobs:
env:
UV_PUBLISH_TOKEN: ${{ secrets.PYPI_API_TOKEN }}
run: |
- cd src/lfx
+ cd src/packages/core
uv publish dist/*.whl
build-docker:
@@ -250,7 +250,7 @@ jobs:
uses: docker/build-push-action@v5
with:
context: .
- file: src/lfx/docker/Dockerfile${{ matrix.variant == 'alpine' && '.alpine' || '' }}
+ file: src/packages/core/docker/Dockerfile${{ matrix.variant == 'alpine' && '.alpine' || '' }}
platforms: linux/amd64,linux/arm64
push: true
tags: ${{ steps.meta.outputs.tags }}
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index cfa9a065be7d..606b856c2467 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -100,10 +100,10 @@ jobs:
- name: Test CLI
if: steps.check-version.outputs.skipped == 'false'
run: |
- # TODO: Unsure why the whl is not built in src/backend/base/dist
- mkdir src/backend/base/dist
- mv dist/*.whl src/backend/base/dist
- uv pip install src/backend/base/dist/*.whl
+ # TODO: Unsure why the whl is not built in src/packages/base/dist
+ mkdir src/packages/base/dist
+ mv dist/*.whl src/packages/base/dist
+ uv pip install src/packages/base/dist/*.whl
uv run python -m langflow run --host localhost --port 7860 --backend-only &
SERVER_PID=$!
# Wait for the server to start
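The smoke test boots the backend in the background and then waits for it; instead of a fixed sleep, a bounded readiness poll fails fast when the server never comes up. A sketch only — `/health` is assumed to be the readiness route; substitute the actual endpoint if it differs:

```bash
# Poll the backend for up to ~60s before running the CLI checks.
for _ in $(seq 1 30); do
  curl -fsS http://localhost:7860/health >/dev/null && break
  sleep 2
done
```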
@@ -126,7 +126,7 @@ jobs:
uses: actions/upload-artifact@v4
with:
name: dist-base
- path: src/backend/base/dist
+ path: src/packages/base/dist
build-main:
name: Build Langflow Main
@@ -250,7 +250,7 @@ jobs:
uses: actions/download-artifact@v4
with:
name: dist-base
- path: src/backend/base/dist
+ path: src/packages/base/dist
- name: Setup Environment
uses: astral-sh/setup-uv@v6
with:
@@ -261,7 +261,7 @@ jobs:
env:
UV_PUBLISH_TOKEN: ${{ secrets.PYPI_API_TOKEN }}
run: |
- cd src/backend/base && uv publish dist/*.whl
+ cd src/packages/base && uv publish dist/*.whl
publish-main:
name: Publish Langflow Main to PyPI
@@ -353,7 +353,7 @@ jobs:
- name: Check Version
id: check-version
run: |
- cd src/lfx
+ cd src/packages/core
version=$(uv tree | grep 'lfx' | head -n 1 | awk '{print $2}' | sed 's/^v//')
last_released_version=$(curl -s "https://pypi.org/pypi/lfx/json" | jq -r '.releases | keys | .[]' | sort -V | tail -n 1)
if [ "$version" = "$last_released_version" ]; then
@@ -364,19 +364,19 @@ jobs:
fi
- name: Build project for distribution
run: |
- cd src/lfx
+ cd src/packages/core
rm -rf dist/
uv build --wheel --out-dir dist
- name: Test CLI
run: |
- cd src/lfx
+ cd src/packages/core
uv pip install dist/*.whl --force-reinstall
uv run lfx --help
- name: Upload Artifact
uses: actions/upload-artifact@v4
with:
name: dist-lfx
- path: src/lfx/dist
+ path: src/packages/core/dist
publish-lfx:
name: Publish LFX to PyPI
@@ -388,7 +388,7 @@ jobs:
uses: actions/download-artifact@v4
with:
name: dist-lfx
- path: src/lfx/dist
+ path: src/packages/core/dist
- name: Setup Environment
uses: astral-sh/setup-uv@v6
with:
@@ -398,7 +398,7 @@ jobs:
env:
UV_PUBLISH_TOKEN: ${{ secrets.PYPI_API_TOKEN }}
run: |
- cd src/lfx && uv publish dist/*.whl
+ cd src/packages/core && uv publish dist/*.whl
create_release:
name: Create Release
diff --git a/.github/workflows/release_nightly.yml b/.github/workflows/release_nightly.yml
index 5ae568018f58..0371296c690b 100644
--- a/.github/workflows/release_nightly.yml
+++ b/.github/workflows/release_nightly.yml
@@ -99,12 +99,12 @@ jobs:
python-version: ${{ env.PYTHON_VERSION }}
prune-cache: false
- name: Install LFX dependencies
- run: cd src/lfx && uv sync
+ run: cd src/packages/core && uv sync
- name: Verify Nightly Name and Version
id: verify
run: |
- cd src/lfx
+ cd src/packages/core
name=$(uv tree | grep 'lfx' | head -n 1 | awk '{print $1}')
version=$(uv tree | grep 'lfx' | head -n 1 | awk '{print $2}')
if [ "$name" != "lfx-nightly" ]; then
@@ -121,13 +121,13 @@ jobs:
- name: Build LFX for distribution
run: |
- cd src/lfx
+ cd src/packages/core
rm -rf dist/
uv build --wheel --out-dir dist
- name: Test LFX CLI
run: |
- cd src/lfx
+ cd src/packages/core
uv pip install dist/*.whl --force-reinstall
uv run lfx --help
echo "LFX CLI test completed successfully"
@@ -138,7 +138,7 @@ jobs:
uses: actions/upload-artifact@v4
with:
name: dist-nightly-lfx
- path: src/lfx/dist
+ path: src/packages/core/dist
build-nightly-base:
name: Build Langflow Nightly Base
@@ -190,16 +190,16 @@ jobs:
- name: Build Langflow Base for distribution
run: |
- rm -rf src/backend/base/dist
+ rm -rf src/packages/base/dist
rm -rf dist
make build base=true args="--wheel"
- name: Test Langflow Base CLI
run: |
- # TODO: Unsure why the whl is not built in src/backend/base/dist
- mkdir src/backend/base/dist
- mv dist/*.whl src/backend/base/dist/
- uv pip install src/backend/base/dist/*.whl
+ # TODO: Unsure why the whl is not built in src/packages/base/dist
+ mkdir src/packages/base/dist
+ mv dist/*.whl src/packages/base/dist/
+ uv pip install src/packages/base/dist/*.whl
uv run python -m langflow run --host localhost --port 7860 --backend-only &
SERVER_PID=$!
# Wait for the server to start
@@ -221,7 +221,7 @@ jobs:
uses: actions/upload-artifact@v4
with:
name: dist-nightly-base
- path: src/backend/base/dist
+ path: src/packages/base/dist
build-nightly-main:
name: Build Langflow Nightly Main
@@ -317,7 +317,7 @@ jobs:
uses: actions/download-artifact@v4
with:
name: dist-nightly-lfx
- path: src/lfx/dist
+ path: src/packages/core/dist
- name: Setup Environment
uses: astral-sh/setup-uv@v6
with:
@@ -342,7 +342,7 @@ jobs:
uses: actions/download-artifact@v4
with:
name: dist-nightly-base
- path: src/backend/base/dist
+ path: src/packages/base/dist
- name: Setup Environment
uses: astral-sh/setup-uv@v6
with:
diff --git a/.github/workflows/template-tests.yml b/.github/workflows/template-tests.yml
index 6a763901149f..a08a2a02aefd 100644
--- a/.github/workflows/template-tests.yml
+++ b/.github/workflows/template-tests.yml
@@ -3,9 +3,9 @@ name: Template Tests
on:
pull_request:
paths:
- - 'src/backend/base/langflow/initial_setup/starter_projects/**'
+ - 'src/packages/base/langflow/initial_setup/starter_projects/**'
- - 'src/backend/tests/unit/template/test_starter_projects.py'
+ - 'src/packages/base/tests/unit/template/test_starter_projects.py'
- - 'src/backend/base/langflow/utils/template_validation.py'
+ - 'src/packages/base/langflow/utils/template_validation.py'
- '.github/workflows/template-tests.yml'
workflow_dispatch:
diff --git a/Makefile b/Makefile
index 3e83a3abe1c6..9668ee0a3a75 100644
--- a/Makefile
+++ b/Makefile
@@ -17,7 +17,7 @@ host ?= 0.0.0.0
port ?= 7860
env ?= .env
open_browser ?= true
-path = src/backend/base/langflow/frontend
+path = src/packages/base/langflow/frontend
workers ?= 1
async ?= true
lf ?= false
@@ -84,7 +84,7 @@ clean_python_cache:
clean_npm_cache:
@echo "Cleaning npm cache..."
cd src/frontend && npm cache clean --force
- $(call CLEAR_DIRS,src/frontend/node_modules src/frontend/build src/backend/base/langflow/frontend)
+ $(call CLEAR_DIRS,src/frontend/node_modules src/frontend/build src/packages/base/langflow/frontend)
rm -f src/frontend/package-lock.json
@echo "$(GREEN)NPM cache and frontend directories cleaned.$(NC)"
@@ -93,7 +93,7 @@ clean_frontend_build: ## clean frontend build artifacts to ensure fresh build
@echo " - Removing src/frontend/build directory"
$(call CLEAR_DIRS,src/frontend/build)
@echo " - Removing built frontend files from backend"
- $(call CLEAR_DIRS,src/backend/base/langflow/frontend)
+ $(call CLEAR_DIRS,src/packages/base/langflow/frontend)
@echo "$(GREEN)Frontend build artifacts cleaned - fresh build guaranteed.$(NC)"
clean_all: clean_python_cache clean_npm_cache # clean all caches and temporary directories
@@ -105,7 +105,7 @@ setup_uv: ## install uv using pipx
add:
@echo 'Adding dependencies'
ifdef devel
- @cd src/backend/base && uv add --group dev $(devel)
+ @cd src/packages/base && uv add --group dev $(devel)
endif
ifdef main
@@ -113,7 +113,7 @@ ifdef main
endif
ifdef base
- @cd src/backend/base && uv add $(base)
+ @cd src/packages/base && uv add $(base)
endif
@@ -151,7 +151,7 @@ unit_tests_looponfail:
lfx_tests: ## run lfx package unit tests
@echo 'Running LFX Package Tests...'
- @cd src/lfx && \
+ @cd src/packages/core && \
uv sync && \
uv run pytest tests/unit -v $(args)
@@ -285,15 +285,15 @@ else
endif
build_and_run: setup_env ## build the project and run it
- $(call CLEAR_DIRS,dist src/backend/base/dist)
+ $(call CLEAR_DIRS,dist src/packages/base/dist)
make build
uv run pip install dist/*.tar.gz
uv run langflow run
build_and_install: ## build the project and install it
@echo 'Removing dist folder'
- $(call CLEAR_DIRS,dist src/backend/base/dist)
- make build && uv run pip install dist/*.whl && pip install src/backend/base/dist/*.whl --force-reinstall
+ $(call CLEAR_DIRS,dist src/packages/base/dist)
+ make build && uv run pip install dist/*.whl && pip install src/packages/base/dist/*.whl --force-reinstall
build: setup_env ## build the frontend static files and package the project
ifdef base
@@ -310,7 +310,7 @@ ifdef main
endif
build_langflow_base:
- cd src/backend/base && uv build $(args)
+ cd src/packages/base && uv build $(args)
build_langflow_backup:
uv lock && uv build
@@ -373,30 +373,30 @@ dcdev_up:
$(DOCKER) compose -f docker/dev.docker-compose.yml up --remove-orphans
lock_base:
- cd src/backend/base && uv lock
+ cd src/packages/base && uv lock
lock_langflow:
uv lock
lock: ## lock dependencies
@echo 'Locking dependencies'
- cd src/backend/base && uv lock
+ cd src/packages/base && uv lock
uv lock
update: ## update dependencies
@echo 'Updating dependencies'
- cd src/backend/base && uv sync --upgrade
+ cd src/packages/base && uv sync --upgrade
uv sync --upgrade
publish_base:
- cd src/backend/base && uv publish
+ cd src/packages/base && uv publish
publish_langflow:
uv publish
publish_base_testpypi:
# TODO: update this to use the test-pypi repository
- cd src/backend/base && uv publish -r test-pypi
+ cd src/packages/base && uv publish -r test-pypi
publish_langflow_testpypi:
# TODO: update this to use the test-pypi repository
@@ -421,73 +421,73 @@ publish_testpypi: ## build the frontend static files and package the project and
lfx_build: ## build the LFX package
@echo 'Building LFX package'
- @cd src/lfx && make build
+ @cd src/packages/core && make build
lfx_publish: ## publish LFX package to PyPI
@echo 'Publishing LFX package'
- @cd src/lfx && make publish
+ @cd src/packages/core && make publish
lfx_publish_testpypi: ## publish LFX package to test PyPI
@echo 'Publishing LFX package to test PyPI'
- @cd src/lfx && make publish_test
+ @cd src/packages/core && make publish_test
lfx_test: ## run LFX tests
@echo 'Running LFX tests'
- @cd src/lfx && make test
+ @cd src/packages/core && make test
lfx_format: ## format LFX code
@echo 'Formatting LFX code'
- @cd src/lfx && make format
+ @cd src/packages/core && make format
lfx_lint: ## lint LFX code
@echo 'Linting LFX code'
- @cd src/lfx && make lint
+ @cd src/packages/core && make lint
lfx_clean: ## clean LFX build artifacts
@echo 'Cleaning LFX build artifacts'
- @cd src/lfx && make clean
+ @cd src/packages/core && make clean
lfx_docker_build: ## build LFX production Docker image
@echo 'Building LFX Docker image'
- @cd src/lfx && make docker_build
+ @cd src/packages/core && make docker_build
lfx_docker_dev: ## start LFX development environment
@echo 'Starting LFX development environment'
- @cd src/lfx && make docker_dev
+ @cd src/packages/core && make docker_dev
lfx_docker_test: ## run LFX tests in Docker
@echo 'Running LFX tests in Docker'
- @cd src/lfx && make docker_test
+ @cd src/packages/core && make docker_test
# example make alembic-revision message="Add user table"
alembic-revision: ## generate a new migration
@echo 'Generating a new Alembic revision'
- cd src/backend/base/langflow/ && uv run alembic revision --autogenerate -m "$(message)"
+ cd src/packages/base/langflow/ && uv run alembic revision --autogenerate -m "$(message)"
alembic-upgrade: ## upgrade database to the latest version
@echo 'Upgrading database to the latest version'
- cd src/backend/base/langflow/ && uv run alembic upgrade head
+ cd src/packages/base/langflow/ && uv run alembic upgrade head
alembic-downgrade: ## downgrade database by one version
@echo 'Downgrading database by one version'
- cd src/backend/base/langflow/ && uv run alembic downgrade -1
+ cd src/packages/base/langflow/ && uv run alembic downgrade -1
alembic-current: ## show current revision
@echo 'Showing current Alembic revision'
- cd src/backend/base/langflow/ && uv run alembic current
+ cd src/packages/base/langflow/ && uv run alembic current
alembic-history: ## show migration history
@echo 'Showing Alembic migration history'
- cd src/backend/base/langflow/ && uv run alembic history --verbose
+ cd src/packages/base/langflow/ && uv run alembic history --verbose
alembic-check: ## check migration status
@echo 'Running alembic check'
- cd src/backend/base/langflow/ && uv run alembic check
+ cd src/packages/base/langflow/ && uv run alembic check
alembic-stamp: ## stamp the database with a specific revision
@echo 'Stamping the database with revision $(revision)'
- cd src/backend/base/langflow/ && uv run alembic stamp $(revision)
+ cd src/packages/base/langflow/ && uv run alembic stamp $(revision)
######################
# VERSION MANAGEMENT
@@ -511,7 +511,7 @@ patch: ## Update version across all projects. Usage: make patch v=1.5.0
python -c "import re; fname='pyproject.toml'; txt=open(fname).read(); txt=re.sub(r'^version = \".*\"', 'version = \"$$LANGFLOW_VERSION\"', txt, flags=re.MULTILINE); txt=re.sub(r'\"langflow-base==.*\"', '\"langflow-base==$$LANGFLOW_BASE_VERSION\"', txt); open(fname, 'w').write(txt)"; \
\
echo "$(GREEN)Updating langflow-base pyproject.toml...$(NC)"; \
- python -c "import re; fname='src/backend/base/pyproject.toml'; txt=open(fname).read(); txt=re.sub(r'^version = \".*\"', 'version = \"$$LANGFLOW_BASE_VERSION\"', txt, flags=re.MULTILINE); open(fname, 'w').write(txt)"; \
+ python -c "import re; fname='src/packages/base/pyproject.toml'; txt=open(fname).read(); txt=re.sub(r'^version = \".*\"', 'version = \"$$LANGFLOW_BASE_VERSION\"', txt, flags=re.MULTILINE); open(fname, 'w').write(txt)"; \
\
echo "$(GREEN)Updating frontend package.json...$(NC)"; \
python -c "import re; fname='src/frontend/package.json'; txt=open(fname).read(); txt=re.sub(r'\"version\": \".*\"', '\"version\": \"$$LANGFLOW_VERSION\"', txt); open(fname, 'w').write(txt)"; \
@@ -519,7 +519,7 @@ patch: ## Update version across all projects. Usage: make patch v=1.5.0
echo "$(GREEN)Validating version changes...$(NC)"; \
if ! grep -q "^version = \"$$LANGFLOW_VERSION\"" pyproject.toml; then echo "$(RED)✗ Main pyproject.toml version validation failed$(NC)"; exit 1; fi; \
if ! grep -q "\"langflow-base==$$LANGFLOW_BASE_VERSION\"" pyproject.toml; then echo "$(RED)✗ Main pyproject.toml langflow-base dependency validation failed$(NC)"; exit 1; fi; \
- if ! grep -q "^version = \"$$LANGFLOW_BASE_VERSION\"" src/backend/base/pyproject.toml; then echo "$(RED)✗ Langflow-base pyproject.toml version validation failed$(NC)"; exit 1; fi; \
+ if ! grep -q "^version = \"$$LANGFLOW_BASE_VERSION\"" src/packages/base/pyproject.toml; then echo "$(RED)✗ Langflow-base pyproject.toml version validation failed$(NC)"; exit 1; fi; \
if ! grep -q "\"version\": \"$$LANGFLOW_VERSION\"" src/frontend/package.json; then echo "$(RED)✗ Frontend package.json version validation failed$(NC)"; exit 1; fi; \
echo "$(GREEN)✓ All versions updated successfully$(NC)"; \
\
@@ -536,7 +536,7 @@ patch: ## Update version across all projects. Usage: make patch v=1.5.0
git status --porcelain; \
exit 1; \
fi; \
- EXPECTED_FILES="pyproject.toml uv.lock src/backend/base/pyproject.toml src/frontend/package.json src/frontend/package-lock.json"; \
+ EXPECTED_FILES="pyproject.toml uv.lock src/packages/base/pyproject.toml src/frontend/package.json src/frontend/package-lock.json"; \
for file in $$EXPECTED_FILES; do \
if ! git status --porcelain | grep -q "$$file"; then \
echo "$(RED)✗ Expected file $$file was not modified$(NC)"; \
@@ -548,7 +548,7 @@ patch: ## Update version across all projects. Usage: make patch v=1.5.0
echo "$(GREEN)Version update complete!$(NC)"; \
echo "$(GREEN)Updated files:$(NC)"; \
echo " - pyproject.toml: $$LANGFLOW_VERSION"; \
- echo " - src/backend/base/pyproject.toml: $$LANGFLOW_BASE_VERSION"; \
+ echo " - src/packages/base/pyproject.toml: $$LANGFLOW_BASE_VERSION"; \
echo " - src/frontend/package.json: $$LANGFLOW_VERSION"; \
echo " - uv.lock: dependency lock updated"; \
echo " - src/frontend/package-lock.json: dependency lock updated"; \
diff --git a/pyproject.toml b/pyproject.toml
index 6f042c452ae2..c68bd8a8ac5b 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -189,13 +189,13 @@ lfx = { workspace = true }
[tool.uv.workspace]
members = [
- "src/backend/base",
+ "src/packages/base",
".",
- "src/lfx",
+ "src/packages/core",
]
[tool.hatch.build.targets.wheel]
-packages = ["src/backend/langflow"]
+packages = ["src/packages/langflow"]
[project.urls]
Repository = "https://github.com/langflow-ai/langflow"
@@ -262,7 +262,7 @@ ignore-regex = '.*(Stati Uniti|Tense=Pres).*'
timeout = 150
timeout_method = "signal"
minversion = "6.0"
-testpaths = ["src/backend/tests", "src/lfx/tests"]
+testpaths = ["src/packages/base/tests", "src/packages/core/tests"]
console_output_style = "progress"
filterwarnings = ["ignore::DeprecationWarning", "ignore::ResourceWarning"]
log_cli = true
@@ -287,7 +287,7 @@ command_line = """
--cov --cov-report=term --cov-report=html
--instafail -ra -n auto -m "not api_key_required"
"""
-source = ["src/backend/base/langflow/"]
+source = ["src/packages/base/langflow/"]
omit = ["*/alembic/*", "tests/*", "*/__init__.py"]
@@ -303,7 +303,7 @@ directory = "coverage"
[tool.ruff]
-exclude = ["src/backend/base/langflow/alembic/*", "src/frontend/tests/assets/*"]
+exclude = ["src/packages/base/langflow/alembic/*", "src/frontend/tests/assets/*"]
line-length = 120
[tool.ruff.lint]
@@ -334,8 +334,8 @@ external = ["RUF027"]
[tool.ruff.lint.per-file-ignores]
"scripts/*" = ["D1", "INP", "T201"]
-"src/backend/base/langflow/alembic/versions/*" = ["INP001", "D415", "PGH003"]
-"src/backend/tests/*" = [
+"src/packages/base/langflow/alembic/versions/*" = ["INP001", "D415", "PGH003"]
+"src/packages/base/tests/*" = [
"D1",
"PLR2004",
"S101",
diff --git a/scripts/ci/update_starter_projects.py b/scripts/ci/update_starter_projects.py
index cb56b0f198ac..8345d4d4fba7 100644
--- a/scripts/ci/update_starter_projects.py
+++ b/scripts/ci/update_starter_projects.py
@@ -12,7 +12,6 @@
update_projects_components_with_latest_component_versions,
)
from langflow.services.utils import initialize_services
-
from lfx.interface.components import get_and_cache_all_types_dict
from lfx.services.deps import get_settings_service
diff --git a/src/backend/.gitignore b/src/backend/.gitignore
deleted file mode 100644
index 831545957b06..000000000000
--- a/src/backend/.gitignore
+++ /dev/null
@@ -1,137 +0,0 @@
-# Byte-compiled / optimized / DLL files
-__pycache__/
-*.py[cod]
-*$py.class
-notebooks
-
-# frontend
-src/frontend
-
-# C extensions
-*.so
-
-# Distribution / packaging
-.Python
-build/
-develop-eggs/
-dist/
-downloads/
-eggs/
-.eggs/
-lib/
-lib64/
-parts/
-sdist/
-var/
-wheels/
-pip-wheel-metadata/
-share/python-wheels/
-*.egg-info/
-.installed.cfg
-*.egg
-MANIFEST
-
-# PyInstaller
-# Usually these files are written by a python script from a template
-# before PyInstaller builds the exe, so as to inject date/other infos into it.
-*.manifest
-*.spec
-
-# Installer logs
-pip-log.txt
-pip-delete-this-directory.txt
-
-# Unit test / coverage reports
-htmlcov/
-.tox/
-.nox/
-.coverage
-.coverage.*
-.cache
-nosetests.xml
-coverage.xml
-*.cover
-*.py,cover
-.hypothesis/
-.pytest_cache/
-
-# Translations
-*.mo
-*.pot
-
-# Django stuff:
-*.log
-local_settings.py
-db.sqlite3
-db.sqlite3-journal
-
-# Flask stuff:
-instance/
-.webassets-cache
-
-# Scrapy stuff:
-.scrapy
-
-# Sphinx documentation
-docs/_build/
-
-# PyBuilder
-target/
-
-# Jupyter Notebook
-.ipynb_checkpoints
-
-# IPython
-profile_default/
-ipython_config.py
-
-# pyenv
-.python-version
-
-# pipenv
-# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
-# However, in case of collaboration, if having platform-specific dependencies or dependencies
-# having no cross-platform support, pipenv may install dependencies that don't work, or not
-# install all needed dependencies.
-#Pipfile.lock
-
-# PEP 582; used by e.g. github.com/David-OConnor/pyflow
-__pypackages__/
-
-# Celery stuff
-celerybeat-schedule
-celerybeat.pid
-
-# SageMath parsed files
-*.sage.py
-
-# Environments
-.env
-.venv
-env/
-venv/
-ENV/
-env.bak/
-venv.bak/
-
-# Spyder project settings
-.spyderproject
-.spyproject
-
-# Rope project settings
-.ropeproject
-
-# mkdocs documentation
-/site
-
-# mypy
-.mypy_cache/
-.dmypy.json
-dmypy.json
-
-# Pyre type checker
-.pyre/
-*.db
-
-# Generated pytest-cov config
-.coveragerc
diff --git a/src/backend/tests/.test_durations b/src/backend/tests/.test_durations
deleted file mode 100644
index 68f000cad627..000000000000
--- a/src/backend/tests/.test_durations
+++ /dev/null
@@ -1,2230 +0,0 @@
-{
- "src/backend/tests/performance/test_server_init.py::test_create_starter_projects": 9.349637124105357,
- "src/backend/tests/performance/test_server_init.py::test_get_and_cache_all_types_dict": 0.009816041041631252,
- "src/backend/tests/performance/test_server_init.py::test_initialize_services": 0.5938955409801565,
- "src/backend/tests/performance/test_server_init.py::test_initialize_super_user": 0.31904370902338997,
- "src/backend/tests/performance/test_server_init.py::test_load_flows": 0.002782625029794872,
- "src/backend/tests/performance/test_server_init.py::test_setup_llm_caching": 0.01124733401229605,
- "src/backend/tests/test_endpoints.py::test_build_vertex_invalid_flow_id": 1.8161861660000795,
- "src/backend/tests/test_endpoints.py::test_build_vertex_invalid_vertex_id": 1.6184064170001875,
- "src/backend/tests/test_endpoints.py::test_get_all": 3.8724166670003797,
- "src/backend/tests/test_endpoints.py::test_get_vertices": 2.3527110839995657,
- "src/backend/tests/test_endpoints.py::test_get_vertices_flow_not_found": 1.781084457999441,
- "src/backend/tests/test_endpoints.py::test_invalid_flow_id": 1.8902806239998426,
- "src/backend/tests/test_endpoints.py::test_invalid_prompt": 1.104316251000455,
- "src/backend/tests/test_endpoints.py::test_invalid_run_with_input_type_chat": 1.7665630830001646,
- "src/backend/tests/test_endpoints.py::test_post_validate_code": 1.2675197489998027,
- "src/backend/tests/test_endpoints.py::test_successful_run_with_input_type_any": 4.90430141699926,
- "src/backend/tests/test_endpoints.py::test_successful_run_with_input_type_chat": 5.548072499999762,
- "src/backend/tests/test_endpoints.py::test_successful_run_with_input_type_text": 6.992585209000026,
- "src/backend/tests/test_endpoints.py::test_successful_run_with_output_type_any": 6.90225587499981,
- "src/backend/tests/test_endpoints.py::test_successful_run_with_output_type_debug": 6.666685292999773,
- "src/backend/tests/test_endpoints.py::test_successful_run_with_output_type_text": 6.525266874999943,
- "src/backend/tests/test_endpoints.py::test_valid_prompt": 1.0876984590004213,
- "src/backend/tests/test_endpoints.py::test_various_prompts[The weather is {weather} today.-expected_input_variables1]": 1.0912359599997217,
- "src/backend/tests/test_endpoints.py::test_various_prompts[This prompt has no variables.-expected_input_variables2]": 1.0882312500002627,
- "src/backend/tests/test_endpoints.py::test_various_prompts[{a}, {b}, and {c} are variables.-expected_input_variables3]": 1.1277481249994707,
- "src/backend/tests/test_endpoints.py::test_various_prompts[{color} is my favorite color.-expected_input_variables0]": 1.2742332080001688,
- "src/backend/tests/test_messages_endpoints.py::test_delete_messages": 1.5091020420009045,
- "src/backend/tests/test_messages_endpoints.py::test_delete_messages_session": 1.510577208000086,
- "src/backend/tests/test_messages_endpoints.py::test_update_message": 1.5425646240005335,
- "src/backend/tests/test_messages_endpoints.py::test_update_message_not_found": 1.5310995830000138,
- "src/backend/tests/test_schema.py::TestInput::test_field_type_str": 0.00023554100016554003,
- "src/backend/tests/test_schema.py::TestInput::test_field_type_type": 0.00021875100037505035,
- "src/backend/tests/test_schema.py::TestInput::test_input_to_dict": 0.00020670800040534232,
- "src/backend/tests/test_schema.py::TestInput::test_invalid_field_type": 0.00023608299989064108,
- "src/backend/tests/test_schema.py::TestInput::test_post_process_type_function": 0.00020749999976032996,
- "src/backend/tests/test_schema.py::TestInput::test_serialize_field_type": 0.0001787500000318687,
- "src/backend/tests/test_schema.py::TestInput::test_validate_type_class": 0.00018495699941922794,
- "src/backend/tests/test_schema.py::TestInput::test_validate_type_string": 0.00019104300008621067,
- "src/backend/tests/test_schema.py::TestOutput::test_output_add_types": 0.00017562499988343916,
- "src/backend/tests/test_schema.py::TestOutput::test_output_default": 0.00018087499984176247,
- "src/backend/tests/test_schema.py::TestOutput::test_output_set_selected": 0.00019041600035052397,
- "src/backend/tests/test_schema.py::TestOutput::test_output_to_dict": 0.00018754199982140562,
- "src/backend/tests/test_schema.py::TestOutput::test_output_validate_display_name": 0.00018495800031814724,
- "src/backend/tests/test_schema.py::TestOutput::test_output_validate_model": 0.00020916699941153638,
- "src/backend/tests/test_schema.py::TestPostProcessType::test_custom_type": 0.00018125099950339063,
- "src/backend/tests/test_schema.py::TestPostProcessType::test_int_type": 0.0001939999992828234,
- "src/backend/tests/test_schema.py::TestPostProcessType::test_list_custom_type": 0.00017420900030629127,
- "src/backend/tests/test_schema.py::TestPostProcessType::test_list_int_type": 0.00018208300025435165,
- "src/backend/tests/test_schema.py::TestPostProcessType::test_union_custom_type": 0.00019349999956830288,
- "src/backend/tests/test_schema.py::TestPostProcessType::test_union_type": 0.00017558299941811129,
- "src/backend/tests/test_user.py::test_add_user": 1.7881504160000077,
- "src/backend/tests/test_user.py::test_data_consistency_after_delete": 1.5578057500001705,
- "src/backend/tests/test_user.py::test_data_consistency_after_update": 1.7415219580002486,
- "src/backend/tests/test_user.py::test_deactivated_user_cannot_access": 1.7204212500000722,
- "src/backend/tests/test_user.py::test_deactivated_user_cannot_login": 1.3222801250003613,
- "src/backend/tests/test_user.py::test_delete_user": 1.5755463759996928,
- "src/backend/tests/test_user.py::test_delete_user_wrong_id": 2.0808037919996423,
- "src/backend/tests/test_user.py::test_inactive_user": 1.3267317489999186,
- "src/backend/tests/test_user.py::test_normal_user_cant_delete_user": 1.7230364579995694,
- "src/backend/tests/test_user.py::test_normal_user_cant_read_all_users": 1.5408454590001384,
- "src/backend/tests/test_user.py::test_patch_reset_password": 2.134319875000074,
- "src/backend/tests/test_user.py::test_patch_user": 1.5280406670003686,
- "src/backend/tests/test_user.py::test_patch_user_wrong_id": 1.5431871670002693,
- "src/backend/tests/test_user.py::test_read_all_users": 1.3631829999994807,
- "src/backend/tests/test_user.py::test_user_waiting_for_approval": 1.7217974579998554,
- "src/backend/tests/test_webhook.py::test_webhook_endpoint": 8.848518459000388,
- "src/backend/tests/test_webhook.py::test_webhook_flow_on_run_endpoint": 4.675444458000584,
- "src/backend/tests/test_webhook.py::test_webhook_with_random_payload": 5.161753501000476,
- "src/backend/tests/unit/api/test_api_utils.py::test_get_outdated_components": 0.0008271670085377991,
- "src/backend/tests/unit/api/test_api_utils.py::test_get_suggestion_message": 0.062292501010233536,
- "src/backend/tests/unit/api/v1/test_api_key.py::test_create_api_key_route": 2.506952625029953,
- "src/backend/tests/unit/api/v1/test_api_key.py::test_create_folder": 12.979961918026675,
- "src/backend/tests/unit/api/v1/test_api_key.py::test_delete_api_key_route": 2.4513380830176175,
- "src/backend/tests/unit/api/v1/test_api_key.py::test_save_store_api_key": 3.2220397089840844,
- "src/backend/tests/unit/api/v1/test_api_schemas.py::test_result_data_response_combined_fields": 0.015468625002540648,
- "src/backend/tests/unit/api/v1/test_api_schemas.py::test_result_data_response_logs": 0.014387874980457127,
- "src/backend/tests/unit/api/v1/test_api_schemas.py::test_result_data_response_nested_structures": 0.008282333030365407,
- "src/backend/tests/unit/api/v1/test_api_schemas.py::test_result_data_response_outputs": 0.016630333993816748,
- "src/backend/tests/unit/api/v1/test_api_schemas.py::test_result_data_response_special_types": 0.6439955829991959,
- "src/backend/tests/unit/api/v1/test_api_schemas.py::test_result_data_response_truncation": 0.03333358297822997,
- "src/backend/tests/unit/api/v1/test_api_schemas.py::test_vertex_build_response_serialization": 0.03763025099760853,
- "src/backend/tests/unit/api/v1/test_api_schemas.py::test_vertex_build_response_with_long_data": 0.008100416016532108,
- "src/backend/tests/unit/api/v1/test_endpoints.py::test_get_config": 1.8237768740218598,
- "src/backend/tests/unit/api/v1/test_endpoints.py::test_get_version": 1.906218040996464,
- "src/backend/tests/unit/api/v1/test_endpoints.py::test_update_component_model_name_options": 2.521151998982532,
- "src/backend/tests/unit/api/v1/test_endpoints.py::test_update_component_outputs": 2.477914999006316,
- "src/backend/tests/unit/api/v1/test_files.py::test_delete_file": 2.4448377909720875,
- "src/backend/tests/unit/api/v1/test_files.py::test_download_file": 2.474699292011792,
- "src/backend/tests/unit/api/v1/test_files.py::test_file_operations": 2.4727731659659185,
- "src/backend/tests/unit/api/v1/test_files.py::test_list_files": 3.317764541017823,
- "src/backend/tests/unit/api/v1/test_files.py::test_upload_file": 2.4290035419980995,
- "src/backend/tests/unit/api/v1/test_files.py::test_upload_file_size_limit": 2.4718297510116827,
- "src/backend/tests/unit/api/v1/test_flows.py::test_create_flow": 2.582368832983775,
- "src/backend/tests/unit/api/v1/test_flows.py::test_create_flows": 2.7334314990148414,
- "src/backend/tests/unit/api/v1/test_flows.py::test_read_basic_examples": 3.6504871669749264,
- "src/backend/tests/unit/api/v1/test_flows.py::test_read_flow": 2.5697394589951728,
- "src/backend/tests/unit/api/v1/test_flows.py::test_read_flows": 2.6436516670219135,
- "src/backend/tests/unit/api/v1/test_flows.py::test_read_flows_user_isolation": 3.153951084037544,
- "src/backend/tests/unit/api/v1/test_flows.py::test_update_flow": 2.5014465000131167,
- "src/backend/tests/unit/api/v1/test_folders.py::test_create_folder": 2.4833494579943363,
- "src/backend/tests/unit/api/v1/test_folders.py::test_read_folder": 2.4764065820199903,
- "src/backend/tests/unit/api/v1/test_folders.py::test_read_folders": 2.487064291985007,
- "src/backend/tests/unit/api/v1/test_folders.py::test_update_folder": 2.504634041019017,
- "src/backend/tests/unit/api/v1/test_mcp.py::test_mcp_post_endpoint_disconnect_error": 2.765244457987137,
- "src/backend/tests/unit/api/v1/test_mcp.py::test_mcp_post_endpoint_invalid_json": 2.8167178750154562,
- "src/backend/tests/unit/api/v1/test_mcp.py::test_mcp_post_endpoint_no_auth": 2.149924541998189,
- "src/backend/tests/unit/api/v1/test_mcp.py::test_mcp_post_endpoint_server_error": 2.772359000024153,
- "src/backend/tests/unit/api/v1/test_mcp.py::test_mcp_post_endpoint_success": 3.952213873999426,
- "src/backend/tests/unit/api/v1/test_mcp.py::test_mcp_sse_get_endpoint_invalid_auth": 2.181764957989799,
- "src/backend/tests/unit/api/v1/test_mcp.py::test_mcp_sse_head_endpoint": 2.1777867930359207,
- "src/backend/tests/unit/api/v1/test_mcp.py::test_mcp_sse_head_endpoint_no_auth": 2.1754070409806445,
- "src/backend/tests/unit/api/v1/test_mcp_projects.py::test_handle_project_messages_success": 2.476644499984104,
- "src/backend/tests/unit/api/v1/test_mcp_projects.py::test_init_mcp_servers": 2.455380542000057,
- "src/backend/tests/unit/api/v1/test_mcp_projects.py::test_init_mcp_servers_error_handling": 2.670027624000795,
- "src/backend/tests/unit/api/v1/test_mcp_projects.py::test_project_sse_creation": 2.1437969590188004,
- "src/backend/tests/unit/api/v1/test_mcp_projects.py::test_update_project_mcp_settings_empty_settings": 2.50428270699922,
- "src/backend/tests/unit/api/v1/test_mcp_projects.py::test_update_project_mcp_settings_invalid_json": 2.481030208989978,
- "src/backend/tests/unit/api/v1/test_mcp_projects.py::test_update_project_mcp_settings_invalid_project": 2.5376452500058804,
- "src/backend/tests/unit/api/v1/test_mcp_projects.py::test_update_project_mcp_settings_other_user_project": 2.7707887919677887,
- "src/backend/tests/unit/api/v1/test_mcp_projects.py::test_update_project_mcp_settings_success": 2.5892928339890204,
- "src/backend/tests/unit/api/v1/test_mcp_projects.py::test_user_can_only_access_own_projects": 4.1938177490083035,
- "src/backend/tests/unit/api/v1/test_mcp_projects.py::test_user_can_update_own_flow_mcp_settings": 2.511545791028766,
- "src/backend/tests/unit/api/v1/test_mcp_projects.py::test_user_data_isolation_with_real_db": 2.8074080410588067,
- "src/backend/tests/unit/api/v1/test_projects.py::test_create_and_read_project_cyrillic": 2.555548208008986,
- "src/backend/tests/unit/api/v1/test_projects.py::test_create_project": 2.4998485410178546,
- "src/backend/tests/unit/api/v1/test_projects.py::test_create_project_validation_error": 2.500857791979797,
- "src/backend/tests/unit/api/v1/test_projects.py::test_delete_project_then_404": 2.5153850420028903,
- "src/backend/tests/unit/api/v1/test_projects.py::test_read_project": 2.4847323760332074,
- "src/backend/tests/unit/api/v1/test_projects.py::test_read_project_invalid_id_format": 3.9964210420148447,
- "src/backend/tests/unit/api/v1/test_projects.py::test_read_projects": 2.469938791007735,
- "src/backend/tests/unit/api/v1/test_projects.py::test_read_projects_empty": 2.7520287910010666,
- "src/backend/tests/unit/api/v1/test_projects.py::test_read_projects_pagination": 2.4880284999962896,
- "src/backend/tests/unit/api/v1/test_projects.py::test_update_project": 2.5021324589906726,
- "src/backend/tests/unit/api/v1/test_rename_flow_to_save.py::test_duplicate_flow_name_basic": 2.5396002079651225,
- "src/backend/tests/unit/api/v1/test_rename_flow_to_save.py::test_duplicate_flow_name_gaps_in_numbering": 2.5215593749890104,
- "src/backend/tests/unit/api/v1/test_rename_flow_to_save.py::test_duplicate_flow_name_regex_patterns": 2.5361030000203755,
- "src/backend/tests/unit/api/v1/test_rename_flow_to_save.py::test_duplicate_flow_name_special_characters": 2.5226703330117743,
- "src/backend/tests/unit/api/v1/test_rename_flow_to_save.py::test_duplicate_flow_name_with_non_numeric_suffixes": 2.513904042018112,
- "src/backend/tests/unit/api/v1/test_rename_flow_to_save.py::test_duplicate_flow_name_with_numbers_in_original": 2.5571109160082415,
- "src/backend/tests/unit/api/v1/test_schemas.py::test_vertex_response_structure_when_truncate_applies": 0.023025040980428457,
- "src/backend/tests/unit/api/v1/test_schemas.py::test_vertex_response_structure_without_truncate": 0.0024828329915180802,
- "src/backend/tests/unit/api/v1/test_schemas.py::test_vertex_response_truncation_behavior[0-0]": 0.0007958330097608268,
- "src/backend/tests/unit/api/v1/test_schemas.py::test_vertex_response_truncation_behavior[100-100]": 0.0012827390000325067,
- "src/backend/tests/unit/api/v1/test_schemas.py::test_vertex_response_truncation_behavior[1000-1000]": 0.007933541986858472,
- "src/backend/tests/unit/api/v1/test_schemas.py::test_vertex_response_truncation_behavior[1100-101]": 0.002772633000006408,
- "src/backend/tests/unit/api/v1/test_schemas.py::test_vertex_response_truncation_behavior[2000-1001]": 0.008365543006220832,
- "src/backend/tests/unit/api/v1/test_schemas.py::test_vertex_response_truncation_behavior[2100-101]": 0.004317118000017217,
- "src/backend/tests/unit/api/v1/test_schemas.py::test_vertex_response_truncation_behavior[3000-1001]": 0.009354082983918488,
- "src/backend/tests/unit/api/v1/test_schemas.py::test_vertex_response_truncation_behavior[3100-101]": 0.0056621729999619674,
- "src/backend/tests/unit/api/v1/test_schemas.py::test_vertex_response_truncation_behavior[4000-1001]": 0.009598375007044524,
- "src/backend/tests/unit/api/v1/test_schemas.py::test_vertex_response_truncation_behavior[42-42]": 0.0015994080000609756,
- "src/backend/tests/unit/api/v1/test_schemas.py::test_vertex_response_truncation_behavior[8-8]": 0.0007110010192263871,
- "src/backend/tests/unit/api/v1/test_starter_projects.py::test_get_starter_projects": 2.6394810419878922,
- "src/backend/tests/unit/api/v1/test_store.py::test_check_if_store_is_enabled": 1.876104333990952,
- "src/backend/tests/unit/api/v1/test_users.py::test_add_user": 4.005188834009459,
- "src/backend/tests/unit/api/v1/test_users.py::test_delete_user": 2.7974890820041765,
- "src/backend/tests/unit/api/v1/test_users.py::test_patch_user": 3.1082065000082366,
- "src/backend/tests/unit/api/v1/test_users.py::test_read_all_users": 2.507381165982224,
- "src/backend/tests/unit/api/v1/test_users.py::test_read_current_user": 2.514553042012267,
- "src/backend/tests/unit/api/v1/test_users.py::test_reset_password": 3.102323331986554,
- "src/backend/tests/unit/api/v1/test_validate.py::test_post_validate_code": 2.5075825010135304,
- "src/backend/tests/unit/api/v1/test_validate.py::test_post_validate_code_with_unauthenticated_user": 2.0713883750140667,
- "src/backend/tests/unit/api/v1/test_validate.py::test_post_validate_prompt": 2.6730809160217177,
- "src/backend/tests/unit/api/v1/test_validate.py::test_post_validate_prompt_with_invalid_data": 3.50601858299342,
- "src/backend/tests/unit/api/v1/test_variable.py::test_create_variable": 2.6408770410052966,
- "src/backend/tests/unit/api/v1/test_variable.py::test_create_variable__Exception": 5.891528583015315,
- "src/backend/tests/unit/api/v1/test_variable.py::test_create_variable__HTTPException": 2.8841335409670137,
- "src/backend/tests/unit/api/v1/test_variable.py::test_create_variable__exception": 2.5489250009995885,
- "src/backend/tests/unit/api/v1/test_variable.py::test_create_variable__httpexception": 2.5547019169898704,
- "src/backend/tests/unit/api/v1/test_variable.py::test_create_variable__variable_name_alread_exists": 3.690157334029209,
- "src/backend/tests/unit/api/v1/test_variable.py::test_create_variable__variable_name_already_exists": 2.5517429169849493,
- "src/backend/tests/unit/api/v1/test_variable.py::test_create_variable__variable_name_and_value_cannot_be_empty": 2.5338064569805283,
- "src/backend/tests/unit/api/v1/test_variable.py::test_create_variable__variable_name_cannot_be_empty": 2.58213637501467,
- "src/backend/tests/unit/api/v1/test_variable.py::test_create_variable__variable_value_cannot_be_empty": 5.253880292963004,
- "src/backend/tests/unit/api/v1/test_variable.py::test_delete_variable": 2.557073792995652,
- "src/backend/tests/unit/api/v1/test_variable.py::test_delete_variable__Exception": 3.1565893749939278,
- "src/backend/tests/unit/api/v1/test_variable.py::test_delete_variable__exception": 2.568733041989617,
- "src/backend/tests/unit/api/v1/test_variable.py::test_read_variables": 2.582863333984278,
- "src/backend/tests/unit/api/v1/test_variable.py::test_read_variables__": 2.563697667006636,
- "src/backend/tests/unit/api/v1/test_variable.py::test_read_variables__empty": 2.5792906249698717,
- "src/backend/tests/unit/api/v1/test_variable.py::test_update_variable": 2.556691708014114,
- "src/backend/tests/unit/api/v1/test_variable.py::test_update_variable__Exception": 3.202228542009834,
- "src/backend/tests/unit/api/v1/test_variable.py::test_update_variable__exception": 2.612043667031685,
- "src/backend/tests/unit/api/v2/test_files.py::test_delete_file": 2.5786768340331037,
- "src/backend/tests/unit/api/v2/test_files.py::test_download_file": 2.569752043025801,
- "src/backend/tests/unit/api/v2/test_files.py::test_edit_file": 2.5710495410021394,
- "src/backend/tests/unit/api/v2/test_files.py::test_list_files": 2.6288422499783337,
- "src/backend/tests/unit/api/v2/test_files.py::test_mcp_servers_file_replacement": 2.607162458007224,
- "src/backend/tests/unit/api/v2/test_files.py::test_unique_filename_counter_handles_gaps": 2.6284001680032816,
- "src/backend/tests/unit/api/v2/test_files.py::test_unique_filename_path_storage": 2.6036685010185465,
- "src/backend/tests/unit/api/v2/test_files.py::test_upload_file": 2.5614012499863748,
- "src/backend/tests/unit/api/v2/test_files.py::test_upload_files_with_different_extensions_same_name": 2.603973582998151,
- "src/backend/tests/unit/api/v2/test_files.py::test_upload_files_with_same_name_creates_unique_names": 2.623052831972018,
- "src/backend/tests/unit/api/v2/test_files.py::test_upload_files_without_extension_creates_unique_names": 5.934239873953629,
- "src/backend/tests/unit/api/v2/test_files.py::test_upload_list_delete_and_validate_files": 2.613800998980878,
- "src/backend/tests/unit/api/v2/test_mcp_servers_file.py::test_mcp_servers_upload_replace": 0.012480165984015912,
- "src/backend/tests/unit/base/load/test_load.py::test_run_flow_from_json_params": 0.003475541976513341,
- "src/backend/tests/unit/base/load/test_load.py::test_run_flow_with_fake_env": 0.043105750024551526,
- "src/backend/tests/unit/base/load/test_load.py::test_run_flow_with_fake_env_tweaks": 0.027037374005885795,
- "src/backend/tests/unit/base/models/test_model_constants.py::test_provider_names": 0.024663168034749106,
- "src/backend/tests/unit/base/tools/test_component_tool.py::test_component_tool": 0.04467487393412739,
- "src/backend/tests/unit/base/tools/test_component_toolkit.py::test_component_tool": 0.0019873329729307443,
- "src/backend/tests/unit/base/tools/test_component_toolkit.py::test_component_tool_with_api_key": 3.2793682489718776,
- "src/backend/tests/unit/base/tools/test_component_toolkit.py::test_sql_component_to_toolkit": 7.634652540989919,
- "src/backend/tests/unit/base/tools/test_create_schema.py::test_create_schema": 0.002514541003620252,
- "src/backend/tests/unit/base/tools/test_toolmodemixin.py::test_component_inputs_toolkit": 0.013006500987103209,
- "src/backend/tests/unit/base/tools/test_vector_store_decorator.py::TestVectorStoreDecorator::test_all_versions_have_a_file_name_defined": 0.0010075410536956042,
- "src/backend/tests/unit/base/tools/test_vector_store_decorator.py::TestVectorStoreDecorator::test_component_versions[1.0.19]": 0.0006282929971348494,
- "src/backend/tests/unit/base/tools/test_vector_store_decorator.py::TestVectorStoreDecorator::test_component_versions[1.1.0]": 0.0005540830316022038,
- "src/backend/tests/unit/base/tools/test_vector_store_decorator.py::TestVectorStoreDecorator::test_component_versions[1.1.1]": 0.0007710419886279851,
- "src/backend/tests/unit/base/tools/test_vector_store_decorator.py::TestVectorStoreDecorator::test_decorator_applied": 0.001958625012775883,
- "src/backend/tests/unit/base/tools/test_vector_store_decorator.py::TestVectorStoreDecorator::test_latest_version": 0.010553333966527134,
- "src/backend/tests/unit/components/agents/test_agent_component.py::TestAgentComponent::test_agent_component_initialization": 0.003768415976082906,
- "src/backend/tests/unit/components/agents/test_agent_component.py::TestAgentComponent::test_agent_has_dual_outputs": 0.0037760420236736536,
- "src/backend/tests/unit/components/agents/test_agent_component.py::TestAgentComponent::test_all_versions_have_a_file_name_defined": 0.0005480010295286775,
- "src/backend/tests/unit/components/agents/test_agent_component.py::TestAgentComponent::test_build_config_update": 0.011978708003880456,
- "src/backend/tests/unit/components/agents/test_agent_component.py::TestAgentComponent::test_component_versions[1.0.19]": 0.0004972489841748029,
- "src/backend/tests/unit/components/agents/test_agent_component.py::TestAgentComponent::test_component_versions[1.1.0]": 0.0005015839997213334,
- "src/backend/tests/unit/components/agents/test_agent_component.py::TestAgentComponent::test_component_versions[1.1.1]": 0.0004687080217991024,
- "src/backend/tests/unit/components/agents/test_agent_component.py::TestAgentComponent::test_frontend_node_structure": 0.010656208003638312,
- "src/backend/tests/unit/components/agents/test_agent_component.py::TestAgentComponent::test_json_mode_filtered_from_openai_inputs": 0.003795249998802319,
- "src/backend/tests/unit/components/agents/test_agent_component.py::TestAgentComponent::test_json_response_error_handling": 0.0033481249993201345,
- "src/backend/tests/unit/components/agents/test_agent_component.py::TestAgentComponent::test_json_response_parsing_embedded_json": 0.00349108298541978,
- "src/backend/tests/unit/components/agents/test_agent_component.py::TestAgentComponent::test_json_response_parsing_valid_json": 0.0034717909584287554,
- "src/backend/tests/unit/components/agents/test_agent_component.py::TestAgentComponent::test_latest_version": 0.004466166021302342,
- "src/backend/tests/unit/components/agents/test_agent_component.py::TestAgentComponent::test_model_building_without_json_mode": 0.0033837499795481563,
- "src/backend/tests/unit/components/agents/test_agent_component.py::TestAgentComponent::test_shared_execution_between_outputs": 0.0034727929742075503,
- "src/backend/tests/unit/components/agents/test_agent_component.py::TestAgentComponentWithClient::test_agent_component_with_all_anthropic_models": 37.49017054200522,
- "src/backend/tests/unit/components/agents/test_agent_component.py::TestAgentComponentWithClient::test_agent_component_with_all_openai_models": 91.73228058294626,
- "src/backend/tests/unit/components/agents/test_agent_component.py::TestAgentComponentWithClient::test_agent_component_with_calculator": 7.525444708036957,
- "src/backend/tests/unit/components/agents/test_agent_component.py::TestAgentComponentWithClient::test_all_versions_have_a_file_name_defined": 1.9329447920026723,
- "src/backend/tests/unit/components/agents/test_agent_component.py::TestAgentComponentWithClient::test_component_versions[1.0.19]": 1.9314822909946088,
- "src/backend/tests/unit/components/agents/test_agent_component.py::TestAgentComponentWithClient::test_component_versions[1.1.0]": 1.9367652910004836,
- "src/backend/tests/unit/components/agents/test_agent_component.py::TestAgentComponentWithClient::test_component_versions[1.1.1]": 1.9361331679974683,
- "src/backend/tests/unit/components/agents/test_agent_component.py::TestAgentComponentWithClient::test_latest_version": 1.9004300000087824,
- "src/backend/tests/unit/components/agents/test_agent_component.py::test_agent_component_with_calculator": 9.962897010000006,
- "src/backend/tests/unit/components/agents/test_agent_events.py::test_chain_end_event": 0.005884126032469794,
- "src/backend/tests/unit/components/agents/test_agent_events.py::test_chain_start_event": 0.008368166978470981,
- "src/backend/tests/unit/components/agents/test_agent_events.py::test_chain_stream_event": 0.0012526659993454814,
- "src/backend/tests/unit/components/agents/test_agent_events.py::test_handle_on_chain_end_empty_data": 0.001971207995666191,
- "src/backend/tests/unit/components/agents/test_agent_events.py::test_handle_on_chain_end_no_output": 0.0007197920058388263,
- "src/backend/tests/unit/components/agents/test_agent_events.py::test_handle_on_chain_end_with_empty_return_values": 0.0044970010058023036,
- "src/backend/tests/unit/components/agents/test_agent_events.py::test_handle_on_chain_end_with_output": 0.0011227920185774565,
- "src/backend/tests/unit/components/agents/test_agent_events.py::test_handle_on_chain_start_no_input": 0.0007970000151544809,
- "src/backend/tests/unit/components/agents/test_agent_events.py::test_handle_on_chain_start_with_input": 0.0007171659672167152,
- "src/backend/tests/unit/components/agents/test_agent_events.py::test_handle_on_chain_stream_no_output": 0.0006465409824158996,
- "src/backend/tests/unit/components/agents/test_agent_events.py::test_handle_on_chain_stream_with_output": 0.0006703749822918326,
- "src/backend/tests/unit/components/agents/test_agent_events.py::test_handle_on_tool_end": 0.004497958027059212,
- "src/backend/tests/unit/components/agents/test_agent_events.py::test_handle_on_tool_error": 0.004614791017957032,
- "src/backend/tests/unit/components/agents/test_agent_events.py::test_handle_on_tool_start": 0.005128292017616332,
- "src/backend/tests/unit/components/agents/test_agent_events.py::test_multiple_events": 0.0012292909959796816,
- "src/backend/tests/unit/components/agents/test_agent_events.py::test_tool_end_event": 0.0027982510218862444,
- "src/backend/tests/unit/components/agents/test_agent_events.py::test_tool_error_event": 0.0025122919760178775,
- "src/backend/tests/unit/components/agents/test_agent_events.py::test_tool_start_event": 0.005058793001808226,
- "src/backend/tests/unit/components/agents/test_agent_events.py::test_unknown_event": 0.0009467510099057108,
- "src/backend/tests/unit/components/agents/test_tool_calling_agent.py::test_tool_calling_agent_component": 3.95495145797031,
- "src/backend/tests/unit/components/bundles/composio/test_base.py::TestComposioBase::test_all_versions_have_a_file_name_defined": 0.0013641250261571258,
- "src/backend/tests/unit/components/bundles/composio/test_base.py::TestComposioBase::test_build_action_maps": 0.0011071659973822534,
- "src/backend/tests/unit/components/bundles/composio/test_base.py::TestComposioBase::test_build_wrapper_no_api_key": 0.0019804590265266597,
- "src/backend/tests/unit/components/bundles/composio/test_base.py::TestComposioBase::test_build_wrapper_with_api_key": 0.0015174170257523656,
- "src/backend/tests/unit/components/bundles/composio/test_base.py::TestComposioBase::test_component_versions[1.0.19]": 0.0013615420321002603,
- "src/backend/tests/unit/components/bundles/composio/test_base.py::TestComposioBase::test_component_versions[1.1.0]": 0.0010838740272447467,
- "src/backend/tests/unit/components/bundles/composio/test_base.py::TestComposioBase::test_component_versions[1.1.1]": 0.0015033740201033652,
- "src/backend/tests/unit/components/bundles/composio/test_base.py::TestComposioBase::test_get_action_fields": 0.0007218750251922756,
- "src/backend/tests/unit/components/bundles/composio/test_base.py::TestComposioBase::test_latest_version": 0.008931207994464785,
- "src/backend/tests/unit/components/bundles/composio/test_base.py::TestComposioBase::test_show_hide_fields": 0.0006626679969485849,
- "src/backend/tests/unit/components/bundles/composio/test_github.py::TestGitHubComponent::test_all_versions_have_a_file_name_defined": 0.000590665964409709,
- "src/backend/tests/unit/components/bundles/composio/test_github.py::TestGitHubComponent::test_as_dataframe": 0.009676958026830107,
- "src/backend/tests/unit/components/bundles/composio/test_github.py::TestGitHubComponent::test_component_versions[1.0.19]": 0.0005587080086115748,
- "src/backend/tests/unit/components/bundles/composio/test_github.py::TestGitHubComponent::test_component_versions[1.1.0]": 0.0005031249893363565,
- "src/backend/tests/unit/components/bundles/composio/test_github.py::TestGitHubComponent::test_component_versions[1.1.1]": 0.0005019579839427024,
- "src/backend/tests/unit/components/bundles/composio/test_github.py::TestGitHubComponent::test_execute_action_invalid_action": 0.0019453749991953373,
- "src/backend/tests/unit/components/bundles/composio/test_github.py::TestGitHubComponent::test_execute_action_list_branches": 0.0017187079938594252,
- "src/backend/tests/unit/components/bundles/composio/test_github.py::TestGitHubComponent::test_execute_action_list_repo_issues": 0.0017254590056836605,
- "src/backend/tests/unit/components/bundles/composio/test_github.py::TestGitHubComponent::test_execute_action_star_a_repo": 0.002778249967377633,
- "src/backend/tests/unit/components/bundles/composio/test_github.py::TestGitHubComponent::test_init": 0.002172915992559865,
- "src/backend/tests/unit/components/bundles/composio/test_github.py::TestGitHubComponent::test_latest_version": 0.005639333016006276,
- "src/backend/tests/unit/components/bundles/composio/test_github.py::TestGitHubComponent::test_update_build_config": 0.001747832982800901,
- "src/backend/tests/unit/components/bundles/composio/test_gmail.py::TestGmailComponent::test_all_versions_have_a_file_name_defined": 0.0005542919680010527,
- "src/backend/tests/unit/components/bundles/composio/test_gmail.py::TestGmailComponent::test_as_dataframe": 0.004509958002017811,
- "src/backend/tests/unit/components/bundles/composio/test_gmail.py::TestGmailComponent::test_component_versions[1.0.19]": 0.0005334160232450813,
- "src/backend/tests/unit/components/bundles/composio/test_gmail.py::TestGmailComponent::test_component_versions[1.1.0]": 0.0004847069794777781,
- "src/backend/tests/unit/components/bundles/composio/test_gmail.py::TestGmailComponent::test_component_versions[1.1.1]": 0.00048662498011253774,
- "src/backend/tests/unit/components/bundles/composio/test_gmail.py::TestGmailComponent::test_execute_action_fetch_emails": 0.0011807090195361525,
- "src/backend/tests/unit/components/bundles/composio/test_gmail.py::TestGmailComponent::test_execute_action_get_profile": 0.0009811250201892108,
- "src/backend/tests/unit/components/bundles/composio/test_gmail.py::TestGmailComponent::test_execute_action_invalid_action": 0.0011896660143975168,
- "src/backend/tests/unit/components/bundles/composio/test_gmail.py::TestGmailComponent::test_execute_action_send_email": 0.003445083013502881,
- "src/backend/tests/unit/components/bundles/composio/test_gmail.py::TestGmailComponent::test_init": 0.0009143760253209621,
- "src/backend/tests/unit/components/bundles/composio/test_gmail.py::TestGmailComponent::test_latest_version": 0.004278457985492423,
- "src/backend/tests/unit/components/bundles/composio/test_gmail.py::TestGmailComponent::test_update_build_config": 0.0012527919898275286,
- "src/backend/tests/unit/components/bundles/composio/test_googlecalendar.py::TestGoogleCalendarComponent::test_all_versions_have_a_file_name_defined": 0.0005167499766685069,
- "src/backend/tests/unit/components/bundles/composio/test_googlecalendar.py::TestGoogleCalendarComponent::test_as_dataframe": 0.0035292080137878656,
- "src/backend/tests/unit/components/bundles/composio/test_googlecalendar.py::TestGoogleCalendarComponent::test_component_versions[1.0.19]": 0.0005191250238567591,
- "src/backend/tests/unit/components/bundles/composio/test_googlecalendar.py::TestGoogleCalendarComponent::test_component_versions[1.1.0]": 0.0004775830020662397,
- "src/backend/tests/unit/components/bundles/composio/test_googlecalendar.py::TestGoogleCalendarComponent::test_component_versions[1.1.1]": 0.0004779590235557407,
- "src/backend/tests/unit/components/bundles/composio/test_googlecalendar.py::TestGoogleCalendarComponent::test_execute_action_create_event": 0.0015207919932436198,
- "src/backend/tests/unit/components/bundles/composio/test_googlecalendar.py::TestGoogleCalendarComponent::test_execute_action_invalid_action": 0.0016529580170754343,
- "src/backend/tests/unit/components/bundles/composio/test_googlecalendar.py::TestGoogleCalendarComponent::test_execute_action_list_calendars": 0.001615166023839265,
- "src/backend/tests/unit/components/bundles/composio/test_googlecalendar.py::TestGoogleCalendarComponent::test_init": 0.0017601260333321989,
- "src/backend/tests/unit/components/bundles/composio/test_googlecalendar.py::TestGoogleCalendarComponent::test_latest_version": 0.005791208997834474,
- "src/backend/tests/unit/components/bundles/composio/test_googlecalendar.py::TestGoogleCalendarComponent::test_update_build_config": 0.001750416005961597,
- "src/backend/tests/unit/components/bundles/composio/test_outlook.py::TestOutlookComponent::test_all_versions_have_a_file_name_defined": 0.0005066260055173188,
- "src/backend/tests/unit/components/bundles/composio/test_outlook.py::TestOutlookComponent::test_as_dataframe": 0.002483374992152676,
- "src/backend/tests/unit/components/bundles/composio/test_outlook.py::TestOutlookComponent::test_component_versions[1.0.19]": 0.0005131669749971479,
- "src/backend/tests/unit/components/bundles/composio/test_outlook.py::TestOutlookComponent::test_component_versions[1.1.0]": 0.0005113749939482659,
- "src/backend/tests/unit/components/bundles/composio/test_outlook.py::TestOutlookComponent::test_component_versions[1.1.1]": 0.0004998329968657345,
- "src/backend/tests/unit/components/bundles/composio/test_outlook.py::TestOutlookComponent::test_execute_action_fetch_emails": 0.0015756240172777325,
- "src/backend/tests/unit/components/bundles/composio/test_outlook.py::TestOutlookComponent::test_execute_action_invalid_action": 0.0016053339932113886,
- "src/backend/tests/unit/components/bundles/composio/test_outlook.py::TestOutlookComponent::test_execute_action_send_email": 0.0014677929866593331,
- "src/backend/tests/unit/components/bundles/composio/test_outlook.py::TestOutlookComponent::test_init": 0.0016895410080906004,
- "src/backend/tests/unit/components/bundles/composio/test_outlook.py::TestOutlookComponent::test_latest_version": 0.00544029101729393,
- "src/backend/tests/unit/components/bundles/composio/test_outlook.py::TestOutlookComponent::test_update_build_config": 0.0016177069919649512,
- "src/backend/tests/unit/components/bundles/composio/test_slack.py::TestSlackComponent::test_all_versions_have_a_file_name_defined": 0.00047204099246300757,
- "src/backend/tests/unit/components/bundles/composio/test_slack.py::TestSlackComponent::test_as_dataframe": 0.003204000007826835,
- "src/backend/tests/unit/components/bundles/composio/test_slack.py::TestSlackComponent::test_component_versions[1.0.19]": 0.00048258400056511164,
- "src/backend/tests/unit/components/bundles/composio/test_slack.py::TestSlackComponent::test_component_versions[1.1.0]": 0.0004694170202128589,
- "src/backend/tests/unit/components/bundles/composio/test_slack.py::TestSlackComponent::test_component_versions[1.1.1]": 0.0004875000158790499,
- "src/backend/tests/unit/components/bundles/composio/test_slack.py::TestSlackComponent::test_execute_action_invalid_action": 0.0014008739963173866,
- "src/backend/tests/unit/components/bundles/composio/test_slack.py::TestSlackComponent::test_execute_action_list_all_slack_team_users": 0.001392792008118704,
- "src/backend/tests/unit/components/bundles/composio/test_slack.py::TestSlackComponent::test_execute_action_send_message_to_channel": 0.0013018740282859653,
- "src/backend/tests/unit/components/bundles/composio/test_slack.py::TestSlackComponent::test_init": 0.0015250830037984997,
- "src/backend/tests/unit/components/bundles/composio/test_slack.py::TestSlackComponent::test_latest_version": 0.004491709027206525,
- "src/backend/tests/unit/components/bundles/composio/test_slack.py::TestSlackComponent::test_update_build_config": 0.0014909160090610385,
- "src/backend/tests/unit/components/bundles/google/test_google_bq_sql_executor_component.py::TestBigQueryExecutorComponent::test_all_versions_have_a_file_name_defined": 0.0010013749997597188,
- "src/backend/tests/unit/components/bundles/google/test_google_bq_sql_executor_component.py::TestBigQueryExecutorComponent::test_complex_query_result": 0.0044400839833542705,
- "src/backend/tests/unit/components/bundles/google/test_google_bq_sql_executor_component.py::TestBigQueryExecutorComponent::test_component_versions[1.0.19]": 0.0009715839987620711,
- "src/backend/tests/unit/components/bundles/google/test_google_bq_sql_executor_component.py::TestBigQueryExecutorComponent::test_component_versions[1.1.0]": 0.0009319579985458404,
- "src/backend/tests/unit/components/bundles/google/test_google_bq_sql_executor_component.py::TestBigQueryExecutorComponent::test_component_versions[1.1.1]": 0.0009336670045740902,
- "src/backend/tests/unit/components/bundles/google/test_google_bq_sql_executor_component.py::TestBigQueryExecutorComponent::test_empty_query_raises[ \\n\\t ]": 0.0018787499866448343,
- "src/backend/tests/unit/components/bundles/google/test_google_bq_sql_executor_component.py::TestBigQueryExecutorComponent::test_empty_query_raises[]": 0.0023359160113614053,
- "src/backend/tests/unit/components/bundles/google/test_google_bq_sql_executor_component.py::TestBigQueryExecutorComponent::test_execute_sql_invalid_query": 0.0020419160136952996,
- "src/backend/tests/unit/components/bundles/google/test_google_bq_sql_executor_component.py::TestBigQueryExecutorComponent::test_execute_sql_success": 0.003112083999440074,
- "src/backend/tests/unit/components/bundles/google/test_google_bq_sql_executor_component.py::TestBigQueryExecutorComponent::test_invalid_service_account_json": 0.00275295801111497,
- "src/backend/tests/unit/components/bundles/google/test_google_bq_sql_executor_component.py::TestBigQueryExecutorComponent::test_latest_version": 0.003404540999326855,
- "src/backend/tests/unit/components/bundles/google/test_google_bq_sql_executor_component.py::TestBigQueryExecutorComponent::test_missing_project_id_in_credentials": 0.0014151670038700104,
- "src/backend/tests/unit/components/bundles/google/test_google_bq_sql_executor_component.py::TestBigQueryExecutorComponent::test_missing_service_account_file": 0.0012511250097304583,
- "src/backend/tests/unit/components/bundles/google/test_google_bq_sql_executor_component.py::TestBigQueryExecutorComponent::test_query_with_multiple_statements": 0.002837667998392135,
- "src/backend/tests/unit/components/bundles/google/test_google_bq_sql_executor_component.py::TestBigQueryExecutorComponent::test_query_with_parameters": 0.0027423329884186387,
- "src/backend/tests/unit/components/bundles/google/test_google_bq_sql_executor_component.py::TestBigQueryExecutorComponent::test_query_with_quotes": 0.005764999950770289,
- "src/backend/tests/unit/components/bundles/google/test_google_bq_sql_executor_component.py::TestBigQueryExecutorComponent::test_query_with_special_characters": 0.0029064160480629653,
- "src/backend/tests/unit/components/bundles/google/test_google_bq_sql_executor_component.py::TestBigQueryExecutorComponent::test_query_with_sql_code_block": 0.0025010009994730353,
- "src/backend/tests/unit/components/bundles/google/test_google_bq_sql_executor_component.py::TestBigQueryExecutorComponent::test_query_with_whitespace": 0.003081998962443322,
- "src/backend/tests/unit/components/bundles/google/test_google_bq_sql_executor_component.py::TestBigQueryExecutorComponent::test_refresh_error_handling": 0.0016127090202644467,
- "src/backend/tests/unit/components/bundles/langwatch/test_langwatch_component.py::TestLangWatchComponent::test_all_versions_have_a_file_name_defined": 0.00048070898628793657,
- "src/backend/tests/unit/components/bundles/langwatch/test_langwatch_component.py::TestLangWatchComponent::test_component_versions[1.0.19]": 0.0005491250194609165,
- "src/backend/tests/unit/components/bundles/langwatch/test_langwatch_component.py::TestLangWatchComponent::test_component_versions[1.1.0]": 0.0022520839993376285,
- "src/backend/tests/unit/components/bundles/langwatch/test_langwatch_component.py::TestLangWatchComponent::test_component_versions[1.1.1]": 0.0004988750151824206,
- "src/backend/tests/unit/components/bundles/langwatch/test_langwatch_component.py::TestLangWatchComponent::test_evaluate_evaluator_not_found": 0.0015255410107783973,
- "src/backend/tests/unit/components/bundles/langwatch/test_langwatch_component.py::TestLangWatchComponent::test_evaluate_http_error": 0.008881082001607865,
- "src/backend/tests/unit/components/bundles/langwatch/test_langwatch_component.py::TestLangWatchComponent::test_evaluate_no_api_key": 0.0016356660053133965,
- "src/backend/tests/unit/components/bundles/langwatch/test_langwatch_component.py::TestLangWatchComponent::test_evaluate_no_evaluators": 0.0011819580104202032,
- "src/backend/tests/unit/components/bundles/langwatch/test_langwatch_component.py::TestLangWatchComponent::test_evaluate_success": 0.009251374955056235,
- "src/backend/tests/unit/components/bundles/langwatch/test_langwatch_component.py::TestLangWatchComponent::test_evaluate_timeout_handling": 0.008078957034740597,
- "src/backend/tests/unit/components/bundles/langwatch/test_langwatch_component.py::TestLangWatchComponent::test_evaluate_with_contexts_parsing": 0.008182374993339181,
- "src/backend/tests/unit/components/bundles/langwatch/test_langwatch_component.py::TestLangWatchComponent::test_evaluate_with_tracing": 0.008384373970329762,
- "src/backend/tests/unit/components/bundles/langwatch/test_langwatch_component.py::TestLangWatchComponent::test_get_dynamic_inputs": 0.0010729159694164991,
- "src/backend/tests/unit/components/bundles/langwatch/test_langwatch_component.py::TestLangWatchComponent::test_get_dynamic_inputs_error_handling": 0.0008575830142945051,
- "src/backend/tests/unit/components/bundles/langwatch/test_langwatch_component.py::TestLangWatchComponent::test_get_dynamic_inputs_with_boolean_setting": 0.0009198319748975337,
- "src/backend/tests/unit/components/bundles/langwatch/test_langwatch_component.py::TestLangWatchComponent::test_latest_version": 0.00373708299594,
- "src/backend/tests/unit/components/bundles/langwatch/test_langwatch_component.py::TestLangWatchComponent::test_set_evaluators_empty_response": 0.0012653759913519025,
- "src/backend/tests/unit/components/bundles/langwatch/test_langwatch_component.py::TestLangWatchComponent::test_set_evaluators_success": 0.00145575002534315,
- "src/backend/tests/unit/components/bundles/langwatch/test_langwatch_component.py::TestLangWatchComponent::test_update_build_config_basic": 0.0017147080216091126,
- "src/backend/tests/unit/components/bundles/langwatch/test_langwatch_component.py::TestLangWatchComponent::test_update_build_config_with_evaluator_selection": 0.0013980419607833028,
- "src/backend/tests/unit/components/bundles/youtube/test_youtube_transcript_component.py::TestYouTubeTranscriptsComponent::test_all_versions_have_a_file_name_defined": 0.000492625025799498,
- "src/backend/tests/unit/components/bundles/youtube/test_youtube_transcript_component.py::TestYouTubeTranscriptsComponent::test_basic_setup": 0.0015408750041387975,
- "src/backend/tests/unit/components/bundles/youtube/test_youtube_transcript_component.py::TestYouTubeTranscriptsComponent::test_component_versions[1.0.19]": 0.0004934180178679526,
- "src/backend/tests/unit/components/bundles/youtube/test_youtube_transcript_component.py::TestYouTubeTranscriptsComponent::test_component_versions[1.1.0]": 0.0004676240496337414,
- "src/backend/tests/unit/components/bundles/youtube/test_youtube_transcript_component.py::TestYouTubeTranscriptsComponent::test_component_versions[1.1.1]": 0.00046129198744893074,
- "src/backend/tests/unit/components/bundles/youtube/test_youtube_transcript_component.py::TestYouTubeTranscriptsComponent::test_empty_transcript_handling": 0.0011142909934278578,
- "src/backend/tests/unit/components/bundles/youtube/test_youtube_transcript_component.py::TestYouTubeTranscriptsComponent::test_get_data_output_success": 0.001195126009406522,
- "src/backend/tests/unit/components/bundles/youtube/test_youtube_transcript_component.py::TestYouTubeTranscriptsComponent::test_get_dataframe_output_success": 0.0016434179851785302,
- "src/backend/tests/unit/components/bundles/youtube/test_youtube_transcript_component.py::TestYouTubeTranscriptsComponent::test_get_message_output_success": 0.0010021249763667583,
- "src/backend/tests/unit/components/bundles/youtube/test_youtube_transcript_component.py::TestYouTubeTranscriptsComponent::test_latest_version": 0.0027228749822825193,
- "src/backend/tests/unit/components/bundles/youtube/test_youtube_transcript_component.py::TestYouTubeTranscriptsComponent::test_no_transcript_found_error": 0.0011340409982949495,
- "src/backend/tests/unit/components/bundles/youtube/test_youtube_transcript_component.py::TestYouTubeTranscriptsComponent::test_transcript_disabled_error": 0.0012877909757662565,
- "src/backend/tests/unit/components/bundles/youtube/test_youtube_transcript_component.py::TestYouTubeTranscriptsComponent::test_translation_setting": 0.0006595839513465762,
- "src/backend/tests/unit/components/data/test_api_request_component.py::TestAPIRequestComponent::test_add_query_params": 0.0010754170070867985,
- "src/backend/tests/unit/components/data/test_api_request_component.py::TestAPIRequestComponent::test_all_versions_have_a_file_name_defined": 0.00045074999798089266,
- "src/backend/tests/unit/components/data/test_api_request_component.py::TestAPIRequestComponent::test_component_versions[1.0.19]": 0.0004679590347222984,
- "src/backend/tests/unit/components/data/test_api_request_component.py::TestAPIRequestComponent::test_component_versions[1.1.0]": 0.0004343740292824805,
- "src/backend/tests/unit/components/data/test_api_request_component.py::TestAPIRequestComponent::test_component_versions[1.1.1]": 0.000448833015980199,
- "src/backend/tests/unit/components/data/test_api_request_component.py::TestAPIRequestComponent::test_error_handling": 0.012866081990068778,
- "src/backend/tests/unit/components/data/test_api_request_component.py::TestAPIRequestComponent::test_invalid_urls": 0.0013495000312104821,
- "src/backend/tests/unit/components/data/test_api_request_component.py::TestAPIRequestComponent::test_latest_version": 0.004433374007930979,
- "src/backend/tests/unit/components/data/test_api_request_component.py::TestAPIRequestComponent::test_make_api_request": 0.012026001000776887,
- "src/backend/tests/unit/components/data/test_api_request_component.py::TestAPIRequestComponent::test_make_request_binary_response": 0.00746291596442461,
- "src/backend/tests/unit/components/data/test_api_request_component.py::TestAPIRequestComponent::test_make_request_save_to_file": 0.011377916001947597,
- "src/backend/tests/unit/components/data/test_api_request_component.py::TestAPIRequestComponent::test_make_request_success": 0.008038041996769607,
- "src/backend/tests/unit/components/data/test_api_request_component.py::TestAPIRequestComponent::test_make_request_timeout": 0.007103167998138815,
- "src/backend/tests/unit/components/data/test_api_request_component.py::TestAPIRequestComponent::test_make_request_with_metadata": 0.0075695840059779584,
- "src/backend/tests/unit/components/data/test_api_request_component.py::TestAPIRequestComponent::test_make_request_with_redirects": 0.008533499028999358,
- "src/backend/tests/unit/components/data/test_api_request_component.py::TestAPIRequestComponent::test_output_formats": 0.005004472999871723,
- "src/backend/tests/unit/components/data/test_api_request_component.py::TestAPIRequestComponent::test_parse_curl": 0.00128599998424761,
- "src/backend/tests/unit/components/data/test_api_request_component.py::TestAPIRequestComponent::test_process_body": 0.0010406250366941094,
- "src/backend/tests/unit/components/data/test_api_request_component.py::TestAPIRequestComponent::test_process_headers": 0.0012243750097695738,
- "src/backend/tests/unit/components/data/test_api_request_component.py::TestAPIRequestComponent::test_response_info": 0.002219833026174456,
- "src/backend/tests/unit/components/data/test_api_request_component.py::TestAPIRequestComponent::test_update_build_config": 0.0011027069995179772,
- "src/backend/tests/unit/components/data/test_api_request_component.py::test_httpx_metadata_behavior[False-expected_properties0]": 0.02888980000011543,
- "src/backend/tests/unit/components/data/test_api_request_component.py::test_httpx_metadata_behavior[True-expected_properties1]": 0.028863217999855806,
- "src/backend/tests/unit/components/data/test_api_request_component.py::test_parse_curl": 0.003312925000159339,
- "src/backend/tests/unit/components/data/test_api_request_component.py::test_response_info_binary_content": 0.003256059000023015,
- "src/backend/tests/unit/components/data/test_api_request_component.py::test_response_info_default_filename": 0.004079787999899054,
- "src/backend/tests/unit/components/data/test_api_request_component.py::test_response_info_filename_from_content_disposition": 0.004160126999977365,
- "src/backend/tests/unit/components/data/test_api_request_component.py::test_response_info_non_binary_content": 0.003093106999926931,
- "src/backend/tests/unit/components/data/test_api_request_component.py::test_save_to_file_behavior[False-expected_properties0]": 0.028578312000149708,
- "src/backend/tests/unit/components/data/test_api_request_component.py::test_save_to_file_behavior[True-expected_properties1]": 0.0307529940000677,
- "src/backend/tests/unit/components/data/test_directory_component.py::TestDirectoryComponent::test_all_versions_have_a_file_name_defined": 0.0009394589869771153,
- "src/backend/tests/unit/components/data/test_directory_component.py::TestDirectoryComponent::test_component_versions[1.0.19]": 6.407174333027797,
- "src/backend/tests/unit/components/data/test_directory_component.py::TestDirectoryComponent::test_component_versions[1.1.0]": 0.2403191670018714,
- "src/backend/tests/unit/components/data/test_directory_component.py::TestDirectoryComponent::test_component_versions[1.1.1]": 0.24304787500295788,
- "src/backend/tests/unit/components/data/test_directory_component.py::TestDirectoryComponent::test_directory_as_dataframe": 0.005464333982672542,
- "src/backend/tests/unit/components/data/test_directory_component.py::TestDirectoryComponent::test_directory_component_build_with_multithreading": 0.004901456995867193,
- "src/backend/tests/unit/components/data/test_directory_component.py::TestDirectoryComponent::test_directory_invalid_type": 0.001749083021422848,
- "src/backend/tests/unit/components/data/test_directory_component.py::TestDirectoryComponent::test_directory_with_depth": 0.005652666965033859,
- "src/backend/tests/unit/components/data/test_directory_component.py::TestDirectoryComponent::test_directory_with_hidden_files": 0.002371708018472418,
- "src/backend/tests/unit/components/data/test_directory_component.py::TestDirectoryComponent::test_directory_with_multithreading": 0.0020029579754918814,
- "src/backend/tests/unit/components/data/test_directory_component.py::TestDirectoryComponent::test_directory_with_types[file_types0-1]": 0.00288512502447702,
- "src/backend/tests/unit/components/data/test_directory_component.py::TestDirectoryComponent::test_directory_with_types[file_types1-1]": 0.002686874009668827,
- "src/backend/tests/unit/components/data/test_directory_component.py::TestDirectoryComponent::test_directory_with_types[file_types2-2]": 0.0025501680211164057,
- "src/backend/tests/unit/components/data/test_directory_component.py::TestDirectoryComponent::test_directory_without_mocks": 0.009557874989695847,
- "src/backend/tests/unit/components/data/test_directory_component.py::TestDirectoryComponent::test_latest_version": 0.002912666997872293,
- "src/backend/tests/unit/components/data/test_file_component.py::TestFileComponentDynamicOutputs::test_update_outputs_empty_path": 0.000515000952873379,
- "src/backend/tests/unit/components/data/test_file_component.py::TestFileComponentDynamicOutputs::test_update_outputs_multiple_files": 0.0005228740046732128,
- "src/backend/tests/unit/components/data/test_file_component.py::TestFileComponentDynamicOutputs::test_update_outputs_non_path_field": 0.0005073329666629434,
- "src/backend/tests/unit/components/data/test_file_component.py::TestFileComponentDynamicOutputs::test_update_outputs_single_csv_file": 0.0007687920005992055,
- "src/backend/tests/unit/components/data/test_file_component.py::TestFileComponentDynamicOutputs::test_update_outputs_single_json_file": 0.0005275000003166497,
- "src/backend/tests/unit/components/data/test_mcp_component.py::TestMCPSseClient::test_connect_timeout": 0.00015070801600813866,
- "src/backend/tests/unit/components/data/test_mcp_component.py::TestMCPSseClient::test_connect_to_server": 0.00015145802171900868,
- "src/backend/tests/unit/components/data/test_mcp_component.py::TestMCPSseClient::test_pre_check_redirect": 0.00015016700490377843,
- "src/backend/tests/unit/components/data/test_mcp_component.py::TestMCPStdioClient::test_connect_to_server": 0.00015516800340265036,
- "src/backend/tests/unit/components/data/test_mcp_component.py::TestMCPToolsComponent::test_all_versions_have_a_file_name_defined": 0.00015491698286496103,
- "src/backend/tests/unit/components/data/test_mcp_component.py::TestMCPToolsComponent::test_component_versions[1.0.19]": 0.00015937400166876614,
- "src/backend/tests/unit/components/data/test_mcp_component.py::TestMCPToolsComponent::test_component_versions[1.1.0]": 0.00015012500807642937,
- "src/backend/tests/unit/components/data/test_mcp_component.py::TestMCPToolsComponent::test_component_versions[1.1.1]": 0.0001494169991929084,
- "src/backend/tests/unit/components/data/test_mcp_component.py::TestMCPToolsComponent::test_latest_version": 0.00015929201617836952,
- "src/backend/tests/unit/components/data/test_news_search.py::TestNewsSearchComponent::test_all_versions_have_a_file_name_defined": 0.000507083983393386,
- "src/backend/tests/unit/components/data/test_news_search.py::TestNewsSearchComponent::test_component_versions[1.0.19]": 0.000477084016893059,
- "src/backend/tests/unit/components/data/test_news_search.py::TestNewsSearchComponent::test_component_versions[1.1.0]": 0.00046041799942031503,
- "src/backend/tests/unit/components/data/test_news_search.py::TestNewsSearchComponent::test_component_versions[1.1.1]": 0.00045824903645552695,
- "src/backend/tests/unit/components/data/test_news_search.py::TestNewsSearchComponent::test_empty_news_results": 0.0011523329885676503,
- "src/backend/tests/unit/components/data/test_news_search.py::TestNewsSearchComponent::test_latest_version": 0.0034459989983588457,
- "src/backend/tests/unit/components/data/test_news_search.py::TestNewsSearchComponent::test_news_search_error": 0.0010145840351469815,
- "src/backend/tests/unit/components/data/test_news_search.py::TestNewsSearchComponent::test_successful_news_search": 0.002709583000978455,
- "src/backend/tests/unit/components/data/test_rss.py::TestRSSReaderComponent::test_all_versions_have_a_file_name_defined": 0.0005018330120947212,
- "src/backend/tests/unit/components/data/test_rss.py::TestRSSReaderComponent::test_component_versions[1.0.19]": 0.00048041599802672863,
- "src/backend/tests/unit/components/data/test_rss.py::TestRSSReaderComponent::test_component_versions[1.1.0]": 0.0004656669916585088,
- "src/backend/tests/unit/components/data/test_rss.py::TestRSSReaderComponent::test_component_versions[1.1.1]": 0.00044616698869504035,
- "src/backend/tests/unit/components/data/test_rss.py::TestRSSReaderComponent::test_empty_rss_feed": 0.0016158339858520776,
- "src/backend/tests/unit/components/data/test_rss.py::TestRSSReaderComponent::test_latest_version": 0.0023054990160744637,
- "src/backend/tests/unit/components/data/test_rss.py::TestRSSReaderComponent::test_rss_fetch_error": 0.0009557070152368397,
- "src/backend/tests/unit/components/data/test_rss.py::TestRSSReaderComponent::test_rss_fetch_with_missing_fields": 0.0014774170122109354,
- "src/backend/tests/unit/components/data/test_rss.py::TestRSSReaderComponent::test_successful_rss_fetch": 0.0019229159806855023,
- "src/backend/tests/unit/components/data/test_s3_uploader_component.py::TestS3UploaderComponent::test_all_versions_have_a_file_name_defined": 0.00014966700109653175,
- "src/backend/tests/unit/components/data/test_s3_uploader_component.py::TestS3UploaderComponent::test_component_versions[1.0.19]": 0.00014533300418406725,
- "src/backend/tests/unit/components/data/test_s3_uploader_component.py::TestS3UploaderComponent::test_component_versions[1.1.0]": 0.00014445898705162108,
- "src/backend/tests/unit/components/data/test_s3_uploader_component.py::TestS3UploaderComponent::test_component_versions[1.1.1]": 0.00013358399155549705,
- "src/backend/tests/unit/components/data/test_s3_uploader_component.py::TestS3UploaderComponent::test_latest_version": 0.00018295898917131126,
- "src/backend/tests/unit/components/data/test_s3_uploader_component.py::TestS3UploaderComponent::test_upload": 0.00014554100926034153,
- "src/backend/tests/unit/components/data/test_sql_executor.py::TestSQLComponent::test_all_versions_have_a_file_name_defined": 0.0016167510184459388,
- "src/backend/tests/unit/components/data/test_sql_executor.py::TestSQLComponent::test_build_data": 0.005873824999525823,
- "src/backend/tests/unit/components/data/test_sql_executor.py::TestSQLComponent::test_build_dataframe": 0.00853606999976364,
- "src/backend/tests/unit/components/data/test_sql_executor.py::TestSQLComponent::test_component_versions[1.0.19]": 0.0015825839655008167,
- "src/backend/tests/unit/components/data/test_sql_executor.py::TestSQLComponent::test_component_versions[1.1.0]": 0.0017892519827000797,
- "src/backend/tests/unit/components/data/test_sql_executor.py::TestSQLComponent::test_component_versions[1.1.1]": 0.0019315830431878567,
- "src/backend/tests/unit/components/data/test_sql_executor.py::TestSQLComponent::test_latest_version": 0.004538083012448624,
- "src/backend/tests/unit/components/data/test_sql_executor.py::TestSQLComponent::test_query_error_with_add_error": 0.00444866600446403,
- "src/backend/tests/unit/components/data/test_sql_executor.py::TestSQLComponent::test_run_sql_query": 0.00467204101732932,
- "src/backend/tests/unit/components/data/test_sql_executor.py::TestSQLComponent::test_successful_query_with_columns": 0.004683791019488126,
- "src/backend/tests/unit/components/data/test_sql_executor.py::TestSQLComponent::test_successful_query_without_columns": 0.003943373973015696,
- "src/backend/tests/unit/components/data/test_url_component.py::TestURLComponent::test_all_versions_have_a_file_name_defined": 0.00044878997141495347,
- "src/backend/tests/unit/components/data/test_url_component.py::TestURLComponent::test_component_versions[1.0.19]": 1.2219787489739247,
- "src/backend/tests/unit/components/data/test_url_component.py::TestURLComponent::test_component_versions[1.1.0]": 0.9946662079892121,
- "src/backend/tests/unit/components/data/test_url_component.py::TestURLComponent::test_component_versions[1.1.1]": 1.0076045420137234,
- "src/backend/tests/unit/components/data/test_url_component.py::TestURLComponent::test_latest_version": 0.003217125980881974,
- "src/backend/tests/unit/components/data/test_url_component.py::TestURLComponent::test_recursive_url_component": 0.0042570200000682235,
- "src/backend/tests/unit/components/data/test_url_component.py::TestURLComponent::test_recursive_url_component_as_dataframe": 0.004986199000086344,
- "src/backend/tests/unit/components/data/test_url_component.py::TestURLComponent::test_recursive_url_component_ensure_url": 0.003239873000211446,
- "src/backend/tests/unit/components/data/test_url_component.py::TestURLComponent::test_recursive_url_component_error_handling": 0.00368510399994193,
- "src/backend/tests/unit/components/data/test_url_component.py::TestURLComponent::test_recursive_url_component_fetch_content_text": 0.004006832999948529,
- "src/backend/tests/unit/components/data/test_url_component.py::TestURLComponent::test_recursive_url_component_format_options": 0.003853296000215778,
- "src/backend/tests/unit/components/data/test_url_component.py::TestURLComponent::test_recursive_url_component_multiple_urls": 0.004476819999808868,
- "src/backend/tests/unit/components/data/test_url_component.py::TestURLComponent::test_url_component": 0.0032953139999563064,
- "src/backend/tests/unit/components/data/test_url_component.py::TestURLComponent::test_url_component_as_dataframe": 0.00391441199997189,
- "src/backend/tests/unit/components/data/test_url_component.py::TestURLComponent::test_url_component_basic_functionality": 0.006263292045332491,
- "src/backend/tests/unit/components/data/test_url_component.py::TestURLComponent::test_url_component_ensure_url": 0.0008219179871957749,
- "src/backend/tests/unit/components/data/test_url_component.py::TestURLComponent::test_url_component_error_handling": 0.010195249982643872,
- "src/backend/tests/unit/components/data/test_url_component.py::TestURLComponent::test_url_component_fetch_content_text": 0.0030438270000558987,
- "src/backend/tests/unit/components/data/test_url_component.py::TestURLComponent::test_url_component_format_options": 0.0026391660212539136,
- "src/backend/tests/unit/components/data/test_url_component.py::TestURLComponent::test_url_component_invalid_urls": 0.0025321470000108093,
- "src/backend/tests/unit/components/data/test_url_component.py::TestURLComponent::test_url_component_missing_metadata": 0.0034857080318033695,
- "src/backend/tests/unit/components/data/test_url_component.py::TestURLComponent::test_url_component_multiple_urls": 0.0036573750257957727,
- "src/backend/tests/unit/components/data/test_url_component.py::TestURLComponent::test_url_request_success": 0.00575876200014136,
- "src/backend/tests/unit/components/data/test_web_search.py::TestWebSearchComponent::test_all_versions_have_a_file_name_defined": 0.0005157929554115981,
- "src/backend/tests/unit/components/data/test_web_search.py::TestWebSearchComponent::test_component_versions[1.0.19]": 0.0004949590074829757,
- "src/backend/tests/unit/components/data/test_web_search.py::TestWebSearchComponent::test_component_versions[1.1.0]": 0.0004674170049838722,
- "src/backend/tests/unit/components/data/test_web_search.py::TestWebSearchComponent::test_component_versions[1.1.1]": 0.0004539159999694675,
- "src/backend/tests/unit/components/data/test_web_search.py::TestWebSearchComponent::test_invalid_url_handling": 0.0008121670107357204,
- "src/backend/tests/unit/components/data/test_web_search.py::TestWebSearchComponent::test_latest_version": 0.002719084033742547,
- "src/backend/tests/unit/components/data/test_web_search.py::TestWebSearchComponent::test_successful_web_search": 22.156292709027184,
- "src/backend/tests/unit/components/embeddings/test_embedding_model_component.py::TestEmbeddingModelComponent::test_all_versions_have_a_file_name_defined": 8.159916476000035,
- "src/backend/tests/unit/components/embeddings/test_embedding_model_component.py::TestEmbeddingModelComponent::test_build_embeddings_openai": 10.576514679999946,
- "src/backend/tests/unit/components/embeddings/test_embedding_model_component.py::TestEmbeddingModelComponent::test_build_embeddings_openai_missing_api_key": 8.120289870000079,
- "src/backend/tests/unit/components/embeddings/test_embedding_model_component.py::TestEmbeddingModelComponent::test_build_embeddings_unknown_provider": 8.13874799700011,
- "src/backend/tests/unit/components/embeddings/test_embedding_model_component.py::TestEmbeddingModelComponent::test_component_versions[1.0.19]": 8.300481815000012,
- "src/backend/tests/unit/components/embeddings/test_embedding_model_component.py::TestEmbeddingModelComponent::test_component_versions[1.1.0]": 8.323870884000144,
- "src/backend/tests/unit/components/embeddings/test_embedding_model_component.py::TestEmbeddingModelComponent::test_component_versions[1.1.1]": 8.112025460000268,
- "src/backend/tests/unit/components/embeddings/test_embedding_model_component.py::TestEmbeddingModelComponent::test_latest_version": 8.215820538000116,
- "src/backend/tests/unit/components/embeddings/test_embedding_model_component.py::TestEmbeddingModelComponent::test_update_build_config_openai": 8.282516385000008,
- "src/backend/tests/unit/components/git/test_git_component.py::test_check_content_pattern": 0.0030239579791668802,
- "src/backend/tests/unit/components/git/test_git_component.py::test_check_file_patterns": 0.0035422499931883067,
- "src/backend/tests/unit/components/git/test_git_component.py::test_combined_filter": 0.0033488319895695895,
- "src/backend/tests/unit/components/git/test_git_component.py::test_is_binary": 0.006222750002052635,
- "src/backend/tests/unit/components/helpers/test_batch_run_component.py::TestBatchRunComponent::test_add_metadata_failure": 0.004369750999785538,
- "src/backend/tests/unit/components/helpers/test_batch_run_component.py::TestBatchRunComponent::test_add_metadata_success": 0.004423803999998199,
- "src/backend/tests/unit/components/helpers/test_batch_run_component.py::TestBatchRunComponent::test_all_versions_have_a_file_name_defined": 0.001901240999586662,
- "src/backend/tests/unit/components/helpers/test_batch_run_component.py::TestBatchRunComponent::test_batch_run_error_with_metadata": 0.005896684999697754,
- "src/backend/tests/unit/components/helpers/test_batch_run_component.py::TestBatchRunComponent::test_batch_run_error_without_metadata": 0.005105125999989468,
- "src/backend/tests/unit/components/helpers/test_batch_run_component.py::TestBatchRunComponent::test_batch_run_without_metadata": 0.0066861279999557155,
- "src/backend/tests/unit/components/helpers/test_batch_run_component.py::TestBatchRunComponent::test_batch_run_without_system_message": 0.004009337000013602,
- "src/backend/tests/unit/components/helpers/test_batch_run_component.py::TestBatchRunComponent::test_component_versions[1.0.19]": 0.0018921429998499661,
- "src/backend/tests/unit/components/helpers/test_batch_run_component.py::TestBatchRunComponent::test_component_versions[1.1.0]": 0.0019201959999008977,
- "src/backend/tests/unit/components/helpers/test_batch_run_component.py::TestBatchRunComponent::test_component_versions[1.1.1]": 0.0023453609999251057,
- "src/backend/tests/unit/components/helpers/test_batch_run_component.py::TestBatchRunComponent::test_create_base_row": 0.004416428999775235,
- "src/backend/tests/unit/components/helpers/test_batch_run_component.py::TestBatchRunComponent::test_empty_dataframe": 0.0061839309998958925,
- "src/backend/tests/unit/components/helpers/test_batch_run_component.py::TestBatchRunComponent::test_invalid_column_name": 0.005529347999981837,
- "src/backend/tests/unit/components/helpers/test_batch_run_component.py::TestBatchRunComponent::test_latest_version": 0.009116718999848672,
- "src/backend/tests/unit/components/helpers/test_batch_run_component.py::TestBatchRunComponent::test_metadata_disabled": 0.0044005400000060035,
- "src/backend/tests/unit/components/helpers/test_batch_run_component.py::TestBatchRunComponent::test_non_string_column_conversion": 0.00715712800001711,
- "src/backend/tests/unit/components/helpers/test_batch_run_component.py::TestBatchRunComponent::test_operational_error_with_metadata": 0.006253800999957093,
- "src/backend/tests/unit/components/helpers/test_batch_run_component.py::TestBatchRunComponent::test_operational_error_without_metadata": 0.006102398000166431,
- "src/backend/tests/unit/components/helpers/test_batch_run_component.py::TestBatchRunComponent::test_successful_batch_run_with_system_message": 0.007426672000292456,
- "src/backend/tests/unit/components/helpers/test_structured_output_component.py::TestStructuredOutputComponent::test_all_versions_have_a_file_name_defined": 0.0016393719999996392,
- "src/backend/tests/unit/components/helpers/test_structured_output_component.py::TestStructuredOutputComponent::test_component_versions[1.0.19]": 0.001553460999730305,
- "src/backend/tests/unit/components/helpers/test_structured_output_component.py::TestStructuredOutputComponent::test_component_versions[1.1.0]": 0.0015498059999572433,
- "src/backend/tests/unit/components/helpers/test_structured_output_component.py::TestStructuredOutputComponent::test_component_versions[1.1.1]": 0.0015663749998111598,
- "src/backend/tests/unit/components/helpers/test_structured_output_component.py::TestStructuredOutputComponent::test_correctly_builds_output_model": 0.0033377929999005573,
- "src/backend/tests/unit/components/helpers/test_structured_output_component.py::TestStructuredOutputComponent::test_empty_output_schema": 0.0024972539999907895,
- "src/backend/tests/unit/components/helpers/test_structured_output_component.py::TestStructuredOutputComponent::test_handles_multiple_outputs": 0.0032065590003185207,
- "src/backend/tests/unit/components/helpers/test_structured_output_component.py::TestStructuredOutputComponent::test_invalid_llm_config": 0.42860454198671505,
- "src/backend/tests/unit/components/helpers/test_structured_output_component.py::TestStructuredOutputComponent::test_invalid_output_schema_type": 0.00249897599974247,
- "src/backend/tests/unit/components/helpers/test_structured_output_component.py::TestStructuredOutputComponent::test_large_input_value": 0.01574660099981884,
- "src/backend/tests/unit/components/helpers/test_structured_output_component.py::TestStructuredOutputComponent::test_latest_version": 0.0062522369998987415,
- "src/backend/tests/unit/components/helpers/test_structured_output_component.py::TestStructuredOutputComponent::test_nested_output_schema": 0.016930043000002115,
- "src/backend/tests/unit/components/helpers/test_structured_output_component.py::TestStructuredOutputComponent::test_raises_value_error_for_unsupported_language_model": 0.002724247000287505,
- "src/backend/tests/unit/components/helpers/test_structured_output_component.py::TestStructuredOutputComponent::test_successful_structured_output_generation_with_patch_with_config": 0.035178335999944466,
- "src/backend/tests/unit/components/helpers/test_structured_output_component.py::TestStructuredOutputComponent::test_with_real_nvidia_model_simple_schema": 0.00029323899980226997,
- "src/backend/tests/unit/components/helpers/test_structured_output_component.py::TestStructuredOutputComponent::test_with_real_openai_model_complex_schema": 1.4947445480001988,
- "src/backend/tests/unit/components/helpers/test_structured_output_component.py::TestStructuredOutputComponent::test_with_real_openai_model_nested_schema": 1.9058080820000214,
- "src/backend/tests/unit/components/helpers/test_structured_output_component.py::TestStructuredOutputComponent::test_with_real_openai_model_simple_schema": 0.960528087000057,
- "src/backend/tests/unit/components/helpers/test_structured_output_component.py::TestStructuredOutputComponent::test_with_real_openai_model_simple_schema_fail": 0.7785365639999782,
- "src/backend/tests/unit/components/inputs/test_input_components.py::TestChatInput::test_all_versions_have_a_file_name_defined": 6.539043208991643,
- "src/backend/tests/unit/components/inputs/test_input_components.py::TestChatInput::test_component_versions[1.0.17]": 4.332370791060384,
- "src/backend/tests/unit/components/inputs/test_input_components.py::TestChatInput::test_component_versions[1.0.18]": 3.6762167080305517,
- "src/backend/tests/unit/components/inputs/test_input_components.py::TestChatInput::test_component_versions[1.0.19]": 2.2135795410140418,
- "src/backend/tests/unit/components/inputs/test_input_components.py::TestChatInput::test_component_versions[1.1.0]": 2.246968833031133,
- "src/backend/tests/unit/components/inputs/test_input_components.py::TestChatInput::test_component_versions[1.1.1]": 2.218763584009139,
- "src/backend/tests/unit/components/inputs/test_input_components.py::TestChatInput::test_latest_version": 1.9452624170226045,
- "src/backend/tests/unit/components/inputs/test_input_components.py::TestChatInput::test_message_response": 1.9526873330178205,
- "src/backend/tests/unit/components/inputs/test_input_components.py::TestChatInput::test_message_response_ai_sender": 1.970057792001171,
- "src/backend/tests/unit/components/inputs/test_input_components.py::TestChatInput::test_message_response_with_files": 3.12873616599245,
- "src/backend/tests/unit/components/inputs/test_input_components.py::TestChatInput::test_message_response_without_session": 1.9774919159826823,
- "src/backend/tests/unit/components/inputs/test_input_components.py::TestChatInput::test_message_storage_disabled": 1.968287416966632,
- "src/backend/tests/unit/components/inputs/test_input_components.py::TestTextInputComponent::test_all_versions_have_a_file_name_defined": 0.0025716660311445594,
- "src/backend/tests/unit/components/inputs/test_input_components.py::TestTextInputComponent::test_component_versions[1.0.17]": 0.26945149997482076,
- "src/backend/tests/unit/components/inputs/test_input_components.py::TestTextInputComponent::test_component_versions[1.0.18]": 0.28087970800697803,
- "src/backend/tests/unit/components/inputs/test_input_components.py::TestTextInputComponent::test_component_versions[1.0.19]": 0.22174570796778426,
- "src/backend/tests/unit/components/inputs/test_input_components.py::TestTextInputComponent::test_component_versions[1.1.0]": 0.2130016669689212,
- "src/backend/tests/unit/components/inputs/test_input_components.py::TestTextInputComponent::test_component_versions[1.1.1]": 0.2259559569938574,
- "src/backend/tests/unit/components/inputs/test_input_components.py::TestTextInputComponent::test_latest_version": 0.007378084002994001,
- "src/backend/tests/unit/components/languagemodels/test_baidu_qianfan.py::test_empty_str_endpoint": 0.0011477920052129775,
- "src/backend/tests/unit/components/languagemodels/test_baidu_qianfan.py::test_invalid_endpoint": 0.0018697489867918193,
- "src/backend/tests/unit/components/languagemodels/test_baidu_qianfan.py::test_none_endpoint": 0.0035746670328080654,
- "src/backend/tests/unit/components/languagemodels/test_baidu_qianfan.py::test_qianfan_different_models[AquilaChat-7B]": 0.0004310420190449804,
- "src/backend/tests/unit/components/languagemodels/test_baidu_qianfan.py::test_qianfan_different_models[BLOOMZ-7B]": 0.00039495897362940013,
- "src/backend/tests/unit/components/languagemodels/test_baidu_qianfan.py::test_qianfan_different_models[ChatGLM2-6B-32K]": 0.00036862498382106423,
- "src/backend/tests/unit/components/languagemodels/test_baidu_qianfan.py::test_qianfan_different_models[EB-turbo-AppBuilder]": 0.0011716260050889105,
- "src/backend/tests/unit/components/languagemodels/test_baidu_qianfan.py::test_qianfan_different_models[ERNIE 3.5]": 0.0003307919832877815,
- "src/backend/tests/unit/components/languagemodels/test_baidu_qianfan.py::test_qianfan_different_models[ERNIE Speed-AppBuilder]": 0.0003307919832877815,
- "src/backend/tests/unit/components/languagemodels/test_baidu_qianfan.py::test_qianfan_different_models[ERNIE Speed]": 0.0008584169845562428,
- "src/backend/tests/unit/components/languagemodels/test_baidu_qianfan.py::test_qianfan_different_models[ERNIE-3.5-8K]": 0.0005356260226108134,
- "src/backend/tests/unit/components/languagemodels/test_baidu_qianfan.py::test_qianfan_different_models[ERNIE-4.0-8K]": 0.0003534170100465417,
- "src/backend/tests/unit/components/languagemodels/test_baidu_qianfan.py::test_qianfan_different_models[ERNIE-Bot-4]": 0.0003674580075312406,
- "src/backend/tests/unit/components/languagemodels/test_baidu_qianfan.py::test_qianfan_different_models[ERNIE-Bot-turbo-AI]": 0.0007222500280477107,
- "src/backend/tests/unit/components/languagemodels/test_baidu_qianfan.py::test_qianfan_different_models[ERNIE-Bot]": 0.00037416600389406085,
- "src/backend/tests/unit/components/languagemodels/test_baidu_qianfan.py::test_qianfan_different_models[ERNIE-Lite-8K-0308]": 0.0006829169869888574,
- "src/backend/tests/unit/components/languagemodels/test_baidu_qianfan.py::test_qianfan_different_models[ERNIE-Speed-128k]": 0.0003332499763928354,
- "src/backend/tests/unit/components/languagemodels/test_baidu_qianfan.py::test_qianfan_different_models[ERNIE-Speed-8K]": 0.0003205010143574327,
- "src/backend/tests/unit/components/languagemodels/test_baidu_qianfan.py::test_qianfan_different_models[ERNIE-Speed]": 0.00047295799595303833,
- "src/backend/tests/unit/components/languagemodels/test_baidu_qianfan.py::test_qianfan_different_models[Llama-2-13b-chat]": 0.0003735009813681245,
- "src/backend/tests/unit/components/languagemodels/test_baidu_qianfan.py::test_qianfan_different_models[Llama-2-70b-chat]": 0.0014139590202830732,
- "src/backend/tests/unit/components/languagemodels/test_baidu_qianfan.py::test_qianfan_different_models[Llama-2-7b-chat]": 0.00035670900251716375,
- "src/backend/tests/unit/components/languagemodels/test_baidu_qianfan.py::test_qianfan_different_models[Mixtral-8x7B-Instruct]": 0.0003174169978592545,
- "src/backend/tests/unit/components/languagemodels/test_baidu_qianfan.py::test_qianfan_different_models[Qianfan-BLOOMZ-7B-compressed]": 0.0003525829815771431,
- "src/backend/tests/unit/components/languagemodels/test_baidu_qianfan.py::test_qianfan_different_models[Qianfan-Chinese-Llama-2-13B]": 0.0005150829965714365,
- "src/backend/tests/unit/components/languagemodels/test_baidu_qianfan.py::test_qianfan_different_models[Qianfan-Chinese-Llama-2-7B]": 0.0003848329943139106,
- "src/backend/tests/unit/components/languagemodels/test_baidu_qianfan.py::test_qianfan_different_models[XuanYuan-70B-Chat-4bit]": 0.0006859590066596866,
- "src/backend/tests/unit/components/languagemodels/test_baidu_qianfan.py::test_qianfan_different_models[Yi-34B-Chat]": 0.00032133300555869937,
- "src/backend/tests/unit/components/languagemodels/test_chatollama_component.py::TestChatOllamaComponent::test_all_versions_have_a_file_name_defined": 0.0005714159924536943,
- "src/backend/tests/unit/components/languagemodels/test_chatollama_component.py::TestChatOllamaComponent::test_build_model": 0.0019048750109504908,
- "src/backend/tests/unit/components/languagemodels/test_chatollama_component.py::TestChatOllamaComponent::test_build_model_integration": 0.013954625988844782,
- "src/backend/tests/unit/components/languagemodels/test_chatollama_component.py::TestChatOllamaComponent::test_build_model_missing_base_url": 0.0013083330122753978,
- "src/backend/tests/unit/components/languagemodels/test_chatollama_component.py::TestChatOllamaComponent::test_component_versions[1.0.19]": 0.0005165419715922326,
- "src/backend/tests/unit/components/languagemodels/test_chatollama_component.py::TestChatOllamaComponent::test_component_versions[1.1.0]": 0.000494084000820294,
- "src/backend/tests/unit/components/languagemodels/test_chatollama_component.py::TestChatOllamaComponent::test_component_versions[1.1.1]": 0.00047579201054759324,
- "src/backend/tests/unit/components/languagemodels/test_chatollama_component.py::TestChatOllamaComponent::test_get_models_failure": 0.006945083994651213,
- "src/backend/tests/unit/components/languagemodels/test_chatollama_component.py::TestChatOllamaComponent::test_get_models_success": 0.010135083022760227,
- "src/backend/tests/unit/components/languagemodels/test_chatollama_component.py::TestChatOllamaComponent::test_latest_version": 0.004662041959818453,
- "src/backend/tests/unit/components/languagemodels/test_chatollama_component.py::TestChatOllamaComponent::test_update_build_config_keep_alive": 0.0012813759967684746,
- "src/backend/tests/unit/components/languagemodels/test_chatollama_component.py::TestChatOllamaComponent::test_update_build_config_mirostat_disabled": 0.001101000001654029,
- "src/backend/tests/unit/components/languagemodels/test_chatollama_component.py::TestChatOllamaComponent::test_update_build_config_mirostat_enabled": 0.0010358320432715118,
- "src/backend/tests/unit/components/languagemodels/test_chatollama_component.py::TestChatOllamaComponent::test_update_build_config_model_name": 0.03060733401798643,
- "src/backend/tests/unit/components/languagemodels/test_deepseek.py::test_deepseek_build_model[0.5-100]": 0.0011963750002905726,
- "src/backend/tests/unit/components/languagemodels/test_deepseek.py::test_deepseek_build_model[1.0-500]": 0.0009675409819465131,
- "src/backend/tests/unit/components/languagemodels/test_deepseek.py::test_deepseek_build_model[1.5-1000]": 0.0009197920153383166,
- "src/backend/tests/unit/components/languagemodels/test_deepseek.py::test_deepseek_error_handling": 0.0011858330108225346,
- "src/backend/tests/unit/components/languagemodels/test_deepseek.py::test_deepseek_get_models": 0.0009361669945064932,
- "src/backend/tests/unit/components/languagemodels/test_deepseek.py::test_deepseek_initialization": 0.0007854590367060155,
- "src/backend/tests/unit/components/languagemodels/test_deepseek.py::test_deepseek_template": 0.01080862499657087,
- "src/backend/tests/unit/components/languagemodels/test_huggingface.py::test_huggingface_inputs": 0.0008037930529098958,
- "src/backend/tests/unit/components/languagemodels/test_openai_model.py::TestOpenAIModelComponent::test_all_versions_have_a_file_name_defined": 0.0004984999832231551,
- "src/backend/tests/unit/components/languagemodels/test_openai_model.py::TestOpenAIModelComponent::test_build_model": 0.002025251043960452,
- "src/backend/tests/unit/components/languagemodels/test_openai_model.py::TestOpenAIModelComponent::test_build_model_integration": 0.01272716699168086,
- "src/backend/tests/unit/components/languagemodels/test_openai_model.py::TestOpenAIModelComponent::test_build_model_integration_reasoning": 0.0013502920046448708,
- "src/backend/tests/unit/components/languagemodels/test_openai_model.py::TestOpenAIModelComponent::test_build_model_max_tokens_zero": 0.0017601660219952464,
- "src/backend/tests/unit/components/languagemodels/test_openai_model.py::TestOpenAIModelComponent::test_build_model_no_api_key": 0.0016860420000739396,
- "src/backend/tests/unit/components/languagemodels/test_openai_model.py::TestOpenAIModelComponent::test_build_model_reasoning_model": 0.0023898330109659582,
- "src/backend/tests/unit/components/languagemodels/test_openai_model.py::TestOpenAIModelComponent::test_build_model_with_json_mode": 0.001903833996038884,
- "src/backend/tests/unit/components/languagemodels/test_openai_model.py::TestOpenAIModelComponent::test_component_versions[1.0.19]": 0.0004895839956589043,
- "src/backend/tests/unit/components/languagemodels/test_openai_model.py::TestOpenAIModelComponent::test_component_versions[1.1.0]": 0.00047758303117007017,
- "src/backend/tests/unit/components/languagemodels/test_openai_model.py::TestOpenAIModelComponent::test_component_versions[1.1.1]": 0.00044966701534576714,
- "src/backend/tests/unit/components/languagemodels/test_openai_model.py::TestOpenAIModelComponent::test_get_exception_message_bad_request_error": 0.0010025420051533729,
- "src/backend/tests/unit/components/languagemodels/test_openai_model.py::TestOpenAIModelComponent::test_get_exception_message_no_openai_import": 0.0021924999891780317,
- "src/backend/tests/unit/components/languagemodels/test_openai_model.py::TestOpenAIModelComponent::test_get_exception_message_other_exception": 0.0009440830035600811,
- "src/backend/tests/unit/components/languagemodels/test_openai_model.py::TestOpenAIModelComponent::test_latest_version": 0.003186875954270363,
- "src/backend/tests/unit/components/languagemodels/test_openai_model.py::TestOpenAIModelComponent::test_update_build_config_reasoning_model": 0.0008847920107655227,
- "src/backend/tests/unit/components/languagemodels/test_xai.py::TestXAIComponent::test_all_versions_have_a_file_name_defined": 0.00048558300477452576,
- "src/backend/tests/unit/components/languagemodels/test_xai.py::TestXAIComponent::test_build_model": 0.0011021259997505695,
- "src/backend/tests/unit/components/languagemodels/test_xai.py::TestXAIComponent::test_build_model_error": 0.0012242929951753467,
- "src/backend/tests/unit/components/languagemodels/test_xai.py::TestXAIComponent::test_component_versions[1.0.19]": 0.00046616699546575546,
- "src/backend/tests/unit/components/languagemodels/test_xai.py::TestXAIComponent::test_component_versions[1.1.0]": 0.00044408300891518593,
- "src/backend/tests/unit/components/languagemodels/test_xai.py::TestXAIComponent::test_component_versions[1.1.1]": 0.0004647089808713645,
- "src/backend/tests/unit/components/languagemodels/test_xai.py::TestXAIComponent::test_get_models": 0.0009869580098893493,
- "src/backend/tests/unit/components/languagemodels/test_xai.py::TestXAIComponent::test_get_models_no_api_key": 0.0007053340086713433,
- "src/backend/tests/unit/components/languagemodels/test_xai.py::TestXAIComponent::test_initialization": 0.0007046660175547004,
- "src/backend/tests/unit/components/languagemodels/test_xai.py::TestXAIComponent::test_inputs": 0.0007589170127175748,
- "src/backend/tests/unit/components/languagemodels/test_xai.py::TestXAIComponent::test_json_mode": 0.0015162500203587115,
- "src/backend/tests/unit/components/languagemodels/test_xai.py::TestXAIComponent::test_latest_version": 0.0026592499925754964,
- "src/backend/tests/unit/components/languagemodels/test_xai.py::TestXAIComponent::test_template": 0.01161912499810569,
- "src/backend/tests/unit/components/languagemodels/test_xai.py::TestXAIComponent::test_update_build_config": 0.8106150010135025,
- "src/backend/tests/unit/components/logic/test_loop.py::TestLoopComponentWithAPI::test_all_versions_have_a_file_name_defined": 1.953069500013953,
- "src/backend/tests/unit/components/logic/test_loop.py::TestLoopComponentWithAPI::test_build_flow_loop": 2.7657917499891482,
- "src/backend/tests/unit/components/logic/test_loop.py::TestLoopComponentWithAPI::test_component_versions[1.0.19]": 1.9741800840129144,
- "src/backend/tests/unit/components/logic/test_loop.py::TestLoopComponentWithAPI::test_component_versions[1.1.0]": 2.0093055829929654,
- "src/backend/tests/unit/components/logic/test_loop.py::TestLoopComponentWithAPI::test_component_versions[1.1.1]": 1.9778604989987798,
- "src/backend/tests/unit/components/logic/test_loop.py::TestLoopComponentWithAPI::test_latest_version": 1.9571893329848535,
- "src/backend/tests/unit/components/logic/test_loop.py::TestLoopComponentWithAPI::test_run_flow_loop": 3.073773209034698,
- "src/backend/tests/unit/components/logic/test_loop.py::test_loop_flow": 1.617342416982865,
- "src/backend/tests/unit/components/models/test_ChatOllama_component.py::test_build_model": 0.0020211669616401196,
- "src/backend/tests/unit/components/models/test_ChatOllama_component.py::test_get_model_failure": 0.0068002091138623655,
- "src/backend/tests/unit/components/models/test_ChatOllama_component.py::test_get_model_success": 0.015780292043928057,
- "src/backend/tests/unit/components/models/test_ChatOllama_component.py::test_update_build_config_keep_alive": 0.0008187499479390681,
- "src/backend/tests/unit/components/models/test_ChatOllama_component.py::test_update_build_config_mirostat_disabled": 0.0013394170091487467,
- "src/backend/tests/unit/components/models/test_ChatOllama_component.py::test_update_build_config_mirostat_enabled": 0.0016756660188548267,
- "src/backend/tests/unit/components/models/test_ChatOllama_component.py::test_update_build_config_model_name": 0.0062951669679023325,
- "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_empty_str_endpoint": 0.0010600269999940792,
- "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_invalid_endpoint": 0.0010167260002162948,
- "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_none_endpoint": 0.001458420000062688,
- "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[AquilaChat-7B]": 0.0010182190001160052,
- "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[BLOOMZ-7B]": 0.0010033209998709935,
- "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[ChatGLM2-6B-32K]": 0.0009943949999069446,
- "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[EB-turbo-AppBuilder]": 0.0010539460001837142,
- "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[ERNIE 3.5]": 0.0009903969998958928,
- "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[ERNIE Speed-AppBuilder]": 0.0010340789999645494,
- "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[ERNIE Speed]": 0.0009939629999280442,
- "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[ERNIE-3.5-8K]": 0.001002780000135317,
- "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[ERNIE-4.0-8K]": 0.0009947549999651528,
- "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[ERNIE-Bot-4]": 0.000986598999816124,
- "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[ERNIE-Bot-turbo-AI]": 0.0010277369999585062,
- "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[ERNIE-Bot]": 0.000998371999912706,
- "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[ERNIE-Lite-8K-0308]": 0.000991880000128731,
- "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[ERNIE-Speed-128k]": 0.001381986999831497,
- "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[ERNIE-Speed-8K]": 0.0009957570002825378,
- "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[ERNIE-Speed]": 0.0009906669997690187,
- "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[Llama-2-13b-chat]": 0.00097524800003157,
- "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[Llama-2-70b-chat]": 0.0010122179999143555,
- "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[Llama-2-7b-chat]": 0.0009800070001801942,
- "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[Mixtral-8x7B-Instruct]": 0.0010096819999034778,
- "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[Qianfan-BLOOMZ-7B-compressed]": 0.0009769710002274223,
- "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[Qianfan-Chinese-Llama-2-13B]": 0.001579184000092937,
- "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[Qianfan-Chinese-Llama-2-7B]": 0.001041391999933694,
- "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[XuanYuan-70B-Chat-4bit]": 0.00100266799995552,
- "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[Yi-34B-Chat]": 0.0011193870000170136,
- "src/backend/tests/unit/components/models/test_chatollama_component.py::test_build_model": 0.11027329100033967,
- "src/backend/tests/unit/components/models/test_chatollama_component.py::test_get_model_failure": 0.02725019399986195,
- "src/backend/tests/unit/components/models/test_chatollama_component.py::test_get_model_success": 0.02962537399980647,
- "src/backend/tests/unit/components/models/test_chatollama_component.py::test_get_models_failure": 0.028048700000226745,
- "src/backend/tests/unit/components/models/test_chatollama_component.py::test_get_models_success": 0.029758339000181877,
- "src/backend/tests/unit/components/models/test_chatollama_component.py::test_update_build_config_keep_alive": 0.004108060999897134,
- "src/backend/tests/unit/components/models/test_chatollama_component.py::test_update_build_config_mirostat_disabled": 0.004956932000141023,
- "src/backend/tests/unit/components/models/test_chatollama_component.py::test_update_build_config_mirostat_enabled": 0.003847954999855574,
- "src/backend/tests/unit/components/models/test_chatollama_component.py::test_update_build_config_model_name": 0.12069272899998396,
- "src/backend/tests/unit/components/models/test_deepseek.py::test_deepseek_build_model": 0.0032321359999514243,
- "src/backend/tests/unit/components/models/test_deepseek.py::test_deepseek_build_model[0.5-100]": 0.003530765000050451,
- "src/backend/tests/unit/components/models/test_deepseek.py::test_deepseek_build_model[1.0-500]": 0.0032251240002096893,
- "src/backend/tests/unit/components/models/test_deepseek.py::test_deepseek_build_model[1.5-1000]": 0.0032302319996233564,
- "src/backend/tests/unit/components/models/test_deepseek.py::test_deepseek_error_handling": 0.0030071959999986575,
- "src/backend/tests/unit/components/models/test_deepseek.py::test_deepseek_get_models": 0.0036159830001452065,
- "src/backend/tests/unit/components/models/test_deepseek.py::test_deepseek_initialization": 0.0030138490001263563,
- "src/backend/tests/unit/components/models/test_deepseek.py::test_deepseek_template": 0.02356655199969282,
- "src/backend/tests/unit/components/models/test_embedding_model_component.py::TestEmbeddingModelComponent::test_all_versions_have_a_file_name_defined": 1.9666760420368519,
- "src/backend/tests/unit/components/models/test_embedding_model_component.py::TestEmbeddingModelComponent::test_build_embeddings_openai": 2.1013599990401417,
- "src/backend/tests/unit/components/models/test_embedding_model_component.py::TestEmbeddingModelComponent::test_build_embeddings_openai_missing_api_key": 2.008826000004774,
- "src/backend/tests/unit/components/models/test_embedding_model_component.py::TestEmbeddingModelComponent::test_build_embeddings_unknown_provider": 1.9734617499634624,
- "src/backend/tests/unit/components/models/test_embedding_model_component.py::TestEmbeddingModelComponent::test_component_versions[1.0.19]": 1.966638916026568,
- "src/backend/tests/unit/components/models/test_embedding_model_component.py::TestEmbeddingModelComponent::test_component_versions[1.1.0]": 1.9772514589712955,
- "src/backend/tests/unit/components/models/test_embedding_model_component.py::TestEmbeddingModelComponent::test_component_versions[1.1.1]": 1.9998762500181329,
- "src/backend/tests/unit/components/models/test_embedding_model_component.py::TestEmbeddingModelComponent::test_latest_version": 1.9665260840265546,
- "src/backend/tests/unit/components/models/test_embedding_model_component.py::TestEmbeddingModelComponent::test_update_build_config_openai": 6.623338917008368,
- "src/backend/tests/unit/components/models/test_huggingface.py::test_huggingface_inputs": 0.002935343000217472,
- "src/backend/tests/unit/components/models/test_language_model_component.py::TestLanguageModelComponent::test_all_versions_have_a_file_name_defined": 0.0015185420052148402,
- "src/backend/tests/unit/components/models/test_language_model_component.py::TestLanguageModelComponent::test_anthropic_live_api": 0.001195958029711619,
- "src/backend/tests/unit/components/models/test_language_model_component.py::TestLanguageModelComponent::test_anthropic_model_creation": 0.0016326250042766333,
- "src/backend/tests/unit/components/models/test_language_model_component.py::TestLanguageModelComponent::test_build_model_anthropic": 2.072215417999928,
- "src/backend/tests/unit/components/models/test_language_model_component.py::TestLanguageModelComponent::test_build_model_anthropic_missing_api_key": 0.0010029179975390434,
- "src/backend/tests/unit/components/models/test_language_model_component.py::TestLanguageModelComponent::test_build_model_google_missing_api_key": 0.0009371260239277035,
- "src/backend/tests/unit/components/models/test_language_model_component.py::TestLanguageModelComponent::test_build_model_openai": 2.102755736000063,
- "src/backend/tests/unit/components/models/test_language_model_component.py::TestLanguageModelComponent::test_build_model_openai_missing_api_key": 0.0012453749950509518,
- "src/backend/tests/unit/components/models/test_language_model_component.py::TestLanguageModelComponent::test_build_model_unknown_provider": 0.0009012070077005774,
- "src/backend/tests/unit/components/models/test_language_model_component.py::TestLanguageModelComponent::test_component_versions[1.0.19]": 0.0019312919757794589,
- "src/backend/tests/unit/components/models/test_language_model_component.py::TestLanguageModelComponent::test_component_versions[1.1.0]": 0.0015764170093461871,
- "src/backend/tests/unit/components/models/test_language_model_component.py::TestLanguageModelComponent::test_component_versions[1.1.1]": 0.0011154170206282288,
- "src/backend/tests/unit/components/models/test_language_model_component.py::TestLanguageModelComponent::test_google_live_api": 0.0004305840120650828,
- "src/backend/tests/unit/components/models/test_language_model_component.py::TestLanguageModelComponent::test_google_model_creation": 0.01996941602556035,
- "src/backend/tests/unit/components/models/test_language_model_component.py::TestLanguageModelComponent::test_latest_version": 0.009186249983031303,
- "src/backend/tests/unit/components/models/test_language_model_component.py::TestLanguageModelComponent::test_openai_live_api": 0.0016880829934962094,
- "src/backend/tests/unit/components/models/test_language_model_component.py::TestLanguageModelComponent::test_openai_model_creation": 0.01594841602491215,
- "src/backend/tests/unit/components/models/test_language_model_component.py::TestLanguageModelComponent::test_update_build_config_anthropic": 0.0011711659608408809,
- "src/backend/tests/unit/components/models/test_language_model_component.py::TestLanguageModelComponent::test_update_build_config_google": 0.001020042021991685,
- "src/backend/tests/unit/components/models/test_language_model_component.py::TestLanguageModelComponent::test_update_build_config_openai": 0.0016849999956320971,
- "src/backend/tests/unit/components/models/test_xai.py::TestXAIComponent::test_all_versions_have_a_file_name_defined": 0.0014568770002370002,
- "src/backend/tests/unit/components/models/test_xai.py::TestXAIComponent::test_build_model": 0.003458199999840872,
- "src/backend/tests/unit/components/models/test_xai.py::TestXAIComponent::test_build_model_error": 0.0043968889999632665,
- "src/backend/tests/unit/components/models/test_xai.py::TestXAIComponent::test_component_versions[1.0.19]": 0.001408326000046145,
- "src/backend/tests/unit/components/models/test_xai.py::TestXAIComponent::test_component_versions[1.1.0]": 0.001404348000050959,
- "src/backend/tests/unit/components/models/test_xai.py::TestXAIComponent::test_component_versions[1.1.1]": 0.0023752489998969395,
- "src/backend/tests/unit/components/models/test_xai.py::TestXAIComponent::test_get_models": 0.00338426200005415,
- "src/backend/tests/unit/components/models/test_xai.py::TestXAIComponent::test_get_models_no_api_key": 0.002824335000013889,
- "src/backend/tests/unit/components/models/test_xai.py::TestXAIComponent::test_initialization": 0.0028584300000602525,
- "src/backend/tests/unit/components/models/test_xai.py::TestXAIComponent::test_inputs": 0.0029244240001844446,
- "src/backend/tests/unit/components/models/test_xai.py::TestXAIComponent::test_json_mode": 0.0036447470001803595,
- "src/backend/tests/unit/components/models/test_xai.py::TestXAIComponent::test_latest_version": 0.006742511999846101,
- "src/backend/tests/unit/components/models/test_xai.py::TestXAIComponent::test_template": 0.025210852999862254,
- "src/backend/tests/unit/components/models/test_xai.py::TestXAIComponent::test_update_build_config": 0.4465963840000313,
- "src/backend/tests/unit/components/outputs/test_chat_output_component.py::TestChatOutput::test_all_versions_have_a_file_name_defined": 1.9761364989972208,
- "src/backend/tests/unit/components/outputs/test_chat_output_component.py::TestChatOutput::test_component_versions[1.0.19]": 2.251967625983525,
- "src/backend/tests/unit/components/outputs/test_chat_output_component.py::TestChatOutput::test_component_versions[1.1.0]": 2.2353118340251967,
- "src/backend/tests/unit/components/outputs/test_chat_output_component.py::TestChatOutput::test_component_versions[1.1.1]": 2.362934500008123,
- "src/backend/tests/unit/components/outputs/test_chat_output_component.py::TestChatOutput::test_invalid_input": 1.9874021660070866,
- "src/backend/tests/unit/components/outputs/test_chat_output_component.py::TestChatOutput::test_latest_version": 2.0073345830023754,
- "src/backend/tests/unit/components/outputs/test_chat_output_component.py::TestChatOutput::test_process_data_input": 1.9732563339930493,
- "src/backend/tests/unit/components/outputs/test_chat_output_component.py::TestChatOutput::test_process_dataframe_input": 1.9999032909981906,
- "src/backend/tests/unit/components/outputs/test_chat_output_component.py::TestChatOutput::test_process_list_input": 1.983463959011715,
- "src/backend/tests/unit/components/outputs/test_chat_output_component.py::TestChatOutput::test_process_message_input": 1.9960387089813594,
- "src/backend/tests/unit/components/outputs/test_chat_output_component.py::TestChatOutput::test_process_string_input": 1.9950223740015645,
- "src/backend/tests/unit/components/outputs/test_output_components.py::TestChatOutput::test_all_versions_have_a_file_name_defined": 4.963613892000012,
- "src/backend/tests/unit/components/outputs/test_output_components.py::TestChatOutput::test_component_versions[1.0.17]": 3.6106157921021804,
- "src/backend/tests/unit/components/outputs/test_output_components.py::TestChatOutput::test_component_versions[1.0.18]": 3.6919090420706198,
- "src/backend/tests/unit/components/outputs/test_output_components.py::TestChatOutput::test_component_versions[1.0.19]": 4.94933817499998,
- "src/backend/tests/unit/components/outputs/test_output_components.py::TestChatOutput::test_component_versions[1.1.0]": 4.997824592000029,
- "src/backend/tests/unit/components/outputs/test_output_components.py::TestChatOutput::test_component_versions[1.1.1]": 5.098571616000072,
- "src/backend/tests/unit/components/outputs/test_output_components.py::TestChatOutput::test_latest_version": 6.680932718999998,
- "src/backend/tests/unit/components/outputs/test_output_components.py::TestTextOutputComponent::test_all_versions_have_a_file_name_defined": 0.0011477500083856285,
- "src/backend/tests/unit/components/outputs/test_output_components.py::TestTextOutputComponent::test_component_versions[1.0.17]": 0.27941045799525455,
- "src/backend/tests/unit/components/outputs/test_output_components.py::TestTextOutputComponent::test_component_versions[1.0.18]": 0.24612879107007757,
- "src/backend/tests/unit/components/outputs/test_output_components.py::TestTextOutputComponent::test_component_versions[1.0.19]": 0.21448445896385238,
- "src/backend/tests/unit/components/outputs/test_output_components.py::TestTextOutputComponent::test_component_versions[1.1.0]": 0.2456407490244601,
- "src/backend/tests/unit/components/outputs/test_output_components.py::TestTextOutputComponent::test_component_versions[1.1.1]": 0.22824154200498015,
- "src/backend/tests/unit/components/outputs/test_output_components.py::TestTextOutputComponent::test_latest_version": 0.007045249978546053,
- "src/backend/tests/unit/components/processing/test_batch_run_component.py::TestBatchRunComponent::test_add_metadata_failure": 0.0007479999912902713,
- "src/backend/tests/unit/components/processing/test_batch_run_component.py::TestBatchRunComponent::test_add_metadata_success": 0.0007760419975966215,
- "src/backend/tests/unit/components/processing/test_batch_run_component.py::TestBatchRunComponent::test_all_versions_have_a_file_name_defined": 0.002576000028057024,
- "src/backend/tests/unit/components/processing/test_batch_run_component.py::TestBatchRunComponent::test_batch_run_error_with_metadata": 0.0015152500127442181,
- "src/backend/tests/unit/components/processing/test_batch_run_component.py::TestBatchRunComponent::test_batch_run_error_without_metadata": 0.0011876680073328316,
- "src/backend/tests/unit/components/processing/test_batch_run_component.py::TestBatchRunComponent::test_batch_run_without_metadata": 0.0040122499922290444,
- "src/backend/tests/unit/components/processing/test_batch_run_component.py::TestBatchRunComponent::test_component_versions[1.0.19]": 0.002882583998143673,
- "src/backend/tests/unit/components/processing/test_batch_run_component.py::TestBatchRunComponent::test_component_versions[1.1.0]": 0.00135087501257658,
- "src/backend/tests/unit/components/processing/test_batch_run_component.py::TestBatchRunComponent::test_component_versions[1.1.1]": 0.0010500000207684934,
- "src/backend/tests/unit/components/processing/test_batch_run_component.py::TestBatchRunComponent::test_create_base_row": 0.0008134170202538371,
- "src/backend/tests/unit/components/processing/test_batch_run_component.py::TestBatchRunComponent::test_empty_dataframe": 0.0020067080040462315,
- "src/backend/tests/unit/components/processing/test_batch_run_component.py::TestBatchRunComponent::test_invalid_column_name": 0.0012512070243246853,
- "src/backend/tests/unit/components/processing/test_batch_run_component.py::TestBatchRunComponent::test_latest_version": 0.009444124036235735,
- "src/backend/tests/unit/components/processing/test_batch_run_component.py::TestBatchRunComponent::test_metadata_disabled": 0.0007375420245807618,
- "src/backend/tests/unit/components/processing/test_batch_run_component.py::TestBatchRunComponent::test_non_string_column_conversion": 0.003182707994710654,
- "src/backend/tests/unit/components/processing/test_batch_run_component.py::TestBatchRunComponent::test_operational_error_with_metadata": 0.002109082997776568,
- "src/backend/tests/unit/components/processing/test_batch_run_component.py::TestBatchRunComponent::test_operational_error_without_metadata": 0.0020837909542024136,
- "src/backend/tests/unit/components/processing/test_batch_run_component.py::TestBatchRunComponent::test_successful_batch_run_with_system_message": 0.007292125024832785,
- "src/backend/tests/unit/components/processing/test_data_operations_component.py::TestDataOperationsComponent::test_all_versions_have_a_file_name_defined": 0.00047270796494558454,
- "src/backend/tests/unit/components/processing/test_data_operations_component.py::TestDataOperationsComponent::test_append_update": 0.0007291670190170407,
- "src/backend/tests/unit/components/processing/test_data_operations_component.py::TestDataOperationsComponent::test_combine": 0.0007952910091262311,
- "src/backend/tests/unit/components/processing/test_data_operations_component.py::TestDataOperationsComponent::test_combine_with_overlapping_keys": 0.0007725840259809047,
- "src/backend/tests/unit/components/processing/test_data_operations_component.py::TestDataOperationsComponent::test_component_versions[1.0.19]": 0.0004806669894605875,
- "src/backend/tests/unit/components/processing/test_data_operations_component.py::TestDataOperationsComponent::test_component_versions[1.1.0]": 0.00046954199206084013,
- "src/backend/tests/unit/components/processing/test_data_operations_component.py::TestDataOperationsComponent::test_component_versions[1.1.1]": 0.00048137400881387293,
- "src/backend/tests/unit/components/processing/test_data_operations_component.py::TestDataOperationsComponent::test_filter_values": 0.0007591659668833017,
- "src/backend/tests/unit/components/processing/test_data_operations_component.py::TestDataOperationsComponent::test_get_normalized_data": 0.0006292919861152768,
- "src/backend/tests/unit/components/processing/test_data_operations_component.py::TestDataOperationsComponent::test_latest_version": 0.0037052910192869604,
- "src/backend/tests/unit/components/processing/test_data_operations_component.py::TestDataOperationsComponent::test_literal_eval": 0.000869417010108009,
- "src/backend/tests/unit/components/processing/test_data_operations_component.py::TestDataOperationsComponent::test_no_actions": 0.0006687910354230553,
- "src/backend/tests/unit/components/processing/test_data_operations_component.py::TestDataOperationsComponent::test_remove_keys": 0.0009336669754702598,
- "src/backend/tests/unit/components/processing/test_data_operations_component.py::TestDataOperationsComponent::test_rename_keys": 0.0008347090042661875,
- "src/backend/tests/unit/components/processing/test_data_operations_component.py::TestDataOperationsComponent::test_select_keys": 0.0008267910161521286,
- "src/backend/tests/unit/components/processing/test_data_operations_component.py::TestDataOperationsComponent::test_validate_single_data_with_multiple_data": 0.0008330409764312208,
- "src/backend/tests/unit/components/processing/test_data_to_dataframe_component.py::TestDataToDataFrameComponent::test_all_versions_have_a_file_name_defined": 0.00047512599849142134,
- "src/backend/tests/unit/components/processing/test_data_to_dataframe_component.py::TestDataToDataFrameComponent::test_basic_setup": 0.000534582999534905,
- "src/backend/tests/unit/components/processing/test_data_to_dataframe_component.py::TestDataToDataFrameComponent::test_build_dataframe_basic": 0.0010015829757321626,
- "src/backend/tests/unit/components/processing/test_data_to_dataframe_component.py::TestDataToDataFrameComponent::test_component_versions[1.0.19]": 0.00044491697917692363,
- "src/backend/tests/unit/components/processing/test_data_to_dataframe_component.py::TestDataToDataFrameComponent::test_component_versions[1.1.0]": 0.00044504200923256576,
- "src/backend/tests/unit/components/processing/test_data_to_dataframe_component.py::TestDataToDataFrameComponent::test_component_versions[1.1.1]": 0.00044583401177078485,
- "src/backend/tests/unit/components/processing/test_data_to_dataframe_component.py::TestDataToDataFrameComponent::test_data_without_data_dict": 0.0007136259810067713,
- "src/backend/tests/unit/components/processing/test_data_to_dataframe_component.py::TestDataToDataFrameComponent::test_data_without_text": 0.0007191670010797679,
- "src/backend/tests/unit/components/processing/test_data_to_dataframe_component.py::TestDataToDataFrameComponent::test_empty_data_list": 0.0007202080159913749,
- "src/backend/tests/unit/components/processing/test_data_to_dataframe_component.py::TestDataToDataFrameComponent::test_invalid_input_type": 0.0005318750045262277,
- "src/backend/tests/unit/components/processing/test_data_to_dataframe_component.py::TestDataToDataFrameComponent::test_latest_version": 0.002385083003900945,
- "src/backend/tests/unit/components/processing/test_data_to_dataframe_component.py::TestDataToDataFrameComponent::test_mixed_data_fields": 0.0008618330175522715,
- "src/backend/tests/unit/components/processing/test_data_to_dataframe_component.py::TestDataToDataFrameComponent::test_single_data_input": 0.000791291007772088,
- "src/backend/tests/unit/components/processing/test_data_to_dataframe_component.py::TestDataToDataFrameComponent::test_status_update": 0.0007389160164166242,
- "src/backend/tests/unit/components/processing/test_dataframe_operations.py::TestBasicOperations::test_add_column": 0.0014821249642409384,
- "src/backend/tests/unit/components/processing/test_dataframe_operations.py::TestBasicOperations::test_drop_column": 0.0014619159628637135,
- "src/backend/tests/unit/components/processing/test_dataframe_operations.py::TestBasicOperations::test_head_operation": 0.0011852490133605897,
- "src/backend/tests/unit/components/processing/test_dataframe_operations.py::TestBasicOperations::test_rename_column": 0.0013672499917447567,
- "src/backend/tests/unit/components/processing/test_dataframe_operations.py::TestBasicOperations::test_sort_ascending": 0.001320459006819874,
- "src/backend/tests/unit/components/processing/test_dataframe_operations.py::TestBasicOperations::test_sort_descending": 0.0012335839855950326,
- "src/backend/tests/unit/components/processing/test_dataframe_operations.py::TestBasicOperations::test_tail_operation": 0.0011675000132527202,
- "src/backend/tests/unit/components/processing/test_dataframe_operations.py::TestDataTypes::test_mixed_data_types": 0.001247958978638053,
- "src/backend/tests/unit/components/processing/test_dataframe_operations.py::TestDataTypes::test_numeric_string_conversion": 0.0012432079820428044,
- "src/backend/tests/unit/components/processing/test_dataframe_operations.py::TestDynamicUI::test_empty_selection_hides_fields": 0.0005604990292340517,
- "src/backend/tests/unit/components/processing/test_dataframe_operations.py::TestDynamicUI::test_filter_fields_show": 0.0005782499792985618,
- "src/backend/tests/unit/components/processing/test_dataframe_operations.py::TestDynamicUI::test_sort_fields_show": 0.000565332971746102,
- "src/backend/tests/unit/components/processing/test_dataframe_operations.py::TestEdgeCases::test_empty_dataframe": 0.0009371670021209866,
- "src/backend/tests/unit/components/processing/test_dataframe_operations.py::TestEdgeCases::test_empty_selection": 0.0009517500293441117,
- "src/backend/tests/unit/components/processing/test_dataframe_operations.py::TestEdgeCases::test_filter_no_matches": 0.0012013329833280295,
- "src/backend/tests/unit/components/processing/test_dataframe_operations.py::TestEdgeCases::test_invalid_operation_format": 0.0009182500361930579,
- "src/backend/tests/unit/components/processing/test_dataframe_operations.py::TestEdgeCases::test_non_existent_column": 0.0010046249954029918,
- "src/backend/tests/unit/components/processing/test_dataframe_operations.py::TestFilterOperations::test_filter_contains": 0.001431501004844904,
- "src/backend/tests/unit/components/processing/test_dataframe_operations.py::TestFilterOperations::test_filter_ends_with": 0.0012644159724004567,
- "src/backend/tests/unit/components/processing/test_dataframe_operations.py::TestFilterOperations::test_filter_equals": 0.001389832963468507,
- "src/backend/tests/unit/components/processing/test_dataframe_operations.py::TestFilterOperations::test_filter_greater_than": 0.0013107500271871686,
- "src/backend/tests/unit/components/processing/test_dataframe_operations.py::TestFilterOperations::test_filter_less_than": 0.001239000994246453,
- "src/backend/tests/unit/components/processing/test_dataframe_operations.py::TestFilterOperations::test_filter_not_equals": 0.00130095801432617,
- "src/backend/tests/unit/components/processing/test_dataframe_operations.py::TestFilterOperations::test_filter_starts_with": 0.0012887089978903532,
- "src/backend/tests/unit/components/processing/test_dataframe_operations.py::test_all_filter_operators_comprehensive": 0.004042668006150052,
- "src/backend/tests/unit/components/processing/test_dataframe_operations.py::test_empty_dataframe": 0.0010818850000759994,
- "src/backend/tests/unit/components/processing/test_dataframe_operations.py::test_invalid_operation": 0.0009657479999987117,
- "src/backend/tests/unit/components/processing/test_dataframe_operations.py::test_non_existent_column": 0.0010275030000457264,
- "src/backend/tests/unit/components/processing/test_dataframe_operations.py::test_operations[Add Column-expected_columns0-expected_values0]": 0.0017273470000418456,
- "src/backend/tests/unit/components/processing/test_dataframe_operations.py::test_operations[Drop Column-expected_columns1-None]": 0.00167941699999119,
- "src/backend/tests/unit/components/processing/test_dataframe_operations.py::test_operations[Filter-expected_columns2-expected_values2]": 0.0016480389999742329,
- "src/backend/tests/unit/components/processing/test_dataframe_operations.py::test_operations[Head-expected_columns6-expected_values6]": 0.0014022710000745064,
- "src/backend/tests/unit/components/processing/test_dataframe_operations.py::test_operations[Rename Column-expected_columns4-None]": 0.0014427260000502429,
- "src/backend/tests/unit/components/processing/test_dataframe_operations.py::test_operations[Replace Value-expected_columns8-expected_values8]": 0.0015724880000789199,
- "src/backend/tests/unit/components/processing/test_dataframe_operations.py::test_operations[Select Columns-expected_columns5-None]": 0.0016046690000166564,
- "src/backend/tests/unit/components/processing/test_dataframe_operations.py::test_operations[Sort-expected_columns3-expected_values3]": 0.0015496350000603343,
- "src/backend/tests/unit/components/processing/test_dataframe_operations.py::test_operations[Tail-expected_columns7-expected_values7]": 0.0013863620000620358,
- "src/backend/tests/unit/components/processing/test_lambda_filter.py::TestLambdaFilterComponent::test_all_versions_have_a_file_name_defined": 0.000584166991757229,
- "src/backend/tests/unit/components/processing/test_lambda_filter.py::TestLambdaFilterComponent::test_component_versions[1.0.19]": 0.0005647510115522891,
- "src/backend/tests/unit/components/processing/test_lambda_filter.py::TestLambdaFilterComponent::test_component_versions[1.1.0]": 0.0005597500130534172,
- "src/backend/tests/unit/components/processing/test_lambda_filter.py::TestLambdaFilterComponent::test_component_versions[1.1.1]": 0.0005428759905043989,
- "src/backend/tests/unit/components/processing/test_lambda_filter.py::TestLambdaFilterComponent::test_get_data_structure": 0.000708332983776927,
- "src/backend/tests/unit/components/processing/test_lambda_filter.py::TestLambdaFilterComponent::test_invalid_lambda_response": 0.0026773340068757534,
- "src/backend/tests/unit/components/processing/test_lambda_filter.py::TestLambdaFilterComponent::test_lambda_with_complex_data_structure": 0.0025203330151271075,
- "src/backend/tests/unit/components/processing/test_lambda_filter.py::TestLambdaFilterComponent::test_lambda_with_large_dataset": 0.00555100102792494,
- "src/backend/tests/unit/components/processing/test_lambda_filter.py::TestLambdaFilterComponent::test_latest_version": 0.0028246669680811465,
- "src/backend/tests/unit/components/processing/test_lambda_filter.py::TestLambdaFilterComponent::test_successful_lambda_generation": 0.0035319159796927124,
- "src/backend/tests/unit/components/processing/test_lambda_filter.py::TestLambdaFilterComponent::test_validate_lambda": 0.0006867080228403211,
- "src/backend/tests/unit/components/processing/test_parse_dataframe_component.py::TestParseDataFrameComponent::test_all_versions_have_a_file_name_defined": 0.0006262920214794576,
- "src/backend/tests/unit/components/processing/test_parse_dataframe_component.py::TestParseDataFrameComponent::test_async_invocation": 0.0011381250224076211,
- "src/backend/tests/unit/components/processing/test_parse_dataframe_component.py::TestParseDataFrameComponent::test_component_versions[1.0.19]": 0.0006180409982334822,
- "src/backend/tests/unit/components/processing/test_parse_dataframe_component.py::TestParseDataFrameComponent::test_component_versions[1.1.0]": 0.0005995420215185732,
- "src/backend/tests/unit/components/processing/test_parse_dataframe_component.py::TestParseDataFrameComponent::test_component_versions[1.1.1]": 0.0006012509984429926,
- "src/backend/tests/unit/components/processing/test_parse_dataframe_component.py::TestParseDataFrameComponent::test_empty_dataframe": 0.0008274170104414225,
- "src/backend/tests/unit/components/processing/test_parse_dataframe_component.py::TestParseDataFrameComponent::test_invalid_template_keys": 0.0008424160478170961,
- "src/backend/tests/unit/components/processing/test_parse_dataframe_component.py::TestParseDataFrameComponent::test_large_dataframe": 0.15886841600877233,
- "src/backend/tests/unit/components/processing/test_parse_dataframe_component.py::TestParseDataFrameComponent::test_latest_version": 0.0021822910057380795,
- "src/backend/tests/unit/components/processing/test_parse_dataframe_component.py::TestParseDataFrameComponent::test_multiple_column_template": 0.000980209035333246,
- "src/backend/tests/unit/components/processing/test_parse_dataframe_component.py::TestParseDataFrameComponent::test_nan_values": 0.0009254579781554639,
- "src/backend/tests/unit/components/processing/test_parse_dataframe_component.py::TestParseDataFrameComponent::test_parse_with_custom_separator": 0.0008924990252126008,
- "src/backend/tests/unit/components/processing/test_parse_dataframe_component.py::TestParseDataFrameComponent::test_parse_with_custom_template": 0.000970374996541068,
- "src/backend/tests/unit/components/processing/test_parse_dataframe_component.py::TestParseDataFrameComponent::test_successful_parse_with_default_template": 0.0009839590056799352,
- "src/backend/tests/unit/components/processing/test_parse_dataframe_component.py::TestParseDataFrameComponent::test_various_data_types": 0.0022855839924886823,
- "src/backend/tests/unit/components/processing/test_parser_component.py::TestParserComponent::test_all_versions_have_a_file_name_defined": 0.0008213760156650096,
- "src/backend/tests/unit/components/processing/test_parser_component.py::TestParserComponent::test_clean_data_with_stringify": 0.0014997500111348927,
- "src/backend/tests/unit/components/processing/test_parser_component.py::TestParserComponent::test_component_versions[1.0.19]": 0.0007514989702031016,
- "src/backend/tests/unit/components/processing/test_parser_component.py::TestParserComponent::test_component_versions[1.1.0]": 0.0007023749931249768,
- "src/backend/tests/unit/components/processing/test_parser_component.py::TestParserComponent::test_component_versions[1.1.1]": 0.0006876249972265214,
- "src/backend/tests/unit/components/processing/test_parser_component.py::TestParserComponent::test_invalid_input_type": 0.000882500025909394,
- "src/backend/tests/unit/components/processing/test_parser_component.py::TestParserComponent::test_invalid_template": 0.0009483750036451966,
- "src/backend/tests/unit/components/processing/test_parser_component.py::TestParserComponent::test_latest_version": 0.0036158750008326024,
- "src/backend/tests/unit/components/processing/test_parser_component.py::TestParserComponent::test_multiple_rows_with_custom_separator": 0.0010294999810867012,
- "src/backend/tests/unit/components/processing/test_parser_component.py::TestParserComponent::test_none_input": 0.0008445419953204691,
- "src/backend/tests/unit/components/processing/test_parser_component.py::TestParserComponent::test_parse_data_object": 0.0008056660008151084,
- "src/backend/tests/unit/components/processing/test_parser_component.py::TestParserComponent::test_parse_dataframe": 0.0009217910119332373,
- "src/backend/tests/unit/components/processing/test_parser_component.py::TestParserComponent::test_stringify_data_object": 0.0007902910001575947,
- "src/backend/tests/unit/components/processing/test_parser_component.py::TestParserComponent::test_stringify_dataframe": 0.0014132489741314203,
- "src/backend/tests/unit/components/processing/test_parser_component.py::TestParserComponent::test_stringify_message_object": 0.0008132090151775628,
- "src/backend/tests/unit/components/processing/test_regex_component.py::TestRegexExtractorComponent::test_all_versions_have_a_file_name_defined": 0.00046704200212843716,
- "src/backend/tests/unit/components/processing/test_regex_component.py::TestRegexExtractorComponent::test_component_versions[1.0.19]": 0.0004544160037767142,
- "src/backend/tests/unit/components/processing/test_regex_component.py::TestRegexExtractorComponent::test_component_versions[1.1.0]": 0.00044479098869487643,
- "src/backend/tests/unit/components/processing/test_regex_component.py::TestRegexExtractorComponent::test_component_versions[1.1.1]": 0.00043604199890978634,
- "src/backend/tests/unit/components/processing/test_regex_component.py::TestRegexExtractorComponent::test_empty_input_text": 0.0005649579979944974,
- "src/backend/tests/unit/components/processing/test_regex_component.py::TestRegexExtractorComponent::test_get_matches_text_invalid_pattern": 0.0005613750254269689,
- "src/backend/tests/unit/components/processing/test_regex_component.py::TestRegexExtractorComponent::test_get_matches_text_no_matches": 0.000551291013834998,
- "src/backend/tests/unit/components/processing/test_regex_component.py::TestRegexExtractorComponent::test_get_matches_text_output": 0.0005546659813262522,
- "src/backend/tests/unit/components/processing/test_regex_component.py::TestRegexExtractorComponent::test_invalid_regex_pattern": 0.000565416004974395,
- "src/backend/tests/unit/components/processing/test_regex_component.py::TestRegexExtractorComponent::test_latest_version": 0.0022457920131273568,
- "src/backend/tests/unit/components/processing/test_regex_component.py::TestRegexExtractorComponent::test_no_matches_found": 0.0005672919796779752,
- "src/backend/tests/unit/components/processing/test_regex_component.py::TestRegexExtractorComponent::test_successful_regex_extraction": 0.0009764989954419434,
- "src/backend/tests/unit/components/processing/test_save_file_component.py::TestSaveToFileComponent::test_adjust_path_adds_extension[./test_output-csv-.csv]": 0.0001535830378998071,
- "src/backend/tests/unit/components/processing/test_save_file_component.py::TestSaveToFileComponent::test_adjust_path_adds_extension[./test_output-json-.json]": 0.00014679096057079732,
- "src/backend/tests/unit/components/processing/test_save_file_component.py::TestSaveToFileComponent::test_adjust_path_adds_extension[./test_output-markdown-.markdown]": 0.0001404999929945916,
- "src/backend/tests/unit/components/processing/test_save_file_component.py::TestSaveToFileComponent::test_adjust_path_adds_extension[./test_output-txt-.txt]": 0.00014475101488642395,
- "src/backend/tests/unit/components/processing/test_save_file_component.py::TestSaveToFileComponent::test_adjust_path_expands_home": 0.00016245798906311393,
- "src/backend/tests/unit/components/processing/test_save_file_component.py::TestSaveToFileComponent::test_adjust_path_handles_incorrect_or_excel_add[./test_output-excel-./test_output.xlsx]": 0.00016250001499429345,
- "src/backend/tests/unit/components/processing/test_save_file_component.py::TestSaveToFileComponent::test_adjust_path_handles_incorrect_or_excel_add[./test_output.txt-csv-./test_output.txt.csv]": 0.0001600409741513431,
- "src/backend/tests/unit/components/processing/test_save_file_component.py::TestSaveToFileComponent::test_adjust_path_handles_incorrect_or_excel_add[./test_output.txt-excel-./test_output.txt.xlsx]": 0.00014645798364654183,
- "src/backend/tests/unit/components/processing/test_save_file_component.py::TestSaveToFileComponent::test_adjust_path_keeps_existing_correct_extension[./test_output.csv-csv]": 0.0001496259937994182,
- "src/backend/tests/unit/components/processing/test_save_file_component.py::TestSaveToFileComponent::test_adjust_path_keeps_existing_correct_extension[./test_output.json-json]": 0.00014541696873493493,
- "src/backend/tests/unit/components/processing/test_save_file_component.py::TestSaveToFileComponent::test_adjust_path_keeps_existing_correct_extension[./test_output.markdown-markdown]": 0.00014200000441633165,
- "src/backend/tests/unit/components/processing/test_save_file_component.py::TestSaveToFileComponent::test_adjust_path_keeps_existing_correct_extension[./test_output.txt-txt]": 0.00014112499775364995,
- "src/backend/tests/unit/components/processing/test_save_file_component.py::TestSaveToFileComponent::test_adjust_path_keeps_existing_excel_extension[./test_output.xls]": 0.00041437402251176536,
- "src/backend/tests/unit/components/processing/test_save_file_component.py::TestSaveToFileComponent::test_adjust_path_keeps_existing_excel_extension[./test_output.xlsx]": 0.00014512499910779297,
- "src/backend/tests/unit/components/processing/test_save_file_component.py::TestSaveToFileComponent::test_all_versions_have_a_file_name_defined": 0.00015600098413415253,
- "src/backend/tests/unit/components/processing/test_save_file_component.py::TestSaveToFileComponent::test_basic_setup": 0.0001453740114811808,
- "src/backend/tests/unit/components/processing/test_save_file_component.py::TestSaveToFileComponent::test_component_versions[1.0.19]": 0.00015091599198058248,
- "src/backend/tests/unit/components/processing/test_save_file_component.py::TestSaveToFileComponent::test_component_versions[1.1.0]": 0.00014925000141374767,
- "src/backend/tests/unit/components/processing/test_save_file_component.py::TestSaveToFileComponent::test_component_versions[1.1.1]": 0.00014391698641702533,
- "src/backend/tests/unit/components/processing/test_save_file_component.py::TestSaveToFileComponent::test_directory_creation": 0.0001510840083938092,
- "src/backend/tests/unit/components/processing/test_save_file_component.py::TestSaveToFileComponent::test_invalid_input_type": 0.00014908300363458693,
- "src/backend/tests/unit/components/processing/test_save_file_component.py::TestSaveToFileComponent::test_latest_version": 0.00014870800077915192,
- "src/backend/tests/unit/components/processing/test_save_file_component.py::TestSaveToFileComponent::test_save_data": 0.00014229101361706853,
- "src/backend/tests/unit/components/processing/test_save_file_component.py::TestSaveToFileComponent::test_save_message": 0.00014608300989493728,
- "src/backend/tests/unit/components/processing/test_save_file_component.py::TestSaveToFileComponent::test_update_build_config_dataframe": 0.00015908299246802926,
- "src/backend/tests/unit/components/processing/test_save_to_file_component.py::TestSaveToFileComponent::test_adjust_path_adds_extension[./test_output-csv-.csv]": 0.0028634599998440535,
- "src/backend/tests/unit/components/processing/test_save_to_file_component.py::TestSaveToFileComponent::test_adjust_path_adds_extension[./test_output-json-.json]": 0.002895288999980039,
- "src/backend/tests/unit/components/processing/test_save_to_file_component.py::TestSaveToFileComponent::test_adjust_path_adds_extension[./test_output-markdown-.markdown]": 0.002877976999798193,
- "src/backend/tests/unit/components/processing/test_save_to_file_component.py::TestSaveToFileComponent::test_adjust_path_adds_extension[./test_output-txt-.txt]": 0.0028402870000263647,
- "src/backend/tests/unit/components/processing/test_save_to_file_component.py::TestSaveToFileComponent::test_adjust_path_expands_home": 0.0027695260000655253,
- "src/backend/tests/unit/components/processing/test_save_to_file_component.py::TestSaveToFileComponent::test_adjust_path_handles_incorrect_or_excel_add[./test_output-excel-./test_output.xlsx]": 0.002918451999903482,
- "src/backend/tests/unit/components/processing/test_save_to_file_component.py::TestSaveToFileComponent::test_adjust_path_handles_incorrect_or_excel_add[./test_output.txt-csv-./test_output.txt.csv]": 0.0028850989999682497,
- "src/backend/tests/unit/components/processing/test_save_to_file_component.py::TestSaveToFileComponent::test_adjust_path_handles_incorrect_or_excel_add[./test_output.txt-excel-./test_output.txt.xlsx]": 0.002860764999923049,
- "src/backend/tests/unit/components/processing/test_save_to_file_component.py::TestSaveToFileComponent::test_adjust_path_keeps_existing_correct_extension[./test_output.csv-csv]": 0.0028442539999105065,
- "src/backend/tests/unit/components/processing/test_save_to_file_component.py::TestSaveToFileComponent::test_adjust_path_keeps_existing_correct_extension[./test_output.json-json]": 0.002795462999984011,
- "src/backend/tests/unit/components/processing/test_save_to_file_component.py::TestSaveToFileComponent::test_adjust_path_keeps_existing_correct_extension[./test_output.markdown-markdown]": 0.0027939199999309494,
- "src/backend/tests/unit/components/processing/test_save_to_file_component.py::TestSaveToFileComponent::test_adjust_path_keeps_existing_correct_extension[./test_output.txt-txt]": 0.002844632999995156,
- "src/backend/tests/unit/components/processing/test_save_to_file_component.py::TestSaveToFileComponent::test_adjust_path_keeps_existing_excel_extension[./test_output.xls]": 0.0027722699999230827,
- "src/backend/tests/unit/components/processing/test_save_to_file_component.py::TestSaveToFileComponent::test_adjust_path_keeps_existing_excel_extension[./test_output.xlsx]": 0.002787618000184011,
- "src/backend/tests/unit/components/processing/test_save_to_file_component.py::TestSaveToFileComponent::test_all_versions_have_a_file_name_defined": 0.001845171000013579,
- "src/backend/tests/unit/components/processing/test_save_to_file_component.py::TestSaveToFileComponent::test_basic_setup": 0.0027739439999550086,
- "src/backend/tests/unit/components/processing/test_save_to_file_component.py::TestSaveToFileComponent::test_component_versions[1.0.19]": 0.0017930550000073708,
- "src/backend/tests/unit/components/processing/test_save_to_file_component.py::TestSaveToFileComponent::test_component_versions[1.1.0]": 0.0023453239996342745,
- "src/backend/tests/unit/components/processing/test_save_to_file_component.py::TestSaveToFileComponent::test_component_versions[1.1.1]": 0.001748421000002054,
- "src/backend/tests/unit/components/processing/test_save_to_file_component.py::TestSaveToFileComponent::test_directory_creation": 0.0045445650000601745,
- "src/backend/tests/unit/components/processing/test_save_to_file_component.py::TestSaveToFileComponent::test_invalid_input_type": 0.002921936999882746,
- "src/backend/tests/unit/components/processing/test_save_to_file_component.py::TestSaveToFileComponent::test_latest_version": 0.007096252000337699,
- "src/backend/tests/unit/components/processing/test_save_to_file_component.py::TestSaveToFileComponent::test_save_data": 0.004565492999972776,
- "src/backend/tests/unit/components/processing/test_save_to_file_component.py::TestSaveToFileComponent::test_save_message": 0.010504567999987557,
- "src/backend/tests/unit/components/processing/test_save_to_file_component.py::TestSaveToFileComponent::test_update_build_config_dataframe": 0.002705645000332879,
- "src/backend/tests/unit/components/processing/test_split_text_component.py::TestSplitTextComponent::test_all_versions_have_a_file_name_defined": 0.00048762402730062604,
- "src/backend/tests/unit/components/processing/test_split_text_component.py::TestSplitTextComponent::test_component_versions[1.0.19]": 0.23003083298681304,
- "src/backend/tests/unit/components/processing/test_split_text_component.py::TestSplitTextComponent::test_component_versions[1.1.0]": 0.2237371249939315,
- "src/backend/tests/unit/components/processing/test_split_text_component.py::TestSplitTextComponent::test_component_versions[1.1.1]": 0.23217750000185333,
- "src/backend/tests/unit/components/processing/test_split_text_component.py::TestSplitTextComponent::test_latest_version": 0.0024556239950470626,
- "src/backend/tests/unit/components/processing/test_split_text_component.py::TestSplitTextComponent::test_split_text_as_dataframe": 0.0033914260002347874,
- "src/backend/tests/unit/components/processing/test_split_text_component.py::TestSplitTextComponent::test_split_text_basic": 0.004420915967784822,
- "src/backend/tests/unit/components/processing/test_split_text_component.py::TestSplitTextComponent::test_split_text_custom_separator": 0.002672290982445702,
- "src/backend/tests/unit/components/processing/test_split_text_component.py::TestSplitTextComponent::test_split_text_empty_input": 0.001869458967121318,
- "src/backend/tests/unit/components/processing/test_split_text_component.py::TestSplitTextComponent::test_split_text_multiple_inputs": 0.001031541993143037,
- "src/backend/tests/unit/components/processing/test_split_text_component.py::TestSplitTextComponent::test_split_text_single_chunk": 0.0014055409701541066,
- "src/backend/tests/unit/components/processing/test_split_text_component.py::TestSplitTextComponent::test_split_text_with_dataframe_input": 0.0012737090000882745,
- "src/backend/tests/unit/components/processing/test_split_text_component.py::TestSplitTextComponent::test_split_text_with_metadata": 0.0033594999986235052,
- "src/backend/tests/unit/components/processing/test_split_text_component.py::TestSplitTextComponent::test_split_text_with_overlap": 0.002725625003222376,
- "src/backend/tests/unit/components/processing/test_split_text_component.py::TestSplitTextComponent::test_with_url_loader": 1.2947423329751473,
- "src/backend/tests/unit/components/processing/test_structured_output_component.py::TestStructuredOutputComponent::test_all_versions_have_a_file_name_defined": 0.0005357500049285591,
- "src/backend/tests/unit/components/processing/test_structured_output_component.py::TestStructuredOutputComponent::test_build_structured_dataframe_fails_when_base_returns_non_list": 0.006117208016803488,
- "src/backend/tests/unit/components/processing/test_structured_output_component.py::TestStructuredOutputComponent::test_build_structured_dataframe_fails_when_empty_output": 0.0061265410331543535,
- "src/backend/tests/unit/components/processing/test_structured_output_component.py::TestStructuredOutputComponent::test_build_structured_dataframe_returns_dataframe_with_multiple_data": 0.008078290004050359,
- "src/backend/tests/unit/components/processing/test_structured_output_component.py::TestStructuredOutputComponent::test_build_structured_dataframe_returns_dataframe_with_single_data": 0.007081917021423578,
- "src/backend/tests/unit/components/processing/test_structured_output_component.py::TestStructuredOutputComponent::test_build_structured_output_data_object_properties": 0.009108917001867667,
- "src/backend/tests/unit/components/processing/test_structured_output_component.py::TestStructuredOutputComponent::test_build_structured_output_fails_when_base_returns_non_list": 0.006319666019408032,
- "src/backend/tests/unit/components/processing/test_structured_output_component.py::TestStructuredOutputComponent::test_build_structured_output_returns_data_with_dict": 0.006250540958717465,
- "src/backend/tests/unit/components/processing/test_structured_output_component.py::TestStructuredOutputComponent::test_build_structured_output_returns_data_with_single_item": 0.006213875021785498,
- "src/backend/tests/unit/components/processing/test_structured_output_component.py::TestStructuredOutputComponent::test_build_structured_output_returns_multiple_objects": 0.007143499999074265,
- "src/backend/tests/unit/components/processing/test_structured_output_component.py::TestStructuredOutputComponent::test_component_versions[1.0.19]": 0.0004962919920217246,
- "src/backend/tests/unit/components/processing/test_structured_output_component.py::TestStructuredOutputComponent::test_component_versions[1.1.0]": 0.000499165995279327,
- "src/backend/tests/unit/components/processing/test_structured_output_component.py::TestStructuredOutputComponent::test_component_versions[1.1.1]": 0.00047266503679566085,
- "src/backend/tests/unit/components/processing/test_structured_output_component.py::TestStructuredOutputComponent::test_correctly_builds_output_model": 0.0010860009933821857,
- "src/backend/tests/unit/components/processing/test_structured_output_component.py::TestStructuredOutputComponent::test_empty_output_schema": 0.0007014160219114274,
- "src/backend/tests/unit/components/processing/test_structured_output_component.py::TestStructuredOutputComponent::test_handles_multiple_outputs": 0.0009887910273391753,
- "src/backend/tests/unit/components/processing/test_structured_output_component.py::TestStructuredOutputComponent::test_invalid_output_schema_type": 0.0007117910136003047,
- "src/backend/tests/unit/components/processing/test_structured_output_component.py::TestStructuredOutputComponent::test_large_input_value": 0.006515416956972331,
- "src/backend/tests/unit/components/processing/test_structured_output_component.py::TestStructuredOutputComponent::test_latest_version": 0.003128958953311667,
- "src/backend/tests/unit/components/processing/test_structured_output_component.py::TestStructuredOutputComponent::test_multiple_patterns_with_duplicates_and_variations": 0.02061108298948966,
- "src/backend/tests/unit/components/processing/test_structured_output_component.py::TestStructuredOutputComponent::test_nested_output_schema": 0.007383333024336025,
- "src/backend/tests/unit/components/processing/test_structured_output_component.py::TestStructuredOutputComponent::test_raises_value_error_for_unsupported_language_model": 0.000860710017150268,
- "src/backend/tests/unit/components/processing/test_structured_output_component.py::TestStructuredOutputComponent::test_structured_output_handles_empty_responses_array": 0.007518583966884762,
- "src/backend/tests/unit/components/processing/test_structured_output_component.py::TestStructuredOutputComponent::test_structured_output_returns_dict_when_no_objects_key": 0.008771666995016858,
- "src/backend/tests/unit/components/processing/test_structured_output_component.py::TestStructuredOutputComponent::test_structured_output_returns_direct_response_when_not_dict": 0.0062495839956682175,
- "src/backend/tests/unit/components/processing/test_structured_output_component.py::TestStructuredOutputComponent::test_successful_structured_output_generation_with_patch_with_config": 0.009691083017969504,
- "src/backend/tests/unit/components/processing/test_structured_output_component.py::TestStructuredOutputComponent::test_with_real_nvidia_model_simple_schema": 2.458996542991372,
- "src/backend/tests/unit/components/processing/test_structured_output_component.py::TestStructuredOutputComponent::test_with_real_openai_model_complex_schema": 1.5546967499831226,
- "src/backend/tests/unit/components/processing/test_structured_output_component.py::TestStructuredOutputComponent::test_with_real_openai_model_multiple_patterns": 4.3101096260070335,
- "src/backend/tests/unit/components/processing/test_structured_output_component.py::TestStructuredOutputComponent::test_with_real_openai_model_nested_schema": 2.9080685819790233,
- "src/backend/tests/unit/components/processing/test_structured_output_component.py::TestStructuredOutputComponent::test_with_real_openai_model_simple_schema": 1.2749667500029318,
- "src/backend/tests/unit/components/processing/test_structured_output_component.py::TestStructuredOutputComponent::test_with_real_openai_model_simple_schema_fail": 0.617628792009782,
- "src/backend/tests/unit/components/processing/test_type_converter_component.py::TestTypeConverterComponent::test_all_versions_have_a_file_name_defined": 0.0005106259777676314,
- "src/backend/tests/unit/components/processing/test_type_converter_component.py::TestTypeConverterComponent::test_component_versions[1.0.19]": 0.00048179199802689254,
- "src/backend/tests/unit/components/processing/test_type_converter_component.py::TestTypeConverterComponent::test_component_versions[1.1.0]": 0.00044328998774290085,
- "src/backend/tests/unit/components/processing/test_type_converter_component.py::TestTypeConverterComponent::test_component_versions[1.1.1]": 0.0014085830189287663,
- "src/backend/tests/unit/components/processing/test_type_converter_component.py::TestTypeConverterComponent::test_data_to_data": 0.0005289599939715117,
- "src/backend/tests/unit/components/processing/test_type_converter_component.py::TestTypeConverterComponent::test_data_to_dataframe": 0.0007592500187456608,
- "src/backend/tests/unit/components/processing/test_type_converter_component.py::TestTypeConverterComponent::test_data_to_message": 0.0005531670176424086,
- "src/backend/tests/unit/components/processing/test_type_converter_component.py::TestTypeConverterComponent::test_dataframe_to_data": 0.0009563339990563691,
- "src/backend/tests/unit/components/processing/test_type_converter_component.py::TestTypeConverterComponent::test_dataframe_to_dataframe": 0.000805750023573637,
- "src/backend/tests/unit/components/processing/test_type_converter_component.py::TestTypeConverterComponent::test_dataframe_to_message": 0.0025507090031169355,
- "src/backend/tests/unit/components/processing/test_type_converter_component.py::TestTypeConverterComponent::test_latest_version": 0.0026352079585194588,
- "src/backend/tests/unit/components/processing/test_type_converter_component.py::TestTypeConverterComponent::test_message_to_data": 0.0005530420166905969,
- "src/backend/tests/unit/components/processing/test_type_converter_component.py::TestTypeConverterComponent::test_message_to_dataframe": 0.000984583020908758,
- "src/backend/tests/unit/components/processing/test_type_converter_component.py::TestTypeConverterComponent::test_message_to_message": 0.0006156250019557774,
- "src/backend/tests/unit/components/processing/test_type_converter_component.py::TestTypeConverterComponent::test_update_outputs": 0.0006247080164030194,
- "src/backend/tests/unit/components/prompts/test_prompt_component.py::TestPromptComponent::test_all_versions_have_a_file_name_defined": 1.9582983739674091,
- "src/backend/tests/unit/components/prompts/test_prompt_component.py::TestPromptComponent::test_component_versions[1.0.17]": 15.071019583090674,
- "src/backend/tests/unit/components/prompts/test_prompt_component.py::TestPromptComponent::test_component_versions[1.0.18]": 5.277748624968808,
- "src/backend/tests/unit/components/prompts/test_prompt_component.py::TestPromptComponent::test_component_versions[1.0.19]": 2.2007959169568494,
- "src/backend/tests/unit/components/prompts/test_prompt_component.py::TestPromptComponent::test_component_versions[1.1.0]": 2.2142099180200603,
- "src/backend/tests/unit/components/prompts/test_prompt_component.py::TestPromptComponent::test_component_versions[1.1.1]": 7.77612212402164,
- "src/backend/tests/unit/components/prompts/test_prompt_component.py::TestPromptComponent::test_latest_version": 1.9659918329853099,
- "src/backend/tests/unit/components/prompts/test_prompt_component.py::TestPromptComponent::test_post_code_processing": 2.011389582999982,
- "src/backend/tests/unit/components/prompts/test_prompt_component.py::TestPromptComponent::test_prompt_component_latest": 2.0202875419636257,
- "src/backend/tests/unit/components/prototypes/test_create_data_component.py::test_build_data": 0.0052994580182712525,
- "src/backend/tests/unit/components/prototypes/test_create_data_component.py::test_get_data": 0.0006567089876625687,
- "src/backend/tests/unit/components/prototypes/test_create_data_component.py::test_update_build_config": 0.0038311670068651438,
- "src/backend/tests/unit/components/prototypes/test_create_data_component.py::test_update_build_config_exceed_limit": 0.005434999999124557,
- "src/backend/tests/unit/components/prototypes/test_create_data_component.py::test_validate_text_key_invalid": 0.0006299999949987978,
- "src/backend/tests/unit/components/prototypes/test_create_data_component.py::test_validate_text_key_valid": 0.0005306660023052245,
- "src/backend/tests/unit/components/prototypes/test_update_data_component.py::test_build_data": 0.0008599160064477473,
- "src/backend/tests/unit/components/prototypes/test_update_data_component.py::test_get_data": 0.0005177089769858867,
- "src/backend/tests/unit/components/prototypes/test_update_data_component.py::test_update_build_config": 0.0006985420186538249,
- "src/backend/tests/unit/components/prototypes/test_update_data_component.py::test_update_build_config_exceed_limit": 0.0005461249966174364,
- "src/backend/tests/unit/components/prototypes/test_update_data_component.py::test_validate_text_key_invalid": 0.0005727499665226787,
- "src/backend/tests/unit/components/prototypes/test_update_data_component.py::test_validate_text_key_valid": 0.0004975830088369548,
- "src/backend/tests/unit/components/search/test_arxiv_component.py::TestArXivComponent::test_all_versions_have_a_file_name_defined": 2.008274583000457,
- "src/backend/tests/unit/components/search/test_arxiv_component.py::TestArXivComponent::test_build_query_url": 1.9903707079938613,
- "src/backend/tests/unit/components/search/test_arxiv_component.py::TestArXivComponent::test_component_initialization": 2.0078824590018485,
- "src/backend/tests/unit/components/search/test_arxiv_component.py::TestArXivComponent::test_component_versions": 1.9723654999688733,
- "src/backend/tests/unit/components/search/test_arxiv_component.py::TestArXivComponent::test_invalid_url_handling": 2.028605166997295,
- "src/backend/tests/unit/components/search/test_arxiv_component.py::TestArXivComponent::test_latest_version": 1.9730095420090947,
- "src/backend/tests/unit/components/search/test_arxiv_component.py::TestArXivComponent::test_parse_atom_response": 2.0083995000168215,
- "src/backend/tests/unit/components/search/test_google_search_api.py::TestGoogleSearchAPICore::test_all_versions_have_a_file_name_defined": 0.002289084019139409,
- "src/backend/tests/unit/components/search/test_google_search_api.py::TestGoogleSearchAPICore::test_build_method": 0.0006327500159386545,
- "src/backend/tests/unit/components/search/test_google_search_api.py::TestGoogleSearchAPICore::test_component_initialization": 0.005096999986562878,
- "src/backend/tests/unit/components/search/test_google_search_api.py::TestGoogleSearchAPICore::test_component_versions[1.0.19]": 0.0013052500144112855,
- "src/backend/tests/unit/components/search/test_google_search_api.py::TestGoogleSearchAPICore::test_component_versions[1.1.0]": 0.0012189159751869738,
- "src/backend/tests/unit/components/search/test_google_search_api.py::TestGoogleSearchAPICore::test_component_versions[1.1.1]": 0.001602459029527381,
- "src/backend/tests/unit/components/search/test_google_search_api.py::TestGoogleSearchAPICore::test_latest_version": 0.0009017900156322867,
- "src/backend/tests/unit/components/search/test_google_search_api.py::TestGoogleSearchAPICore::test_search_google_error_handling": 0.0021642080100718886,
- "src/backend/tests/unit/components/search/test_google_search_api.py::TestGoogleSearchAPICore::test_search_google_invalid_api_key": 0.0008651250100228935,
- "src/backend/tests/unit/components/search/test_google_search_api.py::TestGoogleSearchAPICore::test_search_google_invalid_cse_id": 0.0007623750425409526,
- "src/backend/tests/unit/components/search/test_google_search_api.py::TestGoogleSearchAPICore::test_search_google_success": 0.008456167008262128,
- "src/backend/tests/unit/components/search/test_google_serper_api_core.py::test_build_method": 0.0005421250243671238,
- "src/backend/tests/unit/components/search/test_google_serper_api_core.py::test_build_wrapper": 0.0005641249590553343,
- "src/backend/tests/unit/components/search/test_google_serper_api_core.py::test_component_initialization": 0.0005162910092622042,
- "src/backend/tests/unit/components/search/test_google_serper_api_core.py::test_search_serper_error_handling": 0.0012649999698624015,
- "src/backend/tests/unit/components/search/test_google_serper_api_core.py::test_search_serper_success": 0.001680291024968028,
- "src/backend/tests/unit/components/search/test_google_serper_api_core.py::test_text_search_serper": 0.001405458024237305,
- "src/backend/tests/unit/components/search/test_wikidata_api.py::TestWikidataComponent::test_all_versions_have_a_file_name_defined": 0.0005619580042548478,
- "src/backend/tests/unit/components/search/test_wikidata_api.py::TestWikidataComponent::test_component_versions[1.0.19]": 0.0005110840138513595,
- "src/backend/tests/unit/components/search/test_wikidata_api.py::TestWikidataComponent::test_component_versions[1.1.0]": 0.0004986670101061463,
- "src/backend/tests/unit/components/search/test_wikidata_api.py::TestWikidataComponent::test_component_versions[1.1.1]": 0.0008744590159039944,
- "src/backend/tests/unit/components/search/test_wikidata_api.py::TestWikidataComponent::test_fetch_content_empty_response": 0.0008708749955985695,
- "src/backend/tests/unit/components/search/test_wikidata_api.py::TestWikidataComponent::test_fetch_content_error_handling": 0.0006628760020248592,
- "src/backend/tests/unit/components/search/test_wikidata_api.py::TestWikidataComponent::test_fetch_content_success": 0.001012417982565239,
- "src/backend/tests/unit/components/search/test_wikidata_api.py::TestWikidataComponent::test_latest_version": 0.002908624999690801,
- "src/backend/tests/unit/components/search/test_wikidata_api.py::TestWikidataComponent::test_wikidata_initialization": 0.0005996669933665544,
- "src/backend/tests/unit/components/search/test_wikidata_api.py::TestWikidataComponent::test_wikidata_template": 0.005077042005723342,
- "src/backend/tests/unit/components/search/test_wikipedia_api.py::TestWikipediaComponent::test_all_versions_have_a_file_name_defined": 0.0004904580418951809,
- "src/backend/tests/unit/components/search/test_wikipedia_api.py::TestWikipediaComponent::test_component_versions[1.0.19]": 0.0004769999941345304,
- "src/backend/tests/unit/components/search/test_wikipedia_api.py::TestWikipediaComponent::test_component_versions[1.1.0]": 0.0004519580106716603,
- "src/backend/tests/unit/components/search/test_wikipedia_api.py::TestWikipediaComponent::test_component_versions[1.1.1]": 0.0008325409726239741,
- "src/backend/tests/unit/components/search/test_wikipedia_api.py::TestWikipediaComponent::test_fetch_content": 0.001082334027159959,
- "src/backend/tests/unit/components/search/test_wikipedia_api.py::TestWikipediaComponent::test_latest_version": 0.002207999990787357,
- "src/backend/tests/unit/components/search/test_wikipedia_api.py::TestWikipediaComponent::test_wikipedia_error_handling": 0.0006926649948582053,
- "src/backend/tests/unit/components/search/test_wikipedia_api.py::TestWikipediaComponent::test_wikipedia_initialization": 0.0005918329989071935,
- "src/backend/tests/unit/components/search/test_wikipedia_api.py::TestWikipediaComponent::test_wikipedia_template": 0.0049602910003159195,
- "src/backend/tests/unit/components/search/test_yfinance_tool.py::TestYfinanceComponent::test_error_handling": 0.0009893749956972897,
- "src/backend/tests/unit/components/search/test_yfinance_tool.py::TestYfinanceComponent::test_fetch_info": 0.0008797089976724237,
- "src/backend/tests/unit/components/search/test_yfinance_tool.py::TestYfinanceComponent::test_fetch_news": 0.0007533330062869936,
- "src/backend/tests/unit/components/search/test_yfinance_tool.py::TestYfinanceComponent::test_initialization": 0.0005080409755464643,
- "src/backend/tests/unit/components/search/test_yfinance_tool.py::TestYfinanceComponent::test_template_structure": 0.012736834003590047,
- "src/backend/tests/unit/components/tools/test_arxiv_component.py::TestArXivComponent::test_all_versions_have_a_file_name_defined": 7.719283441000016,
- "src/backend/tests/unit/components/tools/test_arxiv_component.py::TestArXivComponent::test_build_query_url": 7.880579604000104,
- "src/backend/tests/unit/components/tools/test_arxiv_component.py::TestArXivComponent::test_component_initialization": 7.921767463000151,
- "src/backend/tests/unit/components/tools/test_arxiv_component.py::TestArXivComponent::test_component_versions": 7.8657784039999115,
- "src/backend/tests/unit/components/tools/test_arxiv_component.py::TestArXivComponent::test_invalid_url_handling": 7.964883273999703,
- "src/backend/tests/unit/components/tools/test_arxiv_component.py::TestArXivComponent::test_latest_version": 7.6661638979999225,
- "src/backend/tests/unit/components/tools/test_arxiv_component.py::TestArXivComponent::test_parse_atom_response": 7.974242982000078,
- "src/backend/tests/unit/components/tools/test_calculator.py::TestCalculatorComponent::test_all_versions_have_a_file_name_defined": 0.0005130840290803462,
- "src/backend/tests/unit/components/tools/test_calculator.py::TestCalculatorComponent::test_basic_calculation": 0.0006024580216035247,
- "src/backend/tests/unit/components/tools/test_calculator.py::TestCalculatorComponent::test_complex_calculation": 0.0006607919640373439,
- "src/backend/tests/unit/components/tools/test_calculator.py::TestCalculatorComponent::test_component_frontend_node": 0.0012644169910345227,
- "src/backend/tests/unit/components/tools/test_calculator.py::TestCalculatorComponent::test_component_versions[1.0.19]": 0.00048799996147863567,
- "src/backend/tests/unit/components/tools/test_calculator.py::TestCalculatorComponent::test_component_versions[1.1.0]": 0.0004496249894145876,
- "src/backend/tests/unit/components/tools/test_calculator.py::TestCalculatorComponent::test_component_versions[1.1.1]": 0.0004496249894145876,
- "src/backend/tests/unit/components/tools/test_calculator.py::TestCalculatorComponent::test_division_by_zero": 0.0005333749868441373,
- "src/backend/tests/unit/components/tools/test_calculator.py::TestCalculatorComponent::test_invalid_expression": 0.0005329590057954192,
- "src/backend/tests/unit/components/tools/test_calculator.py::TestCalculatorComponent::test_latest_version": 0.002752083004452288,
- "src/backend/tests/unit/components/tools/test_calculator.py::TestCalculatorComponent::test_unsupported_operation": 0.0005257909942883998,
- "src/backend/tests/unit/components/tools/test_google_search_api.py::TestGoogleSearchAPICore::test_all_versions_have_a_file_name_defined": 0.0013710770001580386,
- "src/backend/tests/unit/components/tools/test_google_search_api.py::TestGoogleSearchAPICore::test_build_method": 0.0027305020003041136,
- "src/backend/tests/unit/components/tools/test_google_search_api.py::TestGoogleSearchAPICore::test_component_initialization": 0.005256973000086873,
- "src/backend/tests/unit/components/tools/test_google_search_api.py::TestGoogleSearchAPICore::test_component_versions[1.0.19]": 0.0013682010001048184,
- "src/backend/tests/unit/components/tools/test_google_search_api.py::TestGoogleSearchAPICore::test_component_versions[1.1.0]": 0.0014195569999628788,
- "src/backend/tests/unit/components/tools/test_google_search_api.py::TestGoogleSearchAPICore::test_component_versions[1.1.1]": 0.0017803300002015021,
- "src/backend/tests/unit/components/tools/test_google_search_api.py::TestGoogleSearchAPICore::test_latest_version": 0.0029206650001469825,
- "src/backend/tests/unit/components/tools/test_google_search_api.py::TestGoogleSearchAPICore::test_search_google_error_handling": 0.00417755000012221,
- "src/backend/tests/unit/components/tools/test_google_search_api.py::TestGoogleSearchAPICore::test_search_google_invalid_api_key": 0.002768182999943747,
- "src/backend/tests/unit/components/tools/test_google_search_api.py::TestGoogleSearchAPICore::test_search_google_invalid_cse_id": 0.0027788419997705205,
- "src/backend/tests/unit/components/tools/test_google_search_api.py::TestGoogleSearchAPICore::test_search_google_success": 0.007727160000285949,
- "src/backend/tests/unit/components/tools/test_google_serper_api_core.py::test_build_method": 0.0019307610002670117,
- "src/backend/tests/unit/components/tools/test_google_serper_api_core.py::test_build_wrapper": 0.001993917000163492,
- "src/backend/tests/unit/components/tools/test_google_serper_api_core.py::test_component_initialization": 0.0020683770001141966,
- "src/backend/tests/unit/components/tools/test_google_serper_api_core.py::test_search_serper_error_handling": 0.0030555060000097,
- "src/backend/tests/unit/components/tools/test_google_serper_api_core.py::test_search_serper_success": 0.003471242999921742,
- "src/backend/tests/unit/components/tools/test_google_serper_api_core.py::test_text_search_serper": 0.004033570999808944,
- "src/backend/tests/unit/components/tools/test_mcp_component.py::TestMCPSseClient::test_connect_timeout": 0.0022400220004783478,
- "src/backend/tests/unit/components/tools/test_mcp_component.py::TestMCPSseClient::test_connect_to_server": 0.003921520999938366,
- "src/backend/tests/unit/components/tools/test_mcp_component.py::TestMCPSseClient::test_pre_check_redirect": 0.0026828489999388694,
- "src/backend/tests/unit/components/tools/test_mcp_component.py::TestMCPStdioClient::test_connect_to_server": 0.005657581999457761,
- "src/backend/tests/unit/components/tools/test_mcp_component.py::TestMCPToolsComponent::test_all_versions_have_a_file_name_defined": 0.0014033699999345117,
- "src/backend/tests/unit/components/tools/test_mcp_component.py::TestMCPToolsComponent::test_build_output": 0.006520844000078796,
- "src/backend/tests/unit/components/tools/test_mcp_component.py::TestMCPToolsComponent::test_component_versions[1.0.19]": 0.001381868999942526,
- "src/backend/tests/unit/components/tools/test_mcp_component.py::TestMCPToolsComponent::test_component_versions[1.1.0]": 0.0013752380000369158,
- "src/backend/tests/unit/components/tools/test_mcp_component.py::TestMCPToolsComponent::test_component_versions[1.1.1]": 0.0013815699999213393,
- "src/backend/tests/unit/components/tools/test_mcp_component.py::TestMCPToolsComponent::test_get_inputs_for_all_tools": 0.00451956799997788,
- "src/backend/tests/unit/components/tools/test_mcp_component.py::TestMCPToolsComponent::test_latest_version": 0.009717880999687623,
- "src/backend/tests/unit/components/tools/test_mcp_component.py::TestMCPToolsComponent::test_remove_non_default_keys": 0.0031858779998401587,
- "src/backend/tests/unit/components/tools/test_mcp_component.py::TestMCPToolsComponent::test_update_build_config_mode_change": 2.051126612999724,
- "src/backend/tests/unit/components/tools/test_mcp_component.py::TestMCPToolsComponent::test_validate_connection_params_invalid_mode": 0.0037146149998079636,
- "src/backend/tests/unit/components/tools/test_mcp_component.py::TestMCPToolsComponent::test_validate_connection_params_missing_command": 0.0031783039994479623,
- "src/backend/tests/unit/components/tools/test_mcp_component.py::TestMCPToolsComponent::test_validate_connection_params_missing_url": 0.003544085000157793,
- "src/backend/tests/unit/components/tools/test_python_repl_tool.py::TestPythonREPLComponent::test_all_versions_have_a_file_name_defined": 0.0004359169688541442,
- "src/backend/tests/unit/components/tools/test_python_repl_tool.py::TestPythonREPLComponent::test_component_initialization": 0.001648333010962233,
- "src/backend/tests/unit/components/tools/test_python_repl_tool.py::TestPythonREPLComponent::test_component_versions[1.0.19]": 0.00046337401727214456,
- "src/backend/tests/unit/components/tools/test_python_repl_tool.py::TestPythonREPLComponent::test_component_versions[1.1.0]": 0.0008574170060455799,
- "src/backend/tests/unit/components/tools/test_python_repl_tool.py::TestPythonREPLComponent::test_component_versions[1.1.1]": 0.00044704199535772204,
- "src/backend/tests/unit/components/tools/test_python_repl_tool.py::TestPythonREPLComponent::test_latest_version": 0.002423624013317749,
- "src/backend/tests/unit/components/tools/test_python_repl_tool.py::test_python_repl_tool_template": 0.02093030200001067,
- "src/backend/tests/unit/components/tools/test_serp_api.py::test_error_handling": 0.0008732080459594727,
- "src/backend/tests/unit/components/tools/test_serp_api.py::test_fetch_content": 0.0008422500104643404,
- "src/backend/tests/unit/components/tools/test_serp_api.py::test_fetch_content_text": 0.0006160000048112124,
- "src/backend/tests/unit/components/tools/test_serp_api.py::test_serpapi_initialization": 0.0005109990306664258,
- "src/backend/tests/unit/components/tools/test_serp_api.py::test_serpapi_template": 0.010708707995945588,
- "src/backend/tests/unit/components/tools/test_wikidata_api.py::TestWikidataComponent::test_all_versions_have_a_file_name_defined": 0.0014399039998806984,
- "src/backend/tests/unit/components/tools/test_wikidata_api.py::TestWikidataComponent::test_component_versions[1.0.19]": 0.0014149389999147388,
- "src/backend/tests/unit/components/tools/test_wikidata_api.py::TestWikidataComponent::test_component_versions[1.1.0]": 0.0013904029999594059,
- "src/backend/tests/unit/components/tools/test_wikidata_api.py::TestWikidataComponent::test_component_versions[1.1.1]": 0.0013899820000915497,
- "src/backend/tests/unit/components/tools/test_wikidata_api.py::TestWikidataComponent::test_fetch_content_empty_response": 0.003578621999849929,
- "src/backend/tests/unit/components/tools/test_wikidata_api.py::TestWikidataComponent::test_fetch_content_error_handling": 0.003472082999905979,
- "src/backend/tests/unit/components/tools/test_wikidata_api.py::TestWikidataComponent::test_fetch_content_success": 0.003753358000267326,
- "src/backend/tests/unit/components/tools/test_wikidata_api.py::TestWikidataComponent::test_fetch_content_text": 0.003131679000262011,
- "src/backend/tests/unit/components/tools/test_wikidata_api.py::TestWikidataComponent::test_latest_version": 0.006567637000216564,
- "src/backend/tests/unit/components/tools/test_wikidata_api.py::TestWikidataComponent::test_wikidata_initialization": 0.0033579319999716972,
- "src/backend/tests/unit/components/tools/test_wikidata_api.py::TestWikidataComponent::test_wikidata_template": 0.016970574999959354,
- "src/backend/tests/unit/components/tools/test_wikidata_api.py::test_fetch_content_empty_response": 0.003265670000018872,
- "src/backend/tests/unit/components/tools/test_wikidata_api.py::test_fetch_content_error_handling": 0.002808468000011999,
- "src/backend/tests/unit/components/tools/test_wikidata_api.py::test_fetch_content_success": 0.0032077419999723134,
- "src/backend/tests/unit/components/tools/test_wikidata_api.py::test_fetch_content_text": 0.00273625400001265,
- "src/backend/tests/unit/components/tools/test_wikidata_api.py::test_wikidata_initialization": 0.002704716000039298,
- "src/backend/tests/unit/components/tools/test_wikidata_api.py::test_wikidata_template": 0.01613066200002322,
- "src/backend/tests/unit/components/tools/test_wikipedia_api.py::TestWikipediaComponent::test_all_versions_have_a_file_name_defined": 0.0014324910000595992,
- "src/backend/tests/unit/components/tools/test_wikipedia_api.py::TestWikipediaComponent::test_component_versions[1.0.19]": 0.001448040000468609,
- "src/backend/tests/unit/components/tools/test_wikipedia_api.py::TestWikipediaComponent::test_component_versions[1.1.0]": 0.0014265710001382104,
- "src/backend/tests/unit/components/tools/test_wikipedia_api.py::TestWikipediaComponent::test_component_versions[1.1.1]": 0.0018522650000249996,
- "src/backend/tests/unit/components/tools/test_wikipedia_api.py::TestWikipediaComponent::test_fetch_content": 0.0031127639999795065,
- "src/backend/tests/unit/components/tools/test_wikipedia_api.py::TestWikipediaComponent::test_fetch_content_text": 0.0023736660000395204,
- "src/backend/tests/unit/components/tools/test_wikipedia_api.py::TestWikipediaComponent::test_latest_version": 0.005395730999907755,
- "src/backend/tests/unit/components/tools/test_wikipedia_api.py::TestWikipediaComponent::test_wikipedia_error_handling": 0.0023547909997887473,
- "src/backend/tests/unit/components/tools/test_wikipedia_api.py::TestWikipediaComponent::test_wikipedia_initialization": 0.0021511099998861027,
- "src/backend/tests/unit/components/tools/test_wikipedia_api.py::TestWikipediaComponent::test_wikipedia_template": 0.015991128000223398,
- "src/backend/tests/unit/components/tools/test_wikipedia_api.py::test_fetch_content": 0.002685490999965623,
- "src/backend/tests/unit/components/tools/test_wikipedia_api.py::test_fetch_content_text": 0.001898934999985613,
- "src/backend/tests/unit/components/tools/test_wikipedia_api.py::test_wikipedia_error_handling": 0.0019180109999865635,
- "src/backend/tests/unit/components/tools/test_wikipedia_api.py::test_wikipedia_initialization": 0.0017836609999903885,
- "src/backend/tests/unit/components/tools/test_wikipedia_api.py::test_wikipedia_template": 0.01370607699999482,
- "src/backend/tests/unit/components/tools/test_yfinance_tool.py::TestYfinanceComponent::test_error_handling": 0.0028651630000240402,
- "src/backend/tests/unit/components/tools/test_yfinance_tool.py::TestYfinanceComponent::test_fetch_info": 0.0030333959998642968,
- "src/backend/tests/unit/components/tools/test_yfinance_tool.py::TestYfinanceComponent::test_fetch_news": 0.00288838599999508,
- "src/backend/tests/unit/components/tools/test_yfinance_tool.py::TestYfinanceComponent::test_initialization": 0.0025505469998279295,
- "src/backend/tests/unit/components/tools/test_yfinance_tool.py::TestYfinanceComponent::test_template_structure": 0.06717140499995367,
- "src/backend/tests/unit/components/tools/test_yfinance_tool.py::test_yfinance_tool_template": 0.03864965400003939,
- "src/backend/tests/unit/components/vectorstores/test_chroma_vector_store_component.py::TestChromaVectorStoreComponent::test_all_versions_have_a_file_name_defined": 0.012453000992536545,
- "src/backend/tests/unit/components/vectorstores/test_chroma_vector_store_component.py::TestChromaVectorStoreComponent::test_chroma_collection_to_data": 1.0077733749640174,
- "src/backend/tests/unit/components/vectorstores/test_chroma_vector_store_component.py::TestChromaVectorStoreComponent::test_chroma_collection_to_data_empty_collection": 0.06913429195992649,
- "src/backend/tests/unit/components/vectorstores/test_chroma_vector_store_component.py::TestChromaVectorStoreComponent::test_chroma_collection_to_data_without_metadata": 0.5512137910118327,
- "src/backend/tests/unit/components/vectorstores/test_chroma_vector_store_component.py::TestChromaVectorStoreComponent::test_component_versions[1.0.19]": 0.26562599997851066,
- "src/backend/tests/unit/components/vectorstores/test_chroma_vector_store_component.py::TestChromaVectorStoreComponent::test_component_versions[1.1.0]": 0.24795970899867825,
- "src/backend/tests/unit/components/vectorstores/test_chroma_vector_store_component.py::TestChromaVectorStoreComponent::test_component_versions[1.1.1]": 0.25578554096864536,
- "src/backend/tests/unit/components/vectorstores/test_chroma_vector_store_component.py::TestChromaVectorStoreComponent::test_create_collection_with_data": 0.6333307089807931,
- "src/backend/tests/unit/components/vectorstores/test_chroma_vector_store_component.py::TestChromaVectorStoreComponent::test_create_db": 0.55427929101279,
- "src/backend/tests/unit/components/vectorstores/test_chroma_vector_store_component.py::TestChromaVectorStoreComponent::test_duplicate_handling": 2.3550448760506697,
- "src/backend/tests/unit/components/vectorstores/test_chroma_vector_store_component.py::TestChromaVectorStoreComponent::test_latest_version": 0.01634991599712521,
- "src/backend/tests/unit/components/vectorstores/test_chroma_vector_store_component.py::TestChromaVectorStoreComponent::test_mmr_search": 2.360618542006705,
- "src/backend/tests/unit/components/vectorstores/test_chroma_vector_store_component.py::TestChromaVectorStoreComponent::test_search_with_different_types": 2.3776622920122463,
- "src/backend/tests/unit/components/vectorstores/test_chroma_vector_store_component.py::TestChromaVectorStoreComponent::test_search_with_score": 1.3293308750144206,
- "src/backend/tests/unit/components/vectorstores/test_chroma_vector_store_component.py::TestChromaVectorStoreComponent::test_similarity_search": 3.1114264170173556,
- "src/backend/tests/unit/components/vectorstores/test_graph_rag_component.py::TestGraphRAGComponent::test_all_versions_have_a_file_name_defined": 0.0005947909958194941,
- "src/backend/tests/unit/components/vectorstores/test_graph_rag_component.py::TestGraphRAGComponent::test_component_versions[1.0.19]": 0.00048637500731274486,
- "src/backend/tests/unit/components/vectorstores/test_graph_rag_component.py::TestGraphRAGComponent::test_component_versions[1.1.0]": 0.0004857080348301679,
- "src/backend/tests/unit/components/vectorstores/test_graph_rag_component.py::TestGraphRAGComponent::test_component_versions[1.1.1]": 0.0004555839695967734,
- "src/backend/tests/unit/components/vectorstores/test_graph_rag_component.py::TestGraphRAGComponent::test_graphrag": 0.20784370703040622,
- "src/backend/tests/unit/components/vectorstores/test_graph_rag_component.py::TestGraphRAGComponent::test_latest_version": 0.015758333989651874,
- "src/backend/tests/unit/components/vectorstores/test_local_db_component.py::TestLocalDBComponent::test_all_versions_have_a_file_name_defined": 0.01379579200875014,
- "src/backend/tests/unit/components/vectorstores/test_local_db_component.py::TestLocalDBComponent::test_build_config_update": 0.033823167002992705,
- "src/backend/tests/unit/components/vectorstores/test_local_db_component.py::TestLocalDBComponent::test_component_versions[1.0.19]": 0.01410741699510254,
- "src/backend/tests/unit/components/vectorstores/test_local_db_component.py::TestLocalDBComponent::test_component_versions[1.1.0]": 0.01241016699350439,
- "src/backend/tests/unit/components/vectorstores/test_local_db_component.py::TestLocalDBComponent::test_component_versions[1.1.1]": 0.015366416017059237,
- "src/backend/tests/unit/components/vectorstores/test_local_db_component.py::TestLocalDBComponent::test_create_db": 0.051133706961991265,
- "src/backend/tests/unit/components/vectorstores/test_local_db_component.py::TestLocalDBComponent::test_create_db_with_data": 0.05039254200528376,
- "src/backend/tests/unit/components/vectorstores/test_local_db_component.py::TestLocalDBComponent::test_default_persist_dir": 0.014123667031526566,
- "src/backend/tests/unit/components/vectorstores/test_local_db_component.py::TestLocalDBComponent::test_duplicate_handling": 0.6240682920033578,
- "src/backend/tests/unit/components/vectorstores/test_local_db_component.py::TestLocalDBComponent::test_latest_version": 0.019211415987228975,
- "src/backend/tests/unit/components/vectorstores/test_local_db_component.py::TestLocalDBComponent::test_list_existing_collections": 0.015889582980889827,
- "src/backend/tests/unit/components/vectorstores/test_local_db_component.py::TestLocalDBComponent::test_mmr_search": 0.5339552910008933,
- "src/backend/tests/unit/components/vectorstores/test_local_db_component.py::TestLocalDBComponent::test_search_with_different_types": 0.7122681660403032,
- "src/backend/tests/unit/components/vectorstores/test_local_db_component.py::TestLocalDBComponent::test_similarity_search": 1.0169339580170345,
- "src/backend/tests/unit/components/vectorstores/test_mongodb_atlas.py::TestMongoVectorStoreComponent::test_all_versions_have_a_file_name_defined": 0.00015879201237112284,
- "src/backend/tests/unit/components/vectorstores/test_mongodb_atlas.py::TestMongoVectorStoreComponent::test_component_versions[1.0.19]": 0.00022754102246835828,
- "src/backend/tests/unit/components/vectorstores/test_mongodb_atlas.py::TestMongoVectorStoreComponent::test_component_versions[1.1.0]": 0.00016550003783777356,
- "src/backend/tests/unit/components/vectorstores/test_mongodb_atlas.py::TestMongoVectorStoreComponent::test_component_versions[1.1.1]": 0.00015420798445120454,
- "src/backend/tests/unit/components/vectorstores/test_mongodb_atlas.py::TestMongoVectorStoreComponent::test_create_collection_with_data": 0.0001602920237928629,
- "src/backend/tests/unit/components/vectorstores/test_mongodb_atlas.py::TestMongoVectorStoreComponent::test_create_db": 0.00016187498113140464,
- "src/backend/tests/unit/components/vectorstores/test_mongodb_atlas.py::TestMongoVectorStoreComponent::test_empty_search_query": 0.00014808299602009356,
- "src/backend/tests/unit/components/vectorstores/test_mongodb_atlas.py::TestMongoVectorStoreComponent::test_error_handling": 0.0001829999964684248,
- "src/backend/tests/unit/components/vectorstores/test_mongodb_atlas.py::TestMongoVectorStoreComponent::test_latest_version": 0.00023429200518876314,
- "src/backend/tests/unit/components/vectorstores/test_mongodb_atlas.py::TestMongoVectorStoreComponent::test_metadata_handling": 0.00019883300410583615,
- "src/backend/tests/unit/components/vectorstores/test_mongodb_atlas.py::TestMongoVectorStoreComponent::test_mtls_configuration": 0.00015275098849087954,
- "src/backend/tests/unit/components/vectorstores/test_mongodb_atlas.py::TestMongoVectorStoreComponent::test_similarity_search": 0.0001617920061107725,
- "src/backend/tests/unit/custom/component/test_component_instance_attributes.py::test_files_independence": 0.0008971669885795563,
- "src/backend/tests/unit/custom/component/test_component_instance_attributes.py::test_input_value_independence": 0.002610334020573646,
- "src/backend/tests/unit/custom/component/test_component_instance_attributes.py::test_message_output_independence": 0.0023311669938266277,
- "src/backend/tests/unit/custom/component/test_component_instance_attributes.py::test_multiple_attributes_independence": 0.000769876001868397,
- "src/backend/tests/unit/custom/component/test_component_instance_attributes.py::test_sender_name_independence": 0.000770626007579267,
- "src/backend/tests/unit/custom/component/test_component_instance_attributes.py::test_status_independence": 0.002959792036563158,
- "src/backend/tests/unit/custom/component/test_component_to_tool.py::test_component_to_tool": 0.019733334018383175,
- "src/backend/tests/unit/custom/component/test_component_to_tool.py::test_component_to_tool_has_no_component_as_tool": 0.0017144169833045453,
- "src/backend/tests/unit/custom/component/test_component_to_tool.py::test_component_to_toolkit": 0.003958249959396198,
- "src/backend/tests/unit/custom/component/test_componet_set_functionality.py::test_set_with_message_text_input_list": 0.0009028329805005342,
- "src/backend/tests/unit/custom/component/test_componet_set_functionality.py::test_set_with_mixed_list_input": 0.0011632919777184725,
- "src/backend/tests/unit/custom/custom_component/test_component.py::test_agent_component_send_message_events": 0.00707308403798379,
- "src/backend/tests/unit/custom/custom_component/test_component.py::test_send_message_without_database": 0.01779983498272486,
- "src/backend/tests/unit/custom/custom_component/test_component.py::test_set_component": 0.0009594590228516608,
- "src/backend/tests/unit/custom/custom_component/test_component.py::test_set_invalid_output": 0.0011498339881654829,
- "src/backend/tests/unit/custom/custom_component/test_component.py::test_set_required_inputs": 0.0019985559999895486,
- "src/backend/tests/unit/custom/custom_component/test_component.py::test_set_required_inputs_various_components": 0.006992995000018709,
- "src/backend/tests/unit/custom/custom_component/test_component.py::test_update_component_build_config_async": 0.0011770410346798599,
- "src/backend/tests/unit/custom/custom_component/test_component.py::test_update_component_build_config_sync": 0.006297668005572632,
- "src/backend/tests/unit/custom/custom_component/test_component_events.py::test_component_build_results": 2.200303374993382,
- "src/backend/tests/unit/custom/custom_component/test_component_events.py::test_component_error_handling": 2.4291231249808334,
- "src/backend/tests/unit/custom/custom_component/test_component_events.py::test_component_logging": 2.099894165963633,
- "src/backend/tests/unit/custom/custom_component/test_component_events.py::test_component_message_sending": 2.2274943760130554,
- "src/backend/tests/unit/custom/custom_component/test_component_events.py::test_component_streaming_message": 2.136350290995324,
- "src/backend/tests/unit/custom/custom_component/test_component_events.py::test_component_tool_output": 2.4911535419814754,
- "src/backend/tests/unit/custom/custom_component/test_update_outputs.py::TestComponentOutputs::test_run_and_validate_update_outputs_custom_update": 0.0022535840107593685,
- "src/backend/tests/unit/custom/custom_component/test_update_outputs.py::TestComponentOutputs::test_run_and_validate_update_outputs_invalid_output": 0.0031336249667219818,
- "src/backend/tests/unit/custom/custom_component/test_update_outputs.py::TestComponentOutputs::test_run_and_validate_update_outputs_output_validation": 0.0012186669919174165,
- "src/backend/tests/unit/custom/custom_component/test_update_outputs.py::TestComponentOutputs::test_run_and_validate_update_outputs_tool_mode": 0.0051676249713636935,
- "src/backend/tests/unit/custom/custom_component/test_update_outputs.py::TestComponentOutputs::test_run_and_validate_update_outputs_with_existing_tool_output": 0.0023152489739004523,
- "src/backend/tests/unit/custom/custom_component/test_update_outputs.py::TestComponentOutputs::test_run_and_validate_update_outputs_with_multiple_outputs": 0.0012423759908415377,
- "src/backend/tests/unit/custom/test_utils_metadata.py::TestCodeHashGeneration::test_hash_consistency": 0.0003726650320459157,
- "src/backend/tests/unit/custom/test_utils_metadata.py::TestCodeHashGeneration::test_hash_different_code": 0.00037483303458429873,
- "src/backend/tests/unit/custom/test_utils_metadata.py::TestCodeHashGeneration::test_hash_empty_source_raises": 0.0004445840313564986,
- "src/backend/tests/unit/custom/test_utils_metadata.py::TestCodeHashGeneration::test_hash_generation_basic": 0.0005094160151202232,
- "src/backend/tests/unit/custom/test_utils_metadata.py::TestCodeHashGeneration::test_hash_none_source_raises": 0.0003539169847499579,
- "src/backend/tests/unit/custom/test_utils_metadata.py::TestMetadataInTemplateBuilders::test_build_from_inputs_adds_metadata_with_module": 0.001851832988904789,
- "src/backend/tests/unit/custom/test_utils_metadata.py::TestMetadataInTemplateBuilders::test_build_template_adds_metadata_with_module": 0.002772416017251089,
- "src/backend/tests/unit/custom/test_utils_metadata.py::TestMetadataInTemplateBuilders::test_hash_generation_unicode": 0.0005855409835930914,
- "src/backend/tests/unit/custom/test_utils_metadata.py::TestMetadataInTemplateBuilders::test_hash_mock_source_raises": 0.000702917983289808,
- "src/backend/tests/unit/custom/test_utils_metadata.py::TestMetadataInTemplateBuilders::test_hash_non_string_source_raises": 0.000643500970909372,
- "src/backend/tests/unit/events/test_event_manager.py::TestEventManager::test_accessing_non_registered_callback": 0.00037075000000186265,
- "src/backend/tests/unit/events/test_event_manager.py::TestEventManager::test_accessing_non_registered_event_callback_with_recommended_fix": 0.0004805409989785403,
- "src/backend/tests/unit/events/test_event_manager.py::TestEventManager::test_accessing_registered_event_callback": 0.00043733298662118614,
- "src/backend/tests/unit/events/test_event_manager.py::TestEventManager::test_event_id_uniqueness_with_await": 0.0009195420134346932,
- "src/backend/tests/unit/events/test_event_manager.py::TestEventManager::test_handling_large_number_of_events": 0.001003707991912961,
- "src/backend/tests/unit/events/test_event_manager.py::TestEventManager::test_performance_impact_frequent_registrations": 0.0010615420469548553,
- "src/backend/tests/unit/events/test_event_manager.py::TestEventManager::test_queue_receives_correct_event_data_format": 0.0007837910088710487,
- "src/backend/tests/unit/events/test_event_manager.py::TestEventManager::test_register_event_with_empty_name": 0.0005669170059263706,
- "src/backend/tests/unit/events/test_event_manager.py::TestEventManager::test_register_event_with_invalid_name_fixed": 0.0004363759944681078,
- "src/backend/tests/unit/events/test_event_manager.py::TestEventManager::test_register_event_with_valid_name_and_callback_with_mock_callback": 0.0006233330350369215,
- "src/backend/tests/unit/events/test_event_manager.py::TestEventManager::test_register_event_with_valid_name_and_no_callback": 0.00047354199341498315,
- "src/backend/tests/unit/events/test_event_manager.py::TestEventManager::test_register_event_without_event_type_argument_fixed": 0.0004366670036688447,
- "src/backend/tests/unit/events/test_event_manager.py::TestEventManager::test_sending_event_with_complex_data": 0.001318166992859915,
- "src/backend/tests/unit/events/test_event_manager.py::TestEventManager::test_sending_event_with_none_data": 0.00038458401104435325,
- "src/backend/tests/unit/events/test_event_manager.py::TestEventManager::test_sending_event_with_valid_type_and_data_asyncio_plugin": 0.007096707937307656,
- "src/backend/tests/unit/events/test_event_manager.py::TestEventManager::test_thread_safety_accessing_events_dictionary": 0.0012902089802082628,
- "src/backend/tests/unit/exceptions/test_api.py::test_api_exception": 0.002647335029905662,
- "src/backend/tests/unit/exceptions/test_api.py::test_api_exception_no_flow": 0.0003862919984385371,
- "src/backend/tests/unit/graph/edge/test_edge_base.py::test_edge_raises_error_on_invalid_target_handle": 0.019070834037847817,
- "src/backend/tests/unit/graph/graph/state/test_state_model.py::TestCreateStateModel::test_create_model_and_assign_values_fails": 0.002439000003505498,
- "src/backend/tests/unit/graph/graph/state/test_state_model.py::TestCreateStateModel::test_create_model_with_fields_from_kwargs": 0.0007358330185525119,
- "src/backend/tests/unit/graph/graph/state/test_state_model.py::TestCreateStateModel::test_create_model_with_invalid_callable": 0.0004396670265123248,
- "src/backend/tests/unit/graph/graph/state/test_state_model.py::TestCreateStateModel::test_create_model_with_valid_return_type_annotations": 0.003273584006819874,
- "src/backend/tests/unit/graph/graph/state/test_state_model.py::TestCreateStateModel::test_create_with_multiple_components": 0.0022366240445990115,
- "src/backend/tests/unit/graph/graph/state/test_state_model.py::TestCreateStateModel::test_create_with_pydantic_field": 0.0025943750224541873,
- "src/backend/tests/unit/graph/graph/state/test_state_model.py::TestCreateStateModel::test_default_model_name_to_state": 0.0005948749894741923,
- "src/backend/tests/unit/graph/graph/state/test_state_model.py::TestCreateStateModel::test_graph_functional_start_state_update": 2.0472435829869937,
- "src/backend/tests/unit/graph/graph/state/test_state_model.py::TestCreateStateModel::test_handle_empty_kwargs_gracefully": 0.0005160419968888164,
- "src/backend/tests/unit/graph/graph/state/test_state_model.py::TestCreateStateModel::test_raise_typeerror_for_invalid_field_type_in_tuple": 0.0004901259671896696,
- "src/backend/tests/unit/graph/graph/state/test_state_model.py::TestCreateStateModel::test_raise_valueerror_for_invalid_field_type_in_tuple": 0.00342700001783669,
- "src/backend/tests/unit/graph/graph/state/test_state_model.py::TestCreateStateModel::test_raise_valueerror_for_unsupported_value_types": 0.00041295899427495897,
- "src/backend/tests/unit/graph/graph/test_base.py::test_graph": 0.014074043021537364,
- "src/backend/tests/unit/graph/graph/test_base.py::test_graph_functional": 0.01638441698742099,
- "src/backend/tests/unit/graph/graph/test_base.py::test_graph_functional_async_start": 0.029975084005855024,
- "src/backend/tests/unit/graph/graph/test_base.py::test_graph_functional_start": 0.02255220798542723,
- "src/backend/tests/unit/graph/graph/test_base.py::test_graph_functional_start_end": 0.024824750027619302,
- "src/backend/tests/unit/graph/graph/test_base.py::test_graph_not_prepared": 0.02373974901274778,
- "src/backend/tests/unit/graph/graph/test_base.py::test_graph_set_with_invalid_component": 0.0009155830484814942,
- "src/backend/tests/unit/graph/graph/test_base.py::test_graph_set_with_valid_component": 0.000254459009738639,
- "src/backend/tests/unit/graph/graph/test_base.py::test_graph_with_edge": 0.016106498980661854,
- "src/backend/tests/unit/graph/graph/test_callback_graph.py::test_callback_graph": 0.00018112600082531571,
- "src/backend/tests/unit/graph/graph/test_cycles.py::test_conditional_router_max_iterations": 0.035005624988116324,
- "src/backend/tests/unit/graph/graph/test_cycles.py::test_cycle_in_graph": 0.00026345800142735243,
- "src/backend/tests/unit/graph/graph/test_cycles.py::test_cycle_in_graph_max_iterations": 0.04827658299473114,
- "src/backend/tests/unit/graph/graph/test_cycles.py::test_that_outputs_cache_is_set_to_false_in_cycle": 0.2553665000014007,
- "src/backend/tests/unit/graph/graph/test_cycles.py::test_updated_graph_with_max_iterations": 0.00017374998424202204,
- "src/backend/tests/unit/graph/graph/test_cycles.py::test_updated_graph_with_prompts": 0.0001889169798232615,
- "src/backend/tests/unit/graph/graph/test_graph_state_model.py::test_graph_functional_start_graph_state_update": 0.01914562497404404,
- "src/backend/tests/unit/graph/graph/test_graph_state_model.py::test_graph_state_model": 0.14022970799123868,
- "src/backend/tests/unit/graph/graph/test_graph_state_model.py::test_graph_state_model_json_schema": 0.0001846670056693256,
- "src/backend/tests/unit/graph/graph/test_graph_state_model.py::test_graph_state_model_serialization": 0.017467542027588934,
- "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_add_to_vertices_being_run": 0.0003346250159665942,
- "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_are_all_predecessors_fulfilled": 0.0003333339700475335,
- "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_are_all_predecessors_fulfilled__wrong": 0.0003435010148677975,
- "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_build_run_map": 0.0003347920428495854,
- "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_from_dict": 0.00037374900421127677,
- "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_from_dict_without_run_map__bad_case": 0.0003602079814299941,
- "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_from_dict_without_run_predecessors__bad_case": 0.0003635830362327397,
- "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_from_dict_without_vertices_being_run__bad_case": 0.00033554196124896407,
- "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_from_dict_without_vertices_to_run__bad_case": 0.0003590010164771229,
- "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_is_vertex_runnable": 0.00033912499202415347,
- "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_is_vertex_runnable__wrong_is_active": 0.00034562498331069946,
- "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_is_vertex_runnable__wrong_run_predecessors": 0.0003230420406907797,
- "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_is_vertex_runnable__wrong_vertices_to_run": 0.00034383300226181746,
- "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_pickle": 0.00037833303213119507,
- "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_remove_from_predecessors": 0.0003417079569771886,
- "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_remove_vertex_from_runnables": 0.0003426669572945684,
- "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_to_dict": 0.00041245800093747675,
- "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_update_run_state": 0.0003465830232016742,
- "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_update_vertex_run_state": 0.0003370010235812515,
- "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_update_vertex_run_state__bad_case": 0.000332291005179286,
- "src/backend/tests/unit/graph/graph/test_utils.py::TestFindAllCycleEdges::test_detects_cycles_in_simple_graph": 0.0007949169958010316,
- "src/backend/tests/unit/graph/graph/test_utils.py::TestFindAllCycleEdges::test_disconnected_components": 0.0003471249947324395,
- "src/backend/tests/unit/graph/graph/test_utils.py::TestFindAllCycleEdges::test_duplicate_edges": 0.0003371660131961107,
- "src/backend/tests/unit/graph/graph/test_utils.py::TestFindAllCycleEdges::test_identifies_multiple_cycles": 0.00034716801019385457,
- "src/backend/tests/unit/graph/graph/test_utils.py::TestFindAllCycleEdges::test_large_graphs_efficiency": 0.0005987499898765236,
- "src/backend/tests/unit/graph/graph/test_utils.py::TestFindAllCycleEdges::test_mixed_data_types_in_edges": 0.00031516700983047485,
- "src/backend/tests/unit/graph/graph/test_utils.py::TestFindAllCycleEdges::test_multiple_edges_between_same_nodes": 0.000334084004862234,
- "src/backend/tests/unit/graph/graph/test_utils.py::TestFindAllCycleEdges::test_no_cycles_present": 0.00033745801192708313,
- "src/backend/tests/unit/graph/graph/test_utils.py::TestFindAllCycleEdges::test_nodes_with_no_incoming_edges": 0.0003493750118650496,
- "src/backend/tests/unit/graph/graph/test_utils.py::TestFindAllCycleEdges::test_nodes_with_no_outgoing_edges": 0.00034179104841314256,
- "src/backend/tests/unit/graph/graph/test_utils.py::TestFindAllCycleEdges::test_self_loops": 0.00035079196095466614,
- "src/backend/tests/unit/graph/graph/test_utils.py::TestFindAllCycleEdges::test_single_node_no_edges": 0.0003423750167712569,
- "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleEdge::test_detects_cycle_in_simple_graph": 0.00034487401717342436,
- "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleEdge::test_disconnected_components": 0.000340208993293345,
- "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleEdge::test_duplicate_edges": 0.00034141598735004663,
- "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleEdge::test_empty_edges_list": 0.0003461680025793612,
- "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleEdge::test_identifies_first_cycle": 0.0003400839923415333,
- "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleEdge::test_large_graph_efficiency": 0.00034791702637448907,
- "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleEdge::test_multiple_cycles": 0.00033612700644880533,
- "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleEdge::test_multiple_edges_between_same_nodes": 0.0003477930149529129,
- "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleEdge::test_nodes_with_no_outgoing_edges": 0.0003343339776620269,
- "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleEdge::test_returns_none_when_no_cycle": 0.000335916003677994,
- "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleEdge::test_self_loop_cycle": 0.000342417013598606,
- "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleEdge::test_single_node_no_edges": 0.00033850001636892557,
- "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleVertices::test_correctly_identify_and_return_vertices_in_single_cycle": 0.00035212497459724545,
- "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleVertices::test_detect_cycles_simple_graph": 0.0003949170059058815,
- "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleVertices::test_handle_duplicate_edges_fixed_fixed": 0.0003812079958152026,
- "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleVertices::test_handle_empty_edges": 0.0003472490352578461,
- "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleVertices::test_handle_large_graphs_efficiently": 0.00034791702637448907,
- "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleVertices::test_handle_no_outgoing_edges": 0.00035220899735577404,
- "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleVertices::test_handle_self_loops": 0.00036962496233172715,
- "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleVertices::test_handle_single_cycle": 0.0003559159813448787,
- "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleVertices::test_handle_two_inputs_in_cycle[0]": 0.0003766240261029452,
- "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleVertices::test_handle_two_inputs_in_cycle[1]": 0.0003915010020136833,
- "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleVertices::test_handle_two_inputs_in_cycle[2]": 0.000399957993067801,
- "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleVertices::test_handle_two_inputs_in_cycle[3]": 0.0003943339688703418,
- "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleVertices::test_handle_two_inputs_in_cycle[4]": 0.0003837910189758986,
- "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleVertices::test_no_cycles_empty_list": 0.00036545898183248937,
- "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleVertices::test_no_modification_of_input_edges_list": 0.00038695803959853947,
- "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleVertices::test_non_string_vertex_ids": 0.00037658197106793523,
- "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleVertices::test_process_disconnected_components": 0.00038874897290952504,
- "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleVertices::test_return_vertices_involved_in_multiple_cycles": 0.00037549997796304524,
- "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleVertices::test_single_vertex_no_edges": 0.0003451659868005663,
- "src/backend/tests/unit/graph/graph/test_utils.py::test_chat_inputs_at_start": 0.0004159160307608545,
- "src/backend/tests/unit/graph/graph/test_utils.py::test_filter_vertices_from_vertex": 0.0003304170095361769,
- "src/backend/tests/unit/graph/graph/test_utils.py::test_get_sorted_vertices_exact_sequence": 0.00035837400355376303,
- "src/backend/tests/unit/graph/graph/test_utils.py::test_get_sorted_vertices_simple": 0.00034170897561125457,
- "src/backend/tests/unit/graph/graph/test_utils.py::test_get_sorted_vertices_with_complex_cycle": 0.00037508297828026116,
- "src/backend/tests/unit/graph/graph/test_utils.py::test_get_sorted_vertices_with_cycle": 0.0003310420142952353,
- "src/backend/tests/unit/graph/graph/test_utils.py::test_get_sorted_vertices_with_stop": 0.0003387919859960675,
- "src/backend/tests/unit/graph/graph/test_utils.py::test_get_sorted_vertices_with_stop_at_chroma": 0.00040179098141379654,
- "src/backend/tests/unit/graph/graph/test_utils.py::test_get_sorted_vertices_with_unconnected_graph": 0.0003588759864214808,
- "src/backend/tests/unit/graph/graph/test_utils.py::test_get_successors_a": 0.00041729200165718794,
- "src/backend/tests/unit/graph/graph/test_utils.py::test_get_successors_z": 0.0004277929838281125,
- "src/backend/tests/unit/graph/graph/test_utils.py::test_has_cycle": 0.00032458300120197237,
- "src/backend/tests/unit/graph/graph/test_utils.py::test_sort_up_to_vertex_a": 0.0003444169997237623,
- "src/backend/tests/unit/graph/graph/test_utils.py::test_sort_up_to_vertex_g": 0.00033566702040843666,
- "src/backend/tests/unit/graph/graph/test_utils.py::test_sort_up_to_vertex_h": 0.0003632910083979368,
- "src/backend/tests/unit/graph/graph/test_utils.py::test_sort_up_to_vertex_invalid_vertex": 0.0004397500306367874,
- "src/backend/tests/unit/graph/graph/test_utils.py::test_sort_up_to_vertex_m": 0.0003437089908402413,
- "src/backend/tests/unit/graph/graph/test_utils.py::test_sort_up_to_vertex_n_is_start": 0.00042970897629857063,
- "src/backend/tests/unit/graph/graph/test_utils.py::test_sort_up_to_vertex_t": 0.000337707984726876,
- "src/backend/tests/unit/graph/graph/test_utils.py::test_sort_up_to_vertex_x": 0.00035191699862480164,
- "src/backend/tests/unit/graph/graph/test_utils.py::test_sort_up_to_vertex_z": 0.0003515420248731971,
- "src/backend/tests/unit/graph/test_graph.py::test_build_edges": 0.001086625037714839,
- "src/backend/tests/unit/graph/test_graph.py::test_build_nodes": 0.0012113330303691328,
- "src/backend/tests/unit/graph/test_graph.py::test_build_params": 0.00745550001738593,
- "src/backend/tests/unit/graph/test_graph.py::test_circular_dependencies": 0.0011518750106915832,
- "src/backend/tests/unit/graph/test_graph.py::test_find_last_node": 0.0018907500198110938,
- "src/backend/tests/unit/graph/test_graph.py::test_get_node": 3.6276886249543168,
- "src/backend/tests/unit/graph/test_graph.py::test_get_node_neighbors_basic": 0.0015942919999361038,
- "src/backend/tests/unit/graph/test_graph.py::test_get_root_vertex": 0.00336533400695771,
- "src/backend/tests/unit/graph/test_graph.py::test_get_vertices_with_target": 0.0015001240535639226,
- "src/backend/tests/unit/graph/test_graph.py::test_graph_structure": 3.660518125980161,
- "src/backend/tests/unit/graph/test_graph.py::test_invalid_node_types": 0.0007338339637499303,
- "src/backend/tests/unit/graph/test_graph.py::test_matched_type": 0.0011828330461867154,
- "src/backend/tests/unit/graph/test_graph.py::test_pickle_graph": 0.025576499931048602,
- "src/backend/tests/unit/graph/test_graph.py::test_process_flow": 0.0011693339911289513,
- "src/backend/tests/unit/graph/test_graph.py::test_process_flow_one_group": 0.0018959989538416266,
- "src/backend/tests/unit/graph/test_graph.py::test_process_flow_vector_store_grouped": 0.0027778749936260283,
- "src/backend/tests/unit/graph/test_graph.py::test_serialize_graph": 0.06459895797888748,
- "src/backend/tests/unit/graph/test_graph.py::test_set_new_target_handle": 0.0003247499989811331,
- "src/backend/tests/unit/graph/test_graph.py::test_ungroup_node": 0.0010553340252954513,
- "src/backend/tests/unit/graph/test_graph.py::test_update_source_handle": 0.00033258297480642796,
- "src/backend/tests/unit/graph/test_graph.py::test_update_target_handle_proxy": 0.0003360009868629277,
- "src/backend/tests/unit/graph/test_graph.py::test_update_template": 0.0004004160000476986,
- "src/backend/tests/unit/graph/test_graph.py::test_validate_edges": 0.0010510420543141663,
- "src/backend/tests/unit/graph/vertex/test_vertex_base.py::test_handle_optional_field": 0.0009100419702008367,
- "src/backend/tests/unit/graph/vertex/test_vertex_base.py::test_process_edge_parameters": 0.002049332979368046,
- "src/backend/tests/unit/graph/vertex/test_vertex_base.py::test_process_field_parameters_bool_field": 0.0010674590012058616,
- "src/backend/tests/unit/graph/vertex/test_vertex_base.py::test_process_field_parameters_code_error": 0.0010008740064222366,
- "src/backend/tests/unit/graph/vertex/test_vertex_base.py::test_process_field_parameters_dict_field_list": 0.0008272500126622617,
- "src/backend/tests/unit/graph/vertex/test_vertex_base.py::test_process_field_parameters_invalid": 0.0009061660093721002,
- "src/backend/tests/unit/graph/vertex/test_vertex_base.py::test_process_field_parameters_table_field": 0.0019628750160336494,
- "src/backend/tests/unit/graph/vertex/test_vertex_base.py::test_process_field_parameters_table_field_invalid": 0.001045251003233716,
- "src/backend/tests/unit/graph/vertex/test_vertex_base.py::test_process_field_parameters_valid": 0.0009287499997299165,
- "src/backend/tests/unit/graph/vertex/test_vertex_base.py::test_process_file_field": 0.0011359989875927567,
- "src/backend/tests/unit/graph/vertex/test_vertex_base.py::test_process_non_list_edge_param": 0.001195916993310675,
- "src/backend/tests/unit/graph/vertex/test_vertex_base.py::test_should_skip_field": 0.00097762601217255,
- "src/backend/tests/unit/helpers/test_base_model_from_schema.py::TestBuildModelFromSchema::test_correctly_accesses_descriptions_recommended_fix": 0.0007961249793879688,
- "src/backend/tests/unit/helpers/test_base_model_from_schema.py::TestBuildModelFromSchema::test_create_model_from_valid_schema": 0.0010582910035736859,
- "src/backend/tests/unit/helpers/test_base_model_from_schema.py::TestBuildModelFromSchema::test_handle_empty_schema": 0.0006187079998198897,
- "src/backend/tests/unit/helpers/test_base_model_from_schema.py::TestBuildModelFromSchema::test_handle_large_schemas_efficiently": 0.0008389580179937184,
- "src/backend/tests/unit/helpers/test_base_model_from_schema.py::TestBuildModelFromSchema::test_handles_multiple_fields_fixed_with_instance_check": 0.0008545409946236759,
- "src/backend/tests/unit/helpers/test_base_model_from_schema.py::TestBuildModelFromSchema::test_manages_unknown_field_types": 0.0004480420029722154,
- "src/backend/tests/unit/helpers/test_base_model_from_schema.py::TestBuildModelFromSchema::test_nested_list_and_dict_types_handling": 0.0008984589949250221,
- "src/backend/tests/unit/helpers/test_base_model_from_schema.py::TestBuildModelFromSchema::test_no_duplicate_field_names_fixed_fixed": 0.0007451680139638484,
- "src/backend/tests/unit/helpers/test_base_model_from_schema.py::TestBuildModelFromSchema::test_process_schema_missing_optional_keys_updated": 0.0012528329971246421,
- "src/backend/tests/unit/helpers/test_base_model_from_schema.py::TestBuildModelFromSchema::test_raises_error_for_invalid_input_different_exception_with_specific_exception": 0.00040779198752716184,
- "src/backend/tests/unit/helpers/test_base_model_from_schema.py::TestBuildModelFromSchema::test_returns_valid_model_class": 0.0012605839874595404,
- "src/backend/tests/unit/helpers/test_base_model_from_schema.py::TestBuildModelFromSchema::test_schema_fields_with_none_default": 0.012897376000182703,
- "src/backend/tests/unit/helpers/test_base_model_from_schema.py::TestBuildModelFromSchema::test_supports_single_and_multiple_type_annotations": 0.0007144159753806889,
- "src/backend/tests/unit/helpers/test_data.py::test_data_to_text_list[{name} is {age} years old-data0-expected0]": 0.00042258299072273076,
- "src/backend/tests/unit/helpers/test_data.py::test_data_to_text_list[{name} is {age} years old-data1-expected1]": 0.0004137920041102916,
- "src/backend/tests/unit/helpers/test_data.py::test_data_to_text_list__data_contains_nested_data_key": 0.0003497909929137677,
- "src/backend/tests/unit/helpers/test_data.py::test_data_to_text_list__data_with_data_attribute_empty": 0.00038325003697536886,
- "src/backend/tests/unit/helpers/test_data.py::test_data_to_text_list__template_empty": 0.0003547930100467056,
- "src/backend/tests/unit/helpers/test_data.py::test_data_to_text_list__template_without_placeholder": 0.00035595803637988865,
- "src/backend/tests/unit/helpers/test_data.py::test_data_to_text_list__template_without_placeholder_and_data_attribute_empty": 0.0003455839760135859,
- "src/backend/tests/unit/helpers/test_data.py::test_data_to_text_list__template_wrong_placeholder": 0.0004950419825036079,
- "src/backend/tests/unit/helpers/test_data_to_text_list.py::test_data_to_text_list_complex_nested_data": 0.00034674903145059943,
- "src/backend/tests/unit/helpers/test_data_to_text_list.py::test_data_to_text_list_empty_data_dict": 0.00033629301469773054,
- "src/backend/tests/unit/helpers/test_data_to_text_list.py::test_data_to_text_list_empty_template": 0.00032604101579636335,
- "src/backend/tests/unit/helpers/test_data_to_text_list.py::test_data_to_text_list_invalid_template_type": 0.0003869589709211141,
- "src/backend/tests/unit/helpers/test_data_to_text_list.py::test_data_to_text_list_missing_key": 0.0003401660069357604,
- "src/backend/tests/unit/helpers/test_data_to_text_list.py::test_data_to_text_list_mixed_data_types": 0.0003470830270089209,
- "src/backend/tests/unit/helpers/test_data_to_text_list.py::test_data_to_text_list_none_data": 0.00032058299984782934,
- "src/backend/tests/unit/helpers/test_data_to_text_list.py::test_data_to_text_list_none_template": 0.0004567909927573055,
- "src/backend/tests/unit/helpers/test_data_to_text_list.py::test_data_to_text_list_parametrized[Error: {text}-data4-expected_text4]": 0.00038945901906117797,
- "src/backend/tests/unit/helpers/test_data_to_text_list.py::test_data_to_text_list_parametrized[Text: {text}-data0-expected_text0]": 0.00041429197881370783,
- "src/backend/tests/unit/helpers/test_data_to_text_list.py::test_data_to_text_list_parametrized[User: {text}-data3-expected_text3]": 0.0003852079971693456,
- "src/backend/tests/unit/helpers/test_data_to_text_list.py::test_data_to_text_list_parametrized[Value: {text}-data5-expected_text5]": 0.0003813339862972498,
- "src/backend/tests/unit/helpers/test_data_to_text_list.py::test_data_to_text_list_parametrized[{name} is {age} years old-data1-expected_text1]": 0.0004074589814990759,
- "src/backend/tests/unit/helpers/test_data_to_text_list.py::test_data_to_text_list_parametrized[{name} is {age} years old-data2-expected_text2]": 0.00037929200334474444,
- "src/backend/tests/unit/helpers/test_data_to_text_list.py::test_data_to_text_list_string_data": 0.0003257500065956265,
- "src/backend/tests/unit/initial_setup/starter_projects/test_memory_chatbot.py::test_memory_chatbot": 0.007514458964578807,
- "src/backend/tests/unit/initial_setup/starter_projects/test_memory_chatbot.py::test_memory_chatbot_dump_components_and_edges": 0.020821666985284537,
- "src/backend/tests/unit/initial_setup/starter_projects/test_memory_chatbot.py::test_memory_chatbot_dump_structure": 0.021207208948908374,
- "src/backend/tests/unit/initial_setup/starter_projects/test_vector_store_rag.py::test_vector_store_rag": 0.09574437499395572,
- "src/backend/tests/unit/initial_setup/starter_projects/test_vector_store_rag.py::test_vector_store_rag_add": 0.09895337498164736,
- "src/backend/tests/unit/initial_setup/starter_projects/test_vector_store_rag.py::test_vector_store_rag_dump": 0.05050879102782346,
- "src/backend/tests/unit/initial_setup/starter_projects/test_vector_store_rag.py::test_vector_store_rag_dump_components_and_edges": 0.05553699901793152,
- "src/backend/tests/unit/initial_setup/test_setup_functions.py::test_get_or_create_default_folder_concurrent_calls": 2.259237292018952,
- "src/backend/tests/unit/initial_setup/test_setup_functions.py::test_get_or_create_default_folder_creation": 2.0082786250277422,
- "src/backend/tests/unit/initial_setup/test_setup_functions.py::test_get_or_create_default_folder_idempotency": 2.046581625996623,
- "src/backend/tests/unit/inputs/test_inputs.py::test_bool_input_invalid": 0.0003367919707670808,
- "src/backend/tests/unit/inputs/test_inputs.py::test_bool_input_valid": 0.0003392079961486161,
- "src/backend/tests/unit/inputs/test_inputs.py::test_code_input_valid": 0.0007265819876920432,
- "src/backend/tests/unit/inputs/test_inputs.py::test_data_input_valid": 0.0005829989968333393,
- "src/backend/tests/unit/inputs/test_inputs.py::test_dict_input_invalid": 0.0003485830093268305,
- "src/backend/tests/unit/inputs/test_inputs.py::test_dict_input_valid": 0.0003449999785516411,
- "src/backend/tests/unit/inputs/test_inputs.py::test_dropdown_input_invalid": 0.0003276670177001506,
- "src/backend/tests/unit/inputs/test_inputs.py::test_dropdown_input_valid": 0.000371375004760921,
- "src/backend/tests/unit/inputs/test_inputs.py::test_file_input_valid": 0.0003250000299885869,
- "src/backend/tests/unit/inputs/test_inputs.py::test_float_input_invalid": 0.0003447070193942636,
- "src/backend/tests/unit/inputs/test_inputs.py::test_float_input_valid": 0.0003342499549034983,
- "src/backend/tests/unit/inputs/test_inputs.py::test_handle_input_invalid": 0.0004087919951416552,
- "src/backend/tests/unit/inputs/test_inputs.py::test_handle_input_valid": 0.001097415981348604,
- "src/backend/tests/unit/inputs/test_inputs.py::test_instantiate_input_comprehensive": 0.0004297079867683351,
- "src/backend/tests/unit/inputs/test_inputs.py::test_instantiate_input_invalid": 0.0011178749846294522,
- "src/backend/tests/unit/inputs/test_inputs.py::test_instantiate_input_valid": 0.0008210840169340372,
- "src/backend/tests/unit/inputs/test_inputs.py::test_int_input_invalid": 0.0003512090479489416,
- "src/backend/tests/unit/inputs/test_inputs.py::test_int_input_valid": 0.0004939170030411333,
- "src/backend/tests/unit/inputs/test_inputs.py::test_message_text_input_invalid": 0.0013644169666804373,
- "src/backend/tests/unit/inputs/test_inputs.py::test_message_text_input_valid": 0.0013636240037158132,
- "src/backend/tests/unit/inputs/test_inputs.py::test_multiline_input_invalid": 0.0006090830138418823,
- "src/backend/tests/unit/inputs/test_inputs.py::test_multiline_input_valid": 0.0005961250280961394,
- "src/backend/tests/unit/inputs/test_inputs.py::test_multiline_secret_input_invalid": 0.000488289981149137,
- "src/backend/tests/unit/inputs/test_inputs.py::test_multiline_secret_input_valid": 0.0004837500164285302,
- "src/backend/tests/unit/inputs/test_inputs.py::test_multiselect_input_invalid": 0.0003418329870328307,
- "src/backend/tests/unit/inputs/test_inputs.py::test_multiselect_input_valid": 0.00033862600685097277,
- "src/backend/tests/unit/inputs/test_inputs.py::test_nested_dict_input_invalid": 0.00042608403600752354,
- "src/backend/tests/unit/inputs/test_inputs.py::test_nested_dict_input_valid": 0.00042787502752617,
- "src/backend/tests/unit/inputs/test_inputs.py::test_prompt_input_valid": 0.0006914580008015037,
- "src/backend/tests/unit/inputs/test_inputs.py::test_secret_str_input_invalid": 0.00048574901302345097,
- "src/backend/tests/unit/inputs/test_inputs.py::test_secret_str_input_valid": 0.0010794580448418856,
- "src/backend/tests/unit/inputs/test_inputs.py::test_slider_input_valid": 0.0015779999957885593,
- "src/backend/tests/unit/inputs/test_inputs.py::test_str_input_invalid": 0.002764416014542803,
- "src/backend/tests/unit/inputs/test_inputs.py::test_str_input_valid": 0.0007458760228473693,
- "src/backend/tests/unit/inputs/test_inputs.py::test_tab_input_invalid[non_string_value-options2-123-TypeError]": 0.00045012496411800385,
- "src/backend/tests/unit/inputs/test_inputs.py::test_tab_input_invalid[option_too_long-options1-Tab1-ValidationError]": 0.00045858300291001797,
- "src/backend/tests/unit/inputs/test_inputs.py::test_tab_input_invalid[too_many_options-options0-Tab1-ValidationError]": 0.0004446660168468952,
- "src/backend/tests/unit/inputs/test_inputs.py::test_tab_input_valid[empty_options-options2--expected_options2-]": 0.00040629200520925224,
- "src/backend/tests/unit/inputs/test_inputs.py::test_tab_input_valid[fewer_options-options1-Tab2-expected_options1-Tab2]": 0.00041783301276154816,
- "src/backend/tests/unit/inputs/test_inputs.py::test_tab_input_valid[standard_valid-options0-Tab1-expected_options0-Tab1]": 0.0004344180051703006,
- "src/backend/tests/unit/inputs/test_inputs.py::test_table_input_invalid": 0.0012402489956002682,
- "src/backend/tests/unit/inputs/test_inputs.py::test_table_input_valid": 0.0024859180266503245,
- "src/backend/tests/unit/io/test_io_schema.py::TestCreateInputSchema::test_complex_nested_structures_handling": 0.000577708997298032,
- "src/backend/tests/unit/io/test_io_schema.py::TestCreateInputSchema::test_default_values_assignment": 0.0005712080164812505,
- "src/backend/tests/unit/io/test_io_schema.py::TestCreateInputSchema::test_default_values_for_non_required_fields": 0.0005308340187184513,
- "src/backend/tests/unit/io/test_io_schema.py::TestCreateInputSchema::test_empty_list_of_inputs": 0.0005046669975854456,
- "src/backend/tests/unit/io/test_io_schema.py::TestCreateInputSchema::test_field_types_conversion": 0.0005328759725671262,
- "src/backend/tests/unit/io/test_io_schema.py::TestCreateInputSchema::test_fields_creation_with_correct_types_and_attributes": 0.000604833010584116,
- "src/backend/tests/unit/io/test_io_schema.py::TestCreateInputSchema::test_invalid_field_types_handling": 0.0005195839912630618,
- "src/backend/tests/unit/io/test_io_schema.py::TestCreateInputSchema::test_is_list_attribute_processing": 0.0005977499822620302,
- "src/backend/tests/unit/io/test_io_schema.py::TestCreateInputSchema::test_is_list_handling": 0.0005844590195920318,
- "src/backend/tests/unit/io/test_io_schema.py::TestCreateInputSchema::test_missing_attributes_handling": 0.0005157490086276084,
- "src/backend/tests/unit/io/test_io_schema.py::TestCreateInputSchema::test_missing_optional_attributes": 0.0005411249876488,
- "src/backend/tests/unit/io/test_io_schema.py::TestCreateInputSchema::test_mixed_required_optional_fields_processing": 0.0013469569676090032,
- "src/backend/tests/unit/io/test_io_schema.py::TestCreateInputSchema::test_multiple_input_types": 0.0007255829696077853,
- "src/backend/tests/unit/io/test_io_schema.py::TestCreateInputSchema::test_non_standard_field_types_handling": 0.0005486670415848494,
- "src/backend/tests/unit/io/test_io_schema.py::TestCreateInputSchema::test_none_default_value_handling": 0.0005297920142766088,
- "src/backend/tests/unit/io/test_io_schema.py::TestCreateInputSchema::test_options_attribute_processing": 0.0006539170281030238,
- "src/backend/tests/unit/io/test_io_schema.py::TestCreateInputSchema::test_options_handling": 0.0005805830005556345,
- "src/backend/tests/unit/io/test_io_schema.py::TestCreateInputSchema::test_passing_input_type_directly": 0.0003440830623731017,
- "src/backend/tests/unit/io/test_io_schema.py::TestCreateInputSchema::test_schema_model_creation": 0.0005790819996036589,
- "src/backend/tests/unit/io/test_io_schema.py::TestCreateInputSchema::test_single_input_type_conversion": 0.0008891249890439212,
- "src/backend/tests/unit/io/test_io_schema.py::TestCreateInputSchema::test_single_input_type_replica": 0.0005427490104921162,
- "src/backend/tests/unit/io/test_io_schema.py::TestCreateInputSchema::test_special_characters_in_names_handling": 0.001616499008378014,
- "src/backend/tests/unit/io/test_io_schema.py::test_create_input_schema": 0.0024146240029949695,
- "src/backend/tests/unit/io/test_table_schema.py::TestColumn::test_create_column_with_valid_formatter": 0.00039595901034772396,
- "src/backend/tests/unit/io/test_table_schema.py::TestColumn::test_create_column_without_display_name": 0.00048308196710422635,
- "src/backend/tests/unit/io/test_table_schema.py::TestColumn::test_create_with_type_instead_of_formatter": 0.0006470840307883918,
- "src/backend/tests/unit/io/test_table_schema.py::TestColumn::test_default_sortable_filterable": 0.00034454301930963993,
- "src/backend/tests/unit/io/test_table_schema.py::TestColumn::test_description_and_default": 0.0003719580126926303,
- "src/backend/tests/unit/io/test_table_schema.py::TestColumn::test_formatter_explicitly_set_to_enum": 0.00034445797791704535,
- "src/backend/tests/unit/io/test_table_schema.py::TestColumn::test_formatter_none_when_not_provided": 0.00036145898047834635,
- "src/backend/tests/unit/io/test_table_schema.py::TestColumn::test_formatter_set_based_on_value": 0.00038529204903170466,
- "src/backend/tests/unit/io/test_table_schema.py::TestColumn::test_invalid_formatter_raises_value_error": 0.000709417014149949,
- "src/backend/tests/unit/schema/test_content_block.py::TestContentBlock::test_allow_markdown_override": 0.00043202399990605045,
- "src/backend/tests/unit/schema/test_content_block.py::TestContentBlock::test_initialize_with_empty_contents": 0.0004326759999457863,
- "src/backend/tests/unit/schema/test_content_block.py::TestContentBlock::test_initialize_with_valid_title_and_contents": 0.0006206660000316333,
- "src/backend/tests/unit/schema/test_content_block.py::TestContentBlock::test_invalid_contents_type": 0.0005189370000380222,
- "src/backend/tests/unit/schema/test_content_block.py::TestContentBlock::test_media_url_handling": 0.0004379549999384835,
- "src/backend/tests/unit/schema/test_content_block.py::TestContentBlock::test_serialize_contents": 0.0005069929999308442,
- "src/backend/tests/unit/schema/test_content_block.py::TestContentBlock::test_single_content_conversion": 0.0004610800000364179,
- "src/backend/tests/unit/schema/test_content_block.py::TestContentBlock::test_validate_different_content_types": 0.0005015139998931772,
- "src/backend/tests/unit/schema/test_content_types.py::TestBaseContent::test_base_content_serialization": 0.00048684599994430755,
- "src/backend/tests/unit/schema/test_content_types.py::TestBaseContent::test_base_content_with_duration": 0.00042160500004229107,
- "src/backend/tests/unit/schema/test_content_types.py::TestBaseContent::test_base_content_with_header": 0.00044598999988920696,
- "src/backend/tests/unit/schema/test_content_types.py::TestCodeContent::test_code_content_creation": 0.0004456399999526184,
- "src/backend/tests/unit/schema/test_content_types.py::TestCodeContent::test_code_content_without_title": 0.0004395880000629404,
- "src/backend/tests/unit/schema/test_content_types.py::TestErrorContent::test_error_content_creation": 0.0004261540000243258,
- "src/backend/tests/unit/schema/test_content_types.py::TestErrorContent::test_error_content_optional_fields": 0.0004228080000530099,
- "src/backend/tests/unit/schema/test_content_types.py::TestJSONContent::test_json_content_complex_data": 0.0004502189999584516,
- "src/backend/tests/unit/schema/test_content_types.py::TestJSONContent::test_json_content_creation": 0.0004199019999759912,
- "src/backend/tests/unit/schema/test_content_types.py::TestMediaContent::test_media_content_creation": 0.0004447079999181369,
- "src/backend/tests/unit/schema/test_content_types.py::TestMediaContent::test_media_content_without_caption": 0.0004189800000631294,
- "src/backend/tests/unit/schema/test_content_types.py::TestTextContent::test_text_content_creation": 0.0004448079998837784,
- "src/backend/tests/unit/schema/test_content_types.py::TestTextContent::test_text_content_with_duration": 0.0004446890000053827,
- "src/backend/tests/unit/schema/test_content_types.py::TestToolContent::test_tool_content_creation": 0.0004214649998175446,
- "src/backend/tests/unit/schema/test_content_types.py::TestToolContent::test_tool_content_minimal": 0.0004211139998915314,
- "src/backend/tests/unit/schema/test_content_types.py::TestToolContent::test_tool_content_with_error": 0.00044189400000504975,
- "src/backend/tests/unit/schema/test_content_types.py::test_content_type_discrimination": 0.00043243400000392285,
- "src/backend/tests/unit/schema/test_image.py::test_get_file_paths": 0.004353788999878816,
- "src/backend/tests/unit/schema/test_image.py::test_get_file_paths__empty": 0.0005339450000292345,
- "src/backend/tests/unit/schema/test_image.py::test_get_files": 0.006616151999992326,
- "src/backend/tests/unit/schema/test_image.py::test_get_files__convert_to_base64": 0.006464670000127626,
- "src/backend/tests/unit/schema/test_image.py::test_get_files__empty": 0.0008111910000252465,
- "src/backend/tests/unit/schema/test_image.py::test_is_image_file": 0.0012336979999645337,
- "src/backend/tests/unit/schema/test_image.py::test_is_image_file__not_image": 0.0007717380001395213,
- "src/backend/tests/unit/schema/test_schema_data.py::TestDataSchema::test_data_to_message_ai_response": 0.0004977869999720497,
- "src/backend/tests/unit/schema/test_schema_data.py::TestDataSchema::test_data_to_message_invalid_image_path": 0.000846755999987181,
- "src/backend/tests/unit/schema/test_schema_data.py::TestDataSchema::test_data_to_message_missing_required_keys": 0.0005377410000164673,
- "src/backend/tests/unit/schema/test_schema_data.py::TestDataSchema::test_data_to_message_with_image": 0.0010502449999876262,
- "src/backend/tests/unit/schema/test_schema_data.py::TestDataSchema::test_data_to_message_with_multiple_images": 0.001101541999901201,
- "src/backend/tests/unit/schema/test_schema_data.py::TestDataSchema::test_data_to_message_with_text_only": 0.00050662299997839,
- "src/backend/tests/unit/schema/test_schema_data_set.py::test_add_row_with_data_object": 0.0018233059998919998,
- "src/backend/tests/unit/schema/test_schema_data_set.py::test_add_row_with_dict": 0.0018630499998835148,
- "src/backend/tests/unit/schema/test_schema_data_set.py::test_add_rows_mixed_types": 0.0017488869999624512,
- "src/backend/tests/unit/schema/test_schema_data_set.py::test_add_rows_with_data_objects": 0.0017675429999144399,
- "src/backend/tests/unit/schema/test_schema_data_set.py::test_add_rows_with_dicts": 0.0017574629999899116,
- "src/backend/tests/unit/schema/test_schema_data_set.py::test_dataset_pandas_operations": 0.0025748170000952086,
- "src/backend/tests/unit/schema/test_schema_data_set.py::test_dataset_type_preservation": 0.001357018000021526,
- "src/backend/tests/unit/schema/test_schema_data_set.py::test_dataset_with_null_values": 0.0011962880000737641,
- "src/backend/tests/unit/schema/test_schema_data_set.py::test_from_data_list_basic": 0.0012104359999511871,
- "src/backend/tests/unit/schema/test_schema_data_set.py::test_from_data_list_empty": 0.0008153999998512518,
- "src/backend/tests/unit/schema/test_schema_data_set.py::test_from_data_list_missing_fields": 0.0010372120001420626,
- "src/backend/tests/unit/schema/test_schema_data_set.py::test_from_data_list_nested_data": 0.0008734570000115127,
- "src/backend/tests/unit/schema/test_schema_data_set.py::test_init_with_data_objects": 0.000930171999925733,
- "src/backend/tests/unit/schema/test_schema_data_set.py::test_init_with_dict_of_lists": 0.0008815709999225874,
- "src/backend/tests/unit/schema/test_schema_data_set.py::test_init_with_dicts": 0.0009158049999768991,
- "src/backend/tests/unit/schema/test_schema_data_set.py::test_init_with_invalid_list": 0.0007283770000867662,
- "src/backend/tests/unit/schema/test_schema_data_set.py::test_init_with_kwargs": 0.000992940999935854,
- "src/backend/tests/unit/schema/test_schema_data_set.py::test_init_with_none": 0.0005954079998673478,
- "src/backend/tests/unit/schema/test_schema_data_set.py::test_init_with_pandas_dataframe": 0.0008785859999989043,
- "src/backend/tests/unit/schema/test_schema_data_set.py::test_to_data_list_basic": 0.0012922159999106952,
- "src/backend/tests/unit/schema/test_schema_data_set.py::test_to_data_list_empty": 0.0007326330000978487,
- "src/backend/tests/unit/schema/test_schema_data_set.py::test_to_data_list_modified_data": 0.0017241119999198418,
- "src/backend/tests/unit/schema/test_schema_dataframe.py::TestDataFrameSchema::test_add_row": 0.002079252999919845,
- "src/backend/tests/unit/schema/test_schema_dataframe.py::TestDataFrameSchema::test_add_rows": 0.0017790240000294943,
- "src/backend/tests/unit/schema/test_schema_dataframe.py::TestDataFrameSchema::test_bool_operator": 0.0008708410000508593,
- "src/backend/tests/unit/schema/test_schema_dataframe.py::TestDataFrameSchema::test_to_data_list": 0.001102522999985922,
- "src/backend/tests/unit/schema/test_schema_dataframe.py::TestDataFrameSchema::test_to_lc_documents": 0.0010846300000366682,
- "src/backend/tests/unit/schema/test_schema_message.py::test_message_async_prompt_serialization": 0.00209424999775365,
- "src/backend/tests/unit/schema/test_schema_message.py::test_message_from_ai_text": 0.000564160999942942,
- "src/backend/tests/unit/schema/test_schema_message.py::test_message_from_human_text": 0.0005765839999867239,
- "src/backend/tests/unit/schema/test_schema_message.py::test_message_prompt_serialization": 0.004082765000021027,
- "src/backend/tests/unit/schema/test_schema_message.py::test_message_serialization": 0.0006620419999308069,
- "src/backend/tests/unit/schema/test_schema_message.py::test_message_to_lc_without_sender": 0.0005675169999221907,
- "src/backend/tests/unit/schema/test_schema_message.py::test_message_with_invalid_image_path": 0.0007580019998840726,
- "src/backend/tests/unit/schema/test_schema_message.py::test_message_with_multiple_images": 0.002024892000008549,
- "src/backend/tests/unit/schema/test_schema_message.py::test_message_with_single_image": 0.0018490049999400071,
- "src/backend/tests/unit/schema/test_schema_message.py::test_message_without_sender": 0.0005725160000338292,
- "src/backend/tests/unit/schema/test_schema_message.py::test_timestamp_serialization": 0.0011540589998730866,
- "src/backend/tests/unit/serialization/test_serialization.py::TestSerializationHypothesis::test_async_iterator_handling": 0.0004393750277813524,
- "src/backend/tests/unit/serialization/test_serialization.py::TestSerializationHypothesis::test_builtin_type_serialization": 0.00040691703907214105,
- "src/backend/tests/unit/serialization/test_serialization.py::TestSerializationHypothesis::test_bytes_serialization": 0.04117608297383413,
- "src/backend/tests/unit/serialization/test_serialization.py::TestSerializationHypothesis::test_class_serialization": 0.010863458010135219,
- "src/backend/tests/unit/serialization/test_serialization.py::TestSerializationHypothesis::test_custom_type_serialization": 0.00038662500446662307,
- "src/backend/tests/unit/serialization/test_serialization.py::TestSerializationHypothesis::test_datetime_serialization": 0.04621591599425301,
- "src/backend/tests/unit/serialization/test_serialization.py::TestSerializationHypothesis::test_decimal_serialization": 0.04977587499888614,
- "src/backend/tests/unit/serialization/test_serialization.py::TestSerializationHypothesis::test_dict_serialization": 0.15210558305261657,
- "src/backend/tests/unit/serialization/test_serialization.py::TestSerializationHypothesis::test_document_serialization": 0.00047724999603815377,
- "src/backend/tests/unit/serialization/test_serialization.py::TestSerializationHypothesis::test_enum_serialization": 0.0007805000059306622,
- "src/backend/tests/unit/serialization/test_serialization.py::TestSerializationHypothesis::test_fallback_serialization": 0.0030999580048955977,
- "src/backend/tests/unit/serialization/test_serialization.py::TestSerializationHypothesis::test_generic_type_serialization": 0.00047137399087660015,
- "src/backend/tests/unit/serialization/test_serialization.py::TestSerializationHypothesis::test_instance_serialization": 0.0004892089928034693,
- "src/backend/tests/unit/serialization/test_serialization.py::TestSerializationHypothesis::test_list_truncation": 0.15894745799596421,
- "src/backend/tests/unit/serialization/test_serialization.py::TestSerializationHypothesis::test_max_items_none": 0.12451404103194363,
- "src/backend/tests/unit/serialization/test_serialization.py::TestSerializationHypothesis::test_max_length_none": 0.043650792009430006,
- "src/backend/tests/unit/serialization/test_serialization.py::TestSerializationHypothesis::test_nested_class_serialization": 0.008709791000001132,
- "src/backend/tests/unit/serialization/test_serialization.py::TestSerializationHypothesis::test_nested_structures": 0.25152941700071096,
- "src/backend/tests/unit/serialization/test_serialization.py::TestSerializationHypothesis::test_none_serialization": 0.000351458991644904,
- "src/backend/tests/unit/serialization/test_serialization.py::TestSerializationHypothesis::test_numpy_int64_serialization": 0.0003716670034918934,
- "src/backend/tests/unit/serialization/test_serialization.py::TestSerializationHypothesis::test_numpy_numeric_serialization": 0.00049083202611655,
- "src/backend/tests/unit/serialization/test_serialization.py::TestSerializationHypothesis::test_pandas_serialization": 0.003586333041312173,
- "src/backend/tests/unit/serialization/test_serialization.py::TestSerializationHypothesis::test_primitive_types": 0.050340124987997115,
- "src/backend/tests/unit/serialization/test_serialization.py::TestSerializationHypothesis::test_pydantic_class_serialization": 0.0003987920063082129,
- "src/backend/tests/unit/serialization/test_serialization.py::TestSerializationHypothesis::test_pydantic_modern_model": 0.04153270801180042,
- "src/backend/tests/unit/serialization/test_serialization.py::TestSerializationHypothesis::test_pydantic_v1_model": 0.04171108399168588,
- "src/backend/tests/unit/serialization/test_serialization.py::TestSerializationHypothesis::test_series_serialization": 0.0005517500103451312,
- "src/backend/tests/unit/serialization/test_serialization.py::TestSerializationHypothesis::test_series_truncation": 0.0013347910135053098,
- "src/backend/tests/unit/serialization/test_serialization.py::TestSerializationHypothesis::test_string_serialization": 0.11072829196928069,
- "src/backend/tests/unit/serialization/test_serialization.py::TestSerializationHypothesis::test_type_alias_serialization": 0.00038008400588296354,
- "src/backend/tests/unit/serialization/test_serialization.py::TestSerializationHypothesis::test_uuid_serialization": 0.04183741699671373,
- "src/backend/tests/unit/services/database/test_utils.py::test_truncate_json__large_case": 0.0013368430001037268,
- "src/backend/tests/unit/services/database/test_utils.py::test_truncate_json__small_case": 0.001604773000053683,
- "src/backend/tests/unit/services/database/test_vertex_builds.py::test_concurrent_log_vertex_build": 0.1044863749993965,
- "src/backend/tests/unit/services/database/test_vertex_builds.py::test_log_vertex_build_basic": 0.07359408398042433,
- "src/backend/tests/unit/services/database/test_vertex_builds.py::test_log_vertex_build_integrity_error": 0.05558145898976363,
- "src/backend/tests/unit/services/database/test_vertex_builds.py::test_log_vertex_build_max_global_limit": 3.7613749580050353,
- "src/backend/tests/unit/services/database/test_vertex_builds.py::test_log_vertex_build_max_per_vertex_limit": 0.06237437602248974,
- "src/backend/tests/unit/services/database/test_vertex_builds.py::test_log_vertex_build_ordering": 0.06448433399782516,
- "src/backend/tests/unit/services/database/test_vertex_builds.py::test_log_vertex_build_with_different_limits[1-1]": 0.055347916029859334,
- "src/backend/tests/unit/services/database/test_vertex_builds.py::test_log_vertex_build_with_different_limits[100-50]": 0.2438022079586517,
- "src/backend/tests/unit/services/database/test_vertex_builds.py::test_log_vertex_build_with_different_limits[5-3]": 0.0621429999882821,
- "src/backend/tests/unit/services/flow/test_flow_runner.py::test_database_exists_check": 0.050625124014914036,
- "src/backend/tests/unit/services/flow/test_flow_runner.py::test_get_flow_dict_from_dict": 0.00875162601005286,
- "src/backend/tests/unit/services/flow/test_flow_runner.py::test_get_flow_dict_invalid_input": 0.00851883293944411,
- "src/backend/tests/unit/services/flow/test_flow_runner.py::test_initialize_database": 0.23140333301853389,
- "src/backend/tests/unit/services/flow/test_flow_runner.py::test_run_with_dict_input": 0.2517979569674935,
- "src/backend/tests/unit/services/flow/test_flow_runner.py::test_run_with_different_input_types": 0.2700725829927251,
- "src/backend/tests/unit/services/tasks/test_temp_flow_cleanup.py::test_cleanup_orphaned_records_no_orphans": 2.032830042007845,
- "src/backend/tests/unit/services/tasks/test_temp_flow_cleanup.py::test_cleanup_orphaned_records_with_orphans": 2.099078124971129,
- "src/backend/tests/unit/services/tasks/test_temp_flow_cleanup.py::test_cleanup_worker_run_with_exception": 0.0018798749661073089,
- "src/backend/tests/unit/services/tasks/test_temp_flow_cleanup.py::test_cleanup_worker_start_stop": 0.017580665997229517,
- "src/backend/tests/unit/services/tracing/test_tracing_service.py::test_cleanup_inputs": 0.0015315829950850457,
- "src/backend/tests/unit/services/tracing/test_tracing_service.py::test_concurrent_tracing": 4.01801554299891,
- "src/backend/tests/unit/services/tracing/test_tracing_service.py::test_deactivated_tracing": 0.006589290976990014,
- "src/backend/tests/unit/services/tracing/test_tracing_service.py::test_get_langchain_callbacks": 0.014096583996433765,
- "src/backend/tests/unit/services/tracing/test_tracing_service.py::test_start_end_tracers": 0.0028827089990954846,
- "src/backend/tests/unit/services/tracing/test_tracing_service.py::test_start_tracers_with_exception": 0.004516124987276271,
- "src/backend/tests/unit/services/tracing/test_tracing_service.py::test_trace_component": 0.20807087401044555,
- "src/backend/tests/unit/services/tracing/test_tracing_service.py::test_trace_component_with_exception": 0.11320775104104541,
- "src/backend/tests/unit/services/tracing/test_tracing_service.py::test_trace_worker_with_exception": 0.11201870901277289,
- "src/backend/tests/unit/services/variable/test_service.py::test_create_variable": 0.05143954200320877,
- "src/backend/tests/unit/services/variable/test_service.py::test_delete_varaible_by_id": 0.0060262500192038715,
- "src/backend/tests/unit/services/variable/test_service.py::test_delete_variable": 0.06105941699934192,
- "src/backend/tests/unit/services/variable/test_service.py::test_delete_variable__ValueError": 0.0035743750049732625,
- "src/backend/tests/unit/services/variable/test_service.py::test_delete_variable__valueerror": 0.050905333016999066,
- "src/backend/tests/unit/services/variable/test_service.py::test_delete_variable_by_id": 0.05836579197784886,
- "src/backend/tests/unit/services/variable/test_service.py::test_delete_variable_by_id__ValueError": 0.27340612601256,
- "src/backend/tests/unit/services/variable/test_service.py::test_delete_variable_by_id__valueerror": 0.05316070799017325,
- "src/backend/tests/unit/services/variable/test_service.py::test_get_variable": 0.052720709005370736,
- "src/backend/tests/unit/services/variable/test_service.py::test_get_variable__TypeError": 0.00458791694836691,
- "src/backend/tests/unit/services/variable/test_service.py::test_get_variable__ValueError": 0.003811584028881043,
- "src/backend/tests/unit/services/variable/test_service.py::test_get_variable__typeerror": 0.05184795896639116,
- "src/backend/tests/unit/services/variable/test_service.py::test_get_variable__valueerror": 0.05390345799969509,
- "src/backend/tests/unit/services/variable/test_service.py::test_initialize_user_variables__create_and_update": 0.13024941601906903,
- "src/backend/tests/unit/services/variable/test_service.py::test_initialize_user_variables__donkey": 0.0002315010060556233,
- "src/backend/tests/unit/services/variable/test_service.py::test_initialize_user_variables__not_found_variable": 0.055186540994327515,
- "src/backend/tests/unit/services/variable/test_service.py::test_initialize_user_variables__skipping_environment_variable_storage": 0.049173210019944236,
- "src/backend/tests/unit/services/variable/test_service.py::test_list_variables": 0.05633520899573341,
- "src/backend/tests/unit/services/variable/test_service.py::test_list_variables__empty": 0.055090414971346036,
- "src/backend/tests/unit/services/variable/test_service.py::test_update_variable": 0.059357249963795766,
- "src/backend/tests/unit/services/variable/test_service.py::test_update_variable__ValueError": 0.0036237920285202563,
- "src/backend/tests/unit/services/variable/test_service.py::test_update_variable__valueerror": 0.05496270800358616,
- "src/backend/tests/unit/services/variable/test_service.py::test_update_variable_fields": 0.05812974998843856,
- "src/backend/tests/unit/test_api_key.py::test_create_api_key": 2.7594605819904245,
- "src/backend/tests/unit/test_api_key.py::test_delete_api_key": 2.7562009589746594,
- "src/backend/tests/unit/test_api_key.py::test_get_api_keys": 9.624758707999717,
- "src/backend/tests/unit/test_async_helpers.py::TestRunUntilComplete::test_original_behavior_preserved_no_loop": 0.0007016250165179372,
- "src/backend/tests/unit/test_async_helpers.py::TestRunUntilComplete::test_run_until_complete_complex_coro_with_running_loop": 0.01584991699201055,
- "src/backend/tests/unit/test_async_helpers.py::TestRunUntilComplete::test_run_until_complete_concurrent_execution": 0.01696304199867882,
- "src/backend/tests/unit/test_async_helpers.py::TestRunUntilComplete::test_run_until_complete_nested_async_operations": 0.002464459976181388,
- "src/backend/tests/unit/test_async_helpers.py::TestRunUntilComplete::test_run_until_complete_no_running_loop": 0.003668749995995313,
- "src/backend/tests/unit/test_async_helpers.py::TestRunUntilComplete::test_run_until_complete_performance_impact": 0.007463082991307601,
- "src/backend/tests/unit/test_async_helpers.py::TestRunUntilComplete::test_run_until_complete_preserves_return_value": 0.0024466670001856983,
- "src/backend/tests/unit/test_async_helpers.py::TestRunUntilComplete::test_run_until_complete_simple_coro_with_running_loop": 0.005388958030380309,
- "src/backend/tests/unit/test_async_helpers.py::TestRunUntilComplete::test_run_until_complete_thread_isolation": 0.003928084042854607,
- "src/backend/tests/unit/test_async_helpers.py::TestRunUntilComplete::test_run_until_complete_with_exception_in_new_thread": 0.0044510420120786875,
- "src/backend/tests/unit/test_async_helpers.py::TestRunUntilComplete::test_run_until_complete_with_timeout": 0.01291825098451227,
- "src/backend/tests/unit/test_cache.py::test_build_graph": 1.1988659180001378,
- "src/backend/tests/unit/test_chat_endpoint.py::test_build_flow": 2.8563256670022383,
- "src/backend/tests/unit/test_chat_endpoint.py::test_build_flow_from_request_data": 2.869324667030014,
- "src/backend/tests/unit/test_chat_endpoint.py::test_build_flow_invalid_flow_id": 2.7840439589926973,
- "src/backend/tests/unit/test_chat_endpoint.py::test_build_flow_invalid_job_id": 2.706707751000067,
- "src/backend/tests/unit/test_chat_endpoint.py::test_build_flow_polling": 3.1221607490151655,
- "src/backend/tests/unit/test_chat_endpoint.py::test_build_flow_start_only": 2.859548167005414,
- "src/backend/tests/unit/test_chat_endpoint.py::test_build_flow_start_with_inputs": 3.1739403330138884,
- "src/backend/tests/unit/test_chat_endpoint.py::test_build_flow_with_frozen_path": 2.876795416988898,
- "src/backend/tests/unit/test_chat_endpoint.py::test_cancel_build_failure": 2.8563863319868688,
- "src/backend/tests/unit/test_chat_endpoint.py::test_cancel_build_success": 2.8929652919759974,
- "src/backend/tests/unit/test_chat_endpoint.py::test_cancel_build_unexpected_error": 2.8994718739995733,
- "src/backend/tests/unit/test_chat_endpoint.py::test_cancel_build_with_cancelled_error": 2.804159916995559,
- "src/backend/tests/unit/test_chat_endpoint.py::test_cancel_nonexistent_build": 2.803647416003514,
- "src/backend/tests/unit/test_cli.py::test_components_path": 5.026017208991107,
- "src/backend/tests/unit/test_cli.py::test_superuser": 0.613421832997119,
- "src/backend/tests/unit/test_code_hash.py::test_code_hash_uniqueness": 1.5680500000016764,
- "src/backend/tests/unit/test_code_hash.py::test_component_metadata_has_code_hash": 1.6465477909951005,
- "src/backend/tests/unit/test_custom_component.py::test_build_config_field_keys": 0.0003525829524733126,
- "src/backend/tests/unit/test_custom_component.py::test_build_config_field_value_keys": 0.0003495409619063139,
- "src/backend/tests/unit/test_custom_component.py::test_build_config_field_values_dict": 0.00035591694177128375,
- "src/backend/tests/unit/test_custom_component.py::test_build_config_fields_dict": 0.0003349570033606142,
- "src/backend/tests/unit/test_custom_component.py::test_build_config_has_fields": 0.0003521240141708404,
- "src/backend/tests/unit/test_custom_component.py::test_build_config_no_code": 0.0003374589723534882,
- "src/backend/tests/unit/test_custom_component.py::test_build_config_return_type": 0.0003859169955831021,
- "src/backend/tests/unit/test_custom_component.py::test_code_parser_get_tree": 0.000915748969418928,
- "src/backend/tests/unit/test_custom_component.py::test_code_parser_init": 0.0009958750160876662,
- "src/backend/tests/unit/test_custom_component.py::test_code_parser_parse_ann_assign": 0.0003618340124376118,
- "src/backend/tests/unit/test_custom_component.py::test_code_parser_parse_arg_no_annotation": 0.0006946250214241445,
- "src/backend/tests/unit/test_custom_component.py::test_code_parser_parse_arg_with_annotation": 0.0005193740071263164,
- "src/backend/tests/unit/test_custom_component.py::test_code_parser_parse_assign": 0.000446291989646852,
- "src/backend/tests/unit/test_custom_component.py::test_code_parser_parse_callable_details_no_args": 0.00044120801612734795,
- "src/backend/tests/unit/test_custom_component.py::test_code_parser_parse_classes": 0.018622917967149988,
- "src/backend/tests/unit/test_custom_component.py::test_code_parser_parse_classes_raises": 0.0003959150053560734,
- "src/backend/tests/unit/test_custom_component.py::test_code_parser_parse_function_def_init": 0.0003935419663321227,
- "src/backend/tests/unit/test_custom_component.py::test_code_parser_parse_function_def_not_init": 0.0003537499578669667,
- "src/backend/tests/unit/test_custom_component.py::test_code_parser_parse_functions": 0.00039874998037703335,
- "src/backend/tests/unit/test_custom_component.py::test_code_parser_parse_global_vars": 0.00034495905856601894,
- "src/backend/tests/unit/test_custom_component.py::test_code_parser_parse_imports_import": 0.00046462396858260036,
- "src/backend/tests/unit/test_custom_component.py::test_code_parser_parse_imports_importfrom": 0.00037066699587740004,
- "src/backend/tests/unit/test_custom_component.py::test_code_parser_syntax_error": 0.004238040972268209,
- "src/backend/tests/unit/test_custom_component.py::test_component_code_null_error": 0.0004880000196862966,
- "src/backend/tests/unit/test_custom_component.py::test_component_get_code_tree": 0.07396320803673007,
- "src/backend/tests/unit/test_custom_component.py::test_component_get_code_tree_syntax_error": 0.0007461249479092658,
- "src/backend/tests/unit/test_custom_component.py::test_component_get_function_valid": 0.00038174999644979835,
- "src/backend/tests/unit/test_custom_component.py::test_component_init": 0.0005977500113658607,
- "src/backend/tests/unit/test_custom_component.py::test_custom_component_build_not_implemented": 0.0003565829829312861,
- "src/backend/tests/unit/test_custom_component.py::test_custom_component_build_template_config": 0.0009029579814523458,
- "src/backend/tests/unit/test_custom_component.py::test_custom_component_class_template_validation_no_code": 0.00036504201125353575,
- "src/backend/tests/unit/test_custom_component.py::test_custom_component_get_code_tree_syntax_error": 0.0005512920033652335,
- "src/backend/tests/unit/test_custom_component.py::test_custom_component_get_function": 0.00041416598833166063,
- "src/backend/tests/unit/test_custom_component.py::test_custom_component_get_function_entrypoint_args": 0.004955416021402925,
- "src/backend/tests/unit/test_custom_component.py::test_custom_component_get_function_entrypoint_args_no_args": 0.0055357910168822855,
- "src/backend/tests/unit/test_custom_component.py::test_custom_component_get_function_entrypoint_return_type": 0.005358874972444028,
- "src/backend/tests/unit/test_custom_component.py::test_custom_component_get_function_entrypoint_return_type_no_return_type": 0.004373831994598731,
- "src/backend/tests/unit/test_custom_component.py::test_custom_component_get_function_valid": 0.0003830839996226132,
- "src/backend/tests/unit/test_custom_component.py::test_custom_component_get_main_class_name": 0.004812333005247638,
- "src/backend/tests/unit/test_custom_component.py::test_custom_component_get_main_class_name_no_main_class": 0.0003850830253213644,
- "src/backend/tests/unit/test_custom_component.py::test_custom_component_init": 0.00036133298999629915,
- "src/backend/tests/unit/test_custom_component.py::test_custom_component_multiple_outputs": 0.003090540994890034,
- "src/backend/tests/unit/test_custom_component.py::test_custom_component_subclass_from_lctoolcomponent": 0.0010585429845377803,
- "src/backend/tests/unit/test_custom_component.py::test_list_flows_flow_objects": 1.981454541994026,
- "src/backend/tests/unit/test_custom_component.py::test_list_flows_return_type": 0.36947908403817564,
- "src/backend/tests/unit/test_custom_component_with_client.py::test_feature_flags_add_toolkit_output": 2.7484489580092486,
- "src/backend/tests/unit/test_custom_component_with_client.py::test_list_flows_flow_objects": 2.4376869579718914,
- "src/backend/tests/unit/test_custom_component_with_client.py::test_list_flows_return_type": 2.3714169170416426,
- "src/backend/tests/unit/test_data_class.py::test_add_method_for_integers": 0.0004909640001642401,
- "src/backend/tests/unit/test_data_class.py::test_add_method_for_strings": 0.00048714700005803024,
- "src/backend/tests/unit/test_data_class.py::test_add_method_with_non_overlapping_keys": 0.0004958419999638863,
- "src/backend/tests/unit/test_data_class.py::test_conversion_from_document": 0.00047414300001946685,
- "src/backend/tests/unit/test_data_class.py::test_conversion_to_document": 0.0004791220000015528,
- "src/backend/tests/unit/test_data_class.py::test_custom_attribute_get_set_del": 0.0004868570000553518,
- "src/backend/tests/unit/test_data_class.py::test_custom_attribute_setting_and_getting": 0.0004832489998989331,
- "src/backend/tests/unit/test_data_class.py::test_data_initialization": 0.0006179509998673893,
- "src/backend/tests/unit/test_data_class.py::test_deep_copy": 0.000507534999883319,
- "src/backend/tests/unit/test_data_class.py::test_dir_includes_data_keys": 0.0005894280001257357,
- "src/backend/tests/unit/test_data_class.py::test_dir_reflects_attribute_deletion": 0.0005968410000605218,
- "src/backend/tests/unit/test_data_class.py::test_get_text_with_empty_data": 0.00047716799997488124,
- "src/backend/tests/unit/test_data_class.py::test_get_text_with_none_data": 0.0004683019999447424,
- "src/backend/tests/unit/test_data_class.py::test_get_text_with_text_key": 0.0005054919998883634,
- "src/backend/tests/unit/test_data_class.py::test_get_text_without_text_key": 0.0004956330000140952,
- "src/backend/tests/unit/test_data_class.py::test_str_and_dir_methods": 0.0006173780000153783,
- "src/backend/tests/unit/test_data_class.py::test_validate_data_with_extra_keys": 0.000497345999974641,
- "src/backend/tests/unit/test_data_components.py::test_build_with_multiple_urls": 2.1151568749919534,
- "src/backend/tests/unit/test_data_components.py::test_directory_component_build_with_multithreading": 0.011123959033284336,
- "src/backend/tests/unit/test_data_components.py::test_directory_without_mocks": 0.17772862600395456,
- "src/backend/tests/unit/test_data_components.py::test_failed_request": 0.029582915944047272,
- "src/backend/tests/unit/test_data_components.py::test_parse_curl": 0.003926167031750083,
- "src/backend/tests/unit/test_data_components.py::test_successful_get_request": 0.04254975001094863,
- "src/backend/tests/unit/test_data_components.py::test_timeout": 0.023703540966380388,
- "src/backend/tests/unit/test_data_components.py::test_url_component": 2.0934785840217955,
- "src/backend/tests/unit/test_database.py::test_create_flow": 2.7052300829673186,
- "src/backend/tests/unit/test_database.py::test_create_flow_with_invalid_data": 2.865984165982809,
- "src/backend/tests/unit/test_database.py::test_create_flows": 2.751347874989733,
- "src/backend/tests/unit/test_database.py::test_delete_flow": 2.7740632090135477,
- "src/backend/tests/unit/test_database.py::test_delete_flows": 2.8640632499882486,
- "src/backend/tests/unit/test_database.py::test_delete_flows_with_transaction_and_build": 2.9670972070016433,
- "src/backend/tests/unit/test_database.py::test_delete_folder_with_flows_with_transaction_and_build": 2.8803618339879904,
- "src/backend/tests/unit/test_database.py::test_delete_nonexistent_flow": 2.754923748987494,
- "src/backend/tests/unit/test_database.py::test_download_file": 2.8387151669885498,
- "src/backend/tests/unit/test_database.py::test_get_flows_from_folder_pagination": 2.7247668339987285,
- "src/backend/tests/unit/test_database.py::test_get_flows_from_folder_pagination_with_params": 2.7411295009951573,
- "src/backend/tests/unit/test_database.py::test_get_nonexistent_flow": 2.831204958987655,
- "src/backend/tests/unit/test_database.py::test_load_flows": 2.0784470409998903,
- "src/backend/tests/unit/test_database.py::test_migrate_transactions": 3.3142859160434455,
- "src/backend/tests/unit/test_database.py::test_migrate_transactions_no_duckdb": 4.5406213329406455,
- "src/backend/tests/unit/test_database.py::test_read_flow": 2.8032612500246614,
- "src/backend/tests/unit/test_database.py::test_read_flows": 2.7716447509883437,
- "src/backend/tests/unit/test_database.py::test_read_flows_components_only": 2.7893136260390747,
- "src/backend/tests/unit/test_database.py::test_read_flows_components_only_paginated": 2.809052250959212,
- "src/backend/tests/unit/test_database.py::test_read_flows_custom_page_size": 2.923362957983045,
- "src/backend/tests/unit/test_database.py::test_read_flows_invalid_page": 2.9114560420275666,
- "src/backend/tests/unit/test_database.py::test_read_flows_invalid_size": 12.008710082998732,
- "src/backend/tests/unit/test_database.py::test_read_flows_no_pagination_params": 2.8915225000237115,
- "src/backend/tests/unit/test_database.py::test_read_flows_pagination_with_flows": 2.927169458998833,
- "src/backend/tests/unit/test_database.py::test_read_flows_pagination_with_params": 2.875768917030655,
- "src/backend/tests/unit/test_database.py::test_read_flows_pagination_without_params": 2.8355551669956185,
- "src/backend/tests/unit/test_database.py::test_read_folder": 2.7329202089749742,
- "src/backend/tests/unit/test_database.py::test_read_folder_with_component_filter": 2.871805167000275,
- "src/backend/tests/unit/test_database.py::test_read_folder_with_flows": 2.7932831670041196,
- "src/backend/tests/unit/test_database.py::test_read_folder_with_pagination": 2.9011792079836596,
- "src/backend/tests/unit/test_database.py::test_read_folder_with_search": 2.8796487919753417,
- "src/backend/tests/unit/test_database.py::test_read_nonexistent_folder": 2.870054667000659,
- "src/backend/tests/unit/test_database.py::test_read_only_starter_projects": 2.8585666670114733,
- "src/backend/tests/unit/test_database.py::test_sqlite_pragmas": 0.07541662399307825,
- "src/backend/tests/unit/test_database.py::test_update_flow": 2.8001106239680666,
- "src/backend/tests/unit/test_database.py::test_update_flow_idempotency": 2.821949000004679,
- "src/backend/tests/unit/test_database.py::test_update_nonexistent_flow": 2.725973458000226,
- "src/backend/tests/unit/test_database.py::test_upload_file": 2.7899827089859173,
- "src/backend/tests/unit/test_endpoints.py::test_build_vertex_invalid_flow_id": 2.9291972929786425,
- "src/backend/tests/unit/test_endpoints.py::test_build_vertex_invalid_vertex_id": 2.9100301250000484,
- "src/backend/tests/unit/test_endpoints.py::test_concurrent_stream_run_with_input_type_chat": 9.601628832984716,
- "src/backend/tests/unit/test_endpoints.py::test_get_all": 2.979537999985041,
- "src/backend/tests/unit/test_endpoints.py::test_get_vertices": 2.8253356239874847,
- "src/backend/tests/unit/test_endpoints.py::test_get_vertices_flow_not_found": 2.7797513349796645,
- "src/backend/tests/unit/test_endpoints.py::test_invalid_flow_id": 2.8764577499823645,
- "src/backend/tests/unit/test_endpoints.py::test_invalid_prompt": 2.1289947910117917,
- "src/backend/tests/unit/test_endpoints.py::test_invalid_run_with_input_type_chat": 3.2016742089763284,
- "src/backend/tests/unit/test_endpoints.py::test_post_validate_code": 2.8428013330267277,
- "src/backend/tests/unit/test_endpoints.py::test_starter_projects": 3.006369166978402,
- "src/backend/tests/unit/test_endpoints.py::test_successful_run_no_payload": 3.220601749024354,
- "src/backend/tests/unit/test_endpoints.py::test_successful_run_with_input_type_any": 3.248805916024139,
- "src/backend/tests/unit/test_endpoints.py::test_successful_run_with_input_type_chat": 3.1939231240248773,
- "src/backend/tests/unit/test_endpoints.py::test_successful_run_with_input_type_text": 3.2224760839890223,
- "src/backend/tests/unit/test_endpoints.py::test_successful_run_with_output_type_any": 3.216045084001962,
- "src/backend/tests/unit/test_endpoints.py::test_successful_run_with_output_type_debug": 3.2311353749828413,
- "src/backend/tests/unit/test_endpoints.py::test_successful_run_with_output_type_text": 3.212846374983201,
- "src/backend/tests/unit/test_endpoints.py::test_valid_prompt": 2.13011583298794,
- "src/backend/tests/unit/test_endpoints.py::test_various_prompts[The weather is {weather} today.-expected_input_variables1]": 13.074470873980317,
- "src/backend/tests/unit/test_endpoints.py::test_various_prompts[This prompt has no variables.-expected_input_variables2]": 2.161580999963917,
- "src/backend/tests/unit/test_endpoints.py::test_various_prompts[{a}, {b}, and {c} are variables.-expected_input_variables3]": 2.139543875004165,
- "src/backend/tests/unit/test_endpoints.py::test_various_prompts[{color} is my favorite color.-expected_input_variables0]": 2.1355227490421385,
- "src/backend/tests/unit/test_experimental_components.py::test_python_function_component": 0.0038072500028647482,
- "src/backend/tests/unit/test_files.py::test_delete_file": 11.937014124996495,
- "src/backend/tests/unit/test_files.py::test_download_file": 9.813468083040789,
- "src/backend/tests/unit/test_files.py::test_file_operations": 11.151997918030247,
- "src/backend/tests/unit/test_files.py::test_list_files": 11.372431917930953,
- "src/backend/tests/unit/test_files.py::test_upload_file": 9.378826959000435,
- "src/backend/tests/unit/test_frontend_nodes.py::test_frontend_node_to_dict": 0.0005896249786019325,
- "src/backend/tests/unit/test_frontend_nodes.py::test_template_field_defaults": 0.001315416011493653,
- "src/backend/tests/unit/test_frontend_nodes.py::test_template_to_dict": 0.0007304580067284405,
- "src/backend/tests/unit/test_helper_components.py::test_data_as_text_component": 0.0007907919934950769,
- "src/backend/tests/unit/test_helper_components.py::test_uuid_generator_component": 0.0035138749808538705,
- "src/backend/tests/unit/test_initial_setup.py::test_create_or_update_starter_projects": 2.2110438759846147,
- "src/backend/tests/unit/test_initial_setup.py::test_detect_github_url[https://example.com/myzip.zip-https://example.com/myzip.zip]": 0.0010844580247066915,
- "src/backend/tests/unit/test_initial_setup.py::test_detect_github_url[https://github.com/langflow-ai/langflow-bundles-https://github.com/langflow-ai/langflow-bundles/archive/refs/heads/main.zip]": 0.009196958999382332,
- "src/backend/tests/unit/test_initial_setup.py::test_detect_github_url[https://github.com/langflow-ai/langflow-bundles.git-https://github.com/langflow-ai/langflow-bundles/archive/refs/heads/main.zip]": 0.008824415970593691,
- "src/backend/tests/unit/test_initial_setup.py::test_detect_github_url[https://github.com/langflow-ai/langflow-bundles/-https://github.com/langflow-ai/langflow-bundles/archive/refs/heads/main.zip]": 0.007598666998092085,
- "src/backend/tests/unit/test_initial_setup.py::test_detect_github_url[https://github.com/langflow-ai/langflow-bundles/commit/68428ce16729a385fe1bcc0f1ec91fd5f5f420b9-https://github.com/langflow-ai/langflow-bundles/archive/68428ce16729a385fe1bcc0f1ec91fd5f5f420b9.zip]": 0.0011206660128664225,
- "src/backend/tests/unit/test_initial_setup.py::test_detect_github_url[https://github.com/langflow-ai/langflow-bundles/commit/68428ce16729a385fe1bcc0f1ec91fd5f5f420b9/-https://github.com/langflow-ai/langflow-bundles/archive/68428ce16729a385fe1bcc0f1ec91fd5f5f420b9.zip]": 0.0010252069914713502,
- "src/backend/tests/unit/test_initial_setup.py::test_detect_github_url[https://github.com/langflow-ai/langflow-bundles/releases/tag/foo/v1.0.0-https://github.com/langflow-ai/langflow-bundles/archive/refs/tags/foo/v1.0.0.zip]": 0.0013057089818175882,
- "src/backend/tests/unit/test_initial_setup.py::test_detect_github_url[https://github.com/langflow-ai/langflow-bundles/releases/tag/foo/v1.0.0/-https://github.com/langflow-ai/langflow-bundles/archive/refs/tags/foo/v1.0.0.zip]": 0.0011299589823465794,
- "src/backend/tests/unit/test_initial_setup.py::test_detect_github_url[https://github.com/langflow-ai/langflow-bundles/releases/tag/v1.0.0-0_1-https://github.com/langflow-ai/langflow-bundles/archive/refs/tags/v1.0.0-0_1.zip]": 0.4312945839774329,
- "src/backend/tests/unit/test_initial_setup.py::test_detect_github_url[https://github.com/langflow-ai/langflow-bundles/tree/some.branch-0_1-https://github.com/langflow-ai/langflow-bundles/archive/refs/heads/some.branch-0_1.zip]": 0.0015269590367097408,
- "src/backend/tests/unit/test_initial_setup.py::test_detect_github_url[https://github.com/langflow-ai/langflow-bundles/tree/some/branch-https://github.com/langflow-ai/langflow-bundles/archive/refs/heads/some/branch.zip]": 0.0010997909703291953,
- "src/backend/tests/unit/test_initial_setup.py::test_detect_github_url[https://github.com/langflow-ai/langflow-bundles/tree/some/branch/-https://github.com/langflow-ai/langflow-bundles/archive/refs/heads/some/branch.zip]": 0.0009310420136898756,
- "src/backend/tests/unit/test_initial_setup.py::test_get_project_data": 0.03448262403253466,
- "src/backend/tests/unit/test_initial_setup.py::test_load_bundles_from_urls": 2.831420834030723,
- "src/backend/tests/unit/test_initial_setup.py::test_load_starter_projects": 0.03855141601525247,
- "src/backend/tests/unit/test_initial_setup.py::test_refresh_starter_projects": 10.959869041020283,
- "src/backend/tests/unit/test_initial_setup.py::test_sync_flows_from_fs": 2.927540876000421,
- "src/backend/tests/unit/test_kubernetes_secrets.py::test_create_secret": 0.012160499987658113,
- "src/backend/tests/unit/test_kubernetes_secrets.py::test_delete_secret": 0.003695624996908009,
- "src/backend/tests/unit/test_kubernetes_secrets.py::test_email_address": 0.00034520801273174584,
- "src/backend/tests/unit/test_kubernetes_secrets.py::test_encode_string": 0.0003958329907618463,
- "src/backend/tests/unit/test_kubernetes_secrets.py::test_encode_uuid": 0.0006710829911753535,
- "src/backend/tests/unit/test_kubernetes_secrets.py::test_ends_with_non_alphanumeric": 0.0003549579996615648,
- "src/backend/tests/unit/test_kubernetes_secrets.py::test_get_secret": 0.004226043005473912,
- "src/backend/tests/unit/test_kubernetes_secrets.py::test_long_string": 0.00035049900179728866,
- "src/backend/tests/unit/test_kubernetes_secrets.py::test_starts_with_non_alphanumeric": 0.0003331660118419677,
- "src/backend/tests/unit/test_kubernetes_secrets.py::test_uuid_case_insensitivity": 0.0003565410152077675,
- "src/backend/tests/unit/test_load_components.py::TestComponentLoading::test_aget_all_types_dict_basic": 22.426637291995576,
- "src/backend/tests/unit/test_load_components.py::TestComponentLoading::test_component_differences_analysis": 27.172496957937256,
- "src/backend/tests/unit/test_load_components.py::TestComponentLoading::test_component_loading_performance": 1.661878540966427,
- "src/backend/tests/unit/test_load_components.py::TestComponentLoading::test_component_loading_performance_comparison": 26.241167333966587,
- "src/backend/tests/unit/test_load_components.py::TestComponentLoading::test_component_template_structure": 2.061513917025877,
- "src/backend/tests/unit/test_load_components.py::TestComponentLoading::test_components_path_variations": 23.327357416972518,
- "src/backend/tests/unit/test_load_components.py::TestComponentLoading::test_comprehensive_performance_summary": 62.45951362399501,
- "src/backend/tests/unit/test_load_components.py::TestComponentLoading::test_concurrent_loading": 13.767882791958982,
- "src/backend/tests/unit/test_load_components.py::TestComponentLoading::test_error_handling": 1.6045854589901865,
- "src/backend/tests/unit/test_load_components.py::TestComponentLoading::test_get_langflow_components_list_basic": 1.426067396999997,
- "src/backend/tests/unit/test_load_components.py::TestComponentLoading::test_import_langflow_components_basic": 1.6523972499999218,
- "src/backend/tests/unit/test_load_components.py::TestComponentLoading::test_memory_efficiency": 40.91349891704158,
- "src/backend/tests/unit/test_load_components.py::TestComponentLoading::test_repeated_loading_performance": 75.07795800099848,
- "src/backend/tests/unit/test_load_components.py::TestComponentLoading::test_result_structure_comparison": 24.198835249000695,
- "src/backend/tests/unit/test_loading.py::test_load_flow_from_json": 1.2976477909833193,
- "src/backend/tests/unit/test_loading.py::test_load_flow_from_json_object": 0.12894654195406474,
- "src/backend/tests/unit/test_loading.py::test_load_flow_from_json_with_tweaks": 0.005636290996335447,
- "src/backend/tests/unit/test_logger.py::test_enabled": 0.00035012501757591963,
- "src/backend/tests/unit/test_logger.py::test_get_after_timestamp": 0.0004077920166309923,
- "src/backend/tests/unit/test_logger.py::test_get_before_timestamp": 0.0004137499781791121,
- "src/backend/tests/unit/test_logger.py::test_get_last_n": 0.00039016801747493446,
- "src/backend/tests/unit/test_logger.py::test_init_default": 0.0015744169941172004,
- "src/backend/tests/unit/test_logger.py::test_init_with_env_variable": 0.002226666983915493,
- "src/backend/tests/unit/test_logger.py::test_len": 0.0003881250158883631,
- "src/backend/tests/unit/test_logger.py::test_max_size": 0.00036345800617709756,
- "src/backend/tests/unit/test_logger.py::test_write": 0.0005444999842438847,
- "src/backend/tests/unit/test_logger.py::test_write_overflow": 0.0004406250372994691,
- "src/backend/tests/unit/test_login.py::test_login_successful": 0.009610915993107483,
- "src/backend/tests/unit/test_login.py::test_login_unsuccessful_wrong_password": 2.6272996260086074,
- "src/backend/tests/unit/test_login.py::test_login_unsuccessful_wrong_username": 2.2174614170216955,
- "src/backend/tests/unit/test_messages.py::test_aadd_messages": 1.6231730080000943,
- "src/backend/tests/unit/test_messages.py::test_aadd_messagetables": 1.6020565650001117,
- "src/backend/tests/unit/test_messages.py::test_add_messages": 1.626520419000144,
- "src/backend/tests/unit/test_messages.py::test_add_messagetables": 0.05725845799315721,
- "src/backend/tests/unit/test_messages.py::test_adelete_messages": 1.5568391219999285,
- "src/backend/tests/unit/test_messages.py::test_aget_messages": 1.521009597000102,
- "src/backend/tests/unit/test_messages.py::test_astore_message": 1.6243206829999508,
- "src/backend/tests/unit/test_messages.py::test_aupdate_message_with_content_blocks": 1.5972662259998742,
- "src/backend/tests/unit/test_messages.py::test_aupdate_message_with_nested_properties": 1.602534328000047,
- "src/backend/tests/unit/test_messages.py::test_aupdate_message_with_timestamp": 1.5871280220001154,
- "src/backend/tests/unit/test_messages.py::test_aupdate_mixed_messages": 1.5728207469999234,
- "src/backend/tests/unit/test_messages.py::test_aupdate_multiple_messages": 1.5900392140001713,
- "src/backend/tests/unit/test_messages.py::test_aupdate_multiple_messages_with_timestamps": 1.5633066049999798,
- "src/backend/tests/unit/test_messages.py::test_aupdate_nonexistent_message": 3.133551847000035,
- "src/backend/tests/unit/test_messages.py::test_aupdate_nonexistent_message_generates_a_new_message": 1.5784494020001603,
- "src/backend/tests/unit/test_messages.py::test_aupdate_single_message": 1.5570718739998028,
- "src/backend/tests/unit/test_messages.py::test_convert_to_langchain[convert_to_langchain_type]": 0.0006909960000029969,
- "src/backend/tests/unit/test_messages.py::test_convert_to_langchain[message]": 0.0008285330000035174,
- "src/backend/tests/unit/test_messages.py::test_delete_messages": 1.579748290000225,
- "src/backend/tests/unit/test_messages.py::test_get_messages": 1.5057488729999022,
- "src/backend/tests/unit/test_messages.py::test_store_message": 1.5526250890000028,
- "src/backend/tests/unit/test_messages.py::test_update_message_with_content_blocks": 5.128578291973099,
- "src/backend/tests/unit/test_messages.py::test_update_message_with_nested_properties": 1.5983659149496816,
- "src/backend/tests/unit/test_messages.py::test_update_message_with_timestamp": 4.5035865410463884,
- "src/backend/tests/unit/test_messages.py::test_update_mixed_messages": 4.780824999965262,
- "src/backend/tests/unit/test_messages.py::test_update_multiple_messages": 3.9809147500200197,
- "src/backend/tests/unit/test_messages.py::test_update_multiple_messages_with_timestamps": 4.659952084010001,
- "src/backend/tests/unit/test_messages.py::test_update_nonexistent_message": 4.162011249980424,
- "src/backend/tests/unit/test_messages.py::test_update_single_message": 8.01532608200796,
- "src/backend/tests/unit/test_messages_endpoints.py::test_delete_messages": 3.6078300830558874,
- "src/backend/tests/unit/test_messages_endpoints.py::test_delete_messages_session": 2.79296762496233,
- "src/backend/tests/unit/test_messages_endpoints.py::test_get_messages_empty_result_with_encoded_nonexistent_session": 2.8638507499999832,
- "src/backend/tests/unit/test_messages_endpoints.py::test_get_messages_with_non_encoded_datetime_session_id": 2.8448847079707775,
- "src/backend/tests/unit/test_messages_endpoints.py::test_get_messages_with_url_encoded_datetime_session_id": 2.8409975829999894,
- "src/backend/tests/unit/test_messages_endpoints.py::test_get_messages_with_various_encoded_characters": 2.8276033750153147,
- "src/backend/tests/unit/test_messages_endpoints.py::test_no_messages_found_with_given_session_id": 2.828868374985177,
- "src/backend/tests/unit/test_messages_endpoints.py::test_successfully_update_session_id": 2.8242660829855595,
- "src/backend/tests/unit/test_messages_endpoints.py::test_update_message": 2.9984481669962406,
- "src/backend/tests/unit/test_messages_endpoints.py::test_update_message_not_found": 2.8472529590071645,
- "src/backend/tests/unit/test_process.py::test_load_langchain_object_with_cached_session": 0.031168292014626786,
- "src/backend/tests/unit/test_process.py::test_load_langchain_object_with_no_cached_session": 2.9178847920848057,
- "src/backend/tests/unit/test_process.py::test_load_langchain_object_without_session_id": 2.8941064990358427,
- "src/backend/tests/unit/test_process.py::test_multiple_tweaks": 0.0036274170270189643,
- "src/backend/tests/unit/test_process.py::test_no_tweaks": 0.001560457021696493,
- "src/backend/tests/unit/test_process.py::test_single_tweak": 0.000916124990908429,
- "src/backend/tests/unit/test_process.py::test_tweak_no_node_id": 0.0024407509772572666,
- "src/backend/tests/unit/test_process.py::test_tweak_not_in_template": 0.004069709015311673,
- "src/backend/tests/unit/test_schema.py::TestInput::test_field_type_str": 0.0005014840000967524,
- "src/backend/tests/unit/test_schema.py::TestInput::test_field_type_type": 0.00045346399997470144,
- "src/backend/tests/unit/test_schema.py::TestInput::test_input_to_dict": 0.000525487999766483,
- "src/backend/tests/unit/test_schema.py::TestInput::test_invalid_field_type": 0.0004848519997722178,
- "src/backend/tests/unit/test_schema.py::TestInput::test_post_process_type_function": 0.0008248369999819261,
- "src/backend/tests/unit/test_schema.py::TestInput::test_serialize_field_type": 0.00044205200015312585,
- "src/backend/tests/unit/test_schema.py::TestInput::test_validate_type_class": 0.0004621709999810264,
- "src/backend/tests/unit/test_schema.py::TestInput::test_validate_type_string": 0.000455497999837462,
- "src/backend/tests/unit/test_schema.py::TestOutput::test_output_add_types": 0.0004450289998203516,
- "src/backend/tests/unit/test_schema.py::TestOutput::test_output_default": 0.00045572800013360393,
- "src/backend/tests/unit/test_schema.py::TestOutput::test_output_set_selected": 0.0011847680002574634,
- "src/backend/tests/unit/test_schema.py::TestOutput::test_output_to_dict": 0.00046446500005004054,
- "src/backend/tests/unit/test_schema.py::TestOutput::test_output_validate_display_name": 0.0004624919999969279,
- "src/backend/tests/unit/test_schema.py::TestOutput::test_output_validate_model": 0.000452873000085674,
- "src/backend/tests/unit/test_schema.py::TestPostProcessType::test_custom_type": 0.00045844299984310055,
- "src/backend/tests/unit/test_schema.py::TestPostProcessType::test_int_type": 0.0004145319999224739,
- "src/backend/tests/unit/test_schema.py::TestPostProcessType::test_list_custom_type": 0.00043424900036370673,
- "src/backend/tests/unit/test_schema.py::TestPostProcessType::test_list_int_type": 0.0004075189999639406,
- "src/backend/tests/unit/test_schema.py::TestPostProcessType::test_union_custom_type": 0.00045543800024461234,
- "src/backend/tests/unit/test_schema.py::TestPostProcessType::test_union_type": 0.000462280999727227,
- "src/backend/tests/unit/test_schema.py::test_schema_to_langflow_inputs": 0.0013725469998462358,
- "src/backend/tests/unit/test_schema.py::test_schema_to_langflow_inputs_invalid_type": 0.012805617999902097,
- "src/backend/tests/unit/test_session_endpoint.py::test_get_sessions_all": 2.8402235840039793,
- "src/backend/tests/unit/test_session_endpoint.py::test_get_sessions_empty_database": 2.828042126027867,
- "src/backend/tests/unit/test_session_endpoint.py::test_get_sessions_invalid_flow_id_format": 2.8313062920060474,
- "src/backend/tests/unit/test_session_endpoint.py::test_get_sessions_with_different_flow_id": 2.9602817070262972,
- "src/backend/tests/unit/test_session_endpoint.py::test_get_sessions_with_flow_id_filter": 2.933354083012091,
- "src/backend/tests/unit/test_session_endpoint.py::test_get_sessions_with_non_existent_flow_id": 2.8522146260074805,
- "src/backend/tests/unit/test_setup_superuser.py::test_create_super_user_concurrent_workers": 0.6128209169837646,
- "src/backend/tests/unit/test_setup_superuser.py::test_create_super_user_race_condition": 0.004131625028094277,
- "src/backend/tests/unit/test_setup_superuser.py::test_create_super_user_race_condition_no_user_found": 0.003451707016211003,
- "src/backend/tests/unit/test_setup_superuser.py::test_teardown_superuser_default_superuser": 0.006587874988326803,
- "src/backend/tests/unit/test_setup_superuser.py::test_teardown_superuser_no_default_superuser": 0.006629081966821104,
- "src/backend/tests/unit/test_telemetry.py::test_gauge": 0.00047058300697244704,
- "src/backend/tests/unit/test_telemetry.py::test_gauge_with_counter_method": 0.0007385009957943112,
- "src/backend/tests/unit/test_telemetry.py::test_gauge_with_historgram_method": 0.00045770700671710074,
- "src/backend/tests/unit/test_telemetry.py::test_gauge_with_up_down_counter_method": 0.00045012598275206983,
- "src/backend/tests/unit/test_telemetry.py::test_increment_counter": 0.00034808399504981935,
- "src/backend/tests/unit/test_telemetry.py::test_increment_counter_empty_label": 0.0004251670034136623,
- "src/backend/tests/unit/test_telemetry.py::test_increment_counter_missing_mandatory_label": 0.00046845898032188416,
- "src/backend/tests/unit/test_telemetry.py::test_increment_counter_unregisted_metric": 0.00044154099305160344,
- "src/backend/tests/unit/test_telemetry.py::test_init": 0.0005839579971507192,
- "src/backend/tests/unit/test_telemetry.py::test_missing_labels": 0.0003878760035149753,
- "src/backend/tests/unit/test_telemetry.py::test_multithreaded_singleton": 0.005864666978595778,
- "src/backend/tests/unit/test_telemetry.py::test_multithreaded_singleton_race_condition": 0.03667691699229181,
- "src/backend/tests/unit/test_telemetry.py::test_opentelementry_singleton": 0.00036266600363887846,
- "src/backend/tests/unit/test_template.py::test_build_template_from_function": 0.0025881669716909528,
- "src/backend/tests/unit/test_template.py::test_get_base_classes": 0.000390498957131058,
- "src/backend/tests/unit/test_template.py::test_get_default_factory": 0.0005537080287467688,
- "src/backend/tests/unit/test_user.py::test_add_user": 2.476223083009245,
- "src/backend/tests/unit/test_user.py::test_data_consistency_after_delete": 2.8224178749951534,
- "src/backend/tests/unit/test_user.py::test_data_consistency_after_update": 3.139214707975043,
- "src/backend/tests/unit/test_user.py::test_deactivated_user_cannot_access": 3.170889624976553,
- "src/backend/tests/unit/test_user.py::test_deactivated_user_cannot_login": 2.506316083978163,
- "src/backend/tests/unit/test_user.py::test_delete_user": 2.8748775009880774,
- "src/backend/tests/unit/test_user.py::test_delete_user_wrong_id": 2.832191083987709,
- "src/backend/tests/unit/test_user.py::test_inactive_user": 2.491084125038469,
- "src/backend/tests/unit/test_user.py::test_normal_user_cant_delete_user": 3.1305363750143442,
- "src/backend/tests/unit/test_user.py::test_normal_user_cant_read_all_users": 2.810784248984419,
- "src/backend/tests/unit/test_user.py::test_patch_reset_password": 3.731907749985112,
- "src/backend/tests/unit/test_user.py::test_patch_user": 2.848253250005655,
- "src/backend/tests/unit/test_user.py::test_patch_user_wrong_id": 2.823068207973847,
- "src/backend/tests/unit/test_user.py::test_read_all_users": 2.5221609589643776,
- "src/backend/tests/unit/test_user.py::test_user_waiting_for_approval": 2.485611792013515,
- "src/backend/tests/unit/test_validate_code.py::test_create_class": 0.001758833008352667,
- "src/backend/tests/unit/test_validate_code.py::test_create_class_module_import": 0.0018980010063387454,
- "src/backend/tests/unit/test_validate_code.py::test_create_class_with_external_variables_and_functions": 0.000916459015570581,
- "src/backend/tests/unit/test_validate_code.py::test_create_class_with_multiple_external_classes": 0.0010958340135402977,
- "src/backend/tests/unit/test_validate_code.py::test_create_function": 0.003927750018192455,
- "src/backend/tests/unit/test_validate_code.py::test_execute_function_missing_function": 0.00093962496612221,
- "src/backend/tests/unit/test_validate_code.py::test_execute_function_missing_module": 0.00106487498851493,
- "src/backend/tests/unit/test_validate_code.py::test_execute_function_missing_schema": 0.0011890009918715805,
- "src/backend/tests/unit/test_validate_code.py::test_execute_function_success": 0.0019466679950710386,
- "src/backend/tests/unit/test_validate_code.py::test_validate_code": 0.0026859170175157487,
- "src/backend/tests/unit/test_version.py::test_compute_main": 0.00044933301978744566,
- "src/backend/tests/unit/test_version.py::test_version": 0.00045766698895022273,
- "src/backend/tests/unit/test_voice_mode.py::test_resample_24k_to_16k_invalid_frame": 0.0002242499904241413,
- "src/backend/tests/unit/test_voice_mode.py::test_resample_24k_to_16k_valid_frame": 0.00023129198234528303,
- "src/backend/tests/unit/test_voice_mode.py::test_webrtcvad_silence_detection": 0.00021454101079143584,
- "src/backend/tests/unit/test_voice_mode.py::test_webrtcvad_with_real_data": 0.00020624900935217738,
- "src/backend/tests/unit/test_webhook.py::test_webhook_endpoint": 3.944781416998012,
- "src/backend/tests/unit/test_webhook.py::test_webhook_flow_on_run_endpoint": 18.90724924998358,
- "src/backend/tests/unit/test_webhook.py::test_webhook_with_random_payload": 2.970908582996344,
- "src/backend/tests/unit/utils/test_connection_string_parser.py::test_transform_connection_string[protocol::password@host-protocol::password@host]": 0.00048704203800298274,
- "src/backend/tests/unit/utils/test_connection_string_parser.py::test_transform_connection_string[protocol:user:pa:ss:word@host-protocol:user:pa:ss:word@host]": 0.0012998749734833837,
- "src/backend/tests/unit/utils/test_connection_string_parser.py::test_transform_connection_string[protocol:user:pa@ss@word@host-protocol:user:pa%40ss%40word@host]": 0.0003912079846486449,
- "src/backend/tests/unit/utils/test_connection_string_parser.py::test_transform_connection_string[protocol:user:pass@word@host-protocol:user:pass%40word@host]": 0.005104541021864861,
- "src/backend/tests/unit/utils/test_connection_string_parser.py::test_transform_connection_string[protocol:user:password@-protocol:user:password@]": 0.0004062910156790167,
- "src/backend/tests/unit/utils/test_connection_string_parser.py::test_transform_connection_string[protocol:user:password@host-protocol:user:password@host]": 0.003633123997133225,
- "src/backend/tests/unit/utils/test_connection_string_parser.py::test_transform_connection_string[protocol:user@host-protocol:user@host]": 0.002642792009282857,
- "src/backend/tests/unit/utils/test_connection_string_parser.py::test_transform_connection_string[user:password@host-user:password@host]": 0.0009857489785645157,
- "src/backend/tests/unit/utils/test_format_directory_path.py::test_format_directory_path[-]": 0.0003908329817932099,
- "src/backend/tests/unit/utils/test_format_directory_path.py::test_format_directory_path[/home/user/\\ndocu\\nments/file.txt-/home/user/\\\\ndocu\\\\nments/file.txt]": 0.00039304199162870646,
- "src/backend/tests/unit/utils/test_format_directory_path.py::test_format_directory_path[/home/user/docu\\n\\nments/file.txt-/home/user/docu\\\\n\\\\nments/file.txt]": 0.0004015830345451832,
- "src/backend/tests/unit/utils/test_format_directory_path.py::test_format_directory_path[/home/user/docu\\nments/file.txt-/home/user/docu\\\\nments/file.txt]": 0.00043075004941783845,
- "src/backend/tests/unit/utils/test_format_directory_path.py::test_format_directory_path[/home/user/documents/\\n-/home/user/documents/\\\\n]": 0.0003684579860419035,
- "src/backend/tests/unit/utils/test_format_directory_path.py::test_format_directory_path[/home/user/documents/file.txt-/home/user/documents/file.txt]": 0.0005105010350234807,
- "src/backend/tests/unit/utils/test_format_directory_path.py::test_format_directory_path[/home/user/my-\\ndocs/special_file!.pdf-/home/user/my-\\\\ndocs/special_file!.pdf]": 0.0003592920256778598,
- "src/backend/tests/unit/utils/test_format_directory_path.py::test_format_directory_path[C:/Users\\\\Documents/file.txt-C:/Users\\\\Documents/file.txt]": 0.00039641797775402665,
- "src/backend/tests/unit/utils/test_format_directory_path.py::test_format_directory_path[C:\\\\Users\\\\Documents\\\\-C:\\\\Users\\\\Documents\\\\]": 0.0005557499825954437,
- "src/backend/tests/unit/utils/test_format_directory_path.py::test_format_directory_path[C:\\\\Users\\\\Documents\\\\file.txt-C:\\\\Users\\\\Documents\\\\file.txt]": 0.00047733497922308743,
- "src/backend/tests/unit/utils/test_format_directory_path.py::test_format_directory_path[C:\\\\Users\\\\\\nDocuments\\\\file.txt-C:\\\\Users\\\\\\\\nDocuments\\\\file.txt]": 0.00035954199847765267,
- "src/backend/tests/unit/utils/test_format_directory_path.py::test_format_directory_path[\\\\\\\\server\\\\share\\\\file.txt-\\\\\\\\server\\\\share\\\\file.txt]": 0.00037437499850057065,
- "src/backend/tests/unit/utils/test_format_directory_path.py::test_format_directory_path[\\n/home/user/documents/-\\\\n/home/user/documents/]": 0.00035812397254630923,
- "src/backend/tests/unit/utils/test_format_directory_path.py::test_format_directory_path[\\n\\n\\n-\\\\n\\\\n\\\\n]": 0.00037862497265450656,
- "src/backend/tests/unit/utils/test_format_directory_path.py::test_format_directory_path_type": 0.0003551239788066596,
- "src/backend/tests/unit/utils/test_image_utils.py::TestImageUtils::test_convert_image_to_base64_directory": 0.002373834024183452,
- "src/backend/tests/unit/utils/test_image_utils.py::TestImageUtils::test_convert_image_to_base64_empty_path": 0.0015134999412111938,
- "src/backend/tests/unit/utils/test_image_utils.py::TestImageUtils::test_convert_image_to_base64_nonexistent_file": 0.0014794580056332052,
- "src/backend/tests/unit/utils/test_image_utils.py::TestImageUtils::test_convert_image_to_base64_success": 0.0013306670589372516,
- "src/backend/tests/unit/utils/test_image_utils.py::TestImageUtils::test_create_data_url_invalid_file": 0.006085248955059797,
- "src/backend/tests/unit/utils/test_image_utils.py::TestImageUtils::test_create_data_url_success": 0.0014539569965563715,
- "src/backend/tests/unit/utils/test_image_utils.py::TestImageUtils::test_create_data_url_unrecognized_extension": 0.0038709990330971777,
- "src/backend/tests/unit/utils/test_image_utils.py::TestImageUtils::test_create_data_url_with_custom_mime": 0.0027264999807812274,
- "src/backend/tests/unit/utils/test_image_utils.py::test_convert_image_to_base64_directory": 0.000977000017883256,
- "src/backend/tests/unit/utils/test_image_utils.py::test_convert_image_to_base64_empty_path": 0.0005482490232679993,
- "src/backend/tests/unit/utils/test_image_utils.py::test_convert_image_to_base64_nonexistent_file": 0.0008852930041030049,
- "src/backend/tests/unit/utils/test_image_utils.py::test_convert_image_to_base64_success": 0.0020875409827567637,
- "src/backend/tests/unit/utils/test_image_utils.py::test_create_data_url_invalid_file": 0.00038270902587100863,
- "src/backend/tests/unit/utils/test_image_utils.py::test_create_data_url_success": 0.0011498339881654829,
- "src/backend/tests/unit/utils/test_image_utils.py::test_create_data_url_unrecognized_extension": 0.0015792080375831574,
- "src/backend/tests/unit/utils/test_image_utils.py::test_create_data_url_with_custom_mime": 0.001010959007544443,
- "src/backend/tests/unit/utils/test_image_utils.py::test_create_image_content_dict_invalid_file": 0.0005314170266501606,
- "src/backend/tests/unit/utils/test_image_utils.py::test_create_image_content_dict_success": 0.001473083975724876,
- "src/backend/tests/unit/utils/test_image_utils.py::test_create_image_content_dict_unrecognized_extension": 0.0012713739997707307,
- "src/backend/tests/unit/utils/test_image_utils.py::test_create_image_content_dict_with_custom_mime": 0.0013821249885950238,
- "src/backend/tests/unit/utils/test_interface_utils.py::test_extract_input_variables[-expected2]": 0.00035958399530500174,
- "src/backend/tests/unit/utils/test_interface_utils.py::test_extract_input_variables[Double escaped {{{{not_this}}}}-expected9]": 0.000439041992649436,
- "src/backend/tests/unit/utils/test_interface_utils.py::test_extract_input_variables[Escaped {{not_a_var}}-expected7]": 0.00036954297684133053,
- "src/backend/tests/unit/utils/test_interface_utils.py::test_extract_input_variables[Hello { name }!-expected5]": 0.0004452920111361891,
- "src/backend/tests/unit/utils/test_interface_utils.py::test_extract_input_variables[Hello {name}! How are you {name}?-expected4]": 0.00045312498696148396,
- "src/backend/tests/unit/utils/test_interface_utils.py::test_extract_input_variables[Hello {name}! Your score is {{4 + 5}}, age: {age}-expected10]": 0.00039762500091455877,
- "src/backend/tests/unit/utils/test_interface_utils.py::test_extract_input_variables[Hello {name}!-expected0]": 0.0003963339840993285,
- "src/backend/tests/unit/utils/test_interface_utils.py::test_extract_input_variables[Hi { name }, bye-expected6]": 0.00043666601413860917,
- "src/backend/tests/unit/utils/test_interface_utils.py::test_extract_input_variables[Hi {name}, you are {age} years old-expected1]": 0.0003689589793793857,
- "src/backend/tests/unit/utils/test_interface_utils.py::test_extract_input_variables[Mixed {{escaped}} and {real_var}-expected8]": 0.0004262500151526183,
- "src/backend/tests/unit/utils/test_interface_utils.py::test_extract_input_variables[Nested {{obj['key']}} with {normal_var}-expected11]": 0.00038483398384414613,
- "src/backend/tests/unit/utils/test_interface_utils.py::test_extract_input_variables[No variables here-expected3]": 0.00046533302520401776,
- "src/backend/tests/unit/utils/test_interface_utils.py::test_extract_input_variables[Template {{user.name}} with {id} and {type}-expected12]": 0.00037270900793373585,
- "src/backend/tests/unit/utils/test_interface_utils.py::test_extract_input_variables[\\n Multi-line with {var1}\\n and {var2} plus\\n {var3} at the end\\n -expected16]": 0.0004532910243142396,
- "src/backend/tests/unit/utils/test_interface_utils.py::test_extract_input_variables[{single}-expected13]": 0.0003748339950107038,
- "src/backend/tests/unit/utils/test_interface_utils.py::test_extract_input_variables[{{double}}-expected14]": 0.000429290987085551,
- "src/backend/tests/unit/utils/test_interface_utils.py::test_extract_input_variables[{{{}}}-expected15]": 0.0004148330190218985,
- "src/backend/tests/unit/utils/test_interface_utils.py::test_extract_input_variables_malformed[incomplete}-Single '}' encountered in format string]": 0.0004274160019122064,
- "src/backend/tests/unit/utils/test_interface_utils.py::test_extract_input_variables_malformed[{incomplete-expected '}' before end of string]": 0.0004964589898008853,
- "src/backend/tests/unit/utils/test_interface_utils.py::test_extract_input_variables_malformed[}{-Single '}' encountered in format string]": 0.0005182500171940774,
- "src/backend/tests/unit/utils/test_rewrite_file_path.py::test_format_directory_path[-]": 0.0003820830024778843,
- "src/backend/tests/unit/utils/test_rewrite_file_path.py::test_format_directory_path[/home/user/\\ndocu\\nments/file.txt-/home/user/\\\\ndocu\\\\nments/file.txt]": 0.0003947099903598428,
- "src/backend/tests/unit/utils/test_rewrite_file_path.py::test_format_directory_path[/home/user/docu\\n\\nments/file.txt-/home/user/docu\\\\n\\\\nments/file.txt]": 0.00037224998231977224,
- "src/backend/tests/unit/utils/test_rewrite_file_path.py::test_format_directory_path[/home/user/docu\\nments/file.txt-/home/user/docu\\\\nments/file.txt]": 0.0004061250074300915,
- "src/backend/tests/unit/utils/test_rewrite_file_path.py::test_format_directory_path[/home/user/documents/\\n-/home/user/documents/\\\\n]": 0.00039108400233089924,
- "src/backend/tests/unit/utils/test_rewrite_file_path.py::test_format_directory_path[/home/user/documents/file.txt-/home/user/documents/file.txt]": 0.0003877500130329281,
- "src/backend/tests/unit/utils/test_rewrite_file_path.py::test_format_directory_path[/home/user/my-\\ndocs/special_file!.pdf-/home/user/my-\\\\ndocs/special_file!.pdf]": 0.00039283299702219665,
- "src/backend/tests/unit/utils/test_rewrite_file_path.py::test_format_directory_path[C:\\\\Users\\\\\\nDocuments\\\\file.txt-C:\\\\Users\\\\\\\\nDocuments\\\\file.txt]": 0.0004260009736754,
- "src/backend/tests/unit/utils/test_rewrite_file_path.py::test_format_directory_path[\\n/home/user/documents/-\\\\n/home/user/documents/]": 0.00039933298830874264,
- "src/backend/tests/unit/utils/test_rewrite_file_path.py::test_format_directory_path[\\n\\n\\n-\\\\n\\\\n\\\\n]": 0.0003804159932769835,
- "src/backend/tests/unit/utils/test_rewrite_file_path.py::test_format_directory_path_type": 0.00035066696000285447,
- "src/backend/tests/unit/utils/test_truncate_long_strings.py::test_truncate_long_strings_negative_max_length": 0.00031891799881123006,
- "src/backend/tests/unit/utils/test_truncate_long_strings.py::test_truncate_long_strings_non_dict_list[-5-]": 0.0003885829937644303,
- "src/backend/tests/unit/utils/test_truncate_long_strings.py::test_truncate_long_strings_non_dict_list[12345-3-12345]": 0.00039129197830334306,
- "src/backend/tests/unit/utils/test_truncate_long_strings.py::test_truncate_long_strings_non_dict_list[3.141592653589793-4-3.141592653589793]": 0.0003984580107498914,
- "src/backend/tests/unit/utils/test_truncate_long_strings.py::test_truncate_long_strings_non_dict_list[None-5-None]": 0.00041433400474488735,
- "src/backend/tests/unit/utils/test_truncate_long_strings.py::test_truncate_long_strings_non_dict_list[True-2-True]": 0.00038341799518093467,
- "src/backend/tests/unit/utils/test_truncate_long_strings.py::test_truncate_long_strings_non_dict_list[\\u3053\\u3093\\u306b\\u3061\\u306f-3-\\u3053\\u3093\\u306b...]": 0.000406457984354347,
- "src/backend/tests/unit/utils/test_truncate_long_strings.py::test_truncate_long_strings_non_dict_list[a-1-a]": 0.000435041991295293,
- "src/backend/tests/unit/utils/test_truncate_long_strings.py::test_truncate_long_strings_non_dict_list[aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-10-aaaaaaaaaa...]": 0.0004207089659757912,
- "src/backend/tests/unit/utils/test_truncate_long_strings.py::test_truncate_long_strings_non_dict_list[exact-5-exact]": 0.0004617919912561774,
- "src/backend/tests/unit/utils/test_truncate_long_strings.py::test_truncate_long_strings_non_dict_list[long string-7-long st...]": 0.0004058330086991191,
- "src/backend/tests/unit/utils/test_truncate_long_strings.py::test_truncate_long_strings_non_dict_list[short string-20-short string]": 0.0008254580025095493,
- "src/backend/tests/unit/utils/test_truncate_long_strings.py::test_truncate_long_strings_none_max_length": 0.0003404999733902514,
- "src/backend/tests/unit/utils/test_truncate_long_strings.py::test_truncate_long_strings_zero_max_length": 0.00036212601116858423,
- "src/backend/tests/unit/utils/test_truncate_long_strings_on_objects.py::test_truncate_long_strings[input_data0-10-expected0]": 0.0004444170044735074,
- "src/backend/tests/unit/utils/test_truncate_long_strings_on_objects.py::test_truncate_long_strings[input_data1-5-expected1]": 0.0005127080075908452,
- "src/backend/tests/unit/utils/test_truncate_long_strings_on_objects.py::test_truncate_long_strings[input_data2-7-expected2]": 0.00043050001841038465,
- "src/backend/tests/unit/utils/test_truncate_long_strings_on_objects.py::test_truncate_long_strings[input_data3-8-expected3]": 0.00046049998491071165,
- "src/backend/tests/unit/utils/test_truncate_long_strings_on_objects.py::test_truncate_long_strings[input_data4-10-expected4]": 0.00041558401426300406,
- "src/backend/tests/unit/utils/test_truncate_long_strings_on_objects.py::test_truncate_long_strings[input_data5-10-expected5]": 0.0004132080066483468,
- "src/backend/tests/unit/utils/test_truncate_long_strings_on_objects.py::test_truncate_long_strings[input_data6-10-expected6]": 0.0003732499899342656,
- "src/backend/tests/unit/utils/test_truncate_long_strings_on_objects.py::test_truncate_long_strings[input_data7-5-expected7]": 0.0005550839996431023,
- "src/backend/tests/unit/utils/test_truncate_long_strings_on_objects.py::test_truncate_long_strings[input_data8-3-expected8]": 0.0005044580320827663,
- "src/backend/tests/unit/utils/test_truncate_long_strings_on_objects.py::test_truncate_long_strings[input_data9-10-expected9]": 0.0026743340131361037,
- "src/backend/tests/unit/utils/test_truncate_long_strings_on_objects.py::test_truncate_long_strings_default_max_length": 0.00040199997602030635,
- "src/backend/tests/unit/utils/test_truncate_long_strings_on_objects.py::test_truncate_long_strings_in_place_modification": 0.00032849900890141726,
- "src/backend/tests/unit/utils/test_truncate_long_strings_on_objects.py::test_truncate_long_strings_invalid_input": 0.0008381659863516688,
- "src/backend/tests/unit/utils/test_truncate_long_strings_on_objects.py::test_truncate_long_strings_negative_max_length": 0.0004729999927803874,
- "src/backend/tests/unit/utils/test_truncate_long_strings_on_objects.py::test_truncate_long_strings_no_modification": 0.0003431249933782965,
- "src/backend/tests/unit/utils/test_truncate_long_strings_on_objects.py::test_truncate_long_strings_small_max_length": 0.0003504999913275242,
- "src/backend/tests/unit/utils/test_truncate_long_strings_on_objects.py::test_truncate_long_strings_type_preservation": 0.0003769160248339176,
- "src/backend/tests/unit/utils/test_truncate_long_strings_on_objects.py::test_truncate_long_strings_zero_max_length": 0.0003878749848809093,
- "src/backend/tests/unit/utils/test_util_strings.py::test_is_valid_database_url[ invalid -False]": 0.0004462079668883234,
- "src/backend/tests/unit/utils/test_util_strings.py::test_is_valid_database_url[-False]": 0.00043650000588968396,
- "src/backend/tests/unit/utils/test_util_strings.py::test_is_valid_database_url[None-False]": 0.000378625001758337,
- "src/backend/tests/unit/utils/test_util_strings.py::test_is_valid_database_url[invalid://:@/test-False]": 0.022100916976341978,
- "src/backend/tests/unit/utils/test_util_strings.py::test_is_valid_database_url[invalid://database-False]": 0.10988566602463834,
- "src/backend/tests/unit/utils/test_util_strings.py::test_is_valid_database_url[mysql+mysqldb://scott:tiger@localhost/foo-True]": 0.0005408330180216581,
- "src/backend/tests/unit/utils/test_util_strings.py::test_is_valid_database_url[mysql+pymysql://scott:tiger@localhost/foo-True]": 0.0008243330230470747,
- "src/backend/tests/unit/utils/test_util_strings.py::test_is_valid_database_url[mysql://user:pass@localhost/dbname-True]": 0.05775237598572858,
- "src/backend/tests/unit/utils/test_util_strings.py::test_is_valid_database_url[not_a_url-False]": 0.0003810419875662774,
- "src/backend/tests/unit/utils/test_util_strings.py::test_is_valid_database_url[oracle+cx_oracle://scott:tiger@tnsalias-True]": 0.000536834035301581,
- "src/backend/tests/unit/utils/test_util_strings.py::test_is_valid_database_url[oracle+oracledb://scott:tiger@127.0.0.1:1521/?service_name=freepdb1-True]": 0.0006004080000820977,
- "src/backend/tests/unit/utils/test_util_strings.py::test_is_valid_database_url[oracle+oracledb://scott:tiger@localhost:1521/?service_name=freepdb1-True]": 0.0004251660138834268,
- "src/backend/tests/unit/utils/test_util_strings.py::test_is_valid_database_url[oracle://scott:tiger@127.0.0.1:1521/?service_name=freepdb1-True]": 0.03693678899981023,
- "src/backend/tests/unit/utils/test_util_strings.py::test_is_valid_database_url[oracle://scott:tiger@localhost:1521/?service_name=freepdb1-True]": 0.050994626042665914,
- "src/backend/tests/unit/utils/test_util_strings.py::test_is_valid_database_url[postgresql+pg8000://dbuser:kx%40jj5%2Fg@pghost10/appdb-True]": 0.0008046249859035015,
- "src/backend/tests/unit/utils/test_util_strings.py::test_is_valid_database_url[postgresql+psycopg2://scott:tiger@localhost:5432/mydatabase-True]": 0.00037645999691449106,
- "src/backend/tests/unit/utils/test_util_strings.py::test_is_valid_database_url[postgresql://user:pass@localhost/dbname-True]": 0.0003730829630512744,
- "src/backend/tests/unit/utils/test_util_strings.py::test_is_valid_database_url[sqlite+aiosqlite:////var/folders/test.db-True]": 0.0004176250076852739,
- "src/backend/tests/unit/utils/test_util_strings.py::test_is_valid_database_url[sqlite:////var/folders/test.db-True]": 0.00042062398279085755,
- "src/backend/tests/unit/utils/test_util_strings.py::test_is_valid_database_url[sqlite:///:memory:-True]": 0.0004084570100530982,
- "src/backend/tests/unit/utils/test_util_strings.py::test_is_valid_database_url[sqlite:///test.db-True]": 0.0004563339753076434
-}
\ No newline at end of file
diff --git a/src/backend/tests/__init__.py b/src/backend/tests/__init__.py
deleted file mode 100644
index 454402681626..000000000000
--- a/src/backend/tests/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-"""Tests package for langflow."""
diff --git a/src/backend/tests/api_keys.py b/src/backend/tests/api_keys.py
deleted file mode 100644
index 42260f744a51..000000000000
--- a/src/backend/tests/api_keys.py
+++ /dev/null
@@ -1,34 +0,0 @@
-import os.path
-
-# we need to import tmpdir
-
-
-def get_required_env_var(var: str) -> str:
- """Get the value of the specified environment variable.
-
- Args:
- var (str): The environment variable to get.
-
- Returns:
- str: The value of the environment variable.
-
- Raises:
- ValueError: If the environment variable is not set.
- """
- value = os.getenv(var)
- if not value:
- msg = f"Environment variable {var} is not set"
- raise ValueError(msg)
- return value
-
-
-def get_openai_api_key() -> str:
- return get_required_env_var("OPENAI_API_KEY")
-
-
-def get_astradb_application_token() -> str:
- return get_required_env_var("ASTRA_DB_APPLICATION_TOKEN")
-
-
-def get_astradb_api_endpoint() -> str:
- return get_required_env_var("ASTRA_DB_API_ENDPOINT")
diff --git a/src/backend/tests/base.py b/src/backend/tests/base.py
deleted file mode 100644
index b1234ebb66fe..000000000000
--- a/src/backend/tests/base.py
+++ /dev/null
@@ -1,167 +0,0 @@
-import asyncio
-import inspect
-from typing import Any
-from unittest.mock import Mock
-from uuid import uuid4
-
-import pytest
-from typing_extensions import TypedDict
-
-from lfx.custom.custom_component.component import Component
-from tests.constants import SUPPORTED_VERSIONS
-from tests.integration.utils import build_component_instance_for_tests
-
-
-class VersionComponentMapping(TypedDict):
- version: str
- module: str
- file_name: str
-
-
-# Sentinel value to mark undefined test cases
-DID_NOT_EXIST = object()
-
-
-class ComponentTestBase:
- @pytest.fixture(autouse=True)
- def _validate_required_fixtures(
- self,
- component_class: type[Any],
- default_kwargs: dict[str, Any],
- file_names_mapping: list[VersionComponentMapping],
- ) -> None:
- """Validate that all required fixtures are implemented."""
- # If we get here, all fixtures exist
-
- @pytest.fixture
- def component_class(self) -> type[Any]:
- """Return the component class to test."""
- msg = f"{self.__class__.__name__} must implement the component_class fixture"
- raise NotImplementedError(msg)
-
- @pytest.fixture
- def default_kwargs(self) -> dict[str, Any]:
- """Return the default kwargs for the component."""
- return {}
-
- @pytest.fixture
- def file_names_mapping(self) -> list[VersionComponentMapping]:
- """Return the file names mapping for different versions."""
- msg = f"{self.__class__.__name__} must implement the file_names_mapping fixture"
- raise NotImplementedError(msg)
-
- async def component_setup(self, component_class: type[Any], default_kwargs: dict[str, Any]) -> Component:
- mock_vertex = Mock()
- mock_vertex.id = str(uuid4())
- mock_vertex.graph = Mock()
- mock_vertex.graph.id = str(uuid4())
- mock_vertex.graph.session_id = str(uuid4())
- mock_vertex.graph.flow_id = str(uuid4())
- mock_vertex.is_output = Mock(return_value=False)
- source_code = await asyncio.to_thread(inspect.getsource, component_class)
- component_instance = component_class(_code=source_code, **default_kwargs)
- component_instance._should_process_output = Mock(return_value=False)
- component_instance._vertex = mock_vertex
- # Mock the log method to avoid tracing service context issues
- component_instance.log = Mock()
- return component_instance
-
- async def test_latest_version(self, component_class: type[Any], default_kwargs: dict[str, Any]) -> None:
- """Test that the component works with the latest version."""
- component_instance = await self.component_setup(component_class, default_kwargs)
- result = await component_instance.run()
- assert result is not None, "Component returned None for the latest version."
-
- def test_all_versions_have_a_file_name_defined(self, file_names_mapping: list[VersionComponentMapping]) -> None:
- """Ensure all supported versions have a file name defined."""
- if not file_names_mapping:
- msg = f"file_names_mapping is empty for {self.__class__.__name__}. Skipping versions test."
- pytest.skip(msg)
-
- version_mappings = {mapping["version"]: mapping for mapping in file_names_mapping}
-
- for version in SUPPORTED_VERSIONS:
- if version not in version_mappings:
- supported_versions = ", ".join(sorted(m["version"] for m in file_names_mapping))
- msg = (
- f"Version {version} not found in file_names_mapping for {self.__class__.__name__}.\n"
- f"Currently defined versions: {supported_versions}\n"
- "Please add this version to your component's file_names_mapping."
- )
- raise AssertionError(msg)
-
- mapping = version_mappings[version]
- if mapping["file_name"] is None:
- msg = (
- f"file_name is None for version {version} in {self.__class__.__name__}.\n"
- "Please provide a valid file_name in file_names_mapping or set it to DID_NOT_EXIST."
- )
- raise AssertionError(msg)
-
- if mapping["module"] is None:
- msg = (
- f"module is None for version {version} in {self.__class__.__name__}.\n"
- "Please provide a valid module name in file_names_mapping or set it to DID_NOT_EXIST."
- )
- raise AssertionError(msg)
-
- @pytest.mark.parametrize("version", SUPPORTED_VERSIONS)
- def test_component_versions(
- self,
- version: str,
- default_kwargs: dict[str, Any],
- file_names_mapping: list[VersionComponentMapping],
- ) -> None:
- """Test if the component works across different versions."""
- if not file_names_mapping:
- pytest.skip("No file names mapping defined for this component.")
- version_mappings = {mapping["version"]: mapping for mapping in file_names_mapping}
-
- mapping = version_mappings[version]
- if mapping["file_name"] is DID_NOT_EXIST:
- pytest.skip(f"Skipping version {version} as it does not have a file name defined.")
-
- try:
- instance, component_code = build_component_instance_for_tests(
- version, file_name=mapping["file_name"], module=mapping["module"], **default_kwargs
- )
- except Exception as e:
- msg = (
- f"Failed to build component instance for {self.__class__.__name__} "
- f"version {version}:\n"
- f"Module: {mapping['module']}\n"
- f"File: {mapping['file_name']}\n"
- f"Error: {e!s}"
- )
- raise AssertionError(msg) from e
-
- try:
- result = instance()
- except Exception as e:
- msg = (
- f"Failed to execute component {self.__class__.__name__} "
- f"for version {version}:\n"
- f"Module: {mapping['module']}\n"
- f"File: {mapping['file_name']}\n"
- f"Error: {e!s}\n"
- f"Component Code: {component_code}"
- )
- raise AssertionError(msg) from e
-
- if result is None:
- msg = (
- f"Component {self.__class__.__name__} returned None "
- f"for version {version}.\n"
- f"Module: {mapping['module']}\n"
- f"File: {mapping['file_name']}"
- )
- raise AssertionError(msg)
-
-
-@pytest.mark.usefixtures("client")
-class ComponentTestBaseWithClient(ComponentTestBase):
- pass
-
-
-class ComponentTestBaseWithoutClient(ComponentTestBase):
- pass
diff --git a/src/backend/tests/conftest.py b/src/backend/tests/conftest.py
deleted file mode 100644
index 0f7bc806ea62..000000000000
--- a/src/backend/tests/conftest.py
+++ /dev/null
@@ -1,729 +0,0 @@
-import asyncio
-import json
-import shutil
-
-# we need to import tmpdir
-import tempfile
-from collections.abc import AsyncGenerator
-from contextlib import suppress
-from pathlib import Path
-from uuid import UUID
-
-import anyio
-import orjson
-import pytest
-from asgi_lifespan import LifespanManager
-from blockbuster import blockbuster_ctx
-from dotenv import load_dotenv
-from fastapi.testclient import TestClient
-from httpx import ASGITransport, AsyncClient
-from langflow.initial_setup.constants import STARTER_FOLDER_NAME
-from langflow.main import create_app
-from langflow.services.auth.utils import get_password_hash
-from langflow.services.database.models.api_key.model import ApiKey
-from langflow.services.database.models.flow.model import Flow, FlowCreate
-from langflow.services.database.models.folder.model import Folder
-from langflow.services.database.models.transactions.model import TransactionTable
-from langflow.services.database.models.user.model import User, UserCreate, UserRead
-from langflow.services.database.models.vertex_builds.crud import delete_vertex_builds_by_flow_id
-from langflow.services.database.utils import session_getter
-from langflow.services.deps import get_db_service, session_scope
-from sqlalchemy.ext.asyncio import create_async_engine
-from sqlalchemy.orm import selectinload
-from sqlmodel import Session, SQLModel, create_engine, select
-from sqlmodel.ext.asyncio.session import AsyncSession
-from sqlmodel.pool import StaticPool
-from typer.testing import CliRunner
-
-from lfx.components.input_output import ChatInput
-from lfx.graph import Graph
-from lfx.log.logger import logger
-from tests.api_keys import get_openai_api_key
-
-load_dotenv()
-
-
-# TODO: Revert this to True once bb.functions[func].can_block_in("http/client.py", "_safe_read") is fixed
-@pytest.fixture(autouse=False)
-def blockbuster(request):
- if "benchmark" in request.keywords or "no_blockbuster" in request.keywords:
- yield
- else:
- with blockbuster_ctx() as bb:
- for func in [
- "io.BufferedReader.read",
- "io.BufferedWriter.write",
- "io.TextIOWrapper.read",
- "io.TextIOWrapper.write",
- "os.mkdir",
- "os.stat",
- "os.path.abspath",
- ]:
- bb.functions[func].can_block_in("settings/service.py", "initialize")
- for func in [
- "io.BufferedReader.read",
- "io.TextIOWrapper.read",
- ]:
- bb.functions[func].can_block_in("importlib_metadata/__init__.py", "metadata")
- # bb.functions[func].can_block_in("http/client.py", "_safe_read")
-
- (
- bb.functions["os.stat"]
- # TODO: make set_class_code async
- .can_block_in("langflow/custom/custom_component/component.py", "set_class_code")
- # TODO: follow discussion in https://github.com/encode/httpx/discussions/3456
- .can_block_in("httpx/_client.py", "_init_transport")
- .can_block_in("rich/traceback.py", "_render_stack")
- .can_block_in("langchain_core/_api/internal.py", "is_caller_internal")
- .can_block_in("langchain_core/runnables/utils.py", "get_function_nonlocals")
- .can_block_in("alembic/versions", "_load_revisions")
- .can_block_in("dotenv/main.py", "find_dotenv")
- .can_block_in("alembic/script/base.py", "_load_revisions")
- .can_block_in("alembic/env.py", "_do_run_migrations")
- )
-
- for func in ["os.stat", "os.path.abspath", "os.scandir", "os.listdir"]:
- bb.functions[func].can_block_in("alembic/util/pyfiles.py", "load_python_file")
- bb.functions[func].can_block_in("dotenv/main.py", "find_dotenv")
- bb.functions[func].can_block_in("pkgutil.py", "_iter_file_finder_modules")
-
- for func in ["os.path.abspath", "os.scandir"]:
- bb.functions[func].can_block_in("alembic/script/base.py", "_load_revisions")
-
- # Add os.stat to alembic/script/base.py _load_revisions
- bb.functions["os.stat"].can_block_in("alembic/script/base.py", "_load_revisions")
-
- (
- bb.functions["os.path.abspath"]
- .can_block_in("loguru/_better_exceptions.py", {"_get_lib_dirs", "_format_exception"})
- .can_block_in("sqlalchemy/dialects/sqlite/pysqlite.py", "create_connect_args")
- .can_block_in("botocore/__init__.py", "__init__")
- )
-
- bb.functions["socket.socket.connect"].can_block_in("urllib3/connection.py", "_new_conn")
- bb.functions["ssl.SSLSocket.send"].can_block_in("ssl.py", "sendall")
- bb.functions["ssl.SSLSocket.read"].can_block_in("ssl.py", "recv_into")
-
- yield bb
-
-
-def pytest_configure(config):
- config.addinivalue_line("markers", "noclient: don't create a client for this test")
- config.addinivalue_line("markers", "load_flows: load the flows for this test")
- config.addinivalue_line("markers", "api_key_required: run only if the api key is set in the environment variables")
- data_path = Path(__file__).parent.absolute() / "data"
-
- pytest.BASIC_EXAMPLE_PATH = data_path / "basic_example.json"
- pytest.COMPLEX_EXAMPLE_PATH = data_path / "complex_example.json"
- pytest.OPENAPI_EXAMPLE_PATH = data_path / "Openapi.json"
- pytest.GROUPED_CHAT_EXAMPLE_PATH = data_path / "grouped_chat.json"
- pytest.ONE_GROUPED_CHAT_EXAMPLE_PATH = data_path / "one_group_chat.json"
- pytest.VECTOR_STORE_GROUPED_EXAMPLE_PATH = data_path / "vector_store_grouped.json"
- pytest.WEBHOOK_TEST = data_path / "WebhookTest.json"
-
- pytest.BASIC_CHAT_WITH_PROMPT_AND_HISTORY = data_path / "BasicChatwithPromptandHistory.json"
- pytest.CHAT_INPUT = data_path / "ChatInputTest.json"
- pytest.TWO_OUTPUTS = data_path / "TwoOutputsTest.json"
- pytest.VECTOR_STORE_PATH = data_path / "Vector_store.json"
- pytest.SIMPLE_API_TEST = data_path / "SimpleAPITest.json"
- pytest.MEMORY_CHATBOT_NO_LLM = data_path / "MemoryChatbotNoLLM.json"
- pytest.ENV_VARIABLE_TEST = data_path / "env_variable_test.json"
- pytest.LOOP_TEST = data_path / "LoopTest.json"
- pytest.CODE_WITH_SYNTAX_ERROR = """
-def get_text():
- retun "Hello World"
- """
-
- # validate that all the paths are correct and the files exist
- for path in [
- pytest.BASIC_EXAMPLE_PATH,
- pytest.COMPLEX_EXAMPLE_PATH,
- pytest.OPENAPI_EXAMPLE_PATH,
- pytest.GROUPED_CHAT_EXAMPLE_PATH,
- pytest.ONE_GROUPED_CHAT_EXAMPLE_PATH,
- pytest.VECTOR_STORE_GROUPED_EXAMPLE_PATH,
- pytest.BASIC_CHAT_WITH_PROMPT_AND_HISTORY,
- pytest.CHAT_INPUT,
- pytest.TWO_OUTPUTS,
- pytest.VECTOR_STORE_PATH,
- pytest.MEMORY_CHATBOT_NO_LLM,
- pytest.LOOP_TEST,
- ]:
- assert path.exists(), f"File {path} does not exist. Available files: {list(data_path.iterdir())}"
-
-
-def pytest_collection_modifyitems(config, items): # noqa: ARG001
- """Automatically add markers based on test file location."""
- for item in items:
- if "tests/unit/" in str(item.fspath):
- item.add_marker(pytest.mark.unit)
- elif "tests/integration/" in str(item.fspath):
- item.add_marker(pytest.mark.integration)
- elif "tests/slow/" in str(item.fspath):
- item.add_marker(pytest.mark.slow)
-
-
-async def delete_transactions_by_flow_id(db: AsyncSession, flow_id: UUID):
- if not flow_id:
- return
- stmt = select(TransactionTable).where(TransactionTable.flow_id == flow_id)
- transactions = await db.exec(stmt)
- for transaction in transactions:
- await db.delete(transaction)
-
-
-async def _delete_transactions_and_vertex_builds(session, flows: list[Flow]):
- flow_ids = [flow.id for flow in flows]
- for flow_id in flow_ids:
- if not flow_id:
- continue
- try:
- await delete_vertex_builds_by_flow_id(session, flow_id)
- except Exception as e:
- logger.debug(f"Error deleting vertex builds for flow {flow_id}: {e}")
- try:
- await delete_transactions_by_flow_id(session, flow_id)
- except Exception as e:
- logger.debug(f"Error deleting transactions for flow {flow_id}: {e}")
-
-
-@pytest.fixture
-async def async_client() -> AsyncGenerator:
- app = create_app()
- async with AsyncClient(app=app, base_url="http://testserver", http2=True) as client:
- yield client
-
-
-@pytest.fixture(name="session")
-def session_fixture():
- engine = create_engine(
- "sqlite+pysqlite:///:memory:",
- connect_args={"check_same_thread": False},
- poolclass=StaticPool,
- )
- try:
- SQLModel.metadata.create_all(engine)
- with Session(engine) as session:
- yield session
- finally:
- SQLModel.metadata.drop_all(engine)
- engine.dispose()
-
-
-@pytest.fixture
-async def async_session():
- engine = create_async_engine("sqlite+aiosqlite://", connect_args={"check_same_thread": False}, poolclass=StaticPool)
- async with engine.begin() as conn:
- await conn.run_sync(SQLModel.metadata.create_all)
- async with AsyncSession(engine, expire_on_commit=False) as session:
- yield session
- async with engine.begin() as conn:
- await conn.run_sync(SQLModel.metadata.drop_all)
-
-
-class Config:
- broker_url = "redis://localhost:6379/0"
- result_backend = "redis://localhost:6379/0"
-
-
-@pytest.fixture(name="load_flows_dir")
-def load_flows_dir():
- with tempfile.TemporaryDirectory() as tempdir:
- yield tempdir
-
-
-@pytest.fixture(name="distributed_env")
-def _setup_env(monkeypatch):
- monkeypatch.setenv("LANGFLOW_CACHE_TYPE", "redis")
- monkeypatch.setenv("LANGFLOW_REDIS_HOST", "result_backend")
- monkeypatch.setenv("LANGFLOW_REDIS_PORT", "6379")
- monkeypatch.setenv("LANGFLOW_REDIS_DB", "0")
- monkeypatch.setenv("LANGFLOW_REDIS_EXPIRE", "3600")
- monkeypatch.setenv("LANGFLOW_REDIS_PASSWORD", "")
- monkeypatch.setenv("FLOWER_UNAUTHENTICATED_API", "True")
- monkeypatch.setenv("BROKER_URL", "redis://result_backend:6379/0")
- monkeypatch.setenv("RESULT_BACKEND", "redis://result_backend:6379/0")
- monkeypatch.setenv("C_FORCE_ROOT", "true")
-
-
-@pytest.fixture(name="distributed_client")
-def distributed_client_fixture(
- session: Session, # noqa: ARG001
- monkeypatch,
- distributed_env, # noqa: ARG001
-):
- # Here we load the .env from ../deploy/.env
- from langflow.core import celery_app
-
- db_dir = tempfile.mkdtemp()
- try:
- db_path = Path(db_dir) / "test.db"
- monkeypatch.setenv("LANGFLOW_DATABASE_URL", f"sqlite:///{db_path}")
- monkeypatch.setenv("LANGFLOW_AUTO_LOGIN", "false")
- # monkeypatch langflow.services.task.manager.USE_CELERY to True
- # monkeypatch.setattr(manager, "USE_CELERY", True)
- monkeypatch.setattr(celery_app, "celery_app", celery_app.make_celery("langflow", Config))
-
- # def get_session_override():
- # return session
-
- app = create_app()
-
- # app.dependency_overrides[get_session] = get_session_override
- with TestClient(app) as client:
- yield client
- finally:
- shutil.rmtree(db_dir) # Clean up the temporary directory
- app.dependency_overrides.clear()
- monkeypatch.undo()
-
-
-def get_graph(type_="basic"):
- """Get a graph from a json file."""
- if type_ == "basic":
- path = pytest.BASIC_EXAMPLE_PATH
- elif type_ == "complex":
- path = pytest.COMPLEX_EXAMPLE_PATH
- elif type_ == "openapi":
- path = pytest.OPENAPI_EXAMPLE_PATH
-
- with path.open(encoding="utf-8") as f:
- flow_graph = json.load(f)
- data_graph = flow_graph["data"]
- nodes = data_graph["nodes"]
- edges = data_graph["edges"]
- graph = Graph()
- graph.add_nodes_and_edges(nodes, edges)
- return graph
-
-
-@pytest.fixture
-def basic_graph_data():
- with pytest.BASIC_EXAMPLE_PATH.open(encoding="utf-8") as f:
- return json.load(f)
-
-
-@pytest.fixture
-def basic_graph():
- return get_graph()
-
-
-@pytest.fixture
-def complex_graph():
- return get_graph("complex")
-
-
-@pytest.fixture
-def openapi_graph():
- return get_graph("openapi")
-
-
-@pytest.fixture
-def json_flow():
- return pytest.BASIC_EXAMPLE_PATH.read_text(encoding="utf-8")
-
-
-@pytest.fixture
-def grouped_chat_json_flow():
- return pytest.GROUPED_CHAT_EXAMPLE_PATH.read_text(encoding="utf-8")
-
-
-@pytest.fixture
-def one_grouped_chat_json_flow():
- return pytest.ONE_GROUPED_CHAT_EXAMPLE_PATH.read_text(encoding="utf-8")
-
-
-@pytest.fixture
-def vector_store_grouped_json_flow():
- return pytest.VECTOR_STORE_GROUPED_EXAMPLE_PATH.read_text(encoding="utf-8")
-
-
-@pytest.fixture
-def json_flow_with_prompt_and_history():
- return pytest.BASIC_CHAT_WITH_PROMPT_AND_HISTORY.read_text(encoding="utf-8")
-
-
-@pytest.fixture
-def json_simple_api_test():
- return pytest.SIMPLE_API_TEST.read_text(encoding="utf-8")
-
-
-@pytest.fixture
-def json_vector_store():
- return pytest.VECTOR_STORE_PATH.read_text(encoding="utf-8")
-
-
-@pytest.fixture
-def json_webhook_test():
- return pytest.WEBHOOK_TEST.read_text(encoding="utf-8")
-
-
-@pytest.fixture
-def json_memory_chatbot_no_llm():
- return pytest.MEMORY_CHATBOT_NO_LLM.read_text(encoding="utf-8")
-
-
-@pytest.fixture
-def json_loop_test():
- return pytest.LOOP_TEST.read_text(encoding="utf-8")
-
-
-@pytest.fixture(autouse=True)
-def deactivate_tracing(monkeypatch):
- monkeypatch.setenv("LANGFLOW_DEACTIVATE_TRACING", "true")
- yield
- monkeypatch.undo()
-
-
-@pytest.fixture
-def use_noop_session(monkeypatch):
- monkeypatch.setenv("LANGFLOW_USE_NOOP_DATABASE", "1")
- # Optionally patch the Settings object if needed
- # from lfx.services.settings.base import Settings
- # monkeypatch.setattr(Settings, "use_noop_database", True)
- yield
- monkeypatch.undo()
-
-
-@pytest.fixture(name="client")
-async def client_fixture(
- session: Session, # noqa: ARG001
- monkeypatch,
- request,
- load_flows_dir,
-):
- # Set the database url to a test database
- if "noclient" in request.keywords:
- yield
- else:
-
- def init_app():
- db_dir = tempfile.mkdtemp()
- db_path = Path(db_dir) / "test.db"
- monkeypatch.setenv("LANGFLOW_DATABASE_URL", f"sqlite:///{db_path}")
- monkeypatch.setenv("LANGFLOW_AUTO_LOGIN", "false")
- if "load_flows" in request.keywords:
- shutil.copyfile(
- pytest.BASIC_EXAMPLE_PATH, Path(load_flows_dir) / "c54f9130-f2fa-4a3e-b22a-3856d946351b.json"
- )
- monkeypatch.setenv("LANGFLOW_LOAD_FLOWS_PATH", load_flows_dir)
- monkeypatch.setenv("LANGFLOW_AUTO_LOGIN", "true")
- # Clear the services cache
- from lfx.services.manager import get_service_manager
-
- get_service_manager().factories.clear()
- get_service_manager().services.clear() # Clear the services cache
- app = create_app()
- db_service = get_db_service()
- db_service.database_url = f"sqlite:///{db_path}"
- db_service.reload_engine()
- return app, db_path
-
- app, db_path = await asyncio.to_thread(init_app)
- # app.dependency_overrides[get_session] = get_session_override
- async with (
- LifespanManager(app, startup_timeout=None, shutdown_timeout=None) as manager,
- AsyncClient(transport=ASGITransport(app=manager.app), base_url="http://testserver/", http2=True) as client,
- ):
- yield client
- # app.dependency_overrides.clear()
- monkeypatch.undo()
- # clear the temp db
- with suppress(FileNotFoundError):
- await anyio.Path(db_path).unlink()
-
-
-@pytest.fixture
-def runner(tmp_path):
- env = {"LANGFLOW_DATABASE_URL": f"sqlite:///{tmp_path}/test.db"}
- return CliRunner(env=env)
-
-
-@pytest.fixture
-async def test_user(client):
- user_data = UserCreate(
- username="testuser",
- password="testpassword", # noqa: S106
- )
- response = await client.post("api/v1/users/", json=user_data.model_dump())
- assert response.status_code == 201
- user = response.json()
- yield user
- # Clean up
- await client.delete(f"/api/v1/users/{user['id']}")
-
-
-@pytest.fixture
-async def active_user(client): # noqa: ARG001
- db_manager = get_db_service()
- async with db_manager.with_session() as session:
- user = User(
- username="activeuser",
- password=get_password_hash("testpassword"),
- is_active=True,
- is_superuser=False,
- )
- stmt = select(User).where(User.username == user.username)
- if active_user := (await session.exec(stmt)).first():
- user = active_user
- else:
- session.add(user)
- await session.commit()
- await session.refresh(user)
- user = UserRead.model_validate(user, from_attributes=True)
- yield user
- # Clean up
- # Now cleanup transactions, vertex_build
- try:
- async with db_manager.with_session() as session:
- user = await session.get(User, user.id, options=[selectinload(User.flows)])
- await _delete_transactions_and_vertex_builds(session, user.flows)
- await session.commit()
- except Exception as e:
- logger.exception(f"Error deleting transactions and vertex builds for user: {e}")
-
- try:
- async with db_manager.with_session() as session:
- user = await session.get(User, user.id)
- await session.delete(user)
- await session.commit()
- except Exception as e:
- logger.exception(f"Error deleting user: {e}")
-
-
-@pytest.fixture
-async def logged_in_headers(client, active_user):
- login_data = {"username": active_user.username, "password": "testpassword"}
- response = await client.post("api/v1/login", data=login_data)
- assert response.status_code == 200
- tokens = response.json()
- a_token = tokens["access_token"]
- return {"Authorization": f"Bearer {a_token}"}
-
-
-@pytest.fixture
-async def active_super_user(client): # noqa: ARG001
- db_manager = get_db_service()
- async with db_manager.with_session() as session:
- user = User(
- username="activeuser",
- password=get_password_hash("testpassword"),
- is_active=True,
- is_superuser=True,
- )
- stmt = select(User).where(User.username == user.username)
- if active_user := (await session.exec(stmt)).first():
- user = active_user
- else:
- session.add(user)
- await session.commit()
- await session.refresh(user)
- user = UserRead.model_validate(user, from_attributes=True)
- yield user
- # Clean up
- # Now cleanup transactions, vertex_build
- async with db_manager.with_session() as session:
- user = await session.get(User, user.id, options=[selectinload(User.flows)])
- await _delete_transactions_and_vertex_builds(session, user.flows)
- await session.delete(user)
-
- await session.commit()
-
-
-@pytest.fixture
-async def logged_in_headers_super_user(client, active_super_user):
- login_data = {"username": active_super_user.username, "password": "testpassword"}
- response = await client.post("api/v1/login", data=login_data)
- assert response.status_code == 200
- tokens = response.json()
- a_token = tokens["access_token"]
- return {"Authorization": f"Bearer {a_token}"}
-
-
-@pytest.fixture
-async def flow(
- client, # noqa: ARG001
- json_flow: str,
- active_user,
-):
- loaded_json = json.loads(json_flow)
- flow_data = FlowCreate(name="test_flow", data=loaded_json.get("data"), user_id=active_user.id)
-
- flow = Flow.model_validate(flow_data)
- async with session_getter(get_db_service()) as session:
- session.add(flow)
- await session.commit()
- await session.refresh(flow)
- yield flow
- # Clean up
- await session.delete(flow)
- await session.commit()
-
-
-@pytest.fixture
-def json_chat_input():
- return pytest.CHAT_INPUT.read_text(encoding="utf-8")
-
-
-@pytest.fixture
-def json_two_outputs():
- return pytest.TWO_OUTPUTS.read_text(encoding="utf-8")
-
-
-@pytest.fixture
-async def added_flow_webhook_test(client, json_webhook_test, logged_in_headers):
- flow = orjson.loads(json_webhook_test)
- data = flow["data"]
- flow = FlowCreate(name="Basic Chat", description="description", data=data)
- response = await client.post("api/v1/flows/", json=flow.model_dump(), headers=logged_in_headers)
- assert response.status_code == 201
- assert response.json()["name"] == flow.name
- assert response.json()["data"] == flow.data
- yield response.json()
- await client.delete(f"api/v1/flows/{response.json()['id']}", headers=logged_in_headers)
-
-
-@pytest.fixture
-async def added_flow_chat_input(client, json_chat_input, logged_in_headers):
- flow = orjson.loads(json_chat_input)
- data = flow["data"]
- flow = FlowCreate(name="Chat Input", description="description", data=data)
- response = await client.post("api/v1/flows/", json=flow.model_dump(), headers=logged_in_headers)
- assert response.status_code == 201
- assert response.json()["name"] == flow.name
- assert response.json()["data"] == flow.data
- yield response.json()
- await client.delete(f"api/v1/flows/{response.json()['id']}", headers=logged_in_headers)
-
-
-@pytest.fixture
-async def added_flow_two_outputs(client, json_two_outputs, logged_in_headers):
- flow = orjson.loads(json_two_outputs)
- data = flow["data"]
- flow = FlowCreate(name="Two Outputs", description="description", data=data)
- response = await client.post("api/v1/flows/", json=flow.model_dump(), headers=logged_in_headers)
- assert response.status_code == 201
- assert response.json()["name"] == flow.name
- assert response.json()["data"] == flow.data
- yield response.json()
- await client.delete(f"api/v1/flows/{response.json()['id']}", headers=logged_in_headers)
-
-
-@pytest.fixture
-async def added_vector_store(client, json_vector_store, logged_in_headers):
- vector_store = orjson.loads(json_vector_store)
- data = vector_store["data"]
- vector_store = FlowCreate(name="Vector Store", description="description", data=data)
- response = await client.post("api/v1/flows/", json=vector_store.model_dump(), headers=logged_in_headers)
- assert response.status_code == 201
- assert response.json()["name"] == vector_store.name
- assert response.json()["data"] == vector_store.data
- yield response.json()
- await client.delete(f"api/v1/flows/{response.json()['id']}", headers=logged_in_headers)
-
-
-@pytest.fixture
-async def added_webhook_test(client, json_webhook_test, logged_in_headers):
- webhook_test = orjson.loads(json_webhook_test)
- data = webhook_test["data"]
- webhook_test = FlowCreate(
- name="Webhook Test", description="description", data=data, endpoint_name=webhook_test["endpoint_name"]
- )
- response = await client.post("api/v1/flows/", json=webhook_test.model_dump(), headers=logged_in_headers)
- assert response.status_code == 201
- assert response.json()["name"] == webhook_test.name
- assert response.json()["data"] == webhook_test.data
- yield response.json()
- await client.delete(f"api/v1/flows/{response.json()['id']}", headers=logged_in_headers)
-
-
-@pytest.fixture
-async def flow_component(client: AsyncClient, logged_in_headers):
- chat_input = ChatInput()
- graph = Graph(start=chat_input, end=chat_input)
- graph_dict = graph.dump(name="Chat Input Component")
- flow = FlowCreate(**graph_dict)
- response = await client.post("api/v1/flows/", json=flow.model_dump(), headers=logged_in_headers)
- assert response.status_code == 201
- yield response.json()
- await client.delete(f"api/v1/flows/{response.json()['id']}", headers=logged_in_headers)
-
-
-@pytest.fixture
-async def created_api_key(active_user):
- hashed = get_password_hash("random_key")
- api_key = ApiKey(
- name="test_api_key",
- user_id=active_user.id,
- api_key="random_key",
- hashed_api_key=hashed,
- )
- db_manager = get_db_service()
- async with session_getter(db_manager) as session:
- stmt = select(ApiKey).where(ApiKey.api_key == api_key.api_key)
- if existing_api_key := (await session.exec(stmt)).first():
- yield existing_api_key
- return
- session.add(api_key)
- await session.commit()
- await session.refresh(api_key)
- yield api_key
- # Clean up
- await session.delete(api_key)
- await session.commit()
-
-
-@pytest.fixture(name="simple_api_test")
-async def get_simple_api_test(client, logged_in_headers, json_simple_api_test):
- # Once the client is created, we can get the starter project
- # Just create a new flow with the simple api test
- flow = orjson.loads(json_simple_api_test)
- data = flow["data"]
- flow = FlowCreate(name="Simple API Test", data=data, description="Simple API Test")
- response = await client.post("api/v1/flows/", json=flow.model_dump(), headers=logged_in_headers)
- assert response.status_code == 201
- yield response.json()
- await client.delete(f"api/v1/flows/{response.json()['id']}", headers=logged_in_headers)
-
-
-@pytest.fixture(name="starter_project")
-async def get_starter_project(client, active_user): # noqa: ARG001
- # once the client is created, we can get the starter project
- async with session_scope() as session:
- stmt = (
- select(Flow)
- .where(Flow.folder.has(Folder.name == STARTER_FOLDER_NAME))
- .where(Flow.name == "Basic Prompting")
- )
- flow = (await session.exec(stmt)).first()
- if not flow:
- msg = "No starter project found"
- raise ValueError(msg)
-
- # ensure openai api key is set
- openai_api_key = get_openai_api_key()
- data_as_json = json.dumps(flow.data)
- data_as_json = data_as_json.replace("OPENAI_API_KEY", openai_api_key)
- # also replace `"load_from_db": true` with `"load_from_db": false`
- if '"load_from_db": true' in data_as_json:
- data_as_json = data_as_json.replace('"load_from_db": true', '"load_from_db": false')
- if '"load_from_db": true' in data_as_json:
- msg = "load_from_db should be false"
- raise ValueError(msg)
- flow.data = json.loads(data_as_json)
-
- new_flow_create = FlowCreate(
- name=flow.name,
- description=flow.description,
- data=flow.data,
- user_id=active_user.id,
- )
- new_flow = Flow.model_validate(new_flow_create, from_attributes=True)
- session.add(new_flow)
- await session.commit()
- await session.refresh(new_flow)
- new_flow_dict = new_flow.model_dump()
- yield new_flow_dict
- # Clean up
- await session.delete(new_flow)
- await session.commit()
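
The async client fixtures deleted above all follow one pattern: boot the app through its lifespan hooks and drive it over an in-process ASGI transport, so no socket or real server is involved. A minimal sketch of that pattern, assuming the same asgi-lifespan and httpx dependencies the conftest imports (create_app stands in for the application factory used in the fixtures):

    import pytest
    from asgi_lifespan import LifespanManager
    from httpx import ASGITransport, AsyncClient


    @pytest.fixture
    async def client():
        app = create_app()  # application factory, as in the fixtures above
        # LifespanManager runs startup/shutdown hooks; httpx alone would skip them
        async with (
            LifespanManager(app, startup_timeout=None, shutdown_timeout=None) as manager,
            AsyncClient(transport=ASGITransport(app=manager.app), base_url="http://testserver/") as client,
        ):
            yield client
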
diff --git a/src/backend/tests/constants.py b/src/backend/tests/constants.py
deleted file mode 100644
index 901798948324..000000000000
--- a/src/backend/tests/constants.py
+++ /dev/null
@@ -1 +0,0 @@
-SUPPORTED_VERSIONS = ["1.0.19", "1.1.0", "1.1.1"]
diff --git a/src/backend/tests/data/LoopTest.json b/src/backend/tests/data/LoopTest.json
deleted file mode 100644
index ca947eb968c1..000000000000
--- a/src/backend/tests/data/LoopTest.json
+++ /dev/null
@@ -1,1121 +0,0 @@
-{
- "data": {
- "edges": [
- {
- "animated": false,
- "className": "",
- "data": {
- "sourceHandle": {
- "dataType": "CustomComponent",
- "id": "CustomComponent-y0t72",
- "name": "output",
- "output_types": [
- "Data"
- ]
- },
- "targetHandle": {
- "fieldName": "data",
- "id": "LoopComponent-PTNzd",
- "inputTypes": [
- "Data"
- ],
- "type": "other"
- }
- },
- "id": "reactflow__edge-CustomComponent-y0t72{œdataTypeœ:œCustomComponentœ,œidœ:œCustomComponent-y0t72œ,œnameœ:œoutputœ,œoutput_typesœ:[œDataœ]}-LoopComponent-PTNzd{œfieldNameœ:œdataœ,œidœ:œLoopComponent-PTNzdœ,œinputTypesœ:[œDataœ],œtypeœ:œotherœ}",
- "selected": false,
- "source": "CustomComponent-y0t72",
- "sourceHandle": "{œdataTypeœ:œCustomComponentœ,œidœ:œCustomComponent-y0t72œ,œnameœ:œoutputœ,œoutput_typesœ:[œDataœ]}",
- "target": "LoopComponent-PTNzd",
- "targetHandle": "{œfieldNameœ:œdataœ,œidœ:œLoopComponent-PTNzdœ,œinputTypesœ:[œDataœ],œtypeœ:œotherœ}"
- },
- {
- "animated": false,
- "className": "",
- "data": {
- "sourceHandle": {
- "dataType": "MessagetoData",
- "id": "MessagetoData-8O7uJ",
- "name": "data",
- "output_types": [
- "Data"
- ]
- },
- "targetHandle": {
- "dataType": "LoopComponent",
- "id": "LoopComponent-PTNzd",
- "name": "item",
- "output_types": [
- "Data"
- ]
- }
- },
- "id": "reactflow__edge-MessagetoData-8O7uJ{œdataTypeœ:œMessagetoDataœ,œidœ:œMessagetoData-8O7uJœ,œnameœ:œdataœ,œoutput_typesœ:[œDataœ]}-LoopComponent-PTNzd{œdataTypeœ:œLoopComponentœ,œidœ:œLoopComponent-PTNzdœ,œnameœ:œitemœ,œoutput_typesœ:[œDataœ]}",
- "selected": false,
- "source": "MessagetoData-8O7uJ",
- "sourceHandle": "{œdataTypeœ:œMessagetoDataœ,œidœ:œMessagetoData-8O7uJœ,œnameœ:œdataœ,œoutput_typesœ:[œDataœ]}",
- "target": "LoopComponent-PTNzd",
- "targetHandle": "{œdataTypeœ:œLoopComponentœ,œidœ:œLoopComponent-PTNzdœ,œnameœ:œitemœ,œoutput_typesœ:[œDataœ]}"
- },
- {
- "animated": false,
- "className": "",
- "data": {
- "sourceHandle": {
- "dataType": "LoopComponent",
- "id": "LoopComponent-PTNzd",
- "name": "item",
- "output_types": [
- "Data"
- ]
- },
- "targetHandle": {
- "fieldName": "data",
- "id": "ParseData-qyLj8",
- "inputTypes": [
- "Data"
- ],
- "type": "other"
- }
- },
- "id": "reactflow__edge-LoopComponent-PTNzd{œdataTypeœ:œLoopComponentœ,œidœ:œLoopComponent-PTNzdœ,œnameœ:œitemœ,œoutput_typesœ:[œDataœ]}-ParseData-qyLj8{œfieldNameœ:œdataœ,œidœ:œParseData-qyLj8œ,œinputTypesœ:[œDataœ],œtypeœ:œotherœ}",
- "selected": false,
- "source": "LoopComponent-PTNzd",
- "sourceHandle": "{œdataTypeœ:œLoopComponentœ,œidœ:œLoopComponent-PTNzdœ,œnameœ:œitemœ,œoutput_typesœ:[œDataœ]}",
- "target": "ParseData-qyLj8",
- "targetHandle": "{œfieldNameœ:œdataœ,œidœ:œParseData-qyLj8œ,œinputTypesœ:[œDataœ],œtypeœ:œotherœ}"
- },
- {
- "animated": false,
- "className": "",
- "data": {
- "sourceHandle": {
- "dataType": "ParseData",
- "id": "ParseData-qyLj8",
- "name": "text",
- "output_types": [
- "Message"
- ]
- },
- "targetHandle": {
- "fieldName": "message",
- "id": "MessagetoData-8O7uJ",
- "inputTypes": [
- "Message"
- ],
- "type": "str"
- }
- },
- "id": "reactflow__edge-ParseData-qyLj8{œdataTypeœ:œParseDataœ,œidœ:œParseData-qyLj8œ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}-MessagetoData-8O7uJ{œfieldNameœ:œmessageœ,œidœ:œMessagetoData-8O7uJœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}",
- "selected": false,
- "source": "ParseData-qyLj8",
- "sourceHandle": "{œdataTypeœ:œParseDataœ,œidœ:œParseData-qyLj8œ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}",
- "target": "MessagetoData-8O7uJ",
- "targetHandle": "{œfieldNameœ:œmessageœ,œidœ:œMessagetoData-8O7uJœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}"
- },
- {
- "animated": false,
- "className": "",
- "data": {
- "sourceHandle": {
- "dataType": "CustomComponent",
- "id": "CustomComponent-y0t72",
- "name": "output",
- "output_types": [
- "Data"
- ]
- },
- "targetHandle": {
- "fieldName": "list2",
- "id": "MyZipper-xVGrn",
- "inputTypes": [
- "Data"
- ],
- "type": "other"
- }
- },
- "id": "reactflow__edge-CustomComponent-y0t72{œdataTypeœ:œCustomComponentœ,œidœ:œCustomComponent-y0t72œ,œnameœ:œoutputœ,œoutput_typesœ:[œDataœ]}-MyZipper-xVGrn{œfieldNameœ:œlist2œ,œidœ:œMyZipper-xVGrnœ,œinputTypesœ:[œDataœ],œtypeœ:œotherœ}",
- "selected": false,
- "source": "CustomComponent-y0t72",
- "sourceHandle": "{œdataTypeœ:œCustomComponentœ,œidœ:œCustomComponent-y0t72œ,œnameœ:œoutputœ,œoutput_typesœ:[œDataœ]}",
- "target": "MyZipper-xVGrn",
- "targetHandle": "{œfieldNameœ:œlist2œ,œidœ:œMyZipper-xVGrnœ,œinputTypesœ:[œDataœ],œtypeœ:œotherœ}"
- },
- {
- "animated": false,
- "className": "",
- "data": {
- "sourceHandle": {
- "dataType": "LoopComponent",
- "id": "LoopComponent-PTNzd",
- "name": "done",
- "output_types": [
- "Data"
- ]
- },
- "targetHandle": {
- "fieldName": "list1",
- "id": "MyZipper-xVGrn",
- "inputTypes": [
- "Data"
- ],
- "type": "other"
- }
- },
- "id": "reactflow__edge-LoopComponent-PTNzd{œdataTypeœ:œLoopComponentœ,œidœ:œLoopComponent-PTNzdœ,œnameœ:œdoneœ,œoutput_typesœ:[œDataœ]}-MyZipper-xVGrn{œfieldNameœ:œlist1œ,œidœ:œMyZipper-xVGrnœ,œinputTypesœ:[œDataœ],œtypeœ:œotherœ}",
- "selected": false,
- "source": "LoopComponent-PTNzd",
- "sourceHandle": "{œdataTypeœ:œLoopComponentœ,œidœ:œLoopComponent-PTNzdœ,œnameœ:œdoneœ,œoutput_typesœ:[œDataœ]}",
- "target": "MyZipper-xVGrn",
- "targetHandle": "{œfieldNameœ:œlist1œ,œidœ:œMyZipper-xVGrnœ,œinputTypesœ:[œDataœ],œtypeœ:œotherœ}"
- },
- {
- "animated": false,
- "className": "",
- "data": {
- "sourceHandle": {
- "dataType": "MyZipper",
- "id": "MyZipper-xVGrn",
- "name": "output",
- "output_types": [
- "Message"
- ]
- },
- "targetHandle": {
- "fieldName": "input_value",
- "id": "ChatOutput-tF7vz",
- "inputTypes": [
- "Data",
- "DataFrame",
- "Message"
- ],
- "type": "other"
- }
- },
- "id": "xy-edge__MyZipper-xVGrn{œdataTypeœ:œMyZipperœ,œidœ:œMyZipper-xVGrnœ,œnameœ:œoutputœ,œoutput_typesœ:[œMessageœ]}-ChatOutput-tF7vz{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-tF7vzœ,œinputTypesœ:[œDataœ,œDataFrameœ,œMessageœ],œtypeœ:œotherœ}",
- "selected": false,
- "source": "MyZipper-xVGrn",
- "sourceHandle": "{œdataTypeœ:œMyZipperœ,œidœ:œMyZipper-xVGrnœ,œnameœ:œoutputœ,œoutput_typesœ:[œMessageœ]}",
- "target": "ChatOutput-tF7vz",
- "targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-tF7vzœ,œinputTypesœ:[œDataœ,œDataFrameœ,œMessageœ],œtypeœ:œotherœ}"
- }
- ],
- "nodes": [
- {
- "data": {
- "id": "MyZipper-xVGrn",
- "node": {
- "base_classes": [
- "Message"
- ],
- "beta": false,
- "conditional_paths": [],
- "custom_fields": {},
- "description": "Use as a template to create your own component.",
- "display_name": "C MyZipper",
- "documentation": "https://docs.langflow.org/components-custom-components",
- "edited": true,
- "field_order": [
- "list1",
- "list2"
- ],
- "frozen": false,
- "icon": "code",
- "legacy": false,
- "lf_version": "1.4.1",
- "metadata": {},
- "minimized": false,
- "output_types": [],
- "outputs": [
- {
- "allows_loop": false,
- "cache": true,
- "display_name": "Output",
- "hidden": false,
- "method": "build_output",
- "name": "output",
- "required_inputs": null,
- "selected": "Message",
- "tool_mode": true,
- "types": [
- "Message"
- ],
- "value": "__UNDEFINED__"
- }
- ],
- "pinned": false,
- "template": {
- "_type": "Component",
- "code": {
- "advanced": true,
- "dynamic": true,
- "fileTypes": [],
- "file_path": "",
- "info": "",
- "list": false,
- "load_from_db": false,
- "multiline": true,
- "name": "code",
- "password": false,
- "placeholder": "",
- "required": true,
- "show": true,
- "title_case": false,
- "type": "code",
- "value": "# from lfx.field_typing import Data\nfrom lfx.custom import Component\nfrom lfx.io import MessageTextInput, Output\nfrom lfx.schema import Message\nfrom fastapi.encoders import jsonable_encoder\n\nclass CustomComponent(Component):\n display_name = \"C MyZipper\"\n description = \"Use as a template to create your own component.\"\n documentation: str = \"https://docs.langflow.org/components-custom-components\"\n icon = \"code\"\n name = \"MyZipper\"\n\n inputs = [\n DataInput(\n name=\"list1\",\n display_name=\"List One\",\n is_list=True,\n required=True,\n ),\n DataInput(\n name=\"list2\",\n display_name=\"List Two\",\n is_list=True,\n required=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Output\", name=\"output\", method=\"build_output\"),\n ]\n\n def build_output(self) -> Message:\n list1 = self.list1\n list2 = self.list2\n lists = list(zip(list1, list2))\n self.status = lists\n msg = Message(text=json.dumps(jsonable_encoder(lists)))\n return msg\n"
- },
- "list1": {
- "_input_type": "DataInput",
- "advanced": false,
- "display_name": "List One",
- "dynamic": false,
- "info": "",
- "input_types": [
- "Data"
- ],
- "list": true,
- "list_add_label": "Add More",
- "name": "list1",
- "placeholder": "",
- "required": true,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_input": true,
- "trace_as_metadata": true,
- "type": "other",
- "value": ""
- },
- "list2": {
- "_input_type": "DataInput",
- "advanced": false,
- "display_name": "List Two",
- "dynamic": false,
- "info": "",
- "input_types": [
- "Data"
- ],
- "list": true,
- "list_add_label": "Add More",
- "name": "list2",
- "placeholder": "",
- "required": true,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_input": true,
- "trace_as_metadata": true,
- "type": "other",
- "value": ""
- }
- },
- "tool_mode": false
- },
- "showNode": true,
- "type": "MyZipper"
- },
- "id": "MyZipper-xVGrn",
- "measured": {
- "height": 256,
- "width": 320
- },
- "position": {
- "x": 1273.5574899204412,
- "y": 939.9104384225966
- },
- "selected": false,
- "type": "genericNode"
- },
- {
- "data": {
- "id": "CustomComponent-y0t72",
- "node": {
- "base_classes": [
- "Data"
- ],
- "beta": false,
- "conditional_paths": [],
- "custom_fields": {},
- "description": "Use as a template to create your own component.",
- "display_name": "C SequenceMaker",
- "documentation": "https://docs.langflow.org/components-custom-components",
- "edited": true,
- "field_order": [],
- "frozen": false,
- "icon": "code",
- "legacy": false,
- "lf_version": "1.4.1",
- "metadata": {},
- "minimized": false,
- "output_types": [],
- "outputs": [
- {
- "allows_loop": false,
- "cache": true,
- "display_name": "Output",
- "hidden": false,
- "method": "build_output",
- "name": "output",
- "required_inputs": null,
- "selected": "Data",
- "tool_mode": true,
- "types": [
- "Data"
- ],
- "value": "__UNDEFINED__"
- }
- ],
- "pinned": false,
- "template": {
- "_type": "Component",
- "code": {
- "advanced": true,
- "dynamic": true,
- "fileTypes": [],
- "file_path": "",
- "info": "",
- "list": false,
- "load_from_db": false,
- "multiline": true,
- "name": "code",
- "password": false,
- "placeholder": "",
- "required": true,
- "show": true,
- "title_case": false,
- "type": "code",
- "value": "# from lfx.field_typing import Data\nfrom lfx.custom import Component\nfrom lfx.io import MessageTextInput, Output\nfrom lfx.schema import Data\n\n\nclass CustomComponent(Component):\n display_name = \"C SequenceMaker\"\n description = \"Use as a template to create your own component.\"\n documentation: str = \"https://docs.langflow.org/components-custom-components\"\n icon = \"code\"\n name = \"CustomComponent\"\n\n outputs = [\n Output(display_name=\"Output\", name=\"output\", method=\"build_output\"),\n ]\n\n def build_output(self) -> Data:\n return [Data(q=i) for i in range(10)]\n"
- }
- },
- "tool_mode": false
- },
- "showNode": true,
- "type": "CustomComponent"
- },
- "id": "CustomComponent-y0t72",
- "measured": {
- "height": 167,
- "width": 320
- },
- "position": {
- "x": 197,
- "y": 979.6063779114629
- },
- "selected": false,
- "type": "genericNode"
- },
- {
- "data": {
- "description": "Iterates over a list of Data objects, outputting one item at a time and aggregating results from loop inputs.",
- "display_name": "Loop",
- "id": "LoopComponent-PTNzd",
- "node": {
- "base_classes": [
- "Data"
- ],
- "beta": false,
- "conditional_paths": [],
- "custom_fields": {},
- "description": "Iterates over a list of Data objects, outputting one item at a time and aggregating results from loop inputs.",
- "display_name": "Loop",
- "documentation": "",
- "edited": false,
- "field_order": [
- "data"
- ],
- "frozen": false,
- "icon": "infinity",
- "legacy": false,
- "lf_version": "1.4.1",
- "metadata": {},
- "minimized": false,
- "output_types": [],
- "outputs": [
- {
- "allows_loop": true,
- "cache": true,
- "display_name": "Item",
- "hidden": false,
- "method": "item_output",
- "name": "item",
- "options": null,
- "required_inputs": null,
- "selected": "Data",
- "tool_mode": true,
- "types": [
- "Data"
- ],
- "value": "__UNDEFINED__"
- },
- {
- "allows_loop": false,
- "cache": true,
- "display_name": "Done",
- "hidden": false,
- "method": "done_output",
- "name": "done",
- "options": null,
- "required_inputs": null,
- "selected": "Data",
- "tool_mode": true,
- "types": [
- "Data"
- ],
- "value": "__UNDEFINED__"
- }
- ],
- "pinned": false,
- "template": {
- "_type": "Component",
- "code": {
- "advanced": true,
- "dynamic": true,
- "fileTypes": [],
- "file_path": "",
- "info": "",
- "list": false,
- "load_from_db": false,
- "multiline": true,
- "name": "code",
- "password": false,
- "placeholder": "",
- "required": true,
- "show": true,
- "title_case": false,
- "type": "code",
-                  "value": "from lfx.custom import Component\nfrom lfx.io import DataInput, Output\nfrom lfx.schema import Data\n\n\nclass LoopComponent(Component):\n    display_name = \"Loop\"\n    description = (\n        \"Iterates over a list of Data objects, outputting one item at a time and aggregating results from loop inputs.\"\n    )\n    icon = \"infinity\"\n\n    inputs = [\n        DataInput(\n            name=\"data\",\n            display_name=\"Data\",\n            info=\"The initial list of Data objects to iterate over.\",\n        ),\n    ]\n\n    outputs = [\n        Output(display_name=\"Item\", name=\"item\", method=\"item_output\", allows_loop=True),\n        Output(display_name=\"Done\", name=\"done\", method=\"done_output\"),\n    ]\n\n    def initialize_data(self) -> None:\n        \"\"\"Initialize the data list, context index, and aggregated list.\"\"\"\n        if self.ctx.get(f\"{self._id}_initialized\", False):\n            return\n\n        # Ensure data is a list of Data objects\n        data_list = self._validate_data(self.data)\n\n        # Store the initial data and context variables\n        self.update_ctx(\n            {\n                f\"{self._id}_data\": data_list,\n                f\"{self._id}_index\": 0,\n                f\"{self._id}_aggregated\": [],\n                f\"{self._id}_initialized\": True,\n            }\n        )\n\n    def _validate_data(self, data):\n        \"\"\"Validate and return a list of Data objects.\"\"\"\n        if isinstance(data, Data):\n            return [data]\n        if isinstance(data, list) and all(isinstance(item, Data) for item in data):\n            return data\n        msg = \"The 'data' input must be a list of Data objects or a single Data object.\"\n        raise TypeError(msg)\n\n    def evaluate_stop_loop(self) -> bool:\n        \"\"\"Evaluate whether to stop item or done output.\"\"\"\n        current_index = self.ctx.get(f\"{self._id}_index\", 0)\n        data_length = len(self.ctx.get(f\"{self._id}_data\", []))\n        return current_index > data_length\n\n    def item_output(self) -> Data:\n        \"\"\"Output the next item in the list or stop if done.\"\"\"\n        self.initialize_data()\n        current_item = Data(text=\"\")\n\n        if self.evaluate_stop_loop():\n            self.stop(\"item\")\n        else:\n            # Get data list and current index\n            data_list, current_index = self.loop_variables()\n            if current_index < len(data_list):\n                # Output current item and increment index\n                try:\n                    current_item = data_list[current_index]\n                except IndexError:\n                    current_item = Data(text=\"\")\n            self.aggregated_output()\n            self.update_ctx({f\"{self._id}_index\": current_index + 1})\n\n        # Now we need to update the dependencies for the next run\n        return current_item\n\n    def update_dependency(self):\n        item_dependency_id = self.get_incoming_edge_by_target_param(\"item\")\n\n        self.graph.run_manager.run_predecessors[self._id].append(item_dependency_id)\n\n    def done_output(self) -> Data:\n        \"\"\"Trigger the done output when iteration is complete.\"\"\"\n        self.initialize_data()\n\n        if self.evaluate_stop_loop():\n            self.stop(\"item\")\n            self.start(\"done\")\n\n            return self.ctx.get(f\"{self._id}_aggregated\", [])\n        self.stop(\"done\")\n        return Data(text=\"\")\n\n    def loop_variables(self):\n        \"\"\"Retrieve loop variables from context.\"\"\"\n        return (\n            self.ctx.get(f\"{self._id}_data\", []),\n            self.ctx.get(f\"{self._id}_index\", 0),\n        )\n\n    def aggregated_output(self) -> Data:\n        \"\"\"Return the aggregated list once all items are processed.\"\"\"\n        self.initialize_data()\n\n        # Get data list and aggregated list\n        data_list = self.ctx.get(f\"{self._id}_data\", [])\n        aggregated = self.ctx.get(f\"{self._id}_aggregated\", [])\n\n        # Check if loop input is provided and append to aggregated list\n        if self.item is not None and not isinstance(self.item, str) and len(aggregated) <= len(data_list):\n            aggregated.append(self.item)\n            self.update_ctx({f\"{self._id}_aggregated\": aggregated})\n        return aggregated\n"
- },
- "data": {
- "_input_type": "DataInput",
- "advanced": false,
- "display_name": "Data",
- "dynamic": false,
- "info": "The initial list of Data objects to iterate over.",
- "input_types": [
- "Data"
- ],
- "list": false,
- "list_add_label": "Add More",
- "name": "data",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_input": true,
- "trace_as_metadata": true,
- "type": "other",
- "value": ""
- }
- },
- "tool_mode": false
- },
- "showNode": true,
- "type": "LoopComponent"
- },
- "id": "LoopComponent-PTNzd",
- "measured": {
- "height": 280,
- "width": 320
- },
- "position": {
- "x": 585.4137083070362,
- "y": 505.0807090732918
- },
- "selected": false,
- "type": "genericNode"
- },
- {
- "data": {
- "id": "MessagetoData-8O7uJ",
- "node": {
- "base_classes": [
- "Data"
- ],
- "beta": true,
- "conditional_paths": [],
- "custom_fields": {},
- "description": "Convert a Message object to a Data object",
- "display_name": "Message to Data",
- "documentation": "",
- "edited": false,
- "field_order": [
- "message"
- ],
- "frozen": false,
- "icon": "message-square-share",
- "legacy": false,
- "lf_version": "1.4.1",
- "metadata": {},
- "minimized": false,
- "output_types": [],
- "outputs": [
- {
- "allows_loop": false,
- "cache": true,
- "display_name": "Data",
- "hidden": false,
- "method": "convert_message_to_data",
- "name": "data",
- "selected": "Data",
- "tool_mode": true,
- "types": [
- "Data"
- ],
- "value": "__UNDEFINED__"
- }
- ],
- "pinned": false,
- "template": {
- "_type": "Component",
- "code": {
- "advanced": true,
- "dynamic": true,
- "fileTypes": [],
- "file_path": "",
- "info": "",
- "list": false,
- "load_from_db": false,
- "multiline": true,
- "name": "code",
- "password": false,
- "placeholder": "",
- "required": true,
- "show": true,
- "title_case": false,
- "type": "code",
- "value": "from lfx.log.logger import logger\n\nfrom langflow.custom import Component\nfrom langflow.io import MessageInput, Output\nfrom lfx.schema import Data\nfrom lfx.schema.message import Message\n\n\nclass MessageToDataComponent(Component):\n display_name = \"Message to Data\"\n description = \"Convert a Message object to a Data object\"\n icon = \"message-square-share\"\n beta = True\n name = \"MessagetoData\"\n\n inputs = [\n MessageInput(\n name=\"message\",\n display_name=\"Message\",\n info=\"The Message object to convert to a Data object\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Data\", name=\"data\", method=\"convert_message_to_data\"),\n ]\n\n def convert_message_to_data(self) -> Data:\n if isinstance(self.message, Message):\n # Convert Message to Data\n return Data(data=self.message.data)\n\n msg = \"Error converting Message to Data: Input must be a Message object\"\n logger.debug(msg, exc_info=True)\n self.status = msg\n return Data(data={\"error\": msg})\n"
- },
- "message": {
- "_input_type": "MessageInput",
- "advanced": false,
- "display_name": "Message",
- "dynamic": false,
- "info": "The Message object to convert to a Data object",
- "input_types": [
- "Message"
- ],
- "list": false,
- "list_add_label": "Add More",
- "load_from_db": false,
- "name": "message",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_input": true,
- "trace_as_metadata": true,
- "type": "str",
- "value": ""
- }
- },
- "tool_mode": false
- },
- "showNode": true,
- "type": "MessagetoData"
- },
- "id": "MessagetoData-8O7uJ",
- "measured": {
- "height": 230,
- "width": 320
- },
- "position": {
- "x": 1343.3046986106053,
- "y": 472.9775668087468
- },
- "selected": false,
- "type": "genericNode"
- },
- {
- "data": {
- "description": "Convert Data objects into Messages using any {field_name} from input data.",
- "display_name": "Data to Message",
- "id": "ParseData-qyLj8",
- "node": {
- "base_classes": [
- "Data",
- "Message"
- ],
- "beta": false,
- "conditional_paths": [],
- "custom_fields": {},
- "description": "Convert Data objects into Messages using any {field_name} from input data.",
- "display_name": "Data to Message",
- "documentation": "",
- "edited": false,
- "field_order": [
- "data",
- "template",
- "sep"
- ],
- "frozen": false,
- "icon": "message-square",
- "legacy": true,
- "lf_version": "1.4.1",
- "metadata": {
- "legacy_name": "Parse Data"
- },
- "minimized": false,
- "output_types": [],
- "outputs": [
- {
- "allows_loop": false,
- "cache": true,
- "display_name": "Message",
- "hidden": false,
- "method": "parse_data",
- "name": "text",
- "options": null,
- "required_inputs": null,
- "selected": "Message",
- "tool_mode": true,
- "types": [
- "Message"
- ],
- "value": "__UNDEFINED__"
- },
- {
- "allows_loop": false,
- "cache": true,
- "display_name": "Data List",
- "method": "parse_data_as_list",
- "name": "data_list",
- "options": null,
- "required_inputs": null,
- "selected": "Data",
- "tool_mode": true,
- "types": [
- "Data"
- ],
- "value": "__UNDEFINED__"
- }
- ],
- "pinned": false,
- "template": {
- "_type": "Component",
- "code": {
- "advanced": true,
- "dynamic": true,
- "fileTypes": [],
- "file_path": "",
- "info": "",
- "list": false,
- "load_from_db": false,
- "multiline": true,
- "name": "code",
- "password": false,
- "placeholder": "",
- "required": true,
- "show": true,
- "title_case": false,
- "type": "code",
- "value": "from lfx.custom import Component\nfrom lfx.helpers.data import data_to_text, data_to_text_list\nfrom lfx.io import DataInput, MultilineInput, Output, StrInput\nfrom lfx.schema import Data\nfrom lfx.schema.message import Message\n\n\nclass ParseDataComponent(Component):\n display_name = \"Data to Message\"\n description = \"Convert Data objects into Messages using any {field_name} from input data.\"\n icon = \"message-square\"\n name = \"ParseData\"\n legacy = True\n metadata = {\n \"legacy_name\": \"Parse Data\",\n }\n\n inputs = [\n DataInput(\n name=\"data\",\n display_name=\"Data\",\n info=\"The data to convert to text.\",\n is_list=True,\n required=True,\n ),\n MultilineInput(\n name=\"template\",\n display_name=\"Template\",\n info=\"The template to use for formatting the data. \"\n \"It can contain the keys {text}, {data} or any other key in the Data.\",\n value=\"{text}\",\n required=True,\n ),\n StrInput(name=\"sep\", display_name=\"Separator\", advanced=True, value=\"\\n\"),\n ]\n\n outputs = [\n Output(\n display_name=\"Message\",\n name=\"text\",\n info=\"Data as a single Message, with each input Data separated by Separator\",\n method=\"parse_data\",\n ),\n Output(\n display_name=\"Data List\",\n name=\"data_list\",\n info=\"Data as a list of new Data, each having `text` formatted by Template\",\n method=\"parse_data_as_list\",\n ),\n ]\n\n def _clean_args(self) -> tuple[list[Data], str, str]:\n data = self.data if isinstance(self.data, list) else [self.data]\n template = self.template\n sep = self.sep\n return data, template, sep\n\n def parse_data(self) -> Message:\n data, template, sep = self._clean_args()\n result_string = data_to_text(template, data, sep)\n self.status = result_string\n return Message(text=result_string)\n\n def parse_data_as_list(self) -> list[Data]:\n data, template, _ = self._clean_args()\n text_list, data_list = data_to_text_list(template, data)\n for item, text in zip(data_list, text_list, strict=True):\n item.set_text(text)\n self.status = data_list\n return data_list\n"
- },
- "data": {
- "_input_type": "DataInput",
- "advanced": false,
- "display_name": "Data",
- "dynamic": false,
- "info": "The data to convert to text.",
- "input_types": [
- "Data"
- ],
- "list": true,
- "list_add_label": "Add More",
- "name": "data",
- "placeholder": "",
- "required": true,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_input": true,
- "trace_as_metadata": true,
- "type": "other",
- "value": ""
- },
- "sep": {
- "_input_type": "StrInput",
- "advanced": true,
- "display_name": "Separator",
- "dynamic": false,
- "info": "",
- "list": false,
- "list_add_label": "Add More",
- "load_from_db": false,
- "name": "sep",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_metadata": true,
- "type": "str",
- "value": "\n"
- },
- "template": {
- "_input_type": "MultilineInput",
- "advanced": false,
- "copy_field": false,
- "display_name": "Template",
- "dynamic": false,
- "info": "The template to use for formatting the data. It can contain the keys {text}, {data} or any other key in the Data.",
- "input_types": [
- "Message"
- ],
- "list": false,
- "list_add_label": "Add More",
- "load_from_db": false,
- "multiline": true,
- "name": "template",
- "placeholder": "",
- "required": true,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_input": true,
- "trace_as_metadata": true,
- "type": "str",
- "value": "THIS IS Q ==> {q}"
- }
- },
- "tool_mode": false
- },
- "showNode": true,
- "type": "ParseData"
- },
- "id": "ParseData-qyLj8",
- "measured": {
- "height": 342,
- "width": 320
- },
- "position": {
- "x": 991.9841408151478,
- "y": 418
- },
- "selected": false,
- "type": "genericNode"
- },
- {
- "data": {
- "id": "ChatOutput-tF7vz",
- "node": {
- "base_classes": [
- "Message"
- ],
- "beta": false,
- "conditional_paths": [],
- "custom_fields": {},
- "description": "Display a chat message in the Playground.",
- "display_name": "Chat Output",
- "documentation": "",
- "edited": false,
- "field_order": [
- "input_value",
- "should_store_message",
- "sender",
- "sender_name",
- "session_id",
- "data_template",
- "background_color",
- "chat_icon",
- "text_color",
- "clean_data"
- ],
- "frozen": false,
- "icon": "MessagesSquare",
- "legacy": false,
- "lf_version": "1.4.1",
- "metadata": {},
- "minimized": true,
- "output_types": [],
- "outputs": [
- {
- "allows_loop": false,
- "cache": true,
- "display_name": "Message",
- "method": "message_response",
- "name": "message",
- "selected": "Message",
- "tool_mode": true,
- "types": [
- "Message"
- ],
- "value": "__UNDEFINED__"
- }
- ],
- "pinned": false,
- "template": {
- "_type": "Component",
- "background_color": {
- "_input_type": "MessageTextInput",
- "advanced": true,
- "display_name": "Background Color",
- "dynamic": false,
- "info": "The background color of the icon.",
- "input_types": [
- "Message"
- ],
- "list": false,
- "list_add_label": "Add More",
- "load_from_db": false,
- "name": "background_color",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_input": true,
- "trace_as_metadata": true,
- "type": "str",
- "value": ""
- },
- "chat_icon": {
- "_input_type": "MessageTextInput",
- "advanced": true,
- "display_name": "Icon",
- "dynamic": false,
- "info": "The icon of the message.",
- "input_types": [
- "Message"
- ],
- "list": false,
- "list_add_label": "Add More",
- "load_from_db": false,
- "name": "chat_icon",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_input": true,
- "trace_as_metadata": true,
- "type": "str",
- "value": ""
- },
- "clean_data": {
- "_input_type": "BoolInput",
- "advanced": true,
- "display_name": "Basic Clean Data",
- "dynamic": false,
- "info": "Whether to clean the data",
- "list": false,
- "list_add_label": "Add More",
- "name": "clean_data",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_metadata": true,
- "type": "bool",
- "value": true
- },
- "code": {
- "advanced": true,
- "dynamic": true,
- "fileTypes": [],
- "file_path": "",
- "info": "",
- "list": false,
- "load_from_db": false,
- "multiline": true,
- "name": "code",
- "password": false,
- "placeholder": "",
- "required": true,
- "show": true,
- "title_case": false,
- "type": "code",
-                  "value": "from collections.abc import Generator\nfrom typing import Any\n\nfrom lfx.base.io.chat import ChatComponent\nfrom lfx.inputs import BoolInput\nfrom lfx.inputs.inputs import HandleInput\nfrom lfx.io import DropdownInput, MessageTextInput, Output\nfrom lfx.schema.data import Data\nfrom lfx.schema.dataframe import DataFrame\nfrom lfx.schema.message import Message\nfrom lfx.schema.properties import Source\nfrom lfx.utils.constants import (\n    MESSAGE_SENDER_AI,\n    MESSAGE_SENDER_NAME_AI,\n    MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n    display_name = \"Chat Output\"\n    description = \"Display a chat message in the Playground.\"\n    icon = \"MessagesSquare\"\n    name = \"ChatOutput\"\n    minimized = True\n\n    inputs = [\n        HandleInput(\n            name=\"input_value\",\n            display_name=\"Text\",\n            info=\"Message to be passed as output.\",\n            input_types=[\"Data\", \"DataFrame\", \"Message\"],\n            required=True,\n        ),\n        BoolInput(\n            name=\"should_store_message\",\n            display_name=\"Store Messages\",\n            info=\"Store the message in the history.\",\n            value=True,\n            advanced=True,\n        ),\n        DropdownInput(\n            name=\"sender\",\n            display_name=\"Sender Type\",\n            options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n            value=MESSAGE_SENDER_AI,\n            advanced=True,\n            info=\"Type of sender.\",\n        ),\n        MessageTextInput(\n            name=\"sender_name\",\n            display_name=\"Sender Name\",\n            info=\"Name of the sender.\",\n            value=MESSAGE_SENDER_NAME_AI,\n            advanced=True,\n        ),\n        MessageTextInput(\n            name=\"session_id\",\n            display_name=\"Session ID\",\n            info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n            advanced=True,\n        ),\n        MessageTextInput(\n            name=\"data_template\",\n            display_name=\"Data Template\",\n            value=\"{text}\",\n            advanced=True,\n            info=\"Template to convert Data to Text. If left empty, it will be dynamically set to the Data's text key.\",\n        ),\n        MessageTextInput(\n            name=\"background_color\",\n            display_name=\"Background Color\",\n            info=\"The background color of the icon.\",\n            advanced=True,\n        ),\n        MessageTextInput(\n            name=\"chat_icon\",\n            display_name=\"Icon\",\n            info=\"The icon of the message.\",\n            advanced=True,\n        ),\n        MessageTextInput(\n            name=\"text_color\",\n            display_name=\"Text Color\",\n            info=\"The text color of the name\",\n            advanced=True,\n        ),\n        BoolInput(\n            name=\"clean_data\",\n            display_name=\"Basic Clean Data\",\n            value=True,\n            info=\"Whether to clean the data\",\n            advanced=True,\n        ),\n    ]\n    outputs = [\n        Output(\n            display_name=\"Message\",\n            name=\"message\",\n            method=\"message_response\",\n        ),\n    ]\n\n    def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n        source_dict = {}\n        if id_:\n            source_dict[\"id\"] = id_\n        if display_name:\n            source_dict[\"display_name\"] = display_name\n        if source:\n            # Handle case where source is a ChatOpenAI object\n            if hasattr(source, \"model_name\"):\n                source_dict[\"source\"] = source.model_name\n            elif hasattr(source, \"model\"):\n                source_dict[\"source\"] = str(source.model)\n            else:\n                source_dict[\"source\"] = str(source)\n        return Source(**source_dict)\n\n    async def message_response(self) -> Message:\n        # First convert the input to string if needed\n        text = self.convert_to_string()\n        # Get source properties\n        source, icon, display_name, source_id = self.get_properties_from_source_component()\n        background_color = self.background_color\n        text_color = self.text_color\n        if self.chat_icon:\n            icon = self.chat_icon\n\n        # Create or use existing Message object\n        if isinstance(self.input_value, Message):\n            message = self.input_value\n            # Update message properties\n            message.text = text\n        else:\n            message = Message(text=text)\n\n        # Set message properties\n        message.sender = self.sender\n        message.sender_name = self.sender_name\n        message.session_id = self.session_id\n        message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n        message.properties.source = self._build_source(source_id, display_name, source)\n        message.properties.icon = icon\n        message.properties.background_color = background_color\n        message.properties.text_color = text_color\n\n        # Store message if needed\n        if self.session_id and self.should_store_message:\n            stored_message = await self.send_message(message)\n            self.message.value = stored_message\n            message = stored_message\n\n        self.status = message\n        return message\n\n    def _validate_input(self) -> None:\n        \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n        if self.input_value is None:\n            msg = \"Input data cannot be None\"\n            raise ValueError(msg)\n        if isinstance(self.input_value, list) and not all(\n            isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n        ):\n            invalid_types = [\n                type(item).__name__\n                for item in self.input_value\n                if not isinstance(item, Message | Data | DataFrame | str)\n            ]\n            msg = f\"Expected Data or DataFrame or Message or str, got {invalid_types}\"\n            raise TypeError(msg)\n        if not isinstance(\n            self.input_value,\n            Message | Data | DataFrame | str | list | Generator | type(None),\n        ):\n            type_name = type(self.input_value).__name__\n            msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n            raise TypeError(msg)\n\n    def _safe_convert(self, data: Any) -> str:\n        \"\"\"Safely convert input data to string.\"\"\"\n        try:\n            if isinstance(data, str):\n                return data\n            if isinstance(data, Message):\n                return data.get_text()\n            if isinstance(data, Data):\n                if data.get_text() is None:\n                    msg = \"Empty Data object\"\n                    raise ValueError(msg)\n                return data.get_text()\n            if isinstance(data, DataFrame):\n                if self.clean_data:\n                    # Remove empty rows\n                    data = data.dropna(how=\"all\")\n                    # Remove empty lines in each cell\n                    data = data.replace(r\"^\\s*$\", \"\", regex=True)\n                    # Replace multiple newlines with a single newline\n                    data = data.replace(r\"\\n+\", \"\\n\", regex=True)\n\n                # Replace pipe characters to avoid markdown table issues\n                processed_data = data.replace(r\"\\|\", r\"\\\\|\", regex=True)\n\n                processed_data = processed_data.map(\n                    lambda x: str(x).replace(\"\\n\", \"<br/>\") if isinstance(x, str) else x\n                )\n\n                return processed_data.to_markdown(index=False)\n            return str(data)\n        except (ValueError, TypeError, AttributeError) as e:\n            msg = f\"Error converting data: {e!s}\"\n            raise ValueError(msg) from e\n\n    def convert_to_string(self) -> str | Generator[Any, None, None]:\n        \"\"\"Convert input data to string with proper error handling.\"\"\"\n        self._validate_input()\n        if isinstance(self.input_value, list):\n            return \"\\n\".join([self._safe_convert(item) for item in self.input_value])\n        if isinstance(self.input_value, Generator):\n            return self.input_value\n        return self._safe_convert(self.input_value)\n"
- },
- "data_template": {
- "_input_type": "MessageTextInput",
- "advanced": true,
- "display_name": "Data Template",
- "dynamic": false,
- "info": "Template to convert Data to Text. If left empty, it will be dynamically set to the Data's text key.",
- "input_types": [
- "Message"
- ],
- "list": false,
- "list_add_label": "Add More",
- "load_from_db": false,
- "name": "data_template",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_input": true,
- "trace_as_metadata": true,
- "type": "str",
- "value": "{text}"
- },
- "input_value": {
- "_input_type": "HandleInput",
- "advanced": false,
- "display_name": "Text",
- "dynamic": false,
- "info": "Message to be passed as output.",
- "input_types": [
- "Data",
- "DataFrame",
- "Message"
- ],
- "list": false,
- "list_add_label": "Add More",
- "name": "input_value",
- "placeholder": "",
- "required": true,
- "show": true,
- "title_case": false,
- "trace_as_metadata": true,
- "type": "other",
- "value": ""
- },
- "sender": {
- "_input_type": "DropdownInput",
- "advanced": true,
- "combobox": false,
- "dialog_inputs": {},
- "display_name": "Sender Type",
- "dynamic": false,
- "info": "Type of sender.",
- "name": "sender",
- "options": [
- "Machine",
- "User"
- ],
- "options_metadata": [],
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "toggle": false,
- "tool_mode": false,
- "trace_as_metadata": true,
- "type": "str",
- "value": "Machine"
- },
- "sender_name": {
- "_input_type": "MessageTextInput",
- "advanced": true,
- "display_name": "Sender Name",
- "dynamic": false,
- "info": "Name of the sender.",
- "input_types": [
- "Message"
- ],
- "list": false,
- "list_add_label": "Add More",
- "load_from_db": false,
- "name": "sender_name",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_input": true,
- "trace_as_metadata": true,
- "type": "str",
- "value": "AI"
- },
- "session_id": {
- "_input_type": "MessageTextInput",
- "advanced": true,
- "display_name": "Session ID",
- "dynamic": false,
- "info": "The session ID of the chat. If empty, the current session ID parameter will be used.",
- "input_types": [
- "Message"
- ],
- "list": false,
- "list_add_label": "Add More",
- "load_from_db": false,
- "name": "session_id",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_input": true,
- "trace_as_metadata": true,
- "type": "str",
- "value": ""
- },
- "should_store_message": {
- "_input_type": "BoolInput",
- "advanced": true,
- "display_name": "Store Messages",
- "dynamic": false,
- "info": "Store the message in the history.",
- "list": false,
- "list_add_label": "Add More",
- "name": "should_store_message",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_metadata": true,
- "type": "bool",
- "value": true
- },
- "text_color": {
- "_input_type": "MessageTextInput",
- "advanced": true,
- "display_name": "Text Color",
- "dynamic": false,
- "info": "The text color of the name",
- "input_types": [
- "Message"
- ],
- "list": false,
- "list_add_label": "Add More",
- "load_from_db": false,
- "name": "text_color",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_input": true,
- "trace_as_metadata": true,
- "type": "str",
- "value": ""
- }
- },
- "tool_mode": false
- },
- "showNode": false,
- "type": "ChatOutput"
- },
- "dragging": false,
- "id": "ChatOutput-tF7vz",
- "measured": {
- "height": 66,
- "width": 192
- },
- "position": {
- "x": 1919.7453579471505,
- "y": 967.5942772860075
- },
- "selected": false,
- "type": "genericNode"
- }
- ],
- "viewport": {
- "x": -59.74646157524057,
- "y": 33.37710013512529,
- "zoom": 0.5875454902296473
- }
- },
- "description": "Where Language Meets Logic.",
- "endpoint_name": null,
- "id": "692d3c55-f461-44b8-89ba-5c32a745e224",
- "is_component": false,
- "last_tested_version": "1.4.1",
- "name": "Untitled document",
- "tags": []
-}
\ No newline at end of file
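
Flow exports like the one above are consumed by rebuilding a Graph from the nodes and edges stored under the top-level "data" key, as the deleted get_graph helper does. A minimal sketch of that load path, assuming the lfx.graph.Graph API used elsewhere in this diff:

    import json
    from pathlib import Path

    from lfx.graph import Graph  # same import the deleted simple_agent.py uses


    def load_flow_graph(path: Path) -> Graph:
        # Flow files keep the canvas (nodes and edges) under the "data" key
        flow = json.loads(path.read_text(encoding="utf-8"))
        graph = Graph()
        graph.add_nodes_and_edges(flow["data"]["nodes"], flow["data"]["edges"])
        return graph
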
diff --git a/src/backend/tests/data/simple_agent.py b/src/backend/tests/data/simple_agent.py
deleted file mode 100644
index 8a8e5d78ee09..000000000000
--- a/src/backend/tests/data/simple_agent.py
+++ /dev/null
@@ -1,45 +0,0 @@
-"""A simple agent flow example for Langflow.
-
-This script demonstrates how to set up a conversational agent using Langflow's
-Agent component with web search capabilities.
-
-Features:
-- Uses the new flattened component access (cp.AgentComponent instead of deep imports)
-- Configures logging to 'langflow.log' at INFO level
-- Creates an agent with OpenAI GPT model
-- Provides web search tools via URLComponent
-- Connects ChatInput → Agent → ChatOutput
-
-Usage:
- uv run lfx run simple_agent.py "How are you?"
-"""
-
-import os
-from pathlib import Path
-
-# Using the new flattened component access
-from lfx import components as cp
-from lfx.graph import Graph
-from lfx.log.logger import LogConfig
-
-log_config = LogConfig(
- log_level="INFO",
- log_file=Path("langflow.log"),
-)
-
-# Showcase the new flattened component access - no need for deep imports!
-chat_input = cp.ChatInput()
-agent = cp.AgentComponent()
-url_component = cp.URLComponent()
-tools = url_component.to_toolkit()
-
-agent.set(
- model_name="gpt-4o-mini",
- agent_llm="OpenAI",
- api_key=os.getenv("OPENAI_API_KEY"),
- input_value=chat_input.message_response,
- tools=tools,
-)
-chat_output = cp.ChatOutput().set(input_value=agent.message_response)
-
-graph = Graph(chat_input, chat_output, log_config=log_config)
diff --git a/src/backend/tests/integration/backward_compatibility/test_starter_projects.py b/src/backend/tests/integration/backward_compatibility/test_starter_projects.py
deleted file mode 100644
index 8d2d0b2905b9..000000000000
--- a/src/backend/tests/integration/backward_compatibility/test_starter_projects.py
+++ /dev/null
@@ -1,16 +0,0 @@
-import pytest
-
-from lfx.schema.message import Message
-from tests.api_keys import get_openai_api_key
-from tests.integration.utils import download_flow_from_github, run_json_flow
-
-
-@pytest.mark.api_key_required
-async def test_1_0_15_basic_prompting():
- api_key = get_openai_api_key()
- json_flow = download_flow_from_github("Basic Prompting (Hello, World)", "1.0.15")
- json_flow.set_value(json_flow.get_component_by_type("OpenAIModel"), "api_key", api_key)
- outputs = await run_json_flow(json_flow, run_input="my name is bob, say hello!")
- assert isinstance(outputs["message"], Message)
- response = outputs["message"].text.lower()
- assert "arr" in response or "ahoy" in response
diff --git a/src/backend/tests/integration/cli/__init__.py b/src/backend/tests/integration/cli/__init__.py
deleted file mode 100644
index 3894ad88e655..000000000000
--- a/src/backend/tests/integration/cli/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-"""Integration CLI tests package."""
diff --git a/src/backend/tests/integration/components/apify/apify_actor.md b/src/backend/tests/integration/components/apify/apify_actor.md
deleted file mode 100644
index 7196993e453b..000000000000
--- a/src/backend/tests/integration/components/apify/apify_actor.md
+++ /dev/null
@@ -1,13 +0,0 @@
-# Apify Actors component tests
-
-This component was tested manually with various Apify Actors, both through manual runs and as a tool used by an AI Agent.
-
-## Test cases
-
-### Run Apify Actors manually
-Apify Actor input was manually configured and the component was run to retrieve data.
-When invalid input was provided, the component returned an error message with details.
-
-### Run Apify Actors with AI Agent
-Multiple Apify Actors components with different Actors were connected to an AI Agent.
-The agent was given a task that required running multiple Apify Actors to complete.
diff --git a/src/backend/tests/integration/components/assistants/test_assistants_components.py b/src/backend/tests/integration/components/assistants/test_assistants_components.py
deleted file mode 100644
index 0e51f42aff36..000000000000
--- a/src/backend/tests/integration/components/assistants/test_assistants_components.py
+++ /dev/null
@@ -1,70 +0,0 @@
-import pytest
-
-from lfx.components.datastax import (
- AssistantsCreateAssistant,
- AssistantsCreateThread,
- AssistantsGetAssistantName,
- AssistantsListAssistants,
- AssistantsRun,
-)
-from tests.integration.utils import run_single_component
-
-
-@pytest.mark.api_key_required
-async def test_list_assistants():
- results = await run_single_component(
- AssistantsListAssistants,
- inputs={},
- )
- assert results["assistants"].text is not None
-
-
-@pytest.mark.api_key_required
-async def test_create_assistants():
- results = await run_single_component(
- AssistantsCreateAssistant,
- inputs={
- "assistant_name": "artist-bot",
- "instructions": "reply only with ascii art",
- "model": "gpt-4o-mini",
- },
- )
- assistant_id = results["assistant_id"].text
- assert assistant_id is not None
- await test_list_assistants()
- await get_assistant_name(assistant_id)
- thread_id = await test_create_thread()
- await run_assistant(assistant_id, thread_id)
-
-
-@pytest.mark.api_key_required
-async def test_create_thread():
- results = await run_single_component(
- AssistantsCreateThread,
- inputs={},
- )
- thread_id = results["thread_id"].text
- assert thread_id is not None
- return thread_id
-
-
-async def get_assistant_name(assistant_id):
- results = await run_single_component(
- AssistantsGetAssistantName,
- inputs={
- "assistant_id": assistant_id,
- },
- )
- assert results["assistant_name"].text is not None
-
-
-async def run_assistant(assistant_id, thread_id):
- results = await run_single_component(
- AssistantsRun,
- inputs={
- "assistant_id": assistant_id,
- "user_message": "hello",
- "thread_id": thread_id,
- },
- )
- assert results["assistant_response"].text is not None
diff --git a/src/backend/tests/integration/components/astra/test_astra_component.py b/src/backend/tests/integration/components/astra/test_astra_component.py
deleted file mode 100644
index 6dad2a919abf..000000000000
--- a/src/backend/tests/integration/components/astra/test_astra_component.py
+++ /dev/null
@@ -1,221 +0,0 @@
-import os
-
-import pytest
-from astrapy import DataAPIClient
-from langchain_astradb import AstraDBVectorStore, VectorServiceOptions
-from langchain_core.documents import Document
-
-from lfx.components.openai.openai import OpenAIEmbeddingsComponent
-from lfx.components.vectorstores import AstraDBVectorStoreComponent
-from lfx.schema.data import Data
-from tests.api_keys import get_astradb_api_endpoint, get_astradb_application_token, get_openai_api_key
-from tests.integration.components.mock_components import TextToData
-from tests.integration.utils import ComponentInputHandle, run_single_component
-
-BASIC_COLLECTION = "test_basic"
-SEARCH_COLLECTION = "test_search"
-# MEMORY_COLLECTION = "test_memory"
-VECTORIZE_COLLECTION = "test_vectorize"
-VECTORIZE_COLLECTION_OPENAI = "test_vectorize_openai"
-VECTORIZE_COLLECTION_OPENAI_WITH_AUTH = "test_vectorize_openai_auth"
-ALL_COLLECTIONS = [
- BASIC_COLLECTION,
- SEARCH_COLLECTION,
- # MEMORY_COLLECTION,
- VECTORIZE_COLLECTION,
- VECTORIZE_COLLECTION_OPENAI,
- VECTORIZE_COLLECTION_OPENAI_WITH_AUTH,
-]
-
-
-@pytest.fixture
-def astradb_client():
- api_client = DataAPIClient()
- client = api_client.get_database(get_astradb_api_endpoint(), token=get_astradb_application_token())
-
- yield client # Provide the client to the test functions
-
- # Cleanup: Drop all collections after tests
- for collection in ALL_COLLECTIONS:
- try: # noqa: SIM105
- client.drop_collection(collection)
- except Exception: # noqa: S110
- pass
-
-
-@pytest.mark.api_key_required
-async def test_base(astradb_client: DataAPIClient):
- application_token = get_astradb_application_token()
- api_endpoint = get_astradb_api_endpoint()
-
- results = await run_single_component(
- AstraDBVectorStoreComponent,
- inputs={
- "token": application_token,
- "api_endpoint": api_endpoint,
- "collection_name": BASIC_COLLECTION,
- "embedding_model": ComponentInputHandle(
- clazz=OpenAIEmbeddingsComponent,
- inputs={"openai_api_key": get_openai_api_key()},
- output_name="embeddings",
- ),
- },
- )
-
- assert results["search_results"] == []
- assert astradb_client.get_collection(BASIC_COLLECTION)
-
-
-@pytest.mark.api_key_required
-async def test_astra_embeds_and_search():
- application_token = get_astradb_application_token()
- api_endpoint = get_astradb_api_endpoint()
-
- results = await run_single_component(
- AstraDBVectorStoreComponent,
- inputs={
- "token": application_token,
- "api_endpoint": api_endpoint,
- "collection_name": BASIC_COLLECTION,
- "number_of_results": 1,
- "search_query": "test1",
- "ingest_data": ComponentInputHandle(
- clazz=TextToData, inputs={"text_data": ["test1", "test2"]}, output_name="from_text"
- ),
- "embedding_model": ComponentInputHandle(
- clazz=OpenAIEmbeddingsComponent,
- inputs={"openai_api_key": get_openai_api_key()},
- output_name="embeddings",
- ),
- },
- )
- assert len(results["search_results"]) == 1
-
-
-@pytest.mark.api_key_required
-def test_astra_vectorize():
- application_token = get_astradb_application_token()
- api_endpoint = get_astradb_api_endpoint()
-
- store = None
- try:
- # Get the vectorize options
- options = {"provider": "nvidia", "modelName": "NV-Embed-QA"}
-
- store = AstraDBVectorStore(
- collection_name=VECTORIZE_COLLECTION,
- api_endpoint=api_endpoint,
- token=application_token,
- collection_vector_service_options=VectorServiceOptions._from_dict(options),
- )
-
- documents = [Document(page_content="test1"), Document(page_content="test2")]
- records = [Data.from_document(d) for d in documents]
-
- component = AstraDBVectorStoreComponent()
-
- component.build(
- token=application_token,
- api_endpoint=api_endpoint,
- collection_name=VECTORIZE_COLLECTION,
- ingest_data=records,
- search_query="test",
- number_of_results=2,
- )
- vector_store = component.build_vector_store()
- records = component.search_documents(vector_store=vector_store)
-
- assert len(records) == 2
- finally:
- if store is not None:
- store.delete_collection()
-
-
-@pytest.mark.api_key_required
-def test_astra_vectorize_with_provider_api_key():
- """Tests vectorize using an openai api key."""
- application_token = get_astradb_application_token()
- api_endpoint = get_astradb_api_endpoint()
-
- store = None
- try:
- options = {
- "provider": "openai",
- "modelName": "text-embedding-3-small",
- "parameters": {},
- "authentication": {"providerKey": "openai"},
- }
-
- store = AstraDBVectorStore(
- collection_name=VECTORIZE_COLLECTION_OPENAI,
- api_endpoint=api_endpoint,
- token=application_token,
- collection_vector_service_options=VectorServiceOptions._from_dict(options),
- collection_embedding_api_key=os.getenv("OPENAI_API_KEY"),
- )
- documents = [Document(page_content="test1"), Document(page_content="test2")]
- records = [Data.from_document(d) for d in documents]
-
- component = AstraDBVectorStoreComponent()
-
- component.build(
- token=application_token,
- api_endpoint=api_endpoint,
- collection_name=VECTORIZE_COLLECTION_OPENAI,
- ingest_data=records,
- search_query="test",
- number_of_results=2,
- )
-
- vector_store = component.build_vector_store()
- records = component.search_documents(vector_store=vector_store)
-
- assert len(records) == 2
- finally:
- if store is not None:
- store.delete_collection()
-
-
-@pytest.mark.api_key_required
-def test_astra_vectorize_passes_authentication():
- """Tests vectorize using the authentication parameter."""
- store = None
- try:
- application_token = get_astradb_application_token()
- api_endpoint = get_astradb_api_endpoint()
-
- options = {
- "provider": "openai",
- "modelName": "text-embedding-3-small",
- "parameters": {},
- "authentication": {"providerKey": "openai"},
- }
-
- store = AstraDBVectorStore(
- collection_name=VECTORIZE_COLLECTION_OPENAI_WITH_AUTH,
- api_endpoint=api_endpoint,
- token=application_token,
- collection_vector_service_options=VectorServiceOptions._from_dict(options),
- )
-
- documents = [Document(page_content="test1"), Document(page_content="test2")]
- records = [Data.from_document(d) for d in documents]
-
- component = AstraDBVectorStoreComponent()
-
- component.build(
- token=application_token,
- api_endpoint=api_endpoint,
- collection_name=VECTORIZE_COLLECTION_OPENAI_WITH_AUTH,
- ingest_data=records,
- search_query="test",
- number_of_results=2,
- )
-
- vector_store = component.build_vector_store()
- records = component.search_documents(vector_store=vector_store)
-
- assert len(records) == 2
- finally:
- if store is not None:
- store.delete_collection()
diff --git a/src/backend/tests/integration/components/helpers/test_parse_json_data.py b/src/backend/tests/integration/components/helpers/test_parse_json_data.py
deleted file mode 100644
index 669f78a04afb..000000000000
--- a/src/backend/tests/integration/components/helpers/test_parse_json_data.py
+++ /dev/null
@@ -1,55 +0,0 @@
-from lfx.components.input_output import ChatInput
-from lfx.components.processing.parse_json_data import ParseJSONDataComponent
-from lfx.schema import Data
-from tests.integration.components.mock_components import TextToData
-from tests.integration.utils import ComponentInputHandle, pyleak_marker, run_single_component
-
-pytestmark = pyleak_marker()
-
-
-async def test_from_data():
- outputs = await run_single_component(
- ParseJSONDataComponent,
- inputs={
- "input_value": ComponentInputHandle(
- clazz=TextToData, inputs={"text_data": ['{"key":"value1"}'], "is_json": True}, output_name="from_text"
- ),
- "query": ".[0].key",
- },
- )
- assert outputs["filtered_data"] == [Data(text="value1")]
-
- outputs = await run_single_component(
- ParseJSONDataComponent,
- inputs={
- "input_value": ComponentInputHandle(
- clazz=TextToData,
- inputs={"text_data": ['{"key":[{"field1": 1, "field2": 2}]}'], "is_json": True},
- output_name="from_text",
- ),
- "query": ".[0].key.[0].field2",
- },
- )
- assert outputs["filtered_data"] == [Data(text="2")]
-
-
-async def test_from_message():
- outputs = await run_single_component(
- ParseJSONDataComponent,
- inputs={
- "input_value": ComponentInputHandle(clazz=ChatInput, inputs={}, output_name="message"),
- "query": ".key",
- },
- run_input="{'key':'value1'}",
- )
- assert outputs["filtered_data"] == [Data(text="value1")]
-
- outputs = await run_single_component(
- ParseJSONDataComponent,
- inputs={
- "input_value": ComponentInputHandle(clazz=ChatInput, inputs={}, output_name="message"),
- "query": ".key.[0].field2",
- },
- run_input='{"key":[{"field1": 1, "field2": 2}]}',
- )
- assert outputs["filtered_data"] == [Data(text="2")]
diff --git a/src/backend/tests/integration/components/inputs/test_chat_input.py b/src/backend/tests/integration/components/inputs/test_chat_input.py
deleted file mode 100644
index cfe0b2f0edec..000000000000
--- a/src/backend/tests/integration/components/inputs/test_chat_input.py
+++ /dev/null
@@ -1,52 +0,0 @@
-from lfx.components.input_output import ChatInput
-from lfx.memory import aget_messages
-from lfx.schema.message import Message
-from tests.integration.utils import pyleak_marker, run_single_component
-
-pytestmark = pyleak_marker()
-
-
-async def test_default():
- outputs = await run_single_component(ChatInput, run_input="hello")
- assert isinstance(outputs["message"], Message)
- assert outputs["message"].text == "hello"
- assert outputs["message"].sender == "User"
- assert outputs["message"].sender_name == "User"
-
- outputs = await run_single_component(ChatInput, run_input="")
- assert isinstance(outputs["message"], Message)
- assert outputs["message"].text == ""
- assert outputs["message"].sender == "User"
- assert outputs["message"].sender_name == "User"
-
-
-async def test_sender():
- outputs = await run_single_component(
- ChatInput, inputs={"sender": "Machine", "sender_name": "AI"}, run_input="hello"
- )
- assert isinstance(outputs["message"], Message)
- assert outputs["message"].text == "hello"
- assert outputs["message"].sender == "Machine"
- assert outputs["message"].sender_name == "AI"
-
-
-async def test_do_not_store_messages():
- session_id = "test-session-id"
- outputs = await run_single_component(
- ChatInput, inputs={"should_store_message": True}, run_input="hello", session_id=session_id
- )
- assert isinstance(outputs["message"], Message)
- assert outputs["message"].text == "hello"
- assert outputs["message"].session_id == session_id
-
- assert len(await aget_messages(session_id=session_id)) == 1
-
- session_id = "test-session-id-another"
- outputs = await run_single_component(
- ChatInput, inputs={"should_store_message": False}, run_input="hello", session_id=session_id
- )
- assert isinstance(outputs["message"], Message)
- assert outputs["message"].text == "hello"
- assert outputs["message"].session_id == session_id
-
- assert len(await aget_messages(session_id=session_id)) == 0
diff --git a/src/backend/tests/integration/components/inputs/test_text_input.py b/src/backend/tests/integration/components/inputs/test_text_input.py
deleted file mode 100644
index ecbd7be3ff7b..000000000000
--- a/src/backend/tests/integration/components/inputs/test_text_input.py
+++ /dev/null
@@ -1,19 +0,0 @@
-from lfx.components.input_output import TextInputComponent
-from lfx.schema.message import Message
-from tests.integration.utils import pyleak_marker, run_single_component
-
-pytestmark = pyleak_marker()
-
-
-async def test_text_input():
- outputs = await run_single_component(TextInputComponent, run_input="sample text", input_type="text")
- assert isinstance(outputs["text"], Message)
- assert outputs["text"].text == "sample text"
- assert outputs["text"].sender is None
- assert outputs["text"].sender_name is None
-
- outputs = await run_single_component(TextInputComponent, run_input="", input_type="text")
- assert isinstance(outputs["text"], Message)
- assert outputs["text"].text == ""
- assert outputs["text"].sender is None
- assert outputs["text"].sender_name is None
diff --git a/src/backend/tests/integration/components/mcp/test_mcp_component.py b/src/backend/tests/integration/components/mcp/test_mcp_component.py
deleted file mode 100644
index d77941bbc143..000000000000
--- a/src/backend/tests/integration/components/mcp/test_mcp_component.py
+++ /dev/null
@@ -1,18 +0,0 @@
-import pytest
-
-from tests.integration.utils import run_single_component
-
-
-# TODO: Add more tests for MCPToolsComponent
-@pytest.mark.asyncio
-async def test_mcp_component():
- from lfx.components.agents.mcp_component import MCPToolsComponent
-
- inputs = {}
-
- # Expect an error from this call
- with pytest.raises(ValueError, match="None"):
- await run_single_component(
- MCPToolsComponent,
- inputs=inputs,
- )
diff --git a/src/backend/tests/integration/components/mcp/test_mcp_memory_leak.py b/src/backend/tests/integration/components/mcp/test_mcp_memory_leak.py
deleted file mode 100644
index 4b295cd13197..000000000000
--- a/src/backend/tests/integration/components/mcp/test_mcp_memory_leak.py
+++ /dev/null
@@ -1,415 +0,0 @@
-"""Integration tests for MCP memory leak fix.
-
-These tests verify that the MCP session manager properly handles session reuse
-and cleanup to prevent subprocess leaks.
-"""
-
-import asyncio
-import contextlib
-import os
-import platform
-import shutil
-import time
-
-import psutil
-import pytest
-from mcp import StdioServerParameters
-
-from lfx.base.mcp.util import MCPSessionManager
-from lfx.log.logger import logger
-
-pytestmark = [
- pytest.mark.timeout(300, method="thread"),
- pytest.mark.skip(reason="Skipping all MCP memory leak integration tests for now."),
-]
-
-
-async def wait_tools(session, t=20):
- return await asyncio.wait_for(session.list_tools(), timeout=t)
-
-
-async def wait_no_children(proc, max_wait=10, target=None):
- deadline = time.monotonic() + max_wait
- while time.monotonic() < deadline:
- children = proc.children(recursive=True)
- if target is not None and len(children) <= target:
- return True
- if not children:
- return True
- await asyncio.sleep(0.2)
- return False
-
-
-@pytest.fixture
-def mcp_server_params():
- """Create MCP server parameters for testing."""
- command = ["npx", "-y", "@modelcontextprotocol/server-everything"]
- env_data = {"DEBUG": "true", "PATH": os.environ["PATH"]}
-
- if platform.system() == "Windows":
- return StdioServerParameters(
- command="cmd",
- args=["/c", f"{command[0]} {' '.join(command[1:])}"],
- env=env_data,
- )
- return StdioServerParameters(
- command="bash",
- args=["-c", f"exec {' '.join(command)}"],
- env=env_data,
- )
-
-
-@pytest.fixture
-def process_tracker():
- """Track subprocess count for memory leak detection."""
- process = psutil.Process()
- initial_count = len(process.children(recursive=True))
-
- yield process, initial_count
-
- # Give some time for cleanup to complete before checking for leftover processes
- # Collect child processes that we expect to wait for
- try:
- children = process.children(recursive=True)
- if not children:
- return
-
- gone, alive = psutil.wait_procs(children, timeout=2)
- if gone:
- logger.debug("Processes exited naturally: %s", [p.pid for p in gone])
-
- if alive:
- logger.debug("Processes still alive after 2s: %s", [p.pid for p in alive])
- for p in alive:
- with contextlib.suppress(psutil.NoSuchProcess):
- p.terminate()
-
- gone2, alive2 = psutil.wait_procs(alive, timeout=5)
- if gone2:
- logger.debug("Processes terminated gracefully: %s", [p.pid for p in gone2])
-
- for p in alive2:
- with contextlib.suppress(psutil.NoSuchProcess):
- p.kill()
-
- _ = psutil.wait_procs(alive2, timeout=2)
-
- leftover = process.children(recursive=True)
- assert not leftover, f"Leftover child processes: {[p.pid for p in leftover]}"
-
- except Exception as e:
- logger.exception("Error cleaning up child processes: %s", e)
-
-
-@pytest.mark.asyncio
-@pytest.mark.skipif(not shutil.which("npx"), reason="Node.js not available")
-async def test_session_reuse_prevents_subprocess_leak(mcp_server_params, process_tracker):
- """Test that session reuse prevents subprocess proliferation."""
- process, initial_count = process_tracker
-
- session_manager = MCPSessionManager()
-
- try:
- # Create multiple sessions with different context IDs but same server
- sessions = []
- for i in range(3):
- context_id = f"test_context_{i}"
- session = await session_manager.get_session(context_id, mcp_server_params, "stdio")
- sessions.append(session)
-
- # Verify session is working
- tools_response = await wait_tools(session)
- assert len(tools_response.tools) > 0
-
- # Check subprocess count after creating sessions
- current_count = len(process.children(recursive=True))
- subprocess_increase = current_count - initial_count
-
- # With the fix, we should have minimal subprocess increase
-        # (ideally 2 for the MCP server; the assertion allows up to 4 to tolerate helper processes)
- assert subprocess_increase <= 4, f"Too many subprocesses created: {subprocess_increase}"
-
- # Verify all sessions are functional
- for session in sessions:
- tools_response = await wait_tools(session)
- assert len(tools_response.tools) > 0
-
- finally:
- await session_manager.cleanup_all()
- await wait_no_children(process, max_wait=10, target=initial_count)
- await asyncio.sleep(2) # Allow cleanup to complete
-
-
-@pytest.mark.asyncio
-@pytest.mark.skipif(not shutil.which("npx"), reason="Node.js not available")
-async def test_session_cleanup_removes_subprocesses(mcp_server_params, process_tracker):
- """Test that session cleanup properly removes subprocesses."""
- process, initial_count = process_tracker
-
- session_manager = MCPSessionManager()
-
- try:
- # Create a session
- session = await session_manager.get_session("cleanup_test", mcp_server_params, "stdio")
- tools_response = await wait_tools(session)
- assert len(tools_response.tools) > 0
-
- # Verify subprocess was created
- after_creation_count = len(process.children(recursive=True))
- assert after_creation_count > initial_count
-
- finally:
- # Clean up session
- await session_manager.cleanup_all()
- await wait_no_children(process, max_wait=10, target=initial_count)
- await asyncio.sleep(2) # Allow cleanup to complete
-
- # Verify subprocess was cleaned up
- after_cleanup_count = len(process.children(recursive=True))
- # Allow some tolerance for cleanup timing and system processes
- assert after_cleanup_count <= initial_count + 1, (
- f"Subprocesses not cleaned up properly: {after_cleanup_count} vs {initial_count}"
- )
-
-
-@pytest.mark.asyncio
-@pytest.mark.skipif(not shutil.which("npx"), reason="Node.js not available")
-async def test_session_health_check_and_recovery(mcp_server_params, process_tracker):
- """Test that unhealthy sessions are properly detected and recreated."""
- process, initial_count = process_tracker
-
- session_manager = MCPSessionManager()
-
- try:
- # Create a session
- session1 = await session_manager.get_session("health_test", mcp_server_params, "stdio")
- tools_response = await wait_tools(session1)
- assert len(tools_response.tools) > 0
-
- # Simulate session becoming unhealthy by accessing internal state
- # This is a bit of a hack but necessary for testing
- server_key = session_manager._get_server_key(mcp_server_params, "stdio")
- if hasattr(session_manager, "sessions_by_server"):
- # For the fixed version
- sessions = session_manager.sessions_by_server.get(server_key, {})
- if sessions:
- session_id = next(iter(sessions.keys()))
- session_info = sessions[session_id]
- if "task" in session_info:
- task = session_info["task"]
- if not task.done():
- task.cancel()
- with contextlib.suppress(asyncio.CancelledError):
- await task
- elif hasattr(session_manager, "sessions"):
- # For the original version
- for session_info in session_manager.sessions.values():
- if "task" in session_info:
- task = session_info["task"]
- if not task.done():
- task.cancel()
- with contextlib.suppress(asyncio.CancelledError):
- await task
-
- # Wait a bit for the task to be cancelled
- await asyncio.sleep(1)
-
- # Try to get a session again - should create a new healthy one
- session2 = await session_manager.get_session("health_test_2", mcp_server_params, "stdio")
- tools_response = await wait_tools(session2)
- assert len(tools_response.tools) > 0
-
- finally:
- await session_manager.cleanup_all()
- await wait_no_children(process, max_wait=10, target=initial_count)
- await asyncio.sleep(2)
-
-
-@pytest.mark.asyncio
-@pytest.mark.skipif(not shutil.which("npx"), reason="Node.js not available")
-async def test_multiple_servers_isolation(process_tracker):
- """Test that different servers get separate sessions."""
- process, initial_count = process_tracker
-
- session_manager = MCPSessionManager()
-
- # Create parameters for different servers
- server1_params = StdioServerParameters(
- command="bash",
- args=["-c", "exec npx -y @modelcontextprotocol/server-everything"],
- env={"DEBUG": "true", "PATH": os.environ["PATH"]},
- )
-
- server2_params = StdioServerParameters(
- command="bash",
- args=["-c", "exec npx -y @modelcontextprotocol/server-everything"],
- env={"DEBUG": "false", "PATH": os.environ["PATH"]}, # Different env
- )
-
- try:
- # Create sessions for different servers
- session1 = await session_manager.get_session("server1_test", server1_params, "stdio")
- session2 = await session_manager.get_session("server2_test", server2_params, "stdio")
-
- # Verify both sessions work
- tools1 = await session1.list_tools()
- tools2 = await session2.list_tools()
-
- assert len(tools1.tools) > 0
- assert len(tools2.tools) > 0
-
- # Sessions should be different objects for different servers (different environments)
- # Since the servers have different environments, they should get different server keys
- server_key1 = session_manager._get_server_key(server1_params, "stdio")
- server_key2 = session_manager._get_server_key(server2_params, "stdio")
- assert server_key1 != server_key2, "Different server environments should generate different keys"
- assert session1 is not session2
-
- finally:
- await session_manager.cleanup_all()
- await wait_no_children(process, max_wait=10, target=initial_count)
- await asyncio.sleep(2)
-
-
-@pytest.mark.asyncio
-async def test_session_manager_server_key_generation():
- """Test that server key generation works correctly."""
- session_manager = MCPSessionManager()
-
- # Test stdio server key
- stdio_params = StdioServerParameters(
- command="test_command",
- args=["arg1", "arg2"],
- env={"TEST": "value"},
- )
-
- key1 = session_manager._get_server_key(stdio_params, "stdio")
- key2 = session_manager._get_server_key(stdio_params, "stdio")
-
- # Same parameters should generate same key
- assert key1 == key2
- assert key1.startswith("stdio_")
-
- # Different parameters should generate different keys
- stdio_params2 = StdioServerParameters(
- command="different_command",
- args=["arg1", "arg2"],
- env={"TEST": "value"},
- )
-
- key3 = session_manager._get_server_key(stdio_params2, "stdio")
- assert key1 != key3
-
- # Test SSE server key
- sse_params = {
- "url": "http://example.com/sse",
- "headers": {"Authorization": "Bearer token"},
- "timeout_seconds": 30,
- "sse_read_timeout_seconds": 30,
- }
-
- sse_key1 = session_manager._get_server_key(sse_params, "sse")
- sse_key2 = session_manager._get_server_key(sse_params, "sse")
-
- assert sse_key1 == sse_key2
- assert sse_key1.startswith("sse_")
-
- # Different URL should generate different key
- sse_params2 = sse_params.copy()
- sse_params2["url"] = "http://different.com/sse"
-
- sse_key3 = session_manager._get_server_key(sse_params2, "sse")
- assert sse_key1 != sse_key3
-
-
-@pytest.mark.asyncio
-async def test_session_manager_connectivity_validation():
- """Test session connectivity validation."""
- session_manager = MCPSessionManager()
-
- # Mock a session that responds to list_tools
- class MockSession:
- def __init__(self, should_fail=False): # noqa: FBT002
- self.should_fail = should_fail
-
- async def list_tools(self):
- if self.should_fail:
- msg = "Connection failed"
- raise Exception(msg) # noqa: TRY002
-
- class MockResponse:
- def __init__(self):
- self.tools = ["tool1", "tool2"]
-
- return MockResponse()
-
- # Test healthy session
- healthy_session = MockSession(should_fail=False)
- is_healthy = await session_manager._validate_session_connectivity(healthy_session)
- assert is_healthy is True
-
- # Test unhealthy session
- unhealthy_session = MockSession(should_fail=True)
- is_healthy = await session_manager._validate_session_connectivity(unhealthy_session)
- assert is_healthy is False
-
- # Test session that returns None
- class MockNoneSession:
- async def list_tools(self):
- return None
-
- none_session = MockNoneSession()
- is_healthy = await session_manager._validate_session_connectivity(none_session)
- assert is_healthy is False
-
-
-@pytest.mark.asyncio
-async def test_session_manager_cleanup_all(process_tracker):
- """Test that cleanup_all properly cleans up all sessions."""
- process, initial_count = process_tracker
- session_manager = MCPSessionManager()
-
- # Mock some sessions using the correct structure
- session_manager.sessions_by_server = {
- "server1": {
- "sessions": {
- "session1": {
- "session": "mock_session",
- "task": asyncio.create_task(asyncio.sleep(10)),
- "type": "stdio",
- "last_used": asyncio.get_event_loop().time(),
- }
- }
- },
- "server2": {
- "sessions": {
- "session2": {
- "session": "mock_session",
- "task": asyncio.create_task(asyncio.sleep(10)),
- "type": "sse",
- "last_used": asyncio.get_event_loop().time(),
- }
- }
- },
- }
-
- # Add some background tasks
- task1 = asyncio.create_task(asyncio.sleep(10))
- task2 = asyncio.create_task(asyncio.sleep(10))
- session_manager._background_tasks = {task1, task2}
-
- # Cleanup all
- await session_manager.cleanup_all()
- await wait_no_children(process, max_wait=10, target=initial_count)
-
- # Verify cleanup
- if hasattr(session_manager, "sessions_by_server"):
- # For fixed version
- assert len(session_manager.sessions_by_server) == 0
- elif hasattr(session_manager, "sessions"):
- # For original version
- assert len(session_manager.sessions) == 0
-
- # Verify background tasks were cancelled
- assert task1.done()
- assert task2.done()
diff --git a/src/backend/tests/integration/components/mock_components.py b/src/backend/tests/integration/components/mock_components.py
deleted file mode 100644
index 1c8655601649..000000000000
--- a/src/backend/tests/integration/components/mock_components.py
+++ /dev/null
@@ -1,24 +0,0 @@
-import json
-
-from lfx.custom import Component
-from lfx.inputs import BoolInput, StrInput
-from lfx.schema import Data
-from lfx.template import Output
-
-
-class TextToData(Component):
- inputs = [
- StrInput(name="text_data", is_list=True),
- BoolInput(name="is_json", info="Parse text_data as json and fill the data object."),
- ]
- outputs = [
- Output(name="from_text", display_name="From text", method="create_data"),
- ]
-
- def _to_data(self, text: str) -> Data:
- if self.is_json:
- return Data(data=json.loads(text))
- return Data(text=text)
-
- def create_data(self) -> list[Data]:
- return [self._to_data(t) for t in self.text_data]
diff --git a/src/backend/tests/integration/components/output_parsers/test_output_parser.py b/src/backend/tests/integration/components/output_parsers/test_output_parser.py
deleted file mode 100644
index 13de03d68d0e..000000000000
--- a/src/backend/tests/integration/components/output_parsers/test_output_parser.py
+++ /dev/null
@@ -1,40 +0,0 @@
-import os
-
-import pytest
-
-from lfx.components.helpers import OutputParserComponent
-from lfx.components.openai.openai_chat_model import OpenAIModelComponent
-from lfx.components.processing import PromptComponent
-from tests.integration.utils import ComponentInputHandle, run_single_component
-
-
-@pytest.mark.api_key_required
-async def test_csv_output_parser_openai():
- format_instructions_ = ComponentInputHandle(
- clazz=OutputParserComponent,
- inputs={},
- output_name="format_instructions",
- )
- output_parser_handle = ComponentInputHandle(
- clazz=OutputParserComponent,
- inputs={},
- output_name="output_parser",
- )
- prompt_handler = ComponentInputHandle(
- clazz=PromptComponent,
- inputs={
- "template": "List the first five positive integers.\n\n{format_instructions}",
- "format_instructions": format_instructions_,
- },
- output_name="prompt",
- )
-
- outputs = await run_single_component(
- OpenAIModelComponent,
- inputs={
- "api_key": os.environ["OPENAI_API_KEY"],
- "output_parser": output_parser_handle,
- "input_value": prompt_handler,
- },
- )
- assert outputs["text_output"] == "1, 2, 3, 4, 5"
diff --git a/src/backend/tests/integration/components/outputs/test_chat_output.py b/src/backend/tests/integration/components/outputs/test_chat_output.py
deleted file mode 100644
index a88b5b9c66bb..000000000000
--- a/src/backend/tests/integration/components/outputs/test_chat_output.py
+++ /dev/null
@@ -1,41 +0,0 @@
-from langflow.memory import aget_messages
-
-from lfx.components.input_output import ChatOutput
-from lfx.schema.message import Message
-from tests.integration.utils import run_single_component
-
-
-async def test_string():
- outputs = await run_single_component(ChatOutput, inputs={"input_value": "hello"})
- assert isinstance(outputs["message"], Message)
- assert outputs["message"].text == "hello"
- assert outputs["message"].sender == "Machine"
- assert outputs["message"].sender_name == "AI"
-
-
-async def test_message():
- outputs = await run_single_component(ChatOutput, inputs={"input_value": Message(text="hello")})
- assert isinstance(outputs["message"], Message)
- assert outputs["message"].text == "hello"
- assert outputs["message"].sender == "Machine"
- assert outputs["message"].sender_name == "AI"
-
-
-async def test_do_not_store_message():
- session_id = "test-session-id"
- outputs = await run_single_component(
- ChatOutput, inputs={"input_value": Message(text="hello"), "should_store_message": True}, session_id=session_id
- )
- assert isinstance(outputs["message"], Message)
- assert outputs["message"].text == "hello"
-
- assert len(await aget_messages(session_id=session_id)) == 1
- session_id = "test-session-id-another"
-
- outputs = await run_single_component(
- ChatOutput, inputs={"input_value": Message(text="hello"), "should_store_message": False}, session_id=session_id
- )
- assert isinstance(outputs["message"], Message)
- assert outputs["message"].text == "hello"
-
- assert len(await aget_messages(session_id=session_id)) == 0
diff --git a/src/backend/tests/integration/components/outputs/test_text_output.py b/src/backend/tests/integration/components/outputs/test_text_output.py
deleted file mode 100644
index 15b3dcbab759..000000000000
--- a/src/backend/tests/integration/components/outputs/test_text_output.py
+++ /dev/null
@@ -1,19 +0,0 @@
-from lfx.components.input_output import TextOutputComponent
-from lfx.schema.message import Message
-from tests.integration.utils import run_single_component
-
-
-async def test():
- outputs = await run_single_component(TextOutputComponent, inputs={"input_value": "hello"})
- assert isinstance(outputs["text"], Message)
- assert outputs["text"].text == "hello"
- assert outputs["text"].sender is None
- assert outputs["text"].sender_name is None
-
-
-async def test_message():
- outputs = await run_single_component(TextOutputComponent, inputs={"input_value": Message(text="hello")})
- assert isinstance(outputs["text"], Message)
- assert outputs["text"].text == "hello"
- assert outputs["text"].sender is None
- assert outputs["text"].sender_name is None
diff --git a/src/backend/tests/integration/components/prompts/test_prompt.py b/src/backend/tests/integration/components/prompts/test_prompt.py
deleted file mode 100644
index 35af6711ad5e..000000000000
--- a/src/backend/tests/integration/components/prompts/test_prompt.py
+++ /dev/null
@@ -1,11 +0,0 @@
-from lfx.components.processing import PromptComponent
-from lfx.schema.message import Message
-from tests.integration.utils import pyleak_marker, run_single_component
-
-pytestmark = pyleak_marker()
-
-
-async def test():
- outputs = await run_single_component(PromptComponent, inputs={"template": "test {var1}", "var1": "from the var"})
- assert isinstance(outputs["prompt"], Message)
- assert outputs["prompt"].text == "test from the var"
diff --git a/src/backend/tests/integration/conftest.py b/src/backend/tests/integration/conftest.py
deleted file mode 100644
index 685c501139b9..000000000000
--- a/src/backend/tests/integration/conftest.py
+++ /dev/null
@@ -1,10 +0,0 @@
-import pytest
-
-
-@pytest.fixture(autouse=True)
-def _start_app(client):
- pass
-
-
-def pytest_configure(config):
- config.addinivalue_line("markers", "no_leaks: detect asyncio task leaks, thread leaks, and event loop blocking")
diff --git a/src/backend/tests/integration/flows/test_basic_prompting.py b/src/backend/tests/integration/flows/test_basic_prompting.py
deleted file mode 100644
index a108ba48d7ca..000000000000
--- a/src/backend/tests/integration/flows/test_basic_prompting.py
+++ /dev/null
@@ -1,19 +0,0 @@
-from lfx.components.input_output import ChatInput, ChatOutput
-from lfx.components.processing import PromptComponent
-from lfx.graph import Graph
-from lfx.schema.message import Message
-from tests.integration.utils import pyleak_marker, run_flow
-
-
-@pyleak_marker()
-async def test_simple_no_llm():
- graph = Graph()
- flow_input = graph.add_component(ChatInput())
- flow_output = graph.add_component(ChatOutput())
- component = PromptComponent(template="This is the message: {var1}", var1="")
- prompt = graph.add_component(component)
- graph.add_component_edge(flow_input, ("message", "var1"), prompt)
- graph.add_component_edge(prompt, ("prompt", "input_value"), flow_output)
- outputs = await run_flow(graph, run_input="hello!")
- assert isinstance(outputs["message"], Message)
- assert outputs["message"].text == "This is the message: hello!"
diff --git a/src/backend/tests/integration/test_dynamic_import_integration.py b/src/backend/tests/integration/test_dynamic_import_integration.py
deleted file mode 100644
index 6d12353f0e00..000000000000
--- a/src/backend/tests/integration/test_dynamic_import_integration.py
+++ /dev/null
@@ -1,299 +0,0 @@
-"""Integration tests for dynamic import refactor.
-
-Tests the dynamic import system in realistic usage scenarios to ensure
-the refactor doesn't break existing functionality.
-"""
-
-import sys
-import time
-
-import pytest
-from langflow.components.agents import AgentComponent
-from langflow.components.data import APIRequestComponent
-from langflow.components.openai import OpenAIModelComponent
-
-
-class TestDynamicImportIntegration:
- """Integration tests for the dynamic import system."""
-
- def test_component_discovery_still_works(self):
- """Test that component discovery mechanisms still work after refactor."""
- # This tests that the existing component discovery logic
- # can still find and load components
- from langflow import components
-
- # Test that we can discover components through the main module
- openai_module = components.openai
- assert hasattr(openai_module, "OpenAIModelComponent")
-
- data_module = components.data
- assert hasattr(data_module, "APIRequestComponent")
-
- def test_existing_import_patterns_work(self):
- """Test that all existing import patterns continue to work."""
- # Test direct imports
- import langflow.components.data as data_comp
-
- # Test module imports
- import langflow.components.openai as openai_comp
-
- # All should work
- assert OpenAIModelComponent is not None
- assert APIRequestComponent is not None
- assert AgentComponent is not None
- assert openai_comp.OpenAIModelComponent is not None
- assert data_comp.APIRequestComponent is not None
-
- def test_component_instantiation_works(self):
- """Test that components can still be instantiated normally."""
- # Test that we can create component instances
- # (Note: Some components may require specific initialization parameters)
-
- from langflow.components.helpers import CalculatorComponent
-
- # Should be able to access the class
- assert CalculatorComponent is not None
- assert callable(CalculatorComponent)
-
- def test_template_creation_compatibility(self):
- """Test that template creation still works with dynamic imports."""
- # Test accessing component attributes needed for templates
-
- # Components should have all necessary attributes for template creation
- assert hasattr(OpenAIModelComponent, "__name__")
- assert hasattr(OpenAIModelComponent, "__module__")
- assert hasattr(OpenAIModelComponent, "display_name")
- assert isinstance(OpenAIModelComponent.display_name, str)
- assert OpenAIModelComponent.display_name
- assert hasattr(OpenAIModelComponent, "description")
- assert isinstance(OpenAIModelComponent.description, str)
- assert OpenAIModelComponent.description
- assert hasattr(OpenAIModelComponent, "icon")
- assert isinstance(OpenAIModelComponent.icon, str)
- assert OpenAIModelComponent.icon
- assert hasattr(OpenAIModelComponent, "inputs")
- assert isinstance(OpenAIModelComponent.inputs, list)
- assert len(OpenAIModelComponent.inputs) > 0
- # Check that each input has required attributes
- for input_field in OpenAIModelComponent.inputs:
- assert hasattr(input_field, "name"), f"Input {input_field} missing 'name' attribute"
- assert hasattr(input_field, "display_name"), f"Input {input_field} missing 'display_name' attribute"
-
- def test_multiple_import_styles_same_result(self):
- """Test that different import styles yield the same component."""
- # Import the same component in different ways
- from langflow import components
- from langflow.components.openai import OpenAIModelComponent as DirectImport
-
- dynamic_import = components.openai.OpenAIModelComponent
-
- import langflow.components.openai as openai_module
-
- module_import = openai_module.OpenAIModelComponent
-
- # All three should be the exact same class object
- assert DirectImport is dynamic_import
- assert dynamic_import is module_import
- assert DirectImport is module_import
-
- def test_startup_performance_improvement(self):
- """Test that startup time is improved with lazy loading."""
- # This test measures the difference in import time
- # Fresh modules to test startup behavior
- modules_to_clean = [
- "langflow.components.vectorstores",
- "langflow.components.tools",
- "langflow.components.langchain_utilities",
- ]
-
- for module_name in modules_to_clean:
- if module_name in sys.modules:
- del sys.modules[module_name]
-
- # Time the import of a large module
- start_time = time.time()
- from langflow.components import chroma
-
- import_time = time.time() - start_time
-
- # Import time should be very fast (just loading the __init__.py)
- assert import_time < 0.1 # Should be well under 100ms
-
- # Test that we can access a component (it may already be cached from previous tests)
- # This is expected behavior in a test suite where components get cached
-
- # Now access a component - this should trigger loading
- start_time = time.time()
- chroma_component = chroma.ChromaVectorStoreComponent
- access_time = time.time() - start_time
-
- assert chroma_component is not None
- # Access time should still be reasonable
- assert access_time < 2.0 # Should be under 2 seconds
-
- def test_memory_usage_efficiency(self):
- """Test that memory usage is more efficient with lazy loading."""
- from langflow.components import processing
-
- # Count currently loaded components
- initial_component_count = len([k for k in processing.__dict__ if k.endswith("Component")])
-
- # Access just one component
- combine_text = processing.CombineTextComponent
- assert combine_text is not None
-
- # At least one more component should be loaded now
- after_one_access = len([k for k in processing.__dict__ if k.endswith("Component")])
- assert after_one_access >= initial_component_count
-
- # Access another component
- split_text = processing.SplitTextComponent
- assert split_text is not None
-
- # Should have at least one more component loaded
- after_two_access = len([k for k in processing.__dict__ if k.endswith("Component")])
- assert after_two_access >= after_one_access
-
- def test_error_handling_in_realistic_scenarios(self):
- """Test error handling in realistic usage scenarios."""
- from langflow import components
-
- # Test accessing non-existent component category
- with pytest.raises(AttributeError):
- _ = components.nonexistent_category
-
- # Test accessing non-existent component in valid category
- with pytest.raises(AttributeError):
- _ = components.openai.NonExistentComponent
-
- def test_ide_autocomplete_support(self):
- """Test that IDE autocomplete support still works."""
- import langflow.components.openai as openai_components
- from langflow import components
-
- # __dir__ should return all available components/modules
- main_dir = dir(components)
- assert "openai" in main_dir
- assert "data" in main_dir
- assert "agents" in main_dir
-
- openai_dir = dir(openai_components)
- assert "OpenAIModelComponent" in openai_dir
- assert "OpenAIEmbeddingsComponent" in openai_dir
-
- def test_concurrent_access(self):
- """Test that concurrent access to components works correctly."""
- import threading
-
- from langflow.components import helpers
-
- results = []
- errors = []
-
- def access_component():
- try:
- component = helpers.CalculatorComponent
- results.append(component)
- except Exception as e:
- errors.append(e)
-
- # Create multiple threads accessing the same component
- threads = []
- for _ in range(5):
- thread = threading.Thread(target=access_component)
- threads.append(thread)
- thread.start()
-
- # Wait for all threads to complete
- for thread in threads:
- thread.join()
-
- # Should have no errors
- assert len(errors) == 0
- assert len(results) == 5
-
- # All results should be the same component class
- first_result = results[0]
- for result in results[1:]:
- assert result is first_result
-
- def test_circular_import_prevention(self):
- """Test that the refactor doesn't introduce circular imports."""
- # This test ensures that importing components doesn't create
- # circular dependency issues
-
- # These imports should work without circular import errors
- from langflow import components
- from langflow.components import openai
-
- # Access components in different orders
- model1 = components.openai.OpenAIModelComponent
- model2 = openai.OpenAIModelComponent
- model3 = OpenAIModelComponent
-
- # All should be the same
- assert model1 is model2 is model3
-
- def test_large_scale_component_access(self):
- """Test accessing many components doesn't cause issues."""
- from langflow.components import datastax
-
- # Access multiple components rapidly
- components_accessed = []
- component_names = [
- "AstraDBVectorStoreComponent",
- "AstraDBChatComponent",
- "AstraDBToolComponent",
- "AstraDBCQLToolComponent",
- "AstraAssistantManager",
- ]
-
- for name in component_names:
- if hasattr(datastax, name):
- component = getattr(datastax, name)
- components_accessed.append(component)
-
- # Should have accessed multiple components without issues
- assert len(components_accessed) > 0
-
- # All should be different classes
- assert len(set(components_accessed)) == len(components_accessed)
-
- def test_component_metadata_preservation(self):
- """Test that component metadata is preserved after dynamic loading."""
- # Component should have all expected metadata
- assert hasattr(OpenAIModelComponent, "__name__")
- assert hasattr(OpenAIModelComponent, "__module__")
- assert hasattr(OpenAIModelComponent, "__doc__")
-
- # Module path should be correct
- assert "openai" in OpenAIModelComponent.__module__
-
- def test_backwards_compatibility_comprehensive(self):
- """Comprehensive test of backwards compatibility."""
- # Test all major import patterns that should still work
-
- # 1. Direct component imports
- from langflow.components.data import APIRequestComponent
-
- assert AgentComponent is not None
- assert APIRequestComponent is not None
-
- # 2. Module imports
- # 3. Main module access
- import langflow.components as comp
- import langflow.components.helpers as helpers_mod
- import langflow.components.openai as openai_mod
-
- # 4. Nested access
- nested_component = comp.openai.OpenAIModelComponent
- direct_component = openai_mod.OpenAIModelComponent
-
- # All patterns should work and yield consistent results
- assert openai_mod.OpenAIModelComponent is not None
- assert helpers_mod.CalculatorComponent is not None
- assert nested_component is direct_component
-
-
-if __name__ == "__main__":
- pytest.main([__file__, "-v"])
diff --git a/src/backend/tests/integration/test_exception_telemetry.py b/src/backend/tests/integration/test_exception_telemetry.py
deleted file mode 100644
index ed028564663a..000000000000
--- a/src/backend/tests/integration/test_exception_telemetry.py
+++ /dev/null
@@ -1,129 +0,0 @@
-"""Integration tests for exception telemetry."""
-
-import asyncio
-from unittest.mock import AsyncMock, MagicMock
-
-import pytest
-from langflow.services.telemetry.service import TelemetryService
-
-
-class TestExceptionTelemetryIntegration:
- """Integration test suite for exception telemetry functionality."""
-
- @pytest.mark.asyncio
- async def test_telemetry_http_request_format(self):
- """Integration test verifying the exact HTTP request sent to Scarf."""
- # Create service
- telemetry_service = TelemetryService.__new__(TelemetryService)
- telemetry_service.base_url = "https://mock-telemetry.example.com"
- telemetry_service.do_not_track = False
-
- # Mock successful response
- mock_response = MagicMock()
- mock_response.status_code = 200
- mock_client = AsyncMock()
- mock_client.get.return_value = mock_response
- telemetry_service.client = mock_client
-
- # Create a real exception to get realistic stack trace
- try:
-
- def nested_function():
- msg = "Integration test exception"
- raise ValueError(msg)
-
- nested_function()
- except ValueError as exc:
- real_exc = exc
-
- # Mock _queue_event to directly call send_telemetry_data
- async def mock_queue_event(event_tuple):
- func, payload, path = event_tuple
- await func(payload, path)
-
- telemetry_service._queue_event = mock_queue_event
-
- # Test the full flow
- await telemetry_service.log_exception(real_exc, "lifespan")
-
- # Verify the exact HTTP request that would be sent to Scarf
- mock_client.get.assert_called_once()
- call_args = mock_client.get.call_args
-
- # Verify URL
- assert call_args[0][0] == "https://mock-telemetry.example.com/exception"
-
- # Verify parameters match our schema
- params = call_args[1]["params"]
- assert params["exceptionType"] == "ValueError"
- assert "Integration test exception" in params["exceptionMessage"]
- assert params["exceptionContext"] == "lifespan"
- assert "stackTraceHash" in params
- assert len(params["stackTraceHash"]) == 16
-
- @pytest.mark.asyncio
- async def test_exception_telemetry_service_integration(self):
- """Integration test for exception telemetry service without FastAPI."""
- # Create service with mocked dependencies
- telemetry_service = TelemetryService.__new__(TelemetryService)
- telemetry_service.base_url = "https://mock-telemetry.example.com"
- telemetry_service.do_not_track = False
-
- # Mock the async queue and HTTP client
- telemetry_service.telemetry_queue = asyncio.Queue()
-
- # Track actual calls
- http_calls = []
-
- async def mock_send_data(payload, path):
- http_calls.append(
- {
- "url": f"{telemetry_service.base_url}/{path}",
- "payload": payload.model_dump(by_alias=True),
- "path": path,
- }
- )
-
- # Mock _queue_event to call our mock directly
- async def mock_queue_event(event_tuple):
- func, payload, path = event_tuple
- await mock_send_data(payload, path)
-
- telemetry_service._queue_event = mock_queue_event
-
- # Test with real exception
- test_exception = RuntimeError("Service integration test")
- await telemetry_service.log_exception(test_exception, "handler")
-
- # Verify the call was made with correct data
- assert len(http_calls) == 1
- call = http_calls[0]
-
- assert call["url"] == "https://mock-telemetry.example.com/exception"
- assert call["path"] == "exception"
- assert call["payload"]["exceptionType"] == "RuntimeError"
- assert call["payload"]["exceptionMessage"] == "Service integration test"
- assert call["payload"]["exceptionContext"] == "handler"
- assert "stackTraceHash" in call["payload"]
-
-
-@pytest.mark.asyncio
-async def test_exception_telemetry_end_to_end():
- """End-to-end integration test to verify telemetry flow works."""
- # Track if telemetry was called
- telemetry_called = []
-
- async def mock_log_exception(exc, context):
- telemetry_called.append({"type": type(exc).__name__, "message": str(exc), "context": context})
-
- # Test that we can create the payload and it works
- test_exc = RuntimeError("End-to-end integration test")
-
- # Simulate what the exception handler does
- await mock_log_exception(test_exc, "handler")
-
- # Verify telemetry was "called"
- assert len(telemetry_called) == 1
- assert telemetry_called[0]["type"] == "RuntimeError"
- assert telemetry_called[0]["message"] == "End-to-end integration test"
- assert telemetry_called[0]["context"] == "handler"
diff --git a/src/backend/tests/integration/test_misc.py b/src/backend/tests/integration/test_misc.py
deleted file mode 100644
index 43b1986f81ed..000000000000
--- a/src/backend/tests/integration/test_misc.py
+++ /dev/null
@@ -1,87 +0,0 @@
-from uuid import uuid4
-
-import pytest
-from fastapi import status
-from httpx import AsyncClient
-from langflow.initial_setup.setup import load_starter_projects
-
-from lfx.graph.schema import RunOutputs
-from lfx.load.load import arun_flow_from_json
-
-
-@pytest.mark.api_key_required
-async def test_run_flow_with_caching_success(client: AsyncClient, starter_project, created_api_key):
- flow_id = starter_project["id"]
- headers = {"x-api-key": created_api_key.api_key}
- payload = {
- "input_value": "value1",
- "input_type": "text",
- "output_type": "text",
- "tweaks": {"parameter_name": "value"},
- "stream": False,
- }
- response = await client.post(f"/api/v1/run/{flow_id}", json=payload, headers=headers)
- assert response.status_code == status.HTTP_200_OK
- data = response.json()
- assert "outputs" in data
- assert "session_id" in data
-
-
-@pytest.mark.api_key_required
-async def test_run_flow_with_caching_invalid_flow_id(client: AsyncClient, created_api_key):
- invalid_flow_id = uuid4()
- headers = {"x-api-key": created_api_key.api_key}
- payload = {"input_value": "", "input_type": "text", "output_type": "text", "tweaks": {}, "stream": False}
- response = await client.post(f"/api/v1/run/{invalid_flow_id}", json=payload, headers=headers)
- assert response.status_code == status.HTTP_404_NOT_FOUND
- data = response.json()
- assert "detail" in data
- assert f"Flow identifier {invalid_flow_id} not found" in data["detail"]
-
-
-@pytest.mark.api_key_required
-async def test_run_flow_with_caching_invalid_input_format(client: AsyncClient, starter_project, created_api_key):
- flow_id = starter_project["id"]
- headers = {"x-api-key": created_api_key.api_key}
- payload = {"input_value": {"key": "value"}, "input_type": "text", "output_type": "text", "tweaks": {}}
- response = await client.post(f"/api/v1/run/{flow_id}", json=payload, headers=headers)
- assert response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY
-
-
-@pytest.mark.api_key_required
-async def test_run_flow_with_invalid_tweaks(client, starter_project, created_api_key):
- headers = {"x-api-key": created_api_key.api_key}
- flow_id = starter_project["id"]
- payload = {
- "input_value": "value1",
- "input_type": "text",
- "output_type": "text",
- "tweaks": {"invalid_tweak": "value"},
- }
- response = await client.post(f"/api/v1/run/{flow_id}", json=payload, headers=headers)
- assert response.status_code == status.HTTP_200_OK
-
-
-@pytest.mark.api_key_required
-async def test_run_with_inputs_and_outputs(client, starter_project, created_api_key):
- headers = {"x-api-key": created_api_key.api_key}
- flow_id = starter_project["id"]
- payload = {
- "input_value": "value1",
- "input_type": "text",
- "output_type": "text",
- "tweaks": {"parameter_name": "value"},
- "stream": False,
- }
- response = await client.post(f"/api/v1/run/{flow_id}", json=payload, headers=headers)
- assert response.status_code == status.HTTP_200_OK, response.text
-
-
-@pytest.mark.noclient
-@pytest.mark.api_key_required
-async def test_run_flow_from_json_object():
- """Test loading a flow from a json file and applying tweaks."""
- project = next(project for _, project in await load_starter_projects() if "Basic Prompting" in project["name"])
- results = await arun_flow_from_json(project, input_value="test", fallback_to_env_vars=True)
- assert results is not None
- assert all(isinstance(result, RunOutputs) for result in results)
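
The removed tests above all drive the `/api/v1/run/{flow_id}` endpoint with the same payload shape. A minimal standalone sketch of that request, assuming a local server and placeholder flow ID and API key:

```python
import asyncio

import httpx

BASE_URL = "http://localhost:7860"  # placeholder
FLOW_ID = "your-flow-id"            # placeholder: a flow UUID or endpoint name
API_KEY = "your-api-key"            # placeholder: sent as the x-api-key header


async def run_flow() -> dict:
    payload = {
        "input_value": "value1",
        "input_type": "text",
        "output_type": "text",
        "tweaks": {},  # unknown tweak keys are accepted, per the tests above
        "stream": False,
    }
    async with httpx.AsyncClient(base_url=BASE_URL) as client:
        resp = await client.post(
            f"/api/v1/run/{FLOW_ID}", json=payload, headers={"x-api-key": API_KEY}
        )
        resp.raise_for_status()
        return resp.json()  # contains "outputs" and "session_id"


if __name__ == "__main__":
    print(asyncio.run(run_flow()))
```
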
diff --git a/src/backend/tests/integration/test_openai_responses_extended.py b/src/backend/tests/integration/test_openai_responses_extended.py
deleted file mode 100644
index 823d40cbd11d..000000000000
--- a/src/backend/tests/integration/test_openai_responses_extended.py
+++ /dev/null
@@ -1,504 +0,0 @@
-import asyncio
-import json
-import os
-import pathlib
-
-import pytest
-from dotenv import load_dotenv
-from httpx import AsyncClient
-
-from lfx.log.logger import logger
-
-
-# Load environment variables from .env file
-def load_env_vars():
- """Load environment variables from .env files."""
- # Try to find .env file in various locations
- possible_paths = [
- pathlib.Path(".env"), # Current directory
- pathlib.Path("../../.env"), # Project root
- pathlib.Path("../../../.env"), # One level up from project root
- ]
-
- for env_path in possible_paths:
- if env_path.exists():
- logger.info(f"Loading environment variables from {env_path.absolute()}")
- load_dotenv(env_path)
- return True
-
- logger.warning("No .env file found. Using existing environment variables.")
- return False
-
-
-# Load environment variables at module import time
-load_env_vars()
-
-
-async def create_global_variable(client: AsyncClient, headers, name, value, variable_type="credential"):
- """Create a global variable in Langflow."""
- payload = {"name": name, "value": value, "type": variable_type, "default_fields": []}
-
- response = await client.post("/api/v1/variables/", json=payload, headers=headers)
- if response.status_code != 201:
- logger.error(f"Failed to create global variable: {response.content}")
- return False
-
- logger.info(f"Successfully created global variable: {name}")
- return True
-
-
-async def load_and_prepare_flow(client: AsyncClient, created_api_key):
- """Load a flow template, create it, and wait for it to be ready."""
- # Set up headers
- headers = {"x-api-key": created_api_key.api_key}
-
- # Create OPENAI_API_KEY global variable
- openai_api_key = os.getenv("OPENAI_API_KEY")
- if not openai_api_key:
- pytest.skip("OPENAI_API_KEY environment variable not set")
-
- await create_global_variable(client, headers, "OPENAI_API_KEY", openai_api_key)
-
- # Load the Basic Prompting template
- template_path = (
- pathlib.Path(__file__).resolve().parent.parent.parent
- / "base"
- / "langflow"
- / "initial_setup"
- / "starter_projects"
- / "Basic Prompting.json"
- )
-
- flow_data = await asyncio.to_thread(lambda: json.loads(pathlib.Path(template_path).read_text()))
-
- # Add the flow
- response = await client.post("/api/v1/flows/", json=flow_data, headers=headers)
- logger.info(f"Flow creation response: {response.status_code}")
-
- assert response.status_code == 201
- flow = response.json()
-
- # Poll for flow builds to complete
- max_attempts = 10
- for attempt in range(max_attempts):
- # Get the flow builds
- builds_response = await client.get(f"/api/v1/monitor/builds?flow_id={flow['id']}", headers=headers)
-
- if builds_response.status_code == 200:
- builds = builds_response.json().get("vertex_builds", {})
- # Check if builds are complete
- all_valid = True
- for build_list in builds.values():
- if not build_list or build_list[0].get("valid") is not True:
- all_valid = False
- break
-
- if all_valid and builds:
- logger.info(f"Flow builds completed successfully after {attempt + 1} attempts")
- break
-
- # Wait before polling again
- if attempt < max_attempts - 1:
- logger.info(f"Waiting for flow builds to complete (attempt {attempt + 1}/{max_attempts})...")
- await asyncio.sleep(1)
- else:
- logger.warning("Flow builds polling timed out, proceeding anyway")
-
- return flow, headers
-
-
-@pytest.mark.api_key_required
-@pytest.mark.integration
-async def test_openai_responses_invalid_flow_id(client: AsyncClient, created_api_key):
- """Test the OpenAI responses endpoint with an invalid flow ID."""
- headers = {"x-api-key": created_api_key.api_key}
-
- # Test with non-existent flow ID
- payload = {"model": "non-existent-flow-id", "input": "Hello", "stream": False}
-
- response = await client.post("/api/v1/responses", json=payload, headers=headers)
-
-    assert response.status_code == 200  # errors are returned OpenAI-style: HTTP 200 with an error body
- data = response.json()
- assert "error" in data
- assert isinstance(data["error"], dict)
- assert data["error"]["type"] == "invalid_request_error"
- assert "not found" in data["error"]["message"].lower()
-
-
-@pytest.mark.api_key_required
-@pytest.mark.integration
-async def test_openai_responses_with_tools(client: AsyncClient, created_api_key):
- """Test that tools parameter is rejected."""
- flow, headers = await load_and_prepare_flow(client, created_api_key)
-
- # Test with tools parameter
- payload = {
- "model": flow["id"],
- "input": "Hello",
- "stream": False,
- "tools": [{"type": "function", "function": {"name": "test", "parameters": {}}}],
- }
-
- response = await client.post("/api/v1/responses", json=payload, headers=headers)
-
-    assert response.status_code == 200  # errors are returned OpenAI-style: HTTP 200 with an error body
- data = response.json()
- assert "error" in data
- assert isinstance(data["error"], dict)
- assert data["error"]["type"] == "invalid_request_error"
- assert data["error"]["code"] == "tools_not_supported"
- assert "tools are not supported" in data["error"]["message"].lower()
-
-
-@pytest.mark.api_key_required
-@pytest.mark.integration
-async def test_openai_responses_empty_input(client: AsyncClient, created_api_key):
- """Test the OpenAI responses endpoint with empty input."""
- flow, headers = await load_and_prepare_flow(client, created_api_key)
-
- # Test with empty input
- payload = {"model": flow["id"], "input": "", "stream": False}
-
- response = await client.post("/api/v1/responses", json=payload, headers=headers)
- logger.info(f"Empty input response status: {response.status_code}")
-
- # The flow might still process empty input, so we check for a valid response structure
- data = response.json()
-
- if "error" not in data or data["error"] is None:
- # Valid response even with empty input
- assert "id" in data
- assert "output" in data
- assert "created_at" in data
- assert data["object"] == "response"
- else:
- # Some flows might reject empty input
- assert isinstance(data["error"], dict)
- assert "message" in data["error"]
-
-
-@pytest.mark.api_key_required
-@pytest.mark.integration
-async def test_openai_responses_long_input(client: AsyncClient, created_api_key):
- """Test the OpenAI responses endpoint with very long input."""
- flow, headers = await load_and_prepare_flow(client, created_api_key)
-
- # Create a very long input
- long_input = "Hello " * 1000 # ~6000 characters
- payload = {"model": flow["id"], "input": long_input, "stream": False}
-
- response = await client.post("/api/v1/responses", json=payload, headers=headers)
-
- assert response.status_code == 200
- data = response.json()
-
- if "error" not in data:
- assert "id" in data
- assert "output" in data
- assert isinstance(data["output"], str)
-
-
-@pytest.mark.api_key_required
-@pytest.mark.integration
-async def test_openai_responses_streaming_error_handling(client: AsyncClient, created_api_key):
- """Test streaming response error handling."""
- headers = {"x-api-key": created_api_key.api_key}
-
- # Test with invalid flow ID in streaming mode
- payload = {"model": "invalid-flow-id", "input": "Hello", "stream": True}
-
- response = await client.post("/api/v1/responses", json=payload, headers=headers)
-
- # For streaming errors, we should still get a 200 status but with error in the response
- assert response.status_code == 200
-
- # Read the response content
- content = await response.aread()
- text_content = content.decode("utf-8")
-
- # Should contain error information in JSON format, not SSE
- data = json.loads(text_content)
- assert "error" in data
- assert isinstance(data["error"], dict)
- assert data["error"]["type"] == "invalid_request_error"
-
-
-@pytest.mark.api_key_required
-@pytest.mark.integration
-async def test_openai_responses_concurrent_requests(client: AsyncClient, created_api_key):
- """Test handling of concurrent requests to the same flow."""
- flow, headers = await load_and_prepare_flow(client, created_api_key)
-
- # Create multiple concurrent requests
- payloads = [{"model": flow["id"], "input": f"Request {i}", "stream": False} for i in range(5)]
-
- # Send all requests concurrently
- tasks = [client.post("/api/v1/responses", json=payload, headers=headers) for payload in payloads]
-
- responses = await asyncio.gather(*tasks)
-
- # All requests should succeed
- for i, response in enumerate(responses):
- assert response.status_code == 200
- data = response.json()
-
- if "error" not in data:
- assert "id" in data
- assert "output" in data
- # Each response should have a unique ID
- assert all(
- data["id"] != other.json()["id"]
- for j, other in enumerate(responses)
- if i != j and "error" not in other.json()
- )
-
-
-@pytest.mark.api_key_required
-@pytest.mark.integration
-async def test_openai_responses_unauthorized(client: AsyncClient):
- """Test the OpenAI responses endpoint without authentication."""
- payload = {"model": "some-flow-id", "input": "Hello", "stream": False}
-
- # No headers = no authentication
- response = await client.post("/api/v1/responses", json=payload)
-
- # Should get 403 Forbidden
- assert response.status_code == 403
-
-
-@pytest.mark.api_key_required
-@pytest.mark.integration
-async def test_openai_responses_invalid_api_key(client: AsyncClient):
- """Test the OpenAI responses endpoint with invalid API key."""
- headers = {"x-api-key": "invalid-api-key-12345"}
- payload = {"model": "some-flow-id", "input": "Hello", "stream": False}
-
- response = await client.post("/api/v1/responses", json=payload, headers=headers)
-
- # Should get 403 Forbidden
- assert response.status_code == 403
-
-
-@pytest.mark.api_key_required
-@pytest.mark.integration
-async def test_openai_responses_malformed_request(client: AsyncClient, created_api_key):
- """Test the OpenAI responses endpoint with malformed requests."""
- headers = {"x-api-key": created_api_key.api_key}
-
- # Missing required fields
- test_cases = [
- {}, # Empty payload
- {"model": "flow-id"}, # Missing input
- {"input": "Hello"}, # Missing model
- {"model": 123, "input": "Hello"}, # Wrong type for model
- {"model": "flow-id", "input": 123}, # Wrong type for input
- {"model": "flow-id", "input": "Hello", "stream": "yes"}, # Wrong type for stream
- ]
-
- for payload in test_cases:
- response = await client.post("/api/v1/responses", json=payload, headers=headers)
- # OpenAI API returns validation errors as 200 with error in body or 422
- if response.status_code == 200:
- data = response.json()
- assert "error" in data
- assert isinstance(data["error"], dict)
- assert "message" in data["error"]
- else:
- # Should get 422 Unprocessable Entity for validation errors
- assert response.status_code == 422
-
-
-@pytest.mark.api_key_required
-@pytest.mark.integration
-async def test_openai_responses_stream_interruption(client: AsyncClient, created_api_key):
- """Test behavior when streaming is interrupted."""
- flow, headers = await load_and_prepare_flow(client, created_api_key)
-
- payload = {"model": flow["id"], "input": "Tell me a long story", "stream": True}
-
- response = await client.post("/api/v1/responses", json=payload, headers=headers)
- assert response.status_code == 200
-
-    # The whole body is read here; a true mid-stream disconnect is not simulated
- content = await response.aread()
- text_content = content.decode("utf-8")
-
- # Should have received at least some data
- assert len(content) > 0
- # Check for either data: or valid response content
- assert "data:" in text_content or "id" in text_content
-
-
-@pytest.mark.api_key_required
-@pytest.mark.integration
-async def test_openai_responses_background_processing(client: AsyncClient, created_api_key):
- """Test background processing parameter."""
- flow, headers = await load_and_prepare_flow(client, created_api_key)
-
- # Test with background=True
- payload = {"model": flow["id"], "input": "Hello", "background": True, "stream": False}
-
- response = await client.post("/api/v1/responses", json=payload, headers=headers)
- assert response.status_code == 200
-
- data = response.json()
- if "error" not in data or data["error"] is None:
- assert "id" in data
- assert "status" in data
- # Background processing might change the status
- assert data["status"] in ["completed", "in_progress"]
-
-
-@pytest.mark.api_key_required
-@pytest.mark.integration
-async def test_openai_responses_previous_response_id(client: AsyncClient, created_api_key):
- """Test previous_response_id parameter for conversation continuity."""
- flow, headers = await load_and_prepare_flow(client, created_api_key)
-
- # First request
- payload1 = {"model": flow["id"], "input": "Hello", "stream": False}
- response1 = await client.post("/api/v1/responses", json=payload1, headers=headers)
- assert response1.status_code == 200
-
- data1 = response1.json()
- if "error" not in data1 or data1["error"] is None:
- first_response_id = data1["id"]
-
- # Second request with previous_response_id
- payload2 = {
- "model": flow["id"],
- "input": "Continue our conversation",
- "previous_response_id": first_response_id,
- "stream": False,
- }
- response2 = await client.post("/api/v1/responses", json=payload2, headers=headers)
- assert response2.status_code == 200
-
- data2 = response2.json()
- if "error" not in data2 or data2["error"] is None:
- # The previous_response_id might be preserved in the response
- # This depends on the implementation, so we just check it doesn't error
- # We'll just verify that the request was processed successfully
- assert "id" in data2
- assert "output" in data2
-
-
-@pytest.mark.api_key_required
-@pytest.mark.integration
-async def test_openai_responses_response_format(client: AsyncClient, created_api_key):
- """Test OpenAI response format compliance."""
- flow, headers = await load_and_prepare_flow(client, created_api_key)
-
- payload = {"model": flow["id"], "input": "Hello", "stream": False}
- response = await client.post("/api/v1/responses", json=payload, headers=headers)
-
- assert response.status_code == 200
- data = response.json()
-
- if "error" not in data or data["error"] is None:
- # Check OpenAI response format compliance
- required_fields = ["id", "object", "created_at", "status", "model", "output"]
- for field in required_fields:
- assert field in data, f"Missing required field: {field}"
-
- # Check field types and values
- assert isinstance(data["id"], str)
- assert data["object"] == "response"
- assert isinstance(data["created_at"], int)
- assert data["status"] in ["completed", "in_progress", "failed"]
- assert isinstance(data["model"], str)
- assert isinstance(data["output"], list)
-
- # Check optional fields with expected defaults
- assert data["parallel_tool_calls"] is True
- assert data["store"] is True
- assert data["temperature"] == 1.0
- assert data["top_p"] == 1.0
- assert data["truncation"] == "disabled"
- assert data["tool_choice"] == "auto"
- assert isinstance(data["tools"], list)
- assert isinstance(data["reasoning"], dict)
- assert isinstance(data["text"], dict)
- assert isinstance(data["metadata"], dict)
-
-
-@pytest.mark.api_key_required
-@pytest.mark.integration
-async def test_openai_responses_stream_chunk_format(client: AsyncClient, created_api_key):
- """Test OpenAI streaming response chunk format compliance."""
- flow, headers = await load_and_prepare_flow(client, created_api_key)
-
- payload = {"model": flow["id"], "input": "Hello", "stream": True}
- response = await client.post("/api/v1/responses", json=payload, headers=headers)
-
- assert response.status_code == 200
-
- content = await response.aread()
- text_content = content.decode("utf-8")
-
- # Parse the events
- events = text_content.strip().split("\n\n")
- data_events = [evt for evt in events if evt.startswith("data:") and not evt.startswith("data: [DONE]")]
-
- if data_events:
- # Check first chunk format
- first_chunk_json = data_events[0].replace("data: ", "")
- try:
- first_chunk = json.loads(first_chunk_json)
-
- # Basic checks for streaming response
- assert "id" in first_chunk
- assert "delta" in first_chunk
- assert isinstance(first_chunk["id"], str)
- assert isinstance(first_chunk["delta"], dict)
-
- # Check OpenAI stream chunk format compliance if fields exist
- if "object" in first_chunk:
- assert first_chunk["object"] == "response.chunk"
- if "created" in first_chunk:
- assert isinstance(first_chunk["created"], int)
- if "model" in first_chunk:
- assert isinstance(first_chunk["model"], str)
-
- # Status is optional in chunks and can be None
- if "status" in first_chunk and first_chunk["status"] is not None:
- assert first_chunk["status"] in ["completed", "in_progress", "failed"]
- except json.JSONDecodeError:
- # If streaming format is different or not JSON, just ensure we have data
- assert len(data_events) > 0
- else:
- # If no streaming chunks, ensure we have the [DONE] marker or valid response
- assert "data: [DONE]" in text_content or len(text_content) > 0
-
-
-@pytest.mark.api_key_required
-@pytest.mark.integration
-async def test_openai_responses_rate_limiting_simulation(client: AsyncClient, created_api_key):
- """Test behavior under rapid successive requests."""
- flow, headers = await load_and_prepare_flow(client, created_api_key)
-
- # Send 10 rapid requests
- rapid_requests = []
- for i in range(10):
- payload = {"model": flow["id"], "input": f"Rapid request {i}", "stream": False}
- rapid_requests.append(client.post("/api/v1/responses", json=payload, headers=headers))
-
- # Wait for all requests to complete
- responses = await asyncio.gather(*rapid_requests, return_exceptions=True)
-
- # Check that most requests succeeded (allowing for some potential failures)
- successful_responses = [r for r in responses if not isinstance(r, Exception) and r.status_code == 200]
-
- # At least 50% should succeed
- assert len(successful_responses) >= 5
-
- # Check that successful responses have unique IDs
- response_ids = []
- for response in successful_responses:
- data = response.json()
- if "error" not in data or data["error"] is None:
- response_ids.append(data["id"])
-
- # All response IDs should be unique
- assert len(response_ids) == len(set(response_ids))
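
The streaming tests above repeat one parsing idiom: split the SSE body on blank lines, keep `data:` events, skip the `[DONE]` sentinel, and decode the rest as JSON. A self-contained sketch of that helper (the function name is ours, not from the test suite):

```python
import json


def parse_sse_events(text: str) -> list[dict]:
    """Parse an SSE body into JSON payloads, skipping the [DONE] sentinel."""
    events = []
    for block in text.strip().split("\n\n"):
        for line in block.strip().split("\n"):
            if not line.startswith("data:"):
                continue
            data = line[len("data:"):].strip()
            if data == "[DONE]":
                break  # end-of-stream marker, not a JSON payload
            try:
                events.append(json.loads(data))
            except json.JSONDecodeError:
                pass  # tolerate non-JSON lines, as the tests above do
            break  # one data payload per event block
    return events


# Each chunk streamed by /api/v1/responses carries a "delta" dict
sample = 'data: {"id": "resp_1", "delta": {"content": "Hi"}}\n\ndata: [DONE]\n\n'
assert parse_sse_events(sample) == [{"id": "resp_1", "delta": {"content": "Hi"}}]
```
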
diff --git a/src/backend/tests/integration/test_openai_responses_integration.py b/src/backend/tests/integration/test_openai_responses_integration.py
deleted file mode 100644
index 800cf3ceba0c..000000000000
--- a/src/backend/tests/integration/test_openai_responses_integration.py
+++ /dev/null
@@ -1,168 +0,0 @@
-import asyncio
-import json
-import os
-import pathlib
-
-import pytest
-from dotenv import find_dotenv, load_dotenv
-from httpx import AsyncClient
-
-from lfx.log.logger import logger
-
-load_dotenv(find_dotenv())
-
-
-async def create_global_variable(client: AsyncClient, headers, name, value, variable_type="credential"):
- """Create a global variable in Langflow."""
- payload = {"name": name, "value": value, "type": variable_type, "default_fields": []}
-
- response = await client.post("/api/v1/variables/", json=payload, headers=headers)
- if response.status_code != 201:
- logger.error(f"Failed to create global variable: {response.content}")
- return False
-
- logger.info(f"Successfully created global variable: {name}")
- return True
-
-
-async def load_and_prepare_flow(client: AsyncClient, created_api_key):
- """Load a flow template, create it, and wait for it to be ready."""
- # Set up headers
- headers = {"x-api-key": created_api_key.api_key}
-
- # Create OPENAI_API_KEY global variable
- openai_api_key = os.getenv("OPENAI_API_KEY")
- if not openai_api_key:
- pytest.skip("OPENAI_API_KEY environment variable not set")
-
- await create_global_variable(client, headers, "OPENAI_API_KEY", openai_api_key)
-
- # Load the Basic Prompting template
- template_path = (
- pathlib.Path(__file__).resolve().parent.parent.parent
- / "base"
- / "langflow"
- / "initial_setup"
- / "starter_projects"
- / "Basic Prompting.json"
- )
-
- flow_data = await asyncio.to_thread(lambda: json.loads(pathlib.Path(template_path).read_text()))
-
- # Add the flow
- response = await client.post("/api/v1/flows/", json=flow_data, headers=headers)
- logger.info(f"Flow creation response: {response.status_code}")
-
- assert response.status_code == 201
- flow = response.json()
-
- # Poll for flow builds to complete
- max_attempts = 10
- for attempt in range(max_attempts):
- # Get the flow builds
- builds_response = await client.get(f"/api/v1/monitor/builds?flow_id={flow['id']}", headers=headers)
-
- if builds_response.status_code == 200:
- builds = builds_response.json().get("vertex_builds", {})
- # Check if builds are complete
- all_valid = True
- for build_list in builds.values():
- if not build_list or build_list[0].get("valid") is not True:
- all_valid = False
- break
-
- if all_valid and builds:
- logger.info(f"Flow builds completed successfully after {attempt + 1} attempts")
- break
-
- # Wait before polling again
- if attempt < max_attempts - 1:
- logger.info(f"Waiting for flow builds to complete (attempt {attempt + 1}/{max_attempts})...")
- await asyncio.sleep(1)
- else:
- logger.warning("Flow builds polling timed out, proceeding anyway")
-
- return flow, headers
-
-
-@pytest.mark.api_key_required
-@pytest.mark.integration
-async def test_openai_responses_non_streaming(client: AsyncClient, created_api_key):
- """Test the OpenAI-compatible non-streaming responses endpoint directly."""
- flow, headers = await load_and_prepare_flow(client, created_api_key)
-
- # Now test the OpenAI-compatible endpoint
- payload = {"model": flow["id"], "input": "Hello, Langflow!", "stream": False}
-
- # Make the request
- response = await client.post("/api/v1/responses", json=payload, headers=headers)
- logger.info(f"Response status: {response.status_code}")
- logger.debug(f"Response content: {response.content}")
-
- # Handle potential errors
- if response.status_code != 200:
- logger.error(f"Error response: {response.content}")
- pytest.fail(f"Request failed with status {response.status_code}")
-
- try:
- data = response.json()
- if "error" in data and data["error"] is not None:
- logger.error(f"Error in response: {data['error']}")
- # Don't fail immediately, log more details for debugging
- logger.error(f"Full error details: {data}")
- error_msg = "Unknown error"
- if isinstance(data.get("error"), dict):
- error_msg = data["error"].get("message", "Unknown error")
- elif data.get("error"):
- error_msg = str(data["error"])
- pytest.fail(f"Error in response: {error_msg}")
-
- # Validate the response
- assert "id" in data
- assert "output" in data
- except Exception as exc:
- logger.exception("Exception parsing response")
- pytest.fail(f"Failed to parse response: {exc}")
-
-
-@pytest.mark.api_key_required
-@pytest.mark.integration
-async def test_openai_responses_streaming(client: AsyncClient, created_api_key):
- """Test the OpenAI-compatible streaming responses endpoint directly."""
- flow, headers = await load_and_prepare_flow(client, created_api_key)
-
- # Now test the OpenAI-compatible streaming endpoint
- payload = {"model": flow["id"], "input": "Hello, stream!", "stream": True}
-
- # Make the request
- response = await client.post("/api/v1/responses", json=payload, headers=headers)
- logger.info(f"Response status: {response.status_code}")
-
- # Handle potential errors
- if response.status_code != 200:
- logger.error(f"Error response: {response.content}")
- pytest.fail(f"Request failed with status {response.status_code}")
-
- # For streaming, we should get a stream of server-sent events
- content = await response.aread()
- text_content = content.decode("utf-8")
- logger.debug(f"Response content (first 200 chars): {text_content[:200]}")
-
- # Check that we got some SSE data events
- assert "data:" in text_content
-
- # Parse the events to validate structure and final [DONE] marker
- events = text_content.strip().split("\n\n")
- # The stream must end with the OpenAI '[DONE]' sentinel
- assert events, "No events in stream"
- assert events[-1].strip() == "data: [DONE]", "Stream did not end with [DONE] marker"
-
- # Filter out the [DONE] marker to inspect JSON data events
- data_events = [evt for evt in events if evt.startswith("data:") and not evt.startswith("data: [DONE]")]
- assert data_events, "No streaming events were received"
-
- # Parse the first and last JSON events to check their structure
- first_event = json.loads(data_events[0].replace("data: ", ""))
- last_event = json.loads(data_events[-1].replace("data: ", ""))
- assert "delta" in first_event
- assert "delta" in last_event
diff --git a/src/backend/tests/integration/test_openai_streaming_comparison.py b/src/backend/tests/integration/test_openai_streaming_comparison.py
deleted file mode 100644
index f8f0921e5262..000000000000
--- a/src/backend/tests/integration/test_openai_streaming_comparison.py
+++ /dev/null
@@ -1,344 +0,0 @@
-import asyncio
-import json
-import os
-import pathlib
-
-import httpx
-import pytest
-from dotenv import load_dotenv
-from httpx import AsyncClient
-
-from lfx.log.logger import logger
-
-
-# Load environment variables from .env file
-def load_env_vars():
- """Load environment variables from .env files."""
- possible_paths = [
- pathlib.Path(".env"),
- pathlib.Path("../../.env"),
- pathlib.Path("../../../.env"),
- ]
-
- for env_path in possible_paths:
- if env_path.exists():
- logger.info(f"Loading environment variables from {env_path.absolute()}")
- load_dotenv(env_path)
- return True
-
- logger.warning("No .env file found. Using existing environment variables.")
- return False
-
-
-# Load environment variables at module import time
-load_env_vars()
-
-
-async def create_global_variable(client: AsyncClient, headers, name, value, variable_type="credential"):
- """Create a global variable in Langflow."""
- payload = {"name": name, "value": value, "type": variable_type, "default_fields": []}
-
- response = await client.post("/api/v1/variables/", json=payload, headers=headers)
- if response.status_code != 201:
- logger.error(f"Failed to create global variable: {response.content}")
- return False
-
- logger.info(f"Successfully created global variable: {name}")
- return True
-
-
-async def load_and_prepare_flow(client: AsyncClient, created_api_key):
- """Load Simple Agent flow and wait for it to be ready."""
- headers = {"x-api-key": created_api_key.api_key}
-
- # Create OPENAI_API_KEY global variable
- openai_api_key = os.getenv("OPENAI_API_KEY")
- if not openai_api_key or openai_api_key == "dummy":
- pytest.skip("OPENAI_API_KEY environment variable not set")
-
- await create_global_variable(client, headers, "OPENAI_API_KEY", openai_api_key)
-
- # Load the Simple Agent template
- template_path = (
- pathlib.Path(__file__).resolve().parent.parent.parent
- / "base"
- / "langflow"
- / "initial_setup"
- / "starter_projects"
- / "Simple Agent.json"
- )
-
- flow_data = await asyncio.to_thread(lambda: json.loads(pathlib.Path(template_path).read_text()))
-
- # Add the flow
- response = await client.post("/api/v1/flows/", json=flow_data, headers=headers)
- assert response.status_code == 201
- flow = response.json()
-
- # Poll for flow builds to complete
- max_attempts = 10
- for attempt in range(max_attempts):
- builds_response = await client.get(f"/api/v1/monitor/builds?flow_id={flow['id']}", headers=headers)
-
- if builds_response.status_code == 200:
- builds = builds_response.json().get("vertex_builds", {})
- all_valid = True
- for build_list in builds.values():
- if not build_list or build_list[0].get("valid") is not True:
- all_valid = False
- break
-
- if all_valid and builds:
- break
-
- if attempt < max_attempts - 1:
- await asyncio.sleep(1)
-
- return flow, headers
-
-
-@pytest.mark.api_key_required
-@pytest.mark.integration
-async def test_openai_streaming_format_comparison(client: AsyncClient, created_api_key):
- """Compare raw HTTP streaming formats between OpenAI and our API."""
- # Test input
- input_msg = "What is 25 + 17? Use your calculator tool."
-
- # Tools definition
- tools = [
- {
- "type": "function",
- "name": "evaluate_expression",
- "description": "Perform basic arithmetic operations on a given expression.",
- "parameters": {
- "type": "object",
- "properties": {
- "expression": {
- "type": "string",
- "description": "The arithmetic expression to evaluate (e.g., '4*4*(33/22)+12-20').",
- }
- },
- "required": ["expression"],
- },
- }
- ]
-
- # Get OpenAI API key
- openai_api_key = os.getenv("OPENAI_API_KEY")
- if not openai_api_key:
- pytest.skip("OPENAI_API_KEY environment variable not set")
-
- # === Test OpenAI's raw HTTP streaming format ===
- logger.info("=== Testing OpenAI API Raw HTTP Format ===")
-
- async with httpx.AsyncClient() as openai_client:
- openai_payload = {"model": "gpt-4o-mini", "input": input_msg, "tools": tools, "stream": True}
-
- openai_response = await openai_client.post(
- "https://api.openai.com/v1/responses",
- headers={"Authorization": f"Bearer {openai_api_key}", "Content-Type": "application/json"},
- json=openai_payload,
- )
-
- logger.info(f"OpenAI status: {openai_response.status_code}")
- if openai_response.status_code != 200:
- logger.error(f"OpenAI error: {openai_response.text}")
- pytest.skip("OpenAI API request failed")
-
- # Parse OpenAI's raw SSE stream
- openai_content = await openai_response.aread()
- openai_text = openai_content.decode("utf-8")
-
- openai_events = openai_text.strip().split("\n\n")
- openai_data_events = [evt for evt in openai_events if "data: " in evt and not evt.startswith("data: [DONE]")]
-
- # === Test Our API's streaming format ===
- logger.info("=== Testing Our API Format ===")
-
- flow, headers = await load_and_prepare_flow(client, created_api_key)
-
- our_payload = {"model": flow["id"], "input": input_msg, "stream": True, "include": ["tool_call.results"]}
-
- our_response = await client.post("/api/v1/responses", json=our_payload, headers=headers)
- assert our_response.status_code == 200
-
- our_content = await our_response.aread()
- our_text = our_content.decode("utf-8")
-
- our_events = our_text.strip().split("\n\n")
- our_data_events = [evt for evt in our_events if "data: " in evt and not evt.startswith("data: [DONE]")]
-
- # === Parse and compare events ===
-
- # Extract JSON data from OpenAI events
- openai_parsed = []
- for event_block in openai_data_events:
- lines = event_block.strip().split("\n")
- for line in lines:
- if line.startswith("data: "):
- try:
- json_str = line.replace("data: ", "", 1)
- event_data = json.loads(json_str)
- openai_parsed.append(event_data)
- break
- except json.JSONDecodeError:
- continue
-
- # Extract JSON data from our events
- our_parsed = []
- for event_block in our_data_events:
- lines = event_block.strip().split("\n")
- for line in lines:
- if line.startswith("data: "):
- try:
- json_str = line.replace("data: ", "", 1)
- event_data = json.loads(json_str)
- our_parsed.append(event_data)
- break
- except json.JSONDecodeError:
- continue
-
- # === Analysis ===
- logger.info("Event counts:")
- logger.info(f" OpenAI: {len(openai_parsed)} events")
- logger.info(f" Our API: {len(our_parsed)} events")
-
- # Check for tool call events with detailed logging
- logger.info("Detailed OpenAI event analysis:")
- output_item_added_events = [e for e in openai_parsed if e.get("type") == "response.output_item.added"]
- logger.info(f" Found {len(output_item_added_events)} 'response.output_item.added' events")
-
- for i, event in enumerate(output_item_added_events):
- item = event.get("item", {})
- item_type = item.get("type", "unknown")
- logger.info(f" Event {i}: item.type = '{item_type}'")
- logger.info(f" Event {i}: item keys = {list(item.keys())}")
- if "name" in item:
- logger.info(f" Event {i}: item.name = '{item.get('name')}'")
- logger.debug(f" Event {i}: full item = {json.dumps(item, indent=6)}")
-
- openai_tool_events = [
- e
- for e in openai_parsed
- if e.get("type") == "response.output_item.added" and e.get("item", {}).get("type") == "tool_call"
- ]
- openai_function_events = [
- e
- for e in openai_parsed
- if e.get("type") == "response.output_item.added" and e.get("item", {}).get("type") == "function_call"
- ]
-
- logger.info("Detailed Our API event analysis:")
- our_output_item_added_events = [e for e in our_parsed if e.get("type") == "response.output_item.added"]
- logger.info(f" Found {len(our_output_item_added_events)} 'response.output_item.added' events")
-
- for i, event in enumerate(our_output_item_added_events):
- item = event.get("item", {})
- item_type = item.get("type", "unknown")
- logger.info(f" Event {i}: item.type = '{item_type}'")
- logger.info(f" Event {i}: item keys = {list(item.keys())}")
- if "name" in item:
- logger.info(f" Event {i}: item.name = '{item.get('name')}'")
- logger.debug(f" Event {i}: full item = {json.dumps(item, indent=6)}")
-
- our_function_events = [
- e
- for e in our_parsed
- if e.get("type") == "response.output_item.added" and e.get("item", {}).get("type") == "function_call"
- ]
-
- logger.info("Tool call detection results:")
- logger.info(f" OpenAI tool_call events: {len(openai_tool_events)}")
- logger.info(f" OpenAI function_call events: {len(openai_function_events)}")
- logger.info(f" Our function_call events: {len(our_function_events)}")
-
- # Use the correct event type for OpenAI (function_call vs tool_call)
- openai_actual_tool_events = openai_function_events if openai_function_events else openai_tool_events
-
- logger.info("Function call events:")
- logger.info(f" OpenAI: {len(openai_actual_tool_events)} function call events")
- logger.info(f" Our API: {len(our_function_events)} function call events")
-
- # Show event types
- openai_types = {e.get("type", e.get("object", "unknown")) for e in openai_parsed}
- our_types = {e.get("type", e.get("object", "unknown")) for e in our_parsed}
-
- logger.info("Event types:")
- logger.info(f" OpenAI: {sorted(openai_types)}")
- logger.info(f" Our API: {sorted(our_types)}")
-
- # Print sample events for debugging
- logger.info("Sample OpenAI events:")
- for i, event in enumerate(openai_parsed[:3]):
- logger.debug(f" {i}: {json.dumps(event, indent=2)[:200]}...")
-
- logger.info("Sample Our events:")
- for i, event in enumerate(our_parsed[:3]):
- logger.debug(f" {i}: {json.dumps(event, indent=2)[:200]}...")
-
- # Check delta content for duplicates/accumulation
- logger.info("Checking delta content for proper streaming:")
- delta_contents = []
- for i, event in enumerate(our_parsed):
- if event.get("object") == "response.chunk" and "delta" in event:
- delta_content = event["delta"].get("content", "")
- if delta_content: # Only track non-empty content
- delta_contents.append(delta_content)
- logger.info(f" Delta {i}: '{delta_content}'")
-
- # Check for accumulated content (bad) vs incremental content (good)
- if len(delta_contents) > 1:
- logger.info("Analyzing delta content patterns:")
- accumulated_pattern = True
- for i in range(1, len(delta_contents)):
- if not delta_contents[i].startswith(delta_contents[i - 1]):
- accumulated_pattern = False
- break
-
- if accumulated_pattern:
- logger.error("❌ DETECTED ACCUMULATED CONTENT PATTERN (BAD)")
- logger.error("Each delta contains the full accumulated message instead of just new content")
- logger.error("Example:")
- for i, content in enumerate(delta_contents[:3]):
- logger.error(f" Delta {i}: '{content}'")
- else:
- logger.success("✅ DETECTED INCREMENTAL CONTENT PATTERN (GOOD)")
- logger.success("Each delta contains only new content")
- else:
- logger.info("Not enough delta content to analyze pattern")
-
- if openai_actual_tool_events:
- logger.info("OpenAI tool call example:")
- logger.debug(f" {json.dumps(openai_actual_tool_events[0], indent=2)}")
-
- if our_function_events:
- logger.info("Our function call example:")
- logger.debug(f" {json.dumps(our_function_events[0], indent=2)}")
-
- # === Validation ===
-
- # Basic validation
- assert len(openai_parsed) > 0, "No OpenAI events received"
- assert len(our_parsed) > 0, "No events from our API"
-
- # Check if both APIs produced function call events
- if len(openai_actual_tool_events) > 0:
- logger.success("✅ OpenAI produced function call events")
- if len(our_function_events) > 0:
- logger.success("✅ Our API also produced function call events")
- logger.success("✅ Both APIs support function call streaming")
- else:
- logger.error("❌ Our API did not produce function call events")
- pytest.fail("Our API should produce function call events when OpenAI does")
- else:
- logger.info("No function calls were made by OpenAI")
-
- logger.info("📊 Test Summary:")
- logger.info(f" OpenAI events: {len(openai_parsed)}")
- logger.info(f" Our events: {len(our_parsed)}")
- logger.info(f" OpenAI function events: {len(openai_actual_tool_events)}")
- logger.info(f" Our function events: {len(our_function_events)}")
- compatibility_result = (
- "✅ PASS" if len(our_function_events) > 0 or len(openai_actual_tool_events) == 0 else "❌ FAIL"
- )
- logger.info(f" Format compatibility: {compatibility_result}")
diff --git a/src/backend/tests/integration/utils.py b/src/backend/tests/integration/utils.py
deleted file mode 100644
index e1f4b3d0284c..000000000000
--- a/src/backend/tests/integration/utils.py
+++ /dev/null
@@ -1,199 +0,0 @@
-import dataclasses
-import os
-import uuid
-from typing import Any
-
-import pytest
-import requests
-from astrapy.admin import parse_api_endpoint
-
-from lfx.custom import Component
-from lfx.custom.eval import eval_custom_component_code
-from lfx.field_typing import Embeddings
-from lfx.graph import Graph
-from lfx.processing.process import run_graph_internal
-from lfx.schema.schema import InputValueRequest
-
-
-def check_env_vars(*env_vars):
- """Check if all specified environment variables are set.
-
- Args:
- *env_vars (str): The environment variables to check.
-
- Returns:
- bool: True if all environment variables are set, False otherwise.
- """
- return all(os.getenv(var) for var in env_vars)
-
-
-def valid_nvidia_vectorize_region(api_endpoint: str) -> bool:
- """Check if the specified region is valid.
-
- Args:
- api_endpoint: The API endpoint to check.
-
- Returns:
- True if the region contains hosted nvidia models, False otherwise.
- """
- parsed_endpoint = parse_api_endpoint(api_endpoint)
- if not parsed_endpoint:
- msg = "Invalid ASTRA_DB_API_ENDPOINT"
- raise ValueError(msg)
- return parsed_endpoint.region == "us-east-2"
-
-
-class MockEmbeddings(Embeddings):
- def __init__(self):
- self.embedded_documents = None
- self.embedded_query = None
-
- @staticmethod
- def mock_embedding(text: str):
- return [len(text) / 2, len(text) / 5, len(text) / 10]
-
- def embed_documents(self, texts: list[str]) -> list[list[float]]:
- self.embedded_documents = texts
- return [self.mock_embedding(text) for text in texts]
-
- def embed_query(self, text: str) -> list[float]:
- self.embedded_query = text
- return self.mock_embedding(text)
-
-
-@dataclasses.dataclass
-class JSONFlow:
- json: dict
-
- def get_components_by_type(self, component_type):
- result = [node["id"] for node in self.json["data"]["nodes"] if node["data"]["type"] == component_type]
- if not result:
- msg = (
- f"Component of type {component_type} not found, "
- f"available types: {', '.join({node['data']['type'] for node in self.json['data']['nodes']})}"
- )
- raise ValueError(msg)
- return result
-
- def get_component_by_type(self, component_type):
- components = self.get_components_by_type(component_type)
- if len(components) > 1:
- msg = f"Multiple components of type {component_type} found"
- raise ValueError(msg)
- return components[0]
-
- def set_value(self, component_id, key, value):
- done = False
- for node in self.json["data"]["nodes"]:
- if node["id"] == component_id:
- if key not in node["data"]["node"]["template"]:
- msg = f"Component {component_id} does not have input {key}"
- raise ValueError(msg)
- node["data"]["node"]["template"][key]["value"] = value
- node["data"]["node"]["template"][key]["load_from_db"] = False
- done = True
- break
- if not done:
- msg = f"Component {component_id} not found"
- raise ValueError(msg)
-
-
-def download_flow_from_github(name: str, version: str) -> JSONFlow:
- response = requests.get(
- f"https://raw.githubusercontent.com/langflow-ai/langflow/v{version}/src/backend/base/langflow/initial_setup/starter_projects/{name}.json",
- timeout=10,
- )
- response.raise_for_status()
- as_json = response.json()
- return JSONFlow(json=as_json)
-
-
-def download_component_from_github(module: str, file_name: str, version: str) -> Component:
- version_string = f"v{version}" if version != "main" else version
- response = requests.get(
- f"https://raw.githubusercontent.com/langflow-ai/langflow/{version_string}/src/backend/base/langflow/components/{module}/{file_name}.py",
- timeout=10,
- )
- response.raise_for_status()
- return Component(_code=response.text)
-
-
-async def run_json_flow(
- json_flow: JSONFlow, run_input: Any | None = None, session_id: str | None = None
-) -> dict[str, Any]:
- graph = Graph.from_payload(json_flow.json)
- return await run_flow(graph, run_input, session_id)
-
-
-async def run_flow(graph: Graph, run_input: Any | None = None, session_id: str | None = None) -> dict[str, Any]:
- graph.prepare()
- graph_run_inputs = [InputValueRequest(input_value=run_input, type="chat")] if run_input else []
-
- flow_id = str(uuid.uuid4())
-
- results, _ = await run_graph_internal(graph, flow_id, session_id=session_id, inputs=graph_run_inputs)
- outputs = {}
- for r in results:
- for out in r.outputs:
- outputs |= out.results
- return outputs
-
-
-@dataclasses.dataclass
-class ComponentInputHandle:
- clazz: type
- inputs: dict
- output_name: str
-
-
-async def run_single_component(
- clazz: type,
- inputs: dict | None = None,
- run_input: Any | None = None,
- session_id: str | None = None,
- input_type: str | None = "chat",
-) -> dict[str, Any]:
- user_id = str(uuid.uuid4())
- flow_id = str(uuid.uuid4())
- graph = Graph(user_id=user_id, flow_id=flow_id)
-
- def _add_component(clazz: type, inputs: dict | None = None) -> str:
- raw_inputs = {}
- if inputs:
- for key, value in inputs.items():
- if not isinstance(value, ComponentInputHandle):
- raw_inputs[key] = value
- if isinstance(value, Component):
- msg = "Component inputs must be wrapped in ComponentInputHandle"
- raise TypeError(msg)
- component = clazz(**raw_inputs, _user_id=user_id)
- component_id = graph.add_component(component)
- if inputs:
- for input_name, handle in inputs.items():
- if isinstance(handle, ComponentInputHandle):
- handle_component_id = _add_component(handle.clazz, handle.inputs)
- graph.add_component_edge(handle_component_id, (handle.output_name, input_name), component_id)
- return component_id
-
- component_id = _add_component(clazz, inputs)
- graph.prepare()
- graph_run_inputs = [InputValueRequest(input_value=run_input, type=input_type)] if run_input else []
-
- _, _ = await run_graph_internal(
- graph, flow_id, session_id=session_id, inputs=graph_run_inputs, outputs=[component_id]
- )
- return graph.get_vertex(component_id).built_object
-
-
-def build_component_instance_for_tests(version: str, module: str, file_name: str, **kwargs):
- component = download_component_from_github(module, file_name, version)
- cc_class = eval_custom_component_code(component._code)
- return cc_class(**kwargs), component._code
-
-
-def pyleak_marker(**extra_args):
- default_args = {
- "enable_task_creation_tracking": True, # log task creation stacks
- "thread_name_filter": r"^(?!asyncio_\d+$).*", # exclude `asyncio_{num}` threads
- }
- return pytest.mark.no_leaks(**default_args, **extra_args)
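
The least obvious helper in this module is `run_single_component`: wrapping an input value in `ComponentInputHandle` turns it into a graph edge from an upstream component rather than a raw parameter. A usage sketch, assuming the helpers above are importable; `UpstreamComponent` and `DownstreamComponent` are hypothetical stand-ins, not real Langflow classes:

```python
import asyncio


async def main() -> None:
    upstream = ComponentInputHandle(
        clazz=UpstreamComponent,  # hypothetical class producing "some_output"
        inputs={"text": "hello"},
        output_name="some_output",
    )
    built = await run_single_component(
        DownstreamComponent,  # hypothetical class with a "connected_input" port
        inputs={"connected_input": upstream},  # handle becomes a graph edge
        run_input="hi",  # wrapped in an InputValueRequest of type "chat"
    )
    print(built)


asyncio.run(main())
```
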
diff --git a/src/backend/tests/locust/locustfile.py b/src/backend/tests/locust/locustfile.py
deleted file mode 100644
index ab4cd612aa5e..000000000000
--- a/src/backend/tests/locust/locustfile.py
+++ /dev/null
@@ -1,125 +0,0 @@
-import os
-import time
-from http import HTTPStatus
-
-from locust import FastHttpUser, between, events, task
-
-
-@events.quitting.add_listener
-def _(environment, **_kwargs):
- """Print stats at test end for analysis."""
- if environment.stats.total.fail_ratio > 0.01:
- environment.process_exit_code = 1
- environment.runner.quit()
-
-
-class FlowRunUser(FastHttpUser):
- """FlowRunUser simulates users sending requests to the Langflow run endpoint.
-
- Designed for high-load testing with proper wait times and connection handling.
- Uses FastHttpUser for better performance with keep-alive connections and connection pooling.
-
- Environment Variables:
- - LANGFLOW_HOST: Base URL for the Langflow server (default: http://localhost:7860)
-    - FLOW_ID: UUID or endpoint name of the flow to test (Required)
- - API_KEY: API key for authentication, sent as header 'x-api-key' (Required)
- - MIN_WAIT: Minimum wait time between requests in ms (default: 2000)
- - MAX_WAIT: Maximum wait time between requests in ms (default: 5000)
- - REQUEST_TIMEOUT: Timeout for each request in seconds (default: 30.0)
- """
-
- abstract = False # This user class can be instantiated
- connection_timeout = float(os.getenv("REQUEST_TIMEOUT", "30.0")) # Configurable timeout
- network_timeout = float(os.getenv("REQUEST_TIMEOUT", "30.0"))
-
- # Dynamic wait time based on environment variables or defaults
- # Increased default minimum wait to reduce database pressure
- wait_time = between(
- float(os.getenv("MIN_WAIT", "2000")) / 1000,
- float(os.getenv("MAX_WAIT", "5000")) / 1000,
- )
-
- # Use the host provided by environment variable or default
- host = os.getenv("LANGFLOW_HOST", "http://localhost:7860")
-
-    # Flow ID from environment variable; validated as required in run_flow_endpoint
-    flow_id = os.getenv("FLOW_ID")
-
- def __init__(self, *args, **kwargs) -> None:
- super().__init__(*args, **kwargs)
- self._last_response: dict | None = None
- self._consecutive_failures = 0
-
- def on_start(self):
- """Setup and validate required configurations."""
- if not os.getenv("API_KEY"):
- msg = "API_KEY environment variable is required for load testing"
- raise ValueError(msg)
-
- # Test connection and auth before starting
- with self.client.get("/health", catch_response=True) as response:
- if response.status_code != HTTPStatus.OK:
- msg = f"Initial health check failed: {response.status_code}"
- raise ConnectionError(msg)
-
- def log_error(self, name: str, exc: Exception, response_time: float):
- """Helper method to log errors in a format Locust expects.
-
- Args:
- name: The name/endpoint of the request
- exc: The exception that occurred
- response_time: The response time in milliseconds
- """
- # Log error in stats
- self.environment.stats.log_error("ERROR", name, str(exc))
- # Log request with error
- self.environment.stats.log_request("ERROR", name, response_time, 0)
-
- @task(1)
- def run_flow_endpoint(self):
- """Sends a POST request to the run endpoint using a realistic payload.
-
- Includes basic error handling.
- """
- if not self.flow_id:
- msg = "FLOW_ID environment variable is required for load testing"
- raise ValueError(msg)
- endpoint = f"/api/v1/run/{self.flow_id}?stream=false"
-
- # Realistic payload that exercises the system
- payload = {
- "input_value": (
- "Hey, Could you check https://docs.langflow.org for me? Later, could you calculate 1390 / 192 ?"
- ),
- "output_type": "chat",
- "input_type": "chat",
- "tweaks": {},
- }
-
- headers = {
- "Content-Type": "application/json",
- "x-api-key": os.getenv("API_KEY"),
- "Accept": "application/json",
- }
-
- start_time = time.time()
- try:
- with self.client.post(
- endpoint, json=payload, headers=headers, catch_response=True, timeout=self.connection_timeout
- ) as response:
- response_time = (time.time() - start_time) * 1000
- if response.status_code == HTTPStatus.OK:
- try:
- self._last_response = response.json()
- except ValueError as e:
- response.failure("Invalid JSON response")
- self.log_error(endpoint, e, response_time)
- else:
- error_text = response.text or "No response text"
- error_msg = f"Unexpected status code: {response.status_code}, Response: {error_text[:200]}"
- response.failure(error_msg)
- self.log_error(endpoint, Exception(error_msg), response_time)
-        except Exception as e:
-            # `response` is out of scope here, so only record the failure in stats
-            response_time = (time.time() - start_time) * 1000
-            self.log_error(endpoint, e, response_time)
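
Since the user class is configured entirely through environment variables, launching a run is just env setup plus the stock Locust CLI. A sketch of a headless invocation driven from Python (all values are placeholders):

```python
import os
import subprocess

env = {
    **os.environ,
    "LANGFLOW_HOST": "http://localhost:7860",  # placeholder
    "FLOW_ID": "your-flow-id",                 # placeholder; required by the task
    "API_KEY": "your-api-key",                 # placeholder; checked in on_start
}

# Stock Locust flags: headless run, 10 users spawned at 2/s, for one minute
subprocess.run(
    ["locust", "-f", "locustfile.py", "--headless",
     "-u", "10", "-r", "2", "--run-time", "1m"],
    env=env,
    check=True,
)
```
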
diff --git a/src/backend/tests/locust/names.txt b/src/backend/tests/locust/names.txt
deleted file mode 100644
index 555af7369db4..000000000000
--- a/src/backend/tests/locust/names.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-Bob
-Alice
-John
-Gabriel
-Lily
diff --git a/src/backend/tests/performance/test_server_init.py b/src/backend/tests/performance/test_server_init.py
deleted file mode 100644
index 3a113acfe368..000000000000
--- a/src/backend/tests/performance/test_server_init.py
+++ /dev/null
@@ -1,85 +0,0 @@
-import os
-
-import pytest
-from langflow.services.deps import get_settings_service
-
-
-@pytest.fixture(autouse=True)
-def setup_database_url(tmp_path, monkeypatch):
- """Setup a temporary database URL for testing."""
- settings_service = get_settings_service()
- db_path = tmp_path / "test_performance.db"
- original_value = os.getenv("LANGFLOW_DATABASE_URL")
- monkeypatch.delenv("LANGFLOW_DATABASE_URL", raising=False)
- test_db_url = f"sqlite:///{db_path}"
- monkeypatch.setenv("LANGFLOW_DATABASE_URL", test_db_url)
- settings_service.set("database_url", test_db_url)
- yield
- # Restore original value if it existed
- if original_value is not None:
- monkeypatch.setenv("LANGFLOW_DATABASE_URL", original_value)
- settings_service.set("database_url", original_value)
- else:
- monkeypatch.delenv("LANGFLOW_DATABASE_URL", raising=False)
-
-
-async def test_initialize_services():
- """Benchmark the initialization of services."""
- from langflow.services.utils import initialize_services
-
- await initialize_services(fix_migration=False)
- settings_service = get_settings_service()
- assert "test_performance.db" in settings_service.settings.database_url
-
-
-def test_setup_llm_caching():
- """Benchmark LLM caching setup."""
- from langflow.interface.utils import setup_llm_caching
-
- setup_llm_caching()
- settings_service = get_settings_service()
- assert "test_performance.db" in settings_service.settings.database_url
-
-
-async def test_initialize_super_user():
- """Benchmark super user initialization."""
- from langflow.initial_setup.setup import initialize_super_user_if_needed
- from langflow.services.utils import initialize_services
-
- await initialize_services(fix_migration=False)
- await initialize_super_user_if_needed()
- settings_service = get_settings_service()
- assert "test_performance.db" in settings_service.settings.database_url
-
-
-async def test_get_and_cache_all_types_dict():
- """Benchmark get_and_cache_all_types_dict function."""
- from lfx.interface.components import get_and_cache_all_types_dict
-
- settings_service = get_settings_service()
- result = await get_and_cache_all_types_dict(settings_service)
- assert "vectorstores" in result
- assert "test_performance.db" in settings_service.settings.database_url
-
-
-async def test_create_starter_projects():
- """Benchmark creation of starter projects."""
- from langflow.initial_setup.setup import create_or_update_starter_projects
- from langflow.services.utils import initialize_services
-
- from lfx.interface.components import get_and_cache_all_types_dict
-
- await initialize_services(fix_migration=False)
- settings_service = get_settings_service()
- types_dict = await get_and_cache_all_types_dict(settings_service)
- await create_or_update_starter_projects(types_dict)
- assert "test_performance.db" in settings_service.settings.database_url
-
-
-async def test_load_flows():
- """Benchmark loading flows from directory."""
- from langflow.initial_setup.setup import load_flows_from_directory
-
- await load_flows_from_directory()
- settings_service = get_settings_service()
- assert "test_performance.db" in settings_service.settings.database_url
diff --git a/src/backend/tests/test_messages.py b/src/backend/tests/test_messages.py
deleted file mode 100644
index 1ea49a815cd7..000000000000
--- a/src/backend/tests/test_messages.py
+++ /dev/null
@@ -1,364 +0,0 @@
-from datetime import datetime, timezone
-from uuid import UUID, uuid4
-
-import pytest
-from langflow.memory import (
- aadd_messages,
- aadd_messagetables,
- add_messages,
- adelete_messages,
- aget_messages,
- astore_message,
- aupdate_messages,
- delete_messages,
- get_messages,
-)
-from langflow.schema.content_block import ContentBlock
-from langflow.schema.content_types import TextContent, ToolContent
-from langflow.schema.message import Message
-from langflow.schema.properties import Properties, Source
-
-# Assuming you have these imports available
-from langflow.services.database.models.message import MessageCreate, MessageRead
-from langflow.services.database.models.message.model import MessageTable
-from langflow.services.deps import session_scope
-from langflow.services.tracing.utils import convert_to_langchain_type
-
-
-@pytest.fixture
-async def created_message():
- async with session_scope() as session:
- message = MessageCreate(text="Test message", sender="User", sender_name="User", session_id="session_id")
- messagetable = MessageTable.model_validate(message, from_attributes=True)
- messagetables = await aadd_messagetables([messagetable], session)
- return MessageRead.model_validate(messagetables[0], from_attributes=True)
-
-
-@pytest.fixture
-async def created_messages(async_session): # noqa: ARG001
- async with session_scope() as _session:
- messages = [
- MessageCreate(text="Test message 1", sender="User", sender_name="User", session_id="session_id2"),
- MessageCreate(text="Test message 2", sender="User", sender_name="User", session_id="session_id2"),
- MessageCreate(text="Test message 3", sender="User", sender_name="User", session_id="session_id2"),
- ]
- messagetables = [MessageTable.model_validate(message, from_attributes=True) for message in messages]
- messagetables = await aadd_messagetables(messagetables, _session)
- return [MessageRead.model_validate(messagetable, from_attributes=True) for messagetable in messagetables]
-
-
-@pytest.mark.usefixtures("client")
-def test_get_messages():
- add_messages(
- [
- Message(text="Test message 1", sender="User", sender_name="User", session_id="session_id2"),
- Message(text="Test message 2", sender="User", sender_name="User", session_id="session_id2"),
- ]
- )
- limit = 2
- messages = get_messages(sender="User", session_id="session_id2", limit=limit)
- assert len(messages) == limit
- assert messages[0].text == "Test message 1"
- assert messages[1].text == "Test message 2"
-
-
-@pytest.mark.usefixtures("client")
-async def test_aget_messages():
- await aadd_messages(
- [
- Message(text="Test message 1", sender="User", sender_name="User", session_id="session_id2"),
- Message(text="Test message 2", sender="User", sender_name="User", session_id="session_id2"),
- ]
- )
- limit = 2
- messages = await aget_messages(sender="User", session_id="session_id2", limit=limit)
- assert len(messages) == limit
- assert messages[0].text == "Test message 1"
- assert messages[1].text == "Test message 2"
-
-
-@pytest.mark.usefixtures("client")
-def test_add_messages():
- message = Message(text="New Test message", sender="User", sender_name="User", session_id="new_session_id")
- messages = add_messages(message)
- assert len(messages) == 1
- assert messages[0].text == "New Test message"
-
-
-@pytest.mark.usefixtures("client")
-async def test_aadd_messages():
- message = Message(text="New Test message", sender="User", sender_name="User", session_id="new_session_id")
- messages = await aadd_messages(message)
- assert len(messages) == 1
- assert messages[0].text == "New Test message"
-
-
-@pytest.mark.usefixtures("client")
-async def test_aadd_messagetables(async_session):
- messages = [MessageTable(text="New Test message", sender="User", sender_name="User", session_id="new_session_id")]
- added_messages = await aadd_messagetables(messages, async_session)
- assert len(added_messages) == 1
- assert added_messages[0].text == "New Test message"
-
-
-@pytest.mark.usefixtures("client")
-def test_delete_messages():
- session_id = "new_session_id"
- message = Message(text="New Test message", sender="User", sender_name="User", session_id=session_id)
- add_messages([message])
- messages = get_messages(sender="User", session_id=session_id)
- assert len(messages) == 1
- delete_messages(session_id)
- messages = get_messages(sender="User", session_id=session_id)
- assert len(messages) == 0
-
-
-@pytest.mark.usefixtures("client")
-async def test_adelete_messages():
- session_id = "new_session_id"
- message = Message(text="New Test message", sender="User", sender_name="User", session_id=session_id)
- await aadd_messages([message])
- messages = await aget_messages(sender="User", session_id=session_id)
- assert len(messages) == 1
- await adelete_messages(session_id)
- messages = await aget_messages(sender="User", session_id=session_id)
- assert len(messages) == 0
-
-
-@pytest.mark.usefixtures("client")
-async def test_store_message():
- session_id = "stored_session_id"
- message = Message(text="Stored message", sender="User", sender_name="User", session_id=session_id)
- await astore_message(message)
- stored_messages = await aget_messages(sender="User", session_id=session_id)
- assert len(stored_messages) == 1
- assert stored_messages[0].text == "Stored message"
-
-
-@pytest.mark.usefixtures("client")
-async def test_astore_message():
- session_id = "stored_session_id"
- message = Message(text="Stored message", sender="User", sender_name="User", session_id=session_id)
- await astore_message(message)
- stored_messages = await aget_messages(sender="User", session_id=session_id)
- assert len(stored_messages) == 1
- assert stored_messages[0].text == "Stored message"
-
-
-@pytest.mark.parametrize("method_name", ["message", "convert_to_langchain_type"])
-def test_convert_to_langchain(method_name):
- def convert(value):
- if method_name == "message":
- return value.to_lc_message()
- if method_name == "convert_to_langchain_type":
- return convert_to_langchain_type(value)
- msg = f"Invalid method: {method_name}"
- raise ValueError(msg)
-
- lc_message = convert(Message(text="Test message 1", sender="User", sender_name="User", session_id="session_id2"))
- assert lc_message.content == "Test message 1"
- assert lc_message.type == "human"
-
- lc_message = convert(Message(text="Test message 2", sender="AI", session_id="session_id2"))
- assert lc_message.content == "Test message 2"
- assert lc_message.type == "ai"
-
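- # Streaming case: a message whose text is an iterator converts to empty
- # content without consuming the iterator.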
- iterator = iter(["stream", "message"])
- lc_message = convert(Message(text=iterator, sender="AI", session_id="session_id2"))
- assert lc_message.content == ""
- assert lc_message.type == "ai"
- expected_len = 2
- assert len(list(iterator)) == expected_len
-
-
-@pytest.mark.usefixtures("client")
-async def test_aupdate_single_message(created_message):
- # Modify the message
- created_message.text = "Updated message"
- updated = await aupdate_messages(created_message)
-
- assert len(updated) == 1
- assert updated[0].text == "Updated message"
- assert updated[0].id == created_message.id
-
-
-@pytest.mark.usefixtures("client")
-async def test_aupdate_multiple_messages(created_messages):
- # Modify the messages
- for i, message in enumerate(created_messages):
- message.text = f"Updated message {i}"
-
- updated = await aupdate_messages(created_messages)
-
- assert len(updated) == len(created_messages)
- for i, message in enumerate(updated):
- assert message.text == f"Updated message {i}"
- assert message.id == created_messages[i].id
-
-
-@pytest.mark.usefixtures("client")
-async def test_aupdate_nonexistent_message_raises_value_error():
- # Create a message with a non-existent UUID
- nonexistent_uuid = uuid4()
- message = MessageRead(
- id=nonexistent_uuid, # Generate a random UUID that won't exist in the database
- text="Test message",
- sender="User",
- sender_name="User",
- session_id="session_id",
- flow_id=uuid4(),
- )
- with pytest.raises(ValueError, match=f"Message with id {nonexistent_uuid} not found"):
- await aupdate_messages(message)
-
-
-@pytest.mark.usefixtures("client")
-async def test_aupdate_mixed_messages(created_messages):
- # Create a mix of existing and non-existing messages
- nonexistent_uuid = uuid4()
- nonexistent_message = MessageRead(
- id=nonexistent_uuid, # Generate a random UUID that won't exist in the database
- text="Test message",
- sender="User",
- sender_name="User",
- session_id="session_id",
- flow_id=uuid4(),
- )
-
- messages_to_update = [*created_messages[:1], nonexistent_message]
- created_messages[0].text = "Updated existing message"
-
- with pytest.raises(ValueError, match=f"Message with id {nonexistent_uuid} not found"):
- await aupdate_messages(messages_to_update)
-
- # Update just the existing message
- updated = await aupdate_messages(created_messages[:1])
-
- assert len(updated) == 1
- assert updated[0].text == "Updated existing message"
- assert updated[0].id == created_messages[0].id
- assert isinstance(updated[0].id, UUID) # Verify ID is UUID type
-
-
-@pytest.mark.usefixtures("client")
-async def test_aupdate_message_with_timestamp(created_message):
- # Set a specific timestamp
- new_timestamp = datetime(2024, 1, 1, 12, 0, 0, tzinfo=timezone.utc)
- created_message.timestamp = new_timestamp
- created_message.text = "Updated message with timestamp"
-
- updated = await aupdate_messages(created_message)
-
- assert len(updated) == 1
- assert updated[0].text == "Updated message with timestamp"
-
- # Compare timestamps without timezone info since DB doesn't preserve it
- assert updated[0].timestamp.replace(tzinfo=None) == new_timestamp.replace(tzinfo=None)
- assert updated[0].id == created_message.id
-
-
-@pytest.mark.usefixtures("client")
-async def test_aupdate_multiple_messages_with_timestamps(created_messages):
- # Modify messages with different timestamps
- for i, message in enumerate(created_messages):
- message.text = f"Updated message {i}"
- message.timestamp = datetime(2024, 1, 1, i, 0, 0, tzinfo=timezone.utc)
-
- updated = await aupdate_messages(created_messages)
-
- assert len(updated) == len(created_messages)
- for i, message in enumerate(updated):
- assert message.text == f"Updated message {i}"
- # Compare timestamps without timezone info
- expected_timestamp = datetime(2024, 1, 1, i, 0, 0, tzinfo=timezone.utc)
- assert message.timestamp.replace(tzinfo=None) == expected_timestamp.replace(tzinfo=None)
- assert message.id == created_messages[i].id
-
-
-@pytest.mark.usefixtures("client")
-async def test_aupdate_message_with_content_blocks(created_message):
- # Create a content block using proper models
- text_content = TextContent(
- type="text", text="Test content", duration=5, header={"title": "Test Header", "icon": "TestIcon"}
- )
-
- tool_content = ToolContent(type="tool_use", name="test_tool", tool_input={"param": "value"}, duration=10)
-
- content_block = ContentBlock(title="Test Block", contents=[text_content, tool_content], allow_markdown=True)
-
- created_message.content_blocks = [content_block]
- created_message.text = "Message with content blocks"
-
- updated = await aupdate_messages(created_message)
-
- assert len(updated) == 1
- assert updated[0].text == "Message with content blocks"
- assert len(updated[0].content_blocks) == 1
-
- # Verify the content block structure
- updated_block = updated[0].content_blocks[0]
- assert updated_block.title == "Test Block"
- expected_len = 2
- assert len(updated_block.contents) == expected_len
-
- # Verify text content
- text_content = updated_block.contents[0]
- assert text_content.type == "text"
- assert text_content.text == "Test content"
- duration = 5
- assert text_content.duration == duration
- assert text_content.header["title"] == "Test Header"
-
- # Verify tool content
- tool_content = updated_block.contents[1]
- assert tool_content.type == "tool_use"
- assert tool_content.name == "test_tool"
- assert tool_content.tool_input == {"param": "value"}
- duration = 10
- assert tool_content.duration == duration
-
-
-@pytest.mark.usefixtures("client")
-async def test_aupdate_message_with_nested_properties(created_message):
- # Create a text content with nested properties
- text_content = TextContent(
- type="text", text="Test content", header={"title": "Test Header", "icon": "TestIcon"}, duration=15
- )
-
- content_block = ContentBlock(
- title="Test Properties",
- contents=[text_content],
- allow_markdown=True,
- media_url=["http://example.com/image.jpg"],
- )
-
- # Set properties according to the Properties model structure
- created_message.properties = Properties(
- text_color="blue",
- background_color="white",
- edited=False,
- source=Source(id="test_id", display_name="Test Source", source="test"),
- icon="TestIcon",
- allow_markdown=True,
- state="complete",
- targets=[],
- )
- created_message.text = "Message with nested properties"
- created_message.content_blocks = [content_block]
-
- updated = await aupdate_messages(created_message)
-
- assert len(updated) == 1
- assert updated[0].text == "Message with nested properties"
-
- # Verify the properties were properly serialized and stored
- assert updated[0].properties.text_color == "blue"
- assert updated[0].properties.background_color == "white"
- assert updated[0].properties.edited is False
- assert updated[0].properties.source.id == "test_id"
- assert updated[0].properties.source.display_name == "Test Source"
- assert updated[0].properties.source.source == "test"
- assert updated[0].properties.icon == "TestIcon"
- assert updated[0].properties.allow_markdown is True
- assert updated[0].properties.state == "complete"
- assert updated[0].properties.targets == []
diff --git a/src/backend/tests/unit/__init__.py b/src/backend/tests/unit/__init__.py
deleted file mode 100644
index b7cc0e547422..000000000000
--- a/src/backend/tests/unit/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-"""Unit tests for langflow."""
diff --git a/src/backend/tests/unit/api/test_api_utils.py b/src/backend/tests/unit/api/test_api_utils.py
deleted file mode 100644
index f87befdaf4f6..000000000000
--- a/src/backend/tests/unit/api/test_api_utils.py
+++ /dev/null
@@ -1,45 +0,0 @@
-from unittest.mock import patch
-
-from langflow.api.utils import get_suggestion_message
-from langflow.services.database.models.flow.utils import get_outdated_components
-from langflow.utils.version import get_version_info
-
-
-def test_get_suggestion_message():
- # Test case 1: No outdated components
- assert get_suggestion_message([]) == "The flow contains no outdated components."
-
- # Test case 2: One outdated component
- assert (
- get_suggestion_message(["component1"])
- == "The flow contains 1 outdated component. We recommend updating the following component: component1."
- )
-
- # Test case 3: Multiple outdated components
- outdated_components = ["component1", "component2", "component3"]
- expected_message = (
- "The flow contains 3 outdated components. "
- "We recommend updating the following components: component1, component2, component3."
- )
- assert get_suggestion_message(outdated_components) == expected_message
-
-
-def test_get_outdated_components():
- # Mock data
- flow = "mock_flow"
- version = get_version_info()["version"]
- mock_component_versions = {
- "component1": version,
- "component2": version,
- "component3": "2.0",
- }
- # Expected result
- expected_outdated_components = ["component3"]
-
- with patch(
- "langflow.services.database.models.flow.utils.get_components_versions", return_value=mock_component_versions
- ):
- # Call the function with the mock flow
- result = get_outdated_components(flow)
- # Assert the result is as expected
- assert result == expected_outdated_components
diff --git a/src/backend/tests/unit/api/v1/test_api_key.py b/src/backend/tests/unit/api/v1/test_api_key.py
deleted file mode 100644
index 5c5c6d15c423..000000000000
--- a/src/backend/tests/unit/api/v1/test_api_key.py
+++ /dev/null
@@ -1,64 +0,0 @@
-from fastapi import status
-from httpx import AsyncClient
-
-
-async def test_get_api_keys(client: AsyncClient, logged_in_headers):
- response = await client.get("api/v1/api_key/", headers=logged_in_headers)
- result = response.json()
-
- assert response.status_code == status.HTTP_200_OK
- assert isinstance(result, dict), "The result must be a dictionary"
- assert "api_keys" in result, "The dictionary must contain a key called 'api_keys'"
- assert "user_id" in result, "The dictionary must contain a key called 'user_id'"
- assert "total_count" in result, "The dictionary must contain a key called 'total_count'"
-
-
-async def test_create_api_key_route(client: AsyncClient, logged_in_headers, active_user):
- basic_case = {
- "name": "string",
- "total_uses": 0,
- "is_active": True,
- "api_key": "string",
- "user_id": str(active_user.id),
- }
- response = await client.post("api/v1/api_key/", json=basic_case, headers=logged_in_headers)
- result = response.json()
-
- assert response.status_code == status.HTTP_200_OK
- assert isinstance(result, dict), "The result must be a dictionary"
- assert "api_key" in result, "The dictionary must contain a key called 'api_key'"
- assert "id" in result, "The dictionary must contain a key called 'id'"
- assert "is_active" in result, "The dictionary must contain a key called 'is_active'"
- assert "last_used_at" in result, "The dictionary must contain a key called 'last_used_at'"
- assert "name" in result, "The dictionary must contain a key called 'name'"
- assert "total_uses" in result, "The dictionary must contain a key called 'total_uses'"
- assert "user_id" in result, "The dictionary must contain a key called 'user_id'"
-
-
-async def test_delete_api_key_route(client: AsyncClient, logged_in_headers, active_user):
- basic_case = {
- "name": "string",
- "total_uses": 0,
- "is_active": True,
- "api_key": "string",
- "user_id": str(active_user.id),
- }
- response_ = await client.post("api/v1/api_key/", json=basic_case, headers=logged_in_headers)
- id_ = response_.json()["id"]
-
- response = await client.delete(f"api/v1/api_key/{id_}", headers=logged_in_headers)
- result = response.json()
-
- assert response.status_code == status.HTTP_200_OK
- assert isinstance(result, dict), "The result must be a dictionary"
- assert "detail" in result, "The dictionary must contain a key called 'detail'"
-
-
-async def test_save_store_api_key(client: AsyncClient, logged_in_headers):
- basic_case = {"api_key": "string"}
- response = await client.post("api/v1/api_key/store", json=basic_case, headers=logged_in_headers)
- result = response.json()
-
- assert response.status_code == status.HTTP_200_OK
- assert isinstance(result, dict), "The result must be a dictionary"
- assert "detail" in result, "The dictionary must contain a key called 'detail'"
diff --git a/src/backend/tests/unit/api/v1/test_api_schemas.py b/src/backend/tests/unit/api/v1/test_api_schemas.py
deleted file mode 100644
index 6065885b2ddb..000000000000
--- a/src/backend/tests/unit/api/v1/test_api_schemas.py
+++ /dev/null
@@ -1,317 +0,0 @@
-from datetime import datetime, timezone
-
-from hypothesis import HealthCheck, example, given, settings
-from hypothesis import strategies as st
-from langflow.api.v1.schemas import ResultDataResponse, VertexBuildResponse
-from langflow.schema.schema import OutputValue
-from langflow.serialization import serialize
-from langflow.services.tracing.schema import Log
-from pydantic import BaseModel
-
-# Use a smaller test size for hypothesis
-TEST_TEXT_LENGTH = 50
-
-
-class SampleBaseModel(BaseModel):
- name: str
- value: int
-
-
-@given(st.text(min_size=TEST_TEXT_LENGTH + 1, max_size=TEST_TEXT_LENGTH * 2))
-@settings(max_examples=10)
-def test_result_data_response_truncation(long_string):
- """Test that ResultDataResponse properly truncates long strings."""
- response = ResultDataResponse(
- results={"long_text": long_string},
- message={"text": long_string},
- )
-
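- # serialize_model() must not raise; the truncation contract is asserted
- # directly on serialize() below.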
- response.serialize_model()
- truncated = serialize(long_string, max_length=TEST_TEXT_LENGTH)
- assert len(truncated) <= TEST_TEXT_LENGTH + len("...")
- assert "..." in truncated
-
-
-@given(
- st.uuids(),
- st.datetimes(timezones=st.just(timezone.utc)),
- st.decimals(min_value=-1000, max_value=1000, places=2),
- st.text(min_size=1, max_size=10),
- st.integers(min_value=-1000, max_value=1000),
-)
-@settings(max_examples=10, suppress_health_check=[HealthCheck.too_slow])
-def test_result_data_response_special_types(uuid, dt, decimal, name, value):
- """Test that ResultDataResponse properly handles special data types."""
- test_model = SampleBaseModel(name=name, value=value)
-
- response = ResultDataResponse(
- results={
- "uuid": uuid,
- "datetime": dt,
- "decimal": decimal,
- "model": test_model,
- }
- )
-
- serialized = response.serialize_model()
- assert serialized["results"]["uuid"] == str(uuid)
- # Compare timezone-aware datetimes
- assert datetime.fromisoformat(serialized["results"]["datetime"]).astimezone(timezone.utc) == dt
- assert isinstance(serialized["results"]["decimal"], float)
- assert serialized["results"]["model"] == {"name": name, "value": value}
-
-
-@given(
- st.lists(st.text(min_size=TEST_TEXT_LENGTH + 1, max_size=TEST_TEXT_LENGTH * 2), min_size=1, max_size=2),
- st.dictionaries(
- keys=st.text(min_size=1, max_size=10),
- values=st.text(min_size=TEST_TEXT_LENGTH + 1, max_size=TEST_TEXT_LENGTH * 2),
- min_size=1,
- max_size=2,
- ),
-)
-@settings(max_examples=5, suppress_health_check=[HealthCheck.too_slow, HealthCheck.large_base_example])
-def test_result_data_response_nested_structures(long_list, long_dict):
- """Test that ResultDataResponse handles nested structures correctly."""
- nested_data = {
- "list": long_list,
- "dict": long_dict,
- }
-
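- # Constructing the response must not raise on nested data; truncation is
- # asserted on the serialize() output below.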
- ResultDataResponse(results=nested_data)
- serialized = serialize(nested_data, max_length=TEST_TEXT_LENGTH)
-
- # Check list items
- for item in serialized["list"]:
- assert len(item) <= TEST_TEXT_LENGTH + len("...")
- if len(item) > TEST_TEXT_LENGTH:
- assert "..." in item
-
- # Check dict values
- for val in serialized["dict"].values():
- assert len(val) <= TEST_TEXT_LENGTH + len("...")
- if len(val) > TEST_TEXT_LENGTH:
- assert "..." in val
-
-
-@given(
- st.dictionaries(
- keys=st.text(min_size=1, max_size=5),
- values=st.text(min_size=TEST_TEXT_LENGTH + 1, max_size=TEST_TEXT_LENGTH * 2),
- min_size=1,
- max_size=2,
- ),
-)
-@settings(max_examples=10)
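-# The @example cases below pin previously failing inputs found by Hypothesis
-# (marked .via("discovered failure")) so they are re-tested on every run.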
-@example(
- outputs_dict={"0": "000000000000000000000000000000000000000000000000000"},
-).via("discovered failure")
-@example(
- outputs_dict={"0": "000000000000000000000000000000000000000000000000000000000000000000"},
-).via("discovered failure")
-def test_result_data_response_outputs(outputs_dict):
- """Test that ResultDataResponse properly handles and truncates outputs."""
- # Create OutputValue objects with potentially long messages
- outputs = {key: OutputValue(type="text", message=value) for key, value in outputs_dict.items()}
-
- response = ResultDataResponse(outputs=outputs)
- serialized = serialize(response, max_length=TEST_TEXT_LENGTH)
-
- # Check outputs are properly serialized and truncated
- for key, value in outputs_dict.items():
- assert key in serialized["outputs"]
- serialized_output = serialized["outputs"][key]
- assert serialized_output["type"] == "text"
-
- # Check message truncation
- message = serialized_output["message"]
- assert len(message) <= TEST_TEXT_LENGTH + len("..."), f"Message length: {len(message)}"
- if len(value) > TEST_TEXT_LENGTH:
- assert "..." in message
- assert message.startswith(value[:TEST_TEXT_LENGTH])
- else:
- assert message == value
-
-
-@given(
- st.lists(
- st.text(min_size=TEST_TEXT_LENGTH + 1, max_size=TEST_TEXT_LENGTH * 2),
- min_size=1,
- max_size=3,
- ),
-)
-@settings(max_examples=10)
-@example(
- log_messages=["000000000000000000000000000000000000000000000000000"],
-).via("discovered failure")
-def test_result_data_response_logs(log_messages):
- """Test that ResultDataResponse properly handles and truncates logs."""
- # Create logs with long messages
- logs = {
- "test_node": [
- Log(
- message=msg,
- name="test_log",
- type="test",
- )
- for msg in log_messages
- ]
- }
-
- response = ResultDataResponse(logs=logs)
- serialized = serialize(response, max_length=TEST_TEXT_LENGTH)
-
- # Check logs are properly serialized and truncated
- assert "test_node" in serialized["logs"]
- serialized_logs = serialized["logs"]["test_node"]
-
- for i, log_msg in enumerate(log_messages):
- serialized_log = serialized_logs[i]
- assert serialized_log["name"] == "test_log"
- assert serialized_log["type"] == "test"
-
- # Check message truncation
- message = serialized_log["message"]
- assert len(message) <= TEST_TEXT_LENGTH + len("...")
- if len(log_msg) > TEST_TEXT_LENGTH:
- assert "..." in message
- assert message.startswith(log_msg[:TEST_TEXT_LENGTH])
- else:
- assert message == log_msg
-
-
-@given(
- st.dictionaries(
- keys=st.text(min_size=1, max_size=5),
- values=st.text(min_size=TEST_TEXT_LENGTH + 1, max_size=TEST_TEXT_LENGTH * 2),
- min_size=1,
- max_size=2,
- ),
- st.lists(
- st.text(min_size=TEST_TEXT_LENGTH + 1, max_size=TEST_TEXT_LENGTH * 2),
- min_size=1,
- max_size=3,
- ),
-)
-@settings(max_examples=10)
-@example(
- outputs_dict={"0": "000000000000000000000000000000000000000000000000000000000000000000"},
- log_messages=["000000000000000000000000000000000000000000000000000"],
-).via("discovered failure")
-@example(
- outputs_dict={"0": "000000000000000000000000000000000000000000000000000"},
- log_messages=["000000000000000000000000000000000000000000000000000"],
-).via("discovered failure")
-def test_result_data_response_combined_fields(outputs_dict, log_messages):
- """Test that ResultDataResponse properly handles all fields together."""
- # Create OutputValue objects with potentially long messages
- outputs = {key: OutputValue(type="text", message=value) for key, value in outputs_dict.items()}
-
- # Create logs with long messages
- logs = {
- "test_node": [
- Log(
- message=msg,
- name="test_log",
- type="test",
- )
- for msg in log_messages
- ]
- }
-
- response = ResultDataResponse(
- outputs=outputs,
- logs=logs,
- results={"test": "value"},
- message={"text": "test"},
- artifacts={"file": "test.txt"},
- )
- serialized = serialize(response, max_length=TEST_TEXT_LENGTH)
-
- # Check all fields are present
- assert "outputs" in serialized
- assert "logs" in serialized
- assert "results" in serialized
- assert "message" in serialized
- assert "artifacts" in serialized
-
- # Check outputs truncation
- for key, value in outputs_dict.items():
- assert key in serialized["outputs"]
- serialized_output = serialized["outputs"][key]
- assert serialized_output["type"] == "text"
-
- # Check message truncation
- message = serialized_output["message"]
- if len(value) > TEST_TEXT_LENGTH:
- assert len(message) <= TEST_TEXT_LENGTH + len("...")
- assert "..." in message
- else:
- assert message == value
-
- # Check logs truncation
- assert "test_node" in serialized["logs"]
- serialized_logs = serialized["logs"]["test_node"]
-
- for i, log_msg in enumerate(log_messages):
- serialized_log = serialized_logs[i]
- assert serialized_log["name"] == "test_log"
- assert serialized_log["type"] == "test"
-
- # Check message truncation
- message = serialized_log["message"]
- if len(log_msg) > TEST_TEXT_LENGTH:
- assert len(message) <= TEST_TEXT_LENGTH + len("...")
- assert "..." in message
- else:
- assert message == log_msg
-
-
-@given(
- st.text(min_size=1),  # build_id
- st.lists(st.text()),  # log_messages
- st.text(min_size=1),  # test_message
-)
-@settings(max_examples=10)
-def test_vertex_build_response_serialization(build_id, log_messages, test_message):
- """Test that VertexBuildResponse properly serializes its data field."""
- logs = [Log(message=msg, name="test_log", type="test") for msg in log_messages]
-
- result_data = ResultDataResponse(
- results={"test": test_message},
- message={"text": test_message},
- logs={"node1": logs},
- )
-
- response = VertexBuildResponse(
- id=build_id,
- valid=True,
- data=result_data,
- )
-
- serialized = response.model_dump()
- assert serialized["id"] == build_id
- assert serialized["valid"] is True
- assert isinstance(serialized["data"], dict)
- assert serialized["data"]["results"]["test"] == test_message
-
-
-@given(st.text(min_size=TEST_TEXT_LENGTH + 1, max_size=TEST_TEXT_LENGTH * 2))
-@settings(max_examples=10)
-def test_vertex_build_response_with_long_data(long_string):
- """Test that VertexBuildResponse properly handles long data in its data field."""
- result_data = ResultDataResponse(
- results={"long_text": long_string},
- message={"text": long_string},
- )
-
- response = VertexBuildResponse(
- id="test-id",
- valid=True,
- data=result_data,
- )
-
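- # model_dump() must handle the long payload without raising; truncation is
- # asserted via serialize() below.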
- response.model_dump()
- truncated = serialize(long_string, max_length=TEST_TEXT_LENGTH)
- assert len(truncated) <= TEST_TEXT_LENGTH + len("...")
- assert "..." in truncated
diff --git a/src/backend/tests/unit/api/v1/test_endpoints.py b/src/backend/tests/unit/api/v1/test_endpoints.py
deleted file mode 100644
index 15979b579946..000000000000
--- a/src/backend/tests/unit/api/v1/test_endpoints.py
+++ /dev/null
@@ -1,195 +0,0 @@
-import asyncio
-import inspect
-from typing import Any
-
-from anyio import Path
-from fastapi import status
-from httpx import AsyncClient
-from langflow.api.v1.schemas import CustomComponentRequest, UpdateCustomComponentRequest
-
-from lfx.components.agents.agent import AgentComponent
-from lfx.custom.utils import build_custom_component_template
-
-
-async def test_get_version(client: AsyncClient):
- response = await client.get("api/v1/version")
- result = response.json()
-
- assert response.status_code == status.HTTP_200_OK
- assert isinstance(result, dict), "The result must be a dictionary"
- assert "version" in result, "The dictionary must contain a key called 'version'"
- assert "main_version" in result, "The dictionary must contain a key called 'main_version'"
- assert "package" in result, "The dictionary must contain a key called 'package'"
-
-
-async def test_get_config(client: AsyncClient):
- response = await client.get("api/v1/config")
- result = response.json()
-
- assert response.status_code == status.HTTP_200_OK
- assert isinstance(result, dict), "The result must be a dictionary"
- assert "frontend_timeout" in result, "The dictionary must contain a key called 'frontend_timeout'"
- assert "auto_saving" in result, "The dictionary must contain a key called 'auto_saving'"
- assert "health_check_max_retries" in result, "The dictionary must contain a 'health_check_max_retries' key"
- assert "max_file_size_upload" in result, "The dictionary must contain a key called 'max_file_size_upload'"
-
-
-async def test_update_component_outputs(client: AsyncClient, logged_in_headers: dict):
- path = Path(__file__).parent.parent.parent.parent / "data" / "dynamic_output_component.py"
-
- code = await path.read_text(encoding="utf-8")
- frontend_node: dict[str, Any] = {"outputs": []}
- request = UpdateCustomComponentRequest(
- code=code,
- frontend_node=frontend_node,
- field="show_output",
- field_value=True,
- template={},
- )
- response = await client.post("api/v1/custom_component/update", json=request.model_dump(), headers=logged_in_headers)
- result = response.json()
-
- assert response.status_code == status.HTTP_200_OK
- output_names = [output["name"] for output in result["outputs"]]
- assert "tool_output" in output_names
-
-
-async def test_update_component_model_name_options(client: AsyncClient, logged_in_headers: dict):
- """Test that model_name options are updated when selecting a provider."""
- component = AgentComponent()
- component_node, _cc_instance = build_custom_component_template(
- component,
- )
-
- # Initial template with OpenAI as the provider
- template = component_node["template"]
- current_model_names = template["model_name"]["options"]
-
- # Load the AgentComponent source asynchronously by resolving the file that
- # defines the class (lfx/components/agents/agent.py).
- # inspect.getsourcefile does blocking I/O, so run it in a thread.
- agent_component_file = await asyncio.to_thread(inspect.getsourcefile, AgentComponent)
- code = await Path(agent_component_file).read_text(encoding="utf-8")
-
- # Create the request to update the component
- request = UpdateCustomComponentRequest(
- code=code,
- frontend_node=component_node,
- field="agent_llm",
- field_value="Anthropic",
- template=template,
- )
-
- # Make the request to update the component
- response = await client.post("api/v1/custom_component/update", json=request.model_dump(), headers=logged_in_headers)
- result = response.json()
-
- # Verify the response
- assert response.status_code == status.HTTP_200_OK, f"Response: {response.json()}"
- assert "template" in result
- assert "model_name" in result["template"]
- assert isinstance(result["template"]["model_name"]["options"], list)
- assert len(result["template"]["model_name"]["options"]) > 0, (
- f"Model names: {result['template']['model_name']['options']}"
- )
- assert current_model_names != result["template"]["model_name"]["options"], (
- f"Current model names: {current_model_names}, New model names: {result['template']['model_name']['options']}"
- )
- # Now test with Custom provider
- template["agent_llm"]["value"] = "Custom"
- request.field_value = "Custom"
- request.template = template
-
- response = await client.post("api/v1/custom_component/update", json=request.model_dump(), headers=logged_in_headers)
- result = response.json()
-
- # Verify that model_name is not present for Custom provider
- assert response.status_code == status.HTTP_200_OK
- assert "template" in result
- assert "model_name" not in result["template"]
-
-
-async def test_custom_component_endpoint_returns_metadata(client: AsyncClient, logged_in_headers: dict):
- """Test that the /custom_component endpoint returns metadata with module and code_hash."""
- component_code = """
-from lfx.custom import Component
-from lfx.inputs import MessageTextInput
-from lfx.template.field.base import Output
-
-class TestMetadataComponent(Component):
- display_name = "Test Metadata Component"
- description = "Test component for metadata"
-
- inputs = [
- MessageTextInput(display_name="Input", name="input_value"),
- ]
- outputs = [
- Output(display_name="Output", name="output", method="process_input"),
- ]
-
- def process_input(self) -> str:
- return f"Processed: {self.input_value}"
-"""
-
- request = CustomComponentRequest(code=component_code)
- response = await client.post("api/v1/custom_component", json=request.model_dump(), headers=logged_in_headers)
- result = response.json()
-
- assert response.status_code == status.HTTP_200_OK
- assert "data" in result
- assert "type" in result
-
- # Verify metadata is present in the response
- frontend_node = result["data"]
- assert "metadata" in frontend_node, "Frontend node should contain metadata"
-
- metadata = frontend_node["metadata"]
- assert "module" in metadata, "Metadata should contain module field"
- assert "code_hash" in metadata, "Metadata should contain code_hash field"
-
- # Verify metadata values
- assert isinstance(metadata["module"], str), "Module should be a string"
- expected_module = "custom_components.test_metadata_component"
- assert metadata["module"] == expected_module, "Module should be auto-generated from display_name"
-
- assert isinstance(metadata["code_hash"], str), "Code hash should be a string"
- assert len(metadata["code_hash"]) == 12, "Code hash should be 12 characters long"
- assert all(c in "0123456789abcdef" for c in metadata["code_hash"]), "Code hash should be hexadecimal"
-
-
-async def test_custom_component_endpoint_metadata_consistency(client: AsyncClient, logged_in_headers: dict):
- """Test that the same component code produces consistent metadata."""
- component_code = """
-from lfx.custom import Component
-from lfx.template.field.base import Output
-
-class ConsistencyTestComponent(Component):
- display_name = "Consistency Test"
-
- outputs = [
- Output(display_name="Output", name="output", method="get_result"),
- ]
-
- def get_result(self) -> str:
- return "consistent result"
-"""
-
- # Make two identical requests
- request = CustomComponentRequest(code=component_code)
-
- response1 = await client.post("api/v1/custom_component", json=request.model_dump(), headers=logged_in_headers)
- result1 = response1.json()
-
- response2 = await client.post("api/v1/custom_component", json=request.model_dump(), headers=logged_in_headers)
- result2 = response2.json()
-
- # Both requests should succeed
- assert response1.status_code == status.HTTP_200_OK
- assert response2.status_code == status.HTTP_200_OK
-
- # Metadata should be identical
- metadata1 = result1["data"]["metadata"]
- metadata2 = result2["data"]["metadata"]
-
- assert metadata1["module"] == metadata2["module"], "Module names should be consistent"
- assert metadata1["code_hash"] == metadata2["code_hash"], "Code hashes should be consistent for identical code"
diff --git a/src/backend/tests/unit/api/v1/test_files.py b/src/backend/tests/unit/api/v1/test_files.py
deleted file mode 100644
index b4d135ed6d64..000000000000
--- a/src/backend/tests/unit/api/v1/test_files.py
+++ /dev/null
@@ -1,287 +0,0 @@
-import asyncio
-import json
-import re
-import tempfile
-from contextlib import suppress
-from io import BytesIO
-from pathlib import Path
-
-# anyio.Path is used for async cleanup of the temporary database file
-import anyio
-import pytest
-from asgi_lifespan import LifespanManager
-from httpx import ASGITransport, AsyncClient
-from langflow.main import create_app
-from langflow.services.auth.utils import get_password_hash
-from langflow.services.database.models.api_key.model import ApiKey
-from langflow.services.database.models.flow.model import Flow, FlowCreate
-from langflow.services.database.models.user.model import User, UserRead
-from langflow.services.deps import get_db_service
-from sqlalchemy.orm import selectinload
-from sqlmodel import select
-
-from lfx.services.deps import session_scope
-from tests.conftest import _delete_transactions_and_vertex_builds
-
-
-@pytest.fixture(name="files_created_api_key")
-async def files_created_api_key(files_client, files_active_user): # noqa: ARG001
- hashed = get_password_hash("random_key")
- api_key = ApiKey(
- name="files_created_api_key",
- user_id=files_active_user.id,
- api_key="random_key",
- hashed_api_key=hashed,
- )
- async with session_scope() as session:
- stmt = select(ApiKey).where(ApiKey.api_key == api_key.api_key)
- if existing_api_key := (await session.exec(stmt)).first():
- yield existing_api_key
- return
- session.add(api_key)
- await session.commit()
- await session.refresh(api_key)
- yield api_key
- # Clean up
- await session.delete(api_key)
- await session.commit()
-
-
-@pytest.fixture(name="files_active_user")
-async def files_active_user(files_client): # noqa: ARG001
- db_manager = get_db_service()
- async with db_manager.with_session() as session:
- user = User(
- username="files_active_user",
- password=get_password_hash("testpassword"),
- is_active=True,
- is_superuser=False,
- )
- stmt = select(User).where(User.username == user.username)
- if active_user := (await session.exec(stmt)).first():
- user = active_user
- else:
- session.add(user)
- await session.commit()
- await session.refresh(user)
- user = UserRead.model_validate(user, from_attributes=True)
- yield user
- # Clean up
- # Now cleanup transactions, vertex_build
- async with db_manager.with_session() as session:
- user = await session.get(User, user.id, options=[selectinload(User.flows)])
- await _delete_transactions_and_vertex_builds(session, user.flows)
- await session.delete(user)
-
- await session.commit()
-
-
-@pytest.fixture(name="files_flow")
-async def files_flow(
- files_client, # noqa: ARG001
- json_flow: str,
- files_active_user,
-):
- loaded_json = json.loads(json_flow)
- flow_data = FlowCreate(name="test_flow", data=loaded_json.get("data"), user_id=files_active_user.id)
- db_manager = get_db_service()
- flow = Flow.model_validate(flow_data)
- async with db_manager.with_session() as session:
- session.add(flow)
- await session.commit()
- await session.refresh(flow)
- yield flow
- # Clean up
- await session.delete(flow)
- await session.commit()
-
-
-@pytest.fixture
-def max_file_size_upload_fixture(monkeypatch):
- monkeypatch.setenv("LANGFLOW_MAX_FILE_SIZE_UPLOAD", "1")
- yield
- monkeypatch.undo()
-
-
-@pytest.fixture
-def max_file_size_upload_10mb_fixture(monkeypatch):
- monkeypatch.setenv("LANGFLOW_MAX_FILE_SIZE_UPLOAD", "10")
- yield
- monkeypatch.undo()
-
-
-@pytest.fixture(name="files_client")
-async def files_client_fixture(
- monkeypatch,
- request,
-):
- # Set the database url to a test database
- if "noclient" in request.keywords:
- yield
- else:
-
- def init_app():
- db_dir = tempfile.mkdtemp()
- db_path = Path(db_dir) / "test.db"
- monkeypatch.setenv("LANGFLOW_DATABASE_URL", f"sqlite:///{db_path}")
- monkeypatch.setenv("LANGFLOW_AUTO_LOGIN", "false")
- from lfx.services.manager import get_service_manager
-
- get_service_manager().factories.clear()
- get_service_manager().services.clear() # Clear the services cache
- app = create_app()
- return app, db_path
-
- app, db_path = await asyncio.to_thread(init_app)
-
- async with (
- LifespanManager(app, startup_timeout=None, shutdown_timeout=None) as manager,
- AsyncClient(transport=ASGITransport(app=manager.app), base_url="http://testserver/") as client,
- ):
- yield client
- # app.dependency_overrides.clear()
- monkeypatch.undo()
- # clear the temp db
- with suppress(FileNotFoundError):
- await anyio.Path(db_path).unlink()
-
-
-async def test_upload_file(files_client, files_created_api_key, files_flow):
- headers = {"x-api-key": files_created_api_key.api_key}
-
- response = await files_client.post(
- f"api/v1/files/upload/{files_flow.id}",
- files={"file": ("test.txt", b"test content")},
- headers=headers,
- )
- assert response.status_code == 201, f"Expected 201, got {response.status_code}: {response.json()}"
-
- response_json = response.json()
- assert response_json["flowId"] == str(files_flow.id)
-
- # Check that the file_path matches the expected pattern
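- # (uploads are stored as <flow_id>/<YYYY-MM-DD_HH-MM-SS>_<original name>)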
- file_path_pattern = re.compile(rf"{files_flow.id}/\d{{4}}-\d{{2}}-\d{{2}}_\d{{2}}-\d{{2}}-\d{{2}}_test\.txt")
- assert file_path_pattern.match(response_json["file_path"])
-
-
-async def test_download_file(files_client, files_created_api_key, files_flow):
- headers = {"x-api-key": files_created_api_key.api_key}
-
- # First upload a file
- response = await files_client.post(
- f"api/v1/files/upload/{files_flow.id}",
- files={"file": ("test.txt", b"test content")},
- headers=headers,
- )
- assert response.status_code == 201
-
- # Get the actual filename from the response
- file_path = response.json()["file_path"]
- file_name = file_path.split("/")[-1]
-
- # Then try to download it
- response = await files_client.get(f"api/v1/files/download/{files_flow.id}/{file_name}", headers=headers)
- assert response.status_code == 200
- assert response.content == b"test content"
-
-
-async def test_list_files(files_client, files_created_api_key, files_flow):
- headers = {"x-api-key": files_created_api_key.api_key}
-
- # First upload a file
- response = await files_client.post(
- f"api/v1/files/upload/{files_flow.id}",
- files={"file": ("test.txt", b"test content")},
- headers=headers,
- )
- assert response.status_code == 201
-
- # Then list the files
- response = await files_client.get(f"api/v1/files/list/{files_flow.id}", headers=headers)
- assert response.status_code == 200, f"Expected 200, got {response.status_code}: {response.json()}"
- files = response.json()["files"]
- assert len(files) == 1
- assert files[0].endswith("test.txt")
-
-
-async def test_delete_file(files_client, files_created_api_key, files_flow):
- headers = {"x-api-key": files_created_api_key.api_key}
-
- response = await files_client.delete(f"api/v1/files/delete/{files_flow.id}/test.txt", headers=headers)
- assert response.status_code == 200
- assert response.json() == {"message": "File test.txt deleted successfully"}
-
-
-async def test_file_operations(files_client, files_created_api_key, files_flow):
- headers = {"x-api-key": files_created_api_key.api_key}
- flow_id = files_flow.id
- file_name = "test.txt"
- file_content = b"Hello, world!"
-
- # Step 1: Upload the file
- response = await files_client.post(
- f"api/v1/files/upload/{flow_id}",
- files={"file": (file_name, file_content)},
- headers=headers,
- )
- assert response.status_code == 201
-
- response_json = response.json()
- assert response_json["flowId"] == str(flow_id)
-
- # Check that the file_path matches the expected pattern
- file_path_pattern = re.compile(rf"{flow_id}/\d{{4}}-\d{{2}}-\d{{2}}_\d{{2}}-\d{{2}}-\d{{2}}_{file_name}")
- assert file_path_pattern.match(response_json["file_path"])
-
- # Extract the full file name with timestamp from the response
- full_file_name = response_json["file_path"].split("/")[-1]
-
- # Step 2: List files in the folder
- response = await files_client.get(f"api/v1/files/list/{files_flow.id}", headers=headers)
- assert response.status_code == 200
- assert full_file_name in response.json()["files"]
-
- # Step 3: Download the file and verify its content
- response = await files_client.get(f"api/v1/files/download/{files_flow.id}/{full_file_name}", headers=headers)
- assert response.status_code == 200
- assert response.content == file_content
- assert response.headers["content-type"] == "application/octet-stream"
-
- # Step 4: Delete the file
- response = await files_client.delete(f"api/v1/files/delete/{files_flow.id}/{full_file_name}", headers=headers)
- assert response.status_code == 200
- assert response.json() == {"message": f"File {full_file_name} deleted successfully"}
-
- # Verify that the file is indeed deleted
- response = await files_client.get(f"api/v1/files/list/{files_flow.id}", headers=headers)
- assert full_file_name not in response.json()["files"]
-
-
-@pytest.mark.usefixtures("max_file_size_upload_fixture")
-async def test_upload_file_size_limit(files_client, files_created_api_key, files_flow):
- headers = {"x-api-key": files_created_api_key.api_key}
-
- # Test file under the limit (500KB)
- small_content = b"x" * (500 * 1024)
- small_file = ("small_file.txt", small_content, "application/octet-stream")
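- # The size limit appears to be enforced via the Content-Length header, so
- # set it explicitly to the real payload size.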
- headers["Content-Length"] = str(len(small_content))
- response = await files_client.post(
- f"api/v1/files/upload/{files_flow.id}",
- files={"file": small_file},
- headers=headers,
- )
- assert response.status_code == 201, f"Expected 201, got {response.status_code}: {response.json()}"
-
- # Test file over the limit (1MB + 1KB)
- large_content = b"x" * (1024 * 1024 + 1024)
-
- bio = BytesIO(large_content)
- headers["Content-Length"] = str(len(large_content))
- response = await files_client.post(
- f"api/v1/files/upload/{files_flow.id}",
- files={"file": ("large_file.txt", bio, "application/octet-stream")},
- headers=headers,
- )
-
- assert response.status_code == 413, f"Expected 413, got {response.status_code}: {response.json()}"
- assert "Content size limit exceeded. Maximum allowed is 1MB and got 1.001MB." in response.json()["detail"]
diff --git a/src/backend/tests/unit/api/v1/test_flows.py b/src/backend/tests/unit/api/v1/test_flows.py
deleted file mode 100644
index fdd7895d9011..000000000000
--- a/src/backend/tests/unit/api/v1/test_flows.py
+++ /dev/null
@@ -1,324 +0,0 @@
-import tempfile
-import uuid
-
-from anyio import Path
-from fastapi import status
-from httpx import AsyncClient
-from langflow.services.database.models import Flow
-
-
-async def test_create_flow(client: AsyncClient, logged_in_headers):
- flow_file = Path(tempfile.tempdir) / f"{uuid.uuid4()}.json"
- try:
- basic_case = {
- "name": "string",
- "description": "string",
- "icon": "string",
- "icon_bg_color": "#ff00ff",
- "gradient": "string",
- "data": {},
- "is_component": False,
- "webhook": False,
- "endpoint_name": "string",
- "tags": ["string"],
- "folder_id": "3fa85f64-5717-4562-b3fc-2c963f66afa6",
- "fs_path": str(flow_file),
- }
- response = await client.post("api/v1/flows/", json=basic_case, headers=logged_in_headers)
- result = response.json()
-
- assert response.status_code == status.HTTP_201_CREATED
- assert isinstance(result, dict), "The result must be a dictionary"
- assert "data" in result, "The result must have a 'data' key"
- assert "description" in result, "The result must have a 'description' key"
- assert "endpoint_name" in result, "The result must have a 'endpoint_name' key"
- assert "folder_id" in result, "The result must have a 'folder_id' key"
- assert "gradient" in result, "The result must have a 'gradient' key"
- assert "icon" in result, "The result must have a 'icon' key"
- assert "icon_bg_color" in result, "The result must have a 'icon_bg_color' key"
- assert "id" in result, "The result must have a 'id' key"
- assert "is_component" in result, "The result must have a 'is_component' key"
- assert "name" in result, "The result must have a 'name' key"
- assert "tags" in result, "The result must have a 'tags' key"
- assert "updated_at" in result, "The result must have a 'updated_at' key"
- assert "user_id" in result, "The result must have a 'user_id' key"
- assert "webhook" in result, "The result must have a 'webhook' key"
-
- content = await flow_file.read_text()
- Flow.model_validate_json(content)
- finally:
- await flow_file.unlink(missing_ok=True)
-
-
-async def test_read_flows(client: AsyncClient, logged_in_headers):
- params = {
- "remove_example_flows": False,
- "components_only": False,
- "get_all": True,
- "header_flows": False,
- "page": 1,
- "size": 50,
- }
- response = await client.get("api/v1/flows/", params=params, headers=logged_in_headers)
- result = response.json()
-
- assert response.status_code == status.HTTP_200_OK
- assert isinstance(result, list), "The result must be a list"
-
-
-async def test_read_flow(client: AsyncClient, logged_in_headers):
- basic_case = {
- "name": "string",
- "description": "string",
- "icon": "string",
- "icon_bg_color": "#ff00ff",
- "gradient": "string",
- "data": {},
- "is_component": False,
- "webhook": False,
- "endpoint_name": "string",
- "tags": ["string"],
- "folder_id": "3fa85f64-5717-4562-b3fc-2c963f66afa6",
- }
- response_ = await client.post("api/v1/flows/", json=basic_case, headers=logged_in_headers)
- id_ = response_.json()["id"]
- response = await client.get(f"api/v1/flows/{id_}", headers=logged_in_headers)
- result = response.json()
-
- assert response.status_code == status.HTTP_200_OK
- assert isinstance(result, dict), "The result must be a dictionary"
- assert "data" in result, "The result must have a 'data' key"
- assert "description" in result, "The result must have a 'description' key"
- assert "endpoint_name" in result, "The result must have a 'endpoint_name' key"
- assert "folder_id" in result, "The result must have a 'folder_id' key"
- assert "gradient" in result, "The result must have a 'gradient' key"
- assert "icon" in result, "The result must have a 'icon' key"
- assert "icon_bg_color" in result, "The result must have a 'icon_bg_color' key"
- assert "id" in result, "The result must have a 'id' key"
- assert "is_component" in result, "The result must have a 'is_component' key"
- assert "name" in result, "The result must have a 'name' key"
- assert "tags" in result, "The result must have a 'tags' key"
- assert "updated_at" in result, "The result must have a 'updated_at' key"
- assert "user_id" in result, "The result must have a 'user_id' key"
- assert "webhook" in result, "The result must have a 'webhook' key"
-
-
-async def test_update_flow(client: AsyncClient, logged_in_headers):
- name = "first_name"
- updated_name = "second_name"
- basic_case = {
- "description": "string",
- "icon": "string",
- "icon_bg_color": "#ff00ff",
- "gradient": "string",
- "data": {},
- "is_component": False,
- "webhook": False,
- "endpoint_name": "string",
- "tags": ["string"],
- "folder_id": "3fa85f64-5717-4562-b3fc-2c963f66afa6",
- }
- basic_case["name"] = name
- response_ = await client.post("api/v1/flows/", json=basic_case, headers=logged_in_headers)
- id_ = response_.json()["id"]
-
- flow_file = Path(tempfile.tempdir) / f"{uuid.uuid4()!s}.json"
- basic_case["name"] = updated_name
- basic_case["fs_path"] = str(flow_file)
-
- try:
- response = await client.patch(f"api/v1/flows/{id_}", json=basic_case, headers=logged_in_headers)
- result = response.json()
-
- assert isinstance(result, dict), "The result must be a dictionary"
- assert "data" in result, "The result must have a 'data' key"
- assert "description" in result, "The result must have a 'description' key"
- assert "endpoint_name" in result, "The result must have a 'endpoint_name' key"
- assert "folder_id" in result, "The result must have a 'folder_id' key"
- assert "gradient" in result, "The result must have a 'gradient' key"
- assert "icon" in result, "The result must have a 'icon' key"
- assert "icon_bg_color" in result, "The result must have a 'icon_bg_color' key"
- assert "id" in result, "The result must have a 'id' key"
- assert "is_component" in result, "The result must have a 'is_component' key"
- assert "name" in result, "The result must have a 'name' key"
- assert "tags" in result, "The result must have a 'tags' key"
- assert "updated_at" in result, "The result must have a 'updated_at' key"
- assert "user_id" in result, "The result must have a 'user_id' key"
- assert "webhook" in result, "The result must have a 'webhook' key"
- assert result["name"] == updated_name, "The name must be updated"
-
- content = await flow_file.read_text()
- Flow.model_validate_json(content)
- finally:
- await flow_file.unlink(missing_ok=True)
-
-
-async def test_create_flows(client: AsyncClient, logged_in_headers):
- amount_flows = 10
- basic_case = {
- "description": "string",
- "icon": "string",
- "icon_bg_color": "#ff00ff",
- "gradient": "string",
- "data": {},
- "is_component": False,
- "webhook": False,
- "tags": ["string"],
- "folder_id": "3fa85f64-5717-4562-b3fc-2c963f66afa6",
- }
- cases = []
- for i in range(amount_flows):
- case = basic_case.copy()
- case["name"] = f"string_{i}"
- case["endpoint_name"] = f"string_{i}"
- cases.append(case)
-
- response = await client.post("api/v1/flows/batch/", json={"flows": cases}, headers=logged_in_headers)
- result = response.json()
-
- assert response.status_code == status.HTTP_201_CREATED
- assert isinstance(result, list), "The result must be a list"
- assert len(result) == amount_flows, "The result must have the same amount of flows"
-
-
-async def test_read_basic_examples(client: AsyncClient, logged_in_headers):
- response = await client.get("api/v1/flows/basic_examples/", headers=logged_in_headers)
- result = response.json()
-
- assert response.status_code == status.HTTP_200_OK
- assert isinstance(result, list), "The result must be a list"
- assert len(result) > 0, "The result must have at least one flow"
-
-
-async def test_read_flows_user_isolation(client: AsyncClient, logged_in_headers, active_user):
- """Test that read_flows returns only flows from the current user."""
- from uuid import uuid4
-
- from langflow.services.auth.utils import get_password_hash
- from langflow.services.database.models.user.model import User
- from langflow.services.deps import session_scope
-
- # Create a second user
- other_user_id = uuid4()
- async with session_scope() as session:
- other_user = User(
- id=other_user_id,
- username="other_test_user",
- password=get_password_hash("testpassword"),
- is_active=True,
- is_superuser=False,
- )
- session.add(other_user)
- await session.commit()
- await session.refresh(other_user)
-
- # Login as the other user to get headers
- login_data = {"username": "other_test_user", "password": "testpassword"}
- response = await client.post("api/v1/login", data=login_data)
- assert response.status_code == 200
- tokens = response.json()
- other_user_headers = {"Authorization": f"Bearer {tokens['access_token']}"}
-
- # Create flows for the first user (active_user)
- flow_user1_1 = {
- "name": "user1_flow_1",
- "description": "Flow 1 for user 1",
- "icon": "string",
- "icon_bg_color": "#ff00ff",
- "gradient": "string",
- "data": {},
- "is_component": False,
- "webhook": False,
- "endpoint_name": "user1_flow_1_endpoint",
- "tags": ["user1"],
- "folder_id": "3fa85f64-5717-4562-b3fc-2c963f66afa6",
- }
-
- flow_user1_2 = {
- "name": "user1_flow_2",
- "description": "Flow 2 for user 1",
- "icon": "string",
- "icon_bg_color": "#00ff00",
- "gradient": "string",
- "data": {},
- "is_component": False,
- "webhook": False,
- "endpoint_name": "user1_flow_2_endpoint",
- "tags": ["user1"],
- "folder_id": "3fa85f64-5717-4562-b3fc-2c963f66afa6",
- }
-
- # Create flows for the second user
- flow_user2_1 = {
- "name": "user2_flow_1",
- "description": "Flow 1 for user 2",
- "icon": "string",
- "icon_bg_color": "#0000ff",
- "gradient": "string",
- "data": {},
- "is_component": False,
- "webhook": False,
- "endpoint_name": "user2_flow_1_endpoint",
- "tags": ["user2"],
- "folder_id": "3fa85f64-5717-4562-b3fc-2c963f66afa6",
- }
-
- # Create flows using the appropriate user headers
- response1 = await client.post("api/v1/flows/", json=flow_user1_1, headers=logged_in_headers)
- assert response1.status_code == status.HTTP_201_CREATED
-
- response2 = await client.post("api/v1/flows/", json=flow_user1_2, headers=logged_in_headers)
- assert response2.status_code == status.HTTP_201_CREATED
-
- response3 = await client.post("api/v1/flows/", json=flow_user2_1, headers=other_user_headers)
- assert response3.status_code == status.HTTP_201_CREATED
-
- # Test read_flows for user 1 - should only return user 1's flows
- params = {
- "remove_example_flows": True, # Exclude example flows to focus on our test flows
- "components_only": False,
- "get_all": True,
- "header_flows": False,
- "page": 1,
- "size": 50,
- }
-
- response_user1 = await client.get("api/v1/flows/", params=params, headers=logged_in_headers)
- result_user1 = response_user1.json()
-
- assert response_user1.status_code == status.HTTP_200_OK
- assert isinstance(result_user1, list), "The result must be a list"
-
- # Verify only user 1's flows are returned
- user1_flow_names = [flow["name"] for flow in result_user1]
- assert "user1_flow_1" in user1_flow_names, "User 1's first flow should be returned"
- assert "user1_flow_2" in user1_flow_names, "User 1's second flow should be returned"
- assert "user2_flow_1" not in user1_flow_names, "User 2's flow should not be returned for user 1"
-
- # Verify all returned flows belong to user 1
- for flow in result_user1:
- assert str(flow["user_id"]) == str(active_user.id), f"Flow {flow['name']} should belong to user 1"
-
- # Test read_flows for user 2 - should only return user 2's flows
- response_user2 = await client.get("api/v1/flows/", params=params, headers=other_user_headers)
- result_user2 = response_user2.json()
-
- assert response_user2.status_code == status.HTTP_200_OK
- assert isinstance(result_user2, list), "The result must be a list"
-
- # Verify only user 2's flows are returned
- user2_flow_names = [flow["name"] for flow in result_user2]
- assert "user2_flow_1" in user2_flow_names, "User 2's flow should be returned"
- assert "user1_flow_1" not in user2_flow_names, "User 1's first flow should not be returned for user 2"
- assert "user1_flow_2" not in user2_flow_names, "User 1's second flow should not be returned for user 2"
-
- # Verify all returned flows belong to user 2
- for flow in result_user2:
- assert str(flow["user_id"]) == str(other_user_id), f"Flow {flow['name']} should belong to user 2"
-
- # Cleanup: Delete the other user
- async with session_scope() as session:
- user = await session.get(User, other_user_id)
- if user:
- await session.delete(user)
- await session.commit()
diff --git a/src/backend/tests/unit/api/v1/test_folders.py b/src/backend/tests/unit/api/v1/test_folders.py
deleted file mode 100644
index f35071f60214..000000000000
--- a/src/backend/tests/unit/api/v1/test_folders.py
+++ /dev/null
@@ -1,102 +0,0 @@
-import pytest
-from fastapi import status
-from httpx import AsyncClient
-
-
-@pytest.fixture
-def basic_case():
- return {
- "name": "New Project",
- "description": "",
- "flows_list": [],
- "components_list": [],
- }
-
-
-async def test_create_folder(client: AsyncClient, logged_in_headers, basic_case):
- # Configure client to follow redirects
- client.follow_redirects = True
-
- response = await client.post("api/v1/folders/", json=basic_case, headers=logged_in_headers)
- result = response.json()
-
- # Check that we're getting a valid response from the projects endpoint
- assert response.status_code == status.HTTP_201_CREATED
- assert isinstance(result, dict), "The result must be a dictionary"
- assert "name" in result, "The dictionary must contain a key called 'name'"
- assert "description" in result, "The dictionary must contain a key called 'description'"
- assert "id" in result, "The dictionary must contain a key called 'id'"
- assert "parent_id" in result, "The dictionary must contain a key called 'parent_id'"
-
-
-async def test_read_folders(client: AsyncClient, logged_in_headers):
- # Configure client to follow redirects
- client.follow_redirects = True
-
- response = await client.get("api/v1/folders/", headers=logged_in_headers)
- result = response.json()
-
- assert response.status_code == status.HTTP_200_OK
- assert isinstance(result, list), "The result must be a list"
- assert len(result) > 0, "The list must not be empty"
-
-
-async def test_read_folder(client: AsyncClient, logged_in_headers, basic_case):
- # Configure client to follow redirects
- client.follow_redirects = True
-
- # Create a folder first
- response_ = await client.post("api/v1/folders/", json=basic_case, headers=logged_in_headers)
- id_ = response_.json()["id"]
-
- # Get the folder
- response = await client.get(f"api/v1/folders/{id_}", headers=logged_in_headers)
- result = response.json()
-
- # The response structure may be different depending on whether pagination is enabled
- if "folder" in result:
- # Handle paginated folder response
- folder_data = result["folder"]
- assert response.status_code == status.HTTP_200_OK
- assert isinstance(folder_data, dict), "The folder data must be a dictionary"
- assert "name" in folder_data, "The dictionary must contain a key called 'name'"
- assert "description" in folder_data, "The dictionary must contain a key called 'description'"
- assert "id" in folder_data, "The dictionary must contain a key called 'id'"
- elif "project" in result:
- # Handle paginated project response
- project_data = result["project"]
- assert response.status_code == status.HTTP_200_OK
- assert isinstance(project_data, dict), "The project data must be a dictionary"
- assert "name" in project_data, "The dictionary must contain a key called 'name'"
- assert "description" in project_data, "The dictionary must contain a key called 'description'"
- assert "id" in project_data, "The dictionary must contain a key called 'id'"
- else:
- # Handle direct project response
- assert response.status_code == status.HTTP_200_OK
- assert isinstance(result, dict), "The result must be a dictionary"
- assert "name" in result, "The dictionary must contain a key called 'name'"
- assert "description" in result, "The dictionary must contain a key called 'description'"
- assert "id" in result, "The dictionary must contain a key called 'id'"
-
-
-async def test_update_folder(client: AsyncClient, logged_in_headers, basic_case):
- # Configure client to follow redirects
- client.follow_redirects = True
-
- update_case = basic_case.copy()
- update_case["name"] = "Updated Folder"
-
- # Create a folder first
- response_ = await client.post("api/v1/folders/", json=basic_case, headers=logged_in_headers)
- id_ = response_.json()["id"]
-
- # Update the folder
- response = await client.patch(f"api/v1/folders/{id_}", json=update_case, headers=logged_in_headers)
- result = response.json()
-
- assert response.status_code == status.HTTP_200_OK
- assert isinstance(result, dict), "The result must be a dictionary"
- assert "name" in result, "The dictionary must contain a key called 'name'"
- assert "description" in result, "The dictionary must contain a key called 'description'"
- assert "id" in result, "The dictionary must contain a key called 'id'"
- assert "parent_id" in result, "The dictionary must contain a key called 'parent_id'"
diff --git a/src/backend/tests/unit/api/v1/test_mcp.py b/src/backend/tests/unit/api/v1/test_mcp.py
deleted file mode 100644
index 2bea371cc1fc..000000000000
--- a/src/backend/tests/unit/api/v1/test_mcp.py
+++ /dev/null
@@ -1,113 +0,0 @@
-from unittest.mock import AsyncMock, MagicMock, patch
-from uuid import uuid4
-
-import pytest
-from fastapi import status
-from httpx import AsyncClient
-from langflow.services.auth.utils import get_password_hash
-from langflow.services.database.models.user import User
-
-# Mark all tests in this module as asyncio
-pytestmark = pytest.mark.asyncio
-
-
-@pytest.fixture
-def mock_user():
- return User(
- id=uuid4(), username="testuser", password=get_password_hash("testpassword"), is_active=True, is_superuser=False
- )
-
-
-@pytest.fixture
-def mock_mcp_server():
- with patch("langflow.api.v1.mcp.server") as mock:
- # Basic mocking for server attributes potentially accessed during endpoint calls
- mock.request_context = MagicMock()
- mock.request_context.meta = MagicMock()
- mock.request_context.meta.progressToken = "test_token"
- mock.request_context.session = AsyncMock()
- mock.create_initialization_options = MagicMock()
- mock.run = AsyncMock()
- yield mock
-
-
-@pytest.fixture
-def mock_sse_transport():
- with patch("langflow.api.v1.mcp.sse") as mock:
- mock.connect_sse = AsyncMock()
- mock.handle_post_message = AsyncMock()
- yield mock
-
-
-# Fixture to mock the current user context variable needed for auth in /sse GET
-@pytest.fixture(autouse=True)
-def mock_current_user_ctx(mock_user):
- with patch("langflow.api.v1.mcp.current_user_ctx") as mock:
- mock.get.return_value = mock_user
- mock.set = MagicMock(return_value="dummy_token") # Return a dummy token for reset
- mock.reset = MagicMock()
- yield mock
-
-
-# Test the HEAD /sse endpoint (checks server availability)
-async def test_mcp_sse_head_endpoint(client: AsyncClient):
- """Test HEAD /sse endpoint returns 200 OK."""
- response = await client.head("api/v1/mcp/sse")
- assert response.status_code == status.HTTP_200_OK
-
-
-# Test the HEAD /sse endpoint without authentication
-async def test_mcp_sse_head_endpoint_no_auth(client: AsyncClient):
- """Test HEAD /sse endpoint without authentication returns 200 OK (HEAD requests don't require auth)."""
- response = await client.head("api/v1/mcp/sse")
- assert response.status_code == status.HTTP_200_OK
-
-
-async def test_mcp_sse_get_endpoint_invalid_auth(client: AsyncClient):
- """Test GET /sse endpoint with invalid authentication returns 401."""
- headers = {"Authorization": "Bearer invalid_token"}
- response = await client.get("api/v1/mcp/sse", headers=headers)
- assert response.status_code == status.HTTP_401_UNAUTHORIZED
-
-
-# Test the POST / endpoint (handles incoming MCP messages)
-async def test_mcp_post_endpoint_success(client: AsyncClient, logged_in_headers, mock_sse_transport):
- """Test POST / endpoint successfully handles MCP messages."""
- test_message = {"type": "test", "content": "message"}
- response = await client.post("api/v1/mcp/", headers=logged_in_headers, json=test_message)
-
- assert response.status_code == status.HTTP_200_OK
- mock_sse_transport.handle_post_message.assert_called_once()
-
-
-async def test_mcp_post_endpoint_no_auth(client: AsyncClient):
- """Test POST / endpoint without authentication returns 400 (current behavior)."""
- response = await client.post("api/v1/mcp/", json={})
- assert response.status_code == status.HTTP_400_BAD_REQUEST
-
-
-async def test_mcp_post_endpoint_invalid_json(client: AsyncClient, logged_in_headers):
- """Test POST / endpoint with invalid JSON returns 400."""
- response = await client.post("api/v1/mcp/", headers=logged_in_headers, content="invalid json")
- assert response.status_code == status.HTTP_400_BAD_REQUEST
-
-
-async def test_mcp_post_endpoint_disconnect_error(client: AsyncClient, logged_in_headers, mock_sse_transport):
- """Test POST / endpoint handles disconnection errors correctly."""
- mock_sse_transport.handle_post_message.side_effect = BrokenPipeError("Simulated disconnect")
-
- response = await client.post("api/v1/mcp/", headers=logged_in_headers, json={"type": "test"})
-
- assert response.status_code == status.HTTP_404_NOT_FOUND
- assert "MCP Server disconnected" in response.json()["detail"]
- mock_sse_transport.handle_post_message.assert_called_once()
-
-
-async def test_mcp_post_endpoint_server_error(client: AsyncClient, logged_in_headers, mock_sse_transport):
- """Test POST / endpoint handles server errors correctly."""
- mock_sse_transport.handle_post_message.side_effect = Exception("Internal server error")
-
- response = await client.post("api/v1/mcp/", headers=logged_in_headers, json={"type": "test"})
-
- assert response.status_code == status.HTTP_500_INTERNAL_SERVER_ERROR
- assert "Internal server error" in response.json()["detail"]
diff --git a/src/backend/tests/unit/api/v1/test_mcp_projects.py b/src/backend/tests/unit/api/v1/test_mcp_projects.py
deleted file mode 100644
index 3c69dab9a0b4..000000000000
--- a/src/backend/tests/unit/api/v1/test_mcp_projects.py
+++ /dev/null
@@ -1,599 +0,0 @@
-from unittest.mock import AsyncMock, MagicMock, patch
-from uuid import uuid4
-
-import pytest
-from fastapi import status
-from httpx import AsyncClient
-from langflow.api.v1.mcp_projects import (
- get_project_mcp_server,
- get_project_sse,
- init_mcp_servers,
- project_mcp_servers,
- project_sse_transports,
-)
-from langflow.services.auth.utils import get_password_hash
-from langflow.services.database.models.flow import Flow
-from langflow.services.database.models.folder import Folder
-from langflow.services.database.models.user import User
-from langflow.services.deps import session_scope
-from mcp.server.sse import SseServerTransport
-
-# Mark all tests in this module as asyncio
-pytestmark = pytest.mark.asyncio
-
-
-@pytest.fixture
-def mock_project(active_user):
- """Fixture to provide a mock project linked to the active user."""
- return Folder(id=uuid4(), name="Test Project", user_id=active_user.id)
-
-
-@pytest.fixture
-def mock_flow(active_user, mock_project):
- """Fixture to provide a mock flow linked to the active user and project."""
- return Flow(
- id=uuid4(),
- name="Test Flow",
- description="Test Description",
- mcp_enabled=True,
- action_name="test_action",
- action_description="Test Action Description",
- folder_id=mock_project.id,
- user_id=active_user.id,
- )
-
-
-@pytest.fixture
-def mock_project_mcp_server():
- with patch("langflow.api.v1.mcp_projects.ProjectMCPServer") as mock:
- server_instance = MagicMock()
- server_instance.server = MagicMock()
- server_instance.server.name = "test-server"
- server_instance.server.run = AsyncMock()
- server_instance.server.create_initialization_options = MagicMock()
- mock.return_value = server_instance
- yield server_instance
-
-
-class AsyncContextManagerMock:
- """Mock class that implements async context manager protocol."""
-
- async def __aenter__(self):
- return (MagicMock(), MagicMock())
-
- async def __aexit__(self, exc_type, exc_val, exc_tb):
- pass
-
-
-@pytest.fixture
-def mock_sse_transport():
- with patch("langflow.api.v1.mcp_projects.SseServerTransport") as mock:
- transport_instance = MagicMock()
- # Create an async context manager for connect_sse
- connect_sse_mock = AsyncContextManagerMock()
- transport_instance.connect_sse = MagicMock(return_value=connect_sse_mock)
- transport_instance.handle_post_message = AsyncMock()
- mock.return_value = transport_instance
- yield transport_instance
-
-
-@pytest.fixture(autouse=True)
-def mock_current_user_ctx(active_user):
- with patch("langflow.api.v1.mcp_projects.current_user_ctx") as mock:
- mock.get.return_value = active_user
- mock.set = MagicMock(return_value="dummy_token")
- mock.reset = MagicMock()
- yield mock
-
-
-@pytest.fixture(autouse=True)
-def mock_current_project_ctx(mock_project):
- with patch("langflow.api.v1.mcp_projects.current_project_ctx") as mock:
- mock.get.return_value = mock_project.id
- mock.set = MagicMock(return_value="dummy_token")
- mock.reset = MagicMock()
- yield mock
-
-
-@pytest.fixture
-async def other_test_user():
- """Fixture for creating another test user."""
- user_id = uuid4()
- async with session_scope() as session:
- user = User(
- id=user_id,
- username="other_test_user",
- password=get_password_hash("testpassword"),
- is_active=True,
- is_superuser=False,
- )
- session.add(user)
- await session.commit()
- await session.refresh(user)
- yield user
- # Clean up
- async with session_scope() as session:
- user = await session.get(User, user_id)
- if user:
- await session.delete(user)
- await session.commit()
-
-
-@pytest.fixture
-async def other_test_project(other_test_user):
- """Fixture for creating a project for another test user."""
- project_id = uuid4()
- async with session_scope() as session:
- project = Folder(id=project_id, name="Other Test Project", user_id=other_test_user.id)
- session.add(project)
- await session.commit()
- await session.refresh(project)
- yield project
- # Clean up
- async with session_scope() as session:
- project = await session.get(Folder, project_id)
- if project:
- await session.delete(project)
- await session.commit()
-
-
-async def test_handle_project_messages_success(
- client: AsyncClient, user_test_project, mock_sse_transport, logged_in_headers
-):
- """Test successful handling of project messages."""
- response = await client.post(
- f"api/v1/mcp/project/{user_test_project.id}",
- headers=logged_in_headers,
- json={"type": "test", "content": "message"},
- )
- assert response.status_code == status.HTTP_200_OK
- mock_sse_transport.handle_post_message.assert_called_once()
-
-
-async def test_update_project_mcp_settings_invalid_json(client: AsyncClient, user_test_project, logged_in_headers):
- """Test updating MCP settings with invalid JSON."""
- response = await client.patch(
- f"api/v1/mcp/project/{user_test_project.id}", headers=logged_in_headers, json="invalid"
- )
- assert response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY
-
-
-@pytest.fixture
-async def test_flow_for_update(active_user, user_test_project):
- """Fixture to provide a real flow for testing MCP settings updates."""
- flow_id = uuid4()
- flow_data = {
- "id": flow_id,
- "name": "Test Flow For Update",
- "description": "Test flow that will be updated",
- "mcp_enabled": True,
- "action_name": "original_action",
- "action_description": "Original description",
- "folder_id": user_test_project.id,
- "user_id": active_user.id,
- }
-
- # Create the flow in the database
- async with session_scope() as session:
- flow = Flow(**flow_data)
- session.add(flow)
- await session.commit()
- await session.refresh(flow)
-
- yield flow
-
- # Clean up
- async with session_scope() as session:
- # Get the flow from the database
- flow = await session.get(Flow, flow_id)
- if flow:
- await session.delete(flow)
- await session.commit()
-
-
-async def test_update_project_mcp_settings_success(
- client: AsyncClient, user_test_project, test_flow_for_update, logged_in_headers
-):
- """Test successful update of MCP settings using real database."""
- # Create settings for updating the flow
- json_payload = {
- "settings": [
- {
- "id": str(test_flow_for_update.id),
- "action_name": "updated_action",
- "action_description": "Updated description",
- "mcp_enabled": False,
- "name": test_flow_for_update.name,
- "description": test_flow_for_update.description,
- }
- ],
- "auth_settings": {
- "auth_type": "none",
- "api_key": None,
- "iam_endpoint": None,
- "username": None,
- "password": None,
- "bearer_token": None,
- },
- }
-
- # Make the real PATCH request
- response = await client.patch(
- f"api/v1/mcp/project/{user_test_project.id}", headers=logged_in_headers, json=json_payload
- )
-
- # Assert response
- assert response.status_code == 200
- assert "Updated MCP settings for 1 flows" in response.json()["message"]
-
- # Verify the flow was actually updated in the database
- async with session_scope() as session:
- updated_flow = await session.get(Flow, test_flow_for_update.id)
- assert updated_flow is not None
- assert updated_flow.action_name == "updated_action"
- assert updated_flow.action_description == "Updated description"
- assert updated_flow.mcp_enabled is False
-
-
-async def test_update_project_mcp_settings_invalid_project(client: AsyncClient, logged_in_headers):
- """Test accessing an invalid project ID."""
- # We're using the GET endpoint since it works correctly and tests the same security constraints
- # Generate a random UUID that doesn't exist in the database
- nonexistent_project_id = uuid4()
-
- # Try to access the project
- response = await client.get(f"api/v1/mcp/project/{nonexistent_project_id}/sse", headers=logged_in_headers)
-
- # Verify the response
- assert response.status_code == 404
- assert response.json()["detail"] == "Project not found"
-
-
-async def test_update_project_mcp_settings_other_user_project(
- client: AsyncClient, other_test_project, logged_in_headers
-):
- """Test accessing a project belonging to another user."""
- # We're using the GET endpoint since it works correctly and tests the same security constraints
-
- # Try to access the other user's project using active_user's credentials
- response = await client.get(f"api/v1/mcp/project/{other_test_project.id}/sse", headers=logged_in_headers)
-
- # Verify the response
- assert response.status_code == 404
- assert response.json()["detail"] == "Project not found"
-
-
-async def test_update_project_mcp_settings_empty_settings(client: AsyncClient, user_test_project, logged_in_headers):
- """Test updating MCP settings with empty settings list."""
- # Use real database objects instead of mocks to avoid the coroutine issue
-
- # Empty settings list
- json_payload = {
- "settings": [],
- "auth_settings": {
- "auth_type": "none",
- "api_key": None,
- "iam_endpoint": None,
- "username": None,
- "password": None,
- "bearer_token": None,
- },
- }
-
- # Make the request to the actual endpoint
- response = await client.patch(
- f"api/v1/mcp/project/{user_test_project.id}", headers=logged_in_headers, json=json_payload
- )
-
- # Verify response - the real endpoint should handle empty settings correctly
- assert response.status_code == 200
- assert "Updated MCP settings for 0 flows" in response.json()["message"]
-
-
-async def test_user_can_only_access_own_projects(client: AsyncClient, other_test_project, logged_in_headers):
- """Test that a user can only access their own projects."""
- # Try to access the other user's project using first user's credentials
- response = await client.get(f"api/v1/mcp/project/{other_test_project.id}/sse", headers=logged_in_headers)
- # Should fail with 404 as first user cannot see second user's project
- assert response.status_code == 404
- assert response.json()["detail"] == "Project not found"
-
-
-async def test_user_data_isolation_with_real_db(
- client: AsyncClient, logged_in_headers, other_test_user, other_test_project
-):
- """Test that users can only access their own MCP projects using a real database session."""
- # Create a flow for the other test user in their project
- second_flow_id = uuid4()
-
- # Use real database session just for flow creation and cleanup
- async with session_scope() as session:
- # Create a flow in the other user's project
- second_flow = Flow(
- id=second_flow_id,
- name="Second User Flow",
- description="This flow belongs to the second user",
- mcp_enabled=True,
- action_name="second_user_action",
- action_description="Second user action description",
- folder_id=other_test_project.id,
- user_id=other_test_user.id,
- )
-
- # Add flow to database
- session.add(second_flow)
- await session.commit()
-
- try:
- # Test that first user can't see the project
- response = await client.get(f"api/v1/mcp/project/{other_test_project.id}/sse", headers=logged_in_headers)
-
- # Should fail with 404
- assert response.status_code == 404
- assert response.json()["detail"] == "Project not found"
-
- # First user attempts to update second user's flow settings
- # Note: We're not testing the PATCH endpoint because it has the coroutine error
- # Instead, verify permissions via the GET endpoint
-
- finally:
- # Clean up flow
- async with session_scope() as session:
- second_flow = await session.get(Flow, second_flow_id)
- if second_flow:
- await session.delete(second_flow)
- await session.commit()
-
-
-@pytest.fixture
-async def user_test_project(active_user):
- """Fixture for creating a project for the active user."""
- project_id = uuid4()
- async with session_scope() as session:
- project = Folder(id=project_id, name="User Test Project", user_id=active_user.id)
- session.add(project)
- await session.commit()
- await session.refresh(project)
- yield project
- # Clean up
- async with session_scope() as session:
- project = await session.get(Folder, project_id)
- if project:
- await session.delete(project)
- await session.commit()
-
-
-@pytest.fixture
-async def user_test_flow(active_user, user_test_project):
- """Fixture for creating a flow for the active user."""
- flow_id = uuid4()
- async with session_scope() as session:
- flow = Flow(
- id=flow_id,
- name="User Test Flow",
- description="This flow belongs to the active user",
- mcp_enabled=True,
- action_name="user_action",
- action_description="User action description",
- folder_id=user_test_project.id,
- user_id=active_user.id,
- )
- session.add(flow)
- await session.commit()
- await session.refresh(flow)
- yield flow
- # Clean up
- async with session_scope() as session:
- flow = await session.get(Flow, flow_id)
- if flow:
- await session.delete(flow)
- await session.commit()
-
-
-async def test_user_can_update_own_flow_mcp_settings(
- client: AsyncClient, logged_in_headers, user_test_project, user_test_flow
-):
- """Test that a user can update MCP settings for their own flows using real database."""
- # User attempts to update their own flow settings
- json_payload = {
- "settings": [
- {
- "id": str(user_test_flow.id),
- "action_name": "updated_user_action",
- "action_description": "Updated user action description",
- "mcp_enabled": False,
- "name": "User Test Flow",
- "description": "This flow belongs to the active user",
- }
- ],
- "auth_settings": {
- "auth_type": "none",
- "api_key": None,
- "iam_endpoint": None,
- "username": None,
- "password": None,
- "bearer_token": None,
- },
- }
-
- # Make the PATCH request to update settings
- response = await client.patch(
- f"api/v1/mcp/project/{user_test_project.id}", headers=logged_in_headers, json=json_payload
- )
-
- # Should succeed as the user owns this project and flow
- assert response.status_code == 200
- assert "Updated MCP settings for 1 flows" in response.json()["message"]
-
- # Verify the flow was actually updated in the database
- async with session_scope() as session:
- updated_flow = await session.get(Flow, user_test_flow.id)
- assert updated_flow is not None
- assert updated_flow.action_name == "updated_user_action"
- assert updated_flow.action_description == "Updated user action description"
- assert updated_flow.mcp_enabled is False
-
-
-async def test_update_project_auth_settings_encryption(
- client: AsyncClient, user_test_project, test_flow_for_update, logged_in_headers
-):
- """Test that sensitive auth_settings fields are encrypted when stored."""
- # Create settings with sensitive data
- json_payload = {
- "settings": [
- {
- "id": str(test_flow_for_update.id),
- "action_name": "test_action",
- "action_description": "Test description",
- "mcp_enabled": True,
- "name": test_flow_for_update.name,
- "description": test_flow_for_update.description,
- }
- ],
- "auth_settings": {
- "auth_type": "oauth",
- "oauth_host": "localhost",
- "oauth_port": "3000",
- "oauth_server_url": "http://localhost:3000",
- "oauth_callback_path": "/callback",
- "oauth_client_id": "test-client-id",
- "oauth_client_secret": "test-oauth-secret-value-456",
- "oauth_auth_url": "https://oauth.example.com/auth",
- "oauth_token_url": "https://oauth.example.com/token",
- "oauth_mcp_scope": "read write",
- "oauth_provider_scope": "user:email",
- },
- }
-
- # Send the update request
- response = await client.patch(
- f"/api/v1/mcp/project/{user_test_project.id}",
- json=json_payload,
- headers=logged_in_headers,
- )
- assert response.status_code == 200
-
- # Verify the sensitive data is encrypted in the database
- async with session_scope() as session:
- updated_project = await session.get(Folder, user_test_project.id)
- assert updated_project is not None
- assert updated_project.auth_settings is not None
-
- # Check that sensitive field is encrypted (not plaintext)
- stored_value = updated_project.auth_settings.get("oauth_client_secret")
- assert stored_value is not None
- assert stored_value != "test-oauth-secret-value-456" # Should be encrypted
-
- # The encrypted value should be a base64-like string (Fernet token)
- assert len(stored_value) > 50 # Encrypted values are longer
-
- # Now test that the GET endpoint returns the data (SecretStr will be masked)
- response = await client.get(
- f"/api/v1/mcp/project/{user_test_project.id}",
- headers=logged_in_headers,
- )
- assert response.status_code == 200
- data = response.json()
-
- # SecretStr fields are masked in the response for security
- assert data["auth_settings"]["oauth_client_secret"] == "**********" # noqa: S105
- assert data["auth_settings"]["oauth_client_id"] == "test-client-id"
- assert data["auth_settings"]["auth_type"] == "oauth"
-
- # Verify that decryption is working by checking the actual decrypted value in the backend
- from langflow.services.auth.mcp_encryption import decrypt_auth_settings
-
- async with session_scope() as session:
- project = await session.get(Folder, user_test_project.id)
- decrypted_settings = decrypt_auth_settings(project.auth_settings)
- assert decrypted_settings["oauth_client_secret"] == "test-oauth-secret-value-456" # noqa: S105
-
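The encryption test above asserts Fernet-style encryption at rest. A standalone round trip with the cryptography package shows the invariants it checks (key handling here is illustrative; real key management lives in langflow.services.auth.mcp_encryption):

    from cryptography.fernet import Fernet

    key = Fernet.generate_key()
    fernet = Fernet(key)
    token = fernet.encrypt(b"test-oauth-secret-value-456")
    assert token != b"test-oauth-secret-value-456"  # ciphertext, not plaintext
    assert len(token) > 50                          # Fernet tokens are long base64 strings
    assert fernet.decrypt(token) == b"test-oauth-secret-value-456"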
-
-async def test_project_sse_creation(user_test_project):
- """Test that SSE transport and MCP server are correctly created for a project."""
- # Test getting an SSE transport for the first time
- project_id = user_test_project.id
- project_id_str = str(project_id)
-
- # Ensure there's no SSE transport for this project yet
- if project_id_str in project_sse_transports:
- del project_sse_transports[project_id_str]
-
- # Get an SSE transport
- sse_transport = get_project_sse(project_id)
-
- # Verify the transport was created correctly
- assert project_id_str in project_sse_transports
- assert sse_transport is project_sse_transports[project_id_str]
- assert isinstance(sse_transport, SseServerTransport)
-
- # Test getting an MCP server for the first time
- if project_id_str in project_mcp_servers:
- del project_mcp_servers[project_id_str]
-
- # Get an MCP server
- mcp_server = get_project_mcp_server(project_id)
-
- # Verify the server was created correctly
- assert project_id_str in project_mcp_servers
- assert mcp_server is project_mcp_servers[project_id_str]
- assert mcp_server.project_id == project_id
- assert mcp_server.server.name == f"langflow-mcp-project-{project_id}"
-
- # Test that getting the same SSE transport and MCP server again returns the cached instances
- sse_transport2 = get_project_sse(project_id)
- mcp_server2 = get_project_mcp_server(project_id)
-
- assert sse_transport2 is sse_transport
- assert mcp_server2 is mcp_server
-
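test_project_sse_creation above verifies a create-once-then-cache contract keyed by the stringified project id. The pattern in miniature (names hypothetical):

    from uuid import UUID, uuid4

    _cache: dict[str, object] = {}

    def get_or_create(project_id: UUID, factory):
        key = str(project_id)
        if key not in _cache:
            _cache[key] = factory(project_id)  # first call creates
        return _cache[key]                     # later calls return the cached instance

    pid = uuid4()
    first = get_or_create(pid, lambda _pid: object())
    assert get_or_create(pid, lambda _pid: object()) is first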
-
-async def test_init_mcp_servers(user_test_project, other_test_project):
- """Test the initialization of MCP servers for all projects."""
- # Clear existing caches
- project_sse_transports.clear()
- project_mcp_servers.clear()
-
- # Test the initialization function
- await init_mcp_servers()
-
- # Verify that both test projects have SSE transports and MCP servers initialized
- project1_id = str(user_test_project.id)
- project2_id = str(other_test_project.id)
-
- # Both projects should have SSE transports created
- assert project1_id in project_sse_transports
- assert project2_id in project_sse_transports
-
- # Both projects should have MCP servers created
- assert project1_id in project_mcp_servers
- assert project2_id in project_mcp_servers
-
- # Verify the correct configuration
- assert isinstance(project_sse_transports[project1_id], SseServerTransport)
- assert isinstance(project_sse_transports[project2_id], SseServerTransport)
-
- assert project_mcp_servers[project1_id].project_id == user_test_project.id
- assert project_mcp_servers[project2_id].project_id == other_test_project.id
-
-
-async def test_init_mcp_servers_error_handling():
- """Test that init_mcp_servers handles errors correctly and continues initialization."""
- # Clear existing caches
- project_sse_transports.clear()
- project_mcp_servers.clear()
-
- # Create a mock to simulate an error when initializing one project
- original_get_project_sse = get_project_sse
-
- def mock_get_project_sse(project_id):
- # Raise an exception for the first project only
- if not project_sse_transports: # Only for the first project
- msg = "Test error for project SSE creation"
- raise ValueError(msg)
- return original_get_project_sse(project_id)
-
- # Apply the patch
- with patch("langflow.api.v1.mcp_projects.get_project_sse", side_effect=mock_get_project_sse):
- # This should not raise any exception, as the error should be caught
- await init_mcp_servers()
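The final test requires that one failing project must not abort initialization of the rest. A minimal log-and-continue sketch of that contract (illustrative; the real loop is init_mcp_servers):

    import logging

    def init_all(project_ids, init_one) -> None:
        for pid in project_ids:
            try:
                init_one(pid)
            except Exception:  # one bad project must not stop the others
                logging.exception("MCP init failed for project %s", pid)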
diff --git a/src/backend/tests/unit/api/v1/test_projects.py b/src/backend/tests/unit/api/v1/test_projects.py
deleted file mode 100644
index db9400dcaf88..000000000000
--- a/src/backend/tests/unit/api/v1/test_projects.py
+++ /dev/null
@@ -1,171 +0,0 @@
-from uuid import uuid4
-
-import pytest
-from fastapi import status
-from httpx import AsyncClient
-
-CYRILLIC_NAME = "Новый проект"
-CYRILLIC_DESC = "Описание проекта с кириллицей" # noqa: RUF001
-
-
-@pytest.fixture
-def basic_case():
- return {
- "name": "New Project",
- "description": "",
- "flows_list": [],
- "components_list": [],
- }
-
-
-async def test_create_project(client: AsyncClient, logged_in_headers, basic_case):
- response = await client.post("api/v1/projects/", json=basic_case, headers=logged_in_headers)
- result = response.json()
-
- assert response.status_code == status.HTTP_201_CREATED
- assert isinstance(result, dict), "The result must be a dictionary"
- assert "name" in result, "The dictionary must contain a key called 'name'"
- assert "description" in result, "The dictionary must contain a key called 'description'"
- assert "id" in result, "The dictionary must contain a key called 'id'"
- assert "parent_id" in result, "The dictionary must contain a key called 'parent_id'"
-
-
-async def test_read_projects(client: AsyncClient, logged_in_headers):
- response = await client.get("api/v1/projects/", headers=logged_in_headers)
- result = response.json()
-
- assert response.status_code == status.HTTP_200_OK
- assert isinstance(result, list), "The result must be a list"
- assert len(result) > 0, "The list must not be empty"
-
-
-async def test_read_project(client: AsyncClient, logged_in_headers, basic_case):
- # Create a project first
- response_ = await client.post("api/v1/projects/", json=basic_case, headers=logged_in_headers)
- id_ = response_.json()["id"]
-
- # Get the project
- response = await client.get(f"api/v1/projects/{id_}", headers=logged_in_headers)
- result = response.json()
-
- # The response structure may be different depending on whether pagination is enabled
- if isinstance(result, dict) and "folder" in result:
- # Handle paginated folder response
- folder_data = result["folder"]
- assert response.status_code == status.HTTP_200_OK
- assert isinstance(folder_data, dict), "The folder data must be a dictionary"
- assert "name" in folder_data, "The dictionary must contain a key called 'name'"
- assert "description" in folder_data, "The dictionary must contain a key called 'description'"
- assert "id" in folder_data, "The dictionary must contain a key called 'id'"
- elif isinstance(result, dict) and "project" in result:
- # Handle paginated project response
- project_data = result["project"]
- assert response.status_code == status.HTTP_200_OK
- assert isinstance(project_data, dict), "The project data must be a dictionary"
- assert "name" in project_data, "The dictionary must contain a key called 'name'"
- assert "description" in project_data, "The dictionary must contain a key called 'description'"
- assert "id" in project_data, "The dictionary must contain a key called 'id'"
- else:
- # Handle direct project response
- assert response.status_code == status.HTTP_200_OK
- assert isinstance(result, dict), "The result must be a dictionary"
- assert "name" in result, "The dictionary must contain a key called 'name'"
- assert "description" in result, "The dictionary must contain a key called 'description'"
- assert "id" in result, "The dictionary must contain a key called 'id'"
-
-
-async def test_update_project(client: AsyncClient, logged_in_headers, basic_case):
- update_case = basic_case.copy()
- update_case["name"] = "Updated Project"
-
- # Create a project first
- response_ = await client.post("api/v1/projects/", json=basic_case, headers=logged_in_headers)
- id_ = response_.json()["id"]
-
- # Update the project
- response = await client.patch(f"api/v1/projects/{id_}", json=update_case, headers=logged_in_headers)
- result = response.json()
-
- assert response.status_code == status.HTTP_200_OK
- assert isinstance(result, dict), "The result must be a dictionary"
- assert "name" in result, "The dictionary must contain a key called 'name'"
- assert "description" in result, "The dictionary must contain a key called 'description'"
- assert "id" in result, "The dictionary must contain a key called 'id'"
- assert "parent_id" in result, "The dictionary must contain a key called 'parent_id'"
-
-
-async def test_create_project_validation_error(client: AsyncClient, logged_in_headers, basic_case):
- invalid_case = basic_case.copy()
- invalid_case.pop("name")
- response = await client.post("api/v1/projects/", json=invalid_case, headers=logged_in_headers)
- assert response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY
-
-
-async def test_delete_project_then_404(client: AsyncClient, logged_in_headers, basic_case):
- create_resp = await client.post("api/v1/projects/", json=basic_case, headers=logged_in_headers)
- proj_id = create_resp.json()["id"]
-
- del_resp = await client.delete(f"api/v1/projects/{proj_id}", headers=logged_in_headers)
- assert del_resp.status_code == status.HTTP_204_NO_CONTENT
-
- get_resp = await client.get(f"api/v1/projects/{proj_id}", headers=logged_in_headers)
- assert get_resp.status_code == status.HTTP_404_NOT_FOUND
-
-
-async def test_read_project_invalid_id_format(client: AsyncClient, logged_in_headers):
- bad_id = "not-a-uuid"
- response = await client.get(f"api/v1/projects/{bad_id}", headers=logged_in_headers)
- assert response.status_code in (status.HTTP_422_UNPROCESSABLE_ENTITY, status.HTTP_400_BAD_REQUEST)
-
-
-async def test_read_projects_pagination(client: AsyncClient, logged_in_headers):
- response = await client.get("api/v1/projects/?limit=1&offset=0", headers=logged_in_headers)
- assert response.status_code == status.HTTP_200_OK
- result = response.json()
- if isinstance(result, list):
- assert len(result) <= 1
- else:
- assert "items" in result
- assert result.get("limit") == 1
-
-
-async def test_read_projects_empty(client: AsyncClient, logged_in_headers):
- # Ensure the DB is clean by fetching with a random header that forces per-test transactional isolation
- random_headers = {**logged_in_headers, "X-Transaction-ID": str(uuid4())}
- response = await client.get("api/v1/projects/", headers=random_headers)
- if response.json():
- pytest.skip("Pre-existing projects found; skipping empty list assertion")
- assert response.status_code == status.HTTP_200_OK
- assert response.json() == []
-
-
-async def test_create_and_read_project_cyrillic(client: AsyncClient, logged_in_headers):
- """Ensure that the API correctly handles non-ASCII (Cyrillic) characters during project creation and retrieval."""
- payload = {
- "name": CYRILLIC_NAME,
- "description": CYRILLIC_DESC,
- "flows_list": [],
- "components_list": [],
- }
-
- # Create the project with Cyrillic characters
- create_resp = await client.post("api/v1/projects/", json=payload, headers=logged_in_headers)
- assert create_resp.status_code == status.HTTP_201_CREATED
- created = create_resp.json()
- assert created["name"] == CYRILLIC_NAME
- assert created["description"] == CYRILLIC_DESC
- proj_id = created["id"]
-
- # Fetch the project back to verify round-trip UTF-8 integrity
- get_resp = await client.get(f"api/v1/projects/{proj_id}", headers=logged_in_headers)
- assert get_resp.status_code == status.HTTP_200_OK
- fetched = get_resp.json()
-
- # Handle potential pagination/envelope variations already seen in other tests
- if isinstance(fetched, dict) and "folder" in fetched:
- fetched = fetched["folder"]
- elif isinstance(fetched, dict) and "project" in fetched:
- fetched = fetched["project"]
-
- assert fetched["name"] == CYRILLIC_NAME
- assert fetched["description"] == CYRILLIC_DESC
diff --git a/src/backend/tests/unit/api/v1/test_rename_flow_to_save.py b/src/backend/tests/unit/api/v1/test_rename_flow_to_save.py
deleted file mode 100644
index 35b63d933788..000000000000
--- a/src/backend/tests/unit/api/v1/test_rename_flow_to_save.py
+++ /dev/null
@@ -1,161 +0,0 @@
-import pytest
-from fastapi import status
-from httpx import AsyncClient
-
-
-@pytest.mark.asyncio
-async def test_duplicate_flow_name_basic(client: AsyncClient, logged_in_headers):
- """Test that duplicate flow names get numbered correctly."""
- base_flow = {
- "name": "Test Flow",
- "description": "Test flow description",
- "data": {},
- "is_component": False,
- }
-
- # Create first flow
- response1 = await client.post("api/v1/flows/", json=base_flow, headers=logged_in_headers)
- assert response1.status_code == status.HTTP_201_CREATED
- assert response1.json()["name"] == "Test Flow"
-
- # Create second flow with same name - should become "Test Flow (1)"
- response2 = await client.post("api/v1/flows/", json=base_flow, headers=logged_in_headers)
- assert response2.status_code == status.HTTP_201_CREATED
- assert response2.json()["name"] == "Test Flow (1)"
-
- # Create third flow with same name - should become "Test Flow (2)"
- response3 = await client.post("api/v1/flows/", json=base_flow, headers=logged_in_headers)
- assert response3.status_code == status.HTTP_201_CREATED
- assert response3.json()["name"] == "Test Flow (2)"
-
-
-@pytest.mark.asyncio
-async def test_duplicate_flow_name_with_numbers_in_original(client: AsyncClient, logged_in_headers):
- """Test duplication of flows with numbers in their original name."""
- base_flow = {
- "name": "Untitled document (7)",
- "description": "Test flow description",
- "data": {},
- "is_component": False,
- }
-
- # Create first flow
- response1 = await client.post("api/v1/flows/", json=base_flow, headers=logged_in_headers)
- assert response1.status_code == status.HTTP_201_CREATED
- assert response1.json()["name"] == "Untitled document (7)"
-
- # Create second flow with same name - should become "Untitled document (7) (1)"
- response2 = await client.post("api/v1/flows/", json=base_flow, headers=logged_in_headers)
- assert response2.status_code == status.HTTP_201_CREATED
- assert response2.json()["name"] == "Untitled document (7) (1)"
-
- # Create third flow with same name - should become "Untitled document (7) (2)"
- response3 = await client.post("api/v1/flows/", json=base_flow, headers=logged_in_headers)
- assert response3.status_code == status.HTTP_201_CREATED
- assert response3.json()["name"] == "Untitled document (7) (2)"
-
-
-@pytest.mark.asyncio
-async def test_duplicate_flow_name_with_non_numeric_suffixes(client: AsyncClient, logged_in_headers):
- """Test that non-numeric suffixes don't interfere with numbering."""
- base_flow = {
- "name": "My Flow",
- "description": "Test flow description",
- "data": {},
- "is_component": False,
- }
-
- # Create first flow
- response1 = await client.post("api/v1/flows/", json=base_flow, headers=logged_in_headers)
- assert response1.status_code == status.HTTP_201_CREATED
- assert response1.json()["name"] == "My Flow"
-
- # Create flow with non-numeric suffix
- backup_flow = base_flow.copy()
- backup_flow["name"] = "My Flow (Backup)"
- response2 = await client.post("api/v1/flows/", json=backup_flow, headers=logged_in_headers)
- assert response2.status_code == status.HTTP_201_CREATED
- assert response2.json()["name"] == "My Flow (Backup)"
-
- # Create another flow with original name - should become "My Flow (1)"
- # because "My Flow (Backup)" doesn't match the numeric pattern
- response3 = await client.post("api/v1/flows/", json=base_flow, headers=logged_in_headers)
- assert response3.status_code == status.HTTP_201_CREATED
- assert response3.json()["name"] == "My Flow (1)"
-
-
-@pytest.mark.asyncio
-async def test_duplicate_flow_name_gaps_in_numbering(client: AsyncClient, logged_in_headers):
- """Test that gaps in numbering are handled correctly (uses max + 1)."""
- base_flow = {
- "name": "Gapped Flow",
- "description": "Test flow description",
- "data": {},
- "is_component": False,
- }
-
- # Create original flow
- response1 = await client.post("api/v1/flows/", json=base_flow, headers=logged_in_headers)
- assert response1.status_code == status.HTTP_201_CREATED
- assert response1.json()["name"] == "Gapped Flow"
-
- # Create numbered flows with gaps
- numbered_flows = [
- "Gapped Flow (1)",
- "Gapped Flow (5)", # Gap: 2, 3, 4 missing
- "Gapped Flow (7)", # Gap: 6 missing
- ]
-
- for flow_name in numbered_flows:
- numbered_flow = base_flow.copy()
- numbered_flow["name"] = flow_name
- response = await client.post("api/v1/flows/", json=numbered_flow, headers=logged_in_headers)
- assert response.status_code == status.HTTP_201_CREATED
- assert response.json()["name"] == flow_name
-
- # Create another duplicate - should use max(1,5,7) + 1 = 8
- response_final = await client.post("api/v1/flows/", json=base_flow, headers=logged_in_headers)
- assert response_final.status_code == status.HTTP_201_CREATED
- assert response_final.json()["name"] == "Gapped Flow (8)"
-
-
-@pytest.mark.asyncio
-async def test_duplicate_flow_name_special_characters(client: AsyncClient, logged_in_headers):
- """Test duplication with special characters in flow names."""
- base_flow = {
- "name": "Flow-with_special@chars!",
- "description": "Test flow description",
- "data": {},
- "is_component": False,
- }
-
- # Create first flow
- response1 = await client.post("api/v1/flows/", json=base_flow, headers=logged_in_headers)
- assert response1.status_code == status.HTTP_201_CREATED
- assert response1.json()["name"] == "Flow-with_special@chars!"
-
- # Create duplicate - should properly escape special characters in regex
- response2 = await client.post("api/v1/flows/", json=base_flow, headers=logged_in_headers)
- assert response2.status_code == status.HTTP_201_CREATED
- assert response2.json()["name"] == "Flow-with_special@chars! (1)"
-
-
-@pytest.mark.asyncio
-async def test_duplicate_flow_name_regex_patterns(client: AsyncClient, logged_in_headers):
- """Test that flow names containing regex special characters work correctly."""
- base_flow = {
- "name": "Flow (.*) [test]",
- "description": "Test flow description",
- "data": {},
- "is_component": False,
- }
-
- # Create first flow
- response1 = await client.post("api/v1/flows/", json=base_flow, headers=logged_in_headers)
- assert response1.status_code == status.HTTP_201_CREATED
- assert response1.json()["name"] == "Flow (.*) [test]"
-
- # Create duplicate
- response2 = await client.post("api/v1/flows/", json=base_flow, headers=logged_in_headers)
- assert response2.status_code == status.HTTP_201_CREATED
- assert response2.json()["name"] == "Flow (.*) [test] (1)"
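Taken together, these tests specify the renaming rule: escape the base name, collect existing " (n)" suffixes, ignore gaps, and append max + 1. A condensed sketch of that rule (hypothetical helper, not the deleted implementation):

    import re

    def next_unique_name(name: str, existing: set[str]) -> str:
        if name not in existing:
            return name
        # re.escape handles names like "Flow (.*) [test]"
        pattern = re.compile(rf"^{re.escape(name)} \((\d+)\)$")
        numbers = [int(m.group(1)) for candidate in existing if (m := pattern.match(candidate))]
        return f"{name} ({max(numbers, default=0) + 1})"

    existing = {"Gapped Flow", "Gapped Flow (1)", "Gapped Flow (5)", "Gapped Flow (7)"}
    assert next_unique_name("Gapped Flow", existing) == "Gapped Flow (8)"
    assert next_unique_name("My Flow", {"My Flow", "My Flow (Backup)"}) == "My Flow (1)"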
diff --git a/src/backend/tests/unit/api/v1/test_schemas.py b/src/backend/tests/unit/api/v1/test_schemas.py
deleted file mode 100644
index 077af749fa5d..000000000000
--- a/src/backend/tests/unit/api/v1/test_schemas.py
+++ /dev/null
@@ -1,82 +0,0 @@
-import pytest
-from langflow.api.v1.schemas import VertexBuildResponse
-from langflow.serialization.constants import MAX_ITEMS_LENGTH
-
-expected_keys_vertex_build_response = {
- "id",
- "inactivated_vertices",
- "next_vertices_ids",
- "top_level_vertices",
- "valid",
- "params",
- "data",
- "timestamp",
-}
-expected_keys_data = {
- "results",
- "outputs",
- "logs",
- "message",
- "artifacts",
- "timedelta",
- "duration",
- "used_frozen_result",
-}
-expected_keys_outputs = {"message", "type"}
-
-
-def assert_vertex_response_structure(result):
- assert set(result.keys()).issuperset(expected_keys_vertex_build_response)
- assert set(result["data"].keys()).issuperset(expected_keys_data)
- assert set(result["data"]["outputs"]["dataframe"].keys()).issuperset(expected_keys_outputs)
-
-
-def test_vertex_response_structure_without_truncate():
- message = [{"key": 1, "value": 1}]
- output_value = {"message": message, "type": "bar"}
- data = {
- "data": {"outputs": {"dataframe": output_value}, "type": "foo"},
- "valid": True,
- }
-
- result = VertexBuildResponse(**data).model_dump()
-
- assert_vertex_response_structure(result)
- assert len(result["data"]["outputs"]["dataframe"]["message"]) == len(message)
-
-
-def test_vertex_response_structure_when_truncate_applies():
- message = [{"key": i, "value": i} for i in range(MAX_ITEMS_LENGTH + 5000)]
- output_value = {"message": message, "type": "bar"}
- data = {
- "data": {"outputs": {"dataframe": output_value}, "type": "foo"},
- "valid": True,
- }
-
- result = VertexBuildResponse(**data).model_dump()
-
- assert_vertex_response_structure(result)
- assert len(result["data"]["outputs"]["dataframe"]["message"]) == MAX_ITEMS_LENGTH + 1
-
-
-@pytest.mark.parametrize(
- ("size", "expected"),
- [
- (0, 0),
- (8, 8),
- (MAX_ITEMS_LENGTH, MAX_ITEMS_LENGTH),
- (MAX_ITEMS_LENGTH + 1000, MAX_ITEMS_LENGTH + 1),
- (MAX_ITEMS_LENGTH + 2000, MAX_ITEMS_LENGTH + 1),
- (MAX_ITEMS_LENGTH + 3000, MAX_ITEMS_LENGTH + 1),
- ],
-)
-def test_vertex_response_truncation_behavior(size, expected):
- message = [{"key": i, "value": i} for i in range(size)]
- output_value = {"message": message, "type": "bar"}
- data = {
- "data": {"outputs": {"dataframe": output_value}, "type": "foo"},
- "valid": True,
- }
-
- result = VertexBuildResponse(**data).model_dump()
- assert len(result["data"]["outputs"]["dataframe"]["message"]) == expected
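The parametrized cases encode a single rule: lists at or under MAX_ITEMS_LENGTH pass through, anything longer comes back with exactly MAX_ITEMS_LENGTH + 1 entries (the extra slot presumably carries a truncation marker, which these tests do not inspect). The rule in isolation:

    def truncated_length(n_items: int, max_items: int) -> int:
        return n_items if n_items <= max_items else max_items + 1

    assert truncated_length(8, 100) == 8
    assert truncated_length(1100, 100) == 101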
diff --git a/src/backend/tests/unit/api/v1/test_starter_projects.py b/src/backend/tests/unit/api/v1/test_starter_projects.py
deleted file mode 100644
index 640aa88a05c7..000000000000
--- a/src/backend/tests/unit/api/v1/test_starter_projects.py
+++ /dev/null
@@ -1,10 +0,0 @@
-from fastapi import status
-from httpx import AsyncClient
-
-
-async def test_get_starter_projects(client: AsyncClient, logged_in_headers):
- response = await client.get("api/v1/starter-projects/", headers=logged_in_headers)
- result = response.json()
-
- assert response.status_code == status.HTTP_200_OK, response.text
- assert isinstance(result, list), "The result must be a list"
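Outside the test harness, the same request is a short httpx call (base_url and token here are placeholders):

    import httpx

    async def fetch_starter_projects(base_url: str, token: str) -> list:
        headers = {"Authorization": f"Bearer {token}"}
        async with httpx.AsyncClient(base_url=base_url) as client:
            response = await client.get("api/v1/starter-projects/", headers=headers)
            response.raise_for_status()
            return response.json()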
diff --git a/src/backend/tests/unit/api/v1/test_store.py b/src/backend/tests/unit/api/v1/test_store.py
deleted file mode 100644
index ca17dc761815..000000000000
--- a/src/backend/tests/unit/api/v1/test_store.py
+++ /dev/null
@@ -1,12 +0,0 @@
-from fastapi import status
-from httpx import AsyncClient
-
-
-async def test_check_if_store_is_enabled(client: AsyncClient):
- response = await client.get("api/v1/store/check/")
- result = response.json()
-
- assert response.status_code == status.HTTP_200_OK
- assert isinstance(result, dict), "The variable must be a dictionary"
- assert "enabled" in result, "The dictionary must contain a key called 'enabled'"
- assert isinstance(result["enabled"], bool), "There must be a boolean value for the key 'enabled' in the dictionary"
diff --git a/src/backend/tests/unit/api/v1/test_users.py b/src/backend/tests/unit/api/v1/test_users.py
deleted file mode 100644
index 491d9f08ae35..000000000000
--- a/src/backend/tests/unit/api/v1/test_users.py
+++ /dev/null
@@ -1,98 +0,0 @@
-from fastapi import status
-from httpx import AsyncClient
-
-
-async def test_add_user(client: AsyncClient):
- basic_case = {"username": "string", "password": "string"}
- response = await client.post("api/v1/users/", json=basic_case)
- result = response.json()
-
- assert response.status_code == status.HTTP_201_CREATED
- assert isinstance(result, dict), "The result must be a dictionary"
- assert "id" in result, "The result must have an 'id' key"
- assert "is_active" in result, "The result must have an 'is_active' key"
- assert "is_superuser" in result, "The result must have an 'is_superuser' key"
- assert "last_login_at" in result, "The result must have an 'last_login_at' key"
- assert "profile_image" in result, "The result must have an 'profile_image' key"
- assert "store_api_key" in result, "The result must have an 'store_api_key' key"
- assert "updated_at" in result, "The result must have an 'updated_at' key"
- assert "username" in result, "The result must have an 'username' key"
-
-
-async def test_read_current_user(client: AsyncClient, logged_in_headers):
- response = await client.get("api/v1/users/whoami", headers=logged_in_headers)
- result = response.json()
-
- assert response.status_code == status.HTTP_200_OK
- assert isinstance(result, dict), "The result must be a dictionary"
- assert "id" in result, "The result must have an 'id' key"
- assert "is_active" in result, "The result must have an 'is_active' key"
- assert "is_superuser" in result, "The result must have an 'is_superuser' key"
- assert "last_login_at" in result, "The result must have an 'last_login_at' key"
- assert "profile_image" in result, "The result must have an 'profile_image' key"
- assert "store_api_key" in result, "The result must have an 'store_api_key' key"
- assert "updated_at" in result, "The result must have an 'updated_at' key"
- assert "username" in result, "The result must have an 'username' key"
-
-
-async def test_read_all_users(client: AsyncClient, logged_in_headers_super_user):
- response = await client.get("api/v1/users/", headers=logged_in_headers_super_user)
- result = response.json()
-
- assert response.status_code == status.HTTP_200_OK
- assert isinstance(result, dict), "The result must be a dictionary"
- assert "total_count" in result, "The result must have an 'total_count' key"
- assert "users" in result, "The result must have an 'users' key"
-
-
-async def test_patch_user(client: AsyncClient, logged_in_headers_super_user):
- name = "string"
- updated_name = "string2"
- basic_case = {"username": name, "password": "string"}
- response_ = await client.post("api/v1/users/", json=basic_case)
- id_ = response_.json()["id"]
- basic_case["username"] = updated_name
- response = await client.patch(f"api/v1/users/{id_}", json=basic_case, headers=logged_in_headers_super_user)
- result = response.json()
-
- assert response.status_code == status.HTTP_200_OK
- assert isinstance(result, dict), "The result must be a dictionary"
- assert "id" in result, "The result must have an 'id' key"
- assert "is_active" in result, "The result must have an 'is_active' key"
- assert "is_superuser" in result, "The result must have an 'is_superuser' key"
- assert "last_login_at" in result, "The result must have an 'last_login_at' key"
- assert "profile_image" in result, "The result must have an 'profile_image' key"
- assert "store_api_key" in result, "The result must have an 'store_api_key' key"
- assert "updated_at" in result, "The result must have an 'updated_at' key"
- assert "username" in result, "The result must have an 'username' key"
- assert result["username"] == updated_name, "The username must be updated"
-
-
-async def test_reset_password(client: AsyncClient, logged_in_headers, active_user):
- id_ = str(active_user.id)
- basic_case = {"username": "string", "password": "new_password"}
- response = await client.patch(f"api/v1/users/{id_}/reset-password", json=basic_case, headers=logged_in_headers)
- result = response.json()
-
- assert response.status_code == status.HTTP_200_OK
- assert isinstance(result, dict), "The result must be a dictionary"
- assert "id" in result, "The result must have an 'id' key"
- assert "is_active" in result, "The result must have an 'is_active' key"
- assert "is_superuser" in result, "The result must have an 'is_superuser' key"
- assert "last_login_at" in result, "The result must have an 'last_login_at' key"
- assert "profile_image" in result, "The result must have an 'profile_image' key"
- assert "store_api_key" in result, "The result must have an 'store_api_key' key"
- assert "updated_at" in result, "The result must have an 'updated_at' key"
- assert "username" in result, "The result must have an 'username' key"
-
-
-async def test_delete_user(client: AsyncClient, logged_in_headers_super_user):
- basic_case = {"username": "string", "password": "string"}
- response_ = await client.post("api/v1/users/", json=basic_case)
- id_ = response_.json()["id"]
- response = await client.delete(f"api/v1/users/{id_}", headers=logged_in_headers_super_user)
- result = response.json()
-
- assert response.status_code == status.HTTP_200_OK
- assert isinstance(result, dict), "The result must be a dictionary"
- assert "detail" in result, "The result must have an 'detail' key"
diff --git a/src/backend/tests/unit/api/v1/test_validate.py b/src/backend/tests/unit/api/v1/test_validate.py
deleted file mode 100644
index 3957f65d19e7..000000000000
--- a/src/backend/tests/unit/api/v1/test_validate.py
+++ /dev/null
@@ -1,79 +0,0 @@
-import pytest
-from fastapi import status
-from httpx import AsyncClient
-
-
-@pytest.mark.usefixtures("active_user")
-async def test_post_validate_code(client: AsyncClient, logged_in_headers):
- good_code = """
-from pprint import pprint
-var = {"a": 1, "b": 2}
-pprint(var)
- """
- response = await client.post("api/v1/validate/code", json={"code": good_code}, headers=logged_in_headers)
- result = response.json()
-
- assert response.status_code == status.HTTP_200_OK
- assert isinstance(result, dict), "The result must be a dictionary"
- assert "imports" in result, "The result must have an 'imports' key"
- assert "function" in result, "The result must have a 'function' key"
-
-
-@pytest.mark.usefixtures("active_user")
-async def test_post_validate_prompt(client: AsyncClient, logged_in_headers):
- basic_case = {
- "name": "string",
- "template": "string",
- "custom_fields": {},
- "frontend_node": {
- "template": {},
- "description": "string",
- "icon": "string",
- "is_input": True,
- "is_output": True,
- "is_composition": True,
- "base_classes": ["string"],
- "name": "",
- "display_name": "",
- "documentation": "",
- "custom_fields": {},
- "output_types": [],
- "full_path": "string",
- "pinned": False,
- "conditional_paths": [],
- "frozen": False,
- "outputs": [],
- "field_order": [],
- "beta": False,
- "minimized": False,
- "error": "string",
- "edited": False,
- "metadata": {},
- },
- }
- response = await client.post("api/v1/validate/prompt", json=basic_case, headers=logged_in_headers)
- result = response.json()
-
- assert response.status_code == status.HTTP_200_OK
- assert isinstance(result, dict), "The result must be a dictionary"
- assert "frontend_node" in result, "The result must have a 'frontend_node' key"
- assert "input_variables" in result, "The result must have an 'input_variables' key"
-
-
-@pytest.mark.usefixtures("active_user")
-async def test_post_validate_prompt_with_invalid_data(client: AsyncClient, logged_in_headers):
- invalid_case = {
- "name": "string",
- # Missing required fields
- "frontend_node": {"template": {}, "is_input": True},
- }
- response = await client.post("api/v1/validate/prompt", json=invalid_case, headers=logged_in_headers)
- assert response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY
-
-
-async def test_post_validate_code_with_unauthenticated_user(client: AsyncClient):
- code = """
- print("Hello World")
- """
- response = await client.post("api/v1/validate/code", json={"code": code}, headers={"Authorization": "Bearer fake"})
- assert response.status_code == status.HTTP_401_UNAUTHORIZED
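The "imports"/"function" keys asserted in test_post_validate_code suggest the endpoint statically inspects the submitted snippet. A hypothetical sketch of such an inspection using only the standard library (the real validator's response shape may differ):

    import ast

    def inspect_code(code: str) -> dict:
        tree = ast.parse(code)
        imports = [alias.name for node in ast.walk(tree)
                   if isinstance(node, (ast.Import, ast.ImportFrom))
                   for alias in node.names]
        functions = [node.name for node in ast.walk(tree)
                     if isinstance(node, ast.FunctionDef)]
        return {"imports": imports, "function": functions}

    assert inspect_code("from pprint import pprint\npprint({'a': 1})")["imports"] == ["pprint"]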
diff --git a/src/backend/tests/unit/api/v1/test_variable.py b/src/backend/tests/unit/api/v1/test_variable.py
deleted file mode 100644
index 8d8f81fde295..000000000000
--- a/src/backend/tests/unit/api/v1/test_variable.py
+++ /dev/null
@@ -1,221 +0,0 @@
-from unittest import mock
-from uuid import uuid4
-
-import pytest
-from fastapi import HTTPException, status
-from httpx import AsyncClient
-from langflow.services.variable.constants import CREDENTIAL_TYPE, GENERIC_TYPE
-
-
-@pytest.fixture
-def generic_variable():
- return {
- "name": "test_generic_variable",
- "value": "test_generic_value",
- "type": GENERIC_TYPE,
- "default_fields": ["test_field"],
- }
-
-
-@pytest.fixture
-def credential_variable():
- return {
- "name": "test_credential_variable",
- "value": "test_credential_value",
- "type": CREDENTIAL_TYPE,
- "default_fields": ["test_field"],
- }
-
-
-@pytest.mark.usefixtures("active_user")
-async def test_create_variable(client: AsyncClient, generic_variable, logged_in_headers):
- response = await client.post("api/v1/variables/", json=generic_variable, headers=logged_in_headers)
- result = response.json()
-
- assert response.status_code == status.HTTP_201_CREATED
- assert generic_variable["name"] == result["name"]
- assert generic_variable["type"] == result["type"]
- assert generic_variable["default_fields"] == result["default_fields"]
- assert "id" in result
- assert generic_variable["value"] != result["value"] # Value should be encrypted
-
-
-@pytest.mark.usefixtures("active_user")
-async def test_create_variable__variable_name_already_exists(client: AsyncClient, generic_variable, logged_in_headers):
- await client.post("api/v1/variables/", json=generic_variable, headers=logged_in_headers)
-
- response = await client.post("api/v1/variables/", json=generic_variable, headers=logged_in_headers)
- result = response.json()
-
- assert response.status_code == status.HTTP_400_BAD_REQUEST
- assert "Variable name already exists" in result["detail"]
-
-
-@pytest.mark.usefixtures("active_user")
-async def test_create_variable__variable_name_and_value_cannot_be_empty(
- client: AsyncClient, generic_variable, logged_in_headers
-):
- generic_variable["name"] = ""
- generic_variable["value"] = ""
-
- response = await client.post("api/v1/variables/", json=generic_variable, headers=logged_in_headers)
- result = response.json()
-
- assert response.status_code == status.HTTP_400_BAD_REQUEST
- assert "Variable name and value cannot be empty" in result["detail"]
-
-
-@pytest.mark.usefixtures("active_user")
-async def test_create_variable__variable_name_cannot_be_empty(client: AsyncClient, generic_variable, logged_in_headers):
- generic_variable["name"] = ""
-
- response = await client.post("api/v1/variables/", json=generic_variable, headers=logged_in_headers)
- result = response.json()
-
- assert response.status_code == status.HTTP_400_BAD_REQUEST
- assert "Variable name cannot be empty" in result["detail"]
-
-
-@pytest.mark.usefixtures("active_user")
-async def test_create_variable__variable_value_cannot_be_empty(
- client: AsyncClient, generic_variable, logged_in_headers
-):
- generic_variable["value"] = ""
-
- response = await client.post("api/v1/variables/", json=generic_variable, headers=logged_in_headers)
- result = response.json()
-
- assert response.status_code == status.HTTP_400_BAD_REQUEST
- assert "Variable value cannot be empty" in result["detail"]
-
-
-@pytest.mark.usefixtures("active_user")
-async def test_create_variable__httpexception(client: AsyncClient, generic_variable, logged_in_headers):
- status_code = 418
- generic_message = "I'm a teapot"
-
- with mock.patch("langflow.services.auth.utils.encrypt_api_key") as m:
- m.side_effect = HTTPException(status_code=status_code, detail=generic_message)
- response = await client.post("api/v1/variables/", json=generic_variable, headers=logged_in_headers)
- result = response.json()
-
- assert response.status_code == status.HTTP_418_IM_A_TEAPOT
- assert generic_message in result["detail"]
-
-
-@pytest.mark.usefixtures("active_user")
-async def test_create_variable__exception(client: AsyncClient, generic_variable, logged_in_headers):
- generic_message = "Generic error message"
-
- with mock.patch("langflow.services.auth.utils.encrypt_api_key") as m:
- m.side_effect = Exception(generic_message)
- response = await client.post("api/v1/variables/", json=generic_variable, headers=logged_in_headers)
- result = response.json()
-
- assert response.status_code == status.HTTP_500_INTERNAL_SERVER_ERROR
- assert generic_message in result["detail"]
-
-
-@pytest.mark.usefixtures("active_user")
-async def test_read_variables(client: AsyncClient, generic_variable, credential_variable, logged_in_headers):
- # Create a generic variable
- create_response = await client.post("api/v1/variables/", json=generic_variable, headers=logged_in_headers)
- assert create_response.status_code == status.HTTP_201_CREATED
-
- # Create a credential variable
- create_response = await client.post("api/v1/variables/", json=credential_variable, headers=logged_in_headers)
- assert create_response.status_code == status.HTTP_201_CREATED
-
- response = await client.get("api/v1/variables/", headers=logged_in_headers)
- result = response.json()
-
- assert response.status_code == status.HTTP_200_OK
-
- # Check both variables exist
- assert generic_variable["name"] in [r["name"] for r in result]
- assert credential_variable["name"] in [r["name"] for r in result]
-
- # Assert that credential values remain encrypted while generic values are decrypted
- credential_vars = [r for r in result if r["type"] == CREDENTIAL_TYPE]
- generic_vars = [r for r in result if r["type"] == GENERIC_TYPE]
-
- # Credential variables should remain encrypted (value should be different)
- assert all(c["value"] != credential_variable["value"] for c in credential_vars)
-
- # Generic variables should be decrypted (value should match original)
- assert all(g["value"] == generic_variable["value"] for g in generic_vars)
-
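-
-# Reference sketch (an assumption, not the endpoint's actual code) of the read
-# rule the assertions above encode: credential values are returned still
-# encrypted, while generic values are decrypted before being returned.
-def _expected_read_value(variable: dict, decrypt) -> str:
- if variable["type"] == CREDENTIAL_TYPE:
- return variable["value"] # ciphertext passes through unchanged
- return decrypt(variable["value"]) # generic values come back in plaintext
-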
-
-@pytest.mark.usefixtures("active_user")
-async def test_read_variables__empty(client: AsyncClient, logged_in_headers):
- all_variables = await client.get("api/v1/variables/", headers=logged_in_headers)
- all_variables = all_variables.json()
- for variable in all_variables:
- await client.delete(f"api/v1/variables/{variable.get('id')}", headers=logged_in_headers)
-
- response = await client.get("api/v1/variables/", headers=logged_in_headers)
- result = response.json()
-
- assert response.status_code == status.HTTP_200_OK
- assert result == []
-
-
-@pytest.mark.usefixtures("active_user")
-async def test_read_variables__exception(client: AsyncClient, logged_in_headers):
- generic_message = "Generic error message"
-
- with mock.patch("sqlmodel.Session.exec") as m:
- m.side_effect = Exception(generic_message)
- with pytest.raises(Exception, match=generic_message):
- await client.get("api/v1/variables/", headers=logged_in_headers)
-
-
-@pytest.mark.usefixtures("active_user")
-async def test_update_variable(client: AsyncClient, generic_variable, logged_in_headers):
- saved = await client.post("api/v1/variables/", json=generic_variable, headers=logged_in_headers)
- saved = saved.json()
- generic_variable["id"] = saved.get("id")
- generic_variable["name"] = "new_name"
- generic_variable["value"] = "new_value"
- generic_variable["type"] = GENERIC_TYPE # Ensure we keep it as GENERIC_TYPE
- generic_variable["default_fields"] = ["new_field"]
-
- response = await client.patch(
- f"api/v1/variables/{saved.get('id')}", json=generic_variable, headers=logged_in_headers
- )
- result = response.json()
-
- assert response.status_code == status.HTTP_200_OK
- assert saved["id"] == result["id"]
- assert saved["name"] != result["name"]
- assert saved["default_fields"] != result["default_fields"]
-
-
-@pytest.mark.usefixtures("active_user")
-async def test_update_variable__exception(client: AsyncClient, generic_variable, logged_in_headers):
- wrong_id = uuid4()
- generic_variable["id"] = str(wrong_id)
-
- response = await client.patch(f"api/v1/variables/{wrong_id}", json=generic_variable, headers=logged_in_headers)
- result = response.json()
-
- assert response.status_code == status.HTTP_404_NOT_FOUND
- assert "Variable not found" in result["detail"]
-
-
-@pytest.mark.usefixtures("active_user")
-async def test_delete_variable(client: AsyncClient, generic_variable, logged_in_headers):
- response = await client.post("api/v1/variables/", json=generic_variable, headers=logged_in_headers)
- saved = response.json()
- response = await client.delete(f"api/v1/variables/{saved.get('id')}", headers=logged_in_headers)
-
- assert response.status_code == status.HTTP_204_NO_CONTENT
-
-
-@pytest.mark.usefixtures("active_user")
-async def test_delete_variable__exception(client: AsyncClient, logged_in_headers):
- wrong_id = uuid4()
-
- response = await client.delete(f"api/v1/variables/{wrong_id}", headers=logged_in_headers)
-
- assert response.status_code == status.HTTP_500_INTERNAL_SERVER_ERROR
diff --git a/src/backend/tests/unit/api/v2/test_files.py b/src/backend/tests/unit/api/v2/test_files.py
deleted file mode 100644
index b2c370ff6be9..000000000000
--- a/src/backend/tests/unit/api/v2/test_files.py
+++ /dev/null
@@ -1,522 +0,0 @@
-import asyncio
-import tempfile
-from contextlib import suppress
-from pathlib import Path
-
-# anyio is used below for async cleanup of the temp database file
-import anyio
-import pytest
-from asgi_lifespan import LifespanManager
-from httpx import ASGITransport, AsyncClient
-from langflow.main import create_app
-from langflow.services.auth.utils import get_password_hash
-from langflow.services.database.models.api_key.model import ApiKey
-from langflow.services.database.models.user.model import User, UserRead
-from langflow.services.deps import get_db_service
-from sqlalchemy.orm import selectinload
-from sqlmodel import select
-
-from lfx.services.deps import session_scope
-from tests.conftest import _delete_transactions_and_vertex_builds
-
-
-@pytest.fixture(name="files_created_api_key")
-async def files_created_api_key(files_client, files_active_user): # noqa: ARG001
- hashed = get_password_hash("random_key")
- api_key = ApiKey(
- name="files_created_api_key",
- user_id=files_active_user.id,
- api_key="random_key",
- hashed_api_key=hashed,
- )
- async with session_scope() as session:
- stmt = select(ApiKey).where(ApiKey.api_key == api_key.api_key)
- if existing_api_key := (await session.exec(stmt)).first():
- yield existing_api_key
- return
- session.add(api_key)
- await session.commit()
- await session.refresh(api_key)
- yield api_key
- # Clean up
- await session.delete(api_key)
- await session.commit()
-
-
-@pytest.fixture(name="files_active_user")
-async def files_active_user(files_client): # noqa: ARG001
- db_manager = get_db_service()
- async with db_manager.with_session() as session:
- user = User(
- username="files_active_user",
- password=get_password_hash("testpassword"),
- is_active=True,
- is_superuser=False,
- )
- stmt = select(User).where(User.username == user.username)
- if active_user := (await session.exec(stmt)).first():
- user = active_user
- else:
- session.add(user)
- await session.commit()
- await session.refresh(user)
- user = UserRead.model_validate(user, from_attributes=True)
- yield user
- # Clean up
- # Now cleanup transactions, vertex_build
- async with db_manager.with_session() as session:
- user = await session.get(User, user.id, options=[selectinload(User.flows)])
- await _delete_transactions_and_vertex_builds(session, user.flows)
- await session.delete(user)
-
- await session.commit()
-
-
-@pytest.fixture
-def max_file_size_upload_fixture(monkeypatch):
- monkeypatch.setenv("LANGFLOW_MAX_FILE_SIZE_UPLOAD", "1")
- yield
- monkeypatch.undo()
-
-
-@pytest.fixture
-def max_file_size_upload_10mb_fixture(monkeypatch):
- monkeypatch.setenv("LANGFLOW_MAX_FILE_SIZE_UPLOAD", "10")
- yield
- monkeypatch.undo()
-
-
-@pytest.fixture(name="files_client")
-async def files_client_fixture(
- monkeypatch,
- request,
-):
- # Set the database url to a test database
- if "noclient" in request.keywords:
- yield
- else:
-
- def init_app():
- db_dir = tempfile.mkdtemp()
- db_path = Path(db_dir) / "test.db"
- monkeypatch.setenv("LANGFLOW_DATABASE_URL", f"sqlite:///{db_path}")
- monkeypatch.setenv("LANGFLOW_AUTO_LOGIN", "false")
- from lfx.services.manager import get_service_manager
-
- get_service_manager().factories.clear()
- get_service_manager().services.clear() # Clear the services cache
- app = create_app()
- return app, db_path
-
- app, db_path = await asyncio.to_thread(init_app)
-
- async with (
- LifespanManager(app, startup_timeout=None, shutdown_timeout=None) as manager,
- AsyncClient(transport=ASGITransport(app=manager.app), base_url="http://testserver/") as client,
- ):
- yield client
- # app.dependency_overrides.clear()
- monkeypatch.undo()
- # clear the temp db
- with suppress(FileNotFoundError):
- await anyio.Path(db_path).unlink()
-
-
-async def test_upload_file(files_client, files_created_api_key):
- headers = {"x-api-key": files_created_api_key.api_key}
-
- response = await files_client.post(
- "api/v2/files",
- files={"file": ("test.txt", b"test content")},
- headers=headers,
- )
- assert response.status_code == 201, f"Expected 201, got {response.status_code}: {response.json()}"
-
- response_json = response.json()
- assert "id" in response_json
-
-
-async def test_download_file(files_client, files_created_api_key):
- headers = {"x-api-key": files_created_api_key.api_key}
-
- # First upload a file
- response = await files_client.post(
- "api/v2/files",
- files={"file": ("test.txt", b"test content")},
- headers=headers,
- )
- assert response.status_code == 201
- upload_response = response.json()
-
- # Then try to download it
- response = await files_client.get(f"api/v2/files/{upload_response['id']}", headers=headers)
-
- assert response.status_code == 200
- assert response.content == b"test content"
-
-
-async def test_list_files(files_client, files_created_api_key):
- headers = {"x-api-key": files_created_api_key.api_key}
-
- # First upload a file
- response = await files_client.post(
- "api/v2/files",
- files={"file": ("test.txt", b"test content")},
- headers=headers,
- )
- assert response.status_code == 201
-
- # Then list the files
- response = await files_client.get("api/v2/files", headers=headers)
- assert response.status_code == 200
- files = response.json()
- assert len(files) == 1
-
-
-async def test_delete_file(files_client, files_created_api_key):
- headers = {"x-api-key": files_created_api_key.api_key}
-
- response = await files_client.post(
- "api/v2/files",
- files={"file": ("test.txt", b"test content")},
- headers=headers,
- )
- assert response.status_code == 201
- upload_response = response.json()
-
- response = await files_client.delete(f"api/v2/files/{upload_response['id']}", headers=headers)
- assert response.status_code == 200
- assert response.json() == {"detail": "File test deleted successfully"}
-
-
-async def test_edit_file(files_client, files_created_api_key):
- headers = {"x-api-key": files_created_api_key.api_key}
-
- # First upload a file
- response = await files_client.post(
- "api/v2/files",
- files={"file": ("test.txt", b"test content")},
- headers=headers,
- )
- assert response.status_code == 201
- upload_response = response.json()
-
- # Then list the files
- response = await files_client.put(f"api/v2/files/{upload_response['id']}?name=potato.txt", headers=headers)
- assert response.status_code == 200
- file = response.json()
- assert file["name"] == "potato.txt"
-
-
-async def test_upload_list_delete_and_validate_files(files_client, files_created_api_key):
- headers = {"x-api-key": files_created_api_key.api_key}
-
- # Upload two files
- response1 = await files_client.post(
- "api/v2/files",
- files={"file": ("file1.txt", b"content1")},
- headers=headers,
- )
- assert response1.status_code == 201
- file1 = response1.json()
-
- response2 = await files_client.post(
- "api/v2/files",
- files={"file": ("file2.txt", b"content2")},
- headers=headers,
- )
- assert response2.status_code == 201
- file2 = response2.json()
-
- # List files and validate both are present
- response = await files_client.get("api/v2/files", headers=headers)
- assert response.status_code == 200
- files = response.json()
- file_names = [f["name"] for f in files]
- file_ids = [f["id"] for f in files]
- assert file1["name"] in file_names
- assert file2["name"] in file_names
- assert file1["id"] in file_ids
- assert file2["id"] in file_ids
- assert len(files) == 2
-
- # Delete one file
- response = await files_client.delete(f"api/v2/files/{file1['id']}", headers=headers)
- assert response.status_code == 200
-
- # List files again and validate only the other remains
- response = await files_client.get("api/v2/files", headers=headers)
- assert response.status_code == 200
- files = response.json()
- file_names = [f["name"] for f in files]
- file_ids = [f["id"] for f in files]
- assert file1["name"] not in file_names
- assert file1["id"] not in file_ids
- assert file2["name"] in file_names
- assert file2["id"] in file_ids
- assert len(files) == 1
-
-
-async def test_upload_files_with_same_name_creates_unique_names(files_client, files_created_api_key):
- """Test that uploading files with the same name creates unique filenames."""
- headers = {"x-api-key": files_created_api_key.api_key}
-
- # Upload first file
- response1 = await files_client.post(
- "api/v2/files",
- files={"file": ("duplicate.txt", b"content1")},
- headers=headers,
- )
- assert response1.status_code == 201
- file1 = response1.json()
- assert file1["name"] == "duplicate"
-
- # Upload second file with same name
- response2 = await files_client.post(
- "api/v2/files",
- files={"file": ("duplicate.txt", b"content2")},
- headers=headers,
- )
- assert response2.status_code == 201
- file2 = response2.json()
- assert file2["name"] == "duplicate (1)"
-
- # Upload third file with same name
- response3 = await files_client.post(
- "api/v2/files",
- files={"file": ("duplicate.txt", b"content3")},
- headers=headers,
- )
- assert response3.status_code == 201
- file3 = response3.json()
- assert file3["name"] == "duplicate (2)"
-
- # Verify all files can be downloaded with their unique content
- download1 = await files_client.get(f"api/v2/files/{file1['id']}", headers=headers)
- assert download1.status_code == 200
- assert download1.content == b"content1"
-
- download2 = await files_client.get(f"api/v2/files/{file2['id']}", headers=headers)
- assert download2.status_code == 200
- assert download2.content == b"content2"
-
- download3 = await files_client.get(f"api/v2/files/{file3['id']}", headers=headers)
- assert download3.status_code == 200
- assert download3.content == b"content3"
-
- # List files and verify all three are present with unique names
- response = await files_client.get("api/v2/files", headers=headers)
- assert response.status_code == 200
- files = response.json()
- file_names = [f["name"] for f in files]
- assert "duplicate" in file_names
- assert "duplicate (1)" in file_names
- assert "duplicate (2)" in file_names
- assert len(files) == 3
-
-
-async def test_upload_files_without_extension_creates_unique_names(files_client, files_created_api_key):
- """Test that uploading files without extensions also creates unique filenames."""
- headers = {"x-api-key": files_created_api_key.api_key}
-
- # Upload first file without extension
- response1 = await files_client.post(
- "api/v2/files",
- files={"file": ("noextension", b"content1")},
- headers=headers,
- )
- assert response1.status_code == 201
- file1 = response1.json()
- assert file1["name"] == "noextension"
-
- # Upload second file with same name
- response2 = await files_client.post(
- "api/v2/files",
- files={"file": ("noextension", b"content2")},
- headers=headers,
- )
- assert response2.status_code == 201
- file2 = response2.json()
- assert file2["name"] == "noextension (1)"
-
- # Verify both files can be downloaded
- download1 = await files_client.get(f"api/v2/files/{file1['id']}", headers=headers)
- assert download1.status_code == 200
- assert download1.content == b"content1"
-
- download2 = await files_client.get(f"api/v2/files/{file2['id']}", headers=headers)
- assert download2.status_code == 200
- assert download2.content == b"content2"
-
-
-async def test_upload_files_with_different_extensions_same_name(files_client, files_created_api_key):
- """Test that files with same root name but different extensions create unique names."""
- headers = {"x-api-key": files_created_api_key.api_key}
-
- # Upload file with .txt extension
- response1 = await files_client.post(
- "api/v2/files",
- files={"file": ("document.txt", b"text content")},
- headers=headers,
- )
- assert response1.status_code == 201
- file1 = response1.json()
- assert file1["name"] == "document"
-
- # Upload file with .md extension and same root name
- response2 = await files_client.post(
- "api/v2/files",
- files={"file": ("document.md", b"markdown content")},
- headers=headers,
- )
- assert response2.status_code == 201
- file2 = response2.json()
- assert file2["name"] == "document (1)"
-
- # Upload another .txt file with same root name
- response3 = await files_client.post(
- "api/v2/files",
- files={"file": ("document.txt", b"more text content")},
- headers=headers,
- )
- assert response3.status_code == 201
- file3 = response3.json()
- assert file3["name"] == "document (2)"
-
-
-async def test_mcp_servers_file_replacement(files_client, files_created_api_key):
- """Test that _mcp_servers file gets replaced instead of creating unique names."""
- headers = {"x-api-key": files_created_api_key.api_key}
-
- # Upload first _mcp_servers file
- response1 = await files_client.post(
- "api/v2/files",
- files={"file": ("_mcp_servers.json", b'{"servers": ["server1"]}')},
- headers=headers,
- )
- assert response1.status_code == 201
- file1 = response1.json()
- assert file1["name"] == "_mcp_servers"
-
- # Upload second _mcp_servers file - should replace the first one
- response2 = await files_client.post(
- "api/v2/files",
- files={"file": ("_mcp_servers.json", b'{"servers": ["server2"]}')},
- headers=headers,
- )
- assert response2.status_code == 201
- file2 = response2.json()
- assert file2["name"] == "_mcp_servers"
-
- # Note: _mcp_servers files are filtered out from the regular file list
- # This is expected behavior since they're managed separately
- response = await files_client.get("api/v2/files", headers=headers)
- assert response.status_code == 200
- files = response.json()
- mcp_files = [f for f in files if f["name"] == "_mcp_servers"]
- assert len(mcp_files) == 0 # MCP servers files are filtered out from regular list
-
- # Verify the second file can be downloaded with the updated content
- download2 = await files_client.get(f"api/v2/files/{file2['id']}", headers=headers)
- assert download2.status_code == 200
- assert download2.content == b'{"servers": ["server2"]}'
-
- # Verify the first file no longer exists (should return 404)
- download1 = await files_client.get(f"api/v2/files/{file1['id']}", headers=headers)
- assert download1.status_code == 404
-
- # Verify the file IDs are different (new file replaced old one)
- assert file1["id"] != file2["id"]
-
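-# Behavior inferred from the test above (not a statement of the implementation):
-# _mcp_servers acts as a per-user singleton. A re-upload deletes the old record
-# and its storage object, writes the new bytes under the same user-scoped path,
-# and the list endpoint filters the file out of regular results.
-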
-
-async def test_unique_filename_counter_handles_gaps(files_client, files_created_api_key):
- """Test that the unique filename counter properly handles gaps in sequence."""
- headers = {"x-api-key": files_created_api_key.api_key}
-
- # Upload original file
- response1 = await files_client.post(
- "api/v2/files",
- files={"file": ("gaptest.txt", b"content1")},
- headers=headers,
- )
- assert response1.status_code == 201
- file1 = response1.json()
- assert file1["name"] == "gaptest"
-
- # Upload second file (should be gaptest (1))
- response2 = await files_client.post(
- "api/v2/files",
- files={"file": ("gaptest.txt", b"content2")},
- headers=headers,
- )
- assert response2.status_code == 201
- file2 = response2.json()
- assert file2["name"] == "gaptest (1)"
-
- # Upload third file (should be gaptest (2))
- response3 = await files_client.post(
- "api/v2/files",
- files={"file": ("gaptest.txt", b"content3")},
- headers=headers,
- )
- assert response3.status_code == 201
- file3 = response3.json()
- assert file3["name"] == "gaptest (2)"
-
- # Delete the middle file (gaptest (1))
- delete_response = await files_client.delete(f"api/v2/files/{file2['id']}", headers=headers)
- assert delete_response.status_code == 200
-
- # Upload another file - should be gaptest (3), not filling the gap
- response4 = await files_client.post(
- "api/v2/files",
- files={"file": ("gaptest.txt", b"content4")},
- headers=headers,
- )
- assert response4.status_code == 201
- file4 = response4.json()
- assert file4["name"] == "gaptest (3)"
-
- # Verify final state
- response = await files_client.get("api/v2/files", headers=headers)
- assert response.status_code == 200
- files = response.json()
- file_names = [f["name"] for f in files]
- assert "gaptest" in file_names
- assert "gaptest (1)" not in file_names # deleted
- assert "gaptest (2)" in file_names
- assert "gaptest (3)" in file_names
- assert len([name for name in file_names if name.startswith("gaptest")]) == 3
-
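-# Sketch (an assumption, not the server's implementation) of the naming rule the
-# duplicate-name and gap tests above pin down: drop the extension, and when the
-# root name is taken, continue past the highest existing " (n)" suffix rather
-# than refilling gaps left by deletions.
-def _next_unique_name(filename: str, existing: set[str]) -> str:
- root = filename.rsplit(".", 1)[0] if "." in filename else filename
- if root not in existing:
- return root
- suffixes = [
- int(name[len(root) + 2 : -1])
- for name in existing
- if name.startswith(f"{root} (") and name.endswith(")")
- ]
- return f"{root} ({max(suffixes, default=0) + 1})"
-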
-
-async def test_unique_filename_path_storage(files_client, files_created_api_key):
- """Test that files with unique names are stored with unique paths."""
- headers = {"x-api-key": files_created_api_key.api_key}
-
- # Upload two files with same name
- response1 = await files_client.post(
- "api/v2/files",
- files={"file": ("pathtest.txt", b"path content 1")},
- headers=headers,
- )
- assert response1.status_code == 201
- file1 = response1.json()
-
- response2 = await files_client.post(
- "api/v2/files",
- files={"file": ("pathtest.txt", b"path content 2")},
- headers=headers,
- )
- assert response2.status_code == 201
- file2 = response2.json()
-
- # Verify both files have different paths and can be downloaded independently
- assert file1["path"] != file2["path"]
-
- download1 = await files_client.get(f"api/v2/files/{file1['id']}", headers=headers)
- assert download1.status_code == 200
- assert download1.content == b"path content 1"
-
- download2 = await files_client.get(f"api/v2/files/{file2['id']}", headers=headers)
- assert download2.status_code == 200
- assert download2.content == b"path content 2"
diff --git a/src/backend/tests/unit/api/v2/test_mcp_servers_file.py b/src/backend/tests/unit/api/v2/test_mcp_servers_file.py
deleted file mode 100644
index 9a68bcbd3fd4..000000000000
--- a/src/backend/tests/unit/api/v2/test_mcp_servers_file.py
+++ /dev/null
@@ -1,164 +0,0 @@
-import io
-import uuid
-from types import SimpleNamespace
-from typing import TYPE_CHECKING
-
-import pytest
-from fastapi import UploadFile
-
-# Module under test
-from langflow.api.v2.files import MCP_SERVERS_FILE, upload_user_file
-
-if TYPE_CHECKING:
- from langflow.services.database.models.file.model import File as UserFile
-
-
-class FakeStorageService: # Minimal stub for storage interactions
- def __init__(self):
- # key -> bytes
- self._store: dict[str, bytes] = {}
-
- async def save_file(self, flow_id: str, file_name: str, data: bytes):
- self._store[f"{flow_id}/{file_name}"] = data
-
- async def get_file_size(self, flow_id: str, file_name: str):
- return len(self._store.get(f"{flow_id}/{file_name}", b""))
-
- async def delete_file(self, flow_id: str, file_name: str):
- self._store.pop(f"{flow_id}/{file_name}", None)
-
-
-class FakeResult: # Helper for Session.exec return
- def __init__(self, rows):
- self._rows = rows
-
- def first(self):
- return self._rows[0] if self._rows else None
-
- def all(self):
- return list(self._rows)
-
-
-class FakeSession: # Minimal async session stub
- def __init__(self):
- self._db: dict[str, UserFile] = {}
-
- async def exec(self, stmt):
- # Extremely simplified: detect by LIKE pattern or equality against name/id
- # We only support SELECT UserFile WHERE name LIKE pattern or id equality
- stmt_str = str(stmt)
- if "user_file.name" in stmt_str:
- # LIKE pattern extraction
- pattern = stmt_str.split("like(")[-1].split(")")[0].strip('"%')
- rows = [f for name, f in self._db.items() if name.startswith(pattern)]
- return FakeResult(rows)
- if "user_file.id" in stmt_str:
- uid = stmt_str.split("=")[-1].strip().strip("'")
- rows = [f for f in self._db.values() if str(f.id) == uid]
- return FakeResult(rows)
- return FakeResult([])
-
- def add(self, obj):
- self._db[obj.name] = obj
-
- async def commit(self):
- return
-
- async def refresh(self, obj): # noqa: ARG002
- return
-
- async def delete(self, obj):
- self._db.pop(obj.name, None)
-
- async def flush(self):
- return
-
-
-class FakeSettings:
- max_file_size_upload: int = 10 # MB
-
-
-@pytest.fixture
-def current_user():
- class User(SimpleNamespace):
- id: str
-
- return User(id=str(uuid.uuid4()))
-
-
-@pytest.fixture
-def storage_service():
- return FakeStorageService()
-
-
-@pytest.fixture
-def settings_service():
- return SimpleNamespace(settings=FakeSettings())
-
-
-@pytest.fixture
-def session():
- return FakeSession()
-
-
-@pytest.mark.asyncio
-async def test_mcp_servers_upload_replace(session, storage_service, settings_service, current_user):
- """Uploading _mcp_servers.json twice should keep single DB record and no rename."""
- content1 = b'{"mcpServers": {}}'
- file1 = UploadFile(filename=f"{MCP_SERVERS_FILE}.json", file=io.BytesIO(content1))
- file1.size = len(content1)
-
- # First upload
- await upload_user_file(
- file=file1,
- session=session,
- current_user=current_user,
- storage_service=storage_service,
- settings_service=settings_service,
- )
-
- # DB should contain a single entry named _mcp_servers
- assert list(session._db.keys()) == [MCP_SERVERS_FILE]
-
- # Upload again with different content
- content2 = b'{"mcpServers": {"everything": {}}}'
- file2 = UploadFile(filename=f"{MCP_SERVERS_FILE}.json", file=io.BytesIO(content2))
- file2.size = len(content2)
-
- await upload_user_file(
- file=file2,
- session=session,
- current_user=current_user,
- storage_service=storage_service,
- settings_service=settings_service,
- )
-
- # Still single record, same name
- assert list(session._db.keys()) == [MCP_SERVERS_FILE]
-
- record = session._db[MCP_SERVERS_FILE]
- # Storage path should match user_id/_mcp_servers.json
- expected_path = f"{current_user.id}/{MCP_SERVERS_FILE}.json"
- assert record.path == expected_path
-
- # Storage should have updated content
- stored_bytes = storage_service._store[expected_path]
- assert stored_bytes == content2
-
- # Third upload with server config provided by user
- content3 = (
- b'{"mcpServers": {"everything": {"command": "npx", "args": ["-y", "@modelcontextprotocol/server-everything"]}}}'
- )
- file3 = UploadFile(filename=f"{MCP_SERVERS_FILE}.json", file=io.BytesIO(content3))
- file3.size = len(content3)
-
- await upload_user_file(
- file=file3,
- session=session,
- current_user=current_user,
- storage_service=storage_service,
- settings_service=settings_service,
- )
-
- stored_bytes = storage_service._store[expected_path]
- assert stored_bytes == content3
diff --git a/src/backend/tests/unit/base/data/test_kb_utils.py b/src/backend/tests/unit/base/data/test_kb_utils.py
deleted file mode 100644
index 0ee77d34dca3..000000000000
--- a/src/backend/tests/unit/base/data/test_kb_utils.py
+++ /dev/null
@@ -1,458 +0,0 @@
-import pytest
-from langflow.base.knowledge_bases import compute_bm25, compute_tfidf
-
-
-class TestKBUtils:
- """Test suite for knowledge base utility functions."""
-
- # Test data for TF-IDF and BM25 tests
- @pytest.fixture
- def sample_documents(self):
- """Sample documents for testing."""
- return ["the cat sat on the mat", "the dog ran in the park", "cats and dogs are pets", "birds fly in the sky"]
-
- @pytest.fixture
- def query_terms(self):
- """Sample query terms for testing."""
- return ["cat", "dog"]
-
- @pytest.fixture
- def empty_documents(self):
- """Empty documents for edge case testing."""
- return ["", "", ""]
-
- @pytest.fixture
- def single_document(self):
- """Single document for testing."""
- return ["hello world"]
-
- def test_compute_tfidf_basic(self, sample_documents, query_terms):
- """Test basic TF-IDF computation."""
- scores = compute_tfidf(sample_documents, query_terms)
-
- # Should return a score for each document
- assert len(scores) == len(sample_documents)
-
- # All scores should be floats
- assert all(isinstance(score, float) for score in scores)
-
- # First document contains "cat", should have non-zero score
- assert scores[0] > 0.0
-
- # Second document contains "dog", should have non-zero score
- assert scores[1] > 0.0
-
- # Third document contains "cats" and "dogs"; "cats" != "cat" under exact
- # token matching, so this probes how strictly query terms are matched
- assert scores[2] >= 0.0
-
- # Fourth document contains neither term, should have zero score
- assert scores[3] == 0.0
-
- def test_compute_tfidf_case_insensitive(self):
- """Test that TF-IDF computation is case insensitive."""
- documents = ["The CAT sat", "the dog RAN", "CATS and DOGS"]
- query_terms = ["cat", "DOG"]
-
- scores = compute_tfidf(documents, query_terms)
-
- # First document should match "cat" (case insensitive)
- assert scores[0] > 0.0
-
- # Second document should match "dog" (case insensitive)
- assert scores[1] > 0.0
-
- def test_compute_tfidf_empty_documents(self, empty_documents, query_terms):
- """Test TF-IDF with empty documents."""
- scores = compute_tfidf(empty_documents, query_terms)
-
- # Should return scores for all documents
- assert len(scores) == len(empty_documents)
-
- # All scores should be zero since documents are empty
- assert all(score == 0.0 for score in scores)
-
- def test_compute_tfidf_empty_query_terms(self, sample_documents):
- """Test TF-IDF with empty query terms."""
- scores = compute_tfidf(sample_documents, [])
-
- # Should return scores for all documents
- assert len(scores) == len(sample_documents)
-
- # All scores should be zero since no query terms
- assert all(score == 0.0 for score in scores)
-
- def test_compute_tfidf_single_document(self, single_document):
- """Test TF-IDF with single document."""
- query_terms = ["hello", "world"]
- scores = compute_tfidf(single_document, query_terms)
-
- assert len(scores) == 1
- # With only one document, IDF = log(1/1) = 0, so TF-IDF score is always 0
- # This is correct mathematical behavior - TF-IDF is designed to discriminate between documents
- assert scores[0] == 0.0
-
- def test_compute_tfidf_two_documents_positive_scores(self):
- """Test TF-IDF with two documents to ensure positive scores are possible."""
- documents = ["hello world", "goodbye earth"]
- query_terms = ["hello", "world"]
- scores = compute_tfidf(documents, query_terms)
-
- assert len(scores) == 2
- # First document contains both terms, should have positive score
- assert scores[0] > 0.0
- # Second document contains neither term, should have zero score
- assert scores[1] == 0.0
-
- def test_compute_tfidf_no_documents(self):
- """Test TF-IDF with no documents."""
- scores = compute_tfidf([], ["cat", "dog"])
-
- assert scores == []
-
- def test_compute_tfidf_term_frequency_calculation(self):
- """Test TF-IDF term frequency calculation."""
- # Documents with different term frequencies for the same term
- documents = ["rare word text", "rare rare word", "other content"]
- query_terms = ["rare"]
-
- scores = compute_tfidf(documents, query_terms)
-
- # "rare" appears in documents 0 and 1, but with different frequencies
- # Document 1 has higher TF (2/3 vs 1/3), so should score higher
- assert scores[0] > 0.0 # Contains "rare" once
- assert scores[1] > scores[0] # Contains "rare" twice, should score higher
- assert scores[2] == 0.0 # Doesn't contain "rare"
-
- def test_compute_tfidf_idf_calculation(self):
- """Test TF-IDF inverse document frequency calculation."""
- # "rare" appears in only one document, "common" appears in both
- documents = ["rare term", "common term", "common word"]
- query_terms = ["rare", "common"]
-
- scores = compute_tfidf(documents, query_terms)
-
- # First document should have higher score due to rare term having higher IDF
- assert scores[0] > scores[1] # rare term gets higher IDF
- assert scores[0] > scores[2]
-
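- def test_compute_tfidf_worked_example(self):
- """Worked TF-IDF example under the classic formulation (an assumption
- about the exact tf/idf variants used): tf = count / doc_len, idf = log(N / df).
- """
- documents = ["cat dog", "dog dog"]
- query_terms = ["cat"]
-
- scores = compute_tfidf(documents, query_terms)
-
- # "cat" has tf = 1/2 in doc 0 and appears in 1 of 2 docs, so idf = log(2);
- # the classic score would be 0.5 * log(2) ~= 0.347 for doc 0 and 0 for doc 1.
- # Only the qualitative shape is asserted, since smoothing may differ.
- assert scores[0] > 0.0
- assert scores[1] == 0.0
-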
- def test_compute_bm25_basic(self, sample_documents, query_terms):
- """Test basic BM25 computation."""
- scores = compute_bm25(sample_documents, query_terms)
-
- # Should return a score for each document
- assert len(scores) == len(sample_documents)
-
- # All scores should be floats
- assert all(isinstance(score, float) for score in scores)
-
- # First document contains "cat", should have non-zero score
- assert scores[0] > 0.0
-
- # Second document contains "dog", should have non-zero score
- assert scores[1] > 0.0
-
- # Fourth document contains neither term, should have zero score
- assert scores[3] == 0.0
-
- def test_compute_bm25_parameters(self, sample_documents, query_terms):
- """Test BM25 with different k1 and b parameters."""
- # Test with default parameters
- scores_default = compute_bm25(sample_documents, query_terms)
-
- # Test with different k1
- scores_k1 = compute_bm25(sample_documents, query_terms, k1=2.0)
-
- # Test with different b
- scores_b = compute_bm25(sample_documents, query_terms, b=0.5)
-
- # Test with both different
- scores_both = compute_bm25(sample_documents, query_terms, k1=2.0, b=0.5)
-
- # All should return valid scores
- assert len(scores_default) == len(sample_documents)
- assert len(scores_k1) == len(sample_documents)
- assert len(scores_b) == len(sample_documents)
- assert len(scores_both) == len(sample_documents)
-
- # Scores should be different with different parameters
- assert scores_default != scores_k1
- assert scores_default != scores_b
-
- def test_compute_bm25_case_insensitive(self):
- """Test that BM25 computation is case insensitive."""
- documents = ["The CAT sat", "the dog RAN", "CATS and DOGS"]
- query_terms = ["cat", "DOG"]
-
- scores = compute_bm25(documents, query_terms)
-
- # First document should match "cat" (case insensitive)
- assert scores[0] > 0.0
-
- # Second document should match "dog" (case insensitive)
- assert scores[1] > 0.0
-
- def test_compute_bm25_empty_documents(self, empty_documents, query_terms):
- """Test BM25 with empty documents."""
- scores = compute_bm25(empty_documents, query_terms)
-
- # Should return scores for all documents
- assert len(scores) == len(empty_documents)
-
- # All scores should be zero since documents are empty
- assert all(score == 0.0 for score in scores)
-
- def test_compute_bm25_empty_query_terms(self, sample_documents):
- """Test BM25 with empty query terms."""
- scores = compute_bm25(sample_documents, [])
-
- # Should return scores for all documents
- assert len(scores) == len(sample_documents)
-
- # All scores should be zero since no query terms
- assert all(score == 0.0 for score in scores)
-
- def test_compute_bm25_single_document(self, single_document):
- """Test BM25 with single document."""
- query_terms = ["hello", "world"]
- scores = compute_bm25(single_document, query_terms)
-
- assert len(scores) == 1
- # With only one document, IDF = log(1/1) = 0, so BM25 score is always 0
- # This is correct mathematical behavior - both TF-IDF and BM25 are designed to discriminate between documents
- assert scores[0] == 0.0
-
- def test_compute_bm25_two_documents_positive_scores(self):
- """Test BM25 with two documents to ensure positive scores are possible."""
- documents = ["hello world", "goodbye earth"]
- query_terms = ["hello", "world"]
- scores = compute_bm25(documents, query_terms)
-
- assert len(scores) == 2
- # First document contains both terms, should have positive score
- assert scores[0] > 0.0
- # Second document contains neither term, should have zero score
- assert scores[1] == 0.0
-
- def test_compute_bm25_no_documents(self):
- """Test BM25 with no documents."""
- scores = compute_bm25([], ["cat", "dog"])
-
- assert scores == []
-
- def test_compute_bm25_document_length_normalization(self):
- """Test BM25 document length normalization."""
- # Test with documents where some terms appear in subset of documents
- documents = [
- "cat unique1", # Short document with unique term
- "cat dog bird mouse elephant tiger lion bear wolf unique2", # Long document with unique term
- "other content", # Document without query terms
- ]
- query_terms = ["unique1", "unique2"]
-
- scores = compute_bm25(documents, query_terms)
-
- # Documents with unique terms should have positive scores
- assert scores[0] > 0.0 # Contains "unique1"
- assert scores[1] > 0.0 # Contains "unique2"
- assert scores[2] == 0.0 # Contains neither term
-
- # Length normalization is exercised implicitly; only the output shape is asserted
- assert len(scores) == 3
-
- def test_compute_bm25_term_frequency_saturation(self):
- """Test BM25 term frequency saturation behavior."""
- # Test with documents where term frequencies can be meaningfully compared
- documents = [
- "rare word text", # TF = 1 for "rare"
- "rare rare word", # TF = 2 for "rare"
- "rare rare rare rare rare word", # TF = 5 for "rare"
- "other content", # No "rare" term
- ]
- query_terms = ["rare"]
-
- scores = compute_bm25(documents, query_terms)
-
- # Documents with the term should have positive scores
- assert scores[0] > 0.0 # TF=1
- assert scores[1] > 0.0 # TF=2
- assert scores[2] > 0.0 # TF=5
- assert scores[3] == 0.0 # TF=0
-
- # Scores should increase with term frequency, but with diminishing returns
- assert scores[1] > scores[0] # TF=2 > TF=1
- assert scores[2] > scores[1] # TF=5 > TF=2
-
- # Both increases should be positive; saturation appears as diminishing per-occurrence gains
- increase_1_to_2 = scores[1] - scores[0]
- increase_2_to_5 = scores[2] - scores[1]
- assert increase_1_to_2 > 0
- assert increase_2_to_5 > 0
-
- def test_compute_bm25_idf_calculation(self):
- """Test BM25 inverse document frequency calculation."""
- # "rare" appears in only one document, "common" appears in multiple
- documents = ["rare term", "common term", "common word"]
- query_terms = ["rare", "common"]
-
- scores = compute_bm25(documents, query_terms)
-
- # First document should have higher score due to rare term having higher IDF
- assert scores[0] > scores[1] # rare term gets higher IDF
- assert scores[0] > scores[2]
-
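- def test_compute_bm25_worked_example(self):
- """Worked BM25 example under the standard Okapi form (an assumption about
- the implementation): score = idf * tf * (k1 + 1) / (tf + k1 * (1 - b + b * dl / avgdl)).
- """
- documents = ["cat dog", "dog dog"]
- scores = compute_bm25(documents, ["cat"], k1=1.5, b=0.75)
-
- # Both docs have length 2 = avgdl, so the length factor is 1 and doc 0's
- # score reduces to idf * 1 * (1.5 + 1) / (1 + 1.5) = idf; doc 1 never matches.
- # Only the qualitative shape is asserted, since the idf variant may differ.
- assert scores[0] > 0.0
- assert scores[1] == 0.0
-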
- def test_compute_bm25_zero_parameters(self, sample_documents, query_terms):
- """Test BM25 with edge case parameters."""
- # Test with k1=0 (no term frequency scaling)
- scores_k1_zero = compute_bm25(sample_documents, query_terms, k1=0.0)
- assert len(scores_k1_zero) == len(sample_documents)
-
- # Test with b=0 (no document length normalization)
- scores_b_zero = compute_bm25(sample_documents, query_terms, b=0.0)
- assert len(scores_b_zero) == len(sample_documents)
-
- # Test with b=1 (full document length normalization)
- scores_b_one = compute_bm25(sample_documents, query_terms, b=1.0)
- assert len(scores_b_one) == len(sample_documents)
-
- def test_tfidf_vs_bm25_comparison(self, sample_documents, query_terms):
- """Test that TF-IDF and BM25 produce different but related scores."""
- tfidf_scores = compute_tfidf(sample_documents, query_terms)
- bm25_scores = compute_bm25(sample_documents, query_terms)
-
- # Both should return same number of scores
- assert len(tfidf_scores) == len(bm25_scores) == len(sample_documents)
-
- # For documents that match, both should be positive
- for i in range(len(sample_documents)):
- if tfidf_scores[i] > 0:
- assert bm25_scores[i] > 0, f"Document {i} has TF-IDF score but zero BM25 score"
- if bm25_scores[i] > 0:
- assert tfidf_scores[i] > 0, f"Document {i} has BM25 score but zero TF-IDF score"
-
- def test_compute_tfidf_special_characters(self):
- """Test TF-IDF with documents containing special characters."""
- documents = ["hello, world!", "world... hello?", "no match here"]
- query_terms = ["hello", "world"]
-
- scores = compute_tfidf(documents, query_terms)
-
- # Should not crash on punctuation; only the output shape is asserted here
- assert len(scores) == 3
- # Note: Current implementation does simple split(), so punctuation stays attached
- # This tests the current behavior - may need updating if tokenization improves
-
- def test_compute_bm25_special_characters(self):
- """Test BM25 with documents containing special characters."""
- documents = ["hello, world!", "world... hello?", "no match here"]
- query_terms = ["hello", "world"]
-
- scores = compute_bm25(documents, query_terms)
-
- # Should not crash on punctuation; only the output shape is asserted here
- assert len(scores) == 3
- # Same tokenization behavior as TF-IDF
-
- def test_compute_tfidf_whitespace_handling(self):
- """Test TF-IDF with various whitespace scenarios."""
- documents = [
- " hello world ", # Extra spaces
- "\thello\tworld\t", # Tabs
- "hello\nworld", # Newlines
- "", # Empty string
- ]
- query_terms = ["hello", "world"]
-
- scores = compute_tfidf(documents, query_terms)
-
- assert len(scores) == 4
- # First three should have positive scores (they contain the terms)
- assert scores[0] > 0.0
- assert scores[1] > 0.0
- assert scores[2] > 0.0
- # Last should be zero (empty document)
- assert scores[3] == 0.0
-
- def test_compute_bm25_whitespace_handling(self):
- """Test BM25 with various whitespace scenarios."""
- documents = [
- " hello world ", # Extra spaces
- "\thello\tworld\t", # Tabs
- "hello\nworld", # Newlines
- "", # Empty string
- ]
- query_terms = ["hello", "world"]
-
- scores = compute_bm25(documents, query_terms)
-
- assert len(scores) == 4
- # First three should have positive scores (they contain the terms)
- assert scores[0] > 0.0
- assert scores[1] > 0.0
- assert scores[2] > 0.0
- # Last should be zero (empty document)
- assert scores[3] == 0.0
-
- def test_compute_tfidf_mathematical_properties(self):
- """Test mathematical properties of TF-IDF scores."""
- documents = ["cat dog", "cat", "dog"]
- query_terms = ["cat"]
-
- scores = compute_tfidf(documents, query_terms)
-
- # All scores should be non-negative
- assert all(score >= 0.0 for score in scores)
-
- # Documents containing the term should have positive scores
- assert scores[0] > 0.0 # contains "cat"
- assert scores[1] > 0.0 # contains "cat"
- assert scores[2] == 0.0 # doesn't contain "cat"
-
- def test_compute_bm25_mathematical_properties(self):
- """Test mathematical properties of BM25 scores."""
- documents = ["cat dog", "cat", "dog"]
- query_terms = ["cat"]
-
- scores = compute_bm25(documents, query_terms)
-
- # All scores should be non-negative
- assert all(score >= 0.0 for score in scores)
-
- # Documents containing the term should have positive scores
- assert scores[0] > 0.0 # contains "cat"
- assert scores[1] > 0.0 # contains "cat"
- assert scores[2] == 0.0 # doesn't contain "cat"
-
- def test_compute_tfidf_duplicate_terms_in_query(self):
- """Test TF-IDF with duplicate terms in query."""
- documents = ["cat dog bird", "cat cat dog", "bird bird bird"]
- query_terms = ["cat", "cat", "dog"] # "cat" appears twice
-
- scores = compute_tfidf(documents, query_terms)
-
- # Should handle duplicate query terms gracefully
- assert len(scores) == 3
- assert all(isinstance(score, float) for score in scores)
-
- # First two documents should have positive scores
- assert scores[0] > 0.0
- assert scores[1] > 0.0
- # Third document only contains "bird", so should have zero score
- assert scores[2] == 0.0
-
- def test_compute_bm25_duplicate_terms_in_query(self):
- """Test BM25 with duplicate terms in query."""
- documents = ["cat dog bird", "cat cat dog", "bird bird bird"]
- query_terms = ["cat", "cat", "dog"] # "cat" appears twice
-
- scores = compute_bm25(documents, query_terms)
-
- # Should handle duplicate query terms gracefully
- assert len(scores) == 3
- assert all(isinstance(score, float) for score in scores)
-
- # First two documents should have positive scores
- assert scores[0] > 0.0
- assert scores[1] > 0.0
- # Third document only contains "bird", so should have zero score
- assert scores[2] == 0.0
diff --git a/src/backend/tests/unit/base/load/test_load.py b/src/backend/tests/unit/base/load/test_load.py
deleted file mode 100644
index bce82d6589d5..000000000000
--- a/src/backend/tests/unit/base/load/test_load.py
+++ /dev/null
@@ -1,81 +0,0 @@
-import inspect
-import os
-
-import pytest
-from dotenv import load_dotenv
-from langflow.load import run_flow_from_json
-
-
-def test_run_flow_from_json_params():
- # Define the expected parameters
- expected_params = {
- "flow",
- "input_value",
- "session_id",
- "tweaks",
- "input_type",
- "output_type",
- "output_component",
- "log_level",
- "log_file",
- "env_file",
- "cache",
- "disable_logs",
- "fallback_to_env_vars",
- }
-
- # Check if the function accepts all expected parameters
- func_spec = inspect.getfullargspec(run_flow_from_json)
- params = func_spec.args + func_spec.kwonlyargs
- assert expected_params.issubset(params), "Not all expected parameters are present in run_flow_from_json"
-
- # TODO: Add tests that load a flow and run it; needs a fake LLM to check
- # that it returns the correct output
-
-
-@pytest.fixture
-def fake_env_file(tmp_path):
- # Create a fake .env file
- env_file = tmp_path / ".env"
- env_file.write_text("TEST_OP=TESTWORKS", encoding="utf-8")
- return env_file
-
-
-def test_run_flow_with_fake_env(fake_env_file):
- # Load the flow from the JSON file
- # flow_file = Path("src/backend/tests/data/env_variable_test.json")
- flow_file = pytest.ENV_VARIABLE_TEST
- tweaks_dict = {"Secret-zIbKs": {"secret_key_input": "TEST_OP"}}
-
- # Run the flow from JSON, providing the fake env file
- result = run_flow_from_json(
- flow=flow_file,
- input_value="some_input_value",
- env_file=str(fake_env_file), # Pass the path of the fake env file
- tweaks=tweaks_dict,
- )
- # Extract and check the output data
- output_data = result[0].outputs[0].results["message"].data["text"]
- assert output_data == "TESTWORKS"
-
-
-def test_run_flow_with_fake_env_tweaks(fake_env_file):
- # Load the flow from the JSON file
- # flow_file = Path("src/backend/tests/data/env_variable_test.json")
- flow_file = pytest.ENV_VARIABLE_TEST
-
- # Load env file and set up tweaks
-
- load_dotenv(str(fake_env_file))
- tweaks = {
- "Secret-zIbKs": {"secret_key_input": os.environ["TEST_OP"]},
- }
- # Run the flow from JSON without passing the env_file
- result = run_flow_from_json(
- flow=flow_file,
- input_value="some_input_value",
- tweaks=tweaks,
- )
- # Extract and check the output data
- output_data = result[0].outputs[0].results["message"].data["text"]
- assert output_data == "TESTWORKS"
diff --git a/src/backend/tests/unit/base/mcp/test_mcp_util.py b/src/backend/tests/unit/base/mcp/test_mcp_util.py
deleted file mode 100644
index 0eaa0e59e5dd..000000000000
--- a/src/backend/tests/unit/base/mcp/test_mcp_util.py
+++ /dev/null
@@ -1,818 +0,0 @@
-"""Unit tests for MCP utility functions.
-
-This test suite validates the MCP utility functions including:
-- Session management
-- Header validation and processing
-- Utility functions for name sanitization and schema conversion
-"""
-
-import shutil
-import sys
-from unittest.mock import AsyncMock, MagicMock, patch
-
-import pytest
-
-from lfx.base.mcp import util
-from lfx.base.mcp.util import MCPSessionManager, MCPSseClient, MCPStdioClient, _process_headers, validate_headers
-
-
-class TestMCPSessionManager:
- @pytest.fixture
- async def session_manager(self):
- """Create a session manager and clean it up after the test."""
- manager = MCPSessionManager()
- yield manager
- # Clean up after test
- await manager.cleanup_all()
-
- async def test_session_caching(self, session_manager):
- """Test that sessions are properly cached and reused."""
- context_id = "test_context"
- connection_params = MagicMock()
- transport_type = "stdio"
-
- # Create a mock session that will appear healthy
- mock_session = AsyncMock()
- mock_session._write_stream = MagicMock()
- mock_session._write_stream._closed = False
-
- # Create a mock task that appears to be running
- mock_task = AsyncMock()
- mock_task.done = MagicMock(return_value=False)
-
- with (
- patch.object(session_manager, "_create_stdio_session") as mock_create,
- patch.object(session_manager, "_validate_session_connectivity", return_value=True),
- ):
- mock_create.return_value = (mock_session, mock_task)
-
- # First call should create session
- session1 = await session_manager.get_session(context_id, connection_params, transport_type)
-
- # Second call should return cached session without creating new one
- session2 = await session_manager.get_session(context_id, connection_params, transport_type)
-
- assert session1 == session2
- assert session1 == mock_session
- # Should only create once since the second call should use the cached session
- mock_create.assert_called_once()
-
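- # The caching contract exercised above, as this test reads it (an inference,
- # not a quote of the manager's internals): get_session keys on context_id,
- # revalidates a cached session's connectivity, and only builds a new session
- # on a cache miss or failed validation.
-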
- async def test_session_cleanup(self, session_manager):
- """Test session cleanup functionality."""
- context_id = "test_context"
- server_key = "test_server"
- session_id = "test_session"
-
- # Add a session to the manager with proper mock setup using new structure
- mock_task = AsyncMock()
- mock_task.done = MagicMock(return_value=False) # Use MagicMock for sync method
- mock_task.cancel = MagicMock() # Use MagicMock for sync method
-
- # Set up the new session structure
- session_manager.sessions_by_server[server_key] = {
- "sessions": {session_id: {"session": AsyncMock(), "task": mock_task, "type": "stdio", "last_used": 0}},
- "last_cleanup": 0,
- }
-
- # Set up mapping for backwards compatibility
- session_manager._context_to_session[context_id] = (server_key, session_id)
-
- await session_manager._cleanup_session(context_id)
-
- # Should cancel the task and remove from sessions
- mock_task.cancel.assert_called_once()
- assert session_id not in session_manager.sessions_by_server[server_key]["sessions"]
-
- async def test_server_switch_detection(self, session_manager):
- """Test that server switches are properly detected and handled."""
- context_id = "test_context"
-
- # First server
- server1_params = MagicMock()
- server1_params.command = "server1"
-
- # Second server
- server2_params = MagicMock()
- server2_params.command = "server2"
-
- with (
- patch.object(session_manager, "_create_stdio_session") as mock_create,
- patch.object(session_manager, "_validate_session_connectivity", return_value=True),
- ):
- mock_session1 = AsyncMock()
- mock_session2 = AsyncMock()
- mock_task1 = AsyncMock()
- mock_task2 = AsyncMock()
- mock_create.side_effect = [(mock_session1, mock_task1), (mock_session2, mock_task2)]
-
- # First connection
- session1 = await session_manager.get_session(context_id, server1_params, "stdio")
-
- # Switch to different server should create new session
- session2 = await session_manager.get_session(context_id, server2_params, "stdio")
-
- assert session1 != session2
- assert mock_create.call_count == 2
-
-
-class TestHeaderValidation:
- """Test the header validation functionality."""
-
- def test_validate_headers_valid_input(self):
- """Test header validation with valid headers."""
- headers = {"Authorization": "Bearer token123", "Content-Type": "application/json", "X-API-Key": "secret-key"}
-
- result = validate_headers(headers)
-
- # Headers should be normalized to lowercase
- expected = {"authorization": "Bearer token123", "content-type": "application/json", "x-api-key": "secret-key"}
- assert result == expected
-
- def test_validate_headers_empty_input(self):
- """Test header validation with empty/None input."""
- assert validate_headers({}) == {}
- assert validate_headers(None) == {}
-
- def test_validate_headers_invalid_names(self):
- """Test header validation with invalid header names."""
- headers = {
- "Invalid Header": "value", # spaces not allowed
- "Header@Name": "value", # @ not allowed
- "Header Name": "value", # spaces not allowed
- "Valid-Header": "value", # this should pass
- }
-
- result = validate_headers(headers)
-
- # Only the valid header should remain
- assert result == {"valid-header": "value"}
-
- def test_validate_headers_sanitize_values(self):
- """Test header value sanitization."""
- headers = {
- "Authorization": "Bearer \x00token\x1f with\r\ninjection",
- "Clean-Header": " clean value ",
- "Empty-After-Clean": "\x00\x01\x02",
- "Tab-Header": "value\twith\ttabs", # tabs should be preserved
- }
-
- result = validate_headers(headers)
-
- # Control characters should be removed, whitespace trimmed
- # Header with injection attempts should be skipped
- expected = {"clean-header": "clean value", "tab-header": "value\twith\ttabs"}
- assert result == expected
-
- def test_validate_headers_non_string_values(self):
- """Test header validation with non-string values."""
- headers = {"String-Header": "valid", "Number-Header": 123, "None-Header": None, "List-Header": ["value"]}
-
- result = validate_headers(headers)
-
- # Only string headers should remain
- assert result == {"string-header": "valid"}
-
- def test_validate_headers_injection_attempts(self):
- """Test header validation against injection attempts."""
- headers = {
- "Injection1": "value\r\nInjected-Header: malicious",
- "Injection2": "value\nX-Evil: attack",
- "Safe-Header": "safe-value",
- }
-
- result = validate_headers(headers)
-
- # Injection attempts should be filtered out
- assert result == {"safe-header": "safe-value"}
-
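-
-# Reference sketch (an assumption, not the library's code) of the policy the
-# tests above pin down: token-characters-only names, lowercased; values lose
-# control characters except tab and are trimmed; CR/LF anywhere drops the header.
-import re
-
-
-def _sketch_validate_headers(headers: dict | None) -> dict:
- if not headers:
- return {}
- token = re.compile(r"[!#$%&'*+\-.^_`|~0-9A-Za-z]+")
- out = {}
- for name, value in headers.items():
- if not isinstance(value, str) or not token.fullmatch(name):
- continue
- if "\r" in value or "\n" in value:
- continue # header-injection attempt: reject outright
- cleaned = "".join(c for c in value if c == "\t" or ord(c) >= 0x20).strip()
- if cleaned:
- out[name.lower()] = cleaned
- return out
-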
-
-class TestSSEHeaderIntegration:
- """Integration test to verify headers are properly passed through the entire SSE flow."""
-
- async def test_headers_processing(self):
- """Test that headers flow properly from server config through to SSE client connection."""
- # Test the header processing function directly
- headers_input = [
- {"key": "Authorization", "value": "Bearer test-token"},
- {"key": "X-API-Key", "value": "secret-key"},
- ]
-
- expected_headers = {
- "authorization": "Bearer test-token", # normalized to lowercase
- "x-api-key": "secret-key",
- }
-
- # Test _process_headers function with validation
- processed_headers = _process_headers(headers_input)
- assert processed_headers == expected_headers
-
- # Test different input formats
- # Test dict input with validation
- dict_headers = {"Authorization": "Bearer dict-token", "Invalid Header": "bad"}
- result = _process_headers(dict_headers)
- # Invalid header should be filtered out, valid header normalized
- assert result == {"authorization": "Bearer dict-token"}
-
- # Test None input
- assert _process_headers(None) == {}
-
- # Test empty list
- assert _process_headers([]) == {}
-
- # Test malformed list
- malformed_headers = [{"key": "Auth"}, {"value": "token"}] # Missing value/key
- assert _process_headers(malformed_headers) == {}
-
- # Test list with invalid header names
- invalid_headers = [
- {"key": "Valid-Header", "value": "good"},
- {"key": "Invalid Header", "value": "bad"}, # spaces not allowed
- ]
- result = _process_headers(invalid_headers)
- assert result == {"valid-header": "good"}
-
- async def test_sse_client_header_storage(self):
- """Test that SSE client properly stores headers in connection params."""
- sse_client = MCPSseClient()
- test_url = "http://test.url"
- test_headers = {"Authorization": "Bearer test123", "Custom": "value"}
-
- # Test that headers are properly stored in connection params
- # Set connection params as a dict like the implementation expects
- sse_client._connection_params = {
- "url": test_url,
- "headers": test_headers,
- "timeout_seconds": 30,
- "sse_read_timeout_seconds": 30,
- }
-
- # Verify headers are stored
- assert sse_client._connection_params["url"] == test_url
- assert sse_client._connection_params["headers"] == test_headers
-
-
-class TestMCPUtilityFunctions:
- """Test utility functions from util.py that don't have dedicated test classes."""
-
- def test_sanitize_mcp_name(self):
- """Test MCP name sanitization."""
- assert util.sanitize_mcp_name("Test Name 123") == "test_name_123"
- assert util.sanitize_mcp_name(" ") == ""
- assert util.sanitize_mcp_name("123abc") == "_123abc"
- assert util.sanitize_mcp_name("Tést-😀-Námé") == "test_name"
- assert util.sanitize_mcp_name("a" * 100) == "a" * 46
-
- def test_get_unique_name(self):
- """Test unique name generation."""
- names = {"foo", "foo_1"}
- assert util.get_unique_name("foo", 10, names) == "foo_2"
- assert util.get_unique_name("bar", 10, names) == "bar"
- assert util.get_unique_name("longname", 4, {"long"}) == "lo_1"
-
- def test_is_valid_key_value_item(self):
- """Test key-value item validation."""
- assert util._is_valid_key_value_item({"key": "a", "value": "b"}) is True
- assert util._is_valid_key_value_item({"key": "a"}) is False
- assert util._is_valid_key_value_item(["key", "value"]) is False
- assert util._is_valid_key_value_item(None) is False
-
- def test_validate_node_installation(self):
- """Test Node.js installation validation."""
- if shutil.which("node"):
- assert util._validate_node_installation("npx something") == "npx something"
- else:
- with pytest.raises(ValueError, match="Node.js is not installed"):
- util._validate_node_installation("npx something")
- assert util._validate_node_installation("echo test") == "echo test"
-
- def test_create_input_schema_from_json_schema(self):
- """Test JSON schema to Pydantic model conversion."""
- schema = {
- "type": "object",
- "properties": {
- "foo": {"type": "string", "description": "desc"},
- "bar": {"type": "integer"},
- },
- "required": ["foo"],
- }
- model_class = util.create_input_schema_from_json_schema(schema)
- instance = model_class(foo="abc", bar=1)
- assert instance.foo == "abc"
- assert instance.bar == 1
-
- with pytest.raises(Exception): # noqa: B017, PT011
- model_class(bar=1) # missing required field
-
- @pytest.mark.asyncio
- async def test_validate_connection_params(self):
- """Test connection parameter validation."""
- # Valid parameters
- await util._validate_connection_params("Stdio", command="echo test")
- await util._validate_connection_params("SSE", url="http://test")
-
- # Invalid parameters
- with pytest.raises(ValueError, match="Command is required for Stdio mode"):
- await util._validate_connection_params("Stdio", command=None)
- with pytest.raises(ValueError, match="URL is required for SSE mode"):
- await util._validate_connection_params("SSE", url=None)
- with pytest.raises(ValueError, match="Invalid mode"):
- await util._validate_connection_params("InvalidMode")
-
- @pytest.mark.asyncio
- async def test_get_flow_snake_case_mocked(self):
- """Test flow lookup by snake case name with mocked session."""
-
- class DummyFlow:
- def __init__(self, name: str, user_id: str, *, is_component: bool = False, action_name: str | None = None):
- self.name = name
- self.user_id = user_id
- self.is_component = is_component
- self.action_name = action_name
-
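-        # DummyExec/DummySession stub out only the session API get_flow_snake_case relies on: exec(...) returning .all()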
- class DummyExec:
- def __init__(self, flows: list[DummyFlow]):
- self._flows = flows
-
- def all(self):
- return self._flows
-
- class DummySession:
- def __init__(self, flows: list[DummyFlow]):
- self._flows = flows
-
- async def exec(self, stmt): # noqa: ARG002
- return DummyExec(self._flows)
-
- user_id = "123e4567-e89b-12d3-a456-426614174000"
- flows = [DummyFlow("Test Flow", user_id), DummyFlow("Other", user_id)]
-
- # Should match sanitized name
- result = await util.get_flow_snake_case(util.sanitize_mcp_name("Test Flow"), user_id, DummySession(flows))
- assert result is flows[0]
-
- # Should return None if not found
- result = await util.get_flow_snake_case("notfound", user_id, DummySession(flows))
- assert result is None
-
-
-@pytest.mark.skip(reason="Skipping MCPStdioClientWithEverythingServer tests.")
-class TestMCPStdioClientWithEverythingServer:
- """Test MCPStdioClient with the Everything MCP server."""
-
- @pytest.fixture
- def stdio_client(self):
- """Create a stdio client for testing."""
- return MCPStdioClient()
-
- @pytest.mark.asyncio
- @pytest.mark.skipif(not shutil.which("npx"), reason="Node.js not available")
- @pytest.mark.skipif(
- sys.version_info >= (3, 13),
- reason="Temporarily disabled on Python 3.13 due to frequent timeouts with MCP Everything server",
- )
- async def test_connect_to_everything_server(self, stdio_client):
- """Test connecting to the Everything MCP server."""
- command = "npx -y @modelcontextprotocol/server-everything"
-
- try:
- # Connect to the server
- tools = await stdio_client.connect_to_server(command)
-
- # Verify tools were returned
- assert len(tools) > 0
-
- # Find the echo tool
- echo_tool = None
- for tool in tools:
- if hasattr(tool, "name") and tool.name == "echo":
- echo_tool = tool
- break
-
- assert echo_tool is not None, "Echo tool not found in server tools"
- assert echo_tool.description is not None
-
- # Verify the echo tool has the expected input schema
- assert hasattr(echo_tool, "inputSchema")
- assert echo_tool.inputSchema is not None
-
- finally:
- # Clean up the connection
- await stdio_client.disconnect()
-
- @pytest.mark.asyncio
- @pytest.mark.skipif(not shutil.which("npx"), reason="Node.js not available")
- async def test_run_echo_tool(self, stdio_client):
- """Test running the echo tool from the Everything server."""
- command = "npx -y @modelcontextprotocol/server-everything"
-
- try:
- # Connect to the server
- tools = await stdio_client.connect_to_server(command)
-
- # Find the echo tool
- echo_tool = None
- for tool in tools:
- if hasattr(tool, "name") and tool.name == "echo":
- echo_tool = tool
- break
-
- assert echo_tool is not None, "Echo tool not found"
-
- # Run the echo tool
- test_message = "Hello, MCP!"
- result = await stdio_client.run_tool("echo", {"message": test_message})
-
- # Verify the result
- assert result is not None
- assert hasattr(result, "content")
- assert len(result.content) > 0
-
- # Check that the echo worked - content should contain our message
- content_text = str(result.content[0])
- assert test_message in content_text or "Echo:" in content_text
-
- finally:
- await stdio_client.disconnect()
-
- @pytest.mark.asyncio
- @pytest.mark.skipif(not shutil.which("npx"), reason="Node.js not available")
- async def test_list_all_tools(self, stdio_client):
- """Test listing all available tools from the Everything server."""
- command = "npx -y @modelcontextprotocol/server-everything"
-
- try:
- # Connect to the server
- tools = await stdio_client.connect_to_server(command)
-
- # Verify we have multiple tools
- assert len(tools) >= 3 # Everything server typically has several tools
-
- # Check that tools have the expected attributes
- for tool in tools:
- assert hasattr(tool, "name")
- assert hasattr(tool, "description")
- assert hasattr(tool, "inputSchema")
- assert tool.name is not None
- assert len(tool.name) > 0
-
- # Common tools that should be available
- expected_tools = ["echo"] # Echo is typically available
- for expected_tool in expected_tools:
- assert any(tool.name == expected_tool for tool in tools), f"Expected tool '{expected_tool}' not found"
-
- finally:
- await stdio_client.disconnect()
-
- @pytest.mark.asyncio
- @pytest.mark.skipif(not shutil.which("npx"), reason="Node.js not available")
- async def test_session_reuse(self, stdio_client):
- """Test that sessions are properly reused."""
- command = "npx -y @modelcontextprotocol/server-everything"
-
- try:
- # Set session context
- stdio_client.set_session_context("test_session_reuse")
-
- # Connect to the server
- tools1 = await stdio_client.connect_to_server(command)
-
- # Connect again - should reuse the session
- tools2 = await stdio_client.connect_to_server(command)
-
- # Should have the same tools
- assert len(tools1) == len(tools2)
-
- # Run a tool to verify the session is working
- result = await stdio_client.run_tool("echo", {"message": "Session reuse test"})
- assert result is not None
-
- finally:
- await stdio_client.disconnect()
-
-
-class TestMCPSseClientWithDeepWikiServer:
- """Test MCPSseClient with the DeepWiki MCP server."""
-
- @pytest.fixture
- def sse_client(self):
- """Create an SSE client for testing."""
- return MCPSseClient()
-
- @pytest.mark.asyncio
- async def test_connect_to_deepwiki_server(self, sse_client):
- """Test connecting to the DeepWiki MCP server."""
- url = "https://mcp.deepwiki.com/sse"
-
- try:
- # Connect to the server
- tools = await sse_client.connect_to_server(url)
-
- # Verify tools were returned
- assert len(tools) > 0
-
- # Check for expected DeepWiki tools
- expected_tools = ["read_wiki_structure", "read_wiki_contents", "ask_question"]
-
- # Verify we have the expected tools
- for expected_tool in expected_tools:
- assert any(tool.name == expected_tool for tool in tools), f"Expected tool '{expected_tool}' not found"
-
- except Exception as e:
- # If the server is not accessible, skip the test
- pytest.skip(f"DeepWiki server not accessible: {e}")
- finally:
- await sse_client.disconnect()
-
- @pytest.mark.asyncio
- async def test_run_wiki_structure_tool(self, sse_client):
- """Test running the read_wiki_structure tool."""
- url = "https://mcp.deepwiki.com/sse"
-
- try:
- # Connect to the server
- tools = await sse_client.connect_to_server(url)
-
- # Find the read_wiki_structure tool
- wiki_tool = None
- for tool in tools:
- if hasattr(tool, "name") and tool.name == "read_wiki_structure":
- wiki_tool = tool
- break
-
- assert wiki_tool is not None, "read_wiki_structure tool not found"
-
- # Run the tool with a test repository (use repoName as expected by the API)
- result = await sse_client.run_tool("read_wiki_structure", {"repoName": "microsoft/vscode"})
-
- # Verify the result
- assert result is not None
- assert hasattr(result, "content")
- assert len(result.content) > 0
-
- except Exception as e:
- # If the server is not accessible or the tool fails, skip the test
- pytest.skip(f"DeepWiki server test failed: {e}")
- finally:
- await sse_client.disconnect()
-
- @pytest.mark.asyncio
- async def test_ask_question_tool(self, sse_client):
- """Test running the ask_question tool."""
- url = "https://mcp.deepwiki.com/sse"
-
- try:
- # Connect to the server
- tools = await sse_client.connect_to_server(url)
-
- # Find the ask_question tool
- ask_tool = None
- for tool in tools:
- if hasattr(tool, "name") and tool.name == "ask_question":
- ask_tool = tool
- break
-
- assert ask_tool is not None, "ask_question tool not found"
-
- # Run the tool with a test question (use repoName as expected by the API)
- result = await sse_client.run_tool(
- "ask_question", {"repoName": "microsoft/vscode", "question": "What is VS Code?"}
- )
-
- # Verify the result
- assert result is not None
- assert hasattr(result, "content")
- assert len(result.content) > 0
-
- except Exception as e:
- # If the server is not accessible or the tool fails, skip the test
- pytest.skip(f"DeepWiki server test failed: {e}")
- finally:
- await sse_client.disconnect()
-
- @pytest.mark.asyncio
- async def test_url_validation(self, sse_client):
- """Test URL validation for SSE connections."""
- # Test valid URL
- valid_url = "https://mcp.deepwiki.com/sse"
- is_valid, error = await sse_client.validate_url(valid_url)
-        # The URL should validate; a 429 means the server is reachable but rate limiting us
- if not is_valid and "429" in error:
- # Rate limiting indicates the server is accessible but limiting requests
- # This is a transient network issue, not a test failure
- pytest.skip(f"DeepWiki server is rate limiting requests: {error}")
- assert is_valid or error == "" # Either valid or accessible
-
- # Test invalid URL
- invalid_url = "not_a_url"
- is_valid, error = await sse_client.validate_url(invalid_url)
- assert not is_valid
- assert error != ""
-
- @pytest.mark.asyncio
- async def test_redirect_handling(self, sse_client):
- """Test redirect handling for SSE connections."""
- # Test with the DeepWiki URL
- url = "https://mcp.deepwiki.com/sse"
-
- try:
- # Check for redirects
- final_url = await sse_client.pre_check_redirect(url)
-
- # Should return a URL (either original or redirected)
- assert final_url is not None
- assert isinstance(final_url, str)
- assert final_url.startswith("http")
-
- except Exception as e:
- # If the server is not accessible, skip the test
- pytest.skip(f"DeepWiki server not accessible for redirect test: {e}")
-
- @pytest.fixture
- def mock_tool(self):
- """Create a mock MCP tool."""
- tool = MagicMock()
- tool.name = "test_tool"
- tool.description = "Test tool description"
- tool.inputSchema = {
- "type": "object",
- "properties": {"test_param": {"type": "string", "description": "Test parameter"}},
- "required": ["test_param"],
- }
- return tool
-
- @pytest.fixture
- def mock_session(self, mock_tool):
- """Create a mock ClientSession."""
- session = AsyncMock()
- session.initialize = AsyncMock()
- list_tools_result = MagicMock()
- list_tools_result.tools = [mock_tool]
- session.list_tools = AsyncMock(return_value=list_tools_result)
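-        # call_tool returns a result whose content items expose model_dump(), mirroring what the client code expects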
- session.call_tool = AsyncMock(
- return_value=MagicMock(content=[MagicMock(model_dump=lambda: {"result": "success"})])
- )
- return session
-
-
-class TestMCPSseClientUnit:
- """Unit tests for MCPSseClient functionality."""
-
- @pytest.fixture
- def sse_client(self):
- return MCPSseClient()
-
- @pytest.mark.asyncio
- async def test_client_initialization(self, sse_client):
- """Test that SSE client initializes correctly."""
- # Client should initialize with default values
- assert sse_client.session is None
- assert sse_client._connection_params is None
- assert sse_client._connected is False
- assert sse_client._session_context is None
-
- async def test_validate_url_valid(self, sse_client):
- """Test URL validation with valid URL."""
- with patch("httpx.AsyncClient") as mock_client:
- mock_response = MagicMock()
- mock_response.status_code = 200
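-            # Wire the mock so "async with httpx.AsyncClient() as client" yields an object whose get() returns our response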
- mock_client.return_value.__aenter__.return_value.get.return_value = mock_response
-
- is_valid, error_msg = await sse_client.validate_url("http://test.url", {})
-
- assert is_valid is True
- assert error_msg == ""
-
- async def test_validate_url_invalid_format(self, sse_client):
- """Test URL validation with invalid format."""
- is_valid, error_msg = await sse_client.validate_url("invalid-url", {})
-
- assert is_valid is False
- assert "Invalid URL format" in error_msg
-
- async def test_validate_url_with_404_response(self, sse_client):
- """Test URL validation with 404 response (should be valid for SSE)."""
- with patch("httpx.AsyncClient") as mock_client:
- mock_response = MagicMock()
- mock_response.status_code = 404
- mock_client.return_value.__aenter__.return_value.get.return_value = mock_response
-
- is_valid, error_msg = await sse_client.validate_url("http://test.url", {})
-
- assert is_valid is True
- assert error_msg == ""
-
- async def test_connect_to_server_with_headers(self, sse_client):
- """Test connecting to server via SSE with custom headers."""
- test_url = "http://test.url"
- test_headers = {"Authorization": "Bearer token123", "Custom-Header": "value"}
- expected_headers = {"authorization": "Bearer token123", "custom-header": "value"} # normalized
-
- with (
- patch.object(sse_client, "validate_url", return_value=(True, "")),
- patch.object(sse_client, "pre_check_redirect", return_value=test_url),
- patch.object(sse_client, "_get_or_create_session") as mock_get_session,
- ):
- # Mock session
- mock_session = AsyncMock()
- mock_tool = MagicMock()
- mock_tool.name = "test_tool"
- list_tools_result = MagicMock()
- list_tools_result.tools = [mock_tool]
- mock_session.list_tools = AsyncMock(return_value=list_tools_result)
- mock_get_session.return_value = mock_session
-
- tools = await sse_client.connect_to_server(test_url, test_headers)
-
- assert len(tools) == 1
- assert tools[0].name == "test_tool"
- assert sse_client._connected is True
-
- # Verify headers are stored in connection params (normalized)
- assert sse_client._connection_params is not None
- assert sse_client._connection_params["headers"] == expected_headers
- assert sse_client._connection_params["url"] == test_url
-
- async def test_headers_passed_to_session_manager(self, sse_client):
- """Test that headers are properly passed to the session manager."""
- test_url = "http://test.url"
- expected_headers = {"authorization": "Bearer token123", "x-api-key": "secret"} # normalized
-
- sse_client._session_context = "test_context"
- sse_client._connection_params = {
- "url": test_url,
- "headers": expected_headers, # Use normalized headers
- "timeout_seconds": 30,
- "sse_read_timeout_seconds": 30,
- }
-
- with patch.object(sse_client, "_get_session_manager") as mock_get_manager:
- mock_manager = AsyncMock()
- mock_session = AsyncMock()
- mock_manager.get_session = AsyncMock(return_value=mock_session)
- mock_get_manager.return_value = mock_manager
-
- result_session = await sse_client._get_or_create_session()
-
- # Verify session manager was called with correct parameters including normalized headers
- mock_manager.get_session.assert_called_once_with("test_context", sse_client._connection_params, "sse")
- assert result_session == mock_session
-
- async def test_pre_check_redirect_with_headers(self, sse_client):
- """Test pre-check redirect functionality with custom headers."""
- test_url = "http://test.url"
- redirect_url = "http://redirect.url"
- # Use pre-validated headers since pre_check_redirect expects already validated headers
- test_headers = {"authorization": "Bearer token123"} # already normalized
-
- with patch("httpx.AsyncClient") as mock_client:
- mock_response = MagicMock()
- mock_response.status_code = 307
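-            # Simulate a 307 redirect whose Location header (mocked below) points at redirect_url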
- mock_response.headers.get.return_value = redirect_url
- mock_client.return_value.__aenter__.return_value.get.return_value = mock_response
-
- result = await sse_client.pre_check_redirect(test_url, test_headers)
-
- assert result == redirect_url
- # Verify validated headers were passed to the request
- mock_client.return_value.__aenter__.return_value.get.assert_called_with(
- test_url, timeout=2.0, headers={"Accept": "text/event-stream", **test_headers}
- )
-
- async def test_run_tool_with_retry_on_connection_error(self, sse_client):
- """Test that run_tool retries on connection errors."""
- # Setup connection state
- sse_client._connected = True
- sse_client._connection_params = {"url": "http://test.url", "headers": {}}
- sse_client._session_context = "test_context"
-
- call_count = 0
-
- async def mock_get_session_side_effect():
- nonlocal call_count
- call_count += 1
- session = AsyncMock()
- if call_count == 1:
- # First call fails with connection error
- from anyio import ClosedResourceError
-
- session.call_tool = AsyncMock(side_effect=ClosedResourceError())
- else:
- # Second call succeeds
- mock_result = MagicMock()
- session.call_tool = AsyncMock(return_value=mock_result)
- return session
-
- with (
- patch.object(sse_client, "_get_or_create_session", side_effect=mock_get_session_side_effect),
- patch.object(sse_client, "_get_session_manager") as mock_get_manager,
- ):
- mock_manager = AsyncMock()
- mock_get_manager.return_value = mock_manager
-
- result = await sse_client.run_tool("test_tool", {"param": "value"})
-
- # Should have retried and succeeded on second attempt
- assert call_count == 2
- assert result is not None
- # Should have cleaned up the failed session
- mock_manager._cleanup_session.assert_called_once_with("test_context")
diff --git a/src/backend/tests/unit/base/tools/test_component_toolkit.py b/src/backend/tests/unit/base/tools/test_component_toolkit.py
deleted file mode 100644
index 457950458e55..000000000000
--- a/src/backend/tests/unit/base/tools/test_component_toolkit.py
+++ /dev/null
@@ -1,168 +0,0 @@
-import os
-import sqlite3
-from pathlib import Path
-
-import pytest
-from pydantic import BaseModel
-
-from lfx.base.tools.component_tool import ComponentToolkit
-from lfx.components.data.sql_executor import SQLComponent
-from lfx.components.input_output.chat_output import ChatOutput
-from lfx.components.langchain_utilities import ToolCallingAgentComponent
-from lfx.components.openai.openai_chat_model import OpenAIModelComponent
-from lfx.components.tools.calculator import CalculatorToolComponent
-from lfx.graph.graph.base import Graph
-
-
-@pytest.fixture
-def test_db():
- """Fixture that creates a temporary SQLite database for testing."""
- test_data_dir = Path(__file__).parent.parent.parent.parent / "data"
- db_path = test_data_dir / "test.db"
- conn = sqlite3.connect(db_path)
- cursor = conn.cursor()
- # Create students table
- cursor.execute("""
- CREATE TABLE students (
- id INTEGER PRIMARY KEY,
- first_name TEXT NOT NULL,
- last_name TEXT NOT NULL,
- age INTEGER,
- gpa REAL,
- major TEXT
- )
- """)
-
- # Create courses table
- cursor.execute("""
- CREATE TABLE courses (
- id INTEGER PRIMARY KEY,
- course_name TEXT NOT NULL,
- instructor TEXT,
- credits INTEGER
- )
- """)
-
- # Create enrollment junction table
- cursor.execute("""
- CREATE TABLE enrollments (
- student_id INTEGER,
- course_id INTEGER,
- grade TEXT,
- PRIMARY KEY (student_id, course_id),
- FOREIGN KEY (student_id) REFERENCES students (id),
- FOREIGN KEY (course_id) REFERENCES courses (id)
- )
- """)
-
- # Insert sample student data
- students = [
- (1, "John", "Smith", 20, 3.5, "Computer Science"),
- (2, "Emma", "Johnson", 21, 3.8, "Mathematics"),
- (3, "Michael", "Williams", 19, 3.2, "Physics"),
- (4, "Olivia", "Brown", 22, 3.9, "Biology"),
- (5, "James", "Davis", 20, 3.1, "Chemistry"),
- ]
-
- cursor.executemany("INSERT INTO students VALUES (?, ?, ?, ?, ?, ?)", students)
-
- # Insert sample course data
- courses = [
- (101, "Introduction to Programming", "Dr. Jones", 3),
- (102, "Calculus I", "Dr. Smith", 4),
- (103, "Physics 101", "Dr. Brown", 4),
- (104, "Biology Fundamentals", "Dr. Wilson", 3),
- (105, "Chemistry Basics", "Dr. Miller", 3),
- ]
-
- cursor.executemany("INSERT INTO courses VALUES (?, ?, ?, ?)", courses)
-
- # Insert sample enrollment data
- enrollments = [
- (1, 101, "A"),
- (1, 102, "B+"),
- (2, 102, "A"),
- (2, 103, "A-"),
- (3, 103, "B"),
- (3, 105, "C+"),
- (4, 104, "A"),
- (5, 105, "B+"),
- ]
-
- cursor.executemany("INSERT INTO enrollments VALUES (?, ?, ?)", enrollments)
-
- # Commit changes and close connection
- conn.commit()
- conn.close()
- yield str(db_path)
-
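-    # Clean up the temporary database once the test finishes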
- Path(db_path).unlink()
-
-
-def test_component_tool():
- calculator_component = CalculatorToolComponent()
- component_toolkit = ComponentToolkit(component=calculator_component)
- component_tool = component_toolkit.get_tools()[0]
- assert component_tool.name == "run_model"
- assert issubclass(component_tool.args_schema, BaseModel)
- # TODO: fix this
- # assert component_tool.args_schema.model_json_schema()["properties"] == {
- # "input_value": {
- # "default": "",
- # "description": "Message to be passed as input.",
- # "title": "Input Value",
- # "type": "string",
- # },
- # }
- assert component_toolkit.component == calculator_component
-
- result = component_tool.invoke(input={"expression": "1+1"})
- assert isinstance(result[0], dict)
- assert "result" in result[0]["data"]
- assert result[0]["data"]["result"] == "2"
-
-
-@pytest.mark.api_key_required
-@pytest.mark.usefixtures("client")
-async def test_component_tool_with_api_key():
- chat_output = ChatOutput()
- openai_llm = OpenAIModelComponent()
- openai_llm.set(api_key=os.environ["OPENAI_API_KEY"])
- tool_calling_agent = ToolCallingAgentComponent()
- tools = await chat_output.to_toolkit()
- tool_calling_agent.set(
- llm=openai_llm.build_model,
- tools=list(tools),
- input_value="Which tools are available? Please tell its name.",
- )
-
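-    # Build a one-node graph where the agent is both the start and the end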
- g = Graph(start=tool_calling_agent, end=tool_calling_agent)
- g.session_id = "test"
- assert g is not None
- results = [result async for result in g.async_start()]
- assert len(results) == 3
- assert "message_response" in tool_calling_agent._outputs_map["response"].value.get_text()
-
-
-@pytest.mark.api_key_required
-@pytest.mark.usefixtures("client")
-async def test_sql_component_to_toolkit(test_db):
- sql_component = SQLComponent()
- sql_component.set(database_url=f"sqlite:///{test_db}")
- tool = await sql_component.to_toolkit()
- openai_llm = OpenAIModelComponent()
- openai_llm.set(api_key=os.environ["OPENAI_API_KEY"])
- tool_calling_agent = ToolCallingAgentComponent()
-
- tool_calling_agent.set(
- llm=openai_llm.build_model,
- tools=list(tool),
- input_value="run SELECT * FROM courses to get course details.",
- )
-
- g = Graph(start=tool_calling_agent, end=tool_calling_agent)
- g.session_id = "test"
- assert g is not None
- results = [result async for result in g.async_start()]
- assert len(results) > 0
- assert "Physics 101" in tool_calling_agent._outputs_map["response"].value.get_text()
diff --git a/src/backend/tests/unit/base/tools/test_create_schema.py b/src/backend/tests/unit/base/tools/test_create_schema.py
deleted file mode 100644
index df6712b8c66e..000000000000
--- a/src/backend/tests/unit/base/tools/test_create_schema.py
+++ /dev/null
@@ -1,34 +0,0 @@
-from langflow.io.schema import create_input_schema_from_dict
-
-from lfx.schema.dotdict import dotdict
-
-
-def test_create_schema():
- sample_input = [
- {
- "_input_type": "MultilineInput",
- "advanced": False,
- "display_name": "Chat Input - Text",
- "dynamic": False,
- "info": "Message to be passed as input.",
- "input_types": ["Message"],
- "list": False,
- "load_from_db": False,
- "multiline": True,
- "name": "ChatInput-xNZ0a|input_value",
- "placeholder": "",
- "required": False,
- "show": True,
- "title_case": False,
- "tool_mode": True,
- "trace_as_input": True,
- "trace_as_metadata": True,
- "type": "str",
- "value": "add 1+1",
- }
- ]
-    # Convert each field dict to a dotdict so its keys are accessible as attributes
- sample_input = [dotdict(field) for field in sample_input]
- schema = create_input_schema_from_dict(sample_input)
- assert schema is not None
diff --git a/src/backend/tests/unit/base/tools/test_toolmodemixin.py b/src/backend/tests/unit/base/tools/test_toolmodemixin.py
deleted file mode 100644
index afe8f05568c7..000000000000
--- a/src/backend/tests/unit/base/tools/test_toolmodemixin.py
+++ /dev/null
@@ -1,156 +0,0 @@
-from langflow.custom import Component
-
-# Import all input types
-from langflow.io import (
- BoolInput,
- DataFrameInput,
- DataInput,
- DictInput,
- DropdownInput,
- FloatInput,
- IntInput,
- MessageInput,
- MessageTextInput,
- MultilineInput,
- MultiselectInput,
- NestedDictInput,
- Output,
- PromptInput,
- StrInput,
- TableInput,
-)
-from pydantic import BaseModel
-
-from lfx.base.tools.component_tool import ComponentToolkit
-from lfx.schema import Data
-
-
-class AllInputsComponent(Component):
- display_name = "All Inputs Component"
- description = "A component with all input types available in Langflow."
- documentation: str = "http://docs.langflow.org/components/all_inputs"
- icon = "code"
- name = "AllInputsComponent"
-
- inputs = [
- TableInput(
- name="table_input",
- display_name="Table Input",
- info="Input for table data.",
- value=[],
- tool_mode=True,
- table_schema=[
- {"name": "id", "type": "int"},
- {"name": "name", "type": "str"},
- ],
- ),
- DataInput(name="data_input", display_name="Data Input", info="Input for data objects.", tool_mode=True),
- DataFrameInput(
- name="dataframe_input", display_name="DataFrame Input", info="Input for DataFrame objects.", tool_mode=True
- ),
- PromptInput(name="prompt_input", display_name="Prompt Input", info="Input for prompt data.", tool_mode=True),
- StrInput(name="str_input", display_name="String Input", info="Input for string data.", tool_mode=True),
- MessageInput(
- name="message_input", display_name="Message Input", info="Input for message objects.", tool_mode=True
- ),
- MessageTextInput(
- name="message_text_input", display_name="Message Text Input", info="Input for message text.", tool_mode=True
- ),
- MultilineInput(
- name="multiline_input", display_name="Multiline Input", info="Input for multiline text.", tool_mode=True
- ),
- IntInput(name="int_input", display_name="Integer Input", info="Input for integer values.", tool_mode=True),
- FloatInput(name="float_input", display_name="Float Input", info="Input for float values.", tool_mode=True),
- BoolInput(name="bool_input", display_name="Boolean Input", info="Input for boolean values.", tool_mode=True),
- NestedDictInput(
- name="nested_dict_input",
- display_name="Nested Dictionary Input",
- info="Input for nested dictionary data.",
- tool_mode=True,
- value={"key1": "value1", "key2": "value2"},
- ),
- DictInput(
- name="dict_input",
- display_name="Dictionary Input",
- info="Input for dictionary data.",
- tool_mode=True,
- is_list=True,
- value={"key1": "value1", "key2": "value2"},
- ),
- DropdownInput(
- name="dropdown_input",
- display_name="Dropdown Input",
- info="Input for dropdown selections.",
- tool_mode=True,
- options=["option1", "option2", "option3"],
- value="option1",
- ),
- MultiselectInput(
- name="multiselect_input",
- display_name="Multiselect Input",
- info="Input for multiple selections.",
- tool_mode=True,
- options=["option1", "option2", "option3"],
- value=["option1", "option2"],
- ),
- ]
-
- outputs = [
- Output(display_name="Output", name="output", method="build_output"),
- ]
-
- def build_output(self) -> Data:
- # Example logic to process inputs and produce an output
- data_dict = {
- "table_input": self.table_input,
- "data_input": self.data_input,
- "dataframe_input": self.dataframe_input,
- "prompt_input": self.prompt_input,
- "str_input": self.str_input,
- "message_input": self.message_input,
- "message_text_input": self.message_text_input,
- "multiline_input": self.multiline_input,
- "int_input": self.int_input,
- "float_input": self.float_input,
- "bool_input": self.bool_input,
- }
- data = Data(value=data_dict)
- self.status = data
- return data
-
-
-def test_component_inputs_toolkit():
- component = AllInputsComponent()
- component_toolkit = ComponentToolkit(component=component)
- component_tool = component_toolkit.get_tools()[0]
- assert component_tool.name == "build_output"
- assert issubclass(component_tool.args_schema, BaseModel)
- properties = component_tool.args_schema.model_json_schema()["properties"]
-
- # Define expected properties based on the component's inputs
- expected_inputs = {
- "table_input": {"title": "Table Input", "description": "Input for table data."},
- "data_input": {"title": "Data Input", "description": "Input for data objects."},
- "dataframe_input": {"title": "Dataframe Input", "description": "Input for DataFrame objects."},
- "prompt_input": {"title": "Prompt Input", "description": "Input for prompt data."},
- "str_input": {"title": "Str Input", "description": "Input for string data."},
- "message_input": {"title": "Message Input", "description": "Input for message objects."},
- "message_text_input": {"title": "Message Text Input", "description": "Input for message text."},
- "multiline_input": {"title": "Multiline Input", "description": "Input for multiline text."},
-        # TODO: check how the title is generated. Shouldn't it be the display name?
- "int_input": {"title": "Int Input", "description": "Input for integer values."},
- "float_input": {"title": "Float Input", "description": "Input for float values."},
- "bool_input": {"title": "Bool Input", "description": "Input for boolean values."},
- "nested_dict_input": {"title": "Nested Dict Input", "description": "Input for nested dictionary data."},
- "dict_input": {"title": "Dict Input", "description": "Input for dictionary data."},
- "dropdown_input": {"title": "Dropdown Input", "description": "Input for dropdown selections."},
- "multiselect_input": {"title": "Multiselect Input", "description": "Input for multiple selections."},
- }
-
- # Iterate and assert each input's properties
- for input_name, expected in expected_inputs.items():
- assert input_name in properties, f"{input_name} is missing in properties."
- assert properties[input_name]["title"] == expected["title"], f"Title mismatch for {input_name}."
- assert properties[input_name]["description"] == expected["description"], (
- f"Description mismatch for {input_name}."
- )
diff --git a/src/backend/tests/unit/base/tools/test_vector_store_decorator.py b/src/backend/tests/unit/base/tools/test_vector_store_decorator.py
deleted file mode 100644
index eda50a4cee0f..000000000000
--- a/src/backend/tests/unit/base/tools/test_vector_store_decorator.py
+++ /dev/null
@@ -1,36 +0,0 @@
-from typing import Any
-
-import pytest
-
-from lfx.components.datastax import AstraDBVectorStoreComponent
-from tests.base import ComponentTestBaseWithoutClient, VersionComponentMapping
-
-
-class TestVectorStoreDecorator(ComponentTestBaseWithoutClient):
- """Unit tests for the AstraDBVectorStoreComponent decorator.
-
- This test class inherits from ComponentTestBaseWithoutClient and includes
- the following tests and fixtures:
-
- Fixtures:
- - component_class: Returns the AstraDBVectorStoreComponent class to be tested.
- - file_names_mapping: Returns an empty list representing the file names mapping for different versions.
- Tests:
- - test_decorator_applied: Verifies that the AstraDBVectorStoreComponent has the 'decorated' attribute and that
- it is set to True.
- """
-
- @pytest.fixture
- def component_class(self) -> type[Any]:
- """Return the component class to test."""
- return AstraDBVectorStoreComponent
-
- @pytest.fixture
- def file_names_mapping(self) -> list[VersionComponentMapping]:
- """Return the file names mapping for different versions."""
- return []
-
- def test_decorator_applied(self, component_class: type[Any]):
- component: AstraDBVectorStoreComponent = component_class()
- assert hasattr(component, "decorated")
- assert component.decorated
diff --git a/src/backend/tests/unit/build_utils.py b/src/backend/tests/unit/build_utils.py
deleted file mode 100644
index 9e3f1bed939c..000000000000
--- a/src/backend/tests/unit/build_utils.py
+++ /dev/null
@@ -1,136 +0,0 @@
-import asyncio
-import json
-from typing import Any
-from uuid import UUID
-
-from httpx import AsyncClient, codes
-
-from lfx.log.logger import logger
-
-
-async def create_flow(client: AsyncClient, flow_data: str, headers: dict[str, str]) -> UUID:
- """Create a flow and return its ID."""
- response = await client.post("api/v1/flows/", json=json.loads(flow_data), headers=headers)
- assert response.status_code == codes.CREATED
- return UUID(response.json()["id"])
-
-
-async def build_flow(
- client: AsyncClient, flow_id: UUID, headers: dict[str, str], json: dict[str, Any] | None = None
-) -> dict[str, Any]:
- """Start a flow build and return the job_id."""
- if json is None:
- json = {}
- response = await client.post(f"api/v1/build/{flow_id}/flow", json=json, headers=headers)
- assert response.status_code == codes.OK
- return response.json()
-
-
-async def get_build_events(client: AsyncClient, job_id: str, headers: dict[str, str]):
- """Get events for a build job."""
- # Add Accept header for NDJSON format
- headers_with_accept = {**headers, "Accept": "application/x-ndjson"}
- return await client.get(f"api/v1/build/{job_id}/events", headers=headers_with_accept)
-
-
-async def consume_and_assert_stream(response, job_id, timeout=30.0):
- """Consume the event stream and assert the expected event structure.
-
- Args:
- response: The response object with an aiter_lines method
- job_id: The job ID to verify in events
-        timeout: Maximum time in seconds to wait for events (default: 30s)
- """
- count = 0
- lines = []
- first_event_seen = False
- end_event_seen = False
-
- # Set a timeout for the entire consumption process
- try:
- # In Python 3.10, asyncio.timeout() is not available, so we use wait_for instead
- async def process_events():
- nonlocal count, lines, first_event_seen, end_event_seen
- async for line in response.aiter_lines():
-                # Skip blank separator lines between events
- if not line:
- continue
-
- lines.append(line)
- try:
- parsed = json.loads(line)
- except json.JSONDecodeError:
- logger.debug(f"ERROR: Failed to parse JSON: {line}")
- raise
-
- if "job_id" in parsed:
- assert parsed["job_id"] == job_id
- continue
-
- # First event should be vertices_sorted
- if not first_event_seen:
- assert parsed["event"] == "vertices_sorted", (
- "Invalid first event. Expected 'vertices_sorted'. Full event stream:\n" + "\n".join(lines)
- )
- ids = parsed["data"]["ids"]
-
- assert ids == ["ChatInput-vsgM1"], "Invalid ids in first event. Full event stream:\n" + "\n".join(
- lines
- )
-
- to_run = parsed["data"]["to_run"]
- expected_to_run = [
- "ChatInput-vsgM1",
- "Prompt-VSSGR",
- "TypeConverterComponent-koSIz",
- "Memory-8X8Cq",
- "ChatOutput-NAw0P",
- ]
- assert set(to_run) == set(expected_to_run), (
- "Invalid to_run list in the first event. Full event stream:\n" + "\n".join(lines)
- )
- first_event_seen = True
- # Last event should be end
- elif parsed["event"] == "end":
- end_event_seen = True
- # Middle events should be end_vertex
- elif parsed["event"] == "end_vertex":
- assert parsed["data"]["build_data"] is not None, (
- f"Missing build_data at position {count}. Full event stream:\n" + "\n".join(lines)
- )
- # Other event types (like token or add_message) are allowed and ignored
- else:
- # Allow other event types to pass through without failing
- pass
-
- count += 1
-
- # Debug output for verbose mode to track progress
- if count % 10 == 0:
- logger.debug(f"Processed {count} events so far")
-
- await asyncio.wait_for(process_events(), timeout=timeout)
- except asyncio.TimeoutError as e:
-        # If we timed out, log what was received so far and fail the test
- events_summary = "\n".join(
- f"{i}: {line[:80]}..." if len(line) > 80 else f"{i}: {line}" for i, line in enumerate(lines)
- )
- logger.debug(
- f"ERROR: Test timed out after {timeout}s. Processed {count} events.\nEvents received:\n{events_summary}"
- )
- if first_event_seen and not end_event_seen:
- msg = f"Test timed out after {timeout}s waiting for 'end' event"
- raise TimeoutError(msg) from e
- if not first_event_seen:
- msg = f"Test timed out after {timeout}s waiting for 'vertices_sorted' event"
- raise TimeoutError(msg) from e
- msg = f"Test timed out after {timeout}s"
- raise TimeoutError(msg) from e
-
- # Verify we saw both the first and end events
- assert first_event_seen, "Missing vertices_sorted event. Full event stream:\n" + "\n".join(lines)
- assert end_event_seen, "Missing end event. Full event stream:\n" + "\n".join(lines)
-
-    # Log a summary of the events processed
- logger.debug(f"Successfully processed {count} events for job {job_id}")
- return count
diff --git a/src/backend/tests/unit/components/agents/test_agent_component.py b/src/backend/tests/unit/components/agents/test_agent_component.py
deleted file mode 100644
index a7eccff3c4c2..000000000000
--- a/src/backend/tests/unit/components/agents/test_agent_component.py
+++ /dev/null
@@ -1,439 +0,0 @@
-import os
-from typing import Any
-from uuid import uuid4
-
-import pytest
-from langflow.custom import Component
-
-from lfx.base.models.anthropic_constants import ANTHROPIC_MODELS
-from lfx.base.models.model_input_constants import (
- MODEL_PROVIDERS,
-)
-from lfx.base.models.openai_constants import (
- OPENAI_CHAT_MODEL_NAMES,
- OPENAI_REASONING_MODEL_NAMES,
-)
-from lfx.components.agents.agent import AgentComponent
-from lfx.components.tools.calculator import CalculatorToolComponent
-from tests.base import ComponentTestBaseWithClient, ComponentTestBaseWithoutClient
-from tests.unit.mock_language_model import MockLanguageModel
-
-
-class TestAgentComponent(ComponentTestBaseWithoutClient):
- @pytest.fixture
- def component_class(self):
- return AgentComponent
-
- @pytest.fixture
- def file_names_mapping(self):
- return []
-
- async def component_setup(self, component_class: type[Any], default_kwargs: dict[str, Any]) -> Component:
- component_instance = await super().component_setup(component_class, default_kwargs)
-        # Stub _should_process_output to return False so output post-processing is skipped in these tests
- component_instance._should_process_output = lambda output: False # noqa: ARG005
- return component_instance
-
- @pytest.fixture
- def default_kwargs(self):
- return {
- "_type": "Agent",
- "add_current_date_tool": True,
- "agent_description": "A helpful agent",
- "agent_llm": MockLanguageModel(),
- "handle_parsing_errors": True,
- "input_value": "",
- "max_iterations": 10,
- "system_prompt": "You are a helpful assistant.",
- "tools": [],
- "verbose": True,
- "n_messages": 100,
- "format_instructions": "You are an AI that extracts structured JSON objects from unstructured text.",
- "output_schema": [],
- }
-
- async def test_build_config_update(self, component_class, default_kwargs):
- component = await self.component_setup(component_class, default_kwargs)
- frontend_node = component.to_frontend_node()
- build_config = frontend_node["data"]["node"]["template"]
- # Test updating build config for OpenAI
- component.set(agent_llm="OpenAI")
- updated_config = await component.update_build_config(build_config, "OpenAI", "agent_llm")
- assert "agent_llm" in updated_config
- assert updated_config["agent_llm"]["value"] == "OpenAI"
- assert isinstance(updated_config["agent_llm"]["options"], list)
- assert len(updated_config["agent_llm"]["options"]) > 0
- assert all(provider in updated_config["agent_llm"]["options"] for provider in MODEL_PROVIDERS)
- assert "Custom" in updated_config["agent_llm"]["options"]
-
- # Verify model_name field is populated for OpenAI
-
- assert "model_name" in updated_config
- model_name_dict = updated_config["model_name"]
- assert isinstance(model_name_dict["options"], list)
- assert len(model_name_dict["options"]) > 0 # OpenAI should have available models
- assert "gpt-4o" in model_name_dict["options"]
-
- # Test Anthropic
- component.set(agent_llm="Anthropic")
- updated_config = await component.update_build_config(build_config, "Anthropic", "agent_llm")
- assert "agent_llm" in updated_config
- assert updated_config["agent_llm"]["value"] == "Anthropic"
- assert isinstance(updated_config["agent_llm"]["options"], list)
- assert len(updated_config["agent_llm"]["options"]) > 0
- assert all(provider in updated_config["agent_llm"]["options"] for provider in MODEL_PROVIDERS)
- assert "Anthropic" in updated_config["agent_llm"]["options"]
- assert updated_config["agent_llm"]["input_types"] == []
- options = updated_config["model_name"]["options"]
- assert any("sonnet" in option.lower() for option in options), f"Options: {options}"
-
- # Test updating build config for Custom
- updated_config = await component.update_build_config(build_config, "Custom", "agent_llm")
- assert "agent_llm" in updated_config
- assert updated_config["agent_llm"]["value"] == "Custom"
- assert isinstance(updated_config["agent_llm"]["options"], list)
- assert len(updated_config["agent_llm"]["options"]) > 0
- assert all(provider in updated_config["agent_llm"]["options"] for provider in MODEL_PROVIDERS)
- assert "Custom" in updated_config["agent_llm"]["options"]
- assert updated_config["agent_llm"]["input_types"] == ["LanguageModel"]
-
- # Verify model_name field is cleared for Custom
- assert "model_name" not in updated_config
-
- async def test_agent_has_dual_outputs(self, component_class, default_kwargs):
- """Test that Agent component has both Response and Structured Response outputs."""
- component = await self.component_setup(component_class, default_kwargs)
-
- assert len(component.outputs) == 2
- assert component.outputs[0].name == "response"
- assert component.outputs[0].display_name == "Response"
- assert component.outputs[0].method == "message_response"
-
- assert component.outputs[1].name == "structured_response"
- assert component.outputs[1].display_name == "Structured Response"
- assert component.outputs[1].method == "json_response"
- assert component.outputs[1].tool_mode is False
-
- async def test_json_mode_filtered_from_openai_inputs(self, component_class, default_kwargs):
- """Test that json_mode is filtered out from OpenAI inputs."""
- component = await self.component_setup(component_class, default_kwargs)
-
- # Check that json_mode is not in the agent's inputs
- input_names = [inp.name for inp in component.inputs if hasattr(inp, "name")]
- assert "json_mode" not in input_names
-
- # Verify other OpenAI inputs are still present
- assert "model_name" in input_names
- assert "api_key" in input_names
- assert "temperature" in input_names
-
- async def test_json_response_parsing_valid_json(self, component_class, default_kwargs):
- """Test that json_response correctly parses JSON from agent response."""
- component = await self.component_setup(component_class, default_kwargs)
- # Mock the get_agent_requirements method to avoid actual LLM calls
- from unittest.mock import AsyncMock
-
- component.get_agent_requirements = AsyncMock(return_value=(MockLanguageModel(), [], []))
- component.create_agent_runnable = AsyncMock(return_value=None)
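-        # A minimal stand-in for the agent result: any object with a .content string suffices here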
- mock_result = type("MockResult", (), {"content": '{"name": "test", "value": 123}'})()
- component.run_agent = AsyncMock(return_value=mock_result)
-
- result = await component.json_response()
-
- from lfx.schema.data import Data
-
- assert isinstance(result, Data)
- assert result.data == {"name": "test", "value": 123}
-
- async def test_json_response_parsing_embedded_json(self, component_class, default_kwargs):
- """Test that json_response handles text containing JSON."""
- component = await self.component_setup(component_class, default_kwargs)
- # Mock the get_agent_requirements method to avoid actual LLM calls
- from unittest.mock import AsyncMock
-
- component.get_agent_requirements = AsyncMock(return_value=(MockLanguageModel(), [], []))
- component.create_agent_runnable = AsyncMock(return_value=None)
- mock_result = type("MockResult", (), {"content": 'Here is the result: {"status": "success"} - done!'})()
- component.run_agent = AsyncMock(return_value=mock_result)
-
- result = await component.json_response()
-
- from lfx.schema.data import Data
-
- assert isinstance(result, Data)
- assert result.data == {"status": "success"}
-
- async def test_json_response_error_handling(self, component_class, default_kwargs):
- """Test that json_response handles completely non-JSON responses."""
- component = await self.component_setup(component_class, default_kwargs)
- # Mock the get_agent_requirements method to avoid actual LLM calls
- from unittest.mock import AsyncMock
-
- component.get_agent_requirements = AsyncMock(return_value=(MockLanguageModel(), [], []))
- component.create_agent_runnable = AsyncMock(return_value=None)
- mock_result = type("MockResult", (), {"content": "This is just plain text with no JSON"})()
- component.run_agent = AsyncMock(return_value=mock_result)
-
- result = await component.json_response()
-
- from lfx.schema.data import Data
-
- assert isinstance(result, Data)
- assert "error" in result.data
- assert result.data["content"] == "This is just plain text with no JSON"
-
- async def test_model_building_without_json_mode(self, component_class, default_kwargs):
- """Test that model building works without json_mode attribute."""
- component = await self.component_setup(component_class, default_kwargs)
- component.agent_llm = "OpenAI"
-
- # Mock component for testing
- from unittest.mock import Mock
-
- mock_component = Mock()
- mock_component.set.return_value = mock_component
-
- # Should not raise AttributeError for missing json_mode
- result = component.set_component_params(mock_component)
-
- assert result is not None
- # Verify set was called (meaning no AttributeError occurred)
- mock_component.set.assert_called_once()
-
- async def test_json_response_with_schema_validation(self, component_class, default_kwargs):
- """Test that json_response validates against provided schema."""
- # Set up component with output schema
- default_kwargs["output_schema"] = [
- {"name": "name", "type": "str", "description": "Name field", "multiple": False},
- {"name": "age", "type": "int", "description": "Age field", "multiple": False},
- ]
- component = await self.component_setup(component_class, default_kwargs)
- # Mock the get_agent_requirements method
- from unittest.mock import AsyncMock
-
- component.get_agent_requirements = AsyncMock(return_value=(MockLanguageModel(), [], []))
- component.create_agent_runnable = AsyncMock(return_value=None)
- mock_result = type("MockResult", (), {"content": '{"name": "John", "age": 25}'})()
- component.run_agent = AsyncMock(return_value=mock_result)
-
- result = await component.json_response()
-
- from langflow.schema.data import Data
-
- assert isinstance(result, Data)
- assert result.data == {"name": "John", "age": 25}
-
- async def test_agent_component_initialization(self, component_class, default_kwargs):
- """Test that Agent component initializes correctly with filtered inputs."""
- component = await self.component_setup(component_class, default_kwargs)
-
- # Should not raise any errors during initialization
- assert component.display_name == "Agent"
- assert component.name == "Agent"
- assert len(component.inputs) > 0
- assert len(component.outputs) == 2
-
- async def test_frontend_node_structure(self, component_class, default_kwargs):
- """Test that frontend node has correct structure with filtered inputs."""
- component = await self.component_setup(component_class, default_kwargs)
-
- frontend_node = component.to_frontend_node()
- build_config = frontend_node["data"]["node"]["template"]
-
- # Verify json_mode is not in build config
- assert "json_mode" not in build_config
-
- # Verify other expected fields are present
- assert "agent_llm" in build_config
- assert "system_prompt" in build_config
- assert "add_current_date_tool" in build_config
-
- async def test_preprocess_schema(self, component_class, default_kwargs):
- """Test that _preprocess_schema correctly handles schema validation."""
- component = await self.component_setup(component_class, default_kwargs)
-
- # Test schema preprocessing
- raw_schema = [
- {"name": "field1", "type": "str", "description": "Test field", "multiple": "true"},
- {"name": "field2", "type": "int", "description": "Another field", "multiple": False},
- ]
-
- processed = component._preprocess_schema(raw_schema)
-
- assert len(processed) == 2
- assert processed[0]["multiple"] is True # String "true" should be converted to bool
- assert processed[1]["multiple"] is False
-
- async def test_build_structured_output_base_with_validation(self, component_class, default_kwargs):
- """Test build_structured_output_base with schema validation."""
- default_kwargs["output_schema"] = [
- {"name": "name", "type": "str", "description": "Name field", "multiple": False},
- {"name": "count", "type": "int", "description": "Count field", "multiple": False},
- ]
- component = await self.component_setup(component_class, default_kwargs)
-
- # Test valid JSON that matches schema
- valid_content = '{"name": "test", "count": 42}'
- result = await component.build_structured_output_base(valid_content)
- assert result == [{"name": "test", "count": 42}]
-
- async def test_build_structured_output_base_without_schema(self, component_class, default_kwargs):
- """Test build_structured_output_base without schema validation."""
- component = await self.component_setup(component_class, default_kwargs)
-
- # Test with no output_schema
- content = '{"any": "data", "number": 123}'
- result = await component.build_structured_output_base(content)
- assert result == {"any": "data", "number": 123}
-
- async def test_build_structured_output_base_embedded_json(self, component_class, default_kwargs):
- """Test extraction of JSON from embedded text."""
- component = await self.component_setup(component_class, default_kwargs)
-
- content = 'Here is some text with {"embedded": "json"} inside it.'
- result = await component.build_structured_output_base(content)
- assert result == {"embedded": "json"}
-
- async def test_build_structured_output_base_no_json(self, component_class, default_kwargs):
- """Test handling of content with no JSON."""
- component = await self.component_setup(component_class, default_kwargs)
-
- content = "This is just plain text with no JSON at all."
- result = await component.build_structured_output_base(content)
- assert "error" in result
- assert result["content"] == content
-
- async def test_new_input_fields_present(self, component_class, default_kwargs):
- """Test that new input fields are present in the component."""
- component = await self.component_setup(component_class, default_kwargs)
-
- input_names = [inp.name for inp in component.inputs if hasattr(inp, "name")]
-
- # Test for new fields
- assert "format_instructions" in input_names
- assert "output_schema" in input_names
- assert "n_messages" in input_names
-
- # Verify default values
- assert hasattr(component, "format_instructions")
- assert hasattr(component, "output_schema")
- assert hasattr(component, "n_messages")
- assert component.n_messages == 100
-
- async def test_agent_has_correct_outputs(self, component_class, default_kwargs):
- """Test that Agent component has the correct output configuration."""
- component = await self.component_setup(component_class, default_kwargs)
-
- assert len(component.outputs) == 2
-
- # Test response output
- response_output = component.outputs[0]
- assert response_output.name == "response"
- assert response_output.display_name == "Response"
- assert response_output.method == "message_response"
-
- # Test structured response output
- structured_output = component.outputs[1]
- assert structured_output.name == "structured_response"
- assert structured_output.display_name == "Structured Response"
- assert structured_output.method == "json_response"
- assert structured_output.tool_mode is False
-
-
-class TestAgentComponentWithClient(ComponentTestBaseWithClient):
- @pytest.fixture
- def component_class(self):
- return AgentComponent
-
- @pytest.fixture
- def file_names_mapping(self):
- return []
-
- @pytest.mark.api_key_required
- @pytest.mark.no_blockbuster
- async def test_agent_component_with_calculator(self):
-        # Read the OpenAI API key from the environment
- api_key = os.getenv("OPENAI_API_KEY")
- tools = [CalculatorToolComponent().build_tool()] # Use the Calculator component as a tool
- input_value = "What is 2 + 2?"
-
- temperature = 0.1
-
- # Initialize the AgentComponent with mocked inputs
- agent = AgentComponent(
- tools=tools,
- input_value=input_value,
- api_key=api_key,
- model_name="gpt-4o",
- agent_llm="OpenAI",
- temperature=temperature,
- _session_id=str(uuid4()),
- )
-
- response = await agent.message_response()
- assert "4" in response.data.get("text")
-
- @pytest.mark.api_key_required
- @pytest.mark.no_blockbuster
- async def test_agent_component_with_all_openai_models(self):
- # Mock inputs
- api_key = os.getenv("OPENAI_API_KEY")
- input_value = "What is 2 + 2?"
-
- # Iterate over all OpenAI models
- failed_models = []
- for model_name in OPENAI_CHAT_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES:
- # Initialize the AgentComponent with mocked inputs
- tools = [CalculatorToolComponent().build_tool()] # Use the Calculator component as a tool
- agent = AgentComponent(
- tools=tools,
- input_value=input_value,
- api_key=api_key,
- model_name=model_name,
- agent_llm="OpenAI",
- _session_id=str(uuid4()),
- )
-
- response = await agent.message_response()
- if "4" not in response.data.get("text"):
- failed_models.append(model_name)
-
- assert not failed_models, f"The following models failed the test: {failed_models}"
-
- @pytest.mark.api_key_required
- @pytest.mark.no_blockbuster
- async def test_agent_component_with_all_anthropic_models(self):
- # Mock inputs
- api_key = os.getenv("ANTHROPIC_API_KEY")
- input_value = "What is 2 + 2?"
-
- # Iterate over all Anthropic models
- failed_models = {}
-
- for model_name in ANTHROPIC_MODELS:
- try:
- # Initialize the AgentComponent with mocked inputs
- tools = [CalculatorToolComponent().build_tool()]
- agent = AgentComponent(
- tools=tools,
- input_value=input_value,
- api_key=api_key,
- model_name=model_name,
- agent_llm="Anthropic",
- _session_id=str(uuid4()),
- )
-
- response = await agent.message_response()
- response_text = response.data.get("text", "")
-
- if "4" not in response_text:
- failed_models[model_name] = f"Expected '4' in response but got: {response_text}"
-
- except Exception as e:
- failed_models[model_name] = f"Exception occurred: {e!s}"
-
- assert not failed_models, "The following models failed the test:\n" + "\n".join(
- f"{model}: {error}" for model, error in failed_models.items()
- )
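The two model-matrix tests above loop over every model inside a single test and collect failures into a container, so one bad model cannot mask results for the rest. For comparison, a minimal sketch of the same check written with `pytest.mark.parametrize`, which gets per-model reporting from pytest itself. The `AgentComponent` import path and the two-model list are assumptions for illustration; the real names come from this module's (elided) import block:

```python
import os
from uuid import uuid4

import pytest

from lfx.components.tools.calculator import CalculatorToolComponent

# Assumed import path for AgentComponent (not shown in this hunk).
from lfx.components.agents.agent import AgentComponent

MODELS_UNDER_TEST = ["gpt-4o", "gpt-4o-mini"]  # illustrative subset, not the real constants


@pytest.mark.api_key_required
@pytest.mark.parametrize("model_name", MODELS_UNDER_TEST)
async def test_agent_answers_arithmetic(model_name):
    """One test case per model, so each failure is reported individually."""
    agent = AgentComponent(
        tools=[CalculatorToolComponent().build_tool()],
        input_value="What is 2 + 2?",
        api_key=os.getenv("OPENAI_API_KEY"),
        model_name=model_name,
        agent_llm="OpenAI",
        _session_id=str(uuid4()),
    )
    response = await agent.message_response()
    assert "4" in response.data.get("text", "")
```

The trade-off is reporting granularity versus run structure: the loop form makes one pass inside a single test, while parametrize creates a separate test case (and a separate failure report) per model.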
diff --git a/src/backend/tests/unit/components/agents/test_agent_events.py b/src/backend/tests/unit/components/agents/test_agent_events.py
deleted file mode 100644
index 84683d5bc7a3..000000000000
--- a/src/backend/tests/unit/components/agents/test_agent_events.py
+++ /dev/null
@@ -1,544 +0,0 @@
-from collections.abc import AsyncIterator
-from typing import Any
-from unittest.mock import AsyncMock
-
-from langchain_core.agents import AgentFinish
-
-from lfx.base.agents.agent import process_agent_events
-from lfx.base.agents.events import (
- handle_on_chain_end,
- handle_on_chain_start,
- handle_on_chain_stream,
- handle_on_tool_end,
- handle_on_tool_error,
- handle_on_tool_start,
-)
-from lfx.schema.content_block import ContentBlock
-from lfx.schema.content_types import ToolContent
-from lfx.schema.message import Message
-from lfx.utils.constants import MESSAGE_SENDER_AI
-
-
-async def create_event_iterator(events: list[dict[str, Any]]) -> AsyncIterator[dict[str, Any]]:
- """Helper function to create an async iterator from a list of events."""
- for event in events:
- yield event
-
-
-async def test_chain_start_event():
- """Test handling of on_chain_start event."""
- send_message = AsyncMock(side_effect=lambda message: message)
-
- events = [
- {"event": "on_chain_start", "data": {"input": {"input": "test input", "chat_history": []}}, "start_time": 0}
- ]
-
- # Initialize message with content blocks
- agent_message = Message(
- sender=MESSAGE_SENDER_AI,
- sender_name="Agent",
- properties={"icon": "Bot", "state": "partial"},
- content_blocks=[ContentBlock(title="Agent Steps", contents=[])],
- session_id="test_session_id",
- )
- send_message.return_value = agent_message
-
- result = await process_agent_events(create_event_iterator(events), agent_message, send_message)
-
- assert result.properties.icon == "Bot"
- assert len(result.content_blocks) == 1
- assert result.content_blocks[0].title == "Agent Steps"
-
-
-async def test_chain_end_event():
- """Test handling of on_chain_end event."""
- send_message = AsyncMock(side_effect=lambda message: message)
-
- # Create a mock AgentFinish output
- output = AgentFinish(return_values={"output": "final output"}, log="test log")
-
- events = [{"event": "on_chain_end", "data": {"output": output}, "start_time": 0}]
-
- # Initialize message with content blocks
- agent_message = Message(
- sender=MESSAGE_SENDER_AI,
- sender_name="Agent",
- properties={"icon": "Bot", "state": "partial"},
- content_blocks=[ContentBlock(title="Agent Steps", contents=[])],
- session_id="test_session_id",
- )
- send_message.return_value = agent_message
-
- result = await process_agent_events(create_event_iterator(events), agent_message, send_message)
-
- assert result.properties.icon == "Bot"
- assert result.properties.state == "complete"
- assert result.text == "final output"
-
-
-async def test_tool_start_event():
- """Test handling of on_tool_start event."""
- send_message = AsyncMock()
-
- # Set up the send_message mock to return the modified message
- def update_message(message):
- # Return a copy of the message to simulate real behavior
- return Message(**message.model_dump())
-
- send_message.side_effect = update_message
-
- events = [
- {
- "event": "on_tool_start",
- "name": "test_tool",
- "run_id": "test_run",
- "data": {"input": {"query": "tool input"}},
- "start_time": 0,
- }
- ]
- agent_message = Message(
- sender=MESSAGE_SENDER_AI,
- sender_name="Agent",
- properties={"icon": "Bot", "state": "partial"},
- content_blocks=[ContentBlock(title="Agent Steps", contents=[])],
- session_id="test_session_id",
- )
- result = await process_agent_events(create_event_iterator(events), agent_message, send_message)
-
- assert result.properties.icon == "Bot"
- assert len(result.content_blocks) == 1
- assert result.content_blocks[0].title == "Agent Steps"
- assert len(result.content_blocks[0].contents) > 0
- tool_content = result.content_blocks[0].contents[-1]
- assert isinstance(tool_content, ToolContent)
- assert tool_content.name == "test_tool"
- assert tool_content.tool_input == {"query": "tool input"}, tool_content
-
-
-async def test_tool_end_event():
- """Test handling of on_tool_end event."""
- send_message = AsyncMock(side_effect=lambda message: message)
-
- events = [
- {
- "event": "on_tool_start",
- "name": "test_tool",
- "run_id": "test_run",
- "data": {"input": {"query": "tool input"}},
- "start_time": 0,
- },
- {
- "event": "on_tool_end",
- "name": "test_tool",
- "run_id": "test_run",
- "data": {"output": "tool output"},
- "start_time": 0,
- },
- ]
- agent_message = Message(
- sender=MESSAGE_SENDER_AI,
- sender_name="Agent",
- properties={"icon": "Bot", "state": "partial"},
- content_blocks=[ContentBlock(title="Agent Steps", contents=[])],
- session_id="test_session_id",
- )
- result = await process_agent_events(create_event_iterator(events), agent_message, send_message)
-
- assert len(result.content_blocks) == 1
- tool_content = result.content_blocks[0].contents[-1]
- assert tool_content.name == "test_tool"
- assert tool_content.output == "tool output"
-
-
-async def test_tool_error_event():
- """Test handling of on_tool_error event."""
- send_message = AsyncMock(side_effect=lambda message: message)
-
- events = [
- {
- "event": "on_tool_start",
- "name": "test_tool",
- "run_id": "test_run",
- "data": {"input": {"query": "tool input"}},
- "start_time": 0,
- },
- {
- "event": "on_tool_error",
- "name": "test_tool",
- "run_id": "test_run",
- "data": {"error": "error message"},
- "start_time": 0,
- },
- ]
- agent_message = Message(
- sender=MESSAGE_SENDER_AI,
- sender_name="Agent",
- properties={"icon": "Bot", "state": "partial"},
- content_blocks=[ContentBlock(title="Agent Steps", contents=[])],
- session_id="test_session_id",
- )
-
- result = await process_agent_events(create_event_iterator(events), agent_message, send_message)
-
- tool_content = result.content_blocks[0].contents[-1]
- assert tool_content.name == "test_tool"
- assert tool_content.error == "error message"
- assert tool_content.header["title"] == "Error using **test_tool**"
-
-
-async def test_chain_stream_event():
- """Test handling of on_chain_stream event."""
- send_message = AsyncMock(side_effect=lambda message: message)
-
- events = [{"event": "on_chain_stream", "data": {"chunk": {"output": "streamed output"}}, "start_time": 0}]
- agent_message = Message(
- sender=MESSAGE_SENDER_AI,
- sender_name="Agent",
- properties={"icon": "Bot", "state": "partial"},
- content_blocks=[ContentBlock(title="Agent Steps", contents=[])],
- session_id="test_session_id",
- )
- result = await process_agent_events(create_event_iterator(events), agent_message, send_message)
-
- assert result.properties.state == "complete"
- assert result.text == "streamed output"
-
-
-async def test_multiple_events():
- """Test handling of multiple events in sequence."""
- send_message = AsyncMock(side_effect=lambda message: message)
-
- # Create a mock AgentFinish output instead of MockOutput
- output = AgentFinish(return_values={"output": "final output"}, log="test log")
-
- events = [
- {"event": "on_chain_start", "data": {"input": {"input": "initial input", "chat_history": []}}, "start_time": 0},
- {
- "event": "on_tool_start",
- "name": "test_tool",
- "run_id": "test_run",
- "data": {"input": {"query": "tool input"}},
- "start_time": 0,
- },
- {
- "event": "on_tool_end",
- "name": "test_tool",
- "run_id": "test_run",
- "data": {"output": "tool output"},
- "start_time": 0,
- },
- {"event": "on_chain_end", "data": {"output": output}, "start_time": 0},
- ]
-
- # Initialize message with content blocks
- agent_message = Message(
- sender=MESSAGE_SENDER_AI,
- sender_name="Agent",
- properties={"icon": "Bot", "state": "partial"},
- content_blocks=[ContentBlock(title="Agent Steps", contents=[])],
- )
- send_message.return_value = agent_message
-
- result = await process_agent_events(create_event_iterator(events), agent_message, send_message)
-
- assert result.properties.state == "complete"
- assert result.properties.icon == "Bot"
- assert len(result.content_blocks) == 1
- assert result.text == "final output"
-
-
-async def test_unknown_event():
- """Test handling of unknown event type."""
- send_message = AsyncMock(side_effect=lambda message: message)
- agent_message = Message(
- sender=MESSAGE_SENDER_AI,
- sender_name="Agent",
- properties={"icon": "Bot", "state": "partial"},
- content_blocks=[ContentBlock(title="Agent Steps", contents=[])], # Initialize with empty content block
- )
- send_message.return_value = agent_message
-
- events = [{"event": "unknown_event", "data": {"some": "data"}, "start_time": 0}]
-
- result = await process_agent_events(create_event_iterator(events), agent_message, send_message)
-
- # Should complete without error and maintain default state
- assert result.properties.state == "complete"
- # Content blocks should be empty but present
- assert len(result.content_blocks) == 1
- assert len(result.content_blocks[0].contents) == 0
-
-
-# Additional tests for individual handler functions
-
-
-async def test_handle_on_chain_start_with_input():
- """Test handle_on_chain_start with input."""
- send_message = AsyncMock(side_effect=lambda message: message)
- agent_message = Message(
- sender=MESSAGE_SENDER_AI,
- sender_name="Agent",
- properties={"icon": "Bot", "state": "partial"},
- content_blocks=[ContentBlock(title="Agent Steps", contents=[])],
- )
- event = {"event": "on_chain_start", "data": {"input": {"input": "test input", "chat_history": []}}, "start_time": 0}
-
- updated_message, start_time = await handle_on_chain_start(event, agent_message, send_message, 0.0)
-
- assert updated_message.properties.icon == "Bot"
- assert len(updated_message.content_blocks) == 1
- assert updated_message.content_blocks[0].title == "Agent Steps"
- assert isinstance(start_time, float)
-
-
-async def test_handle_on_chain_start_no_input():
- """Test handle_on_chain_start without input."""
- send_message = AsyncMock(side_effect=lambda message: message)
- agent_message = Message(
- sender=MESSAGE_SENDER_AI,
- sender_name="Agent",
- properties={"icon": "Bot", "state": "partial"},
- content_blocks=[ContentBlock(title="Agent Steps", contents=[])],
- )
- event = {"event": "on_chain_start", "data": {}, "start_time": 0}
-
- updated_message, start_time = await handle_on_chain_start(event, agent_message, send_message, 0.0)
-
- assert updated_message.properties.icon == "Bot"
- assert len(updated_message.content_blocks) == 1
- assert len(updated_message.content_blocks[0].contents) == 0
- assert isinstance(start_time, float)
-
-
-async def test_handle_on_chain_end_with_output():
- """Test handle_on_chain_end with output."""
- send_message = AsyncMock(side_effect=lambda message: message)
- agent_message = Message(
- sender=MESSAGE_SENDER_AI,
- sender_name="Agent",
- properties={"icon": "Bot", "state": "partial"},
- content_blocks=[ContentBlock(title="Agent Steps", contents=[])],
- )
-
- output = AgentFinish(return_values={"output": "final output"}, log="test log")
- event = {"event": "on_chain_end", "data": {"output": output}, "start_time": 0}
-
- updated_message, start_time = await handle_on_chain_end(event, agent_message, send_message, 0.0)
-
- assert updated_message.properties.icon == "Bot"
- assert updated_message.properties.state == "complete"
- assert updated_message.text == "final output"
- assert isinstance(start_time, float)
-
-
-async def test_handle_on_chain_end_no_output():
- """Test handle_on_chain_end without output key in data."""
- send_message = AsyncMock(side_effect=lambda message: message)
- agent_message = Message(
- sender=MESSAGE_SENDER_AI,
- sender_name="Agent",
- properties={"icon": "Bot", "state": "partial"},
- content_blocks=[ContentBlock(title="Agent Steps", contents=[])],
- )
- event = {"event": "on_chain_end", "data": {}, "start_time": 0}
-
- updated_message, start_time = await handle_on_chain_end(event, agent_message, send_message, 0.0)
-
- assert updated_message.properties.icon == "Bot"
- assert updated_message.properties.state == "partial"
- assert updated_message.text == ""
- assert isinstance(start_time, float)
-
-
-async def test_handle_on_chain_end_empty_data():
- """Test handle_on_chain_end with empty data."""
- send_message = AsyncMock(side_effect=lambda message: message)
- agent_message = Message(
- sender=MESSAGE_SENDER_AI,
- sender_name="Agent",
- properties={"icon": "Bot", "state": "partial"},
- content_blocks=[ContentBlock(title="Agent Steps", contents=[])],
- )
- event = {"event": "on_chain_end", "data": {"output": None}, "start_time": 0}
-
- updated_message, start_time = await handle_on_chain_end(event, agent_message, send_message, 0.0)
-
- assert updated_message.properties.icon == "Bot"
- assert updated_message.properties.state == "partial"
- assert updated_message.text == ""
- assert isinstance(start_time, float)
-
-
-async def test_handle_on_chain_end_with_empty_return_values():
- """Test handle_on_chain_end with empty return_values."""
- send_message = AsyncMock(side_effect=lambda message: message)
- agent_message = Message(
- sender=MESSAGE_SENDER_AI,
- sender_name="Agent",
- properties={"icon": "Bot", "state": "partial"},
- content_blocks=[ContentBlock(title="Agent Steps", contents=[])],
- )
-
- class MockOutputEmptyReturnValues:
- def __init__(self):
- self.return_values = {}
-
- event = {"event": "on_chain_end", "data": {"output": MockOutputEmptyReturnValues()}, "start_time": 0}
-
- updated_message, start_time = await handle_on_chain_end(event, agent_message, send_message, 0.0)
-
- assert updated_message.properties.icon == "Bot"
- assert updated_message.properties.state == "partial"
- assert updated_message.text == ""
- assert isinstance(start_time, float)
-
-
-async def test_handle_on_tool_start():
- """Test handle_on_tool_start event."""
- send_message = AsyncMock(side_effect=lambda message: message)
- tool_blocks_map = {}
- agent_message = Message(
- sender=MESSAGE_SENDER_AI,
- sender_name="Agent",
- properties={"icon": "Bot", "state": "partial"},
- content_blocks=[ContentBlock(title="Agent Steps", contents=[])],
- )
- event = {
- "event": "on_tool_start",
- "name": "test_tool",
- "run_id": "test_run",
- "data": {"input": {"query": "tool input"}},
- "start_time": 0,
- }
-
- updated_message, start_time = await handle_on_tool_start(event, agent_message, tool_blocks_map, send_message, 0.0)
-
- assert len(updated_message.content_blocks) == 1
- assert len(updated_message.content_blocks[0].contents) > 0
- tool_key = f"{event['name']}_{event['run_id']}"
- tool_content = updated_message.content_blocks[0].contents[-1]
- assert tool_content == tool_blocks_map.get(tool_key)
- assert isinstance(tool_content, ToolContent)
- assert tool_content.name == "test_tool"
- assert tool_content.tool_input == {"query": "tool input"}
- assert isinstance(tool_content.duration, int)
- assert isinstance(start_time, float)
-
-
-async def test_handle_on_tool_end():
- """Test handle_on_tool_end event."""
- send_message = AsyncMock(side_effect=lambda message: message)
- tool_blocks_map = {}
- agent_message = Message(
- sender=MESSAGE_SENDER_AI,
- sender_name="Agent",
- properties={"icon": "Bot", "state": "partial"},
- content_blocks=[ContentBlock(title="Agent Steps", contents=[])],
- )
-
- start_event = {
- "event": "on_tool_start",
- "name": "test_tool",
- "run_id": "test_run",
- "data": {"input": {"query": "tool input"}},
- }
- agent_message, _ = await handle_on_tool_start(start_event, agent_message, tool_blocks_map, send_message, 0.0)
-
- end_event = {
- "event": "on_tool_end",
- "name": "test_tool",
- "run_id": "test_run",
- "data": {"output": "tool output"},
- "start_time": 0,
- }
-
- updated_message, start_time = await handle_on_tool_end(end_event, agent_message, tool_blocks_map, send_message, 0.0)
-
- f"{end_event['name']}_{end_event['run_id']}"
- tool_content = updated_message.content_blocks[0].contents[-1]
- assert tool_content.name == "test_tool"
- assert tool_content.output == "tool output"
- assert isinstance(tool_content.duration, int)
- assert isinstance(start_time, float)
-
-
-async def test_handle_on_tool_error():
- """Test handle_on_tool_error event."""
- send_message = AsyncMock(side_effect=lambda message: message)
- tool_blocks_map = {}
- agent_message = Message(
- sender=MESSAGE_SENDER_AI,
- sender_name="Agent",
- properties={"icon": "Bot", "state": "partial"},
- content_blocks=[ContentBlock(title="Agent Steps", contents=[])],
- )
-
- start_event = {
- "event": "on_tool_start",
- "name": "test_tool",
- "run_id": "test_run",
- "data": {"input": {"query": "tool input"}},
- }
- agent_message, _ = await handle_on_tool_start(start_event, agent_message, tool_blocks_map, send_message, 0.0)
-
- error_event = {
- "event": "on_tool_error",
- "name": "test_tool",
- "run_id": "test_run",
- "data": {"error": "error message"},
- "start_time": 0,
- }
-
- updated_message, start_time = await handle_on_tool_error(
- error_event, agent_message, tool_blocks_map, send_message, 0.0
- )
-
- tool_content = updated_message.content_blocks[0].contents[-1]
- assert tool_content.name == "test_tool"
- assert tool_content.error == "error message"
- assert tool_content.header["title"] == "Error using **test_tool**"
- assert isinstance(tool_content.duration, int)
- assert isinstance(start_time, float)
-
-
-async def test_handle_on_chain_stream_with_output():
- """Test handle_on_chain_stream with output."""
- send_message = AsyncMock(side_effect=lambda message: message)
- agent_message = Message(
- sender=MESSAGE_SENDER_AI,
- sender_name="Agent",
- properties={"icon": "Bot", "state": "partial"},
- content_blocks=[ContentBlock(title="Agent Steps", contents=[])],
- )
- event = {
- "event": "on_chain_stream",
- "data": {"chunk": {"output": "streamed output"}},
- }
-
- updated_message, start_time = await handle_on_chain_stream(event, agent_message, send_message, 0.0)
-
- assert updated_message.text == "streamed output"
- assert updated_message.properties.state == "complete"
- assert isinstance(start_time, float)
-
-
-async def test_handle_on_chain_stream_no_output():
- """Test handle_on_chain_stream without output."""
- send_message = AsyncMock(side_effect=lambda message: message)
- agent_message = Message(
- sender=MESSAGE_SENDER_AI,
- sender_name="Agent",
- properties={"icon": "Bot", "state": "partial"},
- content_blocks=[ContentBlock(title="Agent Steps", contents=[])],
- session_id="test_session_id",
- )
- event = {
- "event": "on_chain_stream",
- "data": {"chunk": {}},
- }
-
- updated_message, start_time = await handle_on_chain_stream(event, agent_message, send_message, 0.0)
-
- assert updated_message.text == ""
- assert updated_message.properties.state == "partial"
- assert isinstance(start_time, float)
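Nearly every handler test above rebuilds the same partial agent message and the same echo-style `send_message` mock. A minimal sketch, reusing only the imports this file already declares, of hoisting both into pytest fixtures so each test states only its events and assertions:

```python
from unittest.mock import AsyncMock

import pytest

from lfx.schema.content_block import ContentBlock
from lfx.schema.message import Message
from lfx.utils.constants import MESSAGE_SENDER_AI


@pytest.fixture
def agent_message():
    """The partial agent message every event-handler test starts from."""
    return Message(
        sender=MESSAGE_SENDER_AI,
        sender_name="Agent",
        properties={"icon": "Bot", "state": "partial"},
        content_blocks=[ContentBlock(title="Agent Steps", contents=[])],
        session_id="test_session_id",
    )


@pytest.fixture
def send_message():
    """Echo mock matching the send_message callable the handlers expect."""
    return AsyncMock(side_effect=lambda message: message)
```

Tests that need different behavior (for example `test_tool_start_event`, which returns a copy of the message) can still override the fixture locally.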
diff --git a/src/backend/tests/unit/components/agents/test_tool_calling_agent.py b/src/backend/tests/unit/components/agents/test_tool_calling_agent.py
deleted file mode 100644
index e20119db0d18..000000000000
--- a/src/backend/tests/unit/components/agents/test_tool_calling_agent.py
+++ /dev/null
@@ -1,31 +0,0 @@
-import os
-
-import pytest
-
-from lfx.components.langchain_utilities import ToolCallingAgentComponent
-from lfx.components.openai.openai_chat_model import OpenAIModelComponent
-from lfx.components.tools.calculator import CalculatorToolComponent
-
-
-@pytest.mark.api_key_required
-@pytest.mark.usefixtures("client")
-async def test_tool_calling_agent_component():
- tools = [CalculatorToolComponent().build_tool()] # Use the Calculator component as a tool
- input_value = "What is 2 + 2?"
- chat_history = []
- api_key = os.environ["OPENAI_API_KEY"]
- temperature = 0.1
-
- # Default OpenAI Model Component
- llm_component = OpenAIModelComponent().set(
- api_key=api_key,
- temperature=temperature,
- )
- llm = llm_component.build_model()
-
- agent = ToolCallingAgentComponent(_session_id="test")
- agent.set(llm=llm, tools=tools, chat_history=chat_history, input_value=input_value)  # tools is already a list; don't nest it
-
- # Chat output
- response = await agent.message_response()
- assert "4" in response.data.get("text")
diff --git a/src/backend/tests/unit/components/bundles/google/__init__.py b/src/backend/tests/unit/components/bundles/google/__init__.py
deleted file mode 100644
index 83dedf89c3b0..000000000000
--- a/src/backend/tests/unit/components/bundles/google/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-"""Google components test package."""
diff --git a/src/backend/tests/unit/components/bundles/google/test_google_bq_sql_executor_component.py b/src/backend/tests/unit/components/bundles/google/test_google_bq_sql_executor_component.py
deleted file mode 100644
index edeb1e9a3ee1..000000000000
--- a/src/backend/tests/unit/components/bundles/google/test_google_bq_sql_executor_component.py
+++ /dev/null
@@ -1,571 +0,0 @@
-"""Tests for BigQueryExecutorComponent."""
-
-from __future__ import annotations
-
-import json
-from unittest.mock import MagicMock, mock_open, patch
-
-import pytest
-from google.auth.exceptions import RefreshError
-from google.oauth2.service_account import Credentials
-from pandas import DataFrame
-
-from lfx.components.google.google_bq_sql_executor import BigQueryExecutorComponent
-from tests.base import ComponentTestBaseWithoutClient
-
-
-class TestBigQueryExecutorComponent(ComponentTestBaseWithoutClient):
- @pytest.fixture
- def component_class(self):
- """Return the component class to test."""
- return BigQueryExecutorComponent
-
- @pytest.fixture
- def mock_credentials_json(self):
- """Return a valid service account JSON string."""
- return json.dumps(
- {
- "type": "service_account",
- "project_id": "test-project",
- "private_key_id": "fake-key-id",
- "private_key": "-----BEGIN PRIVATE KEY-----\nfake-key\n-----END PRIVATE KEY-----\n",
- "client_email": "test@project.iam.gserviceaccount.com",
- "client_id": "123456789",
- "auth_uri": "https://accounts.google.com/o/oauth2/auth",
- "token_uri": "https://oauth2.googleapis.com/token",
- "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
- "client_x509_cert_url": (
- "https://www.googleapis.com/robot/v1/metadata/x509/test@project.iam.gserviceaccount.com"
- ),
- }
- )
-
- @pytest.fixture
- def service_account_file(self, tmp_path, mock_credentials_json):
- """Write service account JSON to a temp file and return its path."""
- f = tmp_path / "sa.json"
- f.write_text(mock_credentials_json)
- return str(f)
-
- @pytest.fixture
- def default_kwargs(self, service_account_file):
- """Return default kwargs for component instantiation."""
- return {
- "service_account_json_file": service_account_file,
- "query": "SELECT 1",
- }
-
- @pytest.fixture
- def file_names_mapping(self):
- """No version-specific files for this component."""
- return []
-
- @patch.object(Credentials, "from_service_account_file")
- @patch("lfx.components.google.google_bq_sql_executor.bigquery.Client")
- def test_execute_sql_success(self, mock_client_cls, mock_from_file, component_class, default_kwargs):
- """Test successful SQL execution and component side-effects."""
- # Arrange mocks
- mock_creds = MagicMock(spec=Credentials)
- mock_from_file.return_value = mock_creds
-
- # Create a mock row that can be converted to a dict
- mock_row = MagicMock()
- mock_row.items.return_value = [("column1", "value1")]
- mock_row.__iter__.return_value = iter([("column1", "value1")])
- mock_row.keys.return_value = ["column1"]
- mock_row.to_numpy.return_value = ["value1"] # Changed from values to to_numpy
- mock_row.__getitem__.return_value = "value1"
-
- # Create mock result with the mock row
- mock_result = MagicMock()
- mock_result.__iter__.return_value = iter([mock_row])
-
- # Create mock job with the mock result
- mock_job = MagicMock()
- mock_job.result.return_value = mock_result
-
- # Create mock client with the mock job
- mock_client = MagicMock()
- mock_client.query.return_value = mock_job
- mock_client_cls.return_value = mock_client
-
- # Instantiate component with defaults
- component = component_class(**default_kwargs)
-
- # Execute
- result = component.execute_sql()
-
- # Verify the result
- assert isinstance(result, DataFrame)
- assert len(result) == 1 # Check number of rows
- assert "column1" in result.columns # Check column exists
- assert result.iloc[0]["column1"] == "value1" # Check value
-
- # Verify the mocks were called correctly
- mock_from_file.assert_called_once_with(default_kwargs["service_account_json_file"])
- mock_client_cls.assert_called_once_with(credentials=mock_creds, project="test-project")
- mock_client.query.assert_called_once_with(default_kwargs["query"])
-
- @pytest.mark.parametrize("q", ["", " \n\t "])
- @patch.object(Credentials, "from_service_account_file")
- @patch("lfx.components.google.google_bq_sql_executor.bigquery.Client")
- def test_empty_query_raises(self, mock_client_cls, mock_from_file, component_class, service_account_file, q):
- """Empty or whitespace-only queries should raise a ValueError."""
- # Create a proper mock credentials object
- mock_creds = MagicMock(spec=Credentials)
- mock_from_file.return_value = mock_creds
-
- # Mock the BigQuery client
- mock_client = MagicMock()
- mock_client_cls.return_value = mock_client
-
- # Create component with empty/whitespace query
- component = component_class(
- service_account_json_file=service_account_file,
- query=q,
- )
-
- # Verify that execute_sql raises ValueError for empty/whitespace queries
- expected_error = "No valid SQL query found in input text."
- with pytest.raises(ValueError, match=expected_error):
- component.execute_sql()
-
- # Verify that the BigQuery client was not called
- mock_client.query.assert_not_called()
-
- def test_missing_service_account_file(self, component_class):
- """Non-existent service account file should raise a ValueError."""
- component = component_class(
- service_account_json_file="/no/such/file.json",
- query="SELECT 1",
- )
- expected_error = "Service account file not found"
- with pytest.raises(ValueError, match=expected_error):
- component.execute_sql()
-
- def test_invalid_service_account_json(self, component_class):
- """Invalid JSON in service account file should raise a ValueError."""
- with patch("pathlib.Path.open", mock_open(read_data="invalid json")):
- component = component_class(
- service_account_json_file="ignored.json",
- query="SELECT 1",
- )
- expected_error = "Invalid JSON string for service account credentials"
- with pytest.raises(ValueError, match=expected_error):
- component.execute_sql()
-
- @patch.object(Credentials, "from_service_account_file")
- @patch("lfx.components.google.google_bq_sql_executor.bigquery.Client")
- def test_execute_sql_invalid_query(self, mock_client_cls, mock_from_file, component_class, default_kwargs):
- """SQL execution errors should be wrapped in ValueError."""
- mock_from_file.return_value = MagicMock()
- fake_client = MagicMock()
- mock_client_cls.return_value = fake_client
- fake_client.query.side_effect = Exception("Invalid query syntax")
-
- component = component_class(**default_kwargs)
- with pytest.raises(ValueError, match="Error executing BigQuery SQL query: Invalid query syntax"):
- component.execute_sql()
-
- @patch.object(Credentials, "from_service_account_file")
- @patch("lfx.components.google.google_bq_sql_executor.bigquery.Client")
- def test_refresh_error_handling(self, mock_client_cls, mock_from_file, component_class, default_kwargs):
- """RefreshError should produce an authentication ValueError."""
- mock_from_file.return_value = MagicMock()
- fake_client = MagicMock()
- mock_client_cls.return_value = fake_client
- fake_client.query.side_effect = RefreshError("Token expired")
-
- component = component_class(**default_kwargs)
- with pytest.raises(ValueError, match="Authentication error: Unable to refresh authentication token."):
- component.execute_sql()
-
- @patch.object(Credentials, "from_service_account_file")
- @patch("lfx.components.google.google_bq_sql_executor.bigquery.Client")
- def test_complex_query_result(self, mock_client_cls, mock_from_file, component_class, default_kwargs):
- """Complex row structures should be correctly serialized to DataFrame."""
- # Arrange mocks
- mock_creds = MagicMock(spec=Credentials)
- mock_from_file.return_value = mock_creds
-
- # Create mock rows with complex data
- mock_row1 = MagicMock()
- mock_row1.items.return_value = [("id", 1), ("name", "Test 1"), ("value", 10.5), ("active", True)]
- mock_row1.__iter__.return_value = iter([("id", 1), ("name", "Test 1"), ("value", 10.5), ("active", True)])
- mock_row1.keys.return_value = ["id", "name", "value", "active"]
- mock_row1.to_numpy.return_value = [1, "Test 1", 10.5, True] # Changed from values to to_numpy
- mock_row1.__getitem__.side_effect = lambda key: {"id": 1, "name": "Test 1", "value": 10.5, "active": True}[key]
-
- mock_row2 = MagicMock()
- mock_row2.items.return_value = [("id", 2), ("name", "Test 2"), ("value", 20.75), ("active", False)]
- mock_row2.__iter__.return_value = iter([("id", 2), ("name", "Test 2"), ("value", 20.75), ("active", False)])
- mock_row2.keys.return_value = ["id", "name", "value", "active"]
- mock_row2.to_numpy.return_value = [2, "Test 2", 20.75, False] # Changed from values to to_numpy
- mock_row2.__getitem__.side_effect = lambda key: {"id": 2, "name": "Test 2", "value": 20.75, "active": False}[
- key
- ]
-
- # Create mock result with the mock rows
- mock_result = MagicMock()
- mock_result.__iter__.return_value = iter([mock_row1, mock_row2])
-
- # Create mock job with the mock result
- mock_job = MagicMock()
- mock_job.result.return_value = mock_result
-
- # Create mock client with the mock job
- mock_client = MagicMock()
- mock_client.query.return_value = mock_job
- mock_client_cls.return_value = mock_client
-
- # Instantiate component with defaults
- component = component_class(**default_kwargs)
-
- # Execute
- result = component.execute_sql()
-
- # Verify the result
- assert isinstance(result, DataFrame)
- assert len(result) == 2 # Check number of rows
- assert list(result.columns) == ["id", "name", "value", "active"] # Check columns
-
- # Convert DataFrame to dictionary for easier comparison
- result_dict = result.to_dict(orient="records")
-
- # Verify first row
- assert result_dict[0]["id"] == 1
- assert result_dict[0]["name"] == "Test 1"
- assert result_dict[0]["value"] == 10.5
- assert result_dict[0]["active"] is True
-
- # Verify second row
- assert result_dict[1]["id"] == 2
- assert result_dict[1]["name"] == "Test 2"
- assert result_dict[1]["value"] == 20.75
- assert result_dict[1]["active"] is False
-
- # Verify the mocks were called correctly
- mock_from_file.assert_called_once_with(default_kwargs["service_account_json_file"])
- mock_client_cls.assert_called_once_with(credentials=mock_creds, project="test-project")
- mock_client.query.assert_called_once_with(default_kwargs["query"])
-
- @patch.object(Credentials, "from_service_account_file")
- @patch("lfx.components.google.google_bq_sql_executor.bigquery.Client")
- def test_query_with_sql_code_block(self, mock_client_cls, mock_from_file, component_class, default_kwargs):
- """Test that queries with SQL code blocks are properly handled."""
- mock_from_file.return_value = MagicMock()
- fake_client = MagicMock()
- mock_client_cls.return_value = fake_client
-
- query_with_code_block = "```sql\nSELECT * FROM table\n```"
- component = component_class(**{**default_kwargs, "query": query_with_code_block, "clean_query": True})
-
- result = component.execute_sql()
-
- # Verify the query was properly cleaned (code block markers removed)
- fake_client.query.assert_called_once_with("SELECT * FROM table")
- assert isinstance(result, DataFrame)
-
- @patch.object(Credentials, "from_service_account_file")
- @patch("lfx.components.google.google_bq_sql_executor.bigquery.Client")
- def test_query_with_whitespace(self, mock_client_cls, mock_from_file, component_class, default_kwargs):
- """Test that queries with extra whitespace are properly handled."""
- # Arrange mocks
- mock_creds = MagicMock(spec=Credentials)
- mock_from_file.return_value = mock_creds
-
- # Create a mock row that can be converted to a dict
- mock_row = MagicMock()
- mock_row.items.return_value = [("column1", "value1")]
- mock_row.__iter__.return_value = iter([("column1", "value1")])
- mock_row.keys.return_value = ["column1"]
- mock_row.to_numpy.return_value = ["value1"] # Changed from values to to_numpy
- mock_row.__getitem__.return_value = "value1"
-
- # Create mock result with the mock row
- mock_result = MagicMock()
- mock_result.__iter__.return_value = iter([mock_row])
-
- # Create mock job with the mock result
- mock_job = MagicMock()
- mock_job.result.return_value = mock_result
-
- # Create mock client with the mock job
- mock_client = MagicMock()
- mock_client.query.return_value = mock_job
- mock_client_cls.return_value = mock_client
-
- query_with_whitespace = " SELECT * FROM table "
- component = component_class(**{**default_kwargs, "query": query_with_whitespace, "clean_query": True})
-
- result = component.execute_sql()
-
- # Verify the query was properly stripped
- mock_client.query.assert_called_once_with("SELECT * FROM table")
- assert isinstance(result, DataFrame)
- assert len(result) == 1 # Check number of rows
- assert "column1" in result.columns # Check column exists
- assert result.iloc[0]["column1"] == "value1" # Check value
-
- @patch.object(Credentials, "from_service_account_file")
- @patch("lfx.components.google.google_bq_sql_executor.bigquery.Client")
- def test_query_with_special_characters(self, mock_client_cls, mock_from_file, component_class, default_kwargs):
- """Test that queries with special characters are properly handled."""
- # Arrange mocks
- mock_creds = MagicMock(spec=Credentials)
- mock_from_file.return_value = mock_creds
-
- # Create a mock row that can be converted to a dict
- mock_row = MagicMock()
- mock_row.items.return_value = [("name", "test_value")]
- mock_row.__iter__.return_value = iter([("name", "test_value")])
- mock_row.keys.return_value = ["name"]
- mock_row.to_numpy.return_value = ["test_value"] # Changed from values to to_numpy
- mock_row.__getitem__.return_value = "test_value"
-
- # Create mock result with the mock row
- mock_result = MagicMock()
- mock_result.__iter__.return_value = iter([mock_row])
-
- # Create mock job with the mock result
- mock_job = MagicMock()
- mock_job.result.return_value = mock_result
-
- # Create mock client with the mock job
- mock_client = MagicMock()
- mock_client.query.return_value = mock_job
- mock_client_cls.return_value = mock_client
-
- query_with_special_chars = "SELECT * FROM project.dataset.table WHERE name LIKE '%test%'"
- component = component_class(**{**default_kwargs, "query": query_with_special_chars})
-
- result = component.execute_sql()
-
- # Verify the query with special characters was passed correctly
- mock_client.query.assert_called_once_with(query_with_special_chars)
- assert isinstance(result, DataFrame)
- assert len(result) == 1 # Check number of rows
- assert "name" in result.columns # Check column exists
- assert result.iloc[0]["name"] == "test_value" # Check value
-
- @patch.object(Credentials, "from_service_account_file")
- @patch("lfx.components.google.google_bq_sql_executor.bigquery.Client")
- def test_query_with_multiple_statements(self, mock_client_cls, mock_from_file, component_class, default_kwargs):
- """Test that queries with multiple statements are properly handled."""
- # Arrange mocks
- mock_creds = MagicMock(spec=Credentials)
- mock_from_file.return_value = mock_creds
-
- # Create a mock row that can be converted to a dict
- mock_row = MagicMock()
- mock_row.items.return_value = [("id", 1)]
- mock_row.__iter__.return_value = iter([("id", 1)])
- mock_row.keys.return_value = ["id"]
- mock_row.to_numpy.return_value = [1] # Changed from values to to_numpy
- mock_row.__getitem__.return_value = 1
-
- # Create mock result with the mock row
- mock_result = MagicMock()
- mock_result.__iter__.return_value = iter([mock_row])
-
- # Create mock job with the mock result
- mock_job = MagicMock()
- mock_job.result.return_value = mock_result
-
- # Create mock client with the mock job
- mock_client = MagicMock()
- mock_client.query.return_value = mock_job
- mock_client_cls.return_value = mock_client
-
- multi_statement_query = (
- "CREATE TABLE IF NOT EXISTS test_table (id INT64);\n"
- "INSERT INTO test_table VALUES (1);\n"
- "SELECT * FROM test_table;"
- )
- component = component_class(**{**default_kwargs, "query": multi_statement_query})
-
- result = component.execute_sql()
-
- # Verify the multi-statement query was passed correctly
- mock_client.query.assert_called_once_with(multi_statement_query)
- assert isinstance(result, DataFrame)
- assert len(result) == 1 # Check number of rows
- assert "id" in result.columns # Check column exists
- assert result.iloc[0]["id"] == 1 # Check value
-
- @patch.object(Credentials, "from_service_account_file")
- @patch("lfx.components.google.google_bq_sql_executor.bigquery.Client")
- def test_query_with_parameters(self, mock_client_cls, mock_from_file, component_class, default_kwargs):
- """Test that queries with parameters are properly handled."""
- # Arrange mocks
- mock_creds = MagicMock(spec=Credentials)
- mock_from_file.return_value = mock_creds
-
- # Create a mock row that can be converted to a dict
- mock_row = MagicMock()
- mock_row.items.return_value = [("id", 1), ("name", "test_name")]
- mock_row.__iter__.return_value = iter([("id", 1), ("name", "test_name")])
- mock_row.keys.return_value = ["id", "name"]
- mock_row.to_numpy.return_value = [1, "test_name"] # Changed from values to to_numpy
- mock_row.__getitem__.side_effect = lambda key: {"id": 1, "name": "test_name"}[key]
-
- # Create mock result with the mock row
- mock_result = MagicMock()
- mock_result.__iter__.return_value = iter([mock_row])
-
- # Create mock job with the mock result
- mock_job = MagicMock()
- mock_job.result.return_value = mock_result
-
- # Create mock client with the mock job
- mock_client = MagicMock()
- mock_client.query.return_value = mock_job
- mock_client_cls.return_value = mock_client
-
- query_with_params = "SELECT * FROM table WHERE id = @id AND name = @name"
- component = component_class(**{**default_kwargs, "query": query_with_params})
-
- result = component.execute_sql()
-
- # Verify the parameterized query was passed correctly
- mock_client.query.assert_called_once_with(query_with_params)
- assert isinstance(result, DataFrame)
- assert len(result) == 1 # Check number of rows
- assert list(result.columns) == ["id", "name"] # Check columns
- assert result.iloc[0]["id"] == 1 # Check id value
- assert result.iloc[0]["name"] == "test_name" # Check name value
-
- def test_missing_project_id_in_credentials(self, component_class, tmp_path):
- """Test that missing project_id in credentials raises an error."""
- # Create a service account JSON without project_id
- invalid_credentials = {
- "type": "service_account",
- "private_key_id": "fake-key-id",
- "private_key": "-----BEGIN PRIVATE KEY-----\nfake-key\n-----END PRIVATE KEY-----\n",
- "client_email": "test@project.iam.gserviceaccount.com",
- "client_id": "123456789",
- "auth_uri": "https://accounts.google.com/o/oauth2/auth",
- "token_uri": "https://oauth2.googleapis.com/token",
- "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
- "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/test@project.iam.gserviceaccount.com",
- }
-
- # Write invalid credentials to a temp file
- f = tmp_path / "invalid_sa.json"
- f.write_text(json.dumps(invalid_credentials))
-
- component = component_class(
- service_account_json_file=str(f),
- query="SELECT 1",
- )
-
- with pytest.raises(ValueError, match="No project_id found in service account credentials file"):
- component.execute_sql()
-
- @patch.object(Credentials, "from_service_account_file")
- @patch("lfx.components.google.google_bq_sql_executor.bigquery.Client")
- def test_query_with_quotes(self, mock_client_cls, mock_from_file, component_class, default_kwargs):
- """Test that queries wrapped in quotes are properly handled."""
- # Arrange mocks
- mock_creds = MagicMock(spec=Credentials)
- mock_from_file.return_value = mock_creds
-
- # Create a mock row that can be converted to a dict
- mock_row = MagicMock()
- mock_row.items.return_value = [("column1", "value1")]
- mock_row.__iter__.return_value = iter([("column1", "value1")])
- mock_row.keys.return_value = ["column1"]
- mock_row.to_numpy.return_value = ["value1"] # Changed from values to to_numpy
- mock_row.__getitem__.return_value = "value1"
-
- # Create mock result with the mock row
- mock_result = MagicMock()
- mock_result.__iter__.return_value = iter([mock_row])
-
- # Create mock job with the mock result
- mock_job = MagicMock()
- mock_job.result.return_value = mock_result
-
- # Create mock client with the mock job
- mock_client = MagicMock()
- mock_client.query.return_value = mock_job
- mock_client_cls.return_value = mock_client
-
- # Test with double quotes
- query_with_double_quotes = '"SELECT * FROM table"'
- component = component_class(**{**default_kwargs, "query": query_with_double_quotes, "clean_query": True})
- result = component.execute_sql()
- mock_client.query.assert_called_once_with("SELECT * FROM table")
- assert isinstance(result, DataFrame)
-
- # Reset mocks for next test
- mock_client.reset_mock()
-
- # Test with single quotes
- query_with_single_quotes = "'SELECT * FROM table'"
- component = component_class(**{**default_kwargs, "query": query_with_single_quotes, "clean_query": True})
- result = component.execute_sql()
- mock_client.query.assert_called_once_with("SELECT * FROM table")
- assert isinstance(result, DataFrame)
-
- # Reset mocks for next test
- mock_client.reset_mock()
-
- # Test with SQL code block
- query_with_code_block = "```sql\nSELECT * FROM table\n```"
- component = component_class(**{**default_kwargs, "query": query_with_code_block, "clean_query": True})
- result = component.execute_sql()
- mock_client.query.assert_called_once_with("SELECT * FROM table")
- assert isinstance(result, DataFrame)
-
- # Reset mocks for next test
- mock_client.reset_mock()
-
- # Test with SQL code block and quotes
- query_with_code_block_and_quotes = '```sql\n"SELECT * FROM table"\n```'
- component = component_class(
- **{**default_kwargs, "query": query_with_code_block_and_quotes, "clean_query": True}
- )
- result = component.execute_sql()
- mock_client.query.assert_called_once_with("SELECT * FROM table")
- assert isinstance(result, DataFrame)
-
- # Reset mocks for next test
- mock_client.reset_mock()
-
- # Test with just backticks
- query_with_backticks = "`SELECT * FROM table`"
- component = component_class(**{**default_kwargs, "query": query_with_backticks, "clean_query": True})
- result = component.execute_sql()
- mock_client.query.assert_called_once_with("SELECT * FROM table")
- assert isinstance(result, DataFrame)
-
- # Reset mocks for next test
- mock_client.reset_mock()
-
- # Test with mixed markers
- query_with_mixed = '```sql\n`"SELECT * FROM table"`\n```'
- component = component_class(**{**default_kwargs, "query": query_with_mixed, "clean_query": True})
- result = component.execute_sql()
- mock_client.query.assert_called_once_with("SELECT * FROM table")
- assert isinstance(result, DataFrame)
-
- # Reset mocks for next test
- mock_client.reset_mock()
-
- # Test that a plain dotted table reference passes through clean_query unchanged
- query_plain_table_ref = "SELECT * FROM project.dataset.table"
- component = component_class(**{**default_kwargs, "query": query_plain_table_ref, "clean_query": True})
- result = component.execute_sql()
- mock_client.query.assert_called_once_with("SELECT * FROM project.dataset.table")
- assert isinstance(result, DataFrame)
-
- # Reset mocks for next test
- mock_client.reset_mock()
-
- # Test that single quotes inside the query body are preserved
- query_with_inner_quotes = "SELECT * FROM project.dataset.table WHERE column = 'value'"
- component = component_class(**{**default_kwargs, "query": query_with_inner_quotes, "clean_query": True})
- result = component.execute_sql()
- mock_client.query.assert_called_once_with("SELECT * FROM project.dataset.table WHERE column = 'value'")
- assert isinstance(result, DataFrame)
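Almost every BigQuery test above rebuilds the same row → result → job → client mock chain by hand. A minimal sketch of a factory that produces the whole chain from plain dicts, using only the `unittest.mock` behavior the tests already rely on:

```python
from unittest.mock import MagicMock


def make_mock_bq_client(rows: list[dict]) -> MagicMock:
    """Build a mock bigquery.Client whose query().result() yields the given rows."""
    mock_rows = []
    for row in rows:
        mock_row = MagicMock()
        mock_row.items.return_value = list(row.items())
        # Single-use iterator, matching how the tests above configure __iter__.
        mock_row.__iter__.return_value = iter(row.items())
        mock_row.keys.return_value = list(row.keys())
        mock_row.to_numpy.return_value = list(row.values())
        mock_row.__getitem__.side_effect = row.__getitem__
        mock_rows.append(mock_row)

    mock_result = MagicMock()
    mock_result.__iter__.return_value = iter(mock_rows)

    mock_job = MagicMock()
    mock_job.result.return_value = mock_result

    mock_client = MagicMock()
    mock_client.query.return_value = mock_job
    return mock_client
```

A test would then reduce its arrange step to `mock_client_cls.return_value = make_mock_bq_client([{"column1": "value1"}])` before calling `execute_sql()`.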
diff --git a/src/backend/tests/unit/components/bundles/langwatch/test_langwatch_component.py b/src/backend/tests/unit/components/bundles/langwatch/test_langwatch_component.py
deleted file mode 100644
index 91b29d781fed..000000000000
--- a/src/backend/tests/unit/components/bundles/langwatch/test_langwatch_component.py
+++ /dev/null
@@ -1,397 +0,0 @@
-import json
-import os
-from unittest.mock import Mock, patch
-
-import httpx
-import pytest
-import respx
-from httpx import Response
-
-from lfx.base.langwatch.utils import get_cached_evaluators
-from lfx.components.langwatch.langwatch import LangWatchComponent
-from lfx.schema.data import Data
-from lfx.schema.dotdict import dotdict
-from tests.base import ComponentTestBaseWithoutClient
-
-
-class TestLangWatchComponent(ComponentTestBaseWithoutClient):
- @pytest.fixture
- def component_class(self):
- """Return the component class to test."""
- return LangWatchComponent
-
- @pytest.fixture
- def default_kwargs(self):
- """Return the default kwargs for the component."""
- return {
- "evaluator_name": "test_evaluator",
- "api_key": "test_api_key",
- "input": "test input",
- "output": "test output",
- "expected_output": "expected output",
- "contexts": "context1, context2",
- "timeout": 30,
- }
-
- @pytest.fixture
- def file_names_mapping(self):
- """Return an empty list since this component doesn't have version-specific files."""
- return []
-
- @pytest.fixture
- def mock_evaluators(self):
- """Mock evaluators data."""
- return {
- "test_evaluator": {
- "name": "test_evaluator",
- "requiredFields": ["input", "output"],
- "optionalFields": ["contexts"],
- "settings": {
- "temperature": {
- "description": "Temperature setting",
- "default": 0.7,
- }
- },
- "settings_json_schema": {
- "properties": {
- "temperature": {
- "type": "number",
- "default": 0.7,
- }
- }
- },
- },
- "boolean_evaluator": {
- "name": "boolean_evaluator",
- "requiredFields": ["input"],
- "optionalFields": [],
- "settings": {
- "strict_mode": {
- "description": "Strict mode setting",
- "default": True,
- }
- },
- "settings_json_schema": {
- "properties": {
- "strict_mode": {
- "type": "boolean",
- "default": True,
- }
- }
- },
- },
- }
-
- @pytest.fixture
- async def component(self, component_class, default_kwargs, mock_evaluators):
- """Return a component instance."""
- comp = component_class(**default_kwargs)
- comp.evaluators = mock_evaluators
- return comp
-
- @pytest.fixture(autouse=True)
- def clear_cache(self):
- """Clear the LRU cache before each test."""
- get_cached_evaluators.cache_clear()
-
- @patch("lfx.components.langwatch.langwatch.httpx.get")
- async def test_set_evaluators_success(self, mock_get, component, mock_evaluators):
- """Test successful setting of evaluators."""
- mock_response = Mock()
- mock_response.json.return_value = {"evaluators": mock_evaluators}
- mock_response.raise_for_status.return_value = None
- mock_get.return_value = mock_response
-
- endpoint = "https://app.langwatch.ai"
- component.set_evaluators(endpoint)
- assert component.evaluators == mock_evaluators
-
- @patch("lfx.components.langwatch.langwatch.httpx.get")
- async def test_set_evaluators_empty_response(self, mock_get, component):
- """Test setting evaluators with empty response."""
- mock_response = Mock()
- mock_response.json.return_value = {"evaluators": {}}
- mock_response.raise_for_status.return_value = None
- mock_get.return_value = mock_response
-
- endpoint = "https://app.langwatch.ai"
- with pytest.raises(ValueError, match="No evaluators found"):
- component.set_evaluators(endpoint)
-
- def test_get_dynamic_inputs(self, component, mock_evaluators):
- """Test dynamic input generation."""
- evaluator = mock_evaluators["test_evaluator"]
- dynamic_inputs = component.get_dynamic_inputs(evaluator)
-
- # Should create inputs for contexts (from optionalFields)
- assert "contexts" in dynamic_inputs
- # Should create inputs for temperature (from settings)
- assert "temperature" in dynamic_inputs
-
- def test_get_dynamic_inputs_with_boolean_setting(self, component, mock_evaluators):
- """Test dynamic input generation with boolean settings."""
- evaluator = mock_evaluators["boolean_evaluator"]
- dynamic_inputs = component.get_dynamic_inputs(evaluator)
-
- # Should create boolean input for strict_mode
- assert "strict_mode" in dynamic_inputs
-
- def test_get_dynamic_inputs_error_handling(self, component):
- """Test error handling in dynamic input generation."""
- # Test with invalid evaluator data
- invalid_evaluator = {"invalid": "data"}
- result = component.get_dynamic_inputs(invalid_evaluator)
- assert result == {}
-
- @patch.dict(os.environ, {"LANGWATCH_ENDPOINT": "https://test.langwatch.ai"})
- def test_update_build_config_basic(self, component, mock_evaluators):
- """Test basic build config update."""
- build_config = dotdict(
- {
- "evaluator_name": {"options": [], "value": None},
- "api_key": {"value": "test_key"},
- "code": {"value": ""},
- "_type": {"value": ""},
- "input": {"value": ""},
- "output": {"value": ""},
- "timeout": {"value": 30},
- }
- )
-
- # Mock the get_evaluators method (which doesn't exist, so create it)
- def mock_get_evaluators(endpoint): # noqa: ARG001
- return mock_evaluators
-
- with patch.object(component, "get_evaluators", side_effect=mock_get_evaluators, create=True):
- result = component.update_build_config(build_config, None, None)
-
- # Should populate evaluator options
- assert "test_evaluator" in result["evaluator_name"]["options"]
- assert "boolean_evaluator" in result["evaluator_name"]["options"]
-
- @patch.dict(os.environ, {"LANGWATCH_ENDPOINT": "https://test.langwatch.ai"})
- def test_update_build_config_with_evaluator_selection(self, component, mock_evaluators):
- """Test build config update with evaluator selection."""
- build_config = dotdict(
- {
- "evaluator_name": {"options": [], "value": None},
- "api_key": {"value": "test_key"},
- "code": {"value": ""},
- "_type": {"value": ""},
- "input": {"value": ""},
- "output": {"value": ""},
- "timeout": {"value": 30},
- }
- )
-
- # Mock the get_evaluators method (which doesn't exist, so create it)
- def mock_get_evaluators(endpoint): # noqa: ARG001
- return mock_evaluators
-
- with patch.object(component, "get_evaluators", side_effect=mock_get_evaluators, create=True):
- # Initialize current_evaluator attribute
- component.current_evaluator = None
- result = component.update_build_config(build_config, "test_evaluator", "evaluator_name")
-
- # Should set the selected evaluator
- assert result["evaluator_name"]["value"] == "test_evaluator"
-
- @patch("lfx.components.langwatch.langwatch.httpx.get")
- @respx.mock
- async def test_evaluate_success(self, mock_get, component, mock_evaluators):
- """Test successful evaluation."""
- # Mock the evaluators HTTP request
- mock_response = Mock()
- mock_response.json.return_value = {"evaluators": mock_evaluators}
- mock_response.raise_for_status.return_value = None
- mock_get.return_value = mock_response
-
- # Mock the evaluation endpoint
- eval_url = "https://app.langwatch.ai/api/evaluations/test_evaluator/evaluate"
- expected_response = {"score": 0.95, "reasoning": "Good evaluation"}
- respx.post(eval_url).mock(return_value=Response(200, json=expected_response))
-
- # Set up component
- component.evaluator_name = "test_evaluator"
- component.api_key = "test_api_key"
- component.input = "test input"
- component.output = "test output"
- component.contexts = "context1, context2"
-
- result = await component.evaluate()
-
- assert isinstance(result, Data)
- assert result.data == expected_response
-
- @respx.mock
- async def test_evaluate_no_api_key(self, component):
- """Test evaluation with missing API key."""
- component.api_key = None
-
- result = await component.evaluate()
-
- assert isinstance(result, Data)
- assert result.data["error"] == "API key is required"
-
- async def test_evaluate_no_evaluators(self, component):
- """Test evaluation when no evaluators are available."""
- component.api_key = "test_api_key"
- component.evaluator_name = None
-
- # Mock set_evaluators to avoid external HTTP calls
- with patch.object(component, "set_evaluators"):
- component.evaluators = {} # Set empty evaluators directly
- component.current_evaluator = None # Initialize the attribute
-
- result = await component.evaluate()
-
- assert isinstance(result, Data)
- assert "No evaluator selected" in result.data["error"]
-
- @patch("lfx.components.langwatch.langwatch.httpx.get")
- @respx.mock
- async def test_evaluate_evaluator_not_found(self, mock_get, component, mock_evaluators):
- """Test evaluation with non-existent evaluator."""
- # Mock evaluators HTTP request
- mock_response = Mock()
- mock_response.json.return_value = {"evaluators": mock_evaluators}
- mock_response.raise_for_status.return_value = None
- mock_get.return_value = mock_response
-
- component.api_key = "test_api_key"
- component.evaluator_name = "non_existent_evaluator"
-
- result = await component.evaluate()
-
- assert isinstance(result, Data)
- assert "Selected evaluator 'non_existent_evaluator' not found" in result.data["error"]
-
- @patch("lfx.components.langwatch.langwatch.httpx.get")
- @respx.mock
- async def test_evaluate_http_error(self, mock_get, component, mock_evaluators):
- """Test evaluation with HTTP error."""
- # Mock evaluators HTTP request
- mock_response = Mock()
- mock_response.json.return_value = {"evaluators": mock_evaluators}
- mock_response.raise_for_status.return_value = None
- mock_get.return_value = mock_response
-
- # Mock evaluation endpoint with error
- eval_url = "https://app.langwatch.ai/api/evaluations/test_evaluator/evaluate"
- respx.post(eval_url).mock(side_effect=httpx.RequestError("Connection failed"))
-
- component.api_key = "test_api_key"
- component.evaluator_name = "test_evaluator"
- component.input = "test input"
- component.output = "test output"
-
- result = await component.evaluate()
-
- assert isinstance(result, Data)
- assert "Evaluation error" in result.data["error"]
-
- @patch("lfx.components.langwatch.langwatch.httpx.get")
- @respx.mock
- async def test_evaluate_with_tracing(self, mock_get, component, mock_evaluators):
- """Test evaluation with tracing service."""
- # Mock evaluators HTTP request
- mock_response = Mock()
- mock_response.json.return_value = {"evaluators": mock_evaluators}
- mock_response.raise_for_status.return_value = None
- mock_get.return_value = mock_response
-
- # Mock evaluation endpoint
- eval_url = "https://app.langwatch.ai/api/evaluations/test_evaluator/evaluate"
- expected_response = {"score": 0.95, "reasoning": "Good evaluation"}
-
- # Set up request capture
- request_data = None
-
- def capture_request(request):
- nonlocal request_data
- request_data = json.loads(request.content.decode())
- return Response(200, json=expected_response)
-
- respx.post(eval_url).mock(side_effect=capture_request)
-
- # Set up component with mock tracing
- component.api_key = "test_api_key"
- component.evaluator_name = "test_evaluator"
- component.input = "test input"
- component.output = "test output"
-
- # Mock tracing service
- mock_tracer = Mock()
- mock_tracer.trace_id = "test_trace_id"
- component._tracing_service = Mock()
- component._tracing_service.get_tracer.return_value = mock_tracer
-
- result = await component.evaluate()
-
- # Verify trace_id was included in the request
- assert request_data["settings"]["trace_id"] == "test_trace_id"
- assert isinstance(result, Data)
- assert result.data == expected_response
-
- @patch("lfx.components.langwatch.langwatch.httpx.get")
- @respx.mock
- async def test_evaluate_with_contexts_parsing(self, mock_get, component, mock_evaluators):
- """Test evaluation with contexts parsing."""
- # Mock evaluators HTTP request
- mock_response = Mock()
- mock_response.json.return_value = {"evaluators": mock_evaluators}
- mock_response.raise_for_status.return_value = None
- mock_get.return_value = mock_response
-
- # Mock evaluation endpoint
- eval_url = "https://app.langwatch.ai/api/evaluations/test_evaluator/evaluate"
- expected_response = {"score": 0.95, "reasoning": "Good evaluation"}
-
- # Set up request capture
- request_data = None
-
- def capture_request(request):
- nonlocal request_data
- request_data = json.loads(request.content.decode())
- return Response(200, json=expected_response)
-
- respx.post(eval_url).mock(side_effect=capture_request)
-
- # Set up component
- component.api_key = "test_api_key"
- component.evaluator_name = "test_evaluator"
- component.input = "test input"
- component.output = "test output"
- component.contexts = "context1, context2, context3"
-
- result = await component.evaluate()
-
-        # Verify contexts were split on commas (surrounding whitespace is preserved)
- assert request_data["data"]["contexts"] == ["context1", " context2", " context3"]
- assert isinstance(result, Data)
- assert result.data == expected_response
-
- @patch("lfx.components.langwatch.langwatch.httpx.get")
- @respx.mock
- async def test_evaluate_timeout_handling(self, mock_get, component, mock_evaluators):
- """Test evaluation with timeout."""
- # Mock evaluators HTTP request
- mock_response = Mock()
- mock_response.json.return_value = {"evaluators": mock_evaluators}
- mock_response.raise_for_status.return_value = None
- mock_get.return_value = mock_response
-
- # Mock evaluation endpoint with timeout
- eval_url = "https://app.langwatch.ai/api/evaluations/test_evaluator/evaluate"
- respx.post(eval_url).mock(side_effect=httpx.TimeoutException("Request timed out"))
-
- component.api_key = "test_api_key"
- component.evaluator_name = "test_evaluator"
- component.input = "test input"
- component.output = "test output"
- component.timeout = 5
-
- result = await component.evaluate()
-
- assert isinstance(result, Data)
- assert "Evaluation error" in result.data["error"]
diff --git a/src/backend/tests/unit/components/bundles/youtube/test_youtube_transcript_component.py b/src/backend/tests/unit/components/bundles/youtube/test_youtube_transcript_component.py
deleted file mode 100644
index 91f866602c8f..000000000000
--- a/src/backend/tests/unit/components/bundles/youtube/test_youtube_transcript_component.py
+++ /dev/null
@@ -1,167 +0,0 @@
-from unittest.mock import Mock, patch
-
-import pytest
-from youtube_transcript_api import NoTranscriptFound, TranscriptsDisabled
-
-from lfx.components.youtube.youtube_transcripts import YouTubeTranscriptsComponent
-from lfx.schema import Data, DataFrame, Message
-from tests.base import ComponentTestBaseWithoutClient
-
-
-class TestYouTubeTranscriptsComponent(ComponentTestBaseWithoutClient):
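-    """Exercise YouTubeTranscriptsComponent's DataFrame, Message, and Data outputs,
-    plus its handling of disabled, missing, and empty transcripts."""
-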
- @pytest.fixture
- def component_class(self):
- """Return the component class to test."""
- return YouTubeTranscriptsComponent
-
- @pytest.fixture
- def default_kwargs(self):
- """Return the default kwargs for the component."""
- return {
- "url": "https://www.youtube.com/watch?v=test123",
- "chunk_size_seconds": 60,
- "translation": "",
- }
-
- @pytest.fixture
- def file_names_mapping(self):
- """Return the file names mapping for different versions."""
- return []
-
- @pytest.fixture
- def mock_transcript_data(self):
- """Return mock transcript data for testing."""
- return [
- Mock(page_content="First part of the transcript", metadata={"start_seconds": 0}),
- Mock(page_content="Second part of the transcript", metadata={"start_seconds": 60}),
- ]
-
- def test_basic_setup(self, component_class, default_kwargs):
- """Test basic component initialization."""
- component = component_class()
- component.set_attributes(default_kwargs)
- assert component.url == default_kwargs["url"]
- assert component.chunk_size_seconds == default_kwargs["chunk_size_seconds"]
- assert component.translation == default_kwargs["translation"]
-
- @patch("lfx.components.youtube.youtube_transcripts.YoutubeLoader")
- def test_get_dataframe_output_success(self, mock_loader, component_class, default_kwargs, mock_transcript_data):
- """Test successful DataFrame output generation."""
- mock_loader.from_youtube_url.return_value.load.return_value = mock_transcript_data
-
- component = component_class()
- component.set_attributes(default_kwargs)
- result = component.get_dataframe_output()
-
- assert isinstance(result, DataFrame)
-        result_df = result  # alias for readability in the assertions below
- assert len(result_df) == 2
- assert list(result_df.columns) == ["timestamp", "text"]
- assert result_df.iloc[0]["timestamp"] == "00:00"
- assert result_df.iloc[1]["timestamp"] == "01:00"
- assert result_df.iloc[0]["text"] == "First part of the transcript"
-
- @patch("lfx.components.youtube.youtube_transcripts.YoutubeLoader")
- def test_get_message_output_success(self, mock_loader, component_class, default_kwargs, mock_transcript_data):
- """Test successful Message output generation."""
- mock_loader.from_youtube_url.return_value.load.return_value = mock_transcript_data
-
- component = component_class()
- component.set_attributes(default_kwargs)
- result = component.get_message_output()
-
- assert isinstance(result, Message)
- assert result.text == "First part of the transcript"
-
- @patch("lfx.components.youtube.youtube_transcripts.YoutubeLoader")
- def test_get_data_output_success(self, mock_loader, component_class, default_kwargs, mock_transcript_data):
- """Test successful Data output generation."""
- mock_loader.from_youtube_url.return_value.load.return_value = mock_transcript_data
-
- component = component_class()
- component.set_attributes(default_kwargs)
- result = component.get_data_output()
-
- assert isinstance(result, Data)
- assert result.data["video_url"] == default_kwargs["url"]
- assert result.data["transcript"] == "First part of the transcript Second part of the transcript"
- assert "error" not in result.data
-
- @patch("lfx.components.youtube.youtube_transcripts.YoutubeLoader")
- def test_transcript_disabled_error(self, mock_loader, component_class, default_kwargs):
- """Test handling of TranscriptsDisabled error."""
- error_message = "Transcripts are disabled for this video"
-
- # Mock the load method to raise TranscriptsDisabled
- def raise_error(*_): # Use underscore to indicate unused arguments
- raise TranscriptsDisabled(error_message)
-
- mock_loader.from_youtube_url.return_value.load.side_effect = raise_error
-
- component = component_class()
- component.set_attributes(default_kwargs)
-
- # Test DataFrame output
- df_result = component.get_dataframe_output()
- assert isinstance(df_result, DataFrame)
- assert len(df_result) == 1 # One row for error message
- assert "error" in df_result.columns
- assert "Failed to get YouTube transcripts" in df_result["error"][0]
-
- # Test Message output
- msg_result = component.get_message_output()
- assert isinstance(msg_result, Message)
- assert "Failed to get YouTube transcripts" in msg_result.text
-
- # Test Data output
- data_result = component.get_data_output()
- assert isinstance(data_result, Data)
- assert "error" in data_result.data
- assert data_result.data["transcript"] == ""
-
- @patch("lfx.components.youtube.youtube_transcripts.YoutubeLoader")
- def test_no_transcript_found_error(self, mock_loader, component_class, default_kwargs):
- """Test handling of NoTranscriptFound error."""
- video_id = "test123"
- requested_langs = ["en"]
- transcript_data = {"en": {"translationLanguages": []}}
-
- # Mock the load method to raise NoTranscriptFound
- def raise_error(*_): # Use underscore to indicate unused arguments
- raise NoTranscriptFound(video_id, requested_langs, transcript_data)
-
- mock_loader.from_youtube_url.return_value.load.side_effect = raise_error
-
- component = component_class()
- component.set_attributes(default_kwargs)
-
- data_result = component.get_data_output()
- assert isinstance(data_result, Data)
- assert "error" in data_result.data
- assert data_result.data["transcript"] == ""
-
- def test_translation_setting(self, component_class):
- """Test setting different translation languages."""
- component = component_class()
- test_cases = ["en", "es", "fr", ""]
-
- for lang in test_cases:
- component.set_attributes({"url": "https://youtube.com/watch?v=test", "translation": lang})
- assert component.translation == lang
-
- @patch("lfx.components.youtube.youtube_transcripts.YoutubeLoader")
- def test_empty_transcript_handling(self, mock_loader, component_class, default_kwargs):
- """Test handling of empty transcript response."""
- mock_loader.from_youtube_url.return_value.load.return_value = []
-
- component = component_class()
- component.set_attributes(default_kwargs)
-
- # Test Data output with empty transcript
- data_result = component.get_data_output()
- assert data_result.data["error"] == "No transcripts found."
- assert data_result.data["transcript"] == ""
-
- # Test DataFrame output with empty transcript
- df_result = component.get_dataframe_output()
- assert len(df_result) == 0
diff --git a/src/backend/tests/unit/components/data/test_api_request_component.py b/src/backend/tests/unit/components/data/test_api_request_component.py
deleted file mode 100644
index a8f3e6205210..000000000000
--- a/src/backend/tests/unit/components/data/test_api_request_component.py
+++ /dev/null
@@ -1,333 +0,0 @@
-from pathlib import Path
-
-import aiofiles
-import aiofiles.os
-import httpx
-import pytest
-import respx
-from httpx import Response
-
-from lfx.components.data import APIRequestComponent
-from lfx.schema import Data
-from lfx.schema.dotdict import dotdict
-from tests.base import ComponentTestBaseWithoutClient
-
-
-class TestAPIRequestComponent(ComponentTestBaseWithoutClient):
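-    """Exercise APIRequestComponent: curl parsing, request execution through respx
-    mocks, header/body/query-param processing, and error handling."""
-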
- @pytest.fixture
- def component_class(self):
- """Return the component class to test."""
- return APIRequestComponent
-
- @pytest.fixture
- def default_kwargs(self):
- """Return the default kwargs for the component."""
- return {
- "url_input": "https://example.com/api/test",
- "method": "GET",
- "headers": [{"key": "User-Agent", "value": "test-agent"}],
- "body": [],
- "timeout": 30,
- "follow_redirects": True,
- "save_to_file": False,
- "include_httpx_metadata": False,
- "mode": "URL",
- "curl_input": "",
- "query_params": {},
- }
-
- @pytest.fixture
- def file_names_mapping(self):
- """Return an empty list since this component doesn't have version-specific files."""
- return []
-
- @pytest.fixture
- async def component(self, component_class, default_kwargs):
- """Return a component instance."""
- return component_class(**default_kwargs)
-
- async def test_parse_curl(self, component):
- # Test basic curl command parsing
- curl_cmd = (
- "curl -X GET https://example.com/api/test -H 'Content-Type: application/json' -d '{\"key\": \"value\"}'"
- )
- build_config = dotdict(
- {
- "method": {"value": ""},
- "url_input": {"value": ""},
- "headers": {"value": []},
- "body": {"value": []},
- }
- )
- new_build_config = component.parse_curl(curl_cmd, build_config.copy())
-
- assert new_build_config["method"]["value"] == "GET"
- assert new_build_config["url_input"]["value"] == "https://example.com/api/test"
- assert new_build_config["headers"]["value"] == [{"key": "Content-Type", "value": "application/json"}]
- assert new_build_config["body"]["value"] == [{"key": "key", "value": "value"}]
-
- @respx.mock
- async def test_make_request_success(self, component):
- # Test successful request with JSON response
- url = "https://example.com/api/test"
- response_data = {"key": "value"}
- respx.get(url).mock(return_value=Response(200, json=response_data))
-
- result = await component.make_request(
- client=httpx.AsyncClient(),
- method="GET",
- url=url,
- )
-
- assert isinstance(result, Data), result
- assert result.data["source"] == url
- assert "result" in result.data, result.data
- assert result.data["result"]["key"] == "value"
-
- @respx.mock
- async def test_make_request_with_metadata(self, component):
- # Test request with metadata included
- url = "https://example.com/api/test"
- headers = {"Custom-Header": "Value"}
- response_data = {"key": "value"}
- respx.get(url).mock(return_value=Response(200, json=response_data, headers=headers))
-
- result = await component.make_request(
- client=httpx.AsyncClient(),
- method="GET",
- url=url,
- include_httpx_metadata=True,
- )
-
- assert isinstance(result, Data)
- assert result.data["source"] == url
- assert result.data["status_code"] == 200
- assert result.data["response_headers"]["custom-header"] == "Value"
-
- @respx.mock
- async def test_make_request_save_to_file(self, component):
- # Test saving response to file
- url = "https://example.com/api/test"
- content = "Test content"
- respx.get(url).mock(return_value=Response(200, text=content))
-
- result = await component.make_request(
- client=httpx.AsyncClient(),
- method="GET",
- url=url,
- save_to_file=True,
- )
-
- assert isinstance(result, Data)
- assert "file_path" in result.data
- file_path = Path(result.data["file_path"])
-
- # Use async file operations
- assert await aiofiles.os.path.exists(file_path)
- async with aiofiles.open(file_path) as f:
- saved_content = await f.read()
- assert saved_content == content
-
- # Cleanup using async operation
- await aiofiles.os.remove(file_path)
-
- @respx.mock
- async def test_make_request_binary_response(self, component):
- # Test handling binary response
- url = "https://example.com/api/binary"
- binary_content = b"Binary content"
- headers = {"Content-Type": "application/octet-stream"}
- respx.get(url).mock(return_value=Response(200, content=binary_content, headers=headers))
-
- result = await component.make_request(
- client=httpx.AsyncClient(),
- method="GET",
- url=url,
- )
-
- assert isinstance(result, Data)
- assert result.data["source"] == url
- assert result.data["result"] == binary_content
-
- @respx.mock
- async def test_make_request_timeout(self, component):
- # Test request timeout
- url = "https://example.com/api/test"
- respx.get(url).mock(side_effect=httpx.TimeoutException("Request timed out"))
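-        # The component is expected to turn transport errors into an error Data payload
-        # with a synthetic 500 status instead of raising (see the assertions below).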
-
- result = await component.make_request(
- client=httpx.AsyncClient(),
- method="GET",
- url=url,
- timeout=1,
- )
-
- assert isinstance(result, Data)
- assert result.data["status_code"] == 500
- assert "Request timed out" in result.data["error"]
-
- @respx.mock
- async def test_make_request_with_redirects(self, component):
- # Test handling redirects
- url = "https://example.com/api/test"
- redirect_url = "https://example.com/api/redirect"
- final_data = {"key": "value"}
-
- respx.get(url).mock(return_value=Response(303, headers={"Location": redirect_url}))
- respx.get(redirect_url).mock(return_value=Response(200, json=final_data))
-
- result = await component.make_request(
- client=httpx.AsyncClient(),
- method="GET",
- url=url,
- include_httpx_metadata=True,
- follow_redirects=True,
- )
-
- assert isinstance(result, Data)
- assert result.data["source"] == url
- assert result.data["status_code"] == 200
- assert result.data["redirection_history"] == [{"url": redirect_url, "status_code": 303}]
-
- async def test_process_headers(self, component):
- # Test header processing
- headers_list = [
- {"key": "Content-Type", "value": "application/json"},
- {"key": "Authorization", "value": "Bearer token"},
- ]
- processed = component._process_headers(headers_list)
- assert processed == {
- "Content-Type": "application/json",
- "Authorization": "Bearer token",
- }
-
- # Test invalid headers
- assert component._process_headers(None) == {}
- assert component._process_headers([{"invalid": "format"}]) == {}
-
- async def test_process_body(self, component):
- # Test body processing
- # Test dictionary body
- dict_body = {"key": "value", "nested": {"inner": "value"}}
- assert component._process_body(dict_body) == dict_body
-
- # Test string body
- json_str = '{"key": "value"}'
- assert component._process_body(json_str) == {"key": "value"}
-
- # Test list body
- list_body = [{"key": "key1", "value": "value1"}, {"key": "key2", "value": "value2"}]
- assert component._process_body(list_body) == {"key1": "value1", "key2": "value2"}
-
- # Test invalid body
- assert component._process_body(None) == {}
- assert component._process_body([{"invalid": "format"}]) == {}
-
- async def test_add_query_params(self, component):
- # Test query parameter handling
- url = "https://example.com/api/test"
- params = {"param1": "value1", "param2": "value2"}
- result = component.add_query_params(url, params)
- assert "param1=value1" in result
- assert "param2=value2" in result
-
- # Test with existing query params
- url_with_params = "https://example.com/api/test?existing=true"
- result = component.add_query_params(url_with_params, params)
- assert "existing=true" in result
- assert "param1=value1" in result
- assert "param2=value2" in result
-
- async def test_make_api_request(self, component):
- # Test making API requests
- url = "https://example.com/api/test"
- response_data = {"key": "value"}
-
- with respx.mock:
- respx.get(url).mock(return_value=Response(200, json=response_data))
-
- result = await component.make_api_request()
-
- assert isinstance(result, Data)
- assert result.data["source"] == url
- assert result.data["result"]["key"] == "value"
-
- async def test_invalid_urls(self, component):
- # Test invalid URL handling
- component.url_input = "not_a_valid_url"
- with pytest.raises(ValueError, match="Invalid URL provided"):
- await component.make_api_request()
-
- async def test_update_build_config(self, component):
- # Test build config updates
- build_config = dotdict(
- {
- "method": {"value": "GET", "advanced": False},
- "url_input": {"value": "", "advanced": False},
- "headers": {"value": [], "advanced": True},
- "body": {"value": [], "advanced": True},
- "mode": {"value": "URL", "advanced": False},
- "curl_input": {"value": "curl -X GET https://example.com/api/test", "advanced": True},
- "timeout": {"value": 30, "advanced": True},
- "follow_redirects": {"value": True, "advanced": True},
- "save_to_file": {"value": False, "advanced": True},
- "include_httpx_metadata": {"value": False, "advanced": True},
- "query_params": {"value": {}, "advanced": True},
- }
- )
-
- # Test URL mode
- updated = component.update_build_config(build_config=build_config.copy(), field_value="URL", field_name="mode")
- assert updated["curl_input"]["advanced"] is True
- assert updated["url_input"]["advanced"] is False
-
- # Set the component's curl_input attribute to match the build_config before switching to cURL mode
- component.curl_input = build_config["curl_input"]["value"]
- # Test cURL mode
- updated = component.update_build_config(build_config=build_config.copy(), field_value="cURL", field_name="mode")
- assert updated["curl_input"]["advanced"] is False
- assert updated["url_input"]["advanced"] is True
-
- @respx.mock
- async def test_error_handling(self, component):
- # Test various error scenarios
- url = "https://example.com/api/test"
-
- # Test connection error
- respx.get(url).mock(side_effect=httpx.ConnectError("Connection failed"))
- result = await component.make_request(
- client=httpx.AsyncClient(),
- method="GET",
- url=url,
- )
- assert result.data["status_code"] == 500
- assert "Connection failed" in result.data["error"]
-
- # Test invalid method
- with pytest.raises(ValueError, match="Unsupported method"):
- await component.make_request(
- client=httpx.AsyncClient(),
- method="INVALID",
- url=url,
- )
-
- async def test_response_info(self, component):
- # Test response info handling
- url = "https://example.com/api/test"
- request = httpx.Request("GET", url)
- response = Response(200, text="test content", request=request)
- is_binary, file_path = await component._response_info(response, with_file_path=True)
-
- assert not is_binary
- assert file_path is not None
- assert file_path.suffix == ".txt"
-
- # Test binary response
- binary_response = Response(
- 200, content=b"binary content", headers={"Content-Type": "application/octet-stream"}, request=request
- )
- is_binary, file_path = await component._response_info(binary_response, with_file_path=True)
-
- assert is_binary
- assert file_path is not None
- assert file_path.suffix == ".bin"
diff --git a/src/backend/tests/unit/components/data/test_directory_component.py b/src/backend/tests/unit/components/data/test_directory_component.py
deleted file mode 100644
index 2694f95cf198..000000000000
--- a/src/backend/tests/unit/components/data/test_directory_component.py
+++ /dev/null
@@ -1,378 +0,0 @@
-import tempfile
-from pathlib import Path
-from unittest.mock import Mock, patch
-
-import pytest
-
-from lfx.components.data import DirectoryComponent
-from lfx.schema import Data, DataFrame
-from tests.base import ComponentTestBaseWithoutClient
-
-
-class TestDirectoryComponent(ComponentTestBaseWithoutClient):
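-    """Exercise DirectoryComponent.load_directory and as_dataframe across depth
-    settings, type filters, hidden files, and multithreaded loading."""
-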
- @pytest.fixture
- def component_class(self):
- """Return the component class to test."""
- return DirectoryComponent
-
- @pytest.fixture
- def default_kwargs(self, tmp_path):
- """Return the default kwargs for the component."""
- return {
- "path": str(tmp_path),
- "recursive": True,
- "use_multithreading": False,
- "silent_errors": False,
- "types": ["txt"],
- }
-
- @pytest.fixture
- def file_names_mapping(self):
-        """Return the file names mapping for different versions."""
- return [
- {"version": "1.0.19", "module": "data", "file_name": "Directory"},
- {"version": "1.1.0", "module": "data", "file_name": "directory"},
- {"version": "1.1.1", "module": "data", "file_name": "directory"},
- ]
-
- @patch("lfx.components.data.directory.parallel_load_data")
- @patch("lfx.components.data.directory.retrieve_file_paths")
- @patch("lfx.components.data.DirectoryComponent.resolve_path")
- def test_directory_component_build_with_multithreading(
- self, mock_resolve_path, mock_retrieve_file_paths, mock_parallel_load_data
- ):
- # Arrange
- directory_component = DirectoryComponent()
- path = Path(__file__).resolve().parent
- depth = 1
- max_concurrency = 2
- load_hidden = False
- recursive = True
- silent_errors = False
- use_multithreading = True
-
- mock_resolve_path.return_value = str(path)
- mock_retrieve_file_paths.return_value = [str(p) for p in path.iterdir() if p.suffix == ".py"]
- mock_parallel_load_data.return_value = [Mock()]
-
- # Act
- directory_component.set_attributes(
- {
- "path": str(path),
- "depth": depth,
- "max_concurrency": max_concurrency,
- "load_hidden": load_hidden,
- "recursive": recursive,
- "silent_errors": silent_errors,
- "use_multithreading": use_multithreading,
- "types": ["py"], # Add file types without dots
- }
- )
- directory_component.load_directory()
-
- # Assert
- mock_resolve_path.assert_called_once_with(str(path))
- mock_retrieve_file_paths.assert_called_once_with(
- mock_resolve_path.return_value,
- depth=depth,
- recursive=recursive,
- types=["py"],
- load_hidden=load_hidden,
- )
- mock_parallel_load_data.assert_called_once_with(
- mock_retrieve_file_paths.return_value,
- max_concurrency=max_concurrency,
- silent_errors=silent_errors,
- )
-
- def test_directory_without_mocks(self):
- directory_component = DirectoryComponent()
-
- with tempfile.TemporaryDirectory() as temp_dir:
- (Path(temp_dir) / "test.txt").write_text("test", encoding="utf-8")
- # also add a json file
- (Path(temp_dir) / "test.json").write_text('{"test": "test"}', encoding="utf-8")
-
- directory_component.set_attributes(
- {
- "path": str(temp_dir),
- "use_multithreading": False,
- "silent_errors": False,
- "types": ["txt", "json"],
- }
- )
- results = directory_component.load_directory()
- assert len(results) == 2
- values = ["test", '{"test":"test"}']
- assert all(result.text in values for result in results), [
- (len(result.text), len(val)) for result, val in zip(results, values, strict=True)
- ]
-
-        # ../docs/docs/Components contains many markdown files; verify the directory
-        # component loads them all by comparing the result count to the file count.
- directory_component = DirectoryComponent()
- docs_path = Path(__file__).parent.parent.parent.parent.parent.parent.parent / "docs" / "docs" / "Components"
- directory_component.set_attributes(
- {
- "path": str(docs_path),
- "use_multithreading": False,
- "silent_errors": False,
- "types": ["md", "json"],
- }
- )
- results = directory_component.load_directory()
- docs_files = list(docs_path.glob("*.md")) + list(docs_path.glob("*.json"))
- assert len(results) == len(docs_files)
-
- def test_directory_as_dataframe(self):
- """Test DirectoryComponent's as_dataframe method."""
- directory_component = DirectoryComponent()
-
- with tempfile.TemporaryDirectory() as temp_dir:
- # Create test files with different content
- files_content = {
- "file1.txt": "content1",
- "file2.json": '{"key": "content2"}',
- "file3.md": "# content3",
- }
-
- for filename, content in files_content.items():
- (Path(temp_dir) / filename).write_text(content, encoding="utf-8")
-
- directory_component.set_attributes(
- {
- "path": str(temp_dir),
- "use_multithreading": False,
- "types": ["txt", "json", "md"],
- "silent_errors": False,
- }
- )
-
- # Test as_dataframe
- data_frame = directory_component.as_dataframe()
-
- # Verify DataFrame structure
- assert isinstance(data_frame, DataFrame), "Expected DataFrame instance"
- assert len(data_frame) == 3, f"Expected DataFrame with 3 rows, got {len(data_frame)}"
-
- # Check column names
- expected_columns = ["text", "file_path"]
- actual_columns = list(data_frame.columns)
- assert set(expected_columns).issubset(set(actual_columns)), (
- f"Missing required columns. Expected at least {expected_columns}, got {actual_columns}"
- )
-
- # Verify content matches input files
- texts = data_frame["text"].tolist()
- # For JSON files, the content is parsed and re-serialized
- expected_content = {
- "file1.txt": "content1",
- "file2.json": '{"key":"content2"}', # JSON is re-serialized without spaces
- "file3.md": "# content3",
- }
- missing_content = [content for content in expected_content.values() if content not in texts]
- assert not missing_content, f"Missing expected content in DataFrame: {missing_content}"
-
- # Verify file paths are correct
- file_paths = data_frame["file_path"].tolist()
- expected_paths = [str(Path(temp_dir) / filename) for filename in files_content]
- missing_paths = [path for path in expected_paths if not any(path in fp for fp in file_paths)]
- assert not missing_paths, f"Missing expected file paths in DataFrame: {missing_paths}"
-
- def test_directory_with_depth(self):
- """Test DirectoryComponent with different depth settings."""
- directory_component = DirectoryComponent()
-
- with tempfile.TemporaryDirectory() as temp_dir:
- # Create a nested directory structure
- base_dir = Path(temp_dir)
- (base_dir / "level1").mkdir()
- (base_dir / "level1" / "level2").mkdir()
-
- # Create files at different levels
- (base_dir / "root.txt").write_text("root", encoding="utf-8")
- (base_dir / "level1" / "level1.txt").write_text("level1", encoding="utf-8")
- (base_dir / "level1" / "level2" / "level2.txt").write_text("level2", encoding="utf-8")
-
- # Test non-recursive (only root)
- directory_component.set_attributes(
- {
- "path": str(temp_dir),
- "recursive": False, # Set recursive to False to get only root files
- "use_multithreading": False,
- "silent_errors": False,
- "types": ["txt"],
- }
- )
- results_root = directory_component.load_directory()
- assert len(results_root) == 1, (
- "With recursive=False, expected 1 file (root.txt), "
- f"got {len(results_root)} files: {[d.data['file_path'] for d in results_root]}"
- )
- assert results_root[0].text == "root", f"Expected root file content 'root', got '{results_root[0].text}'"
-
- # Test recursive with all files
- directory_component.set_attributes(
- {
- "path": str(temp_dir),
- "recursive": True,
- "use_multithreading": False,
- "silent_errors": False,
- "types": ["txt"],
- }
- )
- results_all = directory_component.load_directory()
- assert len(results_all) == 3, (
- "With recursive=True, expected 3 files (all files), "
- f"got {len(results_all)} files: {[d.data['file_path'] for d in results_all]}"
- )
- texts = sorted([r.text for r in results_all])
- expected_texts = sorted(["root", "level1", "level2"])
- assert texts == expected_texts, f"Expected texts {expected_texts}, got {texts}"
-
- @pytest.mark.parametrize(
- ("file_types", "expected_count"),
- [
- (["txt"], 1),
- (["json"], 1),
- (["txt", "json"], 2),
- ],
- )
- def test_directory_with_types(self, file_types, expected_count):
- """Test DirectoryComponent with different file type filters (parameterized)."""
- directory_component = DirectoryComponent()
-
- with tempfile.TemporaryDirectory() as temp_dir:
- # Create files with different extensions
- (Path(temp_dir) / "test.txt").write_text("text content", encoding="utf-8")
- (Path(temp_dir) / "test.json").write_text('{"key": "value"}', encoding="utf-8")
- (Path(temp_dir) / "test.exe").write_text("test", encoding="utf-8")
-
- directory_component.set_attributes(
- {
- "path": str(temp_dir),
- "types": file_types,
- "use_multithreading": False,
- "silent_errors": False,
- }
- )
- results = directory_component.load_directory()
-
- # Verify number of loaded files
- assert len(results) == expected_count, (
- f"Expected {expected_count} results for file types {file_types}, got {len(results)}"
- )
- # Optionally, check the file extension in each result
- for r in results:
- # e.g., verify that the extension is indeed in file_types
- file_ext = Path(r.data["file_path"]).suffix.lstrip(".")
- assert file_ext in file_types, f"Unexpected file extension: {file_ext}"
-
- def test_directory_invalid_type(self):
- """Test DirectoryComponent raises error with invalid file type."""
- directory_component = DirectoryComponent()
-
- with tempfile.TemporaryDirectory() as temp_dir:
- # Create test file
- (Path(temp_dir) / "test.exe").write_text("test", encoding="utf-8")
-
- directory_component.set_attributes(
- {
- "path": str(temp_dir),
- "types": ["exe"],
- "use_multithreading": False,
- "silent_errors": False,
- }
- )
-
- with pytest.raises(
- ValueError, match="Invalid file types specified: \\['exe'\\]. Valid types are:"
- ) as exc_info:
- directory_component.load_directory()
-
- assert "Invalid file types specified: ['exe']" in str(exc_info.value)
- assert "Valid types are:" in str(exc_info.value)
-
- def test_directory_with_hidden_files(self):
- """Test DirectoryComponent with hidden files."""
- directory_component = DirectoryComponent()
-
- with tempfile.TemporaryDirectory() as temp_dir:
- # Create regular and hidden files
- (Path(temp_dir) / "regular.txt").write_text("regular", encoding="utf-8")
- (Path(temp_dir) / ".hidden.txt").write_text("hidden", encoding="utf-8")
-
- # Test without loading hidden files
- directory_component.set_attributes(
- {
- "path": str(temp_dir),
- "load_hidden": False,
- "use_multithreading": False,
- "silent_errors": False,
- "types": ["txt"],
- }
- )
- results = directory_component.load_directory()
- assert len(results) == 1
- assert results[0].text == "regular"
-
- # Test with loading hidden files
- directory_component.set_attributes({"load_hidden": True})
- results = directory_component.load_directory()
- assert len(results) == 2
- texts = [r.text for r in results]
- assert "regular" in texts
- assert "hidden" in texts
-
- @patch("lfx.components.data.directory.parallel_load_data")
- def test_directory_with_multithreading(self, mock_parallel_load):
- """Test DirectoryComponent with multithreading enabled."""
- directory_component = DirectoryComponent()
-
- with tempfile.TemporaryDirectory() as temp_dir:
- # Create test files
- (Path(temp_dir) / "test1.txt").write_text("content1", encoding="utf-8")
- (Path(temp_dir) / "test2.txt").write_text("content2", encoding="utf-8")
-
- # Mock parallel_load_data to return some test data
- mock_data = [
- Data(text="content1", data={"file_path": str(Path(temp_dir) / "test1.txt")}),
- Data(text="content2", data={"file_path": str(Path(temp_dir) / "test2.txt")}),
- ]
- mock_parallel_load.return_value = mock_data
-
- # Test with multithreading enabled
- directory_component.set_attributes(
- {
- "path": str(temp_dir),
- "use_multithreading": True,
- "max_concurrency": 2,
- "types": ["txt"], # Specify file types to ensure files are found
- "recursive": True, # Enable recursive search
- "silent_errors": False,
- }
- )
- results = directory_component.load_directory()
-
- # Verify parallel_load_data was called with correct parameters
- mock_parallel_load.assert_called_once()
- call_args = mock_parallel_load.call_args[1]
- assert call_args["max_concurrency"] == 2, (
- f"Expected max_concurrency=2, got {call_args.get('max_concurrency')}"
- )
- assert call_args["silent_errors"] is False, (
- f"Expected silent_errors=False, got {call_args.get('silent_errors')}"
- )
-
- # Verify results
- assert len(results) == 2, (
- f"Expected 2 results, got {len(results)}: {[r.data['file_path'] for r in results]}"
- )
- assert all(isinstance(r, Data) for r in results), (
- f"All results should be Data objects, got types: {[type(r) for r in results]}"
- )
-
- actual_texts = [r.text for r in results]
- expected_texts = ["content1", "content2"]
- assert actual_texts == expected_texts, f"Expected texts {expected_texts}, got {actual_texts}"
diff --git a/src/backend/tests/unit/components/data/test_file_component.py b/src/backend/tests/unit/components/data/test_file_component.py
deleted file mode 100644
index af758aaad812..000000000000
--- a/src/backend/tests/unit/components/data/test_file_component.py
+++ /dev/null
@@ -1,61 +0,0 @@
-from langflow.io import Output
-
-from lfx.components.data import FileComponent
-
-
-class TestFileComponentDynamicOutputs:
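-    """Exercise FileComponent.update_outputs: a single structured file exposes
-    structured + raw + path outputs, multiple files collapse to a single Files
-    output, and non-path fields leave the outputs untouched."""
-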
- def test_update_outputs_single_csv_file(self):
- """Test single CSV file shows structured + raw outputs."""
- component = FileComponent()
- frontend_node = {"outputs": [], "template": {"path": {"file_path": ["test.csv"]}}}
-
- result = component.update_outputs(frontend_node, "path", ["test.csv"])
-
- assert len(result["outputs"]) == 3
- output_names = [output.name for output in result["outputs"]]
- assert "dataframe" in output_names # Structured content
- assert "message" in output_names # Raw content
- assert "path" in output_names # File path
-
- def test_update_outputs_single_json_file(self):
- """Test single JSON file shows JSON + raw outputs."""
- component = FileComponent()
- frontend_node = {"outputs": [], "template": {"path": {"file_path": ["data.json"]}}}
-
- result = component.update_outputs(frontend_node, "path", ["data.json"])
-
- assert len(result["outputs"]) == 3
- output_names = [output.name for output in result["outputs"]]
- assert "json" in output_names # JSON content
- assert "message" in output_names # Raw content
- assert "path" in output_names # File path
-
- def test_update_outputs_multiple_files(self):
- """Test multiple files show only Files output."""
- component = FileComponent()
- frontend_node = {"outputs": [], "template": {"path": {"file_path": ["file1.txt", "file2.txt"]}}}
-
- result = component.update_outputs(frontend_node, "path", ["file1.txt", "file2.txt"])
-
- assert len(result["outputs"]) == 1
- assert result["outputs"][0].name == "dataframe"
- assert result["outputs"][0].display_name == "Files"
-
- def test_update_outputs_empty_path(self):
- """Test empty path results in no outputs."""
- component = FileComponent()
- frontend_node = {"outputs": [], "template": {"path": {"file_path": []}}}
-
- result = component.update_outputs(frontend_node, "path", [])
-
- assert len(result["outputs"]) == 0
-
- def test_update_outputs_non_path_field(self):
- """Test non-path fields don't affect outputs."""
- component = FileComponent()
- original_outputs = [Output(display_name="Test", name="test", method="test_method")]
- frontend_node = {"outputs": original_outputs, "template": {"path": {"file_path": ["value"]}}}
-
- result = component.update_outputs(frontend_node, "other_field", "value")
-
- assert result["outputs"] == original_outputs
diff --git a/src/backend/tests/unit/components/data/test_mcp_component.py b/src/backend/tests/unit/components/data/test_mcp_component.py
deleted file mode 100644
index ea6d584b3c8e..000000000000
--- a/src/backend/tests/unit/components/data/test_mcp_component.py
+++ /dev/null
@@ -1,206 +0,0 @@
-"""Unit tests for MCP component with actual MCP servers.
-
-This test suite validates the MCP component functionality using real MCP servers:
-- Everything server (stdio mode) - provides echo and other tools
-- DeepWiki server (SSE mode) - provides wiki-related tools
-"""
-
-import shutil
-from unittest.mock import AsyncMock, MagicMock, patch
-
-import pytest
-
-from lfx.base.mcp.util import MCPSessionManager, MCPSseClient, MCPStdioClient
-from lfx.components.agents.mcp_component import MCPToolsComponent
-from tests.base import ComponentTestBaseWithoutClient, VersionComponentMapping
-
-
-class TestMCPToolsComponent(ComponentTestBaseWithoutClient):
- @pytest.fixture
- def component_class(self):
- """Return the component class to test."""
- return MCPToolsComponent
-
- @pytest.fixture
- def default_kwargs(self):
- """Return the default kwargs for the component."""
- return {
- "mode": "Stdio",
- "command": "npx -y @modelcontextprotocol/server-everything",
- "sse_url": "https://mcp.deepwiki.com/sse",
- "tool": "echo",
- "mcp_server": {"name": "test_server", "config": {"command": "uvx mcp-server-fetch"}},
- }
-
- @pytest.fixture
- def file_names_mapping(self) -> list[VersionComponentMapping]:
- """Return the file names mapping for different versions."""
- return []
-
- @pytest.mark.asyncio
- @pytest.mark.skipif(not shutil.which("npx"), reason="Node.js not available")
- async def test_component_initialization(self, component_class, default_kwargs):
- """Test that the component initializes correctly."""
- component = component_class(**default_kwargs)
-
- # Check that the component has the expected attributes
- assert hasattr(component, "stdio_client")
- assert hasattr(component, "sse_client")
- assert isinstance(component.stdio_client, MCPStdioClient)
- assert isinstance(component.sse_client, MCPSseClient)
-
- # Check that the component has a session manager
- session_manager = component.stdio_client._get_session_manager()
- assert isinstance(session_manager, MCPSessionManager)
-
-
-class TestMCPToolsComponentIntegration:
- """Integration tests for the MCPToolsComponent."""
-
- @pytest.fixture
- def component(self):
- """Create a component for testing."""
- return MCPToolsComponent()
-
- @pytest.mark.asyncio
- @pytest.mark.skipif(not shutil.which("npx"), reason="Node.js not available")
- async def test_stdio_mode_integration(self, component):
- """Test the component in stdio mode with Everything server."""
- # Configure for stdio mode
- component.mode = "Stdio"
- component.command = "npx -y @modelcontextprotocol/server-everything"
- component.tool = "echo"
-
- try:
-            # Query the running server for its tool list and metadata
- tools, server_info = await component.update_tool_list()
-
- # Should have tools
- assert len(tools) > 0
-
- # Should have server info
- assert server_info is not None
- assert isinstance(server_info, dict)
-
- except Exception as e:
- # If the server is not accessible, skip the test
- pytest.skip(f"Everything server not accessible: {e}")
-
- @pytest.mark.asyncio
- async def test_sse_mode_integration(self, component):
- """Test the component in SSE mode with DeepWiki server."""
- # Configure for SSE mode
- component.mode = "SSE"
- component.sse_url = "https://mcp.deepwiki.com/sse"
-
- try:
-            # Query the running server for its tool list and metadata
- tools, server_info = await component.update_tool_list()
-
- # Should have tools
- assert len(tools) > 0
-
- # Should have server info
- assert server_info is not None
- assert isinstance(server_info, dict)
-
- except Exception as e:
- # If the server is not accessible, skip the test
- pytest.skip(f"DeepWiki server not accessible: {e}")
-
- @pytest.mark.asyncio
- async def test_session_context_setting(self, component):
- """Test that session context is properly set."""
- # Set session context
- component.stdio_client.set_session_context("test_context")
- component.sse_client.set_session_context("test_context")
-
- # Verify context was set
- assert component.stdio_client._session_context == "test_context"
- assert component.sse_client._session_context == "test_context"
-
- @pytest.mark.asyncio
- async def test_session_manager_sharing(self, component):
- """Test that session managers are shared through component cache."""
- # Get session managers
- stdio_manager = component.stdio_client._get_session_manager()
- sse_manager = component.sse_client._get_session_manager()
-
- # Both should be MCPSessionManager instances
- assert isinstance(stdio_manager, MCPSessionManager)
- assert isinstance(sse_manager, MCPSessionManager)
-
- # They should be the same instance (shared through cache)
- assert stdio_manager is sse_manager
-
-
-class TestMCPComponentErrorHandling:
- """Test error handling in MCP components."""
-
- @pytest.fixture
- def stdio_client(self):
- return MCPStdioClient()
-
- @pytest.fixture
- def mock_session_manager(self):
- """Create a mock session manager."""
- return AsyncMock(spec=MCPSessionManager)
-
- async def test_connect_to_server_with_command(self, stdio_client):
- """Test connecting to server via Stdio with command."""
- with patch.object(stdio_client, "_get_or_create_session") as mock_get_session:
- # Mock session
- mock_session = AsyncMock()
- mock_tool = MagicMock()
- mock_tool.name = "test_tool"
- list_tools_result = MagicMock()
- list_tools_result.tools = [mock_tool]
- mock_session.list_tools = AsyncMock(return_value=list_tools_result)
- mock_get_session.return_value = mock_session
-
- tools = await stdio_client.connect_to_server("uvx test-command")
-
- assert len(tools) == 1
- assert tools[0].name == "test_tool"
- assert stdio_client._connected is True
- assert stdio_client._connection_params is not None
-
- async def test_run_tool_success(self, stdio_client):
- """Test successfully running a tool."""
- # Setup connection state
- stdio_client._connected = True
- stdio_client._connection_params = MagicMock()
- stdio_client._session_context = "test_context"
-
- with patch.object(stdio_client, "_get_or_create_session") as mock_get_session:
- mock_session = AsyncMock()
- mock_result = MagicMock()
- mock_session.call_tool = AsyncMock(return_value=mock_result)
- mock_get_session.return_value = mock_session
-
- result = await stdio_client.run_tool("test_tool", {"param": "value"})
-
- assert result == mock_result
- mock_session.call_tool.assert_called_once_with("test_tool", arguments={"param": "value"})
-
- async def test_run_tool_without_connection(self, stdio_client):
- """Test running a tool without being connected."""
- stdio_client._connected = False
-
- with pytest.raises(ValueError, match="Session not initialized"):
- await stdio_client.run_tool("test_tool", {})
-
- async def test_disconnect_cleanup(self, stdio_client):
- """Test that disconnect properly cleans up resources."""
- stdio_client._session_context = "test_context"
- stdio_client._connected = True
-
- with patch.object(stdio_client, "_get_session_manager") as mock_get_manager:
- mock_manager = AsyncMock()
- mock_get_manager.return_value = mock_manager
-
- await stdio_client.disconnect()
-
- mock_manager._cleanup_session.assert_called_once_with("test_context")
- assert stdio_client.session is None
- assert stdio_client._connected is False
diff --git a/src/backend/tests/unit/components/data/test_news_search.py b/src/backend/tests/unit/components/data/test_news_search.py
deleted file mode 100644
index 0925a3c8d588..000000000000
--- a/src/backend/tests/unit/components/data/test_news_search.py
+++ /dev/null
@@ -1,88 +0,0 @@
-from unittest.mock import Mock, patch
-
-import pytest
-import requests
-
-from lfx.components.data.news_search import NewsSearchComponent
-from lfx.schema import DataFrame
-from tests.base import ComponentTestBaseWithoutClient
-
-
-class TestNewsSearchComponent(ComponentTestBaseWithoutClient):
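-    """Exercise NewsSearchComponent.search_news against mocked Google News RSS
-    responses, including network errors and empty feeds."""
-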
- @pytest.fixture
- def component_class(self):
- return NewsSearchComponent
-
- @pytest.fixture
- def default_kwargs(self):
- return {"query": "OpenAI"}
-
- @pytest.fixture
- def file_names_mapping(self):
- return []
-
- def test_successful_news_search(self):
- # Mock Google News RSS feed content
-        mock_rss_content = """
-        <rss version="2.0">
-            <channel>
-                <item>
-                    <title>Test News 1</title>
-                    <link>https://example.com/1</link>
-                    <pubDate>2024-03-20</pubDate>
-                    <description>Summary 1</description>
-                </item>
-                <item>
-                    <title>Test News 2</title>
-                    <link>https://example.com/2</link>
-                    <pubDate>2024-03-21</pubDate>
-                    <description>Summary 2</description>
-                </item>
-            </channel>
-        </rss>
-        """
- mock_response = Mock()
- mock_response.content = mock_rss_content.encode("utf-8")
- mock_response.raise_for_status = Mock()
-
- with patch("requests.get", return_value=mock_response):
- component = NewsSearchComponent(query="OpenAI")
- result = component.search_news()
- assert isinstance(result, DataFrame)
- news_results_df = result
- assert len(news_results_df) == 2
- assert list(news_results_df.columns) == ["title", "link", "published", "summary"]
- assert news_results_df.iloc[0]["title"] == "Test News 1"
- assert news_results_df.iloc[1]["title"] == "Test News 2"
-
- def test_news_search_error(self):
- with patch("requests.get", side_effect=requests.RequestException("Network error")):
- component = NewsSearchComponent(query="OpenAI")
- result = component.search_news()
- assert isinstance(result, DataFrame)
- news_results_df = result
- assert len(news_results_df) == 1
- assert news_results_df.iloc[0]["title"] == "Error"
- assert "Network error" in news_results_df.iloc[0]["summary"]
-
- def test_empty_news_results(self):
- # Mock empty RSS feed
-        mock_rss_content = """
-        <rss version="2.0">
-            <channel>
-            </channel>
-        </rss>
-        """
- mock_response = Mock()
- mock_response.content = mock_rss_content.encode("utf-8")
- mock_response.raise_for_status = Mock()
-
- with patch("requests.get", return_value=mock_response):
- component = NewsSearchComponent(query="OpenAI")
- result = component.search_news()
- assert isinstance(result, DataFrame)
- news_results_df = result
- assert len(news_results_df) == 1
- assert news_results_df.iloc[0]["title"] == "No articles found"
diff --git a/src/backend/tests/unit/components/data/test_rss.py b/src/backend/tests/unit/components/data/test_rss.py
deleted file mode 100644
index e66af5ecdaec..000000000000
--- a/src/backend/tests/unit/components/data/test_rss.py
+++ /dev/null
@@ -1,129 +0,0 @@
-from unittest.mock import Mock, patch
-
-import pytest
-import requests
-
-from lfx.components.data.rss import RSSReaderComponent
-from lfx.schema import DataFrame
-from tests.base import ComponentTestBaseWithoutClient
-
-
-class TestRSSReaderComponent(ComponentTestBaseWithoutClient):
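-    """Exercise RSSReaderComponent.read_rss against mocked requests.get responses:
-    well-formed feeds, missing item fields, network errors, and empty feeds."""
-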
- @pytest.fixture
- def component_class(self):
- """Return the component class to test."""
- return RSSReaderComponent
-
- @pytest.fixture
- def default_kwargs(self):
- """Return the default kwargs for the component."""
- return {
- "rss_url": "https://example.com/feed.xml",
- }
-
- @pytest.fixture
- def file_names_mapping(self):
- """Return an empty list since this component doesn't have version-specific files."""
- return []
-
- def test_successful_rss_fetch(self):
- # Mock RSS feed content
-        mock_rss_content = """
-        <rss version="2.0">
-            <channel>
-                <item>
-                    <title>Test Article 1</title>
-                    <link>https://example.com/1</link>
-                    <pubDate>2024-03-20</pubDate>
-                    <description>Test summary 1</description>
-                </item>
-                <item>
-                    <title>Test Article 2</title>
-                    <link>https://example.com/2</link>
-                    <pubDate>2024-03-21</pubDate>
-                    <description>Test summary 2</description>
-                </item>
-            </channel>
-        </rss>
-        """
-
- # Mock the requests.get response
- mock_response = Mock()
- mock_response.content = mock_rss_content.encode("utf-8")
- mock_response.raise_for_status = Mock()
-
- with patch("requests.get", return_value=mock_response):
- component = RSSReaderComponent(rss_url="https://example.com/feed.xml")
- result = component.read_rss()
-
- assert isinstance(result, DataFrame)
- assert len(result) == 2
- assert list(result.columns) == ["title", "link", "published", "summary"]
- assert result.iloc[0]["title"] == "Test Article 1"
- assert result.iloc[1]["title"] == "Test Article 2"
-
- def test_rss_fetch_with_missing_fields(self):
- # Mock RSS feed content with missing fields
-        mock_rss_content = """
-        <rss version="2.0">
-            <channel>
-                <item>
-                    <title>Test Article</title>
-                    <pubDate>2024-03-20</pubDate>
-                </item>
-            </channel>
-        </rss>
-        """
-
- mock_response = Mock()
- mock_response.content = mock_rss_content.encode("utf-8")
- mock_response.raise_for_status = Mock()
-
- with patch("requests.get", return_value=mock_response):
- component = RSSReaderComponent(rss_url="https://example.com/feed.xml")
- result = component.read_rss()
-
- assert isinstance(result, DataFrame)
- assert len(result) == 1
- assert result.iloc[0]["title"] == "Test Article"
- assert result.iloc[0]["link"] == ""
- assert result.iloc[0]["summary"] == ""
-
- def test_rss_fetch_error(self):
- # Mock a failed request
- with patch("requests.get", side_effect=requests.RequestException("Network error")):
- component = RSSReaderComponent(rss_url="https://example.com/feed.xml")
- result = component.read_rss()
-
- assert isinstance(result, DataFrame)
- assert len(result) == 1
- assert result.iloc[0]["title"] == "Error"
- assert result.iloc[0]["link"] == ""
- assert result.iloc[0]["published"] == ""
- assert "Network error" in result.iloc[0]["summary"]
-
- def test_empty_rss_feed(self):
- # Mock empty RSS feed
-        mock_rss_content = """
-        <rss version="2.0">
-            <channel>
-            </channel>
-        </rss>
-        """
-
- mock_response = Mock()
- mock_response.content = mock_rss_content.encode("utf-8")
- mock_response.raise_for_status = Mock()
-
- with patch("requests.get", return_value=mock_response):
- component = RSSReaderComponent(rss_url="https://example.com/feed.xml")
- result = component.read_rss()
-
- assert isinstance(result, DataFrame)
- assert len(result) == 0
- assert list(result.columns) == ["title", "link", "published", "summary"]
diff --git a/src/backend/tests/unit/components/data/test_s3_uploader_component.py b/src/backend/tests/unit/components/data/test_s3_uploader_component.py
deleted file mode 100644
index 53bb4fe51ba9..000000000000
--- a/src/backend/tests/unit/components/data/test_s3_uploader_component.py
+++ /dev/null
@@ -1,136 +0,0 @@
-import os
-import tempfile
-import uuid
-from pathlib import Path
-
-import boto3
-import pytest
-
-from lfx.components.amazon.s3_bucket_uploader import S3BucketUploaderComponent
-from lfx.schema.data import Data
-from tests.base import ComponentTestBaseWithoutClient
-
-
-@pytest.mark.skipif(
- not os.environ.get("AWS_ACCESS_KEY_ID") or not os.environ.get("AWS_SECRET_ACCESS_KEY"),
- reason="Environment variable AWS_ACCESS_KEY_ID or AWS_SECRET_ACCESS_KEY is not defined.",
-)
-class TestS3UploaderComponent(ComponentTestBaseWithoutClient):
- """Unit tests for the S3BucketUploaderComponent.
-
- This test class inherits from ComponentTestBaseWithoutClient and includes several pytest fixtures and a test method
- to verify the functionality of the S3BucketUploaderComponent.
-
- Fixtures:
- component_class: Returns the component class to be tested.
- file_names_mapping: Returns an empty list since this component doesn't have version-specific files.
- default_kwargs: Returns an empty dictionary since this component doesn't have any default arguments.
- temp_files: Creates three temporary files with predefined content and yields them as Data objects.
- Cleans up the files after the test.
- s3_bucket: Creates a unique S3 bucket for testing, yields the bucket name, and deletes the bucket
- and its contents after the test.
-
- Test Methods:
- test_upload: Tests the upload functionality of the S3BucketUploaderComponent by uploading temporary files
- to the S3 bucket and verifying their content.
- """
-
- @pytest.fixture
- def component_class(self):
- """Return the component class to test."""
- return S3BucketUploaderComponent
-
- @pytest.fixture
- def file_names_mapping(self):
- """Return an empty list since this component doesn't have version-specific files."""
-        """Return an empty list since this component doesn't have version-specific files."""
-        return []
- @pytest.fixture
- def default_kwargs(self):
- """Return an empty dictionary since this component doesn't have any default arguments."""
- return {}
-
- @pytest.fixture
- def temp_files(self):
- """Setup: Create three temporary files."""
- temp_files = []
- contents = [
- b"Lorem ipsum dolor sit amet, consectetur adipiscing elit.",
- b"Sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.",
- b"Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris "
- b"nisi ut aliquip ex ea commodo consequat.",
- ]
-
- for content in contents:
- with tempfile.NamedTemporaryFile(suffix=".txt", delete=False) as temp_file:
- temp_file.write(content)
- temp_files.append(temp_file.name)
-
- data = [
- Data(data={"file_path": file_path, "text": Path(file_path).read_text(encoding="utf-8")})
- for file_path in temp_files
- ]
-
- yield data
-
- # Teardown: Explicitly delete the files
- for temp_file in temp_files:
- Path(temp_file).unlink()
-
- @pytest.fixture
- def s3_bucket(self) -> str:
- """Generate a unique bucket name (AWS requires globally unique names)."""
- bucket_name = f"graphrag-test-bucket-{uuid.uuid4().hex[:8]}"
-
- # Initialize S3 client using environment variables for credentials
- s3 = boto3.client("s3")
-
- try:
- # Create an S3 bucket in your default region
- s3.create_bucket(Bucket=bucket_name)
-
- yield bucket_name
-
- finally:
- # Teardown: Delete the bucket and its contents
- try:
- # List and delete all objects in the bucket
- objects = s3.list_objects_v2(Bucket=bucket_name).get("Contents", [])
- for obj in objects:
- s3.delete_object(Bucket=bucket_name, Key=obj["Key"])
-
- # Delete the bucket
- s3.delete_bucket(Bucket=bucket_name)
- except boto3.exceptions.Boto3Error as e:
- pytest.fail(f"Error during teardown: {e}")
-
- def test_upload(self, temp_files, s3_bucket):
- """Test uploading files to an S3 bucket."""
- component = S3BucketUploaderComponent()
-
- # Set AWS credentials from environment variables
- aws_access_key_id = os.getenv("AWS_ACCESS_KEY_ID")
- aws_secret_access_key = os.getenv("AWS_SECRET_ACCESS_KEY")
- component.set_attributes(
- {
- "aws_access_key_id": aws_access_key_id,
- "aws_secret_access_key": aws_secret_access_key,
- "bucket_name": s3_bucket,
- "strategy": "Store Original File",
- "data_inputs": temp_files,
- "s3_prefix": "test",
- "strip_path": True,
- }
- )
-
- component.process_files()
-
- # Check if the files were uploaded. Assumes key and secret are set via environment variables
- s3 = boto3.client("s3")
-
- for temp_file in temp_files:
- key = f"test/{Path(temp_file.data['file_path']).name}"
- response = s3.get_object(Bucket=s3_bucket, Key=key)
- with Path(temp_file.data["file_path"]).open("rb") as f:
- assert response["Body"].read() == f.read()
diff --git a/src/backend/tests/unit/components/data/test_sql_executor.py b/src/backend/tests/unit/components/data/test_sql_executor.py
deleted file mode 100644
index 4b4d03f81942..000000000000
--- a/src/backend/tests/unit/components/data/test_sql_executor.py
+++ /dev/null
@@ -1,103 +0,0 @@
-import sqlite3
-from pathlib import Path
-
-import pytest
-
-from lfx.components.data.sql_executor import SQLComponent
-from lfx.schema import DataFrame, Message
-from tests.base import ComponentTestBaseWithoutClient
-
-
-class TestSQLComponent(ComponentTestBaseWithoutClient):
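-    """Exercise SQLComponent against a temporary SQLite database: build_component
-    returns results as a Message (optionally with column names or error details),
-    while run_sql_query returns a DataFrame."""
-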
- @pytest.fixture
- def test_db(self):
- """Fixture that creates a temporary SQLite database for testing."""
- test_data_dir = Path(__file__).parent.parent.parent.parent / "data"
- db_path = test_data_dir / "test.db"
- conn = sqlite3.connect(db_path)
- cursor = conn.cursor()
- cursor.execute("""
- CREATE TABLE IF NOT EXISTS test (
- id INTEGER PRIMARY KEY,
- name TEXT
- )
- """)
- cursor.execute("""
- INSERT INTO test (id, name)
- VALUES (1, 'name_test')
- """)
- conn.commit()
- conn.close()
- yield str(db_path)
-
-        db_path.unlink()
-
- @pytest.fixture
- def component_class(self):
- """Return the component class to test."""
- return SQLComponent
-
- @pytest.fixture
- def default_kwargs(self, test_db):
- """Return the default kwargs for the component."""
- return {
- "database_url": f"sqlite:///{test_db}",
- "query": "SELECT * FROM test",
- "include_columns": True,
- "add_error": False,
- }
-
- @pytest.fixture
- def file_names_mapping(self):
- """Return an empty list since this component doesn't have version-specific files."""
- return []
-
- def test_successful_query_with_columns(self, component_class: type[SQLComponent], default_kwargs):
- """Test a successful SQL query with columns included."""
- component = component_class(**default_kwargs)
-
- result = component.build_component()
-
- assert isinstance(result, Message)
- assert isinstance(result.text, str)
- assert result.text == "[{'id': 1, 'name': 'name_test'}]"
-
- def test_successful_query_without_columns(self, component_class: type[SQLComponent], default_kwargs):
- """Test a successful SQL query without columns included."""
- default_kwargs["include_columns"] = False
- component = component_class(**default_kwargs)
-
- result = component.build_component()
-
- assert isinstance(result, Message)
- assert isinstance(result.text, str)
- assert result.text == "[(1, 'name_test')]"
- assert component.status == "[(1, 'name_test')]"
- assert component.query == "SELECT * FROM test"
-
- def test_query_error_with_add_error(self, component_class: type[SQLComponent], default_kwargs):
- """Test a SQL query that raises an error with add_error=True."""
- default_kwargs["add_error"] = True
- default_kwargs["query"] = "SELECT * FROM non_existent_table"
- component = component_class(**default_kwargs)
-
- result = component.build_component()
-
- assert isinstance(result, Message)
- assert isinstance(result.text, str)
- assert "no such table: non_existent_table" in result.text
- assert "Error:" in result.text
- assert "Query: SELECT * FROM non_existent_table" in result.text
-
- def test_run_sql_query(self, component_class: type[SQLComponent], default_kwargs):
- """Test building a DataFrame from a SQL query."""
- component = component_class(**default_kwargs)
-
- result = component.run_sql_query()
-
- assert isinstance(result, DataFrame)
- assert len(result) == 1
- assert "id" in result.columns
- assert "name" in result.columns
- assert result.iloc[0]["id"] == 1
- assert result.iloc[0]["name"] == "name_test"
diff --git a/src/backend/tests/unit/components/data/test_url_component.py b/src/backend/tests/unit/components/data/test_url_component.py
deleted file mode 100644
index 0122a318d225..000000000000
--- a/src/backend/tests/unit/components/data/test_url_component.py
+++ /dev/null
@@ -1,229 +0,0 @@
-from unittest.mock import Mock, patch
-
-import pytest
-
-from lfx.components.data import URLComponent
-from lfx.schema import DataFrame
-from tests.base import ComponentTestBaseWithoutClient
-
-
-class TestURLComponent(ComponentTestBaseWithoutClient):
- @pytest.fixture
- def component_class(self):
- """Return the component class to test."""
- return URLComponent
-
- @pytest.fixture
- def default_kwargs(self):
- """Return the default kwargs for the component."""
- return {
- "urls": ["https://google.com"],
- "format": "Text",
- "max_depth": 1,
- "prevent_outside": True,
- "use_async": True,
- }
-
- @pytest.fixture
- def file_names_mapping(self):
- """Return an empty list since this component doesn't have version-specific files."""
- return [
- {"version": "1.0.19", "module": "data", "file_name": "URL"},
- {"version": "1.1.0", "module": "data", "file_name": "url"},
- {"version": "1.1.1", "module": "data", "file_name": "url"},
- {"version": "1.2.0", "module": "data", "file_name": "url"},
- ]
-
- @pytest.fixture
- def mock_recursive_loader(self):
- """Mock the RecursiveUrlLoader.load method."""
- with patch("langchain_community.document_loaders.RecursiveUrlLoader.load") as mock:
- yield mock
-
- def test_url_component_basic_functionality(self, mock_recursive_loader):
- """Test basic URLComponent functionality."""
- component = URLComponent()
- component.set_attributes({"urls": ["https://example.com"], "max_depth": 2})
-
- mock_doc = Mock(
- page_content="test content",
- metadata={
- "source": "https://example.com",
- "title": "Test Page",
- "description": "Test Description",
- "content_type": "text/html",
- "language": "en",
- },
- )
- mock_recursive_loader.return_value = [mock_doc]
-
- data_frame = component.fetch_content()
- assert isinstance(data_frame, DataFrame)
- assert len(data_frame) == 1
-
- row = data_frame.iloc[0]
- assert row["text"] == "test content"
- assert row["url"] == "https://example.com"
- assert row["title"] == "Test Page"
- assert row["description"] == "Test Description"
- assert row["content_type"] == "text/html"
- assert row["language"] == "en"
-
- def test_url_component_multiple_urls(self, mock_recursive_loader):
- """Test URLComponent with multiple URL inputs."""
- # Setup component with multiple URLs
- component = URLComponent()
- urls = ["https://example1.com", "https://example2.com"]
- component.set_attributes({"urls": urls})
-
- # Create mock documents for each URL
- mock_docs = [
- Mock(
- page_content="Content from first URL",
- metadata={
- "source": "https://example1.com",
- "title": "First Page",
- "description": "First Description",
- "content_type": "text/html",
- "language": "en",
- },
- ),
- Mock(
- page_content="Content from second URL",
- metadata={
- "source": "https://example2.com",
- "title": "Second Page",
- "description": "Second Description",
- "content_type": "text/html",
- "language": "en",
- },
- ),
- ]
-
- # Configure mock to return both documents
- mock_recursive_loader.return_value = mock_docs
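- # The mocked loader returns both documents for each of the two URLs,
- # so the fetch below is expected to produce four rows.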
-
- # Execute component
- result = component.fetch_content()
-
- # Verify results
- assert isinstance(result, DataFrame)
- assert len(result) == 4
-
- # Verify first URL content
- first_row = result.iloc[0]
- assert first_row["text"] == "Content from first URL"
- assert first_row["url"] == "https://example1.com"
- assert first_row["title"] == "First Page"
- assert first_row["description"] == "First Description"
-
- # Verify second URL content
- second_row = result.iloc[1]
- assert second_row["text"] == "Content from second URL"
- assert second_row["url"] == "https://example2.com"
- assert second_row["title"] == "Second Page"
- assert second_row["description"] == "Second Description"
-
- def test_url_component_format_options(self, mock_recursive_loader):
- """Test URLComponent with different format options."""
- component = URLComponent()
-
- # Test with Text format
- component.set_attributes({"urls": ["https://example.com"], "format": "Text"})
- mock_recursive_loader.return_value = [
- Mock(
- page_content="extracted text",
- metadata={
- "source": "https://example.com",
- "title": "Test Page",
- "description": "Test Description",
- "content_type": "text/html",
- "language": "en",
- },
- )
- ]
- data_frame = component.fetch_content()
- assert data_frame.iloc[0]["text"] == "extracted text"
- assert data_frame.iloc[0]["content_type"] == "text/html"
-
- # Test with HTML format
- component.set_attributes({"urls": ["https://example.com"], "format": "HTML"})
- mock_recursive_loader.return_value = [
- Mock(
- page_content="raw html",
- metadata={
- "source": "https://example.com",
- "title": "Test Page",
- "description": "Test Description",
- "content_type": "text/html",
- "language": "en",
- },
- )
- ]
- data_frame = component.fetch_content()
- assert data_frame.iloc[0]["text"] == "raw html"
- assert data_frame.iloc[0]["content_type"] == "text/html"
-
- def test_url_component_missing_metadata(self, mock_recursive_loader):
- """Test URLComponent with missing metadata fields."""
- component = URLComponent()
- component.set_attributes({"urls": ["https://example.com"]})
-
- mock_doc = Mock(
- page_content="test content",
- metadata={"source": "https://example.com"}, # Only source is provided
- )
- mock_recursive_loader.return_value = [mock_doc]
-
- data_frame = component.fetch_content()
- row = data_frame.iloc[0]
- assert row["text"] == "test content"
- assert row["url"] == "https://example.com"
- assert row["title"] == "" # Default empty string
- assert row["description"] == "" # Default empty string
- assert row["content_type"] == "" # Default empty string
- assert row["language"] == "" # Default empty string
-
- def test_url_component_error_handling(self, mock_recursive_loader):
- """Test error handling in URLComponent."""
- component = URLComponent()
-
- # Test empty URLs
- component.set_attributes({"urls": []})
- with pytest.raises(ValueError, match="Error loading documents:"):
- component.fetch_content()
-
- # Test request exception
- component.set_attributes({"urls": ["https://example.com"]})
- mock_recursive_loader.side_effect = Exception("Connection error")
- with pytest.raises(ValueError, match="Error loading documents:"):
- component.fetch_content()
-
- # Test no documents found
- mock_recursive_loader.side_effect = None
- mock_recursive_loader.return_value = []
- with pytest.raises(ValueError, match="Error loading documents:"):
- component.fetch_content()
-
- def test_url_component_ensure_url(self):
- """Test URLComponent's ensure_url method."""
- component = URLComponent()
-
- # Test URL without protocol
- url = "example.com"
- fixed_url = component.ensure_url(url)
- assert fixed_url == "https://example.com"
-
- # Test URL that already has a protocol (returned unchanged)
- url = "https://example.com"
- fixed_url = component.ensure_url(url)
- assert fixed_url == "https://example.com"
-
- # Test invalid URL
- with pytest.raises(ValueError, match="Invalid URL"):
- component.ensure_url("not a url")
diff --git a/src/backend/tests/unit/components/data/test_web_search.py b/src/backend/tests/unit/components/data/test_web_search.py
deleted file mode 100644
index df95784356c2..000000000000
--- a/src/backend/tests/unit/components/data/test_web_search.py
+++ /dev/null
@@ -1,42 +0,0 @@
-import pytest
-
-from lfx.components.data.web_search import WebSearchComponent
-from lfx.schema import DataFrame
-from tests.base import ComponentTestBaseWithoutClient
-
-
-class TestWebSearchComponent(ComponentTestBaseWithoutClient):
- @pytest.fixture
- def component_class(self):
- """Return the component class to test."""
- return WebSearchComponent
-
- @pytest.fixture
- def default_kwargs(self):
- """Return the default kwargs for the component."""
- return {
- "query": "OpenAI GPT-4",
- }
-
- @pytest.fixture
- def file_names_mapping(self):
- """Return the file names mapping for the component."""
- return []
-
- async def test_invalid_url_handling(self):
- # Create a test instance of the component
- component = WebSearchComponent()
-
- # Set an invalid URL
- invalid_url = "htp://invalid-url"
-
- # Ensure the URL is invalid
- with pytest.raises(ValueError, match="Invalid URL"):
- component.ensure_url(invalid_url)
-
- def test_successful_web_search(self):
- component = WebSearchComponent()
- component.query = "OpenAI GPT-4"
- result = component.perform_search()
- assert isinstance(result, DataFrame)
- assert not result.empty
diff --git a/src/backend/tests/unit/components/git/__init__.py b/src/backend/tests/unit/components/git/__init__.py
deleted file mode 100644
index 88101da317e1..000000000000
--- a/src/backend/tests/unit/components/git/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-"""Git component tests."""
diff --git a/src/backend/tests/unit/components/git/test_git_component.py b/src/backend/tests/unit/components/git/test_git_component.py
deleted file mode 100644
index 7efc1f253f5d..000000000000
--- a/src/backend/tests/unit/components/git/test_git_component.py
+++ /dev/null
@@ -1,135 +0,0 @@
-import tempfile
-from pathlib import Path
-
-import pytest
-
-from lfx.components.git import GitLoaderComponent
-
-
-@pytest.fixture
-def git_component():
- return GitLoaderComponent()
-
-
-@pytest.fixture
-def test_files():
- """Create temporary test files for filtering."""
- with tempfile.TemporaryDirectory() as temp_dir:
- # Create a Python file
- python_file = Path(temp_dir) / "test.py"
- python_file.write_text("import langchain\nclass TestComponent:\n pass", encoding="utf-8")
-
- # Create a text file
- text_file = Path(temp_dir) / "test.txt"
- text_file.write_text("This is a test file", encoding="utf-8")
-
- # Create a binary file
- binary_file = Path(temp_dir) / "test.bin"
- binary_file.write_bytes(b"Binary\x00Content")
-
- # Create a directory for permission tests
- no_access_dir = Path(temp_dir) / "no_access"
- no_access_dir.mkdir()
- no_access_file = no_access_dir / "secret.txt"
- no_access_file.write_text("secret", encoding="utf-8")
- no_access_file.chmod(0o000) # Remove all permissions
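- # Note: this chmod-based restriction is not enforced when running as root or on Windows.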
-
- yield temp_dir
-
-
-def test_is_binary(git_component, test_files):
- """Test binary file detection."""
- temp_dir = Path(test_files)
-
- # Test regular files
- assert not git_component.is_binary(temp_dir / "test.py")
- assert not git_component.is_binary(temp_dir / "test.txt")
- assert git_component.is_binary(temp_dir / "test.bin")
-
- # Test error cases
- assert git_component.is_binary(temp_dir / "nonexistent.txt") # Non-existent file
- assert git_component.is_binary(temp_dir / "no_access" / "secret.txt") # No permission
-
-
-def test_check_file_patterns(git_component, test_files):
- """Test file pattern matching."""
- temp_dir = Path(test_files)
-
- # Test single pattern
- assert git_component.check_file_patterns(temp_dir / "test.py", "*.py")
- assert not git_component.check_file_patterns(temp_dir / "test.txt", "*.py")
-
- # Test exclusion pattern
- assert not git_component.check_file_patterns(temp_dir / "test.py", "!*.py")
-
- # Test multiple patterns
- assert git_component.check_file_patterns(temp_dir / "test.py", "*.py,*.txt")
- assert git_component.check_file_patterns(temp_dir / "test.txt", "*.py,*.txt")
-
- # Test mixed include/exclude
- assert not git_component.check_file_patterns(temp_dir / "test.py", "*.py,!test.py")
- assert git_component.check_file_patterns(temp_dir / "other.py", "*.py,!test.py")
-
- # Test empty pattern (should include all)
- assert git_component.check_file_patterns(temp_dir / "test.py", "")
- assert git_component.check_file_patterns(temp_dir / "test.txt", " ")
-
- # Test invalid pattern (should treat as literal string)
- assert not git_component.check_file_patterns(temp_dir / "test.py", "[")
-
-
-def test_check_content_pattern(git_component, test_files):
- """Test content pattern matching."""
- temp_dir = Path(test_files)
-
- # Test simple content match
- assert git_component.check_content_pattern(temp_dir / "test.py", r"import langchain")
- assert not git_component.check_content_pattern(temp_dir / "test.txt", r"import langchain")
-
- # Test regex pattern
- assert git_component.check_content_pattern(temp_dir / "test.py", r"class.*Component")
-
- # Test binary file
- assert not git_component.check_content_pattern(temp_dir / "test.bin", r"Binary")
-
- # Test invalid regex patterns
- assert not git_component.check_content_pattern(temp_dir / "test.py", r"[") # Unclosed bracket
- assert not git_component.check_content_pattern(temp_dir / "test.py", r"*") # Invalid quantifier
- assert not git_component.check_content_pattern(temp_dir / "test.py", r"(?<)") # Invalid lookbehind
- assert not git_component.check_content_pattern(temp_dir / "test.py", r"\1") # Invalid backreference
-
-
-def test_combined_filter(git_component, test_files):
- """Test the combined filter function."""
- temp_dir = Path(test_files)
-
- # Test with both patterns
- filter_func = git_component.build_combined_filter(
- file_filter_patterns="*.py", content_filter_pattern=r"class.*Component"
- )
- assert filter_func(str(temp_dir / "test.py"))
- assert not filter_func(str(temp_dir / "test.txt"))
- assert not filter_func(str(temp_dir / "test.bin"))
-
- # Test with only file pattern
- filter_func = git_component.build_combined_filter(file_filter_patterns="*.py")
- assert filter_func(str(temp_dir / "test.py"))
- assert not filter_func(str(temp_dir / "test.txt"))
-
- # Test with only content pattern
- filter_func = git_component.build_combined_filter(content_filter_pattern=r"class.*Component")
- assert filter_func(str(temp_dir / "test.py"))
- assert not filter_func(str(temp_dir / "test.txt"))
-
- # Test with empty patterns
- filter_func = git_component.build_combined_filter()
- assert filter_func(str(temp_dir / "test.py"))
- assert filter_func(str(temp_dir / "test.txt"))
- assert not filter_func(str(temp_dir / "test.bin")) # Binary files still excluded
-
- # Test error cases
- filter_func = git_component.build_combined_filter(
- file_filter_patterns="*.py", content_filter_pattern=r"class.*Component"
- )
- assert not filter_func(str(temp_dir / "nonexistent.txt")) # Non-existent file
- assert not filter_func(str(temp_dir / "no_access" / "secret.txt")) # No permission
diff --git a/src/backend/tests/unit/components/inputs/test_input_components.py b/src/backend/tests/unit/components/inputs/test_input_components.py
deleted file mode 100644
index d8918dfd1688..000000000000
--- a/src/backend/tests/unit/components/inputs/test_input_components.py
+++ /dev/null
@@ -1,145 +0,0 @@
-import pytest
-from anyio import Path
-
-from lfx.components.input_output import ChatInput, TextInputComponent
-from lfx.schema.message import Message
-from lfx.utils.constants import MESSAGE_SENDER_AI, MESSAGE_SENDER_NAME_USER, MESSAGE_SENDER_USER
-from tests.base import ComponentTestBaseWithClient, ComponentTestBaseWithoutClient
-
-
-@pytest.mark.usefixtures("client")
-class TestChatInput(ComponentTestBaseWithClient):
- @pytest.fixture
- def component_class(self):
- return ChatInput
-
- @pytest.fixture
- def default_kwargs(self):
- return {
- "input_value": "Hello, how are you?",
- "should_store_message": True,
- "sender": MESSAGE_SENDER_USER,
- "sender_name": MESSAGE_SENDER_NAME_USER,
- "session_id": "test_session_123",
- "files": [],
- "background_color": "#f0f0f0",
- "chat_icon": "👤",
- "text_color": "#000000",
- }
-
- @pytest.fixture
- def file_names_mapping(self):
- return [
- {"version": "1.0.19", "module": "inputs", "file_name": "ChatInput"},
- {"version": "1.1.0", "module": "inputs", "file_name": "chat"},
- {"version": "1.1.1", "module": "inputs", "file_name": "chat"},
- ]
-
- async def test_message_response(self, component_class, default_kwargs):
- """Test that the message_response method returns a valid Message object."""
- component = component_class(**default_kwargs)
- message = await component.message_response()
-
- assert isinstance(message, Message)
- assert message.text == default_kwargs["input_value"]
- assert message.sender == default_kwargs["sender"]
- assert message.sender_name == default_kwargs["sender_name"]
- assert message.session_id == default_kwargs["session_id"]
- assert message.files == default_kwargs["files"]
- assert message.properties.model_dump() == {
- "background_color": default_kwargs["background_color"],
- "text_color": default_kwargs["text_color"],
- "icon": default_kwargs["chat_icon"],
- "positive_feedback": None,
- "edited": False,
- "source": {"id": None, "display_name": None, "source": None},
- "allow_markdown": False,
- "state": "complete",
- "targets": [],
- }
-
- async def test_message_response_ai_sender(self, component_class):
- """Test message response with AI sender type."""
- kwargs = {
- "input_value": "I am an AI assistant",
- "sender": MESSAGE_SENDER_AI,
- "sender_name": "AI Assistant",
- "session_id": "test_session_123",
- }
- component = component_class(**kwargs)
- message = await component.message_response()
-
- assert isinstance(message, Message)
- assert message.sender == MESSAGE_SENDER_AI
- assert message.sender_name == "AI Assistant"
-
- async def test_message_response_without_session(self, component_class):
- """Test message response without session ID."""
- kwargs = {
- "input_value": "Test message",
- "sender": MESSAGE_SENDER_USER,
- "sender_name": MESSAGE_SENDER_NAME_USER,
- "session_id": "", # Empty session ID
- }
- component = component_class(**kwargs)
- message = await component.message_response()
-
- assert isinstance(message, Message)
- assert message.session_id == ""
-
- async def test_message_response_with_files(self, component_class, tmp_path):
- """Test message response with file attachments."""
- # Create a temporary test file
- test_file = Path(tmp_path) / "test.txt"
- await test_file.write_text("Test content", encoding="utf-8")
-
- kwargs = {
- "input_value": "Message with file",
- "sender": MESSAGE_SENDER_USER,
- "sender_name": MESSAGE_SENDER_NAME_USER,
- "session_id": "test_session_123",
- "files": [str(test_file)],
- }
- component = component_class(**kwargs)
- message = await component.message_response()
-
- assert isinstance(message, Message)
- assert len(message.files) == 1
- assert message.files[0] == str(test_file)
-
- async def test_message_storage_disabled(self, component_class):
- """Test message response when storage is disabled."""
- kwargs = {
- "input_value": "Test message",
- "should_store_message": False,
- "sender": MESSAGE_SENDER_USER,
- "sender_name": MESSAGE_SENDER_NAME_USER,
- "session_id": "test_session_123",
- }
- component = component_class(**kwargs)
- message = await component.message_response()
-
- assert isinstance(message, Message)
- # The message should still be created but not stored
- assert message.text == "Test message"
-
-
-class TestTextInputComponent(ComponentTestBaseWithoutClient):
- @pytest.fixture
- def component_class(self):
- return TextInputComponent
-
- @pytest.fixture
- def default_kwargs(self):
- return {
- "input_value": "Hello, world!",
- "data_template": "{text}",
- }
-
- @pytest.fixture
- def file_names_mapping(self):
- return [
- {"version": "1.0.19", "module": "inputs", "file_name": "TextInput"},
- {"version": "1.1.0", "module": "inputs", "file_name": "text"},
- {"version": "1.1.1", "module": "inputs", "file_name": "text"},
- ]
diff --git a/src/backend/tests/unit/components/knowledge_bases/test_ingestion.py b/src/backend/tests/unit/components/knowledge_bases/test_ingestion.py
deleted file mode 100644
index c97e4a764ab9..000000000000
--- a/src/backend/tests/unit/components/knowledge_bases/test_ingestion.py
+++ /dev/null
@@ -1,387 +0,0 @@
-import json
-import uuid
-from unittest.mock import AsyncMock, MagicMock, patch
-
-import pytest
-from langflow.base.knowledge_bases.knowledge_base_utils import get_knowledge_bases
-from langflow.components.knowledge_bases.ingestion import KnowledgeIngestionComponent
-from langflow.schema.data import Data
-from langflow.schema.dataframe import DataFrame
-
-from tests.base import ComponentTestBaseWithoutClient
-
-
-class TestKnowledgeIngestionComponent(ComponentTestBaseWithoutClient):
- @pytest.fixture
- def component_class(self):
- """Return the component class to test."""
- return KnowledgeIngestionComponent
-
- @pytest.fixture(autouse=True)
- def mock_knowledge_base_path(self, tmp_path):
- """Mock the knowledge base root path directly."""
- with patch("langflow.components.knowledge_bases.ingestion.KNOWLEDGE_BASES_ROOT_PATH", tmp_path):
- yield
-
- class MockUser:
- def __init__(self, user_id):
- self.id = user_id
- self.username = "langflow"
-
- @pytest.fixture
- def mock_user_data(self):
- """Create mock user data that persists for the test function."""
- mock_uuid = uuid.uuid4()
- mock_user = self.MockUser(mock_uuid)
- return {"user_id": mock_uuid, "user": mock_user.username, "user_obj": mock_user}
-
- @pytest.fixture(autouse=True)
- def setup_mocks(self, mock_user_data):
- """Mock the component's user_id attribute and User object."""
- with (
- patch.object(KnowledgeIngestionComponent, "user_id", mock_user_data["user_id"]),
- patch(
- "langflow.components.knowledge_bases.ingestion.get_user_by_id",
- new_callable=AsyncMock,
- return_value=mock_user_data["user_obj"],
- ),
- patch(
- "langflow.base.knowledge_bases.knowledge_base_utils.get_user_by_id",
- new_callable=AsyncMock,
- return_value=mock_user_data["user_obj"],
- ),
- ):
- yield
-
- @pytest.fixture
- def mock_user_id(self, mock_user_data):
- """Get the mock user data."""
- return {"user_id": mock_user_data["user_id"], "user": mock_user_data["user"]}
-
- @pytest.fixture
- def default_kwargs(self, tmp_path, mock_user_id):
- """Return default kwargs for component instantiation."""
- # Create a sample DataFrame
- data_df = DataFrame(
- {"text": ["Sample text 1", "Sample text 2"], "title": ["Title 1", "Title 2"], "category": ["cat1", "cat2"]}
- )
-
- # Create column configuration
- column_config = [
- {"column_name": "text", "vectorize": True, "identifier": False},
- {"column_name": "title", "vectorize": False, "identifier": False},
- {"column_name": "category", "vectorize": False, "identifier": True},
- ]
-
- # Create knowledge base directory
- kb_name = "test_kb"
- kb_path = tmp_path / mock_user_id["user"] / kb_name
- kb_path.mkdir(parents=True, exist_ok=True)
-
- # Create embedding metadata file
- metadata = {
- "embedding_provider": "HuggingFace",
- "embedding_model": "sentence-transformers/all-MiniLM-L6-v2",
- "api_key": None,
- "api_key_used": False,
- "chunk_size": 1000,
- "created_at": "2024-01-01T00:00:00Z",
- }
- (kb_path / "embedding_metadata.json").write_text(json.dumps(metadata))
-
- return {
- "knowledge_base": kb_name,
- "input_df": data_df,
- "column_config": column_config,
- "chunk_size": 1000,
- "kb_root_path": str(tmp_path),
- "api_key": None,
- "allow_duplicates": False,
- "silent_errors": False,
- }
-
- @pytest.fixture
- def file_names_mapping(self):
- """Return file names mapping for version testing."""
- # This is a new component, so it doesn't exist in older versions
- return []
-
- def test_validate_column_config_valid(self, component_class, default_kwargs):
- """Test column configuration validation with valid config."""
- component = component_class(**default_kwargs)
- data_df = default_kwargs["input_df"]
-
- config_list = component._validate_column_config(data_df)
-
- assert len(config_list) == 3
- assert config_list[0]["column_name"] == "text"
- assert config_list[0]["vectorize"] is True
-
- def test_validate_column_config_invalid_column(self, component_class, default_kwargs):
- """Test column configuration validation with invalid column name."""
- # Modify column config to include non-existent column
- invalid_config = [{"column_name": "nonexistent", "vectorize": True, "identifier": False}]
- default_kwargs["column_config"] = invalid_config
-
- # Instantiate the component with the modified config
- component = component_class(**default_kwargs)
- data_df = default_kwargs["input_df"]
-
- # Should raise ValueError since column does not exist in DataFrame
- with pytest.raises(ValueError, match="Column 'nonexistent' not found in DataFrame"):
- component._validate_column_config(data_df)
-
- def test_get_embedding_provider(self, component_class, default_kwargs):
- """Test embedding provider detection."""
- component = component_class(**default_kwargs)
-
- # Test OpenAI provider
- assert component._get_embedding_provider("text-embedding-ada-002") == "OpenAI"
-
- # Test HuggingFace provider
- assert component._get_embedding_provider("sentence-transformers/all-MiniLM-L6-v2") == "HuggingFace"
-
- # Test Cohere provider
- assert component._get_embedding_provider("embed-english-v3.0") == "Cohere"
-
- # Test custom provider
- assert component._get_embedding_provider("custom-model") == "Custom"
-
- @patch("langchain_huggingface.HuggingFaceEmbeddings")
- def test_build_embeddings_huggingface(self, mock_hf_embeddings, component_class, default_kwargs):
- """Test building HuggingFace embeddings."""
- component = component_class(**default_kwargs)
-
- mock_embeddings = MagicMock()
- mock_hf_embeddings.return_value = mock_embeddings
-
- result = component._build_embeddings("sentence-transformers/all-MiniLM-L6-v2", None)
-
- mock_hf_embeddings.assert_called_once_with(model="sentence-transformers/all-MiniLM-L6-v2")
- assert result == mock_embeddings
-
- @patch("langchain_openai.OpenAIEmbeddings")
- def test_build_embeddings_openai(self, mock_openai_embeddings, component_class, default_kwargs):
- """Test building OpenAI embeddings."""
- component = component_class(**default_kwargs)
-
- mock_embeddings = MagicMock()
- mock_openai_embeddings.return_value = mock_embeddings
-
- result = component._build_embeddings("text-embedding-ada-002", "test-api-key")
-
- mock_openai_embeddings.assert_called_once_with(
- model="text-embedding-ada-002", api_key="test-api-key", chunk_size=1000
- )
- assert result == mock_embeddings
-
- def test_build_embeddings_openai_no_key(self, component_class, default_kwargs):
- """Test building OpenAI embeddings without API key raises error."""
- component = component_class(**default_kwargs)
-
- with pytest.raises(ValueError, match="OpenAI API key is required"):
- component._build_embeddings("text-embedding-ada-002", None)
-
- @patch("langchain_cohere.CohereEmbeddings")
- def test_build_embeddings_cohere(self, mock_cohere_embeddings, component_class, default_kwargs):
- """Test building Cohere embeddings."""
- component = component_class(**default_kwargs)
-
- mock_embeddings = MagicMock()
- mock_cohere_embeddings.return_value = mock_embeddings
-
- result = component._build_embeddings("embed-english-v3.0", "test-api-key")
-
- mock_cohere_embeddings.assert_called_once_with(model="embed-english-v3.0", cohere_api_key="test-api-key")
- assert result == mock_embeddings
-
- def test_build_embeddings_cohere_no_key(self, component_class, default_kwargs):
- """Test building Cohere embeddings without API key raises error."""
- component = component_class(**default_kwargs)
-
- with pytest.raises(ValueError, match="Cohere API key is required"):
- component._build_embeddings("embed-english-v3.0", None)
-
- def test_build_embeddings_custom_not_supported(self, component_class, default_kwargs):
- """Test building custom embeddings raises NotImplementedError."""
- component = component_class(**default_kwargs)
-
- with pytest.raises(NotImplementedError, match="Custom embedding models not yet supported"):
- component._build_embeddings("custom-model", "test-key")
-
- @patch("langflow.components.knowledge_bases.ingestion.get_settings_service")
- @patch("langflow.components.knowledge_bases.ingestion.encrypt_api_key")
- def test_build_embedding_metadata(self, mock_encrypt, mock_get_settings, component_class, default_kwargs):
- """Test building embedding metadata."""
- component = component_class(**default_kwargs)
-
- mock_settings = MagicMock()
- mock_get_settings.return_value = mock_settings
- mock_encrypt.return_value = "encrypted_key"
-
- metadata = component._build_embedding_metadata("sentence-transformers/all-MiniLM-L6-v2", "test-key")
-
- assert metadata["embedding_provider"] == "HuggingFace"
- assert metadata["embedding_model"] == "sentence-transformers/all-MiniLM-L6-v2"
- assert metadata["api_key"] == "encrypted_key"
- assert metadata["api_key_used"] is True
- assert metadata["chunk_size"] == 1000
- assert "created_at" in metadata
-
- def test_build_column_metadata(self, component_class, default_kwargs):
- """Test building column metadata."""
- component = component_class(**default_kwargs)
- data_df = default_kwargs["input_df"]
- config_list = default_kwargs["column_config"]
-
- metadata = component._build_column_metadata(config_list, data_df)
-
- assert metadata["total_columns"] == 3
- assert metadata["mapped_columns"] == 3
- assert metadata["unmapped_columns"] == 0
- assert len(metadata["columns"]) == 3
- assert "text" in metadata["summary"]["vectorized_columns"]
- assert "category" in metadata["summary"]["identifier_columns"]
-
- async def test_convert_df_to_data_objects(self, component_class, default_kwargs):
- """Test converting DataFrame to Data objects."""
- component = component_class(**default_kwargs)
- data_df = default_kwargs["input_df"]
- config_list = default_kwargs["column_config"]
-
- # Mock Chroma to avoid actual vector store operations
- with patch("langflow.components.knowledge_bases.ingestion.Chroma") as mock_chroma:
- mock_chroma_instance = MagicMock()
- mock_chroma_instance.get.return_value = {"metadatas": []}
- mock_chroma.return_value = mock_chroma_instance
-
- data_objects = await component._convert_df_to_data_objects(data_df, config_list)
-
- assert len(data_objects) == 2
- assert all(isinstance(obj, Data) for obj in data_objects)
-
- # Check first data object
- first_obj = data_objects[0]
- assert "text" in first_obj.data
- assert "title" in first_obj.data
- assert "category" in first_obj.data
- assert "_id" in first_obj.data
-
- async def test_convert_df_to_data_objects_no_duplicates(self, component_class, default_kwargs):
- """Test converting DataFrame to Data objects with duplicate prevention."""
- default_kwargs["allow_duplicates"] = False
- component = component_class(**default_kwargs)
- data_df = default_kwargs["input_df"]
- config_list = default_kwargs["column_config"]
-
- # Mock Chroma with existing hash
- with patch("langflow.components.knowledge_bases.ingestion.Chroma") as mock_chroma:
- # Simulate existing document with same hash
- existing_hash = "some_existing_hash"
- mock_chroma_instance = MagicMock()
- mock_chroma_instance.get.return_value = {"metadatas": [{"_id": existing_hash}]}
- mock_chroma.return_value = mock_chroma_instance
-
- # Mock hashlib to return the existing hash for first row
- with patch("langflow.components.knowledge_bases.ingestion.hashlib.sha256") as mock_hash:
- mock_hash_obj = MagicMock()
- mock_hash_obj.hexdigest.side_effect = [existing_hash, "different_hash"]
- mock_hash.return_value = mock_hash_obj
-
- data_objects = await component._convert_df_to_data_objects(data_df, config_list)
-
- # Should only return one object (second row) since first is duplicate
- assert len(data_objects) == 1
-
- def test_is_valid_collection_name(self, component_class, default_kwargs):
- """Test collection name validation."""
- component = component_class(**default_kwargs)
-
- # Valid names
- assert component.is_valid_collection_name("valid_name") is True
- assert component.is_valid_collection_name("valid-name") is True
- assert component.is_valid_collection_name("ValidName123") is True
-
- # Invalid names
- assert component.is_valid_collection_name("ab") is False # Too short
- assert component.is_valid_collection_name("a" * 64) is False # Too long
- assert component.is_valid_collection_name("_invalid") is False # Starts with underscore
- assert component.is_valid_collection_name("invalid_") is False # Ends with underscore
- assert component.is_valid_collection_name("invalid@name") is False # Invalid character
-
- @patch("langflow.components.knowledge_bases.ingestion.json.loads")
- @patch("langflow.components.knowledge_bases.ingestion.decrypt_api_key")
- async def test_build_kb_info_success(self, mock_decrypt, mock_json_loads, component_class, default_kwargs):
- """Test successful KB info building."""
- component = component_class(**default_kwargs)
-
- # Mock metadata loading
- mock_json_loads.return_value = {
- "embedding_model": "sentence-transformers/all-MiniLM-L6-v2",
- "api_key": "encrypted_key",
- }
- mock_decrypt.return_value = "decrypted_key"
-
- # Mock vector store creation
- with patch.object(component, "_create_vector_store"), patch.object(component, "_save_kb_files"):
- result = await component.build_kb_info()
-
- assert isinstance(result, Data)
- assert "kb_id" in result.data
- assert "kb_name" in result.data
- assert "rows" in result.data
- assert result.data["rows"] == 2
-
- async def test_get_knowledge_bases(self, tmp_path, mock_user_id):
- """Test getting list of knowledge bases."""
- # Create additional test directories
- (tmp_path / mock_user_id["user"] / "kb1").mkdir(parents=True, exist_ok=True)
- (tmp_path / mock_user_id["user"] / "kb2").mkdir(parents=True, exist_ok=True)
- (tmp_path / mock_user_id["user"] / ".hidden").mkdir(parents=True, exist_ok=True) # Should be ignored
-
- kb_list = await get_knowledge_bases(tmp_path, user_id=mock_user_id["user_id"])
-
- assert "test_kb" in kb_list
- assert "kb1" in kb_list
- assert "kb2" in kb_list
- assert ".hidden" not in kb_list
-
- async def test_update_build_config_new_kb(self, component_class, default_kwargs):
- """Test updating build config for new knowledge base creation."""
- component = component_class(**default_kwargs)
-
- build_config = {"knowledge_base": {"value": None, "options": []}}
-
- field_value = {
- "01_new_kb_name": "new_test_kb",
- "02_embedding_model": "sentence-transformers/all-MiniLM-L6-v2",
- "03_api_key": "abc123", # Mock API key
- }
-
- # Mock embedding validation
- with (
- patch.object(component, "_build_embeddings") as mock_build_emb,
- patch.object(component, "_save_embedding_metadata"),
- ):
- mock_embeddings = MagicMock()
- mock_embeddings.embed_query.return_value = [0.1, 0.2, 0.3]
- mock_build_emb.return_value = mock_embeddings
-
- result = await component.update_build_config(build_config, field_value, "knowledge_base")
-
- assert result["knowledge_base"]["value"] == "new_test_kb"
- assert "new_test_kb" in result["knowledge_base"]["options"]
-
- async def test_update_build_config_invalid_kb_name(self, component_class, default_kwargs):
- """Test updating build config with invalid KB name."""
- component = component_class(**default_kwargs)
-
- build_config = {"knowledge_base": {"value": None, "options": []}}
- field_value = {
- "01_new_kb_name": "invalid@name", # Invalid character
- "02_embedding_model": "sentence-transformers/all-MiniLM-L6-v2",
- "03_api_key": None,
- }
-
- with pytest.raises(ValueError, match="Invalid knowledge base name"):
- await component.update_build_config(build_config, field_value, "knowledge_base")
diff --git a/src/backend/tests/unit/components/knowledge_bases/test_retrieval.py b/src/backend/tests/unit/components/knowledge_bases/test_retrieval.py
deleted file mode 100644
index 1f46facdd0c1..000000000000
--- a/src/backend/tests/unit/components/knowledge_bases/test_retrieval.py
+++ /dev/null
@@ -1,396 +0,0 @@
-import contextlib
-import json
-import uuid
-from pathlib import Path
-from unittest.mock import AsyncMock, MagicMock, patch
-
-import pytest
-from langflow.base.knowledge_bases.knowledge_base_utils import get_knowledge_bases
-from langflow.components.knowledge_bases.retrieval import KnowledgeRetrievalComponent
-from pydantic import SecretStr
-
-from tests.base import ComponentTestBaseWithoutClient
-
-
-class TestKnowledgeRetrievalComponent(ComponentTestBaseWithoutClient):
- @pytest.fixture
- def component_class(self):
- """Return the component class to test."""
- return KnowledgeRetrievalComponent
-
- @pytest.fixture(autouse=True)
- def mock_knowledge_base_path(self, tmp_path):
- """Mock the knowledge base root path directly."""
- with patch("langflow.components.knowledge_bases.retrieval.KNOWLEDGE_BASES_ROOT_PATH", tmp_path):
- yield
-
- class MockUser:
- def __init__(self, user_id):
- self.id = user_id
- self.username = "langflow"
-
- @pytest.fixture
- def mock_user_data(self):
- """Create mock user data that persists for the test function."""
- mock_uuid = uuid.uuid4()
- mock_user = self.MockUser(mock_uuid)
- return {"user_id": mock_uuid, "user": mock_user.username, "user_obj": mock_user}
-
- @pytest.fixture(autouse=True)
- def setup_mocks(self, mock_user_data):
- """Mock the component's user_id attribute and User object."""
- with (
- patch.object(KnowledgeRetrievalComponent, "user_id", mock_user_data["user_id"]),
- patch(
- "langflow.components.knowledge_bases.retrieval.get_user_by_id",
- new_callable=AsyncMock,
- return_value=mock_user_data["user_obj"],
- ),
- patch(
- "langflow.base.knowledge_bases.knowledge_base_utils.get_user_by_id",
- new_callable=AsyncMock,
- return_value=mock_user_data["user_obj"],
- ),
- ):
- yield
-
- @pytest.fixture
- def mock_user_id(self, mock_user_data):
- """Get the mock user data."""
- return {"user_id": mock_user_data["user_id"], "user": mock_user_data["user"]}
-
- @pytest.fixture
- def default_kwargs(self, tmp_path, mock_user_id):
- """Return default kwargs for component instantiation."""
- # Create knowledge base directory structure
- kb_name = "test_kb"
- kb_path = tmp_path / mock_user_id["user"] / kb_name
- kb_path.mkdir(parents=True, exist_ok=True)
-
- # Create embedding metadata file
- metadata = {
- "embedding_provider": "HuggingFace",
- "embedding_model": "sentence-transformers/all-MiniLM-L6-v2",
- "api_key": None,
- "api_key_used": False,
- "chunk_size": 1000,
- "created_at": "2024-01-01T00:00:00Z",
- }
- (kb_path / "embedding_metadata.json").write_text(json.dumps(metadata))
-
- return {
- "knowledge_base": kb_name,
- "kb_root_path": str(tmp_path),
- "api_key": None,
- "search_query": "",
- "top_k": 5,
- "include_embeddings": True,
- }
-
- @pytest.fixture
- def file_names_mapping(self):
- """Return file names mapping for version testing."""
- # This is a new component, so it doesn't exist in older versions
- return []
-
- async def test_get_knowledge_bases(self, tmp_path, mock_user_id):
- """Test getting list of knowledge bases."""
- # Create additional test directories
- (tmp_path / mock_user_id["user"] / "kb1").mkdir(parents=True, exist_ok=True)
- (tmp_path / mock_user_id["user"] / "kb2").mkdir(parents=True, exist_ok=True)
- (tmp_path / mock_user_id["user"] / ".hidden").mkdir(parents=True, exist_ok=True) # Should be ignored
-
- kb_list = await get_knowledge_bases(tmp_path, user_id=mock_user_id["user_id"])
-
- assert "test_kb" in kb_list
- assert "kb1" in kb_list
- assert "kb2" in kb_list
- assert ".hidden" not in kb_list
-
- async def test_update_build_config(self, component_class, default_kwargs, tmp_path, mock_user_id):
- """Test updating build configuration."""
- component = component_class(**default_kwargs)
-
- # Create additional KB directories
- (tmp_path / mock_user_id["user"] / "kb1").mkdir(parents=True, exist_ok=True)
- (tmp_path / mock_user_id["user"] / "kb2").mkdir(parents=True, exist_ok=True)
-
- build_config = {"knowledge_base": {"value": "test_kb", "options": []}}
-
- result = await component.update_build_config(build_config, None, "knowledge_base")
-
- assert "test_kb" in result["knowledge_base"]["options"]
- assert "kb1" in result["knowledge_base"]["options"]
- assert "kb2" in result["knowledge_base"]["options"]
-
- async def test_update_build_config_invalid_kb(self, component_class, default_kwargs):
- """Test updating build config when selected KB is not available."""
- component = component_class(**default_kwargs)
-
- build_config = {"knowledge_base": {"value": "nonexistent_kb", "options": ["test_kb"]}}
-
- result = await component.update_build_config(build_config, None, "knowledge_base")
-
- assert result["knowledge_base"]["value"] is None
-
- def test_get_kb_metadata_success(self, component_class, default_kwargs, mock_user_id):
- """Test successful metadata loading."""
- component = component_class(**default_kwargs)
- kb_path = Path(default_kwargs["kb_root_path"]) / mock_user_id["user"] / default_kwargs["knowledge_base"]
-
- with patch("langflow.components.knowledge_bases.retrieval.decrypt_api_key") as mock_decrypt:
- mock_decrypt.return_value = "decrypted_key"
-
- metadata = component._get_kb_metadata(kb_path)
-
- assert metadata["embedding_provider"] == "HuggingFace"
- assert metadata["embedding_model"] == "sentence-transformers/all-MiniLM-L6-v2"
- assert "chunk_size" in metadata
-
- def test_get_kb_metadata_no_file(self, component_class, default_kwargs, tmp_path, mock_user_id):
- """Test metadata loading when file doesn't exist."""
- component = component_class(**default_kwargs)
- nonexistent_path = tmp_path / mock_user_id["user"] / "nonexistent"
- nonexistent_path.mkdir(parents=True, exist_ok=True)
-
- metadata = component._get_kb_metadata(nonexistent_path)
-
- assert metadata == {}
-
- def test_get_kb_metadata_json_error(self, component_class, default_kwargs, tmp_path, mock_user_id):
- """Test metadata loading with invalid JSON."""
- component = component_class(**default_kwargs)
- kb_path = tmp_path / mock_user_id["user"] / "invalid_json_kb"
- kb_path.mkdir(parents=True, exist_ok=True)
-
- # Create invalid JSON file
- (kb_path / "embedding_metadata.json").write_text("invalid json content")
-
- metadata = component._get_kb_metadata(kb_path)
-
- assert metadata == {}
-
- def test_get_kb_metadata_decrypt_error(self, component_class, default_kwargs, tmp_path, mock_user_id):
- """Test metadata loading with decryption error."""
- component = component_class(**default_kwargs)
- kb_path = tmp_path / mock_user_id["user"] / "decrypt_error_kb"
- kb_path.mkdir(parents=True, exist_ok=True)
-
- # Create metadata with encrypted key
- metadata = {
- "embedding_provider": "OpenAI",
- "embedding_model": "text-embedding-ada-002",
- "api_key": "encrypted_key",
- "chunk_size": 1000,
- }
- (kb_path / "embedding_metadata.json").write_text(json.dumps(metadata))
-
- with patch("langflow.components.knowledge_bases.retrieval.decrypt_api_key") as mock_decrypt:
- mock_decrypt.side_effect = ValueError("Decryption failed")
-
- result = component._get_kb_metadata(kb_path)
-
- assert result["api_key"] is None
-
- @patch("langchain_huggingface.HuggingFaceEmbeddings")
- def test_build_embeddings_huggingface(self, mock_hf_embeddings, component_class, default_kwargs):
- """Test building HuggingFace embeddings."""
- component = component_class(**default_kwargs)
-
- metadata = {
- "embedding_provider": "HuggingFace",
- "embedding_model": "sentence-transformers/all-MiniLM-L6-v2",
- "chunk_size": 1000,
- }
-
- mock_embeddings = MagicMock()
- mock_hf_embeddings.return_value = mock_embeddings
-
- result = component._build_embeddings(metadata)
-
- mock_hf_embeddings.assert_called_once_with(model="sentence-transformers/all-MiniLM-L6-v2")
- assert result == mock_embeddings
-
- @patch("langchain_openai.OpenAIEmbeddings")
- def test_build_embeddings_openai(self, mock_openai_embeddings, component_class, default_kwargs):
- """Test building OpenAI embeddings."""
- component = component_class(**default_kwargs)
-
- metadata = {
- "embedding_provider": "OpenAI",
- "embedding_model": "text-embedding-ada-002",
- "api_key": "test-api-key",
- "chunk_size": 1000,
- }
-
- mock_embeddings = MagicMock()
- mock_openai_embeddings.return_value = mock_embeddings
-
- result = component._build_embeddings(metadata)
-
- mock_openai_embeddings.assert_called_once_with(
- model="text-embedding-ada-002", api_key="test-api-key", chunk_size=1000
- )
- assert result == mock_embeddings
-
- def test_build_embeddings_openai_no_key(self, component_class, default_kwargs):
- """Test building OpenAI embeddings without API key raises error."""
- component = component_class(**default_kwargs)
-
- metadata = {
- "embedding_provider": "OpenAI",
- "embedding_model": "text-embedding-ada-002",
- "api_key": None,
- "chunk_size": 1000,
- }
-
- with pytest.raises(ValueError, match="OpenAI API key is required"):
- component._build_embeddings(metadata)
-
- @patch("langchain_cohere.CohereEmbeddings")
- def test_build_embeddings_cohere(self, mock_cohere_embeddings, component_class, default_kwargs):
- """Test building Cohere embeddings."""
- component = component_class(**default_kwargs)
-
- metadata = {
- "embedding_provider": "Cohere",
- "embedding_model": "embed-english-v3.0",
- "api_key": "test-api-key",
- "chunk_size": 1000,
- }
-
- mock_embeddings = MagicMock()
- mock_cohere_embeddings.return_value = mock_embeddings
-
- result = component._build_embeddings(metadata)
-
- mock_cohere_embeddings.assert_called_once_with(model="embed-english-v3.0", cohere_api_key="test-api-key")
- assert result == mock_embeddings
-
- def test_build_embeddings_cohere_no_key(self, component_class, default_kwargs):
- """Test building Cohere embeddings without API key raises error."""
- component = component_class(**default_kwargs)
-
- metadata = {
- "embedding_provider": "Cohere",
- "embedding_model": "embed-english-v3.0",
- "api_key": None,
- "chunk_size": 1000,
- }
-
- with pytest.raises(ValueError, match="Cohere API key is required"):
- component._build_embeddings(metadata)
-
- def test_build_embeddings_custom_not_supported(self, component_class, default_kwargs):
- """Test building custom embeddings raises NotImplementedError."""
- component = component_class(**default_kwargs)
-
- metadata = {"embedding_provider": "Custom", "embedding_model": "custom-model", "api_key": "test-key"}
-
- with pytest.raises(NotImplementedError, match="Custom embedding models not yet supported"):
- component._build_embeddings(metadata)
-
- def test_build_embeddings_unsupported_provider(self, component_class, default_kwargs):
- """Test building embeddings with unsupported provider raises NotImplementedError."""
- component = component_class(**default_kwargs)
-
- metadata = {"embedding_provider": "UnsupportedProvider", "embedding_model": "some-model", "api_key": "test-key"}
-
- with pytest.raises(NotImplementedError, match="Embedding provider 'UnsupportedProvider' is not supported"):
- component._build_embeddings(metadata)
-
- def test_build_embeddings_with_user_api_key(self, component_class, default_kwargs):
- """Test that user-provided API key overrides stored one."""
- # Use a real SecretStr object instead of a mock
- mock_secret = SecretStr("user-provided-key")
-
- default_kwargs["api_key"] = mock_secret
- component = component_class(**default_kwargs)
-
- metadata = {
- "embedding_provider": "OpenAI",
- "embedding_model": "text-embedding-ada-002",
- "api_key": "stored-key", # This should be overridden by the user-provided key
- "chunk_size": 1000,
- }
-
- with patch("langchain_openai.OpenAIEmbeddings") as mock_openai:
- mock_embeddings = MagicMock()
- mock_openai.return_value = mock_embeddings
-
- component._build_embeddings(metadata)
-
- # The user-provided key should override the stored key in metadata
- mock_openai.assert_called_once_with(
- model="text-embedding-ada-002",
- api_key="user-provided-key", # Should use the user-provided key, not "stored-key"
- chunk_size=1000,
- )
-
- async def test_retrieve_data_no_metadata(self, component_class, default_kwargs, tmp_path, mock_user_id):
- """Test retrieving data when metadata is missing."""
- # Remove metadata file
- kb_path = tmp_path / mock_user_id["user"] / default_kwargs["knowledge_base"]
- metadata_file = kb_path / "embedding_metadata.json"
- if metadata_file.exists():
- metadata_file.unlink()
-
- component = component_class(**default_kwargs)
-
- with pytest.raises(ValueError, match="Metadata not found for knowledge base"):
- await component.retrieve_data()
-
- def test_retrieve_data_path_construction(self, component_class, default_kwargs):
- """Test that retrieve_data constructs the correct paths."""
- component = component_class(**default_kwargs)
-
- # Test that the component correctly builds the KB path
-
- assert component.kb_root_path == default_kwargs["kb_root_path"]
- assert component.knowledge_base == default_kwargs["knowledge_base"]
-
- # Test that paths are correctly expanded
- expanded_path = Path(component.kb_root_path).expanduser()
- assert expanded_path.exists() # tmp_path should exist
-
- # Verify method exists with correct parameters
- assert hasattr(component, "retrieve_data")
- assert hasattr(component, "search_query")
- assert hasattr(component, "top_k")
- assert hasattr(component, "include_embeddings")
-
- async def test_retrieve_data_method_exists(self, component_class, default_kwargs):
- """Test that retrieve_data method exists and can be called."""
- component = component_class(**default_kwargs)
-
- # Just verify the method exists and has the right signature
- assert hasattr(component, "retrieve_data"), "Component should have retrieve_data method"
-
- # Mock all external calls to avoid integration issues
- with (
- patch.object(component, "_get_kb_metadata") as mock_get_metadata,
- patch.object(component, "_build_embeddings") as mock_build_embeddings,
- patch("langchain_chroma.Chroma"),
- ):
- mock_get_metadata.return_value = {"embedding_provider": "HuggingFace", "embedding_model": "test-model"}
- mock_build_embeddings.return_value = MagicMock()
-
- # This is a unit test focused on the component's internal logic
- with contextlib.suppress(Exception):
- await component.retrieve_data()
-
- # Verify internal methods were called
- mock_get_metadata.assert_called_once()
- mock_build_embeddings.assert_called_once()
-
- def test_include_embeddings_parameter(self, component_class, default_kwargs):
- """Test that include_embeddings parameter is properly set."""
- # Test with embeddings enabled
- default_kwargs["include_embeddings"] = True
- component = component_class(**default_kwargs)
- assert component.include_embeddings is True
-
- # Test with embeddings disabled
- default_kwargs["include_embeddings"] = False
- component = component_class(**default_kwargs)
- assert component.include_embeddings is False
diff --git a/src/backend/tests/unit/components/languagemodels/test_baidu_qianfan.py b/src/backend/tests/unit/components/languagemodels/test_baidu_qianfan.py
deleted file mode 100644
index 570adbffe7ca..000000000000
--- a/src/backend/tests/unit/components/languagemodels/test_baidu_qianfan.py
+++ /dev/null
@@ -1,129 +0,0 @@
-import os
-
-import pytest
-from langchain.schema import HumanMessage
-from langchain_community.chat_models.baidu_qianfan_endpoint import QianfanChatEndpoint
-from qianfan.errors import APIError
-
-from lfx.components.baidu.baidu_qianfan_chat import QianfanChatEndpointComponent
-
-
-@pytest.fixture
-def qianfan_credentials():
- """Fixture to get Qianfan credentials from environment variables."""
- ak = os.getenv("QIANFAN_AK")
- sk = os.getenv("QIANFAN_SK")
- if not ak or not sk:
- pytest.skip("QIANFAN_AK and QIANFAN_SK environment variables are required.")
- return {"ak": ak, "sk": sk}
-
-
-@pytest.mark.api_key_required
-def test_none_endpoint(qianfan_credentials):
- """Test that None endpoint does not raise an exception."""
- component = QianfanChatEndpointComponent(
- model="ERNIE-Bot-turbo-AI",
- qianfan_ak=qianfan_credentials["ak"],
- qianfan_sk=qianfan_credentials["sk"],
- endpoint=None,
- temperature=0.7,
- )
- # Building and invoking the model with endpoint=None should not raise.
- model = component.build_model()
- messages = [HumanMessage(content="Say 'Hello' in Chinese")]
- response = model.invoke(messages)
- assert response is not None
- assert len(str(response)) > 0
-
-
-@pytest.mark.api_key_required
-def test_empty_str_endpoint(qianfan_credentials):
- """Test that empty string endpoint does not raise an exception."""
- component = QianfanChatEndpointComponent(
- model="ERNIE-Bot",
- qianfan_ak=qianfan_credentials["ak"],
- qianfan_sk=qianfan_credentials["sk"],
- endpoint="",
- temperature=0.7,
- )
-
- model = component.build_model()
- messages = [HumanMessage(content="Say 'Hello' in Chinese")]
- response = model.invoke(messages)
- assert response is not None
- assert len(str(response)) > 0
-
-
-@pytest.mark.api_key_required
-def test_invalid_endpoint(qianfan_credentials):
- """Test that invalid endpoint raises an exception."""
- component = QianfanChatEndpointComponent(
- model="ERNIE-Bot",
- qianfan_ak=qianfan_credentials["ak"],
- qianfan_sk=qianfan_credentials["sk"],
- endpoint="https://invalid.endpoint.example",
- temperature=0.7,
- )
-
- model = component.build_model()
- messages = [HumanMessage(content="Say 'Hello' in Chinese")]
-
- with pytest.raises(APIError):
- model.invoke(messages)
-
-
-@pytest.mark.api_key_required
-@pytest.mark.parametrize(
- "model_name",
- [
- "EB-turbo-AppBuilder",
- "Llama-2-70b-chat",
- "ERNIE-Bot-turbo-AI",
- "ERNIE-Lite-8K-0308",
- "ERNIE-Speed",
- "Qianfan-Chinese-Llama-2-13B",
- "ERNIE-3.5-8K",
- "BLOOMZ-7B",
- "Qianfan-Chinese-Llama-2-7B",
- "XuanYuan-70B-Chat-4bit",
- "AquilaChat-7B",
- "ERNIE-Bot-4",
- "Llama-2-13b-chat",
- "ChatGLM2-6B-32K",
- "ERNIE-Bot",
- "ERNIE-Speed-128k",
- "ERNIE-4.0-8K",
- "Qianfan-BLOOMZ-7B-compressed",
- "ERNIE Speed",
- "Llama-2-7b-chat",
- "Mixtral-8x7B-Instruct",
- "ERNIE 3.5",
- "ERNIE Speed-AppBuilder",
- "ERNIE-Speed-8K",
- "Yi-34B-Chat",
- ],
-)
-def test_qianfan_different_models(qianfan_credentials, model_name):
- """Test different Qianfan models with a simple prompt."""
- component = QianfanChatEndpointComponent(
- model=model_name,
- qianfan_ak=qianfan_credentials["ak"],
- qianfan_sk=qianfan_credentials["sk"],
- temperature=0.7,
- top_p=0.8,
- penalty_score=1.0,
- )
-
- # Build the model
- chat_model = component.build_model()
- assert isinstance(chat_model, QianfanChatEndpoint)
-
- # Test with a simple prompt
- messages = [HumanMessage(content="Say 'Hello' in Chinese")]
-
- try:
- response = chat_model.invoke(messages)
- assert response is not None
- assert len(str(response)) > 0
- except ValueError as e:
- pytest.fail(f"Model {model_name} failed with error: {e!s}")
diff --git a/src/backend/tests/unit/components/languagemodels/test_chatollama_component.py b/src/backend/tests/unit/components/languagemodels/test_chatollama_component.py
deleted file mode 100644
index c08419664e9b..000000000000
--- a/src/backend/tests/unit/components/languagemodels/test_chatollama_component.py
+++ /dev/null
@@ -1,197 +0,0 @@
-from unittest.mock import AsyncMock, MagicMock, patch
-
-import pytest
-from langchain_ollama import ChatOllama
-
-from lfx.components.ollama.ollama import ChatOllamaComponent
-from tests.base import ComponentTestBaseWithoutClient
-
-
-class TestChatOllamaComponent(ComponentTestBaseWithoutClient):
- @pytest.fixture
- def component_class(self):
- return ChatOllamaComponent
-
- @pytest.fixture
- def default_kwargs(self):
- return {
- "base_url": "http://localhost:8000",
- "model_name": "ollama-model",
- "temperature": 0.1,
- "format": "json",
- "metadata": {},
- "tags": "",
- "mirostat": "Disabled",
- "num_ctx": 2048,
- "num_gpu": 1,
- "num_thread": 4,
- "repeat_last_n": 64,
- "repeat_penalty": 1.1,
- "tfs_z": 1.0,
- "timeout": 30,
- "top_k": 40,
- "top_p": 0.9,
- "verbose": False,
- "tool_model_enabled": True,
- }
-
- @pytest.fixture
- def file_names_mapping(self):
- # Provide an empty list or the actual mapping if versioned files exist
- return []
-
- @patch("lfx.components.ollama.ollama.ChatOllama")
- async def test_build_model(self, mock_chat_ollama, component_class, default_kwargs):
- mock_instance = MagicMock()
- mock_chat_ollama.return_value = mock_instance
- component = component_class(**default_kwargs)
- model = component.build_model()
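- # With mirostat set to "Disabled", the component is expected to pass
- # mirostat=0 and inject the default metadata keywords asserted below.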
- mock_chat_ollama.assert_called_once_with(
- base_url="http://localhost:8000",
- model="ollama-model",
- mirostat=0,
- format="json",
- metadata={"keywords": ["model", "llm", "language model", "large language model"]},
- num_ctx=2048,
- num_gpu=1,
- num_thread=4,
- repeat_last_n=64,
- repeat_penalty=1.1,
- temperature=0.1,
- system="",
- tfs_z=1.0,
- timeout=30,
- top_k=40,
- top_p=0.9,
- verbose=False,
- template="",
- )
- assert model == mock_instance
-
- @patch("lfx.components.ollama.ollama.ChatOllama")
- async def test_build_model_missing_base_url(self, mock_chat_ollama, component_class, default_kwargs):
- # Make the mock raise an exception to simulate connection failure
- mock_chat_ollama.side_effect = Exception("connection error")
- component = component_class(**default_kwargs)
- component.base_url = None
- with pytest.raises(ValueError, match="Unable to connect to the Ollama API."):
- component.build_model()
-
- @pytest.mark.asyncio
- @patch("lfx.components.ollama.ollama.httpx.AsyncClient.post")
- @patch("lfx.components.ollama.ollama.httpx.AsyncClient.get")
- async def test_get_models_success(self, mock_get, mock_post):
- component = ChatOllamaComponent()
- mock_get_response = AsyncMock()
- mock_get_response.raise_for_status.return_value = None
- mock_get_response.json.return_value = {
- component.JSON_MODELS_KEY: [
- {component.JSON_NAME_KEY: "model1"},
- {component.JSON_NAME_KEY: "model2"},
- ]
- }
- mock_get.return_value = mock_get_response
-
- mock_post_response = AsyncMock()
- mock_post_response.raise_for_status.return_value = None
- mock_post_response.json.side_effect = [
- {component.JSON_CAPABILITIES_KEY: [component.DESIRED_CAPABILITY]},
- {component.JSON_CAPABILITIES_KEY: []},
- ]
- mock_post.return_value = mock_post_response
-
- base_url = "http://localhost:11434"
- result = await component.get_models(base_url)
- assert result == ["model1"]
- assert mock_get.call_count == 1
- assert mock_post.call_count == 2
-
- @pytest.mark.asyncio
- @patch("lfx.components.ollama.ollama.httpx.AsyncClient.get")
- async def test_get_models_failure(self, mock_get):
- import httpx
-
- component = ChatOllamaComponent()
- mock_get.side_effect = httpx.RequestError("Connection error", request=None)
- base_url = "http://localhost:11434"
- with pytest.raises(ValueError, match="Could not get model names from Ollama."):
- await component.get_models(base_url)
-
- @pytest.mark.asyncio
- async def test_update_build_config_mirostat_disabled(self):
- component = ChatOllamaComponent()
- build_config = {
- "mirostat_eta": {"advanced": False, "value": 0.1},
- "mirostat_tau": {"advanced": False, "value": 5},
- }
- field_value = "Disabled"
- field_name = "mirostat"
- updated_config = await component.update_build_config(build_config, field_value, field_name)
- assert updated_config["mirostat_eta"]["advanced"] is True
- assert updated_config["mirostat_tau"]["advanced"] is True
- assert updated_config["mirostat_eta"]["value"] is None
- assert updated_config["mirostat_tau"]["value"] is None
-
- @pytest.mark.asyncio
- async def test_update_build_config_mirostat_enabled(self):
- component = ChatOllamaComponent()
- build_config = {
- "mirostat_eta": {"advanced": False, "value": None},
- "mirostat_tau": {"advanced": False, "value": None},
- }
- field_value = "Mirostat 2.0"
- field_name = "mirostat"
- updated_config = await component.update_build_config(build_config, field_value, field_name)
- assert updated_config["mirostat_eta"]["advanced"] is False
- assert updated_config["mirostat_tau"]["advanced"] is False
- assert updated_config["mirostat_eta"]["value"] == 0.2
- assert updated_config["mirostat_tau"]["value"] == 10
-
- @patch("lfx.components.ollama.ollama.httpx.AsyncClient.get")
- @pytest.mark.asyncio
- async def test_update_build_config_model_name(self, mock_get):
- component = ChatOllamaComponent()
- mock_response = MagicMock()
- mock_response.json.return_value = {"models": [{"name": "model1"}, {"name": "model2"}]}
- mock_response.raise_for_status.return_value = None
- mock_get.return_value = mock_response
- build_config = {
- "base_url": {"load_from_db": False, "value": None},
- "model_name": {"options": []},
- }
- field_value = None
- field_name = "model_name"
- with pytest.raises(ValueError, match="No valid Ollama URL found"):
- await component.update_build_config(build_config, field_value, field_name)
-
- @pytest.mark.asyncio
- async def test_update_build_config_keep_alive(self):
- component = ChatOllamaComponent()
- build_config = {"keep_alive": {"value": None, "advanced": False}}
- field_value = "Keep"
- field_name = "keep_alive_flag"
- updated_config = await component.update_build_config(build_config, field_value, field_name)
- assert updated_config["keep_alive"]["value"] == "-1"
- assert updated_config["keep_alive"]["advanced"] is True
- field_value = "Immediately"
- updated_config = await component.update_build_config(build_config, field_value, field_name)
- assert updated_config["keep_alive"]["value"] == "0"
- assert updated_config["keep_alive"]["advanced"] is True
-
- @patch(
- "langchain_ollama.ChatOllama",
- return_value=ChatOllama(base_url="http://localhost:11434", model="llama3.1"),
- )
- def test_build_model_integration(self, _mock_chat_ollama): # noqa: PT019
- component = ChatOllamaComponent()
- component.base_url = "http://localhost:11434"
- component.model_name = "llama3.1"
- component.mirostat = "Mirostat 2.0"
- component.mirostat_eta = 0.2
- component.mirostat_tau = 10.0
- component.temperature = 0.2
- component.verbose = True
- model = component.build_model()
- assert isinstance(model, ChatOllama)
- assert model.base_url == "http://localhost:11434"
- assert model.model == "llama3.1"
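
A minimal sketch of the mirostat toggle the tests above pin down, assuming a dict-shaped build_config; the "Mirostat" defaults are an assumption, and this is a reconstruction from the assertions, not the actual ChatOllamaComponent code:

MIROSTAT_DEFAULTS = {"Mirostat": (0.1, 5), "Mirostat 2.0": (0.2, 10)}  # assumed defaults

async def update_build_config(build_config: dict, field_value: str, field_name: str) -> dict:
    if field_name == "mirostat":
        disabled = field_value == "Disabled"
        eta, tau = (None, None) if disabled else MIROSTAT_DEFAULTS[field_value]
        for key, value in (("mirostat_eta", eta), ("mirostat_tau", tau)):
            build_config[key]["advanced"] = disabled  # hide the fields when Mirostat is off
            build_config[key]["value"] = value
    return build_config
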
diff --git a/src/backend/tests/unit/components/languagemodels/test_deepseek.py b/src/backend/tests/unit/components/languagemodels/test_deepseek.py
deleted file mode 100644
index 5d57935ee48a..000000000000
--- a/src/backend/tests/unit/components/languagemodels/test_deepseek.py
+++ /dev/null
@@ -1,113 +0,0 @@
-from unittest.mock import MagicMock
-
-import pytest
-
-from lfx.components.deepseek.deepseek import DeepSeekModelComponent
-from lfx.custom.custom_component.component import Component
-from lfx.custom.utils import build_custom_component_template
-
-
-def test_deepseek_initialization():
- component = DeepSeekModelComponent()
- assert component.display_name == "DeepSeek"
- assert component.description == "Generate text using DeepSeek LLMs."
- assert component.icon == "DeepSeek"
-
-
-def test_deepseek_template():
- deepseek = DeepSeekModelComponent()
- component = Component(_code=deepseek._code)
- frontend_node, _ = build_custom_component_template(component)
-
- # Verify basic structure
- assert isinstance(frontend_node, dict)
-
- # Verify inputs
- assert "template" in frontend_node
- input_names = [input_["name"] for input_ in frontend_node["template"].values() if isinstance(input_, dict)]
-
- expected_inputs = [
- "max_tokens",
- "model_kwargs",
- "json_mode",
- "model_name",
- "api_base",
- "api_key",
- "temperature",
- "seed",
- ]
-
- for input_name in expected_inputs:
- assert input_name in input_names
-
-
-@pytest.fixture
-def mock_chat_openai(mocker):
- return mocker.patch("langchain_openai.ChatOpenAI")
-
-
-@pytest.mark.parametrize(
- ("temperature", "max_tokens"),
- [
- (0.5, 100),
- (1.0, 500),
- (1.5, 1000),
- ],
-)
-def test_deepseek_build_model(mock_chat_openai, temperature, max_tokens):
- component = DeepSeekModelComponent()
- component.temperature = temperature
- component.max_tokens = max_tokens
- component.api_key = "test-key"
-
- # Mock the ChatOpenAI instance
- mock_instance = MagicMock()
- mock_chat_openai.return_value = mock_instance
-
- model = component.build_model()
-
- # Verify ChatOpenAI was called with correct params
- mock_chat_openai.assert_called_once_with(
- max_tokens=max_tokens,
- model_kwargs={},
- model="deepseek-chat",
- base_url="https://api.deepseek.com",
- api_key="test-key",
- temperature=temperature,
- seed=1,
- streaming=False,
- )
- assert model == mock_instance
-
-
-def test_deepseek_get_models(mocker):
- component = DeepSeekModelComponent()
-
- # Mock requests.get
- mock_get = mocker.patch("requests.get")
- mock_response = MagicMock()
- mock_response.json.return_value = {"data": [{"id": "deepseek-chat"}, {"id": "deepseek-coder"}]}
- mock_get.return_value = mock_response
-
- # Test with API key
- component.api_key = "test-key"
- models = component.get_models()
- assert models == ["deepseek-chat", "deepseek-coder"]
-
- # Verify API call
- mock_get.assert_called_once_with(
- "https://api.deepseek.com/models",
- headers={"Authorization": "Bearer test-key", "Accept": "application/json"},
- timeout=10,
- )
-
-
-def test_deepseek_error_handling(mock_chat_openai):
- component = DeepSeekModelComponent()
- component.api_key = "invalid-key"
-
- # Mock ChatOpenAI to raise exception
- mock_chat_openai.side_effect = Exception("Invalid API key")
-
- with pytest.raises(Exception, match="Invalid API key"):
- component.build_model()
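
The get_models test above fixes the request shape against DeepSeek's OpenAI-compatible model-listing endpoint. A self-contained sketch of that lookup (function name and signature are illustrative, not the component's API):

import requests

def list_deepseek_models(api_key: str, base_url: str = "https://api.deepseek.com") -> list[str]:
    # Mirrors the call asserted in test_deepseek_get_models.
    response = requests.get(
        f"{base_url}/models",
        headers={"Authorization": f"Bearer {api_key}", "Accept": "application/json"},
        timeout=10,
    )
    response.raise_for_status()
    return [model["id"] for model in response.json()["data"]]
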
diff --git a/src/backend/tests/unit/components/languagemodels/test_huggingface.py b/src/backend/tests/unit/components/languagemodels/test_huggingface.py
deleted file mode 100644
index 4bec13dd54e4..000000000000
--- a/src/backend/tests/unit/components/languagemodels/test_huggingface.py
+++ /dev/null
@@ -1,46 +0,0 @@
-from lfx.components.huggingface.huggingface import DEFAULT_MODEL, HuggingFaceEndpointsComponent
-from lfx.inputs.inputs import DictInput, DropdownInput, FloatInput, IntInput, SecretStrInput, SliderInput, StrInput
-
-
-def test_huggingface_inputs():
- component = HuggingFaceEndpointsComponent()
- inputs = component.inputs
-
- # Define expected input types and their names
- expected_inputs = {
- "model_id": DropdownInput,
- "custom_model": StrInput,
- "max_new_tokens": IntInput,
- "top_k": IntInput,
- "top_p": FloatInput,
- "typical_p": FloatInput,
- "temperature": SliderInput,
- "repetition_penalty": FloatInput,
- "inference_endpoint": StrInput,
- "task": DropdownInput,
- "huggingfacehub_api_token": SecretStrInput,
- "model_kwargs": DictInput,
- "retry_attempts": IntInput,
- }
-
- # Check if all expected inputs are present and have correct type
- for name, input_type in expected_inputs.items():
- matching_inputs = [inp for inp in inputs if isinstance(inp, input_type) and inp.name == name]
- assert matching_inputs, f"Missing or incorrect input: {name} {input_type}"
-
- if name == "model_id":
- input_field = matching_inputs[0]
- assert input_field.value == DEFAULT_MODEL
- assert "custom" in input_field.options
- assert input_field.required is True
- assert input_field.real_time_refresh is True
- elif name == "custom_model":
- input_field = matching_inputs[0]
- assert input_field.show is False
- assert input_field.required is True
- elif name == "temperature":
- input_field = matching_inputs[0]
- assert input_field.value == 0.8
- assert input_field.range_spec.min == 0
- assert input_field.range_spec.max == 2
- assert input_field.range_spec.step == 0.01
diff --git a/src/backend/tests/unit/components/languagemodels/test_openai_model.py b/src/backend/tests/unit/components/languagemodels/test_openai_model.py
deleted file mode 100644
index dd4fd0c13476..000000000000
--- a/src/backend/tests/unit/components/languagemodels/test_openai_model.py
+++ /dev/null
@@ -1,204 +0,0 @@
-import os
-from unittest.mock import MagicMock, patch
-
-import pytest
-from langchain_openai import ChatOpenAI
-
-from lfx.components.openai.openai_chat_model import OpenAIModelComponent
-from tests.base import ComponentTestBaseWithoutClient
-
-
-class TestOpenAIModelComponent(ComponentTestBaseWithoutClient):
- @pytest.fixture
- def component_class(self):
- return OpenAIModelComponent
-
- @pytest.fixture
- def default_kwargs(self):
- return {
- "max_tokens": 1000,
- "model_kwargs": {},
- "json_mode": False,
- "model_name": "gpt-4.1-nano",
- "openai_api_base": "https://api.openai.com/v1",
- "api_key": "test-api-key",
- "temperature": 0.1,
- "seed": 1,
- "max_retries": 5,
- "timeout": 700,
- }
-
- @pytest.fixture
- def file_names_mapping(self):
- # Provide an empty list or the actual mapping if versioned files exist
- return []
-
- @patch("lfx.components.openai.openai_chat_model.ChatOpenAI")
- async def test_build_model(self, mock_chat_openai, component_class, default_kwargs):
- mock_instance = MagicMock()
- mock_chat_openai.return_value = mock_instance
- component = component_class(**default_kwargs)
- model = component.build_model()
-
- mock_chat_openai.assert_called_once_with(
- api_key="test-api-key",
- model_name="gpt-4.1-nano",
- max_tokens=1000,
- model_kwargs={},
- base_url="https://api.openai.com/v1",
- seed=1,
- max_retries=5,
- timeout=700,
- temperature=0.1,
- )
- assert model == mock_instance
-
- @patch("lfx.components.openai.openai_chat_model.ChatOpenAI")
- async def test_build_model_reasoning_model(self, mock_chat_openai, component_class, default_kwargs):
- mock_instance = MagicMock()
- mock_chat_openai.return_value = mock_instance
- default_kwargs["model_name"] = "o1"
- component = component_class(**default_kwargs)
- model = component.build_model()
-
- # For reasoning models, temperature and seed should be excluded
- mock_chat_openai.assert_called_once_with(
- api_key="test-api-key",
- model_name="o1",
- max_tokens=1000,
- model_kwargs={},
- base_url="https://api.openai.com/v1",
- max_retries=5,
- timeout=700,
- )
- assert model == mock_instance
-
- # Verify that temperature and seed are not in the parameters
- args, kwargs = mock_chat_openai.call_args
- assert "temperature" not in kwargs
- assert "seed" not in kwargs
-
- @patch("lfx.components.openai.openai_chat_model.ChatOpenAI")
- async def test_build_model_with_json_mode(self, mock_chat_openai, component_class, default_kwargs):
- mock_instance = MagicMock()
- mock_bound_instance = MagicMock()
- mock_instance.bind.return_value = mock_bound_instance
- mock_chat_openai.return_value = mock_instance
-
- default_kwargs["json_mode"] = True
- component = component_class(**default_kwargs)
- model = component.build_model()
-
- mock_chat_openai.assert_called_once()
- mock_instance.bind.assert_called_once_with(response_format={"type": "json_object"})
- assert model == mock_bound_instance
-
- @patch("lfx.components.openai.openai_chat_model.ChatOpenAI")
- async def test_build_model_no_api_key(self, mock_chat_openai, component_class, default_kwargs):
- mock_instance = MagicMock()
- mock_chat_openai.return_value = mock_instance
- default_kwargs["api_key"] = None
- component = component_class(**default_kwargs)
- component.build_model()
-
- # When api_key is None, it should be passed as None to ChatOpenAI
- args, kwargs = mock_chat_openai.call_args
- assert kwargs["api_key"] is None
-
- @patch("lfx.components.openai.openai_chat_model.ChatOpenAI")
- async def test_build_model_max_tokens_zero(self, mock_chat_openai, component_class, default_kwargs):
- mock_instance = MagicMock()
- mock_chat_openai.return_value = mock_instance
- default_kwargs["max_tokens"] = 0
- component = component_class(**default_kwargs)
- component.build_model()
-
- # When max_tokens is 0, it should be passed as None to ChatOpenAI
- args, kwargs = mock_chat_openai.call_args
- assert kwargs["max_tokens"] is None
-
- async def test_get_exception_message_bad_request_error(self, component_class, default_kwargs):
- component_class(**default_kwargs)
-
- # Create a mock BadRequestError with a body attribute
- mock_error = MagicMock()
- mock_error.body = {"message": "test error message"}
-
- # Test the method directly by patching the import
- with patch("openai.BadRequestError", mock_error.__class__):
-            # Check the body attribute directly, mirroring what _get_exception_message does
- if hasattr(mock_error, "body"):
- message = mock_error.body.get("message")
- assert message == "test error message"
-
- async def test_get_exception_message_no_openai_import(self, component_class, default_kwargs):
- component = component_class(**default_kwargs)
-
- # Test when openai module is not available
- with patch.dict("sys.modules", {"openai": None}), patch("builtins.__import__", side_effect=ImportError):
- message = component._get_exception_message(Exception("test"))
- assert message is None
-
- async def test_get_exception_message_other_exception(self, component_class, default_kwargs):
- component = component_class(**default_kwargs)
-
- # Create a regular exception (not BadRequestError)
- regular_exception = ValueError("test error")
-
- # Create a simple mock for BadRequestError that the exception won't match
- class MockBadRequestError:
- pass
-
- with patch("openai.BadRequestError", MockBadRequestError):
- message = component._get_exception_message(regular_exception)
- assert message is None
-
- async def test_update_build_config_reasoning_model(self, component_class, default_kwargs):
- component = component_class(**default_kwargs)
- build_config = {
- "temperature": {"show": True},
- "seed": {"show": True},
- }
-
- # Test with reasoning model
- updated_config = component.update_build_config(build_config, "o1", "model_name")
- assert updated_config["temperature"]["show"] is False
- assert updated_config["seed"]["show"] is False
-
- # Test with regular model
- updated_config = component.update_build_config(build_config, "gpt-4", "model_name")
- assert updated_config["temperature"]["show"] is True
- assert updated_config["seed"]["show"] is True
-
-    @pytest.mark.skipif(os.getenv("OPENAI_API_KEY") is None, reason="OPENAI_API_KEY is not set")
-    def test_build_model_integration(self):
- component = OpenAIModelComponent()
- component.api_key = os.getenv("OPENAI_API_KEY")
- component.model_name = "gpt-4.1-nano"
- component.temperature = 0.2
- component.max_tokens = 1000
- component.seed = 42
- component.max_retries = 3
- component.timeout = 600
- component.openai_api_base = "https://api.openai.com/v1"
-
- model = component.build_model()
- assert isinstance(model, ChatOpenAI)
- assert model.model_name == "gpt-4.1-nano"
- assert model.openai_api_base == "https://api.openai.com/v1"
-
- @pytest.mark.skipif(os.getenv("OPENAI_API_KEY") is None, reason="OPENAI_API_KEY is not set")
- def test_build_model_integration_reasoning(self):
- component = OpenAIModelComponent()
- component.api_key = os.getenv("OPENAI_API_KEY")
- component.model_name = "o1"
- component.temperature = 0.2 # This should be ignored for reasoning models
- component.max_tokens = 1000
- component.seed = 42 # This should be ignored for reasoning models
- component.max_retries = 3
- component.timeout = 600
- component.openai_api_base = "https://api.openai.com/v1"
-
- model = component.build_model()
- assert isinstance(model, ChatOpenAI)
- assert model.model_name == "o1"
- assert model.openai_api_base == "https://api.openai.com/v1"
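
test_build_model_reasoning_model encodes the key contract: reasoning models reject sampling parameters, so temperature and seed must be dropped before constructing the client. A sketch of that filtering, assuming a simple prefix check (the real component reads its model lists from openai_constants):

from langchain_openai import ChatOpenAI

REASONING_PREFIXES = ("o1", "o3")  # assumption; not the actual constant

def build_chat_openai(model_name: str, **kwargs) -> ChatOpenAI:
    if model_name.startswith(REASONING_PREFIXES):
        # Reasoning models reject sampling controls, per the assertions above.
        kwargs.pop("temperature", None)
        kwargs.pop("seed", None)
    return ChatOpenAI(model_name=model_name, **kwargs)
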
diff --git a/src/backend/tests/unit/components/languagemodels/test_xai.py b/src/backend/tests/unit/components/languagemodels/test_xai.py
deleted file mode 100644
index 100d91ecd62f..000000000000
--- a/src/backend/tests/unit/components/languagemodels/test_xai.py
+++ /dev/null
@@ -1,198 +0,0 @@
-from unittest.mock import MagicMock, patch
-
-import pytest
-
-from lfx.components.xai.xai import XAIModelComponent
-from lfx.custom.custom_component.component import Component
-from lfx.custom.utils import build_custom_component_template
-from lfx.inputs.inputs import (
- BoolInput,
- DictInput,
- DropdownInput,
- IntInput,
- MessageTextInput,
- SecretStrInput,
- SliderInput,
-)
-from tests.base import ComponentTestBaseWithoutClient
-
-
-class TestXAIComponent(ComponentTestBaseWithoutClient):
- @pytest.fixture
- def component_class(self):
- return XAIModelComponent
-
- @pytest.fixture
- def default_kwargs(self):
- return {
- "temperature": 0.1,
- "max_tokens": 50,
- "api_key": "dummy-key",
- "model_name": "grok-2-latest",
- "model_kwargs": {},
- "base_url": "https://api.x.ai/v1",
- "seed": 42,
- }
-
- @pytest.fixture
- def file_names_mapping(self):
- return []
-
- def test_initialization(self, component_class):
- component = component_class()
- assert component.display_name == "xAI"
- assert component.description == "Generates text using xAI models like Grok."
- assert component.icon == "xAI"
- assert component.name == "xAIModel"
-
- def test_template(self, default_kwargs):
- component = XAIModelComponent(**default_kwargs)
- comp = Component(_code=component._code)
- frontend_node, _ = build_custom_component_template(comp)
- assert isinstance(frontend_node, dict)
- assert "template" in frontend_node
- input_names = [inp["name"] for inp in frontend_node["template"].values() if isinstance(inp, dict)]
- expected_inputs = [
- "max_tokens",
- "model_kwargs",
- "json_mode",
- "model_name",
- "base_url",
- "api_key",
- "temperature",
- "seed",
- ]
- for input_name in expected_inputs:
- assert input_name in input_names
-
- def test_inputs(self):
- component = XAIModelComponent()
- inputs = component.inputs
- expected_inputs = {
- "max_tokens": IntInput,
- "model_kwargs": DictInput,
- "json_mode": BoolInput,
- "model_name": DropdownInput,
- "base_url": MessageTextInput,
- "api_key": SecretStrInput,
- "temperature": SliderInput,
- "seed": IntInput,
- }
- for name, input_type in expected_inputs.items():
- matching_inputs = [inp for inp in inputs if isinstance(inp, input_type) and inp.name == name]
- assert matching_inputs, f"Missing or incorrect input: {name}"
- if name == "model_name":
- input_field = matching_inputs[0]
- assert input_field.value == "grok-2-latest"
- assert input_field.refresh_button is True
- elif name == "temperature":
- input_field = matching_inputs[0]
- assert input_field.value == 0.1
- assert input_field.range_spec.min == 0
- assert input_field.range_spec.max == 2
-
- def test_build_model(self, component_class, default_kwargs, mocker):
- component = component_class(**default_kwargs)
- component.temperature = 0.7
- component.max_tokens = 100
- component.api_key = "test-key"
- component.model_name = "grok-2-latest"
- component.model_kwargs = {}
- component.base_url = "https://api.x.ai/v1"
- component.seed = 1
-
- mock_chat_openai = mocker.patch("lfx.components.xai.xai.ChatOpenAI", return_value=MagicMock())
- model = component.build_model()
- mock_chat_openai.assert_called_once_with(
- max_tokens=100,
- model_kwargs={},
- model="grok-2-latest",
- base_url="https://api.x.ai/v1",
- api_key="test-key",
- temperature=0.7,
- seed=1,
- )
- assert model == mock_chat_openai.return_value
-
- def test_get_models(self):
- component = XAIModelComponent()
- with patch("requests.get") as mock_get:
- mock_response = MagicMock()
- mock_response.json.return_value = {
- "models": [
- {"id": "grok-2-latest", "aliases": ["grok-2"]},
- {"id": "grok-1", "aliases": []},
- ]
- }
- mock_get.return_value = mock_response
-
- component.api_key = "test-key"
- models = component.get_models()
- assert sorted(models) == ["grok-1", "grok-2", "grok-2-latest"]
- mock_get.assert_called_once_with(
- "https://api.x.ai/v1/language-models",
- headers={
- "Authorization": "Bearer test-key",
- "Accept": "application/json",
- },
- timeout=10,
- )
-
- def test_get_models_no_api_key(self):
- component = XAIModelComponent(api_key=None)
- models = component.get_models()
- assert models == ["grok-2-latest"]
-
- def test_build_model_error(self, component_class, mocker):
- from openai import BadRequestError
-
- component = component_class()
- component.api_key = "invalid-key"
- component.model_name = "grok-2-latest"
- component.temperature = 0.7
- component.max_tokens = 100
- component.model_kwargs = {}
- component.base_url = "https://api.x.ai/v1"
- component.seed = 1
-
- mocker.patch(
- "lfx.components.xai.xai.ChatOpenAI",
- side_effect=BadRequestError(
- message="Invalid API key",
- response=MagicMock(),
- body={"message": "Invalid API key provided"},
- ),
- )
- with pytest.raises(BadRequestError) as exc_info:
- component.build_model()
- assert exc_info.value.body["message"] == "Invalid API key provided"
-
- def test_json_mode(self, component_class, mocker):
- component = component_class()
- component.api_key = "test-key"
- component.json_mode = True
- component.temperature = 0.7
- component.max_tokens = 100
- component.model_name = "grok-2-latest"
- component.model_kwargs = {}
- component.base_url = "https://api.x.ai/v1"
- component.seed = 1
-
- mock_instance = MagicMock()
- mock_bound_instance = MagicMock()
- mock_instance.bind.return_value = mock_bound_instance
- mocker.patch("lfx.components.xai.xai.ChatOpenAI", return_value=mock_instance)
-
- model = component.build_model()
- mock_instance.bind.assert_called_once_with(response_format={"type": "json_object"})
- assert model == mock_bound_instance
-
- def test_update_build_config(self):
- component = XAIModelComponent()
- build_config = {"model_name": {"options": []}}
-
- updated_config = component.update_build_config(build_config, "test-key", "api_key")
- assert "model_name" in updated_config
-
- updated_config = component.update_build_config(build_config, "grok-2-latest", "model_name")
- assert "model_name" in updated_config
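
test_json_mode pins the JSON-mode path: the built model is re-bound with a JSON response format rather than reconfigured. A sketch of that step (an illustrative helper, not the component's method):

def apply_json_mode(model, *, json_mode: bool):
    # bind() returns a new runnable with the extra kwarg attached.
    return model.bind(response_format={"type": "json_object"}) if json_mode else model
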
diff --git a/src/backend/tests/unit/components/logic/test_loop.py b/src/backend/tests/unit/components/logic/test_loop.py
deleted file mode 100644
index 6afd78564dca..000000000000
--- a/src/backend/tests/unit/components/logic/test_loop.py
+++ /dev/null
@@ -1,250 +0,0 @@
-import json
-import os
-from uuid import UUID
-
-import orjson
-import pytest
-from httpx import AsyncClient
-from langflow.memory import aget_messages
-from langflow.services.database.models.flow import FlowCreate
-
-from lfx.components.data.url import URLComponent
-from lfx.components.input_output import ChatOutput
-from lfx.components.logic import LoopComponent
-from lfx.components.openai.openai_chat_model import OpenAIModelComponent
-from lfx.components.processing import (
- ParserComponent,
- PromptComponent,
- SplitTextComponent,
- StructuredOutputComponent,
-)
-from lfx.graph import Graph
-from lfx.schema.data import Data
-from tests.base import ComponentTestBaseWithClient
-from tests.unit.build_utils import build_flow, get_build_events
-
-TEXT = (
- "lorem ipsum dolor sit amet lorem ipsum dolor sit amet lorem ipsum dolor sit amet. "
- "lorem ipsum dolor sit amet lorem ipsum dolor sit amet lorem ipsum dolor sit amet. "
- "lorem ipsum dolor sit amet lorem ipsum dolor sit amet lorem ipsum dolor sit amet."
-)
-
-
-class TestLoopComponentWithAPI(ComponentTestBaseWithClient):
- @pytest.fixture
- def component_class(self):
- """Return the component class to test."""
- return LoopComponent
-
- @pytest.fixture
- def file_names_mapping(self):
- """Return an empty list since this component doesn't have version-specific files."""
- return []
-
- @pytest.fixture
- def default_kwargs(self):
- """Return the default kwargs for the component."""
- return {
- "data": [[Data(text="Hello World")]],
- "loop_input": [Data(text=TEXT)],
- }
-
- def test_latest_version(self, component_class, default_kwargs) -> None:
- """Test that the component works with the latest version."""
- result = component_class(**default_kwargs)
- assert result is not None, "Component returned None for the latest version."
-
- async def _create_flow(self, client, json_loop_test, logged_in_headers):
- vector_store = orjson.loads(json_loop_test)
- data = vector_store["data"]
- vector_store = FlowCreate(name="Flow", description="description", data=data, endpoint_name="f")
- response = await client.post("api/v1/flows/", json=vector_store.model_dump(), headers=logged_in_headers)
- response.raise_for_status()
- return response.json()["id"]
-
- async def check_messages(self, flow_id):
- messages = await aget_messages(flow_id=UUID(flow_id), order="ASC")
- assert len(messages) == 1
- assert messages[0].session_id == flow_id
- assert messages[0].sender == "Machine"
- assert messages[0].sender_name == "AI"
- assert len(messages[0].text) > 0
- return messages
-
- async def test_build_flow_loop(self, client, json_loop_test, logged_in_headers):
- """Test building a flow with a loop component."""
- # Create the flow
- flow_id = await self._create_flow(client, json_loop_test, logged_in_headers)
-
- # Start the build and get job_id
- build_response = await build_flow(client, flow_id, logged_in_headers)
- job_id = build_response["job_id"]
- assert job_id is not None
-
- # Get the events stream
- events_response = await get_build_events(client, job_id, logged_in_headers)
- assert events_response.status_code == 200
-
- # Process the events stream
- chat_output = None
- lines = []
- async for line in events_response.aiter_lines():
- if not line: # Skip empty lines
- continue
- lines.append(line)
- if "ChatOutput" in line:
- chat_output = json.loads(line)
- # Process events if needed
- # We could add specific assertions here for loop-related events
- assert chat_output is not None
- messages = await self.check_messages(flow_id)
- ai_message = messages[0].text
- json_data = orjson.loads(ai_message)
-
- # Use a for loop for better debugging
- found = []
- json_data = [(data["text"], q_dict) for data, q_dict in json_data]
- for text, q_dict in json_data:
- expected_text = f"==> {q_dict['q']}"
- assert expected_text in text, (
-                f"Found {found} until now, but expected '{expected_text}' not found in '{text}', "
- f"and the json_data is {json_data}"
- )
- found.append(expected_text)
-
- async def test_run_flow_loop(self, client: AsyncClient, created_api_key, json_loop_test, logged_in_headers):
- flow_id = await self._create_flow(client, json_loop_test, logged_in_headers)
- headers = {"x-api-key": created_api_key.api_key}
- payload = {
- "input_value": TEXT,
- "input_type": "chat",
- "session_id": f"{flow_id}run",
- "output_type": "chat",
- "tweaks": {},
- }
- response = await client.post(f"/api/v1/run/{flow_id}", json=payload, headers=headers)
- data = response.json()
- assert "outputs" in data
- assert "session_id" in data
- assert len(data["outputs"][-1]["outputs"]) > 0
-
-
-def loop_flow():
- """Complete loop flow that processes multiple URLs through a loop."""
- # Create URL component to fetch content from multiple sources
- url_component = URLComponent()
- url_component.set(urls=["https://docs.langflow.org/"])
-
- # Create SplitText component to chunk the content
- split_text_component = SplitTextComponent()
- split_text_component.set(
- data_inputs=url_component.fetch_content,
- chunk_size=1000,
- chunk_overlap=200,
- separator="\n\n",
- )
-
- # Create Loop component to iterate through the chunks
- loop_component = LoopComponent()
- loop_component.set(data=split_text_component.split_text)
-
- # Create Parser component to format the current loop item
- parser_component = ParserComponent()
- parser_component.set(
- input_data=loop_component.item_output,
- pattern="Content: {text}",
- sep="\n",
- )
-
- # Create Prompt component to create processing instructions
- prompt_component = PromptComponent()
- prompt_component.set(
- template="Analyze and summarize this content: {context}",
- input_text=parser_component.parse_combined_text,
- )
-
- # Create OpenAI model component for processing
- openai_component = OpenAIModelComponent()
- openai_component.set(
- api_key=os.getenv("OPENAI_API_KEY"),
- model_name="gpt-4.1-mini",
- temperature=0.7,
- )
-
- # Create StructuredOutput component to process content
- structured_output = StructuredOutputComponent()
- structured_output.set(
- llm=openai_component.build_model,
- input_value=prompt_component.build_prompt,
- schema_name="ProcessedContent",
- system_prompt=( # Added missing system_prompt - this was causing the "Multiple structured outputs" error
- "You are an AI that extracts one structured JSON object from unstructured text. "
- "Use a predefined schema with expected types (str, int, float, bool, dict). "
- "If multiple structures exist, extract only the first most complete one. "
- "Fill missing or ambiguous values with defaults: null for missing values. "
- "Ignore duplicates and partial repeats. "
-            "Always return one valid JSON, never throw errors or return multiple objects. "
- "Output: A single well-formed JSON object, and nothing else."
- ),
- output_schema=[ # Fixed schema types to match expected format
- {"name": "summary", "type": "str", "description": "Key summary of the content", "multiple": False},
- {"name": "topics", "type": "list", "description": "Main topics covered", "multiple": False},
- {"name": "source_url", "type": "str", "description": "Source URL of the content", "multiple": False},
- ],
- )
-
- # Connect the feedback loop - StructuredOutput back to Loop item input
- # Note: 'item' is a special dynamic input for LoopComponent feedback loops
- loop_component.set(item=structured_output.build_structured_output)
- # Create ChatOutput component to display final results
- chat_output = ChatOutput()
- chat_output.set(input_value=loop_component.done_output)
-
- return Graph(start=url_component, end=chat_output)
-
-
-@pytest.mark.skipif(os.getenv("OPENAI_API_KEY") in {None, "dummy"}, reason="OPENAI_API_KEY is not set")
-@pytest.mark.xfail
-async def test_loop_flow():
- """Test that loop_flow creates a working graph with proper loop feedback connection."""
- flow = loop_flow()
- assert flow is not None
- assert flow._start is not None
- assert flow._end is not None
-
- # Verify all expected components are present
- expected_vertices = {
- "URLComponent",
- "SplitTextComponent",
- "LoopComponent",
- "ParserComponent",
- "PromptComponent",
- "OpenAIModelComponent",
- "StructuredOutputComponent",
- "ChatOutput",
- }
-
- assert all(vertex.id.split("-")[0] in expected_vertices for vertex in flow.vertices)
-
- expected_execution_order = [
- "OpenAIModelComponent",
- "URLComponent",
- "SplitTextComponent",
- "LoopComponent",
- "ParserComponent",
- "PromptComponent",
- "StructuredOutputComponent",
- "LoopComponent",
- "ParserComponent",
- "PromptComponent",
- "StructuredOutputComponent",
- "LoopComponent",
- "ParserComponent",
- "PromptComponent",
- "StructuredOutputComponent",
- "LoopComponent",
- "ChatOutput",
- ]
- results = [result async for result in flow.async_start()]
- result_order = [result.vertex.id.split("-")[0] for result in results if hasattr(result, "vertex")]
- assert result_order == expected_execution_order
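
The expected_execution_order above encodes the loop contract: item_output fires once per chunk and routes through Parser, Prompt, and StructuredOutput; the result returns over the 'item' feedback edge; done_output fires only after the last iteration. A plain-Python illustration of that contract (not the LoopComponent implementation):

def run_loop(chunks, process):
    results = []
    for chunk in chunks:                 # item_output -> Parser -> Prompt -> StructuredOutput
        results.append(process(chunk))   # result fed back via the 'item' input
    return results                       # done_output -> ChatOutput
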
diff --git a/src/backend/tests/unit/components/models/test_embedding_model_component.py b/src/backend/tests/unit/components/models/test_embedding_model_component.py
deleted file mode 100644
index 48ee2f6124b7..000000000000
--- a/src/backend/tests/unit/components/models/test_embedding_model_component.py
+++ /dev/null
@@ -1,89 +0,0 @@
-from unittest.mock import MagicMock, patch
-
-import pytest
-
-from lfx.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES
-from lfx.components.models.embedding_model import EmbeddingModelComponent
-from tests.base import ComponentTestBaseWithClient
-
-
-@pytest.mark.usefixtures("client")
-class TestEmbeddingModelComponent(ComponentTestBaseWithClient):
- @pytest.fixture
- def component_class(self):
- return EmbeddingModelComponent
-
- @pytest.fixture
- def default_kwargs(self):
- return {
- "provider": "OpenAI",
- "model": "text-embedding-3-small",
- "api_key": "test-api-key",
- "chunk_size": 1000,
- "max_retries": 3,
- "show_progress_bar": False,
- }
-
- @pytest.fixture
- def file_names_mapping(self):
-        """Return the file names mapping for version-specific files."""
-        return []
-
- async def test_update_build_config_openai(self, component_class, default_kwargs):
- component = component_class(**default_kwargs)
- build_config = {
- "model": {"options": [], "value": ""},
- "api_key": {"display_name": "API Key"},
- "api_base": {"display_name": "API Base URL"},
- }
- updated_config = component.update_build_config(build_config, "OpenAI", "provider")
- assert updated_config["model"]["options"] == OPENAI_EMBEDDING_MODEL_NAMES
- assert updated_config["model"]["value"] == OPENAI_EMBEDDING_MODEL_NAMES[0]
- assert updated_config["api_key"]["display_name"] == "OpenAI API Key"
- assert updated_config["api_base"]["display_name"] == "OpenAI API Base URL"
-
- @patch("lfx.components.models.embedding_model.OpenAIEmbeddings")
- async def test_build_embeddings_openai(self, mock_openai_embeddings, component_class, default_kwargs):
- # Setup mock
- mock_instance = MagicMock()
- mock_openai_embeddings.return_value = mock_instance
-
- # Create and configure the component
- component = component_class(**default_kwargs)
- component.provider = "OpenAI"
- component.model = "text-embedding-3-small"
- component.api_key = "test-key"
- component.chunk_size = 1000
- component.max_retries = 3
- component.show_progress_bar = False
-
- # Build the embeddings
- embeddings = component.build_embeddings()
-
- # Verify the OpenAIEmbeddings was called with the correct parameters
- mock_openai_embeddings.assert_called_once_with(
- model="text-embedding-3-small",
- dimensions=None,
- base_url=None,
- api_key="test-key",
- chunk_size=1000,
- max_retries=3,
- timeout=None,
- show_progress_bar=False,
- model_kwargs={},
- )
- assert embeddings == mock_instance
-
- async def test_build_embeddings_openai_missing_api_key(self, component_class, default_kwargs):
- component = component_class(**default_kwargs)
- component.provider = "OpenAI"
- component.api_key = None
-
- with pytest.raises(ValueError, match="OpenAI API key is required when using OpenAI provider"):
- component.build_embeddings()
-
- async def test_build_embeddings_unknown_provider(self, component_class, default_kwargs):
- component = component_class(**default_kwargs)
- component.provider = "Unknown"
-
- with pytest.raises(ValueError, match="Unknown provider: Unknown"):
- component.build_embeddings()
diff --git a/src/backend/tests/unit/components/models/test_language_model_component.py b/src/backend/tests/unit/components/models/test_language_model_component.py
deleted file mode 100644
index 5d87c5f9e108..000000000000
--- a/src/backend/tests/unit/components/models/test_language_model_component.py
+++ /dev/null
@@ -1,219 +0,0 @@
-import os
-
-import pytest
-from langchain_anthropic import ChatAnthropic
-from langchain_google_genai import ChatGoogleGenerativeAI
-from langchain_openai import ChatOpenAI
-
-from lfx.base.models.anthropic_constants import ANTHROPIC_MODELS
-from lfx.base.models.google_generative_ai_constants import GOOGLE_GENERATIVE_AI_MODELS
-from lfx.base.models.openai_constants import OPENAI_CHAT_MODEL_NAMES, OPENAI_REASONING_MODEL_NAMES
-from lfx.components.models.language_model import LanguageModelComponent
-from tests.base import ComponentTestBaseWithoutClient
-
-
-class TestLanguageModelComponent(ComponentTestBaseWithoutClient):
- @pytest.fixture
- def component_class(self):
- return LanguageModelComponent
-
- @pytest.fixture
- def default_kwargs(self):
- return {
- "provider": "OpenAI",
- "model_name": "gpt-3.5-turbo",
- "api_key": "test-api-key",
- "temperature": 0.1,
- "system_message": "You are a helpful assistant.",
- "input_value": "Hello, how are you?",
- "stream": False,
- }
-
- @pytest.fixture
- def file_names_mapping(self):
- """Return the file names mapping for version-specific files."""
- # No version-specific files for this component
- return []
-
- @pytest.fixture
- def openai_api_key(self):
- """Fixture to get OpenAI API key from environment variable."""
- api_key = os.environ.get("OPENAI_API_KEY")
- if not api_key:
- pytest.skip("OPENAI_API_KEY environment variable not set")
- return api_key
-
- @pytest.fixture
- def anthropic_api_key(self):
- """Fixture to get Anthropic API key from environment variable."""
- api_key = os.environ.get("ANTHROPIC_API_KEY")
- if not api_key:
- pytest.skip("ANTHROPIC_API_KEY environment variable not set")
- return api_key
-
- @pytest.fixture
- def google_api_key(self):
- """Fixture to get Google API key from environment variable."""
- api_key = os.environ.get("GOOGLE_API_KEY")
- if not api_key:
- pytest.skip("GOOGLE_API_KEY environment variable not set")
- return api_key
-
- async def test_update_build_config_openai(self, component_class, default_kwargs):
- component = component_class(**default_kwargs)
- build_config = {
- "model_name": {"options": [], "value": ""},
- "api_key": {"display_name": "API Key"},
- }
- updated_config = component.update_build_config(build_config, "OpenAI", "provider")
- assert updated_config["model_name"]["options"] == OPENAI_CHAT_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES
- assert updated_config["model_name"]["value"] == OPENAI_CHAT_MODEL_NAMES[0]
- assert updated_config["api_key"]["display_name"] == "OpenAI API Key"
-
- async def test_update_build_config_anthropic(self, component_class, default_kwargs):
- component = component_class(**default_kwargs)
- build_config = {
- "model_name": {"options": [], "value": ""},
- "api_key": {"display_name": "API Key"},
- }
- updated_config = component.update_build_config(build_config, "Anthropic", "provider")
- assert updated_config["model_name"]["options"] == ANTHROPIC_MODELS
- assert updated_config["model_name"]["value"] == ANTHROPIC_MODELS[0]
- assert updated_config["api_key"]["display_name"] == "Anthropic API Key"
-
- async def test_update_build_config_google(self, component_class, default_kwargs):
- component = component_class(**default_kwargs)
- build_config = {
- "model_name": {"options": [], "value": ""},
- "api_key": {"display_name": "API Key"},
- }
- updated_config = component.update_build_config(build_config, "Google", "provider")
- assert updated_config["model_name"]["options"] == GOOGLE_GENERATIVE_AI_MODELS
- assert updated_config["model_name"]["value"] == GOOGLE_GENERATIVE_AI_MODELS[0]
- assert updated_config["api_key"]["display_name"] == "Google API Key"
-
- async def test_openai_model_creation(self, component_class, default_kwargs):
- """Test that the component returns an instance of ChatOpenAI for OpenAI provider."""
- component = component_class(**default_kwargs)
- component.provider = "OpenAI"
- component.model_name = "gpt-3.5-turbo"
- component.api_key = "sk-test-key" # Use a fake but correctly formatted key
- component.temperature = 0.5
- component.stream = False
-
- # The API key will be invalid, but we should still get a ChatOpenAI instance
- model = component.build_model()
- assert isinstance(model, ChatOpenAI)
- assert model.model_name == "gpt-3.5-turbo"
- assert model.temperature == 0.5
- assert model.streaming is False
- # API key is stored as a SecretStr object, so we can't directly compare values
-
- async def test_anthropic_model_creation(self, component_class, default_kwargs):
- """Test that the component returns an instance of ChatAnthropic for Anthropic provider."""
- component = component_class(**default_kwargs)
- component.provider = "Anthropic"
- component.model_name = ANTHROPIC_MODELS[0]
- component.api_key = "sk-ant-test-key" # Use a fake but plausible key
- component.temperature = 0.7
- component.stream = False
-
- # The API key will be invalid, but we should still get a ChatAnthropic instance
- model = component.build_model()
- assert isinstance(model, ChatAnthropic)
- assert model.model == ANTHROPIC_MODELS[0]
- assert model.temperature == 0.7
- assert model.streaming is False
- # API key is stored as a SecretStr object, so we can't directly compare values
-
- async def test_google_model_creation(self, component_class, default_kwargs):
- """Test that the component returns an instance of ChatGoogleGenerativeAI for Google provider."""
- component = component_class(**default_kwargs)
- component.provider = "Google"
- component.model_name = GOOGLE_GENERATIVE_AI_MODELS[0]
- component.api_key = "google-test-key" # Use a fake but plausible key
- component.temperature = 0.7
- component.stream = False
-
- # The API key will be invalid, but we should still get a ChatGoogleGenerativeAI instance
- model = component.build_model()
- assert isinstance(model, ChatGoogleGenerativeAI)
- # Google model automatically prepends "models/" to the model name
- assert model.model == f"models/{GOOGLE_GENERATIVE_AI_MODELS[0]}"
- assert model.temperature == 0.7
- # Google model uses 'stream' instead of 'streaming'
- # Skip this check for Google model since it has a different interface
- # API key is stored as a SecretStr object, so we can't directly compare values
-
- async def test_build_model_openai_missing_api_key(self, component_class, default_kwargs):
- component = component_class(**default_kwargs)
- component.provider = "OpenAI"
- component.api_key = None
-
- with pytest.raises(ValueError, match="OpenAI API key is required when using OpenAI provider"):
- component.build_model()
-
- async def test_build_model_anthropic_missing_api_key(self, component_class, default_kwargs):
- component = component_class(**default_kwargs)
- component.provider = "Anthropic"
- component.api_key = None
-
- with pytest.raises(ValueError, match="Anthropic API key is required when using Anthropic provider"):
- component.build_model()
-
- async def test_build_model_google_missing_api_key(self, component_class, default_kwargs):
- component = component_class(**default_kwargs)
- component.provider = "Google"
- component.api_key = None
-
- with pytest.raises(ValueError, match="Google API key is required when using Google provider"):
- component.build_model()
-
- async def test_build_model_unknown_provider(self, component_class, default_kwargs):
- component = component_class(**default_kwargs)
- component.provider = "Unknown"
-
- with pytest.raises(ValueError, match="Unknown provider: Unknown"):
- component.build_model()
-
- async def test_openai_live_api(self, component_class, default_kwargs, openai_api_key):
- """Test that the component can create a model with a real API key."""
- component = component_class(**default_kwargs)
- component.provider = "OpenAI"
- component.model_name = "gpt-3.5-turbo"
- component.api_key = openai_api_key
- component.temperature = 0.1
- component.stream = False
-
- model = component.build_model()
- assert isinstance(model, ChatOpenAI)
- # We could attempt a simple call here, but that would increase test time
- # and might fail due to network issues, so we'll just verify the instance
-
- async def test_anthropic_live_api(self, component_class, default_kwargs, anthropic_api_key):
- """Test that the component can create a model with a real API key."""
- component = component_class(**default_kwargs)
- component.provider = "Anthropic"
- component.model_name = ANTHROPIC_MODELS[0]
- component.api_key = anthropic_api_key
- component.temperature = 0.1
- component.stream = False
-
- model = component.build_model()
- assert isinstance(model, ChatAnthropic)
- # We could attempt a simple call here, but that would increase test time
- # and might fail due to network issues, so we'll just verify the instance
-
- async def test_google_live_api(self, component_class, default_kwargs, google_api_key):
- """Test that the component can create a model with a real API key."""
- component = component_class(**default_kwargs)
- component.provider = "Google"
- component.model_name = GOOGLE_GENERATIVE_AI_MODELS[0]
- component.api_key = google_api_key
- component.temperature = 0.1
- component.stream = False
-
- model = component.build_model()
- assert isinstance(model, ChatGoogleGenerativeAI)
- # We could attempt a simple call here, but that would increase test time
- # and might fail due to network issues, so we'll just verify the instance
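
The provider tests above fix both the error messages and the client class per provider. A sketch of the dispatch they imply (a reconstruction from the assertions, not the actual LanguageModelComponent code):

def build_model(provider: str, model_name: str, api_key: str | None, temperature: float, stream: bool):
    if not api_key:
        msg = f"{provider} API key is required when using {provider} provider"
        raise ValueError(msg)
    if provider == "OpenAI":
        from langchain_openai import ChatOpenAI
        return ChatOpenAI(model_name=model_name, api_key=api_key, temperature=temperature, streaming=stream)
    if provider == "Anthropic":
        from langchain_anthropic import ChatAnthropic
        return ChatAnthropic(model=model_name, api_key=api_key, temperature=temperature, streaming=stream)
    if provider == "Google":
        from langchain_google_genai import ChatGoogleGenerativeAI
        return ChatGoogleGenerativeAI(model=model_name, google_api_key=api_key, temperature=temperature)
    msg = f"Unknown provider: {provider}"
    raise ValueError(msg)
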
diff --git a/src/backend/tests/unit/components/outputs/test_chat_output_component.py b/src/backend/tests/unit/components/outputs/test_chat_output_component.py
deleted file mode 100644
index 311514d66e6a..000000000000
--- a/src/backend/tests/unit/components/outputs/test_chat_output_component.py
+++ /dev/null
@@ -1,98 +0,0 @@
-import pytest
-
-from lfx.components.input_output import ChatOutput
-from lfx.schema.data import Data
-from lfx.schema.dataframe import DataFrame
-from lfx.schema.message import Message
-from lfx.utils.constants import MESSAGE_SENDER_AI, MESSAGE_SENDER_NAME_AI
-from tests.base import ComponentTestBaseWithClient
-
-
-@pytest.mark.usefixtures("client")
-class TestChatOutput(ComponentTestBaseWithClient):
- @pytest.fixture
- def component_class(self):
- return ChatOutput
-
- @pytest.fixture
- def default_kwargs(self):
- return {
- "input_value": "Hello, how are you?",
- "should_store_message": True,
- "sender": MESSAGE_SENDER_AI,
- "sender_name": MESSAGE_SENDER_NAME_AI,
- "session_id": "test_session_123",
- "data_template": "{text}",
- "background_color": "#f0f0f0",
- "chat_icon": "🤖",
- "text_color": "#000000",
- "clean_data": True,
- }
-
- @pytest.fixture
- def file_names_mapping(self):
- return [
- {"version": "1.0.19", "module": "outputs", "file_name": "ChatOutput"},
- {"version": "1.1.0", "module": "outputs", "file_name": "chat"},
- {"version": "1.1.1", "module": "outputs", "file_name": "chat"},
- ]
-
- async def test_process_string_input(self, component_class, default_kwargs):
- """Test processing a simple string input."""
- component = component_class(**default_kwargs)
- input_text = "Hello, this is a test message"
- component.input_value = input_text
- result = await component.message_response()
- assert result.text == input_text
- assert result.sender == MESSAGE_SENDER_AI
- assert result.sender_name == MESSAGE_SENDER_NAME_AI
-
- async def test_process_data_input(self, component_class, default_kwargs):
- """Test processing a Data object input."""
- component = component_class(**default_kwargs)
- data = Data(text="Test data message")
- component.input_value = data
- result = await component.message_response()
- assert result.text == '```json\n{\n "text": "Test data message"\n}\n```'
- assert result.sender == MESSAGE_SENDER_AI
-
- async def test_process_dataframe_input(self, component_class, default_kwargs):
- """Test processing a DataFrame input."""
- component = component_class(**default_kwargs)
- sample_df = DataFrame(data={"col1": ["A", "B"], "col2": [1, 2]})
- component.input_value = sample_df
- result = await component.message_response()
- assert "col1" in result.text
- assert "col2" in result.text
- assert "A" in result.text
- assert "B" in result.text
-
- async def test_process_message_input(self, component_class, default_kwargs):
- """Test processing a Message object input."""
- component = component_class(**default_kwargs)
- message = Message(text="Test message content")
- component.input_value = message
- result = await component.message_response()
- assert result.text == "Test message content"
- assert result.sender == MESSAGE_SENDER_AI
-
- async def test_process_list_input(self, component_class, default_kwargs):
- """Test processing a list of inputs."""
- component = component_class(**default_kwargs)
- input_list = ["First message", Data(text="Second message"), Message(text="Third message")]
- component.input_value = input_list
- result = await component.message_response()
- assert "First message" in result.text
- assert "Second message" in result.text
- assert "Third message" in result.text
-
- async def test_invalid_input(self, component_class, default_kwargs):
- """Test handling of invalid input."""
- component = component_class(**default_kwargs)
- component.input_value = None
- with pytest.raises(ValueError, match="Input data cannot be None"):
- await component.message_response()
-
- component.input_value = 123 # Invalid type
- with pytest.raises(TypeError, match="Expected Data or DataFrame or Message or str, Generator or None"):
- await component.message_response()
diff --git a/src/backend/tests/unit/components/outputs/test_output_components.py b/src/backend/tests/unit/components/outputs/test_output_components.py
deleted file mode 100644
index 63b5928d7070..000000000000
--- a/src/backend/tests/unit/components/outputs/test_output_components.py
+++ /dev/null
@@ -1,24 +0,0 @@
-import pytest
-
-from lfx.components.input_output import TextOutputComponent
-from tests.base import ComponentTestBaseWithoutClient
-
-
-class TestTextOutputComponent(ComponentTestBaseWithoutClient):
- @pytest.fixture
- def component_class(self):
- return TextOutputComponent
-
- @pytest.fixture
- def default_kwargs(self):
- return {
- "input_value": "Hello, world!",
- }
-
- @pytest.fixture
- def file_names_mapping(self):
- return [
- {"version": "1.0.19", "module": "outputs", "file_name": "TextOutput"},
- {"version": "1.1.0", "module": "outputs", "file_name": "text"},
- {"version": "1.1.1", "module": "outputs", "file_name": "text"},
- ]
diff --git a/src/backend/tests/unit/components/processing/test_batch_run_component.py b/src/backend/tests/unit/components/processing/test_batch_run_component.py
deleted file mode 100644
index f61e40739550..000000000000
--- a/src/backend/tests/unit/components/processing/test_batch_run_component.py
+++ /dev/null
@@ -1,258 +0,0 @@
-import re
-
-import pytest
-
-from lfx.components.processing.batch_run import BatchRunComponent
-from lfx.schema import DataFrame
-from tests.base import ComponentTestBaseWithoutClient
-from tests.unit.mock_language_model import MockLanguageModel
-
-
-class TestBatchRunComponent(ComponentTestBaseWithoutClient):
- @pytest.fixture
- def component_class(self):
- """Return the component class to test."""
- return BatchRunComponent
-
- @pytest.fixture
- def default_kwargs(self):
- """Return the default kwargs for the component."""
- return {
- "model": MockLanguageModel(),
- "df": DataFrame({"text": ["Hello"]}),
- "column_name": "text",
- "enable_metadata": True,
- }
-
- @pytest.fixture
- def file_names_mapping(self):
- """Return an empty list since this component doesn't have version-specific files."""
- return []
-
- async def test_successful_batch_run_with_system_message(self):
- # Create test data
- test_df = DataFrame({"text": ["Hello", "World", "Test"]})
-
- component = BatchRunComponent(
- model=MockLanguageModel(),
- system_message="You are a helpful assistant",
- df=test_df,
- column_name="text",
- enable_metadata=True,
- )
-
- # Run the batch process
- result = await component.run_batch()
-
- # Verify the results
- assert isinstance(result, DataFrame)
- assert "text" in result.columns
- assert "model_response" in result.columns
- assert "metadata" in result.columns
- assert len(result) == 3
- assert all(isinstance(resp, str) for resp in result["model_response"])
- # Convert DataFrame to list of dicts for easier testing
- result_dicts = result.to_dict("records")
- # Verify metadata
- assert all(row["metadata"]["has_system_message"] for row in result_dicts)
- assert all(row["metadata"]["processing_status"] == "success" for row in result_dicts)
-
- async def test_batch_run_without_metadata(self):
- test_df = DataFrame({"text": ["Hello", "World"]})
-
- component = BatchRunComponent(
- model=MockLanguageModel(),
- df=test_df,
- column_name="text",
- enable_metadata=False,
- )
-
- result = await component.run_batch()
-
- assert isinstance(result, DataFrame)
- assert len(result) == 2
- assert "metadata" not in result.columns
- assert all(isinstance(resp, str) for resp in result["model_response"])
-
- async def test_batch_run_error_with_metadata(self):
- component = BatchRunComponent(
- model=MockLanguageModel(),
- df="not_a_dataframe", # This will cause a TypeError
- column_name="text",
- enable_metadata=True,
- )
-
- with pytest.raises(TypeError, match=re.escape("Expected DataFrame input, got ")):
- await component.run_batch()
-
- async def test_batch_run_error_without_metadata(self):
- component = BatchRunComponent(
- model=MockLanguageModel(),
- df="not_a_dataframe", # This will cause a TypeError
- column_name="text",
- enable_metadata=False,
- )
-
- with pytest.raises(TypeError, match=re.escape("Expected DataFrame input, got ")):
- await component.run_batch()
-
- async def test_operational_error_with_metadata(self):
- # Create a mock model that raises an AttributeError during processing
- class ErrorModel:
- def with_config(self, *_, **__):
- return self
-
- async def abatch(self, *_):
- msg = "Mock error during batch processing"
- raise AttributeError(msg)
-
- component = BatchRunComponent(
- model=ErrorModel(),
- df=DataFrame({"text": ["test1", "test2"]}),
- column_name="text",
- enable_metadata=True,
- )
-
- result = await component.run_batch()
- assert isinstance(result, DataFrame)
- assert len(result) == 1 # Component returns a single error row
- error_row = result.iloc[0]
- # Verify error metadata
- assert error_row["metadata"]["processing_status"] == "failed"
- assert "Mock error during batch processing" in error_row["metadata"]["error"]
- # Verify base row structure
- assert error_row["text"] == ""
- assert error_row["model_response"] == ""
- assert error_row["batch_index"] == -1
-
- async def test_operational_error_without_metadata(self):
- # Create a mock model that raises an AttributeError during processing
- class ErrorModel:
- def with_config(self, *_, **__):
- return self
-
- async def abatch(self, *_):
- msg = "Mock error during batch processing"
- raise AttributeError(msg)
-
- component = BatchRunComponent(
- model=ErrorModel(),
- df=DataFrame({"text": ["test1", "test2"]}),
- column_name="text",
- enable_metadata=False,
- )
-
- result = await component.run_batch()
- assert isinstance(result, DataFrame)
- assert len(result) == 1 # Component returns a single error row
- error_row = result.iloc[0]
- # Verify no metadata
- assert "metadata" not in error_row
- # Verify base row structure
- assert error_row["text"] == ""
- assert error_row["model_response"] == ""
- assert error_row["batch_index"] == -1
-
- def test_create_base_row(self):
- component = BatchRunComponent()
- row = component._create_base_row(
- original_row={"text_input": "test_input"},
- model_response="test_response",
- batch_index=1,
- )
- assert row["text_input"] == "test_input"
- assert row["model_response"] == "test_response"
- assert row["batch_index"] == 1
-
- def test_add_metadata_success(self):
- component = BatchRunComponent(enable_metadata=True)
-
- # Pass text_input inside the original_row dictionary
- original_row = {"text_input": "test_input"}
- row = component._create_base_row(
- original_row=original_row,
- model_response="test_response",
- batch_index=1,
- )
-
- component._add_metadata(row, success=True, system_msg="Instructions here")
-
- assert "metadata" in row
- assert row["metadata"]["has_system_message"] is True
- assert row["metadata"]["input_length"] == len("test_input")
- assert row["metadata"]["response_length"] == len("test_response")
- assert row["metadata"]["processing_status"] == "success"
-
- def test_add_metadata_failure(self):
- component = BatchRunComponent(enable_metadata=True)
-
- # Provide an empty original_row (it could hold other keys if needed)
- row = component._create_base_row(original_row={}, model_response="", batch_index=1)
-
- # Add metadata simulating a failure
- component._add_metadata(row, success=False, error="Simulated error")
-
- assert "metadata" in row
- assert row["metadata"]["processing_status"] == "failed"
- assert row["metadata"]["error"] == "Simulated error"
-
- def test_metadata_disabled(self):
- component = BatchRunComponent(enable_metadata=False)
-
- # Provide text_input inside the original_row dictionary
- row = component._create_base_row(
- original_row={"text_input": "test"},
- model_response="response",
- batch_index=0,
- )
-
- component._add_metadata(row, success=True, system_msg="test")
-
- # With metadata disabled, the key should not exist
- assert "metadata" not in row
-
- async def test_invalid_column_name(self):
- component = BatchRunComponent(
- model=MockLanguageModel(),
- df=DataFrame({"text": ["Hello"]}),
- column_name="nonexistent_column",
- enable_metadata=True,
- )
-
- with pytest.raises(
- ValueError,
- match=re.escape("Column 'nonexistent_column' not found in the DataFrame. Available columns: text"),
- ):
- await component.run_batch()
-
- async def test_empty_dataframe(self):
- component = BatchRunComponent(
- model=MockLanguageModel(),
- df=DataFrame({"text": []}),
- column_name="text",
- enable_metadata=True,
- )
-
- result = await component.run_batch()
- assert isinstance(result, DataFrame)
- assert len(result) == 0
-
- async def test_non_string_column_conversion(self):
- test_df = DataFrame({"text": [123, 456, 789]}) # Numeric values
-
- component = BatchRunComponent(
- model=MockLanguageModel(),
- df=test_df,
- column_name="text",
- enable_metadata=True,
- )
-
- result = await component.run_batch()
-
- assert isinstance(result, DataFrame)
- assert all(isinstance(text, int) for text in result["text"])
- assert all(
- str(num) in response for num, response in zip(test_df["text"], result["model_response"], strict=False)
- )
- result_dicts = result.to_dict("records")
- assert all(row["metadata"]["processing_status"] == "success" for row in result_dicts)
diff --git a/src/backend/tests/unit/components/processing/test_data_operations_component.py b/src/backend/tests/unit/components/processing/test_data_operations_component.py
deleted file mode 100644
index 28c14a481655..000000000000
--- a/src/backend/tests/unit/components/processing/test_data_operations_component.py
+++ /dev/null
@@ -1,193 +0,0 @@
-import pytest
-
-from lfx.components.processing.data_operations import DataOperationsComponent
-from lfx.schema import Data
-from tests.base import ComponentTestBaseWithoutClient
-
-
-class TestDataOperationsComponent(ComponentTestBaseWithoutClient):
- @pytest.fixture
- def component_class(self):
- """Return the component class to test."""
- return DataOperationsComponent
-
- @pytest.fixture
- def default_kwargs(self):
- """Return the default kwargs for the component."""
- return {
- "data": Data(data={"key1": "value1", "key2": "value2", "key3": "value3"}),
- "actions": [{"name": "Select Keys"}],
- "select_keys_input": ["key1", "key2"],
- }
-
- @pytest.fixture
- def file_names_mapping(self):
- """Return an empty list since this component doesn't have version-specific files."""
- return []
-
- def test_select_keys(self):
- """Test the Select Keys operation."""
- component = DataOperationsComponent(
- data=Data(data={"key1": "value1", "key2": "value2", "key3": "value3"}),
- operations=[{"name": "Select Keys"}],
- select_keys_input=["key1", "key2"],
- )
-
- result = component.as_data()
- assert isinstance(result, Data)
- assert "key1" in result.data
- assert "key2" in result.data
- assert "key3" not in result.data
- assert result.data["key1"] == "value1"
- assert result.data["key2"] == "value2"
-
- def test_remove_keys(self):
- """Test the Remove Keys operation."""
- component = DataOperationsComponent(
- data=Data(data={"key1": "value1", "key2": "value2", "key3": "value3"}),
- operations=[{"name": "Remove Keys"}],
- remove_keys_input=["key3"],
- )
-
- result = component.as_data()
- assert isinstance(result, Data)
- assert "key1" in result.data
- assert "key2" in result.data
- assert "key3" not in result.data
-
- def test_rename_keys(self):
- """Test the Rename Keys operation."""
- component = DataOperationsComponent(
- data=Data(data={"key1": "value1", "key2": "value2"}),
- operations=[{"name": "Rename Keys"}],
- rename_keys_input={"key1": "new_key1"},
- )
-
- result = component.as_data()
- assert isinstance(result, Data)
- assert "new_key1" in result.data
- assert "key1" not in result.data
- assert result.data["new_key1"] == "value1"
-
- def test_literal_eval(self):
- """Test the Literal Eval operation."""
- component = DataOperationsComponent(
- data=Data(data={"list_as_string": "[1, 2, 3]", "dict_as_string": "{'a': 1, 'b': 2}"}),
- operations=[{"name": "Literal Eval"}],
- )
-
- result = component.as_data()
- assert isinstance(result, Data)
- assert isinstance(result.data["list_as_string"], list)
- assert result.data["list_as_string"] == [1, 2, 3]
- assert isinstance(result.data["dict_as_string"], dict)
- assert result.data["dict_as_string"] == {"a": 1, "b": 2}
-
- def test_combine(self):
- """Test the Combine operation."""
- data1 = Data(data={"key1": "value1"})
- data2 = Data(data={"key2": "value2"})
-
- component = DataOperationsComponent(
- data=[data1, data2],
- operations=[{"name": "Combine"}],
- )
-
- result = component.as_data()
- assert isinstance(result, Data)
- assert "key1" in result.data
- assert "key2" in result.data
- assert result.data["key1"] == "value1"
- assert result.data["key2"] == "value2"
-
- def test_combine_with_overlapping_keys(self):
- """Test the Combine operation with overlapping keys."""
- data1 = Data(data={"common_key": "value1", "key1": "value1"})
- data2 = Data(data={"common_key": "value2", "key2": "value2"})
-
- component = DataOperationsComponent(
- data=[data1, data2],
- operations=[{"name": "Combine"}],
- )
-
- result = component.as_data()
- assert isinstance(result, Data)
- assert result.data["common_key"] == ["value1", "value2"] # Combined string values
- assert result.data["key1"] == "value1"
- assert result.data["key2"] == "value2"
-
- def test_append_update(self):
- """Test the Append or Update Data operation."""
- component = DataOperationsComponent(
- data=Data(data={"existing_key": "existing_value"}),
- operations=[{"name": "Append or Update"}],
- append_update_data={"new_key": "new_value", "existing_key": "updated_value"},
- )
-
- result = component.as_data()
- assert isinstance(result, Data)
- assert result.data["existing_key"] == "updated_value"
- assert result.data["new_key"] == "new_value"
-
- def test_filter_values(self):
- """Test the Filter Values operation."""
- nested_data = {
- "items": [
- {"id": 1, "name": "Item 1"},
- {"id": 2, "name": "Item 2"},
- {"id": 3, "name": "Different Item"},
- ]
- }
-
- component = DataOperationsComponent(
- data=Data(data=nested_data),
- operations=[{"name": "Filter Values"}],
- filter_key=["items"],
- filter_values={"name": "Item"},
- operator="contains",
- )
-
- result = component.as_data()
- assert isinstance(result, Data)
- assert len(result.data["items"]) == 3
- assert result.data["items"][0]["id"] == 1
- assert result.data["items"][1]["id"] == 2
-
- def test_no_actions(self):
- """Test behavior when no actions are specified."""
- component = DataOperationsComponent(
- data=Data(data={"key1": "value1"}),
- operations=[],
- )
-
- result = component.as_data()
- assert isinstance(result, Data)
- assert result.data == {}
-
- def test_get_normalized_data(self):
- """Test the get_normalized_data helper method."""
- component = DataOperationsComponent(
- data=Data(data={"key1": "value1"}),
- operations=[],
- )
-
- # Nested dict values pass through unchanged
- component.data = Data(data={"test": {"key2": "value2"}})
- normalized = component.get_normalized_data()
- assert normalized == {"test": {"key2": "value2"}}
-
- # Flat data also passes through unchanged
- component.data = Data(data={"key3": "value3"})
- normalized = component.get_normalized_data()
- assert normalized == {"key3": "value3"}
-
- def test_validate_single_data_with_multiple_data(self):
- """Test that operations that don't support multiple data objects raise an error."""
- component = DataOperationsComponent(
- data=[Data(data={"key1": "value1"}), Data(data={"key2": "value2"})],
- operations=[{"name": "Select Keys"}],
- select_keys_input=["key1"],
- )
-
- with pytest.raises(ValueError, match="Select Keys operation is not supported for multiple data objects"):
- component.as_data()
diff --git a/src/backend/tests/unit/components/processing/test_data_to_dataframe_component.py b/src/backend/tests/unit/components/processing/test_data_to_dataframe_component.py
deleted file mode 100644
index 7aa8cf570c17..000000000000
--- a/src/backend/tests/unit/components/processing/test_data_to_dataframe_component.py
+++ /dev/null
@@ -1,127 +0,0 @@
-import pytest
-
-from lfx.components.processing.data_to_dataframe import DataToDataFrameComponent
-from lfx.schema import Data, DataFrame
-from tests.base import ComponentTestBaseWithoutClient
-
-
-class TestDataToDataFrameComponent(ComponentTestBaseWithoutClient):
- @pytest.fixture
- def component_class(self):
- """Return the component class to test."""
- return DataToDataFrameComponent
-
- @pytest.fixture
- def default_kwargs(self):
- """Return the default kwargs for the component."""
- return {
- "data_list": [
- Data(text="Row 1", data={"field1": "value1", "field2": 1}),
- Data(text="Row 2", data={"field1": "value2", "field2": 2}),
- ]
- }
-
- @pytest.fixture
- def file_names_mapping(self):
- """Return the file names mapping for different versions."""
- # This is a new component, so we return an empty list
- return []
-
- def test_basic_setup(self, component_class, default_kwargs):
- """Test basic component initialization."""
- component = component_class()
- component.set_attributes(default_kwargs)
- assert component.data_list == default_kwargs["data_list"]
-
- def test_build_dataframe_basic(self, component_class, default_kwargs):
- """Test basic DataFrame construction."""
- component = component_class()
- component.set_attributes(default_kwargs)
- result_df = component.build_dataframe()
-
- assert isinstance(result_df, DataFrame)
- assert len(result_df) == 2
- assert list(result_df.columns) == ["field1", "field2", "text"]
- assert result_df["text"].tolist() == ["Row 1", "Row 2"]
- assert result_df["field1"].tolist() == ["value1", "value2"]
- assert result_df["field2"].tolist() == [1, 2]
-
- def test_single_data_input(self, component_class):
- """Test handling single Data object input."""
- single_data = Data(text="Single Row", data={"field1": "value"})
- component = component_class()
- component.set_attributes({"data_list": single_data})
-
- result_df = component.build_dataframe()
-
- assert len(result_df) == 1
- assert result_df["text"].iloc[0] == "Single Row"
- assert result_df["field1"].iloc[0] == "value"
-
- def test_empty_data_list(self, component_class):
- """Test behavior with empty data list."""
- component = component_class()
- component.set_attributes({"data_list": []})
-
- result_df = component.build_dataframe()
-
- assert len(result_df) == 0
-
- def test_data_without_text(self, component_class):
- """Test handling Data objects without text field."""
- data_without_text = [Data(data={"field1": "value1"}), Data(data={"field1": "value2"})]
- component = component_class()
- component.set_attributes({"data_list": data_without_text})
-
- result_df = component.build_dataframe()
-
- assert len(result_df) == 2
- assert "text" not in result_df.columns
- assert result_df["field1"].tolist() == ["value1", "value2"]
-
- def test_data_without_data_dict(self, component_class):
- """Test handling Data objects without data dictionary."""
- data_without_dict = [Data(text="Text 1"), Data(text="Text 2")]
- component = component_class()
- component.set_attributes({"data_list": data_without_dict})
-
- result_df = component.build_dataframe()
-
- assert len(result_df) == 2
- assert list(result_df.columns) == ["text"]
- assert result_df["text"].tolist() == ["Text 1", "Text 2"]
-
- def test_mixed_data_fields(self, component_class):
- """Test handling Data objects with different fields."""
- mixed_data = [
- Data(text="Row 1", data={"field1": "value1", "field2": 1}),
- Data(text="Row 2", data={"field1": "value2", "field3": "extra"}),
- ]
- component = component_class()
- component.set_attributes({"data_list": mixed_data})
-
- result_df = component.build_dataframe()
-
- assert len(result_df) == 2
- assert set(result_df.columns) == {"field1", "field2", "field3", "text"}
- assert result_df["field1"].tolist() == ["value1", "value2"]
- assert result_df["field2"].iloc[1] != result_df["field2"].iloc[1] # Check for NaN using inequality
- assert result_df["field3"].iloc[0] != result_df["field3"].iloc[0] # Check for NaN using inequality
-
- def test_invalid_input_type(self, component_class):
- """Test error handling for invalid input types."""
- invalid_data = [{"not": "a Data object"}]
- component = component_class()
- component.set_attributes({"data_list": invalid_data})
-
- with pytest.raises(TypeError) as exc_info:
- component.build_dataframe()
- assert "Expected Data objects" in str(exc_info.value)
-
- def test_status_update(self, component_class, default_kwargs):
- """Test that status is properly updated."""
- component = component_class()
- component.set_attributes(default_kwargs)
- result = component.build_dataframe()
-
- assert component.status is result # Status should be set to the DataFrame
diff --git a/src/backend/tests/unit/components/processing/test_dataframe_operations.py b/src/backend/tests/unit/components/processing/test_dataframe_operations.py
deleted file mode 100644
index 7f18e065f23b..000000000000
--- a/src/backend/tests/unit/components/processing/test_dataframe_operations.py
+++ /dev/null
@@ -1,427 +0,0 @@
-import pandas as pd
-import pytest
-
-from lfx.components.processing.dataframe_operations import DataFrameOperationsComponent
-from lfx.schema.dataframe import DataFrame
-
-
-@pytest.fixture
-def sample_dataframe():
- """Create a comprehensive sample DataFrame for testing."""
- data = {
- "name": ["John Doe", "Jane Smith", "Bob Johnson", "Alice Brown", "Charlie Wilson"],
- "email": ["john@gmail.com", "jane@yahoo.com", "bob@gmail.com", "alice@hotmail.com", "charlie@outlook.com"],
- "age": [25, 30, 35, 28, 42],
- "salary": [50000, 60000, 70000, 55000, 80000],
- "department": ["IT", "HR", "Finance", "IT", "Marketing"],
- }
- return DataFrame(pd.DataFrame(data))
-
-
-@pytest.fixture
-def component():
- """Create a DataFrameOperationsComponent instance."""
- return DataFrameOperationsComponent()
-
-
-class TestBasicOperations:
- """Test basic DataFrame operations with new SortableListInput format."""
-
- def test_add_column(self, component, sample_dataframe):
- """Test adding a new column to the DataFrame."""
- component.df = sample_dataframe
- component.operation = [{"name": "Add Column", "icon": "plus"}]
- component.new_column_name = "bonus"
- component.new_column_value = 5000
-
- result = component.perform_operation()
-
- assert "bonus" in result.columns
- assert len(result.columns) == 6 # Original 5 + 1 new
- assert all(result["bonus"] == 5000) # All values should be 5000
-
- def test_drop_column(self, component, sample_dataframe):
- """Test dropping a column from the DataFrame."""
- component.df = sample_dataframe
- component.operation = [{"name": "Drop Column", "icon": "minus"}]
- component.column_name = "salary"
-
- result = component.perform_operation()
-
- assert "salary" not in result.columns
- assert len(result.columns) == 4 # Original 5 - 1 dropped
-
- def test_sort_ascending(self, component, sample_dataframe):
- """Test sorting DataFrame in ascending order."""
- component.df = sample_dataframe
- component.operation = [{"name": "Sort", "icon": "arrow-up-down"}]
- component.column_name = "age"
- component.ascending = True
-
- result = component.perform_operation()
-
- ages = result["age"].tolist()
- assert ages == sorted(ages) # Should be sorted ascending
- assert ages[0] == 25 # Youngest first
-
- def test_sort_descending(self, component, sample_dataframe):
- """Test sorting DataFrame in descending order."""
- component.df = sample_dataframe
- component.operation = [{"name": "Sort", "icon": "arrow-up-down"}]
- component.column_name = "salary"
- component.ascending = False
-
- result = component.perform_operation()
-
- salaries = result["salary"].tolist()
- assert salaries == sorted(salaries, reverse=True) # Should be sorted descending
- assert salaries[0] == 80000 # Highest first
-
- def test_head_operation(self, component, sample_dataframe):
- """Test getting first N rows."""
- component.df = sample_dataframe
- component.operation = [{"name": "Head", "icon": "arrow-up"}]
- component.num_rows = 2
-
- result = component.perform_operation()
-
- assert len(result) == 2
- assert result.iloc[0]["name"] == "John Doe" # First row
-
- def test_tail_operation(self, component, sample_dataframe):
- """Test getting last N rows."""
- component.df = sample_dataframe
- component.operation = [{"name": "Tail", "icon": "arrow-down"}]
- component.num_rows = 2
-
- result = component.perform_operation()
-
- assert len(result) == 2
- assert result.iloc[-1]["name"] == "Charlie Wilson" # Last row
-
- def test_rename_column(self, component, sample_dataframe):
- """Test renaming a column."""
- component.df = sample_dataframe
- component.operation = [{"name": "Rename Column", "icon": "pencil"}]
- component.column_name = "name"
- component.new_column_name = "full_name"
-
- result = component.perform_operation()
-
- assert "full_name" in result.columns
- assert "name" not in result.columns
- assert result.iloc[0]["full_name"] == "John Doe"
-
-
-class TestFilterOperations:
- """Test all filter operations with different operators."""
-
- def test_filter_equals(self, component, sample_dataframe):
- """Test exact match filtering."""
- component.df = sample_dataframe
- component.operation = [{"name": "Filter", "icon": "filter"}]
- component.column_name = "department"
- component.filter_operator = "equals"
- component.filter_value = "IT"
-
- result = component.perform_operation()
-
- assert len(result) == 2 # John and Alice work in IT
- assert all(result["department"] == "IT")
-
- def test_filter_not_equals(self, component, sample_dataframe):
- """Test exclusion filtering."""
- component.df = sample_dataframe
- component.operation = [{"name": "Filter", "icon": "filter"}]
- component.column_name = "department"
- component.filter_operator = "not equals"
- component.filter_value = "IT"
-
- result = component.perform_operation()
-
- assert len(result) == 3 # Jane, Bob, Charlie not in IT
- assert all(result["department"] != "IT")
-
- def test_filter_contains(self, component, sample_dataframe):
- """Test partial string matching - THE MAIN FEATURE WE ADDED!"""
- component.df = sample_dataframe
- component.operation = [{"name": "Filter", "icon": "filter"}]
- component.column_name = "email"
- component.filter_operator = "contains"
- component.filter_value = "gmail"
-
- result = component.perform_operation()
-
- assert len(result) == 2 # John and Bob have gmail
- assert all("gmail" in email for email in result["email"])
-
- def test_filter_starts_with(self, component, sample_dataframe):
- """Test prefix matching."""
- component.df = sample_dataframe
- component.operation = [{"name": "Filter", "icon": "filter"}]
- component.column_name = "name"
- component.filter_operator = "starts with"
- component.filter_value = "J"
-
- result = component.perform_operation()
-
- assert len(result) == 2 # John and Jane start with J
- assert all(name.startswith("J") for name in result["name"])
-
- def test_filter_ends_with(self, component, sample_dataframe):
- """Test suffix matching."""
- component.df = sample_dataframe
- component.operation = [{"name": "Filter", "icon": "filter"}]
- component.column_name = "email"
- component.filter_operator = "ends with"
- component.filter_value = ".com"
-
- result = component.perform_operation()
-
- assert len(result) == 5 # All emails end with .com
- assert all(email.endswith(".com") for email in result["email"])
-
- def test_filter_greater_than(self, component, sample_dataframe):
- """Test numeric greater than comparison."""
- component.df = sample_dataframe
- component.operation = [{"name": "Filter", "icon": "filter"}]
- component.column_name = "age"
- component.filter_operator = "greater than"
- component.filter_value = "30"
-
- result = component.perform_operation()
-
- assert len(result) == 2 # Bob(35) and Charlie(42)
- assert all(age > 30 for age in result["age"])
-
- def test_filter_less_than(self, component, sample_dataframe):
- """Test numeric less than comparison."""
- component.df = sample_dataframe
- component.operation = [{"name": "Filter", "icon": "filter"}]
- component.column_name = "salary"
- component.filter_operator = "less than"
- component.filter_value = "60000"
-
- result = component.perform_operation()
-
- assert len(result) == 2 # John(50k) and Alice(55k)
- assert all(salary < 60000 for salary in result["salary"])
-
-
-class TestEdgeCases:
- """Test edge cases and error conditions."""
-
- def test_empty_selection(self, component, sample_dataframe):
- """Test when no operation is selected (deselection)."""
- component.df = sample_dataframe
- component.operation = [] # Empty selection
-
- result = component.perform_operation()
-
- # Should return original DataFrame unchanged
- assert len(result) == len(sample_dataframe)
- assert list(result.columns) == list(sample_dataframe.columns)
-
- def test_invalid_operation_format(self, component, sample_dataframe):
- """Test with invalid operation format."""
- component.df = sample_dataframe
- component.operation = "Invalid String" # Not list format
-
- result = component.perform_operation()
-
- # Should return original DataFrame
- assert len(result) == len(sample_dataframe)
-
- def test_empty_dataframe(self, component):
- """Test operations on empty DataFrame."""
- component.df = DataFrame(pd.DataFrame())
- component.operation = [{"name": "Head", "icon": "arrow-up"}]
- component.num_rows = 3
-
- result = component.perform_operation()
-
- assert result.empty
-
- def test_non_existent_column(self, component, sample_dataframe):
- """Test operation on non-existent column."""
- component.df = sample_dataframe
- component.operation = [{"name": "Drop Column", "icon": "minus"}]
- component.column_name = "non_existent_column"
-
- with pytest.raises(KeyError):
- component.perform_operation()
-
- def test_filter_no_matches(self, component, sample_dataframe):
- """Test filter that returns no matches."""
- component.df = sample_dataframe
- component.operation = [{"name": "Filter", "icon": "filter"}]
- component.column_name = "department"
- component.filter_operator = "equals"
- component.filter_value = "NonExistentDepartment"
-
- result = component.perform_operation()
-
- assert len(result) == 0 # No matches
- assert list(result.columns) == list(sample_dataframe.columns) # Columns preserved
-
-
-class TestDynamicUI:
- """Test dynamic UI behavior with update_build_config."""
-
- def test_filter_fields_show(self, component):
- """Test that filter fields show when Filter is selected."""
- build_config = {
- "column_name": {"show": False},
- "filter_value": {"show": False},
- "filter_operator": {"show": False},
- "ascending": {"show": False},
- "new_column_name": {"show": False},
- "new_column_value": {"show": False},
- "columns_to_select": {"show": False},
- "num_rows": {"show": False},
- "replace_value": {"show": False},
- "replacement_value": {"show": False},
- }
-
- # Select Filter operation
- updated_config = component.update_build_config(
- build_config, [{"name": "Filter", "icon": "filter"}], "operation"
- )
-
- assert updated_config["column_name"]["show"] is True
- assert updated_config["filter_value"]["show"] is True
- assert updated_config["filter_operator"]["show"] is True
- assert updated_config["ascending"]["show"] is False # Not for filter
-
- def test_sort_fields_show(self, component):
- """Test that sort fields show when Sort is selected."""
- build_config = {
- "column_name": {"show": False},
- "filter_value": {"show": False},
- "filter_operator": {"show": False},
- "ascending": {"show": False},
- "new_column_name": {"show": False},
- "new_column_value": {"show": False},
- "columns_to_select": {"show": False},
- "num_rows": {"show": False},
- "replace_value": {"show": False},
- "replacement_value": {"show": False},
- }
-
- # Select Sort operation
- updated_config = component.update_build_config(
- build_config, [{"name": "Sort", "icon": "arrow-up-down"}], "operation"
- )
-
- assert updated_config["column_name"]["show"] is True
- assert updated_config["ascending"]["show"] is True
- assert updated_config["filter_value"]["show"] is False # Not for sort
- assert updated_config["filter_operator"]["show"] is False # Not for sort
-
- def test_empty_selection_hides_fields(self, component):
- """Test that all fields hide when operation is deselected."""
- build_config = {
- "column_name": {"show": True},
- "filter_value": {"show": True},
- "filter_operator": {"show": True},
- "ascending": {"show": True},
- "new_column_name": {"show": True},
- "new_column_value": {"show": True},
- "columns_to_select": {"show": True},
- "num_rows": {"show": True},
- "replace_value": {"show": True},
- "replacement_value": {"show": True},
- }
-
- # Deselect operation (empty list)
- updated_config = component.update_build_config(
- build_config,
- [], # Empty selection
- "operation",
- )
-
- # All fields should be hidden
- assert updated_config["column_name"]["show"] is False
- assert updated_config["filter_value"]["show"] is False
- assert updated_config["filter_operator"]["show"] is False
- assert updated_config["ascending"]["show"] is False
- assert updated_config["new_column_name"]["show"] is False
- assert updated_config["new_column_value"]["show"] is False
- assert updated_config["columns_to_select"]["show"] is False
- assert updated_config["num_rows"]["show"] is False
- assert updated_config["replace_value"]["show"] is False
- assert updated_config["replacement_value"]["show"] is False
-
-
-class TestDataTypes:
- """Test different data types and conversions."""
-
- def test_numeric_string_conversion(self, component):
- """Test that string numbers are properly converted for comparison."""
- data = pd.DataFrame({"values": [10, 20, 30, 40, 50], "names": ["a", "b", "c", "d", "e"]})
-
- component.df = DataFrame(data)
- component.operation = [{"name": "Filter", "icon": "filter"}]
- component.column_name = "values"
- component.filter_operator = "greater than"
- component.filter_value = "25" # String input
-
- result = component.perform_operation()
-
- assert len(result) == 3 # 30, 40, 50 are > 25
- assert all(val > 25 for val in result["values"])
-
- def test_mixed_data_types(self, component):
- """Test filtering on mixed data types."""
- data = pd.DataFrame({"mixed": ["text", 123, "more_text", 456], "id": [1, 2, 3, 4]})
-
- component.df = DataFrame(data)
- component.operation = [{"name": "Filter", "icon": "filter"}]
- component.column_name = "mixed"
- component.filter_operator = "contains"
- component.filter_value = "text"
-
- result = component.perform_operation()
-
- assert len(result) == 2 # "text" and "more_text"
-
-
-# Integration test to verify all operators work together
-def test_all_filter_operators_comprehensive():
- """Comprehensive test of all filter operators on the same dataset."""
- data = pd.DataFrame(
- {
- "name": ["John", "Jane", "Bob", "Alice"],
- "email": ["john@gmail.com", "jane@yahoo.com", "bob@gmail.com", "alice@test.org"],
- "age": [25, 30, 35, 28],
- "score": [85.5, 92.0, 78.5, 88.0],
- }
- )
-
- component = DataFrameOperationsComponent()
- component.df = DataFrame(data)
- component.operation = [{"name": "Filter", "icon": "filter"}]
-
- # Test all operators
- test_cases = [
- ("email", "contains", "gmail", 2), # John, Bob
- ("name", "starts with", "J", 2), # John, Jane
- ("email", "ends with", ".com", 3), # All except Alice
- ("age", "greater than", "28", 2), # Jane, Bob
- ("score", "less than", "90", 3), # John, Bob, Alice
- ("name", "equals", "John", 1), # Only John
- ("email", "not equals", "jane@yahoo.com", 3), # All except Jane
- ]
-
- for column, operator, value, expected_count in test_cases:
- component.column_name = column
- component.filter_operator = operator
- component.filter_value = value
-
- result = component.perform_operation()
-
- assert len(result) == expected_count, f"Failed for {operator} on {column} with value {value}"
-
-
-if __name__ == "__main__":
- pytest.main([__file__])
diff --git a/src/backend/tests/unit/components/processing/test_lambda_filter.py b/src/backend/tests/unit/components/processing/test_lambda_filter.py
deleted file mode 100644
index 86b091bc62bc..000000000000
--- a/src/backend/tests/unit/components/processing/test_lambda_filter.py
+++ /dev/null
@@ -1,123 +0,0 @@
-from unittest.mock import AsyncMock
-
-import pytest
-
-from lfx.components.processing.lambda_filter import LambdaFilterComponent
-from lfx.schema import Data
-from tests.base import ComponentTestBaseWithoutClient
-
-
-class TestLambdaFilterComponent(ComponentTestBaseWithoutClient):
- @pytest.fixture
- def component_class(self):
- return LambdaFilterComponent
-
- @pytest.fixture
- def default_kwargs(self):
- return {
- "data": [Data(data={"items": [{"name": "test1", "value": 10}, {"name": "test2", "value": 20}]})],
- "llm": AsyncMock(),
- "filter_instruction": "Filter items with value greater than 15",
- "sample_size": 1000,
- "max_size": 30000,
- }
-
- @pytest.fixture
- def file_names_mapping(self):
- return []
-
- async def test_successful_lambda_generation(self, component_class, default_kwargs):
- component = await self.component_setup(component_class, default_kwargs)
- component.llm.ainvoke.return_value.content = "lambda x: [item for item in x['items'] if item['value'] > 15]"
-
- # Execute filter
- result = await component.filter_data()
-
- # Assertions
- assert isinstance(result, list)
- assert len(result) == 1
- assert result[0].name == "test2"
- assert result[0].value == 20
-
- async def test_invalid_lambda_response(self, component_class, default_kwargs):
- component = await self.component_setup(component_class, default_kwargs)
- component.llm.ainvoke.return_value.content = "invalid lambda syntax"
-
- # Test exception handling
- with pytest.raises(ValueError, match="Could not find lambda in response"):
- await component.filter_data()
-
- async def test_lambda_with_large_dataset(self, component_class, default_kwargs):
- large_data = {"items": [{"name": f"test{i}", "value": i} for i in range(2000)]}
- default_kwargs["data"] = [Data(data=large_data)]
- default_kwargs["filter_instruction"] = "Filter items with value greater than 1500"
- component = await self.component_setup(component_class, default_kwargs)
- component.llm.ainvoke.return_value.content = "lambda x: [item for item in x['items'] if item['value'] > 1500]"
-
- # Execute filter
- result = await component.filter_data()
-
- # Assertions
- assert isinstance(result, list)
- assert len(result) == 499 # Items with value from 1501 to 1999
- assert all(item.value > 1500 for item in result)
-
- async def test_lambda_with_complex_data_structure(self, component_class, default_kwargs):
- complex_data = {
- "categories": {
- "A": [{"id": 1, "score": 90}, {"id": 2, "score": 85}],
- "B": [{"id": 3, "score": 95}, {"id": 4, "score": 88}],
- }
- }
- default_kwargs["data"] = [Data(data=complex_data)]
- default_kwargs["filter_instruction"] = "Filter items with score greater than 90"
- component = await self.component_setup(component_class, default_kwargs)
- component.llm.ainvoke.return_value.content = (
- "lambda x: [item for cat in x['categories'].values() for item in cat if item['score'] > 90]"
- )
-
- # Execute filter
- result = await component.filter_data()
-
- # Assertions
- assert isinstance(result, list)
- assert len(result) == 1
- assert result[0].id == 3
- assert result[0].score == 95
-
- def test_validate_lambda(self, component_class):
- component = component_class()
-
- # Valid lambda
- valid_lambda = "lambda x: x + 1"
- assert component._validate_lambda(valid_lambda) is True
-
- # Invalid lambda: missing 'lambda'
- invalid_lambda_1 = "x: x + 1"
- assert component._validate_lambda(invalid_lambda_1) is False
-
- # Invalid lambda: missing ':'
- invalid_lambda_2 = "lambda x x + 1"
- assert component._validate_lambda(invalid_lambda_2) is False
-
- def test_get_data_structure(self, component_class):
- component = component_class()
- test_data = {
- "string": "test",
- "number": 42,
- "list": [1, 2, 3],
- "dict": {"key": "value"},
- "nested": {"a": [{"b": 1}]},
- }
-
- structure = component.get_data_structure(test_data)
-
- # Assertions - each value should have a 'structure' key
- assert structure["string"]["structure"] == "str", structure
- assert structure["number"]["structure"] == "int", structure
- assert structure["list"]["structure"] == "list(int)[size=3]", structure
- assert isinstance(structure["dict"]["structure"], dict), structure
- assert structure["dict"]["structure"]["key"] == "str", structure
- assert isinstance(structure["nested"]["structure"], dict), structure
- assert "a" in structure["nested"]["structure"], structure
- assert structure["nested"]["structure"]["a"] == 'list(dict)[size=1], sample: {"b": "int"}', structure
diff --git a/src/backend/tests/unit/components/processing/test_parse_dataframe_component.py b/src/backend/tests/unit/components/processing/test_parse_dataframe_component.py
deleted file mode 100644
index 9376c7db38b2..000000000000
--- a/src/backend/tests/unit/components/processing/test_parse_dataframe_component.py
+++ /dev/null
@@ -1,142 +0,0 @@
-import asyncio
-
-import pandas as pd
-import pytest
-
-from lfx.components.processing.parse_dataframe import ParseDataFrameComponent
-from lfx.schema import DataFrame
-from lfx.schema.message import Message
-from tests.base import ComponentTestBaseWithoutClient
-
-
-class TestParseDataFrameComponent(ComponentTestBaseWithoutClient):
- @pytest.fixture
- def component_class(self):
- """Return the component class to test."""
- return ParseDataFrameComponent
-
- @pytest.fixture
- def default_kwargs(self):
- """Return the default kwargs for the component."""
- return {"df": DataFrame({"text": ["Hello"]}), "template": "{text}", "sep": "\n"}
-
- @pytest.fixture
- def file_names_mapping(self):
- """Return an empty list since this component doesn't have version-specific files."""
- return []
-
- def test_successful_parse_with_default_template(self):
- # Create test data
- test_df = DataFrame({"text": ["Hello", "World", "Test"]})
-
- component = ParseDataFrameComponent(df=test_df, template="{text}", sep="\n")
-
- # Run the parse process
- result = component.parse_data()
-
- # Verify the results
- assert isinstance(result, Message)
- assert result.text == "Hello\nWorld\nTest"
- assert component.status == "Hello\nWorld\nTest"
-
- def test_parse_with_custom_template(self):
- test_df = DataFrame({"name": ["John", "Jane"], "age": [30, 25]})
-
- component = ParseDataFrameComponent(df=test_df, template="Name: {name}, Age: {age}", sep=" | ")
-
- result = component.parse_data()
-
- assert isinstance(result, Message)
- assert result.text == "Name: John, Age: 30 | Name: Jane, Age: 25"
-
- def test_parse_with_custom_separator(self):
- test_df = DataFrame({"text": ["Hello", "World"]})
-
- component = ParseDataFrameComponent(df=test_df, template="{text}", sep=" --- ")
-
- result = component.parse_data()
-
- assert isinstance(result, Message)
- assert result.text == "Hello --- World"
-
- def test_empty_dataframe(self):
- component = ParseDataFrameComponent(df=DataFrame({"text": []}), template="{text}", sep="\n")
-
- result = component.parse_data()
- assert isinstance(result, Message)
- assert result.text == ""
-
- def test_invalid_template_keys(self):
- component = ParseDataFrameComponent(
- df=DataFrame({"text": ["Hello"]}), template="{nonexistent_column}", sep="\n"
- )
-
- with pytest.raises(KeyError):
- component.parse_data()
-
- def test_multiple_column_template(self):
- test_df = DataFrame({"col1": ["A", "B"], "col2": [1, 2], "col3": ["X", "Y"]})
-
- component = ParseDataFrameComponent(df=test_df, template="{col1}-{col2}-{col3}", sep=", ")
-
- result = component.parse_data()
- assert isinstance(result, Message)
- assert result.text == "A-1-X, B-2-Y"
-
- @pytest.mark.asyncio
- async def test_async_invocation(self, component_class, default_kwargs):
- """Verify that ParseDataFrameComponent can be called in an async context."""
- component = component_class(**default_kwargs)
- # Use asyncio.to_thread to invoke the parse_data method in a thread pool
- result = await asyncio.to_thread(component.parse_data)
- assert isinstance(result, Message)
-
- def test_various_data_types(self, component_class):
- """Test that the component correctly formats differing data types."""
- test_dataframe = DataFrame(
- {
- "string_col": ["A", "B"],
- "int_col": [1, 2],
- "bool_col": [True, False],
- "time_col": pd.to_datetime(["2023-01-01", "2023-01-02"]),
- }
- )
- template = "{string_col}-{int_col}-{bool_col}-{time_col}"
- component = component_class(df=test_dataframe, template=template, sep=" | ")
- result = component.parse_data()
- assert isinstance(result, Message)
- # The datetime renders with a time suffix, so just check the first row's combined prefix
- assert "A-1-True-2023-01-01" in result.text
-
- def test_nan_values(self, component_class):
- """Test how the component handles missing/NaN values in the DataFrame."""
- test_dataframe = DataFrame(
- {
- "col1": ["Hello", None],
- "col2": [10, float("nan")],
- }
- )
- template = "{col1}-{col2}"
- component = component_class(df=test_dataframe, template=template, sep="\n")
- result = component.parse_data()
- # None typically renders as "None" and NaN as "nan" in the joined text;
- # the exact representation depends on how pandas formats missing values.
- assert isinstance(result, Message)
- assert "Hello-10" in result.text
-
- def test_large_dataframe(self, component_class):
- """Test performance and correctness on a relatively large DataFrame."""
- data = {
- "col": [f"Row{i}" for i in range(10000)], # 10k rows
- }
- large_dataframe = DataFrame(data)
- component = component_class(df=large_dataframe, template="{col}", sep=", ")
- result = component.parse_data()
- assert isinstance(result, Message)
- # Check the length of the result isn't zero, ensuring it didn't fail
- assert len(result.text) > 0
- # Optionally, you can assert the result includes a substring from the middle
- assert "Row5000" in result.text
diff --git a/src/backend/tests/unit/components/processing/test_parser_component.py b/src/backend/tests/unit/components/processing/test_parser_component.py
deleted file mode 100644
index f4b8d319df71..000000000000
--- a/src/backend/tests/unit/components/processing/test_parser_component.py
+++ /dev/null
@@ -1,212 +0,0 @@
-import pytest
-
-from lfx.components.processing.parser import ParserComponent
-from lfx.schema import Data, DataFrame
-from lfx.schema.message import Message
-from tests.base import ComponentTestBaseWithoutClient
-
-
-class TestParserComponent(ComponentTestBaseWithoutClient):
- @pytest.fixture
- def component_class(self):
- """Return the component class to test."""
- return ParserComponent
-
- @pytest.fixture
- def default_kwargs(self):
- """Return the default kwargs for the component."""
- return {
- "input_data": DataFrame({"Name": ["John"], "Age": [30], "Country": ["USA"]}),
- "pattern": "Name: {Name}, Age: {Age}, Country: {Country}",
- "sep": "\n",
- "stringify": False,
- "clean_data": False,
- }
-
- @pytest.fixture
- def file_names_mapping(self):
- """Return an empty list since this component doesn't have version-specific files."""
- return []
-
- def test_parse_dataframe(self, component_class, default_kwargs):
- # Arrange
- component = component_class(**default_kwargs)
-
- # Act
- result = component.parse_combined_text()
-
- # Assert
- assert isinstance(result, Message)
- assert result.text == "Name: John, Age: 30, Country: USA"
-
- def test_parse_data_object(self, component_class):
- # Arrange
- data = Data(text="Hello World")
- kwargs = {
- "input_data": data,
- "pattern": "text: {text}",
- "sep": "\n",
- "stringify": False,
- }
- component = component_class(**kwargs)
-
- # Act
- result = component.parse_combined_text()
-
- # Assert
- assert isinstance(result, Message)
- assert result.text == "text: Hello World"
-
- def test_stringify_dataframe(self, component_class):
- # Arrange
- data_frame = DataFrame({"Name": ["John", "Jane"], "Age": [30, 25]})
- kwargs = {
- "input_data": data_frame,
- "mode": "Stringify",
- "clean_data": False,
- }
- component = component_class(**kwargs)
-
- # Act
- result = component.parse_combined_text()
-
- # Assert
- assert isinstance(result, Message)
- assert "| Name | Age |" in result.text
- assert "| John | 30 |" in result.text
- assert "| Jane | 25 |" in result.text
-
- def test_stringify_data_object(self, component_class):
- # Arrange
- data = Data(text="Hello\nWorld\nMultiline\nText")
- kwargs = {
- "input_data": data,
- "stringify": True,
- "clean_data": True,
- }
- component = component_class(**kwargs)
-
- # Act
- result = component.parse_combined_text()
-
- # Assert
- assert isinstance(result, Message)
- assert "Hello" in result.text
- assert "World" in result.text
- assert "Multiline" in result.text
- assert "Text" in result.text
-
- def test_stringify_message_object(self, component_class):
- # Arrange
- message = Message(text="Test message content")
- kwargs = {
- "input_data": message,
- "mode": "Stringify",
- }
- component = component_class(**kwargs)
-
- # Act
- result = component.parse_combined_text()
-
- # Assert
- assert isinstance(result, Message)
- assert result.text == "Test message content"
-
- def test_clean_data_with_stringify(self, component_class):
- # Arrange
- data_frame = DataFrame(
- {"Name": ["John", "Jane\n", "\nBob"], "Age": [30, None, 25], "Notes": ["Good\n\nPerson", "", "Nice\n"]}
- )
- kwargs = {
- "input_data": data_frame,
- "mode": "Stringify",
- "clean_data": True,
- }
- component = component_class(**kwargs)
-
- # Act
- result = component.parse_combined_text()
-
- # Assert
- assert isinstance(result, Message)
- # Check for table structure
- assert "| Name" in result.text
- assert "| Age" in result.text
- assert "| Notes" in result.text
- # Check for cleaned data
- assert "| John" in result.text
- assert "| Jane" in result.text
- assert "| Bob" in result.text
- assert "| Good" in result.text
- assert "| Person" in result.text
- assert "| Nice" in result.text
- # Verify data is cleaned
- assert "Jane\n" not in result.text
- assert "\nBob" not in result.text
- assert "Good\n\nPerson" not in result.text
- assert "Nice\n" not in result.text
-
- def test_invalid_input_type(self, component_class):
- # Arrange
- kwargs = {
- "input_data": 123, # Invalid input type
- "pattern": "{value}",
- "sep": "\n",
- }
- component = component_class(**kwargs)
-
- # Act & Assert
- with pytest.raises(ValueError, match="Unsupported input type: . Expected DataFrame or Data."):
- component.parse_combined_text()
-
- def test_none_input(self, component_class):
- # Arrange
- kwargs = {
- "input_data": None,
- "pattern": "{value}",
- "sep": "\n",
- }
- component = component_class(**kwargs)
-
- # Act & Assert
- with pytest.raises(ValueError, match="Unsupported input type: . Expected DataFrame or Data."):
- component.parse_combined_text()
-
- def test_invalid_template(self, component_class):
- # Arrange
- data_frame = DataFrame({"Name": ["John"]})
- kwargs = {
- "input_data": data_frame,
- "pattern": "{InvalidColumn}", # Invalid column name
- "sep": "\n",
- "stringify": False,
- }
- component = component_class(**kwargs)
-
- # Act & Assert
- with pytest.raises(KeyError):
- component.parse_combined_text()
-
- def test_multiple_rows_with_custom_separator(self, component_class):
- # Arrange
- data_frame = DataFrame(
- {
- "Name": ["John", "Jane", "Bob"],
- "Age": [30, 25, 35],
- }
- )
- kwargs = {
- "input_data": data_frame,
- "pattern": "{Name} is {Age} years old",
- "sep": " | ",
- "mode": "Parser",
- }
- component = component_class(**kwargs)
-
- # Act
- result = component.parse_combined_text()
-
- # Assert
- assert isinstance(result, Message)
- expected = "John is 30 years old | Jane is 25 years old | Bob is 35 years old"
- assert result.text == expected
diff --git a/src/backend/tests/unit/components/processing/test_regex_component.py b/src/backend/tests/unit/components/processing/test_regex_component.py
deleted file mode 100644
index f44be27dec96..000000000000
--- a/src/backend/tests/unit/components/processing/test_regex_component.py
+++ /dev/null
@@ -1,95 +0,0 @@
-import pytest
-
-from lfx.components.processing.regex import RegexExtractorComponent
-from lfx.schema import Data
-from lfx.schema.message import Message
-from tests.base import ComponentTestBaseWithoutClient
-
-
-class TestRegexExtractorComponent(ComponentTestBaseWithoutClient):
- @pytest.fixture
- def component_class(self):
- """Return the component class to test."""
- return RegexExtractorComponent
-
- @pytest.fixture
- def default_kwargs(self):
- """Return the default kwargs for the component."""
- return {
- "input_text": "Contact us at test@example.com",
- "pattern": r"\b\w+@\w+\.\w+\b",
- }
-
- @pytest.fixture
- def file_names_mapping(self):
- """Return an empty list since this component doesn't have version-specific files."""
- return []
-
- def test_successful_regex_extraction(self):
- # Test with email pattern
- component = RegexExtractorComponent(
- input_text="Contact us at test@example.com or support@test.com", pattern=r"\b\w+@\w+\.\w+\b"
- )
-
- result = component.extract_matches()
- assert isinstance(result, list)
- assert all(isinstance(item, Data) for item in result)
- assert len(result) == 2
- assert result[0].data["match"] == "test@example.com"
- assert result[1].data["match"] == "support@test.com"
-
- def test_no_matches_found(self):
- # Test with pattern that won't match
- component = RegexExtractorComponent(input_text="No email addresses here", pattern=r"\b\w+@\w+\.\w+\b")
-
- result = component.extract_matches()
- assert isinstance(result, list)
- assert len(result) == 0 # The implementation returns an empty list when no matches are found
-
- def test_invalid_regex_pattern(self):
- # Test with invalid regex pattern
- component = RegexExtractorComponent(
- input_text="Some text",
- pattern="[", # Invalid regex pattern
- )
-
- result = component.extract_matches()
- assert isinstance(result, list)
- assert len(result) == 1
- assert "error" in result[0].data
- assert "Invalid regex pattern" in result[0].data["error"]
-
- def test_empty_input_text(self):
- # Test with empty input
- component = RegexExtractorComponent(input_text="", pattern=r"\b\w+@\w+\.\w+\b")
-
- result = component.extract_matches()
- assert isinstance(result, list)
- assert len(result) == 0 # The implementation returns an empty list when input is empty
-
- def test_get_matches_text_output(self):
- # Test the text output method
- component = RegexExtractorComponent(input_text="Contact: test@example.com", pattern=r"\b\w+@\w+\.\w+\b")
-
- result = component.get_matches_text()
- assert isinstance(result, Message)
- assert result.text == "test@example.com"
-
- def test_get_matches_text_no_matches(self):
- # Test text output with no matches
- component = RegexExtractorComponent(input_text="No email addresses", pattern=r"\b\w+@\w+\.\w+\b")
-
- result = component.get_matches_text()
- assert isinstance(result, Message)
- assert result.text == "No matches found"
-
- def test_get_matches_text_invalid_pattern(self):
- # Test text output with invalid pattern
- component = RegexExtractorComponent(
- input_text="Some text",
- pattern="[", # Invalid regex pattern
- )
-
- result = component.get_matches_text()
- assert isinstance(result, Message)
- assert "Invalid regex pattern" in result.text
diff --git a/src/backend/tests/unit/components/processing/test_save_file_component.py b/src/backend/tests/unit/components/processing/test_save_file_component.py
deleted file mode 100644
index 1963e16f952a..000000000000
--- a/src/backend/tests/unit/components/processing/test_save_file_component.py
+++ /dev/null
@@ -1,240 +0,0 @@
-import json
-from pathlib import Path
-from unittest.mock import MagicMock, patch
-
-import pandas as pd
-import pytest
-
-from lfx.components.processing.save_file import SaveToFileComponent
-from lfx.schema import Data, Message
-from tests.base import ComponentTestBaseWithoutClient
-
-# TODO: Re-enable this test when the SaveToFileComponent is ready for use.
-pytestmark = pytest.mark.skip(reason="Temporarily disabled")
-
-
-class TestSaveToFileComponent(ComponentTestBaseWithoutClient):
- @pytest.fixture(autouse=True)
- def setup_and_teardown(self):
- """Setup and teardown for each test."""
- # Setup
- test_files = [
- "./test_output.csv",
- "./test_output.xlsx",
- "./test_output.json",
- "./test_output.md",
- "./test_output.txt",
- ]
- # Teardown
- yield
- # Delete test files after each test
- for file_path in test_files:
- path = Path(file_path)
- if path.exists():
- path.unlink()
-
- @pytest.fixture
- def component_class(self):
- """Return the component class to test."""
- return SaveToFileComponent
-
- @pytest.fixture
- def default_kwargs(self):
- """Return the default kwargs for the component."""
- sample_df = pd.DataFrame([{"col1": 1, "col2": "a"}, {"col1": 2, "col2": "b"}])
- return {"input_type": "DataFrame", "df": sample_df, "file_format": "csv", "file_path": "./test_output.csv"}
-
- @pytest.fixture
- def file_names_mapping(self):
- """Return the file names mapping for different versions."""
- return [] # New component
-
- def test_basic_setup(self, component_class, default_kwargs):
- """Test basic component initialization."""
- component = component_class()
- component.set_attributes(default_kwargs)
- assert component.input_type == "DataFrame"
- assert component.file_format == "csv"
- assert component.file_path == "./test_output.csv"
-
- def test_update_build_config_dataframe(self, component_class):
- """Test build config update for DataFrame input type."""
- component = component_class()
- build_config = {
- "df": {"show": False},
- "data": {"show": False},
- "message": {"show": False},
- "file_format": {"options": []},
- }
-
- updated_config = component.update_build_config(build_config, "DataFrame", "input_type")
-
- assert updated_config["df"]["show"] is True
- assert updated_config["data"]["show"] is False
- assert updated_config["message"]["show"] is False
- assert set(updated_config["file_format"]["options"]) == set(component.DATA_FORMAT_CHOICES)
-
- def test_save_message(self, component_class):
- """Test saving Message to different formats."""
- test_cases = [
- ("txt", "Test message"),
- ("json", json.dumps({"message": "Test message"}, indent=2)),
- ("markdown", "**Message:**\n\nTest message"),
- ]
-
- for fmt, expected_content in test_cases:
- mock_file = MagicMock()
- mock_parent = MagicMock()
- mock_parent.exists.return_value = True
- mock_file.parent = mock_parent
- mock_file.expanduser.return_value = mock_file
-
- # Mock Path at the module level where it's imported
- with patch("lfx.components.processing.save_to_file.Path") as mock_path:
- mock_path.return_value = mock_file
-
- component = component_class()
- component.set_attributes(
- {
- "input_type": "Message",
- "message": Message(text="Test message"),
- "file_format": fmt,
- "file_path": f"./test_output.{fmt}",
- }
- )
-
- result = component.save_to_file()
-
- mock_file.write_text.assert_called_once_with(expected_content, encoding="utf-8")
- assert "saved successfully" in result
-
- def test_save_data(self, component_class):
- """Test saving Data object to JSON."""
- test_data = {"col1": ["value1"], "col2": ["value2"]}
-
- mock_file = MagicMock()
- mock_parent = MagicMock()
- mock_parent.exists.return_value = True
- mock_file.parent = mock_parent
- mock_file.expanduser.return_value = mock_file
-
- with patch("lfx.components.processing.save_to_file.Path") as mock_path:
- mock_path.return_value = mock_file
-
- component = component_class()
- component.set_attributes(
- {
- "input_type": "Data",
- "data": Data(data=test_data),
- "file_format": "json",
- "file_path": "./test_output.json",
- }
- )
-
- result = component.save_to_file()
-
- expected_json = json.dumps(test_data, indent=2)
- mock_file.write_text.assert_called_once_with(expected_json, encoding="utf-8")
- assert "saved successfully" in result
-
- def test_directory_creation(self, component_class, default_kwargs):
- """Test directory creation if it doesn't exist."""
- mock_file = MagicMock()
- mock_parent = MagicMock()
- mock_parent.exists.return_value = False
- mock_file.parent = mock_parent
- mock_file.expanduser.return_value = mock_file
-
- with patch("lfx.components.processing.save_to_file.Path") as mock_path:
- mock_path.return_value = mock_file
- with patch.object(pd.DataFrame, "to_csv") as mock_to_csv:
- component = component_class()
- component.set_attributes(default_kwargs)
-
- result = component.save_to_file()
-
- mock_parent.mkdir.assert_called_once_with(parents=True, exist_ok=True)
- assert mock_to_csv.called
- assert "saved successfully" in result
-
- def test_invalid_input_type(self, default_kwargs):
- """Test handling of invalid input type."""
- component = SaveToFileComponent()
- invalid_kwargs = default_kwargs.copy() # Create a copy to modify
- invalid_kwargs["input_type"] = "InvalidType"
- component.set_attributes(invalid_kwargs)
-
- with pytest.raises(ValueError, match="Unsupported input type"):
- component.save_to_file()
-
- @pytest.mark.parametrize(
- ("path_str", "fmt", "expected_suffix"),
- [
- ("./test_output", "csv", ".csv"),
- ("./test_output", "json", ".json"),
- ("./test_output", "markdown", ".markdown"),
- ("./test_output", "txt", ".txt"),
- ],
- )
- def test_adjust_path_adds_extension(self, component_class, path_str, fmt, expected_suffix):
- """Test that the correct extension is added when none exists."""
- component = component_class()
- input_path = Path(path_str)
- expected_path = Path(f"{path_str}{expected_suffix}")
- result = component._adjust_file_path_with_format(input_path, fmt)
- assert str(result) == str(expected_path.expanduser())
-
- @pytest.mark.parametrize(
- ("path_str", "fmt"),
- [
- ("./test_output.csv", "csv"),
- ("./test_output.json", "json"),
- ("./test_output.markdown", "markdown"),
- ("./test_output.txt", "txt"),
- ],
- )
- def test_adjust_path_keeps_existing_correct_extension(self, component_class, path_str, fmt):
- """Test that the existing correct extension is kept."""
- component = component_class()
- input_path = Path(path_str)
- result = component._adjust_file_path_with_format(input_path, fmt)
- assert str(result) == str(input_path.expanduser())
-
- @pytest.mark.parametrize(
- ("path_str", "fmt", "expected_path_str"),
- [
- ("./test_output.txt", "csv", "./test_output.txt.csv"), # Incorrect extension
- ("./test_output", "excel", "./test_output.xlsx"), # Add .xlsx for excel
- ("./test_output.txt", "excel", "./test_output.txt.xlsx"), # Incorrect extension for excel
- ],
- )
- def test_adjust_path_handles_incorrect_or_excel_add(self, component_class, path_str, fmt, expected_path_str):
- """Test handling incorrect extensions and adding .xlsx for excel."""
- component = component_class()
- input_path = Path(path_str)
- expected_path = Path(expected_path_str)
- result = component._adjust_file_path_with_format(input_path, fmt)
- assert str(result) == str(expected_path.expanduser())
-
- @pytest.mark.parametrize(
- "path_str",
- [
- "./test_output.xlsx",
- "./test_output.xls",
- ],
- )
- def test_adjust_path_keeps_existing_excel_extension(self, component_class, path_str):
- """Test that existing .xlsx or .xls extensions are kept for excel format."""
- component = component_class()
- input_path = Path(path_str)
- result = component._adjust_file_path_with_format(input_path, "excel")
- assert str(result) == str(input_path.expanduser())
-
- def test_adjust_path_expands_home(self, component_class):
- """Test that the home directory symbol '~' is expanded."""
- component = component_class()
- input_path = Path("~/test_output")
- expected_path = Path("~/test_output.csv").expanduser()
- result = component._adjust_file_path_with_format(input_path, "csv")
- assert str(result) == str(expected_path)
- assert "~" not in str(result) # Ensure ~ was expanded
diff --git a/src/backend/tests/unit/components/processing/test_split_text_component.py b/src/backend/tests/unit/components/processing/test_split_text_component.py
deleted file mode 100644
index 95e411af64bc..000000000000
--- a/src/backend/tests/unit/components/processing/test_split_text_component.py
+++ /dev/null
@@ -1,270 +0,0 @@
-import pytest
-
-from lfx.components.data import URLComponent
-from lfx.components.processing import SplitTextComponent
-from lfx.schema import Data, DataFrame
-from tests.base import ComponentTestBaseWithoutClient
-
-
-class TestSplitTextComponent(ComponentTestBaseWithoutClient):
- @pytest.fixture
- def component_class(self):
- """Return the component class to test."""
- return SplitTextComponent
-
- @pytest.fixture
- def default_kwargs(self):
- """Return the default kwargs for the component."""
- return {
- "data_inputs": [Data(text="Hello World")],
- "chunk_overlap": 200,
- "chunk_size": 1000,
- "separator": "\n",
- "session_id": "test_session",
- "sender": "test_sender",
- "sender_name": "test_sender_name",
- }
-
- @pytest.fixture
- def file_names_mapping(self):
- """Return the file names mapping for different versions."""
- return [
- # It was in helpers in version 1.0.19
- {"version": "1.0.19", "module": "helpers", "file_name": "SplitText"},
- {"version": "1.1.0", "module": "processing", "file_name": "split_text"},
- {"version": "1.1.1", "module": "processing", "file_name": "split_text"},
- ]
-
- def test_split_text_basic(self):
- """Test basic text splitting functionality."""
- component = SplitTextComponent()
- test_text = "First chunk\nSecond chunk\nThird chunk"
- component.set_attributes(
- {
- "data_inputs": [Data(text=test_text)],
- "chunk_overlap": 0,
- "chunk_size": 15,
- "separator": "\n",
- "text_key": "text",
- "session_id": "test_session",
- "sender": "test_sender",
- "sender_name": "test_sender_name",
- }
- )
-
- data_frame = component.split_text()
- assert isinstance(data_frame, DataFrame), "Expected DataFrame instance"
- assert len(data_frame) == 3, f"Expected DataFrame with 3 rows, got {len(data_frame)}"
- assert list(data_frame.columns) == ["text"], f"Expected columns ['text'], got {list(data_frame.columns)}"
- assert "First chunk" in data_frame.iloc[0]["text"], (
- f"Expected 'First chunk', got '{data_frame.iloc[0]['text']}'"
- )
- assert "Second chunk" in data_frame.iloc[1]["text"], (
- f"Expected 'Second chunk', got '{data_frame.iloc[1]['text']}'"
- )
- assert "Third chunk" in data_frame.iloc[2]["text"], (
- f"Expected 'Third chunk', got '{data_frame.iloc[2]['text']}'"
- )
-
- def test_split_text_with_overlap(self):
- """Test text splitting with overlap."""
- component = SplitTextComponent()
- test_text = "First chunk.\nSecond chunk.\nThird chunk."
- component.set_attributes(
- {
- "data_inputs": [Data(text=test_text)],
- "chunk_overlap": 5, # Small overlap to test functionality
- "chunk_size": 20,
- "separator": "\n",
- "session_id": "test_session",
- "sender": "test_sender",
- "sender_name": "test_sender_name",
- }
- )
-
- data_frame = component.split_text()
- assert isinstance(data_frame, DataFrame), "Expected DataFrame instance"
- assert len(data_frame) == 3, f"Expected DataFrame with 3 rows, got {len(data_frame)}"
- assert list(data_frame.columns) == ["text"], f"Expected columns ['text'], got {list(data_frame.columns)}"
- assert "First chunk" in data_frame.iloc[0]["text"], (
- f"Expected 'First chunk', got '{data_frame.iloc[0]['text']}'"
- )
- assert "Second chunk" in data_frame.iloc[1]["text"], (
- f"Expected 'Second chunk', got '{data_frame.iloc[1]['text']}'"
- )
- assert "Third chunk" in data_frame.iloc[2]["text"], (
- f"Expected 'Third chunk', got '{data_frame.iloc[2]['text']}'"
- )
-
- def test_split_text_custom_separator(self):
- """Test text splitting with a custom separator."""
- component = SplitTextComponent()
- test_text = "First chunk.|Second chunk.|Third chunk."
- component.set_attributes(
- {
- "data_inputs": [Data(text=test_text)],
- "chunk_overlap": 0,
- "chunk_size": 10,
- "separator": "|",
- "session_id": "test_session",
- "sender": "test_sender",
- "sender_name": "test_sender_name",
- }
- )
-
- data_frame = component.split_text()
- assert isinstance(data_frame, DataFrame), "Expected DataFrame instance"
- assert len(data_frame) == 3, f"Expected DataFrame with 3 rows, got {len(data_frame)}"
- assert list(data_frame.columns) == ["text"], f"Expected columns ['text'], got {list(data_frame.columns)}"
- assert "First chunk" in data_frame.iloc[0]["text"], (
- f"Expected 'First chunk', got '{data_frame.iloc[0]['text']}'"
- )
- assert "Second chunk" in data_frame.iloc[1]["text"], (
- f"Expected 'Second chunk', got '{data_frame.iloc[1]['text']}'"
- )
- assert "Third chunk" in data_frame.iloc[2]["text"], (
- f"Expected 'Third chunk', got '{data_frame.iloc[2]['text']}'"
- )
-
- def test_split_text_with_metadata(self):
- """Test text splitting while preserving metadata."""
- component = SplitTextComponent()
- test_metadata = {"source": "test.txt", "author": "test"}
- test_text = "First chunk\nSecond chunk"
- component.set_attributes(
- {
- "data_inputs": [Data(text=test_text, data=test_metadata)],
- "chunk_overlap": 0,
- "chunk_size": 7,
- "separator": "\n",
- "session_id": "test_session",
- "sender": "test_sender",
- "sender_name": "test_sender_name",
- }
- )
-
- data_frame = component.split_text()
- assert isinstance(data_frame, DataFrame), "Expected DataFrame instance"
- assert len(data_frame) == 2, f"Expected DataFrame with 2 rows, got {len(data_frame)}"
- assert "First chunk" in data_frame.iloc[0]["text"], (
- f"Expected 'First chunk', got '{data_frame.iloc[0]['text']}'"
- )
- assert "Second chunk" in data_frame.iloc[1]["text"], (
- f"Expected 'Second chunk', got '{data_frame.iloc[1]['text']}'"
- )
- # Loop over each row to check metadata
- for _, row in data_frame.iterrows():
- assert row["source"] == test_metadata["source"], (
- f"Expected source '{test_metadata['source']}', got '{row['source']}'"
- )
- assert row["author"] == test_metadata["author"], (
- f"Expected author '{test_metadata['author']}', got '{row['author']}'"
- )
-
- def test_split_text_empty_input(self):
- """Test handling of empty input text."""
- component = SplitTextComponent()
- component.set_attributes(
- {
- "data_inputs": [Data(text="")],
- "chunk_overlap": 0,
- "chunk_size": 10,
- "separator": "\n",
- "session_id": "test_session",
- "sender": "test_sender",
- "sender_name": "test_sender_name",
- }
- )
-
- results = component.split_text()
- assert len(results) == 0, f"Expected 0 chunks for empty input, got {len(results)}"
-
- def test_split_text_single_chunk(self):
- """Test text that fits in a single chunk."""
- component = SplitTextComponent()
- test_text = "Small text"
- component.set_attributes(
- {
- "data_inputs": [Data(text=test_text)],
- "chunk_overlap": 0,
- "chunk_size": 100,
- "separator": "\n",
- "session_id": "test_session",
- "sender": "test_sender",
- "sender_name": "test_sender_name",
- }
- )
-
- results = component.split_text()
- assert len(results) == 1, f"Expected 1 chunk, got {len(results)}"
- assert results["text"][0] == test_text, f"Expected '{test_text}', got '{results['text'][0]}'"
-
- def test_split_text_multiple_inputs(self):
- """Test splitting multiple input texts."""
- component = SplitTextComponent()
- test_texts = ["First text\nSecond line", "Another text\nAnother line"]
- component.set_attributes(
- {
- "data_inputs": [Data(text=text) for text in test_texts],
- "chunk_overlap": 0,
- "chunk_size": 10,
- "separator": "\n",
- "session_id": "test_session",
- "sender": "test_sender",
- "sender_name": "test_sender_name",
- }
- )
-
- results = component.split_text()
- assert len(results) == 4, f"Expected 4 chunks (2 from each text), got {len(results)}"
- assert "First text" in results["text"][0], f"Expected 'First text', got '{results['text'][0]}'"
- assert "Second line" in results["text"][1], f"Expected 'Second line', got '{results['text'][1]}'"
- assert "Another text" in results["text"][2], f"Expected 'Another text', got '{results['text'][2]}'"
- assert "Another line" in results["text"][3], f"Expected 'Another line', got '{results['text'][3]}'"
-
- def test_split_text_with_dataframe_input(self):
- """Test splitting text with DataFrame input."""
- component = SplitTextComponent()
- test_texts = ["First text\nSecond line", "Another text\nAnother line"]
- data_frame = DataFrame([Data(text=text) for text in test_texts])
- component.set_attributes(
- {
- "data_inputs": data_frame,
- "chunk_overlap": 0,
- "chunk_size": 10,
- "separator": "\n",
- "session_id": "test_session",
- "sender": "test_sender",
- "sender_name": "test_sender_name",
- }
- )
-
- results = component.split_text()
- assert len(results) == 4, f"Expected 4 chunks (2 from each text), got {len(results)}"
- assert "First text" in results["text"][0], f"Expected 'First text', got '{results['text'][0]}'"
- assert "Second line" in results["text"][1], f"Expected 'Second line', got '{results['text'][1]}'"
- assert "Another text" in results["text"][2], f"Expected 'Another text', got '{results['text'][2]}'"
- assert "Another line" in results["text"][3], f"Expected 'Another line', got '{results['text'][3]}'"
-
- def test_with_url_loader(self):
- """Test splitting text with URL loader."""
- component = SplitTextComponent()
- url = ["https://en.wikipedia.org/wiki/London", "https://en.wikipedia.org/wiki/Paris"]
- data_frame = URLComponent(urls=url, format="Text").fetch_content()
- assert isinstance(data_frame, DataFrame), "Expected DataFrame instance"
- assert len(data_frame) == 2, f"Expected DataFrame with 2 rows, got {len(data_frame)}"
- component.set_attributes(
- {
- "data_inputs": data_frame,
- "chunk_overlap": 0,
- "chunk_size": 10,
- "separator": "\n",
- "session_id": "test_session",
- "sender": "test_sender",
- "sender_name": "test_sender_name",
- }
- )
-
- results = component.split_text()
- assert isinstance(results, DataFrame), "Expected DataFrame instance"
- assert len(results) > 2, f"Expected DataFrame with more than 2 rows, got {len(results)}"
diff --git a/src/backend/tests/unit/components/processing/test_structured_output_component.py b/src/backend/tests/unit/components/processing/test_structured_output_component.py
deleted file mode 100644
index 24730975a73c..000000000000
--- a/src/backend/tests/unit/components/processing/test_structured_output_component.py
+++ /dev/null
@@ -1,1040 +0,0 @@
-import os
-import re
-from unittest.mock import patch
-
-import openai
-import pytest
-from langchain_openai import ChatOpenAI
-from langflow.helpers.base_model import build_model_from_schema
-from langflow.inputs.inputs import TableInput
-from pydantic import BaseModel
-
-from lfx.components.processing.structured_output import StructuredOutputComponent
-from tests.base import ComponentTestBaseWithoutClient
-from tests.unit.mock_language_model import MockLanguageModel
-
-
-class TestStructuredOutputComponent(ComponentTestBaseWithoutClient):
- @pytest.fixture
- def component_class(self):
- """Return the component class to test."""
- return StructuredOutputComponent
-
- @pytest.fixture
- def default_kwargs(self):
- """Return the default kwargs for the component."""
- return {
- "llm": MockLanguageModel(),
- "input_value": "Test input",
- "schema_name": "TestSchema",
- "output_schema": [{"name": "field", "type": "str", "description": "A test field"}],
- "multiple": False,
- "system_prompt": "Test system prompt",
- }
-
- @pytest.fixture
- def file_names_mapping(self):
- """Return the file names mapping for version-specific files."""
-
- def test_successful_structured_output_generation_with_patch_with_config(self):
- def mock_get_chat_result(runnable, system_message, input_value, config): # noqa: ARG001
- class MockBaseModel(BaseModel):
- def model_dump(self, **__):
- return {"objects": [{"field": "value"}]}
-
- # Return trustcall-style response structure
- return {
- "messages": ["mock_message"],
- "responses": [MockBaseModel()],
- "response_metadata": [{"id": "mock_id"}],
- "attempts": 1,
- }
-
- component = StructuredOutputComponent(
- llm=MockLanguageModel(),
- input_value="Test input",
- schema_name="TestSchema",
- output_schema=[{"name": "field", "type": "str", "description": "A test field"}],
- multiple=False,
- system_prompt="Test system prompt",
- )
-
- with patch("lfx.components.processing.structured_output.get_chat_result", mock_get_chat_result):
- result = component.build_structured_output_base()
- assert isinstance(result, list)
- assert result == [{"field": "value"}]
-
- def test_raises_value_error_for_unsupported_language_model(self):
- # Mocking an incompatible language model
- class MockLanguageModel:
- pass
-
- # Creating an instance of StructuredOutputComponent
- component = StructuredOutputComponent(
- llm=MockLanguageModel(),
- input_value="Test input",
- schema_name="TestSchema",
- output_schema=[{"name": "field", "type": "str", "description": "A test field"}],
- multiple=False,
- )
-
- with pytest.raises(TypeError, match=re.escape("Language model does not support structured output.")):
- component.build_structured_output()
-
- def test_correctly_builds_output_model(self):
- # Setup
- component = StructuredOutputComponent()
- schema = [
- {
- "name": "name",
- "display_name": "Name",
- "type": "str",
- "description": "Specify the name of the output field.",
- },
- {
- "name": "description",
- "display_name": "Description",
- "type": "str",
- "description": "Describe the purpose of the output field.",
- },
- {
- "name": "type",
- "display_name": "Type",
- "type": "str",
- "description": (
- "Indicate the data type of the output field (e.g., str, int, float, bool, list, dict)."
- ),
- },
- {
- "name": "multiple",
- "display_name": "Multiple",
- "type": "boolean",
- "description": "Set to True if this output field should be a list of the specified type.",
- },
- ]
- component.output_schema = TableInput(name="output_schema", display_name="Output Schema", table_schema=schema)
-
- # Assertion
- output_model = build_model_from_schema(schema)
- assert isinstance(output_model, type)
-
- def test_handles_multiple_outputs(self):
- # Setup
- component = StructuredOutputComponent()
- schema = [
- {
- "name": "name",
- "display_name": "Name",
- "type": "str",
- "description": "Specify the name of the output field.",
- },
- {
- "name": "description",
- "display_name": "Description",
- "type": "str",
- "description": "Describe the purpose of the output field.",
- },
- {
- "name": "type",
- "display_name": "Type",
- "type": "str",
- "description": (
- "Indicate the data type of the output field (e.g., str, int, float, bool, list, dict)."
- ),
- },
- {
- "name": "multiple",
- "display_name": "Multiple",
- "type": "boolean",
- "description": "Set to True if this output field should be a list of the specified type.",
- },
- ]
- component.output_schema = TableInput(name="output_schema", display_name="Output Schema", table_schema=schema)
- component.multiple = True
-
- # Assertion
- output_model = build_model_from_schema(schema)
- assert isinstance(output_model, type)
-
- def test_empty_output_schema(self):
- component = StructuredOutputComponent(
- llm=MockLanguageModel(),
- input_value="Test input",
- schema_name="EmptySchema",
- output_schema=[],
- multiple=False,
- )
-
- with pytest.raises(ValueError, match="Output schema cannot be empty"):
- component.build_structured_output()
-
- def test_invalid_output_schema_type(self):
- component = StructuredOutputComponent(
- llm=MockLanguageModel(),
- input_value="Test input",
- schema_name="InvalidSchema",
- output_schema=[{"name": "field", "type": "invalid_type", "description": "Invalid field"}],
- multiple=False,
- )
-
- with pytest.raises(ValueError, match="Invalid type: invalid_type"):
- component.build_structured_output()
-
- @patch("lfx.components.processing.structured_output.get_chat_result")
- def test_nested_output_schema(self, mock_get_chat_result):
- class ChildModel(BaseModel):
- child: str = "value"
-
- class ParentModel(BaseModel):
- objects: list[dict] = [{"parent": {"child": "value"}}]
-
- def model_dump(self, **__):
- return {"objects": self.objects}
-
- # Update to return trustcall-style response
- mock_get_chat_result.return_value = {
- "messages": ["mock_message"],
- "responses": [ParentModel()],
- "response_metadata": [{"id": "mock_id"}],
- "attempts": 1,
- }
-
- component = StructuredOutputComponent(
- llm=MockLanguageModel(),
- input_value="Test input",
- schema_name="NestedSchema",
- output_schema=[
- {
- "name": "parent",
- "type": "dict",
- "description": "Parent field",
- "fields": [{"name": "child", "type": "str", "description": "Child field"}],
- }
- ],
- multiple=False,
- system_prompt="Test system prompt",
- )
-
- result = component.build_structured_output_base()
- assert isinstance(result, list)
- assert result == [{"parent": {"child": "value"}}]
-
- @patch("lfx.components.processing.structured_output.get_chat_result")
- def test_large_input_value(self, mock_get_chat_result):
- large_input = "Test input " * 1000
-
- class MockBaseModel(BaseModel):
- objects: list[dict] = [{"field": "value"}]
-
- def model_dump(self, **__):
- return {"objects": self.objects}
-
- # Update to return trustcall-style response
- mock_get_chat_result.return_value = {
- "messages": ["mock_message"],
- "responses": [MockBaseModel()],
- "response_metadata": [{"id": "mock_id"}],
- "attempts": 1,
- }
-
- component = StructuredOutputComponent(
- llm=MockLanguageModel(),
- input_value=large_input,
- schema_name="LargeInputSchema",
- output_schema=[{"name": "field", "type": "str", "description": "A test field"}],
- multiple=False,
- system_prompt="Test system prompt",
- )
-
- result = component.build_structured_output_base()
- assert isinstance(result, list)
- assert result == [{"field": "value"}]
- mock_get_chat_result.assert_called_once()
-
- @pytest.mark.skipif(
- "OPENAI_API_KEY" not in os.environ,
- reason="OPENAI_API_KEY environment variable not set",
- )
- def test_with_real_openai_model_simple_schema(self):
- # Create a real OpenAI model
- llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)
-
- # Create a component with a simple schema
- component = StructuredOutputComponent(
- llm=llm,
- input_value="Extract the name and age from this text: John Doe is 30 years old.",
- schema_name="PersonInfo",
- output_schema=[
- {"name": "name", "type": "str", "description": "The person's name"},
- {"name": "age", "type": "int", "description": "The person's age"},
- ],
- multiple=False,
- system_prompt="Extract structured information from the input text.",
- )
-
- # Get the structured output
- result = component.build_structured_output_base()
-
- # Verify the result
- assert isinstance(result, list)
- assert len(result) > 0
- assert "name" in result[0]
- assert "age" in result[0]
- assert result[0]["name"] == "John Doe"
- assert result[0]["age"] == 30
-
- @pytest.mark.skipif(
- "OPENAI_API_KEY" not in os.environ,
- reason="OPENAI_API_KEY environment variable not set",
- )
- def test_with_real_openai_model_multiple_patterns(self):
- # Create a real OpenAI model
- llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)
-
- # Create a component with multiple people in the input
- component = StructuredOutputComponent(
- llm=llm,
- input_value=(
- "Extract all people from this text: John Doe is 30 years old, Jane Smith is 25, and Bob Johnson is 35."
- ),
- schema_name="PersonInfo",
- output_schema=[
- {"name": "name", "type": "str", "description": "The person's name"},
- {"name": "age", "type": "int", "description": "The person's age"},
- ],
- multiple=False,
- system_prompt=(
- "You are an AI that extracts structured JSON objects from unstructured text. "
- "Use a predefined schema with expected types (str, int, float, bool, dict). "
- "Extract ALL relevant instances that match the schema - if multiple patterns exist, capture them all. "
- "Fill missing or ambiguous values with defaults: null for missing values. "
- "Remove exact duplicates but keep variations that have different field values. "
- "Always return valid JSON in the expected format, never throw errors. "
- "If multiple objects can be extracted, return them all in the structured format."
- ),
- )
-
- # Get the structured output
- result = component.build_structured_output_base()
-
- # Verify the result contains multiple people
- assert isinstance(result, list)
- assert len(result) >= 3 # Should extract all three people
-
- # Check that we have names and ages for multiple people
- names = [item["name"] for item in result if "name" in item]
- ages = [item["age"] for item in result if "age" in item]
-
- assert len(names) >= 3
- assert len(ages) >= 3
-
- # Check that we extracted the expected people (order may vary)
- expected_names = ["John Doe", "Jane Smith", "Bob Johnson"]
- expected_ages = [30, 25, 35]
-
- for expected_name in expected_names:
- assert any(expected_name in name for name in names)
- for expected_age in expected_ages:
- assert expected_age in ages
-
- def test_multiple_patterns_with_duplicates_and_variations(self):
- """Test that multiple patterns are extracted while removing exact duplicates but keeping variations."""
-
- def mock_get_chat_result(runnable, system_message, input_value, config): # noqa: ARG001
- class MockBaseModel(BaseModel):
- def model_dump(self, **__):
- return {
- "objects": [
- {"product": "iPhone", "price": 999.99},
- {"product": "iPhone", "price": 1099.99}, # Variation - different price
- {"product": "Samsung", "price": 899.99},
- {"product": "iPhone", "price": 999.99}, # Exact duplicate - should be removed
- ]
- }
-
- return {
- "messages": ["mock_message"],
- "responses": [MockBaseModel()],
- "response_metadata": [{"id": "mock_id"}],
- "attempts": 1,
- }
-
- component = StructuredOutputComponent(
- llm=MockLanguageModel(),
- input_value="Products: iPhone $999.99, iPhone $1099.99, Samsung $899.99, iPhone $999.99",
- schema_name="ProductSchema",
- output_schema=[
- {"name": "product", "type": "str", "description": "Product name"},
- {"name": "price", "type": "float", "description": "Product price"},
- ],
- multiple=False,
- system_prompt="Remove exact duplicates but keep variations that have different field values.",
- )
-
- with patch("lfx.components.processing.structured_output.get_chat_result", mock_get_chat_result):
- result = component.build_structured_output()
-
- # Check that result is a Data object
- from lfx.schema.data import Data
-
- assert isinstance(result, Data)
-
- # Should have multiple results due to multiple patterns
- assert isinstance(result.data, dict)
- assert "results" in result.data
- assert (
- len(result.data["results"]) == 4
- ) # All items returned (duplicate handling is expected to be done by the LLM)
-
- # Verify the expected products are present
- products = [item["product"] for item in result.data["results"]]
- prices = [item["price"] for item in result.data["results"]]
-
- assert "iPhone" in products
- assert "Samsung" in products
- assert 999.99 in prices
- assert 1099.99 in prices
- assert 899.99 in prices
-
- @pytest.mark.skipif(
- "OPENAI_API_KEY" not in os.environ,
- reason="OPENAI_API_KEY environment variable not set",
- )
- def test_with_real_openai_model_simple_schema_fail(self):
- # Create a real OpenAI model with very low max_tokens to force truncation
- llm = ChatOpenAI(model="gpt-4o-mini", temperature=0, max_tokens=1)
-
- # Create a component with a simple schema
- component = StructuredOutputComponent(
- llm=llm,
- input_value="Extract the name and age from this text: John Doe is 30 years old.",
- schema_name="PersonInfo",
- output_schema=[
- {"name": "name", "type": "str", "description": "The person's name"},
- {"name": "age", "type": "int", "description": "The person's age"},
- ],
- multiple=False,
- system_prompt="Extract structured information from the input text.",
- )
-
- # Expect BadRequestError due to max_tokens being reached
- with pytest.raises(openai.BadRequestError) as exc_info:
- component.build_structured_output_base()
-
- # Verify the error message contains expected content (updated to match actual OpenAI error format)
- error_message = str(exc_info.value)
- assert any(
- phrase in error_message
- for phrase in [
- "max_tokens was reached",
- "max_tokens or model output limit was reached",
- "Could not finish the message because max_tokens",
- ]
- ), f"Expected max_tokens error but got: {error_message}"
-
- @pytest.mark.skipif(
- "OPENAI_API_KEY" not in os.environ,
- reason="OPENAI_API_KEY environment variable not set",
- )
- def test_with_real_openai_model_complex_schema(self):
- from langchain_openai import ChatOpenAI
-
- # Create a real OpenAI model
- llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)
-
- # Create a component with a more complex schema
- component = StructuredOutputComponent(
- llm=llm,
- input_value="""
- Product Review:
- I purchased the XYZ Wireless Headphones last month. The sound quality is excellent,
- and the battery lasts about 8 hours. However, they're a bit uncomfortable after
- wearing them for a long time. The price was $129.99, which I think is reasonable
- for the quality. Overall rating: 4/5.
- """,
- schema_name="ProductReview",
- output_schema=[
- {"name": "product_name", "type": "str", "description": "The name of the product"},
- {"name": "sound_quality", "type": "str", "description": "Description of sound quality"},
- {"name": "comfort", "type": "str", "description": "Description of comfort"},
- {"name": "battery_life", "type": "str", "description": "Description of battery life"},
- {"name": "price", "type": "float", "description": "The price of the product"},
- {"name": "rating", "type": "float", "description": "The overall rating out of 5"},
- ],
- multiple=False,
- system_prompt="Extract detailed product review information from the input text.",
- )
-
- # Get the structured output
- result = component.build_structured_output_base()
-
- # Verify the result
- assert isinstance(result, list)
- assert len(result) > 0
- assert "product_name" in result[0]
- assert "sound_quality" in result[0]
- assert "comfort" in result[0]
- assert "battery_life" in result[0]
- assert "price" in result[0]
- assert "rating" in result[0]
- assert result[0]["product_name"] == "XYZ Wireless Headphones"
- assert result[0]["price"] == 129.99
- assert result[0]["rating"] == 4.0
-
- @pytest.mark.skipif(
- "OPENAI_API_KEY" not in os.environ,
- reason="OPENAI_API_KEY environment variable not set",
- )
- def test_with_real_openai_model_nested_schema(self):
- from langchain_openai import ChatOpenAI
-
- # Create a real OpenAI model
- llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)
-
- # Create a component with a flattened schema (no nested structures)
- component = StructuredOutputComponent(
- llm=llm,
- input_value="""
- Restaurant: Bella Italia
- Address: 123 Main St, Anytown, CA 12345
- Visited: June 15, 2023
-
- Ordered:
- - Margherita Pizza ($14.99) - Delicious with fresh basil
- - Tiramisu ($8.50) - Perfect sweetness
-
- Service was excellent, atmosphere was cozy.
- Total bill: $35.49 including tip.
- Would definitely visit again!
- """,
- schema_name="RestaurantReview",
- output_schema=[
- {"name": "restaurant_name", "type": "str", "description": "The name of the restaurant"},
- {"name": "street", "type": "str", "description": "Street address"},
- {"name": "city", "type": "str", "description": "City"},
- {"name": "state", "type": "str", "description": "State"},
- {"name": "zip", "type": "str", "description": "ZIP code"},
- {"name": "first_item_name", "type": "str", "description": "Name of first item ordered"},
- {"name": "first_item_price", "type": "float", "description": "Price of first item"},
- {"name": "second_item_name", "type": "str", "description": "Name of second item ordered"},
- {"name": "second_item_price", "type": "float", "description": "Price of second item"},
- {"name": "total_bill", "type": "float", "description": "Total bill amount"},
- {"name": "would_return", "type": "bool", "description": "Whether the reviewer would return"},
- ],
- multiple=False,
- system_prompt="Extract detailed restaurant review information from the input text.",
- )
-
- # Get the structured output
- result = component.build_structured_output_base()
-
- # Verify the result
- assert isinstance(result, list)
- assert len(result) > 0
- assert "restaurant_name" in result[0]
- assert "street" in result[0]
- assert "city" in result[0]
- assert "state" in result[0]
- assert "zip" in result[0]
- assert "first_item_name" in result[0]
- assert "first_item_price" in result[0]
- assert "total_bill" in result[0]
- assert "would_return" in result[0]
-
- assert result[0]["restaurant_name"] == "Bella Italia"
- assert result[0]["street"] == "123 Main St"
- assert result[0]["total_bill"] == 35.49
- assert result[0]["would_return"] is True
-
- @pytest.mark.skipif(
- "NVIDIA_API_KEY" not in os.environ,
- reason="NVIDIA_API_KEY environment variable not set",
- )
- def test_with_real_nvidia_model_simple_schema(self):
- # Create a real NVIDIA model
- try:
- from langchain_nvidia_ai_endpoints import ChatNVIDIA
- except ImportError as e:
- msg = "Please install langchain-nvidia-ai-endpoints to use the NVIDIA model."
- raise ImportError(msg) from e
-
- llm = ChatNVIDIA(model="meta/llama-3.2-3b-instruct", temperature=0, max_tokens=10)
-
- # Create a component with a simple schema
- component = StructuredOutputComponent(
- llm=llm,
- input_value="Extract the name and age from this text: John Doe is 30 years old.",
- schema_name="PersonInfo",
- output_schema=[
- {"name": "name", "type": "str", "description": "The person's name"},
- {"name": "age", "type": "int", "description": "The person's age"},
- ],
- multiple=False,
- system_prompt="Extract structured information from the input text.",
- )
-
- # Test that it now works with NVIDIA models (previously expected to fail but now supports structured output)
- try:
- result = component.build_structured_output_base()
- # If it succeeds, verify it returns a valid runnable
- assert result is not None
- except (TypeError, ValueError, RuntimeError) as e:
- # If it still fails, verify it's with a known error message
- error_msg = str(e)
- assert any(
- msg in error_msg
- for msg in ["Language model does not support structured output", "400 Bad Request", "not supported"]
- ), f"Unexpected error: {error_msg}"
-
- def test_structured_output_returns_dict_when_no_objects_key(self):
- """Test that when trustcall returns a dict without 'objects' key, we return the dict directly."""
-
- def mock_get_chat_result(runnable, system_message, input_value, config): # noqa: ARG001
- # Return a trustcall-style response without a BaseModel, so no "objects" key is created
- return {
- "messages": ["mock_message"],
- "responses": [{"field": "value", "another_field": "another_value"}], # Direct dict, not BaseModel
- "response_metadata": [{"id": "mock_id"}],
- "attempts": 1,
- }
-
- component = StructuredOutputComponent(
- llm=MockLanguageModel(),
- input_value="Test input",
- schema_name="TestSchema",
- output_schema=[{"name": "field", "type": "str", "description": "A test field"}],
- multiple=False,
- system_prompt="Test system prompt",
- )
-
- with patch("lfx.components.processing.structured_output.get_chat_result", mock_get_chat_result):
- result = component.build_structured_output_base()
- # Should return the dict directly since there's no "objects" key
- assert isinstance(result, dict)
- assert result == {"field": "value", "another_field": "another_value"}
-
- def test_structured_output_returns_direct_response_when_not_dict(self):
- """Test that when trustcall returns a non-dict response, we return it directly."""
-
- def mock_get_chat_result(runnable, system_message, input_value, config): # noqa: ARG001
- # Return a string response (edge case)
- return "Simple string response"
-
- component = StructuredOutputComponent(
- llm=MockLanguageModel(),
- input_value="Test input",
- schema_name="TestSchema",
- output_schema=[{"name": "field", "type": "str", "description": "A test field"}],
- multiple=False,
- system_prompt="Test system prompt",
- )
-
- with patch("lfx.components.processing.structured_output.get_chat_result", mock_get_chat_result):
- result = component.build_structured_output_base()
- # Should return the string directly
- assert isinstance(result, str)
- assert result == "Simple string response"
-
- def test_structured_output_handles_empty_responses_array(self):
- """Test that when trustcall returns empty responses array, we return the result dict."""
-
- def mock_get_chat_result(runnable, system_message, input_value, config): # noqa: ARG001
- # Return trustcall-style response with empty responses
- return {
- "messages": ["mock_message"],
- "responses": [], # Empty responses array
- "response_metadata": [],
- "attempts": 1,
- "fallback_data": {"field": "fallback_value"}, # Some other data in the result
- }
-
- component = StructuredOutputComponent(
- llm=MockLanguageModel(),
- input_value="Test input",
- schema_name="TestSchema",
- output_schema=[{"name": "field", "type": "str", "description": "A test field"}],
- multiple=False,
- system_prompt="Test system prompt",
- )
-
- with patch("lfx.components.processing.structured_output.get_chat_result", mock_get_chat_result):
- result = component.build_structured_output_base()
- # Should return the entire result dict when responses is empty
- assert isinstance(result, dict)
- assert "messages" in result
- assert "responses" in result
- assert "fallback_data" in result
-
- def test_build_structured_output_fails_when_base_returns_non_list(self):
- """Test that build_structured_output() fails when base method returns non-list."""
-
- def mock_get_chat_result(runnable, system_message, input_value, config): # noqa: ARG001
- # Return a dict instead of list with objects
- return {
- "messages": ["mock_message"],
- "responses": [{"single_item": "value"}], # Dict without "objects" key
- "response_metadata": [{"id": "mock_id"}],
- "attempts": 1,
- }
-
- component = StructuredOutputComponent(
- llm=MockLanguageModel(),
- input_value="Test input",
- schema_name="TestSchema",
- output_schema=[{"name": "field", "type": "str", "description": "A test field"}],
- multiple=False,
- system_prompt="Test system prompt",
- )
-
- with (
- patch("lfx.components.processing.structured_output.get_chat_result", mock_get_chat_result),
- pytest.raises(ValueError, match="No structured output returned"),
- ):
- component.build_structured_output()
-
- def test_build_structured_output_returns_data_with_dict(self):
- """Test that build_structured_output() returns Data object with dict data."""
-
- def mock_get_chat_result(runnable, system_message, input_value, config): # noqa: ARG001
- class MockBaseModel(BaseModel):
- def model_dump(self, **__):
- return {"objects": [{"field": "value2", "number": 24}]} # Return only one object
-
- # Return trustcall-style response structure
- return {
- "messages": ["mock_message"],
- "responses": [MockBaseModel()],
- "response_metadata": [{"id": "mock_id"}],
- "attempts": 1,
- }
-
- component = StructuredOutputComponent(
- llm=MockLanguageModel(),
- input_value="Test input",
- schema_name="TestSchema",
- output_schema=[
- {"name": "field", "type": "str", "description": "A test field"},
- {"name": "number", "type": "int", "description": "A test number"},
- ],
- multiple=False,
- system_prompt="Test system prompt",
- )
-
- with patch("lfx.components.processing.structured_output.get_chat_result", mock_get_chat_result):
- result = component.build_structured_output()
-
- # Check that result is a Data object
- from lfx.schema.data import Data
-
- assert isinstance(result, Data)
-
- # Check that result.data is a dict
- assert isinstance(result.data, dict)
-
- # Check the content of the dict
- assert result.data == {"field": "value2", "number": 24}
-
- # Verify the data has the expected keys
- assert "field" in result.data
- assert "number" in result.data
- assert result.data["field"] == "value2"
- assert result.data["number"] == 24
-
- def test_build_structured_output_returns_multiple_objects(self):
- """Test that build_structured_output() returns Data object with multiple objects wrapped in results."""
-
- def mock_get_chat_result(runnable, system_message, input_value, config): # noqa: ARG001
- class MockBaseModel(BaseModel):
- def model_dump(self, **__):
- return {
- "objects": [
- {"name": "John", "age": 30},
- {"name": "Jane", "age": 25},
- {"name": "Bob", "age": 35},
- ]
- }
-
- return {
- "messages": ["mock_message"],
- "responses": [MockBaseModel()],
- "response_metadata": [{"id": "mock_id"}],
- "attempts": 1,
- }
-
- component = StructuredOutputComponent(
- llm=MockLanguageModel(),
- input_value="Extract multiple people: John is 30, Jane is 25, Bob is 35",
- schema_name="PersonSchema",
- output_schema=[
- {"name": "name", "type": "str", "description": "Person's name"},
- {"name": "age", "type": "int", "description": "Person's age"},
- ],
- multiple=False,
- system_prompt="Extract ALL relevant instances that match the schema",
- )
-
- with patch("lfx.components.processing.structured_output.get_chat_result", mock_get_chat_result):
- result = component.build_structured_output()
-
- # Check that result is a Data object
- from lfx.schema.data import Data
-
- assert isinstance(result, Data)
-
- # Check that result.data is a dict with results key
- assert isinstance(result.data, dict)
- assert "results" in result.data
- assert len(result.data["results"]) == 3
-
- # Check the content of each result
- assert result.data["results"][0] == {"name": "John", "age": 30}
- assert result.data["results"][1] == {"name": "Jane", "age": 25}
- assert result.data["results"][2] == {"name": "Bob", "age": 35}
-
- def test_build_structured_output_returns_data_with_single_item(self):
- """Test that build_structured_output() returns Data object when only one item in objects."""
-
- def mock_get_chat_result(runnable, system_message, input_value, config): # noqa: ARG001
- class MockBaseModel(BaseModel):
- def model_dump(self, **__):
- return {"objects": [{"name": "John Doe", "age": 30}]}
-
- return {
- "messages": ["mock_message"],
- "responses": [MockBaseModel()],
- "response_metadata": [{"id": "mock_id"}],
- "attempts": 1,
- }
-
- component = StructuredOutputComponent(
- llm=MockLanguageModel(),
- input_value="Extract name and age from: John Doe is 30 years old",
- schema_name="PersonInfo",
- output_schema=[
- {"name": "name", "type": "str", "description": "Person's name"},
- {"name": "age", "type": "int", "description": "Person's age"},
- ],
- multiple=False,
- system_prompt="Extract person info",
- )
-
- with patch("lfx.components.processing.structured_output.get_chat_result", mock_get_chat_result):
- result = component.build_structured_output()
-
- # Check that result is a Data object
- from lfx.schema.data import Data
-
- assert isinstance(result, Data)
-
- # Check that result.data is a dict
- assert isinstance(result.data, dict)
-
- # Check the content matches exactly
- assert result.data == {"name": "John Doe", "age": 30}
-
- def test_build_structured_output_data_object_properties(self):
- """Test that the returned Data object has proper properties."""
-
- def mock_get_chat_result(runnable, system_message, input_value, config): # noqa: ARG001
- class MockBaseModel(BaseModel):
- def model_dump(self, **__):
- return {"objects": [{"product": "iPhone", "price": 999.99, "available": True}]}
-
- return {
- "messages": ["mock_message"],
- "responses": [MockBaseModel()],
- "response_metadata": [{"id": "mock_id"}],
- "attempts": 1,
- }
-
- component = StructuredOutputComponent(
- llm=MockLanguageModel(),
- input_value="Product info: iPhone costs $999.99 and is available",
- schema_name="ProductInfo",
- output_schema=[
- {"name": "product", "type": "str", "description": "Product name"},
- {"name": "price", "type": "float", "description": "Product price"},
- {"name": "available", "type": "bool", "description": "Product availability"},
- ],
- multiple=False,
- system_prompt="Extract product info",
- )
-
- with patch("lfx.components.processing.structured_output.get_chat_result", mock_get_chat_result):
- result = component.build_structured_output()
-
- # Check that result is a Data object
- from lfx.schema.data import Data
-
- assert isinstance(result, Data)
-
- # Check that result.data is a dict with correct types
- assert isinstance(result.data, dict)
- assert isinstance(result.data["product"], str)
- assert isinstance(result.data["price"], float)
- assert isinstance(result.data["available"], bool)
-
- # Check values
- assert result.data["product"] == "iPhone"
- assert result.data["price"] == 999.99
- assert result.data["available"] is True
-
- # Test Data object methods if they exist
- if hasattr(result, "get_text"):
- # Data object should be able to represent itself as text
- text_repr = result.get_text()
- assert isinstance(text_repr, str)
-
- def test_build_structured_dataframe_returns_dataframe_with_single_data(self):
- """Test that build_structured_dataframe() returns DataFrame object with single Data item."""
-
- def mock_get_chat_result(runnable, system_message, input_value, config): # noqa: ARG001
- class MockBaseModel(BaseModel):
- def model_dump(self, **__):
- return {"objects": [{"field": "value2", "number": 24}]}
-
- return {
- "messages": ["mock_message"],
- "responses": [MockBaseModel()],
- "response_metadata": [{"id": "mock_id"}],
- "attempts": 1,
- }
-
- component = StructuredOutputComponent(
- llm=MockLanguageModel(),
- input_value="Test input",
- schema_name="TestSchema",
- output_schema=[
- {"name": "field", "type": "str", "description": "A test field"},
- {"name": "number", "type": "int", "description": "A test number"},
- ],
- multiple=False,
- system_prompt="Test system prompt",
- )
-
- with patch("lfx.components.processing.structured_output.get_chat_result", mock_get_chat_result):
- result = component.build_structured_dataframe()
-
- # Check that result is a DataFrame object
- from lfx.schema.dataframe import DataFrame
-
- assert isinstance(result, DataFrame)
- assert len(result) == 1
- assert result.iloc[0]["field"] == "value2"
- assert result.iloc[0]["number"] == 24
-
- # Test conversion back to Data list
- data_list = result.to_data_list()
- assert len(data_list) == 1
- assert data_list[0].data == {"field": "value2", "number": 24}
-
- def test_build_structured_dataframe_returns_dataframe_with_multiple_data(self):
- """Test that build_structured_dataframe() returns DataFrame object with multiple Data items."""
-
- def mock_get_chat_result(runnable, system_message, input_value, config): # noqa: ARG001
- class MockBaseModel(BaseModel):
- def model_dump(self, **__):
- return {
- "objects": [
- {"name": "John", "age": 30},
- {"name": "Jane", "age": 25},
- {"name": "Bob", "age": 35},
- ]
- }
-
- return {
- "messages": ["mock_message"],
- "responses": [MockBaseModel()],
- "response_metadata": [{"id": "mock_id"}],
- "attempts": 1,
- }
-
- component = StructuredOutputComponent(
- llm=MockLanguageModel(),
- input_value="Test input with multiple people",
- schema_name="PersonSchema",
- output_schema=[
- {"name": "name", "type": "str", "description": "Person's name"},
- {"name": "age", "type": "int", "description": "Person's age"},
- ],
- multiple=False,
- system_prompt="Test system prompt",
- )
-
- with patch("lfx.components.processing.structured_output.get_chat_result", mock_get_chat_result):
- result = component.build_structured_dataframe()
-
- # Check that result is a DataFrame object
- from lfx.schema.dataframe import DataFrame
-
- assert isinstance(result, DataFrame)
- assert len(result) == 3
- assert result.iloc[0]["name"] == "John"
- assert result.iloc[0]["age"] == 30
- assert result.iloc[1]["name"] == "Jane"
- assert result.iloc[1]["age"] == 25
- assert result.iloc[2]["name"] == "Bob"
- assert result.iloc[2]["age"] == 35
-
- # Test conversion back to Data list
- data_list = result.to_data_list()
- assert len(data_list) == 3
- assert data_list[0].data == {"name": "John", "age": 30}
- assert data_list[1].data == {"name": "Jane", "age": 25}
- assert data_list[2].data == {"name": "Bob", "age": 35}
-
- def test_build_structured_dataframe_fails_when_base_returns_non_list(self):
- """Test that build_structured_dataframe() fails when base method returns non-list."""
-
- def mock_get_chat_result(runnable, system_message, input_value, config): # noqa: ARG001
- return {
- "messages": ["mock_message"],
- "responses": [{"single_item": "value"}],
- "response_metadata": [{"id": "mock_id"}],
- "attempts": 1,
- }
-
- component = StructuredOutputComponent(
- llm=MockLanguageModel(),
- input_value="Test input",
- schema_name="TestSchema",
- output_schema=[{"name": "field", "type": "str", "description": "A test field"}],
- multiple=False,
- system_prompt="Test system prompt",
- )
-
- with (
- patch("lfx.components.processing.structured_output.get_chat_result", mock_get_chat_result),
- pytest.raises(ValueError, match="No structured output returned"),
- ):
- component.build_structured_dataframe()
-
- def test_build_structured_dataframe_fails_when_empty_output(self):
- """Test that build_structured_dataframe() fails when base method returns empty list."""
-
- def mock_get_chat_result(runnable, system_message, input_value, config): # noqa: ARG001
- class MockBaseModel(BaseModel):
- def model_dump(self, **__):
- return {"objects": []}
-
- return {
- "messages": ["mock_message"],
- "responses": [MockBaseModel()],
- "response_metadata": [{"id": "mock_id"}],
- "attempts": 1,
- }
-
- component = StructuredOutputComponent(
- llm=MockLanguageModel(),
- input_value="Test input",
- schema_name="TestSchema",
- output_schema=[{"name": "field", "type": "str", "description": "A test field"}],
- multiple=False,
- system_prompt="Test system prompt",
- )
-
- with (
- patch("lfx.components.processing.structured_output.get_chat_result", mock_get_chat_result),
- pytest.raises(ValueError, match="No structured output returned"),
- ):
- component.build_structured_dataframe()
diff --git a/src/backend/tests/unit/components/processing/test_type_converter_component.py b/src/backend/tests/unit/components/processing/test_type_converter_component.py
deleted file mode 100644
index c66fc1c276b5..000000000000
--- a/src/backend/tests/unit/components/processing/test_type_converter_component.py
+++ /dev/null
@@ -1,115 +0,0 @@
-import pandas as pd
-import pytest
-
-from lfx.components.processing.converter import TypeConverterComponent
-from lfx.schema.data import Data
-from lfx.schema.dataframe import DataFrame
-from lfx.schema.message import Message
-from tests.base import ComponentTestBaseWithoutClient
-
-
-class TestTypeConverterComponent(ComponentTestBaseWithoutClient):
- @pytest.fixture
- def component_class(self):
- """Return the component class to test."""
- return TypeConverterComponent
-
- @pytest.fixture
- def file_names_mapping(self):
- """Return an empty list since this component doesn't have version-specific files."""
- return []
-
- # Message to other types
- def test_message_to_message(self, component_class):
- """Test converting Message to Message."""
- component = component_class(input_data=Message(text="Hello World"), output_type="Message")
- result = component.convert_to_message()
- assert isinstance(result, Message)
- assert result.text == "Hello World"
-
- def test_message_to_data(self, component_class):
- """Test converting Message to Data."""
- component = component_class(input_data=Message(text="Hello"), output_type="Data")
- result = component.convert_to_data()
- assert isinstance(result, Data)
- assert "text" in result.data
- assert result.data["text"] == "Hello"
-
- def test_message_to_dataframe(self, component_class):
- """Test converting Message to DataFrame."""
- component = component_class(input_data=Message(text="Hello"), output_type="DataFrame")
- result = component.convert_to_dataframe()
- assert isinstance(result, DataFrame)
- assert "text" in result.columns
- assert result.iloc[0]["text"] == "Hello"
-
- # Data to other types
- def test_data_to_message(self, component_class):
- """Test converting Data to Message."""
- component = component_class(input_data=Data(data={"text": "Hello World"}), output_type="Message")
- result = component.convert_to_message()
- assert isinstance(result, Message)
- assert result.text == "Hello World"
-
- def test_data_to_data(self, component_class):
- """Test converting Data to Data."""
- component = component_class(input_data=Data(data={"key": "value"}), output_type="Data")
- result = component.convert_to_data()
- assert isinstance(result, Data)
- assert result.data == {"key": "value"}
-
- def test_data_to_dataframe(self, component_class):
- """Test converting Data to DataFrame."""
- component = component_class(input_data=Data(data={"text": "Hello World"}), output_type="DataFrame")
- result = component.convert_to_dataframe()
- assert isinstance(result, DataFrame)
- assert "text" in result.columns
- assert result.iloc[0]["text"] == "Hello World"
-
- # DataFrame to other types
- def test_dataframe_to_message(self, component_class):
- """Test converting DataFrame to Message."""
- df_data = pd.DataFrame({"col1": ["Hello"], "col2": ["World"]})
- component = component_class(input_data=DataFrame(data=df_data), output_type="Message")
- result = component.convert_to_message()
- assert isinstance(result, Message)
- assert result.text == "| col1 | col2 |\n|:-------|:-------|\n| Hello | World |"
-
- def test_dataframe_to_data(self, component_class):
- """Test converting DataFrame to Data."""
- df_data = pd.DataFrame({"col1": ["Hello"]})
- component = component_class(input_data=DataFrame(data=df_data), output_type="Data")
- result = component.convert_to_data()
- assert isinstance(result, Data)
- assert isinstance(result.data, dict)
-
- def test_dataframe_to_dataframe(self, component_class):
- """Test converting DataFrame to DataFrame."""
- df_data = pd.DataFrame({"col1": ["Hello"], "col2": ["World"]})
- component = component_class(input_data=DataFrame(data=df_data), output_type="DataFrame")
- result = component.convert_to_dataframe()
- assert isinstance(result, DataFrame)
- assert "col1" in result.columns
- assert "col2" in result.columns
- assert result.iloc[0]["col1"] == "Hello"
- assert result.iloc[0]["col2"] == "World"
-
- def test_update_outputs(self, component_class):
- """Test the update_outputs method."""
- component = component_class(input_data=Message(text="Hello"), output_type="Message")
- frontend_node = {"outputs": []}
-
- # Test with Message output
- updated = component.update_outputs(frontend_node, "output_type", "Message")
- assert len(updated["outputs"]) == 1
- assert updated["outputs"][0]["name"] == "message_output"
-
- # Test with Data output
- updated = component.update_outputs(frontend_node, "output_type", "Data")
- assert len(updated["outputs"]) == 1
- assert updated["outputs"][0]["name"] == "data_output"
-
- # Test with DataFrame output
- updated = component.update_outputs(frontend_node, "output_type", "DataFrame")
- assert len(updated["outputs"]) == 1
- assert updated["outputs"][0]["name"] == "dataframe_output"
diff --git a/src/backend/tests/unit/components/prompts/test_prompt_component.py b/src/backend/tests/unit/components/prompts/test_prompt_component.py
deleted file mode 100644
index 45e4f82438fd..000000000000
--- a/src/backend/tests/unit/components/prompts/test_prompt_component.py
+++ /dev/null
@@ -1,36 +0,0 @@
-import pytest
-
-from lfx.components.processing import PromptComponent
-from tests.base import ComponentTestBaseWithClient
-
-
-@pytest.mark.usefixtures("client")
-class TestPromptComponent(ComponentTestBaseWithClient):
- @pytest.fixture
- def component_class(self):
- return PromptComponent
-
- @pytest.fixture
- def default_kwargs(self):
- return {"template": "Hello {name}!", "name": "John", "_session_id": "123"}
-
- @pytest.fixture
- def file_names_mapping(self):
- return [
- {"version": "1.0.19", "module": "prompts", "file_name": "Prompt"},
- {"version": "1.1.0", "module": "prompts", "file_name": "prompt"},
- {"version": "1.1.1", "module": "prompts", "file_name": "prompt"},
- ]
-
- def test_post_code_processing(self, component_class, default_kwargs):
- component = component_class(**default_kwargs)
- frontend_node = component.to_frontend_node()
- node_data = frontend_node["data"]["node"]
- assert node_data["template"]["template"]["value"] == "Hello {name}!"
- assert "name" in node_data["custom_fields"]["template"]
- assert "name" in node_data["template"]
- assert node_data["template"]["name"]["value"] == "John"
-
- def test_prompt_component_latest(self, component_class, default_kwargs):
- result = component_class(**default_kwargs)()
- assert result is not None
diff --git a/src/backend/tests/unit/components/prototypes/test_create_data_component.py b/src/backend/tests/unit/components/prototypes/test_create_data_component.py
deleted file mode 100644
index cc52b11c8137..000000000000
--- a/src/backend/tests/unit/components/prototypes/test_create_data_component.py
+++ /dev/null
@@ -1,113 +0,0 @@
-import re
-
-import pytest
-
-from lfx.components.processing import CreateDataComponent
-from lfx.schema import Data
-
-
-@pytest.fixture
-def create_data_component():
- return CreateDataComponent()
-
-
-def test_update_build_config(create_data_component):
- build_config = {
- "number_of_fields": {
- "type": "int",
- "value": 2,
- },
- "text_key": {
- "type": "str",
- "value": "",
- },
- "text_key_validator": {
- "type": "bool",
- "value": False,
- },
- }
- updated_config = create_data_component.update_build_config(
- build_config=build_config, field_value=3, field_name="number_of_fields"
- )
-
- assert "field_1_key" in updated_config
- assert "field_2_key" in updated_config
- assert "field_3_key" in updated_config
- assert updated_config["number_of_fields"]["value"] == 3
-
-
-def test_update_build_config_exceed_limit(create_data_component):
- build_config = {
- "number_of_fields": {
- "type": "int",
- "value": 2,
- },
- "text_key": {
- "type": "str",
- "value": "",
- },
- "text_key_validator": {
- "type": "bool",
- "value": False,
- },
- }
- with pytest.raises(ValueError, match=re.escape("Number of fields cannot exceed 15.")):
- create_data_component.update_build_config(build_config, 16, "number_of_fields")
-
-
-async def test_build_data(create_data_component):
- create_data_component._attributes = {
- "field_1_key": {"key1": "value1"},
- "field_2_key": {"key2": "value2"},
- }
- create_data_component.text_key = "key1"
- create_data_component.text_key_validator = False
-
- result = await create_data_component.build_data()
-
- assert isinstance(result, Data)
- assert result.data == {"key1": "value1", "key2": "value2"}
- assert result.text_key == "key1"
-
-
-def test_get_data(create_data_component):
- create_data_component._attributes = {
- "field_1_key": {"key1": "value1"},
- "field_2_key": {"key2": "value2"},
- }
-
- result = create_data_component.get_data()
-
- assert result == {"key1": "value1", "key2": "value2"}
-
-
-def test_validate_text_key_valid(create_data_component):
- # Arrange
- create_data_component._attributes = {
- "field_1_key": {"key1": "value1"},
- "field_2_key": {"key2": "value2"},
- }
- create_data_component.text_key = "key1"
-
- # Act & Assert
- try:
- create_data_component.validate_text_key()
- except ValueError:
- pytest.fail("validate_text_key() raised ValueError unexpectedly!")
-
- # Additional assertions
- assert create_data_component.text_key == "key1"
- assert "key1" in create_data_component.get_data()
-
-
-def test_validate_text_key_invalid(create_data_component):
- # Arrange
- create_data_component._attributes = {
- "field_1_key": {"key1": "value1"},
- "field_2_key": {"key2": "value2"},
- }
- create_data_component.text_key = "invalid_key"
-
- # Act & Assert
- with pytest.raises(ValueError, match="Text Key: 'invalid_key' not found in the Data keys: 'key1, key2'"):
- create_data_component.validate_text_key()
diff --git a/src/backend/tests/unit/components/prototypes/test_update_data_component.py b/src/backend/tests/unit/components/prototypes/test_update_data_component.py
deleted file mode 100644
index e40cb4da4ae7..000000000000
--- a/src/backend/tests/unit/components/prototypes/test_update_data_component.py
+++ /dev/null
@@ -1,103 +0,0 @@
-import re
-
-import pytest
-
-from lfx.components.processing import UpdateDataComponent
-from lfx.schema import Data
-
-
-@pytest.fixture
-def update_data_component():
- return UpdateDataComponent()
-
-
-def test_update_build_config(update_data_component):
- build_config = {
- "number_of_fields": {
- "type": "int",
- "value": 2,
- },
- "text_key": {
- "type": "str",
- "value": "",
- },
- "text_key_validator": {
- "type": "bool",
- "value": False,
- },
- }
- updated_config = update_data_component.update_build_config(
- build_config=build_config, field_value=3, field_name="number_of_fields"
- )
-
- assert "field_1_key" in updated_config
- assert "field_2_key" in updated_config
- assert "field_3_key" in updated_config
- assert updated_config["number_of_fields"]["value"] == 3
-
-
-def test_update_build_config_exceed_limit(update_data_component):
- build_config = {
- "number_of_fields": {
- "type": "int",
- "value": 2,
- },
- "text_key": {
- "type": "str",
- "value": "",
- },
- "text_key_validator": {
- "type": "bool",
- "value": False,
- },
- }
- with pytest.raises(ValueError, match=re.escape("Number of fields cannot exceed 15.")):
- update_data_component.update_build_config(build_config, 16, "number_of_fields")
-
-
-async def test_build_data(update_data_component):
- update_data_component._attributes = {
- "field_1_key": {"key1": "new_value1"},
- "field_2_key": {"key3": "value3"},
- }
- update_data_component.text_key = "key1"
- update_data_component.text_key_validator = False
- update_data_component.old_data = Data(data={"key1": "old_value1", "key2": "value2"}, text_key="key2")
-
- result = await update_data_component.build_data()
-
- assert isinstance(result, Data)
- assert result.data == {"key1": "new_value1", "key2": "value2", "key3": "value3"}
- assert result.text_key == "key1"
-
-
-def test_get_data(update_data_component):
- update_data_component._attributes = {
- "field_1_key": {"key1": "value1"},
- "field_2_key": {"key2": "value2"},
- }
-
- result = update_data_component.get_data()
-
- assert result == {"key1": "value1", "key2": "value2"}
-
-
-def test_validate_text_key_valid(update_data_component):
- data = Data(data={"key1": "value1", "key2": "value2"}, text_key="key1")
- update_data_component.text_key = "key1"
-
- try:
- update_data_component.validate_text_key(data)
- except ValueError:
- pytest.fail("validate_text_key() raised ValueError unexpectedly!")
-
-
-def test_validate_text_key_invalid(update_data_component):
- data = Data(data={"key1": "value1", "key2": "value2"}, text_key="key1")
- update_data_component.text_key = "invalid_key"
- with pytest.raises(ValueError) as exc_info: # noqa: PT011
- update_data_component.validate_text_key(data)
- expected_error_message = (
- f"Text Key: '{update_data_component.text_key}' not found in the Data keys: {', '.join(data.data.keys())}"
- )
- assert str(exc_info.value) == expected_error_message
diff --git a/src/backend/tests/unit/components/search/test_arxiv_component.py b/src/backend/tests/unit/components/search/test_arxiv_component.py
deleted file mode 100644
index 55f243accfdf..000000000000
--- a/src/backend/tests/unit/components/search/test_arxiv_component.py
+++ /dev/null
@@ -1,124 +0,0 @@
-from unittest.mock import patch
-
-import pytest
-
-from tests.base import ComponentTestBaseWithClient
-
-
-class TestArXivComponent(ComponentTestBaseWithClient):
- def test_component_versions(self, default_kwargs, file_names_mapping):
- """Test component compatibility across versions."""
- from lfx.components.arxiv.arxiv import ArXivComponent
-
- # Test current version
- component = ArXivComponent(**default_kwargs)
- frontend_node = component.to_frontend_node()
- assert frontend_node is not None
-
- # Test backward compatibility
- for mapping in file_names_mapping:
- try:
- module = __import__(
- f"lfx.components.{mapping['module']}",
- fromlist=[mapping["file_name"]],
- )
- component_class = getattr(module, mapping["file_name"])
- component = component_class(**default_kwargs)
- frontend_node = component.to_frontend_node()
- assert frontend_node is not None
- except (ImportError, AttributeError) as e:
- pytest.fail(f"Failed to load component version {mapping['version']}: {e!s}")
-
- @pytest.fixture
- def component_class(self):
- from lfx.components.arxiv.arxiv import ArXivComponent
-
- return ArXivComponent
-
- @pytest.fixture
- def default_kwargs(self):
- return {
- "search_query": "quantum computing",
- "search_type": "all",
- "max_results": 10,
- "_session_id": "test-session",
- }
-
- @pytest.fixture
- def file_names_mapping(self):
- return []
-
- def test_component_initialization(self, component_class, default_kwargs):
- # Arrange
- component = component_class(**default_kwargs)
-
- # Act
- frontend_node = component.to_frontend_node()
-
- # Assert
- node_data = frontend_node["data"]["node"]
- assert node_data["template"]["search_query"]["value"] == "quantum computing"
- assert node_data["template"]["search_type"]["value"] == "all"
- assert node_data["template"]["max_results"]["value"] == 10
-
- def test_build_query_url(self, component_class, default_kwargs):
- # Arrange
- component = component_class(**default_kwargs)
-
- # Act
- url = component.build_query_url()
-
- # Assert
- assert "http://export.arxiv.org/api/query?" in url
- assert "search_query=all%3Aquantum%20computing" in url
- assert "max_results=10" in url
-
- def test_parse_atom_response(self, component_class, default_kwargs):
- # Arrange
- component = component_class(**default_kwargs)
- sample_xml = """
-
- http://arxiv.org/abs/quant-ph/0000001
- Test Paper
- Test summary
- 2023-01-01
- 2023-01-01
- Test Author
-
-
-
- Test comment
- Test Journal
-
-
- """.replace("<", "<").replace(">", ">")
-
- # Act
- papers = component.parse_atom_response(sample_xml)
-
- # Assert
- assert len(papers) == 1
- paper = papers[0]
- assert paper["title"] == "Test Paper"
- assert paper["summary"] == "Test summary"
- assert paper["authors"] == ["Test Author"]
- assert paper["arxiv_url"] == "http://arxiv.org/abs/quant-ph/0000001"
- assert paper["pdf_url"] == "http://arxiv.org/pdf/quant-ph/0000001"
- assert paper["comment"] == "Test comment"
- assert paper["journal_ref"] == "Test Journal"
- assert paper["primary_category"] == "quant-ph"
-
- @patch("urllib.request.build_opener")
- def test_invalid_url_handling(self, mock_build_opener, component_class, default_kwargs):
- # Arrange
- component = component_class(**default_kwargs)
- mock_build_opener.return_value.open.side_effect = ValueError("Invalid URL")
-
- # Act
- results = component.search_papers()
-
- # Assert
- assert len(results) == 1
- assert hasattr(results[0], "error")
- assert "Invalid URL" in results[0].error
diff --git a/src/backend/tests/unit/components/search/test_google_search_api.py b/src/backend/tests/unit/components/search/test_google_search_api.py
deleted file mode 100644
index 72b0c874b53c..000000000000
--- a/src/backend/tests/unit/components/search/test_google_search_api.py
+++ /dev/null
@@ -1,113 +0,0 @@
-from unittest.mock import patch
-
-import pandas as pd
-import pytest
-
-from lfx.components.google.google_search_api_core import GoogleSearchAPICore
-from lfx.schema import DataFrame
-from tests.base import ComponentTestBaseWithoutClient
-
-
-class TestGoogleSearchAPICore(ComponentTestBaseWithoutClient):
- @pytest.fixture
- def component_class(self):
- return GoogleSearchAPICore
-
- @pytest.fixture
- def default_kwargs(self):
- return {
- "google_api_key": "test_api_key",
- "google_cse_id": "test_cse_id",
- "input_value": "test query",
- "k": 2,
- }
-
- @pytest.fixture
- def file_names_mapping(self):
- # New component, no previous versions
- return []
-
- @pytest.fixture
- def mock_search_results(self):
- return pd.DataFrame(
- [
- {
- "title": "Test Title 1",
- "link": "https://test1.com",
- "snippet": "Test snippet 1",
- },
- {
- "title": "Test Title 2",
- "link": "https://test2.com",
- "snippet": "Test snippet 2",
- },
- ]
- )
-
- def test_component_initialization(self, component_class):
- component = component_class()
-
- frontend_node = component.to_frontend_node()
- node_data = frontend_node["data"]["node"]
-
- # Test basic component attributes
- assert node_data["display_name"] == "Google Search API"
- assert node_data["icon"] == "Google"
-
- # Test inputs configuration
- template = node_data["template"]
- assert "google_api_key" in template
- assert "google_cse_id" in template
- assert "input_value" in template
- assert "k" in template
-
- @patch("langchain_google_community.GoogleSearchAPIWrapper.results")
- def test_search_google_success(self, mock_results, component_class, default_kwargs, mock_search_results):
- component = component_class(**default_kwargs)
- mock_results.return_value = mock_search_results.to_dict("records")
-
- result = component.search_google()
-
- assert isinstance(result, DataFrame)
- assert len(result) == 2
- assert result.iloc[0]["title"] == "Test Title 1"
- assert result.iloc[1]["link"] == "https://test2.com"
- mock_results.assert_called_once_with(query="test query", num_results=2)
-
- def test_search_google_invalid_api_key(self, component_class):
- component = component_class(google_api_key=None)
- result = component.search_google()
-
- assert isinstance(result, DataFrame)
- assert "error" in result.columns
- assert "Invalid Google API Key" in result.iloc[0]["error"]
-
- def test_search_google_invalid_cse_id(self, component_class):
- component = component_class(google_api_key="valid_key", google_cse_id=None)
- result = component.search_google()
-
- assert isinstance(result, DataFrame)
- assert "error" in result.columns
- assert "Invalid Google CSE ID" in result.iloc[0]["error"]
-
- @patch("langchain_google_community.GoogleSearchAPIWrapper.results")
- def test_search_google_error_handling(self, mock_results, component_class, default_kwargs):
- component = component_class(**default_kwargs)
- mock_results.side_effect = ConnectionError("API connection failed")
-
- result = component.search_google()
-
- assert isinstance(result, DataFrame)
- assert "error" in result.columns
- assert "Connection error: API connection failed" in result.iloc[0]["error"]
-
- def test_build_method(self, component_class, default_kwargs):
- component = component_class(**default_kwargs)
- build_result = component.build()
- assert build_result == component.search_google
-
- @pytest.mark.asyncio
- async def test_latest_version(self, component_class, default_kwargs):
- """Override test_latest_version to skip API call."""
- component = component_class(**default_kwargs)
- assert component is not None
diff --git a/src/backend/tests/unit/components/search/test_google_serper_api_core.py b/src/backend/tests/unit/components/search/test_google_serper_api_core.py
deleted file mode 100644
index 9ed2025121c7..000000000000
--- a/src/backend/tests/unit/components/search/test_google_serper_api_core.py
+++ /dev/null
@@ -1,115 +0,0 @@
-from unittest.mock import MagicMock, patch
-
-import pytest
-
-from lfx.components.google.google_serper_api_core import GoogleSerperAPICore
-from lfx.schema import DataFrame
-
-
-@pytest.fixture
-def google_serper_component():
- return GoogleSerperAPICore()
-
-
-@pytest.fixture
-def mock_search_results():
- return {
- "organic": [
- {
- "title": "Test Title 1",
- "link": "https://test1.com",
- "snippet": "Test snippet 1",
- },
- {
- "title": "Test Title 2",
- "link": "https://test2.com",
- "snippet": "Test snippet 2",
- },
- ]
- }
-
-
-def test_component_initialization(google_serper_component):
- assert google_serper_component.display_name == "Google Serper API"
- assert google_serper_component.icon == "Serper"
-
- input_names = [input_.name for input_ in google_serper_component.inputs]
- assert "serper_api_key" in input_names
- assert "input_value" in input_names
- assert "k" in input_names
-
-
-@patch("langchain_community.utilities.google_serper.requests.get")
-@patch("langchain_community.utilities.google_serper.requests.post")
-def test_search_serper_success(mock_post, mock_get, google_serper_component, mock_search_results):
- # Configure mocks
- mock_response = MagicMock()
- mock_response.status_code = 200
- mock_response.json.return_value = mock_search_results
- mock_post.return_value = mock_response
- mock_get.return_value = mock_response
-
- # Configure component
- google_serper_component.serper_api_key = "test_api_key"
- google_serper_component.input_value = "test query"
- google_serper_component.k = 2
-
- # Execute search
- result = google_serper_component.search_serper()
-
- # Verify results
- assert isinstance(result, DataFrame)
- assert len(result) == 2
- assert list(result.columns) == ["title", "link", "snippet"]
- assert result.iloc[0]["title"] == "Test Title 1"
- assert result.iloc[1]["link"] == "https://test2.com"
-
-
-@patch("langchain_community.utilities.google_serper.requests.get")
-@patch("langchain_community.utilities.google_serper.requests.post")
-def test_search_serper_error_handling(mock_post, mock_get, google_serper_component):
- # Configure mocks to simulate error
- mock_response = MagicMock()
- mock_response.status_code = 403
- mock_response.raise_for_status.side_effect = ConnectionError("API connection failed")
- mock_post.return_value = mock_response
- mock_get.return_value = mock_response
-
- # Configure component
- google_serper_component.serper_api_key = "test_api_key"
- google_serper_component.input_value = "test query"
- google_serper_component.k = 2
-
- # Execute search
- result = google_serper_component.search_serper()
-
- # Verify error handling
- assert isinstance(result, DataFrame)
- assert "error" in result.columns
- assert "API connection failed" in result.iloc[0]["error"]
-
-
-def test_text_search_serper(google_serper_component):
- with patch.object(google_serper_component, "search_serper") as mock_search:
- mock_search.return_value = DataFrame(
- [{"title": "Test Title", "link": "https://test.com", "snippet": "Test snippet"}]
- )
-
- result = google_serper_component.text_search_serper()
- assert result.text is not None
- assert "Test Title" in result.text
- assert "https://test.com" in result.text
-
-
-def test_build_wrapper(google_serper_component):
- google_serper_component.serper_api_key = "test_api_key"
- google_serper_component.k = 2
-
- wrapper = google_serper_component._build_wrapper()
- assert wrapper.serper_api_key == "test_api_key"
- assert wrapper.k == 2
-
-
-def test_build_method(google_serper_component):
- build_result = google_serper_component.build()
- assert build_result == google_serper_component.search_serper
diff --git a/src/backend/tests/unit/components/search/test_wikidata_api.py b/src/backend/tests/unit/components/search/test_wikidata_api.py
deleted file mode 100644
index ef2e48c015a5..000000000000
--- a/src/backend/tests/unit/components/search/test_wikidata_api.py
+++ /dev/null
@@ -1,103 +0,0 @@
-from unittest.mock import MagicMock, patch
-
-import httpx
-import pytest
-from langchain_core.tools import ToolException
-from langflow.custom import Component
-
-from lfx.components.wikipedia import WikidataComponent
-from lfx.custom.utils import build_custom_component_template
-
-# Import the base test class
-from tests.base import ComponentTestBaseWithoutClient
-
-
-class TestWikidataComponent(ComponentTestBaseWithoutClient):
- @pytest.fixture
- def component_class(self):
- """Fixture to create a WikidataComponent instance."""
- return WikidataComponent
-
- @pytest.fixture
- def file_names_mapping(self):
- """Return an empty list since this component doesn't have version-specific files."""
- return []
-
- @pytest.fixture
- def mock_query(self):
- """Fixture to provide a default query."""
- return "test query"
-
- def test_wikidata_initialization(self, component_class):
- component = component_class()
- assert component.display_name == "Wikidata"
- assert component.description == "Performs a search using the Wikidata API."
- assert component.icon == "Wikipedia"
-
- def test_wikidata_template(self, component_class):
- component = component_class()
- frontend_node, _ = build_custom_component_template(Component(_code=component._code))
-
- # Verify basic structure
- assert isinstance(frontend_node, dict)
-
- # Verify inputs
- assert "template" in frontend_node
- input_names = [input_["name"] for input_ in frontend_node["template"].values() if isinstance(input_, dict)]
- assert "query" in input_names
-
- @patch("lfx.components.tools.wikidata_api.httpx.get")
- def test_fetch_content_success(self, mock_httpx, component_class, mock_query):
- component = component_class()
- component.query = mock_query
-
- # Mock successful API response
- mock_response = MagicMock()
- mock_response.json.return_value = {
- "search": [
- {
- "label": "Test Label",
- "id": "Q123",
- "url": "https://test.com",
- "description": "Test Description",
- "concepturi": "https://test.com/concept",
- }
- ]
- }
- mock_httpx.return_value = mock_response
-
- result = component.fetch_content()
-
- assert isinstance(result, list)
- assert len(result) == 1
- assert result[0].text == "Test Label: Test Description"
- assert result[0].data["label"] == "Test Label"
- assert result[0].data["id"] == "Q123"
-
- @patch("lfx.components.tools.wikidata_api.httpx.get")
- def test_fetch_content_empty_response(self, mock_httpx, component_class, mock_query):
- component = component_class()
- component.query = mock_query
-
- # Mock empty API response
- mock_response = MagicMock()
- mock_response.json.return_value = {"search": []}
- mock_httpx.return_value = mock_response
-
- result = component.fetch_content()
-
- assert isinstance(result, list)
- assert len(result) == 1
- assert "error" in result[0].data
- assert "No search results found" in result[0].data["error"]
-
- @patch("lfx.components.tools.wikidata_api.httpx.get")
- def test_fetch_content_error_handling(self, mock_httpx, component_class, mock_query):
- component = component_class()
- component.query = mock_query
-
- # Mock HTTP error
- mock_httpx.side_effect = httpx.HTTPError("API Error")
-
- with pytest.raises(ToolException):
- component.fetch_content()
diff --git a/src/backend/tests/unit/components/search/test_wikipedia_api.py b/src/backend/tests/unit/components/search/test_wikipedia_api.py
deleted file mode 100644
index ccb8b6c3f1fc..000000000000
--- a/src/backend/tests/unit/components/search/test_wikipedia_api.py
+++ /dev/null
@@ -1,91 +0,0 @@
-from unittest.mock import MagicMock
-
-import pytest
-from langflow.custom import Component
-
-from lfx.components.wikipedia import WikipediaComponent
-from lfx.custom.utils import build_custom_component_template
-
-# Import the base test class
-from tests.base import ComponentTestBaseWithoutClient
-
-
-class TestWikipediaComponent(ComponentTestBaseWithoutClient):
- @pytest.fixture
- def component_class(self):
- """Fixture to create a WikipediaComponent instance."""
- return WikipediaComponent
-
- @pytest.fixture
- def default_kwargs(self):
- """Return the default kwargs for the component."""
- return {
- "input_value": "test query",
- "lang": "en",
- "k": 3,
- }
-
- @pytest.fixture
- def file_names_mapping(self):
- """Return an empty list since this component doesn't have version-specific files."""
- return []
-
- def test_wikipedia_initialization(self, component_class):
- component = component_class()
- assert component.display_name == "Wikipedia"
- assert component.description == "Call Wikipedia API."
- assert component.icon == "Wikipedia"
-
- def test_wikipedia_template(self, component_class):
- component = component_class()
- frontend_node, _ = build_custom_component_template(Component(_code=component._code))
-
- # Verify basic structure
- assert isinstance(frontend_node, dict)
-
- # Verify inputs
- assert "template" in frontend_node
- input_names = [input_["name"] for input_ in frontend_node["template"].values() if isinstance(input_, dict)]
-
- expected_inputs = ["input_value", "lang", "k", "load_all_available_meta", "doc_content_chars_max"]
-
- for input_name in expected_inputs:
- assert input_name in input_names
-
- @pytest.fixture
- def mock_wikipedia_wrapper(self, mocker):
- return mocker.patch("langchain_community.utilities.wikipedia.WikipediaAPIWrapper")
-
- def test_fetch_content(self, component_class, mock_wikipedia_wrapper):
- component = component_class()
- component.input_value = "test query"
- component.k = 3
- component.lang = "en"
-
- # Mock the WikipediaAPIWrapper and its load method
- mock_instance = MagicMock()
- mock_wikipedia_wrapper.return_value = mock_instance
- mock_doc = MagicMock()
- mock_doc.page_content = "Test content"
- mock_doc.metadata = {"source": "wikipedia", "title": "Test Page"}
- mock_instance.load.return_value = [mock_doc]
-
- # Mock the _build_wrapper method to return our mock instance
- component._build_wrapper = MagicMock(return_value=mock_instance)
-
- result = component.fetch_content()
-
- # Verify wrapper was built with correct params
- component._build_wrapper.assert_called_once()
- mock_instance.load.assert_called_once_with("test query")
- assert isinstance(result, list)
- assert len(result) == 1
- assert result[0].text == "Test content"
-
- def test_wikipedia_error_handling(self, component_class):
- component = component_class()
- # Mock _build_wrapper to raise exception
- component._build_wrapper = MagicMock(side_effect=Exception("API Error"))
-
- with pytest.raises(Exception, match="API Error"):
- component.fetch_content()
diff --git a/src/backend/tests/unit/components/search/test_yfinance_tool.py b/src/backend/tests/unit/components/search/test_yfinance_tool.py
deleted file mode 100644
index 9c5c3d705e87..000000000000
--- a/src/backend/tests/unit/components/search/test_yfinance_tool.py
+++ /dev/null
@@ -1,83 +0,0 @@
-from unittest.mock import MagicMock, patch
-
-import pytest
-from langchain_core.tools import ToolException
-
-from lfx.components.yahoosearch.yahoo import YahooFinanceMethod, YfinanceComponent
-from lfx.custom.utils import build_custom_component_template
-from lfx.schema import Data
-
-
-class TestYfinanceComponent:
- @pytest.fixture
- def component_class(self):
- return YfinanceComponent
-
- @pytest.fixture
- def default_kwargs(self):
- return {"symbol": "AAPL", "method": YahooFinanceMethod.GET_INFO, "num_news": 5, "_session_id": "test-session"}
-
- @pytest.fixture
- def file_names_mapping(self):
- return []
-
- def test_initialization(self, component_class):
- component = component_class()
- assert component.display_name == "Yahoo! Finance"
- assert component.icon == "trending-up"
- assert "yfinance" in component.description
-
- def test_template_structure(self, component_class):
- component = component_class()
- frontend_node, _ = build_custom_component_template(component)
-
- assert "template" in frontend_node
- input_names = [input_["name"] for input_ in frontend_node["template"].values() if isinstance(input_, dict)]
-
- expected_inputs = ["symbol", "method", "num_news"]
- for input_name in expected_inputs:
- assert input_name in input_names
-
- @patch("lfx.components.yahoosearch.yahoo.yf.Ticker")
- def test_fetch_info(self, mock_ticker, component_class, default_kwargs):
- component = component_class(**default_kwargs)
-
- # Setup mock
- mock_instance = MagicMock()
- mock_ticker.return_value = mock_instance
- mock_instance.info = {"companyName": "Apple Inc."}
-
- result = component.fetch_content()
-
- assert isinstance(result, list)
- assert len(result) == 1
- assert "Apple Inc." in result[0].text
-
- @patch("lfx.components.yahoosearch.yahoo.yf.Ticker")
- def test_fetch_news(self, mock_ticker, component_class):
- component = component_class(symbol="AAPL", method=YahooFinanceMethod.GET_NEWS, num_news=2)
-
- # Setup mock
- mock_instance = MagicMock()
- mock_ticker.return_value = mock_instance
- mock_instance.news = [
- {"title": "News 1", "link": "http://example.com/1"},
- {"title": "News 2", "link": "http://example.com/2"},
- ]
-
- result = component.fetch_content()
-
- assert isinstance(result, list)
- assert len(result) == 2
- assert all(isinstance(item, Data) for item in result)
- assert "News 1" in result[0].text
- assert "http://example.com/1" in result[0].text
-
- def test_error_handling(self, component_class, default_kwargs):
- component = component_class(**default_kwargs)
-
- with patch.object(component, "_fetch_yfinance_data") as mock_fetch:
- mock_fetch.side_effect = Exception("API Error")
-
- with pytest.raises(ToolException):
- component.fetch_content()
diff --git a/src/backend/tests/unit/components/test_all_modules_importable.py b/src/backend/tests/unit/components/test_all_modules_importable.py
deleted file mode 100644
index 4006589255bc..000000000000
--- a/src/backend/tests/unit/components/test_all_modules_importable.py
+++ /dev/null
@@ -1,315 +0,0 @@
-"""Test to ensure all component modules are importable after dynamic import refactor.
-
-This test validates that every component module can be imported successfully
-and that all components listed in __all__ can be accessed.
-"""
-
-import importlib
-
-import pytest
-from langflow import components
-
-
-class TestAllModulesImportable:
- """Test that all component modules are importable."""
-
- def test_all_component_categories_importable(self):
- """Test that all component categories in __all__ can be imported."""
- failed_imports = []
-
- for category_name in components.__all__:
- try:
- category_module = getattr(components, category_name)
- assert category_module is not None, f"Category {category_name} is None"
-
- # Verify it's actually a module
- assert hasattr(category_module, "__name__"), f"Category {category_name} is not a module"
-
- except Exception as e:
- failed_imports.append(f"{category_name}: {e!s}")
-
- if failed_imports:
- pytest.fail(f"Failed to import categories: {failed_imports}")
-
- def test_all_components_in_categories_importable(self):
- """Test that all components in each category's __all__ can be imported."""
- failed_imports = []
- successful_imports = 0
-
- print(f"Testing component imports across {len(components.__all__)} categories") # noqa: T201
-
- for category_name in components.__all__:
- try:
- category_module = getattr(components, category_name)
-
- if hasattr(category_module, "__all__"):
- category_components = len(category_module.__all__)
- print(f"Testing {category_components} components in {category_name}") # noqa: T201
-
- for component_name in category_module.__all__:
- try:
- component = getattr(category_module, component_name)
- assert component is not None, f"Component {component_name} is None"
- assert callable(component), f"Component {component_name} is not callable"
- successful_imports += 1
-
- except Exception as e:
- failed_imports.append(f"{category_name}.{component_name}: {e!s}")
- print(f"FAILED: {category_name}.{component_name}: {e!s}") # noqa: T201
- else:
- # Category doesn't have __all__, skip
- print(f"Skipping {category_name} (no __all__ attribute)") # noqa: T201
- continue
-
- except Exception as e:
- failed_imports.append(f"Category {category_name}: {e!s}")
- print(f"FAILED: Category {category_name}: {e!s}") # noqa: T201
-
- print(f"Successfully imported {successful_imports} components") # noqa: T201
-
- if failed_imports:
- print(f"Failed imports ({len(failed_imports)}):") # noqa: T201
- for failure in failed_imports[:10]: # Show first 10 failures
- print(f" - {failure}") # noqa: T201
- if len(failed_imports) > 10:
- print(f" ... and {len(failed_imports) - 10} more") # noqa: T201
-
- pytest.fail(f"Failed to import {len(failed_imports)} components")
-
- def test_dynamic_imports_mapping_complete(self):
- """Test that _dynamic_imports mapping is complete for all categories."""
- failed_mappings = []
-
- for category_name in components.__all__:
- try:
- category_module = getattr(components, category_name)
-
- if hasattr(category_module, "__all__") and hasattr(category_module, "_dynamic_imports"):
- category_all = set(category_module.__all__)
- dynamic_imports_keys = set(category_module._dynamic_imports.keys())
-
- # Check that all items in __all__ have corresponding _dynamic_imports entries
- missing_in_dynamic = category_all - dynamic_imports_keys
- if missing_in_dynamic:
- failed_mappings.append(f"{category_name}: Missing in _dynamic_imports: {missing_in_dynamic}")
-
- # Check that all _dynamic_imports keys are in __all__
- missing_in_all = dynamic_imports_keys - category_all
- if missing_in_all:
- failed_mappings.append(f"{category_name}: Missing in __all__: {missing_in_all}")
-
- except Exception as e:
- failed_mappings.append(f"{category_name}: Error checking mappings: {e!s}")
-
- if failed_mappings:
- pytest.fail(f"Inconsistent mappings: {failed_mappings}")
-
- def test_backward_compatibility_imports(self):
- """Test that traditional import patterns still work."""
- # Test some key imports that should always work
- traditional_imports = [
- ("langflow.components.openai", "OpenAIModelComponent"),
- ("langflow.components.anthropic", "AnthropicModelComponent"),
- ("langflow.components.data", "APIRequestComponent"),
- ("langflow.components.agents", "AgentComponent"),
- ("langflow.components.helpers", "CalculatorComponent"),
- ]
-
- failed_imports = []
-
- for module_name, component_name in traditional_imports:
- try:
- module = importlib.import_module(module_name)
- component = getattr(module, component_name)
- assert component is not None
- assert callable(component)
-
- except Exception as e:
- failed_imports.append(f"{module_name}.{component_name}: {e!s}")
-
- if failed_imports:
- pytest.fail(f"Traditional imports failed: {failed_imports}")
-
- def test_component_modules_have_required_attributes(self):
- """Test that component modules have required attributes for dynamic loading."""
- failed_modules = []
-
- for category_name in components.__all__:
- try:
- category_module = getattr(components, category_name)
-
- # Check for required attributes
- required_attrs = ["__all__"]
-
- failed_modules.extend(
- f"{category_name}: Missing required attribute {attr}"
- for attr in required_attrs
- if not hasattr(category_module, attr)
- )
-
- # Check that if it has dynamic imports, it has the pattern
- if hasattr(category_module, "_dynamic_imports"):
- if not hasattr(category_module, "__getattr__"):
- failed_modules.append(f"{category_name}: Has _dynamic_imports but no __getattr__")
- if not hasattr(category_module, "__dir__"):
- failed_modules.append(f"{category_name}: Has _dynamic_imports but no __dir__")
-
- except Exception as e:
- failed_modules.append(f"{category_name}: Error checking attributes: {e!s}")
-
- if failed_modules:
- pytest.fail(f"Module attribute issues: {failed_modules}")
-
- def test_no_circular_imports(self):
- """Test that there are no circular import issues."""
- # Test importing in different orders to catch circular imports
- import_orders = [
- ["agents", "data", "openai"],
- ["openai", "agents", "data"],
- ["data", "openai", "agents"],
- ]
-
- for order in import_orders:
- try:
- for category_name in order:
- category_module = getattr(components, category_name)
- # Access a component to trigger dynamic import
- if hasattr(category_module, "__all__") and category_module.__all__:
- first_component_name = category_module.__all__[0]
- getattr(category_module, first_component_name)
-
- except Exception as e:
- pytest.fail(f"Circular import issue with order {order}: {e!s}")
-
- def test_component_access_caching(self):
- """Test that component access caching works correctly."""
- # Access the same component multiple times and ensure caching works
- test_cases = [
- ("openai", "OpenAIModelComponent"),
- ("data", "APIRequestComponent"),
- ("helpers", "CalculatorComponent"),
- ]
-
- for category_name, component_name in test_cases:
- category_module = getattr(components, category_name)
-
- # First access
- component1 = getattr(category_module, component_name)
-
- # Component should now be cached in module globals
- assert component_name in category_module.__dict__
-
- # Second access should return the same object
- component2 = getattr(category_module, component_name)
- assert component1 is component2, f"Caching failed for {category_name}.{component_name}"
-
- def test_error_handling_for_missing_components(self):
- """Test that appropriate errors are raised for missing components."""
- test_cases = [
- ("openai", "NonExistentComponent"),
- ("data", "AnotherNonExistentComponent"),
- ]
-
- for category_name, component_name in test_cases:
- category_module = getattr(components, category_name)
-
- with pytest.raises(AttributeError, match=f"has no attribute '{component_name}'"):
- getattr(category_module, component_name)
-
- def test_dir_functionality(self):
- """Test that __dir__ functionality works for all modules."""
- # Test main components module
- main_dir = dir(components)
- assert "openai" in main_dir
- assert "data" in main_dir
- assert "agents" in main_dir
-
- # Test category modules
- for category_name in ["openai", "data", "helpers"]:
- category_module = getattr(components, category_name)
- category_dir = dir(category_module)
-
- # Should include all components from __all__
- if hasattr(category_module, "__all__"):
- for component_name in category_module.__all__:
- assert component_name in category_dir, f"{component_name} missing from dir({category_name})"
-
- def test_module_metadata_preservation(self):
- """Test that module metadata is preserved after dynamic loading."""
- test_components = [
- ("openai", "OpenAIModelComponent"),
- ("anthropic", "AnthropicModelComponent"),
- ("data", "APIRequestComponent"),
- ]
-
- for category_name, component_name in test_components:
- category_module = getattr(components, category_name)
- component = getattr(category_module, component_name)
-
- # Check that component has expected metadata
- assert hasattr(component, "__name__")
- assert hasattr(component, "__module__")
- assert component.__name__ == component_name
- assert category_name in component.__module__
-
-
-class TestSpecificModulePatterns:
- """Test specific module patterns and edge cases."""
-
- def test_empty_init_modules(self):
- """Test modules that might have empty __init__.py files."""
- # These modules might have empty __init__.py files in the original structure
- potentially_empty_modules = [
- "chains",
- "output_parsers",
- "textsplitters",
- "toolkits",
- "link_extractors",
- "documentloaders",
- ]
-
- for module_name in potentially_empty_modules:
- if module_name in components.__all__:
- try:
- module = getattr(components, module_name)
- # Should be able to import even if empty
- assert module is not None
- except Exception as e:
- pytest.fail(f"Failed to import potentially empty module {module_name}: {e}")
-
- def test_platform_specific_imports(self):
- """Test platform-specific imports like NVIDIA Windows components."""
- # Test NVIDIA module which has platform-specific logic
- nvidia_module = components.nvidia
- assert nvidia_module is not None
-
- # Should have basic components regardless of platform
- assert "NVIDIAModelComponent" in nvidia_module.__all__
-
- # Should be able to access components
- nvidia_model = nvidia_module.NVIDIAModelComponent
- assert nvidia_model is not None
-
- def test_large_modules_import_efficiently(self):
- """Test that large modules with many components import efficiently."""
- import time
-
- # Test large modules
- large_modules = ["data", "processing", "langchain_utilities"]
-
- for module_name in large_modules:
- if module_name in components.__all__:
- start_time = time.time()
- module = getattr(components, module_name)
- import_time = time.time() - start_time
-
- # Initial import should be fast (just loading __init__.py)
- assert import_time < 0.5, f"Module {module_name} took too long to import: {import_time}s"
-
- # Should have components available
- assert hasattr(module, "__all__")
- assert len(module.__all__) > 0
-
-
-if __name__ == "__main__":
- pytest.main([__file__, "-v"])
diff --git a/src/backend/tests/unit/components/tools/test_calculator.py b/src/backend/tests/unit/components/tools/test_calculator.py
deleted file mode 100644
index 2de5f70703e3..000000000000
--- a/src/backend/tests/unit/components/tools/test_calculator.py
+++ /dev/null
@@ -1,84 +0,0 @@
-import pytest
-
-from lfx.components.helpers.calculator_core import CalculatorComponent
-from tests.base import ComponentTestBaseWithoutClient
-
-
-class TestCalculatorComponent(ComponentTestBaseWithoutClient):
- @pytest.fixture
- def component_class(self):
- return CalculatorComponent
-
- @pytest.fixture
- def default_kwargs(self):
- return {"expression": "2 + 2", "_session_id": "test_session"}
-
- @pytest.fixture
- def file_names_mapping(self):
- return []
-
- def test_basic_calculation(self, component_class, default_kwargs):
- # Arrange
- component = component_class(**default_kwargs)
-
- # Act
- result = component.evaluate_expression()
-
- # Assert
- assert result.data["result"] == "4"
-
- def test_complex_calculation(self, component_class):
- # Arrange
- component = component_class(expression="4*4*(33/22)+12-20", _session_id="test_session")
-
- # Act
- result = component.evaluate_expression()
-
- # Assert
- assert float(result.data["result"]) == pytest.approx(16)
-
- def test_division_by_zero(self, component_class):
- # Arrange
- component = component_class(expression="1/0", _session_id="test_session")
-
- # Act
- result = component.evaluate_expression()
-
- # Assert
- assert "error" in result.data
- assert result.data["error"] == "Error: Division by zero"
-
- def test_invalid_expression(self, component_class):
- # Arrange
- component = component_class(expression="2 + *", _session_id="test_session")
-
- # Act
- result = component.evaluate_expression()
-
- # Assert
- assert "error" in result.data
- assert "Invalid expression" in result.data["error"]
-
- def test_unsupported_operation(self, component_class):
- # Arrange
- component = component_class(expression="sqrt(16)", _session_id="test_session")
-
- # Act
- result = component.evaluate_expression()
-
- # Assert
- assert "error" in result.data
- assert "Unsupported operation" in result.data["error"]
-
- def test_component_frontend_node(self, component_class, default_kwargs):
- # Arrange
- component = component_class(**default_kwargs)
-
- # Act
- frontend_node = component.to_frontend_node()
-
- # Assert
- node_data = frontend_node["data"]["node"]
- assert node_data["display_name"] == "Calculator"
- assert node_data["description"] == "Perform basic arithmetic operations on a given expression."
- assert node_data["icon"] == "calculator"
diff --git a/src/backend/tests/unit/components/tools/test_python_repl_tool.py b/src/backend/tests/unit/components/tools/test_python_repl_tool.py
deleted file mode 100644
index ecc28bb87161..000000000000
--- a/src/backend/tests/unit/components/tools/test_python_repl_tool.py
+++ /dev/null
@@ -1,53 +0,0 @@
-import pytest
-
-from lfx.components.processing import PythonREPLComponent
-from tests.base import DID_NOT_EXIST, ComponentTestBaseWithoutClient
-
-
-class TestPythonREPLComponent(ComponentTestBaseWithoutClient):
- @pytest.fixture
- def component_class(self):
- return PythonREPLComponent
-
- @pytest.fixture
- def default_kwargs(self):
- return {
- "global_imports": "math",
- "python_code": "print('Hello, World!')",
- }
-
- @pytest.fixture
- def file_names_mapping(self):
- # Component not yet released, mark all versions as non-existent
- return [
- {"version": "1.0.17", "module": "tools", "file_name": DID_NOT_EXIST},
- {"version": "1.0.18", "module": "tools", "file_name": DID_NOT_EXIST},
- {"version": "1.0.19", "module": "tools", "file_name": DID_NOT_EXIST},
- {"version": "1.1.0", "module": "tools", "file_name": DID_NOT_EXIST},
- {"version": "1.1.1", "module": "tools", "file_name": DID_NOT_EXIST},
- ]
-
- def test_component_initialization(self, component_class, default_kwargs):
- component = component_class(**default_kwargs)
- frontend_node = component.to_frontend_node()
- node_data = frontend_node["data"]["node"]
-
- # Test template fields
- template = node_data["template"]
- assert "global_imports" in template
- assert "python_code" in template
-
- # Test global_imports configuration
- global_imports = template["global_imports"]
- assert global_imports["type"] == "str"
- assert global_imports["value"] == "math"
- assert global_imports["required"] is True
-
- # Test python_code configuration
- python_code = template["python_code"]
- assert python_code["type"] == "code"
- assert python_code["value"] == "print('Hello, World!')"
- assert python_code["required"] is True
-
- # Test base configuration
- assert "Data" in node_data["base_classes"]
diff --git a/src/backend/tests/unit/components/tools/test_serp_api.py b/src/backend/tests/unit/components/tools/test_serp_api.py
deleted file mode 100644
index 37b60aba0a47..000000000000
--- a/src/backend/tests/unit/components/tools/test_serp_api.py
+++ /dev/null
@@ -1,91 +0,0 @@
-from unittest.mock import MagicMock, patch
-
-import pytest
-from langchain_core.tools import ToolException
-from langflow.custom import Component
-
-from lfx.components.serpapi.serp import SerpComponent
-from lfx.custom.utils import build_custom_component_template
-from lfx.schema import Data
-from lfx.schema.message import Message
-
-
-def test_serpapi_initialization():
- component = SerpComponent()
- assert component.display_name == "Serp Search API"
- assert component.description == "Call Serp Search API with result limiting"
- assert component.icon == "SerpSearch"
-
-
-def test_serpapi_template():
- serpapi = SerpComponent()
- component = Component(_code=serpapi._code)
- frontend_node, _ = build_custom_component_template(component)
-
- # Verify basic structure
- assert isinstance(frontend_node, dict)
-
- # Verify inputs
- assert "template" in frontend_node
- input_names = [input_["name"] for input_ in frontend_node["template"].values() if isinstance(input_, dict)]
-
- expected_inputs = ["serpapi_api_key", "input_value", "search_params", "max_results", "max_snippet_length"]
-
- for input_name in expected_inputs:
- assert input_name in input_names
-
-
-@patch("lfx.components.serpapi.serp.SerpAPIWrapper")
-def test_fetch_content(mock_serpapi_wrapper):
- component = SerpComponent()
- component.serpapi_api_key = "test-key"
- component.input_value = "test query"
- component.max_results = 3
- component.max_snippet_length = 100
-
- # Mock the SerpAPIWrapper and its results method
- mock_instance = MagicMock()
- mock_serpapi_wrapper.return_value = mock_instance
- mock_instance.results.return_value = {
- "organic_results": [
- {"title": "Test Result 1", "link": "https://test.com", "snippet": "This is a test result 1"},
- {"title": "Test Result 2", "link": "https://test2.com", "snippet": "This is a test result 2"},
- ]
- }
-
- result = component.fetch_content()
-
- assert isinstance(result, list)
- assert len(result) == 2
- assert result[0].text == "This is a test result 1"
- assert result[0].data["title"] == "Test Result 1"
- assert result[0].data["link"] == "https://test.com"
-
-
-def test_fetch_content_text():
- component = SerpComponent()
- component.fetch_content = MagicMock(
- return_value=[
- Data(text="First result", data={"title": "Title 1"}),
- Data(text="Second result", data={"title": "Title 2"}),
- ]
- )
-
- result = component.fetch_content_text()
-
- assert isinstance(result, Message)
- assert result.text == "First result\nSecond result\n"
-
-
-def test_error_handling():
- component = SerpComponent()
- component.serpapi_api_key = "test-key"
- component.input_value = "test query"
-
- with patch("lfx.components.serpapi.serp.SerpAPIWrapper") as mock_serpapi:
- mock_instance = MagicMock()
- mock_serpapi.return_value = mock_instance
- mock_instance.results.side_effect = Exception("API Error")
-
- with pytest.raises(ToolException):
- component.fetch_content()
diff --git a/src/backend/tests/unit/components/vectorstores/test_chroma_vector_store_component.py b/src/backend/tests/unit/components/vectorstores/test_chroma_vector_store_component.py
deleted file mode 100644
index 9e1dd24f9ea2..000000000000
--- a/src/backend/tests/unit/components/vectorstores/test_chroma_vector_store_component.py
+++ /dev/null
@@ -1,401 +0,0 @@
-import os
-from pathlib import Path
-from typing import Any
-
-import pytest
-
-from lfx.components.chroma import ChromaVectorStoreComponent
-from lfx.schema.data import Data
-from tests.base import ComponentTestBaseWithoutClient, VersionComponentMapping
-
-
-@pytest.mark.api_key_required
-class TestChromaVectorStoreComponent(ComponentTestBaseWithoutClient):
- @pytest.fixture
- def component_class(self) -> type[Any]:
- """Return the component class to test."""
- return ChromaVectorStoreComponent
-
- @pytest.fixture
- def default_kwargs(self, tmp_path: Path) -> dict[str, Any]:
- """Return the default kwargs for the component."""
- from lfx.components.openai.openai import OpenAIEmbeddingsComponent
-
- if os.getenv("OPENAI_API_KEY") is None:
- pytest.skip("OPENAI_API_KEY is not set")
-
- api_key = os.getenv("OPENAI_API_KEY")
-
- return {
- "embedding": OpenAIEmbeddingsComponent(openai_api_key=api_key).build_embeddings(),
- "collection_name": "test_collection",
- "persist_directory": tmp_path,
- }
-
- @pytest.fixture
- def file_names_mapping(self) -> list[VersionComponentMapping]:
- """Return the file names mapping for different versions."""
- return [
- {"version": "1.0.19", "module": "vectorstores", "file_name": "Chroma"},
- {"version": "1.1.0", "module": "vectorstores", "file_name": "chroma"},
- {"version": "1.1.1", "module": "vectorstores", "file_name": "chroma"},
- ]
-
- def test_create_db(self, component_class: type[ChromaVectorStoreComponent], default_kwargs: dict[str, Any]) -> None:
- """Test the create_collection method."""
- component: ChromaVectorStoreComponent = component_class().set(**default_kwargs)
- component.build_vector_store()
- persist_directory = default_kwargs["persist_directory"]
- assert persist_directory.exists()
- assert persist_directory.is_dir()
- # Assert it isn't empty
- assert len(list(persist_directory.iterdir())) > 0
- # Assert there's a chroma.sqlite3 file
- assert (persist_directory / "chroma.sqlite3").exists()
- assert (persist_directory / "chroma.sqlite3").is_file()
-
- def test_create_collection_with_data(
- self, component_class: type[ChromaVectorStoreComponent], default_kwargs: dict[str, Any]
- ) -> None:
- """Test the create_collection method with data."""
- # set ingest_data in default_kwargs to a list of Data objects
- test_texts = ["test data 1", "test data 2", "something completely different"]
- default_kwargs["ingest_data"] = [Data(text=text) for text in test_texts]
-
- component: ChromaVectorStoreComponent = component_class().set(**default_kwargs)
- vector_store = component.build_vector_store()
-
- # Verify collection exists and has the correct data
- collection = vector_store._collection
- assert collection.name == default_kwargs["collection_name"]
- assert collection.count() == len(test_texts)
-
- def test_similarity_search(
- self, component_class: type[ChromaVectorStoreComponent], default_kwargs: dict[str, Any]
- ) -> None:
- """Test the similarity search functionality through the component."""
- # Create test data with distinct topics
- test_data = [
- "The quick brown fox jumps over the lazy dog",
- "Python is a popular programming language",
- "Machine learning models process data",
- "The lazy dog sleeps all day long",
- ]
- default_kwargs["ingest_data"] = [Data(text=text) for text in test_data]
- default_kwargs["search_type"] = "Similarity"
- default_kwargs["number_of_results"] = 2
-
- component: ChromaVectorStoreComponent = component_class().set(**default_kwargs)
- component.build_vector_store()
-
- # Test similarity search through the component
- component.set(search_query="dog sleeping")
- results = component.search_documents()
-
- assert len(results) == 2
- # The most relevant results should be about dogs
- assert any("dog" in result.text.lower() for result in results)
-
- # Test with different number of results
- component.set(number_of_results=3)
- results = component.search_documents()
- assert len(results) == 3
-
- def test_mmr_search(
- self, component_class: type[ChromaVectorStoreComponent], default_kwargs: dict[str, Any]
- ) -> None:
- """Test the MMR search functionality through the component."""
- # Create test data with some similar documents
- test_data = [
- "The quick brown fox jumps",
- "The quick brown fox leaps",
- "The quick brown fox hops",
- "Something completely different about cats",
- ]
- default_kwargs["ingest_data"] = [Data(text=text) for text in test_data]
- default_kwargs["search_type"] = "MMR"
- default_kwargs["number_of_results"] = 3
-
- component: ChromaVectorStoreComponent = component_class().set(**default_kwargs)
- component.build_vector_store()
-
- # Test MMR search through the component
- component.set(search_query="quick fox")
- results = component.search_documents()
-
- assert len(results) == 3
- # Results should be diverse but relevant
- assert any("fox" in result.text.lower() for result in results)
-
- # Test with different settings
- component.set(number_of_results=2)
- diverse_results = component.search_documents()
- assert len(diverse_results) == 2
-
- def test_search_with_different_types(
- self, component_class: type[ChromaVectorStoreComponent], default_kwargs: dict[str, Any]
- ) -> None:
- """Test search with different search types."""
- test_data = [
- "The quick brown fox jumps over the lazy dog",
- "Python is a popular programming language",
- "Machine learning models process data",
- ]
- default_kwargs["ingest_data"] = [Data(text=text) for text in test_data]
- default_kwargs["number_of_results"] = 2
-
- component: ChromaVectorStoreComponent = component_class().set(**default_kwargs)
- component.build_vector_store()
-
- # Test similarity search
- component.set(search_type="Similarity", search_query="programming languages")
- similarity_results = component.search_documents()
- assert len(similarity_results) == 2
- assert any("python" in result.text.lower() for result in similarity_results)
-
- # Test MMR search
- component.set(search_type="MMR", search_query="programming languages")
- mmr_results = component.search_documents()
- assert len(mmr_results) == 2
-
- # Test with empty query
- component.set(search_query="")
- empty_results = component.search_documents()
- assert len(empty_results) == 0
-
- def test_search_with_score(
- self, component_class: type[ChromaVectorStoreComponent], default_kwargs: dict[str, Any]
- ) -> None:
- """Test the search with score functionality through the component."""
- test_data = [
- "The quick brown fox jumps over the lazy dog",
- "Python is a popular programming language",
- "Machine learning models process data",
- ]
- default_kwargs["ingest_data"] = [Data(text=text) for text in test_data]
- default_kwargs["number_of_results"] = 2
-
- component: ChromaVectorStoreComponent = component_class().set(**default_kwargs)
- component.build_vector_store()
-
- # Test search with score through the component
- component.set(
- search_type="similarity_score_threshold", search_query="programming languages", number_of_results=2
- )
- results = component.search_documents()
-
- assert len(results) == 2
- # Results should be sorted by relevance
- assert any("python" in result.text.lower() for result in results)
- assert any("programming" in result.text.lower() for result in results)
-
- # Test with different number of results
- component.set(number_of_results=3)
- results = component.search_documents()
- assert len(results) == 3
-
- def test_duplicate_handling(
- self, component_class: type[ChromaVectorStoreComponent], default_kwargs: dict[str, Any]
- ) -> None:
- """Test handling of duplicate documents."""
- # Create test data with duplicates
- test_data = [
- Data(text_key="text", data={"text": "This is a test document"}),
- Data(text_key="text", data={"text": "This is a test document"}), # Duplicate with exact same data
- Data(text_key="text", data={"text": "This is another document"}),
- ]
- default_kwargs["ingest_data"] = test_data
- default_kwargs["allow_duplicates"] = False
- default_kwargs["limit"] = 100 # Set a high enough limit to get all documents
-
- component: ChromaVectorStoreComponent = component_class().set(**default_kwargs)
- vector_store = component.build_vector_store()
-
- # Get all documents
- results = vector_store.get(limit=100)
-
- documents = results["documents"]
-
- # The documents are returned in a list structure
- assert len(documents) == 3 # All documents are added, even duplicates
-
- # Count unique texts
- unique_texts = set(documents)
- assert len(unique_texts) == 2 # Should have 2 unique texts
-
- # Test with allow_duplicates=True
- test_data = [
- Data(text_key="text", data={"text": "This is a test document"}),
- Data(text_key="text", data={"text": "This is a test document"}), # Duplicate
- ]
- default_kwargs["ingest_data"] = test_data
- default_kwargs["allow_duplicates"] = True
- default_kwargs["collection_name"] = "test_collection_2" # Use a different collection name
-
- component = component_class().set(**default_kwargs)
- vector_store = component.build_vector_store()
-
- # Get all documents
- results = vector_store.get(limit=100)
- documents = results["documents"]
-
- # With allow_duplicates=True, we should have both documents
- assert len(documents) == 2
- assert all("test document" in doc for doc in documents)
-
- # Verify that we have the expected number of documents
- assert vector_store._collection.count() == 2
-
- def test_chroma_collection_to_data(
- self, component_class: type[ChromaVectorStoreComponent], default_kwargs: dict[str, Any]
- ) -> None:
- """Test the chroma_collection_to_data function."""
- from lfx.base.vectorstores.utils import chroma_collection_to_data
-
- # Create a collection with documents and metadata
- test_data = [
- Data(data={"text": "Document 1", "metadata_field": "value1"}),
- Data(data={"text": "Document 2", "metadata_field": "value2"}),
- ]
- default_kwargs["ingest_data"] = test_data
- component: ChromaVectorStoreComponent = component_class().set(**default_kwargs)
- vector_store = component.build_vector_store()
-
- # Get the collection data
- collection_dict = vector_store.get()
- data_objects = chroma_collection_to_data(collection_dict)
-
- # Verify the conversion
- assert len(data_objects) == 2
- for data_obj in data_objects:
- assert isinstance(data_obj, Data)
- assert "id" in data_obj.data
- assert "text" in data_obj.data
- assert data_obj.data["text"] in {"Document 1", "Document 2"}
- assert "metadata_field" in data_obj.data
- assert data_obj.data["metadata_field"] in {"value1", "value2"}
-
- def test_chroma_collection_to_data_without_metadata(
- self, component_class: type[ChromaVectorStoreComponent], default_kwargs: dict[str, Any]
- ) -> None:
- """Test the chroma_collection_to_data function with documents that have no metadata."""
- from lfx.base.vectorstores.utils import chroma_collection_to_data
-
- # Create a collection with documents but no metadata
- test_data = [
- Data(data={"text": "Simple document 1"}),
- Data(data={"text": "Simple document 2"}),
- ]
- default_kwargs["ingest_data"] = test_data
- component: ChromaVectorStoreComponent = component_class().set(**default_kwargs)
- vector_store = component.build_vector_store()
-
- # Get the collection data
- collection_dict = vector_store.get()
- data_objects = chroma_collection_to_data(collection_dict)
-
- # Verify the conversion
- assert len(data_objects) == 2
- for data_obj in data_objects:
- assert isinstance(data_obj, Data)
- assert "id" in data_obj.data
- assert "text" in data_obj.data
- assert data_obj.data["text"] in {"Simple document 1", "Simple document 2"}
-
- def test_chroma_collection_to_data_empty_collection(
- self, component_class: type[ChromaVectorStoreComponent], default_kwargs: dict[str, Any]
- ) -> None:
- """Test the chroma_collection_to_data function with an empty collection."""
- from lfx.base.vectorstores.utils import chroma_collection_to_data
-
- # Create an empty collection
- component: ChromaVectorStoreComponent = component_class().set(**default_kwargs)
- vector_store = component.build_vector_store()
-
- # Get the collection data
- collection_dict = vector_store.get()
- data_objects = chroma_collection_to_data(collection_dict)
-
- # Verify the conversion
- assert len(data_objects) == 0
-
- def test_metadata_filtering_with_complex_data(
- self, component_class: type[ChromaVectorStoreComponent], default_kwargs: dict[str, Any]
- ) -> None:
- """Test that complex metadata is properly filtered and simple types are preserved."""
- from lfx.base.vectorstores.utils import chroma_collection_to_data
-
- # Create test data that covers the original error scenario and validation
- test_data = [
- Data(
- data={
- "text": "Document with mixed metadata",
- "files": [], # This empty list was causing the original ChromaDB error
- "tags": ["tag1", "tag2"], # Lists should be filtered out
- "nested": {"key": "value"}, # Nested objects should be filtered out
- "simple_string": "preserved",
- "simple_int": 42,
- "simple_bool": True,
- "empty_string": "", # Edge case: empty but valid
- "zero_value": 0, # Edge case: falsy but valid
- }
- )
- ]
-
- default_kwargs["ingest_data"] = test_data
- default_kwargs["collection_name"] = "test_metadata_filtering"
-
- # This should not raise an error despite the complex metadata
- component: ChromaVectorStoreComponent = component_class().set(**default_kwargs)
- vector_store = component.build_vector_store()
-
- # Verify document was added successfully
- collection_dict = vector_store.get()
- assert len(collection_dict["documents"]) == 1
- assert "Document with mixed metadata" in collection_dict["documents"][0]
-
- # Verify metadata filtering: simple types preserved, complex types filtered out
- data_objects = chroma_collection_to_data(collection_dict)
- data_obj = data_objects[0]
-
- # Simple types should be preserved
- assert data_obj.data["simple_string"] == "preserved"
- assert data_obj.data["simple_int"] == 42
- assert data_obj.data["simple_bool"] is True
- assert data_obj.data["empty_string"] == ""
- assert data_obj.data["zero_value"] == 0
-
- # Complex types should be filtered out
- assert "files" not in data_obj.data
- assert "tags" not in data_obj.data
- assert "nested" not in data_obj.data
-
- def test_metadata_filtering_fallback(
- self, component_class: type[ChromaVectorStoreComponent], default_kwargs: dict[str, Any], monkeypatch
- ) -> None:
- """Test the fallback behavior when filter_complex_metadata import fails."""
- import builtins
-
- original_import = builtins.__import__
-
- def mock_import(name, *args, **kwargs):
- if name == "langchain_community.vectorstores.utils":
- error_msg = "Mocked import error"
- raise ImportError(error_msg)
- return original_import(name, *args, **kwargs)
-
- monkeypatch.setattr(builtins, "__import__", mock_import)
-
- # Use simple test data to avoid ChromaDB errors when filtering is unavailable
- test_data = [Data(data={"text": "Simple document", "simple_field": "simple_value"})]
- default_kwargs["ingest_data"] = test_data
- default_kwargs["collection_name"] = "test_fallback"
-
- # Should work with fallback (no filtering)
- component: ChromaVectorStoreComponent = component_class().set(**default_kwargs)
- vector_store = component.build_vector_store()
-
- # Verify document was added
- collection_dict = vector_store.get()
- assert len(collection_dict["documents"]) == 1
- assert "Simple document" in collection_dict["documents"][0]
diff --git a/src/backend/tests/unit/components/vectorstores/test_graph_rag_component.py b/src/backend/tests/unit/components/vectorstores/test_graph_rag_component.py
deleted file mode 100644
index 82fc6f74a107..000000000000
--- a/src/backend/tests/unit/components/vectorstores/test_graph_rag_component.py
+++ /dev/null
@@ -1,164 +0,0 @@
-import random
-
-import pytest
-from faker import Faker
-from langchain_community.embeddings.fake import DeterministicFakeEmbedding
-from langchain_core.documents import Document
-from langchain_core.vectorstores.in_memory import InMemoryVectorStore
-
-from lfx.components.datastax.graph_rag import GraphRAGComponent
-from tests.base import ComponentTestBaseWithoutClient
-
-
-class TestGraphRAGComponent(ComponentTestBaseWithoutClient):
- """Test suite for the GraphRAGComponent class, focusing on graph traversal and retrieval functionality.
-
- Fixtures:
- component_class: Returns the GraphRAGComponent class to be tested.
- animals: Provides a list of Document objects representing various animals with metadata.
- embedding: Provides a DeterministicFakeEmbedding instance with a specified size.
- vector_store: Initializes an InMemoryVectorStore with the provided animals and embedding.
- file_names_mapping: Returns an empty list since this component doesn't have version-specific files.
- default_kwargs: Returns the default retrieval-strategy arguments (k, start_k, max_depth).
-
- Test Cases:
- test_graphrag: Tests the search_documents method of the GraphRAGComponent class by setting attributes and
- verifying the number of results returned.
- """
-
- @pytest.fixture
- def component_class(self):
- """Return the component class to test."""
- return GraphRAGComponent
-
- @pytest.fixture
- def animals(self, n: int = 20, match_prob: float = 0.3) -> list[Document]:
- """Animals dataset for testing.
-
- Generate a list of animal-related document objects with random metadata.
-
- Parameters:
- n (int): Number of documents to generate.
- match_prob (float): Probability of sharing metadata across documents.
-
- Returns:
- List[Document]: A list of generated Document objects.
- """
- # Initialize Faker for generating random text
- fake = Faker()
- random.seed(42)
- fake.seed_instance(42)
-
- # Define possible attributes for animals
- animal_types = ["mammal", "bird", "reptile", "insect"]
- habitats = ["savanna", "marine", "wetlands", "forest", "desert"]
- diets = ["carnivorous", "herbivorous", "omnivorous"]
- origins = ["north america", "south america", "africa", "asia", "australia"]
-
- shared_metadata = {} # Common metadata that may be shared across documents
-
- def update_metadata(meta: dict) -> dict:
- """Modify metadata based on predefined conditions and probability."""
- if random.random() < match_prob: # noqa: S311
- meta.update(shared_metadata) # Apply shared metadata
- elif meta["type"] == "mammal":
- meta["habitat"] = random.choice(habitats) # noqa: S311
- elif meta["type"] == "reptile":
- meta["diet"] = random.choice(diets) # noqa: S311
- elif meta["type"] == "insect":
- meta["origin"] = random.choice(origins) # noqa: S311
- return meta
-
- # Generate and return a list of documents
- return [
- Document(
- id=fake.uuid4(),
- page_content=fake.sentence(),
- metadata=update_metadata(
- {
- "type": random.choice(animal_types), # noqa: S311
- "number_of_legs": random.choice([0, 2, 4, 6, 8]), # noqa: S311
- "keywords": fake.words(random.randint(2, 5)), # noqa: S311
- # Add optional tags with 30% probability
- **(
- {
- "tags": [
- {"a": random.randint(1, 10), "b": random.randint(1, 10)} # noqa: S311
- for _ in range(random.randint(1, 2)) # noqa: S311
- ]
- }
- if random.random() < 0.3 # noqa: S311
- else {}
- ),
- # Add nested metadata with 20% probability
- **({"nested": {"a": random.randint(1, 10)}} if random.random() < 0.2 else {}), # noqa: S311
- }
- ),
- )
- for _ in range(n)
- ]
-
- @pytest.fixture
- def embedding(self):
- return DeterministicFakeEmbedding(size=8)
-
- @pytest.fixture
- def vector_store(self, animals: list[Document], embedding: DeterministicFakeEmbedding) -> InMemoryVectorStore:
- """Return an empty list since this component doesn't have version-specific files."""
- store = InMemoryVectorStore(embedding=embedding)
- store.add_documents(animals)
- return store
-
- @pytest.fixture
- def file_names_mapping(self):
- """Return an empty list since this component doesn't have version-specific files."""
- return []
-
- @pytest.fixture
- def default_kwargs(self):
- """Return an empty dictionary since this component doesn't have any default arguments."""
- return {"k": 10, "start_k": 3, "max_depth": 2}
-
- def test_graphrag(
- self,
- component_class: type[GraphRAGComponent],
- embedding: DeterministicFakeEmbedding,
- vector_store: InMemoryVectorStore,
- default_kwargs,
- ):
- """Test GraphRAGComponent's document search functionality.
-
- This test verifies that the component correctly retrieves documents using the
- provided embedding model, vector store, and search query.
-
- Args:
- component_class (type[GraphRAGComponent]): The component class to test.
- embedding (DeterministicFakeEmbedding): The embedding model for the component.
- vector_store (InMemoryVectorStore): The vector store used in retrieval.
- default_kwargs (dict): Default keyword arguments for the retrieval strategy.
-
- Returns:
- None: The test asserts that 10 search results are returned.
- """
- component = component_class()
-
- component.set_attributes(
- {
- "embedding_model": embedding,
- "vector_store": vector_store,
- "edge_definition": "type, type",
- "strategy": "Eager",
- "search_query": "information environment technology",
- "graphrag_strategy_kwargs": default_kwargs,
- }
- )
-
- results = component.search_documents()
-
- # Quantity of documents
- assert len(results) == 10
-
- # Ensure the k - start_k documents returned via traversal share the same
- # "type" metadata as the ones returned via the initial similarity search
- assert {doc.data["type"] for doc in results if doc.data["_depth"] == 0} == {
- doc.data["type"] for doc in results if doc.data["_depth"] >= 1
- }
diff --git a/src/backend/tests/unit/components/vectorstores/test_local_db_component.py b/src/backend/tests/unit/components/vectorstores/test_local_db_component.py
deleted file mode 100644
index e6c93af86926..000000000000
--- a/src/backend/tests/unit/components/vectorstores/test_local_db_component.py
+++ /dev/null
@@ -1,382 +0,0 @@
-import os
-from pathlib import Path
-from typing import Any
-from unittest.mock import MagicMock, patch
-
-import pytest
-from langflow.services.cache.utils import CACHE_DIR
-
-from lfx.components.vectorstores.local_db import LocalDBComponent
-from lfx.schema.data import Data
-from tests.base import ComponentTestBaseWithoutClient, VersionComponentMapping
-
-
-@pytest.mark.api_key_required
-class TestLocalDBComponent(ComponentTestBaseWithoutClient):
- @pytest.fixture
- def component_class(self) -> type[Any]:
- """Return the component class to test."""
- return LocalDBComponent
-
- @pytest.fixture
- def default_kwargs(self, tmp_path: Path) -> dict[str, Any]:
- """Return the default kwargs for the component."""
- from lfx.components.openai.openai import OpenAIEmbeddingsComponent
-
- if os.getenv("OPENAI_API_KEY") is None:
- pytest.skip("OPENAI_API_KEY is not set")
-
- api_key = os.getenv("OPENAI_API_KEY")
-
- return {
- "embedding": OpenAIEmbeddingsComponent(openai_api_key=api_key).build_embeddings(),
- "collection_name": "test_collection",
- "persist": True,
- "persist_directory": str(tmp_path), # Convert Path to string
- "mode": "Ingest",
- }
-
- @pytest.fixture
- def file_names_mapping(self) -> list[VersionComponentMapping]:
- """Return the file names mapping for different versions."""
- # Return an empty list since this is a new component
- return []
-
- def test_create_db(self, component_class: type[LocalDBComponent], default_kwargs: dict[str, Any]) -> None:
- """Test creating a vector store."""
- component: LocalDBComponent = component_class().set(**default_kwargs)
- component.build_vector_store()
- persist_directory = Path(default_kwargs["persist_directory"])
- assert persist_directory.exists()
- assert persist_directory.is_dir()
- # Assert it isn't empty
- assert len(list(persist_directory.iterdir())) > 0
- # Assert there's a chroma.sqlite3 file (since LocalDB uses Chroma underneath)
- assert (persist_directory / "chroma.sqlite3").exists()
- assert (persist_directory / "chroma.sqlite3").is_file()
-
- @patch("langchain_chroma.Chroma._collection")
- def test_create_db_with_data(
- self,
- mock_collection,
- component_class: type[LocalDBComponent],
- default_kwargs: dict[str, Any],
- ) -> None:
- """Test creating a vector store with data."""
- # Set ingest_data in default_kwargs to a list of Data objects
- test_texts = ["test data 1", "test data 2", "something completely different"]
- default_kwargs["ingest_data"] = [Data(text=text) for text in test_texts]
-
- # Mock the collection count to return the expected number
- mock_collection.count.return_value = len(test_texts)
- mock_collection.name = default_kwargs["collection_name"]
-
- # Mock the _add_documents_to_vector_store method to ensure add_documents is called
- with patch.object(LocalDBComponent, "_add_documents_to_vector_store") as mock_add_docs_method:
- component: LocalDBComponent = component_class().set(**default_kwargs)
- vector_store = component.build_vector_store()
-
- # Verify the method was called
- mock_add_docs_method.assert_called_once()
-
- # Verify collection exists and has the correct data
- assert vector_store._collection.name == default_kwargs["collection_name"]
- assert vector_store._collection.count() == len(test_texts)
-
- def test_default_persist_dir(self, component_class: type[LocalDBComponent], default_kwargs: dict[str, Any]) -> None:
- """Test the default persist directory functionality."""
- # Remove persist_directory from default_kwargs to test default directory
- default_kwargs.pop("persist_directory")
-
- component: LocalDBComponent = component_class().set(**default_kwargs)
-
- # Call get_default_persist_dir and check the result
- default_dir = component.get_default_persist_dir()
- expected_dir = Path(CACHE_DIR) / "vector_stores" / default_kwargs["collection_name"]
-
- assert Path(default_dir) == expected_dir
- assert Path(default_dir).exists()
-
- @patch("langchain_chroma.Chroma.similarity_search")
- def test_similarity_search(
- self,
- mock_similarity_search,
- component_class: type[LocalDBComponent],
- default_kwargs: dict[str, Any],
- ) -> None:
- """Test the similarity search functionality."""
- # Create test data with distinct topics
- test_data = [
- "The quick brown fox jumps over the lazy dog",
- "Python is a popular programming language",
- "Machine learning models process data",
- "The lazy dog sleeps all day long",
- ]
- default_kwargs["ingest_data"] = [Data(text=text) for text in test_data]
- default_kwargs["search_type"] = "Similarity"
- default_kwargs["number_of_results"] = 2
-
- # Mock the similarity_search to return documents
- from langchain_core.documents import Document
-
- mock_docs = [
- Document(page_content="The lazy dog sleeps all day long"),
- Document(page_content="The quick brown fox jumps over the lazy dog"),
- ]
- mock_similarity_search.return_value = mock_docs
-
- component: LocalDBComponent = component_class().set(**default_kwargs)
- component.build_vector_store()
-
- # Switch to Retrieve mode
- component.set(mode="Retrieve", search_query="dog sleeping")
- results = component.search_documents()
-
- assert len(results) == 2
- # The most relevant results should be about dogs
- assert any("dog" in result.text.lower() for result in results)
- mock_similarity_search.assert_called_once_with(query="dog sleeping", k=2)
-
- # Test with different number of results
- component.set(number_of_results=3)
- another_doc = Document(page_content="Another document")
- mock_similarity_search.return_value = [*mock_docs, another_doc] # Use unpacking instead of concatenation
- results = component.search_documents()
- assert len(results) == 3
-
- @patch("langchain_chroma.Chroma.max_marginal_relevance_search")
- def test_mmr_search(
- self,
- mock_mmr_search,
- component_class: type[LocalDBComponent],
- default_kwargs: dict[str, Any],
- ) -> None:
- """Test the MMR search functionality."""
- # Create test data with some similar documents
- test_data = [
- "The quick brown fox jumps",
- "The quick brown fox leaps",
- "The quick brown fox hops",
- "Something completely different about cats",
- ]
- default_kwargs["ingest_data"] = [Data(text=text) for text in test_data]
- default_kwargs["search_type"] = "MMR"
- default_kwargs["number_of_results"] = 3
-
- # Mock the MMR search to return documents
- from langchain_core.documents import Document
-
- mock_docs = [
- Document(page_content="The quick brown fox jumps"),
- Document(page_content="The quick brown fox leaps"),
- Document(page_content="Something completely different about cats"),
- ]
- mock_mmr_search.return_value = mock_docs
-
- component: LocalDBComponent = component_class().set(**default_kwargs)
- component.build_vector_store()
-
- # Switch to Retrieve mode
- component.set(mode="Retrieve", search_query="quick fox")
- results = component.search_documents()
-
- assert len(results) == 3
- # Results should be diverse but relevant
- assert any("fox" in result.text.lower() for result in results)
- mock_mmr_search.assert_called_once_with(query="quick fox", k=3)
-
- # Test with different settings
- component.set(number_of_results=2)
- mock_mmr_search.return_value = mock_docs[:2]
- diverse_results = component.search_documents()
- assert len(diverse_results) == 2
-
- @patch("langchain_chroma.Chroma.similarity_search")
- @patch("langchain_chroma.Chroma.max_marginal_relevance_search")
- def test_search_with_different_types(
- self,
- mock_mmr_search,
- mock_similarity_search,
- component_class: type[LocalDBComponent],
- default_kwargs: dict[str, Any],
- ) -> None:
- """Test search with different search types."""
- test_data = [
- "The quick brown fox jumps over the lazy dog",
- "Python is a popular programming language",
- "Machine learning models process data",
- ]
- default_kwargs["ingest_data"] = [Data(text=text) for text in test_data]
- default_kwargs["number_of_results"] = 2
-
- # Mock the search methods to return documents
- from langchain_core.documents import Document
-
- mock_similarity_docs = [
- Document(page_content="Python is a popular programming language"),
- Document(page_content="Machine learning models process data"),
- ]
- mock_similarity_search.return_value = mock_similarity_docs
-
- mock_mmr_docs = [
- Document(page_content="Python is a popular programming language"),
- Document(page_content="The quick brown fox jumps over the lazy dog"),
- ]
- mock_mmr_search.return_value = mock_mmr_docs
-
- component: LocalDBComponent = component_class().set(**default_kwargs)
- component.build_vector_store()
-
- # Switch to Retrieve mode and test similarity search
- component.set(mode="Retrieve", search_type="Similarity", search_query="programming languages")
- similarity_results = component.search_documents()
- assert len(similarity_results) == 2
- assert any("python" in result.text.lower() for result in similarity_results)
- mock_similarity_search.assert_called_once_with(query="programming languages", k=2)
-
- # Test MMR search
- component.set(search_type="MMR", search_query="programming languages")
- mmr_results = component.search_documents()
- assert len(mmr_results) == 2
- mock_mmr_search.assert_called_once_with(query="programming languages", k=2)
-
- # Test with empty query
- component.set(search_query="")
- empty_results = component.search_documents()
- assert len(empty_results) == 0
-
- @patch("langchain_chroma.Chroma.get")
- @patch("langchain_chroma.Chroma._collection")
- def test_duplicate_handling(
- self,
- mock_collection,
- mock_get,
- component_class: type[LocalDBComponent],
- default_kwargs: dict[str, Any],
- ) -> None:
- """Test handling of duplicate documents."""
- # Create test data with duplicates
- test_data = [
- Data(text_key="text", data={"text": "This is a test document"}),
- Data(text_key="text", data={"text": "This is a test document"}), # Duplicate with exact same data
- Data(text_key="text", data={"text": "This is another document"}),
- ]
- default_kwargs["ingest_data"] = test_data
- default_kwargs["allow_duplicates"] = False
- default_kwargs["limit"] = 100 # Set a high enough limit to get all documents
-
- # Mock the get method to return documents
- mock_get.return_value = {
- "documents": ["This is a test document", "This is a test document", "This is another document"],
- "metadatas": [{}, {}, {}],
- "ids": ["1", "2", "3"],
- }
-
- # Mock collection count
- mock_collection.count.return_value = 3
-
- component: LocalDBComponent = component_class().set(**default_kwargs)
- vector_store = component.build_vector_store()
-
- # Get all documents
- results = vector_store.get(limit=100)
- documents = results["documents"]
-
- # The documents are returned in a list structure
- assert len(documents) == 3 # All documents are added, even duplicates
-
- # Count unique texts
- unique_texts = set(documents)
- assert len(unique_texts) == 2 # Should have 2 unique texts
-
- # Test with allow_duplicates=True
- test_data = [
- Data(text_key="text", data={"text": "This is a test document"}),
- Data(text_key="text", data={"text": "This is a test document"}), # Duplicate
- ]
- default_kwargs["ingest_data"] = test_data
- default_kwargs["allow_duplicates"] = True
- default_kwargs["collection_name"] = "test_collection_2" # Use a different collection name
-
- # Mock for the second test
- mock_get.return_value = {
- "documents": ["This is a test document", "This is a test document"],
- "metadatas": [{}, {}],
- "ids": ["1", "2"],
- }
- mock_collection.count.return_value = 2
-
- component = component_class().set(**default_kwargs)
- vector_store = component.build_vector_store()
-
- # Get all documents
- results = vector_store.get(limit=100)
- documents = results["documents"]
-
- # With allow_duplicates=True, we should have both documents
- assert len(documents) == 2
- assert all("test document" in doc for doc in documents)
-
- # Verify that we have the expected number of documents
- assert vector_store._collection.count() == 2
-
- def test_build_config_update(self, component_class: type[LocalDBComponent]) -> None:
- """Test the update_build_config method."""
- component = component_class()
-
- # Test mode=Ingest
- build_config = {
- "ingest_data": {"show": False},
- "collection_name": {"show": False},
- "persist": {"show": False},
- "persist_directory": {"show": False},
- "embedding": {"show": False},
- "allow_duplicates": {"show": False},
- "limit": {"show": False},
- "search_query": {"show": False},
- "search_type": {"show": False},
- "number_of_results": {"show": False},
- "existing_collections": {"show": False},
- }
-
- updated_config = component.update_build_config(build_config, "Ingest", "mode")
-
- assert updated_config["ingest_data"]["show"] is True
- assert updated_config["collection_name"]["show"] is True
- assert updated_config["persist"]["show"] is True
- assert updated_config["search_query"]["show"] is False
-
- # Test mode=Retrieve
- updated_config = component.update_build_config(build_config, "Retrieve", "mode")
-
- assert updated_config["search_query"]["show"] is True
- assert updated_config["search_type"]["show"] is True
- assert updated_config["number_of_results"]["show"] is True
- assert updated_config["existing_collections"]["show"] is True
- assert updated_config["collection_name"]["show"] is False
-
- # Test persist=True/False
- build_config = {"persist_directory": {"show": False}}
- # Use keyword arguments to fix FBT003
- updated_config = component.update_build_config(build_config, field_value=True, field_name="persist")
- assert updated_config["persist_directory"]["show"] is True
-
- updated_config = component.update_build_config(build_config, field_value=False, field_name="persist")
- assert updated_config["persist_directory"]["show"] is False
-
- # Test existing_collections update
- # Fix the dict entry type issue
- build_config = {"collection_name": {"value": "old_name", "show": False}}
- updated_config = component.update_build_config(build_config, "new_collection", "existing_collections")
- assert updated_config["collection_name"]["value"] == "new_collection"
-
- @patch("lfx.components.vectorstores.local_db.LocalDBComponent.list_existing_collections")
- def test_list_existing_collections(self, mock_list: MagicMock, component_class: type[LocalDBComponent]) -> None:
- """Test the list_existing_collections method."""
- mock_list.return_value = ["collection1", "collection2", "collection3"]
-
- component = component_class()
- collections = component.list_existing_collections()
-
- assert collections == ["collection1", "collection2", "collection3"]
- mock_list.assert_called_once()
diff --git a/src/backend/tests/unit/components/vectorstores/test_mongodb_atlas.py b/src/backend/tests/unit/components/vectorstores/test_mongodb_atlas.py
deleted file mode 100644
index e24045745423..000000000000
--- a/src/backend/tests/unit/components/vectorstores/test_mongodb_atlas.py
+++ /dev/null
@@ -1,206 +0,0 @@
-import os
-import time
-from typing import Any
-
-import pytest
-from langchain_community.embeddings.fake import DeterministicFakeEmbedding
-from pymongo.collection import Collection
-
-from lfx.components.mongodb import MongoVectorStoreComponent
-from lfx.schema.data import Data
-from tests.base import ComponentTestBaseWithoutClient, VersionComponentMapping
-
-
-@pytest.mark.skipif(
- not os.environ.get("MONGODB_ATLAS_URI"), reason="Environment variable MONGODB_ATLAS_URI is not defined."
-)
-class TestMongoVectorStoreComponent(ComponentTestBaseWithoutClient):
- @pytest.fixture
- def component_class(self) -> type[Any]:
- """Return the component class to test."""
- return MongoVectorStoreComponent
-
- @pytest.fixture
- def default_kwargs(self) -> dict[str, Any]:
- """Return the default kwargs for the component."""
- return {
- "mongodb_atlas_cluster_uri": os.getenv("MONGODB_ATLAS_URI"),
- "db_name": "test_db",
- "collection_name": "test_collection",
- "index_name": "test_index",
- "enable_mtls": False,
- "embedding": DeterministicFakeEmbedding(size=8),
- "index_field": "embedding",
- "filter_field": "text",
- "number_dimensions": 8,
- "similarity": "cosine",
- "quantization": "scalar",
- "insert_mode": "append",
- "ingest_data": [Data(data={"text": "test data 1"}), Data(data={"text": "test data 2"})],
- }
-
- @pytest.fixture
- def file_names_mapping(self) -> list[VersionComponentMapping]:
- """Return the file names mapping for different versions."""
- return [
- {"version": "1.0.19", "module": "vectorstores", "file_name": "MongoDBAtlasVector"},
- {"version": "1.1.0", "module": "vectorstores", "file_name": "mongodb_atlas"},
- {"version": "1.1.1", "module": "vectorstores", "file_name": "mongodb_atlas"},
- ]
-
- def __create_search_index(
- self, component_class: type[MongoVectorStoreComponent], collection: Collection, default_kwargs: dict[str, Any]
- ) -> None:
- """Create a vector search index if it doesn't exist."""
- component_class().set(**default_kwargs).verify_search_index(collection)
-
- # Verify index was created
- indexes = collection.list_search_indexes()
- index_names = {idx["name"]: idx["type"] for idx in indexes}
- index_type = index_names.get(default_kwargs["index_name"])
- assert default_kwargs["index_name"] in index_names
- assert index_type == "vectorSearch"
-
- def test_create_db(self, component_class: type[MongoVectorStoreComponent], default_kwargs: dict[str, Any]) -> None:
- """Test creating a MongoDB Atlas vector store."""
- component: MongoVectorStoreComponent = component_class().set(**default_kwargs)
- vector_store = component.build_vector_store()
- assert vector_store is not None
- # Access MongoDB collection through the vector store's internal client
- assert vector_store._collection.name == default_kwargs["collection_name"]
- assert vector_store._index_name == default_kwargs["index_name"]
-
- def test_create_collection_with_data(
- self, component_class: type[MongoVectorStoreComponent], default_kwargs: dict[str, Any]
- ) -> None:
- """Test creating a collection with data."""
- test_texts = ["test data 1", "test data 2", "something completely different"]
- default_kwargs["ingest_data"] = [Data(data={"text": text}) for text in test_texts]
- default_kwargs["insert_mode"] = "overwrite"
-
- component: MongoVectorStoreComponent = component_class().set(**default_kwargs)
- vector_store = component.build_vector_store()
-
- # Verify collection exists and has the correct data
- collection = vector_store._collection
- assert collection.name == default_kwargs["collection_name"]
- assert collection.count_documents({}) == len(test_texts)
-
- def test_similarity_search(
- self, component_class: type[MongoVectorStoreComponent], default_kwargs: dict[str, Any]
- ) -> None:
- """Test the similarity search functionality."""
- # Create test data with distinct topics
- test_data = [
- "The quick brown fox jumps over the lazy dog",
- "Python is a popular programming language",
- "Machine learning models process data",
- "The lazy dog sleeps all day long",
- ]
- default_kwargs["ingest_data"] = [Data(data={"text": text, "metadata": {}}) for text in test_data]
- default_kwargs["number_of_results"] = 2
- default_kwargs["insert_mode"] = "overwrite"
-
- # Create and initialize the component
- component: MongoVectorStoreComponent = component_class().set(**default_kwargs)
-
- # Build the vector store first to ensure data is ingested
- vector_store = component.build_vector_store()
- assert vector_store is not None
-
- # Verify documents were stored with embeddings
- documents = list(vector_store._collection.find({}))
- assert len(documents) == len(test_data)
- for doc in documents:
- assert "embedding" in doc
- assert isinstance(doc["embedding"], list)
- assert len(doc["embedding"]) == 8 # Should match our embedding size
-
- self.__create_search_index(component_class, vector_store._collection, default_kwargs)
-
- # Verify index was created
- indexes = vector_store._collection.list_search_indexes()
- index_names = [idx["name"] for idx in indexes]
- assert default_kwargs["index_name"] in index_names
-
- # Test similarity search through the component
- component.set(search_query="dog")
- results = component.search_documents()
- time.sleep(5) # wait the results come from API
-
- assert len(results) == 2, "Expected 2 results for 'lazy dog' query"
- # The most relevant results should be about dogs
- assert any("dog" in result.data["text"].lower() for result in results)
-
- # Test with different number of results
- component.set(number_of_results=3)
- results = component.search_documents()
- assert len(results) == 3
- assert all("text" in result.data for result in results)
-
- def test_mtls_configuration(
- self, component_class: type[MongoVectorStoreComponent], default_kwargs: dict[str, Any]
- ) -> None:
- """Test mTLS configuration handling."""
- # Test with invalid mTLS configuration
- default_kwargs["enable_mtls"] = True
- default_kwargs["mongodb_atlas_client_cert"] = "invalid-cert-content"
-
- component: MongoVectorStoreComponent = component_class().set(**default_kwargs)
- with pytest.raises(ValueError, match="Failed to connect to MongoDB Atlas"):
- component.build_vector_store()
-
- def test_empty_search_query(
- self, component_class: type[MongoVectorStoreComponent], default_kwargs: dict[str, Any]
- ) -> None:
- """Test search with empty query."""
- default_kwargs["insert_mode"] = "overwrite"
- component: MongoVectorStoreComponent = component_class().set(**default_kwargs)
-
- # Test with empty search query
- component.set(search_query="")
- results = component.search_documents()
- assert len(results) == 0
-
- def test_metadata_handling(
- self, component_class: type[MongoVectorStoreComponent], default_kwargs: dict[str, Any]
- ) -> None:
- """Test handling of document metadata."""
- # Create test data with metadata
- test_data = [
- Data(data={"text": "Document 1", "metadata": {"category": "test", "priority": 1}}),
- Data(data={"text": "Document 2", "metadata": {"category": "test", "priority": 2}}),
- ]
- default_kwargs["ingest_data"] = test_data
- default_kwargs["collection_name"] = "test_collection_metadata"
-
- component: MongoVectorStoreComponent = component_class().set(**default_kwargs)
- vector_store = component.build_vector_store()
-
- self.__create_search_index(component_class, vector_store._collection, default_kwargs)
-
- # Test search and verify metadata is preserved
- component.set(search_query="Document", number_of_results=2)
- results = component.search_documents()
-
- assert len(results) == 2
- for result in results:
- assert "category" in result.data["metadata"]
- assert result.data["metadata"]["category"] == "test"
- assert "priority" in result.data["metadata"]
- assert isinstance(result.data["metadata"]["priority"], int)
-
- def test_error_handling(
- self, component_class: type[MongoVectorStoreComponent], default_kwargs: dict[str, Any]
- ) -> None:
- """Test error handling for invalid configurations."""
- component: MongoVectorStoreComponent = component_class().set(**default_kwargs)
-
- # Test with non-existent database
- default_kwargs["mongodb_atlas_cluster_uri"] = os.getenv("MONGODB_ATLAS_URI")
- default_kwargs["db_name"] = "nonexistent_db"
- component = component_class().set(**default_kwargs)
-
- # This should not raise an error as MongoDB creates databases and collections on demand
- vector_store = component.build_vector_store()
- assert vector_store is not None
diff --git a/src/backend/tests/unit/custom/component/test_component_loading_fix.py b/src/backend/tests/unit/custom/component/test_component_loading_fix.py
deleted file mode 100644
index f81b84fb09d5..000000000000
--- a/src/backend/tests/unit/custom/component/test_component_loading_fix.py
+++ /dev/null
@@ -1,438 +0,0 @@
-"""Tests for the component loading fix that filters out BASE_COMPONENTS_PATH from custom components.
-
-- BASE_COMPONENTS_PATH is properly filtered out from custom components paths
-- Lazy loading mode works correctly
-- Custom components are loaded only from valid custom paths
-- No regression in existing functionality
-"""
-
-import asyncio
-from unittest.mock import AsyncMock, MagicMock, patch
-
-import pytest
-
-from lfx.interface.components import (
- component_cache,
- get_and_cache_all_types_dict,
-)
-from lfx.services.settings.base import BASE_COMPONENTS_PATH
-from lfx.services.settings.service import SettingsService
-
-
-class TestComponentLoadingFix:
- """Test suite for the component loading fix that filters BASE_COMPONENTS_PATH."""
-
- @pytest.fixture
- def mock_settings_service(self):
- """Create a mock settings service with configurable options."""
- settings_service = MagicMock(spec=SettingsService)
- settings_service.settings = MagicMock()
- settings_service.settings.lazy_load_components = False
- settings_service.settings.components_path = []
- return settings_service
-
- @pytest.fixture
- def mock_custom_paths(self):
- """Create mock custom component paths."""
- return ["/custom/path1", "/custom/path2"]
-
- @pytest.fixture
- def mock_langflow_components(self):
- """Create mock langflow components response."""
- return {
- "components": {
- "category1": {
- "Component1": {"display_name": "Component1", "type": "category1"},
- "Component2": {"display_name": "Component2", "type": "category1"},
- },
- "category2": {
- "Component3": {"display_name": "Component3", "type": "category2"},
- },
- }
- }
-
- @pytest.fixture
- def mock_custom_components(self):
- """Create mock custom components response."""
- return {
- "custom_category": {
- "CustomComponent1": {"display_name": "CustomComponent1", "type": "custom_category"},
- "CustomComponent2": {"display_name": "CustomComponent2", "type": "custom_category"},
- }
- }
-
- @pytest.fixture(autouse=True)
- def clear_component_cache(self):
- """Clear component cache before each test."""
- component_cache.all_types_dict = None
- yield
- component_cache.all_types_dict = None
-
- @pytest.mark.asyncio
- async def test_base_components_path_filtering(
- self, mock_settings_service, mock_langflow_components, mock_custom_components
- ):
- """Test that BASE_COMPONENTS_PATH is properly filtered out from custom components paths."""
- # Setup: Include BASE_COMPONENTS_PATH in the components_path list
- mock_settings_service.settings.components_path = [BASE_COMPONENTS_PATH, "/custom/path1", "/custom/path2"]
- mock_settings_service.settings.lazy_load_components = False
-
- with (
- patch("lfx.interface.components.import_langflow_components", return_value=mock_langflow_components),
- patch("lfx.interface.components.aget_all_types_dict") as mock_aget_all_types_dict,
- ):
- # Mock aget_all_types_dict to return custom components
- mock_aget_all_types_dict.return_value = mock_custom_components
-
- # Execute the function
- result = await get_and_cache_all_types_dict(mock_settings_service)
-
- # Verify that aget_all_types_dict was called with filtered paths (BASE_COMPONENTS_PATH excluded)
- mock_aget_all_types_dict.assert_called_once_with(["/custom/path1", "/custom/path2"])
-
- # Verify result contains both langflow and custom components
- assert "category1" in result
- assert "category2" in result
- assert "custom_category" in result
- assert "Component1" in result["category1"]
- assert "CustomComponent1" in result["custom_category"]
-
- @pytest.mark.asyncio
- async def test_only_base_components_path_in_list(self, mock_settings_service, mock_langflow_components):
- """Test behavior when components_path contains only BASE_COMPONENTS_PATH."""
- # Setup: Only BASE_COMPONENTS_PATH in the list
- mock_settings_service.settings.components_path = [BASE_COMPONENTS_PATH]
- mock_settings_service.settings.lazy_load_components = False
-
- with (
- patch("lfx.interface.components.import_langflow_components", return_value=mock_langflow_components),
- patch("lfx.interface.components.aget_all_types_dict") as mock_aget_all_types_dict,
- ):
- # Execute the function
- result = await get_and_cache_all_types_dict(mock_settings_service)
-
- # Verify that aget_all_types_dict was NOT called (no custom paths after filtering)
- mock_aget_all_types_dict.assert_not_called()
-
- # Verify result contains only langflow components
- assert "category1" in result
- assert "category2" in result
- assert "Component1" in result["category1"]
- assert "Component3" in result["category2"]
-
- @pytest.mark.asyncio
- async def test_empty_components_path(self, mock_settings_service, mock_langflow_components):
- """Test behavior when components_path is empty."""
- # Setup: Empty components_path
- mock_settings_service.settings.components_path = []
- mock_settings_service.settings.lazy_load_components = False
-
- with (
- patch("lfx.interface.components.import_langflow_components", return_value=mock_langflow_components),
- patch("lfx.interface.components.aget_all_types_dict") as mock_aget_all_types_dict,
- ):
- # Execute the function
- result = await get_and_cache_all_types_dict(mock_settings_service)
-
- # Verify that aget_all_types_dict was NOT called
- mock_aget_all_types_dict.assert_not_called()
-
- # Verify result contains only langflow components
- assert "category1" in result
- assert "category2" in result
- assert "Component1" in result["category1"]
-
- @pytest.mark.asyncio
- async def test_none_components_path(self, mock_settings_service, mock_langflow_components):
- """Test behavior when components_path is None."""
- # Setup: None components_path
- mock_settings_service.settings.components_path = None
- mock_settings_service.settings.lazy_load_components = False
-
- with (
- patch("lfx.interface.components.import_langflow_components", return_value=mock_langflow_components),
- patch("lfx.interface.components.aget_all_types_dict") as mock_aget_all_types_dict,
- ):
- # Execute the function
- result = await get_and_cache_all_types_dict(mock_settings_service)
-
- # Verify that aget_all_types_dict was NOT called
- mock_aget_all_types_dict.assert_not_called()
-
- # Verify result contains only langflow components
- assert "category1" in result
- assert "category2" in result
-
- @pytest.mark.asyncio
- async def test_lazy_loading_mode_with_base_path_filtering(self, mock_settings_service, mock_langflow_components):
- """Test that lazy loading mode uses aget_component_metadata with filtered paths."""
- # Setup: Enable lazy loading and include BASE_COMPONENTS_PATH
- mock_settings_service.settings.lazy_load_components = True
- mock_settings_service.settings.components_path = [BASE_COMPONENTS_PATH, "/custom/path1"]
-
- mock_metadata = {
- "custom_category": {
- "CustomComponent1": {"display_name": "CustomComponent1", "type": "custom_category"},
- }
- }
-
- with (
- patch("lfx.interface.components.import_langflow_components", return_value=mock_langflow_components),
- patch("lfx.interface.components.aget_component_metadata", return_value=mock_metadata) as mock_aget_metadata,
- ):
- # Execute the function
- result = await get_and_cache_all_types_dict(mock_settings_service)
-
- # Verify that aget_component_metadata was called with the full path (not filtered in lazy mode)
- mock_aget_metadata.assert_called_once_with([BASE_COMPONENTS_PATH, "/custom/path1"])
-
- # Verify result contains both langflow and custom components
- assert "category1" in result
- assert "custom_category" in result
-
- @pytest.mark.asyncio
- async def test_multiple_custom_paths_with_base_path(
- self, mock_settings_service, mock_langflow_components, mock_custom_components
- ):
- """Test filtering with multiple custom paths and BASE_COMPONENTS_PATH."""
- # Setup: Multiple paths including BASE_COMPONENTS_PATH
- custom_paths = ["/path1", BASE_COMPONENTS_PATH, "/path2", "/path3"]
- mock_settings_service.settings.components_path = custom_paths
- mock_settings_service.settings.lazy_load_components = False
-
- with (
- patch("lfx.interface.components.import_langflow_components", return_value=mock_langflow_components),
- patch(
- "lfx.interface.components.aget_all_types_dict", return_value=mock_custom_components
- ) as mock_aget_all_types_dict,
- ):
- # Execute the function
- result = await get_and_cache_all_types_dict(mock_settings_service)
-
- # Verify that aget_all_types_dict was called with filtered paths
- expected_filtered_paths = ["/path1", "/path2", "/path3"]
- mock_aget_all_types_dict.assert_called_once_with(expected_filtered_paths)
-
- # Verify result structure
- assert isinstance(result, dict)
- assert "category1" in result # From langflow components
- assert "custom_category" in result # From custom components
-
- @pytest.mark.asyncio
- async def test_component_merging_logic(self, mock_settings_service, mock_langflow_components):
- """Test that langflow and custom components are properly merged."""
- # Setup
- mock_settings_service.settings.components_path = ["/custom/path1"]
- mock_settings_service.settings.lazy_load_components = False
-
- # Create overlapping component names to test merging behavior
- overlapping_custom_components = {
- "category1": { # Same category as langflow
- "Component1": {"display_name": "CustomComponent1", "type": "category1"}, # Same name as langflow
- "Component4": {"display_name": "Component4", "type": "category1"}, # New component
- },
- "new_category": {
- "NewComponent": {"display_name": "NewComponent", "type": "new_category"},
- },
- }
-
- with (
- patch("lfx.interface.components.import_langflow_components", return_value=mock_langflow_components),
- patch("lfx.interface.components.aget_all_types_dict", return_value=overlapping_custom_components),
- ):
- # Execute the function
- result = await get_and_cache_all_types_dict(mock_settings_service)
-
- # Verify that custom components override langflow components with same name
- assert "category1" in result
- assert "category2" in result # From langflow
- assert "new_category" in result # From custom
-
- # Custom category should completely override langflow category
- assert result["category1"]["Component1"]["display_name"] == "CustomComponent1"
-
- # Only components from custom category should remain in category1
- assert "Component2" not in result["category1"] # Langflow component is replaced by custom category
- assert "Component4" in result["category1"] # New custom component
-
- # New custom component should be added
- assert result["category1"]["Component4"]["display_name"] == "Component4"
-
- # New category should be added
- assert result["new_category"]["NewComponent"]["display_name"] == "NewComponent"
-
- @pytest.mark.asyncio
- async def test_component_cache_behavior(self, mock_settings_service, mock_langflow_components):
- """Test that component cache is properly used and populated."""
- # Setup
- mock_settings_service.settings.components_path = ["/custom/path1"]
- mock_settings_service.settings.lazy_load_components = False
-
- with (
- patch("lfx.interface.components.import_langflow_components", return_value=mock_langflow_components),
- patch("lfx.interface.components.aget_all_types_dict", return_value={}),
- ):
- # First call - should populate cache
- result1 = await get_and_cache_all_types_dict(mock_settings_service)
-
- # Verify cache is populated
- assert component_cache.all_types_dict is not None
- assert component_cache.all_types_dict == result1
-
- # Second call - should use cache
- result2 = await get_and_cache_all_types_dict(mock_settings_service)
-
- # Verify same result returned from cache
- assert result1 == result2
- assert result1 is result2 # Same object reference
-
- @pytest.mark.asyncio
- async def test_logging_behavior(self, mock_settings_service, mock_langflow_components, mock_custom_components):
- """Test that appropriate logging messages are generated."""
- # Setup
- mock_settings_service.settings.components_path = ["/custom/path1"]
- mock_settings_service.settings.lazy_load_components = False
-
- with (
- patch("lfx.interface.components.import_langflow_components", return_value=mock_langflow_components),
- patch("lfx.interface.components.aget_all_types_dict", return_value=mock_custom_components),
- patch("lfx.interface.components.logger") as mock_logger,
- ):
- # Configure async mock methods
- mock_logger.adebug = AsyncMock()
-
- # Execute the function
- await get_and_cache_all_types_dict(mock_settings_service)
-
- # Verify debug logging calls
- mock_logger.adebug.assert_any_call("Building components cache")
-
- # Verify total component count logging
- debug_calls = [call.args[0] for call in mock_logger.adebug.call_args_list]
- total_count_logs = [log for log in debug_calls if "Loaded" in log and "components" in log]
- assert len(total_count_logs) >= 1
-
- @pytest.mark.asyncio
- async def test_error_handling_in_custom_component_loading(self, mock_settings_service, mock_langflow_components):
- """Test error handling when custom component loading fails."""
- # Setup
- mock_settings_service.settings.components_path = ["/custom/path1"]
- mock_settings_service.settings.lazy_load_components = False
-
- with (
- patch("lfx.interface.components.import_langflow_components", return_value=mock_langflow_components),
- patch("lfx.interface.components.aget_all_types_dict", side_effect=Exception("Custom loading failed")),
- pytest.raises(Exception, match="Custom loading failed"),
- ):
- # Execute the function - should raise exception when custom component loading fails
- await get_and_cache_all_types_dict(mock_settings_service)
-
- @pytest.mark.asyncio
- async def test_base_components_path_constant_value(self):
- """Test that BASE_COMPONENTS_PATH has expected value and behavior."""
- # Verify BASE_COMPONENTS_PATH is defined and has expected characteristics
- assert BASE_COMPONENTS_PATH is not None
- assert isinstance(BASE_COMPONENTS_PATH, str)
- assert len(BASE_COMPONENTS_PATH) > 0
-
- # Should contain "langflow" and "components" in its path
- assert "langflow" in BASE_COMPONENTS_PATH.lower()
- assert "components" in BASE_COMPONENTS_PATH.lower()
-
- @pytest.mark.asyncio
- async def test_path_filtering_edge_cases(self, mock_settings_service, mock_langflow_components):
- """Test edge cases in path filtering logic."""
- # Setup
- mock_settings_service.settings.lazy_load_components = False
-
- # Test with duplicate BASE_COMPONENTS_PATH
- mock_settings_service.settings.components_path = [BASE_COMPONENTS_PATH, "/custom/path", BASE_COMPONENTS_PATH]
-
- with (
- patch("lfx.interface.components.import_langflow_components", return_value=mock_langflow_components),
- patch("lfx.interface.components.aget_all_types_dict", return_value={}) as mock_aget_all_types_dict,
- ):
- # Clear cache for fresh test
- component_cache.all_types_dict = None
-
- # Execute the function
- await get_and_cache_all_types_dict(mock_settings_service)
-
- # Verify that both instances of BASE_COMPONENTS_PATH are filtered out
- mock_aget_all_types_dict.assert_called_once_with(["/custom/path"])
-
- @pytest.mark.asyncio
- async def test_component_count_calculation(self, mock_settings_service, mock_langflow_components):
- """Test that component count calculation works correctly."""
- # Setup with known component counts
- mock_settings_service.settings.components_path = ["/custom/path1"]
- mock_settings_service.settings.lazy_load_components = False
-
- # Mock custom components with known count
- mock_custom_components = {
- "custom_cat1": {
- "CustomComp1": {"display_name": "CustomComp1"},
- "CustomComp2": {"display_name": "CustomComp2"},
- },
- "custom_cat2": {
- "CustomComp3": {"display_name": "CustomComp3"},
- },
- }
-
- with (
- patch("lfx.interface.components.import_langflow_components", return_value=mock_langflow_components),
- patch("lfx.interface.components.aget_all_types_dict", return_value=mock_custom_components),
- ):
- # Execute the function
- result = await get_and_cache_all_types_dict(mock_settings_service)
-
- # Verify result structure
- assert len(result) >= 2 # At least langflow categories + custom categories
-
- # Verify custom components are present
- assert "custom_cat1" in result
- assert "custom_cat2" in result
- assert "CustomComp1" in result["custom_cat1"]
- assert "CustomComp3" in result["custom_cat2"]
-
- @pytest.mark.asyncio
- async def test_async_concurrency_safety(
- self, mock_settings_service, mock_langflow_components, mock_custom_components
- ):
- """Test that concurrent calls to get_and_cache_all_types_dict are safe."""
- # Setup
- mock_settings_service.settings.components_path = ["/custom/path1"]
- mock_settings_service.settings.lazy_load_components = False
-
- with (
- patch("lfx.interface.components.import_langflow_components", return_value=mock_langflow_components),
- patch("lfx.interface.components.aget_all_types_dict", return_value=mock_custom_components),
- ):
- # Execute multiple concurrent calls
- tasks = [get_and_cache_all_types_dict(mock_settings_service) for _ in range(3)]
- results = await asyncio.gather(*tasks)
-
- # Verify all results are identical (cache working properly)
- first_result = results[0]
- for result in results[1:]:
- assert result == first_result
- # Results should be consistent, though reference may vary due to concurrency
-
- @pytest.mark.asyncio
- async def test_integration_with_real_base_components_path(self, mock_settings_service):
- """Integration test with real BASE_COMPONENTS_PATH to ensure filtering works."""
- # Setup with real BASE_COMPONENTS_PATH value
- mock_settings_service.settings.components_path = [BASE_COMPONENTS_PATH, "/custom/test"]
- mock_settings_service.settings.lazy_load_components = False
-
- # This test should work with real langflow components
- with patch("lfx.interface.components.aget_all_types_dict", return_value={}) as mock_aget_all_types_dict:
- # Execute the function
- result = await get_and_cache_all_types_dict(mock_settings_service)
-
- # Verify BASE_COMPONENTS_PATH was filtered out
- mock_aget_all_types_dict.assert_called_once_with(["/custom/test"])
-
- # Verify we got real langflow components (custom path loading was mocked out)
- assert isinstance(result, dict)
- assert len(result) >= 0  # Built-in components load regardless of the filtered custom paths
diff --git a/src/backend/tests/unit/custom/custom_component/__init__.py b/src/backend/tests/unit/custom/custom_component/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/src/backend/tests/unit/events/__init__.py b/src/backend/tests/unit/events/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/src/backend/tests/unit/events/test_event_manager.py b/src/backend/tests/unit/events/test_event_manager.py
deleted file mode 100644
index 9eadb4335bfc..000000000000
--- a/src/backend/tests/unit/events/test_event_manager.py
+++ /dev/null
@@ -1,213 +0,0 @@
-import asyncio
-import json
-import time
-import uuid
-
-import pytest
-from langflow.events.event_manager import EventManager
-
-from lfx.schema.log import LoggableType
-
-
-class TestEventManager:
- # Registering an event with a valid name and callback using a mock callback function
- def test_register_event_with_valid_name_and_callback_with_mock_callback(self):
- def mock_callback(event_type: str, data: LoggableType):
- pass
-
- queue = asyncio.Queue()
- manager = EventManager(queue)
- manager.register_event("on_test_event", "test_type", mock_callback)
- assert "on_test_event" in manager.events
- assert manager.events["on_test_event"].func == mock_callback
-
- # Registering an event with an empty name
-
- def test_register_event_with_empty_name(self):
- queue = asyncio.Queue()
- manager = EventManager(queue)
- with pytest.raises(ValueError, match="Event name cannot be empty"):
- manager.register_event("", "test_type")
-
- # Registering an event with a valid name and no callback
- def test_register_event_with_valid_name_and_no_callback(self):
- queue = asyncio.Queue()
- manager = EventManager(queue)
- manager.register_event("on_test_event", "test_type")
- assert "on_test_event" in manager.events
- assert manager.events["on_test_event"].func == manager.send_event
-
- # Accessing a non-registered event callback via __getattr__ with the recommended fix
- def test_accessing_non_registered_event_callback_with_recommended_fix(self):
- queue = asyncio.Queue()
- manager = EventManager(queue)
- result = manager.non_registered_event
- assert result == manager.noop
-
- # Accessing a registered event callback via __getattr__
- def test_accessing_registered_event_callback(self):
- def mock_callback(event_type: str, data: LoggableType):
- pass
-
- queue = asyncio.Queue()
- manager = EventManager(queue)
- manager.register_event("on_test_event", "test_type", mock_callback)
- assert manager.on_test_event.func == mock_callback
-
- # Handling a large number of events in the queue
- def test_handling_large_number_of_events(self):
- def mock_queue_put_nowait(item):
- pass
-
- queue = asyncio.Queue()
- queue.put_nowait = mock_queue_put_nowait
- manager = EventManager(queue)
-
- for i in range(1000):
- manager.register_event(f"on_test_event_{i}", "test_type", manager.noop)
-
- assert len(manager.events) == 1000
-
- # Testing registration of an event with an invalid name with the recommended fix
- def test_register_event_with_invalid_name_fixed(self):
- def mock_callback(event_type, data):
- pass
-
- queue = asyncio.Queue()
- manager = EventManager(queue)
- with pytest.raises(ValueError, match="Event name cannot be empty"):
- manager.register_event("", "test_type", mock_callback)
- with pytest.raises(ValueError, match="Event name must start with 'on_'"):
- manager.register_event("invalid_name", "test_type", mock_callback)
-
- # Sending an event with complex data and verifying successful event transmission
- async def test_sending_event_with_complex_data(self):
- queue = asyncio.Queue()
-
- manager = EventManager(queue)
- manager.register_event("on_test_event", "test_type", manager.noop)
- data = {"key": "value", "nested": [1, 2, 3]}
- manager.send_event(event_type="test_type", data=data)
- event_id, str_data, event_time = await queue.get()
- assert event_id is not None
- assert str_data is not None
- assert event_time <= time.time()
-
- # Sending an event with None data
- def test_sending_event_with_none_data(self):
- queue = asyncio.Queue()
- manager = EventManager(queue)
- manager.register_event("on_test_event", "test_type")
- assert "on_test_event" in manager.events
- assert manager.events["on_test_event"].func.__name__ == "send_event"
-
- # Ensuring thread-safety when accessing the events dictionary
- async def test_thread_safety_accessing_events_dictionary(self):
- def mock_callback(event_type: str, data: LoggableType):
- pass
-
- async def register_events(manager):
- manager.register_event("on_test_event_1", "test_type_1", mock_callback)
- manager.register_event("on_test_event_2", "test_type_2", mock_callback)
-
- async def access_events(manager):
- assert "on_test_event_1" in manager.events
- assert "on_test_event_2" in manager.events
-
- queue = asyncio.Queue()
- manager = EventManager(queue)
-
- await asyncio.gather(register_events(manager), access_events(manager))
-
- # Checking the performance impact of frequent event registrations
- def test_performance_impact_frequent_registrations(self):
- def mock_callback(event_type: str, data: LoggableType):
- pass
-
- queue = asyncio.Queue()
- manager = EventManager(queue)
- for i in range(1000):
- manager.register_event(f"on_test_event_{i}", "test_type", mock_callback)
- assert len(manager.events) == 1000
-
- # Verifying the uniqueness of event IDs for each triggered event
- async def test_event_id_uniqueness_with_await(self):
- queue = asyncio.Queue()
- manager = EventManager(queue)
- manager.register_event("on_test_event", "test_type")
- manager.on_test_event(data={"data_1": "value_1"})
- manager.on_test_event(data={"data_2": "value_2"})
- try:
- event_id_1, _, _ = await asyncio.wait_for(queue.get(), timeout=5)
- event_id_2, _, _ = await asyncio.wait_for(queue.get(), timeout=5)
- except asyncio.TimeoutError:
- pytest.fail("Test timed out while waiting for queue items")
-
- assert event_id_1 != event_id_2
-
- # Documenting the expected wire format of queued events (queue.get is mocked below)
- async def test_queue_receives_correct_event_data_format(self):
- def mock_queue_put_nowait(data):
- pass
-
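- # queue.get is replaced with a canned payload mimicking the manager's SSE-style framing:
- # JSON bytes terminated by a blank line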
- async def mock_queue_get():
- return (uuid.uuid4(), b'{"event": "test_type", "data": "test_data"}\n\n', time.time())
-
- queue = asyncio.Queue()
- queue.put_nowait = mock_queue_put_nowait
- queue.get = mock_queue_get
-
- manager = EventManager(queue)
- manager.register_event("on_test_event", "test_type", manager.noop)
- event_data = "test_data"
- manager.send_event(event_type="test_type", data=event_data)
-
- event_id, str_data, _ = await queue.get()
- assert isinstance(event_id, uuid.UUID)
- assert isinstance(str_data, bytes)
- assert json.loads(str_data.decode("utf-8")) == {"event": "test_type", "data": event_data}
-
- # Verifying the event_id format and the tuple enqueued by send_event
- def test_send_event_id_and_payload_format(self):
- class MockQueue:
- def __init__(self):
- self.data = []
-
- def put_nowait(self, item):
- self.data.append(item)
-
- queue = MockQueue()
- event_manager = EventManager(queue)
- event_manager.register_event("on_test_event", "test_event_type", callback=event_manager.noop)
- event_manager.send_event(event_type="test_type", data={"key": "value"})
-
- assert len(queue.data) == 1
- event_id, str_data, timestamp = queue.data[0]
- # event_id follows this pattern: f"{event_type}-{uuid.uuid4()}"
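- # e.g. "test_type-550e8400-e29b-41d4-a716-446655440000" (illustrative UUID)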
- event_type_from_id, uuid_from_id = event_id.split("-", 1)
- assert event_type_from_id == "test_type"
- assert isinstance(uuid_from_id, str)
- # assert that the uuid_from_id is a valid uuid
- try:
- uuid.UUID(uuid_from_id)
- except ValueError:
- pytest.fail(f"Invalid UUID: {uuid_from_id}")
- assert isinstance(str_data, bytes)
- assert isinstance(timestamp, float)
-
- # Accessing a non-registered event callback via __getattr__
- def test_accessing_non_registered_callback(self):
- class MockQueue:
- def __init__(self):
- pass
-
- def put_nowait(self, item):
- pass
-
- queue = MockQueue()
- event_manager = EventManager(queue)
-
- # Accessing a non-registered event callback should return the 'noop' function
- callback = event_manager.on_non_existing_event
- assert callback.__name__ == "noop"
diff --git a/src/backend/tests/unit/exceptions/__init__.py b/src/backend/tests/unit/exceptions/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/src/backend/tests/unit/exceptions/test_api.py b/src/backend/tests/unit/exceptions/test_api.py
deleted file mode 100644
index 0a27250b87da..000000000000
--- a/src/backend/tests/unit/exceptions/test_api.py
+++ /dev/null
@@ -1,60 +0,0 @@
-from unittest.mock import Mock, patch
-
-from langflow.exceptions.api import APIException, ExceptionBody
-from langflow.services.database.models.flow.model import Flow
-
-
-def test_api_exception():
- mock_exception = Exception("Test exception")
- mock_flow = Mock(spec=Flow)
- mock_outdated_components = ["component1", "component2"]
- mock_suggestion_message = "Update component1, component2"
- mock_component_versions = {
- "component1": "1.0",
- "component2": "1.0",
- }
-
- with (
- patch(
- "langflow.services.database.models.flow.utils.get_outdated_components",
- return_value=mock_outdated_components,
- ),
- patch("langflow.api.utils.get_suggestion_message", return_value=mock_suggestion_message),
- patch(
- "langflow.services.database.models.flow.utils.get_components_versions",
- return_value=mock_component_versions,
- ),
- ):
- # Create an APIException instance
- api_exception = APIException(mock_exception, mock_flow)
-
- # Expected body
- expected_body = ExceptionBody(
- message="Test exception",
- suggestion="The flow contains 2 outdated components. "
- "We recommend updating the following components: component1, component2.",
- )
-
- # Assert the status code
- assert api_exception.status_code == 500
-
- # Assert the detail
- assert api_exception.detail == expected_body.model_dump_json()
-
-
-def test_api_exception_no_flow():
- # Mock data
- mock_exception = Exception("Test exception")
-
- # Create an APIException instance without a flow
- api_exception = APIException(mock_exception)
-
- # Expected body
- expected_body = ExceptionBody(message="Test exception")
-
- # Assert the status code
- assert api_exception.status_code == 500
-
- # Assert the detail
- assert api_exception.detail == expected_body.model_dump_json()
diff --git a/src/backend/tests/unit/graph/__init__.py b/src/backend/tests/unit/graph/__init__.py
deleted file mode 100644
index 3c931cc9f5da..000000000000
--- a/src/backend/tests/unit/graph/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-"""Tests for graph-related functionality."""
diff --git a/src/backend/tests/unit/graph/edge/__init__.py b/src/backend/tests/unit/graph/edge/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/src/backend/tests/unit/graph/graph/__init__.py b/src/backend/tests/unit/graph/graph/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/src/backend/tests/unit/graph/graph/state/__init__.py b/src/backend/tests/unit/graph/graph/state/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/src/backend/tests/unit/graph/vertex/__init__.py b/src/backend/tests/unit/graph/vertex/__init__.py
deleted file mode 100644
index d2509b1d640b..000000000000
--- a/src/backend/tests/unit/graph/vertex/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-"""Tests for vertex-related functionality."""
diff --git a/src/backend/tests/unit/helpers/__init__.py b/src/backend/tests/unit/helpers/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/src/backend/tests/unit/helpers/test_base_model_from_schema.py b/src/backend/tests/unit/helpers/test_base_model_from_schema.py
deleted file mode 100644
index d07a4908e0a3..000000000000
--- a/src/backend/tests/unit/helpers/test_base_model_from_schema.py
+++ /dev/null
@@ -1,159 +0,0 @@
-# Generated by qodo Gen
-
-from typing import Any
-
-import pytest
-from langflow.helpers.base_model import build_model_from_schema
-from pydantic import BaseModel
-from pydantic_core import PydanticUndefined
-
-
-class TestBuildModelFromSchema:
- # Successfully creates a Pydantic model from a valid schema
- def test_create_model_from_valid_schema(self):
- schema = [
- {"name": "field1", "type": "str", "default": "default_value", "description": "A string field"},
- {"name": "field2", "type": "int", "default": 0, "description": "An integer field"},
- {"name": "field3", "type": "bool", "default": False, "description": "A boolean field"},
- ]
- model = build_model_from_schema(schema)
- instance = model(field1="test", field2=123, field3=True)
- assert instance.field1 == "test"
- assert instance.field2 == 123
- assert instance.field3 is True
-
- # Handles empty schema gracefully without errors
- def test_handle_empty_schema(self):
- schema = []
- model = build_model_from_schema(schema)
- instance = model()
- assert instance is not None
-
- # Ensure the model created from the schema exposes the expected attributes on an instance
- def test_model_instance_has_expected_attributes(self):
- schema = [
- {"name": "field1", "type": "str", "default": "default_value1"},
- {"name": "field2", "type": "int", "default": 42},
- {"name": "field3", "type": "list", "default": [1, 2, 3]},
- {"name": "field4", "type": "dict", "default": {"key": "value"}},
- ]
-
- model = build_model_from_schema(schema)
- model_instance = model(field1="test", field2=123, field3=[1, 2, 3], field4={"key": "value"})
-
- assert issubclass(model, BaseModel)
- assert hasattr(model_instance, "field1")
- assert hasattr(model_instance, "field2")
- assert hasattr(model_instance, "field3")
- assert hasattr(model_instance, "field4")
-
- # Field descriptions from the schema are preserved on the model
- def test_field_descriptions_preserved(self):
- schema = [
- {"name": "field1", "type": "str", "default": "default_value1", "description": "Description for field1"},
- {"name": "field2", "type": "int", "default": 42, "description": "Description for field2"},
- {"name": "field3", "type": "list", "default": [1, 2, 3], "description": "Description for field3"},
- {"name": "field4", "type": "dict", "default": {"key": "value"}, "description": "Description for field4"},
- ]
-
- model = build_model_from_schema(schema)
-
- assert model.model_fields["field1"].description == "Description for field1"
- assert model.model_fields["field2"].description == "Description for field2"
- assert model.model_fields["field3"].description == "Description for field3"
- assert model.model_fields["field4"].description == "Description for field4"
-
- # Supports both single and multiple type annotations
- def test_supports_single_and_multiple_type_annotations(self):
- schema = [
- {"name": "field1", "type": "str", "default": "default_value1", "description": "Description 1"},
- {"name": "field2", "type": "list", "default": [1, 2, 3], "description": "Description 2", "multiple": True},
- {"name": "field3", "type": "int", "default": 100, "description": "Description 3"},
- ]
- model_type = build_model_from_schema(schema)
- assert issubclass(model_type, BaseModel)
-
- # Raises ValueError for unknown field types
- def test_rejects_unknown_field_types(self):
- schema = [
- {"name": "field1", "type": "str", "default": "default_value1"},
- {"name": "field2", "type": "unknown_type", "default": "default_value2"},
- ]
- with pytest.raises(ValueError, match="Invalid type: unknown_type"):
- build_model_from_schema(schema)
-
- # Confirms that invalid input raises ValueError with a specific message
- def test_raises_value_error_for_invalid_type(self):
- schema = [{"name": "field1", "type": "invalid_type", "default": "default_value"}]
- with pytest.raises(ValueError, match="Invalid type: invalid_type"):
- build_model_from_schema(schema)
-
- # Processes schemas with missing optional keys like description or multiple
- def test_process_schema_missing_optional_keys(self):
- schema = [
- {"name": "field1", "type": "str", "default": "default_value1"},
- {"name": "field2", "type": "int", "default": 0, "description": "Field 2 description"},
- {"name": "field3", "type": "list", "default": [], "multiple": True},
- {"name": "field4", "type": "dict", "default": {}, "description": "Field 4 description", "multiple": True},
- ]
- result_model = build_model_from_schema(schema)
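- # "multiple": True wraps the field's annotation in an outer list (see field3/field4 below)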
- assert result_model.__annotations__["field1"] == str # noqa: E721
- assert result_model.model_fields["field1"].description == ""
- assert result_model.__annotations__["field2"] == int # noqa: E721
- assert result_model.model_fields["field2"].description == "Field 2 description"
- assert result_model.__annotations__["field3"] == list[list[Any]]
- assert result_model.model_fields["field3"].description == ""
- assert result_model.__annotations__["field4"] == list[dict[str, Any]]
- assert result_model.model_fields["field4"].description == "Field 4 description"
-
- # Deals with schemas containing fields with None as default values
- def test_schema_fields_with_none_default(self):
- schema = [
- {"name": "field1", "type": "str", "default": None, "description": "Field 1 description"},
- {"name": "field2", "type": "int", "default": None, "description": "Field 2 description"},
- {"name": "field3", "type": "list", "default": None, "description": "Field 3 description", "multiple": True},
- ]
- model = build_model_from_schema(schema)
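- # a None default is treated as "no default", leaving the field required (PydanticUndefined)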
- assert model.model_fields["field1"].default == PydanticUndefined
- assert model.model_fields["field2"].default == PydanticUndefined
- assert model.model_fields["field3"].default == PydanticUndefined
-
- # Checks for proper handling of nested list and dict types
- def test_nested_list_and_dict_types_handling(self):
- schema = [
- {"name": "field1", "type": "list", "default": [], "description": "list field", "multiple": True},
- {"name": "field2", "type": "dict", "default": {}, "description": "Dict field"},
- ]
- model_type = build_model_from_schema(schema)
- assert issubclass(model_type, BaseModel)
-
- # Builds a model from a multi-field schema without errors
- def test_handles_multi_field_schema(self):
- schema = [
- {"name": "field1", "type": "str", "default": "default_value1", "description": "Description 1"},
- {"name": "field2", "type": "int", "default": 100, "description": "Description 2"},
- {"name": "field3", "type": "list", "default": [1, 2, 3], "description": "Description 3", "multiple": True},
- {"name": "field4", "type": "dict", "default": {"key": "value"}, "description": "Description 4"},
- ]
- model_type = build_model_from_schema(schema)
- assert issubclass(model_type, BaseModel)
-
- # Ensures that the function returns a valid Pydantic model class
- def test_returns_valid_model_class(self):
- schema = [
- {"name": "field1", "type": "str", "default": "default_value1", "description": "Description for field1"},
- {"name": "field2", "type": "int", "default": 42, "description": "Description for field2", "multiple": True},
- ]
- model_class = build_model_from_schema(schema)
- assert issubclass(model_class, BaseModel)
-
- # Validates that the last occurrence of a duplicate field name defines the type in the schema
- def test_duplicate_field_names_last_occurrence_wins(self):
- schema = [
- {"name": "field1", "type": "str", "default": "default_value1"},
- {"name": "field2", "type": "int", "default": 0},
- {"name": "field1", "type": "float", "default": 0.0}, # Duplicate field name
- ]
- model = build_model_from_schema(schema)
- assert model.__annotations__["field1"] == float # noqa: E721
- assert model.__annotations__["field2"] == int # noqa: E721
diff --git a/src/backend/tests/unit/helpers/test_data.py b/src/backend/tests/unit/helpers/test_data.py
deleted file mode 100644
index e5b3513a61a1..000000000000
--- a/src/backend/tests/unit/helpers/test_data.py
+++ /dev/null
@@ -1,117 +0,0 @@
-import pytest
-from langflow.helpers.data import data_to_text_list
-
-from lfx.schema import Data
-
-
-@pytest.mark.parametrize(
- (
- "template",
- "data",
- "expected",
- ),
- [
- (
- "{name} is {age} years old",
- Data(data={"name": "Alice", "age": 25}),
- (["Alice is 25 years old"], [Data(data={"name": "Alice", "age": 25})]),
- ),
- (
- "{name} is {age} years old",
- [
- Data(data={"name": "Alice", "age": 25}),
- Data(data={"name": "Bob", "age": 30}),
- Data(data={"name": "Alex", "age": 35}),
- ],
- (
- [
- "Alice is 25 years old",
- "Bob is 30 years old",
- "Alex is 35 years old",
- ],
- [
- Data(data={"name": "Alice", "age": 25}),
- Data(data={"name": "Bob", "age": 30}),
- Data(data={"name": "Alex", "age": 35}),
- ],
- ),
- ),
- ],
-)
-def test_data_to_text_list(template, data, expected):
- result = data_to_text_list(template, data)
- assert result == expected
-
-
-def test_data_to_text_list__template_empty():
- template = ""
- data = Data(data={"key": "value"})
-
- result = data_to_text_list(template, data)
-
- assert isinstance(result, tuple)
- assert len(result) == 2
- assert isinstance(result[0], list)
- assert isinstance(result[1], list)
- assert template in result[0]
- assert data in result[1]
-
-
-def test_data_to_text_list__template_without_placeholder():
- template = "My favorite color is gray"
- data = Data(data={"color": "silver"})
-
- result = data_to_text_list(template, data)
-
- assert isinstance(result, tuple)
- assert len(result) == 2
- assert isinstance(result[0], list)
- assert isinstance(result[1], list)
- assert template in result[0]
- assert data in result[1]
-
-
-def test_data_to_text_list__template_without_placeholder_and_data_attribute_empty():
- template = "My favorite color is gray"
- data_list = [Data(data={})]
-
- result = data_to_text_list(template, data_list)
-
- assert isinstance(result, tuple)
- assert len(result) == 2
- assert isinstance(result[0], list)
- assert isinstance(result[1], list)
- assert template in result[0]
- assert data_list == result[1]
-
-
-def test_data_to_text_list__template_wrong_placeholder():
- template = "My favorite color is {color}"
- data = Data(data={"fruit": "apple"})
-
- # Should not raise KeyError due to defaultdict behavior
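- # (missing placeholder keys render as empty strings, hence the trailing space in the expected text)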
- result = data_to_text_list(template, data)
- assert result == (["My favorite color is "], [data])
-
-
-def test_data_to_text_list__data_with_data_attribute_empty():
- template = "My favorite color is {color}"
- data = Data(data={})
-
- # Should not raise KeyError due to defaultdict behavior
- result = data_to_text_list(template, data)
- assert result == (["My favorite color is "], [data])
-
-
-def test_data_to_text_list__data_contains_nested_data_key():
- template = "My data is: {data}"
- data = Data(data={"data": {"key": "value"}})
-
- result = data_to_text_list(template, data)
-
- assert isinstance(result, tuple)
- assert len(result) == 2
- assert isinstance(result[0], list)
- assert isinstance(result[1], list)
- assert template not in result[0]
- assert data in result[1]
diff --git a/src/backend/tests/unit/helpers/test_data_to_text_list.py b/src/backend/tests/unit/helpers/test_data_to_text_list.py
deleted file mode 100644
index 5022d944b81d..000000000000
--- a/src/backend/tests/unit/helpers/test_data_to_text_list.py
+++ /dev/null
@@ -1,135 +0,0 @@
-import pytest
-from langflow.helpers.data import data_to_text_list
-
-from lfx.schema import Data
-
-
-@pytest.mark.parametrize(
- ("template", "data", "expected_text"),
- [
- # Test basic string data
- (
- "Text: {text}",
- Data(text="Hello"),
- ["Text: Hello"],
- ),
- # Test dictionary data
- (
- "{name} is {age} years old",
- Data(data={"name": "Alice", "age": 25}),
- ["Alice is 25 years old"],
- ),
- # Test list of Data objects
- (
- "{name} is {age} years old",
- [
- Data(data={"name": "Alice", "age": 25}),
- Data(data={"name": "Bob", "age": 30}),
- ],
- ["Alice is 25 years old", "Bob is 30 years old"],
- ),
- # Test nested data dictionary
- (
- "User: {text}",
- Data(data={"data": {"text": "Hello World"}}),
- ["User: Hello World"],
- ),
- # Test error message in data
- (
- "Error: {text}",
- Data(data={"error": "Something went wrong"}),
- ["Error: Something went wrong"],
- ),
- # Test non-Data object conversion
- (
- "Value: {text}",
- Data(text="Simple string"),
- ["Value: Simple string"],
- ),
- ],
-)
-def test_data_to_text_list_parametrized(template, data, expected_text):
- """Test various input combinations for data_to_text_list."""
- result = data_to_text_list(template, data)
- assert result[0] == expected_text
- assert all(isinstance(d, Data) for d in result[1])
-
-
-def test_data_to_text_list_none_data():
- """Test handling of None data input."""
- result = data_to_text_list("template", None)
- assert result == ([], [])
-
-
-def test_data_to_text_list_none_template():
- """Test handling of None template input."""
- with pytest.raises(ValueError, match="Template must be a string, but got None"):
- data_to_text_list(None, Data(text="test"))
-
-
-def test_data_to_text_list_invalid_template_type():
- """Test handling of invalid template type."""
- with pytest.raises(TypeError, match="Template must be a string, but got"):
- data_to_text_list(123, Data(text="test"))
-
-
-def test_data_to_text_list_missing_key():
- """Test handling of missing template key."""
- template = "Hello {missing_key}"
- data = Data(data={"existing_key": "value"})
- # Should not raise KeyError due to defaultdict
- result = data_to_text_list(template, data)
- assert result == (["Hello "], [data])
-
-
-def test_data_to_text_list_empty_data_dict():
- """Test handling of empty data dictionary."""
- template = "Hello {text}"
- data = Data(data={})
- result = data_to_text_list(template, data)
- assert result == (["Hello "], [data])
-
-
-def test_data_to_text_list_mixed_data_types():
- """Test handling of mixed data types in list."""
- template = "Item: {text}"
- data = [
- Data(text="First"),
- "Second",
- Data(data={"text": "Third"}),
- 123,
- ]
- result = data_to_text_list(template, data)
- expected_texts = [
- "Item: First",
- "Item: Second",
- "Item: Third",
- "Item: 123",
- ]
- assert result[0] == expected_texts
- assert len(result[1]) == 4
- assert all(isinstance(d, Data) for d in result[1])
-
-
-def test_data_to_text_list_complex_nested_data():
- """Test handling of complex nested data structures."""
- template = "Name: {name}, Info: {text}, Status: {status}"
- data = Data(data={"name": "Test", "data": {"text": "Nested text", "status": "active"}})
- result = data_to_text_list(template, data)
- expected = (["Name: Test, Info: Nested text, Status: active"], [data])
- assert result == expected
-
-
-def test_data_to_text_list_empty_template():
- """Test handling of empty template string."""
- data = Data(data={"key": "value"})
- result = data_to_text_list("", data)
- assert result == ([""], [data])
-
-
-def test_data_to_text_list_string_data():
- """Test handling of string data in Data object."""
- template = "Message: {text}"
- data = Data(data={"text": "Direct string"})
- result = data_to_text_list(template, data)
- assert result == (["Message: Direct string"], [data])
diff --git a/src/backend/tests/unit/initial_setup/__init__.py b/src/backend/tests/unit/initial_setup/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/src/backend/tests/unit/initial_setup/starter_projects/__init__.py b/src/backend/tests/unit/initial_setup/starter_projects/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/src/backend/tests/unit/initial_setup/starter_projects/test_memory_chatbot.py b/src/backend/tests/unit/initial_setup/starter_projects/test_memory_chatbot.py
deleted file mode 100644
index 477d86c0e330..000000000000
--- a/src/backend/tests/unit/initial_setup/starter_projects/test_memory_chatbot.py
+++ /dev/null
@@ -1,152 +0,0 @@
-import operator
-from collections import deque
-from typing import TYPE_CHECKING
-
-import pytest
-
-from lfx.components.helpers.memory import MemoryComponent
-from lfx.components.input_output import ChatInput, ChatOutput
-from lfx.components.openai.openai_chat_model import OpenAIModelComponent
-from lfx.components.processing import PromptComponent
-from lfx.components.processing.converter import TypeConverterComponent
-from lfx.graph.graph.base import Graph
-from lfx.graph.graph.constants import Finish
-
-if TYPE_CHECKING:
- from lfx.graph.graph.schema import GraphDump
-
-
-@pytest.fixture
-def memory_chatbot_graph():
- session_id = "test_session_id"
- template = """{context}
-
-User: {user_message}
-AI: """
- memory_component = MemoryComponent(_id="chat_memory")
- memory_component.set(session_id=session_id)
- chat_input = ChatInput(_id="chat_input")
- type_converter = TypeConverterComponent(_id="type_converter")
- type_converter.set(input_data=memory_component.retrieve_messages_dataframe)
- prompt_component = PromptComponent(_id="prompt")
- prompt_component.set(
- template=template,
- user_message=chat_input.message_response,
- context=type_converter.convert_to_message,
- )
- openai_component = OpenAIModelComponent(_id="openai")
- openai_component.set(
- input_value=prompt_component.build_prompt, max_tokens=100, temperature=0.1, api_key="test_api_key"
- )
- openai_component.set_on_output(name="text_output", value="Mock response", cache=True)
-
- chat_output = ChatOutput(_id="chat_output")
- chat_output.set(input_value=openai_component.text_response)
-
- graph = Graph(chat_input, chat_output)
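- # in_degree_map counts incoming edges per vertex; entry points sit at zero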
- assert graph.in_degree_map == {
- "chat_output": 1,
- "type_converter": 1,
- "prompt": 2,
- "openai": 1,
- "chat_input": 0,
- "chat_memory": 0,
- }
- return graph
-
-
-@pytest.mark.usefixtures("client")
-def test_memory_chatbot(memory_chatbot_graph):
- # Now we run step by step
- expected_order = deque(["chat_input", "chat_memory", "type_converter", "prompt", "openai", "chat_output"])
- assert memory_chatbot_graph.in_degree_map == {
- "chat_output": 1,
- "type_converter": 1,
- "prompt": 2,
- "openai": 1,
- "chat_input": 0,
- "chat_memory": 0,
- }
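- # vertices_layers follows topological order; first_layer holds the zero in-degree roots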
- assert memory_chatbot_graph.vertices_layers == [["type_converter"], ["prompt"], ["openai"], ["chat_output"]]
- assert memory_chatbot_graph.first_layer == ["chat_input", "chat_memory"]
-
- for step in expected_order:
- result = memory_chatbot_graph.step()
- if isinstance(result, Finish):
- break
-
- assert step == result.vertex.id, (memory_chatbot_graph.in_degree_map, memory_chatbot_graph.vertices_layers)
-
-
-def test_memory_chatbot_dump_structure(memory_chatbot_graph: Graph):
- # Now we run step by step
- graph_dict = memory_chatbot_graph.dump(
- name="Memory Chatbot", description="A memory chatbot", endpoint_name="membot"
- )
- assert isinstance(graph_dict, dict)
- # Test structure
- assert "data" in graph_dict
- assert "is_component" in graph_dict
-
- data_dict = graph_dict["data"]
- assert "nodes" in data_dict
- assert "edges" in data_dict
- assert "description" in graph_dict
- assert "endpoint_name" in graph_dict
-
- # Test data
- nodes = data_dict["nodes"]
- edges = data_dict["edges"]
- description = graph_dict["description"]
- endpoint_name = graph_dict["endpoint_name"]
-
- assert len(nodes) == 6
- assert len(edges) == 5
- assert description is not None
- assert endpoint_name is not None
-
-
-def test_memory_chatbot_dump_components_and_edges(memory_chatbot_graph: Graph):
- # Check all components and edges were dumped correctly
- graph_dict: GraphDump = memory_chatbot_graph.dump(
- name="Memory Chatbot", description="A memory chatbot", endpoint_name="membot"
- )
-
- data_dict = graph_dict["data"]
- nodes = data_dict["nodes"]
- edges = data_dict["edges"]
-
- # sort the nodes by id
- nodes = sorted(nodes, key=operator.itemgetter("id"))
-
- # Check each node
- assert nodes[0]["data"]["type"] == "ChatInput"
- assert nodes[0]["id"] == "chat_input"
-
- assert nodes[1]["data"]["type"] == "Memory"
- assert nodes[1]["id"] == "chat_memory"
-
- assert nodes[2]["data"]["type"] == "ChatOutput"
- assert nodes[2]["id"] == "chat_output"
-
- assert nodes[3]["data"]["type"] == "OpenAIModel"
- assert nodes[3]["id"] == "openai"
-
- assert nodes[4]["data"]["type"] == "Prompt Template"
- assert nodes[4]["id"] == "prompt"
-
- # Check edges
- expected_edges = [
- ("chat_input", "prompt"),
- ("chat_memory", "type_converter"),
- ("type_converter", "prompt"),
- ("prompt", "openai"),
- ("openai", "chat_output"),
- ]
-
- assert len(edges) == len(expected_edges)
-
- for edge in edges:
- source = edge["source"]
- target = edge["target"]
- assert (source, target) in expected_edges, edge
diff --git a/src/backend/tests/unit/initial_setup/starter_projects/test_vector_store_rag.py b/src/backend/tests/unit/initial_setup/starter_projects/test_vector_store_rag.py
deleted file mode 100644
index 51a9652813e1..000000000000
--- a/src/backend/tests/unit/initial_setup/starter_projects/test_vector_store_rag.py
+++ /dev/null
@@ -1,326 +0,0 @@
-import copy
-import operator
-from textwrap import dedent
-
-import pytest
-
-from lfx.components.data import FileComponent
-from lfx.components.input_output import ChatInput, ChatOutput
-from lfx.components.openai.openai import OpenAIEmbeddingsComponent
-from lfx.components.openai.openai_chat_model import OpenAIModelComponent
-from lfx.components.processing import ParseDataComponent, PromptComponent
-from lfx.components.processing.split_text import SplitTextComponent
-from lfx.components.vectorstores import AstraDBVectorStoreComponent
-from lfx.graph.graph.base import Graph
-from lfx.graph.graph.constants import Finish
-from lfx.schema import Data
-from lfx.schema.dataframe import DataFrame
-from lfx.schema.message import Message
-
-
-@pytest.fixture
-def ingestion_graph():
- # Ingestion Graph
- file_component = FileComponent(_id="file-123")
- file_component.set(path="test.txt")
- file_component.set_on_output(name="message", value=Message(text="This is a test file."), cache=True)
- text_splitter = SplitTextComponent(_id="text-splitter-123")
- text_splitter.set(data_inputs=file_component.load_files_message)
- openai_embeddings = OpenAIEmbeddingsComponent(_id="openai-embeddings-123")
- openai_embeddings.set(
- openai_api_key="sk-123", openai_api_base="https://api.openai.com/v1", openai_api_type="openai"
- )
-
- vector_store = AstraDBVectorStoreComponent(_id="ingestion-vector-store-123")
-
- # Mock search_documents by changing the value otherwise set by the vector_store_connection_decorator
- vector_store.set_on_output(name="vectorstoreconnection", value=[Data(text="This is a test file.")], cache=True)
- vector_store.set_on_output(name="search_results", value=[Data(text="This is a test file.")], cache=True)
- vector_store.set_on_output(name="dataframe", value=DataFrame(data=[Data(text="This is a test file.")]), cache=True)
- vector_store.set(
- embedding_model=openai_embeddings.build_embeddings,
- ingest_data=text_splitter.split_text,
- api_endpoint="https://astra.example.com",
- token="token", # noqa: S106
- )
- return Graph(file_component, vector_store)
-
-
-@pytest.fixture
-def rag_graph():
- # RAG Graph
- openai_embeddings = OpenAIEmbeddingsComponent(_id="openai-embeddings-124")
- chat_input = ChatInput(_id="chatinput-123")
- chat_input.get_output("message").value = Message(text="What is the meaning of life?")
- rag_vector_store = AstraDBVectorStoreComponent(_id="rag-vector-store-123")
- rag_vector_store.set(
- search_query=chat_input.message_response,
- api_endpoint="https://astra.example.com",
- token="token", # noqa: S106
- embedding_model=openai_embeddings.build_embeddings,
- )
- # Mock search_documents
- data_list = [
- Data(data={"text": "Hello, world!"}),
- Data(data={"text": "Goodbye, world!"}),
- ]
- rag_vector_store.set_on_output(
- name="search_results",
- value=data_list,
- cache=True,
- )
- rag_vector_store.set_on_output(name="dataframe", value=DataFrame(data=data_list), cache=True)
- parse_data = ParseDataComponent(_id="parse-data-123")
- parse_data.set(data=rag_vector_store.search_documents)
- prompt_component = PromptComponent(_id="prompt-123")
- prompt_component.set(
- template=dedent("""Given the following context, answer the question.
- Context:{context}
-
- Question: {question}
- Answer:"""),
- context=parse_data.parse_data,
- question=chat_input.message_response,
- )
-
- openai_component = OpenAIModelComponent(_id="openai-123")
- openai_component.set(api_key="sk-123", openai_api_base="https://api.openai.com/v1")
- openai_component.set_on_output(name="text_output", value="Hello, world!", cache=True)
- openai_component.set(input_value=prompt_component.build_prompt)
-
- chat_output = ChatOutput(_id="chatoutput-123")
- chat_output.set(input_value=openai_component.text_response)
-
- return Graph(start=chat_input, end=chat_output)
-
-
-async def test_vector_store_rag(ingestion_graph, rag_graph):
- assert ingestion_graph is not None
- ingestion_ids = [
- "file-123",
- "text-splitter-123",
- "openai-embeddings-123",
- "ingestion-vector-store-123",
- ]
- assert rag_graph is not None
- rag_ids = [
- "chatinput-123",
- "chatoutput-123",
- "openai-123",
- "parse-data-123",
- "prompt-123",
- "rag-vector-store-123",
- "openai-embeddings-124",
- ]
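- # async_start yields one result per executed vertex plus a trailing Finish sentinel (hence len + 1)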
- for ids, graph, len_results in [(ingestion_ids, ingestion_graph, 5), (rag_ids, rag_graph, 8)]:
- results = [result async for result in graph.async_start(reset_output_values=False)]
- assert len(results) == len_results
- vids = [result.vertex.id for result in results if hasattr(result, "vertex")]
- assert all(vid in ids for vid in vids), f"Diff: {set(vids) - set(ids)}"
- assert results[-1] == Finish()
-
-
-def test_vector_store_rag_dump_components_and_edges(ingestion_graph, rag_graph):
- # Test ingestion graph components and edges
- ingestion_graph_dump = ingestion_graph.dump(
- name="Ingestion Graph", description="Graph for data ingestion", endpoint_name="ingestion"
- )
-
- ingestion_data = ingestion_graph_dump["data"]
- ingestion_nodes = ingestion_data["nodes"]
- ingestion_edges = ingestion_data["edges"]
-
- # Define expected nodes with their types
- expected_nodes = {
- "file-123": "File",
- "openai-embeddings-123": "OpenAIEmbeddings",
- "text-splitter-123": "SplitText",
- "ingestion-vector-store-123": "AstraDB",
- }
-
- # Verify number of nodes
- assert len(ingestion_nodes) == len(expected_nodes), "Unexpected number of nodes"
-
- # Create a mapping of node IDs to their data for easier lookup
- node_map = {node["id"]: node["data"] for node in ingestion_nodes}
-
- # Verify each expected node exists with correct type
- for node_id, expected_type in expected_nodes.items():
- assert node_id in node_map, f"Missing node {node_id}"
- assert node_map[node_id]["type"] == expected_type, (
- f"Node {node_id} has incorrect type. Expected {expected_type}, got {node_map[node_id]['type']}"
- )
-
- # Verify all nodes in graph are expected
- unexpected_nodes = set(node_map.keys()) - set(expected_nodes.keys())
- assert not unexpected_nodes, f"Found unexpected nodes: {unexpected_nodes}"
-
- # Check edges in the ingestion graph
- expected_ingestion_edges = [
- ("file-123", "text-splitter-123"),
- ("text-splitter-123", "ingestion-vector-store-123"),
- ("openai-embeddings-123", "ingestion-vector-store-123"),
- ]
- assert len(ingestion_edges) == len(expected_ingestion_edges)
-
- for edge in ingestion_edges:
- source = edge["source"]
- target = edge["target"]
- assert (source, target) in expected_ingestion_edges, edge
-
- # Test RAG graph components and edges
- rag_graph_dump = rag_graph.dump(
- name="RAG Graph", description="Graph for Retrieval-Augmented Generation", endpoint_name="rag"
- )
-
- rag_data = rag_graph_dump["data"]
- rag_nodes = rag_data["nodes"]
- rag_edges = rag_data["edges"]
-
- # Sort nodes by id to check components
- rag_nodes = sorted(rag_nodes, key=operator.itemgetter("id"))
-
- # Check components in the RAG graph
- assert rag_nodes[0]["data"]["type"] == "ChatInput"
- assert rag_nodes[0]["id"] == "chatinput-123"
-
- assert rag_nodes[1]["data"]["type"] == "ChatOutput"
- assert rag_nodes[1]["id"] == "chatoutput-123"
-
- assert rag_nodes[2]["data"]["type"] == "OpenAIModel"
- assert rag_nodes[2]["id"] == "openai-123"
-
- assert rag_nodes[3]["data"]["type"] == "OpenAIEmbeddings"
- assert rag_nodes[3]["id"] == "openai-embeddings-124"
-
- assert rag_nodes[4]["data"]["type"] == "ParseData"
- assert rag_nodes[4]["id"] == "parse-data-123"
-
- assert rag_nodes[5]["data"]["type"] == "Prompt Template"
- assert rag_nodes[5]["id"] == "prompt-123"
-
- assert rag_nodes[6]["data"]["type"] == "AstraDB"
- assert rag_nodes[6]["id"] == "rag-vector-store-123"
-
- # Check edges in the RAG graph
- expected_rag_edges = [
- ("chatinput-123", "rag-vector-store-123"),
- ("openai-embeddings-124", "rag-vector-store-123"),
- ("chatinput-123", "prompt-123"),
- ("rag-vector-store-123", "parse-data-123"),
- ("parse-data-123", "prompt-123"),
- ("prompt-123", "openai-123"),
- ("openai-123", "chatoutput-123"),
- ]
- assert len(rag_edges) == len(expected_rag_edges), rag_edges
-
- for edge in rag_edges:
- source = edge["source"]
- target = edge["target"]
- assert (source, target) in expected_rag_edges, f"Edge {source} -> {target} not found"
-
-
-def test_vector_store_rag_add(ingestion_graph: Graph, rag_graph: Graph):
- ingestion_graph_copy = copy.deepcopy(ingestion_graph)
- rag_graph_copy = copy.deepcopy(rag_graph)
- ingestion_graph_copy += rag_graph_copy
-
- assert len(ingestion_graph_copy.vertices) == len(ingestion_graph.vertices) + len(rag_graph.vertices), (
- f"Vertices mismatch: {len(ingestion_graph_copy.vertices)} "
- f"!= {len(ingestion_graph.vertices)} + {len(rag_graph.vertices)}"
- )
- assert len(ingestion_graph_copy.edges) == len(ingestion_graph.edges) + len(rag_graph.edges), (
- f"Edges mismatch: {len(ingestion_graph_copy.edges)} != {len(ingestion_graph.edges)} + {len(rag_graph.edges)}"
- )
-
- combined_graph_dump = ingestion_graph_copy.dump(
- name="Combined Graph", description="Graph for data ingestion and RAG", endpoint_name="combined"
- )
-
- combined_data = combined_graph_dump["data"]
- combined_nodes = combined_data["nodes"]
- combined_edges = combined_data["edges"]
-
- # Sort nodes by id to check components
- combined_nodes = sorted(combined_nodes, key=operator.itemgetter("id"))
-
- # Expected components in the combined graph (both ingestion and RAG nodes)
- expected_nodes = sorted(
- [
- {"id": "file-123", "type": "File"},
- {"id": "openai-embeddings-123", "type": "OpenAIEmbeddings"},
- {"id": "text-splitter-123", "type": "SplitText"},
- {"id": "ingestion-vector-store-123", "type": "AstraDB"},
- {"id": "chatinput-123", "type": "ChatInput"},
- {"id": "chatoutput-123", "type": "ChatOutput"},
- {"id": "openai-123", "type": "OpenAIModel"},
- {"id": "openai-embeddings-124", "type": "OpenAIEmbeddings"},
- {"id": "parse-data-123", "type": "ParseData"},
- {"id": "prompt-123", "type": "Prompt Template"},
- {"id": "rag-vector-store-123", "type": "AstraDB"},
- ],
- key=operator.itemgetter("id"),
- )
-
- for expected_node, combined_node in zip(expected_nodes, combined_nodes, strict=True):
- assert combined_node["data"]["type"] == expected_node["type"]
- assert combined_node["id"] == expected_node["id"]
-
- # Expected edges in the combined graph (both ingestion and RAG edges)
- expected_combined_edges = [
- ("file-123", "text-splitter-123"),
- ("text-splitter-123", "ingestion-vector-store-123"),
- ("openai-embeddings-123", "ingestion-vector-store-123"),
- ("chatinput-123", "rag-vector-store-123"),
- ("openai-embeddings-124", "rag-vector-store-123"),
- ("chatinput-123", "prompt-123"),
- ("rag-vector-store-123", "parse-data-123"),
- ("parse-data-123", "prompt-123"),
- ("prompt-123", "openai-123"),
- ("openai-123", "chatoutput-123"),
- ]
-
- assert len(combined_edges) == len(expected_combined_edges), combined_edges
-
- for edge in combined_edges:
- source = edge["source"]
- target = edge["target"]
- assert (source, target) in expected_combined_edges, f"Edge {source} -> {target} not found"
-
-
-def test_vector_store_rag_dump(ingestion_graph, rag_graph):
- # Test ingestion graph dump
- ingestion_graph_dump = ingestion_graph.dump(
- name="Ingestion Graph", description="Graph for data ingestion", endpoint_name="ingestion"
- )
- assert isinstance(ingestion_graph_dump, dict)
-
- ingestion_data = ingestion_graph_dump["data"]
- assert "nodes" in ingestion_data
- assert "edges" in ingestion_data
- assert "description" in ingestion_graph_dump
- assert "endpoint_name" in ingestion_graph_dump
-
- ingestion_nodes = ingestion_data["nodes"]
- ingestion_edges = ingestion_data["edges"]
- assert len(ingestion_nodes) == 4 # There are 4 components in the ingestion graph
- assert len(ingestion_edges) == 3 # There are 3 connections between components
-
- # Test RAG graph dump
- rag_graph_dump = rag_graph.dump(
- name="RAG Graph", description="Graph for Retrieval-Augmented Generation", endpoint_name="rag"
- )
- assert isinstance(rag_graph_dump, dict)
-
- rag_data = rag_graph_dump["data"]
- assert "nodes" in rag_data
- assert "edges" in rag_data
- assert "description" in rag_graph_dump
- assert "endpoint_name" in rag_graph_dump
-
- rag_nodes = rag_data["nodes"]
- rag_edges = rag_data["edges"]
- assert len(rag_nodes) == 7 # There are 7 components in the RAG graph
- assert len(rag_edges) == 7 # There are 7 connections between components
diff --git a/src/backend/tests/unit/initial_setup/test_setup_functions.py b/src/backend/tests/unit/initial_setup/test_setup_functions.py
deleted file mode 100644
index c660e445a4aa..000000000000
--- a/src/backend/tests/unit/initial_setup/test_setup_functions.py
+++ /dev/null
@@ -1,52 +0,0 @@
-import asyncio
-from uuid import uuid4
-
-import pytest
-from langflow.initial_setup.setup import DEFAULT_FOLDER_NAME, get_or_create_default_folder, session_scope
-from langflow.services.database.models.folder.model import FolderRead
-
-
-@pytest.mark.usefixtures("client")
-async def test_get_or_create_default_folder_creation() -> None:
- """Test that a default project is created for a new user.
-
- This test verifies that when no default project exists for a given user,
- get_or_create_default_folder creates one with the expected name and assigns it an ID.
- """
- test_user_id = uuid4()
- async with session_scope() as session:
- folder = await get_or_create_default_folder(session, test_user_id)
- assert folder.name == DEFAULT_FOLDER_NAME, "The project name should match the default."
- assert hasattr(folder, "id"), "The project should have an 'id' attribute after creation."
-
-
-@pytest.mark.usefixtures("client")
-async def test_get_or_create_default_folder_idempotency() -> None:
- """Test that subsequent calls to get_or_create_default_folder return the same project.
-
- The function should be idempotent such that if a default project already exists,
- calling the function again does not create a new one.
- """
- test_user_id = uuid4()
- async with session_scope() as session:
- folder_first = await get_or_create_default_folder(session, test_user_id)
- folder_second = await get_or_create_default_folder(session, test_user_id)
- assert folder_first.id == folder_second.id, "Both calls should return the same folder instance."
-
-
-@pytest.mark.usefixtures("client")
-async def test_get_or_create_default_folder_concurrent_calls() -> None:
- """Test concurrent invocations of get_or_create_default_folder.
-
- This test ensures that when multiple concurrent calls are made for the same user,
- only one default project is created, demonstrating idempotency under concurrent access.
- """
- test_user_id = uuid4()
-
- async def get_folder() -> FolderRead:
- async with session_scope() as session:
- return await get_or_create_default_folder(session, test_user_id)
-
- results = await asyncio.gather(get_folder(), get_folder(), get_folder())
- folder_ids = {folder.id for folder in results}
- assert len(folder_ids) == 1, "Concurrent calls must return a single, consistent folder instance."
diff --git a/src/backend/tests/unit/inputs/__init__.py b/src/backend/tests/unit/inputs/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/src/backend/tests/unit/inputs/test_inputs.py b/src/backend/tests/unit/inputs/test_inputs.py
deleted file mode 100644
index dde8024524d1..000000000000
--- a/src/backend/tests/unit/inputs/test_inputs.py
+++ /dev/null
@@ -1,316 +0,0 @@
-import pytest
-from pydantic import ValidationError
-
-from lfx.inputs.inputs import (
- BoolInput,
- CodeInput,
- DataInput,
- DictInput,
- DropdownInput,
- FileInput,
- FloatInput,
- HandleInput,
- InputTypesMap,
- IntInput,
- MessageTextInput,
- MultilineInput,
- MultilineSecretInput,
- MultiselectInput,
- NestedDictInput,
- PromptInput,
- SecretStrInput,
- SliderInput,
- StrInput,
- TabInput,
- TableInput,
- instantiate_input,
-)
-from lfx.schema.message import Message
-
-
-def test_table_input_valid():
- data = TableInput(name="valid_table", value=[{"key": "value"}, {"key2": "value2"}])
- assert data.value == [{"key": "value"}, {"key2": "value2"}]
-
-
-def test_slider_input_valid():
- data = SliderInput(name="valid_slider", value=10)
- assert data.value == 10
-
-
-def test_table_input_invalid():
- with pytest.raises(ValidationError):
- TableInput(name="invalid_table", value="invalid")
-
- with pytest.raises(ValidationError):
- TableInput(name="invalid_table", value=[{"key": "value"}, "invalid"])
-
-
-def test_str_input_valid():
- data = StrInput(name="valid_str", value="This is a string")
- assert data.value == "This is a string"
-
-
-def test_str_input_invalid():
- with pytest.warns(UserWarning, match="Invalid value type.*for input"):
- StrInput(name="invalid_str", value=1234)
-
-
-def test_message_text_input_valid():
- data = MessageTextInput(name="valid_msg", value="This is a message")
- assert data.value == "This is a message"
-
- msg = Message(text="This is a message")
- data = MessageTextInput(name="valid_msg", value=msg)
- assert data.value == "This is a message"
-
-
-def test_message_text_input_invalid():
- with pytest.raises(ValidationError):
- MessageTextInput(name="invalid_msg", value=1234)
-
-
-def test_instantiate_input_valid():
- data = {"name": "valid_input", "value": "This is a string"}
- input_instance = instantiate_input("StrInput", data)
- assert isinstance(input_instance, StrInput)
- assert input_instance.value == "This is a string"
-
-
-def test_instantiate_input_invalid():
- with pytest.raises(ValueError, match="Invalid input type: InvalidInput"):
- instantiate_input("InvalidInput", {"name": "invalid_input", "value": "This is a string"})
-
-
-def test_handle_input_valid():
- data = HandleInput(name="valid_handle", input_types=["BaseLanguageModel"])
- assert data.input_types == ["BaseLanguageModel"]
-
-
-def test_handle_input_invalid():
- with pytest.raises(ValidationError):
- HandleInput(name="invalid_handle", input_types="BaseLanguageModel")
-
-
-def test_data_input_valid():
- data_input = DataInput(name="valid_data", input_types=["Data"])
- assert data_input.input_types == ["Data"]
-
-
-def test_prompt_input_valid():
- prompt_input = PromptInput(name="valid_prompt", value="Enter your name")
- assert prompt_input.value == "Enter your name"
-
-
-def test_code_input_valid():
- code_input = CodeInput(name="valid_code", value="def hello():\n print('Hello, World!')")
- assert code_input.value == "def hello():\n print('Hello, World!')"
-
-
-def test_multiline_input_valid():
- multiline_input = MultilineInput(name="valid_multiline", value="This is a\nmultiline input")
- assert multiline_input.value == "This is a\nmultiline input"
- assert multiline_input.multiline is True
-
-
-def test_multiline_input_invalid():
- with pytest.raises(ValidationError):
- MultilineInput(name="invalid_multiline", value=1234)
-
-
-def test_multiline_secret_input_valid():
- multiline_secret_input = MultilineSecretInput(name="valid_multiline_secret", value="secret")
- assert multiline_secret_input.value == "secret"
- assert multiline_secret_input.password is True
-
-
-def test_multiline_secret_input_invalid():
- with pytest.raises(ValidationError):
- MultilineSecretInput(name="invalid_multiline_secret", value=1234)
-
-
-def test_secret_str_input_valid():
- secret_str_input = SecretStrInput(name="valid_secret_str", value="supersecret")
- assert secret_str_input.value == "supersecret"
- assert secret_str_input.password is True
-
-
-def test_secret_str_input_invalid():
- with pytest.raises(ValidationError):
- SecretStrInput(name="invalid_secret_str", value=1234)
-
-
-def test_int_input_valid():
- int_input = IntInput(name="valid_int", value=10)
- assert int_input.value == 10
-
-
-def test_int_input_invalid():
- with pytest.raises(ValidationError):
- IntInput(name="invalid_int", value="not_an_int")
-
-
-def test_float_input_valid():
- float_input = FloatInput(name="valid_float", value=10.5)
- assert float_input.value == 10.5
-
-
-def test_float_input_invalid():
- with pytest.raises(ValidationError):
- FloatInput(name="invalid_float", value="not_a_float")
-
-
-def test_bool_input_valid():
- bool_input = BoolInput(name="valid_bool", value=True)
- assert bool_input.value is True
-
-
-def test_bool_input_invalid():
- with pytest.raises(ValidationError):
- BoolInput(name="invalid_bool", value="not_a_bool")
-
-
-def test_nested_dict_input_valid():
- nested_dict_input = NestedDictInput(name="valid_nested_dict", value={"key": "value"})
- assert nested_dict_input.value == {"key": "value"}
-
-
-def test_nested_dict_input_invalid():
- with pytest.raises(ValidationError):
- NestedDictInput(name="invalid_nested_dict", value="not_a_dict")
-
-
-def test_dict_input_valid():
- dict_input = DictInput(name="valid_dict", value={"key": "value"})
- assert dict_input.value == {"key": "value"}
-
-
-def test_dict_input_invalid():
- with pytest.raises(ValidationError):
- DictInput(name="invalid_dict", value="not_a_dict")
-
-
-def test_dropdown_input_valid():
- dropdown_input = DropdownInput(name="valid_dropdown", options=["option1", "option2"])
- assert dropdown_input.options == ["option1", "option2"]
-
-
-def test_dropdown_input_invalid():
- with pytest.raises(ValidationError):
- DropdownInput(name="invalid_dropdown", options="option1")
-
-
-def test_multiselect_input_valid():
- multiselect_input = MultiselectInput(name="valid_multiselect", value=["option1", "option2"])
- assert multiselect_input.value == ["option1", "option2"]
-
-
-def test_multiselect_input_invalid():
- with pytest.raises(ValidationError):
- MultiselectInput(name="invalid_multiselect", value="option1")
-
-
-def test_file_input_valid():
- file_input = FileInput(name="valid_file", value=["/path/to/file"])
- assert file_input.value == ["/path/to/file"]
-
-
-@pytest.mark.parametrize(
- ("test_id", "options", "value", "expected_options", "expected_value"),
- [
- (
- "standard_valid",
- ["Tab1", "Tab2", "Tab3"],
- "Tab1",
- ["Tab1", "Tab2", "Tab3"],
- "Tab1",
- ),
- (
- "fewer_options",
- ["Tab1", "Tab2"],
- "Tab2",
- ["Tab1", "Tab2"],
- "Tab2",
- ),
- (
- "empty_options",
- [],
- "",
- [],
- "",
- ),
- ],
-)
-def test_tab_input_valid(test_id, options, value, expected_options, expected_value):
- """Test TabInput validation with valid inputs."""
- data = TabInput(
- name=f"valid_tab_{test_id}",
- options=options,
- value=value,
- )
- assert data.options == expected_options
- assert data.value == expected_value
-
-
-@pytest.mark.parametrize(
- ("test_id", "options", "value", "error_expected"),
- [
- (
- "too_many_options",
- ["Tab1", "Tab2", "Tab3", "Tab4"],
- "Tab1",
- ValidationError,
- ),
- (
- "option_too_long",
- [
- "Tab1",
- "ThisTabValueIsTooLongAndExceedsTwentyCharacters",
- "Tab3",
- ],
- "Tab1",
- ValidationError,
- ),
- (
- "non_string_value",
- ["Tab1", "Tab2", "Tab3"],
- 123,
- TypeError,
- ),
- ],
-)
-def test_tab_input_invalid(test_id, options, value, error_expected):
- """Test TabInput validation with invalid inputs."""
- if error_expected:
- with pytest.raises(error_expected):
- TabInput(
- name=f"invalid_tab_{test_id}",
- options=options,
- value=value,
- )
-
-
-def test_instantiate_input_comprehensive():
- valid_data = {
- "StrInput": {"name": "str_input", "value": "A string"},
- "IntInput": {"name": "int_input", "value": 10},
- "FloatInput": {"name": "float_input", "value": 10.5},
- "BoolInput": {"name": "bool_input", "value": True},
- "DictInput": {"name": "dict_input", "value": {"key": "value"}},
- "MultiselectInput": {
- "name": "multiselect_input",
- "value": ["option1", "option2"],
- },
- "TabInput": {
- "name": "tab_input",
- "options": ["Tab1", "Tab2", "Tab3"],
- "value": "Tab1",
- },
- }
-
- for input_type, data in valid_data.items():
- input_instance = instantiate_input(input_type, data)
- assert isinstance(input_instance, InputTypesMap[input_type])
-
- with pytest.raises(ValueError, match="Invalid input type: InvalidInput"):
- instantiate_input("InvalidInput", {"name": "invalid_input", "value": "Invalid"})
diff --git a/src/backend/tests/unit/interface/__init__.py b/src/backend/tests/unit/interface/__init__.py
deleted file mode 100644
index 0ad41411378b..000000000000
--- a/src/backend/tests/unit/interface/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# Unit tests for interface module
diff --git a/src/backend/tests/unit/interface/initialize/__init__.py b/src/backend/tests/unit/interface/initialize/__init__.py
deleted file mode 100644
index 6ce4cd81ea96..000000000000
--- a/src/backend/tests/unit/interface/initialize/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# Unit tests for initialize module
diff --git a/src/backend/tests/unit/interface/initialize/test_loading.py b/src/backend/tests/unit/interface/initialize/test_loading.py
deleted file mode 100644
index 7a08603083f6..000000000000
--- a/src/backend/tests/unit/interface/initialize/test_loading.py
+++ /dev/null
@@ -1,225 +0,0 @@
-import os
-from unittest.mock import AsyncMock, MagicMock, patch
-
-import pytest
-from langflow.interface.initialize.loading import update_params_with_load_from_db_fields
-
-
-@pytest.mark.asyncio
-async def test_update_params_fallback_to_env_when_variable_not_found():
- """Test that when a variable is not found in database and fallback_to_env_vars is True.
-
- It falls back to environment variables.
- """
- # Set up environment variable
- os.environ["TEST_API_KEY"] = "test-secret-key-123"
-
- # Create mock custom component
- custom_component = MagicMock()
- custom_component.get_variable = AsyncMock(side_effect=ValueError("TEST_API_KEY variable not found."))
-
- # Set up params with a field that should load from db
- params = {"api_key": "TEST_API_KEY"}
- load_from_db_fields = ["api_key"]
-
- # Call the function with fallback enabled
- with patch("langflow.interface.initialize.loading.session_scope") as mock_session_scope:
- mock_session_scope.return_value.__aenter__.return_value = MagicMock()
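- # session_scope is an async context manager, so the mocked session comes from __aenter__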
-
- result = await update_params_with_load_from_db_fields(
- custom_component, params, load_from_db_fields, fallback_to_env_vars=True
- )
-
- # Should have fallen back to environment variable
- assert result["api_key"] == "test-secret-key-123"
-
- # Clean up
- del os.environ["TEST_API_KEY"]
-
-
-@pytest.mark.asyncio
-async def test_update_params_raises_when_variable_not_found_and_no_fallback():
- """Test that when a variable is not found and fallback_to_env_vars is False.
-
- It raises the error.
- """
- # Create mock custom component
- custom_component = MagicMock()
- custom_component.get_variable = AsyncMock(side_effect=ValueError("TEST_API_KEY variable not found."))
-
- # Set up params
- params = {"api_key": "TEST_API_KEY"}
- load_from_db_fields = ["api_key"]
-
- # Call the function with fallback disabled
- with patch("langflow.interface.initialize.loading.session_scope") as mock_session_scope:
- mock_session_scope.return_value.__aenter__.return_value = MagicMock()
-
- with pytest.raises(ValueError, match="TEST_API_KEY variable not found"):
- await update_params_with_load_from_db_fields(
- custom_component, params, load_from_db_fields, fallback_to_env_vars=False
- )
-
-
-@pytest.mark.asyncio
-async def test_update_params_uses_database_variable_when_found():
- """Test that when a variable is found in database, it uses that value.
-
- It doesn't check environment variables.
- """
- # Set up environment variable (should not be used)
- os.environ["TEST_API_KEY"] = "env-value"
-
- # Create mock custom component
- custom_component = MagicMock()
- custom_component.get_variable = AsyncMock(return_value="db-value")
-
- # Set up params
- params = {"api_key": "TEST_API_KEY"}
- load_from_db_fields = ["api_key"]
-
- # Call the function
- with patch("langflow.interface.initialize.loading.session_scope") as mock_session_scope:
- mock_session_scope.return_value.__aenter__.return_value = MagicMock()
-
- result = await update_params_with_load_from_db_fields(
- custom_component, params, load_from_db_fields, fallback_to_env_vars=True
- )
-
- # Should use database value, not environment value
- assert result["api_key"] == "db-value"
-
- # Clean up
- del os.environ["TEST_API_KEY"]
-
-
-@pytest.mark.asyncio
-async def test_update_params_sets_none_when_no_env_var_and_fallback_enabled():
- """Test that when variable not found in db and env var doesn't exist.
-
- The field is set to None.
- """
- # Make sure env var doesn't exist
- if "NONEXISTENT_KEY" in os.environ:
- del os.environ["NONEXISTENT_KEY"]
-
- # Create mock custom component
- custom_component = MagicMock()
- custom_component.get_variable = AsyncMock(side_effect=ValueError("NONEXISTENT_KEY variable not found."))
-
- # Set up params
- params = {"api_key": "NONEXISTENT_KEY"}
- load_from_db_fields = ["api_key"]
-
- # Call the function with fallback enabled
- with patch("langflow.interface.initialize.loading.session_scope") as mock_session_scope:
- mock_session_scope.return_value.__aenter__.return_value = MagicMock()
-
- result = await update_params_with_load_from_db_fields(
- custom_component, params, load_from_db_fields, fallback_to_env_vars=True
- )
-
- # Should be set to None
- assert result["api_key"] is None
-
-
-@pytest.mark.asyncio
-async def test_update_params_raises_on_user_id_not_set():
- """Test that 'User id is not set' error is always raised regardless of fallback setting."""
- # Create mock custom component
- custom_component = MagicMock()
- custom_component.get_variable = AsyncMock(side_effect=ValueError("User id is not set"))
-
- # Set up params
- params = {"api_key": "SOME_KEY"}
- load_from_db_fields = ["api_key"]
-
- # Should raise with fallback enabled
- with patch("langflow.interface.initialize.loading.session_scope") as mock_session_scope:
- mock_session_scope.return_value.__aenter__.return_value = MagicMock()
-
- with pytest.raises(ValueError, match="User id is not set"):
- await update_params_with_load_from_db_fields(
- custom_component, params, load_from_db_fields, fallback_to_env_vars=True
- )
-
- # Should also raise with fallback disabled
- with patch("langflow.interface.initialize.loading.session_scope") as mock_session_scope:
- mock_session_scope.return_value.__aenter__.return_value = MagicMock()
-
- with pytest.raises(ValueError, match="User id is not set"):
- await update_params_with_load_from_db_fields(
- custom_component, params, load_from_db_fields, fallback_to_env_vars=False
- )
-
-
-@pytest.mark.asyncio
-async def test_update_params_skips_empty_fields():
- """Test that empty or None fields in params are skipped."""
- # Create mock custom component
- custom_component = MagicMock()
- custom_component.get_variable = AsyncMock(return_value="some-value")
-
- # Set up params with empty and None values
- params = {"api_key": "", "another_key": None, "valid_key": "VALID_KEY"}
- load_from_db_fields = ["api_key", "another_key", "valid_key"]
-
- # Call the function
- with patch("langflow.interface.initialize.loading.session_scope") as mock_session_scope:
- mock_session_scope.return_value.__aenter__.return_value = MagicMock()
-
- result = await update_params_with_load_from_db_fields(
- custom_component, params, load_from_db_fields, fallback_to_env_vars=True
- )
-
- # Only valid_key should have been processed
- assert result["api_key"] == ""
- assert result["another_key"] is None
- assert result["valid_key"] == "some-value"
-
- # get_variable should only be called once for valid_key
- custom_component.get_variable.assert_called_once_with(
- name="VALID_KEY", field="valid_key", session=mock_session_scope.return_value.__aenter__.return_value
- )
-
-
-@pytest.mark.asyncio
-async def test_update_params_handles_multiple_fields():
- """Test that multiple fields are processed correctly with mixed results."""
- # Set up environment variables
- os.environ["ENV_KEY"] = "env-value"
-
- # Create mock custom component
- custom_component = MagicMock()
-
- # Set up different responses for different fields
- async def mock_get_variable(name, **_kwargs):
- if name == "DB_KEY":
- return "db-value"
- if name == "ENV_KEY":
- error_msg = "ENV_KEY variable not found."
- raise ValueError(error_msg)
- error_msg = f"{name} variable not found."
- raise ValueError(error_msg)
-
- custom_component.get_variable = AsyncMock(side_effect=mock_get_variable)
-
- # Set up params
- params = {"field1": "DB_KEY", "field2": "ENV_KEY", "field3": "MISSING_KEY"}
- load_from_db_fields = ["field1", "field2", "field3"]
-
- # Call the function
- with patch("langflow.interface.initialize.loading.session_scope") as mock_session_scope:
- mock_session_scope.return_value.__aenter__.return_value = MagicMock()
-
- result = await update_params_with_load_from_db_fields(
- custom_component, params, load_from_db_fields, fallback_to_env_vars=True
- )
-
- # Check results
- assert result["field1"] == "db-value" # From database
- assert result["field2"] == "env-value" # From environment
- assert result["field3"] is None # Not found anywhere
-
- # Clean up
- del os.environ["ENV_KEY"]
diff --git a/src/backend/tests/unit/io/__init__.py b/src/backend/tests/unit/io/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/src/backend/tests/unit/io/test_io_schema.py b/src/backend/tests/unit/io/test_io_schema.py
deleted file mode 100644
index 4ecc48ade4ce..000000000000
--- a/src/backend/tests/unit/io/test_io_schema.py
+++ /dev/null
@@ -1,174 +0,0 @@
-from typing import TYPE_CHECKING, Literal
-
-import pytest
-
-from lfx.components.input_output import ChatInput
-from lfx.inputs.inputs import DropdownInput, FileInput, IntInput, NestedDictInput, StrInput
-from lfx.io.schema import create_input_schema
-
-if TYPE_CHECKING:
- from pydantic.fields import FieldInfo
-
-
-def test_create_input_schema():
- schema = create_input_schema(ChatInput.inputs)
- assert schema.__name__ == "InputSchema"
-
-
-class TestCreateInputSchema:
- # Single input type is converted to list and processed correctly
- def test_single_input_type_conversion(self):
- input_instance = StrInput(name="test_field")
- schema = create_input_schema([input_instance])
- assert schema.__name__ == "InputSchema"
- assert "test_field" in schema.model_fields
-
- # Multiple input types are processed and included in the schema
- def test_multiple_input_types(self):
- inputs = [StrInput(name="str_field"), IntInput(name="int_field")]
- schema = create_input_schema(inputs)
- assert schema.__name__ == "InputSchema"
- assert "str_field" in schema.model_fields
- assert "int_field" in schema.model_fields
-
- # Fields are correctly created with appropriate types and attributes
- def test_fields_creation_with_correct_types_and_attributes(self):
- input_instance = StrInput(name="test_field", info="Test Info", required=True)
- schema = create_input_schema([input_instance])
- field_info = schema.model_fields["test_field"]
- assert field_info.description == "Test Info"
- assert field_info.is_required() is True
-
- # Schema model is created and returned successfully
- def test_schema_model_creation(self):
- input_instance = StrInput(name="test_field")
- schema = create_input_schema([input_instance])
- assert schema.__name__ == "InputSchema"
-
- # Default values are correctly assigned to fields
- def test_default_values_assignment(self):
- input_instance = StrInput(name="test_field", value="default_value")
- schema = create_input_schema([input_instance])
- field_info = schema.model_fields["test_field"]
- assert field_info.default == "default_value"
-
- # Empty list of inputs is handled without errors
- def test_empty_list_of_inputs(self):
- schema = create_input_schema([])
- assert schema.__name__ == "InputSchema"
-
- # Input with missing optional attributes (e.g., display_name, info) is processed correctly
- def test_missing_optional_attributes(self):
- input_instance = StrInput(name="test_field")
- schema = create_input_schema([input_instance])
- field_info = schema.model_fields["test_field"]
- assert field_info.title == "Test Field"
- assert field_info.description == ""
-
- # Input with is_list attribute set to True is processed correctly
- def test_is_list_attribute_processing(self):
- input_instance = StrInput(name="test_field", is_list=True)
- schema = create_input_schema([input_instance])
- field_info: FieldInfo = schema.model_fields["test_field"]
- assert field_info.annotation == list[str]
-
- # Input with options attribute is processed correctly
- def test_options_attribute_processing(self):
- input_instance = DropdownInput(name="test_field", options=["option1", "option2"])
- schema = create_input_schema([input_instance])
- field_info = schema.model_fields["test_field"]
- assert field_info.annotation == Literal["option1", "option2"]
-
- # Non-standard field types are handled correctly
- def test_non_standard_field_types_handling(self):
- input_instance = FileInput(name="file_field")
- schema = create_input_schema([input_instance])
- field_info = schema.model_fields["file_field"]
- assert field_info.annotation is str
-
- # Inputs with mixed required and optional fields are processed correctly
- def test_mixed_required_optional_fields_processing(self):
- inputs = [
- StrInput(name="required_field", required=True),
- IntInput(name="optional_field", required=False),
- ]
- schema = create_input_schema(inputs)
- required_field_info = schema.model_fields["required_field"]
- optional_field_info = schema.model_fields["optional_field"]
-
- assert required_field_info.is_required() is True
- assert optional_field_info.is_required() is False
-
- # Inputs with complex nested structures are handled correctly
- def test_complex_nested_structures_handling(self):
- nested_input = NestedDictInput(name="nested_field", value={"key": "value"})
- schema = create_input_schema([nested_input])
-
- field_info = schema.model_fields["nested_field"]
-
- assert isinstance(field_info.default, dict)
- assert field_info.default["key"] == "value"
-
- # Creating a schema from a single input type
- def test_single_input_type_replica(self):
- input_instance = StrInput(name="test_field")
- schema = create_input_schema([input_instance])
- assert schema.__name__ == "InputSchema"
- assert "test_field" in schema.model_fields
-
- # Passing a tuple of inputs directly (instead of a list) raises TypeError
- def test_passing_input_type_directly(self):
- inputs = StrInput(name="str_field"), IntInput(name="int_field")
- with pytest.raises(TypeError):
- create_input_schema(inputs)
-
- # Handling input types with options correctly
- def test_options_handling(self):
- input_instance = DropdownInput(name="test_field", options=["option1", "option2"])
- schema = create_input_schema([input_instance])
- field_info = schema.model_fields["test_field"]
- assert field_info.annotation == Literal["option1", "option2"]
-
- # Handling input types with is_list attribute correctly
- def test_is_list_handling(self):
- input_instance = StrInput(name="test_field", is_list=True)
- schema = create_input_schema([input_instance])
- field_info = schema.model_fields["test_field"]
- assert field_info.annotation == list[str]
-
- # Converting FieldTypes to corresponding Python types
- def test_field_types_conversion(self):
- input_instance = IntInput(name="int_field")
- schema = create_input_schema([input_instance])
- field_info = schema.model_fields["int_field"]
- assert field_info.annotation is int # Use 'is' for type comparison
-
- # Setting default values for non-required fields
- def test_default_values_for_non_required_fields(self):
- input_instance = StrInput(name="test_field", value="default_value")
- schema = create_input_schema([input_instance])
- field_info = schema.model_fields["test_field"]
- assert field_info.default == "default_value"
-
- # Handling input types with missing attributes
- def test_missing_attributes_handling(self):
- input_instance = StrInput(name="test_field")
- schema = create_input_schema([input_instance])
- field_info = schema.model_fields["test_field"]
- assert field_info.title == "Test Field"
- assert field_info.description == ""
-
- # Handling invalid field types
-
- # Handling input types with None as default value
- def test_none_default_value_handling(self):
- input_instance = StrInput(name="test_field", value=None)
- schema = create_input_schema([input_instance])
- field_info = schema.model_fields["test_field"]
- assert field_info.default is None
-
- # Handling input types with special characters in names
- def test_special_characters_in_names_handling(self):
- input_instance = StrInput(name="test@field#name")
- schema = create_input_schema([input_instance])
- assert "test@field#name" in schema.model_fields
diff --git a/src/backend/tests/unit/io/test_table_schema.py b/src/backend/tests/unit/io/test_table_schema.py
deleted file mode 100644
index 7c2144bd73f8..000000000000
--- a/src/backend/tests/unit/io/test_table_schema.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# Generated by qodo Gen
-
-import pytest
-
-from lfx.schema.table import Column, FormatterType
-
-
-class TestColumn:
- # Creating a Column instance without display_name sets it to the name
- def test_create_column_without_display_name(self):
- column = Column(name="test_column")
- assert column.display_name == "test_column"
-
- # Creating a Column instance with valid formatter values
- def test_create_column_with_valid_formatter(self):
- column = Column(display_name="Test Column", name="test_column", formatter="date")
- assert column.formatter == FormatterType.date
-
- # Formatter is set based on provided formatter value
- def test_formatter_set_based_on_value(self):
- column = Column(display_name="Test Column", name="test_column", formatter="int")
- assert column.formatter == FormatterType.number
-
- # Default values for sortable and filterable are set to True
- def test_default_sortable_filterable(self):
- column = Column(display_name="Test Column", name="test_column")
- assert column.sortable is True
- assert column.filterable is True
-
- # Ensure formatter field is correctly set when provided a FormatterType
- def test_formatter_explicitly_set_to_enum(self):
- column = Column(display_name="Date Column", name="date_column", formatter=FormatterType.date)
- assert column.formatter == FormatterType.date
-
- # Invalid formatter raises ValueError
- def test_invalid_formatter_raises_value_error(self):
- with pytest.raises(ValueError, match="'invalid' is not a valid FormatterType"):
- Column(display_name="Invalid Column", name="invalid_column", formatter="invalid")
-
- # Formatter is None when not provided
- def test_formatter_none_when_not_provided(self):
- column = Column(display_name="Test Column", name="test_column")
- assert column.formatter is None
-
- # Description and default can be set
- def test_description_and_default(self):
- column = Column(
- display_name="Test Column", name="test_column", description="A test column", default="default_value"
- )
- assert column.description == "A test column"
- assert column.default == "default_value"
-
- def test_create_with_type_instead_of_formatter(self):
- column = Column(display_name="Test Column", name="test_column", type="date")
- assert column.formatter == FormatterType.date
diff --git a/src/backend/tests/unit/mock_language_model.py b/src/backend/tests/unit/mock_language_model.py
deleted file mode 100644
index 70192c65425e..000000000000
--- a/src/backend/tests/unit/mock_language_model.py
+++ /dev/null
@@ -1,72 +0,0 @@
-from unittest.mock import MagicMock
-
-from langchain_core.language_models import BaseLanguageModel
-from pydantic import BaseModel, Field
-from typing_extensions import override
-
-
-class MockLanguageModel(BaseLanguageModel, BaseModel):
- """A mock language model for testing purposes."""
-
- tools: list = Field(default_factory=list)
- response_generator: callable = Field(default_factory=lambda: lambda msg: f"Response for {msg}")
-
- def __init__(self, response_generator=None, **kwargs):
- """Initialize the mock model with an optional response generator function."""
- super().__init__(**kwargs)
- if response_generator:
- self.response_generator = response_generator
-
- @override
- def with_config(self, *args, **kwargs):
- return self
-
- @override
- def with_structured_output(self, *args, **kwargs):
- return self
-
- @override
- async def abatch(self, messages, *args, **kwargs):
- if not messages:
- return []
- # If message is a list of dicts (chat format), get the last user message
- responses = []
- for msg_list in messages:
- content = msg_list[-1]["content"] if isinstance(msg_list, list) else msg_list
- mock_response = MagicMock()
- mock_response.content = self.response_generator(content)
- responses.append(mock_response)
- return responses
-
- @override
- def invoke(self, *args, **kwargs):
- return self
-
- @override
- def generate_prompt(self, *args, **kwargs):
- raise NotImplementedError
-
- @override
- async def agenerate_prompt(self, *args, **kwargs):
- raise NotImplementedError
-
- @override
- def predict(self, *args, **kwargs):
- raise NotImplementedError
-
- @override
- def predict_messages(self, *args, **kwargs):
- raise NotImplementedError
-
- @override
- async def apredict(self, *args, **kwargs):
- raise NotImplementedError
-
- @override
- async def apredict_messages(self, *args, **kwargs):
- raise NotImplementedError
-
- def bind_tools(self, tools, tool_choice=None): # noqa: ARG002
- """Bind tools to the model for testing."""
- self.tools = tools
- return self
diff --git a/src/backend/tests/unit/serialization/__init__.py b/src/backend/tests/unit/serialization/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/src/backend/tests/unit/serialization/test_serialization.py b/src/backend/tests/unit/serialization/test_serialization.py
deleted file mode 100644
index aa35c466a62c..000000000000
--- a/src/backend/tests/unit/serialization/test_serialization.py
+++ /dev/null
@@ -1,346 +0,0 @@
-import math
-from datetime import datetime, timezone
-from typing import Any
-
-import numpy as np
-import pandas as pd
-from hypothesis import given, settings
-from hypothesis import strategies as st
-from langchain_core.documents import Document
-from langflow.serialization.constants import MAX_ITEMS_LENGTH, MAX_TEXT_LENGTH
-from langflow.serialization.serialization import serialize, serialize_or_str
-from pydantic import BaseModel as PydanticBaseModel
-from pydantic.v1 import BaseModel as PydanticV1BaseModel
-
-# Comprehensive hypothesis strategies
-text_strategy = st.text(min_size=0, max_size=MAX_TEXT_LENGTH * 3)
-bytes_strategy = st.binary(min_size=0, max_size=MAX_TEXT_LENGTH * 3)
-datetime_strategy = st.datetimes(
- min_value=datetime.min, # noqa: DTZ901 - Hypothesis requires naive datetime bounds
- max_value=datetime.max, # noqa: DTZ901 - Hypothesis requires naive datetime bounds
- timezones=st.sampled_from([timezone.utc, None]),
-)
-decimal_strategy = st.decimals(min_value=-1e6, max_value=1e6, allow_nan=False, allow_infinity=False, places=10)
-uuid_strategy = st.uuids()
-list_strategy = st.lists(st.one_of(st.integers(), st.text(), st.floats()), min_size=0, max_size=MAX_ITEMS_LENGTH * 3)
-dict_strategy = st.dictionaries(
- keys=st.text(min_size=1),
- values=st.one_of(st.integers(), st.floats(), st.text(), st.booleans(), st.none()),
- min_size=0,
- max_size=MAX_ITEMS_LENGTH,
-)
-
-# Complex nested structure strategy
-nested_strategy = st.recursive(
- st.one_of(st.integers(), st.floats(), st.text(), st.booleans()),
- lambda children: st.lists(children) | st.dictionaries(st.text(), children),
- max_leaves=10,
-)
-
-
-# Pydantic models for testing
-class ModernModel(PydanticBaseModel):
- name: str
- value: int
-
-
-class LegacyModel(PydanticV1BaseModel):
- name: str
- value: int
-
-
-class TestSerializationHypothesis:
- """Hypothesis-based property tests for serialization logic."""
-
- @settings(max_examples=100)
- @given(text=text_strategy)
- def test_string_serialization(self, text: str) -> None:
- result: str = serialize(text)
- if len(text) > MAX_TEXT_LENGTH:
- expected: str = text[:MAX_TEXT_LENGTH] + "..."
- assert result == expected
- else:
- assert result == text
-
- @settings(max_examples=100)
- @given(data=bytes_strategy)
- def test_bytes_serialization(self, data: bytes) -> None:
- result: str = serialize(data)
- decoded: str = data.decode("utf-8", errors="ignore")
- if len(decoded) > MAX_TEXT_LENGTH:
- expected: str = decoded[:MAX_TEXT_LENGTH] + "..."
- assert result == expected
- else:
- assert result == decoded
-
- @settings(max_examples=100)
- @given(dt=datetime_strategy)
- def test_datetime_serialization(self, dt: datetime) -> None:
- result: str = serialize(dt)
- assert result == dt.replace(tzinfo=timezone.utc).isoformat()
-
- @settings(max_examples=100)
- @given(dec=decimal_strategy)
- def test_decimal_serialization(self, dec) -> None:
- result: float = serialize(dec)
- assert result == float(dec)
-
- @settings(max_examples=100)
- @given(uid=uuid_strategy)
- def test_uuid_serialization(self, uid) -> None:
- result: str = serialize(uid)
- assert result == str(uid)
-
- @settings(max_examples=100)
- @given(lst=list_strategy)
- def test_list_truncation(self, lst: list) -> None:
- result: list = serialize(lst, max_items=MAX_ITEMS_LENGTH)
- if len(lst) > MAX_ITEMS_LENGTH:
- assert len(result) == MAX_ITEMS_LENGTH + 1
- assert f"... [truncated {len(lst) - MAX_ITEMS_LENGTH} items]" in result
- else:
- assert result == lst
-
- @settings(max_examples=100)
- @given(dct=dict_strategy)
- def test_dict_serialization(self, dct: dict) -> None:
- result: dict = serialize(dct)
- assert isinstance(result, dict)
- for k, v in result.items():
- assert isinstance(k, str)
- assert isinstance(v, int | float | str | bool | type(None))
-
- @settings(max_examples=100)
- @given(value=st.integers())
- def test_pydantic_modern_model(self, value: int) -> None:
- model: ModernModel = ModernModel(name="test", value=value)
- result: dict = serialize(model)
- assert result == {"name": "test", "value": value}
-
- @settings(max_examples=100)
- @given(value=st.integers())
- def test_pydantic_v1_model(self, value: int) -> None:
- model: LegacyModel = LegacyModel(name="test", value=value)
- result: dict = serialize(model)
- assert result == {"name": "test", "value": value}
-
- def test_async_iterator_handling(self) -> None:
- async def async_gen():
- yield 1
- yield 2
-
- gen = async_gen()
- result: str = serialize(gen)
- assert result == "Unconsumed Stream"
-
- @settings(max_examples=100)
- @given(data=st.one_of(st.integers(), st.floats(allow_nan=True), st.booleans(), st.none()))
- def test_primitive_types(self, data: float | bool | None) -> None: # noqa: FBT001
- result: int | float | bool | None = serialize(data)
- if isinstance(data, float) and math.isnan(data) and isinstance(result, float):
- assert math.isnan(result)
- else:
- assert result == data
-
- @settings(max_examples=100)
- @given(nested=nested_strategy)
- def test_nested_structures(self, nested: Any) -> None:
- result: list | dict | int | float | str | bool = serialize(nested)
- assert isinstance(result, list | dict | int | float | str | bool)
-
- @settings(max_examples=100)
- @given(text=text_strategy)
- def test_max_length_none(self, text: str) -> None:
- result: str = serialize(text, max_length=None)
- assert result == text
-
- @settings(max_examples=100)
- @given(lst=list_strategy)
- def test_max_items_none(self, lst: list) -> None:
- result: list = serialize(lst, max_items=None)
- assert result == lst
-
- @settings(max_examples=100)
- @given(obj=st.builds(object))
- def test_fallback_serialization(self, obj: object) -> None:
- result: str = serialize_or_str(obj)
- assert isinstance(result, str)
- assert str(obj) in result
-
- def test_document_serialization(self) -> None:
- doc: Document = Document(page_content="test", metadata={"source": "test"})
- result: dict = serialize(doc)
- assert isinstance(result, dict)
- assert "kwargs" in result
- assert "page_content" in result["kwargs"]
- assert result["kwargs"]["page_content"] == "test"
- assert "metadata" in result["kwargs"]
- assert result["kwargs"]["metadata"] == {"source": "test"}
-
- def test_class_serialization(self) -> None:
- class TestClass:
- def __init__(self, value: Any) -> None:
- self.value = value
-
- result: str = serialize(TestClass)
- assert result == str(TestClass)
-
- def test_instance_serialization(self) -> None:
- class TestClass:
- def __init__(self, value: int) -> None:
- self.value = value
-
- instance: TestClass = TestClass(42)
- result: str = serialize(instance)
- assert result == str(instance)
-
- def test_pydantic_class_serialization(self) -> None:
- result: str = serialize(ModernModel)
- assert result == repr(ModernModel)
-
- def test_builtin_type_serialization(self) -> None:
- result: str = serialize(int)
- assert result == repr(int)
-
- def test_none_serialization(self) -> None:
- result: None = serialize(None)
- assert result is None
-
- def test_custom_type_serialization(self) -> None:
- from typing import TypeVar
-
- T = TypeVar("T")
- result: str = serialize(T)
- assert result == repr(T)
-
- def test_nested_class_serialization(self) -> None:
- class Outer:
- class Inner:
- pass
-
- result: str = serialize(Outer.Inner)
- assert result == str(Outer.Inner)
-
- def test_enum_serialization(self) -> None:
- from enum import Enum
-
- class TestEnum(Enum):
- A = 1
- B = 2
-
- result: str = serialize(TestEnum.A)
- assert result == "TestEnum.A"
-
- def test_type_alias_serialization(self) -> None:
- IntList = list[int] # noqa: N806
- result: str = serialize(IntList)
- assert result == repr(IntList)
-
- def test_generic_type_serialization(self) -> None:
- from typing import Generic, TypeVar
-
- T = TypeVar("T")
-
- class Box(Generic[T]):
- pass
-
- result: str = serialize(Box[int])
- assert result == repr(Box[int])
-
- def test_numpy_int64_serialization(self) -> None:
- """Test serialization of numpy.int64 values."""
- np_int = np.int64(42)
- result = serialize(np_int)
- assert result == 42
- assert isinstance(result, int)
-
- def test_numpy_numeric_serialization(self) -> None:
- """Test serialization of various numpy numeric types."""
- # Test integers
- assert serialize(np.int64(42)) == 42
- assert isinstance(serialize(np.int64(42)), int)
-
- # Test unsigned integers
- assert serialize(np.uint64(42)) == 42
- assert isinstance(serialize(np.uint64(42)), int)
-
- # Test floats
- assert serialize(np.float64(math.pi)) == math.pi
- assert isinstance(serialize(np.float64(math.pi)), float)
-
- # Test float32 (need to account for precision differences)
- float32_val = serialize(np.float32(math.pi))
- assert isinstance(float32_val, float)
- assert abs(float32_val - math.pi) < 1e-6 # Check if close enough
-
- # Test bool
- assert serialize(np.bool_(True)) is True # noqa: FBT003
- assert isinstance(serialize(np.bool_(True)), bool) # noqa: FBT003
-
- # Test complex numbers
- complex_val = serialize(np.complex64(1 + 2j))
- assert isinstance(complex_val, complex)
- assert abs(complex_val - (1 + 2j)) < 1e-6
-
- # Test strings
- assert serialize(np.str_("hello")) == "hello"
- assert isinstance(serialize(np.str_("hello")), str)
-
- # Test bytes
- bytes_val = np.bytes_(b"world")
- assert serialize(bytes_val) == "world"
- assert isinstance(serialize(bytes_val), str)
-
- # Test unicode
- assert serialize(np.str_("unicode")) == "unicode"
- assert isinstance(serialize(np.str_("unicode")), str)
-
- # Test object arrays
- obj_array = np.array([1, "two", 3.0], dtype=object)
- result = serialize(obj_array[0])
- assert result == 1
- assert isinstance(result, int)
-
- result = serialize(obj_array[1])
- assert result == "two"
- assert isinstance(result, str)
-
- result = serialize(obj_array[2])
- assert result == 3.0
- assert isinstance(result, float)
-
- def test_pandas_serialization(self) -> None:
- """Test serialization of pandas DataFrame."""
- # Test DataFrame
- test_df = pd.DataFrame({"A": [1, 2, 3], "B": ["a", "b", "c"], "C": [1.1, 2.2, 3.3]})
- result = serialize(test_df)
- assert isinstance(result, list) # DataFrame is serialized to list of records
- assert len(result) == 3
- assert all(isinstance(row, dict) for row in result)
- assert all("A" in row and "B" in row and "C" in row for row in result)
- assert result[0] == {"A": 1, "B": "a", "C": 1.1}
-
- # Test DataFrame truncation
- df_long = pd.DataFrame({"A": range(MAX_ITEMS_LENGTH + 100)})
- result = serialize(df_long, max_items=MAX_ITEMS_LENGTH)
- assert isinstance(result, list)
- assert len(result) == MAX_ITEMS_LENGTH
- assert all("A" in row for row in result)
-
- def test_series_serialization(self) -> None:
- """Test serialization of pandas Series."""
- # Test Series
- series = pd.Series([1, 2, 3], name="test")
- result = serialize(series)
- assert isinstance(result, dict)
- assert len(result) == 3
- assert all(isinstance(v, int) for v in result.values())
-
- def test_series_truncation(self) -> None:
- """Test truncation of pandas Series."""
- # Test Series
- series_long = pd.Series(range(MAX_ITEMS_LENGTH + 100), name="test_long")
- result = serialize(series_long, max_items=MAX_ITEMS_LENGTH)
- assert isinstance(result, dict)
- assert len(result) == MAX_ITEMS_LENGTH
- assert all(isinstance(v, int) for v in result.values())
diff --git a/src/backend/tests/unit/services/__init__.py b/src/backend/tests/unit/services/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/src/backend/tests/unit/services/auth/__init__.py b/src/backend/tests/unit/services/auth/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/src/backend/tests/unit/services/auth/test_mcp_encryption.py b/src/backend/tests/unit/services/auth/test_mcp_encryption.py
deleted file mode 100644
index 2c609f7dde57..000000000000
--- a/src/backend/tests/unit/services/auth/test_mcp_encryption.py
+++ /dev/null
@@ -1,167 +0,0 @@
-"""Test MCP authentication encryption functionality."""
-
-from unittest.mock import Mock, patch
-
-import pytest
-from cryptography.fernet import Fernet
-from langflow.services.auth.mcp_encryption import (
- decrypt_auth_settings,
- encrypt_auth_settings,
- is_encrypted,
-)
-from pydantic import SecretStr
-
-
-@pytest.fixture
-def mock_settings_service():
- """Mock settings service for testing."""
- mock_service = Mock()
- # Generate a valid Fernet key that's already properly formatted
- # Fernet.generate_key() returns a URL-safe base64-encoded 32-byte key
- valid_key = Fernet.generate_key()
- # Decode it to string for storage
- valid_key_str = valid_key.decode("utf-8")
-
- # Create a proper SecretStr object
- secret_key_obj = SecretStr(valid_key_str)
- mock_service.auth_settings.SECRET_KEY = secret_key_obj
- return mock_service
-
-
-@pytest.fixture
-def sample_auth_settings():
- """Sample auth settings with sensitive data."""
- return {
- "auth_type": "oauth",
- "oauth_host": "localhost",
- "oauth_port": "3000",
- "oauth_server_url": "http://localhost:3000",
- "oauth_callback_path": "/callback",
- "oauth_client_id": "my-client-id",
- "oauth_client_secret": "super-secret-password-123",
- "oauth_auth_url": "https://oauth.example.com/auth",
- "oauth_token_url": "https://oauth.example.com/token",
- "oauth_mcp_scope": "read write",
- "oauth_provider_scope": "user:email",
- }
-
-
-class TestMCPEncryption:
- """Test MCP encryption functionality."""
-
- @patch("langflow.services.auth.mcp_encryption.get_settings_service")
- def test_encrypt_auth_settings(self, mock_get_settings, mock_settings_service, sample_auth_settings):
- """Test that sensitive fields are encrypted."""
- mock_get_settings.return_value = mock_settings_service
-
- # Encrypt the settings
- encrypted = encrypt_auth_settings(sample_auth_settings)
-
- # Check that sensitive fields are encrypted
- assert encrypted is not None
- assert encrypted["oauth_client_secret"] != sample_auth_settings["oauth_client_secret"]
-
- # Check that non-sensitive fields remain unchanged
- assert encrypted["auth_type"] == sample_auth_settings["auth_type"]
- assert encrypted["oauth_host"] == sample_auth_settings["oauth_host"]
- assert encrypted["oauth_client_id"] == sample_auth_settings["oauth_client_id"]
-
- @patch("langflow.services.auth.mcp_encryption.get_settings_service")
- def test_decrypt_auth_settings(self, mock_get_settings, mock_settings_service, sample_auth_settings):
- """Test that encrypted fields can be decrypted."""
- mock_get_settings.return_value = mock_settings_service
-
- # First encrypt the settings
- encrypted = encrypt_auth_settings(sample_auth_settings)
-
- # Then decrypt them
- decrypted = decrypt_auth_settings(encrypted)
-
- # Verify all fields match the original
- assert decrypted == sample_auth_settings
-
- @patch("langflow.services.auth.mcp_encryption.get_settings_service")
- def test_encrypt_none_returns_none(self, mock_get_settings): # noqa: ARG002
- """Test that encrypting None returns None."""
- result = encrypt_auth_settings(None)
- assert result is None
-
- @patch("langflow.services.auth.mcp_encryption.get_settings_service")
- def test_decrypt_none_returns_none(self, mock_get_settings): # noqa: ARG002
- """Test that decrypting None returns None."""
- result = decrypt_auth_settings(None)
- assert result is None
-
- @patch("langflow.services.auth.mcp_encryption.get_settings_service")
- def test_encrypt_empty_dict(self, mock_get_settings): # noqa: ARG002
- """Test that encrypting empty dict returns empty dict."""
- result = encrypt_auth_settings({})
- assert result == {}
-
- @patch("langflow.services.auth.mcp_encryption.get_settings_service")
- def test_idempotent_encryption(self, mock_get_settings, mock_settings_service, sample_auth_settings):
- """Test that encrypting already encrypted data doesn't double-encrypt."""
- mock_get_settings.return_value = mock_settings_service
-
- # First encryption
- encrypted_once = encrypt_auth_settings(sample_auth_settings)
-
- # Second encryption should detect already encrypted fields
- encrypted_twice = encrypt_auth_settings(encrypted_once)
-
- # Should be the same
- assert encrypted_once == encrypted_twice
-
- @patch("langflow.services.auth.mcp_encryption.get_settings_service")
- def test_partial_auth_settings(self, mock_get_settings, mock_settings_service):
- """Test encryption with only some sensitive fields present."""
- mock_get_settings.return_value = mock_settings_service
-
- partial_settings = {
- "auth_type": "api",
- "api_key": "sk-test-api-key-123",
- "username": "admin",
- }
-
- encrypted = encrypt_auth_settings(partial_settings)
-
- # API key should be encrypted
- assert encrypted["api_key"] != partial_settings["api_key"]
-
- # Other fields unchanged
- assert encrypted["auth_type"] == partial_settings["auth_type"]
- assert encrypted["username"] == partial_settings["username"]
-
- @patch("langflow.services.auth.mcp_encryption.get_settings_service")
- def test_backward_compatibility(self, mock_get_settings, mock_settings_service):
- """Test that plaintext data is handled gracefully during decryption."""
- mock_get_settings.return_value = mock_settings_service
-
- # Simulate legacy plaintext data
- plaintext_settings = {
- "auth_type": "oauth",
- "oauth_client_secret": "plaintext-secret",
- "oauth_client_id": "client-123",
- }
-
- # Decryption should handle plaintext gracefully
- decrypted = decrypt_auth_settings(plaintext_settings)
-
- # Should return the same data
- assert decrypted == plaintext_settings
-
- @patch("langflow.services.auth.mcp_encryption.get_settings_service")
- def test_is_encrypted(self, mock_get_settings, mock_settings_service):
- """Test the is_encrypted helper function."""
- mock_get_settings.return_value = mock_settings_service
-
- # Test with plaintext
- assert not is_encrypted("plaintext-value")
- assert not is_encrypted("")
- assert not is_encrypted(None)
-
- # Test with encrypted value
- from langflow.services.auth import utils as auth_utils
-
- encrypted_value = auth_utils.encrypt_api_key("secret-value", mock_settings_service)
- assert is_encrypted(encrypted_value)
diff --git a/src/backend/tests/unit/services/database/__init__.py b/src/backend/tests/unit/services/database/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/src/backend/tests/unit/services/database/test_vertex_builds.py b/src/backend/tests/unit/services/database/test_vertex_builds.py
deleted file mode 100644
index b1e42a1e6c54..000000000000
--- a/src/backend/tests/unit/services/database/test_vertex_builds.py
+++ /dev/null
@@ -1,317 +0,0 @@
-from datetime import datetime, timedelta, timezone
-from unittest.mock import patch
-from uuid import uuid4
-
-import pytest
-from langflow.services.database.models.vertex_builds.crud import log_vertex_build
-from langflow.services.database.models.vertex_builds.model import VertexBuildBase, VertexBuildTable
-from sqlalchemy import delete, func, select
-from sqlalchemy.ext.asyncio import AsyncSession
-
-from lfx.services.settings.base import Settings
-
-
-@pytest.fixture(autouse=True)
-async def cleanup_database(async_session: AsyncSession):
- yield
- # Clean up after each test
- await async_session.execute(delete(VertexBuildTable))
- await async_session.commit()
-
-
-@pytest.fixture
-def vertex_build_data():
- """Fixture to create sample vertex build data."""
- return VertexBuildBase(
- id=str(uuid4()),
- flow_id=uuid4(),
- timestamp=datetime.now(timezone.utc),
- artifacts={},
- valid=True,
- )
-
-
-@pytest.fixture
-def mock_settings():
- """Fixture to mock settings."""
- return Settings(
- max_vertex_builds_to_keep=5,
- max_vertex_builds_per_vertex=3,
- max_transactions_to_keep=3000,
- vertex_builds_storage_enabled=True,
- )
-
-
-@pytest.fixture
-def timestamp_generator():
- """Generate deterministic timestamps for testing."""
- base_time = datetime(2024, 1, 1, tzinfo=timezone.utc)
-
- def get_timestamp(offset_seconds: int) -> datetime:
- return base_time + timedelta(seconds=offset_seconds)
-
- return get_timestamp
-
-
-async def create_test_builds(async_session: AsyncSession, count: int, flow_id, vertex_id, timestamp_generator=None):
- """Helper function to create test build entries."""
- base_time = datetime.now(timezone.utc) if timestamp_generator is None else timestamp_generator(0)
-
- # Create all builds first
- builds = []
- for i in range(count):
- build = VertexBuildBase(
- id=vertex_id,
- flow_id=flow_id,
- timestamp=base_time - timedelta(minutes=i) if timestamp_generator is None else timestamp_generator(i),
- artifacts={},
- valid=True,
- )
- builds.append(build)
-
- # Add builds in reverse order (oldest first)
- for build in sorted(builds, key=lambda x: x.timestamp):
- await log_vertex_build(async_session, build)
- await async_session.commit() # Commit after each build to ensure limits are enforced
-
-
-@pytest.mark.asyncio
-async def test_log_vertex_build_basic(async_session: AsyncSession, vertex_build_data, mock_settings):
- """Test basic vertex build logging."""
- with patch("langflow.services.database.models.vertex_builds.crud.get_settings_service") as mock_settings_service:
- mock_settings_service.return_value.settings = mock_settings
-
- result = await log_vertex_build(async_session, vertex_build_data)
- await async_session.refresh(result)
-
- assert result.id == vertex_build_data.id
- assert result.flow_id == vertex_build_data.flow_id
- assert result.build_id is not None # Verify build_id was auto-generated
-
-
-@pytest.mark.asyncio
-async def test_log_vertex_build_max_global_limit(async_session: AsyncSession, vertex_build_data, mock_settings):
- """Test that global build limit is enforced."""
- with patch("langflow.services.database.models.vertex_builds.crud.get_settings_service") as mock_settings_service:
- mock_settings_service.return_value.settings = mock_settings
-
- # Use helper function instead of loop
- await create_test_builds(
- async_session,
- count=mock_settings.max_vertex_builds_to_keep + 2,
- flow_id=vertex_build_data.flow_id,
- vertex_id=str(uuid4()), # Different vertex ID each time
- )
-
- count = await async_session.scalar(select(func.count()).select_from(VertexBuildTable))
- assert count <= mock_settings.max_vertex_builds_to_keep
-
-
-@pytest.mark.asyncio
-async def test_log_vertex_build_max_per_vertex_limit(async_session: AsyncSession, vertex_build_data, mock_settings):
- """Test that per-vertex build limit is enforced."""
- with patch("langflow.services.database.models.vertex_builds.crud.get_settings_service") as mock_settings_service:
- mock_settings_service.return_value.settings = mock_settings
-
- # Create more builds than the per-vertex limit for the same vertex
- await create_test_builds(
- async_session,
- count=mock_settings.max_vertex_builds_per_vertex + 2,
- flow_id=vertex_build_data.flow_id,
- vertex_id=vertex_build_data.id, # Same vertex ID
- )
-
- # Count builds for this vertex
- stmt = (
- select(func.count())
- .select_from(VertexBuildTable)
- .where(VertexBuildTable.flow_id == vertex_build_data.flow_id, VertexBuildTable.id == vertex_build_data.id)
- )
- count = await async_session.scalar(stmt)
-
- # Verify we don't exceed per-vertex limit
- assert count <= mock_settings.max_vertex_builds_per_vertex
-
-
-@pytest.mark.asyncio
-async def test_log_vertex_build_integrity_error(async_session: AsyncSession, vertex_build_data, mock_settings):
- """Test handling of integrity errors."""
- with patch("langflow.services.database.models.vertex_builds.crud.get_settings_service") as mock_settings_service:
- mock_settings_service.return_value.settings = mock_settings
-
- # First, log the original build
- first_build = await log_vertex_build(async_session, vertex_build_data)
-
- # Create a second build; build_id is auto-generated, so no conflict occurs
- duplicate_build = VertexBuildBase(
- id=str(uuid4()),
- flow_id=uuid4(),
- timestamp=datetime.now(timezone.utc),
- artifacts={},
- valid=True,
- )
-
- # This should not raise an error since build_id is auto-generated
- second_build = await log_vertex_build(async_session, duplicate_build)
- assert second_build.build_id != first_build.build_id
-
-
-@pytest.mark.asyncio
-async def test_log_vertex_build_ordering(async_session: AsyncSession, timestamp_generator):
- """Test that oldest builds are deleted first."""
- max_builds = 5
- builds = []
- flow_id = uuid4()
- vertex_id = str(uuid4())
-
- # Create builds with known timestamps
- for i in range(max_builds + 1):
- build = VertexBuildBase(
- id=vertex_id,
- flow_id=flow_id,
- timestamp=timestamp_generator(i),
- artifacts={},
- valid=True,
- )
- builds.append(build)
-
- # Add builds in random order to test sorting
- for build in sorted(builds, key=lambda _: uuid4()): # Randomize order
- await log_vertex_build(
- async_session,
- build,
- max_builds_to_keep=max_builds,
- max_builds_per_vertex=max_builds, # Allow same number per vertex as global
- )
-
- # Wait for the transaction to complete
- await async_session.commit()
-
- # Verify newest builds are kept
- remaining_builds = (
- await async_session.scalars(select(VertexBuildTable.timestamp).order_by(VertexBuildTable.timestamp.desc()))
- ).all()
-
- assert len(remaining_builds) == max_builds
- # Verify we kept the newest builds
- assert all(remaining_builds[i] > remaining_builds[i + 1] for i in range(len(remaining_builds) - 1))
-
-
-@pytest.mark.asyncio
-@pytest.mark.parametrize(
- ("max_global", "max_per_vertex"),
- [
- (1, 1), # Minimum values
- (5, 3), # Normal values
- (100, 50), # Large values
- ],
-)
-async def test_log_vertex_build_with_different_limits(
- async_session: AsyncSession, vertex_build_data, max_global: int, max_per_vertex: int, timestamp_generator
-):
- """Test build logging with different limit configurations."""
- # Create builds with different vertex IDs
- builds = []
- for i in range(max_global + 2):
- build = VertexBuildBase(
- id=str(uuid4()), # Different vertex ID each time
- flow_id=vertex_build_data.flow_id,
- timestamp=timestamp_generator(i),
- artifacts={},
- valid=True,
- )
- builds.append(build)
-
- # Sort builds by timestamp (newest first)
- sorted_builds = sorted(builds, key=lambda x: x.timestamp, reverse=True)
-
- # Keep only the newest max_global builds
- builds_to_insert = sorted_builds[:max_global]
-
- # Insert builds one by one
- for build in builds_to_insert:
- await log_vertex_build(
- async_session, build, max_builds_to_keep=max_global, max_builds_per_vertex=max_per_vertex
- )
- await async_session.commit()
-
- # Verify the total count
- count = await async_session.scalar(select(func.count()).select_from(VertexBuildTable))
- assert count <= max_global
-
- # Test per-vertex limit
- vertex_id = str(uuid4())
- vertex_builds = []
- for i in range(max_per_vertex + 2):
- build = VertexBuildBase(
- id=vertex_id, # Same vertex ID
- flow_id=vertex_build_data.flow_id,
- timestamp=timestamp_generator(i),
- artifacts={},
- valid=True,
- )
- vertex_builds.append(build)
-
- # Sort vertex builds by timestamp (newest first)
- sorted_vertex_builds = sorted(vertex_builds, key=lambda x: x.timestamp, reverse=True)
-
- # Keep only the newest max_per_vertex builds
- vertex_builds_to_insert = sorted_vertex_builds[:max_per_vertex]
-
- # Insert vertex builds one by one
- for build in vertex_builds_to_insert:
- await log_vertex_build(async_session, build)
- await async_session.commit()
-
- # Verify per-vertex count
- vertex_count = await async_session.scalar(
- select(func.count())
- .select_from(VertexBuildTable)
- .where(VertexBuildTable.flow_id == vertex_build_data.flow_id, VertexBuildTable.id == vertex_id)
- )
- assert vertex_count <= max_per_vertex
-
-
-@pytest.mark.asyncio
-async def test_concurrent_log_vertex_build(vertex_build_data, mock_settings):
- """Test concurrent build logging."""
- with patch("langflow.services.database.models.vertex_builds.crud.get_settings_service") as mock_settings_service:
- mock_settings_service.return_value.settings = mock_settings
-
- import asyncio
-
- from sqlalchemy.ext.asyncio import create_async_engine
- from sqlalchemy.pool import StaticPool
- from sqlmodel import SQLModel
- from sqlmodel.ext.asyncio.session import AsyncSession
-
- # Create a new engine for each session to avoid concurrency issues
- engine = create_async_engine(
- "sqlite+aiosqlite://",
- connect_args={"check_same_thread": False},
- poolclass=StaticPool,
- )
-
- # Create tables
- async with engine.begin() as conn:
- await conn.run_sync(SQLModel.metadata.create_all)
-
- # Create multiple builds concurrently
- async def create_build():
- # Create a new session for each concurrent operation
- async with AsyncSession(engine) as session:
- build_data = vertex_build_data.model_copy()
- build_data.id = str(uuid4()) # Use different vertex IDs to avoid per-vertex limit
- return await log_vertex_build(session, build_data)
-
- results = await asyncio.gather(*[create_build() for _ in range(5)], return_exceptions=True)
-
- # Verify no exceptions occurred
- exceptions = [r for r in results if isinstance(r, Exception)]
- if exceptions:
- raise exceptions[0]
-
- # Verify total count doesn't exceed global limit
- async with AsyncSession(engine) as session:
- count = await session.scalar(select(func.count()).select_from(VertexBuildTable))
- assert count <= mock_settings.max_vertex_builds_to_keep
diff --git a/src/backend/tests/unit/services/flow/__init__.py b/src/backend/tests/unit/services/flow/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/src/backend/tests/unit/services/flow/test_flow_runner.py b/src/backend/tests/unit/services/flow/test_flow_runner.py
deleted file mode 100644
index fb047441cdab..000000000000
--- a/src/backend/tests/unit/services/flow/test_flow_runner.py
+++ /dev/null
@@ -1,86 +0,0 @@
-from uuid import uuid4
-
-import pytest
-from langflow.services.flow.flow_runner import LangflowRunnerExperimental
-
-
-@pytest.fixture
-def sample_flow_dict():
- return {
- "id": str(uuid4()), # Add required ID field
- "name": "test_flow", # Add name field
- "data": {
- "nodes": [],
- "edges": [],
- },
- }
-
-
-@pytest.fixture
-def flow_runner():
- return LangflowRunnerExperimental()
-
-
-@pytest.mark.asyncio
-async def test_database_exists_check(flow_runner):
- """Test database exists check functionality."""
- result = await flow_runner.database_exists_check()
- assert isinstance(result, bool)
-
-
-@pytest.mark.asyncio
-async def test_get_flow_dict_from_dict(flow_runner, sample_flow_dict):
- """Test loading flow from a dictionary."""
- result = await flow_runner.get_flow_dict(sample_flow_dict)
- assert result == sample_flow_dict
-
-
-@pytest.mark.asyncio
-async def test_get_flow_dict_invalid_input(flow_runner):
- """Test loading flow with invalid input type."""
- pattern = r"Input must be a file path .* or a JSON object .*"
- with pytest.raises(TypeError, match=pattern):
- await flow_runner.get_flow_dict(123)
-
-
-@pytest.mark.asyncio
-async def test_run_with_dict_input(flow_runner, sample_flow_dict):
- """Test running flow with dictionary input."""
- session_id = str(uuid4())
- input_value = "test input"
-
- result = await flow_runner.run(
- session_id=session_id,
- flow=sample_flow_dict,
- input_value=input_value,
- )
- assert result is not None
-
-
-@pytest.mark.asyncio
-async def test_run_with_different_input_types(flow_runner, sample_flow_dict):
- """Test running flow with different input and output types."""
- session_id = str(uuid4())
- test_cases = [
- ("text input", "text", "text"),
- ("chat input", "chat", "chat"),
- ("test input", "chat", "all"), # Updated to use "all" as default output_type
- ]
-
- for input_value, input_type, output_type in test_cases:
- result = await flow_runner.run(
- session_id=session_id,
- flow=sample_flow_dict,
- input_value=input_value,
- input_type=input_type,
- output_type=output_type,
- )
- assert result is not None
-
-
-@pytest.mark.asyncio
-async def test_initialize_database(flow_runner):
- """Test database initialization."""
- flow_runner.should_initialize_db = True
- await flow_runner.init_db_if_needed()
- assert not flow_runner.should_initialize_db
diff --git a/src/backend/tests/unit/services/tasks/__init__.py b/src/backend/tests/unit/services/tasks/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/src/backend/tests/unit/services/tasks/test_temp_flow_cleanup.py b/src/backend/tests/unit/services/tasks/test_temp_flow_cleanup.py
deleted file mode 100644
index 4b0c82c54842..000000000000
--- a/src/backend/tests/unit/services/tasks/test_temp_flow_cleanup.py
+++ /dev/null
@@ -1,119 +0,0 @@
-from __future__ import annotations
-
-import datetime
-from datetime import timezone
-from uuid import uuid4
-
-import pytest
-from langflow.services.database.models.flow import Flow as FlowTable
-from langflow.services.database.models.message.model import MessageTable
-from langflow.services.deps import get_settings_service, get_storage_service, session_scope
-from langflow.services.task.temp_flow_cleanup import (
- CleanupWorker,
- cleanup_orphaned_records,
-)
-
-
-@pytest.mark.usefixtures("client")
-async def test_cleanup_orphaned_records_no_orphans():
- """Test cleanup when there are no orphaned records."""
- storage_service = get_storage_service()
- flow_id = uuid4()
-
- async with session_scope() as session:
- # Create a flow and associated message
- flow = FlowTable(
- id=flow_id,
- name="Test Flow",
- data="null",
- updated_at=datetime.datetime.now(timezone.utc),
- )
- message = MessageTable(
- id=uuid4(),
- flow_id=flow_id,
- sender="test_user",
- sender_name="Test User",
- timestamp=datetime.datetime.now(timezone.utc),
- session_id=str(uuid4()),
- )
- session.add(flow)
- session.add(message)
- await session.commit()
-
- # Write a file for the flow
- await storage_service.save_file(str(flow_id), "test.json", b"test data")
-
- # Run cleanup
- async with session_scope() as session:
- await cleanup_orphaned_records()
-
- # Verify message still exists
- async with session_scope() as session:
- message = await session.get(MessageTable, message.id)
- assert message is not None
-
-
-@pytest.mark.usefixtures("client")
-async def test_cleanup_orphaned_records_with_orphans():
- """Test cleanup when there are orphaned records."""
- orphaned_flow_id = uuid4()
-
- async with session_scope() as session:
- # Create orphaned records without an associated flow
- message = MessageTable(
- id=uuid4(),
- flow_id=orphaned_flow_id,
- sender="test_user",
- sender_name="Test User",
- timestamp=datetime.datetime.now(timezone.utc),
- session_id=str(uuid4()),
- )
- session.add(message)
- await session.commit()
-
- # Run cleanup
- async with session_scope() as session:
- await cleanup_orphaned_records()
-
- # Verify orphaned message was deleted
- async with session_scope() as session:
- message = await session.get(MessageTable, message.id)
- assert message is None
-
-
-@pytest.mark.asyncio
-async def test_cleanup_worker_start_stop():
- """Test CleanupWorker start and stop functionality."""
- worker = CleanupWorker()
- await worker.start()
- assert worker._task is not None
- assert not worker._stop_event.is_set()
- await worker.stop()
- assert worker._task is None
- assert worker._stop_event.is_set()
-
-
-@pytest.mark.asyncio
-async def test_cleanup_worker_run_with_exception(mocker):
- """Test CleanupWorker handles exceptions gracefully."""
- # Mock the logger to capture log calls
- mock_logger = mocker.patch("langflow.services.task.temp_flow_cleanup.logger")
- mock_logger.adebug = mocker.AsyncMock()
- mock_logger.awarning = mocker.AsyncMock()
-
- settings = get_settings_service().settings
- settings.public_flow_cleanup_interval = 601 # Minimum valid interval
- worker = CleanupWorker()
-
- # Start and immediately stop the worker
- await worker.start()
- await worker.stop()
-
- # Verify the worker was started and stopped properly
- assert worker._task is None
- assert worker._stop_event.is_set()
-
- # Verify the expected log messages were called
- mock_logger.adebug.assert_any_call("Started database cleanup worker")
- mock_logger.adebug.assert_any_call("Stopping database cleanup worker...")
- mock_logger.adebug.assert_any_call("Database cleanup worker stopped")
diff --git a/src/backend/tests/unit/services/tracing/__init__.py b/src/backend/tests/unit/services/tracing/__init__.py
deleted file mode 100644
index 18f517ba6660..000000000000
--- a/src/backend/tests/unit/services/tracing/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-"""Services tests package."""
diff --git a/src/backend/tests/unit/services/tracing/test_tracing_service.py b/src/backend/tests/unit/services/tracing/test_tracing_service.py
deleted file mode 100644
index 89625865d535..000000000000
--- a/src/backend/tests/unit/services/tracing/test_tracing_service.py
+++ /dev/null
@@ -1,532 +0,0 @@
-import asyncio
-import uuid
-from unittest.mock import AsyncMock, MagicMock, patch
-
-import pytest
-from langflow.services.tracing.base import BaseTracer
-from langflow.services.tracing.service import (
- TracingService,
- component_context_var,
- trace_context_var,
-)
-
-from lfx.services.settings.base import Settings
-from lfx.services.settings.service import SettingsService
-
-
-class MockTracer(BaseTracer):
- def __init__(
- self,
- trace_name: str,
- trace_type: str,
- project_name: str,
- trace_id: uuid.UUID,
- user_id: str | None = None,
- session_id: str | None = None,
- ) -> None:
- self.trace_name = trace_name
- self.trace_type = trace_type
- self.project_name = project_name
- self.trace_id = trace_id
- self.user_id = user_id
- self.session_id = session_id
- self._ready = True
- self.end_called = False
- self.get_langchain_callback_called = False
- self.add_trace_list = []
- self.end_trace_list = []
-
- @property
- def ready(self) -> bool:
- return self._ready
-
- def add_trace(
- self,
- trace_id: str,
- trace_name: str,
- trace_type: str,
- inputs: dict[str, any],
- metadata: dict[str, any] | None = None,
- vertex=None,
- ) -> None:
- self.add_trace_list.append(
- {
- "trace_id": trace_id,
- "trace_name": trace_name,
- "trace_type": trace_type,
- "inputs": inputs,
- "metadata": metadata,
- "vertex": vertex,
- }
- )
-
- def end_trace(
- self,
- trace_id: str,
- trace_name: str,
- outputs: dict[str, any] | None = None,
- error: Exception | None = None,
- logs=(),
- ) -> None:
- self.end_trace_list.append(
- {
- "trace_id": trace_id,
- "trace_name": trace_name,
- "outputs": outputs,
- "error": error,
- "logs": logs,
- }
- )
-
- def end(
- self,
- inputs: dict[str, any],
- outputs: dict[str, any],
- error: Exception | None = None,
- metadata: dict[str, any] | None = None,
- ) -> None:
- self.end_called = True
- self.inputs_param = inputs
- self.outputs_param = outputs
- self.error_param = error
- self.metadata_param = metadata
-
- def get_langchain_callback(self):
- self.get_langchain_callback_called = True
- return MagicMock()
-
-
-@pytest.fixture
-def mock_settings_service():
- settings = Settings()
- settings.deactivate_tracing = False
- return SettingsService(settings, MagicMock())
-
-
-@pytest.fixture
-def tracing_service(mock_settings_service):
- return TracingService(mock_settings_service)
-
-
-@pytest.fixture
-def mock_component():
- component = MagicMock()
- component._vertex = MagicMock()
- component._vertex.id = "test_vertex_id"
- component.trace_type = "test_trace_type"
- return component
-
-
-@pytest.fixture
-def mock_tracers():
- with (
- patch(
- "langflow.services.tracing.service._get_langsmith_tracer",
- return_value=MockTracer,
- ),
- patch(
- "langflow.services.tracing.service._get_langwatch_tracer",
- return_value=MockTracer,
- ),
- patch(
- "langflow.services.tracing.service._get_langfuse_tracer",
- return_value=MockTracer,
- ),
- patch(
- "langflow.services.tracing.service._get_arize_phoenix_tracer",
- return_value=MockTracer,
- ),
- patch(
- "langflow.services.tracing.service._get_opik_tracer",
- return_value=MockTracer,
- ),
- patch(
- "langflow.services.tracing.service._get_traceloop_tracer",
- return_value=MockTracer,
- ),
- ):
- yield
-
-
-@pytest.mark.asyncio
-@pytest.mark.usefixtures("mock_tracers")
-async def test_start_end_tracers(tracing_service):
- """Test starting and ending tracers."""
- run_id = uuid.uuid4()
- run_name = "test_run"
- user_id = "test_user"
- session_id = "test_session"
- project_name = "test_project"
- outputs = {"output_key": "output_value"}
-
- await tracing_service.start_tracers(run_id, run_name, user_id, session_id, project_name)
- # Verify trace_context is set correctly
- trace_context = trace_context_var.get()
- assert trace_context is not None
- assert trace_context.run_id == run_id
- assert trace_context.run_name == run_name
- assert trace_context.project_name == project_name
- assert trace_context.user_id == user_id
- assert trace_context.session_id == session_id
-
- # Verify tracers are initialized
- assert "langsmith" in trace_context.tracers
- assert "langwatch" in trace_context.tracers
- assert "langfuse" in trace_context.tracers
- assert "arize_phoenix" in trace_context.tracers
- assert "traceloop" in trace_context.tracers
-
- await tracing_service.end_tracers(outputs)
-
- # Verify end method was called for all tracers
- trace_context = trace_context_var.get()
- for tracer in trace_context.tracers.values():
- assert tracer.end_called
- assert tracer.metadata_param == outputs
- assert tracer.outputs_param == trace_context.all_outputs
-
- # Verify worker_task is cancelled
- assert trace_context.worker_task is None
- assert not trace_context.running
-
-
-@pytest.mark.asyncio
-@pytest.mark.usefixtures("mock_tracers")
-async def test_trace_component(tracing_service, mock_component):
- """Test component tracing context manager."""
- run_id = uuid.uuid4()
- run_name = "test_run"
- user_id = "test_user"
- session_id = "test_session"
- project_name = "test_project"
-
- trace_name = "test_component_trace"
- inputs = {"input_key": "input_value"}
- metadata = {"metadata_key": "metadata_value"}
-
- await tracing_service.start_tracers(run_id, run_name, user_id, session_id, project_name)
-
- async with tracing_service.trace_component(mock_component, trace_name, inputs, metadata) as ts:
- # Verify component context is set
- component_context = component_context_var.get()
- assert component_context is not None
- assert component_context.trace_id == mock_component._vertex.id
- assert component_context.trace_name == trace_name
- assert component_context.trace_type == mock_component.trace_type
- assert component_context.vertex == mock_component._vertex
- assert component_context.inputs == inputs
- assert component_context.inputs_metadata == metadata
-
- # Verify add_trace method was called for tracers
- await asyncio.sleep(0.1) # Wait for async queue processing
- trace_context = trace_context_var.get()
- for tracer in trace_context.tracers.values():
- assert tracer.add_trace_list[0]["trace_id"] == mock_component._vertex.id
- assert tracer.add_trace_list[0]["trace_name"] == trace_name
- assert tracer.add_trace_list[0]["trace_type"] == mock_component.trace_type
- assert tracer.add_trace_list[0]["inputs"] == inputs
- assert tracer.add_trace_list[0]["metadata"] == metadata
- assert tracer.add_trace_list[0]["vertex"] == mock_component._vertex
-
- # Test adding logs
- ts.add_log(trace_name, {"message": "test log"})
- assert {"message": "test log"} in component_context.logs[trace_name]
-
- # Test setting outputs
- outputs = {"output_key": "output_value"}
- output_metadata = {"output_metadata_key": "output_metadata_value"}
- ts.set_outputs(trace_name, outputs, output_metadata)
- assert component_context.outputs[trace_name] == outputs
- assert component_context.outputs_metadata[trace_name] == output_metadata
- assert trace_context.all_outputs[trace_name] == outputs
-
- # Verify end_trace method was called for tracers
- await asyncio.sleep(0.1) # Wait for async queue processing
- for tracer in trace_context.tracers.values():
- assert tracer.end_trace_list[0]["trace_id"] == mock_component._vertex.id
- assert tracer.end_trace_list[0]["trace_name"] == trace_name
- assert tracer.end_trace_list[0]["outputs"] == trace_context.all_outputs[trace_name]
- assert tracer.end_trace_list[0]["error"] is None
- assert tracer.end_trace_list[0]["logs"] == component_context.logs[trace_name]
-
- # Cleanup
- await tracing_service.end_tracers({})
-
-
-@pytest.mark.asyncio
-@pytest.mark.usefixtures("mock_tracers")
-async def test_trace_component_with_exception(tracing_service, mock_component):
- """Test component tracing context manager with exception handling."""
- run_id = uuid.uuid4()
- run_name = "test_run"
- user_id = "test_user"
- session_id = "test_session"
- project_name = "test_project"
-
- trace_name = "test_component_trace"
- inputs = {"input_key": "input_value"}
-
- await tracing_service.start_tracers(run_id, run_name, user_id, session_id, project_name)
-
- test_exception = ValueError("Test exception")
-
- with pytest.raises(ValueError, match="Test exception"):
- async with tracing_service.trace_component(mock_component, trace_name, inputs):
- raise test_exception
-
- # Verify end_trace method was called with exception
- await asyncio.sleep(0.1) # Wait for async queue processing
- trace_context = trace_context_var.get()
- for tracer in trace_context.tracers.values():
- assert tracer.end_trace_list[0]["error"] == test_exception
-
- # Cleanup
- await tracing_service.end_tracers({})
-
-
-@pytest.mark.asyncio
-@pytest.mark.usefixtures("mock_tracers")
-async def test_get_langchain_callbacks(tracing_service):
- """Test getting LangChain callback handlers."""
- run_id = uuid.uuid4()
- run_name = "test_run"
- user_id = "test_user"
- session_id = "test_session"
- project_name = "test_project"
-
- await tracing_service.start_tracers(run_id, run_name, user_id, session_id, project_name)
-
- callbacks = tracing_service.get_langchain_callbacks()
-
- # Verify get_langchain_callback method was called for each tracer
- trace_context = trace_context_var.get()
- for tracer in trace_context.tracers.values():
- assert tracer.get_langchain_callback_called
-
- # Verify returned callbacks list length
- expected = len(trace_context_var.get().tracers)
- assert len(callbacks) == expected
-
- # Cleanup
- await tracing_service.end_tracers({})
-
-
-@pytest.mark.asyncio
-async def test_deactivated_tracing(mock_settings_service):
- """Test deactivated tracing functionality."""
- # Set deactivate_tracing to True
- mock_settings_service.settings.deactivate_tracing = True
- tracing_service = TracingService(mock_settings_service)
-
- run_id = uuid.uuid4()
- run_name = "test_run"
- user_id = "test_user"
- session_id = "test_session"
- project_name = "test_project"
-
- # Starting tracers should have no effect
- await tracing_service.start_tracers(run_id, run_name, user_id, session_id, project_name)
-
-    # With tracing disabled, start_tracers is a no-op, so the context var stays unset
-    assert trace_context_var.get() is None
-
- # Test trace_component context manager
- mock_component = MagicMock()
- trace_name = "test_component_trace"
- inputs = {"input_key": "input_value"}
-
- async with tracing_service.trace_component(mock_component, trace_name, inputs) as ts:
- ts.add_log(trace_name, {"message": "test log"})
- ts.set_outputs(trace_name, {"output_key": "output_value"})
-
- # Test getting LangChain callback handlers
- callbacks = tracing_service.get_langchain_callbacks()
- assert len(callbacks) == 0 # Should return empty list when tracing is disabled
-
- # Test end_tracers
- await tracing_service.end_tracers({})
-
-
-@pytest.mark.asyncio
-async def test_cleanup_inputs():
- """Test cleaning sensitive information from input data."""
- inputs = {
- "normal_key": "normal_value",
- "api_key": "secret_api_key",
- "openai_api_key": "secret_openai_api_key",
- "nested_api_key": {"api_key": "nested_secret"},
- }
-
- cleaned_inputs = TracingService._cleanup_inputs(inputs)
-
- # Verify values for keys containing api_key are replaced with *****
- assert cleaned_inputs["normal_key"] == "normal_value"
- assert cleaned_inputs["api_key"] == "*****"
- assert cleaned_inputs["openai_api_key"] == "*****"
-
-    # A key containing api_key is masked even when its value is a nested dict
- assert cleaned_inputs["nested_api_key"] == "*****"
-
- # Verify original input is not modified
- assert inputs["api_key"] == "secret_api_key"
- assert inputs["openai_api_key"] == "secret_openai_api_key"
-
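The masking rule pinned down above fits in a few lines. A minimal sketch of the assumed behavior (not the actual `_cleanup_inputs` implementation, which may copy or recurse more carefully):

```python
def cleanup_inputs(inputs: dict) -> dict:
    """Return a copy with any key containing 'api_key' fully masked."""
    return {k: ("*****" if "api_key" in k.lower() else v) for k, v in inputs.items()}


masked = cleanup_inputs({"api_key": "secret", "normal_key": "ok"})
assert masked == {"api_key": "*****", "normal_key": "ok"}
```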
-
-@pytest.mark.asyncio
-async def test_start_tracers_with_exception(tracing_service):
- """Test starting tracers with exception handling."""
- run_id = uuid.uuid4()
- run_name = "test_run"
- user_id = "test_user"
- session_id = "test_session"
- project_name = "test_project"
-
- # Mock _initialize_langsmith_tracer to raise exception
- with (
- patch.object(
- tracing_service,
- "_initialize_langsmith_tracer",
- side_effect=Exception("Mock exception"),
- ),
- patch("langflow.services.tracing.service.logger") as mock_logger,
- ):
- # Configure async mock method
- mock_logger.adebug = AsyncMock()
-
- # start_tracers should return normally even with exception
- await tracing_service.start_tracers(run_id, run_name, user_id, session_id, project_name)
-
- # Verify exception was logged
- mock_logger.adebug.assert_any_call("Error initializing tracers: Mock exception")
-
- # Verify trace_context was set even with exception
- trace_context = trace_context_var.get()
- assert trace_context is not None
- assert trace_context.run_id == run_id
- assert trace_context.run_name == run_name
-
- # Cleanup
- await tracing_service.end_tracers({})
-
-
-@pytest.mark.asyncio
-@pytest.mark.usefixtures("mock_tracers")
-async def test_trace_worker_with_exception(tracing_service):
- """Test trace worker exception handling."""
- run_id = uuid.uuid4()
- run_name = "test_run"
- user_id = "test_user"
- session_id = "test_session"
- project_name = "test_project"
-
- # Create a trace function that raises an exception
- def failing_trace_func():
- msg = "Mock trace function exception"
- raise ValueError(msg)
-
- with patch("langflow.services.tracing.service.logger") as mock_logger:
- # Configure async mock method
- mock_logger.aexception = AsyncMock()
-
-        # Start tracers so the background trace worker is running
- await tracing_service.start_tracers(run_id, run_name, user_id, session_id, project_name)
-
- # Get trace_context and add failing trace function to queue
- trace_context = trace_context_var.get()
- await trace_context.traces_queue.put((failing_trace_func, ()))
-
- # Wait for async queue processing
- await asyncio.sleep(0.1)
-
- # Verify exception was logged
- mock_logger.aexception.assert_called_with("Error processing trace_func")
-
- # Cleanup
- await tracing_service.end_tracers({})
-
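The behavior this test checks is the classic resilient queue-worker loop: a failing trace function is logged and the worker keeps draining the queue instead of dying. A standalone sketch of that pattern (assuming, as the test does, that queue items are `(callable, args)` pairs):

```python
import asyncio
import logging


def failing() -> None:
    raise ValueError("boom")


async def trace_worker(queue: asyncio.Queue) -> None:
    while True:
        trace_func, args = await queue.get()
        try:
            trace_func(*args)
        except Exception:  # the worker must outlive any single bad trace func
            logging.exception("Error processing trace_func")
        finally:
            queue.task_done()


async def main() -> None:
    queue: asyncio.Queue = asyncio.Queue()
    worker = asyncio.create_task(trace_worker(queue))
    await queue.put((failing, ()))              # logged, then the worker continues
    await queue.put((print, ("still alive",)))  # processed despite the failure
    await queue.join()
    worker.cancel()


asyncio.run(main())
```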
-
-@pytest.mark.asyncio
-@pytest.mark.usefixtures("mock_tracers")
-async def test_concurrent_tracing(tracing_service, mock_component):
- """Test two tasks running start_tracers concurrently, with each task running 2 concurrent trace_component tasks."""
-
- # Define common task function: start tracers and run two component traces
- async def run_task(
- run_id,
- run_name,
- user_id,
- session_id,
- project_name,
- inputs,
- metadata,
- task_prefix,
- sleep_duration=0.1,
- ):
- await tracing_service.start_tracers(run_id, run_name, user_id, session_id, project_name)
-
- async def run_component_task(component, trace_name, component_suffix):
- async with tracing_service.trace_component(component, trace_name, inputs, metadata) as ts:
- ts.add_log(trace_name, {"message": f"{task_prefix} {component_suffix} log"})
- outputs = {"output_key": f"{task_prefix}_{component_suffix}_output"}
- await asyncio.sleep(sleep_duration)
- ts.set_outputs(trace_name, outputs)
-
- task1 = asyncio.create_task(run_component_task(mock_component, f"{run_id} trace_name1", f"{run_id} component1"))
- await task1
- task2 = asyncio.create_task(run_component_task(mock_component, f"{run_id} trace_name2", f"{run_id} component2"))
- await task2
-
- await tracing_service.end_tracers({"final_output": f"{task_prefix}_final_output"})
- trace_context = trace_context_var.get()
- return trace_context.tracers["langfuse"]
-
- inputs1 = {"input_key": "input_value1"}
- metadata1 = {"metadata_key": "metadata_value1"}
- inputs2 = {"input_key": "input_value2"}
- metadata2 = {"metadata_key": "metadata_value2"}
-
- task1 = asyncio.create_task(
- run_task(
- "run_id1",
- "run_name1",
- "user_id1",
- "session_id1",
- "project_name1",
- inputs1,
- metadata1,
- "task1",
- 2,
- )
- )
- await asyncio.sleep(0.1)
- task2 = asyncio.create_task(
- run_task(
- "run_id2",
- "run_name2",
- "user_id2",
- "session_id2",
- "project_name2",
- inputs2,
- metadata2,
- "task2",
- 0.1,
- )
- )
- tracer1 = await task1
- tracer2 = await task2
-
- # Verify tracer1 and tracer2 have correct trace data
- assert tracer1.trace_name == "run_name1"
- assert tracer1.project_name == "project_name1"
- assert tracer1.user_id == "user_id1"
- assert tracer1.session_id == "session_id1"
- assert dict(tracer1.outputs_param.get("run_id1 trace_name1")) == {"output_key": "task1_run_id1 component1_output"}
- assert dict(tracer1.outputs_param.get("run_id1 trace_name2")) == {"output_key": "task1_run_id1 component2_output"}
-
- assert tracer2.trace_name == "run_name2"
- assert tracer2.project_name == "project_name2"
- assert tracer2.user_id == "user_id2"
- assert tracer2.session_id == "session_id2"
- assert dict(tracer2.outputs_param.get("run_id2 trace_name1")) == {"output_key": "task2_run_id2 component1_output"}
- assert dict(tracer2.outputs_param.get("run_id2 trace_name2")) == {"output_key": "task2_run_id2 component2_output"}
diff --git a/src/backend/tests/unit/services/variable/__init__.py b/src/backend/tests/unit/services/variable/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/src/backend/tests/unit/services/variable/test_service.py b/src/backend/tests/unit/services/variable/test_service.py
deleted file mode 100644
index 9083c89668b8..000000000000
--- a/src/backend/tests/unit/services/variable/test_service.py
+++ /dev/null
@@ -1,243 +0,0 @@
-from datetime import datetime
-from unittest.mock import patch
-from uuid import uuid4
-
-import pytest
-from langflow.services.database.models.variable.model import VariableUpdate
-from langflow.services.deps import get_settings_service
-from langflow.services.variable.constants import CREDENTIAL_TYPE
-from langflow.services.variable.service import DatabaseVariableService
-from sqlalchemy.ext.asyncio import create_async_engine
-from sqlmodel import SQLModel
-from sqlmodel.ext.asyncio.session import AsyncSession
-
-from lfx.services.settings.constants import VARIABLES_TO_GET_FROM_ENVIRONMENT
-
-
-@pytest.fixture
-def service():
- settings_service = get_settings_service()
- return DatabaseVariableService(settings_service)
-
-
-@pytest.fixture
-async def session():
- engine = create_async_engine("sqlite+aiosqlite:///:memory:")
- async with engine.begin() as conn:
- await conn.run_sync(SQLModel.metadata.create_all)
- async with AsyncSession(engine, expire_on_commit=False) as session:
- yield session
-
-
-async def test_initialize_user_variables__create_and_update(service, session: AsyncSession):
- user_id = uuid4()
- field = ""
- good_vars = {k: f"value{i}" for i, k in enumerate(VARIABLES_TO_GET_FROM_ENVIRONMENT)}
- bad_vars = {"VAR1": "value1", "VAR2": "value2", "VAR3": "value3"}
- env_vars = {**good_vars, **bad_vars}
-
- await service.create_variable(user_id, "OPENAI_API_KEY", "outdate", session=session)
- env_vars["OPENAI_API_KEY"] = "updated_value"
-
- with patch.dict("os.environ", env_vars, clear=True):
- await service.initialize_user_variables(user_id=user_id, session=session)
-
- variables = await service.list_variables(user_id, session=session)
- for name in variables:
- value = await service.get_variable(user_id, name, field, session=session)
- assert value == env_vars[name]
-
- assert all(i in variables for i in good_vars)
- assert all(i not in variables for i in bad_vars)
-
-
-async def test_initialize_user_variables__not_found_variable(service, session: AsyncSession):
- with patch("langflow.services.variable.service.DatabaseVariableService.create_variable") as m:
- m.side_effect = Exception()
- await service.initialize_user_variables(uuid4(), session=session)
-    # No exception raised: creation failures are swallowed gracefully
-    assert True
-
-
-async def test_initialize_user_variables__skipping_environment_variable_storage(service, session: AsyncSession):
- service.settings_service.settings.store_environment_variables = False
- await service.initialize_user_variables(uuid4(), session=session)
-    # No exception raised: initialization is a no-op when env var storage is disabled
-    assert True
-
-
-async def test_get_variable(service, session: AsyncSession):
- user_id = uuid4()
- name = "name"
- value = "value"
- field = ""
- await service.create_variable(user_id, name, value, session=session)
-
- result = await service.get_variable(user_id, name, field, session=session)
-
- assert result == value
-
-
-async def test_get_variable__valueerror(service, session: AsyncSession):
- user_id = uuid4()
- name = "name"
- field = ""
-
- with pytest.raises(ValueError, match=f"{name} variable not found."):
- await service.get_variable(user_id, name, field, session=session)
-
-
-async def test_get_variable__typeerror(service, session: AsyncSession):
- user_id = uuid4()
- name = "name"
- value = "value"
- field = "session_id"
- type_ = CREDENTIAL_TYPE
- await service.create_variable(user_id, name, value, type_=type_, session=session)
-
- with pytest.raises(TypeError) as exc:
- await service.get_variable(user_id, name, field, session=session)
-
- assert name in str(exc.value)
- assert "purpose is to prevent the exposure of value" in str(exc.value)
-
-
-async def test_list_variables(service, session: AsyncSession):
- user_id = uuid4()
- names = ["name1", "name2", "name3"]
- value = "value"
- for name in names:
- await service.create_variable(user_id, name, value, session=session)
-
- result = await service.list_variables(user_id, session=session)
-
- assert all(name in result for name in names)
-
-
-async def test_list_variables__empty(service, session: AsyncSession):
- result = await service.list_variables(uuid4(), session=session)
-
- assert not result
- assert isinstance(result, list)
-
-
-async def test_update_variable(service, session: AsyncSession):
- user_id = uuid4()
- name = "name"
- old_value = "old_value"
- new_value = "new_value"
- field = ""
- await service.create_variable(user_id, name, old_value, session=session)
-
- old_recovered = await service.get_variable(user_id, name, field, session=session)
- result = await service.update_variable(user_id, name, new_value, session=session)
- new_recovered = await service.get_variable(user_id, name, field, session=session)
-
- assert old_value == old_recovered
- assert new_value == new_recovered
- assert result.user_id == user_id
- assert result.name == name
- assert result.value != old_value
- assert result.value != new_value
- assert result.default_fields == []
- assert result.type == CREDENTIAL_TYPE
- assert isinstance(result.created_at, datetime)
- assert isinstance(result.updated_at, datetime)
-
-
-async def test_update_variable__valueerror(service, session: AsyncSession):
- user_id = uuid4()
- name = "name"
- value = "value"
-
- with pytest.raises(ValueError, match=f"{name} variable not found."):
- await service.update_variable(user_id, name, value, session=session)
-
-
-async def test_update_variable_fields(service, session: AsyncSession):
- user_id = uuid4()
- new_name = new_value = "donkey"
- variable = await service.create_variable(user_id, "old_name", "old_value", session=session)
- saved = variable.model_dump()
- variable = VariableUpdate(**saved)
- variable.name = new_name
- variable.value = new_value
- variable.default_fields = ["new_field"]
-
- result = await service.update_variable_fields(
- user_id=user_id,
- variable_id=saved.get("id"),
- variable=variable,
- session=session,
- )
-
- assert result.name == new_name
- assert result.value != new_value
- assert saved.get("id") == result.id
- assert saved.get("user_id") == result.user_id
- assert saved.get("name") != result.name
- assert saved.get("value") != result.value
- assert saved.get("default_fields") != result.default_fields
- assert saved.get("type") == result.type
- assert saved.get("created_at") == result.created_at
- assert saved.get("updated_at") != result.updated_at
-
-
-async def test_delete_variable(service, session: AsyncSession):
- user_id = uuid4()
- name = "name"
- value = "value"
- field = ""
-
- await service.create_variable(user_id, name, value, session=session)
- recovered = await service.get_variable(user_id, name, field, session=session)
- await service.delete_variable(user_id, name, session=session)
- with pytest.raises(ValueError, match=f"{name} variable not found."):
- await service.get_variable(user_id, name, field, session=session)
-
- assert recovered == value
-
-
-async def test_delete_variable__valueerror(service, session: AsyncSession):
- user_id = uuid4()
- name = "name"
-
- with pytest.raises(ValueError, match=f"{name} variable not found."):
- await service.delete_variable(user_id, name, session=session)
-
-
-async def test_delete_variable_by_id(service, session: AsyncSession):
- user_id = uuid4()
- name = "name"
- value = "value"
- field = "field"
-
- saved = await service.create_variable(user_id, name, value, session=session)
- recovered = await service.get_variable(user_id, name, field, session=session)
- await service.delete_variable_by_id(user_id, saved.id, session=session)
- with pytest.raises(ValueError, match=f"{name} variable not found."):
- await service.get_variable(user_id, name, field, session=session)
-
- assert recovered == value
-
-
-async def test_delete_variable_by_id__valueerror(service, session: AsyncSession):
- user_id = uuid4()
- variable_id = uuid4()
-
- with pytest.raises(ValueError, match=f"{variable_id} variable not found."):
- await service.delete_variable_by_id(user_id, variable_id, session=session)
-
-
-async def test_create_variable(service, session: AsyncSession):
- user_id = uuid4()
- name = "name"
- value = "value"
-
- result = await service.create_variable(user_id, name, value, session=session)
-
- assert result.user_id == user_id
- assert result.name == name
- assert result.value != value
- assert result.default_fields == []
- assert result.type == CREDENTIAL_TYPE
- assert isinstance(result.created_at, datetime)
- assert isinstance(result.updated_at, datetime)
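Several assertions above (`result.value != value`, `result.value != new_value`) imply the service stores variable values encrypted rather than in plaintext. Purely as an assumption about what such storage could look like — the real service may use a different scheme and key management — here is the round-trip property sketched with `cryptography.Fernet`:

```python
from cryptography.fernet import Fernet

key = Fernet.generate_key()  # in practice derived from configured application secrets
fernet = Fernet(key)

token = fernet.encrypt(b"value")          # the stored form never equals the plaintext
assert token != b"value"
assert fernet.decrypt(token) == b"value"  # get_variable would return the decrypted value
```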
diff --git a/src/backend/tests/unit/template/__init__.py b/src/backend/tests/unit/template/__init__.py
deleted file mode 100644
index 9c6c80023897..000000000000
--- a/src/backend/tests/unit/template/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-"""Template testing module for Langflow."""
diff --git a/src/backend/tests/unit/template/test_starter_projects.py b/src/backend/tests/unit/template/test_starter_projects.py
deleted file mode 100644
index 7ea56e0325ce..000000000000
--- a/src/backend/tests/unit/template/test_starter_projects.py
+++ /dev/null
@@ -1,132 +0,0 @@
-"""Comprehensive tests for starter project templates.
-
-Tests all JSON templates in the starter_projects folder to ensure they:
-1. Are valid JSON
-2. Have required structure (nodes, edges)
-3. Don't have basic security issues
-4. Can be built into working flows
-
-Validates that templates work correctly and prevent unexpected breakage.
-"""
-
-import json
-from pathlib import Path
-
-import pytest
-
-# Import langflow validation utilities
-from langflow.utils.template_validation import (
- validate_flow_can_build,
- validate_flow_execution,
- validate_template_structure,
-)
-
-
-def get_starter_projects_path() -> Path:
- """Get path to starter projects directory."""
- return Path("src/backend/base/langflow/initial_setup/starter_projects")
-
-
-def get_template_files():
- """Get all template files for parameterization."""
- return list(get_starter_projects_path().glob("*.json"))
-
-
-def get_basic_template_files():
- """Get basic template files for parameterization."""
- path = get_starter_projects_path()
- basic_templates = ["Basic Prompting.json", "Basic Prompt Chaining.json"]
- return [path / name for name in basic_templates if (path / name).exists()]
-
-
-@pytest.fixture(autouse=True)
-def disable_tracing(monkeypatch):
- """Disable tracing for all template tests."""
- monkeypatch.setenv("LANGFLOW_DEACTIVATE_TRACING", "true")
-
-
-class TestStarterProjects:
- """Test all starter project templates."""
-
- def test_templates_exist(self):
- """Test that templates directory exists and has templates."""
- path = get_starter_projects_path()
- assert path.exists(), f"Directory not found: {path}"
-
- templates = get_template_files()
- assert len(templates) > 0, "No template files found"
-
- @pytest.mark.parametrize("template_file", get_template_files(), ids=lambda x: x.name)
- def test_template_valid_json(self, template_file):
- """Test template is valid JSON."""
- with template_file.open(encoding="utf-8") as f:
- try:
- json.load(f)
- except json.JSONDecodeError as e:
- pytest.fail(f"Invalid JSON in {template_file.name}: {e}")
-
- @pytest.mark.parametrize("template_file", get_template_files(), ids=lambda x: x.name)
- def test_template_structure(self, template_file):
- """Test template has required structure."""
- with template_file.open(encoding="utf-8") as f:
- template_data = json.load(f)
-
- errors = validate_template_structure(template_data, template_file.name)
- if errors:
- error_msg = "\n".join(errors)
- pytest.fail(f"Template structure errors in {template_file.name}:\n{error_msg}")
-
- @pytest.mark.parametrize("template_file", get_template_files(), ids=lambda x: x.name)
- def test_template_can_build_flow(self, template_file):
- """Test template can be built into working flow."""
- with template_file.open(encoding="utf-8") as f:
- template_data = json.load(f)
-
- errors = validate_flow_can_build(template_data, template_file.name)
- if errors:
- error_msg = "\n".join(errors)
- pytest.fail(f"Flow build errors in {template_file.name}:\n{error_msg}")
-
- @pytest.mark.asyncio
- @pytest.mark.parametrize("template_file", get_template_files(), ids=lambda x: x.name)
- async def test_template_validate_endpoint(self, template_file, client, logged_in_headers):
- """Test template using the validate endpoint."""
- with template_file.open(encoding="utf-8") as f:
- template_data = json.load(f)
-
- errors = await validate_flow_execution(client, template_data, template_file.name, logged_in_headers)
- if errors:
- error_msg = "\n".join(errors)
- pytest.fail(f"Endpoint validation errors in {template_file.name}:\n{error_msg}")
-
- @pytest.mark.asyncio
- @pytest.mark.parametrize("template_file", get_template_files(), ids=lambda x: x.name)
- async def test_template_flow_execution(self, template_file, client, logged_in_headers):
- """Test template can execute successfully."""
- try:
- with template_file.open(encoding="utf-8") as f:
- template_data = json.load(f)
-
- errors = await validate_flow_execution(client, template_data, template_file.name, logged_in_headers)
- if errors:
- error_msg = "\n".join(errors)
- pytest.fail(f"Template execution errors in {template_file.name}:\n{error_msg}")
-
- except (ValueError, TypeError, KeyError, AttributeError, OSError, json.JSONDecodeError) as e:
- pytest.fail(f"{template_file.name}: Unexpected error during validation: {e!s}")
-
- @pytest.mark.asyncio
- @pytest.mark.parametrize("template_file", get_basic_template_files(), ids=lambda x: x.name)
- async def test_basic_template_flow_execution(self, template_file, client, logged_in_headers):
- """Test basic template can execute successfully."""
- try:
- with template_file.open(encoding="utf-8") as f:
- template_data = json.load(f)
-
- errors = await validate_flow_execution(client, template_data, template_file.name, logged_in_headers)
- if errors:
- error_msg = "\n".join(errors)
- pytest.fail(f"Basic template execution errors in {template_file.name}:\n{error_msg}")
-
- except (ValueError, TypeError, KeyError, AttributeError, OSError, json.JSONDecodeError) as e:
- pytest.fail(f"{template_file.name}: Unexpected error during validation: {e!s}")
diff --git a/src/backend/tests/unit/template/utils/__init__.py b/src/backend/tests/unit/template/utils/__init__.py
deleted file mode 100644
index 8c60da1cbe08..000000000000
--- a/src/backend/tests/unit/template/utils/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-"""Utils tests package."""
diff --git a/src/backend/tests/unit/template/utils/test_apply_json_filter.py b/src/backend/tests/unit/template/utils/test_apply_json_filter.py
deleted file mode 100644
index cc807b4cd893..000000000000
--- a/src/backend/tests/unit/template/utils/test_apply_json_filter.py
+++ /dev/null
@@ -1,145 +0,0 @@
-# import pytest
-# from hypothesis import assume, example, given
-# from hypothesis import strategies as st
-# from lfx.schema.data import Data
-# from lfx.template.utils import apply_json_filter
-
-
-# # Helper function to create nested dictionaries
-# def dict_strategy():
-# return st.recursive(
-# st.one_of(st.integers(), st.text(), st.floats(allow_nan=False, allow_infinity=False), st.booleans()),
-# lambda children: st.lists(children, max_size=5) | st.dictionaries(st.text(), children, max_size=5),
-# max_leaves=10,
-# )
-
-
-# # Test basic dictionary access
-# @given(data=st.dictionaries(st.text(), st.integers()), key=st.text())
-# @example(
-# data={" ": 0}, # or any other generated value
-# key=" ",
-# ).via("discovered failure")
-# @example(
-# data={},
-# key=" ",
-# ).via("discovered failure")
-# def test_basic_dict_access(data, key):
-# # Skip empty key tests which have special handling
-# assume(key != "")
-
-# if key in data:
-# result = apply_json_filter(data, key)
-# assert result == data[key]
-# else:
-# result = apply_json_filter(data, key)
-# assert result is None
-
-
-# # Test array access
-# @given(data=st.lists(st.integers(), min_size=1), index=st.integers())
-# def test_array_access(data, index):
-# filter_str = f"[{index}]"
-# result = apply_json_filter(data, filter_str)
-# if 0 <= index < len(data):
-# assert result == data[index]
-# else:
-# assert result is None
-
-
-# # Test nested object access
-# @given(nested_data=dict_strategy())
-# def test_nested_object_access(nested_data):
-# # Skip non-dictionary inputs that would cause Data validation errors
-# assume(isinstance(nested_data, dict))
-
-# # Skip dictionaries with empty string keys which have special handling
-# assume("" not in nested_data)
-
-# # Wrap in Data object to test both raw and Data object inputs
-# data_obj = Data(data=nested_data)
-# result = apply_json_filter(data_obj, "")
-
-# # Based on the test failures, the function returns None for empty string filters
-# assert result is None
-
-
-# # Test edge cases
-# @pytest.mark.parametrize(
-# ("input_data", "filter_str", "expected"),
-# [
-# ({}, "", None), # Empty dict, empty filter returns None
-# ([], "", []), # Empty list, empty filter returns the list itself
-# (None, "any.path", None), # None input
-# ({"a": 1}, None, {"a": 1}), # None filter
-# ({"a": 1}, " ", None), # Whitespace filter returns None
-# ],
-# )
-# def test_edge_cases(input_data, filter_str, expected):
-# result = apply_json_filter(input_data, filter_str)
-# assert result == expected
-
-
-# # Test complex nested access
-# @given(data=st.dictionaries(keys=st.text(), values=st.dictionaries(keys=st.text(), values=st.lists(st.integers()))))
-# def test_complex_nested_access(data):
-# if data:
-# outer_key = next(iter(data))
-# if data[outer_key]:
-# inner_key = next(iter(data[outer_key]))
-# filter_str = f"{outer_key}.{inner_key}"
-# result = apply_json_filter(data, filter_str)
-
-# # Based on the test failures, when using empty keys, the function returns None
-# if outer_key == "" or inner_key == "":
-# assert result is None
-# else:
-# # The function seems to return None for numeric keys in dot notation
-# # or for certain nested paths with special characters, so we need to handle this case
-# expected = data[outer_key][inner_key]
-# # Only expect exact matches for simple alphanumeric non-numeric keys
-# if (
-# all(c.isalnum() or c == "_" for c in outer_key)
-# and all(c.isalnum() or c == "_" for c in inner_key)
-# and not outer_key.isdigit()
-# and not inner_key.isdigit()
-# ):
-# assert result == expected
-# else:
-# # For keys with special characters or numeric keys, the function might return None
-# assert result is None or result == expected
-
-
-# # Test array operations on objects
-# @given(
-# data=st.lists(
-# st.dictionaries(
-# keys=st.text(min_size=1).filter(lambda s: s.strip() and not any(c in s for c in "\r\n\t")),
-# values=st.integers(),
-# min_size=1,
-# ),
-# min_size=1,
-# )
-# )
-# def test_array_object_operations(data):
-# if data and all(data):
-# key = next(iter(data[0]))
-# # Skip empty key tests which have special handling
-# assume(key != "")
-# result = apply_json_filter(data, key)
-# expected = [item[key] for item in data if key in item]
-# assert result == expected
-
-
-# # Test invalid inputs
-# @pytest.mark.parametrize(
-# ("input_data", "filter_str"),
-# [
-# ({"a": 1}, "[invalid]"), # Invalid array index
-# ([1, 2, 3], "nonexistent"), # Nonexistent key on array
-# ({"a": 1}, "..[invalid]"), # Invalid syntax
-# ],
-# )
-# def test_invalid_inputs(input_data, filter_str):
-# result = apply_json_filter(input_data, filter_str)
-# assert result is None or isinstance(result, dict | list | Data)
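The module above is kept fully commented out. For reference, the property-testing pattern it relied on is `hypothesis`' `st.recursive`, which grows arbitrary JSON-like values from scalar leaves; a standalone sketch with a genuinely checkable property (assumes `hypothesis` is installed):

```python
import json

from hypothesis import given
from hypothesis import strategies as st

# Scalar leaves combined into arbitrarily nested lists/dicts, like dict_strategy above.
json_like = st.recursive(
    st.none() | st.booleans() | st.integers() | st.text(),
    lambda children: st.lists(children, max_size=3) | st.dictionaries(st.text(), children, max_size=3),
    max_leaves=8,
)


@given(json_like)
def test_json_roundtrip(value):
    # Every generated value survives a serialize/deserialize round trip.
    assert json.loads(json.dumps(value)) == value
```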
diff --git a/src/backend/tests/unit/test_api_key.py b/src/backend/tests/unit/test_api_key.py
deleted file mode 100644
index d3358f873486..000000000000
--- a/src/backend/tests/unit/test_api_key.py
+++ /dev/null
@@ -1,49 +0,0 @@
-import pytest
-from httpx import AsyncClient
-from langflow.services.database.models.api_key import ApiKeyCreate
-
-
-@pytest.fixture
-async def api_key(
- client,
- logged_in_headers,
- active_user, # noqa: ARG001
-):
- api_key = ApiKeyCreate(name="test-api-key")
-
- response = await client.post("api/v1/api_key/", data=api_key.model_dump_json(), headers=logged_in_headers)
- assert response.status_code == 200, response.text
- return response.json()
-
-
-@pytest.mark.usefixtures("api_key")
-async def test_get_api_keys(client: AsyncClient, logged_in_headers):
- response = await client.get("api/v1/api_key/", headers=logged_in_headers)
- assert response.status_code == 200, response.text
- data = response.json()
- assert "total_count" in data
- assert "user_id" in data
- assert "api_keys" in data
- assert any("test-api-key" in api_key["name"] for api_key in data["api_keys"])
- assert all("**" in api_key["api_key"] for api_key in data["api_keys"])
-
-
-async def test_create_api_key(client: AsyncClient, logged_in_headers):
- api_key_name = "test-api-key"
- response = await client.post("api/v1/api_key/", json={"name": api_key_name}, headers=logged_in_headers)
- assert response.status_code == 200
- data = response.json()
- assert "name" in data
- assert data["name"] == api_key_name
- assert "api_key" in data
- assert "**" not in data["api_key"]
-
-
-@pytest.mark.usefixtures("active_user")
-async def test_delete_api_key(client, logged_in_headers, api_key):
- api_key_id = api_key["id"]
- response = await client.delete(f"api/v1/api_key/{api_key_id}", headers=logged_in_headers)
- assert response.status_code == 200
- data = response.json()
- assert data["detail"] == "API Key deleted"
- # Optionally, add a follow-up check to ensure that the key is actually removed from the database
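Two of the assertions above encode a show-once policy: the creation response contains the full key (`"**" not in data["api_key"]`) while every listing is masked (`"**" in api_key["api_key"]`). An illustrative sketch of that policy — not Langflow's implementation — persisting only a hash and masking for display:

```python
import hashlib
import secrets


def create_api_key() -> tuple[str, str]:
    raw = secrets.token_urlsafe(32)
    stored = hashlib.sha256(raw.encode()).hexdigest()  # only the hash is persisted
    return raw, stored


def masked(raw: str) -> str:
    return f"{raw[:4]}**"


raw_key, _ = create_api_key()
assert "**" not in raw_key      # full key is visible once, at creation time
assert "**" in masked(raw_key)  # every later listing shows the masked form
```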
diff --git a/src/backend/tests/unit/test_async_helpers.py b/src/backend/tests/unit/test_async_helpers.py
deleted file mode 100644
index e69d3cb67c45..000000000000
--- a/src/backend/tests/unit/test_async_helpers.py
+++ /dev/null
@@ -1,196 +0,0 @@
-"""Tests for async_helpers.py functions."""
-
-import asyncio
-import threading
-import time
-from unittest.mock import patch
-
-import pytest
-
-from lfx.utils.async_helpers import run_until_complete
-
-
-class TestRunUntilComplete:
- """Test the run_until_complete function."""
-
- def test_run_until_complete_no_running_loop(self):
- """Test run_until_complete when no event loop is running."""
-
- async def simple_coro():
- return "test_result"
-
- # Should work when no loop is running
- result = run_until_complete(simple_coro())
- assert result == "test_result"
-
- def test_run_until_complete_simple_coro_with_running_loop(self):
- """Test run_until_complete with a simple coroutine when loop is running."""
-
- async def simple_coro():
- return "from_thread"
-
- async def main_test():
- # This should work with our fix - runs in separate thread
- return run_until_complete(simple_coro())
-
- result = asyncio.run(main_test())
- assert result == "from_thread"
-
- def test_run_until_complete_complex_coro_with_running_loop(self):
- """Test run_until_complete with a complex async coroutine when loop is running."""
-
- async def complex_coro():
- await asyncio.sleep(0.01) # This requires event loop cooperation
- return "complex_result"
-
- async def main_test():
-            # This would deadlock with the old implementation, which called
-            # run_until_complete on the already-running loop
- return run_until_complete(complex_coro())
-
- result = asyncio.run(main_test())
- assert result == "complex_result"
-
- def test_run_until_complete_with_exception_in_new_thread(self):
- """Test that exceptions in the new thread are properly propagated."""
-
- async def failing_coro():
- msg = "Test exception"
- raise ValueError(msg)
-
- async def main_test():
- with pytest.raises(ValueError, match="Test exception"):
- run_until_complete(failing_coro())
-
- asyncio.run(main_test())
-
- def test_run_until_complete_preserves_return_value(self):
- """Test that complex return values are preserved across threads."""
-
- async def return_complex():
- return {"key": "value", "list": [1, 2, 3], "nested": {"inner": "data"}}
-
- async def main_test():
- return run_until_complete(return_complex())
-
- result = asyncio.run(main_test())
- expected = {"key": "value", "list": [1, 2, 3], "nested": {"inner": "data"}}
- assert result == expected
-
- def test_run_until_complete_thread_isolation(self):
- """Test that thread-local data is properly isolated."""
- # Set up thread-local storage
- local_data = threading.local()
- local_data.value = "main_thread"
-
- async def check_thread_isolation():
- # This should NOT have access to main thread's local data
- try:
- return getattr(local_data, "value", "no_value")
- except AttributeError:
- return "no_value"
-
- async def main_test():
- # Confirm main thread has the value
- assert getattr(local_data, "value", None) == "main_thread"
-
- # Check that new thread doesn't have access
- return run_until_complete(check_thread_isolation())
-
- result = asyncio.run(main_test())
- assert result == "no_value" # Thread isolation working
-
- def test_run_until_complete_concurrent_execution(self):
- """Test that multiple concurrent calls work correctly."""
-
- async def delayed_coro(delay, value):
- await asyncio.sleep(delay)
- return f"result_{value}"
-
- async def main_test():
- # Run multiple coroutines concurrently
- import concurrent.futures
-
- with concurrent.futures.ThreadPoolExecutor() as executor:
- futures = [executor.submit(run_until_complete, delayed_coro(0.01, i)) for i in range(3)]
-
- return [f.result() for f in futures]
-
- results = asyncio.run(main_test())
- expected = ["result_0", "result_1", "result_2"]
- assert results == expected
-
- def test_run_until_complete_performance_impact(self):
- """Test that the performance impact is reasonable."""
-
- async def quick_coro():
- return "quick"
-
- async def main_test():
- # Time multiple executions
- start_time = time.time()
-
- for _ in range(10):
- result = run_until_complete(quick_coro())
- assert result == "quick"
-
- end_time = time.time()
- return end_time - start_time
-
- duration = asyncio.run(main_test())
-
- # Should complete 10 executions in reasonable time (less than 1 second)
- assert duration < 1.0, f"Performance test took too long: {duration}s"
-
- def test_run_until_complete_nested_async_operations(self):
- """Test with nested async operations that require event loop."""
-
- async def inner_async():
- await asyncio.sleep(0.001)
- return "inner"
-
- async def outer_async():
- # This creates tasks that need event loop scheduling
- tasks = [asyncio.create_task(inner_async()) for _ in range(3)]
- return await asyncio.gather(*tasks)
-
- async def main_test():
-            # This would definitely deadlock with the old implementation
- return run_until_complete(outer_async())
-
- result = asyncio.run(main_test())
- assert result == ["inner", "inner", "inner"]
-
- def test_run_until_complete_with_timeout(self):
- """Test that timeouts work correctly in the new thread."""
-
- async def slow_coro():
- await asyncio.sleep(10) # Very long delay
- return "should_not_reach"
-
- async def timeout_coro():
- try:
- await asyncio.wait_for(slow_coro(), timeout=0.01)
- except asyncio.TimeoutError:
- return "timeout_occurred"
- return "no_timeout"
-
- async def main_test():
- return run_until_complete(timeout_coro())
-
- result = asyncio.run(main_test())
- assert result == "timeout_occurred"
-
- def test_original_behavior_preserved_no_loop(self):
- """Test that original behavior is preserved when no loop is running."""
-
- async def test_coro():
- return "original_behavior"
-
- # Mock asyncio.run to verify it's called when no loop exists
- with patch("asyncio.run", return_value="mocked_result") as mock_run:
- result = run_until_complete(test_coro())
-
- # Should have called asyncio.run (original behavior)
- mock_run.assert_called_once()
- assert result == "mocked_result"
diff --git a/src/backend/tests/unit/test_chat_endpoint.py b/src/backend/tests/unit/test_chat_endpoint.py
deleted file mode 100644
index 3d2fd89dccd6..000000000000
--- a/src/backend/tests/unit/test_chat_endpoint.py
+++ /dev/null
@@ -1,434 +0,0 @@
-import asyncio
-import json
-import uuid
-from uuid import UUID
-
-import pytest
-from httpx import codes
-from langflow.services.database.models.flow import FlowUpdate
-
-from lfx.log.logger import logger
-from lfx.memory import aget_messages
-from tests.unit.build_utils import build_flow, consume_and_assert_stream, create_flow, get_build_events
-
-
-@pytest.mark.benchmark
-async def test_build_flow(client, json_memory_chatbot_no_llm, logged_in_headers):
- """Test the build flow endpoint with the new two-step process."""
- # First create the flow
- flow_id = await create_flow(client, json_memory_chatbot_no_llm, logged_in_headers)
-
- # Start the build and get job_id
- build_response = await build_flow(client, flow_id, logged_in_headers)
- job_id = build_response["job_id"]
- assert job_id is not None
-
- # Get the events stream
- events_response = await get_build_events(client, job_id, logged_in_headers)
- assert events_response.status_code == codes.OK
-
- # Consume and verify the events
- await consume_and_assert_stream(events_response, job_id)
-
-
-@pytest.mark.benchmark
-async def test_build_flow_from_request_data(client, json_memory_chatbot_no_llm, logged_in_headers):
- """Test building a flow from request data."""
- flow_id = await create_flow(client, json_memory_chatbot_no_llm, logged_in_headers)
- response = await client.get(f"api/v1/flows/{flow_id}", headers=logged_in_headers)
- flow_data = response.json()
-
- # Start the build and get job_id
- build_response = await build_flow(client, flow_id, logged_in_headers, json={"data": flow_data["data"]})
- job_id = build_response["job_id"]
-
- # Get the events stream
- events_response = await get_build_events(client, job_id, logged_in_headers)
- assert events_response.status_code == codes.OK
-
- # Consume and verify the events
- await consume_and_assert_stream(events_response, job_id)
- await check_messages(flow_id)
-
-
-async def test_build_flow_with_frozen_path(client, json_memory_chatbot_no_llm, logged_in_headers):
- """Test building a flow with a frozen path."""
- flow_id = await create_flow(client, json_memory_chatbot_no_llm, logged_in_headers)
-
- response = await client.get(f"api/v1/flows/{flow_id}", headers=logged_in_headers)
- flow_data = response.json()
- flow_data["data"]["nodes"][0]["data"]["node"]["frozen"] = True
-
- # Update the flow with frozen path
- response = await client.patch(
- f"api/v1/flows/{flow_id}",
- json=FlowUpdate(name="Flow", description="description", data=flow_data["data"]).model_dump(),
- headers=logged_in_headers,
- )
- response.raise_for_status()
-
- # Start the build and get job_id
- build_response = await build_flow(client, flow_id, logged_in_headers)
- job_id = build_response["job_id"]
-
- # Get the events stream
- events_response = await get_build_events(client, job_id, logged_in_headers)
- assert events_response.status_code == codes.OK
-
- # Consume and verify the events
- await consume_and_assert_stream(events_response, job_id)
- await check_messages(flow_id)
-
-
-async def check_messages(flow_id):
- if isinstance(flow_id, str):
- flow_id = UUID(flow_id)
- messages = await aget_messages(flow_id=flow_id, order="ASC")
- flow_id_str = str(flow_id)
- assert len(messages) == 2
- assert messages[0].session_id == flow_id_str
- assert messages[0].sender == "User"
- assert messages[0].sender_name == "User"
- assert messages[0].text == ""
- assert messages[1].session_id == flow_id_str
- assert messages[1].sender == "Machine"
- assert messages[1].sender_name == "AI"
-
-
-@pytest.mark.benchmark
-async def test_build_flow_invalid_job_id(client, logged_in_headers):
- """Test getting events for an invalid job ID."""
- invalid_job_id = str(uuid.uuid4())
- response = await get_build_events(client, invalid_job_id, logged_in_headers)
- assert response.status_code == codes.NOT_FOUND
- assert "Job not found" in response.json()["detail"]
-
-
-@pytest.mark.benchmark
-async def test_build_flow_invalid_flow_id(client, logged_in_headers):
- """Test starting a build with an invalid flow ID."""
- invalid_flow_id = uuid.uuid4()
- response = await client.post(f"api/v1/build/{invalid_flow_id}/flow", json={}, headers=logged_in_headers)
- assert response.status_code == codes.NOT_FOUND
-
-
-@pytest.mark.benchmark
-async def test_build_flow_start_only(client, json_memory_chatbot_no_llm, logged_in_headers):
- """Test only the build flow start endpoint."""
- # First create the flow
- flow_id = await create_flow(client, json_memory_chatbot_no_llm, logged_in_headers)
-
- # Start the build and get job_id
- build_response = await build_flow(client, flow_id, logged_in_headers)
-
- # Assert response structure
- assert "job_id" in build_response
- assert isinstance(build_response["job_id"], str)
- # Verify it's a valid UUID
- assert uuid.UUID(build_response["job_id"])
-
-
-@pytest.mark.benchmark
-async def test_build_flow_start_with_inputs(client, json_memory_chatbot_no_llm, logged_in_headers):
- """Test the build flow start endpoint with input data."""
- flow_id = await create_flow(client, json_memory_chatbot_no_llm, logged_in_headers)
-
- # Start build with some input data
- test_inputs = {"inputs": {"session": "test_session", "input_value": "test message"}}
-
- build_response = await build_flow(client, flow_id, logged_in_headers, json=test_inputs)
-
- assert "job_id" in build_response
- assert isinstance(build_response["job_id"], str)
- assert uuid.UUID(build_response["job_id"])
-
-
-@pytest.mark.benchmark
-async def test_build_flow_polling(client, json_memory_chatbot_no_llm, logged_in_headers):
- """Test the build flow endpoint with polling (non-streaming)."""
- # First create the flow
- flow_id = await create_flow(client, json_memory_chatbot_no_llm, logged_in_headers)
-
- # Start the build and get job_id
- build_response = await build_flow(client, flow_id, logged_in_headers)
- assert "job_id" in build_response, f"Expected job_id in build_response, got {build_response}"
- job_id = build_response["job_id"]
- assert job_id is not None
-
- # Create a response object that mimics a streaming response but uses polling
- class PollingResponse:
- def __init__(self, client, job_id, headers):
- self.client = client
- self.job_id = job_id
- self.headers = headers
- self.status_code = codes.OK
- self.max_total_events = 50 # Limit to prevent infinite loops
- self.max_empty_polls = 10 # Maximum number of empty polls before giving up
- self.poll_timeout = 3.0 # Timeout for each polling request
- self._closed = False
-
- async def aiter_lines(self):
- if self._closed:
- return
-
- try:
- empty_polls = 0
- total_events = 0
- end_event_found = False
-
- while (
- empty_polls < self.max_empty_polls
- and total_events < self.max_total_events
- and not end_event_found
- and not self._closed
- ):
- # Add Accept header for NDJSON
- headers = {**self.headers, "Accept": "application/x-ndjson"}
-
- try:
- # Set a timeout for the request
- response = await asyncio.wait_for(
- self.client.get(
- f"api/v1/build/{self.job_id}/events?event_delivery=polling",
- headers=headers,
- ),
- timeout=self.poll_timeout,
- )
-
- if response.status_code != codes.OK:
- break
-
- # Get the NDJSON response as text
- text = response.text
-
- # Skip if response is empty
- if not text.strip():
- empty_polls += 1
- await asyncio.sleep(0.1)
- continue
-
- # Reset empty polls counter since we got data
- empty_polls = 0
-
- # Process each line as an individual JSON object
- line_count = 0
- for line in text.splitlines():
- if not line.strip():
- continue
-
- line_count += 1
- total_events += 1
-
- # Check for end event with multiple possible formats
- if '"event":"end"' in line or '"event": "end"' in line:
- end_event_found = True
-
- # Validate it's proper JSON before yielding
- try:
- json.loads(line) # Test parse to ensure it's valid JSON
- yield line
- except json.JSONDecodeError as e:
- logger.debug(f"WARNING: Skipping invalid JSON: {line}")
- logger.debug(f"Error: {e}")
- # Don't yield invalid JSON, but continue processing other lines
-
- # If we had no events in this batch, count as empty poll
- if line_count == 0:
- empty_polls += 1
-
- # Add a small delay to prevent tight polling
- await asyncio.sleep(0.1)
-
- except asyncio.TimeoutError:
- logger.debug(f"WARNING: Polling request timed out after {self.poll_timeout}s")
- empty_polls += 1
- continue
-
- # If we hit the limit without finding the end event, log a warning
- if total_events >= self.max_total_events:
- logger.debug(
- f"WARNING: Reached maximum event limit ({self.max_total_events}) without finding end event"
- )
-
- if empty_polls >= self.max_empty_polls and not end_event_found:
- logger.debug(
- f"WARNING: Reached maximum empty polls ({self.max_empty_polls}) without finding end event"
- )
-
- except Exception as e:
- logger.debug(f"ERROR: Unexpected error during polling: {e!s}")
- raise
- finally:
- self._closed = True
-
- def close(self):
- self._closed = True
-
- polling_response = PollingResponse(client, job_id, logged_in_headers)
-
- # Use the same consume_and_assert_stream function to verify the events
- await consume_and_assert_stream(polling_response, job_id)
-
-
-@pytest.mark.benchmark
-async def test_cancel_build_unexpected_error(client, json_memory_chatbot_no_llm, logged_in_headers, monkeypatch):
- """Test handling of unexpected exceptions during flow build cancellation."""
- # First create the flow
- flow_id = await create_flow(client, json_memory_chatbot_no_llm, logged_in_headers)
-
- # Start the build and get job_id
- build_response = await build_flow(client, flow_id, logged_in_headers)
- job_id = build_response["job_id"]
- assert job_id is not None
-
- # Mock the cancel_flow_build function to raise an unexpected exception
- import langflow.api.v1.chat
-
- original_cancel_flow_build = langflow.api.v1.chat.cancel_flow_build
-
- async def mock_cancel_flow_build_with_error(*_args, **_kwargs):
- msg = "Unexpected error during cancellation"
- raise RuntimeError(msg)
-
- monkeypatch.setattr(langflow.api.v1.chat, "cancel_flow_build", mock_cancel_flow_build_with_error)
-
- try:
- # Try to cancel the build - should return 500 Internal Server Error
- cancel_response = await client.post(f"api/v1/build/{job_id}/cancel", headers=logged_in_headers)
- assert cancel_response.status_code == codes.INTERNAL_SERVER_ERROR
-
- # Verify the error message
- response_data = cancel_response.json()
- assert "detail" in response_data
- assert "Unexpected error during cancellation" in response_data["detail"]
- finally:
- # Restore the original function to avoid affecting other tests
- monkeypatch.setattr(langflow.api.v1.chat, "cancel_flow_build", original_cancel_flow_build)
-
-
-@pytest.mark.benchmark
-async def test_cancel_build_success(client, json_memory_chatbot_no_llm, logged_in_headers, monkeypatch):
- """Test successful cancellation of a flow build."""
- # First create the flow
- flow_id = await create_flow(client, json_memory_chatbot_no_llm, logged_in_headers)
-
- # Start the build and get job_id
- build_response = await build_flow(client, flow_id, logged_in_headers)
- job_id = build_response["job_id"]
- assert job_id is not None
-
- # Mock the cancel_flow_build function to simulate a successful cancellation
- import langflow.api.v1.chat
-
- original_cancel_flow_build = langflow.api.v1.chat.cancel_flow_build
-
- async def mock_successful_cancel_flow_build(*_args, **_kwargs):
- return True # Return True to indicate successful cancellation
-
- monkeypatch.setattr(langflow.api.v1.chat, "cancel_flow_build", mock_successful_cancel_flow_build)
-
- try:
- # Try to cancel the build (should return success)
- cancel_response = await client.post(f"api/v1/build/{job_id}/cancel", headers=logged_in_headers)
- assert cancel_response.status_code == codes.OK
-
- # Verify the response structure indicates success
- response_data = cancel_response.json()
- assert "success" in response_data
- assert "message" in response_data
- assert response_data["success"] is True
- assert "cancelled successfully" in response_data["message"].lower()
- finally:
- # Restore the original function to avoid affecting other tests
- monkeypatch.setattr(langflow.api.v1.chat, "cancel_flow_build", original_cancel_flow_build)
-
-
-@pytest.mark.benchmark
-async def test_cancel_nonexistent_build(client, logged_in_headers):
- """Test cancelling a non-existent flow build."""
- # Generate a random job_id that doesn't exist
- invalid_job_id = str(uuid.uuid4())
-
- # Try to cancel a non-existent build
- response = await client.post(f"api/v1/build/{invalid_job_id}/cancel", headers=logged_in_headers)
- assert response.status_code == codes.NOT_FOUND
- assert "Job not found" in response.json()["detail"]
-
-
-@pytest.mark.benchmark
-async def test_cancel_build_failure(client, json_memory_chatbot_no_llm, logged_in_headers, monkeypatch):
- """Test handling of cancellation failure."""
- # First create the flow
- flow_id = await create_flow(client, json_memory_chatbot_no_llm, logged_in_headers)
-
- # Start the build and get job_id
- build_response = await build_flow(client, flow_id, logged_in_headers)
- job_id = build_response["job_id"]
- assert job_id is not None
-
- # Mock the cancel_flow_build function to simulate a failure
- # The import path in monkeypatch should match exactly how it's imported in the application
- import langflow.api.v1.chat
-
- original_cancel_flow_build = langflow.api.v1.chat.cancel_flow_build
-
- async def mock_cancel_flow_build(*_args, **_kwargs):
- return False # Return False to indicate cancellation failure
-
- monkeypatch.setattr(langflow.api.v1.chat, "cancel_flow_build", mock_cancel_flow_build)
-
- try:
- # Try to cancel the build (should return failure but success=False)
- cancel_response = await client.post(f"api/v1/build/{job_id}/cancel", headers=logged_in_headers)
- assert cancel_response.status_code == codes.OK
-
- # Verify the response structure indicates failure
- response_data = cancel_response.json()
- assert "success" in response_data
- assert "message" in response_data
- assert response_data["success"] is False
- assert "Failed to cancel" in response_data["message"]
- finally:
- # Restore the original function to avoid affecting other tests
- monkeypatch.setattr(langflow.api.v1.chat, "cancel_flow_build", original_cancel_flow_build)
-
-
-@pytest.mark.benchmark
-async def test_cancel_build_with_cancelled_error(client, json_memory_chatbot_no_llm, logged_in_headers, monkeypatch):
- """Test handling of CancelledError during cancellation (should be treated as failure)."""
- # First create the flow
- flow_id = await create_flow(client, json_memory_chatbot_no_llm, logged_in_headers)
-
- # Start the build and get job_id
- build_response = await build_flow(client, flow_id, logged_in_headers)
- job_id = build_response["job_id"]
- assert job_id is not None
-
- # Mock the cancel_flow_build function to raise CancelledError
- import asyncio
-
- import langflow.api.v1.chat
-
- original_cancel_flow_build = langflow.api.v1.chat.cancel_flow_build
-
- async def mock_cancel_flow_build_with_cancelled_error(*_args, **_kwargs):
- msg = "Task cancellation failed"
- raise asyncio.CancelledError(msg)
-
- monkeypatch.setattr(langflow.api.v1.chat, "cancel_flow_build", mock_cancel_flow_build_with_cancelled_error)
-
- try:
- # Try to cancel the build - should return failure when CancelledError is raised
- # since our implementation treats CancelledError as a failed cancellation
- cancel_response = await client.post(f"api/v1/build/{job_id}/cancel", headers=logged_in_headers)
- assert cancel_response.status_code == codes.OK
-
- # Verify the response structure indicates failure
- response_data = cancel_response.json()
- assert "success" in response_data
- assert "message" in response_data
- assert response_data["success"] is False
- assert "failed to cancel" in response_data["message"].lower()
- finally:
- # Restore the original function to avoid affecting other tests
- monkeypatch.setattr(langflow.api.v1.chat, "cancel_flow_build", original_cancel_flow_build)
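The `PollingResponse` helper above tolerates blank and partially written lines while draining the NDJSON events endpoint. The core of that parsing, extracted as a standalone sketch (event names here are illustrative only):

```python
import json


def iter_ndjson(text: str):
    """Yield parsed events from an NDJSON payload, skipping blank or invalid lines."""
    for line in text.splitlines():
        if not line.strip():
            continue
        try:
            yield json.loads(line)
        except json.JSONDecodeError:
            continue  # tolerate a partially written line; the next poll retries


events = list(iter_ndjson('{"event": "add_message"}\n\n{"event": "end"}\n'))
assert [e["event"] for e in events] == ["add_message", "end"]
```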
diff --git a/src/backend/tests/unit/test_cli.py b/src/backend/tests/unit/test_cli.py
deleted file mode 100644
index f476e56bc3bb..000000000000
--- a/src/backend/tests/unit/test_cli.py
+++ /dev/null
@@ -1,148 +0,0 @@
-import socket
-import threading
-import time
-from unittest.mock import patch
-
-import pytest
-import typer
-from langflow.__main__ import _create_superuser, app
-
-from lfx.services import deps
-
-
-@pytest.fixture(scope="module")
-def default_settings():
- return [
- "--backend-only",
- "--no-open-browser",
- ]
-
-
-def get_free_port():
- with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
- s.bind(("", 0))
- return s.getsockname()[1]
-
-
-def run_flow(runner, port, components_path, default_settings):
- args = [
- "run",
- "--port",
- str(port),
- "--components-path",
- str(components_path),
- *default_settings,
- ]
- result = runner.invoke(app, args)
- if result.exit_code != 0:
- msg = f"CLI failed with exit code {result.exit_code}: {result.output}"
- raise RuntimeError(msg)
-
-
-def test_components_path(runner, default_settings, tmp_path):
- # create a "components" folder
- temp_dir = tmp_path / "components"
- temp_dir.mkdir(exist_ok=True)
-
- port = get_free_port()
-
- thread = threading.Thread(
- target=run_flow,
- args=(runner, port, temp_dir, default_settings),
- daemon=True,
- )
- thread.start()
-
- # Give the server some time to start
- time.sleep(5)
-
- settings_service = deps.get_settings_service()
- assert str(temp_dir) in settings_service.settings.components_path
-
-
-@pytest.mark.xdist_group(name="serial-superuser-tests")
-class TestSuperuserCommand:
- """Deterministic tests for the superuser CLI command."""
-
- @pytest.mark.asyncio
- async def test_additional_superuser_requires_auth_production(self, client, active_super_user): # noqa: ARG002
- """Test additional superuser creation requires authentication in production."""
- # We already have active_super_user from the fixture, so we're not in first setup
- with (
- patch("langflow.services.deps.get_settings_service") as mock_settings,
- patch("langflow.__main__.get_settings_service") as mock_settings2,
- ):
- # Configure settings for production mode (AUTO_LOGIN=False)
- mock_auth_settings = type("MockAuthSettings", (), {"AUTO_LOGIN": False, "ENABLE_SUPERUSER_CLI": True})()
- mock_settings.return_value.auth_settings = mock_auth_settings
- mock_settings2.return_value.auth_settings = mock_auth_settings
-
- # Try to create a superuser without auth - should fail
- with pytest.raises(typer.Exit) as exc_info:
- await _create_superuser("newuser", "newpass", None)
-
- assert exc_info.value.exit_code == 1
-
- @pytest.mark.asyncio
- async def test_additional_superuser_blocked_in_auto_login_mode(self, client, active_super_user): # noqa: ARG002
- """Test additional superuser creation blocked when AUTO_LOGIN=true."""
- # We already have active_super_user from the fixture, so we're not in first setup
- with (
- patch("langflow.services.deps.get_settings_service") as mock_settings,
- patch("langflow.__main__.get_settings_service") as mock_settings2,
- ):
- # Configure settings for AUTO_LOGIN mode
- mock_auth_settings = type("MockAuthSettings", (), {"AUTO_LOGIN": True, "ENABLE_SUPERUSER_CLI": True})()
- mock_settings.return_value.auth_settings = mock_auth_settings
- mock_settings2.return_value.auth_settings = mock_auth_settings
-
- # Try to create a superuser - should fail
- with pytest.raises(typer.Exit) as exc_info:
- await _create_superuser("newuser", "newpass", None)
-
- assert exc_info.value.exit_code == 1
-
- @pytest.mark.asyncio
- async def test_cli_disabled_blocks_creation(self, client): # noqa: ARG002
- """Test ENABLE_SUPERUSER_CLI=false blocks superuser creation."""
- with (
- patch("langflow.services.deps.get_settings_service") as mock_settings,
- patch("langflow.__main__.get_settings_service") as mock_settings2,
- ):
- mock_auth_settings = type("MockAuthSettings", (), {"AUTO_LOGIN": True, "ENABLE_SUPERUSER_CLI": False})()
- mock_settings.return_value.auth_settings = mock_auth_settings
- mock_settings2.return_value.auth_settings = mock_auth_settings
-
- # Try to create a superuser - should fail
- with pytest.raises(typer.Exit) as exc_info:
- await _create_superuser("admin", "password", None)
-
- assert exc_info.value.exit_code == 1
-
- @pytest.mark.skip(reason="Skip -- default superuser is created by initialize_services() function")
- @pytest.mark.asyncio
- async def test_auto_login_forces_default_credentials(self, client):
- """Test AUTO_LOGIN=true forces default credentials."""
- # Since client fixture already creates default user, we need to test in a clean DB scenario
- # But that's why this test is skipped - the behavior is already handled by initialize_services
-
- @pytest.mark.asyncio
- async def test_failed_auth_token_validation(self, client, active_super_user): # noqa: ARG002
- """Test failed superuser creation with invalid auth token."""
- # We already have active_super_user from the fixture, so we're not in first setup
- with (
- patch("langflow.services.deps.get_settings_service") as mock_settings,
- patch("langflow.__main__.get_settings_service") as mock_settings2,
- patch("langflow.__main__.get_current_user_by_jwt", side_effect=Exception("Invalid token")),
- patch("langflow.__main__.check_key", return_value=None),
- ):
- # Configure settings for production mode (AUTO_LOGIN=False)
- mock_auth_settings = type("MockAuthSettings", (), {"AUTO_LOGIN": False, "ENABLE_SUPERUSER_CLI": True})()
- mock_settings.return_value.auth_settings = mock_auth_settings
- mock_settings2.return_value.auth_settings = mock_auth_settings
-
- # Try to create a superuser with invalid token - should fail
- with pytest.raises(typer.Exit) as exc_info:
- await _create_superuser("newuser", "newpass", "invalid-token")
-
- assert exc_info.value.exit_code == 1
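`TestSuperuserCommand` builds throwaway settings doubles with `type(...)` rather than a mocking framework. A sketch of that pattern, with the gating logic reconstructed from what the tests assert; the real checks inside `_create_superuser` may differ:

```python
def make_auth_settings(*, auto_login: bool, enable_cli: bool):
    # type() builds a one-off class; instantiating it yields a bare object
    # whose attributes mimic the auth settings the CLI code reads.
    return type(
        "MockAuthSettings",
        (),
        {"AUTO_LOGIN": auto_login, "ENABLE_SUPERUSER_CLI": enable_cli},
    )()

def superuser_creation_allowed(auth_settings) -> bool:
    # Gating implied by the assertions above: blocked when the CLI flag is
    # off, and blocked for additional superusers under AUTO_LOGIN.
    return auth_settings.ENABLE_SUPERUSER_CLI and not auth_settings.AUTO_LOGIN

assert superuser_creation_allowed(make_auth_settings(auto_login=False, enable_cli=True))
assert not superuser_creation_allowed(make_auth_settings(auto_login=True, enable_cli=True))
assert not superuser_creation_allowed(make_auth_settings(auto_login=True, enable_cli=False))
```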
diff --git a/src/backend/tests/unit/test_code_hash.py b/src/backend/tests/unit/test_code_hash.py
deleted file mode 100644
index 21e437002654..000000000000
--- a/src/backend/tests/unit/test_code_hash.py
+++ /dev/null
@@ -1,60 +0,0 @@
-"""Test code hash and module metadata functionality."""
-
-import pytest
-from langflow.interface.components import import_langflow_components
-
-
-@pytest.mark.asyncio
-async def test_component_metadata_has_code_hash():
- """Test that built-in components have valid module and code_hash metadata."""
- result = await import_langflow_components()
- assert result is not None
- assert "components" in result
- assert len(result["components"]) > 0
-
- # Find first component to test
- sample_category = None
- sample_component = None
- for category, components in result["components"].items():
- if components:
- sample_category = category
- sample_component = next(iter(components.values()))
- break
- assert sample_component is not None, "No components found to test"
-
- # Test metadata presence - metadata should be in the 'metadata' sub-field
- assert "metadata" in sample_component, f"Metadata field missing from component in {sample_category}"
- metadata = sample_component["metadata"]
-
- assert "module" in metadata, f"Module metadata missing from component in {sample_category}"
- assert "code_hash" in metadata, f"Code hash metadata missing from component in {sample_category}"
-
- # Test that values are valid
- module_name = metadata["module"]
- code_hash = metadata["code_hash"]
- assert isinstance(module_name, str), f"Invalid module name type: {type(module_name)}"
- assert module_name, f"Invalid module name: {module_name}"
- assert isinstance(code_hash, str), f"Invalid code hash type: {type(code_hash)}"
- assert len(code_hash) == 12, f"Invalid code hash: {code_hash} (should be 12 chars)"
-
-
-@pytest.mark.asyncio
-async def test_code_hash_uniqueness():
- """Test that different built-in components have different code hashes."""
- result = await import_langflow_components()
- all_hashes = []
- for components in result["components"].values():
- for comp in components.values():
- metadata = comp.get("metadata", {})
- if metadata.get("code_hash"):
- all_hashes.append(metadata["code_hash"])
-
- # Check that we have some components with metadata
- assert len(all_hashes) > 0, "No components with code hashes found"
- # Check that we have reasonable uniqueness in hashes
- unique_hashes = len(set(all_hashes))
- total_hashes = len(all_hashes)
- uniqueness_ratio = unique_hashes / total_hashes
- # Should have high uniqueness (most components have different code)
- # Adjusted threshold to 90% to account for legitimate code sharing between similar components
- assert uniqueness_ratio > 0.90, f"Hash uniqueness too low: {uniqueness_ratio:.1%}"
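These tests pin down only the shape of `code_hash`: a 12-character lowercase hex digest of the component source. A plausible sketch, assuming a truncated SHA-256; the actual helper behind `import_langflow_components` may use a different digest:

```python
import hashlib

def code_hash(source: str) -> str:
    # utf-8 encoding keeps unicode sources (emoji, CJK comments) hashable,
    # matching the unicode test in test_custom_component.py below.
    return hashlib.sha256(source.encode("utf-8")).hexdigest()[:12]

h = code_hash("class VersionComponent:\n    version = '1.0'\n")
assert len(h) == 12
assert all(c in "0123456789abcdef" for c in h)
```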
diff --git a/src/backend/tests/unit/test_custom_component.py b/src/backend/tests/unit/test_custom_component.py
deleted file mode 100644
index 965d95d0c2d2..000000000000
--- a/src/backend/tests/unit/test_custom_component.py
+++ /dev/null
@@ -1,700 +0,0 @@
-import ast
-import types
-from pathlib import Path
-from textwrap import dedent
-
-import pytest
-from langchain_core.documents import Document
-
-from lfx.custom import Component, CustomComponent
-from lfx.custom.code_parser.code_parser import CodeParser, CodeSyntaxError
-from lfx.custom.custom_component.base_component import BaseComponent, ComponentCodeNullError
-from lfx.custom.utils import build_custom_component_template
-
-
-@pytest.fixture
-def code_component_with_multiple_outputs():
- path = Path(__file__).parent.parent / "data" / "component_multiple_outputs.py"
- code = path.read_text(encoding="utf-8")
- return Component(_code=code)
-
-
-code_default = """
-from langflow.custom import CustomComponent
-
-from lfx.field_typing import BaseLanguageModel
-from langchain.chains import LLMChain
-from langchain.prompts import PromptTemplate
-from langchain_core.documents import Document
-
-import requests
-
-class YourComponent(CustomComponent):
- display_name: str = "Your Component"
- description: str = "Your description"
- field_config = { "url": { "multiline": True, "required": True } }
-
- def build(self, url: str, llm: BaseLanguageModel) -> Document:
- return Document(page_content="Hello World")
-"""
-
-
-def test_code_parser_init():
- """Test the initialization of the CodeParser class."""
- parser = CodeParser(code_default)
- assert parser.code == code_default
-
-
-def test_code_parser_get_tree():
- """Test the __get_tree method of the CodeParser class."""
- parser = CodeParser(code_default)
- tree = parser.get_tree()
- assert isinstance(tree, ast.AST)
-
-
-def test_code_parser_syntax_error():
- """Test the __get_tree method raises the CodeSyntaxError when given incorrect syntax."""
- code_syntax_error = "zzz import os"
-
- parser = CodeParser(code_syntax_error)
- with pytest.raises(CodeSyntaxError):
- parser.get_tree()
-
-
-def test_component_init():
- """Test the initialization of the Component class."""
- component = BaseComponent(_code=code_default, _function_entrypoint_name="build")
- assert component._code == code_default
- assert component._function_entrypoint_name == "build"
-
-
-def test_component_get_code_tree():
- """Test the get_code_tree method of the Component class."""
- component = BaseComponent(_code=code_default, _function_entrypoint_name="build")
- tree = component.get_code_tree(component._code)
- assert "imports" in tree
-
-
-def test_component_code_null_error():
- """Test the get_function method raises the ComponentCodeNullError when the code is empty."""
- component = BaseComponent(_code="", _function_entrypoint_name="")
- with pytest.raises(ComponentCodeNullError):
- component.get_function()
-
-
-def test_custom_component_init():
- """Test the initialization of the CustomComponent class."""
- function_entrypoint_name = "build"
-
- custom_component = CustomComponent(_code=code_default, _function_entrypoint_name=function_entrypoint_name)
- assert custom_component._code == code_default
- assert custom_component._function_entrypoint_name == function_entrypoint_name
-
-
-def test_custom_component_build_template_config():
- """Test the build_template_config property of the CustomComponent class."""
- custom_component = CustomComponent(_code=code_default, _function_entrypoint_name="build")
- config = custom_component.build_template_config()
- assert isinstance(config, dict)
-
-
-def test_custom_component_get_function():
- """Test the get_function property of the CustomComponent class."""
- custom_component = CustomComponent(_code="def build(): pass", _function_entrypoint_name="build")
- my_function = custom_component.get_function()
- assert isinstance(my_function, types.FunctionType)
-
-
-def test_code_parser_parse_imports_import():
- """Test the parse_imports method of the CodeParser class with an import statement."""
- parser = CodeParser(code_default)
- tree = parser.get_tree()
- for node in ast.walk(tree):
- if isinstance(node, ast.Import):
- parser.parse_imports(node)
- assert "requests" in parser.data["imports"]
-
-
-def test_code_parser_parse_imports_importfrom():
- """Test the parse_imports method of the CodeParser class with an import from statement."""
- parser = CodeParser("from os import path")
- tree = parser.get_tree()
- for node in ast.walk(tree):
- if isinstance(node, ast.ImportFrom):
- parser.parse_imports(node)
- assert ("os", "path") in parser.data["imports"]
-
-
-def test_code_parser_parse_functions():
- """Test the parse_functions method of the CodeParser class."""
- parser = CodeParser("def test(): pass")
- tree = parser.get_tree()
- for node in ast.walk(tree):
- if isinstance(node, ast.FunctionDef):
- parser.parse_functions(node)
- assert len(parser.data["functions"]) == 1
- assert parser.data["functions"][0]["name"] == "test"
-
-
-def test_code_parser_parse_classes():
- """Test the parse_classes method of the CodeParser class."""
- parser = CodeParser("from langflow.custom import Component\n\nclass Test(Component): pass")
- tree = parser.get_tree()
- for node in ast.walk(tree):
- if isinstance(node, ast.ClassDef):
- parser.parse_classes(node)
- assert len(parser.data["classes"]) == 1
- assert parser.data["classes"][0]["name"] == "Test"
-
-
-def test_code_parser_parse_classes_raises():
- """Test the parse_classes method of the CodeParser class."""
- parser = CodeParser("class Test: pass")
- tree = parser.get_tree()
- for node in ast.walk(tree):
- if isinstance(node, ast.ClassDef):
- with pytest.raises(TypeError):
- parser.parse_classes(node)
-
-
-def test_code_parser_parse_global_vars():
- """Test the parse_global_vars method of the CodeParser class."""
- parser = CodeParser("x = 1")
- tree = parser.get_tree()
- for node in ast.walk(tree):
- if isinstance(node, ast.Assign):
- parser.parse_global_vars(node)
- assert len(parser.data["global_vars"]) == 1
- assert parser.data["global_vars"][0]["targets"] == ["x"]
-
-
-def test_component_get_function_valid():
- """Test the get_function method of the Component class with valid code and function_entrypoint_name."""
- component = BaseComponent(_code="def build(): pass", _function_entrypoint_name="build")
- my_function = component.get_function()
- assert callable(my_function)
-
-
-def test_custom_component_get_function_entrypoint_args():
- """Test the get_function_entrypoint_args property of the CustomComponent class."""
- custom_component = CustomComponent(_code=code_default, _function_entrypoint_name="build")
- args = custom_component.get_function_entrypoint_args
- assert len(args) == 3
- assert args[0]["name"] == "self"
- assert args[1]["name"] == "url"
- assert args[2]["name"] == "llm"
-
-
-def test_custom_component_get_function_entrypoint_return_type():
- """Test the get_function_entrypoint_return_type property of the CustomComponent class."""
- custom_component = CustomComponent(_code=code_default, _function_entrypoint_name="build")
- return_type = custom_component._get_function_entrypoint_return_type
- assert return_type == [Document]
-
-
-def test_custom_component_get_main_class_name():
- """Test the get_main_class_name property of the CustomComponent class."""
- custom_component = CustomComponent(_code=code_default, _function_entrypoint_name="build")
- class_name = custom_component.get_main_class_name
- assert class_name == "YourComponent"
-
-
-def test_custom_component_get_function_valid():
- """Test the get_function property of the CustomComponent class with valid code and function_entrypoint_name."""
- custom_component = CustomComponent(_code="def build(): pass", _function_entrypoint_name="build")
- my_function = custom_component.get_function
- assert callable(my_function)
-
-
-def test_code_parser_parse_arg_no_annotation():
- """Test the parse_arg method of the CodeParser class without an annotation."""
- parser = CodeParser("")
- arg = ast.arg(arg="x", annotation=None)
- result = parser.parse_arg(arg, None)
- assert result["name"] == "x"
- assert "type" not in result
-
-
-def test_code_parser_parse_arg_with_annotation():
- """Test the parse_arg method of the CodeParser class with an annotation."""
- parser = CodeParser("")
- arg = ast.arg(arg="x", annotation=ast.Name(id="int", ctx=ast.Load()))
- result = parser.parse_arg(arg, None)
- assert result["name"] == "x"
- assert result["type"] == "int"
-
-
-def test_code_parser_parse_callable_details_no_args():
- """Test the parse_callable_details method of the CodeParser class with a function with no arguments."""
- parser = CodeParser("")
- node = ast.FunctionDef(
- name="test",
- args=ast.arguments(args=[], vararg=None, kwonlyargs=[], kw_defaults=[], kwarg=None, defaults=[]),
- body=[],
- decorator_list=[],
- returns=None,
- )
- result = parser.parse_callable_details(node)
- assert result["name"] == "test"
- assert len(result["args"]) == 0
-
-
-def test_code_parser_parse_assign():
- """Test the parse_assign method of the CodeParser class."""
- parser = CodeParser("")
- stmt = ast.Assign(targets=[ast.Name(id="x", ctx=ast.Store())], value=ast.Num(n=1))
- result = parser.parse_assign(stmt)
- assert result["name"] == "x"
- assert result["value"] == "1"
-
-
-def test_code_parser_parse_ann_assign():
- """Test the parse_ann_assign method of the CodeParser class."""
- parser = CodeParser("")
- stmt = ast.AnnAssign(
- target=ast.Name(id="x", ctx=ast.Store()),
- annotation=ast.Name(id="int", ctx=ast.Load()),
- value=ast.Num(n=1),
- simple=1,
- )
- result = parser.parse_ann_assign(stmt)
- assert result["name"] == "x"
- assert result["value"] == "1"
- assert result["annotation"] == "int"
-
-
-def test_code_parser_parse_function_def_not_init():
- """Test the parse_function_def method of the CodeParser class with a function that is not __init__."""
- parser = CodeParser("")
- stmt = ast.FunctionDef(
- name="test",
- args=ast.arguments(args=[], vararg=None, kwonlyargs=[], kw_defaults=[], kwarg=None, defaults=[]),
- body=[],
- decorator_list=[],
- returns=None,
- )
- result, is_init = parser.parse_function_def(stmt)
- assert result["name"] == "test"
- assert not is_init
-
-
-def test_code_parser_parse_function_def_init():
- """Test the parse_function_def method of the CodeParser class with an __init__ function."""
- parser = CodeParser("")
- stmt = ast.FunctionDef(
- name="__init__",
- args=ast.arguments(args=[], vararg=None, kwonlyargs=[], kw_defaults=[], kwarg=None, defaults=[]),
- body=[],
- decorator_list=[],
- returns=None,
- )
- result, is_init = parser.parse_function_def(stmt)
- assert result["name"] == "__init__"
- assert is_init
-
-
-def test_component_get_code_tree_syntax_error():
- """Test the get_code_tree method of the Component class raises the CodeSyntaxError when given incorrect syntax."""
- component = BaseComponent(_code="import os as", _function_entrypoint_name="build")
- with pytest.raises(CodeSyntaxError):
- component.get_code_tree(component._code)
-
-
-def test_custom_component_class_template_validation_no_code():
- """Test CustomComponent._class_template_validation raises the HTTPException when the code is None."""
- custom_component = CustomComponent(_code=None, _function_entrypoint_name="build")
- with pytest.raises(TypeError):
- custom_component.get_function()
-
-
-def test_custom_component_get_code_tree_syntax_error():
- """Test CustomComponent.get_code_tree raises the CodeSyntaxError when given incorrect syntax."""
- custom_component = CustomComponent(_code="import os as", _function_entrypoint_name="build")
- with pytest.raises(CodeSyntaxError):
- custom_component.get_code_tree(custom_component._code)
-
-
-def test_custom_component_get_function_entrypoint_args_no_args():
- """Test CustomComponent.get_function_entrypoint_args with a build method with no arguments."""
- my_code = """
-from langflow.custom import CustomComponent
-class MyMainClass(CustomComponent):
- def build():
- pass"""
-
- custom_component = CustomComponent(_code=my_code, _function_entrypoint_name="build")
- args = custom_component.get_function_entrypoint_args
- assert len(args) == 0
-
-
-def test_custom_component_get_function_entrypoint_return_type_no_return_type():
- """Test CustomComponent.get_function_entrypoint_return_type with a build method with no return type."""
- my_code = """
-from langflow.custom import CustomComponent
-class MyClass(CustomComponent):
- def build():
- pass"""
-
- custom_component = CustomComponent(_code=my_code, _function_entrypoint_name="build")
- return_type = custom_component._get_function_entrypoint_return_type
- assert return_type == []
-
-
-def test_custom_component_get_main_class_name_no_main_class():
- """Test the get_main_class_name property of the CustomComponent class when there is no main class."""
- my_code = """
-def build():
- pass"""
-
- custom_component = CustomComponent(_code=my_code, _function_entrypoint_name="build")
- class_name = custom_component.get_main_class_name
- assert class_name == ""
-
-
-def test_custom_component_build_not_implemented():
- """Test the build method of the CustomComponent class raises the NotImplementedError."""
- custom_component = CustomComponent(_code="def build(): pass", _function_entrypoint_name="build")
- with pytest.raises(NotImplementedError):
- custom_component.build()
-
-
-def test_build_config_no_code():
- component = CustomComponent(_code=None)
-
- assert component.get_function_entrypoint_args == []
- assert component._get_function_entrypoint_return_type == []
-
-
-@pytest.fixture
-def component():
- return CustomComponent(
- field_config={
- "fields": {
- "llm": {"type": "str"},
- "url": {"type": "str"},
- "year": {"type": "int"},
- }
- },
- )
-
-
-def test_build_config_return_type(component):
- config = component.build_config()
- assert isinstance(config, dict)
-
-
-def test_build_config_has_fields(component):
- config = component.build_config()
- assert "fields" in config
-
-
-def test_build_config_fields_dict(component):
- config = component.build_config()
- assert isinstance(config["fields"], dict)
-
-
-def test_build_config_field_keys(component):
- config = component.build_config()
- assert all(isinstance(key, str) for key in config["fields"])
-
-
-def test_build_config_field_values_dict(component):
- config = component.build_config()
- assert all(isinstance(value, dict) for value in config["fields"].values())
-
-
-def test_build_config_field_value_keys(component):
- config = component.build_config()
- field_values = config["fields"].values()
- assert all("type" in value for value in field_values)
-
-
-def test_custom_component_multiple_outputs(code_component_with_multiple_outputs):
-    frontend_node_dict, _ = build_custom_component_template(code_component_with_multiple_outputs)
-    assert frontend_node_dict["outputs"][0]["types"] == ["Text"]
-
-
-def test_custom_component_subclass_from_lctoolcomponent():
- # Import LCToolComponent and create a subclass
- code = dedent("""
- from lfx.base.langchain_utilities.model import LCToolComponent
- from langchain_core.tools import Tool
- class MyComponent(LCToolComponent):
- name: str = "MyComponent"
- description: str = "MyComponent"
-
- def build_tool(self) -> Tool:
- return Tool(name="MyTool", description="MyTool")
-
-        def run_model(self) -> Data:
- return Data(data="Hello World")
- """)
- component = Component(_code=code)
- frontend_node, _ = build_custom_component_template(component)
- assert "outputs" in frontend_node
- assert frontend_node["outputs"][0]["types"] != []
- assert frontend_node["outputs"][1]["types"] != []
-
-
-def test_build_custom_component_template_includes_metadata_with_module():
- """Test that build_custom_component_template includes metadata when module_name is provided."""
- code = dedent("""
- from lfx.custom import Component
- from lfx.inputs import MessageTextInput
- from lfx.template.field.base import Output
-
- class TestMetadataComponent(Component):
- display_name = "Test Metadata Component"
- description = "Test component for metadata"
-
- inputs = [
- MessageTextInput(display_name="Input", name="input_value"),
- ]
- outputs = [
- Output(display_name="Output", name="output", method="process_input"),
- ]
-
- def process_input(self) -> str:
- return f"Processed: {self.input_value}"
- """)
-
- component = Component(_code=code)
- frontend_node, _ = build_custom_component_template(component, module_name="test.module")
-
- # Verify metadata is present
- assert "metadata" in frontend_node
- metadata = frontend_node["metadata"]
-
- # Verify metadata contains required fields
- assert "module" in metadata
- assert "code_hash" in metadata
-
- # Verify metadata values
- assert metadata["module"] == "test.module"
- assert isinstance(metadata["code_hash"], str)
- assert len(metadata["code_hash"]) == 12
- assert all(c in "0123456789abcdef" for c in metadata["code_hash"])
-
-
-def test_build_custom_component_template_always_has_metadata():
- """Test that build_custom_component_template always generates metadata, even when module_name is None."""
- code = dedent("""
- from lfx.custom import Component
- from lfx.template.field.base import Output
-
- class TestAlwaysMetadata(Component):
- display_name = "Test Always Metadata"
-
- outputs = [
- Output(display_name="Output", name="output", method="get_result"),
- ]
-
- def get_result(self) -> str:
- return "test"
- """)
-
- component = Component(_code=code)
- frontend_node, _ = build_custom_component_template(component, module_name=None)
-
- # Metadata should ALWAYS be present
- assert "metadata" in frontend_node
- metadata = frontend_node["metadata"]
-
- assert "module" in metadata
- assert "code_hash" in metadata
-
- # Should generate default module name from display_name
- assert metadata["module"] == "custom_components.test_always_metadata"
- assert len(metadata["code_hash"]) == 12
-
-
-def test_build_custom_component_template_metadata_hash_changes():
- """Test that code hash changes when component code changes."""
- code_v1 = dedent("""
- from lfx.custom import Component
- from lfx.template.field.base import Output
-
- class VersionComponent(Component):
- display_name = "Version Component"
- version = "1.0"
-
- outputs = [
- Output(display_name="Output", name="output", method="get_version"),
- ]
-
- def get_version(self) -> str:
- return "version 1.0"
- """)
-
- code_v2 = dedent("""
- from lfx.custom import Component
- from lfx.template.field.base import Output
-
- class VersionComponent(Component):
- display_name = "Version Component"
- version = "2.0"
-
- outputs = [
- Output(display_name="Output", name="output", method="get_version"),
- ]
-
- def get_version(self) -> str:
- return "version 2.0"
- """)
-
- component_v1 = Component(_code=code_v1)
- component_v2 = Component(_code=code_v2)
-
- frontend_node_v1, _ = build_custom_component_template(component_v1, module_name="test.version")
- frontend_node_v2, _ = build_custom_component_template(component_v2, module_name="test.version")
-
- metadata_v1 = frontend_node_v1["metadata"]
- metadata_v2 = frontend_node_v2["metadata"]
-
- # Same module name
- assert metadata_v1["module"] == metadata_v2["module"]
-
- # Different code hashes
- assert metadata_v1["code_hash"] != metadata_v2["code_hash"]
-
-
-def test_build_custom_component_template_metadata_unicode():
- """Test that metadata generation works with unicode characters in code."""
- code = dedent("""
- from lfx.custom import Component
- from lfx.template.field.base import Output
-
- class UnicodeComponent(Component):
- display_name = "Unicode Test 🌟"
- description = "测试组件 with émojis"
-
- outputs = [
- Output(display_name="Output", name="output", method="get_unicode"),
- ]
-
- def get_unicode(self) -> str:
- # Comment with unicode: 你好世界 🚀
- return "Hello 世界!"
- """)
-
- component = Component(_code=code)
- frontend_node, _ = build_custom_component_template(component, module_name="unicode.test")
-
- # Verify metadata is present and valid
- metadata = frontend_node["metadata"]
- assert "module" in metadata
- assert "code_hash" in metadata
-
- # Verify hash is valid hexadecimal
- code_hash = metadata["code_hash"]
- assert len(code_hash) == 12
- assert all(c in "0123456789abcdef" for c in code_hash)
-
-
-def test_build_custom_component_template_component_always_has_metadata():
- """Test that build_custom_component_template always returns metadata for Component path."""
- code = dedent("""
- from lfx.custom import Component
- from lfx.inputs import MessageTextInput
- from lfx.template.field.base import Output
-
- class TestComponentMetadata(Component):
- display_name = "Test Component Metadata"
-
- inputs = [
- MessageTextInput(display_name="Input", name="input_value"),
- ]
- outputs = [
- Output(display_name="Output", name="output", method="process_input"),
- ]
-
- def process_input(self) -> str:
- return f"Processed: {self.input_value}"
- """)
-
- component = Component(_code=code)
- frontend_node, _ = build_custom_component_template(component, module_name=None)
-
- # Metadata should ALWAYS be present, even for Component without module_name
- assert "metadata" in frontend_node
- metadata = frontend_node["metadata"]
-
- assert "module" in metadata
- assert "code_hash" in metadata
-
- # Should generate default module name from display_name
- assert metadata["module"] == "custom_components.test_component_metadata"
- assert len(metadata["code_hash"]) == 12
-
-
-def test_metadata_always_returned_comprehensive():
- """Comprehensive test to verify metadata is ALWAYS returned in all scenarios."""
- # Test scenario 1: Component with module_name provided
- code1 = dedent("""
- from lfx.custom import Component
- from lfx.template.field.base import Output
-
- class TestWithModule(Component):
- display_name = "Test With Module"
-
- outputs = [
- Output(display_name="Output", name="output", method="get_result"),
- ]
-
- def get_result(self) -> str:
- return "with module"
- """)
-
- component1 = Component(_code=code1)
- frontend_node1, _ = build_custom_component_template(component1, module_name="explicit.module")
-
- assert "metadata" in frontend_node1
- assert frontend_node1["metadata"]["module"] == "explicit.module"
- assert "code_hash" in frontend_node1["metadata"]
- assert len(frontend_node1["metadata"]["code_hash"]) == 12
-
- # Test scenario 2: Component without module_name (should generate default)
- component2 = Component(_code=code1)
- frontend_node2, _ = build_custom_component_template(component2, module_name=None)
-
- assert "metadata" in frontend_node2
- assert frontend_node2["metadata"]["module"] == "custom_components.test_with_module"
- assert "code_hash" in frontend_node2["metadata"]
- assert len(frontend_node2["metadata"]["code_hash"]) == 12
-
- # Test scenario 3: Component with inputs and outputs
- code3 = dedent("""
- from lfx.custom import Component
- from lfx.inputs import MessageTextInput
- from lfx.template.field.base import Output
-
- class TestWithInputs(Component):
- display_name = "Test With Inputs"
-
- inputs = [
- MessageTextInput(display_name="Input", name="input_value"),
- ]
- outputs = [
- Output(display_name="Output", name="output", method="process_input"),
- ]
-
- def process_input(self) -> str:
- return f"Processed: {self.input_value}"
- """)
-
- component3 = Component(_code=code3)
- frontend_node3, _ = build_custom_component_template(component3, module_name="custom.explicit")
-
- assert "metadata" in frontend_node3
- assert frontend_node3["metadata"]["module"] == "custom.explicit"
- assert "code_hash" in frontend_node3["metadata"]
- assert len(frontend_node3["metadata"]["code_hash"]) == 12
-
- # Test scenario 4: Component without module_name (should generate default)
- component4 = Component(_code=code3)
- frontend_node4, _ = build_custom_component_template(component4, module_name=None)
-
- assert "metadata" in frontend_node4
- assert frontend_node4["metadata"]["module"] == "custom_components.test_with_inputs"
- assert "code_hash" in frontend_node4["metadata"]
- assert len(frontend_node4["metadata"]["code_hash"]) == 12
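Several metadata tests above assert a default module name of `custom_components.<snake_cased display_name>` whenever `module_name` is None. A minimal reconstruction consistent with those assertions; `default_module_name` is a hypothetical helper, and the real normalization inside `build_custom_component_template` may handle more edge cases:

```python
def default_module_name(display_name: str) -> str:
    # Hypothetical helper: lowercase the display name, join its words with
    # underscores, and prefix the custom_components namespace.
    slug = "_".join(display_name.strip().lower().split())
    return f"custom_components.{slug}"

assert default_module_name("Test Always Metadata") == "custom_components.test_always_metadata"
assert default_module_name("Test With Inputs") == "custom_components.test_with_inputs"
```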
diff --git a/src/backend/tests/unit/test_custom_component_with_client.py b/src/backend/tests/unit/test_custom_component_with_client.py
deleted file mode 100644
index ee8cbfeafbe1..000000000000
--- a/src/backend/tests/unit/test_custom_component_with_client.py
+++ /dev/null
@@ -1,33 +0,0 @@
-import pytest
-from langflow.custom.custom_component.custom_component import CustomComponent
-
-from lfx.field_typing.constants import Data
-
-
-@pytest.fixture
-def component(
- client, # noqa: ARG001
- active_user,
-):
- return CustomComponent(
- user_id=active_user.id,
- field_config={
- "fields": {
- "llm": {"type": "str"},
- "url": {"type": "str"},
- "year": {"type": "int"},
- }
- },
- )
-
-
-async def test_list_flows_flow_objects(component):
- flows = await component.alist_flows()
- are_flows = [isinstance(flow, Data) for flow in flows]
- flow_types = [type(flow) for flow in flows]
- assert all(are_flows), f"Expected all flows to be Data objects, got {flow_types}"
-
-
-async def test_list_flows_return_type(component):
- flows = await component.alist_flows()
- assert isinstance(flows, list)
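The `alist_flows` tests collect per-element `isinstance` results before asserting, so a failure message can report the offending types. A self-contained sketch of that assertion style; `Data` here is a stand-in dataclass, not the real `lfx.field_typing.constants.Data`:

```python
from dataclasses import dataclass

@dataclass
class Data:
    # Stand-in for lfx.field_typing.constants.Data, for illustration only.
    data: str

def assert_all_data(flows: list) -> None:
    # Collect per-element results first so the failure message shows
    # exactly which types came back instead of a bare False.
    are_flows = [isinstance(flow, Data) for flow in flows]
    flow_types = [type(flow).__name__ for flow in flows]
    assert all(are_flows), f"Expected all flows to be Data objects, got {flow_types}"

assert_all_data([Data("a"), Data("b")])  # passes; mixing in e.g. a dict would fail
```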
diff --git a/src/backend/tests/unit/test_database.py b/src/backend/tests/unit/test_database.py
deleted file mode 100644
index 90c2f3172de4..000000000000
--- a/src/backend/tests/unit/test_database.py
+++ /dev/null
@@ -1,781 +0,0 @@
-import json
-from typing import NamedTuple
-from uuid import UUID, uuid4
-
-import orjson
-import pytest
-from httpx import AsyncClient
-from langflow.api.v1.schemas import FlowListCreate, ResultDataResponse
-from langflow.initial_setup.setup import load_starter_projects
-from langflow.services.database.models.base import orjson_dumps
-from langflow.services.database.models.flow import Flow, FlowCreate, FlowUpdate
-from langflow.services.database.models.folder.model import FolderCreate
-from langflow.services.database.utils import session_getter
-from langflow.services.deps import get_db_service
-from sqlalchemy import text
-
-from lfx.graph.utils import log_transaction, log_vertex_build
-
-
-@pytest.fixture(scope="module")
-def json_style():
- # class FlowStyleBase(SQLModel):
- # color: str = Field(index=True)
- # emoji: str = Field(index=False)
- # flow_id: UUID = Field(default=None, foreign_key="flow.id")
- return orjson_dumps(
- {
- "color": "red",
- "emoji": "👍",
- }
- )
-
-
-@pytest.mark.usefixtures("active_user")
-async def test_create_flow(client: AsyncClient, json_flow: str, logged_in_headers):
- flow = orjson.loads(json_flow)
- data = flow["data"]
- flow = FlowCreate(name=str(uuid4()), description="description", data=data)
- response = await client.post("api/v1/flows/", json=flow.model_dump(), headers=logged_in_headers)
- assert response.status_code == 201
- assert response.json()["name"] == flow.name
- assert response.json()["data"] == flow.data
-    # data is optional, so we can create a flow without data
- flow = FlowCreate(name=str(uuid4()))
- response = await client.post("api/v1/flows/", json=flow.model_dump(exclude_unset=True), headers=logged_in_headers)
- assert response.status_code == 201
- assert response.json()["name"] == flow.name
- assert response.json()["data"] == flow.data
-
-
-@pytest.mark.usefixtures("active_user")
-async def test_read_flows(client: AsyncClient, json_flow: str, logged_in_headers):
- flow_data = orjson.loads(json_flow)
- data = flow_data["data"]
- flow = FlowCreate(name=str(uuid4()), description="description", data=data)
- response = await client.post("api/v1/flows/", json=flow.model_dump(), headers=logged_in_headers)
- assert response.status_code == 201
- assert response.json()["name"] == flow.name
- assert response.json()["data"] == flow.data
-
- flow = FlowCreate(name=str(uuid4()), description="description", data=data)
- response = await client.post("api/v1/flows/", json=flow.model_dump(), headers=logged_in_headers)
- assert response.status_code == 201
- assert response.json()["name"] == flow.name
- assert response.json()["data"] == flow.data
-
- response = await client.get("api/v1/flows/", headers=logged_in_headers)
- assert response.status_code == 200
- assert len(response.json()) > 0
-
-
-async def test_read_flows_pagination_with_params(client: AsyncClient, logged_in_headers):
- response = await client.get(
- "api/v1/flows/", headers=logged_in_headers, params={"page": 3, "size": 10, "get_all": False}
- )
- assert response.status_code == 200
- assert response.json()["page"] == 3
- assert response.json()["size"] == 10
- assert response.json()["pages"] == 0
- assert response.json()["total"] == 0
- assert len(response.json()["items"]) == 0
-
-
-async def test_read_flows_pagination_with_flows(client: AsyncClient, logged_in_headers):
- number_of_flows = 30
- flows = [FlowCreate(name=f"Flow {i}", description="description", data={}) for i in range(number_of_flows)]
- flow_ids = []
- for flow in flows:
- response = await client.post("api/v1/flows/", json=flow.model_dump(), headers=logged_in_headers)
- assert response.status_code == 201
- flow_ids.append(response.json()["id"])
-
- response = await client.get(
- "api/v1/flows/", headers=logged_in_headers, params={"page": 3, "size": 10, "get_all": False}
- )
- assert response.status_code == 200
- assert response.json()["page"] == 3
- assert response.json()["size"] == 10
- assert response.json()["pages"] == 3
- assert response.json()["total"] == number_of_flows
- assert len(response.json()["items"]) == 10
-
- response = await client.get(
- "api/v1/flows/", headers=logged_in_headers, params={"page": 4, "size": 10, "get_all": False}
- )
- assert response.status_code == 200
- assert response.json()["page"] == 4
- assert response.json()["size"] == 10
- assert response.json()["pages"] == 3
- assert response.json()["total"] == number_of_flows
- assert len(response.json()["items"]) == 0
-
-
-async def test_read_flows_custom_page_size(client: AsyncClient, logged_in_headers):
- number_of_flows = 30
- flows = [FlowCreate(name=f"Flow {i}", description="description", data={}) for i in range(number_of_flows)]
- for flow in flows:
- response = await client.post("api/v1/flows/", json=flow.model_dump(), headers=logged_in_headers)
- assert response.status_code == 201
-
- response = await client.get(
- "api/v1/flows/", headers=logged_in_headers, params={"page": 1, "size": 15, "get_all": False}
- )
- assert response.status_code == 200
- assert response.json()["page"] == 1
- assert response.json()["size"] == 15
- assert response.json()["pages"] == 2
- assert response.json()["total"] == number_of_flows
- assert len(response.json()["items"]) == 15
-
-
-async def test_read_flows_invalid_page(client: AsyncClient, logged_in_headers):
- number_of_flows = 30
- flows = [FlowCreate(name=f"Flow {i}", description="description", data={}) for i in range(number_of_flows)]
- flow_ids = []
- for flow in flows:
- response = await client.post("api/v1/flows/", json=flow.model_dump(), headers=logged_in_headers)
- assert response.status_code == 201
- flow_ids.append(response.json()["id"])
-
- response = await client.get(
- "api/v1/flows/", headers=logged_in_headers, params={"page": 0, "size": 10, "get_all": False}
- )
-    assert response.status_code == 422  # 422 Unprocessable Entity for an invalid page number
-
-
-async def test_read_flows_invalid_size(client: AsyncClient, logged_in_headers):
- number_of_flows = 30
- flows = [FlowCreate(name=f"Flow {i}", description="description", data={}) for i in range(number_of_flows)]
- flow_ids = []
- for flow in flows:
- response = await client.post("api/v1/flows/", json=flow.model_dump(), headers=logged_in_headers)
- assert response.status_code == 201
- flow_ids.append(response.json()["id"])
-
- response = await client.get(
- "api/v1/flows/", headers=logged_in_headers, params={"page": 1, "size": 0, "get_all": False}
- )
-    assert response.status_code == 422  # 422 Unprocessable Entity for an invalid page size
-
-
-async def test_read_flows_no_pagination_params(client: AsyncClient, logged_in_headers):
- number_of_flows = 30
- flows = [FlowCreate(name=f"Flow {i}", description="description", data={}) for i in range(number_of_flows)]
- for flow in flows:
- response = await client.post("api/v1/flows/", json=flow.model_dump(), headers=logged_in_headers)
- assert response.status_code == 201
-
- response = await client.get("api/v1/flows/", headers=logged_in_headers, params={"get_all": False})
- assert response.status_code == 200
-    # Assert the API's default pagination values (page=1, size=50)
- assert response.json()["page"] == 1
- assert response.json()["size"] == 50
- assert response.json()["pages"] == 1
- assert response.json()["total"] == number_of_flows
- assert len(response.json()["items"]) == number_of_flows
-
-
-async def test_read_flows_components_only_paginated(client: AsyncClient, logged_in_headers):
- number_of_flows = 10
- flows = [
- FlowCreate(name=f"Flow {i}", description="description", data={}, is_component=True)
- for i in range(number_of_flows)
- ]
-
- for flow in flows:
- response = await client.post("api/v1/flows/", json=flow.model_dump(), headers=logged_in_headers)
- assert response.status_code == 201
-
- response = await client.get(
- "api/v1/flows/", headers=logged_in_headers, params={"components_only": True, "get_all": False}
- )
-
- assert response.status_code == 200
- response_json = response.json()
- assert response_json["total"] == 10
- assert response_json["pages"] == 1
- assert response_json["page"] == 1
- assert response_json["size"] == 50
- assert all(flow["is_component"] is True for flow in response_json["items"])
-
-
-async def test_read_flows_components_only(client: AsyncClient, logged_in_headers):
- number_of_flows = 10
- flows = [
- FlowCreate(name=f"Flow {i}", description="description", data={}, is_component=True)
- for i in range(number_of_flows)
- ]
- for flow in flows:
- response = await client.post("api/v1/flows/", json=flow.model_dump(), headers=logged_in_headers)
- assert response.status_code == 201
- response = await client.get("api/v1/flows/", headers=logged_in_headers, params={"components_only": True})
- assert response.status_code == 200
- response_json = response.json()
- assert all(flow["is_component"] is True for flow in response_json)
-
-
-async def test_read_flow(client: AsyncClient, json_flow: str, logged_in_headers):
- flow = orjson.loads(json_flow)
- data = flow["data"]
- unique_name = str(uuid4())
- flow = FlowCreate(name=unique_name, description="description", data=data)
- response = await client.post("api/v1/flows/", json=flow.model_dump(), headers=logged_in_headers)
- flow_id = response.json()["id"] # flow_id should be a UUID but is a string
- # turn it into a UUID
- flow_id = UUID(flow_id)
-
- response = await client.get(f"api/v1/flows/{flow_id}", headers=logged_in_headers)
- assert response.status_code == 200
- assert response.json()["name"] == flow.name
- assert response.json()["data"] == flow.data
-
-
-@pytest.mark.usefixtures("active_user")
-async def test_update_flow(client: AsyncClient, json_flow: str, logged_in_headers):
- flow = orjson.loads(json_flow)
- data = flow["data"]
-
- flow = FlowCreate(name="Test Flow", description="description", data=data)
- response = await client.post("api/v1/flows/", json=flow.model_dump(), headers=logged_in_headers)
-
- flow_id = response.json()["id"]
- updated_flow = FlowUpdate(
- name="Updated Flow",
- description="updated description",
- data=data,
- )
- response = await client.patch(f"api/v1/flows/{flow_id}", json=updated_flow.model_dump(), headers=logged_in_headers)
-
- assert response.status_code == 200
- assert response.json()["name"] == updated_flow.name
- assert response.json()["description"] == updated_flow.description
- # assert response.json()["data"] == updated_flow.data
-
-
-@pytest.mark.usefixtures("active_user")
-async def test_delete_flow(client: AsyncClient, json_flow: str, logged_in_headers):
- flow = orjson.loads(json_flow)
- data = flow["data"]
- flow = FlowCreate(name="Test Flow", description="description", data=data)
- response = await client.post("api/v1/flows/", json=flow.model_dump(), headers=logged_in_headers)
- flow_id = response.json()["id"]
- response = await client.delete(f"api/v1/flows/{flow_id}", headers=logged_in_headers)
- assert response.status_code == 200
- assert response.json()["message"] == "Flow deleted successfully"
-
-
-@pytest.mark.usefixtures("active_user")
-async def test_delete_flows(client: AsyncClient, logged_in_headers):
- # Create ten flows
- number_of_flows = 10
- flows = [FlowCreate(name=f"Flow {i}", description="description", data={}) for i in range(number_of_flows)]
- flow_ids = []
- for flow in flows:
- response = await client.post("api/v1/flows/", json=flow.model_dump(), headers=logged_in_headers)
- assert response.status_code == 201
- flow_ids.append(response.json()["id"])
-
- response = await client.request("DELETE", "api/v1/flows/", headers=logged_in_headers, json=flow_ids)
- assert response.status_code == 200, response.content
- assert response.json().get("deleted") == number_of_flows
-
-
-@pytest.mark.usefixtures("active_user")
-async def test_delete_flows_with_transaction_and_build(client: AsyncClient, logged_in_headers):
- # Create ten flows
- number_of_flows = 10
- flows = [FlowCreate(name=f"Flow {i}", description="description", data={}) for i in range(number_of_flows)]
- flow_ids = []
- for flow in flows:
- response = await client.post("api/v1/flows/", json=flow.model_dump(), headers=logged_in_headers)
- assert response.status_code == 201
- flow_ids.append(response.json()["id"])
-
- class VertexTuple(NamedTuple):
- id: str
-
- # Create a transaction for each flow
- for flow_id in flow_ids:
- await log_transaction(
- str(flow_id), source=VertexTuple(id="vid"), target=VertexTuple(id="tid"), status="success"
- )
-
- # Create a build for each flow
- for flow_id in flow_ids:
- build = {
- "valid": True,
- "params": {},
- "data": ResultDataResponse(),
- "artifacts": {},
- "vertex_id": "vid",
- "flow_id": flow_id,
- }
- await log_vertex_build(
- flow_id=build["flow_id"],
- vertex_id=build["vertex_id"],
- valid=build["valid"],
- params=build["params"],
- data=build["data"],
- artifacts=build.get("artifacts"),
- )
-
- response = await client.request("DELETE", "api/v1/flows/", headers=logged_in_headers, json=flow_ids)
- assert response.status_code == 200, response.content
- assert response.json().get("deleted") == number_of_flows
-
- for flow_id in flow_ids:
- response = await client.request(
- "GET", "api/v1/monitor/transactions", params={"flow_id": flow_id}, headers=logged_in_headers
- )
- assert response.status_code == 200
- json_response = response.json()
- assert json_response["items"] == []
-
- for flow_id in flow_ids:
- response = await client.request(
- "GET", "api/v1/monitor/builds", params={"flow_id": flow_id}, headers=logged_in_headers
- )
- assert response.status_code == 200
- assert response.json() == {"vertex_builds": {}}
-
-
-@pytest.mark.usefixtures("active_user")
-async def test_delete_folder_with_flows_with_transaction_and_build(client: AsyncClient, logged_in_headers):
- # Create a new project
- folder_name = f"Test Project {uuid4()}"
- project = FolderCreate(name=folder_name, description="Test project description", components_list=[], flows_list=[])
-
- response = await client.post("api/v1/projects/", json=project.model_dump(), headers=logged_in_headers)
- assert response.status_code == 201, f"Expected status code 201, but got {response.status_code}"
-
- created_folder = response.json()
- folder_id = created_folder["id"]
-
- # Create ten flows
- number_of_flows = 10
- flows = [FlowCreate(name=f"Flow {i}", description="description", data={}) for i in range(number_of_flows)]
- flow_ids = []
- for flow in flows:
- flow.folder_id = folder_id
- response = await client.post("api/v1/flows/", json=flow.model_dump(), headers=logged_in_headers)
- assert response.status_code == 201
- flow_ids.append(response.json()["id"])
-
- class VertexTuple(NamedTuple):
- id: str
- params: dict
-
- # Create a transaction for each flow
- for flow_id in flow_ids:
- await log_transaction(
- str(flow_id),
- source=VertexTuple(id="vid", params={}),
- target=VertexTuple(id="tid", params={}),
- status="success",
- )
-
- # Create a build for each flow
- for flow_id in flow_ids:
- build = {
- "valid": True,
- "params": {},
- "data": ResultDataResponse(),
- "artifacts": {},
- "vertex_id": "vid",
- "flow_id": flow_id,
- }
- await log_vertex_build(
- flow_id=build["flow_id"],
- vertex_id=build["vertex_id"],
- valid=build["valid"],
- params=build["params"],
- data=build["data"],
- artifacts=build.get("artifacts"),
- )
-
- response = await client.request("DELETE", f"api/v1/projects/{folder_id}", headers=logged_in_headers)
- assert response.status_code == 204
-
- for flow_id in flow_ids:
- response = await client.request(
- "GET", "api/v1/monitor/transactions", params={"flow_id": flow_id}, headers=logged_in_headers
- )
- assert response.status_code == 200, response.json()
- json_response = response.json()
- assert json_response["items"] == []
-
- for flow_id in flow_ids:
- response = await client.request(
- "GET", "api/v1/monitor/builds", params={"flow_id": flow_id}, headers=logged_in_headers
- )
- assert response.status_code == 200
- assert response.json() == {"vertex_builds": {}}
-
-
-async def test_get_flows_from_folder_pagination(client: AsyncClient, logged_in_headers):
- # Create a new project
- folder_name = f"Test Project {uuid4()}"
- project = FolderCreate(name=folder_name, description="Test project description", components_list=[], flows_list=[])
-
- response = await client.post("api/v1/projects/", json=project.model_dump(), headers=logged_in_headers)
- assert response.status_code == 201, f"Expected status code 201, but got {response.status_code}"
-
- created_folder = response.json()
- folder_id = created_folder["id"]
-
- response = await client.get(
- f"api/v1/projects/{folder_id}", headers=logged_in_headers, params={"page": 1, "size": 50}
- )
- assert response.status_code == 200
- assert response.json()["folder"]["name"] == folder_name
- assert response.json()["folder"]["description"] == "Test project description"
- assert response.json()["flows"]["page"] == 1
- assert response.json()["flows"]["size"] == 50
- assert response.json()["flows"]["pages"] == 0
- assert response.json()["flows"]["total"] == 0
- assert len(response.json()["flows"]["items"]) == 0
-
-
-async def test_get_flows_from_folder_pagination_with_params(client: AsyncClient, logged_in_headers):
- # Create a new project
- folder_name = f"Test Project {uuid4()}"
- project = FolderCreate(name=folder_name, description="Test project description", components_list=[], flows_list=[])
-
- response = await client.post("api/v1/projects/", json=project.model_dump(), headers=logged_in_headers)
- assert response.status_code == 201, f"Expected status code 201, but got {response.status_code}"
-
- created_folder = response.json()
- folder_id = created_folder["id"]
-
- response = await client.get(
- f"api/v1/projects/{folder_id}", headers=logged_in_headers, params={"page": 3, "size": 10}
- )
- assert response.status_code == 200
- assert response.json()["folder"]["name"] == folder_name
- assert response.json()["folder"]["description"] == "Test project description"
- assert response.json()["flows"]["page"] == 3
- assert response.json()["flows"]["size"] == 10
- assert response.json()["flows"]["pages"] == 0
- assert response.json()["flows"]["total"] == 0
- assert len(response.json()["flows"]["items"]) == 0
-
-
-@pytest.mark.usefixtures("session")
-async def test_create_flows(client: AsyncClient, json_flow: str, logged_in_headers):
- flow = orjson.loads(json_flow)
- data = flow["data"]
- # Create test data
- flow_unique_name = str(uuid4())
- flow_2_unique_name = str(uuid4())
- flow_list = FlowListCreate(
- flows=[
- FlowCreate(name=flow_unique_name, description="description", data=data),
- FlowCreate(name=flow_2_unique_name, description="description", data=data),
- ]
- )
- # Make request to endpoint
- response = await client.post("api/v1/flows/batch/", json=flow_list.dict(), headers=logged_in_headers)
- # Check response status code
- assert response.status_code == 201
- # Check response data
- response_data = response.json()
- assert len(response_data) == 2
- assert flow_unique_name in response_data[0]["name"]
- assert response_data[0]["description"] == "description"
- assert response_data[0]["data"] == data
- assert response_data[1]["name"] == flow_2_unique_name
- assert response_data[1]["description"] == "description"
- assert response_data[1]["data"] == data
-
-
-@pytest.mark.usefixtures("session")
-async def test_upload_file(client: AsyncClient, json_flow: str, logged_in_headers):
- flow = orjson.loads(json_flow)
- data = flow["data"]
- # Create test data
- flow_unique_name = str(uuid4())
- flow_2_unique_name = str(uuid4())
- flow_list = FlowListCreate(
- flows=[
- FlowCreate(name=flow_unique_name, description="description", data=data),
- FlowCreate(name=flow_2_unique_name, description="description", data=data),
- ]
- )
-    file_contents = orjson_dumps(flow_list.model_dump())
- response = await client.post(
- "api/v1/flows/upload/",
- files={"file": ("examples.json", file_contents, "application/json")},
- headers=logged_in_headers,
- )
- # Check response status code
- assert response.status_code == 201
- # Check response data
- response_data = response.json()
- assert len(response_data) == 2
- assert flow_unique_name in response_data[0]["name"]
- assert response_data[0]["description"] == "description"
- assert response_data[0]["data"] == data
- assert response_data[1]["name"] == flow_2_unique_name
- assert response_data[1]["description"] == "description"
- assert response_data[1]["data"] == data
-
-
-@pytest.mark.usefixtures("session")
-async def test_download_file(
- client: AsyncClient,
- json_flow,
- active_user,
- logged_in_headers,
-):
- flow = orjson.loads(json_flow)
- data = flow["data"]
- # Create test data
- flow_unique_name = str(uuid4())
- flow_2_unique_name = str(uuid4())
- flow_list = FlowListCreate(
- flows=[
- FlowCreate(name=flow_unique_name, description="description", data=data),
- FlowCreate(name=flow_2_unique_name, description="description", data=data),
- ]
- )
- db_manager = get_db_service()
- async with session_getter(db_manager) as _session:
- saved_flows = []
- for flow in flow_list.flows:
- flow.user_id = active_user.id
- db_flow = Flow.model_validate(flow, from_attributes=True)
- _session.add(db_flow)
- saved_flows.append(db_flow)
- await _session.commit()
- # Make request to endpoint inside the session context
- flow_ids = [str(db_flow.id) for db_flow in saved_flows] # Convert UUIDs to strings
- flow_ids_json = json.dumps(flow_ids)
- response = await client.post(
- "api/v1/flows/download/",
- data=flow_ids_json,
- headers={**logged_in_headers, "Content-Type": "application/json"},
- )
- # Check response status code
- assert response.status_code == 200, response.json()
- # Check response data
- # Since the endpoint now returns a zip file, we need to check the content type and the filename in the headers
- assert response.headers["Content-Type"] == "application/x-zip-compressed"
- assert "attachment; filename=" in response.headers["Content-Disposition"]
-
-
-@pytest.mark.usefixtures("active_user")
-async def test_create_flow_with_invalid_data(client: AsyncClient, logged_in_headers):
- flow = {"name": "a" * 256, "data": "Invalid flow data"}
- response = await client.post("api/v1/flows/", json=flow, headers=logged_in_headers)
- assert response.status_code == 422
-
-
-@pytest.mark.usefixtures("active_user")
-async def test_get_nonexistent_flow(client: AsyncClient, logged_in_headers):
- uuid = uuid4()
- response = await client.get(f"api/v1/flows/{uuid}", headers=logged_in_headers)
- assert response.status_code == 404
-
-
-@pytest.mark.usefixtures("active_user")
-async def test_update_flow_idempotency(client: AsyncClient, json_flow: str, logged_in_headers):
- flow_data = orjson.loads(json_flow)
- data = flow_data["data"]
- flow_data = FlowCreate(name="Test Flow", description="description", data=data)
- response = await client.post("api/v1/flows/", json=flow_data.dict(), headers=logged_in_headers)
- flow_id = response.json()["id"]
- updated_flow = FlowCreate(name="Updated Flow", description="description", data=data)
- response1 = await client.put(f"api/v1/flows/{flow_id}", json=updated_flow.model_dump(), headers=logged_in_headers)
- response2 = await client.put(f"api/v1/flows/{flow_id}", json=updated_flow.model_dump(), headers=logged_in_headers)
- assert response1.json() == response2.json()
-
-
-@pytest.mark.usefixtures("active_user")
-async def test_update_nonexistent_flow(client: AsyncClient, json_flow: str, logged_in_headers):
- flow_data = orjson.loads(json_flow)
- data = flow_data["data"]
- uuid = uuid4()
- updated_flow = FlowCreate(
- name="Updated Flow",
- description="description",
- data=data,
- )
- response = await client.patch(f"api/v1/flows/{uuid}", json=updated_flow.model_dump(), headers=logged_in_headers)
- assert response.status_code == 404, response.text
-
-
-@pytest.mark.usefixtures("active_user")
-async def test_delete_nonexistent_flow(client: AsyncClient, logged_in_headers):
- uuid = uuid4()
- response = await client.delete(f"api/v1/flows/{uuid}", headers=logged_in_headers)
- assert response.status_code == 404
-
-
-@pytest.mark.usefixtures("active_user")
-async def test_read_only_starter_projects(client: AsyncClient, logged_in_headers):
- response = await client.get("api/v1/flows/basic_examples/", headers=logged_in_headers)
- starter_projects = await load_starter_projects()
- assert response.status_code == 200
- assert len(response.json()) == len(starter_projects)
-
-
-async def test_sqlite_pragmas():
- db_service = get_db_service()
-
- async with db_service.with_session() as session:
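- # journal_mode should be WAL and synchronous should be NORMAL (PRAGMA value 1)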
- assert (await session.exec(text("PRAGMA journal_mode;"))).scalar() == "wal"
- assert (await session.exec(text("PRAGMA synchronous;"))).scalar() == 1
-
-
-@pytest.mark.usefixtures("active_user")
-async def test_read_folder(client: AsyncClient, logged_in_headers):
- # Create a new project
- folder_name = f"Test Project {uuid4()}"
- project = FolderCreate(name=folder_name, description="Test project description")
- response = await client.post("api/v1/projects/", json=project.model_dump(), headers=logged_in_headers)
- assert response.status_code == 201
- created_folder = response.json()
- folder_id = created_folder["id"]
-
- # Read the project
- response = await client.get(f"api/v1/projects/{folder_id}", headers=logged_in_headers)
- assert response.status_code == 200
- folder_data = response.json()
- assert folder_data["name"] == folder_name
- assert folder_data["description"] == "Test project description"
- assert "flows" in folder_data
- assert isinstance(folder_data["flows"], list)
-
-
-@pytest.mark.usefixtures("active_user")
-async def test_read_folder_with_pagination(client: AsyncClient, logged_in_headers):
- # Create a new project
- folder_name = f"Test Project {uuid4()}"
- project = FolderCreate(name=folder_name, description="Test project description")
- response = await client.post("api/v1/projects/", json=project.model_dump(), headers=logged_in_headers)
- assert response.status_code == 201
- created_folder = response.json()
- folder_id = created_folder["id"]
-
- # Read the project with pagination
- response = await client.get(
- f"api/v1/projects/{folder_id}", headers=logged_in_headers, params={"page": 1, "size": 10}
- )
- assert response.status_code == 200
- folder_data = response.json()
- assert isinstance(folder_data, dict)
- assert "folder" in folder_data
- assert "flows" in folder_data
- assert folder_data["folder"]["name"] == folder_name
- assert folder_data["folder"]["description"] == "Test project description"
- assert folder_data["flows"]["page"] == 1
- assert folder_data["flows"]["size"] == 10
- assert isinstance(folder_data["flows"]["items"], list)
-
-
-@pytest.mark.usefixtures("active_user")
-async def test_read_folder_with_flows(client: AsyncClient, json_flow: str, logged_in_headers):
- # Create a new project
- folder_name = f"Test Project {uuid4()}"
- flow_name = f"Test Flow {uuid4()}"
- project = FolderCreate(name=folder_name, description="Test project description")
- response = await client.post("api/v1/projects/", json=project.model_dump(), headers=logged_in_headers)
- assert response.status_code == 201
- created_folder = response.json()
- folder_id = created_folder["id"]
-
- # Create a flow in the project
- flow_data = orjson.loads(json_flow)
- data = flow_data["data"]
- flow = FlowCreate(name=flow_name, description="description", data=data)
- flow.folder_id = folder_id
- response = await client.post("api/v1/flows/", json=flow.model_dump(), headers=logged_in_headers)
- assert response.status_code == 201
-
- # Read the project with flows
- response = await client.get(f"api/v1/projects/{folder_id}", headers=logged_in_headers)
- assert response.status_code == 200
- folder_data = response.json()
- assert folder_data["name"] == folder_name
- assert folder_data["description"] == "Test project description"
- assert len(folder_data["flows"]) == 1
- assert folder_data["flows"][0]["name"] == flow_name
-
-
-@pytest.mark.usefixtures("active_user")
-async def test_read_nonexistent_folder(client: AsyncClient, logged_in_headers):
- nonexistent_id = str(uuid4())
- response = await client.get(f"api/v1/projects/{nonexistent_id}", headers=logged_in_headers)
- assert response.status_code == 404
- assert response.json()["detail"] == "Project not found"
-
-
-@pytest.mark.usefixtures("active_user")
-async def test_read_folder_with_search(client: AsyncClient, json_flow: str, logged_in_headers):
- # Create a new project
- folder_name = f"Test Project {uuid4()}"
- project = FolderCreate(name=folder_name, description="Test project description")
- response = await client.post("api/v1/projects/", json=project.model_dump(), headers=logged_in_headers)
- assert response.status_code == 201
- created_folder = response.json()
- folder_id = created_folder["id"]
-
- # Create two flows in the project
- flow_data = orjson.loads(json_flow)
- flow_name_1 = f"Test Flow 1 {uuid4()}"
- flow_name_2 = f"Another Flow {uuid4()}"
-
- flow1 = FlowCreate(
- name=flow_name_1, description="Test flow description", data=flow_data["data"], folder_id=folder_id
- )
- flow2 = FlowCreate(
- name=flow_name_2, description="Another flow description", data=flow_data["data"], folder_id=folder_id
- )
- await client.post("api/v1/flows/", json=flow1.model_dump(), headers=logged_in_headers)
- await client.post("api/v1/flows/", json=flow2.model_dump(), headers=logged_in_headers)
-
- # Read the project with search
- response = await client.get(
- f"api/v1/projects/{folder_id}", headers=logged_in_headers, params={"search": "Test", "page": 1, "size": 10}
- )
- assert response.status_code == 200
- folder_data = response.json()
- assert len(folder_data["flows"]["items"]) == 1
- assert folder_data["flows"]["items"][0]["name"] == flow_name_1
-
-
-@pytest.mark.usefixtures("active_user")
-async def test_read_folder_with_component_filter(client: AsyncClient, json_flow: str, logged_in_headers):
- # Create a new project
- folder_name = f"Test Project {uuid4()}"
- project = FolderCreate(name=folder_name, description="Test project description")
- response = await client.post("api/v1/projects/", json=project.model_dump(), headers=logged_in_headers)
- assert response.status_code == 201
- created_folder = response.json()
- folder_id = created_folder["id"]
-
- # Create a component flow in the project
- flow_data = orjson.loads(json_flow)
- component_flow_name = f"Component Flow {uuid4()}"
- component_flow = FlowCreate(
- name=component_flow_name,
- description="Component flow description",
- data=flow_data["data"],
- folder_id=folder_id,
- is_component=True,
- )
- await client.post("api/v1/flows/", json=component_flow.model_dump(), headers=logged_in_headers)
-
- # Read the project with component filter
- response = await client.get(
- f"api/v1/projects/{folder_id}", headers=logged_in_headers, params={"is_component": True, "page": 1, "size": 10}
- )
- assert response.status_code == 200
- folder_data = response.json()
- assert len(folder_data["flows"]["items"]) == 1
- assert folder_data["flows"]["items"][0]["name"] == component_flow_name
- assert folder_data["flows"]["items"][0]["is_component"] == True # noqa: E712
diff --git a/src/backend/tests/unit/test_endpoints.py b/src/backend/tests/unit/test_endpoints.py
deleted file mode 100644
index 02099e595521..000000000000
--- a/src/backend/tests/unit/test_endpoints.py
+++ /dev/null
@@ -1,658 +0,0 @@
-import asyncio
-import json
-from uuid import UUID, uuid4
-
-import pytest
-from fastapi import status
-from httpx import AsyncClient
-
-from lfx.custom.directory_reader.directory_reader import DirectoryReader
-from lfx.services.settings.base import BASE_COMPONENTS_PATH
-
-
-async def run_post(client, flow_id, headers, post_data):
- """Sends a POST request to process a flow and returns the JSON response.
-
- Args:
- client: The HTTP client to use for making requests.
- flow_id: The identifier of the flow to process.
- headers: The HTTP headers to include in the request.
- post_data: The JSON payload to send in the request.
-
- Returns:
- The JSON response from the API if the request is successful.
-
- Raises:
- AssertionError: If the response status code is not 200.
- """
- response = await client.post(
- f"api/v1/process/{flow_id}",
- headers=headers,
- json=post_data,
- )
- assert response.status_code == 200, response.json()
- return response.json()
-
-
-# Helper function to poll task status
-async def poll_task_status(client, headers, href, max_attempts=20, sleep_time=1):
- for _ in range(max_attempts):
- task_status_response = await client.get(
- href,
- headers=headers,
- )
- if task_status_response.status_code == 200 and task_status_response.json()["status"] == "SUCCESS":
- return task_status_response.json()
- await asyncio.sleep(sleep_time)
- return None # Return None if task did not complete in time
-
-
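-# Shared payload for the prompt validation tests below; each test overwrites
-# the "template" key in place before posting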
-PROMPT_REQUEST = {
- "name": "string",
- "template": "string",
- "frontend_node": {
- "template": {},
- "description": "string",
- "base_classes": ["string"],
- "name": "",
- "display_name": "",
- "documentation": "",
- "custom_fields": {},
- "output_types": [],
- "field_formatters": {
- "formatters": {"openai_api_key": {}},
- "base_formatters": {
- "kwargs": {},
- "optional": {},
- "list": {},
- "dict": {},
- "union": {},
- "multiline": {},
- "show": {},
- "password": {},
- "default": {},
- "headers": {},
- "dict_code_file": {},
- "model_fields": {
- "MODEL_DICT": {
- "OpenAI": [
- "text-davinci-003",
- "text-davinci-002",
- "text-curie-001",
- "text-babbage-001",
- "text-ada-001",
- ],
- "ChatOpenAI": [
- "gpt-4-turbo-preview",
- "gpt-4-0125-preview",
- "gpt-4-1106-preview",
- "gpt-4-vision-preview",
- "gpt-3.5-turbo-0125",
- "gpt-3.5-turbo-1106",
- ],
- "Anthropic": [
- "claude-v1",
- "claude-v1-100k",
- "claude-instant-v1",
- "claude-instant-v1-100k",
- "claude-v1.3",
- "claude-v1.3-100k",
- "claude-v1.2",
- "claude-v1.0",
- "claude-instant-v1.1",
- "claude-instant-v1.1-100k",
- "claude-instant-v1.0",
- ],
- "ChatAnthropic": [
- "claude-v1",
- "claude-v1-100k",
- "claude-instant-v1",
- "claude-instant-v1-100k",
- "claude-v1.3",
- "claude-v1.3-100k",
- "claude-v1.2",
- "claude-v1.0",
- "claude-instant-v1.1",
- "claude-instant-v1.1-100k",
- "claude-instant-v1.0",
- ],
- }
- },
- },
- },
- },
-}
-
-
-@pytest.mark.benchmark
-async def test_get_all(client: AsyncClient, logged_in_headers):
- """Tests the retrieval of all available components from the API.
-
- Sends a GET request to the `api/v1/all` endpoint and verifies that the returned component names
- correspond to files in the components directory. Also checks for the presence of specific components
- such as "ChatInput", "Prompt", and "ChatOutput" in the response.
- """
- response = await client.get("api/v1/all", headers=logged_in_headers)
- assert response.status_code == 200
- dir_reader = DirectoryReader(BASE_COMPONENTS_PATH)
- files = dir_reader.get_files()
- # json_response maps each category to a dict of component templates
- json_response = response.json()
- all_names = [component_name for components in json_response.values() for component_name in components]
- # Custom components are included in the count; less than or equal because
- # some component files may lack their optional dependencies
- assert len(all_names) <= len(files)
- assert "ChatInput" in json_response["input_output"]
- assert "Prompt Template" in json_response["processing"]
- assert "ChatOutput" in json_response["input_output"]
-
-
-@pytest.mark.usefixtures("active_user")
-async def test_post_validate_code(client: AsyncClient, logged_in_headers):
- # Test case with a valid import and function
- code1 = """
-import math
-
-def square(x):
- return x ** 2
-"""
- response1 = await client.post("api/v1/validate/code", json={"code": code1}, headers=logged_in_headers)
- assert response1.status_code == 200
- assert response1.json() == {"imports": {"errors": []}, "function": {"errors": []}}
-
- # Test case with an invalid import and valid function
- code2 = """
-import non_existent_module
-
-def square(x):
- return x ** 2
-"""
- response2 = await client.post("api/v1/validate/code", json={"code": code2}, headers=logged_in_headers)
- assert response2.status_code == 200
- assert response2.json() == {
- "imports": {"errors": ["No module named 'non_existent_module'"]},
- "function": {"errors": []},
- }
-
- # Test case with a valid import and invalid function syntax
- code3 = """
-import math
-
-def square(x)
- return x ** 2
-"""
- response3 = await client.post("api/v1/validate/code", json={"code": code3}, headers=logged_in_headers)
- assert response3.status_code == 200
- assert response3.json() == {
- "imports": {"errors": []},
- "function": {"errors": ["expected ':' (, line 4)"]},
- }
-
- # Test case with invalid JSON payload
- response4 = await client.post("api/v1/validate/code", json={"invalid_key": code1}, headers=logged_in_headers)
- assert response4.status_code == 422
-
- # Test case with an empty code string
- response5 = await client.post("api/v1/validate/code", json={"code": ""}, headers=logged_in_headers)
- assert response5.status_code == 200
- assert response5.json() == {"imports": {"errors": []}, "function": {"errors": []}}
-
-
-VALID_PROMPT = """
-I want you to act as a naming consultant for new companies.
-
-Here are some examples of good company names:
-
-- search engine, Google
-- social media, Facebook
-- video sharing, YouTube
-
-The name should be short, catchy and easy to remember.
-
-What is a good name for a company that makes {product}?
-"""
-
-INVALID_PROMPT = "This is an invalid prompt without any input variable."
-
-
-async def test_valid_prompt(client: AsyncClient):
- PROMPT_REQUEST["template"] = VALID_PROMPT
- response = await client.post("api/v1/validate/prompt", json=PROMPT_REQUEST)
- assert response.status_code == 200
- assert response.json()["input_variables"] == ["product"]
-
-
-async def test_invalid_prompt(client: AsyncClient):
- PROMPT_REQUEST["template"] = INVALID_PROMPT
- response = await client.post(
- "api/v1/validate/prompt",
- json=PROMPT_REQUEST,
- )
- assert response.status_code == 200
- assert response.json()["input_variables"] == []
-
-
-@pytest.mark.parametrize(
- ("prompt", "expected_input_variables"),
- [
- ("{color} is my favorite color.", ["color"]),
- ("The weather is {weather} today.", ["weather"]),
- ("This prompt has no variables.", []),
- ("{a}, {b}, and {c} are variables.", ["a", "b", "c"]),
- ],
-)
-async def test_various_prompts(client, prompt, expected_input_variables):
- PROMPT_REQUEST["template"] = prompt
- response = await client.post("api/v1/validate/prompt", json=PROMPT_REQUEST)
- assert response.status_code == 200
- assert response.json()["input_variables"] == expected_input_variables
-
-
-async def test_get_vertices_flow_not_found(client, logged_in_headers):
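- # The build endpoint currently reports an unknown flow id as a 500 rather than a 404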
- uuid = uuid4()
- response = await client.post(f"/api/v1/build/{uuid}/vertices", headers=logged_in_headers)
- assert response.status_code == 500
-
-
-async def test_get_vertices(client, added_flow_webhook_test, logged_in_headers):
- flow_id = added_flow_webhook_test["id"]
- response = await client.post(f"/api/v1/build/{flow_id}/vertices", headers=logged_in_headers)
- assert response.status_code == 200
- assert "ids" in response.json()
- # Only the component type before the "-" in each id matters; the webhook
- # test flow is expected to contain a single ChatInput vertex
- ids = [_id.split("-")[0] for _id in response.json()["ids"]]
-
- assert set(ids) == {"ChatInput"}
-
-
-async def test_build_vertex_invalid_flow_id(client, logged_in_headers):
- uuid = uuid4()
- response = await client.post(f"/api/v1/build/{uuid}/vertices/vertex_id", headers=logged_in_headers)
- assert response.status_code == 500
-
-
-async def test_build_vertex_invalid_vertex_id(client, added_flow_webhook_test, logged_in_headers):
- flow_id = added_flow_webhook_test["id"]
- response = await client.post(f"/api/v1/build/{flow_id}/vertices/invalid_vertex_id", headers=logged_in_headers)
- assert response.status_code == 500
-
-
-async def test_successful_run_no_payload(client, simple_api_test, created_api_key):
- headers = {"x-api-key": created_api_key.api_key}
- flow_id = simple_api_test["id"]
- response = await client.post(f"/api/v1/run/{flow_id}", headers=headers)
- assert response.status_code == status.HTTP_200_OK, response.text
- # Validate the response content
- json_response = response.json()
- assert "session_id" in json_response
- assert "outputs" in json_response
- outer_outputs = json_response["outputs"]
- assert len(outer_outputs) == 1
- outputs_dict = outer_outputs[0]
- assert len(outputs_dict) == 2
- assert "inputs" in outputs_dict
- assert "outputs" in outputs_dict
- assert isinstance(outputs_dict.get("outputs"), list)
- assert len(outputs_dict.get("outputs")) == 1
- ids = [output.get("component_id") for output in outputs_dict.get("outputs")]
- assert all("ChatOutput" in _id for _id in ids)
- display_names = [output.get("component_display_name") for output in outputs_dict.get("outputs")]
- assert all(name in display_names for name in ["Chat Output"])
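- # Diagnostic only: records whether each output's results dict nests a further
- # "results" key; surfaced in the assertion message below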
- output_results_has_results = all("results" in output.get("results") for output in outputs_dict.get("outputs"))
- inner_results = [output.get("results") for output in outputs_dict.get("outputs")]
-
- assert all(result is not None for result in inner_results), (outputs_dict, output_results_has_results)
-
-
-async def test_successful_run_with_output_type_text(client, simple_api_test, created_api_key):
- headers = {"x-api-key": created_api_key.api_key}
- flow_id = simple_api_test["id"]
- payload = {
- "output_type": "text",
- }
- response = await client.post(f"/api/v1/run/{flow_id}", headers=headers, json=payload)
- assert response.status_code == status.HTTP_200_OK, response.text
- # Validate the response content
- json_response = response.json()
- assert "session_id" in json_response
- assert "outputs" in json_response
- outer_outputs = json_response["outputs"]
- assert len(outer_outputs) == 1
- outputs_dict = outer_outputs[0]
- assert len(outputs_dict) == 2
- assert "inputs" in outputs_dict
- assert "outputs" in outputs_dict
- assert isinstance(outputs_dict.get("outputs"), list)
- assert len(outputs_dict.get("outputs")) == 1
- ids = [output.get("component_id") for output in outputs_dict.get("outputs")]
- assert all("ChatOutput" in _id for _id in ids), ids
- display_names = [output.get("component_display_name") for output in outputs_dict.get("outputs")]
- assert all(name in display_names for name in ["Chat Output"]), display_names
- inner_results = [output.get("results") for output in outputs_dict.get("outputs")]
- expected_keys = ["message"]
- assert all(key in result for result in inner_results for key in expected_keys), outputs_dict
-
-
-@pytest.mark.benchmark
-async def test_successful_run_with_output_type_any(client, simple_api_test, created_api_key):
- # This one should have both the ChatOutput and TextOutput components
- headers = {"x-api-key": created_api_key.api_key}
- flow_id = simple_api_test["id"]
- payload = {
- "output_type": "any",
- }
- response = await client.post(f"/api/v1/run/{flow_id}", headers=headers, json=payload)
- assert response.status_code == status.HTTP_200_OK, response.text
- # Validate the response content
- json_response = response.json()
- assert "session_id" in json_response
- assert "outputs" in json_response
- outer_outputs = json_response["outputs"]
- assert len(outer_outputs) == 1
- outputs_dict = outer_outputs[0]
- assert len(outputs_dict) == 2
- assert "inputs" in outputs_dict
- assert "outputs" in outputs_dict
- assert isinstance(outputs_dict.get("outputs"), list)
- assert len(outputs_dict.get("outputs")) == 1
- ids = [output.get("component_id") for output in outputs_dict.get("outputs")]
- assert all("ChatOutput" in _id or "TextOutput" in _id for _id in ids), ids
- display_names = [output.get("component_display_name") for output in outputs_dict.get("outputs")]
- assert all(name in display_names for name in ["Chat Output"]), display_names
- inner_results = [output.get("results") for output in outputs_dict.get("outputs")]
- expected_keys = ["message"]
- assert all(key in result for result in inner_results for key in expected_keys), outputs_dict
-
-
-@pytest.mark.benchmark
-async def test_successful_run_with_output_type_debug(client, simple_api_test, created_api_key):
- # This one should return outputs for all components
- # Just check the number of outputs (there should be 3)
- headers = {"x-api-key": created_api_key.api_key}
- flow_id = simple_api_test["id"]
- payload = {
- "output_type": "debug",
- }
- response = await client.post(f"/api/v1/run/{flow_id}", headers=headers, json=payload)
- assert response.status_code == status.HTTP_200_OK, response.text
- # Validate the response content
- json_response = response.json()
- assert "session_id" in json_response
- assert "outputs" in json_response
- outer_outputs = json_response["outputs"]
- assert len(outer_outputs) == 1
- outputs_dict = outer_outputs[0]
- assert len(outputs_dict) == 2
- assert "inputs" in outputs_dict
- assert "outputs" in outputs_dict
- assert isinstance(outputs_dict.get("outputs"), list)
- assert len(outputs_dict.get("outputs")) == 3
-
-
-@pytest.mark.benchmark
-async def test_successful_run_with_input_type_text(client, simple_api_test, created_api_key):
- headers = {"x-api-key": created_api_key.api_key}
- flow_id = simple_api_test["id"]
- payload = {
- "input_type": "text",
- "output_type": "debug",
- "input_value": "value1",
- }
- response = await client.post(f"/api/v1/run/{flow_id}", headers=headers, json=payload)
- assert response.status_code == status.HTTP_200_OK, response.text
- # Validate the response content
- json_response = response.json()
- assert "session_id" in json_response
- assert "outputs" in json_response
- outer_outputs = json_response["outputs"]
- assert len(outer_outputs) == 1
- outputs_dict = outer_outputs[0]
- assert len(outputs_dict) == 2
- assert "inputs" in outputs_dict
- assert "outputs" in outputs_dict
- assert outputs_dict.get("inputs") == {"input_value": "value1"}
- assert isinstance(outputs_dict.get("outputs"), list)
- assert len(outputs_dict.get("outputs")) == 3
- # Now we get all components that contain TextInput in the component_id
- text_input_outputs = [output for output in outputs_dict.get("outputs") if "TextInput" in output.get("component_id")]
- assert len(text_input_outputs) == 1
- # Now we check if the input_value is correct
- # The "text" key appears twice because the result entry "text" is a Message whose payload field is also "text"
- assert all(output.get("results").get("text").get("text") == "value1" for output in text_input_outputs), (
- text_input_outputs
- )
-
-
-@pytest.mark.api_key_required
-@pytest.mark.benchmark
-async def test_successful_run_with_input_type_chat(client: AsyncClient, simple_api_test, created_api_key):
- headers = {"x-api-key": created_api_key.api_key}
- flow_id = simple_api_test["id"]
- payload = {
- "input_type": "chat",
- "output_type": "debug",
- "input_value": "value1",
- }
- response = await client.post(f"/api/v1/run/{flow_id}", headers=headers, json=payload)
- assert response.status_code == status.HTTP_200_OK, response.text
- # Validate the response content
- json_response = response.json()
- assert "session_id" in json_response
- assert "outputs" in json_response
- outer_outputs = json_response["outputs"]
- assert len(outer_outputs) == 1
- outputs_dict = outer_outputs[0]
- assert len(outputs_dict) == 2
- assert "inputs" in outputs_dict
- assert "outputs" in outputs_dict
- assert outputs_dict.get("inputs") == {"input_value": "value1"}
- assert isinstance(outputs_dict.get("outputs"), list)
- assert len(outputs_dict.get("outputs")) == 3
- # Now we get all components that contain ChatInput in the component_id
- chat_input_outputs = [output for output in outputs_dict.get("outputs") if "ChatInput" in output.get("component_id")]
- assert len(chat_input_outputs) == 1
- # Now we check if the input_value is correct
- assert all(output.get("results").get("message").get("text") == "value1" for output in chat_input_outputs), (
- chat_input_outputs
- )
-
-
-@pytest.mark.benchmark
-async def test_invalid_run_with_input_type_chat(client, simple_api_test, created_api_key):
- headers = {"x-api-key": created_api_key.api_key}
- flow_id = simple_api_test["id"]
- payload = {
- "input_type": "chat",
- "output_type": "debug",
- "input_value": "value1",
- "tweaks": {"Chat Input": {"input_value": "value2"}},
- }
- response = await client.post(f"/api/v1/run/{flow_id}", headers=headers, json=payload)
- assert response.status_code == status.HTTP_400_BAD_REQUEST, response.text
- assert "If you pass an input_value to the chat input, you cannot pass a tweak with the same name." in response.text
-
-
-@pytest.mark.benchmark
-async def test_successful_run_with_input_type_any(client, simple_api_test, created_api_key):
- headers = {"x-api-key": created_api_key.api_key}
- flow_id = simple_api_test["id"]
- payload = {
- "input_type": "any",
- "output_type": "debug",
- "input_value": "value1",
- }
- response = await client.post(f"/api/v1/run/{flow_id}", headers=headers, json=payload)
- assert response.status_code == status.HTTP_200_OK, response.text
- # Validate the response content
- json_response = response.json()
- assert "session_id" in json_response
- assert "outputs" in json_response
- outer_outputs = json_response["outputs"]
- assert len(outer_outputs) == 1
- outputs_dict = outer_outputs[0]
- assert len(outputs_dict) == 2
- assert "inputs" in outputs_dict
- assert "outputs" in outputs_dict
- assert outputs_dict.get("inputs") == {"input_value": "value1"}
- assert isinstance(outputs_dict.get("outputs"), list)
- assert len(outputs_dict.get("outputs")) == 3
- # Now we get all components that contain TextInput or ChatInput in the component_id
- any_input_outputs = [
- output
- for output in outputs_dict.get("outputs")
- if "TextInput" in output.get("component_id") or "ChatInput" in output.get("component_id")
- ]
- assert len(any_input_outputs) == 2
- # Now we check if the input_value is correct
- all_result_dicts = [output.get("results") for output in any_input_outputs]
- all_message_or_text_dicts = [
- result_dict.get("message", result_dict.get("text")) for result_dict in all_result_dicts
- ]
- assert all(message_or_text_dict.get("text") == "value1" for message_or_text_dict in all_message_or_text_dicts), (
- any_input_outputs
- )
-
-
-async def test_invalid_flow_id(client, created_api_key):
- headers = {"x-api-key": created_api_key.api_key}
- flow_id = "invalid-flow-id"
- response = await client.post(f"/api/v1/run/{flow_id}", headers=headers)
- assert response.status_code == status.HTTP_404_NOT_FOUND, response.text
- headers = {"x-api-key": created_api_key.api_key}
- flow_id = UUID(int=0)
- response = await client.post(f"/api/v1/run/{flow_id}", headers=headers)
- assert response.status_code == status.HTTP_404_NOT_FOUND, response.text
-
-
-@pytest.mark.benchmark
-async def test_starter_projects(client, created_api_key):
- headers = {"x-api-key": created_api_key.api_key}
- response = await client.get("api/v1/starter-projects/", headers=headers)
- assert response.status_code == status.HTTP_200_OK, response.text
-
-
-async def _run_single_stream_test(client: AsyncClient, flow_id: str, headers: dict, payload: dict):
- """Helper coroutine to run and validate a single streaming request."""
- received_events = [] # Track all event types in sequence
- got_end_event = False
- final_result = None
-
- async with client.stream("POST", f"/api/v1/run/{flow_id}?stream=true", headers=headers, json=payload) as response:
- assert response.status_code == status.HTTP_200_OK, (
- f"Request failed with status {response.status_code}: {response.text}"
- )
- assert response.headers["content-type"].startswith("text/event-stream"), (
- f"Expected event stream content type, got: {response.headers['content-type']}"
- )
-
- async for line in response.aiter_lines():
- if not line.strip():
- continue
-
- try:
- event_data = json.loads(line)
- except json.JSONDecodeError:
- pytest.fail(f"Failed to parse JSON from stream line: {line}")
-
- assert "event" in event_data, f"Event type missing in response line: {line}"
- event_type = event_data["event"]
- received_events.append(event_type)
-
- if event_type == "add_message":
- message_data = event_data["data"]
- assert "sender_name" in message_data, f"Missing 'sender_name' in add_message event: {message_data}"
- assert "sender" in message_data, f"Missing 'sender' in add_message event: {message_data}"
- assert "session_id" in message_data, f"Missing 'session_id' in add_message event: {message_data}"
- assert "text" in message_data, f"Missing 'text' in add_message event: {message_data}"
-
- elif event_type == "token":
- token_data = event_data["data"]
- assert "chunk" in token_data, f"Missing 'chunk' in token event: {token_data}"
-
- elif event_type == "end":
- got_end_event = True
- final_result = event_data["data"].get("result")
- assert final_result is not None, "End event should contain result data but was None"
- break # Exit loop after end event
-
- elif event_type == "error":
- pytest.fail(f"Received error event in stream: {event_data['data']}")
-
- # Assert we got the end event
- assert got_end_event, f"Stream did not receive an end event. Received events: {received_events}"
-
- # Verify event sequence
- assert "end" in received_events, f"End event missing from event sequence. Received: {received_events}"
- assert received_events[-1] == "end", f"Last event should be 'end', but was '{received_events[-1]}'"
-
- # Verify we got at least one message or token event before end
- assert len(received_events) > 2, f"Should receive multiple events before the end event. Got: {received_events}"
- assert any(event == "add_message" for event in received_events), (
- f"Should receive at least one add_message event. Received events: {received_events}"
- )
- assert any(event == "token" for event in received_events), (
- f"Should receive at least one token event. Received events: {received_events}"
- )
-
- # Verify the final result structure in the end event
- assert final_result is not None, "Final result should not be None"
- assert "outputs" in final_result, f"Missing 'outputs' in final result: {final_result}"
- assert "session_id" in final_result, f"Missing 'session_id' in final result: {final_result}"
- outputs = final_result["outputs"]
- assert len(outputs) == 1, f"Expected 1 output, got {len(outputs)}: {outputs}"
- outputs_dict = outputs[0]
-
- # Verify the debug outputs in final result
- assert "inputs" in outputs_dict, f"Missing 'inputs' in outputs_dict: {outputs_dict}"
- assert "outputs" in outputs_dict, f"Missing 'outputs' in outputs_dict: {outputs_dict}"
- assert outputs_dict["inputs"] == {"input_value": payload["input_value"]}, (
- f"Input value mismatch. Expected: {{'input_value': {payload['input_value']}}}, Got: {outputs_dict['inputs']}"
- )
- assert isinstance(outputs_dict.get("outputs"), list), (
- f"Expected outputs to be a list, got: {type(outputs_dict.get('outputs'))}"
- )
-
- chat_input_outputs = [output for output in outputs_dict.get("outputs") if "ChatInput" in output.get("component_id")]
- assert len(chat_input_outputs) == 1, (
- f"Expected 1 ChatInput output, got {len(chat_input_outputs)}: {chat_input_outputs}"
- )
- assert all(
- output.get("results").get("message").get("text") == payload["input_value"] for output in chat_input_outputs
- ), f"Message text mismatch. Expected: {payload['input_value']}, Got: {chat_input_outputs}"
-
-
-@pytest.mark.api_key_required
-@pytest.mark.benchmark
-async def test_concurrent_stream_run_with_input_type_chat(client: AsyncClient, starter_project, created_api_key):
- """Test concurrent streaming requests to the run endpoint with chat input type."""
- headers = {"x-api-key": created_api_key.api_key, "Accept": "text/event-stream", "Content-Type": "application/json"}
- flow_id = starter_project["id"]
- payload = {
- "input_type": "chat",
- "output_type": "debug",
- "input_value": "How are you?",
- }
- num_concurrent_requests = 5 # Number of concurrent requests to test
-
- tasks = [_run_single_stream_test(client, flow_id, headers, payload) for _ in range(num_concurrent_requests)]
-
- # Run all streaming tests concurrently
- await asyncio.gather(*tasks)
diff --git a/src/backend/tests/unit/test_exception_telemetry.py b/src/backend/tests/unit/test_exception_telemetry.py
deleted file mode 100644
index 23cc5f54a736..000000000000
--- a/src/backend/tests/unit/test_exception_telemetry.py
+++ /dev/null
@@ -1,286 +0,0 @@
-"""Unit tests for exception telemetry."""
-
-import hashlib
-import traceback
-from unittest.mock import AsyncMock, MagicMock
-
-import pytest
-from langflow.services.telemetry.schema import ExceptionPayload
-from langflow.services.telemetry.service import TelemetryService
-
-
-class TestExceptionTelemetry:
- """Unit test suite for exception telemetry functionality."""
-
- def test_exception_payload_schema(self):
- """Test ExceptionPayload schema creation and serialization."""
- payload = ExceptionPayload(
- exception_type="ValueError",
- exception_message="Test error message",
- exception_context="handler",
- stack_trace_hash="abc123def456",
- )
-
- # Test serialization with aliases
- data = payload.model_dump(by_alias=True, exclude_none=True)
-
- expected_fields = {
- "exceptionType": "ValueError",
- "exceptionMessage": "Test error message",
- "exceptionContext": "handler",
- "stackTraceHash": "abc123def456",
- }
-
- assert data == expected_fields
-
- @pytest.mark.asyncio
- async def test_log_exception_method(self):
- """Test the log_exception method creates proper payload."""
- # Create a minimal telemetry service for testing
- telemetry_service = TelemetryService.__new__(TelemetryService)
- telemetry_service.do_not_track = False
- telemetry_service._stopping = False
-
- # Mock the _queue_event method to capture calls
- captured_events = []
-
- async def mock_queue_event(event_tuple):
- captured_events.append(event_tuple)
-
- telemetry_service._queue_event = mock_queue_event
-
- # Test exception
- test_exception = RuntimeError("Test exception message")
-
- # Call log_exception
- await telemetry_service.log_exception(test_exception, "handler")
-
- # Verify event was queued
- assert len(captured_events) == 1
-
- func, payload, path = captured_events[0]
-
- # Verify payload
- assert isinstance(payload, ExceptionPayload)
- assert payload.exception_type == "RuntimeError"
- assert payload.exception_message == "Test exception message"
- assert payload.exception_context == "handler"
- assert payload.stack_trace_hash is not None
- assert len(payload.stack_trace_hash) == 16 # SHA-256 hash truncated to 16 chars
-
- # Verify path
- assert path == "exception"
-
- @pytest.mark.asyncio
- async def test_send_telemetry_data_success(self):
- """Test successful telemetry data sending."""
- # Create minimal service
- telemetry_service = TelemetryService.__new__(TelemetryService)
- telemetry_service.base_url = "https://mock-telemetry.example.com"
- telemetry_service.do_not_track = False
-
- # Mock HTTP client
- mock_response = MagicMock()
- mock_response.status_code = 200
- mock_client = AsyncMock()
- mock_client.get.return_value = mock_response
- telemetry_service.client = mock_client
-
- payload = ExceptionPayload(
- exception_type="ValueError",
- exception_message="Test error",
- exception_context="handler",
- stack_trace_hash="abc123",
- )
-
- # Send telemetry
- await telemetry_service.send_telemetry_data(payload, "exception")
-
- # Verify HTTP call was made
- mock_client.get.assert_called_once()
- call_args = mock_client.get.call_args
-
- # Check URL
- assert call_args[0][0] == "https://mock-telemetry.example.com/exception"
-
- # Check query parameters
- expected_params = {
- "exceptionType": "ValueError",
- "exceptionMessage": "Test error",
- "exceptionContext": "handler",
- "stackTraceHash": "abc123",
- }
- assert call_args[1]["params"] == expected_params
-
- @pytest.mark.asyncio
- async def test_send_telemetry_data_respects_do_not_track(self):
- """Test that do_not_track setting prevents telemetry."""
- # Create service with do_not_track enabled
- telemetry_service = TelemetryService.__new__(TelemetryService)
- telemetry_service.base_url = "https://mock-telemetry.example.com"
- telemetry_service.do_not_track = True
-
- # Mock HTTP client
- mock_client = AsyncMock()
- telemetry_service.client = mock_client
-
- payload = ExceptionPayload(
- exception_type="ValueError",
- exception_message="Test error",
- exception_context="handler",
- stack_trace_hash="abc123",
- )
-
- # Send telemetry - should be blocked
- await telemetry_service.send_telemetry_data(payload, "exception")
-
- # Verify no HTTP call was made
- mock_client.get.assert_not_called()
-
- def test_stack_trace_hash_consistency(self):
- """Test that same exceptions produce same hash."""
-
- def create_test_exception():
- try:
- msg = "Consistent test message"
- raise ValueError(msg)
- except ValueError as e:
- return e
-
- exc1 = create_test_exception()
- exc2 = create_test_exception()
-
- # Generate hashes the same way as log_exception
- def get_hash(exc):
- stack_trace = traceback.format_exception(type(exc), exc, exc.__traceback__)
- stack_trace_str = "".join(stack_trace)
- return hashlib.sha256(stack_trace_str.encode()).hexdigest()[:16]
-
- hash1 = get_hash(exc1)
- hash2 = get_hash(exc2)
-
- # Hashes should be the same for same exception type and location
- assert hash1 == hash2
-
- @pytest.mark.asyncio
- async def test_query_params_url_length_limit(self):
- """Test that query parameters don't exceed URL length limits."""
- telemetry_service = TelemetryService.__new__(TelemetryService)
- telemetry_service.base_url = "https://mock-telemetry.example.com"
- telemetry_service.do_not_track = False
-
- # Create payload with very long message
- long_message = "A" * 2000 # Very long message
- payload = ExceptionPayload(
- exception_type="ValueError",
- exception_message=long_message,
- exception_context="handler",
- stack_trace_hash="abc123",
- )
-
- mock_client = AsyncMock()
- telemetry_service.client = mock_client
-
- await telemetry_service.send_telemetry_data(payload, "exception")
-
- # Verify HTTP call was made
- mock_client.get.assert_called_once()
- call_args = mock_client.get.call_args
-
- # Check that URL doesn't exceed reasonable length (typically 2048 chars)
- full_url = call_args[0][0]
- assert len(full_url) < 2048, f"URL too long: {len(full_url)} characters"
-
- @pytest.mark.asyncio
- async def test_query_params_special_characters(self):
- """Test that special characters in query parameters are properly encoded."""
- telemetry_service = TelemetryService.__new__(TelemetryService)
- telemetry_service.base_url = "https://mock-telemetry.example.com"
- telemetry_service.do_not_track = False
-
- # Create payload with special characters
- special_message = "Error with special chars: &?=#@!$%^&*()"
- payload = ExceptionPayload(
- exception_type="ValueError",
- exception_message=special_message,
- exception_context="handler",
- stack_trace_hash="abc123",
- )
-
- mock_client = AsyncMock()
- telemetry_service.client = mock_client
-
- await telemetry_service.send_telemetry_data(payload, "exception")
-
- # Verify HTTP call was made
- mock_client.get.assert_called_once()
- call_args = mock_client.get.call_args
-
- # Check that special characters are properly encoded
- full_url = call_args[0][0]
- assert "&" not in full_url or "%26" in full_url, "Ampersand not properly encoded"
- assert "?" not in full_url or "%3F" in full_url, "Question mark not properly encoded"
- assert "=" not in full_url or "%3D" in full_url, "Equals sign not properly encoded"
-
- @pytest.mark.asyncio
- async def test_query_params_sensitive_data_exposure(self):
- """Test that sensitive data is not exposed in query parameters."""
- telemetry_service = TelemetryService.__new__(TelemetryService)
- telemetry_service.base_url = "https://mock-telemetry.example.com"
- telemetry_service.do_not_track = False
-
- # Create payload with potentially sensitive data
- sensitive_message = "Password: secret123, API Key: sk-abc123, Token: xyz789"
- payload = ExceptionPayload(
- exception_type="ValueError",
- exception_message=sensitive_message,
- exception_context="handler",
- stack_trace_hash="abc123",
- )
-
- mock_client = AsyncMock()
- telemetry_service.client = mock_client
-
- await telemetry_service.send_telemetry_data(payload, "exception")
-
- # Verify HTTP call was made
- mock_client.get.assert_called_once()
- call_args = mock_client.get.call_args
-
- # Check that raw sensitive values are not exposed in the request URL
- full_url = call_args[0][0]
- sensitive_patterns = ["secret123", "sk-abc123", "xyz789"]
- for pattern in sensitive_patterns:
- assert pattern not in full_url, f"Sensitive data '{pattern}' found in URL"
-
- @pytest.mark.asyncio
- async def test_query_params_unicode_characters(self):
- """Test that unicode characters in query parameters are handled correctly."""
- telemetry_service = TelemetryService.__new__(TelemetryService)
- telemetry_service.base_url = "https://mock-telemetry.example.com"
- telemetry_service.do_not_track = False
-
- # Create payload with unicode characters
- unicode_message = "Error with unicode: 世界, 🚀, émojis"
- payload = ExceptionPayload(
- exception_type="ValueError",
- exception_message=unicode_message,
- exception_context="handler",
- stack_trace_hash="abc123",
- )
-
- mock_client = AsyncMock()
- telemetry_service.client = mock_client
-
- await telemetry_service.send_telemetry_data(payload, "exception")
-
- # Verify HTTP call was made
- mock_client.get.assert_called_once()
- call_args = mock_client.get.call_args
-
- # Check that unicode characters are properly handled
- full_url = call_args[0][0]
- # URL should be valid and not cause encoding issues
- assert len(full_url) > 0, "URL should not be empty"
- # Should not contain raw unicode characters that could cause issues
- assert "世界" not in full_url or "%E4%B8%96%E7%95%8C" in full_url
diff --git a/src/backend/tests/unit/test_experimental_components.py b/src/backend/tests/unit/test_experimental_components.py
deleted file mode 100644
index f88222502673..000000000000
--- a/src/backend/tests/unit/test_experimental_components.py
+++ /dev/null
@@ -1,20 +0,0 @@
-from lfx.components import prototypes
-
-
-def test_python_function_component():
- # Arrange
- python_function_component = prototypes.PythonFunctionComponent()
-
- # Act
- # function must be a string representation
- function = "def function():\n return 'Hello, World!'"
- python_function_component.function_code = function
- # result is the callable function
- result = python_function_component.get_function_callable()
- result_message = python_function_component.execute_function_message()
- result_data = python_function_component.execute_function_data()
-
- # Assert
- assert result() == "Hello, World!"
- assert result_message.text == "Hello, World!"
- assert result_data[0].text == "Hello, World!"
diff --git a/src/backend/tests/unit/test_frontend_nodes.py b/src/backend/tests/unit/test_frontend_nodes.py
deleted file mode 100644
index 44d0ef4e5a2d..000000000000
--- a/src/backend/tests/unit/test_frontend_nodes.py
+++ /dev/null
@@ -1,57 +0,0 @@
-import pytest
-
-from lfx.template.field.base import Input
-from lfx.template.frontend_node.base import FrontendNode
-from lfx.template.template.base import Template
-
-
-@pytest.fixture
-def sample_template_field() -> Input:
- return Input(name="test_field", field_type="str")
-
-
-@pytest.fixture
-def sample_template(sample_template_field: Input) -> Template:
- return Template(type_name="test_template", fields=[sample_template_field])
-
-
-@pytest.fixture
-def sample_frontend_node(sample_template: Template) -> FrontendNode:
- return FrontendNode(
- template=sample_template,
- description="test description",
- base_classes=["base_class1", "base_class2"],
- name="test_frontend_node",
- )
-
-
-def test_template_field_defaults(sample_template_field: Input):
- assert sample_template_field.field_type == "str"
- assert sample_template_field.required is False
- assert sample_template_field.placeholder == ""
- assert sample_template_field.is_list is False
- assert sample_template_field.show is True
- assert sample_template_field.multiline is False
- assert sample_template_field.value is None
- assert sample_template_field.file_types == []
- assert sample_template_field.file_path == ""
- assert sample_template_field.name == "test_field"
- assert sample_template_field.password is None
-
-
-def test_template_to_dict(sample_template: Template):
- template_dict = sample_template.to_dict()
- assert template_dict["_type"] == "test_template"
- assert len(template_dict) == 2 # _type and test_field
- assert "test_field" in template_dict
- assert "type" in template_dict["test_field"]
- assert "required" in template_dict["test_field"]
-
-
-def test_frontend_node_to_dict(sample_frontend_node: FrontendNode):
- node_dict = sample_frontend_node.to_dict()
- assert len(node_dict) == 1
- assert "test_frontend_node" in node_dict
- assert "description" in node_dict["test_frontend_node"]
- assert "template" in node_dict["test_frontend_node"]
- assert "base_classes" in node_dict["test_frontend_node"]
diff --git a/src/backend/tests/unit/test_helper_components.py b/src/backend/tests/unit/test_helper_components.py
deleted file mode 100644
index c0269e37cd43..000000000000
--- a/src/backend/tests/unit/test_helper_components.py
+++ /dev/null
@@ -1,81 +0,0 @@
-from pathlib import Path
-
-from lfx.components import helpers, processing
-from lfx.custom.utils import build_custom_component_template
-from lfx.schema import Data
-from lfx.schema.message import Message
-
-# def test_update_data_component():
-# # Arrange
-# update_data_component = helpers.UpdateDataComponent()
-
-# # Act
-# new_data = {"new_key": "new_value"}
-# existing_data = Data(data={"existing_key": "existing_value"})
-# result = update_data_component.build(existing_data, new_data)
-# assert result.data == {"existing_key": "existing_value", "new_key": "new_value"}
-# assert result.existing_key == "existing_value"
-# assert result.new_key == "new_value"
-
-
-# def test_document_to_data_component():
-# # Arrange
-# document_to_data_component = helpers.DocumentsToDataComponent()
-
-# # Act
-# # Replace with your actual test data
-# document = Document(page_content="key: value", metadata={"url": "https://example.com"})
-# result = document_to_data_component.build(document)
-
-# # Assert
-# # Replace with your actual expected result
-# assert result == [Data(data={"text": "key: value", "url": "https://example.com"})]
-
-
-def test_uuid_generator_component():
- # Arrange
- uuid_generator_component = helpers.IDGeneratorComponent()
- uuid_generator_component._code = Path(helpers.id_generator.__file__).read_text(encoding="utf-8")
-
- frontend_node, _ = build_custom_component_template(uuid_generator_component)
-
- # Act
- build_config = frontend_node.get("template")
- field_name = "unique_id"
- build_config = uuid_generator_component.update_build_config(build_config, None, field_name)
- result = uuid_generator_component.generate_id()
-
- # Assert
- # UUID should be a string of length 36
- assert isinstance(result, Message)
- assert len(result.text) == 36
-
-
-def test_data_as_text_component():
- # Arrange
- data_as_text_component = processing.ParseDataComponent()
-
- # Act
- # Replace with your actual test data
- data = [Data(data={"key": "value", "bacon": "eggs"})]
- template = "Data:{data} -- Bacon:{bacon}"
- data_as_text_component.set_attributes({"data": data, "template": template})
- result = data_as_text_component.parse_data()
-
- # Assert
- # Replace with your actual expected result
- assert result.text == "Data:{'key': 'value', 'bacon': 'eggs'} -- Bacon:eggs"
-
-
-# def test_text_to_data_component():
-# # Arrange
-# text_to_data_component = helpers.CreateDataComponent()
-
-# # Act
-# # Replace with your actual test data
-# dict_with_text = {"field_1": {"key": "value"}}
-# result = text_to_data_component.build(number_of_fields=1, **dict_with_text)
-
-# # Assert
-# # Replace with your actual expected result
-# assert result == Data(data={"key": "value"})
diff --git a/src/backend/tests/unit/test_initial_setup.py b/src/backend/tests/unit/test_initial_setup.py
deleted file mode 100644
index fe48a367ef29..000000000000
--- a/src/backend/tests/unit/test_initial_setup.py
+++ /dev/null
@@ -1,315 +0,0 @@
-import asyncio
-import os
-import tempfile
-import uuid
-from datetime import datetime
-from unittest.mock import AsyncMock, patch
-
-import pytest
-from anyio import Path
-from httpx import AsyncClient
-from langflow.initial_setup.constants import STARTER_FOLDER_NAME
-from langflow.initial_setup.setup import (
- detect_github_url,
- get_project_data,
- load_bundles_from_urls,
- load_starter_projects,
- update_projects_components_with_latest_component_versions,
-)
-from langflow.interface.components import get_and_cache_all_types_dict
-from langflow.services.auth.utils import create_super_user
-from langflow.services.database.models import Flow
-from langflow.services.database.models.folder.model import Folder
-from langflow.services.deps import get_settings_service, session_scope
-from sqlalchemy.orm import selectinload
-from sqlmodel import select
-
-from lfx.constants import BASE_COMPONENTS_PATH
-from lfx.custom.directory_reader.utils import abuild_custom_component_list_from_path
-
-
-async def test_load_starter_projects():
- projects = await load_starter_projects()
- assert isinstance(projects, list)
- assert all(isinstance(project[1], dict) for project in projects)
- assert all(isinstance(project[0], Path) for project in projects)
-
-
-async def test_get_project_data():
- projects = await load_starter_projects()
- for _, project in projects:
- (
- project_name,
- project_description,
- project_is_component,
- updated_at_datetime,
- project_data,
- project_icon,
- project_icon_bg_color,
- project_gradient,
- project_tags,
- ) = get_project_data(project)
- assert isinstance(project_gradient, str) or project_gradient is None
- assert isinstance(project_tags, list), f"Project {project_name} has no tags"
- assert isinstance(project_name, str), f"Project {project_name} has no name"
- assert isinstance(project_description, str), f"Project {project_name} has no description"
- assert isinstance(project_is_component, bool), f"Project {project_name} has no is_component"
- assert isinstance(updated_at_datetime, datetime), f"Project {project_name} has no updated_at_datetime"
- assert isinstance(project_data, dict), f"Project {project_name} has no data"
- assert isinstance(project_icon, str) or project_icon is None, f"Project {project_name} has no icon"
- assert isinstance(project_icon_bg_color, str) or project_icon_bg_color is None, (
- f"Project {project_name} has no icon_bg_color"
- )
-
-
-@pytest.mark.usefixtures("client")
-async def test_create_or_update_starter_projects():
- async with session_scope() as session:
- # Get the number of projects returned by load_starter_projects
- num_projects = len(await load_starter_projects())
-
- # Get the number of projects in the database
- stmt = select(Folder).options(selectinload(Folder.flows)).where(Folder.name == STARTER_FOLDER_NAME)
- folder = (await session.exec(stmt)).first()
- assert folder is not None
- num_db_projects = len(folder.flows)
-
- # Check that the number of projects in the database is the same as the number of projects returned by
- # load_starter_projects
- assert num_db_projects == num_projects
-
-
-# Some starter projects require integration
-# async def test_starter_projects_can_run_successfully(client):
-# with session_scope() as session:
-# # Run the function to create or update projects
-# create_or_update_starter_projects()
-
-# # Get the number of projects returned by load_starter_projects
-# num_projects = len(load_starter_projects())
-
-# # Get the number of projects in the database
-# num_db_projects = session.exec(select(func.count(Flow.id)).where(Flow.folder == STARTER_FOLDER_NAME)).one()
-
-# # Check that the number of projects in the database is the same as the number of projects returned by
-# # load_starter_projects
-# assert num_db_projects == num_projects
-
-# # Get all the starter projects
-# projects = session.exec(select(Flow).where(Flow.folder == STARTER_FOLDER_NAME)).all()
-# graphs: list[tuple[str, Graph]] = []
-# for project in projects:
-# # Add tweaks to make file_path work
-# tweaks = {"path": __file__}
-# graph_data = process_tweaks(project.data, tweaks)
-# graph_object = Graph.from_payload(graph_data, flow_id=project.id)
-# graphs.append((project.name, graph_object))
-# assert len(graphs) == len(projects)
-# for name, graph in graphs:
-# outputs = await graph.arun(
-# inputs={},
-# outputs=[],
-# session_id="test",
-# )
-# assert all(isinstance(output, RunOutputs) for output in outputs), f"Project {name} error: {outputs}"
-# delete_messages(session_id="test")
-
-
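-# "components" maps category -> {component_name: template}; search every category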
-def find_component_by_name(components, name):
- for children in components.values():
- if name in children:
- return children[name]
- msg = f"Component {name} not found in components"
- raise ValueError(msg)
-
-
-def set_value(component, input_name, value):
- component["template"][input_name]["value"] = value
-
-
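-# Graph-construction helpers: wrap a component template in the node shape the
-# frontend produces, and wire two nodes together with a Message-typed edge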
-def component_to_node(node_id, node_type, component):
- return {"id": node_type + node_id, "data": {"node": component, "type": node_type, "id": node_id}}
-
-
-def add_edge(source, target, from_output, to_input):
- return {
- "source": source,
- "target": target,
- "data": {
- "sourceHandle": {"dataType": "ChatInput", "id": source, "name": from_output, "output_types": ["Message"]},
- "targetHandle": {"fieldName": to_input, "id": target, "inputTypes": ["Message"], "type": "str"},
- },
- }
-
-
-async def test_refresh_starter_projects():
- # Use lfx components path since components have been moved there
- data_path = BASE_COMPONENTS_PATH
- components = await abuild_custom_component_list_from_path(data_path)
-
- chat_input = find_component_by_name(components, "ChatInput")
- chat_output = find_component_by_name(components, "ChatOutput")
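- # Tamper with the cached ChatOutput template so the update routine has drift
- # to correct: a stale code value and a deleted field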
- chat_output["template"]["code"]["value"] = "changed !"
- del chat_output["template"]["should_store_message"]
- graph_data = {
- "nodes": [
- component_to_node("chat-input-1", "ChatInput", chat_input),
- component_to_node("chat-output-1", "ChatOutput", chat_output),
- ],
- "edges": [add_edge("ChatInput" + "chat-input-1", "ChatOutput" + "chat-output-1", "message", "input_value")],
- }
- all_types = await get_and_cache_all_types_dict(get_settings_service())
- new_change = update_projects_components_with_latest_component_versions(graph_data, all_types)
- assert graph_data["nodes"][1]["data"]["node"]["template"]["code"]["value"] == "changed !"
- assert new_change["nodes"][1]["data"]["node"]["template"]["code"]["value"] != "changed !"
-
- assert "should_store_message" not in graph_data["nodes"][1]["data"]["node"]["template"]
- assert "should_store_message" in new_change["nodes"][1]["data"]["node"]["template"]
-
-
-@pytest.mark.parametrize(
- ("url", "expected"),
- [
- (
- "https://github.com/langflow-ai/langflow-bundles",
- "https://github.com/langflow-ai/langflow-bundles/archive/refs/heads/main.zip",
- ),
- (
- "https://github.com/langflow-ai/langflow-bundles/",
- "https://github.com/langflow-ai/langflow-bundles/archive/refs/heads/main.zip",
- ),
- (
- "https://github.com/langflow-ai/langflow-bundles.git",
- "https://github.com/langflow-ai/langflow-bundles/archive/refs/heads/main.zip",
- ),
- (
- "https://github.com/langflow-ai/langflow-bundles/tree/some.branch-0_1",
- "https://github.com/langflow-ai/langflow-bundles/archive/refs/heads/some.branch-0_1.zip",
- ),
- (
- "https://github.com/langflow-ai/langflow-bundles/tree/some/branch",
- "https://github.com/langflow-ai/langflow-bundles/archive/refs/heads/some/branch.zip",
- ),
- (
- "https://github.com/langflow-ai/langflow-bundles/tree/some/branch/",
- "https://github.com/langflow-ai/langflow-bundles/archive/refs/heads/some/branch.zip",
- ),
- (
- "https://github.com/langflow-ai/langflow-bundles/releases/tag/v1.0.0-0_1",
- "https://github.com/langflow-ai/langflow-bundles/archive/refs/tags/v1.0.0-0_1.zip",
- ),
- (
- "https://github.com/langflow-ai/langflow-bundles/releases/tag/foo/v1.0.0",
- "https://github.com/langflow-ai/langflow-bundles/archive/refs/tags/foo/v1.0.0.zip",
- ),
- (
- "https://github.com/langflow-ai/langflow-bundles/releases/tag/foo/v1.0.0/",
- "https://github.com/langflow-ai/langflow-bundles/archive/refs/tags/foo/v1.0.0.zip",
- ),
- (
- "https://github.com/langflow-ai/langflow-bundles/commit/68428ce16729a385fe1bcc0f1ec91fd5f5f420b9",
- "https://github.com/langflow-ai/langflow-bundles/archive/68428ce16729a385fe1bcc0f1ec91fd5f5f420b9.zip",
- ),
- (
- "https://github.com/langflow-ai/langflow-bundles/commit/68428ce16729a385fe1bcc0f1ec91fd5f5f420b9/",
- "https://github.com/langflow-ai/langflow-bundles/archive/68428ce16729a385fe1bcc0f1ec91fd5f5f420b9.zip",
- ),
- ("https://example.com/myzip.zip", "https://example.com/myzip.zip"),
- ],
-)
-async def test_detect_github_url(url, expected):
- # Mock the GitHub API response for the default branch case
- mock_response = AsyncMock()
- mock_response.json = lambda: {"default_branch": "main"} # Not async, just returns a dict
- mock_response.raise_for_status.return_value = None
-
- with patch("httpx.AsyncClient.get", return_value=mock_response) as mock_get:
- result = await detect_github_url(url)
- assert result == expected
-
- # Verify the API call was only made for GitHub repo URLs
- if "github.com" in url and not any(x in url for x in ["/tree/", "/releases/", "/commit/"]):
- mock_get.assert_called_once()
- else:
- mock_get.assert_not_called()
-
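-# The URL-to-archive mapping exercised above can be implemented with a few
-# suffix checks; an illustrative sketch only (the real detect_github_url also
-# resolves the default branch through the GitHub API, as mocked above):
-#
-#   repo = url.rstrip("/").removesuffix(".git")
-#   if "/tree/" in repo:
-#       base, _, ref = repo.partition("/tree/")
-#       return f"{base}/archive/refs/heads/{ref}.zip"
-#   if "/releases/tag/" in repo:
-#       base, _, tag = repo.partition("/releases/tag/")
-#       return f"{base}/archive/refs/tags/{tag}.zip"
-#   if "/commit/" in repo:
-#       base, _, sha = repo.partition("/commit/")
-#       return f"{base}/archive/{sha}.zip"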
-
-@pytest.mark.usefixtures("client")
-async def test_load_bundles_from_urls():
- settings_service = get_settings_service()
- settings_service.settings.bundle_urls = [
- "https://github.com/langflow-ai/langflow-bundles/commit/68428ce16729a385fe1bcc0f1ec91fd5f5f420b9"
- ]
- settings_service.auth_settings.AUTO_LOGIN = True
-
- # Create a superuser in the test database since load_bundles_from_urls requires one
- async with session_scope() as session:
- await create_super_user(
- username=settings_service.auth_settings.SUPERUSER,
- password=settings_service.auth_settings.SUPERUSER_PASSWORD,
- db=session,
- )
-
- temp_dirs, components_paths = await load_bundles_from_urls()
-
- try:
- assert len(components_paths) == 1
- assert "langflow-bundles-68428ce16729a385fe1bcc0f1ec91fd5f5f420b9/components" in components_paths[0]
-
- content = await (Path(components_paths[0]) / "embeddings" / "openai2.py").read_text(encoding="utf-8")
- assert "OpenAIEmbeddings2Component" in content
-
- assert len(temp_dirs) == 1
-
- async with session_scope() as session:
- stmt = select(Flow).where(Flow.id == uuid.UUID("c54f9130-f2fa-4a3e-b22a-3856d946351b"))
- flow = (await session.exec(stmt)).first()
- assert flow is not None
- finally:
- for temp_dir in temp_dirs:
- await asyncio.to_thread(temp_dir.cleanup)
-
-
-@pytest.fixture
-def set_fs_flows_polling_interval():
- os.environ["LANGFLOW_FS_FLOWS_POLLING_INTERVAL"] = "100"
- yield
-    os.environ.pop("LANGFLOW_FS_FLOWS_POLLING_INTERVAL", None)  # os.unsetenv would leave os.environ stale
-
-
-@pytest.mark.usefixtures("set_fs_flows_polling_interval")
-async def test_sync_flows_from_fs(client: AsyncClient, logged_in_headers):
-    flow_file = Path(tempfile.gettempdir()) / f"{uuid.uuid4()}.json"
- try:
- basic_case = {
- "name": "string",
- "description": "string",
- "data": {},
- "locked": False,
- "fs_path": str(flow_file),
- }
- await client.post("api/v1/flows/", json=basic_case, headers=logged_in_headers)
-
- content = await flow_file.read_text(encoding="utf-8")
- fs_flow = Flow.model_validate_json(content)
- fs_flow.name = "new name"
- fs_flow.description = "new description"
- fs_flow.data = {"nodes": {}, "edges": {}}
- fs_flow.locked = True
-
- await flow_file.write_text(fs_flow.model_dump_json(), encoding="utf-8")
-
- result = {}
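-        # Poll until the background FS-sync task picks up the on-disk edit
-        # (the fixture sets LANGFLOW_FS_FLOWS_POLLING_INTERVAL=100, presumably
-        # milliseconds, so ten 0.1 s sleeps give it ample time).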
- for i in range(10):
- response = await client.get(f"api/v1/flows/{fs_flow.id}", headers=logged_in_headers)
- result = response.json()
- if result["name"] == "new name":
- break
- assert i != 9, "flow name should have been updated"
- await asyncio.sleep(0.1)
-
- assert result["description"] == "new description"
- assert result["data"] == {"nodes": {}, "edges": {}}
- assert result["locked"] is True
- finally:
- await flow_file.unlink(missing_ok=True)
diff --git a/src/backend/tests/unit/test_kubernetes_secrets.py b/src/backend/tests/unit/test_kubernetes_secrets.py
deleted file mode 100644
index 9da44cdef82a..000000000000
--- a/src/backend/tests/unit/test_kubernetes_secrets.py
+++ /dev/null
@@ -1,107 +0,0 @@
-from base64 import b64encode
-from unittest.mock import MagicMock
-from uuid import UUID
-
-import pytest
-from kubernetes.client import V1ObjectMeta, V1Secret
-from langflow.services.variable.kubernetes_secrets import KubernetesSecretManager, encode_user_id
-
-
-@pytest.fixture
-def _mock_kube_config(mocker):
- mocker.patch("kubernetes.config.load_kube_config")
- mocker.patch("kubernetes.config.load_incluster_config")
-
-
-@pytest.fixture
-def secret_manager(_mock_kube_config):
- return KubernetesSecretManager(namespace="test-namespace")
-
-
-def test_create_secret(secret_manager, mocker):
- mocker.patch.object(
- secret_manager.core_api,
- "create_namespaced_secret",
- return_value=V1Secret(metadata=V1ObjectMeta(name="test-secret")),
- )
-
- secret_manager.create_secret(name="test-secret", data={"key": "value"})
- secret_manager.core_api.create_namespaced_secret.assert_called_once_with(
- "test-namespace",
- V1Secret(
- api_version="v1",
- kind="Secret",
- metadata=V1ObjectMeta(name="test-secret"),
- type="Opaque",
- data={"key": b64encode(b"value").decode()},
- ),
- )
-
-
-def test_get_secret(secret_manager, mocker):
- mock_secret = V1Secret(data={"key": b64encode(b"value").decode()})
- mocker.patch.object(secret_manager.core_api, "read_namespaced_secret", return_value=mock_secret)
-
- secret_data = secret_manager.get_secret(name="test-secret")
- secret_manager.core_api.read_namespaced_secret.assert_called_once_with("test-secret", "test-namespace")
- assert secret_data == {"key": "value"}
-
-
-def test_delete_secret(secret_manager, mocker):
- mocker.patch.object(secret_manager.core_api, "delete_namespaced_secret", return_value=MagicMock(status="Success"))
-
- secret_manager.delete_secret(name="test-secret")
- secret_manager.core_api.delete_namespaced_secret.assert_called_once_with("test-secret", "test-namespace")
-
-
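-# The encode_* tests below pin down the contract of encode_user_id: the result
-# must be usable as a Kubernetes secret name (DNS subdomain rules: at most 253
-# characters, starting and ending with an alphanumeric). A minimal sketch of
-# such an encoder (illustrative only, assuming `re`; not the lfx implementation):
-#
-#   def encode_user_id_sketch(user_id) -> str:
-#       raw = f"uuid-{user_id}" if isinstance(user_id, UUID) else str(user_id)
-#       safe = re.sub(r"[^a-z0-9_.-]", "-", raw.lower())[:253]
-#       return safe.strip("-._")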
-def test_encode_uuid():
- uuid = UUID("123e4567-e89b-12d3-a456-426614174000")
- result = encode_user_id(uuid)
- assert result == "uuid-123e4567-e89b-12d3-a456-426614174000"
- assert len(result) < 253
- assert result[0].isalnum()
- assert result[-1].isalnum()
-
-
-def test_encode_string():
- string_id = "user@example.com"
- result = encode_user_id(string_id)
- # assert (result.isalnum() or '-' in result or '_' in result)
- assert len(result) < 253
- assert result[0].isalnum()
- assert result[-1].isalnum()
-
-
-def test_long_string():
- long_string = "a" * 300
- result = encode_user_id(long_string)
- assert len(result) <= 253
-
-
-def test_starts_with_non_alphanumeric():
- non_alnum_start = "+user123"
- result = encode_user_id(non_alnum_start)
- assert result[0].isalnum()
-
-
-def test_ends_with_non_alphanumeric():
- non_alnum_end = "user123+"
- result = encode_user_id(non_alnum_end)
- assert result[-1].isalnum()
-
-
-def test_email_address():
- email = "User.Name@Example.com"
- result = encode_user_id(email)
- assert result.isalnum() or "-" in result or "_" in result
- assert len(result) < 253
- assert result[0].isalnum()
- assert result[-1].isalnum()
-
-
-def test_uuid_case_insensitivity():
- uuid_upper = UUID("123E4567-E89B-12D3-A456-426614174000")
- uuid_lower = UUID("123e4567-e89b-12d3-a456-426614174000")
- result_upper = encode_user_id(uuid_upper)
- result_lower = encode_user_id(uuid_lower)
- assert result_upper == result_lower
diff --git a/src/backend/tests/unit/test_lfx_reexport_modules.py b/src/backend/tests/unit/test_lfx_reexport_modules.py
deleted file mode 100644
index cea77122e44f..000000000000
--- a/src/backend/tests/unit/test_lfx_reexport_modules.py
+++ /dev/null
@@ -1,481 +0,0 @@
-"""Test to ensure all langflow modules that re-export lfx modules work correctly.
-
-This test validates that every langflow module that re-exports from lfx
-can successfully import and access all expected symbols, maintaining
-backward compatibility and proper API exposure.
-
-Based on analysis, there are 23 langflow modules that re-export from lfx:
-
-Base Modules (11):
-- langflow.base (wildcard from lfx.base)
-- langflow.base.agents (from lfx.base.agents)
-- langflow.base.data (from lfx.base.data)
-- langflow.base.embeddings (from lfx.base.embeddings)
-- langflow.base.io (from lfx.base.io)
-- langflow.base.memory (from lfx.base.memory)
-- langflow.base.models (from lfx.base.models)
-- langflow.base.prompts (from lfx.base.prompts)
-- langflow.base.textsplitters (from lfx.base.textsplitters)
-- langflow.base.tools (from lfx.base.tools)
-- langflow.base.vectorstores (from lfx.base.vectorstores)
-
-Core System Modules (12):
-- langflow.custom (from lfx.custom)
-- langflow.custom.custom_component (from lfx.custom.custom_component)
-- langflow.field_typing (from lfx.field_typing with __getattr__)
-- langflow.graph (from lfx.graph)
-- langflow.inputs (from lfx.inputs.inputs)
-- langflow.interface (from lfx.interface)
-- langflow.io (from lfx.io + lfx.template)
-- langflow.load (from lfx.load)
-- langflow.logging (from lfx.log.logger)
-- langflow.schema (from lfx.schema)
-- langflow.template (wildcard from lfx.template)
-- langflow.template.field (from lfx.template.field)
-"""
-
-import importlib
-import inspect
-import pkgutil
-import re
-import time
-from pathlib import Path
-
-import pytest
-
-
-def get_all_reexport_modules():
- """Get all known re-export modules for parametrized testing."""
- # Define the modules here so they can be accessed by parametrize
- direct_reexport_modules = {
- "langflow.base.agents": "lfx.base.agents",
- "langflow.base.data": "lfx.base.data",
- "langflow.base.embeddings": "lfx.base.embeddings",
- "langflow.base.io": "lfx.base.io",
- "langflow.base.memory": "lfx.base.memory",
- "langflow.base.models": "lfx.base.models",
- "langflow.base.prompts": "lfx.base.prompts",
- "langflow.base.textsplitters": "lfx.base.textsplitters",
- "langflow.base.tools": "lfx.base.tools",
- "langflow.base.vectorstores": "lfx.base.vectorstores",
- "langflow.custom.custom_component": "lfx.custom.custom_component",
- "langflow.graph": "lfx.graph",
- "langflow.inputs": "lfx.inputs.inputs",
- "langflow.interface": "lfx.interface",
- "langflow.load": "lfx.load",
- "langflow.logging": "lfx.log",
- "langflow.schema": "lfx.schema",
- "langflow.template.field": "lfx.template.field",
- }
-
- wildcard_reexport_modules = {
- "langflow.base": "lfx.base",
- "langflow.template": "lfx.template",
- }
-
- complex_reexport_modules = {
- "langflow.custom": ["lfx.custom", "lfx.custom.custom_component", "lfx.custom.utils"],
- "langflow.io": ["lfx.io", "lfx.template"],
- }
-
- dynamic_reexport_modules = {
- "langflow.field_typing": "lfx.field_typing",
- }
-
- return list(
- {
- **direct_reexport_modules,
- **wildcard_reexport_modules,
- **complex_reexport_modules,
- **dynamic_reexport_modules,
- }.keys()
- )
-
-
-class TestLfxReexportModules:
- """Test that all langflow modules that re-export from lfx work correctly."""
-
- @classmethod
- def _discover_langflow_modules(cls) -> list[str]:
- """Dynamically discover all langflow modules."""
- langflow_modules = []
- try:
- import langflow
-
- for _importer, modname, _ispkg in pkgutil.walk_packages(langflow.__path__, langflow.__name__ + "."):
- langflow_modules.append(modname)
- except ImportError:
- pass
- return langflow_modules
-
- @classmethod
- def _detect_reexport_pattern(cls, module_name: str) -> dict[str, str | None]:
- """Detect what kind of re-export pattern a module uses."""
- try:
- module = importlib.import_module(module_name)
-
- # Check if module has source code that mentions lfx
- source_file = getattr(module, "__file__", None)
- if source_file:
- try:
- with Path(source_file).open() as f:
- content = f.read()
- if "from lfx" in content:
- # Try to extract the lfx module being imported
- patterns = [
- r"from (lfx\.[.\w]+) import",
- r"from (lfx\.[.\w]+) import \*",
- r"import (lfx\.[.\w]+)",
- ]
- for pattern in patterns:
- match = re.search(pattern, content)
- if match:
- return {"type": "direct", "source": match.group(1)}
-
- if "__getattr__" in content and "lfx" in content:
- return {"type": "dynamic", "source": None}
-
- # If we get here, file exists but no patterns matched
- return {"type": "none", "source": None}
-
- except (OSError, UnicodeDecodeError):
- return {"type": "none", "source": None}
- else:
- return {"type": "none", "source": None}
-
- except ImportError:
- return {"type": "import_error", "source": None}
-
- @classmethod
- def _get_expected_symbols(cls, lfx_source: str | None = None) -> list[str]:
- """Get expected symbols that should be available in a module."""
- if not lfx_source:
- return []
-
- try:
- lfx_module = importlib.import_module(lfx_source)
- if hasattr(lfx_module, "__all__"):
- return list(lfx_module.__all__)
- # Return public attributes (not starting with _)
- return [name for name in dir(lfx_module) if not name.startswith("_")]
- except ImportError:
- return []
-
- # Define all the modules that re-export from lfx (kept for backward compatibility)
- DIRECT_REEXPORT_MODULES = {
- # Base modules with direct re-exports
- "langflow.base.agents": "lfx.base.agents",
- "langflow.base.data": "lfx.base.data",
- "langflow.base.embeddings": "lfx.base.embeddings",
- "langflow.base.io": "lfx.base.io",
- "langflow.base.memory": "lfx.base.memory",
- "langflow.base.models": "lfx.base.models",
- "langflow.base.prompts": "lfx.base.prompts",
- "langflow.base.textsplitters": "lfx.base.textsplitters",
- "langflow.base.tools": "lfx.base.tools",
- "langflow.base.vectorstores": "lfx.base.vectorstores",
- # Core system modules with direct re-exports
- "langflow.custom.custom_component": "lfx.custom.custom_component",
- "langflow.graph": "lfx.graph",
- "langflow.inputs": "lfx.inputs.inputs",
- "langflow.interface": "lfx.interface",
- "langflow.load": "lfx.load",
- "langflow.logging": "lfx.log", # Note: imports from lfx.log.logger
- "langflow.schema": "lfx.schema",
- "langflow.template.field": "lfx.template.field",
- }
-
- # Modules that use wildcard imports from lfx
- WILDCARD_REEXPORT_MODULES = {
- "langflow.base": "lfx.base",
- "langflow.template": "lfx.template",
- }
-
- # Modules with complex/mixed import patterns
- COMPLEX_REEXPORT_MODULES = {
- "langflow.custom": ["lfx.custom", "lfx.custom.custom_component", "lfx.custom.utils"],
- "langflow.io": ["lfx.io", "lfx.template"], # Mixed imports
- }
-
- # Modules with dynamic __getattr__ patterns
- DYNAMIC_REEXPORT_MODULES = {
- "langflow.field_typing": "lfx.field_typing",
- }
-
- def test_direct_reexport_modules_importable(self):
- """Test that all direct re-export modules can be imported."""
- successful_imports = 0
-
- for langflow_module, lfx_module in self.DIRECT_REEXPORT_MODULES.items():
- try:
- # Import the langflow module
- lf_module = importlib.import_module(langflow_module)
- assert lf_module is not None, f"Langflow module {langflow_module} is None"
-
- # Import the corresponding lfx module to compare
-
- lfx_mod = importlib.import_module(lfx_module)
- assert lfx_mod is not None, f"LFX module {lfx_module} is None"
-
- successful_imports += 1
-
- except Exception as e:
- pytest.fail(f"Failed to import direct re-export module {langflow_module}: {e!s}")
-
- def test_wildcard_reexport_modules_importable(self):
- """Test that modules using wildcard imports work correctly."""
- successful_imports = 0
-
- for langflow_module, lfx_module in self.WILDCARD_REEXPORT_MODULES.items():
- try:
- # Import the langflow module
- lf_module = importlib.import_module(langflow_module)
- assert lf_module is not None, f"Langflow module {langflow_module} is None"
-
- # Wildcard imports should expose most/all attributes from lfx module
- lfx_mod = importlib.import_module(lfx_module)
-
- # Check that all attributes are available
- if hasattr(lfx_mod, "__all__"):
- all_attrs = list(lfx_mod.__all__) # Test all attributes
- for attr in all_attrs:
- if hasattr(lfx_mod, attr):
- assert hasattr(lf_module, attr), f"Attribute {attr} missing from {langflow_module}"
-
- successful_imports += 1
-
- except Exception as e:
- pytest.fail(f"Failed to import wildcard re-export module {langflow_module}: {e!s}")
-
- def test_complex_reexport_modules_importable(self):
- """Test that modules with complex/mixed import patterns work correctly."""
- successful_imports = 0
-
- for langflow_module in self.COMPLEX_REEXPORT_MODULES:
- try:
- # Import the langflow module
- lf_module = importlib.import_module(langflow_module)
- assert lf_module is not None, f"Langflow module {langflow_module} is None"
-
- # Verify it has __all__ attribute for complex modules
- assert hasattr(lf_module, "__all__"), f"Complex module {langflow_module} missing __all__"
- assert len(lf_module.__all__) > 0, f"Complex module {langflow_module} has empty __all__"
-
- # Try to access all items from __all__
- all_items = lf_module.__all__ # Test all items
- for item in all_items:
- try:
- attr = getattr(lf_module, item)
- assert attr is not None, f"Attribute {item} is None in {langflow_module}"
- except AttributeError:
- pytest.fail(f"Complex module {langflow_module} missing expected attribute {item} from __all__")
-
- successful_imports += 1
-
- except Exception as e:
- pytest.fail(f"Failed to import complex re-export module {langflow_module}: {e!s}")
-
- def test_dynamic_reexport_modules_importable(self):
- """Test that modules with __getattr__ dynamic loading work correctly."""
- successful_imports = 0
-
- for langflow_module in self.DYNAMIC_REEXPORT_MODULES:
- try:
- # Import the langflow module
- lf_module = importlib.import_module(langflow_module)
- assert lf_module is not None, f"Langflow module {langflow_module} is None"
-
- # Dynamic modules should have __getattr__ method
- assert hasattr(lf_module, "__getattr__"), f"Dynamic module {langflow_module} missing __getattr__"
-
- # Test accessing some known attributes dynamically
- if langflow_module == "langflow.field_typing":
- # Test some known field typing constants
- test_attrs = ["Data", "Text", "LanguageModel"]
- for attr in test_attrs:
- try:
- value = getattr(lf_module, attr)
- assert value is not None, f"Dynamic attribute {attr} is None"
- except AttributeError:
- pytest.fail(f"Dynamic module {langflow_module} missing expected attribute {attr}")
-
- successful_imports += 1
-
- except Exception as e:
- pytest.fail(f"Failed to import dynamic re-export module {langflow_module}: {e!s}")
-
- def test_all_reexport_modules_have_required_structure(self):
- """Test that re-export modules have the expected structure."""
- all_modules = {}
- all_modules.update(self.DIRECT_REEXPORT_MODULES)
- all_modules.update(self.WILDCARD_REEXPORT_MODULES)
- all_modules.update(self.DYNAMIC_REEXPORT_MODULES)
-
-        # Add complex modules
-        all_modules.update(self.COMPLEX_REEXPORT_MODULES)
-
- for langflow_module in all_modules:
- try:
- lf_module = importlib.import_module(langflow_module)
-
- # All modules should be importable
- assert lf_module is not None
-
- # Most should have __name__ attribute
- assert hasattr(lf_module, "__name__")
-
- # Check for basic module structure
- assert hasattr(lf_module, "__file__") or hasattr(lf_module, "__path__")
-
- except Exception as e:
- pytest.fail(f"Module structure issue with {langflow_module}: {e!s}")
-
- def test_reexport_modules_backward_compatibility(self):
- """Test that common import patterns still work for backward compatibility."""
- # Test some key imports that should always work
- backward_compatible_imports = [
- ("langflow.schema", "Data"),
- ("langflow.inputs", "StrInput"),
- ("langflow.inputs", "IntInput"),
- ("langflow.custom", "Component"), # Base component class
- ("langflow.custom", "CustomComponent"),
- ("langflow.field_typing", "Text"), # Dynamic
- ("langflow.field_typing", "Data"), # Dynamic
- ("langflow.load", "load_flow_from_json"),
- ("langflow.logging", "logger"),
- ]
-
- for module_name, symbol_name in backward_compatible_imports:
- try:
- module = importlib.import_module(module_name)
- symbol = getattr(module, symbol_name)
- assert symbol is not None
-
- # For callable objects, ensure they're callable
- if inspect.isclass(symbol) or inspect.isfunction(symbol):
- assert callable(symbol)
-
- except Exception as e:
- pytest.fail(f"Backward compatibility issue with {module_name}.{symbol_name}: {e!s}")
-
- def test_no_circular_imports_in_reexports(self):
- """Test that there are no circular import issues in re-export modules."""
- # Test importing modules in different orders to catch circular imports
- import_orders = [
- ["langflow.schema", "langflow.inputs", "langflow.base"],
- ["langflow.base", "langflow.schema", "langflow.inputs"],
- ["langflow.inputs", "langflow.base", "langflow.schema"],
- ["langflow.custom", "langflow.field_typing", "langflow.template"],
- ["langflow.template", "langflow.custom", "langflow.field_typing"],
- ["langflow.field_typing", "langflow.template", "langflow.custom"],
- ]
-
- for order in import_orders:
- try:
- for module_name in order:
- importlib.import_module(module_name)
- # Try to access something from each module to trigger full loading
- module = importlib.import_module(module_name)
- if hasattr(module, "__all__") and module.__all__:
- # Try to access first item in __all__
- first_item = module.__all__[0]
- try:
- getattr(module, first_item)
- except AttributeError:
- pytest.fail(f"Module {module_name} missing expected attribute {first_item} from __all__")
-
- except Exception as e:
- pytest.fail(f"Circular import issue with order {order}: {e!s}")
-
- def test_reexport_modules_performance(self):
- """Test that re-export modules import efficiently."""
- # Test that basic imports are fast
- performance_critical_modules = [
- "langflow.schema",
- "langflow.inputs",
- "langflow.field_typing",
- "langflow.load",
- "langflow.logging",
- ]
-
- slow_imports = []
-
- for module_name in performance_critical_modules:
- start_time = time.time()
- try:
- importlib.import_module(module_name)
- import_time = time.time() - start_time
-
- # Re-export modules should import quickly (< 1 second)
- if import_time > 1.0:
- slow_imports.append(f"{module_name}: {import_time:.3f}s")
-
- except ImportError:
- # Import failures are tested elsewhere
- pass
-
- # Don't fail the test, just record slow imports for information
-
- def test_coverage_completeness(self):
- """Test that we're testing all known re-export modules."""
- # This test ensures we don't miss any re-export modules
- all_tested_modules = set()
- all_tested_modules.update(self.DIRECT_REEXPORT_MODULES.keys())
- all_tested_modules.update(self.WILDCARD_REEXPORT_MODULES.keys())
- all_tested_modules.update(self.COMPLEX_REEXPORT_MODULES.keys())
- all_tested_modules.update(self.DYNAMIC_REEXPORT_MODULES.keys())
-
-        # Should be testing all 23 identified modules based on our analysis
- actual_count = len(all_tested_modules)
-
- # Ensure we have a reasonable number of modules
- assert actual_count >= 20, f"Too few modules being tested: {actual_count}"
- assert actual_count <= 30, f"Too many modules being tested: {actual_count}"
-
- # Dynamic test methods using the discovery functions
- def test_dynamic_module_discovery(self):
- """Test that we can dynamically discover langflow modules."""
- modules = self._discover_langflow_modules()
- assert len(modules) > 0, "Should discover at least some langflow modules"
-
- # Check that known modules are found
- expected_modules = ["langflow.schema", "langflow.inputs", "langflow.custom"]
- found_modules = [mod for mod in expected_modules if mod in modules]
- assert len(found_modules) > 0, f"Expected to find some of {expected_modules}, but found: {found_modules}"
-
- @pytest.mark.parametrize("module_name", get_all_reexport_modules())
- def test_parametrized_module_import_and_pattern_detection(self, module_name: str):
- """Parametrized test that checks module import and pattern detection."""
- # Test that module can be imported
- try:
- module = importlib.import_module(module_name)
- assert module is not None, f"Module {module_name} should not be None"
- except ImportError:
- pytest.fail(f"Could not import {module_name}")
-
- # Test pattern detection
- pattern_info = self._detect_reexport_pattern(module_name)
- assert isinstance(pattern_info, dict), "Pattern detection should return a dict"
- assert "type" in pattern_info, "Pattern info should have 'type' key"
- assert pattern_info["type"] in ["direct", "dynamic", "none", "import_error"], (
- f"Unknown pattern type: {pattern_info['type']}"
- )
-
- def test_generate_backward_compatibility_imports(self):
- """Test generating backward compatibility imports dynamically."""
- # Test with a known module that has lfx imports
- test_cases = [("langflow.schema", "lfx.schema"), ("langflow.custom", "lfx.custom")]
-
- for lf_module, _expected_lfx_source in test_cases:
- pattern_info = self._detect_reexport_pattern(lf_module)
- if pattern_info["type"] == "direct" and pattern_info["source"]:
- symbols = self._get_expected_symbols(pattern_info["source"])
- assert len(symbols) > 0, f"Should find some symbols in {pattern_info['source']}"
-
- # Test that at least some symbols are accessible in the langflow module
- module = importlib.import_module(lf_module)
- available_symbols = [sym for sym in symbols[:3] if hasattr(module, sym)] # Test first 3
- assert len(available_symbols) > 0, (
- f"Module {lf_module} should have some symbols from {pattern_info['source']}"
- )
diff --git a/src/backend/tests/unit/test_load_components.py b/src/backend/tests/unit/test_load_components.py
deleted file mode 100644
index 4edc2ff52715..000000000000
--- a/src/backend/tests/unit/test_load_components.py
+++ /dev/null
@@ -1,570 +0,0 @@
-# ruff: noqa: T201
-import asyncio
-import time
-
-import pytest
-
-from lfx.constants import BASE_COMPONENTS_PATH
-from lfx.interface.components import aget_all_types_dict, import_langflow_components
-
-
-class TestComponentLoading:
- """Test suite for comparing component loading methods performance and functionality."""
-
- @pytest.fixture
- def base_components_path(self):
- """Fixture to provide BASE_COMPONENTS_PATH as a list."""
- return [BASE_COMPONENTS_PATH] if BASE_COMPONENTS_PATH else []
-
- @pytest.mark.no_blockbuster
- @pytest.mark.asyncio
- async def test_import_langflow_components_basic(self):
- """Test basic functionality of import_langflow_components."""
- result = await import_langflow_components()
-
- assert isinstance(result, dict), "Result should be a dictionary"
- assert "components" in result, "Result should have 'components' key"
- assert isinstance(result["components"], dict), "Components should be a dictionary"
-
- # Check that we have some components loaded (non-failing for CI compatibility)
- total_components = sum(len(comps) for comps in result["components"].values())
- print(f"Loaded {total_components} components")
- # Note: Component count may vary due to OS file limits, so we don't assert a minimum
-
- @pytest.mark.no_blockbuster
- @pytest.mark.asyncio
- async def test_aget_all_types_dict_basic(self, base_components_path):
- """Test basic functionality of aget_all_types_dict."""
- result = await aget_all_types_dict(base_components_path)
-
- assert isinstance(result, dict), "Result should be a dictionary"
- # Note: aget_all_types_dict might return empty dict if no custom components in path
- # This is expected behavior when BASE_COMPONENTS_PATH points to built-in components
-
- @pytest.mark.no_blockbuster
- @pytest.mark.asyncio
- async def test_component_loading_performance_comparison(self, base_components_path):
- """Compare performance between import_langflow_components and aget_all_types_dict."""
- # Warm up the functions (first calls might be slower due to imports)
- await import_langflow_components()
- await aget_all_types_dict(base_components_path)
-
- # Time import_langflow_components
- start_time = time.perf_counter()
- langflow_result = await import_langflow_components()
- langflow_duration = time.perf_counter() - start_time
-
- # Time aget_all_types_dict
- start_time = time.perf_counter()
- all_types_result = await aget_all_types_dict(base_components_path)
- all_types_duration = time.perf_counter() - start_time
-
- # Log performance metrics
- print("\nPerformance Comparison:")
- print(f"import_langflow_components: {langflow_duration:.4f}s")
- print(f"aget_all_types_dict: {all_types_duration:.4f}s")
- print(f"Ratio (langflow/all_types): {langflow_duration / max(all_types_duration, 0.0001):.2f}")
-
- # Both should complete in reasonable time (< 10s for langflow, < 20s for all_types)
-        assert langflow_duration < 10.0, f"import_langflow_components took too long: {langflow_duration}s"
- assert all_types_duration < 20.0, f"aget_all_types_dict took too long: {all_types_duration}s"
-
- # Store results for further analysis
- return {
- "langflow_result": langflow_result,
- "all_types_result": all_types_result,
- "langflow_duration": langflow_duration,
- "all_types_duration": all_types_duration,
- }
-
- @pytest.mark.no_blockbuster
- @pytest.mark.asyncio
- async def test_result_structure_comparison(self, base_components_path):
- """Compare the structure and content of results from both functions."""
- langflow_result = await import_langflow_components()
- all_types_result = await aget_all_types_dict(base_components_path)
-
- # Check langflow result structure
- assert isinstance(langflow_result, dict)
- assert "components" in langflow_result
- langflow_components = langflow_result["components"]
-
- # Check all_types result structure
- assert isinstance(all_types_result, dict)
-
- # Get component counts (informational, non-failing)
- langflow_count = sum(len(comps) for comps in langflow_components.values())
- all_types_count = sum(len(comps) for comps in all_types_result.values()) if all_types_result else 0
-
- print("\nComponent Counts (informational):")
- print(f"import_langflow_components: {langflow_count} components")
- print(f"aget_all_types_dict: {all_types_count} components")
-
- # Log the comparison but don't fail the test
- if langflow_count != all_types_count:
- diff = abs(langflow_count - all_types_count)
- print(f"Note: Component counts differ by {diff} - this may be due to OS file limits")
-
- # Analyze component categories
- if langflow_components:
- langflow_categories = list(langflow_components.keys())
- print(f"Langflow categories: {sorted(langflow_categories)}")
-
- if all_types_result:
- all_types_categories = list(all_types_result.keys())
- print(f"All types categories: {sorted(all_types_categories)}")
-
- # Verify each category has proper structure
- for category, components in langflow_components.items():
- assert isinstance(components, dict), f"Category {category} should contain dict of components"
-
- @pytest.mark.no_blockbuster
- @pytest.mark.asyncio
- async def test_component_template_structure(self):
- """Test that component templates have expected structure."""
- langflow_result = await import_langflow_components()
-
- # Check that components have proper template structure
- for category, components in langflow_result["components"].items():
- assert isinstance(components, dict), f"Category {category} should contain dict of components"
-
- for comp_name, comp_template in components.items():
- assert isinstance(comp_template, dict), f"Component {comp_name} should be a dict"
-
- # Check for common template fields
- if comp_template: # Some might be empty during development
- # Common fields that should exist in component templates
- expected_fields = {"display_name", "type", "template"}
- present_fields = set(comp_template.keys())
-
- # At least some expected fields should be present
- common_fields = expected_fields.intersection(present_fields)
- if len(common_fields) == 0 and comp_template:
- print(f"Warning: Component {comp_name} missing expected fields. Has: {list(present_fields)}")
-
- @pytest.mark.no_blockbuster
- @pytest.mark.asyncio
- async def test_concurrent_loading(self, base_components_path):
- """Test concurrent execution of both loading methods."""
- # Run both functions concurrently
- tasks = [
- import_langflow_components(),
- aget_all_types_dict(base_components_path),
- import_langflow_components(), # Run langflow loader twice to test consistency
- ]
-
- start_time = time.perf_counter()
- results = await asyncio.gather(*tasks)
- concurrent_duration = time.perf_counter() - start_time
-
- langflow_result1, all_types_result, langflow_result2 = results
-
- print(f"\nConcurrent execution took: {concurrent_duration:.4f}s")
-
- # Check that both results have the same structure and component counts
- assert isinstance(langflow_result1, dict)
- assert isinstance(langflow_result2, dict)
- assert isinstance(all_types_result, dict)
-
- # Check that both langflow results have the same component structure
- assert "components" in langflow_result1
- assert "components" in langflow_result2
-
- # Compare component counts (informational, non-failing)
- count1 = sum(len(comps) for comps in langflow_result1["components"].values())
- count2 = sum(len(comps) for comps in langflow_result2["components"].values())
-
- print(f"Component counts: {count1} vs {count2}")
- if count1 != count2:
- print("Note: Component counts differ - this may be due to OS file limits or timing")
-
- # Check that category names are the same
- categories1 = set(langflow_result1["components"].keys())
- categories2 = set(langflow_result2["components"].keys())
-
- if categories1 != categories2:
- missing_in_2 = categories1 - categories2
- missing_in_1 = categories2 - categories1
- print(f"Category differences: missing in result2: {missing_in_2}, missing in result1: {missing_in_1}")
- # This is acceptable as long as the main functionality is consistent
-
- # Check that component names within categories are the same
- for category in categories1.intersection(categories2):
- comps1 = set(langflow_result1["components"][category].keys())
- comps2 = set(langflow_result2["components"][category].keys())
- if comps1 != comps2:
- missing_in_2 = comps1 - comps2
- missing_in_1 = comps2 - comps1
- print(
- f"Component differences in {category}: "
- f"missing in result2: {missing_in_2}, missing in result1: {missing_in_1}"
- )
-
- # The results might not be exactly identical due to timing or loading order
- # but the core structure should be consistent
- print("Note: Results may have minor differences due to concurrent loading, but structure is consistent")
-
- @pytest.mark.no_blockbuster
- @pytest.mark.asyncio
- async def test_memory_efficiency(self, base_components_path):
- """Test memory usage patterns of both loading methods."""
- import gc
-
- # Force garbage collection before measuring
- gc.collect()
- initial_objects = len(gc.get_objects())
-
- # Load with import_langflow_components
- langflow_result = await import_langflow_components()
- after_langflow_objects = len(gc.get_objects())
-
- # Load with aget_all_types_dict
- all_types_result = await aget_all_types_dict(base_components_path)
- after_all_types_objects = len(gc.get_objects())
-
- # Calculate object creation
- langflow_objects_created = after_langflow_objects - initial_objects
- all_types_objects_created = after_all_types_objects - after_langflow_objects
-
- print("\nMemory Analysis:")
- print(f"Objects created by import_langflow_components: {langflow_objects_created}")
- print(f"Objects created by aget_all_types_dict: {all_types_objects_created}")
-
- # Clean up
- del langflow_result, all_types_result
- gc.collect()
-
- @pytest.mark.no_blockbuster
- @pytest.mark.asyncio
- async def test_error_handling(self):
- """Test error handling in both loading methods."""
- # Test with empty paths list for aget_all_types_dict
- empty_paths = []
-
- # This should not raise an error, just return empty results
- result = await aget_all_types_dict(empty_paths)
- assert isinstance(result, dict), "Should return empty dict for empty paths"
-
- # Test with non-existent path - this should NOT raise an error, just return empty results
- nonexistent_paths = ["/nonexistent/path"]
- result = await aget_all_types_dict(nonexistent_paths)
- assert isinstance(result, dict), "Should return empty dict for non-existent paths"
- assert len(result) == 0, "Should return empty dict for non-existent paths"
-
- # Test with empty string path - this SHOULD raise an error
- empty_string_paths = [""]
- with pytest.raises(Exception) as exc_info: # noqa: PT011
- await aget_all_types_dict(empty_string_paths)
- assert "path" in str(exc_info.value).lower(), f"Path-related error expected, got: {exc_info.value}"
-
- # import_langflow_components should work regardless of external paths
- result = await import_langflow_components()
- assert isinstance(result, dict)
- assert "components" in result
-
- @pytest.mark.no_blockbuster
- @pytest.mark.benchmark
- @pytest.mark.asyncio
- async def test_repeated_loading_performance(self, base_components_path):
- """Test performance of repeated loading operations."""
- num_iterations = 5
-
- # Test repeated import_langflow_components calls
- langflow_times = []
- for _ in range(num_iterations):
- start_time = time.perf_counter()
- await import_langflow_components()
- duration = time.perf_counter() - start_time
- langflow_times.append(duration)
-
- # Test repeated aget_all_types_dict calls
- all_types_times = []
- for _ in range(num_iterations):
- start_time = time.perf_counter()
- await aget_all_types_dict(base_components_path)
- duration = time.perf_counter() - start_time
- all_types_times.append(duration)
-
- # Calculate statistics
- langflow_avg = sum(langflow_times) / len(langflow_times)
- langflow_min = min(langflow_times)
- langflow_max = max(langflow_times)
-
- all_types_avg = sum(all_types_times) / len(all_types_times)
- all_types_min = min(all_types_times)
- all_types_max = max(all_types_times)
-
- print(f"\nRepeated Loading Performance ({num_iterations} iterations):")
- print(
- f"import_langflow_components - avg: {langflow_avg:.4f}s, min: {langflow_min:.4f}s, max: {langflow_max:.4f}s"
- )
- print(f"aget_all_types_dict - avg: {all_types_avg:.4f}s, min: {all_types_min:.4f}s, max: {all_types_max:.4f}s")
-
- # Performance should be reasonably consistent
- langflow_variance = max(langflow_times) - min(langflow_times)
- all_types_variance = max(all_types_times) - min(all_types_times)
-
-        # The spread between the fastest and slowest run shouldn't exceed 10x the average
- assert langflow_variance < langflow_avg * 10, (
- f"import_langflow_components performance too inconsistent: {langflow_variance}s variance"
- )
- assert all_types_variance < all_types_avg * 10, (
- f"aget_all_types_dict performance too inconsistent: {all_types_variance}s variance"
- )
-
- @pytest.mark.no_blockbuster
- @pytest.mark.asyncio
- async def test_components_path_variations(self):
- """Test aget_all_types_dict with different path configurations."""
- test_cases = [
- [], # Empty list
- [BASE_COMPONENTS_PATH] if BASE_COMPONENTS_PATH else [], # Normal case - valid path
- ]
-
- # Test invalid paths separately with proper error handling
- invalid_test_cases = [
- [""], # Empty string path
- ["/tmp"], # Non-existent or invalid path #noqa: S108
- [BASE_COMPONENTS_PATH, "/tmp"] # noqa: S108
- if BASE_COMPONENTS_PATH
- else ["/tmp"], # Mixed valid/invalid paths #noqa: S108
- ]
-
- # Test valid cases
- for i, paths in enumerate(test_cases):
- print(f"\nTesting valid path configuration {i}: {paths}")
-
- start_time = time.perf_counter()
- result = await aget_all_types_dict(paths)
- duration = time.perf_counter() - start_time
-
- assert isinstance(result, dict), f"Result should be dict for paths: {paths}"
-
- component_count = sum(len(comps) for comps in result.values())
- print(f" Loaded {component_count} components in {duration:.4f}s")
-
- # Test invalid cases - different invalid paths behave differently
- for i, paths in enumerate(invalid_test_cases):
- print(f"\nTesting invalid path configuration {i}: {paths}")
-
- # Empty string paths raise errors, but non-existent paths just return empty results
- if any(path == "" for path in paths):
- # Empty string paths should raise an error
- with pytest.raises((ValueError, OSError, FileNotFoundError)) as exc_info:
- await aget_all_types_dict(paths)
- print(f" Expected error for empty string path: {exc_info.value}")
- assert "path" in str(exc_info.value).lower(), f"Path-related error expected, got: {exc_info.value}"
- else:
- # Non-existent paths should return empty results without raising
- result = await aget_all_types_dict(paths)
- assert isinstance(result, dict), f"Should return dict for non-existent paths: {paths}"
- component_count = sum(len(comps) for comps in result.values())
- print(f" Non-existent path returned {component_count} components (expected 0)")
-
- @pytest.mark.no_blockbuster
- @pytest.mark.asyncio
- async def test_comprehensive_performance_summary(self, base_components_path):
- """Comprehensive test that provides a summary of all performance aspects."""
- print("\n" + "=" * 80)
- print("COMPREHENSIVE COMPONENT LOADING PERFORMANCE SUMMARY")
- print("=" * 80)
-
- # WARM-UP RUNS (discard these timings)
- print("\nPerforming warm-up runs...")
- await import_langflow_components() # Warm up imports, thread pools, etc.
- await aget_all_types_dict(base_components_path) # Warm up custom component loading
- print("Warm-up completed.")
-
- # Now run the actual performance measurements
- num_runs = 3
- langflow_results = []
- all_types_results = []
-
- for run in range(num_runs):
- print(f"\nPerformance Run {run + 1}/{num_runs}")
-
- # Time import_langflow_components
- start_time = time.perf_counter()
- langflow_result = await import_langflow_components()
- langflow_duration = time.perf_counter() - start_time
- langflow_results.append((langflow_duration, langflow_result))
-
- # Time aget_all_types_dict
- start_time = time.perf_counter()
- all_types_result = await aget_all_types_dict(base_components_path)
- all_types_duration = time.perf_counter() - start_time
- all_types_results.append((all_types_duration, all_types_result))
-
- print(f" import_langflow_components: {langflow_duration:.4f}s")
- print(f" aget_all_types_dict: {all_types_duration:.4f}s")
-
- # Calculate final statistics (excluding warm-up runs)
- langflow_times = [duration for duration, _ in langflow_results]
- all_types_times = [duration for duration, _ in all_types_results]
-
- print("\nSTEADY-STATE PERFORMANCE (after warm-up):")
- print("import_langflow_components:")
- print(f" Average: {sum(langflow_times) / len(langflow_times):.4f}s")
- print(f" Min: {min(langflow_times):.4f}s")
- print(f" Max: {max(langflow_times):.4f}s")
-
- print("aget_all_types_dict:")
- print(f" Average: {sum(all_types_times) / len(all_types_times):.4f}s")
- print(f" Min: {min(all_types_times):.4f}s")
- print(f" Max: {max(all_types_times):.4f}s")
-
- # Component count analysis
- langflow_component_counts = []
- all_types_component_counts = []
-
- for _, result in langflow_results:
- count = sum(len(comps) for comps in result.get("components", {}).values())
- langflow_component_counts.append(count)
-
- for _, result in all_types_results:
- count = sum(len(comps) for comps in result.values())
- all_types_component_counts.append(count)
-
- print("\nCOMPONENT COUNTS:")
- print(f"import_langflow_components: {langflow_component_counts}")
- print(f"aget_all_types_dict: {all_types_component_counts}")
-
- # Determine which is faster (based on steady-state performance)
- avg_langflow = sum(langflow_times) / len(langflow_times)
- avg_all_types = sum(all_types_times) / len(all_types_times)
-
- if avg_langflow < avg_all_types:
- faster_method = "import_langflow_components"
- speedup = avg_all_types / avg_langflow
- else:
- faster_method = "aget_all_types_dict"
- speedup = avg_langflow / avg_all_types
-
- print("\nSTEADY-STATE PERFORMANCE CONCLUSION:")
- print(f"Faster method: {faster_method}")
- print(f"Speedup factor: {speedup:.2f}x")
- print(f"Timing results: {avg_langflow:.4f}s (langflow), ", f"{avg_all_types:.4f}s (all_types)")
-
- print("\nNOTE: These results exclude warm-up runs and represent steady-state performance")
- print("that users will experience after the first component load.")
-
- print("=" * 80)
-
- # Log component counts (informational, non-failing)
- print("\nComponent count consistency:")
- if langflow_component_counts:
- min_count = min(langflow_component_counts)
- max_count = max(langflow_component_counts)
- if min_count != max_count:
- print(f"Note: Component counts vary ({min_count}-{max_count}) - may be due to OS file limits")
- else:
- print(f"Component counts consistent: {min_count}")
- assert all(isinstance(result, dict) for _, result in langflow_results), "All langflow results should be dicts"
- assert all(isinstance(result, dict) for _, result in all_types_results), "All all_types results should be dicts"
-
- # Log steady-state performance instead of asserting
- print(f"Steady-state performance: avg_langflow={avg_langflow:.4f}s, speedup={speedup:.2f}x")
-
- @pytest.mark.no_blockbuster
- @pytest.mark.asyncio
- async def test_component_differences_analysis(self, base_components_path):
- """Analyze and report the exact differences between components loaded by both methods."""
- print("\n" + "=" * 80)
- print("COMPONENT DIFFERENCES ANALYSIS")
- print("=" * 80)
-
- # Load components from both methods
- langflow_result = await import_langflow_components()
- all_types_result = await aget_all_types_dict(base_components_path)
-
- # Extract component data from both results
- # import_langflow_components returns {"components": {category: {comp_name: comp_data}}}
- # aget_all_types_dict returns {category: {comp_name: comp_data}}
- langflow_components = langflow_result.get("components", {})
- all_types_components = all_types_result
-
- # Build flat dictionaries of all components: {comp_name: category}
- langflow_flat = {}
- for category, components in langflow_components.items():
- for comp_name in components:
- langflow_flat[comp_name] = category
-
- all_types_flat = {}
- for category, components in all_types_components.items():
- for comp_name in components:
- all_types_flat[comp_name] = category
-
- # Calculate counts
- langflow_count = len(langflow_flat)
- all_types_count = len(all_types_flat)
-
- print("\nCOMPONENT COUNTS:")
- print(f"import_langflow_components: {langflow_count} components")
- print(f"aget_all_types_dict: {all_types_count} components")
- print(f"Difference: {abs(langflow_count - all_types_count)} components")
-
- # Find components that are in one but not the other
- langflow_only = set(langflow_flat.keys()) - set(all_types_flat.keys())
- all_types_only = set(all_types_flat.keys()) - set(langflow_flat.keys())
- common_components = set(langflow_flat.keys()) & set(all_types_flat.keys())
-
- print("\nCOMPONENT OVERLAP:")
- print(f"Common components: {len(common_components)}")
- print(f"Only in import_langflow_components: {len(langflow_only)}")
- print(f"Only in aget_all_types_dict: {len(all_types_only)}")
-
- # Print detailed differences
- if langflow_only:
- print(f"\nCOMPONENTS ONLY IN import_langflow_components ({len(langflow_only)}):")
- for comp_name in sorted(langflow_only):
- category = langflow_flat[comp_name]
- print(f" - {comp_name} (category: {category})")
-
- if all_types_only:
- print(f"\nCOMPONENTS ONLY IN aget_all_types_dict ({len(all_types_only)}):")
- for comp_name in sorted(all_types_only):
- category = all_types_flat[comp_name]
- print(f" - {comp_name} (category: {category})")
-
- # Check for category differences for common components
- category_differences = []
- for comp_name in common_components:
- langflow_cat = langflow_flat[comp_name]
- all_types_cat = all_types_flat[comp_name]
- if langflow_cat != all_types_cat:
- category_differences.append((comp_name, langflow_cat, all_types_cat))
-
- if category_differences:
- print(f"\nCOMPONENTS WITH DIFFERENT CATEGORIES ({len(category_differences)}):")
- for comp_name, langflow_cat, all_types_cat in sorted(category_differences):
- print(f" - {comp_name}: import_langflow='{langflow_cat}' vs aget_all_types='{all_types_cat}'")
-
- # Print category summary
- print("\nCATEGORY SUMMARY:")
- langflow_categories = set(langflow_components.keys())
- all_types_categories = set(all_types_components.keys())
-
- print(f"Categories in import_langflow_components: {sorted(langflow_categories)}")
- print(f"Categories in aget_all_types_dict: {sorted(all_types_categories)}")
-
- categories_only_langflow = langflow_categories - all_types_categories
- categories_only_all_types = all_types_categories - langflow_categories
-
- if categories_only_langflow:
- print(f"Categories only in import_langflow_components: {sorted(categories_only_langflow)}")
- if categories_only_all_types:
- print(f"Categories only in aget_all_types_dict: {sorted(categories_only_all_types)}")
-
- print("=" * 80)
-
- # Log component counts and differences (informational, non-failing)
- print("Component loading analysis completed successfully")
- if langflow_count == 0 and all_types_count == 0:
- print("Note: Both methods returned 0 components - this may be due to OS file limits")
- elif len(common_components) == 0 and (langflow_count > 0 or all_types_count > 0):
- print("Note: No common components found - this may indicate different loading behaviors due to OS limits")
-
- @pytest.mark.benchmark
- async def test_component_loading_performance(self):
- """Test the performance of component loading."""
- await import_langflow_components()
diff --git a/src/backend/tests/unit/test_loading.py b/src/backend/tests/unit/test_loading.py
deleted file mode 100644
index 06b1732706f2..000000000000
--- a/src/backend/tests/unit/test_loading.py
+++ /dev/null
@@ -1,28 +0,0 @@
-from langflow.initial_setup.setup import load_starter_projects
-from langflow.load import aload_flow_from_json
-
-from lfx.graph import Graph
-
-# TODO: UPDATE BASIC EXAMPLE
-# def test_load_flow_from_json():
-# """Test loading a flow from a json file"""
-# loaded = load_flow_from_json(pytest.BASIC_EXAMPLE_PATH)
-# assert loaded is not None
-# assert isinstance(loaded, Graph)
-
-
-# def test_load_flow_from_json_with_tweaks():
-# """Test loading a flow from a json file and applying tweaks"""
-# tweaks = {"dndnode_82": {"model_name": "gpt-3.5-turbo-16k-0613"}}
-# loaded = load_flow_from_json(pytest.BASIC_EXAMPLE_PATH, tweaks=tweaks)
-# assert loaded is not None
-# assert isinstance(loaded, Graph)
-
-
-async def test_load_flow_from_json_object():
- """Test loading a flow from a json file and applying tweaks."""
- result = await load_starter_projects()
- project = result[0][1]
- loaded = await aload_flow_from_json(project)
- assert loaded is not None
- assert isinstance(loaded, Graph)
diff --git a/src/backend/tests/unit/test_logger.py b/src/backend/tests/unit/test_logger.py
deleted file mode 100644
index 46417cbada9f..000000000000
--- a/src/backend/tests/unit/test_logger.py
+++ /dev/null
@@ -1,952 +0,0 @@
-"""Comprehensive tests for lfx.log.logger module.
-
-This test suite covers all aspects of the logger module including:
-- configure() function with all parameters and edge cases
-- InterceptHandler class functionality
-- setup_uvicorn_logger() and setup_gunicorn_logger() functions
-- Log processor functions (add_serialized, buffer_writer, etc.)
-- Edge cases and error conditions
-- The specific CRITICAL + 1 bug that was fixed
-"""
-
-import builtins
-import contextlib
-import json
-import logging
-import logging.handlers
-import os
-import tempfile
-from pathlib import Path
-from unittest.mock import Mock, patch
-
-import pytest
-import structlog
-
-from lfx.log.logger import (
- LOG_LEVEL_MAP,
- VALID_LOG_LEVELS,
- InterceptHandler,
- SizedLogBuffer,
- add_serialized,
- buffer_writer,
- configure,
- log_buffer,
- remove_exception_in_production,
- setup_gunicorn_logger,
- setup_uvicorn_logger,
-)
-
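-# For context, an InterceptHandler in this style typically routes stdlib
-# logging records into structlog; a minimal sketch (illustrative only, not
-# the lfx implementation) would be:
-#
-#   class InterceptHandlerSketch(logging.Handler):
-#       def emit(self, record: logging.LogRecord) -> None:
-#           structlog.get_logger(record.name).log(record.levelno, record.getMessage())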
-
-class TestConfigure:
- """Test suite for the configure() function."""
-
- def setup_method(self):
- """Reset structlog configuration before each test."""
- # Store original configuration to restore later
- # structlog._config is a module-level configuration object
-
- def teardown_method(self):
- """Restore structlog configuration after each test."""
- # Reset to a basic configuration
- structlog.reset_defaults()
- structlog.configure()
-
- def test_configure_default_values(self):
- """Test configure() with default values."""
- configure()
-
- # Verify structlog is configured by checking we can get a logger
- logger = structlog.get_logger()
- assert logger is not None
-
- # Verify the logger has the expected methods
- assert hasattr(logger, "debug")
- assert hasattr(logger, "info")
- assert hasattr(logger, "error")
-
- def test_configure_valid_log_levels(self):
- """Test configure() with all valid log levels."""
- for level in VALID_LOG_LEVELS:
- configure(log_level=level)
- config = structlog._config
- assert config is not None
-
- def test_configure_invalid_log_level(self):
- """Test configure() with invalid log level falls back to ERROR."""
- configure(log_level="INVALID_LEVEL")
- config = structlog._config
- assert config is not None
- # Should fall back to ERROR level without raising an exception
-
- def test_configure_case_insensitive_log_level(self):
- """Test configure() with case insensitive log levels."""
- configure(log_level="debug")
- config = structlog._config
- assert config is not None
-
- def test_configure_with_log_file(self):
- """Test configure() with log file parameter."""
- with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
- log_file_path = Path(tmp_file.name)
-
- try:
- configure(log_file=log_file_path)
- config = structlog._config
- assert config is not None
-
- # Verify file handler was added to root logger
- root_handlers = logging.root.handlers
- assert any(isinstance(h, logging.handlers.RotatingFileHandler) for h in root_handlers)
- finally:
- # Cleanup
- if log_file_path.exists():
- log_file_path.unlink()
- # Remove any file handlers from root logger
- for handler in logging.root.handlers[:]:
- if isinstance(handler, logging.handlers.RotatingFileHandler):
- logging.root.removeHandler(handler)
-
- def test_configure_with_invalid_log_file_path(self):
- """Test configure() with invalid log file path falls back to cache dir."""
- invalid_path = Path("/nonexistent/directory/log.txt")
-
- configure(log_file=invalid_path)
- config = structlog._config
- assert config is not None
-
- # Should create file handler without raising exception
- # The function should fall back to cache directory
-
- def test_configure_disable_true(self):
- """Test configure() with disable=True sets high filter level."""
- configure(disable=True)
-
- config = structlog._config
- assert config is not None
- # When disabled, wrapper_class should be set to filter at CRITICAL level
-
- def test_configure_disable_false(self):
- """Test configure() with disable=False works normally."""
- configure(disable=False, log_level="DEBUG")
-
- config = structlog._config
- assert config is not None
-
- def test_configure_with_log_env_container(self):
- """Test configure() with log_env='container' uses JSON renderer."""
- configure(log_env="container")
-
- config = structlog._config
- assert config is not None
- # Should use JSONRenderer processor
-
- def test_configure_with_log_env_container_json(self):
- """Test configure() with log_env='container_json' uses JSON renderer."""
- configure(log_env="container_json")
-
- config = structlog._config
- assert config is not None
-
- def test_configure_with_log_env_container_csv(self):
- """Test configure() with log_env='container_csv' uses KeyValue renderer."""
- configure(log_env="container_csv")
-
- config = structlog._config
- assert config is not None
-
- def test_configure_with_custom_log_format(self):
- """Test configure() with custom log format."""
- configure(log_format="custom_format")
-
- config = structlog._config
- assert config is not None
-
- def test_configure_with_log_rotation(self):
- """Test configure() with log rotation settings."""
- with tempfile.TemporaryDirectory() as tmp_dir:
- log_file_path = Path(tmp_dir) / "test.log"
-
- # Clear any existing handlers first
- for handler in logging.root.handlers[:]:
- if isinstance(handler, logging.handlers.RotatingFileHandler):
- logging.root.removeHandler(handler)
-
- configure(log_file=log_file_path, log_rotation="50 MB")
- logger = structlog.get_logger()
- assert logger is not None
-
- # Check that rotating file handler was created with the correct file path
- rotating_handlers = [
- h
- for h in logging.root.handlers
- if isinstance(h, logging.handlers.RotatingFileHandler) and h.baseFilename == str(log_file_path)
- ]
- assert len(rotating_handlers) > 0
-
- # Check max bytes is set correctly (50 MB = 50 * 1024 * 1024)
- handler = rotating_handlers[0]
- assert handler.maxBytes == 50 * 1024 * 1024
-
- # Cleanup handlers
- for handler in logging.root.handlers[:]:
- if isinstance(handler, logging.handlers.RotatingFileHandler):
- logging.root.removeHandler(handler)
-
- def test_configure_with_invalid_log_rotation(self):
- """Test configure() with invalid log rotation falls back to default."""
- with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
- log_file_path = Path(tmp_file.name)
-
- try:
- configure(log_file=log_file_path, log_rotation="invalid rotation")
- config = structlog._config
- assert config is not None
-
- # Should use default 10MB rotation
- rotating_handlers = [
- h for h in logging.root.handlers if isinstance(h, logging.handlers.RotatingFileHandler)
- ]
- if rotating_handlers:
- handler = rotating_handlers[0]
- assert handler.maxBytes == 10 * 1024 * 1024 # Default 10MB
- finally:
- # Cleanup
- if log_file_path.exists():
- log_file_path.unlink()
- for handler in logging.root.handlers[:]:
- if isinstance(handler, logging.handlers.RotatingFileHandler):
- logging.root.removeHandler(handler)
-
- @patch.dict(os.environ, {"LANGFLOW_LOG_LEVEL": "WARNING"})
- def test_configure_env_variable_override(self):
- """Test configure() respects LANGFLOW_LOG_LEVEL environment variable."""
- configure() # Should use WARNING from env var
-
- config = structlog._config
- assert config is not None
- # The wrapper_class should be configured for WARNING level
-
- @patch.dict(os.environ, {"LANGFLOW_LOG_FILE": "/tmp/test.log"}) # noqa: S108
- def test_configure_env_log_file_override(self):
- """Test configure() respects LANGFLOW_LOG_FILE environment variable."""
- configure()
-
- config = structlog._config
- assert config is not None
-
- @patch.dict(os.environ, {"LANGFLOW_LOG_ENV": "container"})
- def test_configure_env_log_env_override(self):
- """Test configure() respects LANGFLOW_LOG_ENV environment variable."""
- configure()
-
- config = structlog._config
- assert config is not None
-
- @patch.dict(os.environ, {"LANGFLOW_LOG_FORMAT": "custom"})
- def test_configure_env_log_format_override(self):
- """Test configure() respects LANGFLOW_LOG_FORMAT environment variable."""
- configure()
-
- config = structlog._config
- assert config is not None
-
- @patch.dict(os.environ, {"LANGFLOW_PRETTY_LOGS": "false"})
- def test_configure_env_pretty_logs_disabled(self):
- """Test configure() respects LANGFLOW_PRETTY_LOGS=false."""
- configure()
-
- config = structlog._config
- assert config is not None
-
- def test_configure_critical_plus_one_bug(self):
- """Test that configure() handles disable=True without KeyError.
-
- This tests the specific bug where using logging.CRITICAL + 1
- as a filter level would cause a KeyError.
- """
- # This should not raise a KeyError
- configure(disable=True, log_level="CRITICAL")
-
- config = structlog._config
- assert config is not None
-
- # Verify we can get a logger and it's properly configured
- logger = structlog.get_logger()
- assert logger is not None
-
-
-class TestInterceptHandler:
- """Test suite for the InterceptHandler class."""
-
- def setup_method(self):
- """Setup for each test method."""
- self.handler = InterceptHandler()
- # Mock structlog to capture calls
- self.mock_logger = Mock()
- self.structlog_patcher = patch("structlog.get_logger", return_value=self.mock_logger)
- self.structlog_patcher.start()
-
- def teardown_method(self):
- """Cleanup after each test method."""
- self.structlog_patcher.stop()
-
- def test_emit_critical_level(self):
- """Test InterceptHandler.emit() with CRITICAL level."""
- record = logging.LogRecord(
- name="test_logger",
- level=logging.CRITICAL,
- pathname="test.py",
- lineno=1,
- msg="Critical message",
- args=(),
- exc_info=None,
- )
-
- self.handler.emit(record)
- self.mock_logger.critical.assert_called_once_with("Critical message")
-
- def test_emit_error_level(self):
- """Test InterceptHandler.emit() with ERROR level."""
- record = logging.LogRecord(
- name="test_logger",
- level=logging.ERROR,
- pathname="test.py",
- lineno=1,
- msg="Error message",
- args=(),
- exc_info=None,
- )
-
- self.handler.emit(record)
- self.mock_logger.error.assert_called_once_with("Error message")
-
- def test_emit_warning_level(self):
- """Test InterceptHandler.emit() with WARNING level."""
- record = logging.LogRecord(
- name="test_logger",
- level=logging.WARNING,
- pathname="test.py",
- lineno=1,
- msg="Warning message",
- args=(),
- exc_info=None,
- )
-
- self.handler.emit(record)
- self.mock_logger.warning.assert_called_once_with("Warning message")
-
- def test_emit_info_level(self):
- """Test InterceptHandler.emit() with INFO level."""
- record = logging.LogRecord(
- name="test_logger",
- level=logging.INFO,
- pathname="test.py",
- lineno=1,
- msg="Info message",
- args=(),
- exc_info=None,
- )
-
- self.handler.emit(record)
- self.mock_logger.info.assert_called_once_with("Info message")
-
- def test_emit_debug_level(self):
- """Test InterceptHandler.emit() with DEBUG level."""
- record = logging.LogRecord(
- name="test_logger",
- level=logging.DEBUG,
- pathname="test.py",
- lineno=1,
- msg="Debug message",
- args=(),
- exc_info=None,
- )
-
- self.handler.emit(record)
- self.mock_logger.debug.assert_called_once_with("Debug message")
-
- def test_emit_custom_level_above_critical(self):
- """Test InterceptHandler.emit() with custom level above CRITICAL."""
- # Test level higher than CRITICAL (like logging.CRITICAL + 1)
- record = logging.LogRecord(
- name="test_logger",
- level=logging.CRITICAL + 1,
- pathname="test.py",
- lineno=1,
- msg="Super critical message",
- args=(),
- exc_info=None,
- )
-
- self.handler.emit(record)
- # Should map to critical for levels >= CRITICAL
- self.mock_logger.critical.assert_called_once_with("Super critical message")
-
- def test_emit_with_message_formatting(self):
- """Test InterceptHandler.emit() with message formatting."""
- record = logging.LogRecord(
- name="test_logger",
- level=logging.INFO,
- pathname="test.py",
- lineno=1,
- msg="Message with %s and %d",
- args=("string", 42),
- exc_info=None,
- )
-
- self.handler.emit(record)
- self.mock_logger.info.assert_called_once_with("Message with string and 42")
-
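
The emit() tests above pin down the level mapping and message formatting. For reference, a minimal sketch that is consistent with those assertions; this is an illustration, not the actual lfx.log.logger implementation:

```python
import logging

import structlog


# Sketch only: forward stdlib records to structlog, using the record's logger
# name and its fully formatted message, as the tests above assert.
class InterceptHandlerSketch(logging.Handler):
    _LEVEL_TO_METHOD = {
        logging.CRITICAL: "critical",
        logging.ERROR: "error",
        logging.WARNING: "warning",
        logging.INFO: "info",
        logging.DEBUG: "debug",
    }

    def emit(self, record: logging.LogRecord) -> None:
        logger = structlog.get_logger(record.name)
        if record.levelno >= logging.CRITICAL:
            # Custom levels above CRITICAL (e.g. CRITICAL + 1) map to critical().
            method = "critical"
        else:
            method = self._LEVEL_TO_METHOD.get(record.levelno, "info")
        # getMessage() applies %-style args, matching the formatting test above.
        getattr(logger, method)(record.getMessage())
```
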
-
-class TestSetupFunctions:
- """Test suite for setup_uvicorn_logger() and setup_gunicorn_logger()."""
-
- def setup_method(self):
- """Setup for each test method."""
- # Store original logger configurations
- self.original_loggers = {}
-
- def teardown_method(self):
- """Cleanup after each test method."""
- # Restore original logger configurations if needed
-
- @patch("logging.getLogger")
- def test_setup_uvicorn_logger(self, mock_get_logger):
- """Test setup_uvicorn_logger() configures uvicorn loggers correctly."""
- # Create mock uvicorn loggers
- mock_uvicorn_access = Mock()
- mock_uvicorn_access.handlers = ["some_handler"] # Start with some handlers
- mock_uvicorn_access.propagate = False # Start with propagate False
-
- mock_uvicorn_error = Mock()
- mock_uvicorn_error.handlers = ["some_handler"] # Start with some handlers
- mock_uvicorn_error.propagate = False # Start with propagate False
-
- # Mock logging.getLogger to return the right loggers for specific names
- def get_logger_side_effect(name):
- if name == "uvicorn.access":
- return mock_uvicorn_access
- if name == "uvicorn.error":
- return mock_uvicorn_error
- return Mock()
-
- mock_get_logger.side_effect = get_logger_side_effect
-
- # Mock logging.root.manager.loggerDict to contain uvicorn logger names
- mock_logger_dict = {
- "uvicorn.access": Mock(),
- "uvicorn.error": Mock(),
- "other.logger": Mock(), # Should be ignored
- }
-
- with patch("logging.root.manager.loggerDict", mock_logger_dict):
- setup_uvicorn_logger()
-
- # Verify uvicorn loggers were configured
- assert mock_uvicorn_access.handlers == []
- assert mock_uvicorn_access.propagate is True
- assert mock_uvicorn_error.handlers == []
- assert mock_uvicorn_error.propagate is True
-
- @patch("logging.getLogger")
- def test_setup_gunicorn_logger(self, mock_get_logger):
- """Test setup_gunicorn_logger() configures gunicorn loggers correctly."""
- mock_error_logger = Mock()
- mock_access_logger = Mock()
-
- def get_logger_side_effect(name):
- if name == "gunicorn.error":
- return mock_error_logger
- if name == "gunicorn.access":
- return mock_access_logger
- return Mock()
-
- mock_get_logger.side_effect = get_logger_side_effect
-
- setup_gunicorn_logger()
-
- # Verify gunicorn loggers were configured
- assert mock_error_logger.handlers == []
- assert mock_error_logger.propagate is True
- assert mock_access_logger.handlers == []
- assert mock_access_logger.propagate is True
-
-
-class TestLogProcessors:
- """Test suite for log processor functions."""
-
- def test_add_serialized_with_buffer_disabled(self):
- """Test add_serialized() when log buffer is disabled."""
- event_dict = {"timestamp": 1625097600.123, "event": "Test message", "module": "test_module"}
-
- with patch.object(log_buffer, "enabled", return_value=False):
- result = add_serialized(None, "info", event_dict)
-
- # Should return event_dict unchanged when buffer is disabled
- assert result == event_dict
- assert "serialized" not in result
-
- def test_add_serialized_with_buffer_enabled(self):
- """Test add_serialized() when log buffer is enabled."""
- event_dict = {"timestamp": 1625097600.123, "event": "Test message", "module": "test_module"}
-
- with patch.object(log_buffer, "enabled", return_value=True):
- result = add_serialized(None, "info", event_dict)
-
- # Should add serialized field when buffer is enabled
- assert "serialized" in result
- serialized_data = json.loads(result["serialized"])
- assert serialized_data["timestamp"] == 1625097600.123
- assert serialized_data["message"] == "Test message"
- assert serialized_data["level"] == "INFO"
- assert serialized_data["module"] == "test_module"
-
- def test_remove_exception_in_production(self):
- """Test remove_exception_in_production() removes exception info in prod."""
- event_dict = {"event": "Test message", "exception": "Some exception", "exc_info": "Some exc info"}
-
- # Import the actual module to access DEV
- import sys
-
- logger_module = sys.modules["lfx.log.logger"]
- with patch.object(logger_module, "DEV", False): # noqa: FBT003
- result = remove_exception_in_production(None, "error", event_dict)
-
- # Should remove exception info in production
- assert "exception" not in result
- assert "exc_info" not in result
- assert result["event"] == "Test message"
-
- def test_remove_exception_in_development(self):
- """Test remove_exception_in_production() keeps exception info in dev."""
- event_dict = {"event": "Test message", "exception": "Some exception", "exc_info": "Some exc info"}
-
- # Import the actual module to access DEV
- import sys
-
- logger_module = sys.modules["lfx.log.logger"]
- with patch.object(logger_module, "DEV", True): # noqa: FBT003
- result = remove_exception_in_production(None, "error", event_dict)
-
- # Should keep exception info in development
- assert result["exception"] == "Some exception"
- assert result["exc_info"] == "Some exc info"
- assert result["event"] == "Test message"
-
- def test_buffer_writer_with_buffer_disabled(self):
- """Test buffer_writer() when log buffer is disabled."""
- event_dict = {"event": "Test message"}
-
- with (
- patch.object(log_buffer, "enabled", return_value=False),
- patch.object(log_buffer, "write") as mock_write,
- ):
- result = buffer_writer(None, "info", event_dict)
-
- # Should not write to buffer when disabled
- mock_write.assert_not_called()
- assert result == event_dict
-
- def test_buffer_writer_with_buffer_enabled(self):
- """Test buffer_writer() when log buffer is enabled."""
- event_dict = {"event": "Test message"}
-
- with (
- patch.object(log_buffer, "enabled", return_value=True),
- patch.object(log_buffer, "write") as mock_write,
- ):
- result = buffer_writer(None, "info", event_dict)
-
- # Should write to buffer when enabled
- mock_write.assert_called_once()
- call_args = mock_write.call_args[0]
- written_data = json.loads(call_args[0])
- assert written_data["event"] == "Test message"
- assert result == event_dict
-
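
The add_serialized tests above fix the shape of the serialized payload. A hedged sketch of that behavior, with field names inferred from the assertions rather than from the source (the real processor lives in lfx.log.logger and may carry extra fields):

```python
import json


class _BufferStub:
    """Stand-in for the module-level log_buffer; interface assumed."""

    def enabled(self) -> bool:
        return True


log_buffer = _BufferStub()


def add_serialized_sketch(_logger, method_name: str, event_dict: dict) -> dict:
    # No-op when the buffer is disabled; otherwise attach a JSON "serialized"
    # field with message/level/module/timestamp, as the tests assert.
    if not log_buffer.enabled():
        return event_dict
    event_dict["serialized"] = json.dumps(
        {
            "timestamp": event_dict.get("timestamp"),
            "message": event_dict.get("event"),
            "level": method_name.upper(),
            "module": event_dict.get("module"),
        }
    )
    return event_dict
```
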
-
-class TestConstants:
- """Test suite for module constants."""
-
- def test_valid_log_levels_contains_all_standard_levels(self):
- """Test VALID_LOG_LEVELS contains all expected levels."""
- expected_levels = ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"]
- assert expected_levels == VALID_LOG_LEVELS
-
- def test_log_level_map_has_correct_mappings(self):
- """Test LOG_LEVEL_MAP has correct integer mappings."""
- expected_mappings = {
- "DEBUG": logging.DEBUG,
- "INFO": logging.INFO,
- "WARNING": logging.WARNING,
- "ERROR": logging.ERROR,
- "CRITICAL": logging.CRITICAL,
- }
- assert expected_mappings == LOG_LEVEL_MAP
-
- def test_log_level_map_values_are_integers(self):
- """Test all LOG_LEVEL_MAP values are integers."""
- for level_name, level_value in LOG_LEVEL_MAP.items():
- assert isinstance(level_value, int), f"Level {level_name} value {level_value} is not an integer"
-
-
-class TestEdgeCasesAndErrorConditions:
- """Test suite for edge cases and error conditions."""
-
- def test_configure_with_nonexistent_parent_directory(self):
- """Test configure() handles non-existent parent directories gracefully."""
- # Create a path with non-existent parent directory
- nonexistent_path = Path("/definitely/nonexistent/directory/logfile.log")
-
- # Should not raise an exception
- configure(log_file=nonexistent_path)
-
- config = structlog._config
- assert config is not None
-
- def test_configure_with_none_parameters(self):
- """Test configure() handles None parameters correctly."""
- configure(log_level=None, log_file=None, disable=None, log_env=None, log_format=None, log_rotation=None)
-
- config = structlog._config
- assert config is not None
-
- def test_configure_with_empty_string_parameters(self):
- """Test configure() handles empty string parameters correctly."""
- configure(log_level="", log_env="", log_format="", log_rotation="")
-
- config = structlog._config
- assert config is not None
-
- def test_multiple_configure_calls(self):
- """Test that multiple calls to configure() work correctly."""
- # First configuration
- configure(log_level="DEBUG")
- config1 = structlog._config
-
- # Second configuration should override the first
- configure(log_level="ERROR")
- config2 = structlog._config
-
- # Both should be valid but different
- assert config1 is not None
- assert config2 is not None
-
- def test_configure_creates_global_logger(self):
- """Test that configure() creates a global logger."""
- configure()
-
- # Should be able to get a logger after configuration
- logger = structlog.get_logger()
- assert logger is not None
-
- # Logger should have the expected methods
- assert hasattr(logger, "debug")
- assert hasattr(logger, "info")
- assert hasattr(logger, "warning")
- assert hasattr(logger, "error")
- assert hasattr(logger, "critical")
-
- def test_intercept_handler_integration_with_stdlib_logging(self):
- """Test InterceptHandler integration with standard library logging."""
- # Reset any existing handlers
- root_logger = logging.getLogger()
- original_handlers = root_logger.handlers[:]
-
- try:
- # Clear existing handlers
- root_logger.handlers.clear()
-
- # Add InterceptHandler
- handler = InterceptHandler()
- root_logger.addHandler(handler)
- root_logger.setLevel(logging.DEBUG)
-
- # Configure structlog to capture the intercepted logs
- with patch("structlog.get_logger") as mock_get_logger:
- mock_logger = Mock()
- mock_get_logger.return_value = mock_logger
-
- # Use stdlib logging
- test_logger = logging.getLogger("test.logger")
- test_logger.info("Test message")
-
- # Should have intercepted and forwarded to structlog
- mock_get_logger.assert_called_with("test.logger")
- mock_logger.info.assert_called_with("Test message")
-
- finally:
- # Restore original handlers
- root_logger.handlers[:] = original_handlers
-
-
-# Integration tests for SizedLogBuffer with write operations
-class TestSizedLogBufferIntegration:
- """Integration tests for SizedLogBuffer with various data formats."""
-
- def test_write_with_event_field(self):
- """Test write() with event field in message."""
- buffer = SizedLogBuffer()
- buffer.max = 5
-
- message = json.dumps({"event": "Test event message", "timestamp": "2021-07-01T12:00:00Z"})
-
- buffer.write(message)
- assert len(buffer) == 1
- # Check that event was extracted correctly
- entries = buffer.get_last_n(1)
- assert "Test event message" in entries.values()
-
- def test_write_with_msg_field_fallback(self):
- """Test write() falls back to msg field when event is not present."""
- buffer = SizedLogBuffer()
- buffer.max = 5
-
- message = json.dumps({"msg": "Test msg message", "timestamp": "2021-07-01T12:00:00Z"})
-
- buffer.write(message)
- assert len(buffer) == 1
- entries = buffer.get_last_n(1)
- assert "Test msg message" in entries.values()
-
- def test_write_with_numeric_timestamp(self):
- """Test write() with numeric timestamp."""
- buffer = SizedLogBuffer()
- buffer.max = 5
-
- timestamp = 1625097600.123
- message = json.dumps({"event": "Test message", "timestamp": timestamp})
-
- buffer.write(message)
- entries = buffer.get_last_n(1)
- # Should convert to epoch milliseconds
- expected_timestamp = int(timestamp * 1000)
- assert expected_timestamp in entries
-
- def test_write_with_iso_timestamp(self):
- """Test write() with ISO format timestamp."""
- buffer = SizedLogBuffer()
- buffer.max = 5
-
- message = json.dumps({"event": "Test message", "timestamp": "2021-07-01T12:00:00.123Z"})
-
- buffer.write(message)
- entries = buffer.get_last_n(1)
- assert len(entries) == 1
- # Should have parsed and converted timestamp
- timestamps = list(entries.keys())
- assert timestamps[0] > 0 # Should be a valid epoch timestamp
-
-
-class TestSpecificBugFixes:
- """Test suite for specific bugs that were discovered and fixed."""
-
- def test_disable_with_critical_plus_one_level(self):
- """Test the specific bug where disable=True with CRITICAL+1 caused KeyError.
-
- This was the original bug: when disable=True was used, the code tried
- to use logging.CRITICAL + 1 as a filter level, which would cause a
- KeyError in structlog's make_filtering_bound_logger function.
- """
- # This specific case should not raise a KeyError anymore
- try:
- configure(disable=True, log_level="CRITICAL")
- logger = structlog.get_logger()
-
- # The logger should be configured but effectively disabled
- assert logger is not None
-
- # Try to log something - it should not crash
- logger.info("This should not appear")
- logger.critical("This should also not appear")
-
- except KeyError as e:
- pytest.fail(f"KeyError raised during configure with disable=True: {e}")
- except Exception: # noqa: S110
- # Other exceptions might be OK, but KeyError specifically was the bug
- pass
-
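
One plausible shape of the fix this test guards, sketched under the assumption (stated in the docstring above) that older structlog versions keyed make_filtering_bound_logger() off a dict of known levels, so logging.CRITICAL + 1 raised KeyError. The actual fix in the codebase may differ:

```python
import logging

import structlog


def make_wrapper_class_sketch(*, disable: bool, level: int):
    # Clamp to CRITICAL, a level structlog always knows about, instead of
    # passing CRITICAL + 1 and tripping the KeyError described above.
    effective = logging.CRITICAL if disable else min(level, logging.CRITICAL)
    return structlog.make_filtering_bound_logger(effective)
```
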
- def test_log_buffer_thread_safety(self):
- """Test that log buffer operations are thread-safe."""
- import threading
- import time
-
- buffer = SizedLogBuffer(max_readers=5)
- buffer.max = 100
-
- results = []
- errors = []
-
- def write_logs():
- try:
- for i in range(10):
- message = json.dumps({"event": f"Thread message {i}", "timestamp": time.time() + i})
- buffer.write(message)
- time.sleep(0.001) # Small delay to simulate real usage
- results.append("write_success")
- except Exception as e:
- errors.append(f"Write error: {e}")
-
- def read_logs():
- try:
- for _i in range(5):
- entries = buffer.get_last_n(5)
- assert isinstance(entries, dict)
- time.sleep(0.002) # Small delay
- results.append("read_success")
- except Exception as e:
- errors.append(f"Read error: {e}")
-
- # Create multiple threads for reading and writing
- threads = []
- for _ in range(3):
- threads.append(threading.Thread(target=write_logs))
- threads.append(threading.Thread(target=read_logs))
-
- # Start all threads
- for thread in threads:
- thread.start()
-
- # Wait for all threads to complete
- for thread in threads:
- thread.join(timeout=5)
-
- # Check that no errors occurred
- assert len(errors) == 0, f"Thread safety errors: {errors}"
-
- # Check that all operations completed successfully
- assert len(results) == 6 # 3 write + 3 read operations
- assert all("success" in result for result in results)
-
- def test_log_rotation_parsing_edge_cases(self):
- """Test edge cases in log rotation parsing."""
- with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
- log_file_path = Path(tmp_file.name)
-
- test_cases = [
- ("100 MB", 100 * 1024 * 1024),
- ("50MB", 10 * 1024 * 1024), # Should fall back to default
- ("invalid format", 10 * 1024 * 1024), # Should fall back to default
- ("", 10 * 1024 * 1024), # Should use default
- ("0 MB", 10 * 1024 * 1024), # Should fall back to default
- ]
-
- for rotation_str, expected_bytes in test_cases:
- try:
- # Clear any existing handlers
- for handler in logging.root.handlers[:]:
- if isinstance(handler, logging.handlers.RotatingFileHandler):
- logging.root.removeHandler(handler)
-
- configure(log_file=log_file_path, log_rotation=rotation_str)
-
- rotating_handlers = [
- h for h in logging.root.handlers if isinstance(h, logging.handlers.RotatingFileHandler)
- ]
-
- if rotating_handlers:
- handler = rotating_handlers[0]
- assert handler.maxBytes == expected_bytes, f"Failed for rotation '{rotation_str}'"
-
- finally:
- # Cleanup for each test case
- if log_file_path.exists():
- with contextlib.suppress(builtins.BaseException):
- log_file_path.unlink()
- for handler in logging.root.handlers[:]:
- if isinstance(handler, logging.handlers.RotatingFileHandler):
- logging.root.removeHandler(handler)
-
-
-@pytest.fixture
-def sized_log_buffer():
- return SizedLogBuffer()
-
-
-def test_init_default():
- buffer = SizedLogBuffer()
- assert buffer.max == 0
- assert buffer._max_readers == 20
-
-
-def test_init_with_env_variable():
- with patch.dict(os.environ, {"LANGFLOW_LOG_RETRIEVER_BUFFER_SIZE": "100"}):
- buffer = SizedLogBuffer()
- assert buffer.max == 100
-
-
-def test_write(sized_log_buffer):
- message = json.dumps({"text": "Test log", "record": {"time": {"timestamp": 1625097600.1244334}}})
- sized_log_buffer.max = 1 # Set max size to 1 for testing
- sized_log_buffer.write(message)
- assert len(sized_log_buffer.buffer) == 1
- assert sized_log_buffer.buffer[0][0] == 1625097600124
- assert sized_log_buffer.buffer[0][1] == "Test log"
-
-
-def test_write_overflow(sized_log_buffer):
- sized_log_buffer.max = 2
- messages = [json.dumps({"text": f"Log {i}", "record": {"time": {"timestamp": 1625097600 + i}}}) for i in range(3)]
- for message in messages:
- sized_log_buffer.write(message)
-
- assert len(sized_log_buffer.buffer) == 2
- assert sized_log_buffer.buffer[0][0] == 1625097601000
- assert sized_log_buffer.buffer[1][0] == 1625097602000
-
-
-def test_len(sized_log_buffer):
- sized_log_buffer.max = 3
- messages = [json.dumps({"text": f"Log {i}", "record": {"time": {"timestamp": 1625097600 + i}}}) for i in range(3)]
- for message in messages:
- sized_log_buffer.write(message)
-
- assert len(sized_log_buffer) == 3
-
-
-def test_get_after_timestamp(sized_log_buffer):
- sized_log_buffer.max = 5
- messages = [json.dumps({"text": f"Log {i}", "record": {"time": {"timestamp": 1625097600 + i}}}) for i in range(5)]
- for message in messages:
- sized_log_buffer.write(message)
-
- result = sized_log_buffer.get_after_timestamp(1625097602000, lines=2)
- assert len(result) == 2
- assert 1625097603000 in result
- assert 1625097602000 in result
-
-
-def test_get_before_timestamp(sized_log_buffer):
- sized_log_buffer.max = 5
- messages = [json.dumps({"text": f"Log {i}", "record": {"time": {"timestamp": 1625097600 + i}}}) for i in range(5)]
- for message in messages:
- sized_log_buffer.write(message)
-
- result = sized_log_buffer.get_before_timestamp(1625097603000, lines=2)
- assert len(result) == 2
- assert 1625097601000 in result
- assert 1625097602000 in result
-
-
-def test_get_last_n(sized_log_buffer):
- sized_log_buffer.max = 5
- messages = [json.dumps({"text": f"Log {i}", "record": {"time": {"timestamp": 1625097600 + i}}}) for i in range(5)]
- for message in messages:
- sized_log_buffer.write(message)
-
- result = sized_log_buffer.get_last_n(3)
- assert len(result) == 3
- assert 1625097602000 in result
- assert 1625097603000 in result
- assert 1625097604000 in result
-
-
-def test_enabled(sized_log_buffer):
- assert not sized_log_buffer.enabled()
- sized_log_buffer.max = 1
- assert sized_log_buffer.enabled()
-
-
-def test_max_size(sized_log_buffer):
- assert sized_log_buffer.max_size() == 0
- sized_log_buffer.max = 100
- assert sized_log_buffer.max_size() == 100
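
The buffer tests above pin down the storage format: a timestamp in seconds becomes an integer epoch-millisecond key, and the oldest entry is evicted once the buffer holds `max` items. A minimal sketch consistent with those assertions (illustration only; the real SizedLogBuffer also handles locking and concurrent readers):

```python
import json
from collections import deque


class SizedLogBufferSketch:
    def __init__(self, max_entries: int = 0):
        self.max = max_entries
        self.buffer: deque = deque()

    def write(self, message: str) -> None:
        record = json.loads(message)
        # 1625097600.1244334 s -> 1625097600124 ms, as test_write expects.
        epoch_ms = int(record["record"]["time"]["timestamp"] * 1000)
        if len(self.buffer) >= self.max:
            self.buffer.popleft()  # evict the oldest entry
        self.buffer.append((epoch_ms, record["text"]))
```
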
diff --git a/src/backend/tests/unit/test_login.py b/src/backend/tests/unit/test_login.py
deleted file mode 100644
index e22016267f76..000000000000
--- a/src/backend/tests/unit/test_login.py
+++ /dev/null
@@ -1,45 +0,0 @@
-import pytest
-from langflow.services.auth.utils import get_password_hash
-from langflow.services.database.models.user import User
-from langflow.services.deps import session_scope
-from sqlalchemy.exc import IntegrityError
-
-
-@pytest.fixture
-def test_user():
- return User(
- username="testuser",
- password=get_password_hash("testpassword"), # Assuming password needs to be hashed
- is_active=True,
- is_superuser=False,
- )
-
-
-async def test_login_successful(client, test_user):
- # Adding the test user to the database
- try:
- async with session_scope() as session:
- session.add(test_user)
- await session.commit()
- except IntegrityError:
- pass
-
- response = await client.post("api/v1/login", data={"username": "testuser", "password": "testpassword"})
- assert response.status_code == 200
- assert "access_token" in response.json()
-
-
-async def test_login_unsuccessful_wrong_username(client):
- response = await client.post("api/v1/login", data={"username": "wrongusername", "password": "testpassword"})
- assert response.status_code == 401
- assert response.json()["detail"] == "Incorrect username or password"
-
-
-async def test_login_unsuccessful_wrong_password(client, test_user, async_session):
- # Adding the test user to the database
- async_session.add(test_user)
- await async_session.commit()
-
- response = await client.post("api/v1/login", data={"username": "testuser", "password": "wrongpassword"})
- assert response.status_code == 401
- assert response.json()["detail"] == "Incorrect username or password"
diff --git a/src/backend/tests/unit/test_messages_endpoints.py b/src/backend/tests/unit/test_messages_endpoints.py
deleted file mode 100644
index 9b72c90ceed2..000000000000
--- a/src/backend/tests/unit/test_messages_endpoints.py
+++ /dev/null
@@ -1,237 +0,0 @@
-from datetime import datetime, timezone
-from urllib.parse import quote
-from uuid import UUID
-
-import pytest
-from httpx import AsyncClient
-from langflow.memory import aadd_messagetables
-
-# Assuming you have these imports available
-from langflow.services.database.models.message import MessageCreate, MessageRead, MessageUpdate
-from langflow.services.database.models.message.model import MessageTable
-from langflow.services.deps import session_scope
-
-
-@pytest.fixture
-async def created_message():
- async with session_scope() as session:
- message = MessageCreate(text="Test message", sender="User", sender_name="User", session_id="session_id")
- messagetable = MessageTable.model_validate(message, from_attributes=True)
- messagetables = await aadd_messagetables([messagetable], session)
- return MessageRead.model_validate(messagetables[0], from_attributes=True)
-
-
-@pytest.fixture
-async def created_messages(session): # noqa: ARG001
- async with session_scope() as _session:
- messages = [
- MessageCreate(text="Test message 1", sender="User", sender_name="User", session_id="session_id2"),
- MessageCreate(text="Test message 2", sender="User", sender_name="User", session_id="session_id2"),
- MessageCreate(text="Test message 3", sender="AI", sender_name="AI", session_id="session_id2"),
- ]
- messagetables = [MessageTable.model_validate(message, from_attributes=True) for message in messages]
- return await aadd_messagetables(messagetables, _session)
-
-
-@pytest.fixture
-async def messages_with_datetime_session_id(session): # noqa: ARG001
- """Create messages with datetime-like session IDs that contain characters requiring URL encoding."""
- datetime_session_id = "2024-01-15 10:30:45 UTC" # Contains spaces and colons
- async with session_scope() as _session:
- messages = [
- MessageCreate(text="Datetime message 1", sender="User", sender_name="User", session_id=datetime_session_id),
- MessageCreate(text="Datetime message 2", sender="AI", sender_name="AI", session_id=datetime_session_id),
- ]
- messagetables = [MessageTable.model_validate(message, from_attributes=True) for message in messages]
- created_messages = await aadd_messagetables(messagetables, _session)
- return created_messages, datetime_session_id
-
-
-@pytest.mark.api_key_required
-async def test_delete_messages(client: AsyncClient, created_messages, logged_in_headers):
- response = await client.request(
- "DELETE", "api/v1/monitor/messages", json=[str(msg.id) for msg in created_messages], headers=logged_in_headers
- )
- assert response.status_code == 204, response.text
- assert response.reason_phrase == "No Content"
-
-
-@pytest.mark.api_key_required
-async def test_update_message(client: AsyncClient, logged_in_headers, created_message):
- message_id = created_message.id
- message_update = MessageUpdate(text="Updated content")
- response = await client.put(
- f"api/v1/monitor/messages/{message_id}", json=message_update.model_dump(), headers=logged_in_headers
- )
- assert response.status_code == 200, response.text
- updated_message = MessageRead(**response.json())
- assert updated_message.text == "Updated content"
-
-
-@pytest.mark.api_key_required
-async def test_update_message_not_found(client: AsyncClient, logged_in_headers):
- non_existent_id = UUID("00000000-0000-0000-0000-000000000000")
- message_update = MessageUpdate(text="Updated content")
- response = await client.put(
- f"api/v1/monitor/messages/{non_existent_id}", json=message_update.model_dump(), headers=logged_in_headers
- )
- assert response.status_code == 404, response.text
- assert response.json()["detail"] == "Message not found"
-
-
-@pytest.mark.api_key_required
-async def test_delete_messages_session(client: AsyncClient, created_messages, logged_in_headers):
- session_id = "session_id2"
- response = await client.delete(f"api/v1/monitor/messages/session/{session_id}", headers=logged_in_headers)
- assert response.status_code == 204
- assert response.reason_phrase == "No Content"
-
- assert len(created_messages) == 3
- response = await client.get("api/v1/monitor/messages", headers=logged_in_headers)
- assert response.status_code == 200
- assert len(response.json()) == 0
-
-
-# Successfully update session ID for all messages with the old session ID
-@pytest.mark.usefixtures("session")
-async def test_successfully_update_session_id(client, logged_in_headers, created_messages):
- old_session_id = "session_id2"
- new_session_id = "new_session_id"
-
- response = await client.patch(
- f"api/v1/monitor/messages/session/{old_session_id}",
- params={"new_session_id": new_session_id},
- headers=logged_in_headers,
- )
-
- assert response.status_code == 200, response.text
- updated_messages = response.json()
- assert len(updated_messages) == len(created_messages)
- for message in updated_messages:
- assert message["session_id"] == new_session_id
-
- response = await client.get(
- "api/v1/monitor/messages", headers=logged_in_headers, params={"session_id": new_session_id}
- )
- assert response.status_code == 200
- assert len(response.json()) == len(created_messages)
- messages = response.json()
- for message in messages:
- assert message["session_id"] == new_session_id
- response_timestamp = message["timestamp"]
- timestamp = datetime.strptime(response_timestamp, "%Y-%m-%d %H:%M:%S %Z").replace(tzinfo=timezone.utc)
- timestamp_str = timestamp.strftime("%Y-%m-%d %H:%M:%S %Z")
- assert timestamp_str == response_timestamp
-
- # Check that the messages, ordered by timestamp, are in the expected order:
- # User, User, AI
- assert messages[0]["sender"] == "User"
- assert messages[1]["sender"] == "User"
- assert messages[2]["sender"] == "AI"
-
-
-# No messages found with the given session ID
-@pytest.mark.usefixtures("session")
-async def test_no_messages_found_with_given_session_id(client, logged_in_headers):
- old_session_id = "non_existent_session_id"
- new_session_id = "new_session_id"
-
- response = await client.patch(
- f"/messages/session/{old_session_id}", params={"new_session_id": new_session_id}, headers=logged_in_headers
- )
-
- assert response.status_code == 404, response.text
- assert response.json()["detail"] == "Not Found"
-
-
-# Test for URL-encoded datetime session ID
-@pytest.mark.api_key_required
-async def test_get_messages_with_url_encoded_datetime_session_id(
- client: AsyncClient, messages_with_datetime_session_id, logged_in_headers
-):
- """Test that URL-encoded datetime session IDs are properly decoded and matched."""
- created_messages, datetime_session_id = messages_with_datetime_session_id
-
- # URL encode the datetime session ID (spaces become %20, colons become %3A)
- encoded_session_id = quote(datetime_session_id)
-
- # Test with URL-encoded session ID
- response = await client.get(
- "api/v1/monitor/messages", params={"session_id": encoded_session_id}, headers=logged_in_headers
- )
-
- assert response.status_code == 200, response.text
- messages = response.json()
- assert len(messages) == 2
-
- # Verify all messages have the correct (decoded) session ID
- for message in messages:
- assert message["session_id"] == datetime_session_id
-
- # Verify message content
- assert messages[0]["text"] == "Datetime message 1"
- assert messages[1]["text"] == "Datetime message 2"
-
-
-@pytest.mark.api_key_required
-async def test_get_messages_with_non_encoded_datetime_session_id(
- client: AsyncClient, messages_with_datetime_session_id, logged_in_headers
-):
- """Test that non-URL-encoded datetime session IDs also work correctly."""
- created_messages, datetime_session_id = messages_with_datetime_session_id
-
- # Test with non-encoded session ID (should still work due to unquote being safe for non-encoded strings)
- response = await client.get(
- "api/v1/monitor/messages", params={"session_id": datetime_session_id}, headers=logged_in_headers
- )
-
- assert response.status_code == 200, response.text
- messages = response.json()
- assert len(messages) == 2
-
- # Verify all messages have the correct session ID
- for message in messages:
- assert message["session_id"] == datetime_session_id
-
-
-@pytest.mark.api_key_required
-async def test_get_messages_with_various_encoded_characters(client: AsyncClient, logged_in_headers):
- """Test various URL-encoded characters in session IDs."""
- # Create a session ID with various special characters
- special_session_id = "test+session:2024@domain.com"
-
- async with session_scope() as session:
- message = MessageCreate(
- text="Special chars message", sender="User", sender_name="User", session_id=special_session_id
- )
- messagetable = MessageTable.model_validate(message, from_attributes=True)
- await aadd_messagetables([messagetable], session)
-
- # URL encode the session ID
- encoded_session_id = quote(special_session_id)
-
- # Test with URL-encoded session ID
- response = await client.get(
- "api/v1/monitor/messages", params={"session_id": encoded_session_id}, headers=logged_in_headers
- )
-
- assert response.status_code == 200, response.text
- messages = response.json()
- assert len(messages) == 1
- assert messages[0]["session_id"] == special_session_id
- assert messages[0]["text"] == "Special chars message"
-
-
-@pytest.mark.api_key_required
-async def test_get_messages_empty_result_with_encoded_nonexistent_session(client: AsyncClient, logged_in_headers):
- """Test that URL-encoded non-existent session IDs return empty results."""
- nonexistent_session_id = "2024-12-31 23:59:59 UTC"
- encoded_session_id = quote(nonexistent_session_id)
-
- response = await client.get(
- "api/v1/monitor/messages", params={"session_id": encoded_session_id}, headers=logged_in_headers
- )
-
- assert response.status_code == 200, response.text
- messages = response.json()
- assert len(messages) == 0
diff --git a/src/backend/tests/unit/test_process.py b/src/backend/tests/unit/test_process.py
deleted file mode 100644
index 6037f50fb0d4..000000000000
--- a/src/backend/tests/unit/test_process.py
+++ /dev/null
@@ -1,378 +0,0 @@
-from langflow.processing.process import process_tweaks
-from langflow.services.deps import get_session_service
-from langflow.services.utils import register_all_service_factories
-
-
-def test_no_tweaks():
- graph_data = {
- "data": {
- "nodes": [
- {
- "id": "node1",
- "data": {
- "node": {
- "template": {
- "param1": {"value": 1},
- "param2": {"value": 2},
- }
- }
- },
- },
- {
- "id": "node2",
- "data": {
- "node": {
- "template": {
- "param1": {"value": 3},
- "param2": {"value": 4},
- }
- }
- },
- },
- ]
- }
- }
- tweaks = {}
- result = process_tweaks(graph_data, tweaks)
- assert result == graph_data
-
-
-def test_single_tweak():
- graph_data = {
- "data": {
- "nodes": [
- {
- "id": "node1",
- "data": {
- "node": {
- "template": {
- "param1": {"value": 1, "type": "int"},
- "param2": {"value": 2, "type": "int"},
- }
- }
- },
- },
- {
- "id": "node2",
- "data": {
- "node": {
- "template": {
- "param1": {"value": 3, "type": "int"},
- "param2": {"value": 4, "type": "int"},
- }
- }
- },
- },
- ]
- }
- }
- tweaks = {"node1": {"param1": 5}}
- expected_result = {
- "data": {
- "nodes": [
- {
- "id": "node1",
- "data": {
- "node": {
- "template": {
- "param1": {"value": 5, "type": "int"},
- "param2": {"value": 2, "type": "int"},
- }
- }
- },
- },
- {
- "id": "node2",
- "data": {
- "node": {
- "template": {
- "param1": {"value": 3, "type": "int"},
- "param2": {"value": 4, "type": "int"},
- }
- }
- },
- },
- ]
- }
- }
- result = process_tweaks(graph_data, tweaks)
- assert result == expected_result
-
-
-def test_multiple_tweaks():
- graph_data = {
- "data": {
- "nodes": [
- {
- "id": "node1",
- "data": {
- "node": {
- "template": {
- "param1": {"value": 1, "type": "int"},
- "param2": {"value": 2, "type": "int"},
- }
- }
- },
- },
- {
- "id": "node2",
- "data": {
- "node": {
- "template": {
- "param1": {"value": 3, "type": "int"},
- "param2": {"value": 4, "type": "int"},
- }
- }
- },
- },
- ]
- }
- }
- tweaks = {
- "node1": {"param1": 5, "param2": 6},
- "node2": {"param1": 7},
- }
- expected_result = {
- "data": {
- "nodes": [
- {
- "id": "node1",
- "data": {
- "node": {
- "template": {
- "param1": {"value": 5, "type": "int"},
- "param2": {"value": 6, "type": "int"},
- }
- }
- },
- },
- {
- "id": "node2",
- "data": {
- "node": {
- "template": {
- "param1": {"value": 7, "type": "int"},
- "param2": {"value": 4, "type": "int"},
- }
- }
- },
- },
- ]
- }
- }
- result = process_tweaks(graph_data, tweaks)
- assert result == expected_result
-
-
-# Test tweaks that pass just the param and value, with no node id.
-# This is a new feature that was added to the process_tweaks function.
-def test_tweak_no_node_id():
- graph_data = {
- "data": {
- "nodes": [
- {
- "id": "node1",
- "data": {
- "node": {
- "template": {
- "param1": {"value": 1, "type": "int"},
- "param2": {"value": 2, "type": "int"},
- }
- }
- },
- },
- {
- "id": "node2",
- "data": {
- "node": {
- "template": {
- "param1": {"value": 3, "type": "int"},
- "param2": {"value": 4, "type": "int"},
- }
- }
- },
- },
- ]
- }
- }
- tweaks = {"param1": 5}
- expected_result = {
- "data": {
- "nodes": [
- {
- "id": "node1",
- "data": {
- "node": {
- "template": {
- "param1": {"value": 5, "type": "int"},
- "param2": {"value": 2, "type": "int"},
- }
- }
- },
- },
- {
- "id": "node2",
- "data": {
- "node": {
- "template": {
- "param1": {"value": 5, "type": "int"},
- "param2": {"value": 4, "type": "int"},
- }
- }
- },
- },
- ]
- }
- }
- result = process_tweaks(graph_data, tweaks)
- assert result == expected_result
-
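
A sketch of the behavior the test above relies on, assumed from the expected result rather than from the source: a tweak keyed by a param name instead of a node id is applied to every node whose template contains that param.

```python
def process_global_tweak_sketch(graph_data: dict, param: str, value) -> dict:
    # Apply a node-id-less tweak to every matching template field.
    for node in graph_data["data"]["nodes"]:
        template = node["data"]["node"]["template"]
        if param in template:
            template[param]["value"] = value
    return graph_data
```
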
-
-def test_tweak_not_in_template():
- graph_data = {
- "data": {
- "nodes": [
- {
- "id": "node1",
- "data": {
- "node": {
- "template": {
- "param1": {"value": 1, "type": "int"},
- "param2": {"value": 2, "type": "int"},
- }
- }
- },
- },
- {
- "id": "node2",
- "data": {
- "node": {
- "template": {
- "param1": {"value": 3, "type": "int"},
- "param2": {"value": 4, "type": "int"},
- }
- }
- },
- },
- ]
- }
- }
- tweaks = {"node1": {"param3": 5}}
- result = process_tweaks(graph_data, tweaks)
- assert result == graph_data
-
-
-async def test_load_langchain_object_with_cached_session(basic_graph_data):
- # Provide a non-existent session_id
- register_all_service_factories()
- session_service = get_session_service()
- session_id1 = "non-existent-session-id"
- graph1, artifacts1 = await session_service.load_session(session_id1, basic_graph_data)
- # Use the same session_id to load the graph again
- graph2, artifacts2 = await session_service.load_session(session_id1, basic_graph_data)
-
- assert graph1 == graph2
- assert artifacts1 == artifacts2
-
-
-# TODO: Update basic graph data
-# async def test_load_langchain_object_with_no_cached_session(client, basic_graph_data):
-# # Provide a non-existent session_id
-# session_service = get_session_service()
-# session_id1 = "non-existent-session-id"
-# session_id = session_service.build_key(session_id1, basic_graph_data)
-# graph1, artifacts1 = await session_service.load_session(
-# session_id, data_graph=basic_graph_data, flow_id="flow_id"
-# )
-# # Clear the cache
-# await session_service.clear_session(session_id)
-# # Use the new session_id to get the graph again
-# graph2, artifacts2 = await session_service.load_session(
-# session_id, data_graph=basic_graph_data, flow_id="flow_id"
-# )
-#
-# # Since the cache was cleared, objects should be different
-# assert id(graph1) != id(graph2)
-
-
-# async def test_load_langchain_object_without_session_id(client, basic_graph_data):
-# # Provide a non-existent session_id
-# session_service = get_session_service()
-# session_id1 = None
-# graph1, artifacts1 = await session_service.load_session(
-# session_id1, data_graph=basic_graph_data, flow_id="flow_id"
-# )
-# # Use the new session_id to get the langchain_object again
-# graph2, artifacts2 = await session_service.load_session(
-# session_id1, data_graph=basic_graph_data, flow_id="flow_id"
-# )
-#
-# assert graph1 == graph2
-
-
-def test_apply_tweaks_code_override_prevention():
- """Test that code tweaks are prevented and logged as warning."""
- from unittest.mock import patch
-
- from langflow.processing.process import apply_tweaks
-
- # Create a simple node with template including code field
- node = {
- "id": "test_node",
- "data": {
- "node": {
- "template": {
- "code": {"value": "original_code", "type": "code"},
- "param1": {"value": "original_value", "type": "str"},
- }
- }
- },
- }
-
- # Try to tweak both code and a normal parameter
- node_tweaks = {"code": "malicious_code_injection", "param1": "new_value"}
-
- # Capture log output
- with patch("langflow.processing.process.logger") as mock_logger:
- apply_tweaks(node, node_tweaks)
-
- # Verify warning was logged for code override attempt
- mock_logger.warning.assert_called_once_with("Security: Code field cannot be overridden via tweaks.")
-
- # Verify code field was NOT modified
- assert node["data"]["node"]["template"]["code"]["value"] == "original_code"
-
- # Verify other parameter WAS modified
- assert node["data"]["node"]["template"]["param1"]["value"] == "new_value"
-
-
-def test_apply_tweaks_code_only_prevention():
- """Test that only code tweaks are prevented when trying to override code alone."""
- from unittest.mock import patch
-
- from langflow.processing.process import apply_tweaks
-
- # Create a simple node with template including code field
- node = {
- "id": "test_node",
- "data": {
- "node": {
- "template": {
- "code": {"value": "original_code", "type": "code"},
- }
- }
- },
- }
-
- # Try to tweak only the code field
- node_tweaks = {"code": "attempted_code_injection"}
-
- # Capture log output
- with patch("langflow.processing.process.logger") as mock_logger:
- apply_tweaks(node, node_tweaks)
-
- # Verify warning was logged
- mock_logger.warning.assert_called_once_with("Security: Code field cannot be overridden via tweaks.")
-
- # Verify code field was NOT modified
- assert node["data"]["node"]["template"]["code"]["value"] == "original_code"
diff --git a/src/backend/tests/unit/test_session_endpoint.py b/src/backend/tests/unit/test_session_endpoint.py
deleted file mode 100644
index ca20a2b09466..000000000000
--- a/src/backend/tests/unit/test_session_endpoint.py
+++ /dev/null
@@ -1,142 +0,0 @@
-from uuid import uuid4
-
-import pytest
-from httpx import AsyncClient
-from langflow.memory import aadd_messagetables
-from langflow.services.database.models.message.model import MessageTable
-from langflow.services.deps import session_scope
-
-
-@pytest.fixture
-async def messages_with_flow_ids(session): # noqa: ARG001
- """Create messages with different session_ids and flow_ids for testing sessions endpoint."""
- async with session_scope() as _session:
- flow_id_1 = uuid4()
- flow_id_2 = uuid4()
-
- # Create MessageTable objects directly since MessageCreate doesn't have a flow_id field
- messagetables = [
- MessageTable(
- text="Message 1", sender="User", sender_name="User", session_id="session_A", flow_id=flow_id_1
- ),
- MessageTable(text="Message 2", sender="AI", sender_name="AI", session_id="session_A", flow_id=flow_id_1),
- MessageTable(
- text="Message 3", sender="User", sender_name="User", session_id="session_B", flow_id=flow_id_1
- ),
- MessageTable(
- text="Message 4", sender="User", sender_name="User", session_id="session_C", flow_id=flow_id_2
- ),
- MessageTable(text="Message 5", sender="AI", sender_name="AI", session_id="session_D", flow_id=flow_id_2),
- MessageTable(
- text="Message 6",
- sender="User",
- sender_name="User",
- session_id="session_E",
- flow_id=None, # No flow_id
- ),
- ]
- created_messages = await aadd_messagetables(messagetables, _session)
-
- return {
- "messages": created_messages,
- "flow_id_1": flow_id_1,
- "flow_id_2": flow_id_2,
- "expected_sessions_flow_1": {"session_A", "session_B"},
- "expected_sessions_flow_2": {"session_C", "session_D"},
- "expected_all_sessions": {"session_A", "session_B", "session_C", "session_D", "session_E"},
- }
-
-
-# Tests for /sessions endpoint
-@pytest.mark.api_key_required
-async def test_get_sessions_all(client: AsyncClient, logged_in_headers, messages_with_flow_ids):
- """Test getting all sessions without any filter."""
- response = await client.get("api/v1/monitor/messages/sessions", headers=logged_in_headers)
-
- assert response.status_code == 200, response.text
- sessions = response.json()
- assert isinstance(sessions, list)
-
- # Convert to set for easier comparison since order doesn't matter
- returned_sessions = set(sessions)
- expected_sessions = messages_with_flow_ids["expected_all_sessions"]
-
- assert returned_sessions == expected_sessions
- assert len(sessions) == len(expected_sessions)
-
-
-@pytest.mark.api_key_required
-async def test_get_sessions_with_flow_id_filter(client: AsyncClient, logged_in_headers, messages_with_flow_ids):
- """Test getting sessions filtered by flow_id."""
- flow_id_1 = messages_with_flow_ids["flow_id_1"]
-
- response = await client.get(
- "api/v1/monitor/messages/sessions", params={"flow_id": str(flow_id_1)}, headers=logged_in_headers
- )
-
- assert response.status_code == 200, response.text
- sessions = response.json()
- assert isinstance(sessions, list)
-
- returned_sessions = set(sessions)
- expected_sessions = messages_with_flow_ids["expected_sessions_flow_1"]
-
- assert returned_sessions == expected_sessions
- assert len(sessions) == len(expected_sessions)
-
-
-@pytest.mark.api_key_required
-async def test_get_sessions_with_different_flow_id(client: AsyncClient, logged_in_headers, messages_with_flow_ids):
- """Test getting sessions filtered by a different flow_id."""
- flow_id_2 = messages_with_flow_ids["flow_id_2"]
-
- response = await client.get(
- "api/v1/monitor/messages/sessions", params={"flow_id": str(flow_id_2)}, headers=logged_in_headers
- )
-
- assert response.status_code == 200, response.text
- sessions = response.json()
- assert isinstance(sessions, list)
-
- returned_sessions = set(sessions)
- expected_sessions = messages_with_flow_ids["expected_sessions_flow_2"]
-
- assert returned_sessions == expected_sessions
- assert len(sessions) == len(expected_sessions)
-
-
-@pytest.mark.api_key_required
-async def test_get_sessions_with_non_existent_flow_id(client: AsyncClient, logged_in_headers):
- """Test getting sessions with a non-existent flow_id returns empty list."""
- non_existent_flow_id = uuid4()
-
- response = await client.get(
- "api/v1/monitor/messages/sessions", params={"flow_id": str(non_existent_flow_id)}, headers=logged_in_headers
- )
-
- assert response.status_code == 200, response.text
- sessions = response.json()
- assert isinstance(sessions, list)
- assert len(sessions) == 0
-
-
-@pytest.mark.api_key_required
-async def test_get_sessions_empty_database(client: AsyncClient, logged_in_headers):
- """Test getting sessions when no messages exist in database."""
- response = await client.get("api/v1/monitor/messages/sessions", headers=logged_in_headers)
-
- assert response.status_code == 200, response.text
- sessions = response.json()
- assert isinstance(sessions, list)
- assert len(sessions) == 0
-
-
-@pytest.mark.api_key_required
-async def test_get_sessions_invalid_flow_id_format(client: AsyncClient, logged_in_headers):
- """Test getting sessions with invalid flow_id format returns 422."""
- response = await client.get(
- "api/v1/monitor/messages/sessions", params={"flow_id": "invalid-uuid"}, headers=logged_in_headers
- )
-
- assert response.status_code == 422, response.text
- assert "detail" in response.json()
diff --git a/src/backend/tests/unit/test_setup_superuser.py b/src/backend/tests/unit/test_setup_superuser.py
deleted file mode 100644
index 04c9df1126ea..000000000000
--- a/src/backend/tests/unit/test_setup_superuser.py
+++ /dev/null
@@ -1,246 +0,0 @@
-import asyncio
-from unittest.mock import AsyncMock, MagicMock, patch
-
-import pytest
-from langflow.services.auth.utils import create_super_user
-from langflow.services.database.models.user.model import User
-from langflow.services.utils import teardown_superuser
-from sqlalchemy.exc import IntegrityError
-
-from lfx.services.settings.constants import (
- DEFAULT_SUPERUSER,
- DEFAULT_SUPERUSER_PASSWORD,
-)
-
-# @patch("langflow.services.deps.get_session")
-# @patch("langflow.services.utils.create_super_user")
-# @patch("langflow.services.deps.get_settings_service")
-# # @patch("langflow.services.utils.verify_password")
-# def test_setup_superuser(
-# mock_get_session, mock_create_super_user, mock_get_settings_service
-# ):
-# # Test when AUTO_LOGIN is True
-# calls = []
-# mock_settings_service = Mock()
-# mock_settings_service.auth_settings.AUTO_LOGIN = True
-# mock_settings_service.auth_settings.SUPERUSER = DEFAULT_SUPERUSER
-# mock_settings_service.auth_settings.SUPERUSER_PASSWORD = DEFAULT_SUPERUSER_PASSWORD
-# mock_get_settings_service.return_value = mock_settings_service
-# mock_session = Mock()
-# mock_session.query.return_value.filter.return_value.first.return_value = (
-# mock_session
-# )
-# # return value of get_session is a generator
-# mock_get_session.return_value = iter([mock_session, mock_session, mock_session])
-# setup_superuser(mock_settings_service, mock_session)
-# mock_session.query.assert_called_once_with(User)
-# # Set return value of filter to be None
-# mock_session.query.return_value.filter.return_value.first.return_value = None
-# actual_expr = mock_session.query.return_value.filter.call_args[0][0]
-# expected_expr = User.username == DEFAULT_SUPERUSER
-
-# assert str(actual_expr) == str(expected_expr)
-# create_call = call(
-# db=mock_session, username=DEFAULT_SUPERUSER, password=DEFAULT_SUPERUSER_PASSWORD
-# )
-# calls.append(create_call)
-# # mock_create_super_user.assert_has_calls(calls)
-# assert 1 == mock_create_super_user.call_count
-
-# def reset_mock_credentials():
-# mock_settings_service.auth_settings.SUPERUSER = DEFAULT_SUPERUSER
-# mock_settings_service.auth_settings.SUPERUSER_PASSWORD = (
-# DEFAULT_SUPERUSER_PASSWORD
-# )
-
-# ADMIN_USER_NAME = "admin_user"
-# # Test when username and password are default
-# mock_settings_service.auth_settings = Mock()
-# mock_settings_service.auth_settings.AUTO_LOGIN = False
-# mock_settings_service.auth_settings.SUPERUSER = ADMIN_USER_NAME
-# mock_settings_service.auth_settings.SUPERUSER_PASSWORD = "password"
-# mock_settings_service.auth_settings.reset_credentials = Mock(
-# side_effect=reset_mock_credentials
-# )
-
-# mock_get_settings_service.return_value = mock_settings_service
-
-# setup_superuser(mock_settings_service, mock_session)
-# mock_session.query.assert_called_with(User)
-# actual_expr = mock_session.query.return_value.filter.call_args[0][0]
-# expected_expr = User.username == ADMIN_USER_NAME
-
-# assert str(actual_expr) == str(expected_expr)
-# create_call = call(db=mock_session, username=ADMIN_USER_NAME, password="password")
-# calls.append(create_call)
-# # mock_create_super_user.assert_has_calls(calls)
-# assert 2 == mock_create_super_user.call_count
-# # Test that superuser credentials are reset
-# mock_settings_service.auth_settings.reset_credentials.assert_called_once()
-# assert mock_settings_service.auth_settings.SUPERUSER != ADMIN_USER_NAME
-# assert mock_settings_service.auth_settings.SUPERUSER_PASSWORD != "password"
-
-# # Test when superuser already exists
-# mock_settings_service.auth_settings.AUTO_LOGIN = False
-# mock_settings_service.auth_settings.SUPERUSER = ADMIN_USER_NAME
-# mock_settings_service.auth_settings.SUPERUSER_PASSWORD = "password"
-# mock_user = Mock()
-# mock_user.is_superuser = True
-# mock_session.query.return_value.filter.return_value.first.return_value = mock_user
-# setup_superuser(mock_settings_service, mock_session)
-# mock_session.query.assert_called_with(User)
-# actual_expr = mock_session.query.return_value.filter.call_args[0][0]
-# expected_expr = User.username == ADMIN_USER_NAME
-
-# assert str(actual_expr) == str(expected_expr)
-
-
-@patch("langflow.services.deps.get_settings_service")
-@patch("langflow.services.deps.get_session")
-async def test_teardown_superuser_default_superuser(mock_get_session, mock_get_settings_service):
- mock_settings_service = MagicMock()
- mock_settings_service.auth_settings.AUTO_LOGIN = True
- mock_settings_service.auth_settings.SUPERUSER = DEFAULT_SUPERUSER
- mock_settings_service.auth_settings.SUPERUSER_PASSWORD = DEFAULT_SUPERUSER_PASSWORD
- mock_get_settings_service.return_value = mock_settings_service
-
- mock_session = MagicMock()
- mock_user = MagicMock()
- mock_user.is_superuser = True
- mock_session.query.return_value.filter.return_value.first.return_value = mock_user
- mock_get_session.return_value = iter([mock_session])
-
- await teardown_superuser(mock_settings_service, mock_session)
-
- mock_session.query.assert_not_called()
-
-
-async def test_teardown_superuser_no_default_superuser():
- admin_user_name = "admin_user"
- mock_settings_service = MagicMock()
- mock_settings_service.auth_settings.AUTO_LOGIN = False
- mock_settings_service.auth_settings.SUPERUSER = admin_user_name
- mock_settings_service.auth_settings.SUPERUSER_PASSWORD = "password" # noqa: S105
-
- mock_session = AsyncMock(return_value=asyncio.Future())
- mock_user = MagicMock()
- mock_user.is_superuser = False
- mock_user.last_login_at = None
-
- mock_result = MagicMock()
- mock_result.first.return_value = mock_user
- mock_session.exec.return_value = mock_result
-
- await teardown_superuser(mock_settings_service, mock_session)
-
- mock_session.delete.assert_not_awaited()
- mock_session.commit.assert_not_awaited()
-
-
-@pytest.mark.asyncio
-async def test_create_super_user_race_condition():
- """Test create_super_user handles race conditions gracefully when multiple workers try to create the same user."""
- # Mock the database session
- mock_session = AsyncMock()
-
- # Create a mock user that will be "created" by the first worker
- mock_user = MagicMock(spec=User)
- mock_user.username = "testuser"
- mock_user.is_superuser = True
-
- # Mock get_password_hash to return a fixed value
- mock_get_password_hash = MagicMock(return_value="hashed_password")
-
- # Set up the race condition scenario:
- # 1. First call to get_user_by_username returns None (user doesn't exist)
- # 2. commit() raises IntegrityError (simulating race condition)
- # 3. After rollback, second call to get_user_by_username returns the existing user
- mock_get_user_by_username = AsyncMock()
- mock_get_user_by_username.side_effect = [None, mock_user] # None first, then existing user
-
- mock_session.commit.side_effect = IntegrityError("statement", "params", Exception("orig"))
- with (
- patch("langflow.services.auth.utils.get_user_by_username", mock_get_user_by_username),
- patch("langflow.services.auth.utils.get_password_hash", mock_get_password_hash),
- patch("langflow.services.database.models.user.model.User") as mock_user_class,
- ):
- # Configure the User class mock to return our mock_user when instantiated
- mock_user_class.return_value = mock_user
-
- result = await create_super_user("testuser", "password", mock_session)
-
- # Verify that the function handled the race condition correctly
- assert result == mock_user
- assert mock_session.add.call_count == 1 # User was added to session
- assert mock_session.commit.call_count == 1 # Commit was attempted once (and failed)
- assert mock_session.rollback.call_count == 1 # Session was rolled back after IntegrityError
- assert mock_get_user_by_username.call_count == 2 # Called twice: initial check + after rollback
-
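-# For reference, a minimal sketch of the create-then-fetch pattern exercised
-# above (hypothetical shape; the real create_super_user lives in
-# langflow.services.auth.utils and may differ):
-#
-#     async def create_super_user(username, password, db):
-#         if user := await get_user_by_username(db, username):
-#             return user
-#         db.add(User(username=username, password=get_password_hash(password)))
-#         try:
-#             await db.commit()
-#         except IntegrityError:
-#             await db.rollback()  # another worker won the race
-#             if user := await get_user_by_username(db, username):
-#                 return user
-#             raise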
-
-@pytest.mark.asyncio
-async def test_create_super_user_race_condition_no_user_found():
- """Test that create_super_user re-raises exception if no user is found after IntegrityError."""
- # Mock the database session
- mock_session = AsyncMock()
-
- # Mock get_user_by_username to always return None (even after rollback)
- mock_get_user_by_username = AsyncMock()
- mock_get_user_by_username.side_effect = [None, None] # None for initial check and after rollback
-
- # Mock other dependencies
- mock_get_password_hash = MagicMock(return_value="hashed_password")
- mock_user = MagicMock(spec=User)
-
- # Set up scenario where IntegrityError occurs but no user is found afterward
- integrity_error = IntegrityError("statement", "params", Exception("orig"))
- mock_session.commit.side_effect = integrity_error
-
- with (
- patch("langflow.services.auth.utils.get_user_by_username", mock_get_user_by_username),
- patch("langflow.services.auth.utils.get_password_hash", mock_get_password_hash),
- patch("langflow.services.database.models.user.model.User", return_value=mock_user),
- pytest.raises(IntegrityError),
- ):
- await create_super_user("testuser", "password", mock_session)
-
- # Verify rollback was called but exception was re-raised
- assert mock_session.rollback.call_count == 1
- assert mock_get_user_by_username.call_count == 2 # Initial + after rollback
-
-
-@pytest.mark.asyncio
-async def test_create_super_user_concurrent_workers():
- """Test multiple concurrent calls to create_super_user with the same username."""
- # This would require a real database to properly test, but we can simulate
- # the behavior with mocks to verify the logic works correctly
-
- mock_session1 = AsyncMock()
- mock_session2 = AsyncMock()
-
- # Create mock users
- mock_user = MagicMock(spec=User)
- mock_user.username = "admin"
- mock_user.is_superuser = True
-
- mock_get_user_by_username = AsyncMock()
-
- # Worker 1 succeeds, Worker 2 gets IntegrityError then finds existing user
- mock_session1.commit.return_value = None # Success
- mock_session2.commit.side_effect = IntegrityError("statement", "params", Exception("orig")) # Race condition
-
- # get_user_by_username returns None initially, then the created user for worker 2
- mock_get_user_by_username.side_effect = [None, None, mock_user]
-
- with patch("langflow.services.auth.utils.get_user_by_username", mock_get_user_by_username):
- # Simulate concurrent execution using asyncio.gather
- result1, result2 = await asyncio.gather(
- create_super_user("admin", "password", mock_session1),
- create_super_user("admin", "password", mock_session2),
- )
-
- # Both workers should end up with a user (worker 1 creates, worker 2 finds existing)
- assert result1 is not None
- assert result2 == mock_user
-
- # Worker 2 should have rolled back and fetched existing user
- assert mock_session2.rollback.call_count == 1
diff --git a/src/backend/tests/unit/test_simple_agent_in_lfx_run.py b/src/backend/tests/unit/test_simple_agent_in_lfx_run.py
deleted file mode 100644
index f9359744f391..000000000000
--- a/src/backend/tests/unit/test_simple_agent_in_lfx_run.py
+++ /dev/null
@@ -1,355 +0,0 @@
-"""Tests for the simple agent workflow that can be executed via `lfx run`.
-
-This module tests the agent workflow by:
-1. Creating and validating the agent script
-2. Testing component instantiation and configuration
-3. Testing direct graph execution without CLI
-4. Verifying the workflow works with langflow's dependencies
-"""
-
-import os
-from pathlib import Path
-
-import pytest
-
-
-class TestAgentInLfxRun:
- """Test the agent workflow that demonstrates lfx run functionality."""
-
- @pytest.fixture
- def simple_agent_script_content(self):
- """The simple_agent.py script content for testing lfx run."""
- return '''"""A simple agent flow example for Langflow.
-
-This script demonstrates how to set up a conversational agent using Langflow's
-Agent component with web search capabilities.
-
-Features:
-- Uses the new flattened component access (cp.AgentComponent instead of deep imports)
-- Configures logging to 'langflow.log' at INFO level
-- Creates an agent with OpenAI GPT model
-- Provides web search tools via URLComponent
-- Connects ChatInput → Agent → ChatOutput
-
-Usage:
- uv run lfx run simple_agent.py "How are you?"
-"""
-
-import os
-from pathlib import Path
-
-# Using the new flattened component access
-from lfx import components as cp
-from lfx.graph import Graph
-from lfx.log.logger import LogConfig
-
-log_config = LogConfig(
- log_level="INFO",
- log_file=Path("langflow.log"),
-)
-
-# Showcase the new flattened component access - no need for deep imports!
-chat_input = cp.ChatInput()
-agent = cp.AgentComponent()
-url_component = cp.URLComponent()
-tools = url_component.to_toolkit()
-
-agent.set(
- model_name="gpt-4o-mini",
- agent_llm="OpenAI",
- api_key=os.getenv("OPENAI_API_KEY"),
- input_value=chat_input.message_response,
- tools=tools,
-)
-chat_output = cp.ChatOutput().set(input_value=agent.message_response)
-
-graph = Graph(chat_input, chat_output, log_config=log_config)
-'''
-
- @pytest.fixture
- def simple_agent_script_file(self):
- """Get the path to the agent script in tests/data."""
- # Use the script file we created in tests/data
- script_path = Path(__file__).parent.parent / "data" / "simple_agent.py"
- assert script_path.exists(), f"Script file not found: {script_path}"
-
- yield script_path
-
- # Cleanup any log file that might be created
- log_file = Path("langflow.log")
- if log_file.exists():
- log_file.unlink(missing_ok=True)
-
- def test_agent_script_structure_and_syntax(self, simple_agent_script_content):
- """Test that the agent script has correct structure and valid syntax."""
- import ast
-
- # Test syntax is valid
- try:
- ast.parse(simple_agent_script_content)
- except SyntaxError as e:
- pytest.fail(f"Script has invalid syntax: {e}")
-
- # Test key components are present
- assert "from lfx import components as cp" in simple_agent_script_content
- assert "cp.ChatInput()" in simple_agent_script_content
- assert "cp.AgentComponent()" in simple_agent_script_content
- assert "cp.URLComponent()" in simple_agent_script_content
- assert "cp.ChatOutput()" in simple_agent_script_content
- assert "url_component.to_toolkit()" in simple_agent_script_content
- assert 'model_name="gpt-4o-mini"' in simple_agent_script_content
- assert 'agent_llm="OpenAI"' in simple_agent_script_content
- assert "Graph(chat_input, chat_output" in simple_agent_script_content
-
- def test_agent_script_file_validation(self, simple_agent_script_file):
- """Test that the agent script file exists and has valid content."""
- # Since we don't have direct CLI access in langflow tests,
- # verify the script file exists and has correct content
- assert simple_agent_script_file.exists(), "Script file should exist in tests/data"
-
- # Verify script content has expected structure
- content = simple_agent_script_file.read_text()
- assert "from lfx import components as cp" in content
- assert "cp.AgentComponent()" in content
- assert "Graph(chat_input, chat_output" in content
-
-    def test_agent_script_documents_usage_and_logging(self, simple_agent_script_file):
-        """Test that the script documents its usage and configures logging."""
- # Verify script file exists and contains the expected structure
- assert simple_agent_script_file.exists()
-
-        # Test that the script documents its usage in its docstring
- content = simple_agent_script_file.read_text()
- assert "Usage:" in content, "Script should have usage documentation"
-
- # Verify the key logging components are present
- assert "LogConfig" in content, "Script should configure logging properly"
-
- @pytest.mark.skipif(not os.getenv("OPENAI_API_KEY"), reason="OPENAI_API_KEY required for full execution test")
- def test_agent_script_api_configuration(self, simple_agent_script_file):
- """Test that the script is properly configured for API usage."""
- # Verify the script file exists and has API key configuration
- assert simple_agent_script_file.exists()
-
- content = simple_agent_script_file.read_text()
-
- # Should use environment variable for API key
- assert 'os.getenv("OPENAI_API_KEY")' in content
-
- # Should use the recommended model
- assert 'model_name="gpt-4o-mini"' in content
-
- def test_agent_workflow_direct_execution(self):
- """Test the agent workflow by executing the graph directly."""
- # Import the components for direct execution
- try:
- from lfx import components as cp
- from lfx.graph import Graph
- from lfx.log.logger import LogConfig
- except ImportError as e:
- pytest.skip(f"LFX components not available: {e}")
-
- # Create the agent workflow
- log_config = LogConfig(
- log_level="INFO",
- log_file=Path("langflow.log"),
- )
-
- chat_input = cp.ChatInput()
- agent = cp.AgentComponent()
- url_component = cp.URLComponent()
-
- # Configure URL component for tools
- url_component.set(urls=["https://httpbin.org/json"])
- tools = url_component.to_toolkit()
-
- # Configure agent
- agent.set(
- model_name="gpt-4o-mini",
- agent_llm="OpenAI",
- api_key=os.getenv("OPENAI_API_KEY", "test-key"), # Use test key if not available
- input_value="Hello, how are you?", # Direct input instead of chat_input.message_response
- tools=tools,
- )
-
- chat_output = cp.ChatOutput()
-
- # Create graph
- graph = Graph(chat_input, chat_output, log_config=log_config)
-
- # Verify graph was created successfully
- assert graph is not None
- # The Graph object exists and has the expected structure
- assert str(graph), "Graph should have string representation"
-
- # Cleanup log file
- log_file = Path("langflow.log")
- if log_file.exists():
- log_file.unlink(missing_ok=True)
-
- def test_flattened_component_access_pattern(self):
- """Test that the flattened component access pattern works."""
- try:
- from lfx import components as cp
- except ImportError as e:
- pytest.skip(f"LFX components not available: {e}")
-
- # Test that all required components are accessible via flattened access
- components_to_test = ["ChatInput", "AgentComponent", "URLComponent", "ChatOutput"]
-
- for component_name in components_to_test:
- assert hasattr(cp, component_name), f"Component {component_name} not available via flattened access"
-
- # Test that we can instantiate each component
- component_class = getattr(cp, component_name)
- try:
- instance = component_class()
- assert instance is not None
- except Exception as e:
- pytest.fail(f"Failed to instantiate {component_name}: {e}")
-
- def test_url_component_to_toolkit_functionality(self):
- """Test that URLComponent.to_toolkit() works properly."""
- try:
- from lfx import components as cp
- except ImportError as e:
- pytest.skip(f"LFX components not available: {e}")
-
- url_component = cp.URLComponent()
-
- # Configure with test URL
- url_component.set(urls=["https://httpbin.org/json"])
-
- # Test to_toolkit functionality
- tools = url_component.to_toolkit()
-
- # Should return some kind of tools object/list
- assert tools is not None
- # Should be iterable (list, tuple, or similar)
- assert hasattr(tools, "__iter__"), "Tools should be iterable"
-
- def test_agent_configuration_workflow(self):
- """Test agent configuration in the workflow."""
- try:
- from lfx import components as cp
- except ImportError as e:
- pytest.skip(f"LFX components not available: {e}")
-
- agent = cp.AgentComponent()
-
- # Test the agent.set() configuration
- agent.set(
- model_name="gpt-4o-mini",
- agent_llm="OpenAI",
- api_key="test-key", # Use test key
- input_value="Test message",
- tools=[], # Empty tools for this test
- )
-
- # Verify configuration was applied
- assert agent.model_name == "gpt-4o-mini"
- assert agent.agent_llm == "OpenAI"
- assert agent.api_key == "test-key"
- assert agent.input_value == "Test message"
-
- def test_chat_output_chaining_pattern(self):
- """Test the chat output chaining pattern."""
- try:
- from lfx import components as cp
- from lfx.schema.message import Message
- except ImportError as e:
- pytest.skip(f"LFX components not available: {e}")
-
- chat_output = cp.ChatOutput()
-
- # Test the chaining pattern: cp.ChatOutput().set(input_value=agent.message_response)
- mock_message = Message(text="Test response")
- result = chat_output.set(input_value=mock_message)
-
- # Should return the chat_output instance for chaining
- assert result is chat_output
- assert chat_output.input_value == mock_message
-
- def test_logging_configuration(self):
- """Test LogConfig setup for the workflow."""
- try:
- from lfx.log.logger import LogConfig
- except ImportError as e:
- pytest.skip(f"LFX logging not available: {e}")
-
- # Test LogConfig creation for the workflow
- log_config = LogConfig(
- log_level="INFO",
- log_file=Path("langflow.log"),
- )
-
- assert log_config is not None
- # LogConfig may be a dict or object, verify it contains the expected data
- if isinstance(log_config, dict):
- assert log_config.get("log_level") == "INFO"
- assert log_config.get("log_file") == Path("langflow.log")
- else:
- assert hasattr(log_config, "log_level") or hasattr(log_config, "__dict__")
-
- # Cleanup
- log_file = Path("langflow.log")
- if log_file.exists():
- log_file.unlink(missing_ok=True)
-
- def test_environment_variable_handling(self):
- """Test that environment variable handling works properly."""
- # Test os.getenv("OPENAI_API_KEY") pattern
- import os
-
- # This should not raise an error even if the env var is not set
- api_key = os.getenv("OPENAI_API_KEY")
-
- # Should return None if not set, string if set
- assert api_key is None or isinstance(api_key, str)
-
- @pytest.mark.skipif(not os.getenv("OPENAI_API_KEY"), reason="OPENAI_API_KEY required for integration test")
- def test_complete_workflow_integration(self):
- """Test the complete agent workflow integration."""
- try:
- from lfx import components as cp
- from lfx.graph import Graph
- from lfx.log.logger import LogConfig
- except ImportError as e:
- pytest.skip(f"LFX components not available: {e}")
-
- # Set up the complete workflow
- log_config = LogConfig(
- log_level="INFO",
- log_file=Path("langflow.log"),
- )
-
- chat_input = cp.ChatInput()
- agent = cp.AgentComponent()
- url_component = cp.URLComponent()
-
- # Configure URL component
- url_component.set(urls=["https://httpbin.org/json"])
- tools = url_component.to_toolkit()
-
- # Configure agent with real API key
- agent.set(
- model_name="gpt-4o-mini",
- agent_llm="OpenAI",
- api_key=os.getenv("OPENAI_API_KEY"),
- input_value="What is 2 + 2?", # Simple math question
- tools=tools,
- )
-
- chat_output = cp.ChatOutput()
-
- # Create and verify graph
- graph = Graph(chat_input, chat_output, log_config=log_config)
- assert graph is not None
-
- # The actual execution would happen when the graph is run
- # For now, just verify the setup completed without errors
-
- # Cleanup
- log_file = Path("langflow.log")
- if log_file.exists():
- log_file.unlink(missing_ok=True)
diff --git a/src/backend/tests/unit/test_telemetry.py b/src/backend/tests/unit/test_telemetry.py
deleted file mode 100644
index ad56a0cb77e3..000000000000
--- a/src/backend/tests/unit/test_telemetry.py
+++ /dev/null
@@ -1,113 +0,0 @@
-import re
-import threading
-from concurrent.futures import ThreadPoolExecutor, as_completed
-
-import pytest
-from langflow.services.telemetry.opentelemetry import OpenTelemetry
-
-fixed_labels = {"flow_id": "this_flow_id", "service": "this", "user": "that"}
-
-
-@pytest.fixture
-def opentelemetry_instance():
- return OpenTelemetry()
-
-
-def test_init(opentelemetry_instance):
- assert isinstance(opentelemetry_instance, OpenTelemetry)
- assert len(opentelemetry_instance._metrics) > 1
- assert len(opentelemetry_instance._metrics) == len(opentelemetry_instance._metrics_registry) == 2
- assert "file_uploads" in opentelemetry_instance._metrics
-
-
-def test_gauge(opentelemetry_instance):
- opentelemetry_instance.update_gauge("file_uploads", 1024, fixed_labels)
-
-
-def test_gauge_with_counter_method(opentelemetry_instance):
- with pytest.raises(TypeError, match="Metric 'file_uploads' is not a counter"):
- opentelemetry_instance.increment_counter(metric_name="file_uploads", value=1, labels=fixed_labels)
-
-
-def test_gauge_with_histogram_method(opentelemetry_instance):
- with pytest.raises(TypeError, match="Metric 'file_uploads' is not a histogram"):
- opentelemetry_instance.observe_histogram("file_uploads", 1, fixed_labels)
-
-
-def test_gauge_with_up_down_counter_method(opentelemetry_instance):
- with pytest.raises(TypeError, match="Metric 'file_uploads' is not an up down counter"):
- opentelemetry_instance.up_down_counter("file_uploads", 1, labels=fixed_labels)
-
-
-def test_increment_counter(opentelemetry_instance):
- opentelemetry_instance.increment_counter(metric_name="num_files_uploaded", value=5, labels=fixed_labels)
-
-
-def test_increment_counter_empty_label(opentelemetry_instance):
- with pytest.raises(ValueError, match="Labels must be provided for the metric"):
- opentelemetry_instance.increment_counter(metric_name="num_files_uploaded", value=5, labels={})
-
-
-def test_increment_counter_missing_mandatory_label(opentelemetry_instance):
- with pytest.raises(ValueError, match=re.escape("Missing required labels: {'flow_id'}")):
- opentelemetry_instance.increment_counter(metric_name="num_files_uploaded", value=5, labels={"service": "one"})
-
-
-def test_increment_counter_unregistered_metric(opentelemetry_instance):
- with pytest.raises(ValueError, match="Metric 'num_files_uploaded_1' is not registered"):
- opentelemetry_instance.increment_counter(metric_name="num_files_uploaded_1", value=5, labels=fixed_labels)
-
-
-def test_opentelemetry_singleton(opentelemetry_instance):
- opentelemetry_instance_2 = OpenTelemetry()
- assert opentelemetry_instance is opentelemetry_instance_2
-
- opentelemetry_instance_3 = OpenTelemetry(prometheus_enabled=False)
- assert opentelemetry_instance is opentelemetry_instance_3
- assert opentelemetry_instance.prometheus_enabled == opentelemetry_instance_3.prometheus_enabled
-
-
-def test_missing_labels(opentelemetry_instance):
- with pytest.raises(ValueError, match="Labels must be provided for the metric"):
- opentelemetry_instance.increment_counter(metric_name="num_files_uploaded", labels=None, value=1.0)
- with pytest.raises(ValueError, match="Labels must be provided for the metric"):
- opentelemetry_instance.up_down_counter("num_files_uploaded", 1, None)
- with pytest.raises(ValueError, match="Labels must be provided for the metric"):
- opentelemetry_instance.update_gauge(metric_name="num_files_uploaded", value=1.0, labels={})
- with pytest.raises(ValueError, match="Labels must be provided for the metric"):
- opentelemetry_instance.observe_histogram("num_files_uploaded", 1, {})
-
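-# Label validation as pinned down by the error messages above (hypothetical
-# sketch; the real checks live inside the OpenTelemetry wrapper):
-#
-#     if not labels:
-#         raise ValueError("Labels must be provided for the metric")
-#     if missing := required_labels - labels.keys():
-#         raise ValueError(f"Missing required labels: {missing}")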
-
-def test_multithreaded_singleton():
- def create_instance():
- return OpenTelemetry()
-
- # Create instances in multiple threads
- with ThreadPoolExecutor(max_workers=10) as executor:
- futures = [executor.submit(create_instance) for _ in range(100)]
- instances = [future.result() for future in as_completed(futures)]
-
- # Check that all instances are the same
- first_instance = instances[0]
- for instance in instances[1:]:
- assert instance is first_instance
-
-
-def test_multithreaded_singleton_race_condition():
- # This test simulates a potential race condition
- start_event = threading.Event()
-
- def create_instance():
- start_event.wait() # Wait for all threads to be ready
- return OpenTelemetry()
-
- # Create instances in multiple threads, all starting at the same time
- with ThreadPoolExecutor(max_workers=100) as executor:
- futures = [executor.submit(create_instance) for _ in range(100)]
- start_event.set() # Start all threads simultaneously
- instances = [future.result() for future in as_completed(futures)]
-
- # Check that all instances are the same
- first_instance = instances[0]
- for instance in instances[1:]:
- assert instance is first_instance
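-
-# The thread-safety pinned down above is typically achieved with double-checked
-# locking (hypothetical sketch, not the actual OpenTelemetry implementation):
-#
-#     class Singleton:
-#         _instance = None
-#         _lock = threading.Lock()
-#
-#         def __new__(cls, *args, **kwargs):
-#             if cls._instance is None:
-#                 with cls._lock:
-#                     if cls._instance is None:
-#                         cls._instance = super().__new__(cls)
-#             return cls._instance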
diff --git a/src/backend/tests/unit/test_template.py b/src/backend/tests/unit/test_template.py
deleted file mode 100644
index b35291e1758c..000000000000
--- a/src/backend/tests/unit/test_template.py
+++ /dev/null
@@ -1,93 +0,0 @@
-import importlib
-
-import pytest
-from pydantic import BaseModel
-
-from lfx.utils.util import build_template_from_function, get_base_classes, get_default_factory
-
-
-# Dummy classes for testing purposes
-class Parent(BaseModel):
- """Parent Class."""
-
- parent_field: str
-
-
-class Child(Parent):
- """Child Class."""
-
- child_field: int
-
-
-class ExampleClass1(BaseModel):
- """Example class 1."""
-
- def __init__(self, data: list[int] | None = None):
- self.data = data or [1, 2, 3]
-
-
-class ExampleClass2(BaseModel):
- """Example class 2."""
-
- def __init__(self, data: dict[str, int] | None = None):
- self.data = data or {"a": 1, "b": 2, "c": 3}
-
-
-def example_loader_1() -> ExampleClass1:
- """Example loader function 1."""
- return ExampleClass1()
-
-
-def example_loader_2() -> ExampleClass2:
- """Example loader function 2."""
- return ExampleClass2()
-
-
-def test_build_template_from_function():
- type_to_loader_dict = {
- "example1": example_loader_1,
- "example2": example_loader_2,
- }
-
- # Test with valid name
- result = build_template_from_function("ExampleClass1", type_to_loader_dict)
-
- assert result is not None
- assert "template" in result
- assert "description" in result
- assert "base_classes" in result
-
- # Test with add_function=True
- result_with_function = build_template_from_function("ExampleClass1", type_to_loader_dict, add_function=True)
- assert result_with_function is not None
- assert "Callable" in result_with_function["base_classes"]
-
- # Test with invalid name
- with pytest.raises(ValueError, match=r".* not found"):
- build_template_from_function("NonExistent", type_to_loader_dict)
-
-
-# Test get_base_classes
-def test_get_base_classes():
- base_classes_parent = get_base_classes(Parent)
- base_classes_child = get_base_classes(Child)
-
- assert "Parent" in base_classes_parent
- assert "Child" in base_classes_child
- assert "Parent" in base_classes_child
-
-
-# Test get_default_factory
-def test_get_default_factory():
- module_name = "lfx.utils.util"
-    function_repr = "<function dummy_function>"
-
- def dummy_function():
- return "default_value"
-
-    # Attach dummy_function to the module under test
- importlib.import_module(module_name).dummy_function = dummy_function
-
- default_value = get_default_factory(module_name, function_repr)
-
- assert default_value == "default_value"
diff --git a/src/backend/tests/unit/test_user.py b/src/backend/tests/unit/test_user.py
deleted file mode 100644
index c9eb9cfd99d2..000000000000
--- a/src/backend/tests/unit/test_user.py
+++ /dev/null
@@ -1,257 +0,0 @@
-from datetime import datetime, timezone
-
-import pytest
-from httpx import AsyncClient
-from langflow.services.auth.utils import create_super_user, get_password_hash
-from langflow.services.database.models.user import UserUpdate
-from langflow.services.database.models.user.model import User
-from langflow.services.database.utils import session_getter
-from langflow.services.deps import get_db_service, get_settings_service
-from sqlmodel import select
-
-
-@pytest.fixture
-async def super_user(client): # noqa: ARG001
- settings_manager = get_settings_service()
- auth_settings = settings_manager.auth_settings
- async with session_getter(get_db_service()) as db:
- return await create_super_user(
- db=db,
- username=auth_settings.SUPERUSER,
- password=auth_settings.SUPERUSER_PASSWORD,
- )
-
-
-@pytest.fixture
-async def super_user_headers(
- client: AsyncClient,
- super_user, # noqa: ARG001
-):
- settings_service = get_settings_service()
- auth_settings = settings_service.auth_settings
- login_data = {
- "username": auth_settings.SUPERUSER,
- "password": auth_settings.SUPERUSER_PASSWORD,
- }
- response = await client.post("api/v1/login", data=login_data)
- assert response.status_code == 200
- tokens = response.json()
- a_token = tokens["access_token"]
- return {"Authorization": f"Bearer {a_token}"}
-
-
-@pytest.fixture
-async def deactivated_user(client): # noqa: ARG001
- async with session_getter(get_db_service()) as session:
- user = User(
- username="deactivateduser",
- password=get_password_hash("testpassword"),
- is_active=False,
- is_superuser=False,
- last_login_at=datetime.now(tz=timezone.utc),
- )
- session.add(user)
- await session.commit()
- await session.refresh(user)
- return user
-
-
-async def test_user_waiting_for_approval(client):
- username = "waitingforapproval"
- password = "testpassword" # noqa: S105
-
- # Debug: Check if the user already exists
- async with session_getter(get_db_service()) as session:
- stmt = select(User).where(User.username == username)
- existing_user = (await session.exec(stmt)).first()
- if existing_user:
- pytest.fail(
- f"User {username} already exists before the test. Database URL: {get_db_service().database_url}"
- )
-
- # Create a user that is not active and has never logged in
- async with session_getter(get_db_service()) as session:
- user = User(
- username=username,
- password=get_password_hash(password),
- is_active=False,
- last_login_at=None,
- )
- session.add(user)
- await session.commit()
-
- login_data = {"username": "waitingforapproval", "password": "testpassword"}
- response = await client.post("api/v1/login", data=login_data)
- assert response.status_code == 400
- assert response.json()["detail"] == "Waiting for approval"
-
- # Debug: Check if the user still exists after the test
- async with session_getter(get_db_service()) as session:
- stmt = select(User).where(User.username == username)
- existing_user = (await session.exec(stmt)).first()
-        if not existing_user:
-            pytest.fail(f"User {username} does not exist after the test. This is unexpected.")
-
-
-@pytest.mark.api_key_required
-async def test_deactivated_user_cannot_login(client: AsyncClient, deactivated_user):
- login_data = {"username": deactivated_user.username, "password": "testpassword"}
- response = await client.post("api/v1/login", data=login_data)
- assert response.status_code == 401, response.json()
- assert response.json()["detail"] == "Inactive user", response.text
-
-
-@pytest.mark.usefixtures("deactivated_user")
-async def test_deactivated_user_cannot_access(client: AsyncClient, logged_in_headers):
- # Assuming the headers for deactivated_user
- response = await client.get("api/v1/users/", headers=logged_in_headers)
- assert response.status_code == 403, response.status_code
- assert response.json()["detail"] == "The user doesn't have enough privileges", response.text
-
-
-@pytest.mark.api_key_required
-async def test_data_consistency_after_update(client: AsyncClient, active_user, logged_in_headers, super_user_headers):
- user_id = active_user.id
- update_data = UserUpdate(is_active=False)
-
- response = await client.patch(f"/api/v1/users/{user_id}", json=update_data.model_dump(), headers=super_user_headers)
- assert response.status_code == 200, response.json()
-
- # Fetch the updated user from the database
- response = await client.get("api/v1/users/whoami", headers=logged_in_headers)
- assert response.status_code == 401, response.json()
- assert response.json()["detail"] == "User not found or is inactive."
-
-
-@pytest.mark.api_key_required
-async def test_data_consistency_after_delete(client: AsyncClient, test_user, super_user_headers):
- user_id = test_user.get("id")
- response = await client.delete(f"/api/v1/users/{user_id}", headers=super_user_headers)
- assert response.status_code == 200, response.json()
-
- # Attempt to fetch the deleted user from the database
- response = await client.get("api/v1/users/", headers=super_user_headers)
- assert response.status_code == 200
- assert all(user["id"] != user_id for user in response.json()["users"])
-
-
-@pytest.mark.api_key_required
-async def test_inactive_user(client: AsyncClient):
- # Create a user that is not active and has a last_login_at value
- async with session_getter(get_db_service()) as session:
- user = User(
- username="inactiveuser",
- password=get_password_hash("testpassword"),
- is_active=False,
- last_login_at=datetime(2023, 1, 1, 0, 0, 0, tzinfo=timezone.utc),
- )
- session.add(user)
- await session.commit()
-
- login_data = {"username": "inactiveuser", "password": "testpassword"}
- response = await client.post("api/v1/login", data=login_data)
- assert response.status_code == 401
- assert response.json()["detail"] == "Inactive user"
-
-
-@pytest.mark.api_key_required
-def test_add_user(test_user):
- assert test_user["username"] == "testuser"
-
-
-@pytest.mark.api_key_required
-async def test_read_all_users(client: AsyncClient, super_user_headers):
- response = await client.get("api/v1/users/", headers=super_user_headers)
- assert response.status_code == 200, response.json()
- assert isinstance(response.json()["users"], list)
-
-
-@pytest.mark.api_key_required
-async def test_normal_user_cant_read_all_users(client: AsyncClient, logged_in_headers):
- response = await client.get("api/v1/users/", headers=logged_in_headers)
- assert response.status_code == 403, response.json()
- assert response.json() == {"detail": "The user doesn't have enough privileges"}
-
-
-@pytest.mark.api_key_required
-async def test_patch_user(client: AsyncClient, active_user, logged_in_headers):
- user_id = active_user.id
- update_data = UserUpdate(
- username="newname",
- )
-
- response = await client.patch(f"/api/v1/users/{user_id}", json=update_data.model_dump(), headers=logged_in_headers)
- assert response.status_code == 200, response.json()
- update_data = UserUpdate(
- profile_image="new_image",
- )
-
- response = await client.patch(f"/api/v1/users/{user_id}", json=update_data.model_dump(), headers=logged_in_headers)
- assert response.status_code == 200, response.json()
-
-
-@pytest.mark.api_key_required
-async def test_patch_reset_password(client: AsyncClient, active_user, logged_in_headers):
- user_id = active_user.id
- update_data = UserUpdate(
- password="newpassword", # noqa: S106
- )
-
- response = await client.patch(
- f"/api/v1/users/{user_id}/reset-password",
- json=update_data.model_dump(),
- headers=logged_in_headers,
- )
- assert response.status_code == 200, response.json()
- # Now we need to test if the new password works
- login_data = {"username": active_user.username, "password": "newpassword"}
- response = await client.post("api/v1/login", data=login_data)
- assert response.status_code == 200
-
-
-@pytest.mark.api_key_required
-@pytest.mark.usefixtures("active_user")
-async def test_patch_user_wrong_id(client: AsyncClient, logged_in_headers):
- user_id = "wrong_id"
- update_data = UserUpdate(
- username="newname",
- )
-
- response = await client.patch(f"/api/v1/users/{user_id}", json=update_data.model_dump(), headers=logged_in_headers)
- assert response.status_code == 422, response.json()
- json_response = response.json()
- detail = json_response["detail"]
- error = detail[0]
- assert error["loc"] == ["path", "user_id"]
- assert error["type"] == "uuid_parsing"
-
-
-@pytest.mark.api_key_required
-async def test_delete_user(client: AsyncClient, test_user, super_user_headers):
- user_id = test_user["id"]
- response = await client.delete(f"/api/v1/users/{user_id}", headers=super_user_headers)
- assert response.status_code == 200
- assert response.json() == {"detail": "User deleted"}
-
-
-@pytest.mark.api_key_required
-@pytest.mark.usefixtures("test_user")
-async def test_delete_user_wrong_id(client: AsyncClient, super_user_headers):
- user_id = "wrong_id"
- response = await client.delete(f"/api/v1/users/{user_id}", headers=super_user_headers)
- assert response.status_code == 422
- json_response = response.json()
- detail = json_response["detail"]
- error = detail[0]
- assert error["loc"] == ["path", "user_id"]
- assert error["type"] == "uuid_parsing"
-
-
-@pytest.mark.api_key_required
-async def test_normal_user_cant_delete_user(client: AsyncClient, test_user, logged_in_headers):
- user_id = test_user["id"]
- response = await client.delete(f"/api/v1/users/{user_id}", headers=logged_in_headers)
- assert response.status_code == 403
- assert response.json() == {"detail": "The user doesn't have enough privileges"}
diff --git a/src/backend/tests/unit/test_validate_code.py b/src/backend/tests/unit/test_validate_code.py
deleted file mode 100644
index 0e9394efa1c3..000000000000
--- a/src/backend/tests/unit/test_validate_code.py
+++ /dev/null
@@ -1,192 +0,0 @@
-from pathlib import Path
-from unittest import mock
-
-import pytest
-from langflow.utils.validate import (
- create_class,
- create_function,
- execute_function,
- extract_function_name,
- validate_code,
-)
-from requests.exceptions import MissingSchema
-
-
-def test_create_function():
- code = """
-from pathlib import Path
-
-def my_function(x: str) -> Path:
- return Path(x)
-"""
-
- function_name = extract_function_name(code)
- function = create_function(code, function_name)
- result = function("test")
- assert result == Path("test")
-
-
-def test_validate_code():
- # Test case with a valid import and function
- code1 = """
-import math
-
-def square(x):
- return x ** 2
-"""
- errors1 = validate_code(code1)
- assert errors1 == {"imports": {"errors": []}, "function": {"errors": []}}
-
- # Test case with an invalid import and valid function
- code2 = """
-import non_existent_module
-
-def square(x):
- return x ** 2
-"""
- errors2 = validate_code(code2)
- assert errors2 == {
- "imports": {"errors": ["No module named 'non_existent_module'"]},
- "function": {"errors": []},
- }
-
- # Test case with a valid import and invalid function syntax
- code3 = """
-import math
-
-def square(x)
- return x ** 2
-"""
- errors3 = validate_code(code3)
- assert errors3 == {
- "imports": {"errors": []},
-        "function": {"errors": ["expected ':' (<unknown>, line 4)"]},
- }
-
-
-def test_execute_function_success():
- code = """
-import math
-
-def my_function(x):
- return math.sin(x) + 1
- """
- result = execute_function(code, "my_function", 0.5)
- assert result == 1.479425538604203
-
-
-def test_execute_function_missing_module():
- code = """
-import some_missing_module
-
-def my_function(x):
- return some_missing_module.some_function(x)
- """
- with pytest.raises(ModuleNotFoundError):
- execute_function(code, "my_function", 0.5)
-
-
-def test_execute_function_missing_function():
- code = """
-import math
-
-def my_function(x):
- return math.some_missing_function(x)
- """
- with pytest.raises(AttributeError):
- execute_function(code, "my_function", 0.5)
-
-
-def test_execute_function_missing_schema():
- code = """
-import requests
-
-def my_function(x):
- return requests.get(x).text
- """
- with mock.patch("requests.get", side_effect=MissingSchema), pytest.raises(MissingSchema):
- execute_function(code, "my_function", "invalid_url")
-
-
-def test_create_class():
- code = """
-from langflow.custom import CustomComponent
-
-class ExternalClass:
- def __init__(self, value):
- self.value = value
-
-class MyComponent(CustomComponent):
- def build(self):
- return ExternalClass("test")
-"""
- class_name = "MyComponent"
- created_class = create_class(code, class_name)
- instance = created_class()
- result = instance.build()
- assert result.value == "test"
-
-
-def test_create_class_module_import():
- code = """
-from langflow.custom import CustomComponent
-from PIL import ImageDraw
-
-class ExternalClass:
- def __init__(self, value):
- self.value = value
-
-class MyComponent(CustomComponent):
- def build(self):
- return ExternalClass("test")
-"""
- class_name = "MyComponent"
- created_class = create_class(code, class_name)
- instance = created_class()
- result = instance.build()
- assert result.value == "test"
-
-
-def test_create_class_with_multiple_external_classes():
- code = """
-from langflow.custom import CustomComponent
-
-class ExternalClass1:
- def __init__(self, value):
- self.value = value
-
-class ExternalClass2:
- def __init__(self, value):
- self.value = value
-
-class MyComponent(CustomComponent):
- def build(self):
- return ExternalClass1("test1"), ExternalClass2("test2")
-"""
- class_name = "MyComponent"
- created_class = create_class(code, class_name)
- instance = created_class()
- result1, result2 = instance.build()
- assert result1.value == "test1"
- assert result2.value == "test2"
-
-
-def test_create_class_with_external_variables_and_functions():
- code = """
-from langflow.custom import CustomComponent
-
-external_variable = "external_value"
-
-def external_function():
- return "external_function_value"
-
-class MyComponent(CustomComponent):
- def build(self):
- return external_variable, external_function()
-"""
- class_name = "MyComponent"
- created_class = create_class(code, class_name)
- instance = created_class()
- result_variable, result_function = instance.build()
- assert result_variable == "external_value"
- assert result_function == "external_function_value"
diff --git a/src/backend/tests/unit/test_version.py b/src/backend/tests/unit/test_version.py
deleted file mode 100644
index d068b52987cd..000000000000
--- a/src/backend/tests/unit/test_version.py
+++ /dev/null
@@ -1,17 +0,0 @@
-from langflow.utils.version import _compute_non_prerelease_version, get_version_info
-
-
-def test_version():
- info = get_version_info()
- assert info["version"] is not None
- assert info["main_version"] is not None
- assert info["package"] is not None
-
-
-def test_compute_main():
- assert _compute_non_prerelease_version("1.0.10.post0") == "1.0.10"
- assert _compute_non_prerelease_version("1.0.10.a1") == "1.0.10"
- assert _compute_non_prerelease_version("1.0.10.b112") == "1.0.10"
- assert _compute_non_prerelease_version("1.0.10.rc0") == "1.0.10"
- assert _compute_non_prerelease_version("1.0.10.dev9") == "1.0.10"
- assert _compute_non_prerelease_version("1.0.10") == "1.0.10"
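-
-# A sketch of the rule these cases pin down (hypothetical; the real
-# _compute_non_prerelease_version may differ): keep only the leading numeric
-# release segments of the version string, e.g.
-#
-#     ".".join(part for part in version.split(".") if part.isdigit())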
diff --git a/src/backend/tests/unit/test_voice_mode.py b/src/backend/tests/unit/test_voice_mode.py
deleted file mode 100644
index 3bf3b2c8cc2d..000000000000
--- a/src/backend/tests/unit/test_voice_mode.py
+++ /dev/null
@@ -1,106 +0,0 @@
-import numpy as np
-import pytest
-
-try:
- import webrtcvad
-except ImportError:
- pytestmark = pytest.mark.skip(reason="webrtcvad is not installed. Skipping voice mode tests.")
-
-from langflow.utils.voice_utils import (
- BYTES_PER_16K_FRAME,
- BYTES_PER_24K_FRAME,
- SAMPLE_RATE_24K,
- VAD_SAMPLE_RATE_16K,
- resample_24k_to_16k,
-)
-
-
-def test_resample_24k_to_16k_valid_frame():
- """Test that valid 960-byte frames (20ms @ 24kHz) resample to 640 bytes (20ms @ 16kHz)."""
- # Generate a fake 20ms @ 24kHz frame (960 bytes)
- duration_samples_24k = int(0.02 * SAMPLE_RATE_24K) # 480 samples
- # Use the newer numpy random Generator
- rng = np.random.default_rng()
- fake_frame_24k = (rng.random(duration_samples_24k) * 32767).astype(np.int16)
- frame_24k_bytes = fake_frame_24k.tobytes()
-
- assert len(frame_24k_bytes) == BYTES_PER_24K_FRAME # 960
-
- # Resample
- frame_16k_bytes = resample_24k_to_16k(frame_24k_bytes)
-
- # Check length after resampling
- assert len(frame_16k_bytes) == BYTES_PER_16K_FRAME # 640
-
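-# Frame-size arithmetic behind the constants above: 20 ms of 16-bit mono PCM is
-# 0.02 s * 24000 Hz * 2 bytes = 960 bytes at 24 kHz (BYTES_PER_24K_FRAME) and
-# 0.02 s * 16000 Hz * 2 bytes = 640 bytes at 16 kHz (BYTES_PER_16K_FRAME).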
-
-def test_resample_24k_to_16k_invalid_frame():
- """Test that passing an invalid size frame raises a ValueError."""
- invalid_frame = b"\x00\x01" * 100 # only 200 bytes, not 960
- with pytest.raises(ValueError, match="Expected exactly"):
- _ = resample_24k_to_16k(invalid_frame)
-
-
-def test_webrtcvad_silence_detection():
- """Make sure that passing all-zero frames leads to is_speech == False."""
- vad = webrtcvad.Vad(mode=0)
-
- # Generate 1 second of silence @16k, chunk it in 20ms frames
- num_samples = VAD_SAMPLE_RATE_16K # 1 second
- silent_audio = np.zeros(num_samples, dtype=np.int16).tobytes()
-
- frame_size = BYTES_PER_16K_FRAME # 640
- num_frames = len(silent_audio) // frame_size
-
- speech_frames = 0
- for i in range(num_frames):
- frame_16k = silent_audio[i * frame_size : (i + 1) * frame_size]
-
- is_speech = vad.is_speech(frame_16k, VAD_SAMPLE_RATE_16K)
- if is_speech:
- speech_frames += 1
-
- # Expect zero frames labeled as speech
- assert speech_frames == 0
-
-
-def test_webrtcvad_with_real_data():
- """End-to-end test.
-
- - Generate synthetic 24kHz audio
- - Break into 20ms frames
- - Resample to 16k
- - Check how many frames VAD detects as speech.
- This test is approximate, since random audio won't always be "speech."
- """
- # Instead of reading from a file, generate synthetic audio
- # Create 1 second of random audio data at 24kHz
- num_samples = SAMPLE_RATE_24K # 1 second
- rng = np.random.default_rng(seed=42) # Use a fixed seed for reproducibility
-
- # Generate random audio (this won't be detected as speech, but that's fine for testing)
- raw_data_24k = (rng.random(num_samples) * 32767).astype(np.int16).tobytes()
-
- # We'll chunk into 20ms frames (960 bytes each)
- frame_size_24k = BYTES_PER_24K_FRAME # 960
- total_frames = len(raw_data_24k) // frame_size_24k
-
- vad = webrtcvad.Vad(mode=2)
-
- resampled_all = bytearray()
- speech_count = 0
- for i in range(total_frames):
- frame_24k = raw_data_24k[i * frame_size_24k : (i + 1) * frame_size_24k]
- frame_16k = resample_24k_to_16k(frame_24k)
-
- resampled_all.extend(frame_16k) # Append to our buffer
-
- is_speech = vad.is_speech(frame_16k, VAD_SAMPLE_RATE_16K)
- if is_speech:
- speech_count += 1
-
- # For random noise, we expect very few frames to be detected as speech
- # We're not making a strict assertion, just verifying the process works
- assert len(resampled_all) == (total_frames * BYTES_PER_16K_FRAME)
-
-    # Sanity-check the speech detection rate; random noise should rarely register as speech
-    speech_rate = speech_count / total_frames if total_frames > 0 else 0
-    assert 0.0 <= speech_rate <= 1.0
diff --git a/src/backend/tests/unit/test_webhook.py b/src/backend/tests/unit/test_webhook.py
deleted file mode 100644
index 534460572b9e..000000000000
--- a/src/backend/tests/unit/test_webhook.py
+++ /dev/null
@@ -1,129 +0,0 @@
-import aiofiles
-import anyio
-import pytest
-
-
-@pytest.fixture(autouse=True)
-def _check_openai_api_key_in_environment_variables():
- pass
-
-
-async def test_webhook_endpoint_requires_api_key_when_auth_enabled(client, added_webhook_test):
- """Test that webhook endpoint requires API key when WEBHOOK_AUTH_ENABLE=true."""
- # Mock the settings service to enable webhook authentication
- from unittest.mock import patch
-
- with patch("langflow.services.auth.utils.get_settings_service") as mock_settings:
- mock_auth_settings = type("AuthSettings", (), {"WEBHOOK_AUTH_ENABLE": True})()
- mock_settings_service = type("SettingsService", (), {"auth_settings": mock_auth_settings})()
- mock_settings.return_value = mock_settings_service
-
- endpoint_name = added_webhook_test["endpoint_name"]
- endpoint = f"api/v1/webhook/{endpoint_name}"
-
- payload = {"path": "/tmp/test_file.txt"} # noqa: S108
-
- # Should fail without API key when webhook auth is enabled
- response = await client.post(endpoint, json=payload)
- assert response.status_code == 403
- assert "API key required when webhook authentication is enabled" in response.json()["detail"]
-
-
-async def test_webhook_endpoint_with_valid_api_key(client, added_webhook_test, created_api_key):
- """Test that webhook works when valid API key is provided."""
- endpoint_name = added_webhook_test["endpoint_name"]
- endpoint = f"api/v1/webhook/{endpoint_name}"
-
- # Create a temporary file
- async with aiofiles.tempfile.TemporaryDirectory() as tmp:
- file_path = anyio.Path(tmp) / "test_file.txt"
- payload = {"path": str(file_path)}
-
- # Should work with valid API key
- response = await client.post(endpoint, headers={"x-api-key": created_api_key.api_key}, json=payload)
- assert response.status_code == 202
- assert await file_path.exists(), f"File {file_path} does not exist"
-
-        # The flow is expected to clean up the file after processing
-        file_does_not_exist = not await file_path.exists()
- assert file_does_not_exist, f"File {file_path} still exists"
-
-
-async def test_webhook_endpoint_unauthorized_user_flow(client, added_webhook_test):
- """Test that webhook fails when user doesn't own the flow."""
- # Mock the settings service to enable webhook authentication
- from unittest.mock import patch
-
- with patch("langflow.services.auth.utils.get_settings_service") as mock_settings:
- mock_auth_settings = type("AuthSettings", (), {"WEBHOOK_AUTH_ENABLE": True})()
- mock_settings_service = type("SettingsService", (), {"auth_settings": mock_auth_settings})()
- mock_settings.return_value = mock_settings_service
-
- # This test would need a different user's API key to test authorization
- # For now, we'll use an invalid API key to simulate this
- endpoint_name = added_webhook_test["endpoint_name"]
- endpoint = f"api/v1/webhook/{endpoint_name}"
-
- payload = {"path": "/tmp/test_file.txt"} # noqa: S108
-
- # Should fail with invalid API key
- response = await client.post(endpoint, headers={"x-api-key": "invalid_key"}, json=payload)
- assert response.status_code == 403
- assert "Invalid API key" in response.json()["detail"]
-
-
-async def test_webhook_flow_on_run_endpoint(client, added_webhook_test, created_api_key):
- endpoint_name = added_webhook_test["endpoint_name"]
- endpoint = f"api/v1/run/{endpoint_name}?stream=false"
-    # Just check that a minimal payload on the run endpoint returns 200
- payload = {
- "output_type": "any",
- }
- response = await client.post(endpoint, headers={"x-api-key": created_api_key.api_key}, json=payload)
- assert response.status_code == 200, response.json()
-
-
-async def test_webhook_with_webhook_auth_disabled(client, added_webhook_test):
- """Test webhook behavior when WEBHOOK_AUTH_ENABLE=false - should work without API key."""
- # Mock the settings service to disable webhook authentication (default behavior)
- from unittest.mock import patch
-
- with patch("langflow.services.auth.utils.get_settings_service") as mock_settings:
- mock_auth_settings = type("AuthSettings", (), {"WEBHOOK_AUTH_ENABLE": False})()
- mock_settings_service = type("SettingsService", (), {"auth_settings": mock_auth_settings})()
- mock_settings.return_value = mock_settings_service
-
- endpoint_name = added_webhook_test["endpoint_name"]
- endpoint = f"api/v1/webhook/{endpoint_name}"
-
- payload = {"path": "/tmp/test_auto_login.txt"} # noqa: S108
-
- # Should work without API key when webhook auth is disabled
- response = await client.post(endpoint, json=payload)
- assert response.status_code == 202
-
-
-async def test_webhook_with_random_payload_requires_auth(client, added_webhook_test, created_api_key):
- """Test that webhook with random payload still requires authentication."""
- # Mock the settings service to enable webhook authentication
- from unittest.mock import patch
-
- with patch("langflow.services.auth.utils.get_settings_service") as mock_settings:
- mock_auth_settings = type("AuthSettings", (), {"WEBHOOK_AUTH_ENABLE": True})()
- mock_settings_service = type("SettingsService", (), {"auth_settings": mock_auth_settings})()
- mock_settings.return_value = mock_settings_service
-
- endpoint_name = added_webhook_test["endpoint_name"]
- endpoint = f"api/v1/webhook/{endpoint_name}"
-
- # Should fail without API key
- response = await client.post(endpoint, json="Random Payload")
- assert response.status_code == 403
-
- # Should work with API key (even with random payload)
- response = await client.post(
- endpoint,
- headers={"x-api-key": created_api_key.api_key},
- json="Random Payload",
- )
- assert response.status_code == 202
diff --git a/src/backend/tests/unit/utils/__init__.py b/src/backend/tests/unit/utils/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/src/backend/tests/unit/utils/test_connection_string_parser.py b/src/backend/tests/unit/utils/test_connection_string_parser.py
deleted file mode 100644
index 7492b4162c62..000000000000
--- a/src/backend/tests/unit/utils/test_connection_string_parser.py
+++ /dev/null
@@ -1,20 +0,0 @@
-import pytest
-from langflow.utils.connection_string_parser import transform_connection_string
-
-
-@pytest.mark.parametrize(
- ("connection_string", "expected"),
- [
- ("protocol:user:password@host", "protocol:user:password@host"),
- ("protocol:user@host", "protocol:user@host"),
- ("protocol:user:pass@word@host", "protocol:user:pass%40word@host"),
- ("protocol:user:pa:ss:word@host", "protocol:user:pa:ss:word@host"),
- ("user:password@host", "user:password@host"),
- ("protocol::password@host", "protocol::password@host"),
- ("protocol:user:password@", "protocol:user:password@"),
- ("protocol:user:pa@ss@word@host", "protocol:user:pa%40ss%40word@host"),
- ],
-)
-def test_transform_connection_string(connection_string, expected):
- result = transform_connection_string(connection_string)
- assert result == expected
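-
-# The cases above encode one rule: the password is whatever sits between the
-# second ':' and the *last* '@', and any '@' inside it is percent-encoded.
-# A rough sketch consistent with every case (hypothetical, not the actual
-# implementation):
-#
-#     def transform(conn: str) -> str:
-#         protocol, sep1, tail = conn.partition(":")
-#         user, sep2, rest = tail.partition(":")
-#         if not (sep1 and sep2 and "@" in rest):
-#             return conn
-#         password, _, host = rest.rpartition("@")
-#         return f"{protocol}:{user}:{password.replace('@', '%40')}@{host}"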
diff --git a/src/backend/tests/unit/utils/test_format_directory_path.py b/src/backend/tests/unit/utils/test_format_directory_path.py
deleted file mode 100644
index 690c9178eb9b..000000000000
--- a/src/backend/tests/unit/utils/test_format_directory_path.py
+++ /dev/null
@@ -1,47 +0,0 @@
-import pytest
-
-from lfx.base.data.utils import format_directory_path
-
-
-@pytest.mark.parametrize(
- ("input_path", "expected"),
- [
- # Test case 1: Standard path with no newlines (no change expected)
- ("/home/user/documents/file.txt", "/home/user/documents/file.txt"),
- # Test case 2: Path with newline character (replace \n with \\n)
- ("/home/user/docu\nments/file.txt", "/home/user/docu\\nments/file.txt"),
- # Test case 3: Path with multiple newline characters
- ("/home/user/\ndocu\nments/file.txt", "/home/user/\\ndocu\\nments/file.txt"),
- # Test case 4: Path with only newline characters
- ("\n\n\n", "\\n\\n\\n"),
- # Test case 5: Empty path (as per the original function, this remains an empty string)
- ("", ""),
- # Test case 6: Path with mixed newlines and other special characters
- ("/home/user/my-\ndocs/special_file!.pdf", "/home/user/my-\\ndocs/special_file!.pdf"),
- # Test case 7: Windows-style path with newline
- ("C:\\Users\\\nDocuments\\file.txt", "C:\\Users\\\\nDocuments\\file.txt"), # No conversion of backslashes
- # Test case 8: Path with trailing newline
- ("/home/user/documents/\n", "/home/user/documents/\\n"),
- # Test case 9: Path with leading newline
- ("\n/home/user/documents/", "\\n/home/user/documents/"),
- # Test case 10: Path with multiple consecutive newlines
- ("/home/user/docu\n\nments/file.txt", "/home/user/docu\\n\\nments/file.txt"),
- # Test case 11: Windows-style path (backslashes remain unchanged)
- ("C:\\Users\\Documents\\file.txt", "C:\\Users\\Documents\\file.txt"),
- # Test case 12: Windows path with trailing backslash
- ("C:\\Users\\Documents\\", "C:\\Users\\Documents\\"),
- # Test case 13: Mixed separators (leave as is)
- ("C:/Users\\Documents/file.txt", "C:/Users\\Documents/file.txt"),
- # Test case 14: Network path (UNC) (leave backslashes as is)
- ("\\\\server\\share\\file.txt", "\\\\server\\share\\file.txt"),
- ],
-)
-def test_format_directory_path(input_path, expected):
- result = format_directory_path(input_path)
- assert result == expected
-
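-# All fourteen cases above reduce to a single operation: each newline in the
-# path is replaced with the two-character sequence backslash + 'n'
-# (path.replace("\n", "\\n")); slashes and backslashes are left untouched.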
-
-# Additional test for type checking
-def test_format_directory_path_type():
- result = format_directory_path("/home/user/file.txt")
- assert isinstance(result, str)
diff --git a/src/backend/tests/unit/utils/test_image_utils.py b/src/backend/tests/unit/utils/test_image_utils.py
deleted file mode 100644
index 31c2432aba3e..000000000000
--- a/src/backend/tests/unit/utils/test_image_utils.py
+++ /dev/null
@@ -1,108 +0,0 @@
-import base64
-
-import pytest
-from langflow.utils.image import convert_image_to_base64, create_data_url, create_image_content_dict
-
-
-@pytest.fixture
-def sample_image(tmp_path):
- """Create a sample image file for testing."""
- image_path = tmp_path / "test_image.png"
- # Create a small black 1x1 pixel PNG file
- image_content = base64.b64decode(
- "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAACklEQVR4nGMAAQAABQABDQottAAAAABJRU5ErkJggg=="
- )
- image_path.write_bytes(image_content)
- return image_path
-
-
-def test_convert_image_to_base64_success(sample_image):
- """Test successful conversion of image to base64."""
- base64_str = convert_image_to_base64(sample_image)
- assert isinstance(base64_str, str)
- # Verify it's valid base64
- assert base64.b64decode(base64_str)
-
-
-def test_convert_image_to_base64_empty_path():
- """Test conversion with empty path."""
- with pytest.raises(ValueError, match="Image path cannot be empty"):
- convert_image_to_base64("")
-
-
-def test_convert_image_to_base64_nonexistent_file():
- """Test conversion with non-existent file."""
- with pytest.raises(FileNotFoundError, match="Image file not found"):
- convert_image_to_base64("nonexistent.png")
-
-
-def test_convert_image_to_base64_directory(tmp_path):
- """Test conversion with directory path instead of file."""
- with pytest.raises(ValueError, match="Path is not a file"):
- convert_image_to_base64(tmp_path)
-
-
-def test_create_data_url_success(sample_image):
- """Test successful creation of data URL."""
- data_url = create_data_url(sample_image)
- assert data_url.startswith("data:image/png;base64,")
- # Verify the base64 part is valid
- base64_part = data_url.split(",")[1]
- assert base64.b64decode(base64_part)
-
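-# The URLs asserted here follow the RFC 2397 data-URL scheme:
-# "data:<mime_type>;base64,<base64-encoded payload>".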
-
-def test_create_data_url_with_custom_mime(sample_image):
- """Test creation of data URL with custom MIME type."""
- custom_mime = "image/custom"
- data_url = create_data_url(sample_image, mime_type=custom_mime)
- assert data_url.startswith(f"data:{custom_mime};base64,")
-
-
-def test_create_data_url_invalid_file():
- """Test creation of data URL with invalid file."""
- with pytest.raises(FileNotFoundError):
- create_data_url("nonexistent.jpg")
-
-
-def test_create_data_url_unrecognized_extension(tmp_path):
- """Test creation of data URL with unrecognized file extension."""
- invalid_file = tmp_path / "test.unknown"
- invalid_file.touch()
- with pytest.raises(ValueError, match="Could not determine MIME type"):
- create_data_url(invalid_file)
-
-
-def test_create_image_content_dict_success(sample_image):
- """Test successful creation of image content dict."""
- content_dict = create_image_content_dict(sample_image)
- assert content_dict["type"] == "image"
- assert content_dict["source_type"] == "url"
- assert "url" in content_dict
- assert content_dict["url"].startswith("data:image/png;base64,")
- # Verify the base64 part is valid
- base64_part = content_dict["url"].split(",")[1]
- assert base64.b64decode(base64_part)
-
-
-def test_create_image_content_dict_with_custom_mime(sample_image):
- """Test creation of image content dict with custom MIME type."""
- custom_mime = "image/custom"
- content_dict = create_image_content_dict(sample_image, mime_type=custom_mime)
- assert content_dict["type"] == "image"
- assert content_dict["source_type"] == "url"
- assert "url" in content_dict
- assert content_dict["url"].startswith(f"data:{custom_mime};base64,")
-
-
-def test_create_image_content_dict_invalid_file():
- """Test creation of image content dict with invalid file."""
- with pytest.raises(FileNotFoundError):
- create_image_content_dict("nonexistent.jpg")
-
-
-def test_create_image_content_dict_unrecognized_extension(tmp_path):
- """Test creation of image content dict with unrecognized file extension."""
- invalid_file = tmp_path / "test.unknown"
- invalid_file.touch()
- with pytest.raises(ValueError, match="Could not determine MIME type"):
- create_image_content_dict(invalid_file)
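
Reviewer note: the contract these deleted tests pinned down is easy to lose track of once the file is gone. The following is a minimal illustrative sketch of that contract (error messages, MIME guessing via the standard library, and the content-dict shape), not the actual `langflow.utils.image` implementation:

```python
import base64
import mimetypes
from pathlib import Path


def convert_image_to_base64(image_path) -> str:
    """Read an image file and return its base64-encoded contents."""
    if not str(image_path):
        raise ValueError("Image path cannot be empty")
    path = Path(image_path)
    if not path.exists():
        raise FileNotFoundError(f"Image file not found: {path}")
    if not path.is_file():
        raise ValueError(f"Path is not a file: {path}")
    return base64.b64encode(path.read_bytes()).decode()


def create_data_url(image_path, mime_type=None) -> str:
    """Build a data: URL, guessing the MIME type from the extension."""
    if mime_type is None:
        mime_type, _ = mimetypes.guess_type(str(image_path))
        if mime_type is None:
            raise ValueError(f"Could not determine MIME type for: {image_path}")
    return f"data:{mime_type};base64,{convert_image_to_base64(image_path)}"


def create_image_content_dict(image_path, mime_type=None) -> dict:
    """Wrap the data URL in the content-dict shape asserted above."""
    return {"type": "image", "source_type": "url", "url": create_data_url(image_path, mime_type)}
```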
diff --git a/src/backend/tests/unit/utils/test_interface_utils.py b/src/backend/tests/unit/utils/test_interface_utils.py
deleted file mode 100644
index e1d7e7e85778..000000000000
--- a/src/backend/tests/unit/utils/test_interface_utils.py
+++ /dev/null
@@ -1,60 +0,0 @@
-import pytest
-from langflow.interface.utils import extract_input_variables_from_prompt
-
-
-@pytest.mark.parametrize(
- ("prompt", "expected"),
- [
- # Basic variable extraction
- ("Hello {name}!", ["name"]),
- ("Hi {name}, you are {age} years old", ["name", "age"]),
- # Empty prompt
- ("", []),
- ("No variables here", []),
- # Duplicate variables
- ("Hello {name}! How are you {name}?", ["name"]),
- # Whitespace handling - Formatter preserves whitespace in field names
- ("Hello { name }!", [" name "]),
- ("Hi { name }, bye", [" name "]),
- # Multiple braces (escaping)
- ("Escaped {{not_a_var}}", []),
- ("Mixed {{escaped}} and {real_var}", ["real_var"]),
- ("Double escaped {{{{not_this}}}}", []),
- # Complex cases
- ("Hello {name}! Your score is {{4 + 5}}, age: {age}", ["name", "age"]),
- ("Nested {{obj['key']}} with {normal_var}", ["normal_var"]),
- ("Template {{user.name}} with {id} and {type}", ["id", "type"]),
- # Edge cases
- ("{single}", ["single"]),
- ("{{double}}", []),
- ("{{{}}}", []),
- # Multiple variables with various spacing
- (
- """
- Multi-line with {var1}
- and {var2} plus
- {var3} at the end
- """,
- ["var1", "var2", "var3"],
- ),
- ],
-)
-def test_extract_input_variables(prompt, expected):
- """Test the extract_input_variables_from_prompt function with various cases."""
- result = extract_input_variables_from_prompt(prompt)
- assert sorted(result) == sorted(expected), f"Failed for prompt: {prompt}"
-
-
-@pytest.mark.parametrize(
- ("prompt", "expected_error"),
- [
- # Malformed format strings that should raise ValueError
- ("}{", "Single '}' encountered in format string"),
- ("{incomplete", "expected '}' before end of string"),
- ("incomplete}", "Single '}' encountered in format string"),
- ],
-)
-def test_extract_input_variables_malformed(prompt, expected_error):
- """Test that malformed format strings raise ValueError."""
- with pytest.raises(ValueError, match=expected_error):
- extract_input_variables_from_prompt(prompt)
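
Reviewer note: the parametrized cases above fully specify the function's semantics, which follow `string.Formatter` directly. A minimal sketch consistent with every case (whitespace preserved in field names, escaped braces and empty `{}` fields skipped, duplicates removed, malformed strings raising `ValueError` from the parser itself); the real function lives in `langflow.interface.utils`:

```python
from string import Formatter


def extract_input_variables_from_prompt(prompt: str) -> list[str]:
    """Return the unique replacement-field names in a format string."""
    variables: list[str] = []
    for _literal, field_name, _spec, _conv in Formatter().parse(prompt):
        # field_name is None for literal text (including escaped {{...}})
        # and "" for a bare "{}" field; both are skipped. Malformed format
        # strings make Formatter.parse raise ValueError during iteration.
        if field_name and field_name not in variables:
            variables.append(field_name)
    return variables
```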
diff --git a/src/backend/tests/unit/utils/test_rewrite_file_path.py b/src/backend/tests/unit/utils/test_rewrite_file_path.py
deleted file mode 100644
index ad9f7371dd41..000000000000
--- a/src/backend/tests/unit/utils/test_rewrite_file_path.py
+++ /dev/null
@@ -1,39 +0,0 @@
-import pytest
-
-from lfx.base.data.utils import format_directory_path
-
-
-@pytest.mark.parametrize(
- ("input_path", "expected"),
- [
- # Test case 1: Standard path with no newlines
- ("/home/user/documents/file.txt", "/home/user/documents/file.txt"),
- # Test case 2: Path with newline character
- ("/home/user/docu\nments/file.txt", "/home/user/docu\\nments/file.txt"),
- # Test case 3: Path with multiple newline characters
- ("/home/user/\ndocu\nments/file.txt", "/home/user/\\ndocu\\nments/file.txt"),
- # Test case 4: Path with only newline characters
- ("\n\n\n", "\\n\\n\\n"),
- # Test case 5: Empty path
- ("", ""),
- # Test case 6: Path with mixed newlines and other special characters
- ("/home/user/my-\ndocs/special_file!.pdf", "/home/user/my-\\ndocs/special_file!.pdf"),
- # Test case 7: Windows-style path with newline
- ("C:\\Users\\\nDocuments\\file.txt", "C:\\Users\\\\nDocuments\\file.txt"),
- # Test case 8: Path with trailing newline
- ("/home/user/documents/\n", "/home/user/documents/\\n"),
- # Test case 9: Path with leading newline
- ("\n/home/user/documents/", "\\n/home/user/documents/"),
- # Test case 10: Path with multiple consecutive newlines
- ("/home/user/docu\n\nments/file.txt", "/home/user/docu\\n\\nments/file.txt"),
- ],
-)
-def test_format_directory_path(input_path, expected):
- result = format_directory_path(input_path)
- assert result == expected
-
-
-# Additional test for type checking
-def test_format_directory_path_type():
- result = format_directory_path("/home/user/file.txt")
- assert isinstance(result, str)
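
Reviewer note: every case above reduces to one substitution, escaping literal newlines so the path renders on a single line. A one-line sketch of the contract (the real helper is `lfx.base.data.utils.format_directory_path`):

```python
def format_directory_path(path: str) -> str:
    """Escape literal newline characters as the two-character sequence \\n."""
    return path.replace("\n", "\\n")


assert format_directory_path("/home/user/docu\nments/file.txt") == "/home/user/docu\\nments/file.txt"
```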
diff --git a/src/backend/tests/unit/utils/test_template_validation.py b/src/backend/tests/unit/utils/test_template_validation.py
deleted file mode 100644
index 0ef3f859d07c..000000000000
--- a/src/backend/tests/unit/utils/test_template_validation.py
+++ /dev/null
@@ -1,718 +0,0 @@
-"""Unit tests for template validation utilities."""
-
-import asyncio
-from unittest.mock import AsyncMock, Mock, patch
-
-import pytest
-from langflow.utils.template_validation import (
- _validate_event_stream,
- validate_flow_can_build,
- validate_flow_code,
- validate_flow_execution,
- validate_template_structure,
-)
-
-
-class AsyncIteratorMock:
- """Mock class that provides proper async iteration."""
-
- def __init__(self, items):
- self.items = items
-
- def __aiter__(self):
- return self
-
- async def __anext__(self):
- if not self.items:
- raise StopAsyncIteration
- return self.items.pop(0)
-
-
-class TestValidateTemplateStructure:
- """Test cases for validate_template_structure function."""
-
- def test_valid_template_structure(self):
- """Test validation passes for valid template structure."""
- template_data = {
- "nodes": [
- {"id": "node1", "data": {"type": "input"}},
- {"id": "node2", "data": {"type": "output"}},
- ],
- "edges": [{"source": "node1", "target": "node2"}],
- }
- errors = validate_template_structure(template_data, "test.json")
- assert errors == []
-
- def test_valid_template_with_data_wrapper(self):
- """Test validation passes for template with data wrapper."""
- template_data = {
- "data": {
- "nodes": [{"id": "node1", "data": {"type": "input"}}],
- "edges": [],
- }
- }
- errors = validate_template_structure(template_data, "test.json")
- assert errors == []
-
- def test_missing_nodes_field(self):
- """Test validation fails when nodes field is missing."""
- template_data = {"edges": []}
- errors = validate_template_structure(template_data, "test.json")
- assert "test.json: Missing 'nodes' field" in errors
-
- def test_missing_edges_field(self):
- """Test validation fails when edges field is missing."""
- template_data = {"nodes": []}
- errors = validate_template_structure(template_data, "test.json")
- assert "test.json: Missing 'edges' field" in errors
-
- def test_nodes_not_list(self):
- """Test validation fails when nodes is not a list."""
- template_data = {"nodes": "not_a_list", "edges": []}
- errors = validate_template_structure(template_data, "test.json")
- assert "test.json: 'nodes' must be a list" in errors
-
- def test_edges_not_list(self):
- """Test validation fails when edges is not a list."""
- template_data = {"nodes": [], "edges": "not_a_list"}
- errors = validate_template_structure(template_data, "test.json")
- assert "test.json: 'edges' must be a list" in errors
-
- def test_node_missing_id(self):
- """Test validation fails when node is missing id."""
- template_data = {
- "nodes": [{"data": {"type": "input"}}],
- "edges": [],
- }
- errors = validate_template_structure(template_data, "test.json")
- assert "test.json: Node 0 missing 'id'" in errors
-
- def test_node_missing_data(self):
- """Test validation fails when node is missing data."""
- template_data = {
- "nodes": [{"id": "node1"}],
- "edges": [],
- }
- errors = validate_template_structure(template_data, "test.json")
- assert "test.json: Node 0 missing 'data'" in errors
-
- def test_multiple_validation_errors(self):
- """Test multiple validation errors are collected."""
- template_data = {
- "nodes": [
- {"data": {"type": "input"}}, # Missing id
- {"id": "node2"}, # Missing data
- ],
- "edges": "not_a_list",
- }
- errors = validate_template_structure(template_data, "test.json")
- assert len(errors) == 3
- assert "Node 0 missing 'id'" in str(errors)
- assert "Node 1 missing 'data'" in str(errors)
- assert "'edges' must be a list" in str(errors)
-
-
-class TestValidateFlowCanBuild:
- """Test cases for validate_flow_can_build function."""
-
- @patch("langflow.utils.template_validation.Graph")
- def test_valid_flow_builds_successfully(self, mock_graph_class):
- """Test validation passes when flow builds successfully."""
- # Setup mock graph
- mock_graph = Mock()
- mock_graph.vertices = [Mock(id="vertex1"), Mock(id="vertex2")]
- mock_graph_class.from_payload.return_value = mock_graph
-
- template_data = {
- "nodes": [{"id": "node1", "data": {"type": "input"}}],
- "edges": [],
- }
-
- errors = validate_flow_can_build(template_data, "test.json")
- assert errors == []
- mock_graph_class.from_payload.assert_called_once()
- mock_graph.validate_stream.assert_called_once()
-
- @patch("langflow.utils.template_validation.Graph")
- def test_flow_build_fails_with_exception(self, mock_graph_class):
- """Test validation fails when flow build raises exception."""
- mock_graph_class.from_payload.side_effect = ValueError("Build failed")
-
- template_data = {"nodes": [], "edges": []}
- errors = validate_flow_can_build(template_data, "test.json")
- assert len(errors) == 1
- assert "test.json: Failed to build flow graph: Build failed" in errors
-
- @patch("langflow.utils.template_validation.Graph")
- def test_flow_has_no_vertices(self, mock_graph_class):
- """Test validation fails when flow has no vertices."""
- mock_graph = Mock()
- mock_graph.vertices = []
- mock_graph_class.from_payload.return_value = mock_graph
-
- template_data = {"nodes": [], "edges": []}
- errors = validate_flow_can_build(template_data, "test.json")
- assert "test.json: Flow has no vertices after building" in errors
-
- @patch("langflow.utils.template_validation.Graph")
- def test_vertex_missing_id(self, mock_graph_class):
- """Test validation fails when vertex is missing ID."""
- mock_vertex = Mock()
- mock_vertex.id = None
- mock_graph = Mock()
- mock_graph.vertices = [mock_vertex]
- mock_graph_class.from_payload.return_value = mock_graph
-
- template_data = {"nodes": [], "edges": []}
- errors = validate_flow_can_build(template_data, "test.json")
- assert "test.json: Vertex missing ID" in errors
-
- @patch("langflow.utils.template_validation.Graph")
- def test_uses_unique_flow_id(self, mock_graph_class):
- """Test that unique flow ID and name are used."""
- mock_graph = Mock()
- mock_graph.vertices = [Mock(id="vertex1")]
- mock_graph_class.from_payload.return_value = mock_graph
-
- template_data = {"nodes": [], "edges": []}
- validate_flow_can_build(template_data, "my_flow.json")
-
- # Verify from_payload was called with proper parameters
- call_args = mock_graph_class.from_payload.call_args
- assert call_args[0][0] == template_data # template_data
- assert len(call_args[0][1]) == 36 # UUID length
- assert call_args[0][2] == "my_flow" # flow_name
- # The user_id is passed as a keyword argument
- assert call_args[1]["user_id"] == "test_user"
-
- @patch("langflow.utils.template_validation.Graph")
- def test_validate_stream_exception(self, mock_graph_class):
- """Test that validate_stream exceptions are caught."""
- mock_graph = Mock()
- mock_graph.vertices = [Mock(id="vertex1")]
- mock_graph.validate_stream.side_effect = ValueError("Stream validation failed")
- mock_graph_class.from_payload.return_value = mock_graph
-
- template_data = {"nodes": [], "edges": []}
- errors = validate_flow_can_build(template_data, "test.json")
-
- assert len(errors) == 1
- assert "Failed to build flow graph: Stream validation failed" in errors[0]
-
-
-class TestValidateFlowCode:
- """Test cases for validate_flow_code function."""
-
- @patch("langflow.utils.template_validation.validate_code")
- def test_valid_flow_code(self, mock_validate_code):
- """Test validation passes when code is valid."""
- mock_validate_code.return_value = {
- "imports": {"errors": []},
- "function": {"errors": []},
- }
-
- template_data = {
- "data": {
- "nodes": [
- {
- "id": "node1",
- "data": {
- "id": "node1",
- "node": {
- "template": {
- "code_field": {
- "type": "code",
- "value": "def hello(): return 'world'",
- }
- }
- },
- },
- }
- ]
- }
- }
-
- errors = validate_flow_code(template_data, "test.json")
- assert errors == []
- mock_validate_code.assert_called_once_with("def hello(): return 'world'")
-
- @patch("langflow.utils.template_validation.validate_code")
- def test_code_import_errors(self, mock_validate_code):
- """Test validation fails when code has import errors."""
- mock_validate_code.return_value = {
- "imports": {"errors": ["Module not found: nonexistent_module"]},
- "function": {"errors": []},
- }
-
- template_data = {
- "nodes": [
- {
- "data": {
- "id": "node1",
- "node": {
- "template": {
- "code_field": {
- "type": "code",
- "value": "import nonexistent_module",
- }
- }
- },
- }
- }
- ]
- }
-
- errors = validate_flow_code(template_data, "test.json")
- assert len(errors) == 1
- assert "Import error in node node1: Module not found: nonexistent_module" in errors[0]
-
- @patch("langflow.utils.template_validation.validate_code")
- def test_code_function_errors(self, mock_validate_code):
- """Test validation fails when code has function errors."""
- mock_validate_code.return_value = {
- "imports": {"errors": []},
- "function": {"errors": ["Syntax error in function"]},
- }
-
- template_data = {
- "nodes": [
- {
- "data": {
- "id": "node2",
- "node": {
- "template": {
- "code_field": {
- "type": "code",
- "value": "def broken(: pass",
- }
- }
- },
- }
- }
- ]
- }
-
- errors = validate_flow_code(template_data, "test.json")
- assert len(errors) == 1
- assert "Function error in node node2: Syntax error in function" in errors[0]
-
- def test_no_code_fields(self):
- """Test validation passes when there are no code fields."""
- template_data = {
- "nodes": [{"data": {"node": {"template": {"text_field": {"type": "text", "value": "hello"}}}}}]
- }
-
- errors = validate_flow_code(template_data, "test.json")
- assert errors == []
-
- def test_empty_code_value(self):
- """Test validation passes when code value is empty."""
- template_data = {"nodes": [{"data": {"node": {"template": {"code_field": {"type": "code", "value": ""}}}}}]}
-
- errors = validate_flow_code(template_data, "test.json")
- assert errors == []
-
- def test_code_validation_exception(self):
- """Test validation handles exceptions gracefully."""
- template_data = {
- "nodes": [{"data": {"node": {"template": {"code_field": {"type": "code", "value": "def test(): pass"}}}}}]
- }
-
- with patch("langflow.utils.template_validation.validate_code", side_effect=ValueError("Unexpected error")):
- errors = validate_flow_code(template_data, "test.json")
- assert len(errors) == 1
- assert "Code validation failed: Unexpected error" in errors[0]
-
- def test_code_validation_other_exceptions(self):
- """Test validation handles different exception types."""
- template_data = {
- "nodes": [{"data": {"node": {"template": {"code_field": {"type": "code", "value": "def test(): pass"}}}}}]
- }
-
- # Test TypeError
- with patch("langflow.utils.template_validation.validate_code", side_effect=TypeError("Type error")):
- errors = validate_flow_code(template_data, "test.json")
- assert len(errors) == 1
- assert "Code validation failed: Type error" in errors[0]
-
- # Test KeyError
- with patch("langflow.utils.template_validation.validate_code", side_effect=KeyError("key")):
- errors = validate_flow_code(template_data, "test.json")
- assert len(errors) == 1
- assert "Code validation failed: 'key'" in errors[0]
-
- # Test AttributeError
- with patch("langflow.utils.template_validation.validate_code", side_effect=AttributeError("Attribute error")):
- errors = validate_flow_code(template_data, "test.json")
- assert len(errors) == 1
- assert "Code validation failed: Attribute error" in errors[0]
-
-
-class TestValidateFlowExecution:
- """Test cases for validate_flow_execution function."""
-
- @pytest.mark.asyncio
- async def test_successful_flow_execution(self):
- """Test validation passes when flow execution succeeds."""
- # Mock client responses
- mock_client = AsyncMock()
-
- # Mock create flow response
- create_response = Mock()
- create_response.status_code = 201
- create_response.json.return_value = {"id": "flow123"}
- mock_client.post.return_value = create_response
-
- # Mock build response
- build_response = Mock()
- build_response.status_code = 200
- build_response.json.return_value = {"job_id": "job123"}
-
- # Mock events response
- events_response = Mock()
- events_response.status_code = 200
- events_response.aiter_lines = Mock(
- return_value=AsyncIteratorMock(
- [
- '{"event": "vertices_sorted", "job_id": "job123", "data": {"ids": ["v1"]}}',
- '{"event": "end_vertex", "job_id": "job123", "data": {"build_data": {"result": "success"}}}',
- '{"event": "end", "job_id": "job123"}',
- ]
- )
- )
-
- # Set up call sequence
- mock_client.post.side_effect = [create_response, build_response]
- mock_client.get.return_value = events_response
- mock_client.delete.return_value = Mock()
-
- template_data = {"nodes": [], "edges": []}
- headers = {"Authorization": "Bearer token"}
-
- errors = await validate_flow_execution(mock_client, template_data, "test.json", headers)
- assert errors == []
-
- # Verify API calls
- assert mock_client.post.call_count == 2
- mock_client.get.assert_called_once()
- mock_client.delete.assert_called_once()
-
- @pytest.mark.asyncio
- async def test_flow_creation_fails(self):
- """Test validation fails when flow creation fails."""
- mock_client = AsyncMock()
- create_response = Mock()
- create_response.status_code = 400
- mock_client.post.return_value = create_response
-
- template_data = {"nodes": [], "edges": []}
- headers = {"Authorization": "Bearer token"}
-
- errors = await validate_flow_execution(mock_client, template_data, "test.json", headers)
- assert len(errors) == 1
- assert "Failed to create flow: 400" in errors[0]
-
- @pytest.mark.asyncio
- async def test_flow_build_fails(self):
- """Test validation fails when flow build fails."""
- mock_client = AsyncMock()
-
- # Mock successful create
- create_response = Mock()
- create_response.status_code = 201
- create_response.json.return_value = {"id": "flow123"}
-
- # Mock failed build
- build_response = Mock()
- build_response.status_code = 500
-
- mock_client.post.side_effect = [create_response, build_response]
- mock_client.delete.return_value = Mock()
-
- template_data = {"nodes": [], "edges": []}
- headers = {"Authorization": "Bearer token"}
-
- errors = await validate_flow_execution(mock_client, template_data, "test.json", headers)
- assert len(errors) == 1
- assert "Failed to build flow: 500" in errors[0]
-
- @pytest.mark.asyncio
- async def test_execution_timeout(self):
- """Test validation fails when execution times out."""
- mock_client = AsyncMock()
- mock_client.post.side_effect = asyncio.TimeoutError()
-
- template_data = {"nodes": [], "edges": []}
- headers = {"Authorization": "Bearer token"}
-
- errors = await validate_flow_execution(mock_client, template_data, "test.json", headers)
- assert len(errors) == 1
- assert "Flow execution timed out" in errors[0]
-
- @pytest.mark.asyncio
- async def test_cleanup_on_exception(self):
- """Test that flow cleanup happens even when exceptions occur."""
- mock_client = AsyncMock()
-
- # Mock successful create
- create_response = Mock()
- create_response.status_code = 201
- create_response.json.return_value = {"id": "flow123"}
-
- # Mock build that raises exception
- mock_client.post.side_effect = [create_response, ValueError("Build error")]
- mock_client.delete.return_value = Mock()
-
- template_data = {"nodes": [], "edges": []}
- headers = {"Authorization": "Bearer token", "timeout": 10}
-
- errors = await validate_flow_execution(mock_client, template_data, "test.json", headers)
- assert len(errors) == 1
- assert "Flow execution validation failed: Build error" in errors[0]
-
- # Verify cleanup was called
- mock_client.delete.assert_called_once_with("api/v1/flows/flow123", headers=headers, timeout=10)
-
-
-class TestValidateEventStream:
- """Test cases for _validate_event_stream function."""
-
- @pytest.mark.asyncio
- async def test_valid_event_stream(self):
- """Test validation passes for valid event stream."""
- mock_response = Mock()
- mock_response.aiter_lines = Mock(
- return_value=AsyncIteratorMock(
- [
- '{"event": "vertices_sorted", "job_id": "job123", "data": {"ids": ["v1", "v2"]}}',
- '{"event": "end_vertex", "job_id": "job123", "data": {"build_data": {"result": "success"}}}',
- '{"event": "end_vertex", "job_id": "job123", "data": {"build_data": {"result": "success"}}}',
- '{"event": "end", "job_id": "job123"}',
- ]
- )
- )
-
- errors = []
- await _validate_event_stream(mock_response, "job123", "test.json", errors)
- assert errors == []
-
- @pytest.mark.asyncio
- async def test_missing_end_event(self):
- """Test validation fails when end event is missing."""
- mock_response = Mock()
- mock_response.aiter_lines = Mock(
- return_value=AsyncIteratorMock(
- ['{"event": "vertices_sorted", "job_id": "job123", "data": {"ids": ["v1"]}}']
- )
- )
-
- errors = []
- await _validate_event_stream(mock_response, "job123", "test.json", errors)
- assert len(errors) == 1
- assert "Missing end event in execution" in errors[0]
-
- @pytest.mark.asyncio
- async def test_job_id_mismatch(self):
- """Test validation fails when job ID doesn't match."""
- mock_response = Mock()
- mock_response.aiter_lines = Mock(
- return_value=AsyncIteratorMock(
- [
- '{"event": "vertices_sorted", "job_id": "wrong_job", "data": {"ids": ["v1"]}}',
- '{"event": "end", "job_id": "job123"}',
- ]
- )
- )
-
- errors = []
- await _validate_event_stream(mock_response, "job123", "test.json", errors)
- assert len(errors) == 1
- assert "Job ID mismatch in event stream" in errors[0]
-
- @pytest.mark.asyncio
- async def test_invalid_json_in_stream(self):
- """Test validation handles invalid JSON in event stream."""
- mock_response = Mock()
- mock_response.aiter_lines = Mock(
- return_value=AsyncIteratorMock(["invalid json", '{"event": "end", "job_id": "job123"}'])
- )
-
- errors = []
- await _validate_event_stream(mock_response, "job123", "test.json", errors)
- assert len(errors) == 1
- assert "Invalid JSON in event stream: invalid json" in errors[0]
-
- @pytest.mark.asyncio
- async def test_error_event_handling(self):
- """Test validation handles error events properly."""
- mock_response = Mock()
- mock_response.aiter_lines = Mock(
- return_value=AsyncIteratorMock(
- [
- '{"event": "error", "job_id": "job123", "data": {"error": "Something went wrong"}}',
- '{"event": "error", "job_id": "job123", "data": {"error": "False"}}', # Should be ignored
- '{"event": "error", "job_id": "job123", "data": "String error"}',
- '{"event": "end", "job_id": "job123"}',
- ]
- )
- )
-
- errors = []
- await _validate_event_stream(mock_response, "job123", "test.json", errors)
- assert len(errors) == 2
- assert "Flow execution error: Something went wrong" in errors[0]
- assert "Flow execution error: String error" in errors[1]
-
- @pytest.mark.asyncio
- async def test_missing_vertex_ids(self):
- """Test validation fails when vertices_sorted event missing IDs."""
- mock_response = Mock()
- mock_response.aiter_lines = Mock(
- return_value=AsyncIteratorMock(
- [
- '{"event": "vertices_sorted", "job_id": "job123", "data": {}}',
- '{"event": "end", "job_id": "job123"}',
- ]
- )
- )
-
- errors = []
- await _validate_event_stream(mock_response, "job123", "test.json", errors)
- assert len(errors) == 1
- assert "Missing vertex IDs in vertices_sorted event" in errors[0]
-
- @pytest.mark.asyncio
- async def test_missing_build_data(self):
- """Test validation fails when end_vertex event missing build_data."""
- mock_response = Mock()
- mock_response.aiter_lines = Mock(
- return_value=AsyncIteratorMock(
- [
- '{"event": "end_vertex", "job_id": "job123", "data": {}}',
- '{"event": "end", "job_id": "job123"}',
- ]
- )
- )
-
- errors = []
- await _validate_event_stream(mock_response, "job123", "test.json", errors)
- assert len(errors) == 1
- assert "Missing build_data in end_vertex event" in errors[0]
-
- @pytest.mark.asyncio
- async def test_event_stream_timeout(self):
- """Test validation handles timeout gracefully."""
-
- class SlowAsyncIterator:
- """Async iterator that will cause timeout."""
-
- def __aiter__(self):
- return self
-
- async def __anext__(self):
- await asyncio.sleep(10) # Will cause timeout
- return '{"event": "end", "job_id": "job123"}'
-
- mock_response = Mock()
- mock_response.aiter_lines = Mock(return_value=SlowAsyncIterator())
-
- errors = []
- await _validate_event_stream(mock_response, "job123", "test.json", errors)
- assert len(errors) == 1
- assert "Flow execution timeout" in errors[0]
-
- @pytest.mark.asyncio
- async def test_common_event_types_ignored(self):
- """Test that common event types don't cause errors."""
- mock_response = Mock()
- mock_response.aiter_lines = Mock(
- return_value=AsyncIteratorMock(
- [
- '{"event": "message", "job_id": "job123"}',
- '{"event": "token", "job_id": "job123"}',
- '{"event": "add_message", "job_id": "job123"}',
- '{"event": "stream_closed", "job_id": "job123"}',
- '{"event": "end", "job_id": "job123"}',
- ]
- )
- )
-
- errors = []
- await _validate_event_stream(mock_response, "job123", "test.json", errors)
- assert errors == []
-
- @pytest.mark.asyncio
- async def test_vertices_sorted_without_end_vertex_events(self):
- """Test validation with vertices_sorted but no end_vertex events."""
- mock_response = Mock()
- mock_response.aiter_lines = Mock(
- return_value=AsyncIteratorMock(
- [
- '{"event": "vertices_sorted", "job_id": "job123", "data": {"ids": ["v1", "v2"]}}',
- '{"event": "end", "job_id": "job123"}',
- ]
- )
- )
-
- errors = []
- await _validate_event_stream(mock_response, "job123", "test.json", errors)
- assert errors == []
-
- @pytest.mark.asyncio
- async def test_vertex_count_tracking(self):
- """Test that vertex_count is properly tracked."""
- mock_response = Mock()
- mock_response.aiter_lines = Mock(
- return_value=AsyncIteratorMock(
- [
- '{"event": "vertices_sorted", "job_id": "job123", "data": {"ids": ["v1", "v2", "v3"]}}',
- '{"event": "end_vertex", "job_id": "job123", "data": {"build_data": {"result": "success"}}}',
- '{"event": "end_vertex", "job_id": "job123", "data": {"build_data": {"result": "success"}}}',
- '{"event": "end_vertex", "job_id": "job123", "data": {"build_data": {"result": "success"}}}',
- '{"event": "end", "job_id": "job123"}',
- ]
- )
- )
-
- errors = []
- await _validate_event_stream(mock_response, "job123", "test.json", errors)
- assert errors == []
-
- @pytest.mark.asyncio
- async def test_empty_lines_in_stream(self):
- """Test that empty lines in event stream are properly handled."""
- mock_response = Mock()
- mock_response.aiter_lines = Mock(
- return_value=AsyncIteratorMock(
- [
- "", # Empty line
- '{"event": "vertices_sorted", "job_id": "job123", "data": {"ids": ["v1"]}}',
- "", # Another empty line
- '{"event": "end", "job_id": "job123"}',
- "", # Empty line at end
- ]
- )
- )
-
- errors = []
- await _validate_event_stream(mock_response, "job123", "test.json", errors)
- assert errors == []
-
- @pytest.mark.asyncio
- async def test_event_stream_validation_exception(self):
- """Test that event stream validation handles exceptions properly."""
- mock_response = Mock()
- mock_response.aiter_lines = Mock(
- return_value=AsyncIteratorMock(
- [
- '{"event": "end", "job_id": "job123"}',
- ]
- )
- )
-
- # Mock the json.loads to raise a different exception type
- errors = []
- with patch("langflow.utils.template_validation.json.loads", side_effect=TypeError("Type error")):
- await _validate_event_stream(mock_response, "job123", "test.json", errors)
- assert len(errors) == 1
- assert "Event stream validation failed: Type error" in errors[0]
diff --git a/src/backend/tests/unit/utils/test_truncate_long_strings.py b/src/backend/tests/unit/utils/test_truncate_long_strings.py
deleted file mode 100644
index f549c3ba1c2a..000000000000
--- a/src/backend/tests/unit/utils/test_truncate_long_strings.py
+++ /dev/null
@@ -1,56 +0,0 @@
-import math
-
-import pytest
-from langflow.serialization.constants import MAX_TEXT_LENGTH
-
-from lfx.utils.util_strings import truncate_long_strings
-
-
-@pytest.mark.parametrize(
- ("input_data", "max_length", "expected"),
- [
- # Test case 1: String shorter than max_length
- ("short string", 20, "short string"),
- # Test case 2: String exactly at max_length
- ("exact", 5, "exact"),
- # Test case 3: String longer than max_length
- ("long string", 7, "long st..."),
- # Test case 4: Empty string
- ("", 5, ""),
- # Test case 5: Single character string
- ("a", 1, "a"),
- # Test case 6: Unicode string
- ("こんにちは", 3, "こんに..."),
- # Test case 7: Integer input
- (12345, 3, 12345),
- # Test case 8: Float input
- (math.pi, 4, math.pi),
- # Test case 9: Boolean input
- (True, 2, True),
- # Test case 10: None input
- (None, 5, None),
- # Test case 11: Very long string
- ("a" * 1000, 10, "a" * 10 + "..."),
- ],
-)
-def test_truncate_long_strings_non_dict_list(input_data, max_length, expected):
- result = truncate_long_strings(input_data, max_length)
- assert result == expected
-
-
-# Test for max_length of 0
-def test_truncate_long_strings_zero_max_length():
- assert truncate_long_strings("any string", 0) == "..."
-
-
-# Test for negative max_length
-def test_truncate_long_strings_negative_max_length():
- assert truncate_long_strings("any string", -1) == "any string"
-
-
-# Test for None max_length (should use default MAX_TEXT_LENGTH)
-def test_truncate_long_strings_none_max_length():
- long_string = "a" * (MAX_TEXT_LENGTH + 10)
- result = truncate_long_strings(long_string, None)
- assert len(result) == MAX_TEXT_LENGTH + 3 # +3 for "..."
- assert result == "a" * MAX_TEXT_LENGTH + "..."
diff --git a/src/backend/tests/unit/utils/test_truncate_long_strings_on_objects.py b/src/backend/tests/unit/utils/test_truncate_long_strings_on_objects.py
deleted file mode 100644
index a073c16048ac..000000000000
--- a/src/backend/tests/unit/utils/test_truncate_long_strings_on_objects.py
+++ /dev/null
@@ -1,99 +0,0 @@
-import pytest
-from langflow.serialization.constants import MAX_TEXT_LENGTH
-
-from lfx.utils.util_strings import truncate_long_strings
-
-
-@pytest.mark.parametrize(
- ("input_data", "max_length", "expected"),
- [
- # Test case 1: Simple string truncation
- ({"key": "a" * 100}, 10, {"key": "a" * 10 + "..."}),
- # Test case 2: Nested dictionary
- ({"outer": {"inner": "b" * 100}}, 5, {"outer": {"inner": "b" * 5 + "..."}}),
- # Test case 3: List of strings
- (["short", "a" * 100, "also short"], 7, ["short", "a" * 7 + "...", "also sh" + "..."]),
- # Test case 4: Mixed nested structure
- (
- {"key1": ["a" * 100, {"nested": "b" * 100}], "key2": "c" * 100},
- 8,
- {"key1": ["a" * 8 + "...", {"nested": "b" * 8 + "..."}], "key2": "c" * 8 + "..."},
- ),
- # Test case 5: Empty structures
- ({}, 10, {}),
- ([], 10, []),
- # Test case 6: Strings at exact max_length
- ({"exact": "a" * 10}, 10, {"exact": "a" * 10}),
- # Test case 7: Non-string values
- ({"num": 12345, "bool": True, "none": None}, 5, {"num": 12345, "bool": True, "none": None}),
- # Test case 8: Unicode characters
- ({"unicode": "こんにちは世界"}, 3, {"unicode": "こんに..."}),
- # Test case 9: Very large structure
- (
- {"key" + str(i): "value" * i for i in range(1000)},
- 10,
- {"key" + str(i): ("value" * i)[:10] + "..." if len("value" * i) > 10 else "value" * i for i in range(1000)},
- ),
- ],
-)
-def test_truncate_long_strings(input_data, max_length, expected):
- result = truncate_long_strings(input_data, max_length)
- assert result == expected
-
-
-def test_truncate_long_strings_default_max_length():
- long_string = "a" * (MAX_TEXT_LENGTH + 1)
- input_data = {"key": long_string}
- result = truncate_long_strings(input_data)
- assert len(result["key"]) == MAX_TEXT_LENGTH + 3 # +3 for the "..."
-
-
-def test_truncate_long_strings_no_modification():
- input_data = {"short": "short string", "nested": {"also_short": "another short string"}}
- result = truncate_long_strings(input_data, 100)
- assert result == input_data
-
-
-# Test for type preservation
-def test_truncate_long_strings_type_preservation():
- input_data = {"str": "a" * 100, "list": ["b" * 100], "dict": {"nested": "c" * 100}}
- result = truncate_long_strings(input_data, 10)
- assert isinstance(result, dict)
- assert isinstance(result["str"], str)
- assert isinstance(result["list"], list)
- assert isinstance(result["dict"], dict)
-
-
-# Test for in-place modification
-def test_truncate_long_strings_in_place_modification():
- input_data = {"key": "a" * 100}
- result = truncate_long_strings(input_data, 10)
- assert result is input_data # Check if the same object is returned
-
-
-# Test for invalid input
-def test_truncate_long_strings_invalid_input():
- input_string = "not a dict or list"
- result = truncate_long_strings(input_string, 10)
- assert result == "not a dict..." # The function should truncate the string
-
-
-# Updated test for negative max_length
-def test_truncate_long_strings_negative_max_length():
- input_data = {"key": "value"}
- result = truncate_long_strings(input_data, -1)
- assert result == input_data # Assuming the function ignores negative max_length
-
-
-# Additional test for zero max_length
-def test_truncate_long_strings_zero_max_length():
- input_data = {"key": "value"}
- result = truncate_long_strings(input_data, 0)
- assert result == {"key": "..."} # Assuming the function truncates to just "..."
-
-
-# Test for very small positive max_length
-def test_truncate_long_strings_small_max_length():
- input_data = {"key": "value"}
- result = truncate_long_strings(input_data, 1)
- assert result == {"key": "v..."} # Assuming the function keeps at least one character
diff --git a/src/backend/tests/unit/utils/test_util_strings.py b/src/backend/tests/unit/utils/test_util_strings.py
deleted file mode 100644
index c60c493765f5..000000000000
--- a/src/backend/tests/unit/utils/test_util_strings.py
+++ /dev/null
@@ -1,31 +0,0 @@
-import pytest
-
-from lfx.utils import util_strings
-
-
-@pytest.mark.parametrize(
- ("value", "expected"),
- [
- ("sqlite:///test.db", True),
- ("sqlite:////var/folders/test.db", True),
- ("sqlite:///:memory:", True),
- ("sqlite+aiosqlite:////var/folders/test.db", True),
- ("postgresql://user:pass@localhost/dbname", True),
- ("postgresql+psycopg2://scott:tiger@localhost:5432/mydatabase", True),
- ("postgresql+pg8000://dbuser:kx%40jj5%2Fg@pghost10/appdb", True),
- ("mysql://user:pass@localhost/dbname", True),
- ("mysql+mysqldb://scott:tiger@localhost/foo", True),
- ("mysql+pymysql://scott:tiger@localhost/foo", True),
- ("oracle://scott:tiger@localhost:1521/?service_name=freepdb1", True),
- ("oracle+cx_oracle://scott:tiger@tnsalias", True),
- ("oracle+oracledb://scott:tiger@localhost:1521/?service_name=freepdb1", True),
- ("", False),
- (" invalid ", False),
- ("not_a_url", False),
- (None, False),
- ("invalid://database", False),
- ("invalid://:@/test", False),
- ],
-)
-def test_is_valid_database_url(value, expected):
- assert util_strings.is_valid_database_url(value) == expected
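
Reviewer note: a rough sketch of a check that satisfies every case in the table above, assuming a dialect allow-list; the real `lfx.utils.util_strings.is_valid_database_url` may be stricter (for example, validating the URL body as well), so treat this purely as orientation:

```python
from urllib.parse import urlsplit

KNOWN_DIALECTS = {"sqlite", "postgresql", "mysql", "oracle"}  # assumed backend set


def is_valid_database_url(value) -> bool:
    """Accept SQLAlchemy-style URLs whose dialect is on the allow-list."""
    if not value or not isinstance(value, str):
        return False
    scheme = urlsplit(value).scheme
    dialect = scheme.split("+", 1)[0]  # "postgresql+psycopg2" -> "postgresql"
    return dialect in KNOWN_DIALECTS
```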
diff --git a/src/backend/tests/unit/utils/test_validate.py b/src/backend/tests/unit/utils/test_validate.py
deleted file mode 100644
index b646ca4d2e56..000000000000
--- a/src/backend/tests/unit/utils/test_validate.py
+++ /dev/null
@@ -1,652 +0,0 @@
-"""Unit tests for validate.py utilities."""
-
-import ast
-import warnings
-from unittest.mock import Mock, patch
-
-import pytest
-
-from lfx.custom.validate import (
- _create_langflow_execution_context,
- add_type_ignores,
- build_class_constructor,
- compile_class_code,
- create_class,
- create_function,
- create_type_ignore_class,
- eval_function,
- execute_function,
- extract_class_code,
- extract_class_name,
- extract_function_name,
- find_names_in_code,
- get_default_imports,
- prepare_global_scope,
- validate_code,
-)
-
-
-class TestAddTypeIgnores:
- """Test cases for add_type_ignores function."""
-
- def test_adds_type_ignore_when_missing(self):
- """Test that TypeIgnore is added when not present."""
- # Remove TypeIgnore if it exists
- if hasattr(ast, "TypeIgnore"):
- delattr(ast, "TypeIgnore")
-
- add_type_ignores()
-
- assert hasattr(ast, "TypeIgnore")
- assert issubclass(ast.TypeIgnore, ast.AST)
- assert ast.TypeIgnore._fields == ()
-
- def test_does_nothing_when_already_exists(self):
- """Test that function doesn't modify existing TypeIgnore."""
- # Ensure TypeIgnore exists first
- add_type_ignores()
- original_type_ignore = ast.TypeIgnore
-
- add_type_ignores()
-
- assert ast.TypeIgnore is original_type_ignore
-
-
-class TestValidateCode:
- """Test cases for validate_code function."""
-
- def test_valid_code_with_function(self):
- """Test validation passes for valid code with function."""
- code = """
-def hello_world():
- return "Hello, World!"
-"""
- result = validate_code(code)
- assert result["imports"]["errors"] == []
- assert result["function"]["errors"] == []
-
- def test_code_with_valid_imports(self):
- """Test validation passes for code with valid imports."""
- code = """
-import os
-import sys
-
-def get_path():
- return os.path.join(sys.path[0], "test")
-"""
- result = validate_code(code)
- assert result["imports"]["errors"] == []
- assert result["function"]["errors"] == []
-
- def test_code_with_invalid_imports(self):
- """Test validation fails for code with invalid imports."""
- code = """
-import nonexistent_module
-
-def test_func():
- return nonexistent_module.some_function()
-"""
- result = validate_code(code)
- assert len(result["imports"]["errors"]) == 1
- assert "nonexistent_module" in result["imports"]["errors"][0]
-
- def test_code_with_syntax_error(self):
- """Test validation fails for code with syntax errors."""
- code = """
-def broken_function(
- return "incomplete"
-"""
- result = validate_code(code)
- # The function should catch the syntax error and return it in the results
- assert len(result["function"]["errors"]) >= 1
- error_message = " ".join(result["function"]["errors"])
- assert (
- "SyntaxError" in error_message or "invalid syntax" in error_message or "was never closed" in error_message
- )
-
- def test_code_with_function_execution_error(self):
- """Test validation fails when function execution fails."""
- code = """
-def error_function():
- undefined_variable + 1
-"""
- result = validate_code(code)
- # This should pass parsing but may fail execution
- assert result["imports"]["errors"] == []
-
- def test_empty_code(self):
- """Test validation handles empty code."""
- result = validate_code("")
- assert result["imports"]["errors"] == []
- assert result["function"]["errors"] == []
-
- def test_code_with_multiple_imports(self):
- """Test validation handles multiple imports."""
- code = """
-import os
-import sys
-import json
-import nonexistent1
-import nonexistent2
-
-def test_func():
- return json.dumps({"path": os.getcwd()})
-"""
- result = validate_code(code)
- assert len(result["imports"]["errors"]) == 2
- assert any("nonexistent1" in err for err in result["imports"]["errors"])
- assert any("nonexistent2" in err for err in result["imports"]["errors"])
-
- @patch("lfx.custom.validate.logger")
- def test_logging_on_parse_error(self, mock_logger):
- """Test that parsing errors are logged."""
- # Structlog doesn't have opt method, so hasattr(logger, "opt") returns False
- mock_logger.debug = Mock()
-
- code = "invalid python syntax +++"
- validate_code(code)
-
- # With structlog, we expect logger.debug to be called with exc_info=True
- mock_logger.debug.assert_called_with("Error parsing code", exc_info=True)
-
-
-class TestCreateLangflowExecutionContext:
- """Test cases for _create_langflow_execution_context function."""
-
- def test_creates_context_with_langflow_imports(self):
- """Test that context includes langflow imports."""
- # The function imports modules inside try/except blocks
- # We don't need to patch anything, just test it works
- context = _create_langflow_execution_context()
-
- # Check that the context contains the expected keys
- # The actual imports may succeed or fail, but the function should handle both cases
- assert isinstance(context, dict)
- # These keys should be present regardless of import success/failure
- expected_keys = ["DataFrame", "Message", "Data", "Component", "HandleInput", "Output", "TabInput"]
- for key in expected_keys:
- assert key in context, f"Expected key '{key}' not found in context"
-
- def test_creates_mock_classes_on_import_failure(self):
- """Test that mock classes are created when imports fail."""
- # Test that the function handles import failures gracefully
- # by checking the actual implementation behavior
- with patch("builtins.__import__", side_effect=ImportError("Module not found")):
- context = _create_langflow_execution_context()
-
- # Even with import failures, the context should still be created
- assert isinstance(context, dict)
- # The function should create mock classes when imports fail
- if "DataFrame" in context:
- assert isinstance(context["DataFrame"], type)
-
- def test_includes_typing_imports(self):
- """Test that typing imports are included."""
- context = _create_langflow_execution_context()
-
- assert "Any" in context
- assert "Dict" in context
- assert "List" in context
- assert "Optional" in context
- assert "Union" in context
-
- def test_does_not_include_pandas(self):
- """Test that pandas is not included in the langflow execution context."""
- context = _create_langflow_execution_context()
- assert "pd" not in context
-
-
-class TestEvalFunction:
- """Test cases for eval_function function."""
-
- def test_evaluates_simple_function(self):
- """Test evaluation of a simple function."""
- function_string = """
-def add_numbers(a, b):
- return a + b
-"""
- func = eval_function(function_string)
- assert callable(func)
- assert func(2, 3) == 5
-
- def test_evaluates_function_with_default_args(self):
- """Test evaluation of function with default arguments."""
- function_string = """
-def greet(name="World"):
- return f"Hello, {name}!"
-"""
- func = eval_function(function_string)
- assert func() == "Hello, World!"
- assert func("Alice") == "Hello, Alice!"
-
- def test_raises_error_for_no_function(self):
- """Test that error is raised when no function is found."""
- code_string = """
-x = 42
-y = "hello"
-"""
- with pytest.raises(ValueError, match="Function string does not contain a function"):
- eval_function(code_string)
-
- def test_finds_correct_function_among_multiple(self):
- """Test that the correct function is found when multiple exist."""
- function_string = """
-def helper():
- return "helper"
-
-def main_function():
- return "main"
-"""
- func = eval_function(function_string)
- # Should return one of the functions (implementation detail)
- assert callable(func)
-
-
-class TestExecuteFunction:
- """Test cases for execute_function function."""
-
- def test_executes_function_with_args(self):
- """Test execution of function with arguments."""
- code = """
-def multiply(x, y):
- return x * y
-"""
- result = execute_function(code, "multiply", 4, 5)
- assert result == 20
-
- def test_executes_function_with_kwargs(self):
- """Test execution of function with keyword arguments."""
- code = """
-def create_message(text, urgent=False):
- prefix = "URGENT: " if urgent else ""
- return prefix + text
-"""
- result = execute_function(code, "create_message", "Hello", urgent=True)
- assert result == "URGENT: Hello"
-
- def test_executes_function_with_imports(self):
- """Test execution of function that uses imports."""
- code = """
-import os
-
-def get_current_dir():
- return os.getcwd()
-"""
- result = execute_function(code, "get_current_dir")
- assert isinstance(result, str)
-
- def test_raises_error_for_missing_module(self):
- """Test that error is raised for missing modules."""
- code = """
-import nonexistent_module
-
-def test_func():
- return nonexistent_module.test()
-"""
- with pytest.raises(ModuleNotFoundError, match="Module nonexistent_module not found"):
- execute_function(code, "test_func")
-
- def test_raises_error_for_missing_function(self):
- """Test that error is raised when function doesn't exist."""
- code = """
-def existing_function():
- return "exists"
-"""
- # The function should raise an error when the specified function doesn't exist
- with pytest.raises((ValueError, StopIteration)):
- execute_function(code, "nonexistent_function")
-
-
-class TestCreateFunction:
- """Test cases for create_function function."""
-
- def test_creates_callable_function(self):
- """Test that a callable function is created."""
- code = """
-def square(x):
- return x ** 2
-"""
- func = create_function(code, "square")
- assert callable(func)
- assert func(5) == 25
-
- def test_handles_imports_in_function(self):
- """Test that imports within function are handled."""
- code = """
-import math
-
-def calculate_area(radius):
- return math.pi * radius ** 2
-"""
- func = create_function(code, "calculate_area")
- result = func(2)
- assert abs(result - 12.566370614359172) < 0.0001
-
- def test_handles_from_imports(self):
- """Test that from imports are handled correctly."""
- code = """
-from math import sqrt
-
-def hypotenuse(a, b):
- return sqrt(a**2 + b**2)
-"""
- func = create_function(code, "hypotenuse")
- assert func(3, 4) == 5.0
-
- def test_raises_error_for_missing_module(self):
- """Test that error is raised for missing modules."""
- code = """
-import nonexistent_module
-
-def test_func():
- return "test"
-"""
- with pytest.raises(ModuleNotFoundError, match="Module nonexistent_module not found"):
- create_function(code, "test_func")
-
-
-class TestCreateClass:
- """Test cases for create_class function."""
-
- def test_creates_simple_class(self):
- """Test creation of a simple class."""
- code = """
-class TestClass:
- def __init__(self, value=None):
- self.value = value
-
- def get_value(self):
- return self.value
-"""
- cls = create_class(code, "TestClass")
- instance = cls()
- assert hasattr(instance, "__init__")
- assert hasattr(instance, "get_value")
-
- def test_handles_class_with_imports(self):
- """Test creation of class that uses imports."""
- code = """
-import json
-
-class JsonHandler:
- def __init__(self):
- self.data = {}
-
- def to_json(self):
- return json.dumps(self.data)
-"""
- cls = create_class(code, "JsonHandler")
- instance = cls()
- assert hasattr(instance, "to_json")
-
- def test_replaces_legacy_imports(self):
- """Test that legacy import statements are replaced."""
- code = """
-from langflow import CustomComponent
-
-class MyComponent(CustomComponent):
- def build(self):
- return "test"
-"""
- # Should not raise an error due to import replacement
- with patch("lfx.custom.validate.prepare_global_scope") as mock_prepare:
- mock_prepare.return_value = {"CustomComponent": type("CustomComponent", (), {})}
- with patch("lfx.custom.validate.extract_class_code") as mock_extract:
- mock_extract.return_value = Mock()
- with patch("lfx.custom.validate.compile_class_code") as mock_compile:
- mock_compile.return_value = compile("pass", "", "exec")
- with patch("lfx.custom.validate.build_class_constructor") as mock_build:
- mock_build.return_value = lambda: None
- create_class(code, "MyComponent")
-
- def test_handles_syntax_error(self):
- """Test that syntax errors are handled properly."""
- code = """
-class BrokenClass
- def __init__(self):
- pass
-"""
- with pytest.raises(ValueError, match="Syntax error in code"):
- create_class(code, "BrokenClass")
-
- def test_handles_validation_error(self):
- """Test that validation errors are handled properly."""
- code = """
-class TestClass:
- def __init__(self):
- pass
-"""
- # Create a proper ValidationError instance
- from pydantic_core import ValidationError as CoreValidationError
-
- validation_error = CoreValidationError.from_exception_data("TestClass", [])
-
- with (
- patch("lfx.custom.validate.prepare_global_scope", side_effect=validation_error),
- pytest.raises(ValueError, match=".*"),
- ):
- create_class(code, "TestClass")
-
-
-class TestHelperFunctions:
- """Test cases for helper functions."""
-
- def test_create_type_ignore_class(self):
- """Test creation of TypeIgnore class."""
- type_ignore_class = create_type_ignore_class()
- assert issubclass(type_ignore_class, ast.AST)
- assert type_ignore_class._fields == ()
-
- def test_extract_function_name(self):
- """Test extraction of function name from code."""
- code = """
-def my_function():
- return "test"
-"""
- name = extract_function_name(code)
- assert name == "my_function"
-
- def test_extract_function_name_no_function(self):
- """Test error when no function found."""
- code = "x = 42"
- with pytest.raises(ValueError, match="No function definition found"):
- extract_function_name(code)
-
- def test_extract_class_name(self):
- """Test extraction of Component class name."""
- code = """
-class MyComponent(Component):
- def build(self):
- pass
-"""
- name = extract_class_name(code)
- assert name == "MyComponent"
-
- def test_extract_class_name_no_component(self):
- """Test error when no Component subclass found."""
- code = """
-class RegularClass:
- pass
-"""
- with pytest.raises(TypeError, match="No Component subclass found"):
- extract_class_name(code)
-
- def test_extract_class_name_syntax_error(self):
- """Test error handling for syntax errors in extract_class_name."""
- code = "class BrokenClass"
- with pytest.raises(ValueError, match="Invalid Python code"):
- extract_class_name(code)
-
- def test_find_names_in_code(self):
- """Test finding specific names in code."""
- code = "from typing import Optional, List\ndata: Optional[List[str]] = None"
- names = ["Optional", "List", "Dict", "Union"]
- found = find_names_in_code(code, names)
- assert found == {"Optional", "List"}
-
- def test_find_names_in_code_none_found(self):
- """Test when no names are found in code."""
- code = "x = 42"
- names = ["Optional", "List"]
- found = find_names_in_code(code, names)
- assert found == set()
-
-
-class TestPrepareGlobalScope:
- """Test cases for prepare_global_scope function."""
-
- def test_handles_imports(self):
- """Test that imports are properly handled."""
- code = """
-import os
-import sys
-
-def test():
- pass
-"""
- module = ast.parse(code)
- scope = prepare_global_scope(module)
- assert "os" in scope
- assert "sys" in scope
-
- def test_handles_from_imports(self):
- """Test that from imports are properly handled."""
- code = """
-from os import path
-from sys import version
-
-def test():
- pass
-"""
- module = ast.parse(code)
- scope = prepare_global_scope(module)
- assert "path" in scope
- assert "version" in scope
-
- def test_handles_import_errors(self):
- """Test that import errors are properly raised."""
- code = """
-import nonexistent_module
-
-def test():
- pass
-"""
- module = ast.parse(code)
- with pytest.raises(ModuleNotFoundError, match="Module nonexistent_module not found"):
- prepare_global_scope(module)
-
- def test_handles_langchain_warnings(self):
- """Test that langchain warnings are suppressed."""
- code = """
-from langchain_core.messages import BaseMessage
-
-def test():
- pass
-"""
- module = ast.parse(code)
-
- with patch("importlib.import_module") as mock_import:
- mock_module = Mock()
- mock_module.BaseMessage = Mock()
- mock_import.return_value = mock_module
-
- with warnings.catch_warnings(record=True) as w:
- warnings.simplefilter("always")
- prepare_global_scope(module)
- # Should not have langchain warnings
- langchain_warnings = [warning for warning in w if "langchain" in str(warning.message).lower()]
- assert len(langchain_warnings) == 0
-
- def test_executes_definitions(self):
- """Test that class and function definitions are executed."""
- code = """
-def helper():
- return "helper"
-
-class TestClass:
- value = 42
-"""
- module = ast.parse(code)
- scope = prepare_global_scope(module)
- assert "helper" in scope
- assert "TestClass" in scope
- assert callable(scope["helper"])
- assert scope["TestClass"].value == 42
-
-
-class TestClassCodeOperations:
- """Test cases for class code operation functions."""
-
- def test_extract_class_code(self):
- """Test extraction of class code from module."""
- code = """
-def helper():
- pass
-
-class MyClass:
- def method(self):
- pass
-"""
- module = ast.parse(code)
- class_code = extract_class_code(module, "MyClass")
- assert isinstance(class_code, ast.ClassDef)
- assert class_code.name == "MyClass"
-
- def test_compile_class_code(self):
- """Test compilation of class code."""
- code = """
-class TestClass:
- def method(self):
- return "test"
-"""
- module = ast.parse(code)
- class_code = extract_class_code(module, "TestClass")
- compiled = compile_class_code(class_code)
- assert compiled is not None
-
- def test_build_class_constructor(self):
- """Test building class constructor."""
- code = """
-class SimpleClass:
- def __init__(self):
- self.value = "test"
-"""
- module = ast.parse(code)
- class_code = extract_class_code(module, "SimpleClass")
- compiled = compile_class_code(class_code)
-
- constructor = build_class_constructor(compiled, {}, "SimpleClass")
- assert constructor is not None
-
-
-class TestGetDefaultImports:
- """Test cases for get_default_imports function."""
-
- @patch("lfx.field_typing.constants.CUSTOM_COMPONENT_SUPPORTED_TYPES", {"TestType": Mock()})
- def test_returns_default_imports(self):
- """Test that default imports are returned."""
- code = "TestType and Optional"
-
- with patch("importlib.import_module") as mock_import:
- mock_module = Mock()
- mock_module.TestType = Mock()
- mock_import.return_value = mock_module
-
- imports = get_default_imports(code)
- assert "Optional" in imports
- assert "List" in imports
- assert "Dict" in imports
- assert "Union" in imports
-
- def test_includes_langflow_imports(self):
- """Test that langflow imports are included when found in code."""
- # Use an actual type from CUSTOM_COMPONENT_SUPPORTED_TYPES
- code = "Chain is used here"
-
- with patch("lfx.custom.validate.importlib") as mock_importlib:
- mock_module = Mock()
- mock_module.Chain = Mock()
- mock_importlib.import_module.return_value = mock_module
-
- imports = get_default_imports(code)
- assert "Chain" in imports
diff --git a/src/lfx/src/lfx/__init__.py b/src/lfx/src/lfx/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/src/lfx/src/lfx/base/__init__.py b/src/lfx/src/lfx/base/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/src/lfx/src/lfx/base/agents/__init__.py b/src/lfx/src/lfx/base/agents/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/src/lfx/src/lfx/base/agents/crewai/__init__.py b/src/lfx/src/lfx/base/agents/crewai/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/src/lfx/src/lfx/base/astra_assistants/__init__.py b/src/lfx/src/lfx/base/astra_assistants/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/src/lfx/src/lfx/base/chains/__init__.py b/src/lfx/src/lfx/base/chains/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/src/lfx/src/lfx/base/composio/__init__.py b/src/lfx/src/lfx/base/composio/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/src/lfx/src/lfx/base/compressors/__init__.py b/src/lfx/src/lfx/base/compressors/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/src/lfx/src/lfx/base/curl/__init__.py b/src/lfx/src/lfx/base/curl/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/src/lfx/src/lfx/base/document_transformers/__init__.py b/src/lfx/src/lfx/base/document_transformers/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/src/lfx/src/lfx/base/embeddings/__init__.py b/src/lfx/src/lfx/base/embeddings/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/src/lfx/src/lfx/base/flow_processing/__init__.py b/src/lfx/src/lfx/base/flow_processing/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/src/lfx/src/lfx/base/huggingface/__init__.py b/src/lfx/src/lfx/base/huggingface/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/src/lfx/src/lfx/base/io/__init__.py b/src/lfx/src/lfx/base/io/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/src/lfx/src/lfx/base/langchain_utilities/__init__.py b/src/lfx/src/lfx/base/langchain_utilities/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/src/lfx/src/lfx/base/langwatch/__init__.py b/src/lfx/src/lfx/base/langwatch/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/src/lfx/src/lfx/base/mcp/__init__.py b/src/lfx/src/lfx/base/mcp/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/src/lfx/src/lfx/base/memory/__init__.py b/src/lfx/src/lfx/base/memory/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/src/lfx/src/lfx/base/processing/__init__.py b/src/lfx/src/lfx/base/processing/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/src/lfx/src/lfx/base/prompts/__init__.py b/src/lfx/src/lfx/base/prompts/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/src/lfx/src/lfx/base/textsplitters/__init__.py b/src/lfx/src/lfx/base/textsplitters/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/src/lfx/src/lfx/base/tools/__init__.py b/src/lfx/src/lfx/base/tools/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/src/lfx/src/lfx/base/vectorstores/__init__.py b/src/lfx/src/lfx/base/vectorstores/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/src/lfx/src/lfx/custom/custom_component/__init__.py b/src/lfx/src/lfx/custom/custom_component/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/src/lfx/src/lfx/exceptions/__init__.py b/src/lfx/src/lfx/exceptions/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/src/lfx/src/lfx/graph/edge/__init__.py b/src/lfx/src/lfx/graph/edge/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/src/lfx/src/lfx/graph/graph/__init__.py b/src/lfx/src/lfx/graph/graph/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/src/lfx/src/lfx/graph/state/__init__.py b/src/lfx/src/lfx/graph/state/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/src/lfx/src/lfx/graph/vertex/__init__.py b/src/lfx/src/lfx/graph/vertex/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/src/lfx/src/lfx/template/field/__init__.py b/src/lfx/src/lfx/template/field/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/src/lfx/src/lfx/template/template/__init__.py b/src/lfx/src/lfx/template/template/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/src/lfx/tests/data/BasicChatwithPromptandHistory.json b/src/lfx/tests/data/BasicChatwithPromptandHistory.json
deleted file mode 100644
index 658ac0479077..000000000000
--- a/src/lfx/tests/data/BasicChatwithPromptandHistory.json
+++ /dev/null
@@ -1 +0,0 @@
-{"description":"A simple chat with a custom prompt template and conversational memory buffer","name":"Basic Chat with Prompt and History (2)","data":{"nodes":[{"width":384,"height":621,"id":"ChatOpenAI-vy7fV","type":"genericNode","position":{"x":170.87326389541306,"y":465.8628482073749},"data":{"type":"ChatOpenAI","node":{"template":{"callbacks":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"callbacks","advanced":false,"dynamic":false,"info":"","type":"langchain.callbacks.base.BaseCallbackHandler","list":true},"cache":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"cache","advanced":false,"dynamic":false,"info":"","type":"bool","list":false},"client":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"client","advanced":false,"dynamic":false,"info":"","type":"Any","list":false},"max_retries":{"required":false,"placeholder":"","show":false,"multiline":false,"value":6,"password":false,"name":"max_retries","advanced":false,"dynamic":false,"info":"","type":"int","list":false},"max_tokens":{"required":false,"placeholder":"","show":true,"multiline":false,"password":true,"name":"max_tokens","advanced":false,"dynamic":false,"info":"","type":"int","list":false,"value":""},"metadata":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"metadata","advanced":false,"dynamic":false,"info":"","type":"code","list":false},"model_kwargs":{"required":false,"placeholder":"","show":true,"multiline":false,"password":false,"name":"model_kwargs","advanced":true,"dynamic":false,"info":"","type":"code","list":false},"model_name":{"required":false,"placeholder":"","show":true,"multiline":false,"value":"gpt-3.5-turbo","password":false,"options":["gpt-3.5-turbo-0613","gpt-3.5-turbo","gpt-3.5-turbo-16k-0613","gpt-3.5-turbo-16k","gpt-4-0613","gpt-4-32k-0613","gpt-4","gpt-4-32k"],"name":"model_name","advanced":false,"dynamic":false,"info":"","type":"str","list":true},"n":{"required":false,"placeholder":"","show":false,"multiline":false,"value":1,"password":false,"name":"n","advanced":false,"dynamic":false,"info":"","type":"int","list":false},"openai_api_base":{"required":false,"placeholder":"","show":true,"multiline":false,"password":false,"name":"openai_api_base","display_name":"OpenAI API Base","advanced":false,"dynamic":false,"info":"\nThe base URL of the OpenAI API. 
Defaults to https://api.openai.com/v1.\n\nYou can change this to use other APIs like JinaChat, LocalAI and Prem.\n","type":"str","list":false},"openai_api_key":{"required":false,"placeholder":"","show":true,"multiline":false,"value":"","password":true,"name":"openai_api_key","display_name":"OpenAI API Key","advanced":false,"dynamic":false,"info":"","type":"str","list":false},"openai_organization":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"openai_organization","display_name":"OpenAI Organization","advanced":false,"dynamic":false,"info":"","type":"str","list":false},"openai_proxy":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"openai_proxy","display_name":"OpenAI Proxy","advanced":false,"dynamic":false,"info":"","type":"str","list":false},"request_timeout":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"request_timeout","advanced":false,"dynamic":false,"info":"","type":"float","list":false,"value":60},"streaming":{"required":false,"placeholder":"","show":false,"multiline":false,"value":false,"password":false,"name":"streaming","advanced":false,"dynamic":false,"info":"","type":"bool","list":false},"tags":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"tags","advanced":false,"dynamic":false,"info":"","type":"str","list":true},"temperature":{"required":false,"placeholder":"","show":true,"multiline":false,"value":0.7,"password":false,"name":"temperature","advanced":false,"dynamic":false,"info":"","type":"float","list":false},"tiktoken_model_name":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"tiktoken_model_name","advanced":false,"dynamic":false,"info":"","type":"str","list":false},"verbose":{"required":false,"placeholder":"","show":false,"multiline":false,"value":false,"password":false,"name":"verbose","advanced":false,"dynamic":false,"info":"","type":"bool","list":false},"_type":"ChatOpenAI"},"description":"`OpenAI` Chat large language models 
API.","base_classes":["ChatOpenAI","BaseChatModel","BaseLanguageModel","BaseLLM"],"display_name":"ChatOpenAI","documentation":"https://python.langchain.com/docs/modules/model_io/models/chat/integrations/openai"},"id":"ChatOpenAI-vy7fV","value":null},"selected":true,"dragging":false,"positionAbsolute":{"x":170.87326389541306,"y":465.8628482073749}},{"width":384,"height":307,"id":"LLMChain-UjBh1","type":"genericNode","position":{"x":1250.1806448178158,"y":588.4657451068704},"data":{"type":"LLMChain","node":{"template":{"callbacks":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"callbacks","advanced":false,"dynamic":false,"info":"","type":"langchain.callbacks.base.BaseCallbackHandler","list":true},"llm":{"required":true,"placeholder":"","show":true,"multiline":false,"password":false,"name":"llm","advanced":false,"dynamic":false,"info":"","type":"BaseLanguageModel","list":false},"memory":{"required":false,"placeholder":"","show":true,"multiline":false,"password":false,"name":"memory","advanced":false,"dynamic":false,"info":"","type":"BaseMemory","list":false},"output_parser":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"output_parser","advanced":false,"dynamic":false,"info":"","type":"BaseLLMOutputParser","list":false},"prompt":{"required":true,"placeholder":"","show":true,"multiline":false,"password":false,"name":"prompt","advanced":false,"dynamic":false,"info":"","type":"BasePromptTemplate","list":false},"llm_kwargs":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"llm_kwargs","advanced":false,"dynamic":false,"info":"","type":"code","list":false},"metadata":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"metadata","advanced":false,"dynamic":false,"info":"","type":"code","list":false},"output_key":{"required":true,"placeholder":"","show":true,"multiline":false,"value":"text","password":false,"name":"output_key","advanced":true,"dynamic":false,"info":"","type":"str","list":false},"return_final_only":{"required":false,"placeholder":"","show":false,"multiline":false,"value":true,"password":false,"name":"return_final_only","advanced":false,"dynamic":false,"info":"","type":"bool","list":false},"tags":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"tags","advanced":false,"dynamic":false,"info":"","type":"str","list":true},"verbose":{"required":false,"placeholder":"","show":false,"multiline":false,"value":false,"password":false,"name":"verbose","advanced":true,"dynamic":false,"info":"","type":"bool","list":false},"_type":"LLMChain"},"description":"Chain to run queries against 
LLMs.","base_classes":["LLMChain","Chain","function"],"display_name":"LLMChain","documentation":"https://python.langchain.com/docs/modules/chains/foundational/llm_chain"},"id":"LLMChain-UjBh1","value":null},"selected":false,"positionAbsolute":{"x":1250.1806448178158,"y":588.4657451068704},"dragging":false},{"width":384,"height":273,"id":"PromptTemplate-5Q0W8","type":"genericNode","position":{"x":172.18064481781585,"y":67.26574510687044},"data":{"type":"PromptTemplate","node":{"template":{"output_parser":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"output_parser","advanced":false,"dynamic":false,"info":"","type":"BaseOutputParser","list":false},"input_variables":{"required":true,"placeholder":"","show":false,"multiline":false,"password":false,"name":"input_variables","advanced":false,"dynamic":false,"info":"","type":"str","list":true,"value":["history","text"]},"partial_variables":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"partial_variables","advanced":false,"dynamic":false,"info":"","type":"code","list":false},"template":{"required":true,"placeholder":"","show":true,"multiline":true,"password":false,"name":"template","advanced":false,"dynamic":false,"info":"","type":"prompt","list":false,"value":"The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n\nCurrent conversation:\n\n{history}\nHuman: {text}\nAI:"},"template_format":{"required":false,"placeholder":"","show":false,"multiline":false,"value":"f-string","password":false,"name":"template_format","advanced":false,"dynamic":false,"info":"","type":"str","list":false},"validate_template":{"required":false,"placeholder":"","show":false,"multiline":false,"value":true,"password":false,"name":"validate_template","advanced":false,"dynamic":false,"info":"","type":"bool","list":false},"_type":"PromptTemplate","history":{"required":false,"placeholder":"","show":true,"multiline":true,"value":"","password":false,"name":"history","display_name":"history","advanced":false,"input_types":["Document","BaseOutputParser"],"dynamic":false,"info":"","type":"str","list":false},"text":{"required":false,"placeholder":"","show":true,"multiline":true,"value":"","password":false,"name":"text","display_name":"text","advanced":false,"input_types":["Document","BaseOutputParser"],"dynamic":false,"info":"","type":"str","list":false}},"description":"A prompt template for a language 
model.","base_classes":["StringPromptTemplate","PromptTemplate","BasePromptTemplate"],"name":"","display_name":"PromptTemplate","documentation":"https://python.langchain.com/docs/modules/model_io/prompts/prompt_templates/","custom_fields":{"template":["history","text"]},"output_types":[],"field_formatters":{"formatters":{"openai_api_key":{}},"base_formatters":{"kwargs":{},"optional":{},"list":{},"dict":{},"union":{},"multiline":{},"show":{},"password":{},"default":{},"headers":{},"dict_code_file":{},"model_fields":{"MODEL_DICT":{"OpenAI":["text-davinci-003","text-davinci-002","text-curie-001","text-babbage-001","text-ada-001"],"ChatOpenAI":["gpt-3.5-turbo-0613","gpt-3.5-turbo","gpt-3.5-turbo-16k-0613","gpt-3.5-turbo-16k","gpt-4-0613","gpt-4-32k-0613","gpt-4","gpt-4-32k"],"Anthropic":["claude-v1","claude-v1-100k","claude-instant-v1","claude-instant-v1-100k","claude-v1.3","claude-v1.3-100k","claude-v1.2","claude-v1.0","claude-instant-v1.1","claude-instant-v1.1-100k","claude-instant-v1.0"],"ChatAnthropic":["claude-v1","claude-v1-100k","claude-instant-v1","claude-instant-v1-100k","claude-v1.3","claude-v1.3-100k","claude-v1.2","claude-v1.0","claude-instant-v1.1","claude-instant-v1.1-100k","claude-instant-v1.0"]}}}},"beta":false,"error":null},"id":"PromptTemplate-5Q0W8","value":null},"selected":false,"dragging":false,"positionAbsolute":{"x":172.18064481781585,"y":67.26574510687044}},{"width":384,"height":561,"id":"ConversationBufferMemory-Lu2Nb","type":"genericNode","position":{"x":802.1806448178158,"y":43.265745106870426},"data":{"type":"ConversationBufferMemory","node":{"template":{"chat_memory":{"required":false,"placeholder":"","show":true,"multiline":false,"password":false,"name":"chat_memory","advanced":false,"dynamic":false,"info":"","type":"BaseChatMessageHistory","list":false},"ai_prefix":{"required":false,"placeholder":"","show":false,"multiline":false,"value":"AI","password":false,"name":"ai_prefix","advanced":false,"dynamic":false,"info":"","type":"str","list":false},"human_prefix":{"required":false,"placeholder":"","show":false,"multiline":false,"value":"Human","password":false,"name":"human_prefix","advanced":false,"dynamic":false,"info":"","type":"str","list":false},"input_key":{"required":false,"placeholder":"","show":true,"multiline":false,"value":"","password":false,"name":"input_key","advanced":false,"dynamic":false,"info":"The variable to be used as Chat Input when more than one variable is available.","type":"str","list":false},"memory_key":{"required":false,"placeholder":"","show":true,"multiline":false,"value":"history","password":false,"name":"memory_key","advanced":false,"dynamic":false,"info":"","type":"str","list":false},"output_key":{"required":false,"placeholder":"","show":true,"multiline":false,"value":"","password":false,"name":"output_key","advanced":false,"dynamic":false,"info":"The variable to be used as Chat Output (e.g. 
answer in a ConversationalRetrievalChain)","type":"str","list":false},"return_messages":{"required":false,"placeholder":"","show":true,"multiline":false,"password":false,"name":"return_messages","advanced":false,"dynamic":false,"info":"","type":"bool","list":false},"_type":"ConversationBufferMemory"},"description":"Buffer for storing conversation memory.","base_classes":["BaseMemory","ConversationBufferMemory","BaseChatMemory"],"display_name":"ConversationBufferMemory","documentation":"https://python.langchain.com/docs/modules/memory/how_to/buffer"},"id":"ConversationBufferMemory-Lu2Nb","value":null},"selected":false,"positionAbsolute":{"x":802.1806448178158,"y":43.265745106870426},"dragging":false}],"edges":[{"source":"ChatOpenAI-vy7fV","sourceHandle":"ChatOpenAI|ChatOpenAI-vy7fV|ChatOpenAI|BaseChatModel|BaseLanguageModel|BaseLLM","target":"LLMChain-UjBh1","targetHandle":"BaseLanguageModel|llm|LLMChain-UjBh1","className":"","id":"reactflow__edge-ChatOpenAI-vy7fVChatOpenAI|ChatOpenAI-vy7fV|ChatOpenAI|BaseChatModel|BaseLanguageModel|BaseLLM-LLMChain-UjBh1BaseLanguageModel|llm|LLMChain-UjBh1","selected":false,"animated":false,"style":{"stroke":"#555"}},{"source":"PromptTemplate-5Q0W8","sourceHandle":"PromptTemplate|PromptTemplate-5Q0W8|StringPromptTemplate|PromptTemplate|BasePromptTemplate","target":"LLMChain-UjBh1","targetHandle":"BasePromptTemplate|prompt|LLMChain-UjBh1","className":"","id":"reactflow__edge-PromptTemplate-5Q0W8PromptTemplate|PromptTemplate-5Q0W8|StringPromptTemplate|PromptTemplate|BasePromptTemplate-LLMChain-UjBh1BasePromptTemplate|prompt|LLMChain-UjBh1","animated":false,"style":{"stroke":"#555"}},{"source":"ConversationBufferMemory-Lu2Nb","sourceHandle":"ConversationBufferMemory|ConversationBufferMemory-Lu2Nb|BaseMemory|ConversationBufferMemory|BaseChatMemory","target":"LLMChain-UjBh1","targetHandle":"BaseMemory|memory|LLMChain-UjBh1","className":"","id":"reactflow__edge-ConversationBufferMemory-Lu2NbConversationBufferMemory|ConversationBufferMemory-Lu2Nb|BaseMemory|ConversationBufferMemory|BaseChatMemory-LLMChain-UjBh1BaseMemory|memory|LLMChain-UjBh1","animated":false,"style":{"stroke":"#555"}}],"viewport":{"x":-64.70809474436828,"y":44.7801470275611,"zoom":0.6622606580990782}},"id":"0cdfb2f2-19de-4e15-99fa-fd5203b38053"}
\ No newline at end of file
diff --git a/src/lfx/tests/data/ChatInputTest.json b/src/lfx/tests/data/ChatInputTest.json
deleted file mode 100644
index 3afebc4c92c6..000000000000
--- a/src/lfx/tests/data/ChatInputTest.json
+++ /dev/null
@@ -1,918 +0,0 @@
-{
- "name": "ChatInputTest",
- "description": "",
- "data": {
- "nodes": [
- {
- "width": 384,
- "height": 359,
- "id": "PromptTemplate-IKKOx",
- "type": "genericNode",
- "position": {
- "x": 880,
- "y": 646.9375
- },
- "data": {
- "type": "PromptTemplate",
- "node": {
- "template": {
- "output_parser": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "output_parser",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "BaseOutputParser",
- "list": false
- },
- "input_variables": {
- "required": true,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "input_variables",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "str",
- "list": true,
- "value": [
- "input"
- ]
- },
- "partial_variables": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "partial_variables",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "code",
- "list": false
- },
- "template": {
- "required": true,
- "placeholder": "",
- "show": true,
- "multiline": true,
- "password": false,
- "name": "template",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "prompt",
- "list": false,
- "value": "Input: {input}\nAI:"
- },
- "template_format": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "value": "f-string",
- "password": false,
- "name": "template_format",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "str",
- "list": false
- },
- "validate_template": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "value": true,
- "password": false,
- "name": "validate_template",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "bool",
- "list": false
- },
- "_type": "PromptTemplate",
- "input": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": true,
- "value": "",
- "password": false,
- "name": "input",
- "display_name": "input",
- "advanced": false,
- "input_types": [
- "Document",
- "BaseOutputParser",
- "str"
- ],
- "dynamic": false,
- "info": "",
- "type": "str",
- "list": false
- }
- },
- "description": "A prompt template for a language model.",
- "base_classes": [
- "BasePromptTemplate",
- "PromptTemplate",
- "StringPromptTemplate"
- ],
- "name": "",
- "display_name": "PromptTemplate",
- "documentation": "https://python.langchain.com/docs/modules/model_io/prompts/prompt_templates/",
- "custom_fields": {
- "": [
- "input"
- ],
- "template": [
- "input"
- ]
- },
- "output_types": [],
- "field_formatters": {
- "formatters": {
- "openai_api_key": {}
- },
- "base_formatters": {
- "kwargs": {},
- "optional": {},
- "list": {},
- "dict": {},
- "union": {},
- "multiline": {},
- "show": {},
- "password": {},
- "default": {},
- "headers": {},
- "dict_code_file": {},
- "model_fields": {
- "MODEL_DICT": {
- "OpenAI": [
- "text-davinci-003",
- "text-davinci-002",
- "text-curie-001",
- "text-babbage-001",
- "text-ada-001"
- ],
- "ChatOpenAI": [
- "gpt-3.5-turbo-0613",
- "gpt-3.5-turbo",
- "gpt-3.5-turbo-16k-0613",
- "gpt-3.5-turbo-16k",
- "gpt-4-0613",
- "gpt-4-32k-0613",
- "gpt-4",
- "gpt-4-32k"
- ],
- "Anthropic": [
- "claude-v1",
- "claude-v1-100k",
- "claude-instant-v1",
- "claude-instant-v1-100k",
- "claude-v1.3",
- "claude-v1.3-100k",
- "claude-v1.2",
- "claude-v1.0",
- "claude-instant-v1.1",
- "claude-instant-v1.1-100k",
- "claude-instant-v1.0"
- ],
- "ChatAnthropic": [
- "claude-v1",
- "claude-v1-100k",
- "claude-instant-v1",
- "claude-instant-v1-100k",
- "claude-v1.3",
- "claude-v1.3-100k",
- "claude-v1.2",
- "claude-v1.0",
- "claude-instant-v1.1",
- "claude-instant-v1.1-100k",
- "claude-instant-v1.0"
- ]
- }
- }
- }
- },
- "beta": false,
- "error": null
- },
- "id": "PromptTemplate-IKKOx"
- },
- "selected": false,
- "positionAbsolute": {
- "x": 880,
- "y": 646.9375
- },
- "dragging": false
- },
- {
- "width": 384,
- "height": 307,
- "id": "LLMChain-e2dhN",
- "type": "genericNode",
- "position": {
- "x": 1449.330344958542,
- "y": 880.1760221487797
- },
- "data": {
- "type": "LLMChain",
- "node": {
- "template": {
- "callbacks": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "callbacks",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "langchain.callbacks.base.BaseCallbackHandler",
- "list": true
- },
- "llm": {
- "required": true,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "password": false,
- "name": "llm",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "BaseLanguageModel",
- "list": false
- },
- "memory": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "password": false,
- "name": "memory",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "BaseMemory",
- "list": false
- },
- "output_parser": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "output_parser",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "BaseLLMOutputParser",
- "list": false
- },
- "prompt": {
- "required": true,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "password": false,
- "name": "prompt",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "BasePromptTemplate",
- "list": false
- },
- "llm_kwargs": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "llm_kwargs",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "code",
- "list": false
- },
- "metadata": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "metadata",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "code",
- "list": false
- },
- "output_key": {
- "required": true,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "value": "text",
- "password": false,
- "name": "output_key",
- "advanced": true,
- "dynamic": false,
- "info": "",
- "type": "str",
- "list": false
- },
- "return_final_only": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "value": true,
- "password": false,
- "name": "return_final_only",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "bool",
- "list": false
- },
- "tags": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "tags",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "str",
- "list": true
- },
- "verbose": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "value": false,
- "password": false,
- "name": "verbose",
- "advanced": true,
- "dynamic": false,
- "info": "",
- "type": "bool",
- "list": false
- },
- "_type": "LLMChain"
- },
- "description": "Chain to run queries against LLMs.",
- "base_classes": [
- "Chain",
- "LLMChain",
- "function",
- "Text"
- ],
- "display_name": "LLMChain",
- "custom_fields": {},
- "output_types": [],
- "documentation": "https://python.langchain.com/docs/modules/chains/foundational/llm_chain",
- "beta": false,
- "error": null
- },
- "id": "LLMChain-e2dhN"
- },
- "positionAbsolute": {
- "x": 1449.330344958542,
- "y": 880.1760221487797
- }
- },
- {
- "width": 384,
- "height": 621,
- "id": "ChatOpenAI-2I57f",
- "type": "genericNode",
- "position": {
- "x": 393.3551923753797,
- "y": 1061.025177453298
- },
- "data": {
- "type": "ChatOpenAI",
- "node": {
- "template": {
- "callbacks": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "callbacks",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "langchain.callbacks.base.BaseCallbackHandler",
- "list": true
- },
- "cache": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "cache",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "bool",
- "list": false
- },
- "client": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "client",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "Any",
- "list": false
- },
- "max_retries": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "value": 6,
- "password": false,
- "name": "max_retries",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "int",
- "list": false
- },
- "max_tokens": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "password": true,
- "name": "max_tokens",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "int",
- "list": false,
- "value": ""
- },
- "metadata": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "metadata",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "code",
- "list": false
- },
- "model_kwargs": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "password": false,
- "name": "model_kwargs",
- "advanced": true,
- "dynamic": false,
- "info": "",
- "type": "code",
- "list": false
- },
- "model_name": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "value": "gpt-3.5-turbo-0613",
- "password": false,
- "options": [
- "gpt-3.5-turbo-0613",
- "gpt-3.5-turbo",
- "gpt-3.5-turbo-16k-0613",
- "gpt-3.5-turbo-16k",
- "gpt-4-0613",
- "gpt-4-32k-0613",
- "gpt-4",
- "gpt-4-32k"
- ],
- "name": "model_name",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "str",
- "list": true
- },
- "n": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "value": 1,
- "password": false,
- "name": "n",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "int",
- "list": false
- },
- "openai_api_base": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "password": false,
- "name": "openai_api_base",
- "display_name": "OpenAI API Base",
- "advanced": false,
- "dynamic": false,
- "info": "\nThe base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\n\nYou can change this to use other APIs like JinaChat, LocalAI and Prem.\n",
- "type": "str",
- "list": false
- },
- "openai_api_key": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "value": "",
- "password": true,
- "name": "openai_api_key",
- "display_name": "OpenAI API Key",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "str",
- "list": false
- },
- "openai_organization": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "openai_organization",
- "display_name": "OpenAI Organization",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "str",
- "list": false
- },
- "openai_proxy": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "openai_proxy",
- "display_name": "OpenAI Proxy",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "str",
- "list": false
- },
- "request_timeout": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "request_timeout",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "float",
- "list": false
- },
- "streaming": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "value": false,
- "password": false,
- "name": "streaming",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "bool",
- "list": false
- },
- "tags": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "tags",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "str",
- "list": true
- },
- "temperature": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "value": 0.7,
- "password": false,
- "name": "temperature",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "float",
- "list": false
- },
- "tiktoken_model_name": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "tiktoken_model_name",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "str",
- "list": false
- },
- "verbose": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "value": false,
- "password": false,
- "name": "verbose",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "bool",
- "list": false
- },
- "_type": "ChatOpenAI"
- },
- "description": "`OpenAI` Chat large language models API.",
- "base_classes": [
- "BaseChatModel",
- "ChatOpenAI",
- "BaseLanguageModel",
- "BaseLLM"
- ],
- "display_name": "ChatOpenAI",
- "custom_fields": {},
- "output_types": [],
- "documentation": "https://python.langchain.com/docs/modules/model_io/models/chat/integrations/openai",
- "beta": false,
- "error": null
- },
- "id": "ChatOpenAI-2I57f"
- },
- "selected": false,
- "positionAbsolute": {
- "x": 393.3551923753797,
- "y": 1061.025177453298
- },
- "dragging": false
- },
- {
- "width": 384,
- "height": 359,
- "id": "ChatInput-207IY",
- "type": "genericNode",
- "position": {
- "x": 415.1018926651509,
- "y": 506.62736462360317
- },
- "data": {
- "type": "ChatInput",
- "node": {
- "template": {
- "code": {
- "dynamic": true,
- "required": true,
- "placeholder": "",
- "show": false,
- "multiline": true,
- "value": "from typing import Optional\nfrom lfx.custom import CustomComponent\n\n\nclass ChatInput(CustomComponent):\n display_name = \"Chat Input\"\n\n def build(self, message: Optional[str] = \"\") -> str:\n return message\n",
- "password": false,
- "name": "code",
- "advanced": false,
- "type": "code",
- "list": false
- },
- "_type": "CustomComponent",
- "message": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "value": "",
- "password": false,
- "name": "message",
- "display_name": "message",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "str",
- "list": false
- }
- },
- "description": "Used to get user input from the chat.",
- "base_classes": [
- "str"
- ],
- "display_name": "Chat Input",
- "custom_fields": {
- "message": null
- },
- "output_types": [
- "ChatInput"
- ],
- "documentation": "",
- "beta": true,
- "error": null
- },
- "id": "ChatInput-207IY"
- },
- "positionAbsolute": {
- "x": 415.1018926651509,
- "y": 506.62736462360317
- }
- },
- {
- "width": 384,
- "height": 389,
- "id": "ChatOutput-1jlJy",
- "type": "genericNode",
- "position": {
- "x": 2002.8008888732943,
- "y": 926.1397178702218
- },
- "data": {
- "type": "ChatOutput",
- "node": {
- "template": {
- "code": {
- "dynamic": true,
- "required": true,
- "placeholder": "",
- "show": true,
- "multiline": true,
- "value": "from typing import Optional, Text\nfrom langflow.api.v1.schemas import ChatMessage\nfrom langflow.services.utils import get_chat_manager\nfrom lfx.custom import CustomComponent\nfrom anyio.from_thread import start_blocking_portal\nfrom lfx.log.logger import logger\n\n\nclass ChatOutput(CustomComponent):\n display_name = \"Chat Output\"\n description = \"Used to send a message to the chat.\"\n\n field_config = {\n \"code\": {\n \"show\": False,\n }\n }\n\n def build_config(self):\n return {\"message\": {\"input_types\": [\"Text\"]}}\n\n def build(self, message: Optional[Text], is_ai: bool = False) -> Text:\n if not message:\n return \"\"\n try:\n chat_manager = get_chat_manager()\n chat_message = ChatMessage(message=message, is_bot=is_ai)\n # send_message is a coroutine\n # run in a thread safe manner\n with start_blocking_portal() as portal:\n portal.call(chat_manager.send_message, chat_message)\n chat_manager.chat_history.add_message(\n chat_manager.cache_manager.current_client_id, chat_message\n )\n except Exception as exc:\n logger.exception(exc)\n logger.debug(f\"Error sending message to chat: {exc}\")\n self.repr_value = message\n return message\n",
- "password": false,
- "name": "code",
- "advanced": false,
- "type": "code",
- "list": false
- },
- "_type": "CustomComponent",
- "is_ai": {
- "required": true,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "value": true,
- "password": false,
- "name": "is_ai",
- "display_name": "is_ai",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "bool",
- "list": false
- },
- "message": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "password": false,
- "name": "message",
- "display_name": "message",
- "advanced": false,
- "input_types": [
- "Text"
- ],
- "dynamic": false,
- "info": "",
- "type": "Text",
- "list": false
- }
- },
- "description": "Used to send a message to the chat.",
- "base_classes": [
- "str"
- ],
- "display_name": "Chat Output",
- "custom_fields": {
- "is_ai": null,
- "message": null
- },
- "output_types": [
- "ChatOutput"
- ],
- "documentation": "",
- "beta": true,
- "error": null
- },
- "id": "ChatOutput-1jlJy"
- },
- "selected": true,
- "dragging": false,
- "positionAbsolute": {
- "x": 2002.8008888732943,
- "y": 926.1397178702218
- }
- }
- ],
- "edges": [
- {
- "source": "PromptTemplate-IKKOx",
- "sourceHandle": "PromptTemplate|PromptTemplate-IKKOx|BasePromptTemplate|PromptTemplate|StringPromptTemplate",
- "target": "LLMChain-e2dhN",
- "targetHandle": "BasePromptTemplate|prompt|LLMChain-e2dhN",
- "style": {
- "stroke": "#555"
- },
- "className": "",
- "animated": false,
- "id": "reactflow__edge-PromptTemplate-IKKOxPromptTemplate|PromptTemplate-IKKOx|StringPromptTemplate|BasePromptTemplate|PromptTemplate-LLMChain-e2dhNBasePromptTemplate|prompt|LLMChain-e2dhN"
- },
- {
- "source": "ChatOpenAI-2I57f",
- "sourceHandle": "ChatOpenAI|ChatOpenAI-2I57f|BaseChatModel|ChatOpenAI|BaseLanguageModel|BaseLLM",
- "target": "LLMChain-e2dhN",
- "targetHandle": "BaseLanguageModel|llm|LLMChain-e2dhN",
- "style": {
- "stroke": "#555"
- },
- "className": "",
- "animated": false,
- "id": "reactflow__edge-ChatOpenAI-2I57fChatOpenAI|ChatOpenAI-2I57f|BaseChatModel|ChatOpenAI|BaseLanguageModel|BaseLLM-LLMChain-e2dhNBaseLanguageModel|llm|LLMChain-e2dhN"
- },
- {
- "source": "ChatInput-207IY",
- "sourceHandle": "ChatInput|ChatInput-207IY|str",
- "target": "PromptTemplate-IKKOx",
- "targetHandle": "Document;BaseOutputParser;str|input|PromptTemplate-IKKOx",
- "style": {
- "stroke": "#555"
- },
- "className": "",
- "animated": false,
- "id": "reactflow__edge-ChatInput-207IYChatInput|ChatInput-207IY|str-PromptTemplate-IKKOxDocument;BaseOutputParser;str|input|PromptTemplate-IKKOx"
- },
- {
- "source": "LLMChain-e2dhN",
- "sourceHandle": "LLMChain|LLMChain-e2dhN|Chain|LLMChain|function|Text",
- "target": "ChatOutput-1jlJy",
- "targetHandle": "Text|message|ChatOutput-1jlJy",
- "style": {
- "stroke": "#555"
- },
- "className": "stroke-foreground stroke-connection",
- "animated": false,
- "id": "reactflow__edge-LLMChain-e2dhNLLMChain|LLMChain-e2dhN|Chain|LLMChain|function|Text-ChatOutput-1jlJyText|message|ChatOutput-1jlJy"
- }
- ],
- "viewport": {
- "x": -141.98308184453367,
- "y": -104.98637616656356,
- "zoom": 0.4788209787464315
- }
- },
- "id": "b3388ab9-b5dc-4447-b560-79caef40faa5",
- "user_id": "c65bfea3-3eea-4e71-8fc4-106238eb0583"
-}
\ No newline at end of file
diff --git a/src/lfx/tests/data/MemoryChatbotNoLLM.json b/src/lfx/tests/data/MemoryChatbotNoLLM.json
deleted file mode 100644
index 8d4c5fe421a1..000000000000
--- a/src/lfx/tests/data/MemoryChatbotNoLLM.json
+++ /dev/null
@@ -1,1384 +0,0 @@
-{
- "data": {
- "edges": [
- {
- "animated": false,
- "className": "",
- "data": {
- "sourceHandle": {
- "dataType": "Memory",
- "id": "Memory-8X8Cq",
- "name": "dataframe",
- "output_types": [
- "DataFrame"
- ]
- },
- "targetHandle": {
- "fieldName": "input_data",
- "id": "TypeConverterComponent-koSIz",
- "inputTypes": [
- "Message",
- "Data",
- "DataFrame"
- ],
- "type": "other"
- }
- },
- "id": "xy-edge__Memory-8X8Cq{œdataTypeœ:œMemoryœ,œidœ:œMemory-8X8Cqœ,œnameœ:œdataframeœ,œoutput_typesœ:[œDataFrameœ]}-TypeConverterComponent-koSIz{œfieldNameœ:œinput_dataœ,œidœ:œTypeConverterComponent-koSIzœ,œinputTypesœ:[œMessageœ,œDataœ,œDataFrameœ],œtypeœ:œotherœ}",
- "selected": false,
- "source": "Memory-8X8Cq",
- "sourceHandle": "{œdataTypeœ:œMemoryœ,œidœ:œMemory-8X8Cqœ,œnameœ:œdataframeœ,œoutput_typesœ:[œDataFrameœ]}",
- "target": "TypeConverterComponent-koSIz",
- "targetHandle": "{œfieldNameœ:œinput_dataœ,œidœ:œTypeConverterComponent-koSIzœ,œinputTypesœ:[œMessageœ,œDataœ,œDataFrameœ],œtypeœ:œotherœ}"
- },
- {
- "animated": false,
- "className": "",
- "data": {
- "sourceHandle": {
- "dataType": "TypeConverterComponent",
- "id": "TypeConverterComponent-koSIz",
- "name": "message_output",
- "output_types": [
- "Message"
- ]
- },
- "targetHandle": {
- "fieldName": "context",
- "id": "Prompt-VSSGR",
- "inputTypes": [
- "Message"
- ],
- "type": "str"
- }
- },
- "id": "xy-edge__TypeConverterComponent-koSIz{œdataTypeœ:œTypeConverterComponentœ,œidœ:œTypeConverterComponent-koSIzœ,œnameœ:œmessage_outputœ,œoutput_typesœ:[œMessageœ]}-Prompt-VSSGR{œfieldNameœ:œcontextœ,œidœ:œPrompt-VSSGRœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}",
- "selected": false,
- "source": "TypeConverterComponent-koSIz",
- "sourceHandle": "{œdataTypeœ:œTypeConverterComponentœ,œidœ:œTypeConverterComponent-koSIzœ,œnameœ:œmessage_outputœ,œoutput_typesœ:[œMessageœ]}",
- "target": "Prompt-VSSGR",
- "targetHandle": "{œfieldNameœ:œcontextœ,œidœ:œPrompt-VSSGRœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}"
- },
- {
- "animated": false,
- "className": "",
- "data": {
- "sourceHandle": {
- "dataType": "ChatInput",
- "id": "ChatInput-vsgM1",
- "name": "message",
- "output_types": [
- "Message"
- ]
- },
- "targetHandle": {
- "fieldName": "user_message",
- "id": "Prompt-VSSGR",
- "inputTypes": [
- "Message"
- ],
- "type": "str"
- }
- },
- "id": "xy-edge__ChatInput-vsgM1{œdataTypeœ:œChatInputœ,œidœ:œChatInput-vsgM1œ,œnameœ:œmessageœ,œoutput_typesœ:[œMessageœ]}-Prompt-VSSGR{œfieldNameœ:œuser_messageœ,œidœ:œPrompt-VSSGRœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}",
- "selected": false,
- "source": "ChatInput-vsgM1",
- "sourceHandle": "{œdataTypeœ:œChatInputœ,œidœ:œChatInput-vsgM1œ,œnameœ:œmessageœ,œoutput_typesœ:[œMessageœ]}",
- "target": "Prompt-VSSGR",
- "targetHandle": "{œfieldNameœ:œuser_messageœ,œidœ:œPrompt-VSSGRœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}"
- },
- {
- "animated": false,
- "className": "",
- "data": {
- "sourceHandle": {
- "dataType": "Prompt",
- "id": "Prompt-VSSGR",
- "name": "prompt",
- "output_types": [
- "Message"
- ]
- },
- "targetHandle": {
- "fieldName": "input_value",
- "id": "ChatOutput-NAw0P",
- "inputTypes": [
- "Data",
- "DataFrame",
- "Message"
- ],
- "type": "other"
- }
- },
- "id": "xy-edge__Prompt-VSSGR{œdataTypeœ:œPromptœ,œidœ:œPrompt-VSSGRœ,œnameœ:œpromptœ,œoutput_typesœ:[œMessageœ]}-ChatOutput-NAw0P{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-NAw0Pœ,œinputTypesœ:[œDataœ,œDataFrameœ,œMessageœ],œtypeœ:œotherœ}",
- "selected": false,
- "source": "Prompt-VSSGR",
- "sourceHandle": "{œdataTypeœ:œPromptœ,œidœ:œPrompt-VSSGRœ,œnameœ:œpromptœ,œoutput_typesœ:[œMessageœ]}",
- "target": "ChatOutput-NAw0P",
- "targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-NAw0Pœ,œinputTypesœ:[œDataœ,œDataFrameœ,œMessageœ],œtypeœ:œotherœ}"
- }
- ],
- "nodes": [
- {
- "data": {
- "description": "Create a prompt template with dynamic variables.",
- "display_name": "Prompt",
- "id": "Prompt-VSSGR",
- "node": {
- "base_classes": [
- "Message"
- ],
- "beta": false,
- "conditional_paths": [],
- "custom_fields": {
- "template": [
- "context",
- "user_message"
- ]
- },
- "description": "Create a prompt template with dynamic variables.",
- "display_name": "Prompt",
- "documentation": "",
- "edited": false,
- "field_order": [
- "template",
- "tool_placeholder"
- ],
- "frozen": false,
- "icon": "braces",
- "legacy": false,
- "lf_version": "1.4.2",
- "metadata": {},
- "minimized": false,
- "output_types": [],
- "outputs": [
- {
- "allows_loop": false,
- "cache": true,
- "display_name": "Prompt",
- "group_outputs": false,
- "method": "build_prompt",
- "name": "prompt",
- "options": null,
- "required_inputs": null,
- "selected": "Message",
- "tool_mode": true,
- "types": [
- "Message"
- ],
- "value": "__UNDEFINED__"
- }
- ],
- "pinned": false,
- "template": {
- "_type": "Component",
- "code": {
- "advanced": true,
- "dynamic": true,
- "fileTypes": [],
- "file_path": "",
- "info": "",
- "list": false,
- "load_from_db": false,
- "multiline": true,
- "name": "code",
- "password": false,
- "placeholder": "",
- "required": true,
- "show": true,
- "title_case": false,
- "type": "code",
- "value": "from lfx.base.prompts.api_utils import process_prompt_template\nfrom lfx.custom import Component\nfrom lfx.inputs.inputs import DefaultPromptField\nfrom lfx.io import MessageTextInput, Output, PromptInput\nfrom lfx.schema.message import Message\nfrom lfx.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"braces\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n"
- },
- "context": {
- "advanced": false,
- "display_name": "context",
- "dynamic": false,
- "field_type": "str",
- "fileTypes": [],
- "file_path": "",
- "info": "",
- "input_types": [
- "Message"
- ],
- "list": false,
- "load_from_db": false,
- "multiline": true,
- "name": "context",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "type": "str",
- "value": ""
- },
- "template": {
- "_input_type": "PromptInput",
- "advanced": false,
- "display_name": "Template",
- "dynamic": false,
- "info": "",
- "list": false,
- "list_add_label": "Add More",
- "load_from_db": false,
- "name": "template",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_input": true,
- "type": "prompt",
- "value": "{context}\n\nUser: {user_message}\nAI: "
- },
- "tool_placeholder": {
- "_input_type": "MessageTextInput",
- "advanced": true,
- "display_name": "Tool Placeholder",
- "dynamic": false,
- "info": "A placeholder input for tool mode.",
- "input_types": [
- "Message"
- ],
- "list": false,
- "list_add_label": "Add More",
- "load_from_db": false,
- "name": "tool_placeholder",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": true,
- "trace_as_input": true,
- "trace_as_metadata": true,
- "type": "str",
- "value": ""
- },
- "user_message": {
- "advanced": false,
- "display_name": "user_message",
- "dynamic": false,
- "field_type": "str",
- "fileTypes": [],
- "file_path": "",
- "info": "",
- "input_types": [
- "Message"
- ],
- "list": false,
- "load_from_db": false,
- "multiline": true,
- "name": "user_message",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "type": "str",
- "value": ""
- }
- },
- "tool_mode": false
- },
- "type": "Prompt"
- },
- "dragging": false,
- "height": 494,
- "id": "Prompt-VSSGR",
- "measured": {
- "height": 494,
- "width": 320
- },
- "position": {
- "x": 1880.8227904110583,
- "y": 625.8049209882275
- },
- "positionAbsolute": {
- "x": 1880.8227904110583,
- "y": 625.8049209882275
- },
- "selected": false,
- "type": "genericNode",
- "width": 384
- },
- {
- "data": {
- "description": "Get chat inputs from the Playground.",
- "display_name": "Chat Input",
- "id": "ChatInput-vsgM1",
- "node": {
- "base_classes": [
- "Message"
- ],
- "beta": false,
- "conditional_paths": [],
- "custom_fields": {},
- "description": "Get chat inputs from the Playground.",
- "display_name": "Chat Input",
- "documentation": "",
- "edited": false,
- "field_order": [
- "input_value",
- "should_store_message",
- "sender",
- "sender_name",
- "session_id",
- "files",
- "background_color",
- "chat_icon",
- "text_color"
- ],
- "frozen": false,
- "icon": "MessagesSquare",
- "legacy": false,
- "lf_version": "1.4.2",
- "metadata": {},
- "minimized": true,
- "output_types": [],
- "outputs": [
- {
- "allows_loop": false,
- "cache": true,
- "display_name": "Chat Message",
- "group_outputs": false,
- "method": "message_response",
- "name": "message",
- "options": null,
- "required_inputs": null,
- "selected": "Message",
- "tool_mode": true,
- "types": [
- "Message"
- ],
- "value": "__UNDEFINED__"
- }
- ],
- "pinned": false,
- "template": {
- "_type": "Component",
- "background_color": {
- "_input_type": "MessageTextInput",
- "advanced": true,
- "display_name": "Background Color",
- "dynamic": false,
- "info": "The background color of the icon.",
- "input_types": [
- "Message"
- ],
- "list": false,
- "list_add_label": "Add More",
- "load_from_db": false,
- "name": "background_color",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_input": true,
- "trace_as_metadata": true,
- "type": "str",
- "value": ""
- },
- "chat_icon": {
- "_input_type": "MessageTextInput",
- "advanced": true,
- "display_name": "Icon",
- "dynamic": false,
- "info": "The icon of the message.",
- "input_types": [
- "Message"
- ],
- "list": false,
- "list_add_label": "Add More",
- "load_from_db": false,
- "name": "chat_icon",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_input": true,
- "trace_as_metadata": true,
- "type": "str",
- "value": ""
- },
- "code": {
- "advanced": true,
- "dynamic": true,
- "fileTypes": [],
- "file_path": "",
- "info": "",
- "list": false,
- "load_from_db": false,
- "multiline": true,
- "name": "code",
- "password": false,
- "placeholder": "",
- "required": true,
- "show": true,
- "title_case": false,
- "type": "code",
- "value": "from lfx.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom lfx.base.io.chat import ChatComponent\nfrom lfx.inputs import BoolInput\nfrom lfx.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom lfx.schema.message import Message\nfrom lfx.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\n \"background_color\": background_color,\n \"text_color\": text_color,\n \"icon\": icon,\n },\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n"
- },
- "files": {
- "_input_type": "FileInput",
- "advanced": true,
- "display_name": "Files",
- "dynamic": false,
- "fileTypes": [
- "txt",
- "md",
- "mdx",
- "csv",
- "json",
- "yaml",
- "yml",
- "xml",
- "html",
- "htm",
- "pdf",
- "docx",
- "py",
- "sh",
- "sql",
- "js",
- "ts",
- "tsx",
- "jpg",
- "jpeg",
- "png",
- "bmp",
- "image"
- ],
- "file_path": "",
- "info": "Files to be sent with the message.",
- "list": true,
- "list_add_label": "Add More",
- "name": "files",
- "placeholder": "",
- "required": false,
- "show": true,
- "temp_file": true,
- "title_case": false,
- "trace_as_metadata": true,
- "type": "file",
- "value": ""
- },
- "input_value": {
- "_input_type": "MultilineInput",
- "advanced": false,
- "copy_field": false,
- "display_name": "Input Text",
- "dynamic": false,
- "info": "Message to be passed as input.",
- "input_types": [],
- "list": false,
- "list_add_label": "Add More",
- "load_from_db": false,
- "multiline": true,
- "name": "input_value",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_input": true,
- "trace_as_metadata": true,
- "type": "str",
- "value": ""
- },
- "sender": {
- "_input_type": "DropdownInput",
- "advanced": true,
- "combobox": false,
- "dialog_inputs": {},
- "display_name": "Sender Type",
- "dynamic": false,
- "info": "Type of sender.",
- "name": "sender",
- "options": [
- "Machine",
- "User"
- ],
- "options_metadata": [],
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "toggle": false,
- "tool_mode": false,
- "trace_as_metadata": true,
- "type": "str",
- "value": "User"
- },
- "sender_name": {
- "_input_type": "MessageTextInput",
- "advanced": true,
- "display_name": "Sender Name",
- "dynamic": false,
- "info": "Name of the sender.",
- "input_types": [
- "Message"
- ],
- "list": false,
- "list_add_label": "Add More",
- "load_from_db": false,
- "name": "sender_name",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_input": true,
- "trace_as_metadata": true,
- "type": "str",
- "value": "User"
- },
- "session_id": {
- "_input_type": "MessageTextInput",
- "advanced": true,
- "display_name": "Session ID",
- "dynamic": false,
- "info": "The session ID of the chat. If empty, the current session ID parameter will be used.",
- "input_types": [
- "Message"
- ],
- "list": false,
- "list_add_label": "Add More",
- "load_from_db": false,
- "name": "session_id",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_input": true,
- "trace_as_metadata": true,
- "type": "str",
- "value": ""
- },
- "should_store_message": {
- "_input_type": "BoolInput",
- "advanced": true,
- "display_name": "Store Messages",
- "dynamic": false,
- "info": "Store the message in the history.",
- "list": false,
- "list_add_label": "Add More",
- "name": "should_store_message",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_metadata": true,
- "type": "bool",
- "value": true
- },
- "text_color": {
- "_input_type": "MessageTextInput",
- "advanced": true,
- "display_name": "Text Color",
- "dynamic": false,
- "info": "The text color of the name",
- "input_types": [
- "Message"
- ],
- "list": false,
- "list_add_label": "Add More",
- "load_from_db": false,
- "name": "text_color",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_input": true,
- "trace_as_metadata": true,
- "type": "str",
- "value": ""
- }
- },
- "tool_mode": false
- },
- "type": "ChatInput"
- },
- "dragging": false,
- "height": 294,
- "id": "ChatInput-vsgM1",
- "measured": {
- "height": 294,
- "width": 320
- },
- "position": {
- "x": 1275.9262193671882,
- "y": 836.1228056896347
- },
- "positionAbsolute": {
- "x": 1275.9262193671882,
- "y": 836.1228056896347
- },
- "selected": false,
- "type": "genericNode",
- "width": 384
- },
- {
- "data": {
- "description": "Display a chat message in the Playground.",
- "display_name": "Chat Output",
- "id": "ChatOutput-NAw0P",
- "node": {
- "base_classes": [
- "Message"
- ],
- "beta": false,
- "conditional_paths": [],
- "custom_fields": {},
- "description": "Display a chat message in the Playground.",
- "display_name": "Chat Output",
- "documentation": "",
- "edited": false,
- "field_order": [
- "input_value",
- "should_store_message",
- "sender",
- "sender_name",
- "session_id",
- "data_template",
- "background_color",
- "chat_icon",
- "text_color",
- "clean_data"
- ],
- "frozen": false,
- "icon": "MessagesSquare",
- "legacy": false,
- "lf_version": "1.4.2",
- "metadata": {},
- "minimized": true,
- "output_types": [],
- "outputs": [
- {
- "allows_loop": false,
- "cache": true,
- "display_name": "Output Message",
- "group_outputs": false,
- "method": "message_response",
- "name": "message",
- "options": null,
- "required_inputs": null,
- "selected": "Message",
- "tool_mode": true,
- "types": [
- "Message"
- ],
- "value": "__UNDEFINED__"
- }
- ],
- "pinned": false,
- "template": {
- "_type": "Component",
- "background_color": {
- "_input_type": "MessageTextInput",
- "advanced": true,
- "display_name": "Background Color",
- "dynamic": false,
- "info": "The background color of the icon.",
- "input_types": [
- "Message"
- ],
- "list": false,
- "list_add_label": "Add More",
- "load_from_db": false,
- "name": "background_color",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_input": true,
- "trace_as_metadata": true,
- "type": "str",
- "value": ""
- },
- "chat_icon": {
- "_input_type": "MessageTextInput",
- "advanced": true,
- "display_name": "Icon",
- "dynamic": false,
- "info": "The icon of the message.",
- "input_types": [
- "Message"
- ],
- "list": false,
- "list_add_label": "Add More",
- "load_from_db": false,
- "name": "chat_icon",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_input": true,
- "trace_as_metadata": true,
- "type": "str",
- "value": ""
- },
- "clean_data": {
- "_input_type": "BoolInput",
- "advanced": true,
- "display_name": "Basic Clean Data",
- "dynamic": false,
- "info": "Whether to clean the data",
- "list": false,
- "list_add_label": "Add More",
- "name": "clean_data",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_metadata": true,
- "type": "bool",
- "value": true
- },
- "code": {
- "advanced": true,
- "dynamic": true,
- "fileTypes": [],
- "file_path": "",
- "info": "",
- "list": false,
- "load_from_db": false,
- "multiline": true,
- "name": "code",
- "password": false,
- "placeholder": "",
- "required": true,
- "show": true,
- "title_case": false,
- "type": "code",
- "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom lfx.base.io.chat import ChatComponent\nfrom lfx.helpers.data import safe_convert\nfrom lfx.inputs import BoolInput\nfrom lfx.inputs.inputs import HandleInput\nfrom lfx.io import DropdownInput, MessageTextInput, Output\nfrom lfx.schema.data import Data\nfrom lfx.schema.dataframe import DataFrame\nfrom lfx.schema.message import Message\nfrom lfx.schema.properties import Source\nfrom lfx.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, 
got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
- },
- "data_template": {
- "_input_type": "MessageTextInput",
- "advanced": true,
- "display_name": "Data Template",
- "dynamic": false,
- "info": "Template to convert Data to Text. If left empty, it will be dynamically set to the Data's text key.",
- "input_types": [
- "Message"
- ],
- "list": false,
- "list_add_label": "Add More",
- "load_from_db": false,
- "name": "data_template",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_input": true,
- "trace_as_metadata": true,
- "type": "str",
- "value": "{text}"
- },
- "input_value": {
- "_input_type": "HandleInput",
- "advanced": false,
- "display_name": "Inputs",
- "dynamic": false,
- "info": "Message to be passed as output.",
- "input_types": [
- "Data",
- "DataFrame",
- "Message"
- ],
- "list": false,
- "list_add_label": "Add More",
- "name": "input_value",
- "placeholder": "",
- "required": true,
- "show": true,
- "title_case": false,
- "trace_as_metadata": true,
- "type": "other",
- "value": ""
- },
- "sender": {
- "_input_type": "DropdownInput",
- "advanced": true,
- "combobox": false,
- "dialog_inputs": {},
- "display_name": "Sender Type",
- "dynamic": false,
- "info": "Type of sender.",
- "name": "sender",
- "options": [
- "Machine",
- "User"
- ],
- "options_metadata": [],
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "toggle": false,
- "tool_mode": false,
- "trace_as_metadata": true,
- "type": "str",
- "value": "Machine"
- },
- "sender_name": {
- "_input_type": "MessageTextInput",
- "advanced": true,
- "display_name": "Sender Name",
- "dynamic": false,
- "info": "Name of the sender.",
- "input_types": [
- "Message"
- ],
- "list": false,
- "list_add_label": "Add More",
- "load_from_db": false,
- "name": "sender_name",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_input": true,
- "trace_as_metadata": true,
- "type": "str",
- "value": "AI"
- },
- "session_id": {
- "_input_type": "MessageTextInput",
- "advanced": true,
- "display_name": "Session ID",
- "dynamic": false,
- "info": "The session ID of the chat. If empty, the current session ID parameter will be used.",
- "input_types": [
- "Message"
- ],
- "list": false,
- "list_add_label": "Add More",
- "load_from_db": false,
- "name": "session_id",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_input": true,
- "trace_as_metadata": true,
- "type": "str",
- "value": ""
- },
- "should_store_message": {
- "_input_type": "BoolInput",
- "advanced": true,
- "display_name": "Store Messages",
- "dynamic": false,
- "info": "Store the message in the history.",
- "list": false,
- "list_add_label": "Add More",
- "name": "should_store_message",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_metadata": true,
- "type": "bool",
- "value": true
- },
- "text_color": {
- "_input_type": "MessageTextInput",
- "advanced": true,
- "display_name": "Text Color",
- "dynamic": false,
- "info": "The text color of the name",
- "input_types": [
- "Message"
- ],
- "list": false,
- "list_add_label": "Add More",
- "load_from_db": false,
- "name": "text_color",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_input": true,
- "trace_as_metadata": true,
- "type": "str",
- "value": ""
- }
- },
- "tool_mode": false
- },
- "type": "ChatOutput"
- },
- "dragging": true,
- "height": 294,
- "id": "ChatOutput-NAw0P",
- "measured": {
- "height": 294,
- "width": 320
- },
- "position": {
- "x": 2487.48936094892,
- "y": 703.7197762654707
- },
- "positionAbsolute": {
- "x": 2487.48936094892,
- "y": 703.7197762654707
- },
- "selected": false,
- "type": "genericNode",
- "width": 384
- },
- {
- "data": {
- "description": "Retrieves stored chat messages from Langflow tables or an external memory.",
- "display_name": "Chat Memory",
- "id": "Memory-8X8Cq",
- "node": {
- "base_classes": [
- "DataFrame"
- ],
- "beta": false,
- "conditional_paths": [],
- "custom_fields": {},
- "description": "Stores or retrieves stored chat messages from Langflow tables or an external memory.",
- "display_name": "Message History",
- "documentation": "",
- "edited": false,
- "field_order": [
- "mode",
- "message",
- "memory",
- "sender",
- "sender_name",
- "n_messages",
- "session_id",
- "order",
- "template"
- ],
- "frozen": false,
- "icon": "message-square-more",
- "legacy": false,
- "lf_version": "1.4.2",
- "metadata": {},
- "minimized": false,
- "output_types": [],
- "outputs": [
- {
- "allows_loop": false,
- "cache": true,
- "display_name": "Messages",
- "group_outputs": false,
- "hidden": null,
- "method": "retrieve_messages_dataframe",
- "name": "dataframe",
- "options": null,
- "required_inputs": null,
- "selected": "DataFrame",
- "tool_mode": true,
- "types": [
- "DataFrame"
- ],
- "value": "__UNDEFINED__"
- }
- ],
- "pinned": false,
- "template": {
- "_type": "Component",
- "code": {
- "advanced": true,
- "dynamic": true,
- "fileTypes": [],
- "file_path": "",
- "info": "",
- "list": false,
- "load_from_db": false,
- "multiline": true,
- "name": "code",
- "password": false,
- "placeholder": "",
- "required": true,
- "show": true,
- "title_case": false,
- "type": "code",
- "value": "from typing import Any, cast\n\nfrom lfx.custom import Component\nfrom lfx.inputs import HandleInput\nfrom lfx.io import DropdownInput, IntInput, MessageTextInput, MultilineInput, Output, TabInput\nfrom lfx.memory import aget_messages, astore_message\nfrom lfx.schema import Data, dotdict\nfrom lfx.schema.dataframe import DataFrame\nfrom lfx.schema.message import Message\nfrom lfx.utils.component_utils import set_current_fields, set_field_display\nfrom lfx.utils.constants import MESSAGE_SENDER_AI, MESSAGE_SENDER_NAME_AI, MESSAGE_SENDER_USER\n\n\nclass MemoryComponent(Component):\n display_name = \"Message History\"\n description = \"Stores or retrieves stored chat messages from Langflow tables or an external memory.\"\n icon = \"message-square-more\"\n name = \"Memory\"\n default_keys = [\"mode\", \"memory\"]\n mode_config = {\n \"Store\": [\"message\", \"memory\", \"sender\", \"sender_name\", \"session_id\"],\n \"Retrieve\": [\"n_messages\", \"order\", \"template\", \"memory\"],\n }\n\n inputs = [\n TabInput(\n name=\"mode\",\n display_name=\"Mode\",\n options=[\"Retrieve\", \"Store\"],\n value=\"Retrieve\",\n info=\"Operation mode: Store messages or Retrieve messages.\",\n real_time_refresh=True,\n ),\n MessageTextInput(\n name=\"message\",\n display_name=\"Message\",\n info=\"The chat message to be stored.\",\n tool_mode=True,\n dynamic=True,\n show=False,\n ),\n HandleInput(\n name=\"memory\",\n display_name=\"External Memory\",\n input_types=[\"Memory\"],\n info=\"Retrieve messages from an external memory. If empty, it will use the Langflow tables.\",\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER, \"Machine and User\"],\n value=\"Machine and User\",\n info=\"Filter by sender type.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Filter by sender name.\",\n advanced=True,\n show=False,\n ),\n IntInput(\n name=\"n_messages\",\n display_name=\"Number of Messages\",\n value=100,\n info=\"Number of messages to retrieve.\",\n advanced=True,\n show=False,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n DropdownInput(\n name=\"order\",\n display_name=\"Order\",\n options=[\"Ascending\", \"Descending\"],\n value=\"Ascending\",\n info=\"Order of the messages.\",\n advanced=True,\n tool_mode=True,\n required=True,\n show=False,\n ),\n MultilineInput(\n name=\"template\",\n display_name=\"Template\",\n info=\"The template to use for formatting the data. 
\"\n \"It can contain the keys {text}, {sender} or any other key in the message data.\",\n value=\"{sender_name}: {text}\",\n advanced=True,\n show=False,\n ),\n ]\n\n outputs = [Output(display_name=\"Messages\", name=\"dataframe\", method=\"retrieve_messages_dataframe\", dynamic=True)]\n\n def update_outputs(self, frontend_node: dict, field_name: str, field_value: Any) -> dict:\n \"\"\"Dynamically show only the relevant output based on the selected output type.\"\"\"\n if field_name == \"mode\":\n # Start with empty outputs\n frontend_node[\"outputs\"] = []\n if field_value == \"Store\":\n frontend_node[\"outputs\"] = [\n Output(\n display_name=\"Stored Messages\",\n name=\"stored_messages\",\n method=\"store_message\",\n hidden=True,\n dynamic=True,\n )\n ]\n if field_value == \"Retrieve\":\n frontend_node[\"outputs\"] = [\n Output(\n display_name=\"Messages\", name=\"dataframe\", method=\"retrieve_messages_dataframe\", dynamic=True\n )\n ]\n return frontend_node\n\n async def retrieve_messages(self) -> Data:\n sender = self.sender\n sender_name = self.sender_name\n session_id = self.session_id\n n_messages = self.n_messages\n order = \"DESC\" if self.order == \"Descending\" else \"ASC\"\n\n if sender == \"Machine and User\":\n sender = None\n\n if self.memory and not hasattr(self.memory, \"aget_messages\"):\n memory_name = type(self.memory).__name__\n err_msg = f\"External Memory object ({memory_name}) must have 'aget_messages' method.\"\n raise AttributeError(err_msg)\n # Check if n_messages is None or 0\n if n_messages == 0:\n stored = []\n elif self.memory:\n # override session_id\n self.memory.session_id = session_id\n\n stored = await self.memory.aget_messages()\n # langchain memories are supposed to return messages in ascending order\n if order == \"DESC\":\n stored = stored[::-1]\n if n_messages:\n stored = stored[:n_messages]\n stored = [Message.from_lc_message(m) for m in stored]\n if sender:\n expected_type = MESSAGE_SENDER_AI if sender == MESSAGE_SENDER_AI else MESSAGE_SENDER_USER\n stored = [m for m in stored if m.type == expected_type]\n else:\n stored = await aget_messages(\n sender=sender,\n sender_name=sender_name,\n session_id=session_id,\n limit=n_messages,\n order=order,\n )\n self.status = stored\n return cast(Data, stored)\n\n async def retrieve_messages_dataframe(self) -> DataFrame:\n \"\"\"Convert the retrieved messages into a DataFrame.\n\n Returns:\n DataFrame: A DataFrame containing the message data.\n \"\"\"\n messages = await self.retrieve_messages()\n return DataFrame(messages)\n\n async def store_message(self) -> Message:\n message = Message(text=self.message) if isinstance(self.message, str) else self.message\n\n message.session_id = self.session_id or message.session_id\n message.sender = self.sender or message.sender or MESSAGE_SENDER_AI\n message.sender_name = self.sender_name or message.sender_name or MESSAGE_SENDER_NAME_AI\n\n stored_messages: list[Message] = []\n\n if self.memory:\n self.memory.session_id = message.session_id\n lc_message = message.to_lc_message()\n await self.memory.aadd_messages([lc_message])\n\n stored_messages = await self.memory.aget_messages() or []\n\n stored_messages = [Message.from_lc_message(m) for m in stored_messages] if stored_messages else []\n\n if message.sender:\n stored_messages = [m for m in stored_messages if m.sender == message.sender]\n else:\n await astore_message(message, flow_id=self.graph.flow_id)\n stored_messages = (\n await aget_messages(\n session_id=message.session_id, sender_name=message.sender_name, 
sender=message.sender\n )\n or []\n )\n\n if not stored_messages:\n msg = \"No messages were stored. Please ensure that the session ID and sender are properly set.\"\n raise ValueError(msg)\n\n stored_message = stored_messages[0]\n self.status = stored_message\n return stored_message\n\n def update_build_config(\n self,\n build_config: dotdict,\n field_value: Any, # noqa: ARG002\n field_name: str | None = None, # noqa: ARG002\n ) -> dotdict:\n return set_current_fields(\n build_config=build_config,\n action_fields=self.mode_config,\n selected_action=build_config[\"mode\"][\"value\"],\n default_fields=self.default_keys,\n func=set_field_display,\n )\n"
- },
- "memory": {
- "_input_type": "HandleInput",
- "advanced": true,
- "display_name": "External Memory",
- "dynamic": false,
- "info": "Retrieve messages from an external memory. If empty, it will use the Langflow tables.",
- "input_types": [
- "Memory"
- ],
- "list": false,
- "list_add_label": "Add More",
- "name": "memory",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "trace_as_metadata": true,
- "type": "other",
- "value": ""
- },
- "message": {
- "_input_type": "MessageTextInput",
- "advanced": false,
- "display_name": "Message",
- "dynamic": true,
- "info": "The chat message to be stored.",
- "input_types": [
- "Message"
- ],
- "list": false,
- "list_add_label": "Add More",
- "load_from_db": false,
- "name": "message",
- "placeholder": "",
- "required": false,
- "show": false,
- "title_case": false,
- "tool_mode": true,
- "trace_as_input": true,
- "trace_as_metadata": true,
- "type": "str",
- "value": ""
- },
- "mode": {
- "_input_type": "TabInput",
- "advanced": false,
- "display_name": "Mode",
- "dynamic": false,
- "info": "Operation mode: Store messages or Retrieve messages.",
- "name": "mode",
- "options": [
- "Retrieve",
- "Store"
- ],
- "placeholder": "",
- "real_time_refresh": true,
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_metadata": true,
- "type": "tab",
- "value": "Retrieve"
- },
- "n_messages": {
- "_input_type": "IntInput",
- "advanced": true,
- "display_name": "Number of Messages",
- "dynamic": false,
- "info": "Number of messages to retrieve.",
- "list": false,
- "list_add_label": "Add More",
- "name": "n_messages",
- "placeholder": "",
- "required": false,
- "show": false,
- "title_case": false,
- "tool_mode": false,
- "trace_as_metadata": true,
- "type": "int",
- "value": 100
- },
- "order": {
- "_input_type": "DropdownInput",
- "advanced": true,
- "combobox": false,
- "dialog_inputs": {},
- "display_name": "Order",
- "dynamic": false,
- "info": "Order of the messages.",
- "name": "order",
- "options": [
- "Ascending",
- "Descending"
- ],
- "options_metadata": [],
- "placeholder": "",
- "required": true,
- "show": false,
- "title_case": false,
- "toggle": false,
- "tool_mode": true,
- "trace_as_metadata": true,
- "type": "str",
- "value": "Ascending"
- },
- "sender": {
- "_input_type": "DropdownInput",
- "advanced": true,
- "combobox": false,
- "dialog_inputs": {},
- "display_name": "Sender Type",
- "dynamic": false,
- "info": "Filter by sender type.",
- "name": "sender",
- "options": [
- "Machine",
- "User",
- "Machine and User"
- ],
- "options_metadata": [],
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "toggle": false,
- "tool_mode": false,
- "trace_as_metadata": true,
- "type": "str",
- "value": "Machine and User"
- },
- "sender_name": {
- "_input_type": "MessageTextInput",
- "advanced": true,
- "display_name": "Sender Name",
- "dynamic": false,
- "info": "Filter by sender name.",
- "input_types": [
- "Message"
- ],
- "list": false,
- "list_add_label": "Add More",
- "load_from_db": false,
- "name": "sender_name",
- "placeholder": "",
- "required": false,
- "show": false,
- "title_case": false,
- "tool_mode": false,
- "trace_as_input": true,
- "trace_as_metadata": true,
- "type": "str",
- "value": ""
- },
- "session_id": {
- "_input_type": "MessageTextInput",
- "advanced": true,
- "display_name": "Session ID",
- "dynamic": false,
- "info": "The session ID of the chat. If empty, the current session ID parameter will be used.",
- "input_types": [
- "Message"
- ],
- "list": false,
- "list_add_label": "Add More",
- "load_from_db": false,
- "name": "session_id",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_input": true,
- "trace_as_metadata": true,
- "type": "str",
- "value": ""
- },
- "template": {
- "_input_type": "MultilineInput",
- "advanced": true,
- "copy_field": false,
- "display_name": "Template",
- "dynamic": false,
- "info": "The template to use for formatting the data. It can contain the keys {text}, {sender} or any other key in the message data.",
- "input_types": [
- "Message"
- ],
- "list": false,
- "list_add_label": "Add More",
- "load_from_db": false,
- "multiline": true,
- "name": "template",
- "placeholder": "",
- "required": false,
- "show": false,
- "title_case": false,
- "tool_mode": false,
- "trace_as_input": true,
- "trace_as_metadata": true,
- "type": "str",
- "value": "{sender_name}: {text}"
- }
- },
- "tool_mode": false
- },
- "type": "Memory"
- },
- "dragging": false,
- "height": 366,
- "id": "Memory-8X8Cq",
- "measured": {
- "height": 366,
- "width": 320
- },
- "position": {
- "x": 1308.5775646859402,
- "y": 406.95204412025845
- },
- "positionAbsolute": {
- "x": 1308.5775646859402,
- "y": 406.95204412025845
- },
- "selected": false,
- "type": "genericNode",
- "width": 384
- },
- {
- "data": {
- "id": "TypeConverterComponent-koSIz",
- "node": {
- "base_classes": [
- "Message"
- ],
- "beta": false,
- "category": "processing",
- "conditional_paths": [],
- "custom_fields": {},
- "description": "Convert between different types (Message, Data, DataFrame)",
- "display_name": "Type Convert",
- "documentation": "",
- "edited": false,
- "field_order": [
- "input_data",
- "output_type"
- ],
- "frozen": false,
- "icon": "repeat",
- "key": "TypeConverterComponent",
- "legacy": false,
- "lf_version": "1.4.2",
- "metadata": {},
- "minimized": false,
- "output_types": [],
- "outputs": [
- {
- "allows_loop": false,
- "cache": true,
- "display_name": "Message Output",
- "group_outputs": false,
- "method": "convert_to_message",
- "name": "message_output",
- "selected": "Message",
- "tool_mode": true,
- "types": [
- "Message"
- ],
- "value": "__UNDEFINED__"
- }
- ],
- "pinned": false,
- "score": 0.007568328950209746,
- "template": {
- "_type": "Component",
- "code": {
- "advanced": true,
- "dynamic": true,
- "fileTypes": [],
- "file_path": "",
- "info": "",
- "list": false,
- "load_from_db": false,
- "multiline": true,
- "name": "code",
- "password": false,
- "placeholder": "",
- "required": true,
- "show": true,
- "title_case": false,
- "type": "code",
- "value": "from typing import Any\n\nfrom lfx.custom import Component\nfrom lfx.io import HandleInput, Output, TabInput\nfrom lfx.schema import Data, DataFrame, Message\n\n\ndef convert_to_message(v) -> Message:\n \"\"\"Convert input to Message type.\n\n Args:\n v: Input to convert (Message, Data, DataFrame, or dict)\n\n Returns:\n Message: Converted Message object\n \"\"\"\n return v if isinstance(v, Message) else v.to_message()\n\n\ndef convert_to_data(v: DataFrame | Data | Message | dict) -> Data:\n \"\"\"Convert input to Data type.\n\n Args:\n v: Input to convert (Message, Data, DataFrame, or dict)\n\n Returns:\n Data: Converted Data object\n \"\"\"\n if isinstance(v, dict):\n return Data(v)\n return v if isinstance(v, Data) else v.to_data()\n\n\ndef convert_to_dataframe(v: DataFrame | Data | Message | dict) -> DataFrame:\n \"\"\"Convert input to DataFrame type.\n\n Args:\n v: Input to convert (Message, Data, DataFrame, or dict)\n\n Returns:\n DataFrame: Converted DataFrame object\n \"\"\"\n if isinstance(v, dict):\n return DataFrame([v])\n return v if isinstance(v, DataFrame) else v.to_dataframe()\n\n\nclass TypeConverterComponent(Component):\n display_name = \"Type Convert\"\n description = \"Convert between different types (Message, Data, DataFrame)\"\n icon = \"repeat\"\n\n inputs = [\n HandleInput(\n name=\"input_data\",\n display_name=\"Input\",\n input_types=[\"Message\", \"Data\", \"DataFrame\"],\n info=\"Accept Message, Data or DataFrame as input\",\n required=True,\n ),\n TabInput(\n name=\"output_type\",\n display_name=\"Output Type\",\n options=[\"Message\", \"Data\", \"DataFrame\"],\n info=\"Select the desired output data type\",\n real_time_refresh=True,\n value=\"Message\",\n ),\n ]\n\n outputs = [Output(display_name=\"Message Output\", name=\"message_output\", method=\"convert_to_message\")]\n\n def update_outputs(self, frontend_node: dict, field_name: str, field_value: Any) -> dict:\n \"\"\"Dynamically show only the relevant output based on the selected output type.\"\"\"\n if field_name == \"output_type\":\n # Start with empty outputs\n frontend_node[\"outputs\"] = []\n\n # Add only the selected output type\n if field_value == \"Message\":\n frontend_node[\"outputs\"].append(\n Output(display_name=\"Message Output\", name=\"message_output\", method=\"convert_to_message\").to_dict()\n )\n elif field_value == \"Data\":\n frontend_node[\"outputs\"].append(\n Output(display_name=\"Data Output\", name=\"data_output\", method=\"convert_to_data\").to_dict()\n )\n elif field_value == \"DataFrame\":\n frontend_node[\"outputs\"].append(\n Output(\n display_name=\"DataFrame Output\", name=\"dataframe_output\", method=\"convert_to_dataframe\"\n ).to_dict()\n )\n\n return frontend_node\n\n def convert_to_message(self) -> Message:\n \"\"\"Convert input to Message type.\"\"\"\n return convert_to_message(self.input_data[0] if isinstance(self.input_data, list) else self.input_data)\n\n def convert_to_data(self) -> Data:\n \"\"\"Convert input to Data type.\"\"\"\n return convert_to_data(self.input_data[0] if isinstance(self.input_data, list) else self.input_data)\n\n def convert_to_dataframe(self) -> DataFrame:\n \"\"\"Convert input to DataFrame type.\"\"\"\n return convert_to_dataframe(self.input_data[0] if isinstance(self.input_data, list) else self.input_data)\n"
- },
- "input_data": {
- "_input_type": "HandleInput",
- "advanced": false,
- "display_name": "Input",
- "dynamic": false,
- "info": "Accept Message, Data or DataFrame as input",
- "input_types": [
- "Message",
- "Data",
- "DataFrame"
- ],
- "list": false,
- "list_add_label": "Add More",
- "name": "input_data",
- "placeholder": "",
- "required": true,
- "show": true,
- "title_case": false,
- "trace_as_metadata": true,
- "type": "other",
- "value": ""
- },
- "output_type": {
- "_input_type": "TabInput",
- "advanced": false,
- "display_name": "Output Type",
- "dynamic": false,
- "info": "Select the desired output data type",
- "name": "output_type",
- "options": [
- "Message",
- "Data",
- "DataFrame"
- ],
- "placeholder": "",
- "real_time_refresh": true,
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_metadata": true,
- "type": "tab",
- "value": "Message"
- }
- },
- "tool_mode": false
- },
- "showNode": true,
- "type": "TypeConverterComponent"
- },
- "dragging": false,
- "id": "TypeConverterComponent-koSIz",
- "measured": {
- "height": 261,
- "width": 320
- },
- "position": {
- "x": 1680.7884314480486,
- "y": 378.23790603026777
- },
- "selected": true,
- "type": "genericNode"
- }
- ],
- "viewport": {
- "x": -810.6674739450368,
- "y": -114.59139005551219,
- "zoom": 0.6810300764379204
- }
- },
- "description": "This project can be used as a starting point for building a Chat experience with user specific memory. You can set a different Session ID to start a new message history.",
- "endpoint_name": null,
- "id": "76f4de62-4bb6-4681-b90f-7be832cd9818",
- "is_component": false,
- "last_tested_version": "1.4.2",
- "name": "MemoryChatbotNoLLM",
- "tags": []
-}
\ No newline at end of file
diff --git a/src/lfx/tests/data/Openapi.json b/src/lfx/tests/data/Openapi.json
deleted file mode 100644
index 1a4985ce2895..000000000000
--- a/src/lfx/tests/data/Openapi.json
+++ /dev/null
@@ -1,445 +0,0 @@
-{
- "description": "",
- "name": "openapi",
- "id": "1",
- "data": {
- "nodes": [
- {
- "width": 384,
- "height": 311,
- "id": "dndnode_19",
- "type": "genericNode",
- "position": {
- "x": -207.85635949789724,
- "y": -105.73915116823618
- },
- "data": {
- "type": "JsonToolkit",
- "node": {
- "template": {
- "spec": {
- "required": true,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "password": false,
- "name": "spec",
- "type": "JsonSpec",
- "list": false
- },
- "_type": "JsonToolkit"
- },
- "description": "Toolkit for interacting with a JSON spec.",
- "base_classes": [
- "BaseToolkit"
- ]
- },
- "id": "dndnode_19",
- "value": null
- },
- "selected": false,
- "positionAbsolute": {
- "x": -207.85635949789724,
- "y": -105.73915116823618
- },
- "dragging": false
- },
- {
- "width": 384,
- "height": 351,
- "id": "dndnode_32",
- "type": "genericNode",
- "position": {
- "x": 745.308873444751,
- "y": -37.007911201107675
- },
- "data": {
- "type": "OpenAPIToolkit",
- "node": {
- "template": {
- "json_agent": {
- "required": true,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "password": false,
- "name": "json_agent",
- "type": "AgentExecutor",
- "list": false
- },
- "requests_wrapper": {
- "required": true,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "password": false,
- "name": "requests_wrapper",
- "type": "TextRequestsWrapper",
- "list": false
- },
- "_type": "OpenAPIToolkit"
- },
- "description": "Toolkit for interacting with a OpenAPI api.",
- "base_classes": [
- "BaseToolkit"
- ]
- },
- "id": "dndnode_32",
- "value": null
- },
- "selected": false,
- "positionAbsolute": {
- "x": 745.308873444751,
- "y": -37.007911201107675
- },
- "dragging": false
- },
- {
- "width": 384,
- "height": 351,
- "id": "dndnode_33",
- "type": "genericNode",
- "position": {
- "x": 281.30887344475104,
- "y": 2.9920887988923255
- },
- "data": {
- "type": "JsonAgent",
- "node": {
- "template": {
- "toolkit": {
- "required": true,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "password": false,
- "name": "toolkit",
- "type": "BaseToolkit",
- "list": false
- },
- "llm": {
- "required": true,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "password": false,
- "name": "llm",
- "type": "BaseLanguageModel",
- "list": false
- },
- "_type": "JsonAgent"
- },
- "description": "Construct a json agent from an LLM and tools.",
- "base_classes": [
- "AgentExecutor"
- ]
- },
- "id": "dndnode_33",
- "value": null
- },
- "selected": false,
- "dragging": false,
- "positionAbsolute": {
- "x": 281.30887344475104,
- "y": 2.9920887988923255
- }
- },
- {
- "width": 384,
- "height": 349,
- "id": "dndnode_34",
- "type": "genericNode",
- "position": {
- "x": 301.30887344475104,
- "y": 532.9920887988924
- },
- "data": {
- "type": "TextRequestsWrapper",
- "node": {
- "template": {
- "headers": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": true,
- "value": "{'Authorization':\n 'Bearer '}",
- "password": false,
- "name": "headers",
- "type": "code",
- "list": false
- },
- "aiosession": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "aiosession",
- "type": "ClientSession",
- "list": false
- },
- "_type": "TextRequestsWrapper"
- },
- "description": "Lightweight wrapper around requests library.",
- "base_classes": [
- "TextRequestsWrapper"
- ]
- },
- "id": "dndnode_34",
- "value": null
- },
- "positionAbsolute": {
- "x": 301.30887344475104,
- "y": 532.9920887988924
- }
- },
- {
- "width": 384,
- "height": 407,
- "id": "dndnode_35",
- "type": "genericNode",
- "position": {
- "x": -754.691126555249,
- "y": -37.00791120110762
- },
- "data": {
- "type": "JsonSpec",
- "node": {
- "template": {
- "path": {
- "required": true,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "value": "api-with-examples.yaml",
- "suffixes": [
- ".json",
- ".yaml",
- ".yml"
- ],
- "password": false,
- "name": "path",
- "type": "file",
- "list": false,
- "fileTypes": [
- "json",
- "yaml",
- "yml"
- ],
- "file_path": "api-with-examples.yaml"
- },
- "max_value_length": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "value": "4000",
- "password": false,
- "name": "max_value_length",
- "type": "int",
- "list": false
- },
- "_type": "JsonSpec"
- },
- "description": "",
- "base_classes": [
- "Tool",
- "JsonSpec"
- ]
- },
- "id": "dndnode_35",
- "value": null
- },
- "selected": false,
- "dragging": false,
- "positionAbsolute": {
- "x": -754.691126555249,
- "y": -37.00791120110762
- }
- },
- {
- "width": 384,
- "height": 563,
- "id": "dndnode_36",
- "type": "genericNode",
- "position": {
- "x": -310.69112655524896,
- "y": 514.9920887988924
- },
- "data": {
- "type": "ChatOpenAI",
- "node": {
- "template": {
- "cache": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "cache",
- "type": "bool",
- "list": false
- },
- "verbose": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "value": false,
- "password": false,
- "name": "verbose",
- "type": "bool",
- "list": false
- },
- "client": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "client",
- "type": "Any",
- "list": false
- },
- "model_name": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "value": "gpt-3.5-turbo",
- "password": false,
- "name": "model_name",
- "type": "str",
- "list": false
- },
- "model_kwargs": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "model_kwargs",
- "type": "code",
- "list": false
- },
- "openai_api_key": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "password": false,
- "name": "openai_api_key",
- "type": "str",
- "list": false,
- "value": "sk-"
- },
- "max_retries": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "value": 6,
- "password": false,
- "name": "max_retries",
- "type": "int",
- "list": false
- },
- "prefix_messages": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "prefix_messages",
- "type": "Any",
- "list": true
- },
- "streaming": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "value": false,
- "password": false,
- "name": "streaming",
- "type": "bool",
- "list": false
- },
- "_type": "ChatOpenAI"
- },
- "description": "Wrapper around OpenAI Chat large language models.To use, you should have the ``openai`` python package installed, and theenvironment variable ``OPENAI_API_KEY`` set with your API key.Any parameters that are valid to be passed to the openai.create call can be passedin, even if not explicitly saved on this class.",
- "base_classes": [
- "BaseLanguageModel",
- "BaseLLM"
- ]
- },
- "id": "dndnode_36",
- "value": null
- },
- "selected": false,
- "dragging": false,
- "positionAbsolute": {
- "x": -310.69112655524896,
- "y": 514.9920887988924
- }
- }
- ],
- "edges": [
- {
- "source": "dndnode_19",
- "sourceHandle": "JsonToolkit|dndnode_19|BaseToolkit",
- "target": "dndnode_33",
- "targetHandle": "BaseToolkit|toolkit|dndnode_33",
- "className": "animate-pulse",
- "id": "reactflow__edge-dndnode_19JsonToolkit|dndnode_19|BaseToolkit-dndnode_33BaseToolkit|toolkit|dndnode_33",
- "selected": false
- },
- {
- "source": "dndnode_33",
- "sourceHandle": "JsonAgent|dndnode_33|AgentExecutor",
- "target": "dndnode_32",
- "targetHandle": "AgentExecutor|json_agent|dndnode_32",
- "className": "animate-pulse",
- "id": "reactflow__edge-dndnode_33JsonAgent|dndnode_33|AgentExecutor-dndnode_32AgentExecutor|json_agent|dndnode_32",
- "selected": false
- },
- {
- "source": "dndnode_34",
- "sourceHandle": "TextRequestsWrapper|dndnode_34|TextRequestsWrapper",
- "target": "dndnode_32",
- "targetHandle": "TextRequestsWrapper|requests_wrapper|dndnode_32",
- "className": "animate-pulse",
- "id": "reactflow__edge-dndnode_34RequestsWrapper|dndnode_34|TextRequestsWrapper-dndnode_32RequestsWrapper|requests_wrapper|dndnode_32",
- "selected": false
- },
- {
- "source": "dndnode_35",
- "sourceHandle": "JsonSpec|dndnode_35|Tool|JsonSpec",
- "target": "dndnode_19",
- "targetHandle": "JsonSpec|spec|dndnode_19",
- "className": "animate-pulse",
- "id": "reactflow__edge-dndnode_35JsonSpec|dndnode_35|Tool|JsonSpec-dndnode_19JsonSpec|spec|dndnode_19",
- "selected": false
- },
- {
- "source": "dndnode_36",
- "sourceHandle": "ChatOpenAI|dndnode_36|BaseLanguageModel|BaseLLM",
- "target": "dndnode_33",
- "targetHandle": "BaseLanguageModel|llm|dndnode_33",
- "className": "animate-pulse",
- "id": "reactflow__edge-dndnode_36OpenAIChat|dndnode_36|BaseLanguageModel|BaseLLM-dndnode_33BaseLanguageModel|llm|dndnode_33"
- }
- ],
- "viewport": {
- "x": 0,
- "y": 0,
- "zoom": 1
- }
- },
- "chat": [
- {
- "message": "test",
- "isSend": true
- }
- ]
-}
\ No newline at end of file
diff --git a/src/lfx/tests/data/SimpleAPITest.json b/src/lfx/tests/data/SimpleAPITest.json
deleted file mode 100644
index fc778802c0f1..000000000000
--- a/src/lfx/tests/data/SimpleAPITest.json
+++ /dev/null
@@ -1,756 +0,0 @@
-{
- "id": "1e3eea9b-d466-4fba-b0d9-08901143df3e",
- "data": {
- "nodes": [
- {
- "id": "ChatInput-3OQi9",
- "type": "genericNode",
- "position": {
- "x": 180,
- "y": 126.45072115384616
- },
- "data": {
- "type": "ChatInput",
- "node": {
- "template": {
- "_type": "Component",
- "files": {
- "trace_as_metadata": true,
- "file_path": "",
- "fileTypes": [
- "txt",
- "md",
- "mdx",
- "csv",
- "json",
- "yaml",
- "yml",
- "xml",
- "html",
- "htm",
- "pdf",
- "docx",
- "py",
- "sh",
- "sql",
- "js",
- "ts",
- "tsx",
- "jpg",
- "jpeg",
- "png",
- "bmp",
- "image"
- ],
- "list": true,
- "required": false,
- "placeholder": "",
- "show": true,
- "name": "files",
- "value": "",
- "display_name": "Files",
- "advanced": true,
- "dynamic": false,
- "info": "Files to be sent with the message.",
- "title_case": false,
- "type": "file",
- "_input_type": "FileInput"
- },
- "background_color": {
- "tool_mode": false,
- "trace_as_input": true,
- "trace_as_metadata": true,
- "load_from_db": false,
- "list": false,
- "required": false,
- "placeholder": "",
- "show": true,
- "name": "background_color",
- "value": "",
- "display_name": "Background Color",
- "advanced": true,
- "input_types": [
- "Message"
- ],
- "dynamic": false,
- "info": "The background color of the icon.",
- "title_case": false,
- "type": "str",
- "_input_type": "MessageTextInput"
- },
- "chat_icon": {
- "tool_mode": false,
- "trace_as_input": true,
- "trace_as_metadata": true,
- "load_from_db": false,
- "list": false,
- "required": false,
- "placeholder": "",
- "show": true,
- "name": "chat_icon",
- "value": "",
- "display_name": "Icon",
- "advanced": true,
- "input_types": [
- "Message"
- ],
- "dynamic": false,
- "info": "The icon of the message.",
- "title_case": false,
- "type": "str",
- "_input_type": "MessageTextInput"
- },
- "code": {
- "type": "code",
- "required": true,
- "placeholder": "",
- "list": false,
- "show": true,
- "multiline": true,
- "value": "from lfx.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom lfx.base.io.chat import ChatComponent\nfrom lfx.inputs import BoolInput\nfrom lfx.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom lfx.schema.message import Message\nfrom lfx.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\n \"background_color\": background_color,\n \"text_color\": text_color,\n \"icon\": icon,\n },\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n",
- "fileTypes": [],
- "file_path": "",
- "password": false,
- "name": "code",
- "advanced": true,
- "dynamic": true,
- "info": "",
- "load_from_db": false,
- "title_case": false
- },
- "input_value": {
- "tool_mode": false,
- "trace_as_input": true,
- "multiline": true,
- "trace_as_metadata": true,
- "load_from_db": false,
- "list": false,
- "required": false,
- "placeholder": "",
- "show": true,
- "name": "input_value",
- "value": "",
- "display_name": "Text",
- "advanced": false,
- "input_types": [
- "Message"
- ],
- "dynamic": false,
- "info": "Message to be passed as input.",
- "title_case": false,
- "type": "str",
- "_input_type": "MultilineInput"
- },
- "sender": {
- "tool_mode": false,
- "trace_as_metadata": true,
- "options": [
- "Machine",
- "User"
- ],
- "combobox": false,
- "required": false,
- "placeholder": "",
- "show": true,
- "name": "sender",
- "value": "User",
- "display_name": "Sender Type",
- "advanced": true,
- "dynamic": false,
- "info": "Type of sender.",
- "title_case": false,
- "type": "str",
- "_input_type": "DropdownInput"
- },
- "sender_name": {
- "tool_mode": false,
- "trace_as_input": true,
- "trace_as_metadata": true,
- "load_from_db": false,
- "list": false,
- "required": false,
- "placeholder": "",
- "show": true,
- "name": "sender_name",
- "value": "User",
- "display_name": "Sender Name",
- "advanced": false,
- "input_types": [
- "Message"
- ],
- "dynamic": false,
- "info": "Name of the sender.",
- "title_case": false,
- "type": "str",
- "_input_type": "MessageTextInput"
- },
- "session_id": {
- "tool_mode": false,
- "trace_as_input": true,
- "trace_as_metadata": true,
- "load_from_db": false,
- "list": false,
- "required": false,
- "placeholder": "",
- "show": true,
- "name": "session_id",
- "value": "",
- "display_name": "Session ID",
- "advanced": true,
- "input_types": [
- "Message"
- ],
- "dynamic": false,
- "info": "The session ID of the chat. If empty, the current session ID parameter will be used.",
- "title_case": false,
- "type": "str",
- "_input_type": "MessageTextInput"
- },
- "should_store_message": {
- "tool_mode": false,
- "trace_as_metadata": true,
- "list": false,
- "required": false,
- "placeholder": "",
- "show": true,
- "name": "should_store_message",
- "value": true,
- "display_name": "Store Messages",
- "advanced": true,
- "dynamic": false,
- "info": "Store the message in the history.",
- "title_case": false,
- "type": "bool",
- "_input_type": "BoolInput"
- },
- "text_color": {
- "tool_mode": false,
- "trace_as_input": true,
- "trace_as_metadata": true,
- "load_from_db": false,
- "list": false,
- "required": false,
- "placeholder": "",
- "show": true,
- "name": "text_color",
- "value": "",
- "display_name": "Text Color",
- "advanced": true,
- "input_types": [
- "Message"
- ],
- "dynamic": false,
- "info": "The text color of the name",
- "title_case": false,
- "type": "str",
- "_input_type": "MessageTextInput"
- }
- },
- "description": "Get chat inputs from the Playground.",
- "icon": "MessagesSquare",
- "base_classes": [
- "Message"
- ],
- "display_name": "Chat Input",
- "documentation": "",
- "minimized": true,
- "custom_fields": {},
- "output_types": [],
- "pinned": false,
- "conditional_paths": [],
- "frozen": false,
- "outputs": [
- {
- "types": [
- "Message"
- ],
- "selected": "Message",
- "name": "message",
- "display_name": "Message",
- "method": "message_response",
- "value": "__UNDEFINED__",
- "cache": true
- }
- ],
- "field_order": [
- "input_value",
- "should_store_message",
- "sender",
- "sender_name",
- "session_id",
- "files",
- "background_color",
- "chat_icon",
- "text_color"
- ],
- "beta": false,
- "legacy": false,
- "edited": false,
- "metadata": {},
- "tool_mode": false
- },
- "id": "ChatInput-3OQi9",
- "description": "Get chat inputs from the Playground.",
- "display_name": "Chat Input"
- },
- "selected": true,
- "width": 384,
- "height": 309,
- "measured": {
- "width": 384,
- "height": 309
- },
- "dragging": false
- },
- {
- "id": "TextInput-eFiZp",
- "type": "genericNode",
- "position": {
- "x": 192.89230769230767,
- "y": 525.6661057692309
- },
- "data": {
- "type": "TextInput",
- "node": {
- "template": {
- "_type": "Component",
- "code": {
- "type": "code",
- "required": true,
- "placeholder": "",
- "list": false,
- "show": true,
- "multiline": true,
- "value": "from lfx.base.io.text import TextComponent\nfrom lfx.io import MultilineInput, Output\nfrom lfx.schema.message import Message\n\n\nclass TextInputComponent(TextComponent):\n display_name = \"Text Input\"\n description = \"Get text inputs from the Playground.\"\n icon = \"type\"\n name = \"TextInput\"\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Text\",\n info=\"Text to be passed as input.\",\n ),\n ]\n outputs = [\n Output(display_name=\"Text\", name=\"text\", method=\"text_response\"),\n ]\n\n def text_response(self) -> Message:\n return Message(\n text=self.input_value,\n )\n",
- "fileTypes": [],
- "file_path": "",
- "password": false,
- "name": "code",
- "advanced": true,
- "dynamic": true,
- "info": "",
- "load_from_db": false,
- "title_case": false
- },
- "input_value": {
- "tool_mode": false,
- "trace_as_input": true,
- "multiline": true,
- "trace_as_metadata": true,
- "load_from_db": false,
- "list": false,
- "required": false,
- "placeholder": "",
- "show": true,
- "name": "input_value",
- "value": "AI",
- "display_name": "Text",
- "advanced": false,
- "input_types": [
- "Message"
- ],
- "dynamic": false,
- "info": "Text to be passed as input.",
- "title_case": false,
- "type": "str",
- "_input_type": "MultilineInput"
- }
- },
- "description": "Get text inputs from the Playground.",
- "icon": "type",
- "base_classes": [
- "Message"
- ],
- "display_name": "Text Input",
- "documentation": "",
- "minimized": false,
- "custom_fields": {},
- "output_types": [],
- "pinned": false,
- "conditional_paths": [],
- "frozen": false,
- "outputs": [
- {
- "types": [
- "Message"
- ],
- "selected": "Message",
- "name": "text",
- "display_name": "Text",
- "method": "text_response",
- "value": "__UNDEFINED__",
- "cache": true
- }
- ],
- "field_order": [
- "input_value"
- ],
- "beta": false,
- "legacy": false,
- "edited": false,
- "metadata": {},
- "tool_mode": false
- },
- "id": "TextInput-eFiZp"
- },
- "selected": false,
- "width": 384,
- "height": 309,
- "positionAbsolute": {
- "x": 186,
- "y": 549.296875
- },
- "dragging": false,
- "measured": {
- "width": 384,
- "height": 309
- }
- },
- {
- "id": "ChatOutput-J6aor",
- "type": "genericNode",
- "position": {
- "x": 820,
- "y": 224.296875
- },
- "data": {
- "type": "ChatOutput",
- "node": {
- "template": {
- "_type": "Component",
- "background_color": {
- "tool_mode": false,
- "trace_as_input": true,
- "trace_as_metadata": true,
- "load_from_db": false,
- "list": false,
- "required": false,
- "placeholder": "",
- "show": true,
- "name": "background_color",
- "value": "",
- "display_name": "Background Color",
- "advanced": true,
- "input_types": [
- "Message"
- ],
- "dynamic": false,
- "info": "The background color of the icon.",
- "title_case": false,
- "type": "str",
- "_input_type": "MessageTextInput"
- },
- "chat_icon": {
- "tool_mode": false,
- "trace_as_input": true,
- "trace_as_metadata": true,
- "load_from_db": false,
- "list": false,
- "required": false,
- "placeholder": "",
- "show": true,
- "name": "chat_icon",
- "value": "",
- "display_name": "Icon",
- "advanced": true,
- "input_types": [
- "Message"
- ],
- "dynamic": false,
- "info": "The icon of the message.",
- "title_case": false,
- "type": "str",
- "_input_type": "MessageTextInput"
- },
- "code": {
- "type": "code",
- "required": true,
- "placeholder": "",
- "list": false,
- "show": true,
- "multiline": true,
- "value": "from lfx.base.io.chat import ChatComponent\nfrom lfx.inputs import BoolInput\nfrom lfx.io import DropdownInput, MessageInput, MessageTextInput, Output\nfrom lfx.schema.message import Message\nfrom lfx.schema.properties import Source\nfrom lfx.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n MessageInput(\n name=\"input_value\",\n display_name=\"Text\",\n info=\"Message to be passed as output.\",\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n source_dict[\"source\"] = source\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n message = self.input_value if isinstance(self.input_value, Message) else Message(text=self.input_value)\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n",
- "fileTypes": [],
- "file_path": "",
- "password": false,
- "name": "code",
- "advanced": true,
- "dynamic": true,
- "info": "",
- "load_from_db": false,
- "title_case": false
- },
- "data_template": {
- "tool_mode": false,
- "trace_as_input": true,
- "trace_as_metadata": true,
- "load_from_db": false,
- "list": false,
- "required": false,
- "placeholder": "",
- "show": true,
- "name": "data_template",
- "value": "{text}",
- "display_name": "Data Template",
- "advanced": true,
- "input_types": [
- "Message"
- ],
- "dynamic": false,
- "info": "Template to convert Data to Text. If left empty, it will be dynamically set to the Data's text key.",
- "title_case": false,
- "type": "str",
- "_input_type": "MessageTextInput"
- },
- "input_value": {
- "trace_as_input": true,
- "tool_mode": false,
- "trace_as_metadata": true,
- "load_from_db": false,
- "list": false,
- "required": false,
- "placeholder": "",
- "show": true,
- "name": "input_value",
- "value": "",
- "display_name": "Text",
- "advanced": false,
- "input_types": [
- "Message"
- ],
- "dynamic": false,
- "info": "Message to be passed as output.",
- "title_case": false,
- "type": "str",
- "_input_type": "MessageInput"
- },
- "sender": {
- "tool_mode": false,
- "trace_as_metadata": true,
- "options": [
- "Machine",
- "User"
- ],
- "combobox": false,
- "required": false,
- "placeholder": "",
- "show": true,
- "name": "sender",
- "value": "Machine",
- "display_name": "Sender Type",
- "advanced": true,
- "dynamic": false,
- "info": "Type of sender.",
- "title_case": false,
- "type": "str",
- "_input_type": "DropdownInput"
- },
- "sender_name": {
- "tool_mode": false,
- "trace_as_input": true,
- "trace_as_metadata": true,
- "load_from_db": false,
- "list": false,
- "required": false,
- "placeholder": "",
- "show": true,
- "name": "sender_name",
- "value": "",
- "display_name": "Sender Name",
- "advanced": false,
- "input_types": [
- "Message"
- ],
- "dynamic": false,
- "info": "Name of the sender.",
- "title_case": false,
- "type": "str",
- "_input_type": "MessageTextInput"
- },
- "session_id": {
- "tool_mode": false,
- "trace_as_input": true,
- "trace_as_metadata": true,
- "load_from_db": false,
- "list": false,
- "required": false,
- "placeholder": "",
- "show": true,
- "name": "session_id",
- "value": "",
- "display_name": "Session ID",
- "advanced": true,
- "input_types": [
- "Message"
- ],
- "dynamic": false,
- "info": "The session ID of the chat. If empty, the current session ID parameter will be used.",
- "title_case": false,
- "type": "str",
- "_input_type": "MessageTextInput"
- },
- "should_store_message": {
- "tool_mode": false,
- "trace_as_metadata": true,
- "list": false,
- "required": false,
- "placeholder": "",
- "show": true,
- "name": "should_store_message",
- "value": true,
- "display_name": "Store Messages",
- "advanced": true,
- "dynamic": false,
- "info": "Store the message in the history.",
- "title_case": false,
- "type": "bool",
- "_input_type": "BoolInput"
- },
- "text_color": {
- "tool_mode": false,
- "trace_as_input": true,
- "trace_as_metadata": true,
- "load_from_db": false,
- "list": false,
- "required": false,
- "placeholder": "",
- "show": true,
- "name": "text_color",
- "value": "",
- "display_name": "Text Color",
- "advanced": true,
- "input_types": [
- "Message"
- ],
- "dynamic": false,
- "info": "The text color of the name",
- "title_case": false,
- "type": "str",
- "_input_type": "MessageTextInput"
- }
- },
- "description": "Display a chat message in the Playground.",
- "icon": "MessagesSquare",
- "base_classes": [
- "Message"
- ],
- "display_name": "Chat Output",
- "documentation": "",
- "minimized": true,
- "custom_fields": {},
- "output_types": [],
- "pinned": false,
- "conditional_paths": [],
- "frozen": false,
- "outputs": [
- {
- "types": [
- "Message"
- ],
- "selected": "Message",
- "name": "message",
- "display_name": "Message",
- "method": "message_response",
- "value": "__UNDEFINED__",
- "cache": true
- }
- ],
- "field_order": [
- "input_value",
- "should_store_message",
- "sender",
- "sender_name",
- "session_id",
- "data_template",
- "background_color",
- "chat_icon",
- "text_color"
- ],
- "beta": false,
- "legacy": false,
- "edited": false,
- "metadata": {},
- "tool_mode": false
- },
- "id": "ChatOutput-J6aor",
- "description": "Display a chat message in the Playground.",
- "display_name": "Chat Output"
- },
- "selected": false,
- "width": 384,
- "height": 403,
- "positionAbsolute": {
- "x": 820,
- "y": 224.296875
- },
- "dragging": false,
- "measured": {
- "width": 384,
- "height": 403
- }
- }
- ],
- "edges": [
- {
- "source": "TextInput-eFiZp",
- "sourceHandle": "{œdataTypeœ:œTextInputœ,œidœ:œTextInput-eFiZpœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}",
- "target": "ChatOutput-J6aor",
- "targetHandle": "{œfieldNameœ:œsender_nameœ,œidœ:œChatOutput-J6aorœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}",
- "data": {
- "targetHandle": {
- "fieldName": "sender_name",
- "id": "ChatOutput-J6aor",
- "inputTypes": [
- "Message"
- ],
- "type": "str"
- },
- "sourceHandle": {
- "dataType": "TextInput",
- "id": "TextInput-eFiZp",
- "name": "text",
- "output_types": [
- "Message"
- ]
- }
- },
- "id": "xy-edge__TextInput-eFiZp{œdataTypeœ:œTextInputœ,œidœ:œTextInput-eFiZpœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}-ChatOutput-J6aor{œfieldNameœ:œsender_nameœ,œidœ:œChatOutput-J6aorœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}"
- },
- {
- "source": "ChatInput-3OQi9",
- "sourceHandle": "{œdataTypeœ:œChatInputœ,œidœ:œChatInput-3OQi9œ,œnameœ:œmessageœ,œoutput_typesœ:[œMessageœ]}",
- "target": "ChatOutput-J6aor",
- "targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-J6aorœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}",
- "data": {
- "targetHandle": {
- "fieldName": "input_value",
- "id": "ChatOutput-J6aor",
- "inputTypes": [
- "Message"
- ],
- "type": "str"
- },
- "sourceHandle": {
- "dataType": "ChatInput",
- "id": "ChatInput-3OQi9",
- "name": "message",
- "output_types": [
- "Message"
- ]
- }
- },
- "id": "xy-edge__ChatInput-3OQi9{œdataTypeœ:œChatInputœ,œidœ:œChatInput-3OQi9œ,œnameœ:œmessageœ,œoutput_typesœ:[œMessageœ]}-ChatOutput-J6aor{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-J6aorœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}"
- }
- ],
- "viewport": {
- "x": -69.8125,
- "y": -65.067138671875,
- "zoom": 1.015625
- }
- },
- "description": "Nurture NLP Nodes Here.",
- "name": "Simple API Test",
- "last_tested_version": "1.1.1",
- "endpoint_name": null,
- "is_component": false
-}
\ No newline at end of file
diff --git a/src/lfx/tests/data/TwoOutputsTest.json b/src/lfx/tests/data/TwoOutputsTest.json
deleted file mode 100644
index ff46aab5ba1c..000000000000
--- a/src/lfx/tests/data/TwoOutputsTest.json
+++ /dev/null
@@ -1,1024 +0,0 @@
-{
- "name": "TwoOutputsTest",
- "description": "",
- "data": {
- "nodes": [
- {
- "width": 384,
- "height": 359,
- "id": "PromptTemplate-CweKz",
- "type": "genericNode",
- "position": {
- "x": 969.6448076246203,
- "y": 528.7788853763968
- },
- "data": {
- "type": "PromptTemplate",
- "node": {
- "template": {
- "output_parser": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "output_parser",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "BaseOutputParser",
- "list": false
- },
- "input_variables": {
- "required": true,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "input_variables",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "str",
- "list": true,
- "value": [
- "input"
- ]
- },
- "partial_variables": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "partial_variables",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "code",
- "list": false
- },
- "template": {
- "required": true,
- "placeholder": "",
- "show": true,
- "multiline": true,
- "password": false,
- "name": "template",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "prompt",
- "list": false,
- "value": "Input: {input}\nAI:"
- },
- "template_format": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "value": "f-string",
- "password": false,
- "name": "template_format",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "str",
- "list": false
- },
- "validate_template": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "value": true,
- "password": false,
- "name": "validate_template",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "bool",
- "list": false
- },
- "_type": "PromptTemplate",
- "input": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": true,
- "value": "",
- "password": false,
- "name": "input",
- "display_name": "input",
- "advanced": false,
- "input_types": [
- "Document",
- "BaseOutputParser",
- "str"
- ],
- "dynamic": false,
- "info": "",
- "type": "str",
- "list": false
- }
- },
- "description": "A prompt template for a language model.",
- "base_classes": [
- "BasePromptTemplate",
- "StringPromptTemplate",
- "PromptTemplate"
- ],
- "name": "",
- "display_name": "PromptTemplate",
- "documentation": "https://python.langchain.com/docs/modules/model_io/prompts/prompt_templates/",
- "custom_fields": {
- "": [
- "input"
- ],
- "template": [
- "input"
- ]
- },
- "output_types": [],
- "field_formatters": {
- "formatters": {
- "openai_api_key": {}
- },
- "base_formatters": {
- "kwargs": {},
- "optional": {},
- "list": {},
- "dict": {},
- "union": {},
- "multiline": {},
- "show": {},
- "password": {},
- "default": {},
- "headers": {},
- "dict_code_file": {},
- "model_fields": {
- "MODEL_DICT": {
- "OpenAI": [
- "text-davinci-003",
- "text-davinci-002",
- "text-curie-001",
- "text-babbage-001",
- "text-ada-001"
- ],
- "ChatOpenAI": [
- "gpt-3.5-turbo-0613",
- "gpt-3.5-turbo",
- "gpt-3.5-turbo-16k-0613",
- "gpt-3.5-turbo-16k",
- "gpt-4-0613",
- "gpt-4-32k-0613",
- "gpt-4",
- "gpt-4-32k"
- ],
- "Anthropic": [
- "claude-v1",
- "claude-v1-100k",
- "claude-instant-v1",
- "claude-instant-v1-100k",
- "claude-v1.3",
- "claude-v1.3-100k",
- "claude-v1.2",
- "claude-v1.0",
- "claude-instant-v1.1",
- "claude-instant-v1.1-100k",
- "claude-instant-v1.0"
- ],
- "ChatAnthropic": [
- "claude-v1",
- "claude-v1-100k",
- "claude-instant-v1",
- "claude-instant-v1-100k",
- "claude-v1.3",
- "claude-v1.3-100k",
- "claude-v1.2",
- "claude-v1.0",
- "claude-instant-v1.1",
- "claude-instant-v1.1-100k",
- "claude-instant-v1.0"
- ]
- }
- }
- }
- },
- "beta": false,
- "error": null
- },
- "id": "PromptTemplate-CweKz"
- },
- "selected": false,
- "positionAbsolute": {
- "x": 969.6448076246203,
- "y": 528.7788853763968
- }
- },
- {
- "width": 384,
- "height": 307,
- "id": "LLMChain-HUM6g",
- "type": "genericNode",
- "position": {
- "x": 1515.3241458756393,
- "y": 732.4536491407735
- },
- "data": {
- "type": "LLMChain",
- "node": {
- "template": {
- "callbacks": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "callbacks",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "langchain.callbacks.base.BaseCallbackHandler",
- "list": true
- },
- "llm": {
- "required": true,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "password": false,
- "name": "llm",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "BaseLanguageModel",
- "list": false
- },
- "memory": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "password": false,
- "name": "memory",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "BaseMemory",
- "list": false
- },
- "output_parser": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "output_parser",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "BaseLLMOutputParser",
- "list": false
- },
- "prompt": {
- "required": true,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "password": false,
- "name": "prompt",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "BasePromptTemplate",
- "list": false
- },
- "llm_kwargs": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "llm_kwargs",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "code",
- "list": false
- },
- "metadata": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "metadata",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "code",
- "list": false
- },
- "output_key": {
- "required": true,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "value": "text",
- "password": false,
- "name": "output_key",
- "advanced": true,
- "dynamic": false,
- "info": "",
- "type": "str",
- "list": false
- },
- "return_final_only": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "value": true,
- "password": false,
- "name": "return_final_only",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "bool",
- "list": false
- },
- "tags": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "tags",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "str",
- "list": true
- },
- "verbose": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "value": false,
- "password": false,
- "name": "verbose",
- "advanced": true,
- "dynamic": false,
- "info": "",
- "type": "bool",
- "list": false
- },
- "_type": "LLMChain"
- },
- "description": "Chain to run queries against LLMs.",
- "base_classes": [
- "LLMChain",
- "Chain",
- "function",
- "Text"
- ],
- "display_name": "LLMChain",
- "custom_fields": {},
- "output_types": [],
- "documentation": "https://python.langchain.com/docs/modules/chains/foundational/llm_chain",
- "beta": false,
- "error": null
- },
- "id": "LLMChain-HUM6g"
- },
- "selected": false,
- "positionAbsolute": {
- "x": 1515.3241458756393,
- "y": 732.4536491407735
- },
- "dragging": false
- },
- {
- "width": 384,
- "height": 621,
- "id": "ChatOpenAI-02kOF",
- "type": "genericNode",
- "position": {
- "x": 483,
- "y": 942.8665628296949
- },
- "data": {
- "type": "ChatOpenAI",
- "node": {
- "template": {
- "callbacks": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "callbacks",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "langchain.callbacks.base.BaseCallbackHandler",
- "list": true
- },
- "cache": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "cache",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "bool",
- "list": false
- },
- "client": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "client",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "Any",
- "list": false
- },
- "max_retries": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "value": 6,
- "password": false,
- "name": "max_retries",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "int",
- "list": false
- },
- "max_tokens": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "password": true,
- "name": "max_tokens",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "int",
- "list": false,
- "value": ""
- },
- "metadata": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "metadata",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "code",
- "list": false
- },
- "model_kwargs": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "password": false,
- "name": "model_kwargs",
- "advanced": true,
- "dynamic": false,
- "info": "",
- "type": "code",
- "list": false
- },
- "model_name": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "value": "gpt-3.5-turbo-0613",
- "password": false,
- "options": [
- "gpt-3.5-turbo-0613",
- "gpt-3.5-turbo",
- "gpt-3.5-turbo-16k-0613",
- "gpt-3.5-turbo-16k",
- "gpt-4-0613",
- "gpt-4-32k-0613",
- "gpt-4",
- "gpt-4-32k"
- ],
- "name": "model_name",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "str",
- "list": true
- },
- "n": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "value": 1,
- "password": false,
- "name": "n",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "int",
- "list": false
- },
- "openai_api_base": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "password": false,
- "name": "openai_api_base",
- "display_name": "OpenAI API Base",
- "advanced": false,
- "dynamic": false,
- "info": "\nThe base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\n\nYou can change this to use other APIs like JinaChat, LocalAI and Prem.\n",
- "type": "str",
- "list": false
- },
- "openai_api_key": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "value": "",
- "password": true,
- "name": "openai_api_key",
- "display_name": "OpenAI API Key",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "str",
- "list": false
- },
- "openai_organization": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "openai_organization",
- "display_name": "OpenAI Organization",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "str",
- "list": false
- },
- "openai_proxy": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "openai_proxy",
- "display_name": "OpenAI Proxy",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "str",
- "list": false
- },
- "request_timeout": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "request_timeout",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "float",
- "list": false
- },
- "streaming": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "value": false,
- "password": false,
- "name": "streaming",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "bool",
- "list": false
- },
- "tags": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "tags",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "str",
- "list": true
- },
- "temperature": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "value": 0.7,
- "password": false,
- "name": "temperature",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "float",
- "list": false
- },
- "tiktoken_model_name": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "tiktoken_model_name",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "str",
- "list": false
- },
- "verbose": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "value": false,
- "password": false,
- "name": "verbose",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "bool",
- "list": false
- },
- "_type": "ChatOpenAI"
- },
- "description": "`OpenAI` Chat large language models API.",
- "base_classes": [
- "ChatOpenAI",
- "BaseLanguageModel",
- "BaseChatModel",
- "BaseLLM"
- ],
- "display_name": "ChatOpenAI",
- "custom_fields": {},
- "output_types": [],
- "documentation": "https://python.langchain.com/docs/modules/model_io/models/chat/integrations/openai",
- "beta": false,
- "error": null
- },
- "id": "ChatOpenAI-02kOF"
- },
- "selected": false,
- "positionAbsolute": {
- "x": 483,
- "y": 942.8665628296949
- }
- },
- {
- "width": 384,
- "height": 389,
- "id": "ChatOutput-8SWFf",
- "type": "genericNode",
- "position": {
- "x": 2035.5749798606498,
- "y": 651.0174452514373
- },
- "data": {
- "type": "ChatOutput",
- "node": {
- "template": {
- "code": {
- "dynamic": true,
- "required": true,
- "placeholder": "",
- "show": true,
- "multiline": true,
- "value": "from typing import Optional\nfrom langflow.api.v1.schemas import ChatMessage\nfrom langflow.services.utils import get_chat_manager\nfrom lfx.custom import CustomComponent\nfrom anyio.from_thread import start_blocking_portal\nfrom lfx.log.logger import logger\nfrom lfx.field_typing import Text\n\n\nclass ChatOutput(CustomComponent):\n display_name = \"Chat Output\"\n\n def build_config(self):\n return {\"message\": {\"input_types\": [\"str\"]}}\n\n def build(self, message: Optional[Text], is_ai: bool = False) -> Text:\n if not message:\n return \"\"\n try:\n chat_manager = get_chat_manager()\n chat_message = ChatMessage(message=message, is_bot=is_ai)\n # send_message is a coroutine\n # run in a thread safe manner\n with start_blocking_portal() as portal:\n portal.call(chat_manager.send_message, chat_message)\n chat_manager.chat_history.add_message(\n chat_manager.cache_manager.current_client_id, chat_message\n )\n except Exception as exc:\n logger.exception(exc)\n logger.debug(f\"Error sending message to chat: {exc}\")\n\n return message\n",
- "password": false,
- "name": "code",
- "advanced": false,
- "type": "code",
- "list": false
- },
- "_type": "CustomComponent",
- "is_ai": {
- "required": true,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "value": false,
- "password": false,
- "name": "is_ai",
- "display_name": "is_ai",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "bool",
- "list": false
- },
- "message": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "password": false,
- "name": "message",
- "display_name": "message",
- "advanced": false,
- "input_types": [
- "Text"
- ],
- "dynamic": false,
- "info": "",
- "type": "Text",
- "list": false
- }
- },
- "description": "Used to send a message to the chat.",
- "base_classes": [
- "str"
- ],
- "display_name": "Chat Output",
- "custom_fields": {
- "is_ai": null,
- "message": null
- },
- "output_types": [
- "ChatOutput"
- ],
- "documentation": "",
- "beta": true,
- "error": null
- },
- "id": "ChatOutput-8SWFf"
- },
- "selected": false,
- "positionAbsolute": {
- "x": 2035.5749798606498,
- "y": 651.0174452514373
- }
- },
- {
- "width": 384,
- "height": 273,
- "id": "ChatInput-PqtHe",
- "type": "genericNode",
- "position": {
- "x": 504.7467002897712,
- "y": 388.46875
- },
- "data": {
- "type": "ChatInput",
- "node": {
- "template": {
- "code": {
- "dynamic": true,
- "required": true,
- "placeholder": "",
- "show": false,
- "multiline": true,
- "value": "from typing import Optional\nfrom lfx.custom import CustomComponent\n\n\nclass ChatInput(CustomComponent):\n display_name = \"Chat Input\"\n\n def build(self, message: Optional[str] = \"\") -> str:\n return message\n",
- "password": false,
- "name": "code",
- "advanced": false,
- "type": "code",
- "list": false
- },
- "_type": "CustomComponent",
- "message": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "value": "",
- "password": false,
- "name": "message",
- "display_name": "message",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "str",
- "list": false
- }
- },
- "description": "Used to get user input from the chat.",
- "base_classes": [
- "str"
- ],
- "display_name": "Chat Input",
- "custom_fields": {
- "message": null
- },
- "output_types": [
- "ChatInput"
- ],
- "documentation": "",
- "beta": true,
- "error": null
- },
- "id": "ChatInput-PqtHe"
- },
- "selected": false,
- "positionAbsolute": {
- "x": 504.7467002897712,
- "y": 388.46875
- }
- },
- {
- "width": 384,
- "height": 475,
- "id": "Tool-jyI4N",
- "type": "genericNode",
- "position": {
- "x": 2044.485030617051,
- "y": 1131.4250055845532
- },
- "data": {
- "type": "Tool",
- "node": {
- "template": {
- "func": {
- "required": true,
- "placeholder": "",
- "show": true,
- "multiline": true,
- "password": false,
- "name": "func",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "function",
- "list": false
- },
- "description": {
- "required": true,
- "placeholder": "",
- "show": true,
- "multiline": true,
- "value": "Test tool",
- "password": false,
- "name": "description",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "str",
- "list": false
- },
- "name": {
- "required": true,
- "placeholder": "",
- "show": true,
- "multiline": true,
- "value": "Tool",
- "password": false,
- "name": "name",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "str",
- "list": false
- },
- "return_direct": {
- "required": true,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "value": false,
- "password": false,
- "name": "return_direct",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "bool",
- "list": false
- },
- "_type": "Tool"
- },
- "description": "Converts a chain, agent or function into a tool.",
- "base_classes": [
- "Tool",
- "BaseTool"
- ],
- "display_name": "Tool",
- "custom_fields": {},
- "output_types": [],
- "documentation": "",
- "beta": false,
- "error": null
- },
- "id": "Tool-jyI4N"
- },
- "selected": true,
- "positionAbsolute": {
- "x": 2044.485030617051,
- "y": 1131.4250055845532
- },
- "dragging": false
- }
- ],
- "edges": [
- {
- "source": "PromptTemplate-CweKz",
- "target": "LLMChain-HUM6g",
- "sourceHandle": "PromptTemplate|PromptTemplate-CweKz|BasePromptTemplate|StringPromptTemplate|PromptTemplate",
- "targetHandle": "BasePromptTemplate|prompt|LLMChain-HUM6g",
- "id": "reactflow__edge-PromptTemplate-CweKzPromptTemplate|PromptTemplate-CweKz|BasePromptTemplate|StringPromptTemplate|PromptTemplate-LLMChain-HUM6gBasePromptTemplate|prompt|LLMChain-HUM6g",
- "style": {
- "stroke": "#555"
- },
- "className": "stroke-gray-900 ",
- "animated": false,
- "selected": false
- },
- {
- "source": "ChatOpenAI-02kOF",
- "target": "LLMChain-HUM6g",
- "sourceHandle": "ChatOpenAI|ChatOpenAI-02kOF|ChatOpenAI|BaseLanguageModel|BaseChatModel|BaseLLM",
- "targetHandle": "BaseLanguageModel|llm|LLMChain-HUM6g",
- "id": "reactflow__edge-ChatOpenAI-02kOFChatOpenAI|ChatOpenAI-02kOF|ChatOpenAI|BaseLanguageModel|BaseChatModel|BaseLLM-LLMChain-HUM6gBaseLanguageModel|llm|LLMChain-HUM6g",
- "style": {
- "stroke": "#555"
- },
- "className": "stroke-gray-900 ",
- "animated": false,
- "selected": false
- },
- {
- "source": "ChatInput-PqtHe",
- "target": "PromptTemplate-CweKz",
- "sourceHandle": "ChatInput|ChatInput-PqtHe|str",
- "targetHandle": "Document;BaseOutputParser;str|input|PromptTemplate-CweKz",
- "id": "reactflow__edge-ChatInput-PqtHeChatInput|ChatInput-PqtHe|str-PromptTemplate-CweKzDocument;BaseOutputParser;str|input|PromptTemplate-CweKz",
- "style": {
- "stroke": "#555"
- },
- "className": "stroke-gray-900 ",
- "animated": false,
- "selected": false
- },
- {
- "source": "LLMChain-HUM6g",
- "sourceHandle": "LLMChain|LLMChain-HUM6g|LLMChain|Chain|function|Text",
- "target": "ChatOutput-8SWFf",
- "targetHandle": "Text|message|ChatOutput-8SWFf",
- "style": {
- "stroke": "#555"
- },
- "className": "stroke-foreground stroke-connection",
- "animated": false,
- "id": "reactflow__edge-LLMChain-HUM6gLLMChain|LLMChain-HUM6g|LLMChain|Chain|function|Text-ChatOutput-8SWFfText|message|ChatOutput-8SWFf"
- },
- {
- "source": "LLMChain-HUM6g",
- "sourceHandle": "LLMChain|LLMChain-HUM6g|LLMChain|Chain|function|Text",
- "target": "Tool-jyI4N",
- "targetHandle": "function|func|Tool-jyI4N",
- "style": {
- "stroke": "#555"
- },
- "className": "stroke-foreground stroke-connection",
- "animated": false,
- "id": "reactflow__edge-LLMChain-HUM6gLLMChain|LLMChain-HUM6g|LLMChain|Chain|function|Text-Tool-jyI4Nfunction|func|Tool-jyI4N"
- }
- ],
- "viewport": {
- "x": -401.32668426335044,
- "y": -129.59138346130635,
- "zoom": 0.5073779796520557
- }
- },
- "id": "cf923ccb-e14c-4754-96eb-a8a3b5bbe082",
- "user_id": "c65bfea3-3eea-4e71-8fc4-106238eb0583"
-}
\ No newline at end of file
diff --git a/src/lfx/tests/data/Vector_store.json b/src/lfx/tests/data/Vector_store.json
deleted file mode 100644
index 2a1ddd5f3791..000000000000
--- a/src/lfx/tests/data/Vector_store.json
+++ /dev/null
@@ -1,1283 +0,0 @@
-{
- "name": "Vector Store",
- "description": "An agent that can query a Vector Store.\nTry asking \"How do I upload examples to Langflow?\"\n\n\n\n",
- "data": {
- "nodes": [
- {
- "width": 384,
- "height": 267,
- "id": "VectorStoreAgent-FOmxY",
- "type": "genericNode",
- "position": {
- "x": 2115.5183674856203,
- "y": -1277.6284872455249
- },
- "data": {
- "type": "VectorStoreAgent",
- "node": {
- "template": {
- "llm": {
- "required": true,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "password": false,
- "name": "llm",
- "display_name": "LLM",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "BaseLanguageModel",
- "list": false
- },
- "vectorstoreinfo": {
- "required": true,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "password": false,
- "name": "vectorstoreinfo",
- "display_name": "Vector Store Info",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "VectorStoreInfo",
- "list": false
- },
- "_type": "vectorstore_agent"
- },
- "description": "Construct an agent from a Vector Store.",
- "base_classes": [
- "AgentExecutor"
- ],
- "display_name": "VectorStoreAgent",
- "documentation": ""
- },
- "id": "VectorStoreAgent-FOmxY",
- "value": null
- },
- "selected": false,
- "positionAbsolute": {
- "x": 2115.5183674856203,
- "y": -1277.6284872455249
- },
- "dragging": false
- },
- {
- "width": 384,
- "height": 399,
- "id": "VectorStoreInfo-z0sH5",
- "type": "genericNode",
- "position": {
- "x": 1553.2875394928135,
- "y": -1319.2113273706286
- },
- "data": {
- "type": "VectorStoreInfo",
- "node": {
- "template": {
- "vectorstore": {
- "required": true,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "password": false,
- "name": "vectorstore",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "VectorStore",
- "list": false
- },
- "description": {
- "required": true,
- "placeholder": "",
- "show": true,
- "multiline": true,
- "password": false,
- "name": "description",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "str",
- "list": false,
- "value": "Instructions to upload examples to Langflow Community Examples"
- },
- "name": {
- "required": true,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "password": false,
- "name": "name",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "str",
- "list": false,
- "value": "UploadExamples"
- },
- "_type": "VectorStoreInfo"
- },
- "description": "Information about a VectorStore.",
- "base_classes": [
- "VectorStoreInfo"
- ],
- "display_name": "VectorStoreInfo",
- "documentation": ""
- },
- "id": "VectorStoreInfo-z0sH5",
- "value": null
- },
- "selected": false,
- "positionAbsolute": {
- "x": 1553.2875394928135,
- "y": -1319.2113273706286
- },
- "dragging": false
- },
- {
- "width": 384,
- "height": 359,
- "id": "OpenAIEmbeddings-lge2J",
- "type": "genericNode",
- "position": {
- "x": 677.2699276778915,
- "y": -734.4639958173494
- },
- "data": {
- "type": "OpenAIEmbeddings",
- "node": {
- "template": {
- "allowed_special": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "value": [],
- "password": false,
- "name": "allowed_special",
- "advanced": true,
- "dynamic": false,
- "info": "",
- "type": "Literal'all'",
- "list": true
- },
- "disallowed_special": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "value": "all",
- "password": false,
- "name": "disallowed_special",
- "advanced": true,
- "dynamic": false,
- "info": "",
- "type": "Literal'all'",
- "list": true
- },
- "chunk_size": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "value": 1000,
- "password": false,
- "name": "chunk_size",
- "advanced": true,
- "dynamic": false,
- "info": "",
- "type": "int",
- "list": false
- },
- "client": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "password": false,
- "name": "client",
- "advanced": true,
- "dynamic": false,
- "info": "",
- "type": "Any",
- "list": false
- },
- "deployment": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "value": "text-embedding-ada-002",
- "password": false,
- "name": "deployment",
- "advanced": true,
- "dynamic": false,
- "info": "",
- "type": "str",
- "list": false
- },
- "embedding_ctx_length": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "value": 8191,
- "password": false,
- "name": "embedding_ctx_length",
- "advanced": true,
- "dynamic": false,
- "info": "",
- "type": "int",
- "list": false
- },
- "headers": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": true,
- "value": "{'Authorization':\n 'Bearer '}",
- "password": false,
- "name": "headers",
- "advanced": true,
- "dynamic": false,
- "info": "",
- "type": "Any",
- "list": false
- },
- "max_retries": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "value": 6,
- "password": false,
- "name": "max_retries",
- "advanced": true,
- "dynamic": false,
- "info": "",
- "type": "int",
- "list": false
- },
- "model": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "value": "text-embedding-ada-002",
- "password": false,
- "name": "model",
- "advanced": true,
- "dynamic": false,
- "info": "",
- "type": "str",
- "list": false
- },
- "model_kwargs": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "password": false,
- "name": "model_kwargs",
- "advanced": true,
- "dynamic": false,
- "info": "",
- "type": "code",
- "list": false
- },
- "openai_api_base": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "password": true,
- "name": "openai_api_base",
- "display_name": "OpenAI API Base",
- "advanced": true,
- "dynamic": false,
- "info": "",
- "type": "str",
- "list": false,
- "value": ""
- },
- "openai_api_key": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "value": "",
- "password": true,
- "name": "openai_api_key",
- "display_name": "OpenAI API Key",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "str",
- "list": false
- },
- "openai_api_type": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "password": true,
- "name": "openai_api_type",
- "display_name": "OpenAI API Type",
- "advanced": true,
- "dynamic": false,
- "info": "",
- "type": "str",
- "list": false,
- "value": ""
- },
- "openai_api_version": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "password": true,
- "name": "openai_api_version",
- "display_name": "OpenAI API Version",
- "advanced": true,
- "dynamic": false,
- "info": "",
- "type": "str",
- "list": false,
- "value": ""
- },
- "openai_organization": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "password": false,
- "name": "openai_organization",
- "display_name": "OpenAI Organization",
- "advanced": true,
- "dynamic": false,
- "info": "",
- "type": "str",
- "list": false
- },
- "openai_proxy": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "password": false,
- "name": "openai_proxy",
- "display_name": "OpenAI Proxy",
- "advanced": true,
- "dynamic": false,
- "info": "",
- "type": "str",
- "list": false
- },
- "request_timeout": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "password": false,
- "name": "request_timeout",
- "advanced": true,
- "dynamic": false,
- "info": "",
- "type": "float",
- "list": false
- },
- "show_progress_bar": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "value": false,
- "password": false,
- "name": "show_progress_bar",
- "advanced": true,
- "dynamic": false,
- "info": "",
- "type": "bool",
- "list": false
- },
- "tiktoken_model_name": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "password": true,
- "name": "tiktoken_model_name",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "str",
- "list": false,
- "value": ""
- },
- "_type": "OpenAIEmbeddings"
- },
- "description": "OpenAI embedding models.",
- "base_classes": [
- "OpenAIEmbeddings",
- "Embeddings"
- ],
- "display_name": "OpenAIEmbeddings",
- "documentation": "https://python.langchain.com/docs/modules/data_connection/text_embedding/integrations/openai"
- },
- "id": "OpenAIEmbeddings-lge2J",
- "value": null
- },
- "selected": false,
- "positionAbsolute": {
- "x": 677.2699276778915,
- "y": -734.4639958173494
- },
- "dragging": false
- },
- {
- "width": 384,
- "height": 515,
- "id": "Chroma-UK4a8",
- "type": "genericNode",
- "position": {
- "x": 1138.12587416446,
- "y": -1289.1517285671812
- },
- "data": {
- "type": "Chroma",
- "node": {
- "template": {
- "client": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "client",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "chromadb.Client",
- "list": false
- },
- "client_settings": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "client_settings",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "chromadb.config.Setting",
- "list": true
- },
- "documents": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "password": false,
- "name": "documents",
- "display_name": "Documents",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "Document",
- "list": true
- },
- "embedding": {
- "required": true,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "password": false,
- "name": "embedding",
- "display_name": "Embedding",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "Embeddings",
- "list": false
- },
- "chroma_server_cors_allow_origins": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "password": false,
- "name": "chroma_server_cors_allow_origins",
- "display_name": "Chroma Server CORS Allow Origins",
- "advanced": true,
- "dynamic": false,
- "info": "",
- "type": "str",
- "list": true
- },
- "chroma_server_grpc_port": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "password": false,
- "name": "chroma_server_grpc_port",
- "display_name": "Chroma Server GRPC Port",
- "advanced": true,
- "dynamic": false,
- "info": "",
- "type": "str",
- "list": false
- },
- "chroma_server_host": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "password": false,
- "name": "chroma_server_host",
- "display_name": "Chroma Server Host",
- "advanced": true,
- "dynamic": false,
- "info": "",
- "type": "str",
- "list": false
- },
- "chroma_server_http_port": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "password": false,
- "name": "chroma_server_http_port",
- "display_name": "Chroma Server HTTP Port",
- "advanced": true,
- "dynamic": false,
- "info": "",
- "type": "str",
- "list": false
- },
- "chroma_server_ssl_enabled": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "value": false,
- "password": false,
- "name": "chroma_server_ssl_enabled",
- "display_name": "Chroma Server SSL Enabled",
- "advanced": true,
- "dynamic": false,
- "info": "",
- "type": "bool",
- "list": false
- },
- "collection_metadata": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "collection_metadata",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "code",
- "list": false
- },
- "collection_name": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "value": "langflow",
- "password": false,
- "name": "collection_name",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "str",
- "list": false
- },
- "ids": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "ids",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "str",
- "list": true
- },
- "metadatas": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "metadatas",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "code",
- "list": true
- },
- "persist": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "value": false,
- "password": false,
- "name": "persist",
- "display_name": "Persist",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "bool",
- "list": false
- },
- "persist_directory": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "password": false,
- "name": "persist_directory",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "str",
- "list": false
- },
- "search_kwargs": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "value": "{}",
- "password": false,
- "name": "search_kwargs",
- "advanced": true,
- "dynamic": false,
- "info": "",
- "type": "code",
- "list": false
- },
- "_type": "Chroma"
- },
- "description": "Create a Chroma vectorstore from a raw documents.",
- "base_classes": [
- "VectorStore",
- "Chroma",
- "BaseRetriever",
- "VectorStoreRetriever"
- ],
- "display_name": "Chroma",
- "custom_fields": {},
- "output_types": [],
- "documentation": "https://python.langchain.com/docs/modules/data_connection/vectorstores/integrations/chroma"
- },
- "id": "Chroma-UK4a8",
- "value": null
- },
- "selected": false,
- "positionAbsolute": {
- "x": 1138.12587416446,
- "y": -1289.1517285671812
- },
- "dragging": false
- },
- {
- "width": 384,
- "height": 575,
- "id": "RecursiveCharacterTextSplitter-AUWrU",
- "type": "genericNode",
- "position": {
- "x": 607.3861456929772,
- "y": -1343.8126308350086
- },
- "data": {
- "type": "RecursiveCharacterTextSplitter",
- "node": {
- "template": {
- "documents": {
- "required": true,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "password": false,
- "name": "documents",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "Document",
- "list": true
- },
- "chunk_overlap": {
- "required": true,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "value": 200,
- "password": false,
- "name": "chunk_overlap",
- "display_name": "Chunk Overlap",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "int",
- "list": false
- },
- "chunk_size": {
- "required": true,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "value": 1000,
- "password": false,
- "name": "chunk_size",
- "display_name": "Chunk Size",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "int",
- "list": false
- },
- "separator_type": {
- "required": true,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "value": "Text",
- "password": false,
- "options": [
- "Text",
- "cpp",
- "go",
- "html",
- "java",
- "js",
- "latex",
- "markdown",
- "php",
- "proto",
- "python",
- "rst",
- "ruby",
- "rust",
- "scala",
- "sol",
- "swift"
- ],
- "name": "separator_type",
- "display_name": "Separator Type",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "str",
- "list": true
- },
- "separators": {
- "required": true,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "value": ".",
- "password": false,
- "name": "separators",
- "display_name": "Separator",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "str",
- "list": false
- },
- "_type": "RecursiveCharacterTextSplitter"
- },
- "description": "Splitting text by recursively look at characters.",
- "base_classes": [
- "Document"
- ],
- "display_name": "RecursiveCharacterTextSplitter",
- "custom_fields": {},
- "output_types": [
- "Document"
- ],
- "documentation": "https://python.langchain.com/docs/modules/data_connection/document_transformers/text_splitters/recursive_text_splitter"
- },
- "id": "RecursiveCharacterTextSplitter-AUWrU",
- "value": null
- },
- "selected": false,
- "positionAbsolute": {
- "x": 607.3861456929772,
- "y": -1343.8126308350086
- }
- },
- {
- "width": 384,
- "height": 379,
- "id": "WebBaseLoader-aUAEE",
- "type": "genericNode",
- "position": {
- "x": 60.77712301470575,
- "y": -1345.575885746874
- },
- "data": {
- "type": "WebBaseLoader",
- "node": {
- "template": {
- "metadata": {
- "required": true,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "value": "{}",
- "password": false,
- "name": "metadata",
- "display_name": "Metadata",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "code",
- "list": false
- },
- "web_path": {
- "required": true,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "value": "http://docs.langflow.org/examples/how-upload-examples",
- "password": false,
- "name": "web_path",
- "display_name": "Web Page",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "str",
- "list": false
- },
- "_type": "WebBaseLoader"
- },
- "description": "Load HTML pages using `urllib` and parse them with `BeautifulSoup'.",
- "base_classes": [
- "Document"
- ],
- "display_name": "WebBaseLoader",
- "custom_fields": {},
- "output_types": [
- "Document"
- ],
- "documentation": "https://python.langchain.com/docs/modules/data_connection/document_loaders/integrations/web_base"
- },
- "id": "WebBaseLoader-aUAEE",
- "value": null
- },
- "selected": false,
- "positionAbsolute": {
- "x": 60.77712301470575,
- "y": -1345.575885746874
- },
- "dragging": false
- },
- {
- "width": 384,
- "height": 621,
- "id": "ChatOpenAI-U4mZ2",
- "type": "genericNode",
- "position": {
- "x": 1557.7805431884235,
- "y": -897.7091381330642
- },
- "data": {
- "type": "ChatOpenAI",
- "node": {
- "template": {
- "callbacks": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "callbacks",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "langchain.callbacks.base.BaseCallbackHandler",
- "list": true
- },
- "cache": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "cache",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "bool",
- "list": false
- },
- "client": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "client",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "Any",
- "list": false
- },
- "max_retries": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "value": 6,
- "password": false,
- "name": "max_retries",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "int",
- "list": false
- },
- "max_tokens": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "password": true,
- "name": "max_tokens",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "int",
- "list": false,
- "value": ""
- },
- "metadata": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "metadata",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "code",
- "list": false
- },
- "model_kwargs": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "password": false,
- "name": "model_kwargs",
- "advanced": true,
- "dynamic": false,
- "info": "",
- "type": "code",
- "list": false
- },
- "model_name": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "value": "gpt-3.5-turbo-0613",
- "password": false,
- "options": [
- "gpt-3.5-turbo-0613",
- "gpt-3.5-turbo",
- "gpt-3.5-turbo-16k-0613",
- "gpt-3.5-turbo-16k",
- "gpt-4-0613",
- "gpt-4-32k-0613",
- "gpt-4",
- "gpt-4-32k"
- ],
- "name": "model_name",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "str",
- "list": true
- },
- "n": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "value": 1,
- "password": false,
- "name": "n",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "int",
- "list": false
- },
- "openai_api_base": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "password": false,
- "name": "openai_api_base",
- "display_name": "OpenAI API Base",
- "advanced": false,
- "dynamic": false,
- "info": "\nThe base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\n\nYou can change this to use other APIs like JinaChat, LocalAI and Prem.\n",
- "type": "str",
- "list": false
- },
- "openai_api_key": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "value": "",
- "password": true,
- "name": "openai_api_key",
- "display_name": "OpenAI API Key",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "str",
- "list": false
- },
- "openai_organization": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "openai_organization",
- "display_name": "OpenAI Organization",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "str",
- "list": false
- },
- "openai_proxy": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "openai_proxy",
- "display_name": "OpenAI Proxy",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "str",
- "list": false
- },
- "request_timeout": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "request_timeout",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "float",
- "list": false
- },
- "streaming": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "value": false,
- "password": false,
- "name": "streaming",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "bool",
- "list": false
- },
- "tags": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "tags",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "str",
- "list": true
- },
- "temperature": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "value": "0.2",
- "password": false,
- "name": "temperature",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "float",
- "list": false
- },
- "tiktoken_model_name": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "tiktoken_model_name",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "str",
- "list": false
- },
- "verbose": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "value": false,
- "password": false,
- "name": "verbose",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "bool",
- "list": false
- },
- "_type": "ChatOpenAI"
- },
- "description": "`OpenAI` Chat large language models API.",
- "base_classes": [
- "ChatOpenAI",
- "BaseLanguageModel",
- "BaseChatModel",
- "BaseLLM"
- ],
- "display_name": "ChatOpenAI",
- "custom_fields": {},
- "output_types": [],
- "documentation": "https://python.langchain.com/docs/modules/model_io/models/chat/integrations/openai"
- },
- "id": "ChatOpenAI-U4mZ2",
- "value": null
- },
- "selected": false,
- "positionAbsolute": {
- "x": 1557.7805431884235,
- "y": -897.7091381330642
- },
- "dragging": false
- }
- ],
- "edges": [
- {
- "source": "VectorStoreInfo-z0sH5",
- "sourceHandle": "VectorStoreInfo|VectorStoreInfo-z0sH5|VectorStoreInfo",
- "target": "VectorStoreAgent-FOmxY",
- "targetHandle": "VectorStoreInfo|vectorstoreinfo|VectorStoreAgent-FOmxY",
- "className": "",
- "id": "reactflow__edge-VectorStoreInfo-z0sH5VectorStoreInfo|VectorStoreInfo-z0sH5|VectorStoreInfo-VectorStoreAgent-FOmxYVectorStoreInfo|vectorstoreinfo|VectorStoreAgent-FOmxY",
- "selected": false,
- "style": {
- "stroke": "#555"
- },
- "animated": false
- },
- {
- "source": "Chroma-UK4a8",
- "sourceHandle": "Chroma|Chroma-UK4a8|VectorStore|Chroma|BaseRetriever|VectorStoreRetriever",
- "target": "VectorStoreInfo-z0sH5",
- "targetHandle": "VectorStore|vectorstore|VectorStoreInfo-z0sH5",
- "style": {
- "stroke": "#555"
- },
- "className": "",
- "animated": false,
- "id": "reactflow__edge-Chroma-UK4a8Chroma|Chroma-UK4a8|VectorStore|Chroma|BaseRetriever|VectorStoreRetriever-VectorStoreInfo-z0sH5VectorStore|vectorstore|VectorStoreInfo-z0sH5",
- "selected": false
- },
- {
- "source": "WebBaseLoader-aUAEE",
- "sourceHandle": "WebBaseLoader|WebBaseLoader-aUAEE|Document",
- "target": "RecursiveCharacterTextSplitter-AUWrU",
- "targetHandle": "Document|documents|RecursiveCharacterTextSplitter-AUWrU",
- "style": {
- "stroke": "#555"
- },
- "className": "",
- "animated": false,
- "id": "reactflow__edge-WebBaseLoader-aUAEEWebBaseLoader|WebBaseLoader-aUAEE|Document-RecursiveCharacterTextSplitter-AUWrUDocument|documents|RecursiveCharacterTextSplitter-AUWrU",
- "selected": false
- },
- {
- "source": "RecursiveCharacterTextSplitter-AUWrU",
- "sourceHandle": "RecursiveCharacterTextSplitter|RecursiveCharacterTextSplitter-AUWrU|Document",
- "target": "Chroma-UK4a8",
- "targetHandle": "Document|documents|Chroma-UK4a8",
- "style": {
- "stroke": "#555"
- },
- "className": "",
- "animated": false,
- "id": "reactflow__edge-RecursiveCharacterTextSplitter-AUWrURecursiveCharacterTextSplitter|RecursiveCharacterTextSplitter-AUWrU|Document-Chroma-UK4a8Document|documents|Chroma-UK4a8",
- "selected": false
- },
- {
- "source": "ChatOpenAI-U4mZ2",
- "sourceHandle": "ChatOpenAI|ChatOpenAI-U4mZ2|ChatOpenAI|BaseLanguageModel|BaseChatModel|BaseLLM",
- "target": "VectorStoreAgent-FOmxY",
- "targetHandle": "BaseLanguageModel|llm|VectorStoreAgent-FOmxY",
- "style": {
- "stroke": "#555"
- },
- "className": "",
- "animated": false,
- "id": "reactflow__edge-ChatOpenAI-U4mZ2ChatOpenAI|ChatOpenAI-U4mZ2|ChatOpenAI|BaseLanguageModel|BaseChatModel|BaseLLM-VectorStoreAgent-FOmxYBaseLanguageModel|llm|VectorStoreAgent-FOmxY",
- "selected": false
- },
- {
- "source": "OpenAIEmbeddings-lge2J",
- "sourceHandle": "OpenAIEmbeddings|OpenAIEmbeddings-lge2J|OpenAIEmbeddings|Embeddings",
- "target": "Chroma-UK4a8",
- "targetHandle": "Embeddings|embedding|Chroma-UK4a8",
- "style": {
- "stroke": "#555"
- },
- "className": "",
- "animated": false,
- "id": "reactflow__edge-OpenAIEmbeddings-lge2JOpenAIEmbeddings|OpenAIEmbeddings-lge2J|OpenAIEmbeddings|Embeddings-Chroma-UK4a8Embeddings|embedding|Chroma-UK4a8"
- }
- ],
- "viewport": {
- "x": 23.25459650899495,
- "y": 727.4174391025257,
- "zoom": 0.3802259585247222
- }
- },
- "id": "cc9d45a0-a071-4435-9e90-32ccbd1a972b",
- "user_id": "c65bfea3-3eea-4e71-8fc4-106238eb0583"
-}
\ No newline at end of file
diff --git a/src/lfx/tests/data/WebhookTest.json b/src/lfx/tests/data/WebhookTest.json
deleted file mode 100644
index 450af7a6280e..000000000000
--- a/src/lfx/tests/data/WebhookTest.json
+++ /dev/null
@@ -1,987 +0,0 @@
-{
- "id": "395a1d68-ee52-457c-a775-fac91363e165",
- "data": {
- "nodes": [
- {
- "id": "CustomComponent-5ADNr",
- "type": "genericNode",
- "position": {
- "x": 888.0012384532345,
- "y": 272.41352212880344
- },
- "data": {
- "type": "CustomComponent",
- "node": {
- "template": {
- "_type": "Component",
- "code": {
- "type": "code",
- "required": true,
- "placeholder": "",
- "list": false,
- "show": true,
- "multiline": true,
- "value": "# from lfx.field_typing import Data\nfrom lfx.custom import Component\nfrom lfx.io import StrInput\nfrom lfx.schema import Data\nfrom lfx.io import Output\nfrom pathlib import Path\nimport aiofiles\n\nclass CustomComponent(Component):\n display_name = \"Async Component\"\n description = \"Use as a template to create your own component.\"\n documentation: str = \"http://docs.langflow.org/components/custom\"\n icon = \"custom_components\"\n\n inputs = [\n StrInput(name=\"input_value\", display_name=\"Input Value\", value=\"Hello, World!\", input_types=[\"Data\"]),\n ]\n\n outputs = [\n Output(display_name=\"Output\", name=\"output\", method=\"build_output\"),\n ]\n\n async def build_output(self) -> Data:\n if isinstance(self.input_value, Data):\n data = self.input_value\n else:\n data = Data(value=self.input_value)\n \n if \"path\" in data:\n path = self.resolve_path(data.path)\n path_obj = Path(path)\n async with aiofiles.open(path, \"w\") as f:\n await f.write(data.model_dump_json())\n \n self.status = data\n return data",
- "fileTypes": [],
- "file_path": "",
- "password": false,
- "name": "code",
- "advanced": true,
- "dynamic": true,
- "info": "",
- "load_from_db": false,
- "title_case": false
- },
- "input_value": {
- "tool_mode": false,
- "trace_as_metadata": true,
- "load_from_db": false,
- "list": false,
- "required": false,
- "placeholder": "",
- "show": true,
- "name": "input_value",
- "value": "",
- "display_name": "Input Value",
- "advanced": false,
- "input_types": [
- "Data"
- ],
- "dynamic": false,
- "info": "",
- "title_case": false,
- "type": "str",
- "_input_type": "StrInput"
- }
- },
- "description": "Use as a template to create your own component.",
- "icon": "custom_components",
- "base_classes": [
- "Data"
- ],
- "display_name": "Async Component",
- "documentation": "https://docs.langflow.org/components-custom-components",
- "minimized": false,
- "custom_fields": {},
- "output_types": [],
- "pinned": false,
- "conditional_paths": [],
- "frozen": false,
- "outputs": [
- {
- "types": [
- "Data"
- ],
- "selected": "Data",
- "name": "output",
- "display_name": "Output",
- "method": "build_output",
- "value": "__UNDEFINED__",
- "cache": true
- }
- ],
- "field_order": [
- "input_value"
- ],
- "beta": false,
- "legacy": false,
- "edited": true,
- "metadata": {},
- "tool_mode": false
- },
- "id": "CustomComponent-5ADNr",
- "description": "Use as a template to create your own component.",
- "display_name": "Custom Component"
- },
- "selected": true,
- "width": 384,
- "height": 337,
- "positionAbsolute": {
- "x": 888.0012384532345,
- "y": 272.41352212880344
- },
- "dragging": false,
- "measured": {
- "width": 384,
- "height": 337
- }
- },
- {
- "id": "Webhook-ww3dq",
- "type": "genericNode",
- "position": {
- "x": 418,
- "y": 270.2890625
- },
- "data": {
- "type": "Webhook",
- "node": {
- "template": {
- "_type": "Component",
- "code": {
- "type": "code",
- "required": true,
- "placeholder": "",
- "list": false,
- "show": true,
- "multiline": true,
- "value": "import json\n\nfrom lfx.custom import Component\nfrom lfx.io import MultilineInput, Output\nfrom lfx.schema import Data\n\n\nclass WebhookComponent(Component):\n display_name = \"Webhook\"\n description = \"Defines a webhook input for the flow.\"\n name = \"Webhook\"\n icon = \"webhook\"\n\n inputs = [\n MultilineInput(\n name=\"data\",\n display_name=\"Payload\",\n info=\"Receives a payload from external systems via HTTP POST.\",\n )\n ]\n outputs = [\n Output(display_name=\"Data\", name=\"output_data\", method=\"build_data\"),\n ]\n\n def build_data(self) -> Data:\n message: str | Data = \"\"\n if not self.data:\n self.status = \"No data provided.\"\n return Data(data={})\n try:\n body = json.loads(self.data or \"{}\")\n except json.JSONDecodeError:\n body = {\"payload\": self.data}\n message = f\"Invalid JSON payload. Please check the format.\\n\\n{self.data}\"\n data = Data(data=body)\n if not message:\n message = data\n self.status = message\n return data\n",
- "fileTypes": [],
- "file_path": "",
- "password": false,
- "name": "code",
- "advanced": true,
- "dynamic": true,
- "info": "",
- "load_from_db": false,
- "title_case": false
- },
- "data": {
- "tool_mode": false,
- "trace_as_input": true,
- "multiline": true,
- "trace_as_metadata": true,
- "load_from_db": false,
- "list": false,
- "required": false,
- "placeholder": "",
- "show": true,
- "name": "data",
- "value": "{\"test\": 1}",
- "display_name": "Payload",
- "advanced": false,
- "input_types": [
- "Message"
- ],
- "dynamic": false,
- "info": "Receives a payload from external systems via HTTP POST.",
- "title_case": false,
- "type": "str",
- "_input_type": "MultilineInput"
- }
- },
- "description": "Defines a webhook input for the flow.",
- "icon": "webhook",
- "base_classes": [
- "Data"
- ],
- "display_name": "Webhook",
- "documentation": "",
- "minimized": false,
- "custom_fields": {},
- "output_types": [],
- "pinned": false,
- "conditional_paths": [],
- "frozen": false,
- "outputs": [
- {
- "types": [
- "Data"
- ],
- "selected": "Data",
- "name": "output_data",
- "display_name": "Data",
- "method": "build_data",
- "value": "__UNDEFINED__",
- "cache": true
- }
- ],
- "field_order": [
- "data"
- ],
- "beta": false,
- "legacy": false,
- "edited": false,
- "metadata": {},
- "tool_mode": false,
- "lf_version": "1.1.1"
- },
- "id": "Webhook-ww3dq",
- "description": "Defines a webhook input for the flow.",
- "display_name": "Webhook"
- },
- "selected": false,
- "width": 384,
- "height": 309,
- "dragging": true,
- "positionAbsolute": {
- "x": 418,
- "y": 270.2890625
- },
- "measured": {
- "width": 384,
- "height": 309
- }
- },
- {
- "id": "ChatInput-ov3Mq",
- "type": "genericNode",
- "position": {
- "x": 419.7235078147726,
- "y": 646.9863203129902
- },
- "data": {
- "type": "ChatInput",
- "node": {
- "template": {
- "_type": "Component",
- "files": {
- "trace_as_metadata": true,
- "file_path": "",
- "fileTypes": [
- "txt",
- "md",
- "mdx",
- "csv",
- "json",
- "yaml",
- "yml",
- "xml",
- "html",
- "htm",
- "pdf",
- "docx",
- "py",
- "sh",
- "sql",
- "js",
- "ts",
- "tsx",
- "jpg",
- "jpeg",
- "png",
- "bmp",
- "image"
- ],
- "list": true,
- "required": false,
- "placeholder": "",
- "show": true,
- "name": "files",
- "value": "",
- "display_name": "Files",
- "advanced": true,
- "dynamic": false,
- "info": "Files to be sent with the message.",
- "title_case": false,
- "type": "file",
- "_input_type": "FileInput"
- },
- "background_color": {
- "tool_mode": false,
- "trace_as_input": true,
- "trace_as_metadata": true,
- "load_from_db": false,
- "list": false,
- "required": false,
- "placeholder": "",
- "show": true,
- "name": "background_color",
- "value": "",
- "display_name": "Background Color",
- "advanced": true,
- "input_types": [
- "Message"
- ],
- "dynamic": false,
- "info": "The background color of the icon.",
- "title_case": false,
- "type": "str",
- "_input_type": "MessageTextInput"
- },
- "chat_icon": {
- "tool_mode": false,
- "trace_as_input": true,
- "trace_as_metadata": true,
- "load_from_db": false,
- "list": false,
- "required": false,
- "placeholder": "",
- "show": true,
- "name": "chat_icon",
- "value": "",
- "display_name": "Icon",
- "advanced": true,
- "input_types": [
- "Message"
- ],
- "dynamic": false,
- "info": "The icon of the message.",
- "title_case": false,
- "type": "str",
- "_input_type": "MessageTextInput"
- },
- "code": {
- "type": "code",
- "required": true,
- "placeholder": "",
- "list": false,
- "show": true,
- "multiline": true,
- "value": "from lfx.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom lfx.base.io.chat import ChatComponent\nfrom lfx.inputs import BoolInput\nfrom lfx.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom lfx.schema.message import Message\nfrom lfx.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\n \"background_color\": background_color,\n \"text_color\": text_color,\n \"icon\": icon,\n },\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n",
- "fileTypes": [],
- "file_path": "",
- "password": false,
- "name": "code",
- "advanced": true,
- "dynamic": true,
- "info": "",
- "load_from_db": false,
- "title_case": false
- },
- "input_value": {
- "tool_mode": false,
- "trace_as_input": true,
- "multiline": true,
- "trace_as_metadata": true,
- "load_from_db": false,
- "list": false,
- "required": false,
- "placeholder": "",
- "show": true,
- "name": "input_value",
- "value": "Should not run",
- "display_name": "Text",
- "advanced": false,
- "input_types": [
- "Message"
- ],
- "dynamic": false,
- "info": "Message to be passed as input.",
- "title_case": false,
- "type": "str",
- "_input_type": "MultilineInput"
- },
- "sender": {
- "tool_mode": false,
- "trace_as_metadata": true,
- "options": [
- "Machine",
- "User"
- ],
- "combobox": false,
- "required": false,
- "placeholder": "",
- "show": true,
- "name": "sender",
- "value": "User",
- "display_name": "Sender Type",
- "advanced": true,
- "dynamic": false,
- "info": "Type of sender.",
- "title_case": false,
- "type": "str",
- "_input_type": "DropdownInput"
- },
- "sender_name": {
- "tool_mode": false,
- "trace_as_input": true,
- "trace_as_metadata": true,
- "load_from_db": false,
- "list": false,
- "required": false,
- "placeholder": "",
- "show": true,
- "name": "sender_name",
- "value": "User",
- "display_name": "Sender Name",
- "advanced": true,
- "input_types": [
- "Message"
- ],
- "dynamic": false,
- "info": "Name of the sender.",
- "title_case": false,
- "type": "str",
- "_input_type": "MessageTextInput"
- },
- "session_id": {
- "tool_mode": false,
- "trace_as_input": true,
- "trace_as_metadata": true,
- "load_from_db": false,
- "list": false,
- "required": false,
- "placeholder": "",
- "show": true,
- "name": "session_id",
- "value": "",
- "display_name": "Session ID",
- "advanced": true,
- "input_types": [
- "Message"
- ],
- "dynamic": false,
- "info": "The session ID of the chat. If empty, the current session ID parameter will be used.",
- "title_case": false,
- "type": "str",
- "_input_type": "MessageTextInput"
- },
- "should_store_message": {
- "tool_mode": false,
- "trace_as_metadata": true,
- "list": false,
- "required": false,
- "placeholder": "",
- "show": true,
- "name": "should_store_message",
- "value": true,
- "display_name": "Store Messages",
- "advanced": true,
- "dynamic": false,
- "info": "Store the message in the history.",
- "title_case": false,
- "type": "bool",
- "_input_type": "BoolInput"
- },
- "text_color": {
- "tool_mode": false,
- "trace_as_input": true,
- "trace_as_metadata": true,
- "load_from_db": false,
- "list": false,
- "required": false,
- "placeholder": "",
- "show": true,
- "name": "text_color",
- "value": "",
- "display_name": "Text Color",
- "advanced": true,
- "input_types": [
- "Message"
- ],
- "dynamic": false,
- "info": "The text color of the name",
- "title_case": false,
- "type": "str",
- "_input_type": "MessageTextInput"
- }
- },
- "description": "Get chat inputs from the Playground.",
- "icon": "MessagesSquare",
- "base_classes": [
- "Message"
- ],
- "display_name": "Chat Input",
- "documentation": "",
- "minimized": true,
- "custom_fields": {},
- "output_types": [],
- "pinned": false,
- "conditional_paths": [],
- "frozen": false,
- "outputs": [
- {
- "types": [
- "Message"
- ],
- "selected": "Message",
- "name": "message",
- "display_name": "Message",
- "method": "message_response",
- "value": "__UNDEFINED__",
- "cache": true
- }
- ],
- "field_order": [
- "input_value",
- "should_store_message",
- "sender",
- "sender_name",
- "session_id",
- "files",
- "background_color",
- "chat_icon",
- "text_color"
- ],
- "beta": false,
- "legacy": false,
- "edited": false,
- "metadata": {},
- "tool_mode": false
- },
- "id": "ChatInput-ov3Mq"
- },
- "selected": false,
- "width": 384,
- "height": 309,
- "positionAbsolute": {
- "x": 419.7235078147726,
- "y": 646.9863203129902
- },
- "dragging": false,
- "measured": {
- "width": 384,
- "height": 309
- }
- },
- {
- "id": "ChatOutput-5k554",
- "type": "genericNode",
- "position": {
- "x": 884.7327265656637,
- "y": 662.4287265670896
- },
- "data": {
- "type": "ChatOutput",
- "node": {
- "template": {
- "_type": "Component",
- "background_color": {
- "tool_mode": false,
- "trace_as_input": true,
- "trace_as_metadata": true,
- "load_from_db": false,
- "list": false,
- "required": false,
- "placeholder": "",
- "show": true,
- "name": "background_color",
- "value": "",
- "display_name": "Background Color",
- "advanced": true,
- "input_types": [
- "Message"
- ],
- "dynamic": false,
- "info": "The background color of the icon.",
- "title_case": false,
- "type": "str",
- "_input_type": "MessageTextInput"
- },
- "chat_icon": {
- "tool_mode": false,
- "trace_as_input": true,
- "trace_as_metadata": true,
- "load_from_db": false,
- "list": false,
- "required": false,
- "placeholder": "",
- "show": true,
- "name": "chat_icon",
- "value": "",
- "display_name": "Icon",
- "advanced": true,
- "input_types": [
- "Message"
- ],
- "dynamic": false,
- "info": "The icon of the message.",
- "title_case": false,
- "type": "str",
- "_input_type": "MessageTextInput"
- },
- "code": {
- "type": "code",
- "required": true,
- "placeholder": "",
- "list": false,
- "show": true,
- "multiline": true,
- "value": "from lfx.base.io.chat import ChatComponent\nfrom lfx.inputs import BoolInput\nfrom lfx.io import DropdownInput, MessageInput, MessageTextInput, Output\nfrom lfx.schema.message import Message\nfrom lfx.schema.properties import Source\nfrom lfx.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n MessageInput(\n name=\"input_value\",\n display_name=\"Text\",\n info=\"Message to be passed as output.\",\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n source_dict[\"source\"] = source\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n message = self.input_value if isinstance(self.input_value, Message) else Message(text=self.input_value)\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n",
- "fileTypes": [],
- "file_path": "",
- "password": false,
- "name": "code",
- "advanced": true,
- "dynamic": true,
- "info": "",
- "load_from_db": false,
- "title_case": false
- },
- "data_template": {
- "tool_mode": false,
- "trace_as_input": true,
- "trace_as_metadata": true,
- "load_from_db": false,
- "list": false,
- "required": false,
- "placeholder": "",
- "show": true,
- "name": "data_template",
- "value": "{text}",
- "display_name": "Data Template",
- "advanced": true,
- "input_types": [
- "Message"
- ],
- "dynamic": false,
- "info": "Template to convert Data to Text. If left empty, it will be dynamically set to the Data's text key.",
- "title_case": false,
- "type": "str",
- "_input_type": "MessageTextInput"
- },
- "input_value": {
- "trace_as_input": true,
- "tool_mode": false,
- "trace_as_metadata": true,
- "load_from_db": false,
- "list": false,
- "required": false,
- "placeholder": "",
- "show": true,
- "name": "input_value",
- "value": "",
- "display_name": "Text",
- "advanced": false,
- "input_types": [
- "Message"
- ],
- "dynamic": false,
- "info": "Message to be passed as output.",
- "title_case": false,
- "type": "str",
- "_input_type": "MessageInput"
- },
- "sender": {
- "tool_mode": false,
- "trace_as_metadata": true,
- "options": [
- "Machine",
- "User"
- ],
- "combobox": false,
- "required": false,
- "placeholder": "",
- "show": true,
- "name": "sender",
- "value": "Machine",
- "display_name": "Sender Type",
- "advanced": true,
- "dynamic": false,
- "info": "Type of sender.",
- "title_case": false,
- "type": "str",
- "_input_type": "DropdownInput"
- },
- "sender_name": {
- "tool_mode": false,
- "trace_as_input": true,
- "trace_as_metadata": true,
- "load_from_db": false,
- "list": false,
- "required": false,
- "placeholder": "",
- "show": true,
- "name": "sender_name",
- "value": "AI",
- "display_name": "Sender Name",
- "advanced": true,
- "input_types": [
- "Message"
- ],
- "dynamic": false,
- "info": "Name of the sender.",
- "title_case": false,
- "type": "str",
- "_input_type": "MessageTextInput"
- },
- "session_id": {
- "tool_mode": false,
- "trace_as_input": true,
- "trace_as_metadata": true,
- "load_from_db": false,
- "list": false,
- "required": false,
- "placeholder": "",
- "show": true,
- "name": "session_id",
- "value": "",
- "display_name": "Session ID",
- "advanced": true,
- "input_types": [
- "Message"
- ],
- "dynamic": false,
- "info": "The session ID of the chat. If empty, the current session ID parameter will be used.",
- "title_case": false,
- "type": "str",
- "_input_type": "MessageTextInput"
- },
- "should_store_message": {
- "tool_mode": false,
- "trace_as_metadata": true,
- "list": false,
- "required": false,
- "placeholder": "",
- "show": true,
- "name": "should_store_message",
- "value": true,
- "display_name": "Store Messages",
- "advanced": true,
- "dynamic": false,
- "info": "Store the message in the history.",
- "title_case": false,
- "type": "bool",
- "_input_type": "BoolInput"
- },
- "text_color": {
- "tool_mode": false,
- "trace_as_input": true,
- "trace_as_metadata": true,
- "load_from_db": false,
- "list": false,
- "required": false,
- "placeholder": "",
- "show": true,
- "name": "text_color",
- "value": "",
- "display_name": "Text Color",
- "advanced": true,
- "input_types": [
- "Message"
- ],
- "dynamic": false,
- "info": "The text color of the name",
- "title_case": false,
- "type": "str",
- "_input_type": "MessageTextInput"
- }
- },
- "description": "Display a chat message in the Playground.",
- "icon": "MessagesSquare",
- "base_classes": [
- "Message"
- ],
- "display_name": "Chat Output",
- "documentation": "",
- "minimized": true,
- "custom_fields": {},
- "output_types": [],
- "pinned": false,
- "conditional_paths": [],
- "frozen": false,
- "outputs": [
- {
- "types": [
- "Message"
- ],
- "selected": "Message",
- "name": "message",
- "display_name": "Message",
- "method": "message_response",
- "value": "__UNDEFINED__",
- "cache": true
- }
- ],
- "field_order": [
- "input_value",
- "should_store_message",
- "sender",
- "sender_name",
- "session_id",
- "data_template",
- "background_color",
- "chat_icon",
- "text_color"
- ],
- "beta": false,
- "legacy": false,
- "edited": false,
- "metadata": {},
- "tool_mode": false
- },
- "id": "ChatOutput-5k554"
- },
- "selected": false,
- "width": 384,
- "height": 309,
- "positionAbsolute": {
- "x": 884.7327265656637,
- "y": 662.4287265670896
- },
- "dragging": false,
- "measured": {
- "width": 384,
- "height": 309
- }
- },
- {
- "id": "CustomComponent-ErhNJ",
- "type": "genericNode",
- "position": {
- "x": 1396.7134608749789,
- "y": 284.91367968123217
- },
- "data": {
- "type": "CustomComponent",
- "node": {
- "template": {
- "_type": "Component",
- "code": {
- "type": "code",
- "required": true,
- "placeholder": "",
- "list": false,
- "show": true,
- "multiline": true,
- "value": "# from lfx.field_typing import Data\nfrom lfx.custom import Component\nfrom lfx.io import StrInput\nfrom lfx.schema import Data\nfrom lfx.io import Output\nfrom pathlib import Path\nimport httpx\nclass CustomComponent(Component):\n display_name = \"Async Component\"\n description = \"Use as a template to create your own component.\"\n documentation: str = \"http://docs.langflow.org/components/custom\"\n icon = \"custom_components\"\n\n inputs = [\n StrInput(name=\"input_value\", display_name=\"Input Value\", value=\"Hello, World!\", input_types=[\"Data\"]),\n ]\n\n outputs = [\n Output(display_name=\"Output\", name=\"output\", method=\"build_output\"),\n ]\n\n async def build_output(self) -> Data:\n async with httpx.AsyncClient() as client:\n response = await client.get(\"https://www.google.com\")\n response.raise_for_status()\n return Data(response=response.text)",
- "fileTypes": [],
- "file_path": "",
- "password": false,
- "name": "code",
- "advanced": true,
- "dynamic": true,
- "info": "",
- "load_from_db": false,
- "title_case": false
- },
- "input_value": {
- "trace_as_metadata": true,
- "load_from_db": false,
- "list": false,
- "required": false,
- "placeholder": "",
- "show": true,
- "value": "",
- "name": "input_value",
- "display_name": "Input Value",
- "advanced": false,
- "input_types": [
- "Data"
- ],
- "dynamic": false,
- "info": "",
- "title_case": false,
- "type": "str"
- }
- },
- "description": "Use as a template to create your own component.",
- "icon": "custom_components",
- "base_classes": [],
- "display_name": "Custom Component",
- "documentation": "https://docs.langflow.org/components-custom-components",
- "custom_fields": {},
- "output_types": [],
- "pinned": false,
- "conditional_paths": [],
- "frozen": false,
- "outputs": [
- {
- "types": [],
- "name": "output",
- "display_name": "Output",
- "method": "build_output",
- "value": "__UNDEFINED__",
- "cache": true
- }
- ],
- "field_order": [
- "input_value"
- ],
- "beta": false,
- "edited": true
- },
- "id": "CustomComponent-ErhNJ",
- "description": "Use as a template to create your own component.",
- "display_name": "Custom Component"
- },
- "selected": false,
- "width": 384,
- "height": 337,
- "positionAbsolute": {
- "x": 1396.7134608749789,
- "y": 284.91367968123217
- },
- "dragging": false,
- "measured": {
- "width": 384,
- "height": 337
- }
- }
- ],
- "edges": [
- {
- "source": "Webhook-ww3dq",
- "sourceHandle": "{œdataTypeœ:œWebhookœ,œidœ:œWebhook-ww3dqœ,œnameœ:œoutput_dataœ,œoutput_typesœ:[œDataœ]}",
- "target": "CustomComponent-5ADNr",
- "targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œCustomComponent-5ADNrœ,œinputTypesœ:[œDataœ],œtypeœ:œstrœ}",
- "data": {
- "targetHandle": {
- "fieldName": "input_value",
- "id": "CustomComponent-5ADNr",
- "inputTypes": [
- "Data"
- ],
- "type": "str"
- },
- "sourceHandle": {
- "dataType": "Webhook",
- "id": "Webhook-ww3dq",
- "name": "output_data",
- "output_types": [
- "Data"
- ]
- }
- },
- "id": "reactflow__edge-Webhook-ww3dq{œdataTypeœ:œWebhookœ,œidœ:œWebhook-ww3dqœ,œnameœ:œoutput_dataœ,œoutput_typesœ:[œDataœ]}-CustomComponent-5ADNr{œfieldNameœ:œinput_valueœ,œidœ:œCustomComponent-5ADNrœ,œinputTypesœ:[œDataœ],œtypeœ:œstrœ}",
- "className": "",
- "animated": false
- },
- {
- "source": "ChatInput-ov3Mq",
- "sourceHandle": "{œdataTypeœ:œChatInputœ,œidœ:œChatInput-ov3Mqœ,œnameœ:œmessageœ,œoutput_typesœ:[œMessageœ]}",
- "target": "ChatOutput-5k554",
- "targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-5k554œ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}",
- "data": {
- "targetHandle": {
- "fieldName": "input_value",
- "id": "ChatOutput-5k554",
- "inputTypes": [
- "Message"
- ],
- "type": "str"
- },
- "sourceHandle": {
- "dataType": "ChatInput",
- "id": "ChatInput-ov3Mq",
- "name": "message",
- "output_types": [
- "Message"
- ]
- }
- },
- "id": "reactflow__edge-ChatInput-ov3Mq{œdataTypeœ:œChatInputœ,œidœ:œChatInput-ov3Mqœ,œnameœ:œmessageœ,œoutput_typesœ:[œMessageœ]}-ChatOutput-5k554{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-5k554œ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}",
- "className": "",
- "animated": false
- },
- {
- "source": "CustomComponent-5ADNr",
- "sourceHandle": "{œdataTypeœ:œCustomComponentœ,œidœ:œCustomComponent-5ADNrœ,œnameœ:œoutputœ,œoutput_typesœ:[œDataœ]}",
- "target": "CustomComponent-ErhNJ",
- "targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œCustomComponent-ErhNJœ,œinputTypesœ:[œDataœ],œtypeœ:œstrœ}",
- "data": {
- "targetHandle": {
- "fieldName": "input_value",
- "id": "CustomComponent-ErhNJ",
- "inputTypes": [
- "Data"
- ],
- "type": "str"
- },
- "sourceHandle": {
- "dataType": "CustomComponent",
- "id": "CustomComponent-5ADNr",
- "name": "output",
- "output_types": [
- "Data"
- ]
- }
- },
- "id": "reactflow__edge-CustomComponent-5ADNr{œdataTypeœ:œCustomComponentœ,œidœ:œCustomComponent-5ADNrœ,œnameœ:œoutputœ,œoutput_typesœ:[œDataœ]}-CustomComponent-ErhNJ{œfieldNameœ:œinput_valueœ,œidœ:œCustomComponent-ErhNJœ,œinputTypesœ:[œDataœ],œtypeœ:œstrœ}",
- "className": "",
- "animated": false
- }
- ],
- "viewport": {
- "x": -179.56996489421806,
- "y": 68.14631386099461,
- "zoom": 0.7180226657378755
- }
- },
- "description": "The Power of Language at Your Fingertips.",
- "name": "Webhook Test",
- "last_tested_version": "1.1.1",
- "endpoint_name": "webhook-test",
- "is_component": false
-}
\ No newline at end of file
diff --git a/src/lfx/tests/data/__init__.py b/src/lfx/tests/data/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/src/lfx/tests/data/basic_example.json b/src/lfx/tests/data/basic_example.json
deleted file mode 100644
index e3a5141a6de5..000000000000
--- a/src/lfx/tests/data/basic_example.json
+++ /dev/null
@@ -1,510 +0,0 @@
-{
- "description": "",
- "name": "BasicExample",
- "id": "a53f9130-f2fa-4a3e-b22a-3856d946351a",
- "data": {
- "nodes": [
- {
- "width": 384,
- "height": 267,
- "id": "dndnode_81",
- "type": "genericNode",
- "position": {
- "x": 1030,
- "y": 694
- },
- "data": {
- "type": "TimeTravelGuideChain",
- "node": {
- "template": {
- "llm": {
- "required": true,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "password": false,
- "name": "llm",
- "advanced": false,
- "type": "BaseLanguageModel",
- "list": false
- },
- "memory": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "password": false,
- "name": "memory",
- "advanced": false,
- "type": "BaseChatMemory",
- "list": false
- },
- "_type": "TimeTravelGuideChain"
- },
- "description": "Time travel guide chain to be used in the flow.",
- "base_classes": [
- "LLMChain",
- "BaseCustomChain",
- "TimeTravelGuideChain",
- "Chain",
- "ConversationChain"
- ]
- },
- "id": "dndnode_81",
- "value": null
- },
- "selected": false,
- "positionAbsolute": {
- "x": 1030,
- "y": 694
- },
- "dragging": false
- },
- {
- "width": 384,
- "height": 597,
- "id": "dndnode_82",
- "type": "genericNode",
- "position": {
- "x": 520,
- "y": 732
- },
- "data": {
- "type": "OpenAI",
- "node": {
- "template": {
- "cache": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "cache",
- "advanced": false,
- "type": "bool",
- "list": false
- },
- "verbose": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "verbose",
- "advanced": false,
- "type": "bool",
- "list": false
- },
- "callbacks": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "callbacks",
- "advanced": false,
- "type": "langchain.callbacks.base.BaseCallbackHandler",
- "list": true
- },
- "client": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "client",
- "advanced": false,
- "type": "Any",
- "list": false
- },
- "model_name": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "value": "text-davinci-003",
- "password": false,
- "options": [
- "text-davinci-003",
- "text-davinci-002",
- "text-curie-001",
- "text-babbage-001",
- "text-ada-001"
- ],
- "name": "model_name",
- "advanced": false,
- "type": "str",
- "list": true
- },
- "temperature": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "value": 0.7,
- "password": false,
- "name": "temperature",
- "advanced": false,
- "type": "float",
- "list": false
- },
- "max_tokens": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "value": 256,
- "password": true,
- "name": "max_tokens",
- "advanced": false,
- "type": "int",
- "list": false
- },
- "top_p": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "value": 1,
- "password": false,
- "name": "top_p",
- "advanced": false,
- "type": "float",
- "list": false
- },
- "frequency_penalty": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "value": 0,
- "password": false,
- "name": "frequency_penalty",
- "advanced": false,
- "type": "float",
- "list": false
- },
- "presence_penalty": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "value": 0,
- "password": false,
- "name": "presence_penalty",
- "advanced": false,
- "type": "float",
- "list": false
- },
- "n": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "value": 1,
- "password": false,
- "name": "n",
- "advanced": false,
- "type": "int",
- "list": false
- },
- "best_of": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "value": 1,
- "password": false,
- "name": "best_of",
- "advanced": false,
- "type": "int",
- "list": false
- },
- "model_kwargs": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "password": false,
- "name": "model_kwargs",
- "advanced": true,
- "type": "code",
- "list": false
- },
- "openai_api_key": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "value": null,
- "password": true,
- "name": "openai_api_key",
- "display_name": "OpenAI API Key",
- "advanced": false,
- "type": "str",
- "list": false
- },
- "openai_api_base": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "password": true,
- "name": "openai_api_base",
- "advanced": false,
- "type": "str",
- "list": false
- },
- "openai_organization": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "openai_organization",
- "advanced": false,
- "type": "str",
- "list": false
- },
- "batch_size": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "value": 20,
- "password": false,
- "name": "batch_size",
- "advanced": false,
- "type": "int",
- "list": false
- },
- "request_timeout": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "request_timeout",
- "advanced": false,
- "type": "float",
- "list": false
- },
- "logit_bias": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "logit_bias",
- "advanced": false,
- "type": "code",
- "list": false
- },
- "max_retries": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "value": 6,
- "password": false,
- "name": "max_retries",
- "advanced": false,
- "type": "int",
- "list": false
- },
- "streaming": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "value": false,
- "password": false,
- "name": "streaming",
- "advanced": false,
- "type": "bool",
- "list": false
- },
- "allowed_special": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "value": [],
- "password": false,
- "name": "allowed_special",
- "advanced": false,
- "type": "Literal'all'",
- "list": true
- },
- "disallowed_special": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "value": "all",
- "password": false,
- "name": "disallowed_special",
- "advanced": false,
- "type": "Literal'all'",
- "list": false
- },
- "_type": "OpenAI"
- },
- "description": "Wrapper around OpenAI large language models.",
- "base_classes": [
- "BaseLLM",
- "OpenAI",
- "BaseOpenAI",
- "BaseLanguageModel"
- ]
- },
- "id": "dndnode_82",
- "value": null
- },
- "selected": false,
- "positionAbsolute": {
- "x": 520,
- "y": 732
- },
- "dragging": false
- },
- {
- "width": 384,
- "height": 273,
- "id": "dndnode_83",
- "type": "genericNode",
- "position": {
- "x": 512,
- "y": 402
- },
- "data": {
- "type": "ConversationBufferMemory",
- "node": {
- "template": {
- "chat_memory": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "chat_memory",
- "advanced": false,
- "type": "BaseChatMessageHistory",
- "list": false
- },
- "output_key": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "output_key",
- "advanced": false,
- "type": "str",
- "list": false
- },
- "input_key": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "input_key",
- "advanced": false,
- "type": "str",
- "list": false
- },
- "return_messages": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "return_messages",
- "advanced": false,
- "type": "bool",
- "list": false
- },
- "human_prefix": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "value": "Human",
- "password": false,
- "name": "human_prefix",
- "advanced": false,
- "type": "str",
- "list": false
- },
- "ai_prefix": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "value": "AI",
- "password": false,
- "name": "ai_prefix",
- "advanced": false,
- "type": "str",
- "list": false
- },
- "memory_key": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "value": "history",
- "password": false,
- "name": "memory_key",
- "advanced": false,
- "type": "str",
- "list": false
- },
- "_type": "ConversationBufferMemory"
- },
- "description": "Buffer for storing conversation memory.",
- "base_classes": [
- "ConversationBufferMemory",
- "BaseChatMemory",
- "BaseMemory"
- ]
- },
- "id": "dndnode_83",
- "value": null
- },
- "selected": false,
- "positionAbsolute": {
- "x": 512,
- "y": 402
- },
- "dragging": false
- }
- ],
- "edges": [
- {
- "source": "dndnode_82",
- "sourceHandle": "OpenAI|dndnode_82|BaseLLM|OpenAI|BaseOpenAI|BaseLanguageModel",
- "target": "dndnode_81",
- "targetHandle": "BaseLanguageModel|llm|dndnode_81",
- "className": "animate-pulse",
- "id": "reactflow__edge-dndnode_82OpenAI|dndnode_82|BaseLLM|OpenAI|BaseOpenAI|BaseLanguageModel-dndnode_81BaseLanguageModel|llm|dndnode_81",
- "selected": false
- },
- {
- "source": "dndnode_83",
- "sourceHandle": "ConversationBufferMemory|dndnode_83|ConversationBufferMemory|BaseChatMemory|BaseMemory",
- "target": "dndnode_81",
- "targetHandle": "BaseChatMemory|memory|dndnode_81",
- "className": "animate-pulse",
- "id": "reactflow__edge-dndnode_83ConversationBufferMemory|dndnode_83|ConversationBufferMemory|BaseChatMemory|BaseMemory-dndnode_81BaseChatMemory|memory|dndnode_81"
- }
- ],
- "viewport": {
- "x": 1,
- "y": 0,
- "zoom": 0.5
- }
- },
- "last_tested_version": "0.6.2"
-}
diff --git a/src/lfx/tests/data/complex_example.json b/src/lfx/tests/data/complex_example.json
deleted file mode 100644
index b4e688fc76dc..000000000000
--- a/src/lfx/tests/data/complex_example.json
+++ /dev/null
@@ -1 +0,0 @@
-{"description":"Chain the Words, Master Language!","name":"complex_example","data":{"nodes":[{"width":384,"height":267,"id":"ZeroShotAgent-UQytQ","type":"genericNode","position":{"x":1444.3029693177525,"y":769.2195451536553},"data":{"type":"ZeroShotAgent","node":{"template":{"allowed_tools":{"required":false,"placeholder":"","show":true,"multiline":false,"password":false,"name":"allowed_tools","advanced":false,"info":"","type":"Tool","list":true},"llm_chain":{"required":true,"placeholder":"","show":true,"multiline":false,"password":false,"name":"llm_chain","advanced":false,"info":"","type":"LLMChain","list":false},"output_parser":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"output_parser","advanced":false,"info":"","type":"AgentOutputParser","list":false},"_type":"ZeroShotAgent"},"description":"Agent for the MRKL chain.","base_classes":["BaseSingleActionAgent","ZeroShotAgent","Agent","function"],"display_name":"ZeroShotAgent","documentation":"https://python.langchain.com/docs/modules/agents/how_to/custom_mrkl_agent"},"id":"ZeroShotAgent-UQytQ","value":null},"selected":false,"positionAbsolute":{"x":1444.3029693177525,"y":769.2195451536553},"dragging":false},{"width":384,"height":267,"id":"ZeroShotAgent-4Yl9Q","type":"genericNode","position":{"x":2507.5134255411913,"y":703.4268189022047},"data":{"type":"ZeroShotAgent","node":{"template":{"allowed_tools":{"required":false,"placeholder":"","show":true,"multiline":false,"password":false,"name":"allowed_tools","advanced":false,"info":"","type":"Tool","list":true},"llm_chain":{"required":true,"placeholder":"","show":true,"multiline":false,"password":false,"name":"llm_chain","advanced":false,"info":"","type":"LLMChain","list":false},"output_parser":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"output_parser","advanced":false,"info":"","type":"AgentOutputParser","list":false},"_type":"ZeroShotAgent"},"description":"Agent for the MRKL chain.","base_classes":["BaseSingleActionAgent","ZeroShotAgent","Agent","function"],"display_name":"ZeroShotAgent","documentation":"https://python.langchain.com/docs/modules/agents/how_to/custom_mrkl_agent"},"id":"ZeroShotAgent-4Yl9Q","value":null},"selected":false,"positionAbsolute":{"x":2507.5134255411913,"y":703.4268189022047},"dragging":false},{"width":384,"height":475,"id":"Tool-Ssk4g","type":"genericNode","position":{"x":1990.4155792278825,"y":894.4563316029999},"data":{"type":"Tool","node":{"template":{"func":{"required":true,"placeholder":"","show":true,"multiline":true,"password":false,"name":"func","advanced":false,"info":"","type":"function","list":false},"description":{"required":true,"placeholder":"","show":true,"multiline":true,"value":"AgentTool","password":false,"name":"description","advanced":false,"info":"","type":"str","list":false},"name":{"required":true,"placeholder":"","show":true,"multiline":true,"value":"AgentTool","password":false,"name":"name","advanced":false,"info":"","type":"str","list":false},"return_direct":{"required":true,"placeholder":"","show":true,"multiline":false,"value":false,"password":false,"name":"return_direct","advanced":false,"info":"","type":"bool","list":false},"_type":"Tool"},"description":"Converts a chain, agent or function into a 
tool.","base_classes":["Tool"],"display_name":"Tool","documentation":""},"id":"Tool-Ssk4g","value":null},"selected":false,"positionAbsolute":{"x":1990.4155792278825,"y":894.4563316029999},"dragging":false},{"width":384,"height":513,"id":"PythonFunctionTool-qSfC8","type":"genericNode","position":{"x":881.9234666781165,"y":717.4260855419674},"data":{"type":"PythonFunctionTool","node":{"template":{"code":{"required":true,"placeholder":"","show":true,"multiline":true,"value":"\ndef python_function(text: str) -> str:\n \"\"\"This is a default python function that returns the input text\"\"\"\n return text\n","password":false,"name":"code","advanced":false,"info":"","type":"code","list":false},"description":{"required":true,"placeholder":"","show":true,"multiline":true,"value":"Uppercases","password":false,"name":"description","advanced":false,"info":"","type":"str","list":false},"name":{"required":true,"placeholder":"","show":true,"multiline":false,"value":"Uppercase","password":false,"name":"name","advanced":false,"info":"","type":"str","list":false},"return_direct":{"required":true,"placeholder":"","show":true,"multiline":false,"value":false,"password":false,"name":"return_direct","advanced":false,"info":"","type":"bool","list":false},"_type":"PythonFunctionTool"},"description":"Python function to be executed.","base_classes":["Tool"],"display_name":"PythonFunctionTool","documentation":""},"id":"PythonFunctionTool-qSfC8","value":null},"selected":false,"dragging":false,"positionAbsolute":{"x":881.9234666781165,"y":717.4260855419674}},{"width":384,"height":307,"id":"LLMChain-5pPr3","type":"genericNode","position":{"x":952.8848633792611,"y":205.91268432121848},"data":{"type":"LLMChain","node":{"template":{"callbacks":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"callbacks","advanced":false,"info":"","type":"langchain.callbacks.base.BaseCallbackHandler","list":true},"llm":{"required":true,"placeholder":"","show":true,"multiline":false,"password":false,"name":"llm","advanced":false,"info":"","type":"BaseLanguageModel","list":false},"memory":{"required":false,"placeholder":"","show":true,"multiline":false,"password":false,"name":"memory","advanced":false,"info":"","type":"BaseMemory","list":false},"output_parser":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"output_parser","advanced":false,"info":"","type":"BaseLLMOutputParser","list":false},"prompt":{"required":true,"placeholder":"","show":true,"multiline":false,"password":false,"name":"prompt","advanced":false,"info":"","type":"BasePromptTemplate","list":false},"llm_kwargs":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"llm_kwargs","advanced":false,"info":"","type":"code","list":false},"output_key":{"required":true,"placeholder":"","show":true,"multiline":false,"value":"text","password":false,"name":"output_key","advanced":true,"info":"","type":"str","list":false},"return_final_only":{"required":false,"placeholder":"","show":false,"multiline":false,"value":true,"password":false,"name":"return_final_only","advanced":false,"info":"","type":"bool","list":false},"tags":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"tags","advanced":false,"info":"","type":"str","list":true},"verbose":{"required":false,"placeholder":"","show":true,"multiline":false,"value":false,"password":false,"name":"verbose","advanced":true,"info":"","type":"bool","list":false},"_type":"LLMChain"},"descripti
on":"Chain to run queries against LLMs.","base_classes":["LLMChain","Chain","function"],"display_name":"LLMChain","documentation":"https://python.langchain.com/docs/modules/chains/foundational/llm_chain"},"id":"LLMChain-5pPr3","value":null},"selected":false,"positionAbsolute":{"x":952.8848633792611,"y":205.91268432121848},"dragging":false},{"width":384,"height":421,"id":"ZeroShotPrompt-KeA26","type":"genericNode","position":{"x":284.2531445624355,"y":99.41468159745108},"data":{"type":"ZeroShotPrompt","node":{"template":{"format_instructions":{"required":true,"placeholder":"","show":true,"multiline":true,"value":"Use the following format:\n\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action\nObservation: the result of the action\n... (this Thought/Action/Action Input/Observation can repeat N times)\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question","password":false,"name":"format_instructions","advanced":false,"info":"","type":"prompt","list":false},"prefix":{"required":false,"placeholder":"","show":true,"multiline":true,"value":"Answer the following questions as best you can. You have access to the following tools:","password":false,"name":"prefix","advanced":false,"info":"","type":"prompt","list":false},"suffix":{"required":true,"placeholder":"","show":true,"multiline":true,"value":"Begin!\n\nQuestion: {input}\nThought:{agent_scratchpad}","password":false,"name":"suffix","advanced":false,"info":"","type":"prompt","list":false},"_type":"ZeroShotPrompt"},"description":"Prompt template for Zero Shot Agent.","base_classes":["BasePromptTemplate"],"display_name":"ZeroShotPrompt","documentation":"https://python.langchain.com/docs/modules/agents/how_to/custom_mrkl_agent"},"id":"ZeroShotPrompt-KeA26","value":null},"selected":false,"positionAbsolute":{"x":284.2531445624355,"y":99.41468159745108},"dragging":false},{"width":384,"height":611,"id":"OpenAI-YKFjJ","type":"genericNode","position":{"x":151.61242562883945,"y":646.4646888408231},"data":{"type":"OpenAI","node":{"template":{"allowed_special":{"required":false,"placeholder":"","show":false,"multiline":false,"value":[],"password":false,"name":"allowed_special","advanced":false,"info":"","type":"Literal'all'","list":true},"callbacks":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"callbacks","advanced":false,"info":"","type":"langchain.callbacks.base.BaseCallbackHandler","list":true},"disallowed_special":{"required":false,"placeholder":"","show":false,"multiline":false,"value":"all","password":false,"name":"disallowed_special","advanced":false,"info":"","type":"Literal'all'","list":false},"batch_size":{"required":false,"placeholder":"","show":false,"multiline":false,"value":20,"password":false,"name":"batch_size","advanced":false,"info":"","type":"int","list":false},"best_of":{"required":false,"placeholder":"","show":false,"multiline":false,"value":1,"password":false,"name":"best_of","advanced":false,"info":"","type":"int","list":false},"cache":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"cache","advanced":false,"info":"","type":"bool","list":false},"client":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"client","advanced":false,"info":"","type":"Any","list":false},"frequency_penalty":{"required":false,"placeholder":"","sho
w":false,"multiline":false,"value":0,"password":false,"name":"frequency_penalty","advanced":false,"info":"","type":"float","list":false},"logit_bias":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"logit_bias","advanced":false,"info":"","type":"code","list":false},"max_retries":{"required":false,"placeholder":"","show":false,"multiline":false,"value":6,"password":false,"name":"max_retries","advanced":false,"info":"","type":"int","list":false},"max_tokens":{"required":false,"placeholder":"","show":true,"multiline":false,"value":"","password":true,"name":"max_tokens","advanced":false,"info":"","type":"int","list":false},"model_kwargs":{"required":false,"placeholder":"","show":true,"multiline":false,"password":false,"name":"model_kwargs","advanced":true,"info":"","type":"code","list":false},"model_name":{"required":false,"placeholder":"","show":true,"multiline":false,"value":"text-davinci-003","password":false,"options":["text-davinci-003","text-davinci-002","text-curie-001","text-babbage-001","text-ada-001"],"name":"model_name","advanced":false,"info":"","type":"str","list":true},"n":{"required":false,"placeholder":"","show":false,"multiline":false,"value":1,"password":false,"name":"n","advanced":false,"info":"","type":"int","list":false},"openai_api_base":{"required":false,"placeholder":"","show":true,"multiline":false,"password":false,"name":"openai_api_base","display_name":"OpenAI API Base","advanced":false,"info":"\nThe base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\n\nYou can change this to use other APIs like JinaChat, LocalAI and Prem.\n","type":"str","list":false},"openai_api_key":{"required":false,"placeholder":"","show":true,"multiline":false,"value":"","password":true,"name":"openai_api_key","display_name":"OpenAI API Key","advanced":false,"info":"","type":"str","list":false},"openai_organization":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"openai_organization","display_name":"OpenAI Organization","advanced":false,"info":"","type":"str","list":false},"openai_proxy":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"openai_proxy","display_name":"OpenAI 
Proxy","advanced":false,"info":"","type":"str","list":false},"presence_penalty":{"required":false,"placeholder":"","show":false,"multiline":false,"value":0,"password":false,"name":"presence_penalty","advanced":false,"info":"","type":"float","list":false},"request_timeout":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"request_timeout","advanced":false,"info":"","type":"float","list":false},"streaming":{"required":false,"placeholder":"","show":false,"multiline":false,"value":false,"password":false,"name":"streaming","advanced":false,"info":"","type":"bool","list":false},"tags":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"tags","advanced":false,"info":"","type":"str","list":true},"temperature":{"required":false,"placeholder":"","show":true,"multiline":false,"value":0.7,"password":false,"name":"temperature","advanced":false,"info":"","type":"float","list":false},"tiktoken_model_name":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"tiktoken_model_name","advanced":false,"info":"","type":"str","list":false},"top_p":{"required":false,"placeholder":"","show":false,"multiline":false,"value":1,"password":false,"name":"top_p","advanced":false,"info":"","type":"float","list":false},"verbose":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"verbose","advanced":false,"info":"","type":"bool","list":false},"_type":"OpenAI"},"description":"Wrapper around OpenAI large language models.","base_classes":["BaseOpenAI","BaseLLM","OpenAI","BaseLanguageModel"],"display_name":"OpenAI","documentation":"https://python.langchain.com/docs/modules/model_io/models/llms/integrations/openai"},"id":"OpenAI-YKFjJ","value":null},"selected":false,"positionAbsolute":{"x":151.61242562883945,"y":646.4646888408231},"dragging":false}],"edges":[{"source":"Tool-Ssk4g","sourceHandle":"Tool|Tool-Ssk4g|Tool","target":"ZeroShotAgent-4Yl9Q","targetHandle":"Tool|allowed_tools|ZeroShotAgent-4Yl9Q","style":{"stroke":"inherit"},"className":"stroke-gray-900 dark:stroke-gray-200","animated":false,"id":"reactflow__edge-Tool-Ssk4gTool|Tool-Ssk4g|Tool-ZeroShotAgent-4Yl9QTool|allowed_tools|ZeroShotAgent-4Yl9Q","selected":false},{"source":"ZeroShotAgent-UQytQ","sourceHandle":"ZeroShotAgent|ZeroShotAgent-UQytQ|BaseSingleActionAgent|ZeroShotAgent|Agent|function","target":"Tool-Ssk4g","targetHandle":"function|func|Tool-Ssk4g","style":{"stroke":"inherit"},"className":"stroke-gray-900 dark:stroke-gray-200","animated":false,"id":"reactflow__edge-ZeroShotAgent-UQytQZeroShotAgent|ZeroShotAgent-UQytQ|BaseSingleActionAgent|ZeroShotAgent|Agent|function-Tool-Ssk4gfunction|func|Tool-Ssk4g","selected":false},{"source":"PythonFunctionTool-qSfC8","sourceHandle":"PythonFunctionTool|PythonFunctionTool-qSfC8|Tool","target":"ZeroShotAgent-UQytQ","targetHandle":"Tool|allowed_tools|ZeroShotAgent-UQytQ","style":{"stroke":"inherit"},"className":"stroke-gray-900 dark:stroke-gray-200","animated":false,"id":"reactflow__edge-PythonFunctionTool-qSfC8PythonFunctionTool|PythonFunctionTool-qSfC8|Tool-ZeroShotAgent-UQytQTool|allowed_tools|ZeroShotAgent-UQytQ","selected":false},{"source":"ZeroShotPrompt-KeA26","sourceHandle":"ZeroShotPrompt|ZeroShotPrompt-KeA26|BasePromptTemplate","target":"LLMChain-5pPr3","targetHandle":"BasePromptTemplate|prompt|LLMChain-5pPr3","style":{"stroke":"inherit"},"className":"stroke-gray-900 
dark:stroke-gray-200","animated":false,"id":"reactflow__edge-ZeroShotPrompt-KeA26ZeroShotPrompt|ZeroShotPrompt-KeA26|BasePromptTemplate-LLMChain-5pPr3BasePromptTemplate|prompt|LLMChain-5pPr3","selected":false},{"source":"OpenAI-YKFjJ","sourceHandle":"OpenAI|OpenAI-YKFjJ|BaseOpenAI|BaseLLM|OpenAI|BaseLanguageModel","target":"LLMChain-5pPr3","targetHandle":"BaseLanguageModel|llm|LLMChain-5pPr3","style":{"stroke":"inherit"},"className":"stroke-gray-900 dark:stroke-gray-200","animated":false,"id":"reactflow__edge-OpenAI-YKFjJOpenAI|OpenAI-YKFjJ|BaseOpenAI|BaseLLM|OpenAI|BaseLanguageModel-LLMChain-5pPr3BaseLanguageModel|llm|LLMChain-5pPr3","selected":false},{"source":"LLMChain-5pPr3","sourceHandle":"LLMChain|LLMChain-5pPr3|LLMChain|Chain|function","target":"ZeroShotAgent-4Yl9Q","targetHandle":"LLMChain|llm_chain|ZeroShotAgent-4Yl9Q","style":{"stroke":"inherit"},"className":"stroke-gray-900 dark:stroke-gray-200","animated":false,"id":"reactflow__edge-LLMChain-5pPr3LLMChain|LLMChain-5pPr3|LLMChain|Chain|function-ZeroShotAgent-4Yl9QLLMChain|llm_chain|ZeroShotAgent-4Yl9Q","selected":false},{"source":"LLMChain-5pPr3","sourceHandle":"LLMChain|LLMChain-5pPr3|LLMChain|Chain|function","target":"ZeroShotAgent-UQytQ","targetHandle":"LLMChain|llm_chain|ZeroShotAgent-UQytQ","style":{"stroke":"inherit"},"className":"stroke-gray-900 dark:stroke-gray-200","animated":false,"id":"reactflow__edge-LLMChain-5pPr3LLMChain|LLMChain-5pPr3|LLMChain|Chain|function-ZeroShotAgent-UQytQLLMChain|llm_chain|ZeroShotAgent-UQytQ","selected":false}],"viewport":{"x":-77.90141289801102,"y":58.94201890632064,"zoom":0.3906639400861592}},"id":"e5213457-cb4c-48b5-b2bf-a6bc5b63f625"}
\ No newline at end of file
diff --git a/src/lfx/tests/data/component.py b/src/lfx/tests/data/component.py
deleted file mode 100644
index 7d9d33bc01a5..000000000000
--- a/src/lfx/tests/data/component.py
+++ /dev/null
@@ -1,16 +0,0 @@
-import random
-
-from lfx.custom import CustomComponent
-
-
-class TestComponent(CustomComponent):
- def refresh_values(self):
- # This is a function that will be called every time the component is updated
- # and should return a list of random strings
- return [f"Random {random.randint(1, 100)}" for _ in range(5)] # noqa: S311
-
- def build_config(self):
- return {"param": {"display_name": "Param", "options": self.refresh_values}}
-
- def build(self, param: int):
- return param
diff --git a/src/lfx/tests/data/component_multiple_outputs.py b/src/lfx/tests/data/component_multiple_outputs.py
deleted file mode 100644
index 25d817323717..000000000000
--- a/src/lfx/tests/data/component_multiple_outputs.py
+++ /dev/null
@@ -1,20 +0,0 @@
-from lfx.custom import Component
-from lfx.inputs.inputs import IntInput, MessageTextInput
-from lfx.template.field.base import Output
-
-
-class MultipleOutputsComponent(Component):
- inputs = [
- MessageTextInput(display_name="Input", name="input"),
- IntInput(display_name="Number", name="number"),
- ]
- outputs = [
- Output(display_name="Certain Output", name="certain_output", method="certain_output"),
- Output(display_name="Other Output", name="other_output", method="other_output"),
- ]
-
- def certain_output(self) -> str:
- return f"This is my string input: {self.input}"
-
-    def other_output(self) -> str:
- return f"This is my int input multiplied by 2: {self.number * 2}"
diff --git a/src/lfx/tests/data/component_nested_call.py b/src/lfx/tests/data/component_nested_call.py
deleted file mode 100644
index 18a08d984207..000000000000
--- a/src/lfx/tests/data/component_nested_call.py
+++ /dev/null
@@ -1,22 +0,0 @@
-from random import randint
-
-from lfx.custom import Component
-from lfx.inputs.inputs import IntInput, MessageTextInput
-from lfx.template.field.base import Output
-
-
-class MultipleOutputsComponent(Component):
- inputs = [
- MessageTextInput(display_name="Input", name="input"),
- IntInput(display_name="Number", name="number"),
- ]
- outputs = [
- Output(display_name="Certain Output", name="certain_output", method="certain_output"),
- Output(display_name="Other Output", name="other_output", method="other_output"),
- ]
-
- def certain_output(self) -> int:
- return randint(0, self.number) # noqa: S311
-
- def other_output(self) -> int:
- return self.certain_output()
diff --git a/src/lfx/tests/data/component_with_templatefield.py b/src/lfx/tests/data/component_with_templatefield.py
deleted file mode 100644
index 2d3fabd34f09..000000000000
--- a/src/lfx/tests/data/component_with_templatefield.py
+++ /dev/null
@@ -1,17 +0,0 @@
-import random
-
-from lfx.custom import CustomComponent
-from lfx.field_typing import Input
-
-
-class TestComponent(CustomComponent):
- def refresh_values(self):
- # This is a function that will be called every time the component is updated
- # and should return a list of random strings
- return [f"Random {random.randint(1, 100)}" for _ in range(5)] # noqa: S311
-
- def build_config(self):
- return {"param": Input(display_name="Param", options=self.refresh_values)}
-
- def build(self, param: int):
- return param
diff --git a/src/lfx/tests/data/debug_incoming_24k.raw b/src/lfx/tests/data/debug_incoming_24k.raw
deleted file mode 100644
index efc09ee4ef86..000000000000
Binary files a/src/lfx/tests/data/debug_incoming_24k.raw and /dev/null differ
diff --git a/src/lfx/tests/data/dynamic_output_component.py b/src/lfx/tests/data/dynamic_output_component.py
deleted file mode 100644
index cc3a34e04529..000000000000
--- a/src/lfx/tests/data/dynamic_output_component.py
+++ /dev/null
@@ -1,40 +0,0 @@
-from typing import Any
-
-from lfx.custom import Component
-from lfx.io import BoolInput, MessageTextInput, Output
-from lfx.schema import Data
-
-
-class DynamicOutputComponent(Component):
- display_name = "Dynamic Output Component"
- description = "Use as a template to create your own component."
- documentation: str = "https://docs.langflow.org/components-custom-components"
- icon = "custom_components"
- name = "DynamicOutputComponent"
-
- inputs = [
- MessageTextInput(name="input_value", display_name="Input Value", value="Hello, World!"),
- BoolInput(name="show_output", display_name="Show Output", value=True, real_time_refresh=True),
- ]
-
- outputs = [
- Output(display_name="Output", name="output", method="build_output"),
- ]
-
- def update_outputs(self, frontend_node: dict, field_name: str, field_value: Any):
- if field_name == "show_output":
- if field_value:
- frontend_node["outputs"].append(
- Output(display_name="Tool Output", name="tool_output", method="build_output")
- )
- else:
- # remove the output
- frontend_node["outputs"] = [
- output for output in frontend_node["outputs"] if output["name"] != "tool_output"
- ]
- return frontend_node
-
- def build_output(self) -> Data:
- data = Data(value=self.input_value)
- self.status = data
- return data
diff --git a/src/lfx/tests/data/env_variable_test.json b/src/lfx/tests/data/env_variable_test.json
deleted file mode 100644
index 79dfe7ac3da1..000000000000
--- a/src/lfx/tests/data/env_variable_test.json
+++ /dev/null
@@ -1,335 +0,0 @@
-{
- "id": "a7003613-8243-4f71-800c-6be1c4065518",
- "data": {
- "nodes": [
- {
- "id": "Secret-zIbKs",
- "type": "genericNode",
- "position": {
- "x": 397.9312192693087,
- "y": 262.8483455882353
- },
- "data": {
- "type": "Secret",
- "node": {
- "template": {
- "_type": "Component",
- "code": {
- "type": "code",
- "required": true,
- "placeholder": "",
- "list": false,
- "show": true,
- "multiline": true,
- "value": "from lfx.custom import Component\nfrom lfx.io import SecretStrInput, Output\nfrom lfx.schema.message import Message\n\n\nclass SecretComponent(Component):\n display_name = \"SecretComponent\"\n description = \"SECURE.\"\n icon = \"lock\"\n name = \"Secret\"\n\n inputs = [\n SecretStrInput(\n name=\"secret_key_input\",\n display_name=\"Secret Key\",\n info=\"The Secret to be reveald.\",\n required=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Secret\", name=\"text\", method=\"text_response\"),\n ]\n\n def text_response(self) -> Message:\n self.log(self.secret_key_input)\n message = Message(\n text=self.secret_key_input,\n )\n return message\n",
- "fileTypes": [],
- "file_path": "",
- "password": false,
- "name": "code",
- "advanced": true,
- "dynamic": true,
- "info": "",
- "load_from_db": false,
- "title_case": false
- },
- "secret_key_input": {
- "load_from_db": false,
- "required": true,
- "placeholder": "",
- "show": true,
- "name": "secret_key_input",
- "value": "",
- "display_name": "Secret Key",
- "advanced": false,
- "input_types": [
- "Message"
- ],
- "dynamic": false,
- "info": "The Secret to be reveald.",
- "title_case": false,
- "password": true,
- "type": "str",
- "_input_type": "SecretStrInput"
- }
- },
- "description": "SECURE.",
- "icon": "lock",
- "base_classes": [
- "Message"
- ],
- "display_name": "SecretComponent",
- "documentation": "",
- "custom_fields": {},
- "output_types": [],
- "pinned": false,
- "conditional_paths": [],
- "frozen": false,
- "outputs": [
- {
- "types": [
- "Message"
- ],
- "selected": "Message",
- "name": "text",
- "display_name": "Secret",
- "method": "text_response",
- "value": "__UNDEFINED__",
- "cache": true
- }
- ],
- "field_order": [
- "secret_key_input"
- ],
- "beta": false,
- "edited": true,
- "metadata": {},
- "lf_version": "1.0.18"
- },
- "id": "Secret-zIbKs"
- },
- "selected": false,
- "width": 384,
- "height": 289,
- "positionAbsolute": {
- "x": 397.9312192693087,
- "y": 262.8483455882353
- },
- "dragging": false
- },
- {
- "id": "ChatOutput-u9cPC",
- "type": "genericNode",
- "position": {
- "x": 863,
- "y": 265.171875
- },
- "data": {
- "type": "ChatOutput",
- "node": {
- "template": {
- "_type": "Component",
- "code": {
- "type": "code",
- "required": true,
- "placeholder": "",
- "list": false,
- "show": true,
- "multiline": true,
- "value": "from lfx.base.io.chat import ChatComponent\nfrom lfx.inputs import BoolInput\nfrom lfx.io import DropdownInput, MessageTextInput, Output\nfrom lfx.memory import store_message\nfrom lfx.schema.message import Message\nfrom lfx.utils.constants import MESSAGE_SENDER_AI, MESSAGE_SENDER_NAME_AI, MESSAGE_SENDER_USER\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"ChatOutput\"\n name = \"ChatOutput\"\n\n inputs = [\n MessageTextInput(\n name=\"input_value\",\n display_name=\"Text\",\n info=\"Message to be passed as output.\",\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. If left empty, it will be dynamically set to the Data's text key.\",\n ),\n ]\n outputs = [\n Output(display_name=\"Message\", name=\"message\", method=\"message_response\"),\n ]\n\n def message_response(self) -> Message:\n message = Message(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n )\n if (\n self.session_id\n and isinstance(message, Message)\n and isinstance(message.text, str)\n and self.should_store_message\n ):\n store_message(\n message,\n flow_id=self.graph.flow_id,\n )\n self.message.value = message\n\n self.status = message\n return message\n",
- "fileTypes": [],
- "file_path": "",
- "password": false,
- "name": "code",
- "advanced": true,
- "dynamic": true,
- "info": "",
- "load_from_db": false,
- "title_case": false
- },
- "data_template": {
- "trace_as_input": true,
- "trace_as_metadata": true,
- "load_from_db": false,
- "list": false,
- "required": false,
- "placeholder": "",
- "show": true,
- "name": "data_template",
- "value": "{text}",
- "display_name": "Data Template",
- "advanced": true,
- "input_types": [
- "Message"
- ],
- "dynamic": false,
- "info": "Template to convert Data to Text. If left empty, it will be dynamically set to the Data's text key.",
- "title_case": false,
- "type": "str",
- "_input_type": "MessageTextInput"
- },
- "input_value": {
- "trace_as_input": true,
- "trace_as_metadata": true,
- "load_from_db": false,
- "list": false,
- "required": false,
- "placeholder": "",
- "show": true,
- "name": "input_value",
- "value": "",
- "display_name": "Text",
- "advanced": false,
- "input_types": [
- "Message"
- ],
- "dynamic": false,
- "info": "Message to be passed as output.",
- "title_case": false,
- "type": "str",
- "_input_type": "MessageTextInput"
- },
- "sender": {
- "trace_as_metadata": true,
- "options": [
- "Machine",
- "User"
- ],
- "combobox": false,
- "required": false,
- "placeholder": "",
- "show": true,
- "name": "sender",
- "value": "Machine",
- "display_name": "Sender Type",
- "advanced": true,
- "dynamic": false,
- "info": "Type of sender.",
- "title_case": false,
- "type": "str",
- "_input_type": "DropdownInput"
- },
- "sender_name": {
- "trace_as_input": true,
- "trace_as_metadata": true,
- "load_from_db": false,
- "list": false,
- "required": false,
- "placeholder": "",
- "show": true,
- "name": "sender_name",
- "value": "AI",
- "display_name": "Sender Name",
- "advanced": true,
- "input_types": [
- "Message"
- ],
- "dynamic": false,
- "info": "Name of the sender.",
- "title_case": false,
- "type": "str",
- "_input_type": "MessageTextInput"
- },
- "session_id": {
- "trace_as_input": true,
- "trace_as_metadata": true,
- "load_from_db": false,
- "list": false,
- "required": false,
- "placeholder": "",
- "show": true,
- "name": "session_id",
- "value": "",
- "display_name": "Session ID",
- "advanced": true,
- "input_types": [
- "Message"
- ],
- "dynamic": false,
- "info": "The session ID of the chat. If empty, the current session ID parameter will be used.",
- "title_case": false,
- "type": "str",
- "_input_type": "MessageTextInput"
- },
- "should_store_message": {
- "trace_as_metadata": true,
- "list": false,
- "required": false,
- "placeholder": "",
- "show": true,
- "name": "should_store_message",
- "value": true,
- "display_name": "Store Messages",
- "advanced": true,
- "dynamic": false,
- "info": "Store the message in the history.",
- "title_case": false,
- "type": "bool",
- "_input_type": "BoolInput"
- }
- },
- "description": "Display a chat message in the Playground.",
- "icon": "ChatOutput",
- "base_classes": [
- "Message"
- ],
- "display_name": "Chat Output",
- "documentation": "",
- "custom_fields": {},
- "output_types": [],
- "pinned": false,
- "conditional_paths": [],
- "frozen": false,
- "outputs": [
- {
- "types": [
- "Message"
- ],
- "selected": "Message",
- "name": "message",
- "display_name": "Message",
- "method": "message_response",
- "value": "__UNDEFINED__",
- "cache": true
- }
- ],
- "field_order": [
- "input_value",
- "should_store_message",
- "sender",
- "sender_name",
- "session_id",
- "data_template"
- ],
- "beta": false,
- "edited": false,
- "metadata": {},
- "lf_version": "1.0.18"
- },
- "id": "ChatOutput-u9cPC"
- },
- "selected": false,
- "width": 384,
- "height": 289
- }
- ],
- "edges": [
- {
- "source": "Secret-zIbKs",
- "sourceHandle": "{œdataTypeœ:œSecretœ,œidœ:œSecret-zIbKsœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}",
- "target": "ChatOutput-u9cPC",
- "targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-u9cPCœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}",
- "data": {
- "targetHandle": {
- "fieldName": "input_value",
- "id": "ChatOutput-u9cPC",
- "inputTypes": [
- "Message"
- ],
- "type": "str"
- },
- "sourceHandle": {
- "dataType": "Secret",
- "id": "Secret-zIbKs",
- "name": "text",
- "output_types": [
- "Message"
- ]
- }
- },
- "id": "reactflow__edge-Secret-zIbKs{œdataTypeœ:œSecretœ,œidœ:œSecret-zIbKsœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}-ChatOutput-u9cPC{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-u9cPCœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}",
- "animated": false,
- "className": ""
- }
- ],
- "viewport": {
- "x": 11.839003462770279,
- "y": -83.83942756687532,
- "zoom": 1.0894902752636453
- }
- },
- "description": "Engineered for Excellence, Built for Business.",
- "name": "env_variable_test",
- "last_tested_version": "1.0.18",
- "endpoint_name": "env_variable_test",
- "is_component": false
-}
\ No newline at end of file
diff --git a/src/lfx/tests/data/grouped_chat.json b/src/lfx/tests/data/grouped_chat.json
deleted file mode 100644
index 79a01a1195b5..000000000000
--- a/src/lfx/tests/data/grouped_chat.json
+++ /dev/null
@@ -1 +0,0 @@
-{"description":"A simple chat with a custom prompt template and conversational memory buffer","name":"GroupTest","data":{"nodes":[{"width":384,"height":621,"id":"ChatOpenAI-rUJ1b","type":"genericNode","position":{"x":170.87326389541306,"y":465.8628482073749},"data":{"type":"ChatOpenAI","node":{"template":{"callbacks":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"callbacks","advanced":false,"dynamic":false,"info":"","type":"langchain.callbacks.base.BaseCallbackHandler","list":true},"cache":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"cache","advanced":false,"dynamic":false,"info":"","type":"bool","list":false},"client":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"client","advanced":false,"dynamic":false,"info":"","type":"Any","list":false},"max_retries":{"required":false,"placeholder":"","show":false,"multiline":false,"value":6,"password":false,"name":"max_retries","advanced":false,"dynamic":false,"info":"","type":"int","list":false},"max_tokens":{"required":false,"placeholder":"","show":true,"multiline":false,"password":true,"name":"max_tokens","advanced":false,"dynamic":false,"info":"","type":"int","list":false,"value":""},"metadata":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"metadata","advanced":false,"dynamic":false,"info":"","type":"code","list":false},"model_kwargs":{"required":false,"placeholder":"","show":true,"multiline":false,"password":false,"name":"model_kwargs","advanced":true,"dynamic":false,"info":"","type":"code","list":false},"model_name":{"required":false,"placeholder":"","show":true,"multiline":false,"value":"gpt-3.5-turbo","password":false,"options":["gpt-3.5-turbo-0613","gpt-3.5-turbo","gpt-3.5-turbo-16k-0613","gpt-3.5-turbo-16k","gpt-4-0613","gpt-4-32k-0613","gpt-4","gpt-4-32k"],"name":"model_name","advanced":false,"dynamic":false,"info":"","type":"str","list":true},"n":{"required":false,"placeholder":"","show":false,"multiline":false,"value":1,"password":false,"name":"n","advanced":false,"dynamic":false,"info":"","type":"int","list":false},"openai_api_base":{"required":false,"placeholder":"","show":true,"multiline":false,"password":false,"name":"openai_api_base","display_name":"OpenAI API Base","advanced":false,"dynamic":false,"info":"\nThe base URL of the OpenAI API. 
Defaults to https://api.openai.com/v1.\n\nYou can change this to use other APIs like JinaChat, LocalAI and Prem.\n","type":"str","list":false},"openai_api_key":{"required":false,"placeholder":"","show":true,"multiline":false,"value":"","password":true,"name":"openai_api_key","display_name":"OpenAI API Key","advanced":false,"dynamic":false,"info":"","type":"str","list":false},"openai_organization":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"openai_organization","display_name":"OpenAI Organization","advanced":false,"dynamic":false,"info":"","type":"str","list":false},"openai_proxy":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"openai_proxy","display_name":"OpenAI Proxy","advanced":false,"dynamic":false,"info":"","type":"str","list":false},"request_timeout":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"request_timeout","advanced":false,"dynamic":false,"info":"","type":"float","list":false,"value":60},"streaming":{"required":false,"placeholder":"","show":false,"multiline":false,"value":false,"password":false,"name":"streaming","advanced":false,"dynamic":false,"info":"","type":"bool","list":false},"tags":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"tags","advanced":false,"dynamic":false,"info":"","type":"str","list":true},"temperature":{"required":false,"placeholder":"","show":true,"multiline":false,"value":0.7,"password":false,"name":"temperature","advanced":false,"dynamic":false,"info":"","type":"float","list":false},"tiktoken_model_name":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"tiktoken_model_name","advanced":false,"dynamic":false,"info":"","type":"str","list":false},"verbose":{"required":false,"placeholder":"","show":false,"multiline":false,"value":false,"password":false,"name":"verbose","advanced":false,"dynamic":false,"info":"","type":"bool","list":false},"_type":"ChatOpenAI"},"description":"`OpenAI` Chat large language models API.","base_classes":["ChatOpenAI","BaseChatModel","BaseLanguageModel","BaseLLM"],"display_name":"ChatOpenAI","documentation":"https://python.langchain.com/docs/modules/model_io/models/chat/integrations/openai"},"id":"ChatOpenAI-rUJ1b","value":null},"selected":false,"dragging":false,"positionAbsolute":{"x":170.87326389541306,"y":465.8628482073749}},{"width":384,"height":445,"id":"PromptTemplate-Wjk4g","type":"genericNode","position":{"x":190.53285757241179,"y":6.073885727980169},"data":{"type":"PromptTemplate","node":{"template":{"output_parser":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"output_parser","advanced":false,"dynamic":true,"info":"","type":"BaseOutputParser","list":false},"input_variables":{"required":true,"placeholder":"","show":false,"multiline":false,"password":false,"name":"input_variables","advanced":false,"dynamic":true,"info":"","type":"str","list":true,"value":["history","text"]},"partial_variables":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"partial_variables","advanced":false,"dynamic":true,"info":"","type":"code","list":false},"template":{"required":true,"placeholder":"","show":true,"multiline":true,"password":false,"name":"template","advanced":false,"dynamic":true,"info":"","type":"prompt","list":false,"value":"The following is a friendly conversation between a human and an AI. 
The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n\nCurrent conversation:\n\n{history}\nHuman: {text}\nAI:"},"template_format":{"required":false,"placeholder":"","show":false,"multiline":false,"value":"f-string","password":false,"name":"template_format","advanced":false,"dynamic":true,"info":"","type":"str","list":false},"validate_template":{"required":false,"placeholder":"","show":false,"multiline":false,"value":true,"password":false,"name":"validate_template","advanced":false,"dynamic":true,"info":"","type":"bool","list":false},"_type":"PromptTemplate","history":{"required":false,"placeholder":"","show":true,"multiline":true,"value":"","password":false,"name":"history","display_name":"history","advanced":false,"input_types":["Document","BaseOutputParser"],"dynamic":false,"info":"","type":"str","list":false},"text":{"required":false,"placeholder":"","show":true,"multiline":true,"value":"","password":false,"name":"text","display_name":"text","advanced":false,"input_types":["Document","BaseOutputParser"],"dynamic":false,"info":"","type":"str","list":false}},"description":"A prompt template for a language model.","base_classes":["BasePromptTemplate","PromptTemplate","StringPromptTemplate"],"name":"","display_name":"PromptTemplate","documentation":"https://python.langchain.com/docs/modules/model_io/prompts/prompt_templates/","custom_fields":{"":["history","text"],"template":["history","text"]},"output_types":[],"field_formatters":{"formatters":{"openai_api_key":{}},"base_formatters":{"kwargs":{},"optional":{},"list":{},"dict":{},"union":{},"multiline":{},"show":{},"password":{},"default":{},"headers":{},"dict_code_file":{},"model_fields":{"MODEL_DICT":{"OpenAI":["text-davinci-003","text-davinci-002","text-curie-001","text-babbage-001","text-ada-001"],"ChatOpenAI":["gpt-3.5-turbo-0613","gpt-3.5-turbo","gpt-3.5-turbo-16k-0613","gpt-3.5-turbo-16k","gpt-4-0613","gpt-4-32k-0613","gpt-4","gpt-4-32k"],"Anthropic":["claude-v1","claude-v1-100k","claude-instant-v1","claude-instant-v1-100k","claude-v1.3","claude-v1.3-100k","claude-v1.2","claude-v1.0","claude-instant-v1.1","claude-instant-v1.1-100k","claude-instant-v1.0"],"ChatAnthropic":["claude-v1","claude-v1-100k","claude-instant-v1","claude-instant-v1-100k","claude-v1.3","claude-v1.3-100k","claude-v1.2","claude-v1.0","claude-instant-v1.1","claude-instant-v1.1-100k","claude-instant-v1.0"]}}}},"beta":false,"error":null},"id":"PromptTemplate-Wjk4g"},"selected":false,"positionAbsolute":{"x":190.53285757241179,"y":6.073885727980169},"dragging":false},{"width":384,"height":307,"data":{"id":"LLMChain-pimAb","type":"LLMChain","node":{"display_name":"group Node","documentation":"","base_classes":["Chain","LLMChain","function"],"description":"double click to edit description","template":{"llm_LLMChain-2P369":{"required":true,"placeholder":"","show":true,"multiline":false,"password":false,"name":"llm","advanced":false,"dynamic":false,"info":"","type":"BaseLanguageModel","list":false,"proxy":{"id":"LLMChain-2P369","field":"llm"},"display_name":"LLM - LLMChain"},"prompt_LLMChain-2P369":{"required":true,"placeholder":"","show":true,"multiline":false,"password":false,"name":"prompt","advanced":false,"dynamic":false,"info":"","type":"BasePromptTemplate","list":false,"proxy":{"id":"LLMChain-2P369","field":"prompt"},"display_name":"Prompt - 
LLMChain"},"output_key_LLMChain-2P369":{"required":true,"placeholder":"","show":true,"multiline":false,"value":"text","password":false,"name":"output_key","advanced":true,"dynamic":false,"info":"","type":"str","list":false,"proxy":{"id":"LLMChain-2P369","field":"output_key"},"display_name":"Output Key - LLMChain"},"chat_memory_ConversationBufferMemory-kUMif":{"required":false,"placeholder":"","show":true,"multiline":false,"password":false,"name":"chat_memory","advanced":false,"dynamic":false,"info":"","type":"BaseChatMessageHistory","list":false,"proxy":{"id":"ConversationBufferMemory-kUMif","field":"chat_memory"},"display_name":"Chat Memory - ConversationBuf..."},"input_key_ConversationBufferMemory-kUMif":{"required":false,"placeholder":"","show":true,"multiline":false,"value":"","password":false,"name":"input_key","advanced":true,"dynamic":false,"info":"The variable to be used as Chat Input when more than one variable is available.","type":"str","list":false,"proxy":{"id":"ConversationBufferMemory-kUMif","field":"input_key"},"display_name":"Input Key - ConversationBuf..."},"memory_key_ConversationBufferMemory-kUMif":{"required":false,"placeholder":"","show":true,"multiline":false,"value":"history","password":false,"name":"memory_key","advanced":true,"dynamic":false,"info":"","type":"str","list":false,"proxy":{"id":"ConversationBufferMemory-kUMif","field":"memory_key"},"display_name":"Memory Key - ConversationBuf..."},"output_key_ConversationBufferMemory-kUMif":{"required":false,"placeholder":"","show":true,"multiline":false,"value":"","password":false,"name":"output_key","advanced":true,"dynamic":false,"info":"The variable to be used as Chat Output (e.g. answer in a ConversationalRetrievalChain)","type":"str","list":false,"proxy":{"id":"ConversationBufferMemory-kUMif","field":"output_key"},"display_name":"Output Key - ConversationBuf..."},"return_messages_ConversationBufferMemory-kUMif":{"required":false,"placeholder":"","show":true,"multiline":false,"password":false,"name":"return_messages","advanced":true,"dynamic":false,"info":"","type":"bool","list":false,"proxy":{"id":"ConversationBufferMemory-kUMif","field":"return_messages"},"display_name":"Return Messages - 
ConversationBuf..."}},"flow":{"data":{"nodes":[{"width":384,"height":307,"id":"LLMChain-2P369","type":"genericNode","position":{"x":1250.1806448178158,"y":588.4657451068704},"data":{"type":"LLMChain","node":{"template":{"callbacks":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"callbacks","advanced":false,"dynamic":false,"info":"","type":"langchain.callbacks.base.BaseCallbackHandler","list":true},"llm":{"required":true,"placeholder":"","show":true,"multiline":false,"password":false,"name":"llm","advanced":false,"dynamic":false,"info":"","type":"BaseLanguageModel","list":false},"memory":{"required":false,"placeholder":"","show":true,"multiline":false,"password":false,"name":"memory","advanced":false,"dynamic":false,"info":"","type":"BaseMemory","list":false},"output_parser":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"output_parser","advanced":false,"dynamic":false,"info":"","type":"BaseLLMOutputParser","list":false},"prompt":{"required":true,"placeholder":"","show":true,"multiline":false,"password":false,"name":"prompt","advanced":false,"dynamic":false,"info":"","type":"BasePromptTemplate","list":false},"llm_kwargs":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"llm_kwargs","advanced":false,"dynamic":false,"info":"","type":"code","list":false},"metadata":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"metadata","advanced":false,"dynamic":false,"info":"","type":"code","list":false},"output_key":{"required":true,"placeholder":"","show":true,"multiline":false,"value":"text","password":false,"name":"output_key","advanced":true,"dynamic":false,"info":"","type":"str","list":false},"return_final_only":{"required":false,"placeholder":"","show":false,"multiline":false,"value":true,"password":false,"name":"return_final_only","advanced":false,"dynamic":false,"info":"","type":"bool","list":false},"tags":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"tags","advanced":false,"dynamic":false,"info":"","type":"str","list":true},"verbose":{"required":false,"placeholder":"","show":false,"multiline":false,"value":false,"password":false,"name":"verbose","advanced":true,"dynamic":false,"info":"","type":"bool","list":false},"_type":"LLMChain"},"description":"Chain to run queries against 
LLMs.","base_classes":["Chain","LLMChain","function"],"display_name":"LLMChain","documentation":"https://python.langchain.com/docs/modules/chains/foundational/llm_chain"},"id":"LLMChain-2P369","value":null},"selected":true,"positionAbsolute":{"x":1250.1806448178158,"y":588.4657451068704},"dragging":false},{"width":384,"height":561,"id":"ConversationBufferMemory-kUMif","type":"genericNode","position":{"x":802.1806448178158,"y":43.265745106870426},"data":{"type":"ConversationBufferMemory","node":{"template":{"chat_memory":{"required":false,"placeholder":"","show":true,"multiline":false,"password":false,"name":"chat_memory","advanced":false,"dynamic":false,"info":"","type":"BaseChatMessageHistory","list":false},"ai_prefix":{"required":false,"placeholder":"","show":false,"multiline":false,"value":"AI","password":false,"name":"ai_prefix","advanced":false,"dynamic":false,"info":"","type":"str","list":false},"human_prefix":{"required":false,"placeholder":"","show":false,"multiline":false,"value":"Human","password":false,"name":"human_prefix","advanced":false,"dynamic":false,"info":"","type":"str","list":false},"input_key":{"required":false,"placeholder":"","show":true,"multiline":false,"value":"","password":false,"name":"input_key","advanced":false,"dynamic":false,"info":"The variable to be used as Chat Input when more than one variable is available.","type":"str","list":false},"memory_key":{"required":false,"placeholder":"","show":true,"multiline":false,"value":"history","password":false,"name":"memory_key","advanced":false,"dynamic":false,"info":"","type":"str","list":false},"output_key":{"required":false,"placeholder":"","show":true,"multiline":false,"value":"","password":false,"name":"output_key","advanced":false,"dynamic":false,"info":"The variable to be used as Chat Output (e.g. 
answer in a ConversationalRetrievalChain)","type":"str","list":false},"return_messages":{"required":false,"placeholder":"","show":true,"multiline":false,"password":false,"name":"return_messages","advanced":false,"dynamic":false,"info":"","type":"bool","list":false},"_type":"ConversationBufferMemory"},"description":"Buffer for storing conversation memory.","base_classes":["ConversationBufferMemory","BaseMemory","BaseChatMemory"],"display_name":"ConversationBufferMemory","documentation":"https://python.langchain.com/docs/modules/memory/how_to/buffer"},"id":"ConversationBufferMemory-kUMif","value":null},"selected":true,"positionAbsolute":{"x":802.1806448178158,"y":43.265745106870426},"dragging":false}],"edges":[{"source":"ConversationBufferMemory-kUMif","sourceHandle":"{œbaseClassesœ:[œConversationBufferMemoryœ,œBaseMemoryœ,œBaseChatMemoryœ],œdataTypeœ:œConversationBufferMemoryœ,œidœ:œConversationBufferMemory-kUMifœ}","target":"LLMChain-2P369","targetHandle":"{œfieldNameœ:œmemoryœ,œidœ:œLLMChain-2P369œ,œinputTypesœ:null,œtypeœ:œBaseMemoryœ}","className":"stroke-gray-900 stroke-connection","id":"reactflow__edge-ConversationBufferMemory-kUMif{œbaseClassesœ:[œConversationBufferMemoryœ,œBaseMemoryœ,œBaseChatMemoryœ],œdataTypeœ:œConversationBufferMemoryœ,œidœ:œConversationBufferMemory-kUMifœ}-LLMChain-2P369{œfieldNameœ:œmemoryœ,œidœ:œLLMChain-2P369œ,œinputTypesœ:null,œtypeœ:œBaseMemoryœ}","animated":false,"style":{"stroke":"#555"},"data":{"sourceHandle":{"baseClasses":["ConversationBufferMemory","BaseMemory","BaseChatMemory"],"dataType":"ConversationBufferMemory","id":"ConversationBufferMemory-kUMif"},"targetHandle":{"fieldName":"memory","id":"LLMChain-2P369","inputTypes":null,"type":"BaseMemory"}},"selected":true}],"viewport":{"x":169.1802019559105,"y":186.01151115352206,"zoom":0.5224749517346055}},"name":"Pensive Franklin","description":"","id":"3Sb9U"}}},"id":"LLMChain-pimAb","position":{"x":775.4509216701647,"y":315.8657451068704},"type":"genericNode","selected":false,"positionAbsolute":{"x":775.4509216701647,"y":315.8657451068704},"dragging":false}],"edges":[{"source":"PromptTemplate-Wjk4g","sourceHandle":"{œbaseClassesœ:[œBasePromptTemplateœ,œPromptTemplateœ,œStringPromptTemplateœ],œdataTypeœ:œPromptTemplateœ,œidœ:œPromptTemplate-Wjk4gœ}","target":"LLMChain-pimAb","targetHandle":"{œfieldNameœ:œprompt_LLMChain-2P369œ,œidœ:œLLMChain-pimAbœ,œinputTypesœ:null,œproxyœ:{œfieldœ:œpromptœ,œidœ:œLLMChain-2P369œ},œtypeœ:œBasePromptTemplateœ}","data":{"targetHandle":{"fieldName":"prompt_LLMChain-2P369","id":"LLMChain-pimAb","inputTypes":null,"proxy":{"field":"prompt","id":"LLMChain-2P369"},"type":"BasePromptTemplate"},"sourceHandle":{"baseClasses":["BasePromptTemplate","PromptTemplate","StringPromptTemplate"],"dataType":"PromptTemplate","id":"PromptTemplate-Wjk4g"}},"style":{"stroke":"#555"},"className":"stroke-foreground 
stroke-connection","animated":false,"id":"reactflow__edge-PromptTemplate-Wjk4g{œbaseClassesœ:[œBasePromptTemplateœ,œPromptTemplateœ,œStringPromptTemplateœ],œdataTypeœ:œPromptTemplateœ,œidœ:œPromptTemplate-Wjk4gœ}-LLMChain-pimAb{œfieldNameœ:œprompt_LLMChain-2P369œ,œidœ:œLLMChain-pimAbœ,œinputTypesœ:null,œproxyœ:{œfieldœ:œpromptœ,œidœ:œLLMChain-2P369œ},œtypeœ:œBasePromptTemplateœ}"},{"source":"ChatOpenAI-rUJ1b","sourceHandle":"{œbaseClassesœ:[œChatOpenAIœ,œBaseChatModelœ,œBaseLanguageModelœ,œBaseLLMœ],œdataTypeœ:œChatOpenAIœ,œidœ:œChatOpenAI-rUJ1bœ}","target":"LLMChain-pimAb","targetHandle":"{œfieldNameœ:œllm_LLMChain-2P369œ,œidœ:œLLMChain-pimAbœ,œinputTypesœ:null,œproxyœ:{œfieldœ:œllmœ,œidœ:œLLMChain-2P369œ},œtypeœ:œBaseLanguageModelœ}","data":{"targetHandle":{"fieldName":"llm_LLMChain-2P369","id":"LLMChain-pimAb","inputTypes":null,"proxy":{"field":"llm","id":"LLMChain-2P369"},"type":"BaseLanguageModel"},"sourceHandle":{"baseClasses":["ChatOpenAI","BaseChatModel","BaseLanguageModel","BaseLLM"],"dataType":"ChatOpenAI","id":"ChatOpenAI-rUJ1b"}},"style":{"stroke":"#555"},"className":"stroke-foreground stroke-connection","animated":false,"id":"reactflow__edge-ChatOpenAI-rUJ1b{œbaseClassesœ:[œChatOpenAIœ,œBaseChatModelœ,œBaseLanguageModelœ,œBaseLLMœ],œdataTypeœ:œChatOpenAIœ,œidœ:œChatOpenAI-rUJ1bœ}-LLMChain-pimAb{œfieldNameœ:œllm_LLMChain-2P369œ,œidœ:œLLMChain-pimAbœ,œinputTypesœ:null,œproxyœ:{œfieldœ:œllmœ,œidœ:œLLMChain-2P369œ},œtypeœ:œBaseLanguageModelœ}"}],"viewport":{"x":169.18020195591043,"y":186.01151115352206,"zoom":0.5224749517346055}},"id":"6a498bfb-bdb4-40f8-9ac5-30c6afcb2d53"}
\ No newline at end of file
diff --git a/src/lfx/tests/data/one_group_chat.json b/src/lfx/tests/data/one_group_chat.json
deleted file mode 100644
index 31b2df84e6a9..000000000000
--- a/src/lfx/tests/data/one_group_chat.json
+++ /dev/null
@@ -1,1302 +0,0 @@
-{
- "description": "A simple chat with a custom prompt template and conversational memory buffer",
- "name": "One Group",
- "data": {
- "nodes": [
- {
- "width": 384,
- "height": 485,
- "data": {
- "id": "LLMChain-7wD4b",
- "type": "LLMChain",
- "node": {
- "display_name": "group Node",
- "documentation": "",
- "base_classes": [
- "Chain",
- "LLMChain",
- "function"
- ],
- "description": "double click to edit description",
- "template": {
- "max_tokens_ChatOpenAI-WlIXw": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "password": true,
- "name": "max_tokens",
- "advanced": true,
- "dynamic": false,
- "info": "",
- "type": "int",
- "list": false,
- "proxy": {
- "id": "ChatOpenAI-WlIXw",
- "field": "max_tokens"
- },
- "display_name": "Max Tokens - ChatOpenAI",
- "value": ""
- },
- "model_kwargs_ChatOpenAI-WlIXw": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "password": false,
- "name": "model_kwargs",
- "advanced": true,
- "dynamic": false,
- "info": "",
- "type": "code",
- "list": false,
- "proxy": {
- "id": "ChatOpenAI-WlIXw",
- "field": "model_kwargs"
- },
- "display_name": "Model Kwargs - ChatOpenAI"
- },
- "model_name_ChatOpenAI-WlIXw": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "value": "gpt-3.5-turbo",
- "password": false,
- "options": [
- "gpt-3.5-turbo-0613",
- "gpt-3.5-turbo",
- "gpt-3.5-turbo-16k-0613",
- "gpt-3.5-turbo-16k",
- "gpt-4-0613",
- "gpt-4-32k-0613",
- "gpt-4",
- "gpt-4-32k"
- ],
- "name": "model_name",
- "advanced": true,
- "dynamic": false,
- "info": "",
- "type": "str",
- "list": true,
- "proxy": {
- "id": "ChatOpenAI-WlIXw",
- "field": "model_name"
- },
- "display_name": "Model Name - ChatOpenAI"
- },
- "openai_api_base_ChatOpenAI-WlIXw": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "password": false,
- "name": "openai_api_base",
- "display_name": "OpenAI API Base - ChatOpenAI",
- "advanced": true,
- "dynamic": false,
- "info": "\nThe base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\n\nYou can change this to use other APIs like JinaChat, LocalAI and Prem.\n",
- "type": "str",
- "list": false,
- "proxy": {
- "id": "ChatOpenAI-WlIXw",
- "field": "openai_api_base"
- }
- },
- "openai_api_key_ChatOpenAI-WlIXw": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "value": "test",
- "password": true,
- "name": "openai_api_key",
- "display_name": "OpenAI API Key - ChatOpenAI",
- "advanced": true,
- "dynamic": false,
- "info": "",
- "type": "str",
- "list": false,
- "proxy": {
- "id": "ChatOpenAI-WlIXw",
- "field": "openai_api_key"
- }
- },
- "temperature_ChatOpenAI-WlIXw": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "value": 0.7,
- "password": false,
- "name": "temperature",
- "advanced": true,
- "dynamic": false,
- "info": "",
- "type": "float",
- "list": false,
- "proxy": {
- "id": "ChatOpenAI-WlIXw",
- "field": "temperature"
- },
- "display_name": "Temperature - ChatOpenAI"
- },
- "output_key_LLMChain-qaGdJ": {
- "required": true,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "value": "text",
- "password": false,
- "name": "output_key",
- "advanced": true,
- "dynamic": false,
- "info": "",
- "type": "str",
- "list": false,
- "proxy": {
- "id": "LLMChain-qaGdJ",
- "field": "output_key"
- },
- "display_name": "Output Key - LLMChain"
- },
- "chat_memory_ConversationBufferMemory-WkJkh": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "password": false,
- "name": "chat_memory",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "BaseChatMessageHistory",
- "list": false,
- "proxy": {
- "id": "ConversationBufferMemory-WkJkh",
- "field": "chat_memory"
- },
- "display_name": "Chat Memory - ConversationBuf..."
- },
- "input_key_ConversationBufferMemory-WkJkh": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "value": "",
- "password": false,
- "name": "input_key",
- "advanced": true,
- "dynamic": false,
- "info": "The variable to be used as Chat Input when more than one variable is available.",
- "type": "str",
- "list": false,
- "proxy": {
- "id": "ConversationBufferMemory-WkJkh",
- "field": "input_key"
- },
- "display_name": "Input Key - ConversationBuf..."
- },
- "memory_key_ConversationBufferMemory-WkJkh": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "value": "history",
- "password": false,
- "name": "memory_key",
- "advanced": true,
- "dynamic": false,
- "info": "",
- "type": "str",
- "list": false,
- "proxy": {
- "id": "ConversationBufferMemory-WkJkh",
- "field": "memory_key"
- },
- "display_name": "Memory Key - ConversationBuf..."
- },
- "output_key_ConversationBufferMemory-WkJkh": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "value": "",
- "password": false,
- "name": "output_key",
- "advanced": true,
- "dynamic": false,
- "info": "The variable to be used as Chat Output (e.g. answer in a ConversationalRetrievalChain)",
- "type": "str",
- "list": false,
- "proxy": {
- "id": "ConversationBufferMemory-WkJkh",
- "field": "output_key"
- },
- "display_name": "Output Key - ConversationBuf..."
- },
- "return_messages_ConversationBufferMemory-WkJkh": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "password": false,
- "name": "return_messages",
- "advanced": true,
- "dynamic": false,
- "info": "",
- "type": "bool",
- "list": false,
- "proxy": {
- "id": "ConversationBufferMemory-WkJkh",
- "field": "return_messages"
- },
- "display_name": "Return Messages - ConversationBuf..."
- },
- "template_PromptTemplate-h1IlH": {
- "required": true,
- "placeholder": "",
- "show": true,
- "multiline": true,
- "password": false,
- "name": "template",
- "advanced": false,
- "dynamic": true,
- "info": "",
- "type": "prompt",
- "list": false,
- "value": "The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n\nCurrent conversation:\n\n{history}\nHuman: {text}\nAI:",
- "proxy": {
- "id": "PromptTemplate-h1IlH",
- "field": "template"
- },
- "display_name": "Template - PromptTemplate"
- },
- "history_PromptTemplate-h1IlH": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": true,
- "value": "",
- "password": false,
- "name": "history",
- "display_name": "history - PromptTemplate",
- "advanced": false,
- "input_types": [
- "Document",
- "BaseOutputParser"
- ],
- "dynamic": false,
- "info": "",
- "type": "str",
- "list": false,
- "proxy": {
- "id": "PromptTemplate-h1IlH",
- "field": "history"
- }
- },
- "text_PromptTemplate-h1IlH": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": true,
- "value": "",
- "password": false,
- "name": "text",
- "display_name": "text - PromptTemplate",
- "advanced": false,
- "input_types": [
- "Document",
- "BaseOutputParser"
- ],
- "dynamic": false,
- "info": "",
- "type": "str",
- "list": false,
- "proxy": {
- "id": "PromptTemplate-h1IlH",
- "field": "text"
- }
- }
- },
- "flow": {
- "data": {
- "nodes": [
- {
- "width": 384,
- "height": 621,
- "id": "ChatOpenAI-WlIXw",
- "type": "genericNode",
- "position": {
- "x": 170.87326389541306,
- "y": 465.8628482073749
- },
- "data": {
- "type": "ChatOpenAI",
- "node": {
- "template": {
- "callbacks": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "callbacks",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "langchain.callbacks.base.BaseCallbackHandler",
- "list": true
- },
- "cache": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "cache",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "bool",
- "list": false
- },
- "client": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "client",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "Any",
- "list": false
- },
- "max_retries": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "value": 6,
- "password": false,
- "name": "max_retries",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "int",
- "list": false
- },
- "max_tokens": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "password": true,
- "name": "max_tokens",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "int",
- "list": false
- },
- "metadata": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "metadata",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "code",
- "list": false
- },
- "model_kwargs": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "password": false,
- "name": "model_kwargs",
- "advanced": true,
- "dynamic": false,
- "info": "",
- "type": "code",
- "list": false
- },
- "model_name": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "value": "gpt-3.5-turbo",
- "password": false,
- "options": [
- "gpt-3.5-turbo-0613",
- "gpt-3.5-turbo",
- "gpt-3.5-turbo-16k-0613",
- "gpt-3.5-turbo-16k",
- "gpt-4-0613",
- "gpt-4-32k-0613",
- "gpt-4",
- "gpt-4-32k"
- ],
- "name": "model_name",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "str",
- "list": true
- },
- "n": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "value": 1,
- "password": false,
- "name": "n",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "int",
- "list": false
- },
- "openai_api_base": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "password": false,
- "name": "openai_api_base",
- "display_name": "OpenAI API Base",
- "advanced": false,
- "dynamic": false,
- "info": "\nThe base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\n\nYou can change this to use other APIs like JinaChat, LocalAI and Prem.\n",
- "type": "str",
- "list": false
- },
- "openai_api_key": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "value": "",
- "password": true,
- "name": "openai_api_key",
- "display_name": "OpenAI API Key",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "str",
- "list": false
- },
- "openai_organization": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "openai_organization",
- "display_name": "OpenAI Organization",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "str",
- "list": false
- },
- "openai_proxy": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "openai_proxy",
- "display_name": "OpenAI Proxy",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "str",
- "list": false
- },
- "request_timeout": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "request_timeout",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "float",
- "list": false,
- "value": 60
- },
- "streaming": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "value": false,
- "password": false,
- "name": "streaming",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "bool",
- "list": false
- },
- "tags": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "tags",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "str",
- "list": true
- },
- "temperature": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "value": 0.7,
- "password": false,
- "name": "temperature",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "float",
- "list": false
- },
- "tiktoken_model_name": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "tiktoken_model_name",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "str",
- "list": false
- },
- "verbose": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "value": false,
- "password": false,
- "name": "verbose",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "bool",
- "list": false
- },
- "_type": "ChatOpenAI"
- },
- "description": "`OpenAI` Chat large language models API.",
- "base_classes": [
- "ChatOpenAI",
- "BaseLanguageModel",
- "BaseChatModel",
- "BaseLLM"
- ],
- "display_name": "ChatOpenAI",
- "documentation": "https://python.langchain.com/docs/modules/model_io/models/chat/integrations/openai"
- },
- "id": "ChatOpenAI-WlIXw",
- "value": null
- },
- "selected": true,
- "dragging": false,
- "positionAbsolute": {
- "x": 170.87326389541306,
- "y": 465.8628482073749
- }
- },
- {
- "width": 384,
- "height": 307,
- "id": "LLMChain-qaGdJ",
- "type": "genericNode",
- "position": {
- "x": 1250.1806448178158,
- "y": 588.4657451068704
- },
- "data": {
- "type": "LLMChain",
- "node": {
- "template": {
- "callbacks": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "callbacks",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "langchain.callbacks.base.BaseCallbackHandler",
- "list": true
- },
- "llm": {
- "required": true,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "password": false,
- "name": "llm",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "BaseLanguageModel",
- "list": false
- },
- "memory": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "password": false,
- "name": "memory",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "BaseMemory",
- "list": false
- },
- "output_parser": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "output_parser",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "BaseLLMOutputParser",
- "list": false
- },
- "prompt": {
- "required": true,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "password": false,
- "name": "prompt",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "BasePromptTemplate",
- "list": false
- },
- "llm_kwargs": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "llm_kwargs",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "code",
- "list": false
- },
- "metadata": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "metadata",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "code",
- "list": false
- },
- "output_key": {
- "required": true,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "value": "text",
- "password": false,
- "name": "output_key",
- "advanced": true,
- "dynamic": false,
- "info": "",
- "type": "str",
- "list": false
- },
- "return_final_only": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "value": true,
- "password": false,
- "name": "return_final_only",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "bool",
- "list": false
- },
- "tags": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "tags",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "str",
- "list": true
- },
- "verbose": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "value": false,
- "password": false,
- "name": "verbose",
- "advanced": true,
- "dynamic": false,
- "info": "",
- "type": "bool",
- "list": false
- },
- "_type": "LLMChain"
- },
- "description": "Chain to run queries against LLMs.",
- "base_classes": [
- "Chain",
- "LLMChain",
- "function"
- ],
- "display_name": "LLMChain",
- "documentation": "https://python.langchain.com/docs/modules/chains/foundational/llm_chain"
- },
- "id": "LLMChain-qaGdJ",
- "value": null
- },
- "selected": true,
- "positionAbsolute": {
- "x": 1250.1806448178158,
- "y": 588.4657451068704
- },
- "dragging": false
- },
- {
- "width": 384,
- "height": 561,
- "id": "ConversationBufferMemory-WkJkh",
- "type": "genericNode",
- "position": {
- "x": 802.1806448178158,
- "y": 43.265745106870426
- },
- "data": {
- "type": "ConversationBufferMemory",
- "node": {
- "template": {
- "chat_memory": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "password": false,
- "name": "chat_memory",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "BaseChatMessageHistory",
- "list": false
- },
- "ai_prefix": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "value": "AI",
- "password": false,
- "name": "ai_prefix",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "str",
- "list": false
- },
- "human_prefix": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "value": "Human",
- "password": false,
- "name": "human_prefix",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "str",
- "list": false
- },
- "input_key": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "value": "",
- "password": false,
- "name": "input_key",
- "advanced": false,
- "dynamic": false,
- "info": "The variable to be used as Chat Input when more than one variable is available.",
- "type": "str",
- "list": false
- },
- "memory_key": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "value": "history",
- "password": false,
- "name": "memory_key",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "str",
- "list": false
- },
- "output_key": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "value": "",
- "password": false,
- "name": "output_key",
- "advanced": false,
- "dynamic": false,
- "info": "The variable to be used as Chat Output (e.g. answer in a ConversationalRetrievalChain)",
- "type": "str",
- "list": false
- },
- "return_messages": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": false,
- "password": false,
- "name": "return_messages",
- "advanced": false,
- "dynamic": false,
- "info": "",
- "type": "bool",
- "list": false
- },
- "_type": "ConversationBufferMemory"
- },
- "description": "Buffer for storing conversation memory.",
- "base_classes": [
- "BaseChatMemory",
- "BaseMemory",
- "ConversationBufferMemory"
- ],
- "display_name": "ConversationBufferMemory",
- "documentation": "https://python.langchain.com/docs/modules/memory/how_to/buffer"
- },
- "id": "ConversationBufferMemory-WkJkh",
- "value": null
- },
- "selected": true,
- "positionAbsolute": {
- "x": 802.1806448178158,
- "y": 43.265745106870426
- },
- "dragging": false
- },
- {
- "width": 384,
- "height": 445,
- "id": "PromptTemplate-h1IlH",
- "type": "genericNode",
- "position": {
- "x": 190.53285757241179,
- "y": 6.073885727980169
- },
- "data": {
- "type": "PromptTemplate",
- "node": {
- "template": {
- "output_parser": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "output_parser",
- "advanced": false,
- "dynamic": true,
- "info": "",
- "type": "BaseOutputParser",
- "list": false
- },
- "input_variables": {
- "required": true,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "input_variables",
- "advanced": false,
- "dynamic": true,
- "info": "",
- "type": "str",
- "list": true,
- "value": [
- "history",
- "text"
- ]
- },
- "partial_variables": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "password": false,
- "name": "partial_variables",
- "advanced": false,
- "dynamic": true,
- "info": "",
- "type": "code",
- "list": false
- },
- "template": {
- "required": true,
- "placeholder": "",
- "show": true,
- "multiline": true,
- "password": false,
- "name": "template",
- "advanced": false,
- "dynamic": true,
- "info": "",
- "type": "prompt",
- "list": false,
- "value": "The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n\nCurrent conversation:\n\n{history}\nHuman: {text}\nAI:"
- },
- "template_format": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "value": "f-string",
- "password": false,
- "name": "template_format",
- "advanced": false,
- "dynamic": true,
- "info": "",
- "type": "str",
- "list": false
- },
- "validate_template": {
- "required": false,
- "placeholder": "",
- "show": false,
- "multiline": false,
- "value": true,
- "password": false,
- "name": "validate_template",
- "advanced": false,
- "dynamic": true,
- "info": "",
- "type": "bool",
- "list": false
- },
- "_type": "PromptTemplate",
- "history": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": true,
- "value": "",
- "password": false,
- "name": "history",
- "display_name": "history",
- "advanced": false,
- "input_types": [
- "Document",
- "BaseOutputParser"
- ],
- "dynamic": false,
- "info": "",
- "type": "str",
- "list": false
- },
- "text": {
- "required": false,
- "placeholder": "",
- "show": true,
- "multiline": true,
- "value": "",
- "password": false,
- "name": "text",
- "display_name": "text",
- "advanced": false,
- "input_types": [
- "Document",
- "BaseOutputParser"
- ],
- "dynamic": false,
- "info": "",
- "type": "str",
- "list": false
- }
- },
- "description": "A prompt template for a language model.",
- "base_classes": [
- "BasePromptTemplate",
- "PromptTemplate",
- "StringPromptTemplate"
- ],
- "name": "",
- "display_name": "PromptTemplate",
- "documentation": "https://python.langchain.com/docs/modules/model_io/prompts/prompt_templates/",
- "custom_fields": {
- "": [
- "history",
- "text"
- ],
- "template": [
- "history",
- "text"
- ]
- },
- "output_types": [],
- "field_formatters": {
- "formatters": {
- "openai_api_key": {}
- },
- "base_formatters": {
- "kwargs": {},
- "optional": {},
- "list": {},
- "dict": {},
- "union": {},
- "multiline": {},
- "show": {},
- "password": {},
- "default": {},
- "headers": {},
- "dict_code_file": {},
- "model_fields": {
- "MODEL_DICT": {
- "OpenAI": [
- "text-davinci-003",
- "text-davinci-002",
- "text-curie-001",
- "text-babbage-001",
- "text-ada-001"
- ],
- "ChatOpenAI": [
- "gpt-3.5-turbo-0613",
- "gpt-3.5-turbo",
- "gpt-3.5-turbo-16k-0613",
- "gpt-3.5-turbo-16k",
- "gpt-4-0613",
- "gpt-4-32k-0613",
- "gpt-4",
- "gpt-4-32k"
- ],
- "Anthropic": [
- "claude-v1",
- "claude-v1-100k",
- "claude-instant-v1",
- "claude-instant-v1-100k",
- "claude-v1.3",
- "claude-v1.3-100k",
- "claude-v1.2",
- "claude-v1.0",
- "claude-instant-v1.1",
- "claude-instant-v1.1-100k",
- "claude-instant-v1.0"
- ],
- "ChatAnthropic": [
- "claude-v1",
- "claude-v1-100k",
- "claude-instant-v1",
- "claude-instant-v1-100k",
- "claude-v1.3",
- "claude-v1.3-100k",
- "claude-v1.2",
- "claude-v1.0",
- "claude-instant-v1.1",
- "claude-instant-v1.1-100k",
- "claude-instant-v1.0"
- ]
- }
- }
- }
- },
- "beta": false,
- "error": null
- },
- "id": "PromptTemplate-h1IlH"
- },
- "selected": true,
- "positionAbsolute": {
- "x": 190.53285757241179,
- "y": 6.073885727980169
- },
- "dragging": false
- }
- ],
- "edges": [
- {
- "source": "ChatOpenAI-WlIXw",
- "sourceHandle": "{œbaseClassesœ:[œChatOpenAIœ,œBaseLanguageModelœ,œBaseChatModelœ,œBaseLLMœ],œdataTypeœ:œChatOpenAIœ,œidœ:œChatOpenAI-WlIXwœ}",
- "target": "LLMChain-qaGdJ",
- "targetHandle": "{œfieldNameœ:œllmœ,œidœ:œLLMChain-qaGdJœ,œinputTypesœ:null,œtypeœ:œBaseLanguageModelœ}",
- "className": "stroke-gray-900 stroke-connection",
- "id": "reactflow__edge-ChatOpenAI-WlIXw{œbaseClassesœ:[œChatOpenAIœ,œBaseLanguageModelœ,œBaseChatModelœ,œBaseLLMœ],œdataTypeœ:œChatOpenAIœ,œidœ:œChatOpenAI-WlIXwœ}-LLMChain-qaGdJ{œfieldNameœ:œllmœ,œidœ:œLLMChain-qaGdJœ,œinputTypesœ:null,œtypeœ:œBaseLanguageModelœ}",
- "selected": true,
- "animated": false,
- "style": {
- "stroke": "#555"
- },
- "data": {
- "sourceHandle": {
- "baseClasses": [
- "ChatOpenAI",
- "BaseLanguageModel",
- "BaseChatModel",
- "BaseLLM"
- ],
- "dataType": "ChatOpenAI",
- "id": "ChatOpenAI-WlIXw"
- },
- "targetHandle": {
- "fieldName": "llm",
- "id": "LLMChain-qaGdJ",
- "inputTypes": null,
- "type": "BaseLanguageModel"
- }
- }
- },
- {
- "source": "ConversationBufferMemory-WkJkh",
- "sourceHandle": "{œbaseClassesœ:[œBaseChatMemoryœ,œBaseMemoryœ,œConversationBufferMemoryœ],œdataTypeœ:œConversationBufferMemoryœ,œidœ:œConversationBufferMemory-WkJkhœ}",
- "target": "LLMChain-qaGdJ",
- "targetHandle": "{œfieldNameœ:œmemoryœ,œidœ:œLLMChain-qaGdJœ,œinputTypesœ:null,œtypeœ:œBaseMemoryœ}",
- "className": "stroke-gray-900 stroke-connection",
- "id": "reactflow__edge-ConversationBufferMemory-WkJkh{œbaseClassesœ:[œBaseChatMemoryœ,œBaseMemoryœ,œConversationBufferMemoryœ],œdataTypeœ:œConversationBufferMemoryœ,œidœ:œConversationBufferMemory-WkJkhœ}-LLMChain-qaGdJ{œfieldNameœ:œmemoryœ,œidœ:œLLMChain-qaGdJœ,œinputTypesœ:null,œtypeœ:œBaseMemoryœ}",
- "animated": false,
- "style": {
- "stroke": "#555"
- },
- "data": {
- "sourceHandle": {
- "baseClasses": [
- "BaseChatMemory",
- "BaseMemory",
- "ConversationBufferMemory"
- ],
- "dataType": "ConversationBufferMemory",
- "id": "ConversationBufferMemory-WkJkh"
- },
- "targetHandle": {
- "fieldName": "memory",
- "id": "LLMChain-qaGdJ",
- "inputTypes": null,
- "type": "BaseMemory"
- }
- },
- "selected": true
- },
- {
- "source": "PromptTemplate-h1IlH",
- "sourceHandle": "{œbaseClassesœ:[œBasePromptTemplateœ,œPromptTemplateœ,œStringPromptTemplateœ],œdataTypeœ:œPromptTemplateœ,œidœ:œPromptTemplate-h1IlHœ}",
- "target": "LLMChain-qaGdJ",
- "targetHandle": "{œfieldNameœ:œpromptœ,œidœ:œLLMChain-qaGdJœ,œinputTypesœ:null,œtypeœ:œBasePromptTemplateœ}",
- "style": {
- "stroke": "#555"
- },
- "className": "stroke-gray-900 stroke-connection",
- "animated": false,
- "id": "reactflow__edge-PromptTemplate-h1IlH{œbaseClassesœ:[œBasePromptTemplateœ,œPromptTemplateœ,œStringPromptTemplateœ],œdataTypeœ:œPromptTemplateœ,œidœ:œPromptTemplate-h1IlHœ}-LLMChain-qaGdJ{œfieldNameœ:œpromptœ,œidœ:œLLMChain-qaGdJœ,œinputTypesœ:null,œtypeœ:œBasePromptTemplateœ}",
- "data": {
- "sourceHandle": {
- "baseClasses": [
- "BasePromptTemplate",
- "PromptTemplate",
- "StringPromptTemplate"
- ],
- "dataType": "PromptTemplate",
- "id": "PromptTemplate-h1IlH"
- },
- "targetHandle": {
- "fieldName": "prompt",
- "id": "LLMChain-qaGdJ",
- "inputTypes": null,
- "type": "BasePromptTemplate"
- }
- },
- "selected": true
- }
- ],
- "viewport": {
- "x": 51.18733552370577,
- "y": 64.73969994910271,
- "zoom": 0.5175724661902371
- }
- },
- "name": "Peppy Cori",
- "description": "",
- "id": "fmpGP"
- }
- }
- },
- "id": "LLMChain-7wD4b",
- "position": {
- "x": 603.4418527758642,
- "y": 275.91705603727394
- },
- "type": "genericNode",
- "selected": true,
- "dragging": false,
- "positionAbsolute": {
- "x": 603.4418527758642,
- "y": 275.91705603727394
- }
- }
- ],
- "edges": [],
- "viewport": {
- "x": -248.3019876307386,
- "y": -114.01962984298234,
- "zoom": 0.8178072603465967
- }
- },
- "id": "70a5f5a3-53c8-4e1c-996c-d9c46ae40220"
-}
\ No newline at end of file
diff --git a/src/lfx/tests/data/vector_store_grouped.json b/src/lfx/tests/data/vector_store_grouped.json
deleted file mode 100644
index 37176fb98cec..000000000000
--- a/src/lfx/tests/data/vector_store_grouped.json
+++ /dev/null
@@ -1 +0,0 @@
-{"description":"An agent that can query a Vector Store.\nTry asking \"How do I upload examples to Langflow?\"\n\n\n\n","name":"Vector Store","data":{"nodes":[{"width":384,"height":267,"id":"VectorStoreAgent-xWgPj","type":"genericNode","position":{"x":2115.5183674856203,"y":-1277.6284872455249},"data":{"type":"VectorStoreAgent","node":{"template":{"llm":{"required":true,"placeholder":"","show":true,"multiline":false,"password":false,"name":"llm","display_name":"LLM","advanced":false,"dynamic":false,"info":"","type":"BaseLanguageModel","list":false},"vectorstoreinfo":{"required":true,"placeholder":"","show":true,"multiline":false,"password":false,"name":"vectorstoreinfo","display_name":"Vector Store Info","advanced":false,"dynamic":false,"info":"","type":"VectorStoreInfo","list":false},"_type":"vectorstore_agent"},"description":"Construct an agent from a Vector Store.","base_classes":["AgentExecutor"],"display_name":"VectorStoreAgent","documentation":""},"id":"VectorStoreAgent-xWgPj","value":null},"selected":false,"positionAbsolute":{"x":2115.5183674856203,"y":-1277.6284872455249},"dragging":false},{"width":384,"height":399,"id":"VectorStoreInfo-JbqfX","type":"genericNode","position":{"x":1553.2875394928135,"y":-1319.2113273706286},"data":{"type":"VectorStoreInfo","node":{"template":{"vectorstore":{"required":true,"placeholder":"","show":true,"multiline":false,"password":false,"name":"vectorstore","advanced":false,"dynamic":false,"info":"","type":"VectorStore","list":false},"description":{"required":true,"placeholder":"","show":true,"multiline":true,"password":false,"name":"description","advanced":false,"dynamic":false,"info":"","type":"str","list":false,"value":"Instructions to upload examples to Langflow Community Examples"},"name":{"required":true,"placeholder":"","show":true,"multiline":false,"password":false,"name":"name","advanced":false,"dynamic":false,"info":"","type":"str","list":false,"value":"UploadExamples"},"_type":"VectorStoreInfo"},"description":"Information about a 
VectorStore.","base_classes":["VectorStoreInfo"],"display_name":"VectorStoreInfo","documentation":""},"id":"VectorStoreInfo-JbqfX","value":null},"selected":false,"positionAbsolute":{"x":1553.2875394928135,"y":-1319.2113273706286},"dragging":false},{"width":384,"height":621,"id":"ChatOpenAI-sXmo2","type":"genericNode","position":{"x":1557.7805431884235,"y":-897.7091381330642},"data":{"type":"ChatOpenAI","node":{"template":{"callbacks":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"callbacks","advanced":false,"dynamic":false,"info":"","type":"langchain.callbacks.base.BaseCallbackHandler","list":true},"cache":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"cache","advanced":false,"dynamic":false,"info":"","type":"bool","list":false},"client":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"client","advanced":false,"dynamic":false,"info":"","type":"Any","list":false},"max_retries":{"required":false,"placeholder":"","show":false,"multiline":false,"value":6,"password":false,"name":"max_retries","advanced":false,"dynamic":false,"info":"","type":"int","list":false},"max_tokens":{"required":false,"placeholder":"","show":true,"multiline":false,"password":true,"name":"max_tokens","advanced":false,"dynamic":false,"info":"","type":"int","list":false,"value":""},"metadata":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"metadata","advanced":false,"dynamic":false,"info":"","type":"code","list":false},"model_kwargs":{"required":false,"placeholder":"","show":true,"multiline":false,"password":false,"name":"model_kwargs","advanced":true,"dynamic":false,"info":"","type":"code","list":false},"model_name":{"required":false,"placeholder":"","show":true,"multiline":false,"value":"gpt-3.5-turbo-0613","password":false,"options":["gpt-3.5-turbo-0613","gpt-3.5-turbo","gpt-3.5-turbo-16k-0613","gpt-3.5-turbo-16k","gpt-4-0613","gpt-4-32k-0613","gpt-4","gpt-4-32k"],"name":"model_name","advanced":false,"dynamic":false,"info":"","type":"str","list":true},"n":{"required":false,"placeholder":"","show":false,"multiline":false,"value":1,"password":false,"name":"n","advanced":false,"dynamic":false,"info":"","type":"int","list":false},"openai_api_base":{"required":false,"placeholder":"","show":true,"multiline":false,"password":false,"name":"openai_api_base","display_name":"OpenAI API Base","advanced":false,"dynamic":false,"info":"\nThe base URL of the OpenAI API. 
Defaults to https://api.openai.com/v1.\n\nYou can change this to use other APIs like JinaChat, LocalAI and Prem.\n","type":"str","list":false},"openai_api_key":{"required":false,"placeholder":"","show":true,"multiline":false,"value":"","password":true,"name":"openai_api_key","display_name":"OpenAI API Key","advanced":false,"dynamic":false,"info":"","type":"str","list":false},"openai_organization":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"openai_organization","display_name":"OpenAI Organization","advanced":false,"dynamic":false,"info":"","type":"str","list":false},"openai_proxy":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"openai_proxy","display_name":"OpenAI Proxy","advanced":false,"dynamic":false,"info":"","type":"str","list":false},"request_timeout":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"request_timeout","advanced":false,"dynamic":false,"info":"","type":"float","list":false},"streaming":{"required":false,"placeholder":"","show":false,"multiline":false,"value":false,"password":false,"name":"streaming","advanced":false,"dynamic":false,"info":"","type":"bool","list":false},"tags":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"tags","advanced":false,"dynamic":false,"info":"","type":"str","list":true},"temperature":{"required":false,"placeholder":"","show":true,"multiline":false,"value":"0.2","password":false,"name":"temperature","advanced":false,"dynamic":false,"info":"","type":"float","list":false},"tiktoken_model_name":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"tiktoken_model_name","advanced":false,"dynamic":false,"info":"","type":"str","list":false},"verbose":{"required":false,"placeholder":"","show":false,"multiline":false,"value":false,"password":false,"name":"verbose","advanced":false,"dynamic":false,"info":"","type":"bool","list":false},"_type":"ChatOpenAI"},"description":"`OpenAI` Chat large language models API.","base_classes":["ChatOpenAI","BaseLanguageModel","BaseChatModel","BaseLLM"],"display_name":"ChatOpenAI","custom_fields":{},"output_types":[],"documentation":"https://python.langchain.com/docs/modules/model_io/models/chat/integrations/openai"},"id":"ChatOpenAI-sXmo2","value":null},"selected":false,"positionAbsolute":{"x":1557.7805431884235,"y":-897.7091381330642},"dragging":false},{"width":384,"height":707,"data":{"id":"Chroma-JRSb8","type":"Chroma","node":{"output_types":[],"display_name":"group Node","documentation":"","base_classes":["Chroma","VectorStore","BaseRetriever","VectorStoreRetriever"],"description":"double click to edit description","template":{"allowed_special_OpenAIEmbeddings-YwSvx":{"required":false,"placeholder":"","show":true,"multiline":false,"value":[],"password":false,"name":"allowed_special","advanced":true,"dynamic":false,"info":"","type":"Literal'all'","list":true,"proxy":{"id":"OpenAIEmbeddings-YwSvx","field":"allowed_special"},"display_name":"Allowed Special - OpenAIEmbedding..."},"disallowed_special_OpenAIEmbeddings-YwSvx":{"required":false,"placeholder":"","show":true,"multiline":false,"value":"all","password":false,"name":"disallowed_special","advanced":true,"dynamic":false,"info":"","type":"Literal'all'","list":true,"proxy":{"id":"OpenAIEmbeddings-YwSvx","field":"disallowed_special"},"display_name":"Disallowed Special - 
OpenAIEmbedding..."},"chunk_size_OpenAIEmbeddings-YwSvx":{"required":false,"placeholder":"","show":true,"multiline":false,"value":1000,"password":false,"name":"chunk_size","advanced":true,"dynamic":false,"info":"","type":"int","list":false,"proxy":{"id":"OpenAIEmbeddings-YwSvx","field":"chunk_size"},"display_name":"Chunk Size - OpenAIEmbedding..."},"client_OpenAIEmbeddings-YwSvx":{"required":false,"placeholder":"","show":true,"multiline":false,"password":false,"name":"client","advanced":true,"dynamic":false,"info":"","type":"Any","list":false,"proxy":{"id":"OpenAIEmbeddings-YwSvx","field":"client"},"display_name":"Client - OpenAIEmbedding..."},"deployment_OpenAIEmbeddings-YwSvx":{"required":false,"placeholder":"","show":true,"multiline":false,"value":"text-embedding-ada-002","password":false,"name":"deployment","advanced":true,"dynamic":false,"info":"","type":"str","list":false,"proxy":{"id":"OpenAIEmbeddings-YwSvx","field":"deployment"},"display_name":"Deployment - OpenAIEmbedding..."},"embedding_ctx_length_OpenAIEmbeddings-YwSvx":{"required":false,"placeholder":"","show":true,"multiline":false,"value":8191,"password":false,"name":"embedding_ctx_length","advanced":true,"dynamic":false,"info":"","type":"int","list":false,"proxy":{"id":"OpenAIEmbeddings-YwSvx","field":"embedding_ctx_length"},"display_name":"Embedding Ctx Length - OpenAIEmbedding..."},"max_retries_OpenAIEmbeddings-YwSvx":{"required":false,"placeholder":"","show":true,"multiline":false,"value":6,"password":false,"name":"max_retries","advanced":true,"dynamic":false,"info":"","type":"int","list":false,"proxy":{"id":"OpenAIEmbeddings-YwSvx","field":"max_retries"},"display_name":"Max Retries - OpenAIEmbedding..."},"model_OpenAIEmbeddings-YwSvx":{"required":false,"placeholder":"","show":true,"multiline":false,"value":"text-embedding-ada-002","password":false,"name":"model","advanced":true,"dynamic":false,"info":"","type":"str","list":false,"proxy":{"id":"OpenAIEmbeddings-YwSvx","field":"model"},"display_name":"Model - OpenAIEmbedding..."},"model_kwargs_OpenAIEmbeddings-YwSvx":{"required":false,"placeholder":"","show":true,"multiline":false,"password":false,"name":"model_kwargs","advanced":true,"dynamic":false,"info":"","type":"code","list":false,"proxy":{"id":"OpenAIEmbeddings-YwSvx","field":"model_kwargs"},"display_name":"Model Kwargs - OpenAIEmbedding..."},"openai_api_base_OpenAIEmbeddings-YwSvx":{"required":false,"placeholder":"","show":true,"multiline":false,"password":true,"name":"openai_api_base","display_name":"OpenAI API Base - OpenAIEmbedding...","advanced":true,"dynamic":false,"info":"","type":"str","list":false,"proxy":{"id":"OpenAIEmbeddings-YwSvx","field":"openai_api_base"},"value":""},"openai_api_key_OpenAIEmbeddings-YwSvx":{"required":false,"placeholder":"","show":true,"multiline":false,"value":"","password":true,"name":"openai_api_key","display_name":"OpenAI API Key - OpenAIEmbedding...","advanced":true,"dynamic":false,"info":"","type":"str","list":false,"proxy":{"id":"OpenAIEmbeddings-YwSvx","field":"openai_api_key"}},"openai_api_type_OpenAIEmbeddings-YwSvx":{"required":false,"placeholder":"","show":true,"multiline":false,"password":true,"name":"openai_api_type","display_name":"OpenAI API Type - 
OpenAIEmbedding...","advanced":true,"dynamic":false,"info":"","type":"str","list":false,"proxy":{"id":"OpenAIEmbeddings-YwSvx","field":"openai_api_type"},"value":""},"openai_api_version_OpenAIEmbeddings-YwSvx":{"required":false,"placeholder":"","show":true,"multiline":false,"password":true,"name":"openai_api_version","display_name":"OpenAI API Version - OpenAIEmbedding...","advanced":true,"dynamic":false,"info":"","type":"str","list":false,"proxy":{"id":"OpenAIEmbeddings-YwSvx","field":"openai_api_version"},"value":""},"openai_organization_OpenAIEmbeddings-YwSvx":{"required":false,"placeholder":"","show":true,"multiline":false,"password":false,"name":"openai_organization","display_name":"OpenAI Organization - OpenAIEmbedding...","advanced":true,"dynamic":false,"info":"","type":"str","list":false,"proxy":{"id":"OpenAIEmbeddings-YwSvx","field":"openai_organization"}},"openai_proxy_OpenAIEmbeddings-YwSvx":{"required":false,"placeholder":"","show":true,"multiline":false,"password":false,"name":"openai_proxy","display_name":"OpenAI Proxy - OpenAIEmbedding...","advanced":true,"dynamic":false,"info":"","type":"str","list":false,"proxy":{"id":"OpenAIEmbeddings-YwSvx","field":"openai_proxy"}},"request_timeout_OpenAIEmbeddings-YwSvx":{"required":false,"placeholder":"","show":true,"multiline":false,"password":false,"name":"request_timeout","advanced":true,"dynamic":false,"info":"","type":"float","list":false,"proxy":{"id":"OpenAIEmbeddings-YwSvx","field":"request_timeout"},"display_name":"Request Timeout - OpenAIEmbedding..."},"show_progress_bar_OpenAIEmbeddings-YwSvx":{"required":false,"placeholder":"","show":true,"multiline":false,"value":false,"password":false,"name":"show_progress_bar","advanced":true,"dynamic":false,"info":"","type":"bool","list":false,"proxy":{"id":"OpenAIEmbeddings-YwSvx","field":"show_progress_bar"},"display_name":"Show Progress Bar - OpenAIEmbedding..."},"tiktoken_model_name_OpenAIEmbeddings-YwSvx":{"required":false,"placeholder":"","show":true,"multiline":false,"password":true,"name":"tiktoken_model_name","advanced":true,"dynamic":false,"info":"","type":"str","list":false,"proxy":{"id":"OpenAIEmbeddings-YwSvx","field":"tiktoken_model_name"},"display_name":"Tiktoken Model Name - OpenAIEmbedding...","value":""},"chroma_server_cors_allow_origins_Chroma-fIjxj":{"required":false,"placeholder":"","show":true,"multiline":false,"password":false,"name":"chroma_server_cors_allow_origins","display_name":"Chroma Server CORS Allow Origins - Chroma","advanced":true,"dynamic":false,"info":"","type":"str","list":true,"proxy":{"id":"Chroma-fIjxj","field":"chroma_server_cors_allow_origins"}},"chroma_server_grpc_port_Chroma-fIjxj":{"required":false,"placeholder":"","show":true,"multiline":false,"password":false,"name":"chroma_server_grpc_port","display_name":"Chroma Server GRPC Port - Chroma","advanced":true,"dynamic":false,"info":"","type":"str","list":false,"proxy":{"id":"Chroma-fIjxj","field":"chroma_server_grpc_port"}},"chroma_server_host_Chroma-fIjxj":{"required":false,"placeholder":"","show":true,"multiline":false,"password":false,"name":"chroma_server_host","display_name":"Chroma Server Host - Chroma","advanced":true,"dynamic":false,"info":"","type":"str","list":false,"proxy":{"id":"Chroma-fIjxj","field":"chroma_server_host"}},"chroma_server_http_port_Chroma-fIjxj":{"required":false,"placeholder":"","show":true,"multiline":false,"password":false,"name":"chroma_server_http_port","display_name":"Chroma Server HTTP Port - 
Chroma","advanced":true,"dynamic":false,"info":"","type":"str","list":false,"proxy":{"id":"Chroma-fIjxj","field":"chroma_server_http_port"}},"chroma_server_ssl_enabled_Chroma-fIjxj":{"required":false,"placeholder":"","show":true,"multiline":false,"value":false,"password":false,"name":"chroma_server_ssl_enabled","display_name":"Chroma Server SSL Enabled - Chroma","advanced":true,"dynamic":false,"info":"","type":"bool","list":false,"proxy":{"id":"Chroma-fIjxj","field":"chroma_server_ssl_enabled"}},"collection_name_Chroma-fIjxj":{"required":false,"placeholder":"","show":true,"multiline":false,"value":"langflow","password":false,"name":"collection_name","advanced":true,"dynamic":false,"info":"","type":"str","list":false,"proxy":{"id":"Chroma-fIjxj","field":"collection_name"},"display_name":"Collection Name - Chroma"},"persist_Chroma-fIjxj":{"required":false,"placeholder":"","show":true,"multiline":false,"value":false,"password":false,"name":"persist","display_name":"Persist - Chroma","advanced":true,"dynamic":false,"info":"","type":"bool","list":false,"proxy":{"id":"Chroma-fIjxj","field":"persist"}},"persist_directory_Chroma-fIjxj":{"required":false,"placeholder":"","show":true,"multiline":false,"password":false,"name":"persist_directory","advanced":true,"dynamic":false,"info":"","type":"str","list":false,"proxy":{"id":"Chroma-fIjxj","field":"persist_directory"},"display_name":"Persist Directory - Chroma"},"search_kwargs_Chroma-fIjxj":{"required":false,"placeholder":"","show":true,"multiline":false,"value":"{}","password":false,"name":"search_kwargs","advanced":true,"dynamic":false,"info":"","type":"code","list":false,"proxy":{"id":"Chroma-fIjxj","field":"search_kwargs"},"display_name":"Search Kwargs - Chroma"},"chunk_overlap_RecursiveCharacterTextSplitter-eXb39_RecursiveCharacterTextSplitter-SjLCC":{"required":true,"placeholder":"","show":true,"multiline":false,"value":200,"password":false,"name":"chunk_overlap","display_name":"Chunk Overlap - RecursiveCharac... - group Node","advanced":false,"dynamic":false,"info":"","type":"int","list":false,"proxy":{"id":"RecursiveCharacterTextSplitter-SjLCC","field":"chunk_overlap_RecursiveCharacterTextSplitter-eXb39"}},"chunk_size_RecursiveCharacterTextSplitter-eXb39_RecursiveCharacterTextSplitter-SjLCC":{"required":true,"placeholder":"","show":true,"multiline":false,"value":1000,"password":false,"name":"chunk_size","display_name":"Chunk Size - RecursiveCharac... - group Node","advanced":false,"dynamic":false,"info":"","type":"int","list":false,"proxy":{"id":"RecursiveCharacterTextSplitter-SjLCC","field":"chunk_size_RecursiveCharacterTextSplitter-eXb39"}},"separator_type_RecursiveCharacterTextSplitter-eXb39_RecursiveCharacterTextSplitter-SjLCC":{"required":true,"placeholder":"","show":true,"multiline":false,"value":"Text","password":false,"options":["Text","cpp","go","html","java","js","latex","markdown","php","proto","python","rst","ruby","rust","scala","sol","swift"],"name":"separator_type","display_name":"Separator Type - RecursiveCharac... - group Node","advanced":false,"dynamic":false,"info":"","type":"str","list":true,"proxy":{"id":"RecursiveCharacterTextSplitter-SjLCC","field":"separator_type_RecursiveCharacterTextSplitter-eXb39"}},"separators_RecursiveCharacterTextSplitter-eXb39_RecursiveCharacterTextSplitter-SjLCC":{"required":true,"placeholder":"","show":true,"multiline":false,"value":".","password":false,"name":"separators","display_name":"Separator - RecursiveCharac... 
- group Node","advanced":false,"dynamic":false,"info":"","type":"str","list":false,"proxy":{"id":"RecursiveCharacterTextSplitter-SjLCC","field":"separators_RecursiveCharacterTextSplitter-eXb39"}},"metadata_WebBaseLoader-LlDNv_RecursiveCharacterTextSplitter-SjLCC":{"required":true,"placeholder":"","show":true,"multiline":false,"value":"{}","password":false,"name":"metadata","display_name":"Metadata - WebBaseLoader - group Node","advanced":false,"dynamic":false,"info":"","type":"code","list":false,"proxy":{"id":"RecursiveCharacterTextSplitter-SjLCC","field":"metadata_WebBaseLoader-LlDNv"}},"web_path_WebBaseLoader-LlDNv_RecursiveCharacterTextSplitter-SjLCC":{"required":true,"placeholder":"","show":true,"multiline":false,"value":"http://docs.langflow.org/examples/how-upload-examples","password":false,"name":"web_path","display_name":"Web Page - WebBaseLoader - group Node","advanced":false,"dynamic":false,"info":"","type":"str","list":false,"proxy":{"id":"RecursiveCharacterTextSplitter-SjLCC","field":"web_path_WebBaseLoader-LlDNv"}}},"flow":{"data":{"nodes":[{"width":384,"height":359,"id":"OpenAIEmbeddings-YwSvx","type":"genericNode","position":{"x":677.2699276778915,"y":-734.4639958173494},"data":{"type":"OpenAIEmbeddings","node":{"template":{"allowed_special":{"required":false,"placeholder":"","show":true,"multiline":false,"value":[],"password":false,"name":"allowed_special","advanced":true,"dynamic":false,"info":"","type":"Literal'all'","list":true},"disallowed_special":{"required":false,"placeholder":"","show":true,"multiline":false,"value":"all","password":false,"name":"disallowed_special","advanced":true,"dynamic":false,"info":"","type":"Literal'all'","list":true},"chunk_size":{"required":false,"placeholder":"","show":true,"multiline":false,"value":1000,"password":false,"name":"chunk_size","advanced":true,"dynamic":false,"info":"","type":"int","list":false},"client":{"required":false,"placeholder":"","show":true,"multiline":false,"password":false,"name":"client","advanced":true,"dynamic":false,"info":"","type":"Any","list":false},"deployment":{"required":false,"placeholder":"","show":true,"multiline":false,"value":"text-embedding-ada-002","password":false,"name":"deployment","advanced":true,"dynamic":false,"info":"","type":"str","list":false},"embedding_ctx_length":{"required":false,"placeholder":"","show":true,"multiline":false,"value":8191,"password":false,"name":"embedding_ctx_length","advanced":true,"dynamic":false,"info":"","type":"int","list":false},"headers":{"required":false,"placeholder":"","show":false,"multiline":true,"value":"{'Authorization':\n 'Bearer '}","password":false,"name":"headers","advanced":true,"dynamic":false,"info":"","type":"Any","list":false},"max_retries":{"required":false,"placeholder":"","show":true,"multiline":false,"value":6,"password":false,"name":"max_retries","advanced":true,"dynamic":false,"info":"","type":"int","list":false},"model":{"required":false,"placeholder":"","show":true,"multiline":false,"value":"text-embedding-ada-002","password":false,"name":"model","advanced":true,"dynamic":false,"info":"","type":"str","list":false},"model_kwargs":{"required":false,"placeholder":"","show":true,"multiline":false,"password":false,"name":"model_kwargs","advanced":true,"dynamic":false,"info":"","type":"code","list":false},"openai_api_base":{"required":false,"placeholder":"","show":true,"multiline":false,"password":true,"name":"openai_api_base","display_name":"OpenAI API 
Base","advanced":true,"dynamic":false,"info":"","type":"str","list":false},"openai_api_key":{"required":false,"placeholder":"","show":true,"multiline":false,"value":"","password":true,"name":"openai_api_key","display_name":"OpenAI API Key","advanced":false,"dynamic":false,"info":"","type":"str","list":false},"openai_api_type":{"required":false,"placeholder":"","show":true,"multiline":false,"password":true,"name":"openai_api_type","display_name":"OpenAI API Type","advanced":true,"dynamic":false,"info":"","type":"str","list":false},"openai_api_version":{"required":false,"placeholder":"","show":true,"multiline":false,"password":true,"name":"openai_api_version","display_name":"OpenAI API Version","advanced":true,"dynamic":false,"info":"","type":"str","list":false},"openai_organization":{"required":false,"placeholder":"","show":true,"multiline":false,"password":false,"name":"openai_organization","display_name":"OpenAI Organization","advanced":true,"dynamic":false,"info":"","type":"str","list":false},"openai_proxy":{"required":false,"placeholder":"","show":true,"multiline":false,"password":false,"name":"openai_proxy","display_name":"OpenAI Proxy","advanced":true,"dynamic":false,"info":"","type":"str","list":false},"request_timeout":{"required":false,"placeholder":"","show":true,"multiline":false,"password":false,"name":"request_timeout","advanced":true,"dynamic":false,"info":"","type":"float","list":false},"show_progress_bar":{"required":false,"placeholder":"","show":true,"multiline":false,"value":false,"password":false,"name":"show_progress_bar","advanced":true,"dynamic":false,"info":"","type":"bool","list":false},"tiktoken_model_name":{"required":false,"placeholder":"","show":true,"multiline":false,"password":true,"name":"tiktoken_model_name","advanced":false,"dynamic":false,"info":"","type":"str","list":false},"_type":"OpenAIEmbeddings"},"description":"OpenAI embedding models.","base_classes":["OpenAIEmbeddings","Embeddings"],"display_name":"OpenAIEmbeddings","documentation":"https://python.langchain.com/docs/modules/data_connection/text_embedding/integrations/openai"},"id":"OpenAIEmbeddings-YwSvx","value":null},"selected":true,"positionAbsolute":{"x":677.2699276778915,"y":-734.4639958173494},"dragging":false},{"width":384,"height":515,"id":"Chroma-fIjxj","type":"genericNode","position":{"x":998.5929276655718,"y":-1315.4167537905012},"data":{"type":"Chroma","node":{"template":{"client":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"client","advanced":false,"dynamic":false,"info":"","type":"chromadb.Client","list":false},"client_settings":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"client_settings","advanced":false,"dynamic":false,"info":"","type":"chromadb.config.Setting","list":true},"documents":{"required":false,"placeholder":"","show":true,"multiline":false,"password":false,"name":"documents","display_name":"Documents","advanced":false,"dynamic":false,"info":"","type":"Document","list":true},"embedding":{"required":true,"placeholder":"","show":true,"multiline":false,"password":false,"name":"embedding","display_name":"Embedding","advanced":false,"dynamic":false,"info":"","type":"Embeddings","list":false},"chroma_server_cors_allow_origins":{"required":false,"placeholder":"","show":true,"multiline":false,"password":false,"name":"chroma_server_cors_allow_origins","display_name":"Chroma Server CORS Allow 
Origins","advanced":true,"dynamic":false,"info":"","type":"str","list":true},"chroma_server_grpc_port":{"required":false,"placeholder":"","show":true,"multiline":false,"password":false,"name":"chroma_server_grpc_port","display_name":"Chroma Server GRPC Port","advanced":true,"dynamic":false,"info":"","type":"str","list":false},"chroma_server_host":{"required":false,"placeholder":"","show":true,"multiline":false,"password":false,"name":"chroma_server_host","display_name":"Chroma Server Host","advanced":true,"dynamic":false,"info":"","type":"str","list":false},"chroma_server_http_port":{"required":false,"placeholder":"","show":true,"multiline":false,"password":false,"name":"chroma_server_http_port","display_name":"Chroma Server HTTP Port","advanced":true,"dynamic":false,"info":"","type":"str","list":false},"chroma_server_ssl_enabled":{"required":false,"placeholder":"","show":true,"multiline":false,"value":false,"password":false,"name":"chroma_server_ssl_enabled","display_name":"Chroma Server SSL Enabled","advanced":true,"dynamic":false,"info":"","type":"bool","list":false},"collection_metadata":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"collection_metadata","advanced":false,"dynamic":false,"info":"","type":"code","list":false},"collection_name":{"required":false,"placeholder":"","show":true,"multiline":false,"value":"langflow","password":false,"name":"collection_name","advanced":false,"dynamic":false,"info":"","type":"str","list":false},"ids":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"ids","advanced":false,"dynamic":false,"info":"","type":"str","list":true},"metadatas":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"metadatas","advanced":false,"dynamic":false,"info":"","type":"code","list":true},"persist":{"required":false,"placeholder":"","show":true,"multiline":false,"value":false,"password":false,"name":"persist","display_name":"Persist","advanced":false,"dynamic":false,"info":"","type":"bool","list":false},"persist_directory":{"required":false,"placeholder":"","show":true,"multiline":false,"password":false,"name":"persist_directory","advanced":false,"dynamic":false,"info":"","type":"str","list":false},"search_kwargs":{"required":false,"placeholder":"","show":true,"multiline":false,"value":"{}","password":false,"name":"search_kwargs","advanced":true,"dynamic":false,"info":"","type":"code","list":false},"_type":"Chroma"},"description":"Create a Chroma vectorstore from a raw documents.","base_classes":["Chroma","VectorStore","BaseRetriever","VectorStoreRetriever"],"display_name":"Chroma","custom_fields":{},"output_types":[],"documentation":"https://python.langchain.com/docs/modules/data_connection/vectorstores/integrations/chroma"},"id":"Chroma-fIjxj","value":null},"selected":true,"positionAbsolute":{"x":998.5929276655718,"y":-1315.4167537905012},"dragging":false},{"width":384,"height":707,"data":{"id":"RecursiveCharacterTextSplitter-SjLCC","type":"RecursiveCharacterTextSplitter","node":{"output_types":["Document"],"display_name":"group Node","documentation":"","base_classes":["Document"],"description":"double click to edit description","template":{"chunk_overlap_RecursiveCharacterTextSplitter-eXb39":{"required":true,"placeholder":"","show":true,"multiline":false,"value":200,"password":false,"name":"chunk_overlap","display_name":"Chunk Overlap - 
RecursiveCharac...","advanced":false,"dynamic":false,"info":"","type":"int","list":false,"proxy":{"id":"RecursiveCharacterTextSplitter-eXb39","field":"chunk_overlap"}},"chunk_size_RecursiveCharacterTextSplitter-eXb39":{"required":true,"placeholder":"","show":true,"multiline":false,"value":1000,"password":false,"name":"chunk_size","display_name":"Chunk Size - RecursiveCharac...","advanced":false,"dynamic":false,"info":"","type":"int","list":false,"proxy":{"id":"RecursiveCharacterTextSplitter-eXb39","field":"chunk_size"}},"separator_type_RecursiveCharacterTextSplitter-eXb39":{"required":true,"placeholder":"","show":true,"multiline":false,"value":"Text","password":false,"options":["Text","cpp","go","html","java","js","latex","markdown","php","proto","python","rst","ruby","rust","scala","sol","swift"],"name":"separator_type","display_name":"Separator Type - RecursiveCharac...","advanced":false,"dynamic":false,"info":"","type":"str","list":true,"proxy":{"id":"RecursiveCharacterTextSplitter-eXb39","field":"separator_type"}},"separators_RecursiveCharacterTextSplitter-eXb39":{"required":true,"placeholder":"","show":true,"multiline":false,"value":".","password":false,"name":"separators","display_name":"Separator - RecursiveCharac...","advanced":false,"dynamic":false,"info":"","type":"str","list":false,"proxy":{"id":"RecursiveCharacterTextSplitter-eXb39","field":"separators"}},"metadata_WebBaseLoader-LlDNv":{"required":true,"placeholder":"","show":true,"multiline":false,"value":"{}","password":false,"name":"metadata","display_name":"Metadata - WebBaseLoader","advanced":false,"dynamic":false,"info":"","type":"code","list":false,"proxy":{"id":"WebBaseLoader-LlDNv","field":"metadata"}},"web_path_WebBaseLoader-LlDNv":{"required":true,"placeholder":"","show":true,"multiline":false,"value":"http://docs.langflow.org/examples/how-upload-examples","password":false,"name":"web_path","display_name":"Web Page - WebBaseLoader","advanced":false,"dynamic":false,"info":"","type":"str","list":false,"proxy":{"id":"WebBaseLoader-LlDNv","field":"web_path"}}},"flow":{"data":{"nodes":[{"width":384,"height":575,"id":"RecursiveCharacterTextSplitter-eXb39","type":"genericNode","position":{"x":543.3651467111342,"y":-1373.3607842112438},"data":{"type":"RecursiveCharacterTextSplitter","node":{"template":{"documents":{"required":true,"placeholder":"","show":true,"multiline":false,"password":false,"name":"documents","advanced":false,"dynamic":false,"info":"","type":"Document","list":true},"chunk_overlap":{"required":true,"placeholder":"","show":true,"multiline":false,"value":200,"password":false,"name":"chunk_overlap","display_name":"Chunk Overlap","advanced":false,"dynamic":false,"info":"","type":"int","list":false},"chunk_size":{"required":true,"placeholder":"","show":true,"multiline":false,"value":1000,"password":false,"name":"chunk_size","display_name":"Chunk Size","advanced":false,"dynamic":false,"info":"","type":"int","list":false},"separator_type":{"required":true,"placeholder":"","show":true,"multiline":false,"value":"Text","password":false,"options":["Text","cpp","go","html","java","js","latex","markdown","php","proto","python","rst","ruby","rust","scala","sol","swift"],"name":"separator_type","display_name":"Separator 
Type","advanced":false,"dynamic":false,"info":"","type":"str","list":true},"separators":{"required":true,"placeholder":"","show":true,"multiline":false,"value":".","password":false,"name":"separators","display_name":"Separator","advanced":false,"dynamic":false,"info":"","type":"str","list":false},"_type":"RecursiveCharacterTextSplitter"},"description":"Splitting text by recursively look at characters.","base_classes":["Document"],"display_name":"RecursiveCharacterTextSplitter","custom_fields":{},"output_types":["Document"],"documentation":"https://python.langchain.com/docs/modules/data_connection/document_transformers/text_splitters/recursive_text_splitter"},"id":"RecursiveCharacterTextSplitter-eXb39","value":null},"selected":true,"positionAbsolute":{"x":543.3651467111342,"y":-1373.3607842112438},"dragging":false},{"width":384,"height":379,"id":"WebBaseLoader-LlDNv","type":"genericNode","position":{"x":60.77712301470575,"y":-1345.575885746874},"data":{"type":"WebBaseLoader","node":{"template":{"metadata":{"required":true,"placeholder":"","show":true,"multiline":false,"value":"{}","password":false,"name":"metadata","display_name":"Metadata","advanced":false,"dynamic":false,"info":"","type":"code","list":false},"web_path":{"required":true,"placeholder":"","show":true,"multiline":false,"value":"http://docs.langflow.org/examples/how-upload-examples","password":false,"name":"web_path","display_name":"Web Page","advanced":false,"dynamic":false,"info":"","type":"str","list":false},"_type":"WebBaseLoader"},"description":"Load HTML pages using `urllib` and parse them with `BeautifulSoup'.","base_classes":["Document"],"display_name":"WebBaseLoader","custom_fields":{},"output_types":["Document"],"documentation":"https://python.langchain.com/docs/modules/data_connection/document_loaders/integrations/web_base"},"id":"WebBaseLoader-LlDNv","value":null},"selected":true,"positionAbsolute":{"x":60.77712301470575,"y":-1345.575885746874},"dragging":false}],"edges":[{"source":"WebBaseLoader-LlDNv","sourceHandle":"{œbaseClassesœ:[œDocumentœ],œdataTypeœ:œWebBaseLoaderœ,œidœ:œWebBaseLoader-LlDNvœ}","target":"RecursiveCharacterTextSplitter-eXb39","targetHandle":"{œfieldNameœ:œdocumentsœ,œidœ:œRecursiveCharacterTextSplitter-eXb39œ,œinputTypesœ:null,œtypeœ:œDocumentœ}","style":{"stroke":"#555"},"className":"stroke-gray-900 stroke-connection","animated":false,"id":"reactflow__edge-WebBaseLoader-LlDNv{œbaseClassesœ:[œDocumentœ],œdataTypeœ:œWebBaseLoaderœ,œidœ:œWebBaseLoader-LlDNvœ}-RecursiveCharacterTextSplitter-eXb39{œfieldNameœ:œdocumentsœ,œidœ:œRecursiveCharacterTextSplitter-eXb39œ,œinputTypesœ:null,œtypeœ:œDocumentœ}","selected":true,"data":{"sourceHandle":{"baseClasses":["Document"],"dataType":"WebBaseLoader","id":"WebBaseLoader-LlDNv"},"targetHandle":{"fieldName":"documents","id":"RecursiveCharacterTextSplitter-eXb39","inputTypes":null,"type":"Document"}}}],"viewport":{"x":171.77566864238327,"y":1008.7716987035463,"zoom":0.6091751241035919}},"name":"Giggly 
Aryabhata","description":"","id":"oms5B"}}},"id":"RecursiveCharacterTextSplitter-SjLCC","position":{"x":459.66128620284064,"y":-1502.284409630862},"type":"genericNode","selected":true,"positionAbsolute":{"x":459.66128620284064,"y":-1502.284409630862},"dragging":false}],"edges":[{"source":"OpenAIEmbeddings-YwSvx","sourceHandle":"{œbaseClassesœ:[œOpenAIEmbeddingsœ,œEmbeddingsœ],œdataTypeœ:œOpenAIEmbeddingsœ,œidœ:œOpenAIEmbeddings-YwSvxœ}","target":"Chroma-fIjxj","targetHandle":"{œfieldNameœ:œembeddingœ,œidœ:œChroma-fIjxjœ,œinputTypesœ:null,œtypeœ:œEmbeddingsœ}","style":{"stroke":"#555"},"className":"stroke-gray-900 stroke-connection","animated":false,"id":"reactflow__edge-OpenAIEmbeddings-YwSvx{œbaseClassesœ:[œOpenAIEmbeddingsœ,œEmbeddingsœ],œdataTypeœ:œOpenAIEmbeddingsœ,œidœ:œOpenAIEmbeddings-YwSvxœ}-Chroma-fIjxj{œfieldNameœ:œembeddingœ,œidœ:œChroma-fIjxjœ,œinputTypesœ:null,œtypeœ:œEmbeddingsœ}","data":{"sourceHandle":{"baseClasses":["OpenAIEmbeddings","Embeddings"],"dataType":"OpenAIEmbeddings","id":"OpenAIEmbeddings-YwSvx"},"targetHandle":{"fieldName":"embedding","id":"Chroma-fIjxj","inputTypes":null,"type":"Embeddings"}},"selected":true},{"source":"RecursiveCharacterTextSplitter-SjLCC","sourceHandle":"{œbaseClassesœ:[œDocumentœ],œdataTypeœ:œRecursiveCharacterTextSplitterœ,œidœ:œRecursiveCharacterTextSplitter-SjLCCœ}","target":"Chroma-fIjxj","targetHandle":"{œfieldNameœ:œdocumentsœ,œidœ:œChroma-fIjxjœ,œinputTypesœ:null,œtypeœ:œDocumentœ}","data":{"targetHandle":{"fieldName":"documents","id":"Chroma-fIjxj","inputTypes":null,"type":"Document"},"sourceHandle":{"baseClasses":["Document"],"dataType":"RecursiveCharacterTextSplitter","id":"RecursiveCharacterTextSplitter-SjLCC"}},"style":{"stroke":"#555"},"className":"stroke-foreground stroke-connection","animated":false,"id":"reactflow__edge-RecursiveCharacterTextSplitter-SjLCC{œbaseClassesœ:[œDocumentœ],œdataTypeœ:œRecursiveCharacterTextSplitterœ,œidœ:œRecursiveCharacterTextSplitter-SjLCCœ}-Chroma-fIjxj{œfieldNameœ:œdocumentsœ,œidœ:œChroma-fIjxjœ,œinputTypesœ:null,œtypeœ:œDocumentœ}","selected":true}],"viewport":{"x":75.85425902478954,"y":794.442518380995,"zoom":0.3834017786930542}},"name":"Serene Noyce","description":"","id":"Tfctp"}}},"id":"Chroma-JRSb8","position":{"x":910.0668563050097,"y":-1379.672298924546},"type":"genericNode","selected":true,"positionAbsolute":{"x":910.0668563050097,"y":-1379.672298924546},"dragging":false}],"edges":[{"source":"VectorStoreInfo-JbqfX","sourceHandle":"{œbaseClassesœ:[œVectorStoreInfoœ],œdataTypeœ:œVectorStoreInfoœ,œidœ:œVectorStoreInfo-JbqfXœ}","target":"VectorStoreAgent-xWgPj","targetHandle":"{œfieldNameœ:œvectorstoreinfoœ,œidœ:œVectorStoreAgent-xWgPjœ,œinputTypesœ:null,œtypeœ:œVectorStoreInfoœ}","className":"stroke-gray-900 
stroke-connection","id":"reactflow__edge-VectorStoreInfo-JbqfX{œbaseClassesœ:[œVectorStoreInfoœ],œdataTypeœ:œVectorStoreInfoœ,œidœ:œVectorStoreInfo-JbqfXœ}-VectorStoreAgent-xWgPj{œfieldNameœ:œvectorstoreinfoœ,œidœ:œVectorStoreAgent-xWgPjœ,œinputTypesœ:null,œtypeœ:œVectorStoreInfoœ}","selected":false,"style":{"stroke":"#555"},"animated":false,"data":{"sourceHandle":{"baseClasses":["VectorStoreInfo"],"dataType":"VectorStoreInfo","id":"VectorStoreInfo-JbqfX"},"targetHandle":{"fieldName":"vectorstoreinfo","id":"VectorStoreAgent-xWgPj","inputTypes":null,"type":"VectorStoreInfo"}}},{"source":"ChatOpenAI-sXmo2","sourceHandle":"{œbaseClassesœ:[œChatOpenAIœ,œBaseLanguageModelœ,œBaseChatModelœ,œBaseLLMœ],œdataTypeœ:œChatOpenAIœ,œidœ:œChatOpenAI-sXmo2œ}","target":"VectorStoreAgent-xWgPj","targetHandle":"{œfieldNameœ:œllmœ,œidœ:œVectorStoreAgent-xWgPjœ,œinputTypesœ:null,œtypeœ:œBaseLanguageModelœ}","style":{"stroke":"#555"},"className":"stroke-gray-900 stroke-connection","animated":false,"id":"reactflow__edge-ChatOpenAI-sXmo2{œbaseClassesœ:[œChatOpenAIœ,œBaseLanguageModelœ,œBaseChatModelœ,œBaseLLMœ],œdataTypeœ:œChatOpenAIœ,œidœ:œChatOpenAI-sXmo2œ}-VectorStoreAgent-xWgPj{œfieldNameœ:œllmœ,œidœ:œVectorStoreAgent-xWgPjœ,œinputTypesœ:null,œtypeœ:œBaseLanguageModelœ}","selected":false,"data":{"sourceHandle":{"baseClasses":["ChatOpenAI","BaseLanguageModel","BaseChatModel","BaseLLM"],"dataType":"ChatOpenAI","id":"ChatOpenAI-sXmo2"},"targetHandle":{"fieldName":"llm","id":"VectorStoreAgent-xWgPj","inputTypes":null,"type":"BaseLanguageModel"}}},{"source":"Chroma-JRSb8","sourceHandle":"{œbaseClassesœ:[œChromaœ,œVectorStoreœ,œBaseRetrieverœ,œVectorStoreRetrieverœ],œdataTypeœ:œChromaœ,œidœ:œChroma-JRSb8œ}","target":"VectorStoreInfo-JbqfX","targetHandle":"{œfieldNameœ:œvectorstoreœ,œidœ:œVectorStoreInfo-JbqfXœ,œinputTypesœ:null,œtypeœ:œVectorStoreœ}","data":{"targetHandle":{"fieldName":"vectorstore","id":"VectorStoreInfo-JbqfX","inputTypes":null,"type":"VectorStore"},"sourceHandle":{"baseClasses":["Chroma","VectorStore","BaseRetriever","VectorStoreRetriever"],"dataType":"Chroma","id":"Chroma-JRSb8"}},"style":{"stroke":"#555"},"className":"stroke-foreground stroke-connection","animated":false,"id":"reactflow__edge-Chroma-JRSb8{œbaseClassesœ:[œChromaœ,œVectorStoreœ,œBaseRetrieverœ,œVectorStoreRetrieverœ],œdataTypeœ:œChromaœ,œidœ:œChroma-JRSb8œ}-VectorStoreInfo-JbqfX{œfieldNameœ:œvectorstoreœ,œidœ:œVectorStoreInfo-JbqfXœ,œinputTypesœ:null,œtypeœ:œVectorStoreœ}"}],"viewport":{"x":-514.4089400658404,"y":1037.1605824094304,"zoom":0.6583510309092263}},"id":"33f0dc0b-39f1-4573-9811-a92b1ea51634"}
\ No newline at end of file
diff --git a/src/lfx/tests/unit/custom/__init__.py b/src/lfx/tests/unit/custom/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/src/lfx/tests/unit/custom/component/__init__.py b/src/lfx/tests/unit/custom/component/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/src/lfx/tests/unit/custom/custom_component/__init__.py b/src/lfx/tests/unit/custom/custom_component/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/src/lfx/tests/unit/events/__init__.py b/src/lfx/tests/unit/events/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/src/lfx/tests/unit/graph/__init__.py b/src/lfx/tests/unit/graph/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/src/lfx/tests/unit/graph/edge/__init__.py b/src/lfx/tests/unit/graph/edge/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/src/lfx/tests/unit/graph/graph/__init__.py b/src/lfx/tests/unit/graph/graph/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/src/lfx/tests/unit/graph/graph/state/__init__.py b/src/lfx/tests/unit/graph/graph/state/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/src/lfx/tests/unit/graph/vertex/__init__.py b/src/lfx/tests/unit/graph/vertex/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/src/lfx/tests/unit/inputs/__init__.py b/src/lfx/tests/unit/inputs/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/src/lfx/tests/unit/memory/__init__.py b/src/lfx/tests/unit/memory/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/src/lfx/tests/unit/schema/__init__.py b/src/lfx/tests/unit/schema/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/src/packages/README.md b/src/packages/README.md
new file mode 100644
index 000000000000..99a9665dcd0c
--- /dev/null
+++ b/src/packages/README.md
@@ -0,0 +1,115 @@
+# Langflow Packages
+
+This directory contains the modular Python packages that make up Langflow:
+
+## Structure
+
+### `/packages/core` (lfx)
+- **Package**: `lfx` (Langflow Executor)
+- **Purpose**: Lightweight CLI tool for executing and serving Langflow AI flows
+- **Key Features**:
+ - Core graph processing and execution
+ - Component system
+ - CLI interface
+ - Template management
+ - Serialization and I/O operations
+
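+A minimal sketch of using `lfx` standalone (the subcommands are assumptions based on the description above — check `lfx --help` — and `my_flow.json` is a placeholder):
+
+```bash
+# Execute a flow export directly, without the full Langflow web application
+lfx run my_flow.json
+
+# Serve the same flow as an API
+lfx serve my_flow.json
+```
+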
+### `/packages/base` (langflow-base)
+- **Package**: `langflow-base`
+- **Purpose**: Base Langflow package with web application
+- **Key Features**:
+ - FastAPI web application
+ - API endpoints
+ - Database models and migrations (Alembic)
+ - Services layer
+ - Authentication and security
+ - Integration with the LangChain ecosystem
+
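+The base package is what the `langflow` CLI starts; a typical local invocation (host/port flags shown with Langflow's usual defaults):
+
+```bash
+# Start the FastAPI web application
+langflow run --host 127.0.0.1 --port 7860
+```
+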
+### `/packages/langflow`
+- **Package**: `langflow`
+- **Purpose**: Main orchestration package
+- **Key Features**:
+ - Version management
+ - Package coordination
+ - Entry point for full Langflow application
+
+## Dependencies
+
+```
+langflow (main package)
+ ├── langflow-base (web application and services)
+ │ └── lfx (core execution engine)
+ └── lfx (can also be used standalone)
+```
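+
+In practice, installers can choose their footprint (published package names assumed from the structure above):
+
+```bash
+# Lean, execution-only runtime
+pip install lfx
+
+# Full application; pulls in langflow-base, which in turn pulls in lfx
+pip install langflow
+```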
+
+## Development
+
+Each package has its own `pyproject.toml` for dependency management and can be developed independently while maintaining clear interfaces between packages.
+
+### Building Packages
+
+```bash
+# Build individual packages
+cd src/packages/core && hatch build
+cd src/packages/base && hatch build
+cd src/packages/langflow && hatch build
+```
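+
+By default, `hatch build` writes each package's sdist and wheel to its local `dist/` directory:
+
+```bash
+cd src/packages/core
+hatch build
+ls dist/  # an sdist (.tar.gz) and a wheel (.whl); exact filenames vary by version
+```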
+
+### Testing
+
+```bash
+# Run tests for individual packages
+cd src/packages/core && pytest tests/
+cd src/packages/base && pytest # Tests location TBD
+```
+
+## Migration Notes
+
+This structure was migrated from:
+- `src/lfx/` → `src/packages/core/`
+- `src/backend/base/` → `src/packages/base/`
+- `src/backend/langflow/version/` → `src/packages/langflow/version/`
+
+The namespace package compatibility layer in `langflow/__init__.py` ensures backward compatibility with existing imports.
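+
+A quick sanity check of that layer (assuming the package is installed, e.g. via an editable install):
+
+```bash
+# Legacy import paths should still resolve after the directory move
+python -c "import langflow; print(langflow.__file__)"
+```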
\ No newline at end of file
diff --git a/src/backend/base/README.md b/src/packages/base/README.md
similarity index 100%
rename from src/backend/base/README.md
rename to src/packages/base/README.md
diff --git a/src/backend/base/langflow/__init__.py b/src/packages/base/langflow/__init__.py
similarity index 100%
rename from src/backend/base/langflow/__init__.py
rename to src/packages/base/langflow/__init__.py
diff --git a/src/backend/base/langflow/__main__.py b/src/packages/base/langflow/__main__.py
similarity index 100%
rename from src/backend/base/langflow/__main__.py
rename to src/packages/base/langflow/__main__.py
diff --git a/src/backend/base/langflow/alembic.ini b/src/packages/base/langflow/alembic.ini
similarity index 100%
rename from src/backend/base/langflow/alembic.ini
rename to src/packages/base/langflow/alembic.ini
diff --git a/src/backend/base/langflow/alembic/README b/src/packages/base/langflow/alembic/README
similarity index 100%
rename from src/backend/base/langflow/alembic/README
rename to src/packages/base/langflow/alembic/README
diff --git a/src/backend/base/langflow/alembic/env.py b/src/packages/base/langflow/alembic/env.py
similarity index 100%
rename from src/backend/base/langflow/alembic/env.py
rename to src/packages/base/langflow/alembic/env.py
diff --git a/src/backend/base/langflow/alembic/script.py.mako b/src/packages/base/langflow/alembic/script.py.mako
similarity index 100%
rename from src/backend/base/langflow/alembic/script.py.mako
rename to src/packages/base/langflow/alembic/script.py.mako
diff --git a/src/backend/base/langflow/alembic/versions/006b3990db50_add_unique_constraints.py b/src/packages/base/langflow/alembic/versions/006b3990db50_add_unique_constraints.py
similarity index 100%
rename from src/backend/base/langflow/alembic/versions/006b3990db50_add_unique_constraints.py
rename to src/packages/base/langflow/alembic/versions/006b3990db50_add_unique_constraints.py
diff --git a/src/backend/base/langflow/alembic/versions/012fb73ac359_add_folder_table.py b/src/packages/base/langflow/alembic/versions/012fb73ac359_add_folder_table.py
similarity index 100%
rename from src/backend/base/langflow/alembic/versions/012fb73ac359_add_folder_table.py
rename to src/packages/base/langflow/alembic/versions/012fb73ac359_add_folder_table.py
diff --git a/src/backend/base/langflow/alembic/versions/0882f9657f22_encrypt_existing_mcp_auth_settings_.py b/src/packages/base/langflow/alembic/versions/0882f9657f22_encrypt_existing_mcp_auth_settings_.py
similarity index 100%
rename from src/backend/base/langflow/alembic/versions/0882f9657f22_encrypt_existing_mcp_auth_settings_.py
rename to src/packages/base/langflow/alembic/versions/0882f9657f22_encrypt_existing_mcp_auth_settings_.py
diff --git a/src/backend/base/langflow/alembic/versions/0ae3a2674f32_update_the_columns_that_need_to_change_.py b/src/packages/base/langflow/alembic/versions/0ae3a2674f32_update_the_columns_that_need_to_change_.py
similarity index 100%
rename from src/backend/base/langflow/alembic/versions/0ae3a2674f32_update_the_columns_that_need_to_change_.py
rename to src/packages/base/langflow/alembic/versions/0ae3a2674f32_update_the_columns_that_need_to_change_.py
diff --git a/src/backend/base/langflow/alembic/versions/0b8757876a7c_.py b/src/packages/base/langflow/alembic/versions/0b8757876a7c_.py
similarity index 100%
rename from src/backend/base/langflow/alembic/versions/0b8757876a7c_.py
rename to src/packages/base/langflow/alembic/versions/0b8757876a7c_.py
diff --git a/src/backend/base/langflow/alembic/versions/0d60fcbd4e8e_create_vertex_builds_table.py b/src/packages/base/langflow/alembic/versions/0d60fcbd4e8e_create_vertex_builds_table.py
similarity index 100%
rename from src/backend/base/langflow/alembic/versions/0d60fcbd4e8e_create_vertex_builds_table.py
rename to src/packages/base/langflow/alembic/versions/0d60fcbd4e8e_create_vertex_builds_table.py
diff --git a/src/backend/base/langflow/alembic/versions/1a110b568907_replace_credential_table_with_variable.py b/src/packages/base/langflow/alembic/versions/1a110b568907_replace_credential_table_with_variable.py
similarity index 100%
rename from src/backend/base/langflow/alembic/versions/1a110b568907_replace_credential_table_with_variable.py
rename to src/packages/base/langflow/alembic/versions/1a110b568907_replace_credential_table_with_variable.py
diff --git a/src/backend/base/langflow/alembic/versions/1b8b740a6fa3_remove_fk_constraint_in_message_.py b/src/packages/base/langflow/alembic/versions/1b8b740a6fa3_remove_fk_constraint_in_message_.py
similarity index 100%
rename from src/backend/base/langflow/alembic/versions/1b8b740a6fa3_remove_fk_constraint_in_message_.py
rename to src/packages/base/langflow/alembic/versions/1b8b740a6fa3_remove_fk_constraint_in_message_.py
diff --git a/src/backend/base/langflow/alembic/versions/1c79524817ed_add_unique_constraints_per_user_in_.py b/src/packages/base/langflow/alembic/versions/1c79524817ed_add_unique_constraints_per_user_in_.py
similarity index 100%
rename from src/backend/base/langflow/alembic/versions/1c79524817ed_add_unique_constraints_per_user_in_.py
rename to src/packages/base/langflow/alembic/versions/1c79524817ed_add_unique_constraints_per_user_in_.py
diff --git a/src/backend/base/langflow/alembic/versions/1cb603706752_modify_uniqueness_constraint_on_file_.py b/src/packages/base/langflow/alembic/versions/1cb603706752_modify_uniqueness_constraint_on_file_.py
similarity index 100%
rename from src/backend/base/langflow/alembic/versions/1cb603706752_modify_uniqueness_constraint_on_file_.py
rename to src/packages/base/langflow/alembic/versions/1cb603706752_modify_uniqueness_constraint_on_file_.py
diff --git a/src/backend/base/langflow/alembic/versions/1d90f8a0efe1_update_description_columns_type.py b/src/packages/base/langflow/alembic/versions/1d90f8a0efe1_update_description_columns_type.py
similarity index 100%
rename from src/backend/base/langflow/alembic/versions/1d90f8a0efe1_update_description_columns_type.py
rename to src/packages/base/langflow/alembic/versions/1d90f8a0efe1_update_description_columns_type.py
diff --git a/src/backend/base/langflow/alembic/versions/1eab2c3eb45e_event_error.py b/src/packages/base/langflow/alembic/versions/1eab2c3eb45e_event_error.py
similarity index 100%
rename from src/backend/base/langflow/alembic/versions/1eab2c3eb45e_event_error.py
rename to src/packages/base/langflow/alembic/versions/1eab2c3eb45e_event_error.py
diff --git a/src/backend/base/langflow/alembic/versions/1ef9c4f3765d_.py b/src/packages/base/langflow/alembic/versions/1ef9c4f3765d_.py
similarity index 100%
rename from src/backend/base/langflow/alembic/versions/1ef9c4f3765d_.py
rename to src/packages/base/langflow/alembic/versions/1ef9c4f3765d_.py
diff --git a/src/backend/base/langflow/alembic/versions/1f4d6df60295_add_default_fields_column.py b/src/packages/base/langflow/alembic/versions/1f4d6df60295_add_default_fields_column.py
similarity index 100%
rename from src/backend/base/langflow/alembic/versions/1f4d6df60295_add_default_fields_column.py
rename to src/packages/base/langflow/alembic/versions/1f4d6df60295_add_default_fields_column.py
diff --git a/src/backend/base/langflow/alembic/versions/260dbcc8b680_adds_tables.py b/src/packages/base/langflow/alembic/versions/260dbcc8b680_adds_tables.py
similarity index 100%
rename from src/backend/base/langflow/alembic/versions/260dbcc8b680_adds_tables.py
rename to src/packages/base/langflow/alembic/versions/260dbcc8b680_adds_tables.py
diff --git a/src/backend/base/langflow/alembic/versions/29fe8f1f806b_add_missing_index.py b/src/packages/base/langflow/alembic/versions/29fe8f1f806b_add_missing_index.py
similarity index 100%
rename from src/backend/base/langflow/alembic/versions/29fe8f1f806b_add_missing_index.py
rename to src/packages/base/langflow/alembic/versions/29fe8f1f806b_add_missing_index.py
diff --git a/src/backend/base/langflow/alembic/versions/2ac71eb9c3ae_adds_credential_table.py b/src/packages/base/langflow/alembic/versions/2ac71eb9c3ae_adds_credential_table.py
similarity index 100%
rename from src/backend/base/langflow/alembic/versions/2ac71eb9c3ae_adds_credential_table.py
rename to src/packages/base/langflow/alembic/versions/2ac71eb9c3ae_adds_credential_table.py
diff --git a/src/backend/base/langflow/alembic/versions/3162e83e485f_add_auth_settings_to_folder_and_merge.py b/src/packages/base/langflow/alembic/versions/3162e83e485f_add_auth_settings_to_folder_and_merge.py
similarity index 100%
rename from src/backend/base/langflow/alembic/versions/3162e83e485f_add_auth_settings_to_folder_and_merge.py
rename to src/packages/base/langflow/alembic/versions/3162e83e485f_add_auth_settings_to_folder_and_merge.py
diff --git a/src/backend/base/langflow/alembic/versions/3bb0ddf32dfb_add_unique_constraints_per_user_in_flow_.py b/src/packages/base/langflow/alembic/versions/3bb0ddf32dfb_add_unique_constraints_per_user_in_flow_.py
similarity index 100%
rename from src/backend/base/langflow/alembic/versions/3bb0ddf32dfb_add_unique_constraints_per_user_in_flow_.py
rename to src/packages/base/langflow/alembic/versions/3bb0ddf32dfb_add_unique_constraints_per_user_in_flow_.py
diff --git a/src/backend/base/langflow/alembic/versions/4e5980a44eaa_fix_date_times_again.py b/src/packages/base/langflow/alembic/versions/4e5980a44eaa_fix_date_times_again.py
similarity index 100%
rename from src/backend/base/langflow/alembic/versions/4e5980a44eaa_fix_date_times_again.py
rename to src/packages/base/langflow/alembic/versions/4e5980a44eaa_fix_date_times_again.py
diff --git a/src/backend/base/langflow/alembic/versions/58b28437a398_modify_nullable.py b/src/packages/base/langflow/alembic/versions/58b28437a398_modify_nullable.py
similarity index 100%
rename from src/backend/base/langflow/alembic/versions/58b28437a398_modify_nullable.py
rename to src/packages/base/langflow/alembic/versions/58b28437a398_modify_nullable.py
diff --git a/src/backend/base/langflow/alembic/versions/5ace73a7f223_new_remove_table_upgrade_op.py b/src/packages/base/langflow/alembic/versions/5ace73a7f223_new_remove_table_upgrade_op.py
similarity index 100%
rename from src/backend/base/langflow/alembic/versions/5ace73a7f223_new_remove_table_upgrade_op.py
rename to src/packages/base/langflow/alembic/versions/5ace73a7f223_new_remove_table_upgrade_op.py
diff --git a/src/backend/base/langflow/alembic/versions/631faacf5da2_add_webhook_columns.py b/src/packages/base/langflow/alembic/versions/631faacf5da2_add_webhook_columns.py
similarity index 100%
rename from src/backend/base/langflow/alembic/versions/631faacf5da2_add_webhook_columns.py
rename to src/packages/base/langflow/alembic/versions/631faacf5da2_add_webhook_columns.py
diff --git a/src/backend/base/langflow/alembic/versions/63b9c451fd30_add_icon_and_icon_bg_color_to_flow.py b/src/packages/base/langflow/alembic/versions/63b9c451fd30_add_icon_and_icon_bg_color_to_flow.py
similarity index 100%
rename from src/backend/base/langflow/alembic/versions/63b9c451fd30_add_icon_and_icon_bg_color_to_flow.py
rename to src/packages/base/langflow/alembic/versions/63b9c451fd30_add_icon_and_icon_bg_color_to_flow.py
diff --git a/src/backend/base/langflow/alembic/versions/66f72f04a1de_add_mcp_support_with_project_settings_.py b/src/packages/base/langflow/alembic/versions/66f72f04a1de_add_mcp_support_with_project_settings_.py
similarity index 100%
rename from src/backend/base/langflow/alembic/versions/66f72f04a1de_add_mcp_support_with_project_settings_.py
rename to src/packages/base/langflow/alembic/versions/66f72f04a1de_add_mcp_support_with_project_settings_.py
diff --git a/src/backend/base/langflow/alembic/versions/67cc006d50bf_add_profile_image_column.py b/src/packages/base/langflow/alembic/versions/67cc006d50bf_add_profile_image_column.py
similarity index 100%
rename from src/backend/base/langflow/alembic/versions/67cc006d50bf_add_profile_image_column.py
rename to src/packages/base/langflow/alembic/versions/67cc006d50bf_add_profile_image_column.py
diff --git a/src/backend/base/langflow/alembic/versions/6e7b581b5648_fix_nullable.py b/src/packages/base/langflow/alembic/versions/6e7b581b5648_fix_nullable.py
similarity index 100%
rename from src/backend/base/langflow/alembic/versions/6e7b581b5648_fix_nullable.py
rename to src/packages/base/langflow/alembic/versions/6e7b581b5648_fix_nullable.py
diff --git a/src/backend/base/langflow/alembic/versions/7843803a87b5_store_updates.py b/src/packages/base/langflow/alembic/versions/7843803a87b5_store_updates.py
similarity index 100%
rename from src/backend/base/langflow/alembic/versions/7843803a87b5_store_updates.py
rename to src/packages/base/langflow/alembic/versions/7843803a87b5_store_updates.py
diff --git a/src/backend/base/langflow/alembic/versions/79e675cb6752_change_datetime_type.py b/src/packages/base/langflow/alembic/versions/79e675cb6752_change_datetime_type.py
similarity index 100%
rename from src/backend/base/langflow/alembic/versions/79e675cb6752_change_datetime_type.py
rename to src/packages/base/langflow/alembic/versions/79e675cb6752_change_datetime_type.py
diff --git a/src/backend/base/langflow/alembic/versions/7d2162acc8b2_adds_updated_at_and_folder_cols.py b/src/packages/base/langflow/alembic/versions/7d2162acc8b2_adds_updated_at_and_folder_cols.py
similarity index 100%
rename from src/backend/base/langflow/alembic/versions/7d2162acc8b2_adds_updated_at_and_folder_cols.py
rename to src/packages/base/langflow/alembic/versions/7d2162acc8b2_adds_updated_at_and_folder_cols.py
diff --git a/src/backend/base/langflow/alembic/versions/90be8e2ed91e_create_transactions_table.py b/src/packages/base/langflow/alembic/versions/90be8e2ed91e_create_transactions_table.py
similarity index 100%
rename from src/backend/base/langflow/alembic/versions/90be8e2ed91e_create_transactions_table.py
rename to src/packages/base/langflow/alembic/versions/90be8e2ed91e_create_transactions_table.py
diff --git a/src/backend/base/langflow/alembic/versions/93e2705fa8d6_add_column_save_path_to_flow.py b/src/packages/base/langflow/alembic/versions/93e2705fa8d6_add_column_save_path_to_flow.py
similarity index 100%
rename from src/backend/base/langflow/alembic/versions/93e2705fa8d6_add_column_save_path_to_flow.py
rename to src/packages/base/langflow/alembic/versions/93e2705fa8d6_add_column_save_path_to_flow.py
diff --git a/src/backend/base/langflow/alembic/versions/a72f5cf9c2f9_add_endpoint_name_col.py b/src/packages/base/langflow/alembic/versions/a72f5cf9c2f9_add_endpoint_name_col.py
similarity index 100%
rename from src/backend/base/langflow/alembic/versions/a72f5cf9c2f9_add_endpoint_name_col.py
rename to src/packages/base/langflow/alembic/versions/a72f5cf9c2f9_add_endpoint_name_col.py
diff --git a/src/backend/base/langflow/alembic/versions/b2fa308044b5_add_unique_constraints.py b/src/packages/base/langflow/alembic/versions/b2fa308044b5_add_unique_constraints.py
similarity index 100%
rename from src/backend/base/langflow/alembic/versions/b2fa308044b5_add_unique_constraints.py
rename to src/packages/base/langflow/alembic/versions/b2fa308044b5_add_unique_constraints.py
diff --git a/src/backend/base/langflow/alembic/versions/bc2f01c40e4a_new_fixes.py b/src/packages/base/langflow/alembic/versions/bc2f01c40e4a_new_fixes.py
similarity index 100%
rename from src/backend/base/langflow/alembic/versions/bc2f01c40e4a_new_fixes.py
rename to src/packages/base/langflow/alembic/versions/bc2f01c40e4a_new_fixes.py
diff --git a/src/backend/base/langflow/alembic/versions/c153816fd85f_set_name_and_value_to_not_nullable.py b/src/packages/base/langflow/alembic/versions/c153816fd85f_set_name_and_value_to_not_nullable.py
similarity index 100%
rename from src/backend/base/langflow/alembic/versions/c153816fd85f_set_name_and_value_to_not_nullable.py
rename to src/packages/base/langflow/alembic/versions/c153816fd85f_set_name_and_value_to_not_nullable.py
diff --git a/src/backend/base/langflow/alembic/versions/d066bfd22890_add_message_table.py b/src/packages/base/langflow/alembic/versions/d066bfd22890_add_message_table.py
similarity index 100%
rename from src/backend/base/langflow/alembic/versions/d066bfd22890_add_message_table.py
rename to src/packages/base/langflow/alembic/versions/d066bfd22890_add_message_table.py
diff --git a/src/backend/base/langflow/alembic/versions/d2d475a1f7c0_add_tags_column_to_flow.py b/src/packages/base/langflow/alembic/versions/d2d475a1f7c0_add_tags_column_to_flow.py
similarity index 100%
rename from src/backend/base/langflow/alembic/versions/d2d475a1f7c0_add_tags_column_to_flow.py
rename to src/packages/base/langflow/alembic/versions/d2d475a1f7c0_add_tags_column_to_flow.py
diff --git a/src/backend/base/langflow/alembic/versions/d3dbf656a499_add_gradient_column_in_flow.py b/src/packages/base/langflow/alembic/versions/d3dbf656a499_add_gradient_column_in_flow.py
similarity index 100%
rename from src/backend/base/langflow/alembic/versions/d3dbf656a499_add_gradient_column_in_flow.py
rename to src/packages/base/langflow/alembic/versions/d3dbf656a499_add_gradient_column_in_flow.py
diff --git a/src/backend/base/langflow/alembic/versions/d9a6ea21edcd_rename_default_folder.py b/src/packages/base/langflow/alembic/versions/d9a6ea21edcd_rename_default_folder.py
similarity index 100%
rename from src/backend/base/langflow/alembic/versions/d9a6ea21edcd_rename_default_folder.py
rename to src/packages/base/langflow/alembic/versions/d9a6ea21edcd_rename_default_folder.py
diff --git a/src/backend/base/langflow/alembic/versions/dd9e0804ebd1_add_v2_file_table.py b/src/packages/base/langflow/alembic/versions/dd9e0804ebd1_add_v2_file_table.py
similarity index 100%
rename from src/backend/base/langflow/alembic/versions/dd9e0804ebd1_add_v2_file_table.py
rename to src/packages/base/langflow/alembic/versions/dd9e0804ebd1_add_v2_file_table.py
diff --git a/src/backend/base/langflow/alembic/versions/e3162c1804e6_add_persistent_locked_state.py b/src/packages/base/langflow/alembic/versions/e3162c1804e6_add_persistent_locked_state.py
similarity index 100%
rename from src/backend/base/langflow/alembic/versions/e3162c1804e6_add_persistent_locked_state.py
rename to src/packages/base/langflow/alembic/versions/e3162c1804e6_add_persistent_locked_state.py
diff --git a/src/backend/base/langflow/alembic/versions/e3bc869fa272_fix_nullable.py b/src/packages/base/langflow/alembic/versions/e3bc869fa272_fix_nullable.py
similarity index 100%
rename from src/backend/base/langflow/alembic/versions/e3bc869fa272_fix_nullable.py
rename to src/packages/base/langflow/alembic/versions/e3bc869fa272_fix_nullable.py
diff --git a/src/backend/base/langflow/alembic/versions/e56d87f8994a_add_optins_column_to_user.py b/src/packages/base/langflow/alembic/versions/e56d87f8994a_add_optins_column_to_user.py
similarity index 100%
rename from src/backend/base/langflow/alembic/versions/e56d87f8994a_add_optins_column_to_user.py
rename to src/packages/base/langflow/alembic/versions/e56d87f8994a_add_optins_column_to_user.py
diff --git a/src/backend/base/langflow/alembic/versions/e5a65ecff2cd_nullable_in_vertex_build.py b/src/packages/base/langflow/alembic/versions/e5a65ecff2cd_nullable_in_vertex_build.py
similarity index 100%
rename from src/backend/base/langflow/alembic/versions/e5a65ecff2cd_nullable_in_vertex_build.py
rename to src/packages/base/langflow/alembic/versions/e5a65ecff2cd_nullable_in_vertex_build.py
diff --git a/src/backend/base/langflow/alembic/versions/eb5866d51fd2_change_columns_to_be_nullable.py b/src/packages/base/langflow/alembic/versions/eb5866d51fd2_change_columns_to_be_nullable.py
similarity index 100%
rename from src/backend/base/langflow/alembic/versions/eb5866d51fd2_change_columns_to_be_nullable.py
rename to src/packages/base/langflow/alembic/versions/eb5866d51fd2_change_columns_to_be_nullable.py
diff --git a/src/backend/base/langflow/alembic/versions/eb5e72293a8e_add_error_and_edit_flags_to_message.py b/src/packages/base/langflow/alembic/versions/eb5e72293a8e_add_error_and_edit_flags_to_message.py
similarity index 100%
rename from src/backend/base/langflow/alembic/versions/eb5e72293a8e_add_error_and_edit_flags_to_message.py
rename to src/packages/base/langflow/alembic/versions/eb5e72293a8e_add_error_and_edit_flags_to_message.py
diff --git a/src/backend/base/langflow/alembic/versions/f3b2d1f1002d_add_column_access_type_to_flow.py b/src/packages/base/langflow/alembic/versions/f3b2d1f1002d_add_column_access_type_to_flow.py
similarity index 100%
rename from src/backend/base/langflow/alembic/versions/f3b2d1f1002d_add_column_access_type_to_flow.py
rename to src/packages/base/langflow/alembic/versions/f3b2d1f1002d_add_column_access_type_to_flow.py
diff --git a/src/backend/base/langflow/alembic/versions/f5ee9749d1a6_user_id_can_be_null_in_flow.py b/src/packages/base/langflow/alembic/versions/f5ee9749d1a6_user_id_can_be_null_in_flow.py
similarity index 100%
rename from src/backend/base/langflow/alembic/versions/f5ee9749d1a6_user_id_can_be_null_in_flow.py
rename to src/packages/base/langflow/alembic/versions/f5ee9749d1a6_user_id_can_be_null_in_flow.py
diff --git a/src/backend/base/langflow/alembic/versions/fd531f8868b1_fix_credential_table.py b/src/packages/base/langflow/alembic/versions/fd531f8868b1_fix_credential_table.py
similarity index 100%
rename from src/backend/base/langflow/alembic/versions/fd531f8868b1_fix_credential_table.py
rename to src/packages/base/langflow/alembic/versions/fd531f8868b1_fix_credential_table.py
diff --git a/src/backend/base/langflow/api/__init__.py b/src/packages/base/langflow/api/__init__.py
similarity index 100%
rename from src/backend/base/langflow/api/__init__.py
rename to src/packages/base/langflow/api/__init__.py
diff --git a/src/backend/base/langflow/api/build.py b/src/packages/base/langflow/api/build.py
similarity index 100%
rename from src/backend/base/langflow/api/build.py
rename to src/packages/base/langflow/api/build.py
diff --git a/src/backend/base/langflow/api/disconnect.py b/src/packages/base/langflow/api/disconnect.py
similarity index 100%
rename from src/backend/base/langflow/api/disconnect.py
rename to src/packages/base/langflow/api/disconnect.py
diff --git a/src/backend/base/langflow/api/health_check_router.py b/src/packages/base/langflow/api/health_check_router.py
similarity index 100%
rename from src/backend/base/langflow/api/health_check_router.py
rename to src/packages/base/langflow/api/health_check_router.py
diff --git a/src/backend/base/langflow/api/limited_background_tasks.py b/src/packages/base/langflow/api/limited_background_tasks.py
similarity index 100%
rename from src/backend/base/langflow/api/limited_background_tasks.py
rename to src/packages/base/langflow/api/limited_background_tasks.py
diff --git a/src/backend/base/langflow/api/log_router.py b/src/packages/base/langflow/api/log_router.py
similarity index 100%
rename from src/backend/base/langflow/api/log_router.py
rename to src/packages/base/langflow/api/log_router.py
diff --git a/src/backend/base/langflow/api/router.py b/src/packages/base/langflow/api/router.py
similarity index 100%
rename from src/backend/base/langflow/api/router.py
rename to src/packages/base/langflow/api/router.py
diff --git a/src/backend/base/langflow/api/schemas.py b/src/packages/base/langflow/api/schemas.py
similarity index 100%
rename from src/backend/base/langflow/api/schemas.py
rename to src/packages/base/langflow/api/schemas.py
diff --git a/src/backend/base/langflow/api/utils.py b/src/packages/base/langflow/api/utils.py
similarity index 100%
rename from src/backend/base/langflow/api/utils.py
rename to src/packages/base/langflow/api/utils.py
diff --git a/src/backend/base/langflow/api/v1/__init__.py b/src/packages/base/langflow/api/v1/__init__.py
similarity index 100%
rename from src/backend/base/langflow/api/v1/__init__.py
rename to src/packages/base/langflow/api/v1/__init__.py
diff --git a/src/backend/base/langflow/api/v1/api_key.py b/src/packages/base/langflow/api/v1/api_key.py
similarity index 100%
rename from src/backend/base/langflow/api/v1/api_key.py
rename to src/packages/base/langflow/api/v1/api_key.py
diff --git a/src/backend/base/langflow/api/v1/base.py b/src/packages/base/langflow/api/v1/base.py
similarity index 100%
rename from src/backend/base/langflow/api/v1/base.py
rename to src/packages/base/langflow/api/v1/base.py
diff --git a/src/backend/base/langflow/api/v1/callback.py b/src/packages/base/langflow/api/v1/callback.py
similarity index 100%
rename from src/backend/base/langflow/api/v1/callback.py
rename to src/packages/base/langflow/api/v1/callback.py
diff --git a/src/backend/base/langflow/api/v1/chat.py b/src/packages/base/langflow/api/v1/chat.py
similarity index 100%
rename from src/backend/base/langflow/api/v1/chat.py
rename to src/packages/base/langflow/api/v1/chat.py
diff --git a/src/backend/base/langflow/api/v1/endpoints.py b/src/packages/base/langflow/api/v1/endpoints.py
similarity index 100%
rename from src/backend/base/langflow/api/v1/endpoints.py
rename to src/packages/base/langflow/api/v1/endpoints.py
diff --git a/src/backend/base/langflow/api/v1/files.py b/src/packages/base/langflow/api/v1/files.py
similarity index 100%
rename from src/backend/base/langflow/api/v1/files.py
rename to src/packages/base/langflow/api/v1/files.py
diff --git a/src/backend/base/langflow/api/v1/flows.py b/src/packages/base/langflow/api/v1/flows.py
similarity index 100%
rename from src/backend/base/langflow/api/v1/flows.py
rename to src/packages/base/langflow/api/v1/flows.py
diff --git a/src/backend/base/langflow/api/v1/folders.py b/src/packages/base/langflow/api/v1/folders.py
similarity index 100%
rename from src/backend/base/langflow/api/v1/folders.py
rename to src/packages/base/langflow/api/v1/folders.py
diff --git a/src/backend/base/langflow/api/v1/knowledge_bases.py b/src/packages/base/langflow/api/v1/knowledge_bases.py
similarity index 100%
rename from src/backend/base/langflow/api/v1/knowledge_bases.py
rename to src/packages/base/langflow/api/v1/knowledge_bases.py
diff --git a/src/backend/base/langflow/api/v1/login.py b/src/packages/base/langflow/api/v1/login.py
similarity index 100%
rename from src/backend/base/langflow/api/v1/login.py
rename to src/packages/base/langflow/api/v1/login.py
diff --git a/src/backend/base/langflow/api/v1/mcp.py b/src/packages/base/langflow/api/v1/mcp.py
similarity index 100%
rename from src/backend/base/langflow/api/v1/mcp.py
rename to src/packages/base/langflow/api/v1/mcp.py
diff --git a/src/backend/base/langflow/api/v1/mcp_projects.py b/src/packages/base/langflow/api/v1/mcp_projects.py
similarity index 100%
rename from src/backend/base/langflow/api/v1/mcp_projects.py
rename to src/packages/base/langflow/api/v1/mcp_projects.py
diff --git a/src/backend/base/langflow/api/v1/mcp_utils.py b/src/packages/base/langflow/api/v1/mcp_utils.py
similarity index 100%
rename from src/backend/base/langflow/api/v1/mcp_utils.py
rename to src/packages/base/langflow/api/v1/mcp_utils.py
diff --git a/src/backend/base/langflow/api/v1/monitor.py b/src/packages/base/langflow/api/v1/monitor.py
similarity index 100%
rename from src/backend/base/langflow/api/v1/monitor.py
rename to src/packages/base/langflow/api/v1/monitor.py
diff --git a/src/backend/base/langflow/api/v1/openai_responses.py b/src/packages/base/langflow/api/v1/openai_responses.py
similarity index 100%
rename from src/backend/base/langflow/api/v1/openai_responses.py
rename to src/packages/base/langflow/api/v1/openai_responses.py
diff --git a/src/backend/base/langflow/api/v1/projects.py b/src/packages/base/langflow/api/v1/projects.py
similarity index 100%
rename from src/backend/base/langflow/api/v1/projects.py
rename to src/packages/base/langflow/api/v1/projects.py
diff --git a/src/backend/base/langflow/api/v1/schemas.py b/src/packages/base/langflow/api/v1/schemas.py
similarity index 100%
rename from src/backend/base/langflow/api/v1/schemas.py
rename to src/packages/base/langflow/api/v1/schemas.py
diff --git a/src/backend/base/langflow/api/v1/starter_projects.py b/src/packages/base/langflow/api/v1/starter_projects.py
similarity index 100%
rename from src/backend/base/langflow/api/v1/starter_projects.py
rename to src/packages/base/langflow/api/v1/starter_projects.py
diff --git a/src/backend/base/langflow/api/v1/store.py b/src/packages/base/langflow/api/v1/store.py
similarity index 100%
rename from src/backend/base/langflow/api/v1/store.py
rename to src/packages/base/langflow/api/v1/store.py
diff --git a/src/backend/base/langflow/api/v1/users.py b/src/packages/base/langflow/api/v1/users.py
similarity index 100%
rename from src/backend/base/langflow/api/v1/users.py
rename to src/packages/base/langflow/api/v1/users.py
diff --git a/src/backend/base/langflow/api/v1/validate.py b/src/packages/base/langflow/api/v1/validate.py
similarity index 100%
rename from src/backend/base/langflow/api/v1/validate.py
rename to src/packages/base/langflow/api/v1/validate.py
diff --git a/src/backend/base/langflow/api/v1/variable.py b/src/packages/base/langflow/api/v1/variable.py
similarity index 100%
rename from src/backend/base/langflow/api/v1/variable.py
rename to src/packages/base/langflow/api/v1/variable.py
diff --git a/src/backend/base/langflow/api/v1/voice_mode.py b/src/packages/base/langflow/api/v1/voice_mode.py
similarity index 100%
rename from src/backend/base/langflow/api/v1/voice_mode.py
rename to src/packages/base/langflow/api/v1/voice_mode.py
diff --git a/src/backend/base/langflow/api/v2/__init__.py b/src/packages/base/langflow/api/v2/__init__.py
similarity index 100%
rename from src/backend/base/langflow/api/v2/__init__.py
rename to src/packages/base/langflow/api/v2/__init__.py
diff --git a/src/backend/base/langflow/api/v2/files.py b/src/packages/base/langflow/api/v2/files.py
similarity index 100%
rename from src/backend/base/langflow/api/v2/files.py
rename to src/packages/base/langflow/api/v2/files.py
diff --git a/src/backend/base/langflow/api/v2/mcp.py b/src/packages/base/langflow/api/v2/mcp.py
similarity index 100%
rename from src/backend/base/langflow/api/v2/mcp.py
rename to src/packages/base/langflow/api/v2/mcp.py
diff --git a/src/backend/base/langflow/base/__init__.py b/src/packages/base/langflow/base/__init__.py
similarity index 100%
rename from src/backend/base/langflow/base/__init__.py
rename to src/packages/base/langflow/base/__init__.py
diff --git a/src/backend/base/langflow/base/agents/__init__.py b/src/packages/base/langflow/base/agents/__init__.py
similarity index 100%
rename from src/backend/base/langflow/base/agents/__init__.py
rename to src/packages/base/langflow/base/agents/__init__.py
diff --git a/src/backend/base/langflow/base/data/__init__.py b/src/packages/base/langflow/base/data/__init__.py
similarity index 100%
rename from src/backend/base/langflow/base/data/__init__.py
rename to src/packages/base/langflow/base/data/__init__.py
diff --git a/src/backend/base/langflow/base/data/utils.py b/src/packages/base/langflow/base/data/utils.py
similarity index 100%
rename from src/backend/base/langflow/base/data/utils.py
rename to src/packages/base/langflow/base/data/utils.py
diff --git a/src/backend/base/langflow/base/embeddings/__init__.py b/src/packages/base/langflow/base/embeddings/__init__.py
similarity index 100%
rename from src/backend/base/langflow/base/embeddings/__init__.py
rename to src/packages/base/langflow/base/embeddings/__init__.py
diff --git a/src/backend/base/langflow/base/io/__init__.py b/src/packages/base/langflow/base/io/__init__.py
similarity index 100%
rename from src/backend/base/langflow/base/io/__init__.py
rename to src/packages/base/langflow/base/io/__init__.py
diff --git a/src/backend/base/langflow/base/io/chat.py b/src/packages/base/langflow/base/io/chat.py
similarity index 100%
rename from src/backend/base/langflow/base/io/chat.py
rename to src/packages/base/langflow/base/io/chat.py
diff --git a/src/backend/base/langflow/base/io/text.py b/src/packages/base/langflow/base/io/text.py
similarity index 100%
rename from src/backend/base/langflow/base/io/text.py
rename to src/packages/base/langflow/base/io/text.py
diff --git a/src/backend/base/langflow/base/knowledge_bases/__init__.py b/src/packages/base/langflow/base/knowledge_bases/__init__.py
similarity index 100%
rename from src/backend/base/langflow/base/knowledge_bases/__init__.py
rename to src/packages/base/langflow/base/knowledge_bases/__init__.py
diff --git a/src/backend/base/langflow/base/knowledge_bases/knowledge_base_utils.py b/src/packages/base/langflow/base/knowledge_bases/knowledge_base_utils.py
similarity index 100%
rename from src/backend/base/langflow/base/knowledge_bases/knowledge_base_utils.py
rename to src/packages/base/langflow/base/knowledge_bases/knowledge_base_utils.py
diff --git a/src/backend/base/langflow/base/memory/__init__.py b/src/packages/base/langflow/base/memory/__init__.py
similarity index 100%
rename from src/backend/base/langflow/base/memory/__init__.py
rename to src/packages/base/langflow/base/memory/__init__.py
diff --git a/src/backend/base/langflow/base/models/__init__.py b/src/packages/base/langflow/base/models/__init__.py
similarity index 100%
rename from src/backend/base/langflow/base/models/__init__.py
rename to src/packages/base/langflow/base/models/__init__.py
diff --git a/src/backend/base/langflow/base/models/openai_constants.py b/src/packages/base/langflow/base/models/openai_constants.py
similarity index 100%
rename from src/backend/base/langflow/base/models/openai_constants.py
rename to src/packages/base/langflow/base/models/openai_constants.py
diff --git a/src/backend/base/langflow/base/prompts/__init__.py b/src/packages/base/langflow/base/prompts/__init__.py
similarity index 100%
rename from src/backend/base/langflow/base/prompts/__init__.py
rename to src/packages/base/langflow/base/prompts/__init__.py
diff --git a/src/backend/base/langflow/base/prompts/api_utils.py b/src/packages/base/langflow/base/prompts/api_utils.py
similarity index 100%
rename from src/backend/base/langflow/base/prompts/api_utils.py
rename to src/packages/base/langflow/base/prompts/api_utils.py
diff --git a/src/backend/base/langflow/base/textsplitters/__init__.py b/src/packages/base/langflow/base/textsplitters/__init__.py
similarity index 100%
rename from src/backend/base/langflow/base/textsplitters/__init__.py
rename to src/packages/base/langflow/base/textsplitters/__init__.py
diff --git a/src/backend/base/langflow/base/tools/__init__.py b/src/packages/base/langflow/base/tools/__init__.py
similarity index 100%
rename from src/backend/base/langflow/base/tools/__init__.py
rename to src/packages/base/langflow/base/tools/__init__.py
diff --git a/src/backend/base/langflow/base/vectorstores/__init__.py b/src/packages/base/langflow/base/vectorstores/__init__.py
similarity index 100%
rename from src/backend/base/langflow/base/vectorstores/__init__.py
rename to src/packages/base/langflow/base/vectorstores/__init__.py
diff --git a/src/backend/base/langflow/cli/__init__.py b/src/packages/base/langflow/cli/__init__.py
similarity index 100%
rename from src/backend/base/langflow/cli/__init__.py
rename to src/packages/base/langflow/cli/__init__.py
diff --git a/src/backend/base/langflow/cli/progress.py b/src/packages/base/langflow/cli/progress.py
similarity index 100%
rename from src/backend/base/langflow/cli/progress.py
rename to src/packages/base/langflow/cli/progress.py
diff --git a/src/backend/base/langflow/components/__init__.py b/src/packages/base/langflow/components/__init__.py
similarity index 100%
rename from src/backend/base/langflow/components/__init__.py
rename to src/packages/base/langflow/components/__init__.py
diff --git a/src/backend/base/langflow/components/agents.py b/src/packages/base/langflow/components/agents.py
similarity index 100%
rename from src/backend/base/langflow/components/agents.py
rename to src/packages/base/langflow/components/agents.py
diff --git a/src/backend/base/langflow/components/anthropic.py b/src/packages/base/langflow/components/anthropic.py
similarity index 100%
rename from src/backend/base/langflow/components/anthropic.py
rename to src/packages/base/langflow/components/anthropic.py
diff --git a/src/backend/base/langflow/components/data.py b/src/packages/base/langflow/components/data.py
similarity index 100%
rename from src/backend/base/langflow/components/data.py
rename to src/packages/base/langflow/components/data.py
diff --git a/src/backend/base/langflow/components/helpers.py b/src/packages/base/langflow/components/helpers.py
similarity index 100%
rename from src/backend/base/langflow/components/helpers.py
rename to src/packages/base/langflow/components/helpers.py
diff --git a/src/backend/base/langflow/components/knowledge_bases/__init__.py b/src/packages/base/langflow/components/knowledge_bases/__init__.py
similarity index 100%
rename from src/backend/base/langflow/components/knowledge_bases/__init__.py
rename to src/packages/base/langflow/components/knowledge_bases/__init__.py
diff --git a/src/backend/base/langflow/components/knowledge_bases/ingestion.py b/src/packages/base/langflow/components/knowledge_bases/ingestion.py
similarity index 100%
rename from src/backend/base/langflow/components/knowledge_bases/ingestion.py
rename to src/packages/base/langflow/components/knowledge_bases/ingestion.py
diff --git a/src/backend/base/langflow/components/knowledge_bases/retrieval.py b/src/packages/base/langflow/components/knowledge_bases/retrieval.py
similarity index 100%
rename from src/backend/base/langflow/components/knowledge_bases/retrieval.py
rename to src/packages/base/langflow/components/knowledge_bases/retrieval.py
diff --git a/src/backend/base/langflow/components/openai.py b/src/packages/base/langflow/components/openai.py
similarity index 100%
rename from src/backend/base/langflow/components/openai.py
rename to src/packages/base/langflow/components/openai.py
diff --git a/src/backend/base/langflow/components/processing/__init__.py b/src/packages/base/langflow/components/processing/__init__.py
similarity index 100%
rename from src/backend/base/langflow/components/processing/__init__.py
rename to src/packages/base/langflow/components/processing/__init__.py
diff --git a/src/backend/base/langflow/components/processing/converter.py b/src/packages/base/langflow/components/processing/converter.py
similarity index 100%
rename from src/backend/base/langflow/components/processing/converter.py
rename to src/packages/base/langflow/components/processing/converter.py
diff --git a/src/backend/base/langflow/core/__init__.py b/src/packages/base/langflow/core/__init__.py
similarity index 100%
rename from src/backend/base/langflow/core/__init__.py
rename to src/packages/base/langflow/core/__init__.py
diff --git a/src/backend/base/langflow/core/celery_app.py b/src/packages/base/langflow/core/celery_app.py
similarity index 100%
rename from src/backend/base/langflow/core/celery_app.py
rename to src/packages/base/langflow/core/celery_app.py
diff --git a/src/backend/base/langflow/core/celeryconfig.py b/src/packages/base/langflow/core/celeryconfig.py
similarity index 100%
rename from src/backend/base/langflow/core/celeryconfig.py
rename to src/packages/base/langflow/core/celeryconfig.py
diff --git a/src/backend/base/langflow/custom/__init__.py b/src/packages/base/langflow/custom/__init__.py
similarity index 100%
rename from src/backend/base/langflow/custom/__init__.py
rename to src/packages/base/langflow/custom/__init__.py
diff --git a/src/backend/base/langflow/custom/custom_component/__init__.py b/src/packages/base/langflow/custom/custom_component/__init__.py
similarity index 100%
rename from src/backend/base/langflow/custom/custom_component/__init__.py
rename to src/packages/base/langflow/custom/custom_component/__init__.py
diff --git a/src/backend/base/langflow/custom/custom_component/component.py b/src/packages/base/langflow/custom/custom_component/component.py
similarity index 100%
rename from src/backend/base/langflow/custom/custom_component/component.py
rename to src/packages/base/langflow/custom/custom_component/component.py
diff --git a/src/backend/base/langflow/custom/custom_component/component_with_cache.py b/src/packages/base/langflow/custom/custom_component/component_with_cache.py
similarity index 100%
rename from src/backend/base/langflow/custom/custom_component/component_with_cache.py
rename to src/packages/base/langflow/custom/custom_component/component_with_cache.py
diff --git a/src/backend/base/langflow/custom/custom_component/custom_component.py b/src/packages/base/langflow/custom/custom_component/custom_component.py
similarity index 100%
rename from src/backend/base/langflow/custom/custom_component/custom_component.py
rename to src/packages/base/langflow/custom/custom_component/custom_component.py
diff --git a/src/backend/base/langflow/custom/utils.py b/src/packages/base/langflow/custom/utils.py
similarity index 100%
rename from src/backend/base/langflow/custom/utils.py
rename to src/packages/base/langflow/custom/utils.py
diff --git a/src/backend/base/langflow/custom/validate.py b/src/packages/base/langflow/custom/validate.py
similarity index 100%
rename from src/backend/base/langflow/custom/validate.py
rename to src/packages/base/langflow/custom/validate.py
diff --git a/src/backend/base/langflow/events/__init__.py b/src/packages/base/langflow/events/__init__.py
similarity index 100%
rename from src/backend/base/langflow/events/__init__.py
rename to src/packages/base/langflow/events/__init__.py
diff --git a/src/backend/base/langflow/events/event_manager.py b/src/packages/base/langflow/events/event_manager.py
similarity index 100%
rename from src/backend/base/langflow/events/event_manager.py
rename to src/packages/base/langflow/events/event_manager.py
diff --git a/src/backend/base/langflow/exceptions/__init__.py b/src/packages/base/langflow/exceptions/__init__.py
similarity index 100%
rename from src/backend/base/langflow/exceptions/__init__.py
rename to src/packages/base/langflow/exceptions/__init__.py
diff --git a/src/backend/base/langflow/exceptions/api.py b/src/packages/base/langflow/exceptions/api.py
similarity index 100%
rename from src/backend/base/langflow/exceptions/api.py
rename to src/packages/base/langflow/exceptions/api.py
diff --git a/src/backend/base/langflow/exceptions/component.py b/src/packages/base/langflow/exceptions/component.py
similarity index 100%
rename from src/backend/base/langflow/exceptions/component.py
rename to src/packages/base/langflow/exceptions/component.py
diff --git a/src/backend/base/langflow/exceptions/serialization.py b/src/packages/base/langflow/exceptions/serialization.py
similarity index 100%
rename from src/backend/base/langflow/exceptions/serialization.py
rename to src/packages/base/langflow/exceptions/serialization.py
diff --git a/src/backend/base/langflow/field_typing/__init__.py b/src/packages/base/langflow/field_typing/__init__.py
similarity index 100%
rename from src/backend/base/langflow/field_typing/__init__.py
rename to src/packages/base/langflow/field_typing/__init__.py
diff --git a/src/backend/base/langflow/field_typing/constants.py b/src/packages/base/langflow/field_typing/constants.py
similarity index 100%
rename from src/backend/base/langflow/field_typing/constants.py
rename to src/packages/base/langflow/field_typing/constants.py
diff --git a/src/backend/base/langflow/field_typing/range_spec.py b/src/packages/base/langflow/field_typing/range_spec.py
similarity index 100%
rename from src/backend/base/langflow/field_typing/range_spec.py
rename to src/packages/base/langflow/field_typing/range_spec.py
diff --git a/src/backend/base/langflow/graph/__init__.py b/src/packages/base/langflow/graph/__init__.py
similarity index 100%
rename from src/backend/base/langflow/graph/__init__.py
rename to src/packages/base/langflow/graph/__init__.py
diff --git a/src/backend/base/langflow/helpers/__init__.py b/src/packages/base/langflow/helpers/__init__.py
similarity index 100%
rename from src/backend/base/langflow/helpers/__init__.py
rename to src/packages/base/langflow/helpers/__init__.py
diff --git a/src/backend/base/langflow/helpers/base_model.py b/src/packages/base/langflow/helpers/base_model.py
similarity index 100%
rename from src/backend/base/langflow/helpers/base_model.py
rename to src/packages/base/langflow/helpers/base_model.py
diff --git a/src/backend/base/langflow/helpers/custom.py b/src/packages/base/langflow/helpers/custom.py
similarity index 100%
rename from src/backend/base/langflow/helpers/custom.py
rename to src/packages/base/langflow/helpers/custom.py
diff --git a/src/backend/base/langflow/helpers/data.py b/src/packages/base/langflow/helpers/data.py
similarity index 100%
rename from src/backend/base/langflow/helpers/data.py
rename to src/packages/base/langflow/helpers/data.py
diff --git a/src/backend/base/langflow/helpers/flow.py b/src/packages/base/langflow/helpers/flow.py
similarity index 100%
rename from src/backend/base/langflow/helpers/flow.py
rename to src/packages/base/langflow/helpers/flow.py
diff --git a/src/backend/base/langflow/helpers/folders.py b/src/packages/base/langflow/helpers/folders.py
similarity index 100%
rename from src/backend/base/langflow/helpers/folders.py
rename to src/packages/base/langflow/helpers/folders.py
diff --git a/src/backend/base/langflow/helpers/user.py b/src/packages/base/langflow/helpers/user.py
similarity index 100%
rename from src/backend/base/langflow/helpers/user.py
rename to src/packages/base/langflow/helpers/user.py
diff --git a/src/backend/base/langflow/initial_setup/__init__.py b/src/packages/base/langflow/initial_setup/__init__.py
similarity index 100%
rename from src/backend/base/langflow/initial_setup/__init__.py
rename to src/packages/base/langflow/initial_setup/__init__.py
diff --git a/src/backend/base/langflow/initial_setup/constants.py b/src/packages/base/langflow/initial_setup/constants.py
similarity index 100%
rename from src/backend/base/langflow/initial_setup/constants.py
rename to src/packages/base/langflow/initial_setup/constants.py
diff --git a/src/backend/base/langflow/initial_setup/load.py b/src/packages/base/langflow/initial_setup/load.py
similarity index 100%
rename from src/backend/base/langflow/initial_setup/load.py
rename to src/packages/base/langflow/initial_setup/load.py
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-01.svg b/src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-01.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-01.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-01.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-02.svg b/src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-02.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-02.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-02.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-03.svg b/src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-03.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-03.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-03.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-04.svg b/src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-04.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-04.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-04.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-05.svg b/src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-05.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-05.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-05.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-06.svg b/src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-06.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-06.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-06.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-07.svg b/src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-07.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-07.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-07.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-08.svg b/src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-08.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-08.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-08.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-09.svg b/src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-09.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-09.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-09.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-10.svg b/src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-10.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-10.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-10.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-11.svg b/src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-11.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-11.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-11.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-12.svg b/src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-12.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-12.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-12.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-13.svg b/src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-13.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-13.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-13.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-14.svg b/src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-14.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-14.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-14.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-15.svg b/src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-15.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-15.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-15.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-16.svg b/src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-16.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-16.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-16.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-17.svg b/src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-17.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-17.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-17.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-18.svg b/src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-18.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-18.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-18.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-19.svg b/src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-19.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-19.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-19.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-20.svg b/src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-20.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-20.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-20.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-21.svg b/src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-21.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-21.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-21.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-22.svg b/src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-22.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-22.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-22.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-23.svg b/src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-23.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-23.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-23.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-24.svg b/src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-24.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-24.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-24.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-25.svg b/src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-25.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-25.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-25.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-26.svg b/src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-26.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-26.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-26.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-27.svg b/src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-27.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-27.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-27.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-01.svg b/src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-01.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-01.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-01.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-02.svg b/src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-02.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-02.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-02.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-03.svg b/src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-03.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-03.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-03.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-04.svg b/src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-04.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-04.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-04.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-05.svg b/src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-05.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-05.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-05.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-06.svg b/src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-06.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-06.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-06.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-07.svg b/src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-07.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-07.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-07.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-08.svg b/src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-08.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-08.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-08.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-09.svg b/src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-09.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-09.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-09.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-10.svg b/src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-10.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-10.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-10.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-11.svg b/src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-11.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-11.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-11.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-12.svg b/src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-12.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-12.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-12.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-13.svg b/src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-13.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-13.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-13.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-14.svg b/src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-14.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-14.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-14.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-15.svg b/src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-15.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-15.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-15.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-16.svg b/src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-16.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-16.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-16.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-17.svg b/src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-17.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-17.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-17.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-18.svg b/src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-18.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-18.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-18.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-19.svg b/src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-19.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-19.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-19.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-20.svg b/src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-20.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-20.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-20.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-21.svg b/src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-21.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-21.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-21.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-22.svg b/src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-22.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-22.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-22.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-23.svg b/src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-23.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-23.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-23.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-24.svg b/src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-24.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-24.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-24.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-25.svg b/src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-25.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-25.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-25.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-26.svg b/src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-26.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-26.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-26.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-27.svg b/src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-27.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-27.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-27.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/Space/026-alien.svg b/src/packages/base/langflow/initial_setup/profile_pictures/Space/026-alien.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/Space/026-alien.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/Space/026-alien.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/Space/027-satellite.svg b/src/packages/base/langflow/initial_setup/profile_pictures/Space/027-satellite.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/Space/027-satellite.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/Space/027-satellite.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/Space/028-alien.svg b/src/packages/base/langflow/initial_setup/profile_pictures/Space/028-alien.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/Space/028-alien.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/Space/028-alien.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/Space/029-telescope.svg b/src/packages/base/langflow/initial_setup/profile_pictures/Space/029-telescope.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/Space/029-telescope.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/Space/029-telescope.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/Space/030-books.svg b/src/packages/base/langflow/initial_setup/profile_pictures/Space/030-books.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/Space/030-books.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/Space/030-books.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/Space/031-planet.svg b/src/packages/base/langflow/initial_setup/profile_pictures/Space/031-planet.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/Space/031-planet.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/Space/031-planet.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/Space/032-constellation.svg b/src/packages/base/langflow/initial_setup/profile_pictures/Space/032-constellation.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/Space/032-constellation.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/Space/032-constellation.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/Space/033-planet.svg b/src/packages/base/langflow/initial_setup/profile_pictures/Space/033-planet.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/Space/033-planet.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/Space/033-planet.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/Space/034-alien.svg b/src/packages/base/langflow/initial_setup/profile_pictures/Space/034-alien.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/Space/034-alien.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/Space/034-alien.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/Space/035-globe.svg b/src/packages/base/langflow/initial_setup/profile_pictures/Space/035-globe.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/Space/035-globe.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/Space/035-globe.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/Space/036-eclipse.svg b/src/packages/base/langflow/initial_setup/profile_pictures/Space/036-eclipse.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/Space/036-eclipse.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/Space/036-eclipse.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/Space/037-meteor.svg b/src/packages/base/langflow/initial_setup/profile_pictures/Space/037-meteor.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/Space/037-meteor.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/Space/037-meteor.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/Space/038-eclipse.svg b/src/packages/base/langflow/initial_setup/profile_pictures/Space/038-eclipse.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/Space/038-eclipse.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/Space/038-eclipse.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/Space/039-Asteroid.svg b/src/packages/base/langflow/initial_setup/profile_pictures/Space/039-Asteroid.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/Space/039-Asteroid.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/Space/039-Asteroid.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/Space/040-mission.svg b/src/packages/base/langflow/initial_setup/profile_pictures/Space/040-mission.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/Space/040-mission.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/Space/040-mission.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/Space/041-spaceship.svg b/src/packages/base/langflow/initial_setup/profile_pictures/Space/041-spaceship.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/Space/041-spaceship.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/Space/041-spaceship.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/Space/042-space shuttle.svg b/src/packages/base/langflow/initial_setup/profile_pictures/Space/042-space shuttle.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/Space/042-space shuttle.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/Space/042-space shuttle.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/Space/043-space shuttle.svg b/src/packages/base/langflow/initial_setup/profile_pictures/Space/043-space shuttle.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/Space/043-space shuttle.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/Space/043-space shuttle.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/Space/044-rocket.svg b/src/packages/base/langflow/initial_setup/profile_pictures/Space/044-rocket.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/Space/044-rocket.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/Space/044-rocket.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/Space/045-astronaut.svg b/src/packages/base/langflow/initial_setup/profile_pictures/Space/045-astronaut.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/Space/045-astronaut.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/Space/045-astronaut.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/Space/046-rocket.svg b/src/packages/base/langflow/initial_setup/profile_pictures/Space/046-rocket.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/Space/046-rocket.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/Space/046-rocket.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/Space/047-computer.svg b/src/packages/base/langflow/initial_setup/profile_pictures/Space/047-computer.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/Space/047-computer.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/Space/047-computer.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/Space/048-satellite.svg b/src/packages/base/langflow/initial_setup/profile_pictures/Space/048-satellite.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/Space/048-satellite.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/Space/048-satellite.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/Space/049-astronaut.svg b/src/packages/base/langflow/initial_setup/profile_pictures/Space/049-astronaut.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/Space/049-astronaut.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/Space/049-astronaut.svg
diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/Space/050-space robot.svg b/src/packages/base/langflow/initial_setup/profile_pictures/Space/050-space robot.svg
similarity index 100%
rename from src/backend/base/langflow/initial_setup/profile_pictures/Space/050-space robot.svg
rename to src/packages/base/langflow/initial_setup/profile_pictures/Space/050-space robot.svg
diff --git a/src/backend/base/langflow/initial_setup/setup.py b/src/packages/base/langflow/initial_setup/setup.py
similarity index 100%
rename from src/backend/base/langflow/initial_setup/setup.py
rename to src/packages/base/langflow/initial_setup/setup.py
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Basic Prompt Chaining.json b/src/packages/base/langflow/initial_setup/starter_projects/Basic Prompt Chaining.json
similarity index 100%
rename from src/backend/base/langflow/initial_setup/starter_projects/Basic Prompt Chaining.json
rename to src/packages/base/langflow/initial_setup/starter_projects/Basic Prompt Chaining.json
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Basic Prompting.json b/src/packages/base/langflow/initial_setup/starter_projects/Basic Prompting.json
similarity index 100%
rename from src/backend/base/langflow/initial_setup/starter_projects/Basic Prompting.json
rename to src/packages/base/langflow/initial_setup/starter_projects/Basic Prompting.json
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Blog Writer.json b/src/packages/base/langflow/initial_setup/starter_projects/Blog Writer.json
similarity index 100%
rename from src/backend/base/langflow/initial_setup/starter_projects/Blog Writer.json
rename to src/packages/base/langflow/initial_setup/starter_projects/Blog Writer.json
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Custom Component Generator.json b/src/packages/base/langflow/initial_setup/starter_projects/Custom Component Generator.json
similarity index 100%
rename from src/backend/base/langflow/initial_setup/starter_projects/Custom Component Generator.json
rename to src/packages/base/langflow/initial_setup/starter_projects/Custom Component Generator.json
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Document Q&A.json b/src/packages/base/langflow/initial_setup/starter_projects/Document Q&A.json
similarity index 100%
rename from src/backend/base/langflow/initial_setup/starter_projects/Document Q&A.json
rename to src/packages/base/langflow/initial_setup/starter_projects/Document Q&A.json
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Financial Report Parser.json b/src/packages/base/langflow/initial_setup/starter_projects/Financial Report Parser.json
similarity index 100%
rename from src/backend/base/langflow/initial_setup/starter_projects/Financial Report Parser.json
rename to src/packages/base/langflow/initial_setup/starter_projects/Financial Report Parser.json
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Hybrid Search RAG.json b/src/packages/base/langflow/initial_setup/starter_projects/Hybrid Search RAG.json
similarity index 100%
rename from src/backend/base/langflow/initial_setup/starter_projects/Hybrid Search RAG.json
rename to src/packages/base/langflow/initial_setup/starter_projects/Hybrid Search RAG.json
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Image Sentiment Analysis.json b/src/packages/base/langflow/initial_setup/starter_projects/Image Sentiment Analysis.json
similarity index 100%
rename from src/backend/base/langflow/initial_setup/starter_projects/Image Sentiment Analysis.json
rename to src/packages/base/langflow/initial_setup/starter_projects/Image Sentiment Analysis.json
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Instagram Copywriter.json b/src/packages/base/langflow/initial_setup/starter_projects/Instagram Copywriter.json
similarity index 100%
rename from src/backend/base/langflow/initial_setup/starter_projects/Instagram Copywriter.json
rename to src/packages/base/langflow/initial_setup/starter_projects/Instagram Copywriter.json
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Invoice Summarizer.json b/src/packages/base/langflow/initial_setup/starter_projects/Invoice Summarizer.json
similarity index 100%
rename from src/backend/base/langflow/initial_setup/starter_projects/Invoice Summarizer.json
rename to src/packages/base/langflow/initial_setup/starter_projects/Invoice Summarizer.json
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Knowledge Ingestion.json b/src/packages/base/langflow/initial_setup/starter_projects/Knowledge Ingestion.json
similarity index 100%
rename from src/backend/base/langflow/initial_setup/starter_projects/Knowledge Ingestion.json
rename to src/packages/base/langflow/initial_setup/starter_projects/Knowledge Ingestion.json
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Knowledge Retrieval.json b/src/packages/base/langflow/initial_setup/starter_projects/Knowledge Retrieval.json
similarity index 100%
rename from src/backend/base/langflow/initial_setup/starter_projects/Knowledge Retrieval.json
rename to src/packages/base/langflow/initial_setup/starter_projects/Knowledge Retrieval.json
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Market Research.json b/src/packages/base/langflow/initial_setup/starter_projects/Market Research.json
similarity index 100%
rename from src/backend/base/langflow/initial_setup/starter_projects/Market Research.json
rename to src/packages/base/langflow/initial_setup/starter_projects/Market Research.json
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Meeting Summary.json b/src/packages/base/langflow/initial_setup/starter_projects/Meeting Summary.json
similarity index 100%
rename from src/backend/base/langflow/initial_setup/starter_projects/Meeting Summary.json
rename to src/packages/base/langflow/initial_setup/starter_projects/Meeting Summary.json
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Memory Chatbot.json b/src/packages/base/langflow/initial_setup/starter_projects/Memory Chatbot.json
similarity index 100%
rename from src/backend/base/langflow/initial_setup/starter_projects/Memory Chatbot.json
rename to src/packages/base/langflow/initial_setup/starter_projects/Memory Chatbot.json
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/News Aggregator.json b/src/packages/base/langflow/initial_setup/starter_projects/News Aggregator.json
similarity index 100%
rename from src/backend/base/langflow/initial_setup/starter_projects/News Aggregator.json
rename to src/packages/base/langflow/initial_setup/starter_projects/News Aggregator.json
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Nvidia Remix.json b/src/packages/base/langflow/initial_setup/starter_projects/Nvidia Remix.json
similarity index 100%
rename from src/backend/base/langflow/initial_setup/starter_projects/Nvidia Remix.json
rename to src/packages/base/langflow/initial_setup/starter_projects/Nvidia Remix.json
diff --git "a/src/backend/base/langflow/initial_setup/starter_projects/Pok\303\251dex Agent.json" "b/src/packages/base/langflow/initial_setup/starter_projects/Pok\303\251dex Agent.json"
similarity index 100%
rename from "src/backend/base/langflow/initial_setup/starter_projects/Pok\303\251dex Agent.json"
rename to "src/packages/base/langflow/initial_setup/starter_projects/Pok\303\251dex Agent.json"
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Portfolio Website Code Generator.json b/src/packages/base/langflow/initial_setup/starter_projects/Portfolio Website Code Generator.json
similarity index 100%
rename from src/backend/base/langflow/initial_setup/starter_projects/Portfolio Website Code Generator.json
rename to src/packages/base/langflow/initial_setup/starter_projects/Portfolio Website Code Generator.json
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Price Deal Finder.json b/src/packages/base/langflow/initial_setup/starter_projects/Price Deal Finder.json
similarity index 100%
rename from src/backend/base/langflow/initial_setup/starter_projects/Price Deal Finder.json
rename to src/packages/base/langflow/initial_setup/starter_projects/Price Deal Finder.json
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Research Agent.json b/src/packages/base/langflow/initial_setup/starter_projects/Research Agent.json
similarity index 100%
rename from src/backend/base/langflow/initial_setup/starter_projects/Research Agent.json
rename to src/packages/base/langflow/initial_setup/starter_projects/Research Agent.json
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Research Translation Loop.json b/src/packages/base/langflow/initial_setup/starter_projects/Research Translation Loop.json
similarity index 100%
rename from src/backend/base/langflow/initial_setup/starter_projects/Research Translation Loop.json
rename to src/packages/base/langflow/initial_setup/starter_projects/Research Translation Loop.json
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/SEO Keyword Generator.json b/src/packages/base/langflow/initial_setup/starter_projects/SEO Keyword Generator.json
similarity index 100%
rename from src/backend/base/langflow/initial_setup/starter_projects/SEO Keyword Generator.json
rename to src/packages/base/langflow/initial_setup/starter_projects/SEO Keyword Generator.json
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/SaaS Pricing.json b/src/packages/base/langflow/initial_setup/starter_projects/SaaS Pricing.json
similarity index 100%
rename from src/backend/base/langflow/initial_setup/starter_projects/SaaS Pricing.json
rename to src/packages/base/langflow/initial_setup/starter_projects/SaaS Pricing.json
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Search agent.json b/src/packages/base/langflow/initial_setup/starter_projects/Search agent.json
similarity index 100%
rename from src/backend/base/langflow/initial_setup/starter_projects/Search agent.json
rename to src/packages/base/langflow/initial_setup/starter_projects/Search agent.json
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Sequential Tasks Agents.json b/src/packages/base/langflow/initial_setup/starter_projects/Sequential Tasks Agents.json
similarity index 100%
rename from src/backend/base/langflow/initial_setup/starter_projects/Sequential Tasks Agents.json
rename to src/packages/base/langflow/initial_setup/starter_projects/Sequential Tasks Agents.json
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Simple Agent.json b/src/packages/base/langflow/initial_setup/starter_projects/Simple Agent.json
similarity index 100%
rename from src/backend/base/langflow/initial_setup/starter_projects/Simple Agent.json
rename to src/packages/base/langflow/initial_setup/starter_projects/Simple Agent.json
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Social Media Agent.json b/src/packages/base/langflow/initial_setup/starter_projects/Social Media Agent.json
similarity index 100%
rename from src/backend/base/langflow/initial_setup/starter_projects/Social Media Agent.json
rename to src/packages/base/langflow/initial_setup/starter_projects/Social Media Agent.json
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Text Sentiment Analysis.json b/src/packages/base/langflow/initial_setup/starter_projects/Text Sentiment Analysis.json
similarity index 100%
rename from src/backend/base/langflow/initial_setup/starter_projects/Text Sentiment Analysis.json
rename to src/packages/base/langflow/initial_setup/starter_projects/Text Sentiment Analysis.json
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Travel Planning Agents.json b/src/packages/base/langflow/initial_setup/starter_projects/Travel Planning Agents.json
similarity index 100%
rename from src/backend/base/langflow/initial_setup/starter_projects/Travel Planning Agents.json
rename to src/packages/base/langflow/initial_setup/starter_projects/Travel Planning Agents.json
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Twitter Thread Generator.json b/src/packages/base/langflow/initial_setup/starter_projects/Twitter Thread Generator.json
similarity index 100%
rename from src/backend/base/langflow/initial_setup/starter_projects/Twitter Thread Generator.json
rename to src/packages/base/langflow/initial_setup/starter_projects/Twitter Thread Generator.json
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Vector Store RAG.json b/src/packages/base/langflow/initial_setup/starter_projects/Vector Store RAG.json
similarity index 100%
rename from src/backend/base/langflow/initial_setup/starter_projects/Vector Store RAG.json
rename to src/packages/base/langflow/initial_setup/starter_projects/Vector Store RAG.json
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Youtube Analysis.json b/src/packages/base/langflow/initial_setup/starter_projects/Youtube Analysis.json
similarity index 100%
rename from src/backend/base/langflow/initial_setup/starter_projects/Youtube Analysis.json
rename to src/packages/base/langflow/initial_setup/starter_projects/Youtube Analysis.json
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/__init__.py b/src/packages/base/langflow/initial_setup/starter_projects/__init__.py
similarity index 100%
rename from src/backend/base/langflow/initial_setup/starter_projects/__init__.py
rename to src/packages/base/langflow/initial_setup/starter_projects/__init__.py
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/basic_prompting.py b/src/packages/base/langflow/initial_setup/starter_projects/basic_prompting.py
similarity index 100%
rename from src/backend/base/langflow/initial_setup/starter_projects/basic_prompting.py
rename to src/packages/base/langflow/initial_setup/starter_projects/basic_prompting.py
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/blog_writer.py b/src/packages/base/langflow/initial_setup/starter_projects/blog_writer.py
similarity index 100%
rename from src/backend/base/langflow/initial_setup/starter_projects/blog_writer.py
rename to src/packages/base/langflow/initial_setup/starter_projects/blog_writer.py
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/complex_agent.py b/src/packages/base/langflow/initial_setup/starter_projects/complex_agent.py
similarity index 100%
rename from src/backend/base/langflow/initial_setup/starter_projects/complex_agent.py
rename to src/packages/base/langflow/initial_setup/starter_projects/complex_agent.py
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/document_qa.py b/src/packages/base/langflow/initial_setup/starter_projects/document_qa.py
similarity index 100%
rename from src/backend/base/langflow/initial_setup/starter_projects/document_qa.py
rename to src/packages/base/langflow/initial_setup/starter_projects/document_qa.py
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/hierarchical_tasks_agent.py b/src/packages/base/langflow/initial_setup/starter_projects/hierarchical_tasks_agent.py
similarity index 100%
rename from src/backend/base/langflow/initial_setup/starter_projects/hierarchical_tasks_agent.py
rename to src/packages/base/langflow/initial_setup/starter_projects/hierarchical_tasks_agent.py
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/memory_chatbot.py b/src/packages/base/langflow/initial_setup/starter_projects/memory_chatbot.py
similarity index 100%
rename from src/backend/base/langflow/initial_setup/starter_projects/memory_chatbot.py
rename to src/packages/base/langflow/initial_setup/starter_projects/memory_chatbot.py
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/sequential_tasks_agent.py b/src/packages/base/langflow/initial_setup/starter_projects/sequential_tasks_agent.py
similarity index 100%
rename from src/backend/base/langflow/initial_setup/starter_projects/sequential_tasks_agent.py
rename to src/packages/base/langflow/initial_setup/starter_projects/sequential_tasks_agent.py
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/vector_store_rag.py b/src/packages/base/langflow/initial_setup/starter_projects/vector_store_rag.py
similarity index 100%
rename from src/backend/base/langflow/initial_setup/starter_projects/vector_store_rag.py
rename to src/packages/base/langflow/initial_setup/starter_projects/vector_store_rag.py
diff --git a/src/backend/base/langflow/inputs/__init__.py b/src/packages/base/langflow/inputs/__init__.py
similarity index 100%
rename from src/backend/base/langflow/inputs/__init__.py
rename to src/packages/base/langflow/inputs/__init__.py
diff --git a/src/backend/base/langflow/inputs/constants.py b/src/packages/base/langflow/inputs/constants.py
similarity index 100%
rename from src/backend/base/langflow/inputs/constants.py
rename to src/packages/base/langflow/inputs/constants.py
diff --git a/src/backend/base/langflow/inputs/input_mixin.py b/src/packages/base/langflow/inputs/input_mixin.py
similarity index 100%
rename from src/backend/base/langflow/inputs/input_mixin.py
rename to src/packages/base/langflow/inputs/input_mixin.py
diff --git a/src/backend/base/langflow/inputs/inputs.py b/src/packages/base/langflow/inputs/inputs.py
similarity index 100%
rename from src/backend/base/langflow/inputs/inputs.py
rename to src/packages/base/langflow/inputs/inputs.py
diff --git a/src/backend/base/langflow/inputs/utils.py b/src/packages/base/langflow/inputs/utils.py
similarity index 100%
rename from src/backend/base/langflow/inputs/utils.py
rename to src/packages/base/langflow/inputs/utils.py
diff --git a/src/backend/base/langflow/inputs/validators.py b/src/packages/base/langflow/inputs/validators.py
similarity index 100%
rename from src/backend/base/langflow/inputs/validators.py
rename to src/packages/base/langflow/inputs/validators.py
diff --git a/src/backend/base/langflow/interface/__init__.py b/src/packages/base/langflow/interface/__init__.py
similarity index 100%
rename from src/backend/base/langflow/interface/__init__.py
rename to src/packages/base/langflow/interface/__init__.py
diff --git a/src/backend/base/langflow/interface/components.py b/src/packages/base/langflow/interface/components.py
similarity index 100%
rename from src/backend/base/langflow/interface/components.py
rename to src/packages/base/langflow/interface/components.py
diff --git a/src/backend/base/langflow/interface/importing/__init__.py b/src/packages/base/langflow/interface/importing/__init__.py
similarity index 100%
rename from src/backend/base/langflow/interface/importing/__init__.py
rename to src/packages/base/langflow/interface/importing/__init__.py
diff --git a/src/backend/base/langflow/interface/importing/utils.py b/src/packages/base/langflow/interface/importing/utils.py
similarity index 100%
rename from src/backend/base/langflow/interface/importing/utils.py
rename to src/packages/base/langflow/interface/importing/utils.py
diff --git a/src/backend/base/langflow/interface/initialize/__init__.py b/src/packages/base/langflow/interface/initialize/__init__.py
similarity index 100%
rename from src/backend/base/langflow/interface/initialize/__init__.py
rename to src/packages/base/langflow/interface/initialize/__init__.py
diff --git a/src/backend/base/langflow/interface/initialize/loading.py b/src/packages/base/langflow/interface/initialize/loading.py
similarity index 100%
rename from src/backend/base/langflow/interface/initialize/loading.py
rename to src/packages/base/langflow/interface/initialize/loading.py
diff --git a/src/backend/base/langflow/interface/listing.py b/src/packages/base/langflow/interface/listing.py
similarity index 100%
rename from src/backend/base/langflow/interface/listing.py
rename to src/packages/base/langflow/interface/listing.py
diff --git a/src/backend/base/langflow/interface/run.py b/src/packages/base/langflow/interface/run.py
similarity index 100%
rename from src/backend/base/langflow/interface/run.py
rename to src/packages/base/langflow/interface/run.py
diff --git a/src/backend/base/langflow/interface/utils.py b/src/packages/base/langflow/interface/utils.py
similarity index 100%
rename from src/backend/base/langflow/interface/utils.py
rename to src/packages/base/langflow/interface/utils.py
diff --git a/src/backend/base/langflow/io/__init__.py b/src/packages/base/langflow/io/__init__.py
similarity index 100%
rename from src/backend/base/langflow/io/__init__.py
rename to src/packages/base/langflow/io/__init__.py
diff --git a/src/backend/base/langflow/io/schema.py b/src/packages/base/langflow/io/schema.py
similarity index 100%
rename from src/backend/base/langflow/io/schema.py
rename to src/packages/base/langflow/io/schema.py
diff --git a/src/backend/base/langflow/langflow_launcher.py b/src/packages/base/langflow/langflow_launcher.py
similarity index 100%
rename from src/backend/base/langflow/langflow_launcher.py
rename to src/packages/base/langflow/langflow_launcher.py
diff --git a/src/backend/base/langflow/load/__init__.py b/src/packages/base/langflow/load/__init__.py
similarity index 100%
rename from src/backend/base/langflow/load/__init__.py
rename to src/packages/base/langflow/load/__init__.py
diff --git a/src/backend/base/langflow/load/utils.py b/src/packages/base/langflow/load/utils.py
similarity index 100%
rename from src/backend/base/langflow/load/utils.py
rename to src/packages/base/langflow/load/utils.py
diff --git a/src/backend/base/langflow/logging/__init__.py b/src/packages/base/langflow/logging/__init__.py
similarity index 100%
rename from src/backend/base/langflow/logging/__init__.py
rename to src/packages/base/langflow/logging/__init__.py
diff --git a/src/backend/base/langflow/logging/setup.py b/src/packages/base/langflow/logging/setup.py
similarity index 100%
rename from src/backend/base/langflow/logging/setup.py
rename to src/packages/base/langflow/logging/setup.py
diff --git a/src/backend/base/langflow/main.py b/src/packages/base/langflow/main.py
similarity index 100%
rename from src/backend/base/langflow/main.py
rename to src/packages/base/langflow/main.py
diff --git a/src/backend/base/langflow/memory.py b/src/packages/base/langflow/memory.py
similarity index 100%
rename from src/backend/base/langflow/memory.py
rename to src/packages/base/langflow/memory.py
diff --git a/src/backend/base/langflow/middleware.py b/src/packages/base/langflow/middleware.py
similarity index 100%
rename from src/backend/base/langflow/middleware.py
rename to src/packages/base/langflow/middleware.py
diff --git a/src/backend/base/langflow/processing/__init__.py b/src/packages/base/langflow/processing/__init__.py
similarity index 100%
rename from src/backend/base/langflow/processing/__init__.py
rename to src/packages/base/langflow/processing/__init__.py
diff --git a/src/backend/base/langflow/processing/process.py b/src/packages/base/langflow/processing/process.py
similarity index 100%
rename from src/backend/base/langflow/processing/process.py
rename to src/packages/base/langflow/processing/process.py
diff --git a/src/backend/base/langflow/py.typed b/src/packages/base/langflow/py.typed
similarity index 100%
rename from src/backend/base/langflow/py.typed
rename to src/packages/base/langflow/py.typed
diff --git a/src/backend/base/langflow/schema/__init__.py b/src/packages/base/langflow/schema/__init__.py
similarity index 100%
rename from src/backend/base/langflow/schema/__init__.py
rename to src/packages/base/langflow/schema/__init__.py
diff --git a/src/backend/base/langflow/schema/artifact.py b/src/packages/base/langflow/schema/artifact.py
similarity index 100%
rename from src/backend/base/langflow/schema/artifact.py
rename to src/packages/base/langflow/schema/artifact.py
diff --git a/src/backend/base/langflow/schema/content_block.py b/src/packages/base/langflow/schema/content_block.py
similarity index 100%
rename from src/backend/base/langflow/schema/content_block.py
rename to src/packages/base/langflow/schema/content_block.py
diff --git a/src/backend/base/langflow/schema/content_types.py b/src/packages/base/langflow/schema/content_types.py
similarity index 100%
rename from src/backend/base/langflow/schema/content_types.py
rename to src/packages/base/langflow/schema/content_types.py
diff --git a/src/backend/base/langflow/schema/data.py b/src/packages/base/langflow/schema/data.py
similarity index 100%
rename from src/backend/base/langflow/schema/data.py
rename to src/packages/base/langflow/schema/data.py
diff --git a/src/backend/base/langflow/schema/dataframe.py b/src/packages/base/langflow/schema/dataframe.py
similarity index 100%
rename from src/backend/base/langflow/schema/dataframe.py
rename to src/packages/base/langflow/schema/dataframe.py
diff --git a/src/backend/base/langflow/schema/dotdict.py b/src/packages/base/langflow/schema/dotdict.py
similarity index 100%
rename from src/backend/base/langflow/schema/dotdict.py
rename to src/packages/base/langflow/schema/dotdict.py
diff --git a/src/backend/base/langflow/schema/encoders.py b/src/packages/base/langflow/schema/encoders.py
similarity index 100%
rename from src/backend/base/langflow/schema/encoders.py
rename to src/packages/base/langflow/schema/encoders.py
diff --git a/src/backend/base/langflow/schema/graph.py b/src/packages/base/langflow/schema/graph.py
similarity index 100%
rename from src/backend/base/langflow/schema/graph.py
rename to src/packages/base/langflow/schema/graph.py
diff --git a/src/backend/base/langflow/schema/image.py b/src/packages/base/langflow/schema/image.py
similarity index 100%
rename from src/backend/base/langflow/schema/image.py
rename to src/packages/base/langflow/schema/image.py
diff --git a/src/backend/base/langflow/schema/log.py b/src/packages/base/langflow/schema/log.py
similarity index 100%
rename from src/backend/base/langflow/schema/log.py
rename to src/packages/base/langflow/schema/log.py
diff --git a/src/backend/base/langflow/schema/message.py b/src/packages/base/langflow/schema/message.py
similarity index 100%
rename from src/backend/base/langflow/schema/message.py
rename to src/packages/base/langflow/schema/message.py
diff --git a/src/backend/base/langflow/schema/playground_events.py b/src/packages/base/langflow/schema/playground_events.py
similarity index 100%
rename from src/backend/base/langflow/schema/playground_events.py
rename to src/packages/base/langflow/schema/playground_events.py
diff --git a/src/backend/base/langflow/schema/properties.py b/src/packages/base/langflow/schema/properties.py
similarity index 100%
rename from src/backend/base/langflow/schema/properties.py
rename to src/packages/base/langflow/schema/properties.py
diff --git a/src/backend/base/langflow/schema/schema.py b/src/packages/base/langflow/schema/schema.py
similarity index 100%
rename from src/backend/base/langflow/schema/schema.py
rename to src/packages/base/langflow/schema/schema.py
diff --git a/src/backend/base/langflow/schema/table.py b/src/packages/base/langflow/schema/table.py
similarity index 100%
rename from src/backend/base/langflow/schema/table.py
rename to src/packages/base/langflow/schema/table.py
diff --git a/src/backend/base/langflow/schema/validators.py b/src/packages/base/langflow/schema/validators.py
similarity index 100%
rename from src/backend/base/langflow/schema/validators.py
rename to src/packages/base/langflow/schema/validators.py
diff --git a/src/backend/base/langflow/serialization/__init__.py b/src/packages/base/langflow/serialization/__init__.py
similarity index 100%
rename from src/backend/base/langflow/serialization/__init__.py
rename to src/packages/base/langflow/serialization/__init__.py
diff --git a/src/backend/base/langflow/serialization/constants.py b/src/packages/base/langflow/serialization/constants.py
similarity index 100%
rename from src/backend/base/langflow/serialization/constants.py
rename to src/packages/base/langflow/serialization/constants.py
diff --git a/src/backend/base/langflow/serialization/serialization.py b/src/packages/base/langflow/serialization/serialization.py
similarity index 100%
rename from src/backend/base/langflow/serialization/serialization.py
rename to src/packages/base/langflow/serialization/serialization.py
diff --git a/src/backend/base/langflow/server.py b/src/packages/base/langflow/server.py
similarity index 100%
rename from src/backend/base/langflow/server.py
rename to src/packages/base/langflow/server.py
diff --git a/src/backend/base/langflow/services/__init__.py b/src/packages/base/langflow/services/__init__.py
similarity index 100%
rename from src/backend/base/langflow/services/__init__.py
rename to src/packages/base/langflow/services/__init__.py
diff --git a/src/backend/base/langflow/services/auth/__init__.py b/src/packages/base/langflow/services/auth/__init__.py
similarity index 100%
rename from src/backend/base/langflow/services/auth/__init__.py
rename to src/packages/base/langflow/services/auth/__init__.py
diff --git a/src/backend/base/langflow/services/auth/factory.py b/src/packages/base/langflow/services/auth/factory.py
similarity index 100%
rename from src/backend/base/langflow/services/auth/factory.py
rename to src/packages/base/langflow/services/auth/factory.py
diff --git a/src/backend/base/langflow/services/auth/mcp_encryption.py b/src/packages/base/langflow/services/auth/mcp_encryption.py
similarity index 100%
rename from src/backend/base/langflow/services/auth/mcp_encryption.py
rename to src/packages/base/langflow/services/auth/mcp_encryption.py
diff --git a/src/backend/base/langflow/services/auth/service.py b/src/packages/base/langflow/services/auth/service.py
similarity index 100%
rename from src/backend/base/langflow/services/auth/service.py
rename to src/packages/base/langflow/services/auth/service.py
diff --git a/src/backend/base/langflow/services/auth/utils.py b/src/packages/base/langflow/services/auth/utils.py
similarity index 100%
rename from src/backend/base/langflow/services/auth/utils.py
rename to src/packages/base/langflow/services/auth/utils.py
diff --git a/src/backend/base/langflow/services/base.py b/src/packages/base/langflow/services/base.py
similarity index 100%
rename from src/backend/base/langflow/services/base.py
rename to src/packages/base/langflow/services/base.py
diff --git a/src/backend/base/langflow/services/cache/__init__.py b/src/packages/base/langflow/services/cache/__init__.py
similarity index 100%
rename from src/backend/base/langflow/services/cache/__init__.py
rename to src/packages/base/langflow/services/cache/__init__.py
diff --git a/src/backend/base/langflow/services/cache/base.py b/src/packages/base/langflow/services/cache/base.py
similarity index 100%
rename from src/backend/base/langflow/services/cache/base.py
rename to src/packages/base/langflow/services/cache/base.py
diff --git a/src/backend/base/langflow/services/cache/disk.py b/src/packages/base/langflow/services/cache/disk.py
similarity index 100%
rename from src/backend/base/langflow/services/cache/disk.py
rename to src/packages/base/langflow/services/cache/disk.py
diff --git a/src/backend/base/langflow/services/cache/factory.py b/src/packages/base/langflow/services/cache/factory.py
similarity index 100%
rename from src/backend/base/langflow/services/cache/factory.py
rename to src/packages/base/langflow/services/cache/factory.py
diff --git a/src/backend/base/langflow/services/cache/service.py b/src/packages/base/langflow/services/cache/service.py
similarity index 100%
rename from src/backend/base/langflow/services/cache/service.py
rename to src/packages/base/langflow/services/cache/service.py
diff --git a/src/backend/base/langflow/services/cache/utils.py b/src/packages/base/langflow/services/cache/utils.py
similarity index 100%
rename from src/backend/base/langflow/services/cache/utils.py
rename to src/packages/base/langflow/services/cache/utils.py
diff --git a/src/backend/base/langflow/services/chat/__init__.py b/src/packages/base/langflow/services/chat/__init__.py
similarity index 100%
rename from src/backend/base/langflow/services/chat/__init__.py
rename to src/packages/base/langflow/services/chat/__init__.py
diff --git a/src/backend/base/langflow/services/chat/cache.py b/src/packages/base/langflow/services/chat/cache.py
similarity index 100%
rename from src/backend/base/langflow/services/chat/cache.py
rename to src/packages/base/langflow/services/chat/cache.py
diff --git a/src/backend/base/langflow/services/chat/factory.py b/src/packages/base/langflow/services/chat/factory.py
similarity index 100%
rename from src/backend/base/langflow/services/chat/factory.py
rename to src/packages/base/langflow/services/chat/factory.py
diff --git a/src/backend/base/langflow/services/chat/schema.py b/src/packages/base/langflow/services/chat/schema.py
similarity index 100%
rename from src/backend/base/langflow/services/chat/schema.py
rename to src/packages/base/langflow/services/chat/schema.py
diff --git a/src/backend/base/langflow/services/chat/service.py b/src/packages/base/langflow/services/chat/service.py
similarity index 100%
rename from src/backend/base/langflow/services/chat/service.py
rename to src/packages/base/langflow/services/chat/service.py
diff --git a/src/backend/base/langflow/services/database/__init__.py b/src/packages/base/langflow/services/database/__init__.py
similarity index 100%
rename from src/backend/base/langflow/services/database/__init__.py
rename to src/packages/base/langflow/services/database/__init__.py
diff --git a/src/backend/base/langflow/services/database/factory.py b/src/packages/base/langflow/services/database/factory.py
similarity index 100%
rename from src/backend/base/langflow/services/database/factory.py
rename to src/packages/base/langflow/services/database/factory.py
diff --git a/src/backend/base/langflow/services/database/models/__init__.py b/src/packages/base/langflow/services/database/models/__init__.py
similarity index 100%
rename from src/backend/base/langflow/services/database/models/__init__.py
rename to src/packages/base/langflow/services/database/models/__init__.py
diff --git a/src/backend/base/langflow/services/database/models/api_key/__init__.py b/src/packages/base/langflow/services/database/models/api_key/__init__.py
similarity index 100%
rename from src/backend/base/langflow/services/database/models/api_key/__init__.py
rename to src/packages/base/langflow/services/database/models/api_key/__init__.py
diff --git a/src/backend/base/langflow/services/database/models/api_key/crud.py b/src/packages/base/langflow/services/database/models/api_key/crud.py
similarity index 100%
rename from src/backend/base/langflow/services/database/models/api_key/crud.py
rename to src/packages/base/langflow/services/database/models/api_key/crud.py
diff --git a/src/backend/base/langflow/services/database/models/api_key/model.py b/src/packages/base/langflow/services/database/models/api_key/model.py
similarity index 100%
rename from src/backend/base/langflow/services/database/models/api_key/model.py
rename to src/packages/base/langflow/services/database/models/api_key/model.py
diff --git a/src/backend/base/langflow/services/database/models/base.py b/src/packages/base/langflow/services/database/models/base.py
similarity index 100%
rename from src/backend/base/langflow/services/database/models/base.py
rename to src/packages/base/langflow/services/database/models/base.py
diff --git a/src/backend/base/langflow/services/database/models/file/__init__.py b/src/packages/base/langflow/services/database/models/file/__init__.py
similarity index 100%
rename from src/backend/base/langflow/services/database/models/file/__init__.py
rename to src/packages/base/langflow/services/database/models/file/__init__.py
diff --git a/src/backend/base/langflow/services/database/models/file/crud.py b/src/packages/base/langflow/services/database/models/file/crud.py
similarity index 100%
rename from src/backend/base/langflow/services/database/models/file/crud.py
rename to src/packages/base/langflow/services/database/models/file/crud.py
diff --git a/src/backend/base/langflow/services/database/models/file/model.py b/src/packages/base/langflow/services/database/models/file/model.py
similarity index 100%
rename from src/backend/base/langflow/services/database/models/file/model.py
rename to src/packages/base/langflow/services/database/models/file/model.py
diff --git a/src/backend/base/langflow/services/database/models/flow/__init__.py b/src/packages/base/langflow/services/database/models/flow/__init__.py
similarity index 100%
rename from src/backend/base/langflow/services/database/models/flow/__init__.py
rename to src/packages/base/langflow/services/database/models/flow/__init__.py
diff --git a/src/backend/base/langflow/services/database/models/flow/model.py b/src/packages/base/langflow/services/database/models/flow/model.py
similarity index 100%
rename from src/backend/base/langflow/services/database/models/flow/model.py
rename to src/packages/base/langflow/services/database/models/flow/model.py
diff --git a/src/backend/base/langflow/services/database/models/flow/schema.py b/src/packages/base/langflow/services/database/models/flow/schema.py
similarity index 100%
rename from src/backend/base/langflow/services/database/models/flow/schema.py
rename to src/packages/base/langflow/services/database/models/flow/schema.py
diff --git a/src/backend/base/langflow/services/database/models/flow/utils.py b/src/packages/base/langflow/services/database/models/flow/utils.py
similarity index 100%
rename from src/backend/base/langflow/services/database/models/flow/utils.py
rename to src/packages/base/langflow/services/database/models/flow/utils.py
diff --git a/src/backend/base/langflow/services/database/models/folder/__init__.py b/src/packages/base/langflow/services/database/models/folder/__init__.py
similarity index 100%
rename from src/backend/base/langflow/services/database/models/folder/__init__.py
rename to src/packages/base/langflow/services/database/models/folder/__init__.py
diff --git a/src/backend/base/langflow/services/database/models/folder/constants.py b/src/packages/base/langflow/services/database/models/folder/constants.py
similarity index 100%
rename from src/backend/base/langflow/services/database/models/folder/constants.py
rename to src/packages/base/langflow/services/database/models/folder/constants.py
diff --git a/src/backend/base/langflow/services/database/models/folder/model.py b/src/packages/base/langflow/services/database/models/folder/model.py
similarity index 100%
rename from src/backend/base/langflow/services/database/models/folder/model.py
rename to src/packages/base/langflow/services/database/models/folder/model.py
diff --git a/src/backend/base/langflow/services/database/models/folder/pagination_model.py b/src/packages/base/langflow/services/database/models/folder/pagination_model.py
similarity index 100%
rename from src/backend/base/langflow/services/database/models/folder/pagination_model.py
rename to src/packages/base/langflow/services/database/models/folder/pagination_model.py
diff --git a/src/backend/base/langflow/services/database/models/folder/utils.py b/src/packages/base/langflow/services/database/models/folder/utils.py
similarity index 100%
rename from src/backend/base/langflow/services/database/models/folder/utils.py
rename to src/packages/base/langflow/services/database/models/folder/utils.py
diff --git a/src/backend/base/langflow/services/database/models/message/__init__.py b/src/packages/base/langflow/services/database/models/message/__init__.py
similarity index 100%
rename from src/backend/base/langflow/services/database/models/message/__init__.py
rename to src/packages/base/langflow/services/database/models/message/__init__.py
diff --git a/src/backend/base/langflow/services/database/models/message/crud.py b/src/packages/base/langflow/services/database/models/message/crud.py
similarity index 100%
rename from src/backend/base/langflow/services/database/models/message/crud.py
rename to src/packages/base/langflow/services/database/models/message/crud.py
diff --git a/src/backend/base/langflow/services/database/models/message/model.py b/src/packages/base/langflow/services/database/models/message/model.py
similarity index 100%
rename from src/backend/base/langflow/services/database/models/message/model.py
rename to src/packages/base/langflow/services/database/models/message/model.py
diff --git a/src/backend/base/langflow/services/database/models/transactions/__init__.py b/src/packages/base/langflow/services/database/models/transactions/__init__.py
similarity index 100%
rename from src/backend/base/langflow/services/database/models/transactions/__init__.py
rename to src/packages/base/langflow/services/database/models/transactions/__init__.py
diff --git a/src/backend/base/langflow/services/database/models/transactions/crud.py b/src/packages/base/langflow/services/database/models/transactions/crud.py
similarity index 100%
rename from src/backend/base/langflow/services/database/models/transactions/crud.py
rename to src/packages/base/langflow/services/database/models/transactions/crud.py
diff --git a/src/backend/base/langflow/services/database/models/transactions/model.py b/src/packages/base/langflow/services/database/models/transactions/model.py
similarity index 100%
rename from src/backend/base/langflow/services/database/models/transactions/model.py
rename to src/packages/base/langflow/services/database/models/transactions/model.py
diff --git a/src/backend/base/langflow/services/database/models/user/__init__.py b/src/packages/base/langflow/services/database/models/user/__init__.py
similarity index 100%
rename from src/backend/base/langflow/services/database/models/user/__init__.py
rename to src/packages/base/langflow/services/database/models/user/__init__.py
diff --git a/src/backend/base/langflow/services/database/models/user/crud.py b/src/packages/base/langflow/services/database/models/user/crud.py
similarity index 100%
rename from src/backend/base/langflow/services/database/models/user/crud.py
rename to src/packages/base/langflow/services/database/models/user/crud.py
diff --git a/src/backend/base/langflow/services/database/models/user/model.py b/src/packages/base/langflow/services/database/models/user/model.py
similarity index 100%
rename from src/backend/base/langflow/services/database/models/user/model.py
rename to src/packages/base/langflow/services/database/models/user/model.py
diff --git a/src/backend/base/langflow/services/database/models/variable/__init__.py b/src/packages/base/langflow/services/database/models/variable/__init__.py
similarity index 100%
rename from src/backend/base/langflow/services/database/models/variable/__init__.py
rename to src/packages/base/langflow/services/database/models/variable/__init__.py
diff --git a/src/backend/base/langflow/services/database/models/variable/model.py b/src/packages/base/langflow/services/database/models/variable/model.py
similarity index 100%
rename from src/backend/base/langflow/services/database/models/variable/model.py
rename to src/packages/base/langflow/services/database/models/variable/model.py
diff --git a/src/backend/base/langflow/services/database/models/vertex_builds/__init__.py b/src/packages/base/langflow/services/database/models/vertex_builds/__init__.py
similarity index 100%
rename from src/backend/base/langflow/services/database/models/vertex_builds/__init__.py
rename to src/packages/base/langflow/services/database/models/vertex_builds/__init__.py
diff --git a/src/backend/base/langflow/services/database/models/vertex_builds/crud.py b/src/packages/base/langflow/services/database/models/vertex_builds/crud.py
similarity index 100%
rename from src/backend/base/langflow/services/database/models/vertex_builds/crud.py
rename to src/packages/base/langflow/services/database/models/vertex_builds/crud.py
diff --git a/src/backend/base/langflow/services/database/models/vertex_builds/model.py b/src/packages/base/langflow/services/database/models/vertex_builds/model.py
similarity index 100%
rename from src/backend/base/langflow/services/database/models/vertex_builds/model.py
rename to src/packages/base/langflow/services/database/models/vertex_builds/model.py
diff --git a/src/backend/base/langflow/services/database/service.py b/src/packages/base/langflow/services/database/service.py
similarity index 100%
rename from src/backend/base/langflow/services/database/service.py
rename to src/packages/base/langflow/services/database/service.py
diff --git a/src/backend/base/langflow/services/database/session.py b/src/packages/base/langflow/services/database/session.py
similarity index 100%
rename from src/backend/base/langflow/services/database/session.py
rename to src/packages/base/langflow/services/database/session.py
diff --git a/src/backend/base/langflow/services/database/utils.py b/src/packages/base/langflow/services/database/utils.py
similarity index 100%
rename from src/backend/base/langflow/services/database/utils.py
rename to src/packages/base/langflow/services/database/utils.py
diff --git a/src/backend/base/langflow/services/deps.py b/src/packages/base/langflow/services/deps.py
similarity index 100%
rename from src/backend/base/langflow/services/deps.py
rename to src/packages/base/langflow/services/deps.py
diff --git a/src/backend/base/langflow/services/enhanced_manager.py b/src/packages/base/langflow/services/enhanced_manager.py
similarity index 100%
rename from src/backend/base/langflow/services/enhanced_manager.py
rename to src/packages/base/langflow/services/enhanced_manager.py
diff --git a/src/backend/base/langflow/services/factory.py b/src/packages/base/langflow/services/factory.py
similarity index 100%
rename from src/backend/base/langflow/services/factory.py
rename to src/packages/base/langflow/services/factory.py
diff --git a/src/backend/base/langflow/services/flow/__init__.py b/src/packages/base/langflow/services/flow/__init__.py
similarity index 100%
rename from src/backend/base/langflow/services/flow/__init__.py
rename to src/packages/base/langflow/services/flow/__init__.py
diff --git a/src/backend/base/langflow/services/flow/flow_runner.py b/src/packages/base/langflow/services/flow/flow_runner.py
similarity index 100%
rename from src/backend/base/langflow/services/flow/flow_runner.py
rename to src/packages/base/langflow/services/flow/flow_runner.py
diff --git a/src/backend/base/langflow/services/job_queue/__init__.py b/src/packages/base/langflow/services/job_queue/__init__.py
similarity index 100%
rename from src/backend/base/langflow/services/job_queue/__init__.py
rename to src/packages/base/langflow/services/job_queue/__init__.py
diff --git a/src/backend/base/langflow/services/job_queue/factory.py b/src/packages/base/langflow/services/job_queue/factory.py
similarity index 100%
rename from src/backend/base/langflow/services/job_queue/factory.py
rename to src/packages/base/langflow/services/job_queue/factory.py
diff --git a/src/backend/base/langflow/services/job_queue/service.py b/src/packages/base/langflow/services/job_queue/service.py
similarity index 100%
rename from src/backend/base/langflow/services/job_queue/service.py
rename to src/packages/base/langflow/services/job_queue/service.py
diff --git a/src/backend/base/langflow/services/manager.py b/src/packages/base/langflow/services/manager.py
similarity index 100%
rename from src/backend/base/langflow/services/manager.py
rename to src/packages/base/langflow/services/manager.py
diff --git a/src/backend/base/langflow/services/schema.py b/src/packages/base/langflow/services/schema.py
similarity index 100%
rename from src/backend/base/langflow/services/schema.py
rename to src/packages/base/langflow/services/schema.py
diff --git a/src/backend/base/langflow/services/session/__init__.py b/src/packages/base/langflow/services/session/__init__.py
similarity index 100%
rename from src/backend/base/langflow/services/session/__init__.py
rename to src/packages/base/langflow/services/session/__init__.py
diff --git a/src/backend/base/langflow/services/session/factory.py b/src/packages/base/langflow/services/session/factory.py
similarity index 100%
rename from src/backend/base/langflow/services/session/factory.py
rename to src/packages/base/langflow/services/session/factory.py
diff --git a/src/backend/base/langflow/services/session/service.py b/src/packages/base/langflow/services/session/service.py
similarity index 100%
rename from src/backend/base/langflow/services/session/service.py
rename to src/packages/base/langflow/services/session/service.py
diff --git a/src/backend/base/langflow/services/session/utils.py b/src/packages/base/langflow/services/session/utils.py
similarity index 100%
rename from src/backend/base/langflow/services/session/utils.py
rename to src/packages/base/langflow/services/session/utils.py
diff --git a/src/backend/base/langflow/services/settings/__init__.py b/src/packages/base/langflow/services/settings/__init__.py
similarity index 100%
rename from src/backend/base/langflow/services/settings/__init__.py
rename to src/packages/base/langflow/services/settings/__init__.py
diff --git a/src/backend/base/langflow/services/settings/base.py b/src/packages/base/langflow/services/settings/base.py
similarity index 100%
rename from src/backend/base/langflow/services/settings/base.py
rename to src/packages/base/langflow/services/settings/base.py
diff --git a/src/backend/base/langflow/services/settings/factory.py b/src/packages/base/langflow/services/settings/factory.py
similarity index 100%
rename from src/backend/base/langflow/services/settings/factory.py
rename to src/packages/base/langflow/services/settings/factory.py
diff --git a/src/backend/base/langflow/services/settings/feature_flags.py b/src/packages/base/langflow/services/settings/feature_flags.py
similarity index 100%
rename from src/backend/base/langflow/services/settings/feature_flags.py
rename to src/packages/base/langflow/services/settings/feature_flags.py
diff --git a/src/backend/base/langflow/services/settings/service.py b/src/packages/base/langflow/services/settings/service.py
similarity index 100%
rename from src/backend/base/langflow/services/settings/service.py
rename to src/packages/base/langflow/services/settings/service.py
diff --git a/src/backend/base/langflow/services/shared_component_cache/__init__.py b/src/packages/base/langflow/services/shared_component_cache/__init__.py
similarity index 100%
rename from src/backend/base/langflow/services/shared_component_cache/__init__.py
rename to src/packages/base/langflow/services/shared_component_cache/__init__.py
diff --git a/src/backend/base/langflow/services/shared_component_cache/factory.py b/src/packages/base/langflow/services/shared_component_cache/factory.py
similarity index 100%
rename from src/backend/base/langflow/services/shared_component_cache/factory.py
rename to src/packages/base/langflow/services/shared_component_cache/factory.py
diff --git a/src/backend/base/langflow/services/shared_component_cache/service.py b/src/packages/base/langflow/services/shared_component_cache/service.py
similarity index 100%
rename from src/backend/base/langflow/services/shared_component_cache/service.py
rename to src/packages/base/langflow/services/shared_component_cache/service.py
diff --git a/src/backend/base/langflow/services/socket/__init__.py b/src/packages/base/langflow/services/socket/__init__.py
similarity index 100%
rename from src/backend/base/langflow/services/socket/__init__.py
rename to src/packages/base/langflow/services/socket/__init__.py
diff --git a/src/backend/base/langflow/services/socket/factory.py b/src/packages/base/langflow/services/socket/factory.py
similarity index 100%
rename from src/backend/base/langflow/services/socket/factory.py
rename to src/packages/base/langflow/services/socket/factory.py
diff --git a/src/backend/base/langflow/services/socket/service.py b/src/packages/base/langflow/services/socket/service.py
similarity index 100%
rename from src/backend/base/langflow/services/socket/service.py
rename to src/packages/base/langflow/services/socket/service.py
diff --git a/src/backend/base/langflow/services/socket/utils.py b/src/packages/base/langflow/services/socket/utils.py
similarity index 100%
rename from src/backend/base/langflow/services/socket/utils.py
rename to src/packages/base/langflow/services/socket/utils.py
diff --git a/src/backend/base/langflow/services/state/__init__.py b/src/packages/base/langflow/services/state/__init__.py
similarity index 100%
rename from src/backend/base/langflow/services/state/__init__.py
rename to src/packages/base/langflow/services/state/__init__.py
diff --git a/src/backend/base/langflow/services/state/factory.py b/src/packages/base/langflow/services/state/factory.py
similarity index 100%
rename from src/backend/base/langflow/services/state/factory.py
rename to src/packages/base/langflow/services/state/factory.py
diff --git a/src/backend/base/langflow/services/state/service.py b/src/packages/base/langflow/services/state/service.py
similarity index 100%
rename from src/backend/base/langflow/services/state/service.py
rename to src/packages/base/langflow/services/state/service.py
diff --git a/src/backend/base/langflow/services/storage/__init__.py b/src/packages/base/langflow/services/storage/__init__.py
similarity index 100%
rename from src/backend/base/langflow/services/storage/__init__.py
rename to src/packages/base/langflow/services/storage/__init__.py
diff --git a/src/backend/base/langflow/services/storage/constants.py b/src/packages/base/langflow/services/storage/constants.py
similarity index 100%
rename from src/backend/base/langflow/services/storage/constants.py
rename to src/packages/base/langflow/services/storage/constants.py
diff --git a/src/backend/base/langflow/services/storage/factory.py b/src/packages/base/langflow/services/storage/factory.py
similarity index 100%
rename from src/backend/base/langflow/services/storage/factory.py
rename to src/packages/base/langflow/services/storage/factory.py
diff --git a/src/backend/base/langflow/services/storage/local.py b/src/packages/base/langflow/services/storage/local.py
similarity index 100%
rename from src/backend/base/langflow/services/storage/local.py
rename to src/packages/base/langflow/services/storage/local.py
diff --git a/src/backend/base/langflow/services/storage/s3.py b/src/packages/base/langflow/services/storage/s3.py
similarity index 100%
rename from src/backend/base/langflow/services/storage/s3.py
rename to src/packages/base/langflow/services/storage/s3.py
diff --git a/src/backend/base/langflow/services/storage/service.py b/src/packages/base/langflow/services/storage/service.py
similarity index 100%
rename from src/backend/base/langflow/services/storage/service.py
rename to src/packages/base/langflow/services/storage/service.py
diff --git a/src/backend/base/langflow/services/storage/utils.py b/src/packages/base/langflow/services/storage/utils.py
similarity index 100%
rename from src/backend/base/langflow/services/storage/utils.py
rename to src/packages/base/langflow/services/storage/utils.py
diff --git a/src/backend/base/langflow/services/store/__init__.py b/src/packages/base/langflow/services/store/__init__.py
similarity index 100%
rename from src/backend/base/langflow/services/store/__init__.py
rename to src/packages/base/langflow/services/store/__init__.py
diff --git a/src/backend/base/langflow/services/store/exceptions.py b/src/packages/base/langflow/services/store/exceptions.py
similarity index 100%
rename from src/backend/base/langflow/services/store/exceptions.py
rename to src/packages/base/langflow/services/store/exceptions.py
diff --git a/src/backend/base/langflow/services/store/factory.py b/src/packages/base/langflow/services/store/factory.py
similarity index 100%
rename from src/backend/base/langflow/services/store/factory.py
rename to src/packages/base/langflow/services/store/factory.py
diff --git a/src/backend/base/langflow/services/store/schema.py b/src/packages/base/langflow/services/store/schema.py
similarity index 100%
rename from src/backend/base/langflow/services/store/schema.py
rename to src/packages/base/langflow/services/store/schema.py
diff --git a/src/backend/base/langflow/services/store/service.py b/src/packages/base/langflow/services/store/service.py
similarity index 100%
rename from src/backend/base/langflow/services/store/service.py
rename to src/packages/base/langflow/services/store/service.py
diff --git a/src/backend/base/langflow/services/store/utils.py b/src/packages/base/langflow/services/store/utils.py
similarity index 100%
rename from src/backend/base/langflow/services/store/utils.py
rename to src/packages/base/langflow/services/store/utils.py
diff --git a/src/backend/base/langflow/services/task/__init__.py b/src/packages/base/langflow/services/task/__init__.py
similarity index 100%
rename from src/backend/base/langflow/services/task/__init__.py
rename to src/packages/base/langflow/services/task/__init__.py
diff --git a/src/backend/base/langflow/services/task/backends/__init__.py b/src/packages/base/langflow/services/task/backends/__init__.py
similarity index 100%
rename from src/backend/base/langflow/services/task/backends/__init__.py
rename to src/packages/base/langflow/services/task/backends/__init__.py
diff --git a/src/backend/base/langflow/services/task/backends/anyio.py b/src/packages/base/langflow/services/task/backends/anyio.py
similarity index 100%
rename from src/backend/base/langflow/services/task/backends/anyio.py
rename to src/packages/base/langflow/services/task/backends/anyio.py
diff --git a/src/backend/base/langflow/services/task/backends/base.py b/src/packages/base/langflow/services/task/backends/base.py
similarity index 100%
rename from src/backend/base/langflow/services/task/backends/base.py
rename to src/packages/base/langflow/services/task/backends/base.py
diff --git a/src/backend/base/langflow/services/task/backends/celery.py b/src/packages/base/langflow/services/task/backends/celery.py
similarity index 100%
rename from src/backend/base/langflow/services/task/backends/celery.py
rename to src/packages/base/langflow/services/task/backends/celery.py
diff --git a/src/backend/base/langflow/services/task/factory.py b/src/packages/base/langflow/services/task/factory.py
similarity index 100%
rename from src/backend/base/langflow/services/task/factory.py
rename to src/packages/base/langflow/services/task/factory.py
diff --git a/src/backend/base/langflow/services/task/service.py b/src/packages/base/langflow/services/task/service.py
similarity index 100%
rename from src/backend/base/langflow/services/task/service.py
rename to src/packages/base/langflow/services/task/service.py
diff --git a/src/backend/base/langflow/services/task/temp_flow_cleanup.py b/src/packages/base/langflow/services/task/temp_flow_cleanup.py
similarity index 100%
rename from src/backend/base/langflow/services/task/temp_flow_cleanup.py
rename to src/packages/base/langflow/services/task/temp_flow_cleanup.py
diff --git a/src/backend/base/langflow/services/task/utils.py b/src/packages/base/langflow/services/task/utils.py
similarity index 100%
rename from src/backend/base/langflow/services/task/utils.py
rename to src/packages/base/langflow/services/task/utils.py
diff --git a/src/backend/base/langflow/services/telemetry/__init__.py b/src/packages/base/langflow/services/telemetry/__init__.py
similarity index 100%
rename from src/backend/base/langflow/services/telemetry/__init__.py
rename to src/packages/base/langflow/services/telemetry/__init__.py
diff --git a/src/backend/base/langflow/services/telemetry/factory.py b/src/packages/base/langflow/services/telemetry/factory.py
similarity index 100%
rename from src/backend/base/langflow/services/telemetry/factory.py
rename to src/packages/base/langflow/services/telemetry/factory.py
diff --git a/src/backend/base/langflow/services/telemetry/opentelemetry.py b/src/packages/base/langflow/services/telemetry/opentelemetry.py
similarity index 100%
rename from src/backend/base/langflow/services/telemetry/opentelemetry.py
rename to src/packages/base/langflow/services/telemetry/opentelemetry.py
diff --git a/src/backend/base/langflow/services/telemetry/schema.py b/src/packages/base/langflow/services/telemetry/schema.py
similarity index 100%
rename from src/backend/base/langflow/services/telemetry/schema.py
rename to src/packages/base/langflow/services/telemetry/schema.py
diff --git a/src/backend/base/langflow/services/telemetry/service.py b/src/packages/base/langflow/services/telemetry/service.py
similarity index 100%
rename from src/backend/base/langflow/services/telemetry/service.py
rename to src/packages/base/langflow/services/telemetry/service.py
diff --git a/src/backend/base/langflow/services/tracing/__init__.py b/src/packages/base/langflow/services/tracing/__init__.py
similarity index 100%
rename from src/backend/base/langflow/services/tracing/__init__.py
rename to src/packages/base/langflow/services/tracing/__init__.py
diff --git a/src/backend/base/langflow/services/tracing/arize_phoenix.py b/src/packages/base/langflow/services/tracing/arize_phoenix.py
similarity index 100%
rename from src/backend/base/langflow/services/tracing/arize_phoenix.py
rename to src/packages/base/langflow/services/tracing/arize_phoenix.py
diff --git a/src/backend/base/langflow/services/tracing/base.py b/src/packages/base/langflow/services/tracing/base.py
similarity index 100%
rename from src/backend/base/langflow/services/tracing/base.py
rename to src/packages/base/langflow/services/tracing/base.py
diff --git a/src/backend/base/langflow/services/tracing/factory.py b/src/packages/base/langflow/services/tracing/factory.py
similarity index 100%
rename from src/backend/base/langflow/services/tracing/factory.py
rename to src/packages/base/langflow/services/tracing/factory.py
diff --git a/src/backend/base/langflow/services/tracing/langfuse.py b/src/packages/base/langflow/services/tracing/langfuse.py
similarity index 100%
rename from src/backend/base/langflow/services/tracing/langfuse.py
rename to src/packages/base/langflow/services/tracing/langfuse.py
diff --git a/src/backend/base/langflow/services/tracing/langsmith.py b/src/packages/base/langflow/services/tracing/langsmith.py
similarity index 100%
rename from src/backend/base/langflow/services/tracing/langsmith.py
rename to src/packages/base/langflow/services/tracing/langsmith.py
diff --git a/src/backend/base/langflow/services/tracing/langwatch.py b/src/packages/base/langflow/services/tracing/langwatch.py
similarity index 100%
rename from src/backend/base/langflow/services/tracing/langwatch.py
rename to src/packages/base/langflow/services/tracing/langwatch.py
diff --git a/src/backend/base/langflow/services/tracing/opik.py b/src/packages/base/langflow/services/tracing/opik.py
similarity index 100%
rename from src/backend/base/langflow/services/tracing/opik.py
rename to src/packages/base/langflow/services/tracing/opik.py
diff --git a/src/backend/base/langflow/services/tracing/schema.py b/src/packages/base/langflow/services/tracing/schema.py
similarity index 100%
rename from src/backend/base/langflow/services/tracing/schema.py
rename to src/packages/base/langflow/services/tracing/schema.py
diff --git a/src/backend/base/langflow/services/tracing/service.py b/src/packages/base/langflow/services/tracing/service.py
similarity index 100%
rename from src/backend/base/langflow/services/tracing/service.py
rename to src/packages/base/langflow/services/tracing/service.py
diff --git a/src/backend/base/langflow/services/tracing/traceloop.py b/src/packages/base/langflow/services/tracing/traceloop.py
similarity index 100%
rename from src/backend/base/langflow/services/tracing/traceloop.py
rename to src/packages/base/langflow/services/tracing/traceloop.py
diff --git a/src/backend/base/langflow/services/tracing/utils.py b/src/packages/base/langflow/services/tracing/utils.py
similarity index 100%
rename from src/backend/base/langflow/services/tracing/utils.py
rename to src/packages/base/langflow/services/tracing/utils.py
diff --git a/src/backend/base/langflow/services/utils.py b/src/packages/base/langflow/services/utils.py
similarity index 100%
rename from src/backend/base/langflow/services/utils.py
rename to src/packages/base/langflow/services/utils.py
diff --git a/src/backend/base/langflow/services/variable/__init__.py b/src/packages/base/langflow/services/variable/__init__.py
similarity index 100%
rename from src/backend/base/langflow/services/variable/__init__.py
rename to src/packages/base/langflow/services/variable/__init__.py
diff --git a/src/backend/base/langflow/services/variable/base.py b/src/packages/base/langflow/services/variable/base.py
similarity index 100%
rename from src/backend/base/langflow/services/variable/base.py
rename to src/packages/base/langflow/services/variable/base.py
diff --git a/src/backend/base/langflow/services/variable/constants.py b/src/packages/base/langflow/services/variable/constants.py
similarity index 100%
rename from src/backend/base/langflow/services/variable/constants.py
rename to src/packages/base/langflow/services/variable/constants.py
diff --git a/src/backend/base/langflow/services/variable/factory.py b/src/packages/base/langflow/services/variable/factory.py
similarity index 100%
rename from src/backend/base/langflow/services/variable/factory.py
rename to src/packages/base/langflow/services/variable/factory.py
diff --git a/src/backend/base/langflow/services/variable/kubernetes.py b/src/packages/base/langflow/services/variable/kubernetes.py
similarity index 100%
rename from src/backend/base/langflow/services/variable/kubernetes.py
rename to src/packages/base/langflow/services/variable/kubernetes.py
diff --git a/src/backend/base/langflow/services/variable/kubernetes_secrets.py b/src/packages/base/langflow/services/variable/kubernetes_secrets.py
similarity index 100%
rename from src/backend/base/langflow/services/variable/kubernetes_secrets.py
rename to src/packages/base/langflow/services/variable/kubernetes_secrets.py
diff --git a/src/backend/base/langflow/services/variable/service.py b/src/packages/base/langflow/services/variable/service.py
similarity index 100%
rename from src/backend/base/langflow/services/variable/service.py
rename to src/packages/base/langflow/services/variable/service.py
diff --git a/src/backend/base/langflow/settings.py b/src/packages/base/langflow/settings.py
similarity index 100%
rename from src/backend/base/langflow/settings.py
rename to src/packages/base/langflow/settings.py
diff --git a/src/backend/base/langflow/template/__init__.py b/src/packages/base/langflow/template/__init__.py
similarity index 100%
rename from src/backend/base/langflow/template/__init__.py
rename to src/packages/base/langflow/template/__init__.py
diff --git a/src/backend/base/langflow/template/field/__init__.py b/src/packages/base/langflow/template/field/__init__.py
similarity index 100%
rename from src/backend/base/langflow/template/field/__init__.py
rename to src/packages/base/langflow/template/field/__init__.py
diff --git a/src/backend/base/langflow/template/field/base.py b/src/packages/base/langflow/template/field/base.py
similarity index 100%
rename from src/backend/base/langflow/template/field/base.py
rename to src/packages/base/langflow/template/field/base.py
diff --git a/src/backend/base/langflow/template/frontend_node.py b/src/packages/base/langflow/template/frontend_node.py
similarity index 100%
rename from src/backend/base/langflow/template/frontend_node.py
rename to src/packages/base/langflow/template/frontend_node.py
diff --git a/src/backend/base/langflow/template/utils.py b/src/packages/base/langflow/template/utils.py
similarity index 100%
rename from src/backend/base/langflow/template/utils.py
rename to src/packages/base/langflow/template/utils.py
diff --git a/src/backend/base/langflow/type_extraction/__init__.py b/src/packages/base/langflow/type_extraction/__init__.py
similarity index 100%
rename from src/backend/base/langflow/type_extraction/__init__.py
rename to src/packages/base/langflow/type_extraction/__init__.py
diff --git a/src/backend/base/langflow/type_extraction/type_extraction.py b/src/packages/base/langflow/type_extraction/type_extraction.py
similarity index 100%
rename from src/backend/base/langflow/type_extraction/type_extraction.py
rename to src/packages/base/langflow/type_extraction/type_extraction.py
diff --git a/src/backend/base/langflow/utils/__init__.py b/src/packages/base/langflow/utils/__init__.py
similarity index 100%
rename from src/backend/base/langflow/utils/__init__.py
rename to src/packages/base/langflow/utils/__init__.py
diff --git a/src/backend/base/langflow/utils/component_utils.py b/src/packages/base/langflow/utils/component_utils.py
similarity index 100%
rename from src/backend/base/langflow/utils/component_utils.py
rename to src/packages/base/langflow/utils/component_utils.py
diff --git a/src/backend/base/langflow/utils/compression.py b/src/packages/base/langflow/utils/compression.py
similarity index 100%
rename from src/backend/base/langflow/utils/compression.py
rename to src/packages/base/langflow/utils/compression.py
diff --git a/src/backend/base/langflow/utils/connection_string_parser.py b/src/packages/base/langflow/utils/connection_string_parser.py
similarity index 100%
rename from src/backend/base/langflow/utils/connection_string_parser.py
rename to src/packages/base/langflow/utils/connection_string_parser.py
diff --git a/src/backend/base/langflow/utils/constants.py b/src/packages/base/langflow/utils/constants.py
similarity index 100%
rename from src/backend/base/langflow/utils/constants.py
rename to src/packages/base/langflow/utils/constants.py
diff --git a/src/backend/base/langflow/utils/data_structure.py b/src/packages/base/langflow/utils/data_structure.py
similarity index 100%
rename from src/backend/base/langflow/utils/data_structure.py
rename to src/packages/base/langflow/utils/data_structure.py
diff --git a/src/backend/base/langflow/utils/image.py b/src/packages/base/langflow/utils/image.py
similarity index 100%
rename from src/backend/base/langflow/utils/image.py
rename to src/packages/base/langflow/utils/image.py
diff --git a/src/backend/base/langflow/utils/lazy_load.py b/src/packages/base/langflow/utils/lazy_load.py
similarity index 100%
rename from src/backend/base/langflow/utils/lazy_load.py
rename to src/packages/base/langflow/utils/lazy_load.py
diff --git a/src/backend/base/langflow/utils/migration.py b/src/packages/base/langflow/utils/migration.py
similarity index 100%
rename from src/backend/base/langflow/utils/migration.py
rename to src/packages/base/langflow/utils/migration.py
diff --git a/src/backend/base/langflow/utils/payload.py b/src/packages/base/langflow/utils/payload.py
similarity index 100%
rename from src/backend/base/langflow/utils/payload.py
rename to src/packages/base/langflow/utils/payload.py
diff --git a/src/backend/base/langflow/utils/schemas.py b/src/packages/base/langflow/utils/schemas.py
similarity index 100%
rename from src/backend/base/langflow/utils/schemas.py
rename to src/packages/base/langflow/utils/schemas.py
diff --git a/src/backend/base/langflow/utils/template_validation.py b/src/packages/base/langflow/utils/template_validation.py
similarity index 100%
rename from src/backend/base/langflow/utils/template_validation.py
rename to src/packages/base/langflow/utils/template_validation.py
diff --git a/src/backend/base/langflow/utils/util.py b/src/packages/base/langflow/utils/util.py
similarity index 100%
rename from src/backend/base/langflow/utils/util.py
rename to src/packages/base/langflow/utils/util.py
diff --git a/src/backend/base/langflow/utils/validate.py b/src/packages/base/langflow/utils/validate.py
similarity index 100%
rename from src/backend/base/langflow/utils/validate.py
rename to src/packages/base/langflow/utils/validate.py
diff --git a/src/backend/base/langflow/utils/version.py b/src/packages/base/langflow/utils/version.py
similarity index 100%
rename from src/backend/base/langflow/utils/version.py
rename to src/packages/base/langflow/utils/version.py
diff --git a/src/backend/base/langflow/utils/voice_utils.py b/src/packages/base/langflow/utils/voice_utils.py
similarity index 100%
rename from src/backend/base/langflow/utils/voice_utils.py
rename to src/packages/base/langflow/utils/voice_utils.py
diff --git a/src/backend/base/langflow/worker.py b/src/packages/base/langflow/worker.py
similarity index 100%
rename from src/backend/base/langflow/worker.py
rename to src/packages/base/langflow/worker.py
diff --git a/src/backend/base/pyproject.toml b/src/packages/base/pyproject.toml
similarity index 100%
rename from src/backend/base/pyproject.toml
rename to src/packages/base/pyproject.toml
diff --git a/src/backend/base/uv.lock b/src/packages/base/uv.lock
similarity index 100%
rename from src/backend/base/uv.lock
rename to src/packages/base/uv.lock
diff --git a/src/lfx/Makefile b/src/packages/core/Makefile
similarity index 100%
rename from src/lfx/Makefile
rename to src/packages/core/Makefile
diff --git a/src/lfx/README.md b/src/packages/core/README.md
similarity index 100%
rename from src/lfx/README.md
rename to src/packages/core/README.md
diff --git a/src/lfx/docker/Dockerfile b/src/packages/core/docker/Dockerfile
similarity index 100%
rename from src/lfx/docker/Dockerfile
rename to src/packages/core/docker/Dockerfile
diff --git a/src/lfx/docker/Dockerfile.dev b/src/packages/core/docker/Dockerfile.dev
similarity index 75%
rename from src/lfx/docker/Dockerfile.dev
rename to src/packages/core/docker/Dockerfile.dev
index 09c9074c5bd4..21b547d59831 100644
--- a/src/lfx/docker/Dockerfile.dev
+++ b/src/packages/core/docker/Dockerfile.dev
@@ -28,25 +28,25 @@ RUN apt-get update \
COPY pyproject.toml uv.lock ./

# Member's pyproject so uv knows about workspace packages (no source yet, better cache)
-COPY src/lfx/pyproject.toml /app/src/lfx/pyproject.toml
-COPY src/lfx/README.md /app/src/lfx/README.md
-COPY src/backend/base/pyproject.toml /app/src/backend/base/pyproject.toml
-COPY src/backend/base/README.md /app/src/backend/base/README.md
+COPY src/packages/core/pyproject.toml /app/src/packages/core/pyproject.toml
+COPY src/packages/core/README.md /app/src/packages/core/README.md
+COPY src/packages/base/pyproject.toml /app/src/packages/base/pyproject.toml
+COPY src/packages/base/README.md /app/src/packages/base/README.md

# Create the venv and install LFX with dev dependencies
RUN --mount=type=cache,target=/root/.cache/uv \
uv sync --frozen --dev --package lfx

# --- Now copy the source and tests (doesn't bust the deps layer) ---
-COPY src/lfx/src /app/src/lfx/src
-COPY src/lfx/tests /app/src/lfx/tests
+COPY src/packages/core/lfx /app/src/packages/core/lfx
+COPY src/packages/core/tests /app/src/packages/core/tests

# Install the LFX package into the virtual environment (editable for dev)
RUN --mount=type=cache,target=/root/.cache/uv \
uv sync --frozen --dev --package lfx

# Set working directory
-WORKDIR /app/src/lfx
+WORKDIR /app/src/packages/core

# Place executables in the environment at the front of the path
ENV PATH="/app/.venv/bin:$PATH"
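Note: the COPY paths in this Dockerfile resolve against the repository root, so the dev image is built with the root as the build context and the file selected via -f; a minimal illustrative invocation (the image tag is assumed for the example, not part of this change):

docker build -f src/packages/core/docker/Dockerfile.dev -t lfx-dev .  # tag "lfx-dev" is illustrative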
diff --git a/src/backend/tests/data/__init__.py b/src/packages/core/lfx/__init__.py
similarity index 100%
rename from src/backend/tests/data/__init__.py
rename to src/packages/core/lfx/__init__.py
diff --git a/src/lfx/src/lfx/__main__.py b/src/packages/core/lfx/__main__.py
similarity index 100%
rename from src/lfx/src/lfx/__main__.py
rename to src/packages/core/lfx/__main__.py
diff --git a/src/backend/tests/integration/__init__.py b/src/packages/core/lfx/base/__init__.py
similarity index 100%
rename from src/backend/tests/integration/__init__.py
rename to src/packages/core/lfx/base/__init__.py
diff --git a/src/backend/tests/integration/backward_compatibility/__init__.py b/src/packages/core/lfx/base/agents/__init__.py
similarity index 100%
rename from src/backend/tests/integration/backward_compatibility/__init__.py
rename to src/packages/core/lfx/base/agents/__init__.py
diff --git a/src/lfx/src/lfx/base/agents/agent.py b/src/packages/core/lfx/base/agents/agent.py
similarity index 100%
rename from src/lfx/src/lfx/base/agents/agent.py
rename to src/packages/core/lfx/base/agents/agent.py
diff --git a/src/lfx/src/lfx/base/agents/callback.py b/src/packages/core/lfx/base/agents/callback.py
similarity index 100%
rename from src/lfx/src/lfx/base/agents/callback.py
rename to src/packages/core/lfx/base/agents/callback.py
diff --git a/src/lfx/src/lfx/base/agents/context.py b/src/packages/core/lfx/base/agents/context.py
similarity index 100%
rename from src/lfx/src/lfx/base/agents/context.py
rename to src/packages/core/lfx/base/agents/context.py
diff --git a/src/backend/tests/integration/components/__init__.py b/src/packages/core/lfx/base/agents/crewai/__init__.py
similarity index 100%
rename from src/backend/tests/integration/components/__init__.py
rename to src/packages/core/lfx/base/agents/crewai/__init__.py
diff --git a/src/lfx/src/lfx/base/agents/crewai/crew.py b/src/packages/core/lfx/base/agents/crewai/crew.py
similarity index 100%
rename from src/lfx/src/lfx/base/agents/crewai/crew.py
rename to src/packages/core/lfx/base/agents/crewai/crew.py
diff --git a/src/lfx/src/lfx/base/agents/crewai/tasks.py b/src/packages/core/lfx/base/agents/crewai/tasks.py
similarity index 100%
rename from src/lfx/src/lfx/base/agents/crewai/tasks.py
rename to src/packages/core/lfx/base/agents/crewai/tasks.py
diff --git a/src/lfx/src/lfx/base/agents/default_prompts.py b/src/packages/core/lfx/base/agents/default_prompts.py
similarity index 100%
rename from src/lfx/src/lfx/base/agents/default_prompts.py
rename to src/packages/core/lfx/base/agents/default_prompts.py
diff --git a/src/lfx/src/lfx/base/agents/errors.py b/src/packages/core/lfx/base/agents/errors.py
similarity index 100%
rename from src/lfx/src/lfx/base/agents/errors.py
rename to src/packages/core/lfx/base/agents/errors.py
diff --git a/src/lfx/src/lfx/base/agents/events.py b/src/packages/core/lfx/base/agents/events.py
similarity index 100%
rename from src/lfx/src/lfx/base/agents/events.py
rename to src/packages/core/lfx/base/agents/events.py
diff --git a/src/lfx/src/lfx/base/agents/utils.py b/src/packages/core/lfx/base/agents/utils.py
similarity index 100%
rename from src/lfx/src/lfx/base/agents/utils.py
rename to src/packages/core/lfx/base/agents/utils.py
diff --git a/src/backend/tests/integration/components/assistants/__init__.py b/src/packages/core/lfx/base/astra_assistants/__init__.py
similarity index 100%
rename from src/backend/tests/integration/components/assistants/__init__.py
rename to src/packages/core/lfx/base/astra_assistants/__init__.py
diff --git a/src/lfx/src/lfx/base/astra_assistants/util.py b/src/packages/core/lfx/base/astra_assistants/util.py
similarity index 100%
rename from src/lfx/src/lfx/base/astra_assistants/util.py
rename to src/packages/core/lfx/base/astra_assistants/util.py
diff --git a/src/backend/tests/integration/components/astra/__init__.py b/src/packages/core/lfx/base/chains/__init__.py
similarity index 100%
rename from src/backend/tests/integration/components/astra/__init__.py
rename to src/packages/core/lfx/base/chains/__init__.py
diff --git a/src/lfx/src/lfx/base/chains/model.py b/src/packages/core/lfx/base/chains/model.py
similarity index 100%
rename from src/lfx/src/lfx/base/chains/model.py
rename to src/packages/core/lfx/base/chains/model.py
diff --git a/src/backend/tests/integration/components/helpers/__init__.py b/src/packages/core/lfx/base/composio/__init__.py
similarity index 100%
rename from src/backend/tests/integration/components/helpers/__init__.py
rename to src/packages/core/lfx/base/composio/__init__.py
diff --git a/src/lfx/src/lfx/base/composio/composio_base.py b/src/packages/core/lfx/base/composio/composio_base.py
similarity index 100%
rename from src/lfx/src/lfx/base/composio/composio_base.py
rename to src/packages/core/lfx/base/composio/composio_base.py
diff --git a/src/backend/tests/integration/components/inputs/__init__.py b/src/packages/core/lfx/base/compressors/__init__.py
similarity index 100%
rename from src/backend/tests/integration/components/inputs/__init__.py
rename to src/packages/core/lfx/base/compressors/__init__.py
diff --git a/src/lfx/src/lfx/base/compressors/model.py b/src/packages/core/lfx/base/compressors/model.py
similarity index 100%
rename from src/lfx/src/lfx/base/compressors/model.py
rename to src/packages/core/lfx/base/compressors/model.py
diff --git a/src/lfx/src/lfx/base/constants.py b/src/packages/core/lfx/base/constants.py
similarity index 100%
rename from src/lfx/src/lfx/base/constants.py
rename to src/packages/core/lfx/base/constants.py
diff --git a/src/backend/tests/integration/components/mcp/__init__.py b/src/packages/core/lfx/base/curl/__init__.py
similarity index 100%
rename from src/backend/tests/integration/components/mcp/__init__.py
rename to src/packages/core/lfx/base/curl/__init__.py
diff --git a/src/lfx/src/lfx/base/curl/parse.py b/src/packages/core/lfx/base/curl/parse.py
similarity index 100%
rename from src/lfx/src/lfx/base/curl/parse.py
rename to src/packages/core/lfx/base/curl/parse.py
diff --git a/src/lfx/src/lfx/base/data/__init__.py b/src/packages/core/lfx/base/data/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/base/data/__init__.py
rename to src/packages/core/lfx/base/data/__init__.py
diff --git a/src/lfx/src/lfx/base/data/base_file.py b/src/packages/core/lfx/base/data/base_file.py
similarity index 100%
rename from src/lfx/src/lfx/base/data/base_file.py
rename to src/packages/core/lfx/base/data/base_file.py
diff --git a/src/lfx/src/lfx/base/data/docling_utils.py b/src/packages/core/lfx/base/data/docling_utils.py
similarity index 100%
rename from src/lfx/src/lfx/base/data/docling_utils.py
rename to src/packages/core/lfx/base/data/docling_utils.py
diff --git a/src/lfx/src/lfx/base/data/utils.py b/src/packages/core/lfx/base/data/utils.py
similarity index 100%
rename from src/lfx/src/lfx/base/data/utils.py
rename to src/packages/core/lfx/base/data/utils.py
diff --git a/src/backend/tests/integration/components/output_parsers/__init__.py b/src/packages/core/lfx/base/document_transformers/__init__.py
similarity index 100%
rename from src/backend/tests/integration/components/output_parsers/__init__.py
rename to src/packages/core/lfx/base/document_transformers/__init__.py
diff --git a/src/lfx/src/lfx/base/document_transformers/model.py b/src/packages/core/lfx/base/document_transformers/model.py
similarity index 100%
rename from src/lfx/src/lfx/base/document_transformers/model.py
rename to src/packages/core/lfx/base/document_transformers/model.py
diff --git a/src/backend/tests/integration/components/outputs/__init__.py b/src/packages/core/lfx/base/embeddings/__init__.py
similarity index 100%
rename from src/backend/tests/integration/components/outputs/__init__.py
rename to src/packages/core/lfx/base/embeddings/__init__.py
diff --git a/src/lfx/src/lfx/base/embeddings/aiml_embeddings.py b/src/packages/core/lfx/base/embeddings/aiml_embeddings.py
similarity index 100%
rename from src/lfx/src/lfx/base/embeddings/aiml_embeddings.py
rename to src/packages/core/lfx/base/embeddings/aiml_embeddings.py
diff --git a/src/lfx/src/lfx/base/embeddings/model.py b/src/packages/core/lfx/base/embeddings/model.py
similarity index 100%
rename from src/lfx/src/lfx/base/embeddings/model.py
rename to src/packages/core/lfx/base/embeddings/model.py
diff --git a/src/backend/tests/integration/components/prompts/__init__.py b/src/packages/core/lfx/base/flow_processing/__init__.py
similarity index 100%
rename from src/backend/tests/integration/components/prompts/__init__.py
rename to src/packages/core/lfx/base/flow_processing/__init__.py
diff --git a/src/lfx/src/lfx/base/flow_processing/utils.py b/src/packages/core/lfx/base/flow_processing/utils.py
similarity index 100%
rename from src/lfx/src/lfx/base/flow_processing/utils.py
rename to src/packages/core/lfx/base/flow_processing/utils.py
diff --git a/src/backend/tests/integration/flows/__init__.py b/src/packages/core/lfx/base/huggingface/__init__.py
similarity index 100%
rename from src/backend/tests/integration/flows/__init__.py
rename to src/packages/core/lfx/base/huggingface/__init__.py
diff --git a/src/lfx/src/lfx/base/huggingface/model_bridge.py b/src/packages/core/lfx/base/huggingface/model_bridge.py
similarity index 100%
rename from src/lfx/src/lfx/base/huggingface/model_bridge.py
rename to src/packages/core/lfx/base/huggingface/model_bridge.py
diff --git a/src/backend/tests/locust/__init__.py b/src/packages/core/lfx/base/io/__init__.py
similarity index 100%
rename from src/backend/tests/locust/__init__.py
rename to src/packages/core/lfx/base/io/__init__.py
diff --git a/src/lfx/src/lfx/base/io/chat.py b/src/packages/core/lfx/base/io/chat.py
similarity index 100%
rename from src/lfx/src/lfx/base/io/chat.py
rename to src/packages/core/lfx/base/io/chat.py
diff --git a/src/lfx/src/lfx/base/io/text.py b/src/packages/core/lfx/base/io/text.py
similarity index 100%
rename from src/lfx/src/lfx/base/io/text.py
rename to src/packages/core/lfx/base/io/text.py
diff --git a/src/backend/tests/performance/__init__.py b/src/packages/core/lfx/base/langchain_utilities/__init__.py
similarity index 100%
rename from src/backend/tests/performance/__init__.py
rename to src/packages/core/lfx/base/langchain_utilities/__init__.py
diff --git a/src/lfx/src/lfx/base/langchain_utilities/model.py b/src/packages/core/lfx/base/langchain_utilities/model.py
similarity index 100%
rename from src/lfx/src/lfx/base/langchain_utilities/model.py
rename to src/packages/core/lfx/base/langchain_utilities/model.py
diff --git a/src/lfx/src/lfx/base/langchain_utilities/spider_constants.py b/src/packages/core/lfx/base/langchain_utilities/spider_constants.py
similarity index 100%
rename from src/lfx/src/lfx/base/langchain_utilities/spider_constants.py
rename to src/packages/core/lfx/base/langchain_utilities/spider_constants.py
diff --git a/src/backend/tests/unit/api/__init__.py b/src/packages/core/lfx/base/langwatch/__init__.py
similarity index 100%
rename from src/backend/tests/unit/api/__init__.py
rename to src/packages/core/lfx/base/langwatch/__init__.py
diff --git a/src/lfx/src/lfx/base/langwatch/utils.py b/src/packages/core/lfx/base/langwatch/utils.py
similarity index 100%
rename from src/lfx/src/lfx/base/langwatch/utils.py
rename to src/packages/core/lfx/base/langwatch/utils.py
diff --git a/src/backend/tests/unit/api/v1/__init__.py b/src/packages/core/lfx/base/mcp/__init__.py
similarity index 100%
rename from src/backend/tests/unit/api/v1/__init__.py
rename to src/packages/core/lfx/base/mcp/__init__.py
diff --git a/src/lfx/src/lfx/base/mcp/constants.py b/src/packages/core/lfx/base/mcp/constants.py
similarity index 100%
rename from src/lfx/src/lfx/base/mcp/constants.py
rename to src/packages/core/lfx/base/mcp/constants.py
diff --git a/src/lfx/src/lfx/base/mcp/util.py b/src/packages/core/lfx/base/mcp/util.py
similarity index 100%
rename from src/lfx/src/lfx/base/mcp/util.py
rename to src/packages/core/lfx/base/mcp/util.py
diff --git a/src/backend/tests/unit/api/v2/__init__.py b/src/packages/core/lfx/base/memory/__init__.py
similarity index 100%
rename from src/backend/tests/unit/api/v2/__init__.py
rename to src/packages/core/lfx/base/memory/__init__.py
diff --git a/src/lfx/src/lfx/base/memory/memory.py b/src/packages/core/lfx/base/memory/memory.py
similarity index 100%
rename from src/lfx/src/lfx/base/memory/memory.py
rename to src/packages/core/lfx/base/memory/memory.py
diff --git a/src/lfx/src/lfx/base/memory/model.py b/src/packages/core/lfx/base/memory/model.py
similarity index 100%
rename from src/lfx/src/lfx/base/memory/model.py
rename to src/packages/core/lfx/base/memory/model.py
diff --git a/src/lfx/src/lfx/base/models/__init__.py b/src/packages/core/lfx/base/models/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/base/models/__init__.py
rename to src/packages/core/lfx/base/models/__init__.py
diff --git a/src/lfx/src/lfx/base/models/aiml_constants.py b/src/packages/core/lfx/base/models/aiml_constants.py
similarity index 100%
rename from src/lfx/src/lfx/base/models/aiml_constants.py
rename to src/packages/core/lfx/base/models/aiml_constants.py
diff --git a/src/lfx/src/lfx/base/models/anthropic_constants.py b/src/packages/core/lfx/base/models/anthropic_constants.py
similarity index 100%
rename from src/lfx/src/lfx/base/models/anthropic_constants.py
rename to src/packages/core/lfx/base/models/anthropic_constants.py
diff --git a/src/lfx/src/lfx/base/models/aws_constants.py b/src/packages/core/lfx/base/models/aws_constants.py
similarity index 100%
rename from src/lfx/src/lfx/base/models/aws_constants.py
rename to src/packages/core/lfx/base/models/aws_constants.py
diff --git a/src/lfx/src/lfx/base/models/chat_result.py b/src/packages/core/lfx/base/models/chat_result.py
similarity index 100%
rename from src/lfx/src/lfx/base/models/chat_result.py
rename to src/packages/core/lfx/base/models/chat_result.py
diff --git a/src/lfx/src/lfx/base/models/google_generative_ai_constants.py b/src/packages/core/lfx/base/models/google_generative_ai_constants.py
similarity index 100%
rename from src/lfx/src/lfx/base/models/google_generative_ai_constants.py
rename to src/packages/core/lfx/base/models/google_generative_ai_constants.py
diff --git a/src/lfx/src/lfx/base/models/groq_constants.py b/src/packages/core/lfx/base/models/groq_constants.py
similarity index 100%
rename from src/lfx/src/lfx/base/models/groq_constants.py
rename to src/packages/core/lfx/base/models/groq_constants.py
diff --git a/src/lfx/src/lfx/base/models/model.py b/src/packages/core/lfx/base/models/model.py
similarity index 100%
rename from src/lfx/src/lfx/base/models/model.py
rename to src/packages/core/lfx/base/models/model.py
diff --git a/src/lfx/src/lfx/base/models/model_input_constants.py b/src/packages/core/lfx/base/models/model_input_constants.py
similarity index 100%
rename from src/lfx/src/lfx/base/models/model_input_constants.py
rename to src/packages/core/lfx/base/models/model_input_constants.py
diff --git a/src/lfx/src/lfx/base/models/model_metadata.py b/src/packages/core/lfx/base/models/model_metadata.py
similarity index 100%
rename from src/lfx/src/lfx/base/models/model_metadata.py
rename to src/packages/core/lfx/base/models/model_metadata.py
diff --git a/src/lfx/src/lfx/base/models/model_utils.py b/src/packages/core/lfx/base/models/model_utils.py
similarity index 100%
rename from src/lfx/src/lfx/base/models/model_utils.py
rename to src/packages/core/lfx/base/models/model_utils.py
diff --git a/src/lfx/src/lfx/base/models/novita_constants.py b/src/packages/core/lfx/base/models/novita_constants.py
similarity index 100%
rename from src/lfx/src/lfx/base/models/novita_constants.py
rename to src/packages/core/lfx/base/models/novita_constants.py
diff --git a/src/lfx/src/lfx/base/models/ollama_constants.py b/src/packages/core/lfx/base/models/ollama_constants.py
similarity index 100%
rename from src/lfx/src/lfx/base/models/ollama_constants.py
rename to src/packages/core/lfx/base/models/ollama_constants.py
diff --git a/src/lfx/src/lfx/base/models/openai_constants.py b/src/packages/core/lfx/base/models/openai_constants.py
similarity index 100%
rename from src/lfx/src/lfx/base/models/openai_constants.py
rename to src/packages/core/lfx/base/models/openai_constants.py
diff --git a/src/lfx/src/lfx/base/models/sambanova_constants.py b/src/packages/core/lfx/base/models/sambanova_constants.py
similarity index 100%
rename from src/lfx/src/lfx/base/models/sambanova_constants.py
rename to src/packages/core/lfx/base/models/sambanova_constants.py
diff --git a/src/backend/tests/unit/base/__init__.py b/src/packages/core/lfx/base/processing/__init__.py
similarity index 100%
rename from src/backend/tests/unit/base/__init__.py
rename to src/packages/core/lfx/base/processing/__init__.py
diff --git a/src/backend/tests/unit/base/data/__init__.py b/src/packages/core/lfx/base/prompts/__init__.py
similarity index 100%
rename from src/backend/tests/unit/base/data/__init__.py
rename to src/packages/core/lfx/base/prompts/__init__.py
diff --git a/src/lfx/src/lfx/base/prompts/api_utils.py b/src/packages/core/lfx/base/prompts/api_utils.py
similarity index 100%
rename from src/lfx/src/lfx/base/prompts/api_utils.py
rename to src/packages/core/lfx/base/prompts/api_utils.py
diff --git a/src/lfx/src/lfx/base/prompts/utils.py b/src/packages/core/lfx/base/prompts/utils.py
similarity index 100%
rename from src/lfx/src/lfx/base/prompts/utils.py
rename to src/packages/core/lfx/base/prompts/utils.py
diff --git a/src/backend/tests/unit/base/load/__init__.py b/src/packages/core/lfx/base/textsplitters/__init__.py
similarity index 100%
rename from src/backend/tests/unit/base/load/__init__.py
rename to src/packages/core/lfx/base/textsplitters/__init__.py
diff --git a/src/lfx/src/lfx/base/textsplitters/model.py b/src/packages/core/lfx/base/textsplitters/model.py
similarity index 100%
rename from src/lfx/src/lfx/base/textsplitters/model.py
rename to src/packages/core/lfx/base/textsplitters/model.py
diff --git a/src/backend/tests/unit/base/mcp/__init__.py b/src/packages/core/lfx/base/tools/__init__.py
similarity index 100%
rename from src/backend/tests/unit/base/mcp/__init__.py
rename to src/packages/core/lfx/base/tools/__init__.py
diff --git a/src/lfx/src/lfx/base/tools/base.py b/src/packages/core/lfx/base/tools/base.py
similarity index 100%
rename from src/lfx/src/lfx/base/tools/base.py
rename to src/packages/core/lfx/base/tools/base.py
diff --git a/src/lfx/src/lfx/base/tools/component_tool.py b/src/packages/core/lfx/base/tools/component_tool.py
similarity index 100%
rename from src/lfx/src/lfx/base/tools/component_tool.py
rename to src/packages/core/lfx/base/tools/component_tool.py
diff --git a/src/lfx/src/lfx/base/tools/constants.py b/src/packages/core/lfx/base/tools/constants.py
similarity index 100%
rename from src/lfx/src/lfx/base/tools/constants.py
rename to src/packages/core/lfx/base/tools/constants.py
diff --git a/src/lfx/src/lfx/base/tools/flow_tool.py b/src/packages/core/lfx/base/tools/flow_tool.py
similarity index 100%
rename from src/lfx/src/lfx/base/tools/flow_tool.py
rename to src/packages/core/lfx/base/tools/flow_tool.py
diff --git a/src/lfx/src/lfx/base/tools/run_flow.py b/src/packages/core/lfx/base/tools/run_flow.py
similarity index 100%
rename from src/lfx/src/lfx/base/tools/run_flow.py
rename to src/packages/core/lfx/base/tools/run_flow.py
diff --git a/src/backend/tests/unit/base/tools/__init__.py b/src/packages/core/lfx/base/vectorstores/__init__.py
similarity index 100%
rename from src/backend/tests/unit/base/tools/__init__.py
rename to src/packages/core/lfx/base/vectorstores/__init__.py
diff --git a/src/lfx/src/lfx/base/vectorstores/model.py b/src/packages/core/lfx/base/vectorstores/model.py
similarity index 100%
rename from src/lfx/src/lfx/base/vectorstores/model.py
rename to src/packages/core/lfx/base/vectorstores/model.py
diff --git a/src/lfx/src/lfx/base/vectorstores/utils.py b/src/packages/core/lfx/base/vectorstores/utils.py
similarity index 100%
rename from src/lfx/src/lfx/base/vectorstores/utils.py
rename to src/packages/core/lfx/base/vectorstores/utils.py
diff --git a/src/lfx/src/lfx/base/vectorstores/vector_store_connection_decorator.py b/src/packages/core/lfx/base/vectorstores/vector_store_connection_decorator.py
similarity index 100%
rename from src/lfx/src/lfx/base/vectorstores/vector_store_connection_decorator.py
rename to src/packages/core/lfx/base/vectorstores/vector_store_connection_decorator.py
diff --git a/src/lfx/src/lfx/cli/__init__.py b/src/packages/core/lfx/cli/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/cli/__init__.py
rename to src/packages/core/lfx/cli/__init__.py
diff --git a/src/lfx/src/lfx/cli/commands.py b/src/packages/core/lfx/cli/commands.py
similarity index 100%
rename from src/lfx/src/lfx/cli/commands.py
rename to src/packages/core/lfx/cli/commands.py
diff --git a/src/lfx/src/lfx/cli/common.py b/src/packages/core/lfx/cli/common.py
similarity index 100%
rename from src/lfx/src/lfx/cli/common.py
rename to src/packages/core/lfx/cli/common.py
diff --git a/src/lfx/src/lfx/cli/run.py b/src/packages/core/lfx/cli/run.py
similarity index 100%
rename from src/lfx/src/lfx/cli/run.py
rename to src/packages/core/lfx/cli/run.py
diff --git a/src/lfx/src/lfx/cli/script_loader.py b/src/packages/core/lfx/cli/script_loader.py
similarity index 100%
rename from src/lfx/src/lfx/cli/script_loader.py
rename to src/packages/core/lfx/cli/script_loader.py
diff --git a/src/lfx/src/lfx/cli/serve_app.py b/src/packages/core/lfx/cli/serve_app.py
similarity index 100%
rename from src/lfx/src/lfx/cli/serve_app.py
rename to src/packages/core/lfx/cli/serve_app.py
diff --git a/src/lfx/src/lfx/cli/validation.py b/src/packages/core/lfx/cli/validation.py
similarity index 100%
rename from src/lfx/src/lfx/cli/validation.py
rename to src/packages/core/lfx/cli/validation.py
diff --git a/src/lfx/src/lfx/components/FAISS/__init__.py b/src/packages/core/lfx/components/FAISS/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/FAISS/__init__.py
rename to src/packages/core/lfx/components/FAISS/__init__.py
diff --git a/src/lfx/src/lfx/components/FAISS/faiss.py b/src/packages/core/lfx/components/FAISS/faiss.py
similarity index 100%
rename from src/lfx/src/lfx/components/FAISS/faiss.py
rename to src/packages/core/lfx/components/FAISS/faiss.py
diff --git a/src/lfx/src/lfx/components/Notion/__init__.py b/src/packages/core/lfx/components/Notion/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/Notion/__init__.py
rename to src/packages/core/lfx/components/Notion/__init__.py
diff --git a/src/lfx/src/lfx/components/Notion/add_content_to_page.py b/src/packages/core/lfx/components/Notion/add_content_to_page.py
similarity index 100%
rename from src/lfx/src/lfx/components/Notion/add_content_to_page.py
rename to src/packages/core/lfx/components/Notion/add_content_to_page.py
diff --git a/src/lfx/src/lfx/components/Notion/create_page.py b/src/packages/core/lfx/components/Notion/create_page.py
similarity index 100%
rename from src/lfx/src/lfx/components/Notion/create_page.py
rename to src/packages/core/lfx/components/Notion/create_page.py
diff --git a/src/lfx/src/lfx/components/Notion/list_database_properties.py b/src/packages/core/lfx/components/Notion/list_database_properties.py
similarity index 100%
rename from src/lfx/src/lfx/components/Notion/list_database_properties.py
rename to src/packages/core/lfx/components/Notion/list_database_properties.py
diff --git a/src/lfx/src/lfx/components/Notion/list_pages.py b/src/packages/core/lfx/components/Notion/list_pages.py
similarity index 100%
rename from src/lfx/src/lfx/components/Notion/list_pages.py
rename to src/packages/core/lfx/components/Notion/list_pages.py
diff --git a/src/lfx/src/lfx/components/Notion/list_users.py b/src/packages/core/lfx/components/Notion/list_users.py
similarity index 100%
rename from src/lfx/src/lfx/components/Notion/list_users.py
rename to src/packages/core/lfx/components/Notion/list_users.py
diff --git a/src/lfx/src/lfx/components/Notion/page_content_viewer.py b/src/packages/core/lfx/components/Notion/page_content_viewer.py
similarity index 100%
rename from src/lfx/src/lfx/components/Notion/page_content_viewer.py
rename to src/packages/core/lfx/components/Notion/page_content_viewer.py
diff --git a/src/lfx/src/lfx/components/Notion/search.py b/src/packages/core/lfx/components/Notion/search.py
similarity index 100%
rename from src/lfx/src/lfx/components/Notion/search.py
rename to src/packages/core/lfx/components/Notion/search.py
diff --git a/src/lfx/src/lfx/components/Notion/update_page_property.py b/src/packages/core/lfx/components/Notion/update_page_property.py
similarity index 100%
rename from src/lfx/src/lfx/components/Notion/update_page_property.py
rename to src/packages/core/lfx/components/Notion/update_page_property.py
diff --git a/src/lfx/src/lfx/components/__init__.py b/src/packages/core/lfx/components/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/__init__.py
rename to src/packages/core/lfx/components/__init__.py
diff --git a/src/lfx/src/lfx/components/_importing.py b/src/packages/core/lfx/components/_importing.py
similarity index 100%
rename from src/lfx/src/lfx/components/_importing.py
rename to src/packages/core/lfx/components/_importing.py
diff --git a/src/lfx/src/lfx/components/agentql/__init__.py b/src/packages/core/lfx/components/agentql/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/agentql/__init__.py
rename to src/packages/core/lfx/components/agentql/__init__.py
diff --git a/src/lfx/src/lfx/components/agentql/agentql_api.py b/src/packages/core/lfx/components/agentql/agentql_api.py
similarity index 100%
rename from src/lfx/src/lfx/components/agentql/agentql_api.py
rename to src/packages/core/lfx/components/agentql/agentql_api.py
diff --git a/src/lfx/src/lfx/components/agents/__init__.py b/src/packages/core/lfx/components/agents/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/agents/__init__.py
rename to src/packages/core/lfx/components/agents/__init__.py
diff --git a/src/lfx/src/lfx/components/agents/agent.py b/src/packages/core/lfx/components/agents/agent.py
similarity index 100%
rename from src/lfx/src/lfx/components/agents/agent.py
rename to src/packages/core/lfx/components/agents/agent.py
diff --git a/src/lfx/src/lfx/components/agents/mcp_component.py b/src/packages/core/lfx/components/agents/mcp_component.py
similarity index 100%
rename from src/lfx/src/lfx/components/agents/mcp_component.py
rename to src/packages/core/lfx/components/agents/mcp_component.py
diff --git a/src/lfx/src/lfx/components/aiml/__init__.py b/src/packages/core/lfx/components/aiml/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/aiml/__init__.py
rename to src/packages/core/lfx/components/aiml/__init__.py
diff --git a/src/lfx/src/lfx/components/aiml/aiml.py b/src/packages/core/lfx/components/aiml/aiml.py
similarity index 100%
rename from src/lfx/src/lfx/components/aiml/aiml.py
rename to src/packages/core/lfx/components/aiml/aiml.py
diff --git a/src/lfx/src/lfx/components/aiml/aiml_embeddings.py b/src/packages/core/lfx/components/aiml/aiml_embeddings.py
similarity index 100%
rename from src/lfx/src/lfx/components/aiml/aiml_embeddings.py
rename to src/packages/core/lfx/components/aiml/aiml_embeddings.py
diff --git a/src/lfx/src/lfx/components/amazon/__init__.py b/src/packages/core/lfx/components/amazon/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/amazon/__init__.py
rename to src/packages/core/lfx/components/amazon/__init__.py
diff --git a/src/lfx/src/lfx/components/amazon/amazon_bedrock_embedding.py b/src/packages/core/lfx/components/amazon/amazon_bedrock_embedding.py
similarity index 100%
rename from src/lfx/src/lfx/components/amazon/amazon_bedrock_embedding.py
rename to src/packages/core/lfx/components/amazon/amazon_bedrock_embedding.py
diff --git a/src/lfx/src/lfx/components/amazon/amazon_bedrock_model.py b/src/packages/core/lfx/components/amazon/amazon_bedrock_model.py
similarity index 100%
rename from src/lfx/src/lfx/components/amazon/amazon_bedrock_model.py
rename to src/packages/core/lfx/components/amazon/amazon_bedrock_model.py
diff --git a/src/lfx/src/lfx/components/amazon/s3_bucket_uploader.py b/src/packages/core/lfx/components/amazon/s3_bucket_uploader.py
similarity index 100%
rename from src/lfx/src/lfx/components/amazon/s3_bucket_uploader.py
rename to src/packages/core/lfx/components/amazon/s3_bucket_uploader.py
diff --git a/src/lfx/src/lfx/components/anthropic/__init__.py b/src/packages/core/lfx/components/anthropic/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/anthropic/__init__.py
rename to src/packages/core/lfx/components/anthropic/__init__.py
diff --git a/src/lfx/src/lfx/components/anthropic/anthropic.py b/src/packages/core/lfx/components/anthropic/anthropic.py
similarity index 100%
rename from src/lfx/src/lfx/components/anthropic/anthropic.py
rename to src/packages/core/lfx/components/anthropic/anthropic.py
diff --git a/src/lfx/src/lfx/components/apify/__init__.py b/src/packages/core/lfx/components/apify/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/apify/__init__.py
rename to src/packages/core/lfx/components/apify/__init__.py
diff --git a/src/lfx/src/lfx/components/apify/apify_actor.py b/src/packages/core/lfx/components/apify/apify_actor.py
similarity index 100%
rename from src/lfx/src/lfx/components/apify/apify_actor.py
rename to src/packages/core/lfx/components/apify/apify_actor.py
diff --git a/src/lfx/src/lfx/components/arxiv/__init__.py b/src/packages/core/lfx/components/arxiv/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/arxiv/__init__.py
rename to src/packages/core/lfx/components/arxiv/__init__.py
diff --git a/src/lfx/src/lfx/components/arxiv/arxiv.py b/src/packages/core/lfx/components/arxiv/arxiv.py
similarity index 100%
rename from src/lfx/src/lfx/components/arxiv/arxiv.py
rename to src/packages/core/lfx/components/arxiv/arxiv.py
diff --git a/src/lfx/src/lfx/components/assemblyai/__init__.py b/src/packages/core/lfx/components/assemblyai/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/assemblyai/__init__.py
rename to src/packages/core/lfx/components/assemblyai/__init__.py
diff --git a/src/lfx/src/lfx/components/assemblyai/assemblyai_get_subtitles.py b/src/packages/core/lfx/components/assemblyai/assemblyai_get_subtitles.py
similarity index 100%
rename from src/lfx/src/lfx/components/assemblyai/assemblyai_get_subtitles.py
rename to src/packages/core/lfx/components/assemblyai/assemblyai_get_subtitles.py
diff --git a/src/lfx/src/lfx/components/assemblyai/assemblyai_lemur.py b/src/packages/core/lfx/components/assemblyai/assemblyai_lemur.py
similarity index 100%
rename from src/lfx/src/lfx/components/assemblyai/assemblyai_lemur.py
rename to src/packages/core/lfx/components/assemblyai/assemblyai_lemur.py
diff --git a/src/lfx/src/lfx/components/assemblyai/assemblyai_list_transcripts.py b/src/packages/core/lfx/components/assemblyai/assemblyai_list_transcripts.py
similarity index 100%
rename from src/lfx/src/lfx/components/assemblyai/assemblyai_list_transcripts.py
rename to src/packages/core/lfx/components/assemblyai/assemblyai_list_transcripts.py
diff --git a/src/lfx/src/lfx/components/assemblyai/assemblyai_poll_transcript.py b/src/packages/core/lfx/components/assemblyai/assemblyai_poll_transcript.py
similarity index 100%
rename from src/lfx/src/lfx/components/assemblyai/assemblyai_poll_transcript.py
rename to src/packages/core/lfx/components/assemblyai/assemblyai_poll_transcript.py
diff --git a/src/lfx/src/lfx/components/assemblyai/assemblyai_start_transcript.py b/src/packages/core/lfx/components/assemblyai/assemblyai_start_transcript.py
similarity index 100%
rename from src/lfx/src/lfx/components/assemblyai/assemblyai_start_transcript.py
rename to src/packages/core/lfx/components/assemblyai/assemblyai_start_transcript.py
diff --git a/src/lfx/src/lfx/components/azure/__init__.py b/src/packages/core/lfx/components/azure/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/azure/__init__.py
rename to src/packages/core/lfx/components/azure/__init__.py
diff --git a/src/lfx/src/lfx/components/azure/azure_openai.py b/src/packages/core/lfx/components/azure/azure_openai.py
similarity index 100%
rename from src/lfx/src/lfx/components/azure/azure_openai.py
rename to src/packages/core/lfx/components/azure/azure_openai.py
diff --git a/src/lfx/src/lfx/components/azure/azure_openai_embeddings.py b/src/packages/core/lfx/components/azure/azure_openai_embeddings.py
similarity index 100%
rename from src/lfx/src/lfx/components/azure/azure_openai_embeddings.py
rename to src/packages/core/lfx/components/azure/azure_openai_embeddings.py
diff --git a/src/lfx/src/lfx/components/baidu/__init__.py b/src/packages/core/lfx/components/baidu/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/baidu/__init__.py
rename to src/packages/core/lfx/components/baidu/__init__.py
diff --git a/src/lfx/src/lfx/components/baidu/baidu_qianfan_chat.py b/src/packages/core/lfx/components/baidu/baidu_qianfan_chat.py
similarity index 100%
rename from src/lfx/src/lfx/components/baidu/baidu_qianfan_chat.py
rename to src/packages/core/lfx/components/baidu/baidu_qianfan_chat.py
diff --git a/src/lfx/src/lfx/components/bing/__init__.py b/src/packages/core/lfx/components/bing/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/bing/__init__.py
rename to src/packages/core/lfx/components/bing/__init__.py
diff --git a/src/lfx/src/lfx/components/bing/bing_search_api.py b/src/packages/core/lfx/components/bing/bing_search_api.py
similarity index 100%
rename from src/lfx/src/lfx/components/bing/bing_search_api.py
rename to src/packages/core/lfx/components/bing/bing_search_api.py
diff --git a/src/lfx/src/lfx/components/cassandra/__init__.py b/src/packages/core/lfx/components/cassandra/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/cassandra/__init__.py
rename to src/packages/core/lfx/components/cassandra/__init__.py
diff --git a/src/lfx/src/lfx/components/cassandra/cassandra.py b/src/packages/core/lfx/components/cassandra/cassandra.py
similarity index 100%
rename from src/lfx/src/lfx/components/cassandra/cassandra.py
rename to src/packages/core/lfx/components/cassandra/cassandra.py
diff --git a/src/lfx/src/lfx/components/cassandra/cassandra_chat.py b/src/packages/core/lfx/components/cassandra/cassandra_chat.py
similarity index 100%
rename from src/lfx/src/lfx/components/cassandra/cassandra_chat.py
rename to src/packages/core/lfx/components/cassandra/cassandra_chat.py
diff --git a/src/lfx/src/lfx/components/cassandra/cassandra_graph.py b/src/packages/core/lfx/components/cassandra/cassandra_graph.py
similarity index 100%
rename from src/lfx/src/lfx/components/cassandra/cassandra_graph.py
rename to src/packages/core/lfx/components/cassandra/cassandra_graph.py
diff --git a/src/lfx/src/lfx/components/chains/__init__.py b/src/packages/core/lfx/components/chains/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/chains/__init__.py
rename to src/packages/core/lfx/components/chains/__init__.py
diff --git a/src/lfx/src/lfx/components/chroma/__init__.py b/src/packages/core/lfx/components/chroma/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/chroma/__init__.py
rename to src/packages/core/lfx/components/chroma/__init__.py
diff --git a/src/lfx/src/lfx/components/chroma/chroma.py b/src/packages/core/lfx/components/chroma/chroma.py
similarity index 100%
rename from src/lfx/src/lfx/components/chroma/chroma.py
rename to src/packages/core/lfx/components/chroma/chroma.py
diff --git a/src/lfx/src/lfx/components/cleanlab/__init__.py b/src/packages/core/lfx/components/cleanlab/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/cleanlab/__init__.py
rename to src/packages/core/lfx/components/cleanlab/__init__.py
diff --git a/src/lfx/src/lfx/components/cleanlab/cleanlab_evaluator.py b/src/packages/core/lfx/components/cleanlab/cleanlab_evaluator.py
similarity index 100%
rename from src/lfx/src/lfx/components/cleanlab/cleanlab_evaluator.py
rename to src/packages/core/lfx/components/cleanlab/cleanlab_evaluator.py
diff --git a/src/lfx/src/lfx/components/cleanlab/cleanlab_rag_evaluator.py b/src/packages/core/lfx/components/cleanlab/cleanlab_rag_evaluator.py
similarity index 100%
rename from src/lfx/src/lfx/components/cleanlab/cleanlab_rag_evaluator.py
rename to src/packages/core/lfx/components/cleanlab/cleanlab_rag_evaluator.py
diff --git a/src/lfx/src/lfx/components/cleanlab/cleanlab_remediator.py b/src/packages/core/lfx/components/cleanlab/cleanlab_remediator.py
similarity index 100%
rename from src/lfx/src/lfx/components/cleanlab/cleanlab_remediator.py
rename to src/packages/core/lfx/components/cleanlab/cleanlab_remediator.py
diff --git a/src/lfx/src/lfx/components/clickhouse/__init__.py b/src/packages/core/lfx/components/clickhouse/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/clickhouse/__init__.py
rename to src/packages/core/lfx/components/clickhouse/__init__.py
diff --git a/src/lfx/src/lfx/components/clickhouse/clickhouse.py b/src/packages/core/lfx/components/clickhouse/clickhouse.py
similarity index 100%
rename from src/lfx/src/lfx/components/clickhouse/clickhouse.py
rename to src/packages/core/lfx/components/clickhouse/clickhouse.py
diff --git a/src/lfx/src/lfx/components/cloudflare/__init__.py b/src/packages/core/lfx/components/cloudflare/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/cloudflare/__init__.py
rename to src/packages/core/lfx/components/cloudflare/__init__.py
diff --git a/src/lfx/src/lfx/components/cloudflare/cloudflare.py b/src/packages/core/lfx/components/cloudflare/cloudflare.py
similarity index 100%
rename from src/lfx/src/lfx/components/cloudflare/cloudflare.py
rename to src/packages/core/lfx/components/cloudflare/cloudflare.py
diff --git a/src/lfx/src/lfx/components/cohere/__init__.py b/src/packages/core/lfx/components/cohere/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/cohere/__init__.py
rename to src/packages/core/lfx/components/cohere/__init__.py
diff --git a/src/lfx/src/lfx/components/cohere/cohere_embeddings.py b/src/packages/core/lfx/components/cohere/cohere_embeddings.py
similarity index 100%
rename from src/lfx/src/lfx/components/cohere/cohere_embeddings.py
rename to src/packages/core/lfx/components/cohere/cohere_embeddings.py
diff --git a/src/lfx/src/lfx/components/cohere/cohere_models.py b/src/packages/core/lfx/components/cohere/cohere_models.py
similarity index 100%
rename from src/lfx/src/lfx/components/cohere/cohere_models.py
rename to src/packages/core/lfx/components/cohere/cohere_models.py
diff --git a/src/lfx/src/lfx/components/cohere/cohere_rerank.py b/src/packages/core/lfx/components/cohere/cohere_rerank.py
similarity index 100%
rename from src/lfx/src/lfx/components/cohere/cohere_rerank.py
rename to src/packages/core/lfx/components/cohere/cohere_rerank.py
diff --git a/src/lfx/src/lfx/components/composio/__init__.py b/src/packages/core/lfx/components/composio/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/composio/__init__.py
rename to src/packages/core/lfx/components/composio/__init__.py
diff --git a/src/lfx/src/lfx/components/composio/composio_api.py b/src/packages/core/lfx/components/composio/composio_api.py
similarity index 100%
rename from src/lfx/src/lfx/components/composio/composio_api.py
rename to src/packages/core/lfx/components/composio/composio_api.py
diff --git a/src/lfx/src/lfx/components/composio/dropbox_compnent.py b/src/packages/core/lfx/components/composio/dropbox_compnent.py
similarity index 100%
rename from src/lfx/src/lfx/components/composio/dropbox_compnent.py
rename to src/packages/core/lfx/components/composio/dropbox_compnent.py
diff --git a/src/lfx/src/lfx/components/composio/github.composio.py b/src/packages/core/lfx/components/composio/github.composio.py
similarity index 100%
rename from src/lfx/src/lfx/components/composio/github.composio.py
rename to src/packages/core/lfx/components/composio/github.composio.py
diff --git a/src/lfx/src/lfx/components/composio/gmail_composio.py b/src/packages/core/lfx/components/composio/gmail_composio.py
similarity index 100%
rename from src/lfx/src/lfx/components/composio/gmail_composio.py
rename to src/packages/core/lfx/components/composio/gmail_composio.py
diff --git a/src/lfx/src/lfx/components/composio/googlecalendar_composio.py b/src/packages/core/lfx/components/composio/googlecalendar_composio.py
similarity index 100%
rename from src/lfx/src/lfx/components/composio/googlecalendar_composio.py
rename to src/packages/core/lfx/components/composio/googlecalendar_composio.py
diff --git a/src/lfx/src/lfx/components/composio/googlemeet_composio.py b/src/packages/core/lfx/components/composio/googlemeet_composio.py
similarity index 100%
rename from src/lfx/src/lfx/components/composio/googlemeet_composio.py
rename to src/packages/core/lfx/components/composio/googlemeet_composio.py
diff --git a/src/lfx/src/lfx/components/composio/googletasks_composio.py b/src/packages/core/lfx/components/composio/googletasks_composio.py
similarity index 100%
rename from src/lfx/src/lfx/components/composio/googletasks_composio.py
rename to src/packages/core/lfx/components/composio/googletasks_composio.py
diff --git a/src/lfx/src/lfx/components/composio/linear_composio.py b/src/packages/core/lfx/components/composio/linear_composio.py
similarity index 100%
rename from src/lfx/src/lfx/components/composio/linear_composio.py
rename to src/packages/core/lfx/components/composio/linear_composio.py
diff --git a/src/lfx/src/lfx/components/composio/outlook_composio.py b/src/packages/core/lfx/components/composio/outlook_composio.py
similarity index 100%
rename from src/lfx/src/lfx/components/composio/outlook_composio.py
rename to src/packages/core/lfx/components/composio/outlook_composio.py
diff --git a/src/lfx/src/lfx/components/composio/reddit_composio.py b/src/packages/core/lfx/components/composio/reddit_composio.py
similarity index 100%
rename from src/lfx/src/lfx/components/composio/reddit_composio.py
rename to src/packages/core/lfx/components/composio/reddit_composio.py
diff --git a/src/lfx/src/lfx/components/composio/slack_composio.py b/src/packages/core/lfx/components/composio/slack_composio.py
similarity index 100%
rename from src/lfx/src/lfx/components/composio/slack_composio.py
rename to src/packages/core/lfx/components/composio/slack_composio.py
diff --git a/src/lfx/src/lfx/components/composio/slackbot_composio.py b/src/packages/core/lfx/components/composio/slackbot_composio.py
similarity index 100%
rename from src/lfx/src/lfx/components/composio/slackbot_composio.py
rename to src/packages/core/lfx/components/composio/slackbot_composio.py
diff --git a/src/lfx/src/lfx/components/composio/supabase_composio.py b/src/packages/core/lfx/components/composio/supabase_composio.py
similarity index 100%
rename from src/lfx/src/lfx/components/composio/supabase_composio.py
rename to src/packages/core/lfx/components/composio/supabase_composio.py
diff --git a/src/lfx/src/lfx/components/composio/todoist_composio.py b/src/packages/core/lfx/components/composio/todoist_composio.py
similarity index 100%
rename from src/lfx/src/lfx/components/composio/todoist_composio.py
rename to src/packages/core/lfx/components/composio/todoist_composio.py
diff --git a/src/lfx/src/lfx/components/composio/youtube_composio.py b/src/packages/core/lfx/components/composio/youtube_composio.py
similarity index 100%
rename from src/lfx/src/lfx/components/composio/youtube_composio.py
rename to src/packages/core/lfx/components/composio/youtube_composio.py
diff --git a/src/lfx/src/lfx/components/confluence/__init__.py b/src/packages/core/lfx/components/confluence/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/confluence/__init__.py
rename to src/packages/core/lfx/components/confluence/__init__.py
diff --git a/src/lfx/src/lfx/components/confluence/confluence.py b/src/packages/core/lfx/components/confluence/confluence.py
similarity index 100%
rename from src/lfx/src/lfx/components/confluence/confluence.py
rename to src/packages/core/lfx/components/confluence/confluence.py
diff --git a/src/lfx/src/lfx/components/couchbase/__init__.py b/src/packages/core/lfx/components/couchbase/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/couchbase/__init__.py
rename to src/packages/core/lfx/components/couchbase/__init__.py
diff --git a/src/lfx/src/lfx/components/couchbase/couchbase.py b/src/packages/core/lfx/components/couchbase/couchbase.py
similarity index 100%
rename from src/lfx/src/lfx/components/couchbase/couchbase.py
rename to src/packages/core/lfx/components/couchbase/couchbase.py
diff --git a/src/lfx/src/lfx/components/crewai/__init__.py b/src/packages/core/lfx/components/crewai/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/crewai/__init__.py
rename to src/packages/core/lfx/components/crewai/__init__.py
diff --git a/src/lfx/src/lfx/components/crewai/crewai.py b/src/packages/core/lfx/components/crewai/crewai.py
similarity index 100%
rename from src/lfx/src/lfx/components/crewai/crewai.py
rename to src/packages/core/lfx/components/crewai/crewai.py
diff --git a/src/lfx/src/lfx/components/crewai/hierarchical_crew.py b/src/packages/core/lfx/components/crewai/hierarchical_crew.py
similarity index 100%
rename from src/lfx/src/lfx/components/crewai/hierarchical_crew.py
rename to src/packages/core/lfx/components/crewai/hierarchical_crew.py
diff --git a/src/lfx/src/lfx/components/crewai/hierarchical_task.py b/src/packages/core/lfx/components/crewai/hierarchical_task.py
similarity index 100%
rename from src/lfx/src/lfx/components/crewai/hierarchical_task.py
rename to src/packages/core/lfx/components/crewai/hierarchical_task.py
diff --git a/src/lfx/src/lfx/components/crewai/sequential_crew.py b/src/packages/core/lfx/components/crewai/sequential_crew.py
similarity index 100%
rename from src/lfx/src/lfx/components/crewai/sequential_crew.py
rename to src/packages/core/lfx/components/crewai/sequential_crew.py
diff --git a/src/lfx/src/lfx/components/crewai/sequential_task.py b/src/packages/core/lfx/components/crewai/sequential_task.py
similarity index 100%
rename from src/lfx/src/lfx/components/crewai/sequential_task.py
rename to src/packages/core/lfx/components/crewai/sequential_task.py
diff --git a/src/lfx/src/lfx/components/crewai/sequential_task_agent.py b/src/packages/core/lfx/components/crewai/sequential_task_agent.py
similarity index 100%
rename from src/lfx/src/lfx/components/crewai/sequential_task_agent.py
rename to src/packages/core/lfx/components/crewai/sequential_task_agent.py
diff --git a/src/lfx/src/lfx/components/custom_component/__init__.py b/src/packages/core/lfx/components/custom_component/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/custom_component/__init__.py
rename to src/packages/core/lfx/components/custom_component/__init__.py
diff --git a/src/lfx/src/lfx/components/custom_component/custom_component.py b/src/packages/core/lfx/components/custom_component/custom_component.py
similarity index 100%
rename from src/lfx/src/lfx/components/custom_component/custom_component.py
rename to src/packages/core/lfx/components/custom_component/custom_component.py
diff --git a/src/lfx/src/lfx/components/data/__init__.py b/src/packages/core/lfx/components/data/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/data/__init__.py
rename to src/packages/core/lfx/components/data/__init__.py
diff --git a/src/lfx/src/lfx/components/data/api_request.py b/src/packages/core/lfx/components/data/api_request.py
similarity index 100%
rename from src/lfx/src/lfx/components/data/api_request.py
rename to src/packages/core/lfx/components/data/api_request.py
diff --git a/src/lfx/src/lfx/components/data/csv_to_data.py b/src/packages/core/lfx/components/data/csv_to_data.py
similarity index 100%
rename from src/lfx/src/lfx/components/data/csv_to_data.py
rename to src/packages/core/lfx/components/data/csv_to_data.py
diff --git a/src/lfx/src/lfx/components/data/directory.py b/src/packages/core/lfx/components/data/directory.py
similarity index 100%
rename from src/lfx/src/lfx/components/data/directory.py
rename to src/packages/core/lfx/components/data/directory.py
diff --git a/src/lfx/src/lfx/components/data/file.py b/src/packages/core/lfx/components/data/file.py
similarity index 100%
rename from src/lfx/src/lfx/components/data/file.py
rename to src/packages/core/lfx/components/data/file.py
diff --git a/src/lfx/src/lfx/components/data/json_to_data.py b/src/packages/core/lfx/components/data/json_to_data.py
similarity index 100%
rename from src/lfx/src/lfx/components/data/json_to_data.py
rename to src/packages/core/lfx/components/data/json_to_data.py
diff --git a/src/lfx/src/lfx/components/data/news_search.py b/src/packages/core/lfx/components/data/news_search.py
similarity index 100%
rename from src/lfx/src/lfx/components/data/news_search.py
rename to src/packages/core/lfx/components/data/news_search.py
diff --git a/src/lfx/src/lfx/components/data/rss.py b/src/packages/core/lfx/components/data/rss.py
similarity index 100%
rename from src/lfx/src/lfx/components/data/rss.py
rename to src/packages/core/lfx/components/data/rss.py
diff --git a/src/lfx/src/lfx/components/data/sql_executor.py b/src/packages/core/lfx/components/data/sql_executor.py
similarity index 100%
rename from src/lfx/src/lfx/components/data/sql_executor.py
rename to src/packages/core/lfx/components/data/sql_executor.py
diff --git a/src/lfx/src/lfx/components/data/url.py b/src/packages/core/lfx/components/data/url.py
similarity index 100%
rename from src/lfx/src/lfx/components/data/url.py
rename to src/packages/core/lfx/components/data/url.py
diff --git a/src/lfx/src/lfx/components/data/web_search.py b/src/packages/core/lfx/components/data/web_search.py
similarity index 100%
rename from src/lfx/src/lfx/components/data/web_search.py
rename to src/packages/core/lfx/components/data/web_search.py
diff --git a/src/lfx/src/lfx/components/data/webhook.py b/src/packages/core/lfx/components/data/webhook.py
similarity index 100%
rename from src/lfx/src/lfx/components/data/webhook.py
rename to src/packages/core/lfx/components/data/webhook.py
diff --git a/src/lfx/src/lfx/components/datastax/__init__.py b/src/packages/core/lfx/components/datastax/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/datastax/__init__.py
rename to src/packages/core/lfx/components/datastax/__init__.py
diff --git a/src/lfx/src/lfx/components/datastax/astra_assistant_manager.py b/src/packages/core/lfx/components/datastax/astra_assistant_manager.py
similarity index 100%
rename from src/lfx/src/lfx/components/datastax/astra_assistant_manager.py
rename to src/packages/core/lfx/components/datastax/astra_assistant_manager.py
diff --git a/src/lfx/src/lfx/components/datastax/astra_db.py b/src/packages/core/lfx/components/datastax/astra_db.py
similarity index 100%
rename from src/lfx/src/lfx/components/datastax/astra_db.py
rename to src/packages/core/lfx/components/datastax/astra_db.py
diff --git a/src/lfx/src/lfx/components/datastax/astra_vectorize.py b/src/packages/core/lfx/components/datastax/astra_vectorize.py
similarity index 100%
rename from src/lfx/src/lfx/components/datastax/astra_vectorize.py
rename to src/packages/core/lfx/components/datastax/astra_vectorize.py
diff --git a/src/lfx/src/lfx/components/datastax/astradb.py b/src/packages/core/lfx/components/datastax/astradb.py
similarity index 100%
rename from src/lfx/src/lfx/components/datastax/astradb.py
rename to src/packages/core/lfx/components/datastax/astradb.py
diff --git a/src/lfx/src/lfx/components/datastax/astradb_cql.py b/src/packages/core/lfx/components/datastax/astradb_cql.py
similarity index 100%
rename from src/lfx/src/lfx/components/datastax/astradb_cql.py
rename to src/packages/core/lfx/components/datastax/astradb_cql.py
diff --git a/src/lfx/src/lfx/components/datastax/astradb_graph.py b/src/packages/core/lfx/components/datastax/astradb_graph.py
similarity index 100%
rename from src/lfx/src/lfx/components/datastax/astradb_graph.py
rename to src/packages/core/lfx/components/datastax/astradb_graph.py
diff --git a/src/lfx/src/lfx/components/datastax/astradb_tool.py b/src/packages/core/lfx/components/datastax/astradb_tool.py
similarity index 100%
rename from src/lfx/src/lfx/components/datastax/astradb_tool.py
rename to src/packages/core/lfx/components/datastax/astradb_tool.py
diff --git a/src/lfx/src/lfx/components/datastax/astradb_vectorstore.py b/src/packages/core/lfx/components/datastax/astradb_vectorstore.py
similarity index 100%
rename from src/lfx/src/lfx/components/datastax/astradb_vectorstore.py
rename to src/packages/core/lfx/components/datastax/astradb_vectorstore.py
diff --git a/src/lfx/src/lfx/components/datastax/cassandra.py b/src/packages/core/lfx/components/datastax/cassandra.py
similarity index 100%
rename from src/lfx/src/lfx/components/datastax/cassandra.py
rename to src/packages/core/lfx/components/datastax/cassandra.py
diff --git a/src/lfx/src/lfx/components/datastax/create_assistant.py b/src/packages/core/lfx/components/datastax/create_assistant.py
similarity index 100%
rename from src/lfx/src/lfx/components/datastax/create_assistant.py
rename to src/packages/core/lfx/components/datastax/create_assistant.py
diff --git a/src/lfx/src/lfx/components/datastax/create_thread.py b/src/packages/core/lfx/components/datastax/create_thread.py
similarity index 100%
rename from src/lfx/src/lfx/components/datastax/create_thread.py
rename to src/packages/core/lfx/components/datastax/create_thread.py
diff --git a/src/lfx/src/lfx/components/datastax/dotenv.py b/src/packages/core/lfx/components/datastax/dotenv.py
similarity index 100%
rename from src/lfx/src/lfx/components/datastax/dotenv.py
rename to src/packages/core/lfx/components/datastax/dotenv.py
diff --git a/src/lfx/src/lfx/components/datastax/get_assistant.py b/src/packages/core/lfx/components/datastax/get_assistant.py
similarity index 100%
rename from src/lfx/src/lfx/components/datastax/get_assistant.py
rename to src/packages/core/lfx/components/datastax/get_assistant.py
diff --git a/src/lfx/src/lfx/components/datastax/getenvvar.py b/src/packages/core/lfx/components/datastax/getenvvar.py
similarity index 100%
rename from src/lfx/src/lfx/components/datastax/getenvvar.py
rename to src/packages/core/lfx/components/datastax/getenvvar.py
diff --git a/src/lfx/src/lfx/components/datastax/graph_rag.py b/src/packages/core/lfx/components/datastax/graph_rag.py
similarity index 100%
rename from src/lfx/src/lfx/components/datastax/graph_rag.py
rename to src/packages/core/lfx/components/datastax/graph_rag.py
diff --git a/src/lfx/src/lfx/components/datastax/hcd.py b/src/packages/core/lfx/components/datastax/hcd.py
similarity index 100%
rename from src/lfx/src/lfx/components/datastax/hcd.py
rename to src/packages/core/lfx/components/datastax/hcd.py
diff --git a/src/lfx/src/lfx/components/datastax/list_assistants.py b/src/packages/core/lfx/components/datastax/list_assistants.py
similarity index 100%
rename from src/lfx/src/lfx/components/datastax/list_assistants.py
rename to src/packages/core/lfx/components/datastax/list_assistants.py
diff --git a/src/lfx/src/lfx/components/datastax/run.py b/src/packages/core/lfx/components/datastax/run.py
similarity index 100%
rename from src/lfx/src/lfx/components/datastax/run.py
rename to src/packages/core/lfx/components/datastax/run.py
diff --git a/src/lfx/src/lfx/components/deactivated/__init__.py b/src/packages/core/lfx/components/deactivated/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/deactivated/__init__.py
rename to src/packages/core/lfx/components/deactivated/__init__.py
diff --git a/src/lfx/src/lfx/components/deactivated/amazon_kendra.py b/src/packages/core/lfx/components/deactivated/amazon_kendra.py
similarity index 100%
rename from src/lfx/src/lfx/components/deactivated/amazon_kendra.py
rename to src/packages/core/lfx/components/deactivated/amazon_kendra.py
diff --git a/src/lfx/src/lfx/components/deactivated/chat_litellm_model.py b/src/packages/core/lfx/components/deactivated/chat_litellm_model.py
similarity index 100%
rename from src/lfx/src/lfx/components/deactivated/chat_litellm_model.py
rename to src/packages/core/lfx/components/deactivated/chat_litellm_model.py
diff --git a/src/lfx/src/lfx/components/deactivated/code_block_extractor.py b/src/packages/core/lfx/components/deactivated/code_block_extractor.py
similarity index 100%
rename from src/lfx/src/lfx/components/deactivated/code_block_extractor.py
rename to src/packages/core/lfx/components/deactivated/code_block_extractor.py
diff --git a/src/lfx/src/lfx/components/deactivated/documents_to_data.py b/src/packages/core/lfx/components/deactivated/documents_to_data.py
similarity index 100%
rename from src/lfx/src/lfx/components/deactivated/documents_to_data.py
rename to src/packages/core/lfx/components/deactivated/documents_to_data.py
diff --git a/src/lfx/src/lfx/components/deactivated/embed.py b/src/packages/core/lfx/components/deactivated/embed.py
similarity index 100%
rename from src/lfx/src/lfx/components/deactivated/embed.py
rename to src/packages/core/lfx/components/deactivated/embed.py
diff --git a/src/lfx/src/lfx/components/deactivated/extract_key_from_data.py b/src/packages/core/lfx/components/deactivated/extract_key_from_data.py
similarity index 100%
rename from src/lfx/src/lfx/components/deactivated/extract_key_from_data.py
rename to src/packages/core/lfx/components/deactivated/extract_key_from_data.py
diff --git a/src/lfx/src/lfx/components/deactivated/json_document_builder.py b/src/packages/core/lfx/components/deactivated/json_document_builder.py
similarity index 100%
rename from src/lfx/src/lfx/components/deactivated/json_document_builder.py
rename to src/packages/core/lfx/components/deactivated/json_document_builder.py
diff --git a/src/lfx/src/lfx/components/deactivated/list_flows.py b/src/packages/core/lfx/components/deactivated/list_flows.py
similarity index 100%
rename from src/lfx/src/lfx/components/deactivated/list_flows.py
rename to src/packages/core/lfx/components/deactivated/list_flows.py
diff --git a/src/lfx/src/lfx/components/deactivated/mcp_sse.py b/src/packages/core/lfx/components/deactivated/mcp_sse.py
similarity index 100%
rename from src/lfx/src/lfx/components/deactivated/mcp_sse.py
rename to src/packages/core/lfx/components/deactivated/mcp_sse.py
diff --git a/src/lfx/src/lfx/components/deactivated/mcp_stdio.py b/src/packages/core/lfx/components/deactivated/mcp_stdio.py
similarity index 100%
rename from src/lfx/src/lfx/components/deactivated/mcp_stdio.py
rename to src/packages/core/lfx/components/deactivated/mcp_stdio.py
diff --git a/src/lfx/src/lfx/components/deactivated/merge_data.py b/src/packages/core/lfx/components/deactivated/merge_data.py
similarity index 100%
rename from src/lfx/src/lfx/components/deactivated/merge_data.py
rename to src/packages/core/lfx/components/deactivated/merge_data.py
diff --git a/src/lfx/src/lfx/components/deactivated/message.py b/src/packages/core/lfx/components/deactivated/message.py
similarity index 100%
rename from src/lfx/src/lfx/components/deactivated/message.py
rename to src/packages/core/lfx/components/deactivated/message.py
diff --git a/src/lfx/src/lfx/components/deactivated/metal.py b/src/packages/core/lfx/components/deactivated/metal.py
similarity index 100%
rename from src/lfx/src/lfx/components/deactivated/metal.py
rename to src/packages/core/lfx/components/deactivated/metal.py
diff --git a/src/lfx/src/lfx/components/deactivated/multi_query.py b/src/packages/core/lfx/components/deactivated/multi_query.py
similarity index 100%
rename from src/lfx/src/lfx/components/deactivated/multi_query.py
rename to src/packages/core/lfx/components/deactivated/multi_query.py
diff --git a/src/lfx/src/lfx/components/deactivated/retriever.py b/src/packages/core/lfx/components/deactivated/retriever.py
similarity index 100%
rename from src/lfx/src/lfx/components/deactivated/retriever.py
rename to src/packages/core/lfx/components/deactivated/retriever.py
diff --git a/src/lfx/src/lfx/components/deactivated/selective_passthrough.py b/src/packages/core/lfx/components/deactivated/selective_passthrough.py
similarity index 100%
rename from src/lfx/src/lfx/components/deactivated/selective_passthrough.py
rename to src/packages/core/lfx/components/deactivated/selective_passthrough.py
diff --git a/src/lfx/src/lfx/components/deactivated/should_run_next.py b/src/packages/core/lfx/components/deactivated/should_run_next.py
similarity index 100%
rename from src/lfx/src/lfx/components/deactivated/should_run_next.py
rename to src/packages/core/lfx/components/deactivated/should_run_next.py
diff --git a/src/lfx/src/lfx/components/deactivated/split_text.py b/src/packages/core/lfx/components/deactivated/split_text.py
similarity index 100%
rename from src/lfx/src/lfx/components/deactivated/split_text.py
rename to src/packages/core/lfx/components/deactivated/split_text.py
diff --git a/src/lfx/src/lfx/components/deactivated/store_message.py b/src/packages/core/lfx/components/deactivated/store_message.py
similarity index 100%
rename from src/lfx/src/lfx/components/deactivated/store_message.py
rename to src/packages/core/lfx/components/deactivated/store_message.py
diff --git a/src/lfx/src/lfx/components/deactivated/sub_flow.py b/src/packages/core/lfx/components/deactivated/sub_flow.py
similarity index 100%
rename from src/lfx/src/lfx/components/deactivated/sub_flow.py
rename to src/packages/core/lfx/components/deactivated/sub_flow.py
diff --git a/src/lfx/src/lfx/components/deactivated/vectara_self_query.py b/src/packages/core/lfx/components/deactivated/vectara_self_query.py
similarity index 100%
rename from src/lfx/src/lfx/components/deactivated/vectara_self_query.py
rename to src/packages/core/lfx/components/deactivated/vectara_self_query.py
diff --git a/src/lfx/src/lfx/components/deactivated/vector_store.py b/src/packages/core/lfx/components/deactivated/vector_store.py
similarity index 100%
rename from src/lfx/src/lfx/components/deactivated/vector_store.py
rename to src/packages/core/lfx/components/deactivated/vector_store.py
diff --git a/src/lfx/src/lfx/components/deepseek/__init__.py b/src/packages/core/lfx/components/deepseek/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/deepseek/__init__.py
rename to src/packages/core/lfx/components/deepseek/__init__.py
diff --git a/src/lfx/src/lfx/components/deepseek/deepseek.py b/src/packages/core/lfx/components/deepseek/deepseek.py
similarity index 100%
rename from src/lfx/src/lfx/components/deepseek/deepseek.py
rename to src/packages/core/lfx/components/deepseek/deepseek.py
diff --git a/src/lfx/src/lfx/components/docling/__init__.py b/src/packages/core/lfx/components/docling/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/docling/__init__.py
rename to src/packages/core/lfx/components/docling/__init__.py
diff --git a/src/lfx/src/lfx/components/docling/chunk_docling_document.py b/src/packages/core/lfx/components/docling/chunk_docling_document.py
similarity index 100%
rename from src/lfx/src/lfx/components/docling/chunk_docling_document.py
rename to src/packages/core/lfx/components/docling/chunk_docling_document.py
diff --git a/src/lfx/src/lfx/components/docling/docling_inline.py b/src/packages/core/lfx/components/docling/docling_inline.py
similarity index 100%
rename from src/lfx/src/lfx/components/docling/docling_inline.py
rename to src/packages/core/lfx/components/docling/docling_inline.py
diff --git a/src/lfx/src/lfx/components/docling/docling_remote.py b/src/packages/core/lfx/components/docling/docling_remote.py
similarity index 100%
rename from src/lfx/src/lfx/components/docling/docling_remote.py
rename to src/packages/core/lfx/components/docling/docling_remote.py
diff --git a/src/lfx/src/lfx/components/docling/export_docling_document.py b/src/packages/core/lfx/components/docling/export_docling_document.py
similarity index 100%
rename from src/lfx/src/lfx/components/docling/export_docling_document.py
rename to src/packages/core/lfx/components/docling/export_docling_document.py
diff --git a/src/lfx/src/lfx/components/documentloaders/__init__.py b/src/packages/core/lfx/components/documentloaders/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/documentloaders/__init__.py
rename to src/packages/core/lfx/components/documentloaders/__init__.py
diff --git a/src/lfx/src/lfx/components/duckduckgo/__init__.py b/src/packages/core/lfx/components/duckduckgo/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/duckduckgo/__init__.py
rename to src/packages/core/lfx/components/duckduckgo/__init__.py
diff --git a/src/lfx/src/lfx/components/duckduckgo/duck_duck_go_search_run.py b/src/packages/core/lfx/components/duckduckgo/duck_duck_go_search_run.py
similarity index 100%
rename from src/lfx/src/lfx/components/duckduckgo/duck_duck_go_search_run.py
rename to src/packages/core/lfx/components/duckduckgo/duck_duck_go_search_run.py
diff --git a/src/lfx/src/lfx/components/elastic/__init__.py b/src/packages/core/lfx/components/elastic/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/elastic/__init__.py
rename to src/packages/core/lfx/components/elastic/__init__.py
diff --git a/src/lfx/src/lfx/components/elastic/elasticsearch.py b/src/packages/core/lfx/components/elastic/elasticsearch.py
similarity index 100%
rename from src/lfx/src/lfx/components/elastic/elasticsearch.py
rename to src/packages/core/lfx/components/elastic/elasticsearch.py
diff --git a/src/lfx/src/lfx/components/elastic/opensearch.py b/src/packages/core/lfx/components/elastic/opensearch.py
similarity index 100%
rename from src/lfx/src/lfx/components/elastic/opensearch.py
rename to src/packages/core/lfx/components/elastic/opensearch.py
diff --git a/src/lfx/src/lfx/components/embeddings/__init__.py b/src/packages/core/lfx/components/embeddings/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/embeddings/__init__.py
rename to src/packages/core/lfx/components/embeddings/__init__.py
diff --git a/src/lfx/src/lfx/components/embeddings/similarity.py b/src/packages/core/lfx/components/embeddings/similarity.py
similarity index 100%
rename from src/lfx/src/lfx/components/embeddings/similarity.py
rename to src/packages/core/lfx/components/embeddings/similarity.py
diff --git a/src/lfx/src/lfx/components/embeddings/text_embedder.py b/src/packages/core/lfx/components/embeddings/text_embedder.py
similarity index 100%
rename from src/lfx/src/lfx/components/embeddings/text_embedder.py
rename to src/packages/core/lfx/components/embeddings/text_embedder.py
diff --git a/src/lfx/src/lfx/components/exa/__init__.py b/src/packages/core/lfx/components/exa/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/exa/__init__.py
rename to src/packages/core/lfx/components/exa/__init__.py
diff --git a/src/lfx/src/lfx/components/exa/exa_search.py b/src/packages/core/lfx/components/exa/exa_search.py
similarity index 100%
rename from src/lfx/src/lfx/components/exa/exa_search.py
rename to src/packages/core/lfx/components/exa/exa_search.py
diff --git a/src/lfx/src/lfx/components/firecrawl/__init__.py b/src/packages/core/lfx/components/firecrawl/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/firecrawl/__init__.py
rename to src/packages/core/lfx/components/firecrawl/__init__.py
diff --git a/src/lfx/src/lfx/components/firecrawl/firecrawl_crawl_api.py b/src/packages/core/lfx/components/firecrawl/firecrawl_crawl_api.py
similarity index 100%
rename from src/lfx/src/lfx/components/firecrawl/firecrawl_crawl_api.py
rename to src/packages/core/lfx/components/firecrawl/firecrawl_crawl_api.py
diff --git a/src/lfx/src/lfx/components/firecrawl/firecrawl_extract_api.py b/src/packages/core/lfx/components/firecrawl/firecrawl_extract_api.py
similarity index 100%
rename from src/lfx/src/lfx/components/firecrawl/firecrawl_extract_api.py
rename to src/packages/core/lfx/components/firecrawl/firecrawl_extract_api.py
diff --git a/src/lfx/src/lfx/components/firecrawl/firecrawl_map_api.py b/src/packages/core/lfx/components/firecrawl/firecrawl_map_api.py
similarity index 100%
rename from src/lfx/src/lfx/components/firecrawl/firecrawl_map_api.py
rename to src/packages/core/lfx/components/firecrawl/firecrawl_map_api.py
diff --git a/src/lfx/src/lfx/components/firecrawl/firecrawl_scrape_api.py b/src/packages/core/lfx/components/firecrawl/firecrawl_scrape_api.py
similarity index 100%
rename from src/lfx/src/lfx/components/firecrawl/firecrawl_scrape_api.py
rename to src/packages/core/lfx/components/firecrawl/firecrawl_scrape_api.py
diff --git a/src/lfx/src/lfx/components/git/__init__.py b/src/packages/core/lfx/components/git/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/git/__init__.py
rename to src/packages/core/lfx/components/git/__init__.py
diff --git a/src/lfx/src/lfx/components/git/git.py b/src/packages/core/lfx/components/git/git.py
similarity index 100%
rename from src/lfx/src/lfx/components/git/git.py
rename to src/packages/core/lfx/components/git/git.py
diff --git a/src/lfx/src/lfx/components/git/gitextractor.py b/src/packages/core/lfx/components/git/gitextractor.py
similarity index 100%
rename from src/lfx/src/lfx/components/git/gitextractor.py
rename to src/packages/core/lfx/components/git/gitextractor.py
diff --git a/src/lfx/src/lfx/components/glean/__init__.py b/src/packages/core/lfx/components/glean/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/glean/__init__.py
rename to src/packages/core/lfx/components/glean/__init__.py
diff --git a/src/lfx/src/lfx/components/glean/glean_search_api.py b/src/packages/core/lfx/components/glean/glean_search_api.py
similarity index 100%
rename from src/lfx/src/lfx/components/glean/glean_search_api.py
rename to src/packages/core/lfx/components/glean/glean_search_api.py
diff --git a/src/lfx/src/lfx/components/google/__init__.py b/src/packages/core/lfx/components/google/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/google/__init__.py
rename to src/packages/core/lfx/components/google/__init__.py
diff --git a/src/lfx/src/lfx/components/google/gmail.py b/src/packages/core/lfx/components/google/gmail.py
similarity index 100%
rename from src/lfx/src/lfx/components/google/gmail.py
rename to src/packages/core/lfx/components/google/gmail.py
diff --git a/src/lfx/src/lfx/components/google/google_bq_sql_executor.py b/src/packages/core/lfx/components/google/google_bq_sql_executor.py
similarity index 100%
rename from src/lfx/src/lfx/components/google/google_bq_sql_executor.py
rename to src/packages/core/lfx/components/google/google_bq_sql_executor.py
diff --git a/src/lfx/src/lfx/components/google/google_drive.py b/src/packages/core/lfx/components/google/google_drive.py
similarity index 100%
rename from src/lfx/src/lfx/components/google/google_drive.py
rename to src/packages/core/lfx/components/google/google_drive.py
diff --git a/src/lfx/src/lfx/components/google/google_drive_search.py b/src/packages/core/lfx/components/google/google_drive_search.py
similarity index 100%
rename from src/lfx/src/lfx/components/google/google_drive_search.py
rename to src/packages/core/lfx/components/google/google_drive_search.py
diff --git a/src/lfx/src/lfx/components/google/google_generative_ai.py b/src/packages/core/lfx/components/google/google_generative_ai.py
similarity index 100%
rename from src/lfx/src/lfx/components/google/google_generative_ai.py
rename to src/packages/core/lfx/components/google/google_generative_ai.py
diff --git a/src/lfx/src/lfx/components/google/google_generative_ai_embeddings.py b/src/packages/core/lfx/components/google/google_generative_ai_embeddings.py
similarity index 100%
rename from src/lfx/src/lfx/components/google/google_generative_ai_embeddings.py
rename to src/packages/core/lfx/components/google/google_generative_ai_embeddings.py
diff --git a/src/lfx/src/lfx/components/google/google_oauth_token.py b/src/packages/core/lfx/components/google/google_oauth_token.py
similarity index 100%
rename from src/lfx/src/lfx/components/google/google_oauth_token.py
rename to src/packages/core/lfx/components/google/google_oauth_token.py
diff --git a/src/lfx/src/lfx/components/google/google_search_api_core.py b/src/packages/core/lfx/components/google/google_search_api_core.py
similarity index 100%
rename from src/lfx/src/lfx/components/google/google_search_api_core.py
rename to src/packages/core/lfx/components/google/google_search_api_core.py
diff --git a/src/lfx/src/lfx/components/google/google_serper_api_core.py b/src/packages/core/lfx/components/google/google_serper_api_core.py
similarity index 100%
rename from src/lfx/src/lfx/components/google/google_serper_api_core.py
rename to src/packages/core/lfx/components/google/google_serper_api_core.py
diff --git a/src/lfx/src/lfx/components/groq/__init__.py b/src/packages/core/lfx/components/groq/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/groq/__init__.py
rename to src/packages/core/lfx/components/groq/__init__.py
diff --git a/src/lfx/src/lfx/components/groq/groq.py b/src/packages/core/lfx/components/groq/groq.py
similarity index 100%
rename from src/lfx/src/lfx/components/groq/groq.py
rename to src/packages/core/lfx/components/groq/groq.py
diff --git a/src/lfx/src/lfx/components/helpers/__init__.py b/src/packages/core/lfx/components/helpers/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/helpers/__init__.py
rename to src/packages/core/lfx/components/helpers/__init__.py
diff --git a/src/lfx/src/lfx/components/helpers/calculator_core.py b/src/packages/core/lfx/components/helpers/calculator_core.py
similarity index 100%
rename from src/lfx/src/lfx/components/helpers/calculator_core.py
rename to src/packages/core/lfx/components/helpers/calculator_core.py
diff --git a/src/lfx/src/lfx/components/helpers/create_list.py b/src/packages/core/lfx/components/helpers/create_list.py
similarity index 100%
rename from src/lfx/src/lfx/components/helpers/create_list.py
rename to src/packages/core/lfx/components/helpers/create_list.py
diff --git a/src/lfx/src/lfx/components/helpers/current_date.py b/src/packages/core/lfx/components/helpers/current_date.py
similarity index 100%
rename from src/lfx/src/lfx/components/helpers/current_date.py
rename to src/packages/core/lfx/components/helpers/current_date.py
diff --git a/src/lfx/src/lfx/components/helpers/id_generator.py b/src/packages/core/lfx/components/helpers/id_generator.py
similarity index 100%
rename from src/lfx/src/lfx/components/helpers/id_generator.py
rename to src/packages/core/lfx/components/helpers/id_generator.py
diff --git a/src/lfx/src/lfx/components/helpers/memory.py b/src/packages/core/lfx/components/helpers/memory.py
similarity index 100%
rename from src/lfx/src/lfx/components/helpers/memory.py
rename to src/packages/core/lfx/components/helpers/memory.py
diff --git a/src/lfx/src/lfx/components/helpers/output_parser.py b/src/packages/core/lfx/components/helpers/output_parser.py
similarity index 100%
rename from src/lfx/src/lfx/components/helpers/output_parser.py
rename to src/packages/core/lfx/components/helpers/output_parser.py
diff --git a/src/lfx/src/lfx/components/helpers/store_message.py b/src/packages/core/lfx/components/helpers/store_message.py
similarity index 100%
rename from src/lfx/src/lfx/components/helpers/store_message.py
rename to src/packages/core/lfx/components/helpers/store_message.py
diff --git a/src/lfx/src/lfx/components/homeassistant/__init__.py b/src/packages/core/lfx/components/homeassistant/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/homeassistant/__init__.py
rename to src/packages/core/lfx/components/homeassistant/__init__.py
diff --git a/src/lfx/src/lfx/components/homeassistant/home_assistant_control.py b/src/packages/core/lfx/components/homeassistant/home_assistant_control.py
similarity index 100%
rename from src/lfx/src/lfx/components/homeassistant/home_assistant_control.py
rename to src/packages/core/lfx/components/homeassistant/home_assistant_control.py
diff --git a/src/lfx/src/lfx/components/homeassistant/list_home_assistant_states.py b/src/packages/core/lfx/components/homeassistant/list_home_assistant_states.py
similarity index 100%
rename from src/lfx/src/lfx/components/homeassistant/list_home_assistant_states.py
rename to src/packages/core/lfx/components/homeassistant/list_home_assistant_states.py
diff --git a/src/lfx/src/lfx/components/huggingface/__init__.py b/src/packages/core/lfx/components/huggingface/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/huggingface/__init__.py
rename to src/packages/core/lfx/components/huggingface/__init__.py
diff --git a/src/lfx/src/lfx/components/huggingface/huggingface.py b/src/packages/core/lfx/components/huggingface/huggingface.py
similarity index 100%
rename from src/lfx/src/lfx/components/huggingface/huggingface.py
rename to src/packages/core/lfx/components/huggingface/huggingface.py
diff --git a/src/lfx/src/lfx/components/huggingface/huggingface_inference_api.py b/src/packages/core/lfx/components/huggingface/huggingface_inference_api.py
similarity index 100%
rename from src/lfx/src/lfx/components/huggingface/huggingface_inference_api.py
rename to src/packages/core/lfx/components/huggingface/huggingface_inference_api.py
diff --git a/src/lfx/src/lfx/components/ibm/__init__.py b/src/packages/core/lfx/components/ibm/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/ibm/__init__.py
rename to src/packages/core/lfx/components/ibm/__init__.py
diff --git a/src/lfx/src/lfx/components/ibm/watsonx.py b/src/packages/core/lfx/components/ibm/watsonx.py
similarity index 100%
rename from src/lfx/src/lfx/components/ibm/watsonx.py
rename to src/packages/core/lfx/components/ibm/watsonx.py
diff --git a/src/lfx/src/lfx/components/ibm/watsonx_embeddings.py b/src/packages/core/lfx/components/ibm/watsonx_embeddings.py
similarity index 100%
rename from src/lfx/src/lfx/components/ibm/watsonx_embeddings.py
rename to src/packages/core/lfx/components/ibm/watsonx_embeddings.py
diff --git a/src/lfx/src/lfx/components/icosacomputing/__init__.py b/src/packages/core/lfx/components/icosacomputing/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/icosacomputing/__init__.py
rename to src/packages/core/lfx/components/icosacomputing/__init__.py
diff --git a/src/lfx/src/lfx/components/icosacomputing/combinatorial_reasoner.py b/src/packages/core/lfx/components/icosacomputing/combinatorial_reasoner.py
similarity index 100%
rename from src/lfx/src/lfx/components/icosacomputing/combinatorial_reasoner.py
rename to src/packages/core/lfx/components/icosacomputing/combinatorial_reasoner.py
diff --git a/src/lfx/src/lfx/components/input_output/__init__.py b/src/packages/core/lfx/components/input_output/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/input_output/__init__.py
rename to src/packages/core/lfx/components/input_output/__init__.py
diff --git a/src/lfx/src/lfx/components/input_output/chat.py b/src/packages/core/lfx/components/input_output/chat.py
similarity index 100%
rename from src/lfx/src/lfx/components/input_output/chat.py
rename to src/packages/core/lfx/components/input_output/chat.py
diff --git a/src/lfx/src/lfx/components/input_output/chat_output.py b/src/packages/core/lfx/components/input_output/chat_output.py
similarity index 100%
rename from src/lfx/src/lfx/components/input_output/chat_output.py
rename to src/packages/core/lfx/components/input_output/chat_output.py
diff --git a/src/lfx/src/lfx/components/input_output/text.py b/src/packages/core/lfx/components/input_output/text.py
similarity index 100%
rename from src/lfx/src/lfx/components/input_output/text.py
rename to src/packages/core/lfx/components/input_output/text.py
diff --git a/src/lfx/src/lfx/components/input_output/text_output.py b/src/packages/core/lfx/components/input_output/text_output.py
similarity index 100%
rename from src/lfx/src/lfx/components/input_output/text_output.py
rename to src/packages/core/lfx/components/input_output/text_output.py
diff --git a/src/lfx/src/lfx/components/jigsawstack/__init__.py b/src/packages/core/lfx/components/jigsawstack/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/jigsawstack/__init__.py
rename to src/packages/core/lfx/components/jigsawstack/__init__.py
diff --git a/src/lfx/src/lfx/components/jigsawstack/ai_scrape.py b/src/packages/core/lfx/components/jigsawstack/ai_scrape.py
similarity index 100%
rename from src/lfx/src/lfx/components/jigsawstack/ai_scrape.py
rename to src/packages/core/lfx/components/jigsawstack/ai_scrape.py
diff --git a/src/lfx/src/lfx/components/jigsawstack/ai_web_search.py b/src/packages/core/lfx/components/jigsawstack/ai_web_search.py
similarity index 100%
rename from src/lfx/src/lfx/components/jigsawstack/ai_web_search.py
rename to src/packages/core/lfx/components/jigsawstack/ai_web_search.py
diff --git a/src/lfx/src/lfx/components/jigsawstack/file_read.py b/src/packages/core/lfx/components/jigsawstack/file_read.py
similarity index 100%
rename from src/lfx/src/lfx/components/jigsawstack/file_read.py
rename to src/packages/core/lfx/components/jigsawstack/file_read.py
diff --git a/src/lfx/src/lfx/components/jigsawstack/file_upload.py b/src/packages/core/lfx/components/jigsawstack/file_upload.py
similarity index 100%
rename from src/lfx/src/lfx/components/jigsawstack/file_upload.py
rename to src/packages/core/lfx/components/jigsawstack/file_upload.py
diff --git a/src/lfx/src/lfx/components/jigsawstack/image_generation.py b/src/packages/core/lfx/components/jigsawstack/image_generation.py
similarity index 100%
rename from src/lfx/src/lfx/components/jigsawstack/image_generation.py
rename to src/packages/core/lfx/components/jigsawstack/image_generation.py
diff --git a/src/lfx/src/lfx/components/jigsawstack/nsfw.py b/src/packages/core/lfx/components/jigsawstack/nsfw.py
similarity index 100%
rename from src/lfx/src/lfx/components/jigsawstack/nsfw.py
rename to src/packages/core/lfx/components/jigsawstack/nsfw.py
diff --git a/src/lfx/src/lfx/components/jigsawstack/object_detection.py b/src/packages/core/lfx/components/jigsawstack/object_detection.py
similarity index 100%
rename from src/lfx/src/lfx/components/jigsawstack/object_detection.py
rename to src/packages/core/lfx/components/jigsawstack/object_detection.py
diff --git a/src/lfx/src/lfx/components/jigsawstack/sentiment.py b/src/packages/core/lfx/components/jigsawstack/sentiment.py
similarity index 100%
rename from src/lfx/src/lfx/components/jigsawstack/sentiment.py
rename to src/packages/core/lfx/components/jigsawstack/sentiment.py
diff --git a/src/lfx/src/lfx/components/jigsawstack/text_to_sql.py b/src/packages/core/lfx/components/jigsawstack/text_to_sql.py
similarity index 100%
rename from src/lfx/src/lfx/components/jigsawstack/text_to_sql.py
rename to src/packages/core/lfx/components/jigsawstack/text_to_sql.py
diff --git a/src/lfx/src/lfx/components/jigsawstack/text_translate.py b/src/packages/core/lfx/components/jigsawstack/text_translate.py
similarity index 100%
rename from src/lfx/src/lfx/components/jigsawstack/text_translate.py
rename to src/packages/core/lfx/components/jigsawstack/text_translate.py
diff --git a/src/lfx/src/lfx/components/jigsawstack/vocr.py b/src/packages/core/lfx/components/jigsawstack/vocr.py
similarity index 100%
rename from src/lfx/src/lfx/components/jigsawstack/vocr.py
rename to src/packages/core/lfx/components/jigsawstack/vocr.py
diff --git a/src/lfx/src/lfx/components/langchain_utilities/__init__.py b/src/packages/core/lfx/components/langchain_utilities/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/langchain_utilities/__init__.py
rename to src/packages/core/lfx/components/langchain_utilities/__init__.py
diff --git a/src/lfx/src/lfx/components/langchain_utilities/character.py b/src/packages/core/lfx/components/langchain_utilities/character.py
similarity index 100%
rename from src/lfx/src/lfx/components/langchain_utilities/character.py
rename to src/packages/core/lfx/components/langchain_utilities/character.py
diff --git a/src/lfx/src/lfx/components/langchain_utilities/conversation.py b/src/packages/core/lfx/components/langchain_utilities/conversation.py
similarity index 100%
rename from src/lfx/src/lfx/components/langchain_utilities/conversation.py
rename to src/packages/core/lfx/components/langchain_utilities/conversation.py
diff --git a/src/lfx/src/lfx/components/langchain_utilities/csv_agent.py b/src/packages/core/lfx/components/langchain_utilities/csv_agent.py
similarity index 100%
rename from src/lfx/src/lfx/components/langchain_utilities/csv_agent.py
rename to src/packages/core/lfx/components/langchain_utilities/csv_agent.py
diff --git a/src/lfx/src/lfx/components/langchain_utilities/fake_embeddings.py b/src/packages/core/lfx/components/langchain_utilities/fake_embeddings.py
similarity index 100%
rename from src/lfx/src/lfx/components/langchain_utilities/fake_embeddings.py
rename to src/packages/core/lfx/components/langchain_utilities/fake_embeddings.py
diff --git a/src/lfx/src/lfx/components/langchain_utilities/html_link_extractor.py b/src/packages/core/lfx/components/langchain_utilities/html_link_extractor.py
similarity index 100%
rename from src/lfx/src/lfx/components/langchain_utilities/html_link_extractor.py
rename to src/packages/core/lfx/components/langchain_utilities/html_link_extractor.py
diff --git a/src/lfx/src/lfx/components/langchain_utilities/json_agent.py b/src/packages/core/lfx/components/langchain_utilities/json_agent.py
similarity index 100%
rename from src/lfx/src/lfx/components/langchain_utilities/json_agent.py
rename to src/packages/core/lfx/components/langchain_utilities/json_agent.py
diff --git a/src/lfx/src/lfx/components/langchain_utilities/langchain_hub.py b/src/packages/core/lfx/components/langchain_utilities/langchain_hub.py
similarity index 100%
rename from src/lfx/src/lfx/components/langchain_utilities/langchain_hub.py
rename to src/packages/core/lfx/components/langchain_utilities/langchain_hub.py
diff --git a/src/lfx/src/lfx/components/langchain_utilities/language_recursive.py b/src/packages/core/lfx/components/langchain_utilities/language_recursive.py
similarity index 100%
rename from src/lfx/src/lfx/components/langchain_utilities/language_recursive.py
rename to src/packages/core/lfx/components/langchain_utilities/language_recursive.py
diff --git a/src/lfx/src/lfx/components/langchain_utilities/language_semantic.py b/src/packages/core/lfx/components/langchain_utilities/language_semantic.py
similarity index 100%
rename from src/lfx/src/lfx/components/langchain_utilities/language_semantic.py
rename to src/packages/core/lfx/components/langchain_utilities/language_semantic.py
diff --git a/src/lfx/src/lfx/components/langchain_utilities/llm_checker.py b/src/packages/core/lfx/components/langchain_utilities/llm_checker.py
similarity index 100%
rename from src/lfx/src/lfx/components/langchain_utilities/llm_checker.py
rename to src/packages/core/lfx/components/langchain_utilities/llm_checker.py
diff --git a/src/lfx/src/lfx/components/langchain_utilities/llm_math.py b/src/packages/core/lfx/components/langchain_utilities/llm_math.py
similarity index 100%
rename from src/lfx/src/lfx/components/langchain_utilities/llm_math.py
rename to src/packages/core/lfx/components/langchain_utilities/llm_math.py
diff --git a/src/lfx/src/lfx/components/langchain_utilities/natural_language.py b/src/packages/core/lfx/components/langchain_utilities/natural_language.py
similarity index 100%
rename from src/lfx/src/lfx/components/langchain_utilities/natural_language.py
rename to src/packages/core/lfx/components/langchain_utilities/natural_language.py
diff --git a/src/lfx/src/lfx/components/langchain_utilities/openai_tools.py b/src/packages/core/lfx/components/langchain_utilities/openai_tools.py
similarity index 100%
rename from src/lfx/src/lfx/components/langchain_utilities/openai_tools.py
rename to src/packages/core/lfx/components/langchain_utilities/openai_tools.py
diff --git a/src/lfx/src/lfx/components/langchain_utilities/openapi.py b/src/packages/core/lfx/components/langchain_utilities/openapi.py
similarity index 100%
rename from src/lfx/src/lfx/components/langchain_utilities/openapi.py
rename to src/packages/core/lfx/components/langchain_utilities/openapi.py
diff --git a/src/lfx/src/lfx/components/langchain_utilities/recursive_character.py b/src/packages/core/lfx/components/langchain_utilities/recursive_character.py
similarity index 100%
rename from src/lfx/src/lfx/components/langchain_utilities/recursive_character.py
rename to src/packages/core/lfx/components/langchain_utilities/recursive_character.py
diff --git a/src/lfx/src/lfx/components/langchain_utilities/retrieval_qa.py b/src/packages/core/lfx/components/langchain_utilities/retrieval_qa.py
similarity index 100%
rename from src/lfx/src/lfx/components/langchain_utilities/retrieval_qa.py
rename to src/packages/core/lfx/components/langchain_utilities/retrieval_qa.py
diff --git a/src/lfx/src/lfx/components/langchain_utilities/runnable_executor.py b/src/packages/core/lfx/components/langchain_utilities/runnable_executor.py
similarity index 100%
rename from src/lfx/src/lfx/components/langchain_utilities/runnable_executor.py
rename to src/packages/core/lfx/components/langchain_utilities/runnable_executor.py
diff --git a/src/lfx/src/lfx/components/langchain_utilities/self_query.py b/src/packages/core/lfx/components/langchain_utilities/self_query.py
similarity index 100%
rename from src/lfx/src/lfx/components/langchain_utilities/self_query.py
rename to src/packages/core/lfx/components/langchain_utilities/self_query.py
diff --git a/src/lfx/src/lfx/components/langchain_utilities/spider.py b/src/packages/core/lfx/components/langchain_utilities/spider.py
similarity index 100%
rename from src/lfx/src/lfx/components/langchain_utilities/spider.py
rename to src/packages/core/lfx/components/langchain_utilities/spider.py
diff --git a/src/lfx/src/lfx/components/langchain_utilities/sql.py b/src/packages/core/lfx/components/langchain_utilities/sql.py
similarity index 100%
rename from src/lfx/src/lfx/components/langchain_utilities/sql.py
rename to src/packages/core/lfx/components/langchain_utilities/sql.py
diff --git a/src/lfx/src/lfx/components/langchain_utilities/sql_database.py b/src/packages/core/lfx/components/langchain_utilities/sql_database.py
similarity index 100%
rename from src/lfx/src/lfx/components/langchain_utilities/sql_database.py
rename to src/packages/core/lfx/components/langchain_utilities/sql_database.py
diff --git a/src/lfx/src/lfx/components/langchain_utilities/sql_generator.py b/src/packages/core/lfx/components/langchain_utilities/sql_generator.py
similarity index 100%
rename from src/lfx/src/lfx/components/langchain_utilities/sql_generator.py
rename to src/packages/core/lfx/components/langchain_utilities/sql_generator.py
diff --git a/src/lfx/src/lfx/components/langchain_utilities/tool_calling.py b/src/packages/core/lfx/components/langchain_utilities/tool_calling.py
similarity index 100%
rename from src/lfx/src/lfx/components/langchain_utilities/tool_calling.py
rename to src/packages/core/lfx/components/langchain_utilities/tool_calling.py
diff --git a/src/lfx/src/lfx/components/langchain_utilities/vector_store_info.py b/src/packages/core/lfx/components/langchain_utilities/vector_store_info.py
similarity index 100%
rename from src/lfx/src/lfx/components/langchain_utilities/vector_store_info.py
rename to src/packages/core/lfx/components/langchain_utilities/vector_store_info.py
diff --git a/src/lfx/src/lfx/components/langchain_utilities/vector_store_router.py b/src/packages/core/lfx/components/langchain_utilities/vector_store_router.py
similarity index 100%
rename from src/lfx/src/lfx/components/langchain_utilities/vector_store_router.py
rename to src/packages/core/lfx/components/langchain_utilities/vector_store_router.py
diff --git a/src/lfx/src/lfx/components/langchain_utilities/xml_agent.py b/src/packages/core/lfx/components/langchain_utilities/xml_agent.py
similarity index 100%
rename from src/lfx/src/lfx/components/langchain_utilities/xml_agent.py
rename to src/packages/core/lfx/components/langchain_utilities/xml_agent.py
diff --git a/src/lfx/src/lfx/components/langwatch/__init__.py b/src/packages/core/lfx/components/langwatch/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/langwatch/__init__.py
rename to src/packages/core/lfx/components/langwatch/__init__.py
diff --git a/src/lfx/src/lfx/components/langwatch/langwatch.py b/src/packages/core/lfx/components/langwatch/langwatch.py
similarity index 100%
rename from src/lfx/src/lfx/components/langwatch/langwatch.py
rename to src/packages/core/lfx/components/langwatch/langwatch.py
diff --git a/src/lfx/src/lfx/components/link_extractors/__init__.py b/src/packages/core/lfx/components/link_extractors/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/link_extractors/__init__.py
rename to src/packages/core/lfx/components/link_extractors/__init__.py
diff --git a/src/lfx/src/lfx/components/lmstudio/__init__.py b/src/packages/core/lfx/components/lmstudio/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/lmstudio/__init__.py
rename to src/packages/core/lfx/components/lmstudio/__init__.py
diff --git a/src/lfx/src/lfx/components/lmstudio/lmstudioembeddings.py b/src/packages/core/lfx/components/lmstudio/lmstudioembeddings.py
similarity index 100%
rename from src/lfx/src/lfx/components/lmstudio/lmstudioembeddings.py
rename to src/packages/core/lfx/components/lmstudio/lmstudioembeddings.py
diff --git a/src/lfx/src/lfx/components/lmstudio/lmstudiomodel.py b/src/packages/core/lfx/components/lmstudio/lmstudiomodel.py
similarity index 100%
rename from src/lfx/src/lfx/components/lmstudio/lmstudiomodel.py
rename to src/packages/core/lfx/components/lmstudio/lmstudiomodel.py
diff --git a/src/lfx/src/lfx/components/logic/__init__.py b/src/packages/core/lfx/components/logic/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/logic/__init__.py
rename to src/packages/core/lfx/components/logic/__init__.py
diff --git a/src/lfx/src/lfx/components/logic/conditional_router.py b/src/packages/core/lfx/components/logic/conditional_router.py
similarity index 100%
rename from src/lfx/src/lfx/components/logic/conditional_router.py
rename to src/packages/core/lfx/components/logic/conditional_router.py
diff --git a/src/lfx/src/lfx/components/logic/data_conditional_router.py b/src/packages/core/lfx/components/logic/data_conditional_router.py
similarity index 100%
rename from src/lfx/src/lfx/components/logic/data_conditional_router.py
rename to src/packages/core/lfx/components/logic/data_conditional_router.py
diff --git a/src/lfx/src/lfx/components/logic/flow_tool.py b/src/packages/core/lfx/components/logic/flow_tool.py
similarity index 100%
rename from src/lfx/src/lfx/components/logic/flow_tool.py
rename to src/packages/core/lfx/components/logic/flow_tool.py
diff --git a/src/lfx/src/lfx/components/logic/listen.py b/src/packages/core/lfx/components/logic/listen.py
similarity index 100%
rename from src/lfx/src/lfx/components/logic/listen.py
rename to src/packages/core/lfx/components/logic/listen.py
diff --git a/src/lfx/src/lfx/components/logic/llm_conditional_router.py b/src/packages/core/lfx/components/logic/llm_conditional_router.py
similarity index 100%
rename from src/lfx/src/lfx/components/logic/llm_conditional_router.py
rename to src/packages/core/lfx/components/logic/llm_conditional_router.py
diff --git a/src/lfx/src/lfx/components/logic/loop.py b/src/packages/core/lfx/components/logic/loop.py
similarity index 100%
rename from src/lfx/src/lfx/components/logic/loop.py
rename to src/packages/core/lfx/components/logic/loop.py
diff --git a/src/lfx/src/lfx/components/logic/notify.py b/src/packages/core/lfx/components/logic/notify.py
similarity index 100%
rename from src/lfx/src/lfx/components/logic/notify.py
rename to src/packages/core/lfx/components/logic/notify.py
diff --git a/src/lfx/src/lfx/components/logic/pass_message.py b/src/packages/core/lfx/components/logic/pass_message.py
similarity index 100%
rename from src/lfx/src/lfx/components/logic/pass_message.py
rename to src/packages/core/lfx/components/logic/pass_message.py
diff --git a/src/lfx/src/lfx/components/logic/run_flow.py b/src/packages/core/lfx/components/logic/run_flow.py
similarity index 100%
rename from src/lfx/src/lfx/components/logic/run_flow.py
rename to src/packages/core/lfx/components/logic/run_flow.py
diff --git a/src/lfx/src/lfx/components/logic/sub_flow.py b/src/packages/core/lfx/components/logic/sub_flow.py
similarity index 100%
rename from src/lfx/src/lfx/components/logic/sub_flow.py
rename to src/packages/core/lfx/components/logic/sub_flow.py
diff --git a/src/lfx/src/lfx/components/maritalk/__init__.py b/src/packages/core/lfx/components/maritalk/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/maritalk/__init__.py
rename to src/packages/core/lfx/components/maritalk/__init__.py
diff --git a/src/lfx/src/lfx/components/maritalk/maritalk.py b/src/packages/core/lfx/components/maritalk/maritalk.py
similarity index 100%
rename from src/lfx/src/lfx/components/maritalk/maritalk.py
rename to src/packages/core/lfx/components/maritalk/maritalk.py
diff --git a/src/lfx/src/lfx/components/mem0/__init__.py b/src/packages/core/lfx/components/mem0/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/mem0/__init__.py
rename to src/packages/core/lfx/components/mem0/__init__.py
diff --git a/src/lfx/src/lfx/components/mem0/mem0_chat_memory.py b/src/packages/core/lfx/components/mem0/mem0_chat_memory.py
similarity index 100%
rename from src/lfx/src/lfx/components/mem0/mem0_chat_memory.py
rename to src/packages/core/lfx/components/mem0/mem0_chat_memory.py
diff --git a/src/lfx/src/lfx/components/milvus/__init__.py b/src/packages/core/lfx/components/milvus/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/milvus/__init__.py
rename to src/packages/core/lfx/components/milvus/__init__.py
diff --git a/src/lfx/src/lfx/components/milvus/milvus.py b/src/packages/core/lfx/components/milvus/milvus.py
similarity index 100%
rename from src/lfx/src/lfx/components/milvus/milvus.py
rename to src/packages/core/lfx/components/milvus/milvus.py
diff --git a/src/lfx/src/lfx/components/mistral/__init__.py b/src/packages/core/lfx/components/mistral/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/mistral/__init__.py
rename to src/packages/core/lfx/components/mistral/__init__.py
diff --git a/src/lfx/src/lfx/components/mistral/mistral.py b/src/packages/core/lfx/components/mistral/mistral.py
similarity index 100%
rename from src/lfx/src/lfx/components/mistral/mistral.py
rename to src/packages/core/lfx/components/mistral/mistral.py
diff --git a/src/lfx/src/lfx/components/mistral/mistral_embeddings.py b/src/packages/core/lfx/components/mistral/mistral_embeddings.py
similarity index 100%
rename from src/lfx/src/lfx/components/mistral/mistral_embeddings.py
rename to src/packages/core/lfx/components/mistral/mistral_embeddings.py
diff --git a/src/lfx/src/lfx/components/models/__init__.py b/src/packages/core/lfx/components/models/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/models/__init__.py
rename to src/packages/core/lfx/components/models/__init__.py
diff --git a/src/lfx/src/lfx/components/models/embedding_model.py b/src/packages/core/lfx/components/models/embedding_model.py
similarity index 100%
rename from src/lfx/src/lfx/components/models/embedding_model.py
rename to src/packages/core/lfx/components/models/embedding_model.py
diff --git a/src/lfx/src/lfx/components/models/language_model.py b/src/packages/core/lfx/components/models/language_model.py
similarity index 100%
rename from src/lfx/src/lfx/components/models/language_model.py
rename to src/packages/core/lfx/components/models/language_model.py
diff --git a/src/lfx/src/lfx/components/mongodb/__init__.py b/src/packages/core/lfx/components/mongodb/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/mongodb/__init__.py
rename to src/packages/core/lfx/components/mongodb/__init__.py
diff --git a/src/lfx/src/lfx/components/mongodb/mongodb_atlas.py b/src/packages/core/lfx/components/mongodb/mongodb_atlas.py
similarity index 100%
rename from src/lfx/src/lfx/components/mongodb/mongodb_atlas.py
rename to src/packages/core/lfx/components/mongodb/mongodb_atlas.py
diff --git a/src/lfx/src/lfx/components/needle/__init__.py b/src/packages/core/lfx/components/needle/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/needle/__init__.py
rename to src/packages/core/lfx/components/needle/__init__.py
diff --git a/src/lfx/src/lfx/components/needle/needle.py b/src/packages/core/lfx/components/needle/needle.py
similarity index 100%
rename from src/lfx/src/lfx/components/needle/needle.py
rename to src/packages/core/lfx/components/needle/needle.py
diff --git a/src/lfx/src/lfx/components/notdiamond/__init__.py b/src/packages/core/lfx/components/notdiamond/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/notdiamond/__init__.py
rename to src/packages/core/lfx/components/notdiamond/__init__.py
diff --git a/src/lfx/src/lfx/components/notdiamond/notdiamond.py b/src/packages/core/lfx/components/notdiamond/notdiamond.py
similarity index 100%
rename from src/lfx/src/lfx/components/notdiamond/notdiamond.py
rename to src/packages/core/lfx/components/notdiamond/notdiamond.py
diff --git a/src/lfx/src/lfx/components/novita/__init__.py b/src/packages/core/lfx/components/novita/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/novita/__init__.py
rename to src/packages/core/lfx/components/novita/__init__.py
diff --git a/src/lfx/src/lfx/components/novita/novita.py b/src/packages/core/lfx/components/novita/novita.py
similarity index 100%
rename from src/lfx/src/lfx/components/novita/novita.py
rename to src/packages/core/lfx/components/novita/novita.py
diff --git a/src/lfx/src/lfx/components/nvidia/__init__.py b/src/packages/core/lfx/components/nvidia/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/nvidia/__init__.py
rename to src/packages/core/lfx/components/nvidia/__init__.py
diff --git a/src/lfx/src/lfx/components/nvidia/nvidia.py b/src/packages/core/lfx/components/nvidia/nvidia.py
similarity index 100%
rename from src/lfx/src/lfx/components/nvidia/nvidia.py
rename to src/packages/core/lfx/components/nvidia/nvidia.py
diff --git a/src/lfx/src/lfx/components/nvidia/nvidia_embedding.py b/src/packages/core/lfx/components/nvidia/nvidia_embedding.py
similarity index 100%
rename from src/lfx/src/lfx/components/nvidia/nvidia_embedding.py
rename to src/packages/core/lfx/components/nvidia/nvidia_embedding.py
diff --git a/src/lfx/src/lfx/components/nvidia/nvidia_ingest.py b/src/packages/core/lfx/components/nvidia/nvidia_ingest.py
similarity index 100%
rename from src/lfx/src/lfx/components/nvidia/nvidia_ingest.py
rename to src/packages/core/lfx/components/nvidia/nvidia_ingest.py
diff --git a/src/lfx/src/lfx/components/nvidia/nvidia_rerank.py b/src/packages/core/lfx/components/nvidia/nvidia_rerank.py
similarity index 100%
rename from src/lfx/src/lfx/components/nvidia/nvidia_rerank.py
rename to src/packages/core/lfx/components/nvidia/nvidia_rerank.py
diff --git a/src/lfx/src/lfx/components/nvidia/system_assist.py b/src/packages/core/lfx/components/nvidia/system_assist.py
similarity index 100%
rename from src/lfx/src/lfx/components/nvidia/system_assist.py
rename to src/packages/core/lfx/components/nvidia/system_assist.py
diff --git a/src/lfx/src/lfx/components/olivya/__init__.py b/src/packages/core/lfx/components/olivya/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/olivya/__init__.py
rename to src/packages/core/lfx/components/olivya/__init__.py
diff --git a/src/lfx/src/lfx/components/olivya/olivya.py b/src/packages/core/lfx/components/olivya/olivya.py
similarity index 100%
rename from src/lfx/src/lfx/components/olivya/olivya.py
rename to src/packages/core/lfx/components/olivya/olivya.py
diff --git a/src/lfx/src/lfx/components/ollama/__init__.py b/src/packages/core/lfx/components/ollama/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/ollama/__init__.py
rename to src/packages/core/lfx/components/ollama/__init__.py
diff --git a/src/lfx/src/lfx/components/ollama/ollama.py b/src/packages/core/lfx/components/ollama/ollama.py
similarity index 100%
rename from src/lfx/src/lfx/components/ollama/ollama.py
rename to src/packages/core/lfx/components/ollama/ollama.py
diff --git a/src/lfx/src/lfx/components/ollama/ollama_embeddings.py b/src/packages/core/lfx/components/ollama/ollama_embeddings.py
similarity index 100%
rename from src/lfx/src/lfx/components/ollama/ollama_embeddings.py
rename to src/packages/core/lfx/components/ollama/ollama_embeddings.py
diff --git a/src/lfx/src/lfx/components/openai/__init__.py b/src/packages/core/lfx/components/openai/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/openai/__init__.py
rename to src/packages/core/lfx/components/openai/__init__.py
diff --git a/src/lfx/src/lfx/components/openai/openai.py b/src/packages/core/lfx/components/openai/openai.py
similarity index 100%
rename from src/lfx/src/lfx/components/openai/openai.py
rename to src/packages/core/lfx/components/openai/openai.py
diff --git a/src/lfx/src/lfx/components/openai/openai_chat_model.py b/src/packages/core/lfx/components/openai/openai_chat_model.py
similarity index 100%
rename from src/lfx/src/lfx/components/openai/openai_chat_model.py
rename to src/packages/core/lfx/components/openai/openai_chat_model.py
diff --git a/src/lfx/src/lfx/components/openrouter/__init__.py b/src/packages/core/lfx/components/openrouter/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/openrouter/__init__.py
rename to src/packages/core/lfx/components/openrouter/__init__.py
diff --git a/src/lfx/src/lfx/components/openrouter/openrouter.py b/src/packages/core/lfx/components/openrouter/openrouter.py
similarity index 100%
rename from src/lfx/src/lfx/components/openrouter/openrouter.py
rename to src/packages/core/lfx/components/openrouter/openrouter.py
diff --git a/src/lfx/src/lfx/components/output_parsers/__init__.py b/src/packages/core/lfx/components/output_parsers/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/output_parsers/__init__.py
rename to src/packages/core/lfx/components/output_parsers/__init__.py
diff --git a/src/lfx/src/lfx/components/perplexity/__init__.py b/src/packages/core/lfx/components/perplexity/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/perplexity/__init__.py
rename to src/packages/core/lfx/components/perplexity/__init__.py
diff --git a/src/lfx/src/lfx/components/perplexity/perplexity.py b/src/packages/core/lfx/components/perplexity/perplexity.py
similarity index 100%
rename from src/lfx/src/lfx/components/perplexity/perplexity.py
rename to src/packages/core/lfx/components/perplexity/perplexity.py
diff --git a/src/lfx/src/lfx/components/pgvector/__init__.py b/src/packages/core/lfx/components/pgvector/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/pgvector/__init__.py
rename to src/packages/core/lfx/components/pgvector/__init__.py
diff --git a/src/lfx/src/lfx/components/pgvector/pgvector.py b/src/packages/core/lfx/components/pgvector/pgvector.py
similarity index 100%
rename from src/lfx/src/lfx/components/pgvector/pgvector.py
rename to src/packages/core/lfx/components/pgvector/pgvector.py
diff --git a/src/lfx/src/lfx/components/pinecone/__init__.py b/src/packages/core/lfx/components/pinecone/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/pinecone/__init__.py
rename to src/packages/core/lfx/components/pinecone/__init__.py
diff --git a/src/lfx/src/lfx/components/pinecone/pinecone.py b/src/packages/core/lfx/components/pinecone/pinecone.py
similarity index 100%
rename from src/lfx/src/lfx/components/pinecone/pinecone.py
rename to src/packages/core/lfx/components/pinecone/pinecone.py
diff --git a/src/lfx/src/lfx/components/processing/__init__.py b/src/packages/core/lfx/components/processing/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/processing/__init__.py
rename to src/packages/core/lfx/components/processing/__init__.py
diff --git a/src/lfx/src/lfx/components/processing/alter_metadata.py b/src/packages/core/lfx/components/processing/alter_metadata.py
similarity index 100%
rename from src/lfx/src/lfx/components/processing/alter_metadata.py
rename to src/packages/core/lfx/components/processing/alter_metadata.py
diff --git a/src/lfx/src/lfx/components/processing/batch_run.py b/src/packages/core/lfx/components/processing/batch_run.py
similarity index 100%
rename from src/lfx/src/lfx/components/processing/batch_run.py
rename to src/packages/core/lfx/components/processing/batch_run.py
diff --git a/src/lfx/src/lfx/components/processing/combine_text.py b/src/packages/core/lfx/components/processing/combine_text.py
similarity index 100%
rename from src/lfx/src/lfx/components/processing/combine_text.py
rename to src/packages/core/lfx/components/processing/combine_text.py
diff --git a/src/lfx/src/lfx/components/processing/converter.py b/src/packages/core/lfx/components/processing/converter.py
similarity index 100%
rename from src/lfx/src/lfx/components/processing/converter.py
rename to src/packages/core/lfx/components/processing/converter.py
diff --git a/src/lfx/src/lfx/components/processing/create_data.py b/src/packages/core/lfx/components/processing/create_data.py
similarity index 100%
rename from src/lfx/src/lfx/components/processing/create_data.py
rename to src/packages/core/lfx/components/processing/create_data.py
diff --git a/src/lfx/src/lfx/components/processing/data_operations.py b/src/packages/core/lfx/components/processing/data_operations.py
similarity index 100%
rename from src/lfx/src/lfx/components/processing/data_operations.py
rename to src/packages/core/lfx/components/processing/data_operations.py
diff --git a/src/lfx/src/lfx/components/processing/data_to_dataframe.py b/src/packages/core/lfx/components/processing/data_to_dataframe.py
similarity index 100%
rename from src/lfx/src/lfx/components/processing/data_to_dataframe.py
rename to src/packages/core/lfx/components/processing/data_to_dataframe.py
diff --git a/src/lfx/src/lfx/components/processing/dataframe_operations.py b/src/packages/core/lfx/components/processing/dataframe_operations.py
similarity index 100%
rename from src/lfx/src/lfx/components/processing/dataframe_operations.py
rename to src/packages/core/lfx/components/processing/dataframe_operations.py
diff --git a/src/lfx/src/lfx/components/processing/extract_key.py b/src/packages/core/lfx/components/processing/extract_key.py
similarity index 100%
rename from src/lfx/src/lfx/components/processing/extract_key.py
rename to src/packages/core/lfx/components/processing/extract_key.py
diff --git a/src/lfx/src/lfx/components/processing/filter_data.py b/src/packages/core/lfx/components/processing/filter_data.py
similarity index 100%
rename from src/lfx/src/lfx/components/processing/filter_data.py
rename to src/packages/core/lfx/components/processing/filter_data.py
diff --git a/src/lfx/src/lfx/components/processing/filter_data_values.py b/src/packages/core/lfx/components/processing/filter_data_values.py
similarity index 100%
rename from src/lfx/src/lfx/components/processing/filter_data_values.py
rename to src/packages/core/lfx/components/processing/filter_data_values.py
diff --git a/src/lfx/src/lfx/components/processing/json_cleaner.py b/src/packages/core/lfx/components/processing/json_cleaner.py
similarity index 100%
rename from src/lfx/src/lfx/components/processing/json_cleaner.py
rename to src/packages/core/lfx/components/processing/json_cleaner.py
diff --git a/src/lfx/src/lfx/components/processing/lambda_filter.py b/src/packages/core/lfx/components/processing/lambda_filter.py
similarity index 100%
rename from src/lfx/src/lfx/components/processing/lambda_filter.py
rename to src/packages/core/lfx/components/processing/lambda_filter.py
diff --git a/src/lfx/src/lfx/components/processing/llm_router.py b/src/packages/core/lfx/components/processing/llm_router.py
similarity index 100%
rename from src/lfx/src/lfx/components/processing/llm_router.py
rename to src/packages/core/lfx/components/processing/llm_router.py
diff --git a/src/lfx/src/lfx/components/processing/merge_data.py b/src/packages/core/lfx/components/processing/merge_data.py
similarity index 100%
rename from src/lfx/src/lfx/components/processing/merge_data.py
rename to src/packages/core/lfx/components/processing/merge_data.py
diff --git a/src/lfx/src/lfx/components/processing/message_to_data.py b/src/packages/core/lfx/components/processing/message_to_data.py
similarity index 100%
rename from src/lfx/src/lfx/components/processing/message_to_data.py
rename to src/packages/core/lfx/components/processing/message_to_data.py
diff --git a/src/lfx/src/lfx/components/processing/parse_data.py b/src/packages/core/lfx/components/processing/parse_data.py
similarity index 100%
rename from src/lfx/src/lfx/components/processing/parse_data.py
rename to src/packages/core/lfx/components/processing/parse_data.py
diff --git a/src/lfx/src/lfx/components/processing/parse_dataframe.py b/src/packages/core/lfx/components/processing/parse_dataframe.py
similarity index 100%
rename from src/lfx/src/lfx/components/processing/parse_dataframe.py
rename to src/packages/core/lfx/components/processing/parse_dataframe.py
diff --git a/src/lfx/src/lfx/components/processing/parse_json_data.py b/src/packages/core/lfx/components/processing/parse_json_data.py
similarity index 100%
rename from src/lfx/src/lfx/components/processing/parse_json_data.py
rename to src/packages/core/lfx/components/processing/parse_json_data.py
diff --git a/src/lfx/src/lfx/components/processing/parser.py b/src/packages/core/lfx/components/processing/parser.py
similarity index 100%
rename from src/lfx/src/lfx/components/processing/parser.py
rename to src/packages/core/lfx/components/processing/parser.py
diff --git a/src/lfx/src/lfx/components/processing/prompt.py b/src/packages/core/lfx/components/processing/prompt.py
similarity index 100%
rename from src/lfx/src/lfx/components/processing/prompt.py
rename to src/packages/core/lfx/components/processing/prompt.py
diff --git a/src/lfx/src/lfx/components/processing/python_repl_core.py b/src/packages/core/lfx/components/processing/python_repl_core.py
similarity index 100%
rename from src/lfx/src/lfx/components/processing/python_repl_core.py
rename to src/packages/core/lfx/components/processing/python_repl_core.py
diff --git a/src/lfx/src/lfx/components/processing/regex.py b/src/packages/core/lfx/components/processing/regex.py
similarity index 100%
rename from src/lfx/src/lfx/components/processing/regex.py
rename to src/packages/core/lfx/components/processing/regex.py
diff --git a/src/lfx/src/lfx/components/processing/save_file.py b/src/packages/core/lfx/components/processing/save_file.py
similarity index 100%
rename from src/lfx/src/lfx/components/processing/save_file.py
rename to src/packages/core/lfx/components/processing/save_file.py
diff --git a/src/lfx/src/lfx/components/processing/select_data.py b/src/packages/core/lfx/components/processing/select_data.py
similarity index 100%
rename from src/lfx/src/lfx/components/processing/select_data.py
rename to src/packages/core/lfx/components/processing/select_data.py
diff --git a/src/lfx/src/lfx/components/processing/split_text.py b/src/packages/core/lfx/components/processing/split_text.py
similarity index 100%
rename from src/lfx/src/lfx/components/processing/split_text.py
rename to src/packages/core/lfx/components/processing/split_text.py
diff --git a/src/lfx/src/lfx/components/processing/structured_output.py b/src/packages/core/lfx/components/processing/structured_output.py
similarity index 100%
rename from src/lfx/src/lfx/components/processing/structured_output.py
rename to src/packages/core/lfx/components/processing/structured_output.py
diff --git a/src/lfx/src/lfx/components/processing/update_data.py b/src/packages/core/lfx/components/processing/update_data.py
similarity index 100%
rename from src/lfx/src/lfx/components/processing/update_data.py
rename to src/packages/core/lfx/components/processing/update_data.py
diff --git a/src/lfx/src/lfx/components/prototypes/__init__.py b/src/packages/core/lfx/components/prototypes/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/prototypes/__init__.py
rename to src/packages/core/lfx/components/prototypes/__init__.py
diff --git a/src/lfx/src/lfx/components/prototypes/python_function.py b/src/packages/core/lfx/components/prototypes/python_function.py
similarity index 100%
rename from src/lfx/src/lfx/components/prototypes/python_function.py
rename to src/packages/core/lfx/components/prototypes/python_function.py
diff --git a/src/lfx/src/lfx/components/qdrant/__init__.py b/src/packages/core/lfx/components/qdrant/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/qdrant/__init__.py
rename to src/packages/core/lfx/components/qdrant/__init__.py
diff --git a/src/lfx/src/lfx/components/qdrant/qdrant.py b/src/packages/core/lfx/components/qdrant/qdrant.py
similarity index 100%
rename from src/lfx/src/lfx/components/qdrant/qdrant.py
rename to src/packages/core/lfx/components/qdrant/qdrant.py
diff --git a/src/lfx/src/lfx/components/redis/__init__.py b/src/packages/core/lfx/components/redis/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/redis/__init__.py
rename to src/packages/core/lfx/components/redis/__init__.py
diff --git a/src/lfx/src/lfx/components/redis/redis.py b/src/packages/core/lfx/components/redis/redis.py
similarity index 100%
rename from src/lfx/src/lfx/components/redis/redis.py
rename to src/packages/core/lfx/components/redis/redis.py
diff --git a/src/lfx/src/lfx/components/redis/redis_chat.py b/src/packages/core/lfx/components/redis/redis_chat.py
similarity index 100%
rename from src/lfx/src/lfx/components/redis/redis_chat.py
rename to src/packages/core/lfx/components/redis/redis_chat.py
diff --git a/src/lfx/src/lfx/components/sambanova/__init__.py b/src/packages/core/lfx/components/sambanova/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/sambanova/__init__.py
rename to src/packages/core/lfx/components/sambanova/__init__.py
diff --git a/src/lfx/src/lfx/components/sambanova/sambanova.py b/src/packages/core/lfx/components/sambanova/sambanova.py
similarity index 100%
rename from src/lfx/src/lfx/components/sambanova/sambanova.py
rename to src/packages/core/lfx/components/sambanova/sambanova.py
diff --git a/src/lfx/src/lfx/components/scrapegraph/__init__.py b/src/packages/core/lfx/components/scrapegraph/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/scrapegraph/__init__.py
rename to src/packages/core/lfx/components/scrapegraph/__init__.py
diff --git a/src/lfx/src/lfx/components/scrapegraph/scrapegraph_markdownify_api.py b/src/packages/core/lfx/components/scrapegraph/scrapegraph_markdownify_api.py
similarity index 100%
rename from src/lfx/src/lfx/components/scrapegraph/scrapegraph_markdownify_api.py
rename to src/packages/core/lfx/components/scrapegraph/scrapegraph_markdownify_api.py
diff --git a/src/lfx/src/lfx/components/scrapegraph/scrapegraph_search_api.py b/src/packages/core/lfx/components/scrapegraph/scrapegraph_search_api.py
similarity index 100%
rename from src/lfx/src/lfx/components/scrapegraph/scrapegraph_search_api.py
rename to src/packages/core/lfx/components/scrapegraph/scrapegraph_search_api.py
diff --git a/src/lfx/src/lfx/components/scrapegraph/scrapegraph_smart_scraper_api.py b/src/packages/core/lfx/components/scrapegraph/scrapegraph_smart_scraper_api.py
similarity index 100%
rename from src/lfx/src/lfx/components/scrapegraph/scrapegraph_smart_scraper_api.py
rename to src/packages/core/lfx/components/scrapegraph/scrapegraph_smart_scraper_api.py
diff --git a/src/lfx/src/lfx/components/searchapi/__init__.py b/src/packages/core/lfx/components/searchapi/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/searchapi/__init__.py
rename to src/packages/core/lfx/components/searchapi/__init__.py
diff --git a/src/lfx/src/lfx/components/searchapi/search.py b/src/packages/core/lfx/components/searchapi/search.py
similarity index 100%
rename from src/lfx/src/lfx/components/searchapi/search.py
rename to src/packages/core/lfx/components/searchapi/search.py
diff --git a/src/lfx/src/lfx/components/serpapi/__init__.py b/src/packages/core/lfx/components/serpapi/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/serpapi/__init__.py
rename to src/packages/core/lfx/components/serpapi/__init__.py
diff --git a/src/lfx/src/lfx/components/serpapi/serp.py b/src/packages/core/lfx/components/serpapi/serp.py
similarity index 100%
rename from src/lfx/src/lfx/components/serpapi/serp.py
rename to src/packages/core/lfx/components/serpapi/serp.py
diff --git a/src/lfx/src/lfx/components/supabase/__init__.py b/src/packages/core/lfx/components/supabase/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/supabase/__init__.py
rename to src/packages/core/lfx/components/supabase/__init__.py
diff --git a/src/lfx/src/lfx/components/supabase/supabase.py b/src/packages/core/lfx/components/supabase/supabase.py
similarity index 100%
rename from src/lfx/src/lfx/components/supabase/supabase.py
rename to src/packages/core/lfx/components/supabase/supabase.py
diff --git a/src/lfx/src/lfx/components/tavily/__init__.py b/src/packages/core/lfx/components/tavily/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/tavily/__init__.py
rename to src/packages/core/lfx/components/tavily/__init__.py
diff --git a/src/lfx/src/lfx/components/tavily/tavily_extract.py b/src/packages/core/lfx/components/tavily/tavily_extract.py
similarity index 100%
rename from src/lfx/src/lfx/components/tavily/tavily_extract.py
rename to src/packages/core/lfx/components/tavily/tavily_extract.py
diff --git a/src/lfx/src/lfx/components/tavily/tavily_search.py b/src/packages/core/lfx/components/tavily/tavily_search.py
similarity index 100%
rename from src/lfx/src/lfx/components/tavily/tavily_search.py
rename to src/packages/core/lfx/components/tavily/tavily_search.py
diff --git a/src/lfx/src/lfx/components/textsplitters/__init__.py b/src/packages/core/lfx/components/textsplitters/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/textsplitters/__init__.py
rename to src/packages/core/lfx/components/textsplitters/__init__.py
diff --git a/src/lfx/src/lfx/components/toolkits/__init__.py b/src/packages/core/lfx/components/toolkits/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/toolkits/__init__.py
rename to src/packages/core/lfx/components/toolkits/__init__.py
diff --git a/src/lfx/src/lfx/components/tools/__init__.py b/src/packages/core/lfx/components/tools/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/tools/__init__.py
rename to src/packages/core/lfx/components/tools/__init__.py
diff --git a/src/lfx/src/lfx/components/tools/calculator.py b/src/packages/core/lfx/components/tools/calculator.py
similarity index 100%
rename from src/lfx/src/lfx/components/tools/calculator.py
rename to src/packages/core/lfx/components/tools/calculator.py
diff --git a/src/lfx/src/lfx/components/tools/google_search_api.py b/src/packages/core/lfx/components/tools/google_search_api.py
similarity index 100%
rename from src/lfx/src/lfx/components/tools/google_search_api.py
rename to src/packages/core/lfx/components/tools/google_search_api.py
diff --git a/src/lfx/src/lfx/components/tools/google_serper_api.py b/src/packages/core/lfx/components/tools/google_serper_api.py
similarity index 100%
rename from src/lfx/src/lfx/components/tools/google_serper_api.py
rename to src/packages/core/lfx/components/tools/google_serper_api.py
diff --git a/src/lfx/src/lfx/components/tools/python_code_structured_tool.py b/src/packages/core/lfx/components/tools/python_code_structured_tool.py
similarity index 100%
rename from src/lfx/src/lfx/components/tools/python_code_structured_tool.py
rename to src/packages/core/lfx/components/tools/python_code_structured_tool.py
diff --git a/src/lfx/src/lfx/components/tools/python_repl.py b/src/packages/core/lfx/components/tools/python_repl.py
similarity index 100%
rename from src/lfx/src/lfx/components/tools/python_repl.py
rename to src/packages/core/lfx/components/tools/python_repl.py
diff --git a/src/lfx/src/lfx/components/tools/search_api.py b/src/packages/core/lfx/components/tools/search_api.py
similarity index 100%
rename from src/lfx/src/lfx/components/tools/search_api.py
rename to src/packages/core/lfx/components/tools/search_api.py
diff --git a/src/lfx/src/lfx/components/tools/searxng.py b/src/packages/core/lfx/components/tools/searxng.py
similarity index 100%
rename from src/lfx/src/lfx/components/tools/searxng.py
rename to src/packages/core/lfx/components/tools/searxng.py
diff --git a/src/lfx/src/lfx/components/tools/serp_api.py b/src/packages/core/lfx/components/tools/serp_api.py
similarity index 100%
rename from src/lfx/src/lfx/components/tools/serp_api.py
rename to src/packages/core/lfx/components/tools/serp_api.py
diff --git a/src/lfx/src/lfx/components/tools/tavily_search_tool.py b/src/packages/core/lfx/components/tools/tavily_search_tool.py
similarity index 100%
rename from src/lfx/src/lfx/components/tools/tavily_search_tool.py
rename to src/packages/core/lfx/components/tools/tavily_search_tool.py
diff --git a/src/lfx/src/lfx/components/tools/wikidata_api.py b/src/packages/core/lfx/components/tools/wikidata_api.py
similarity index 100%
rename from src/lfx/src/lfx/components/tools/wikidata_api.py
rename to src/packages/core/lfx/components/tools/wikidata_api.py
diff --git a/src/lfx/src/lfx/components/tools/wikipedia_api.py b/src/packages/core/lfx/components/tools/wikipedia_api.py
similarity index 100%
rename from src/lfx/src/lfx/components/tools/wikipedia_api.py
rename to src/packages/core/lfx/components/tools/wikipedia_api.py
diff --git a/src/lfx/src/lfx/components/tools/yahoo_finance.py b/src/packages/core/lfx/components/tools/yahoo_finance.py
similarity index 100%
rename from src/lfx/src/lfx/components/tools/yahoo_finance.py
rename to src/packages/core/lfx/components/tools/yahoo_finance.py
diff --git a/src/lfx/src/lfx/components/twelvelabs/__init__.py b/src/packages/core/lfx/components/twelvelabs/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/twelvelabs/__init__.py
rename to src/packages/core/lfx/components/twelvelabs/__init__.py
diff --git a/src/lfx/src/lfx/components/twelvelabs/convert_astra_results.py b/src/packages/core/lfx/components/twelvelabs/convert_astra_results.py
similarity index 100%
rename from src/lfx/src/lfx/components/twelvelabs/convert_astra_results.py
rename to src/packages/core/lfx/components/twelvelabs/convert_astra_results.py
diff --git a/src/lfx/src/lfx/components/twelvelabs/pegasus_index.py b/src/packages/core/lfx/components/twelvelabs/pegasus_index.py
similarity index 100%
rename from src/lfx/src/lfx/components/twelvelabs/pegasus_index.py
rename to src/packages/core/lfx/components/twelvelabs/pegasus_index.py
diff --git a/src/lfx/src/lfx/components/twelvelabs/split_video.py b/src/packages/core/lfx/components/twelvelabs/split_video.py
similarity index 100%
rename from src/lfx/src/lfx/components/twelvelabs/split_video.py
rename to src/packages/core/lfx/components/twelvelabs/split_video.py
diff --git a/src/lfx/src/lfx/components/twelvelabs/text_embeddings.py b/src/packages/core/lfx/components/twelvelabs/text_embeddings.py
similarity index 100%
rename from src/lfx/src/lfx/components/twelvelabs/text_embeddings.py
rename to src/packages/core/lfx/components/twelvelabs/text_embeddings.py
diff --git a/src/lfx/src/lfx/components/twelvelabs/twelvelabs_pegasus.py b/src/packages/core/lfx/components/twelvelabs/twelvelabs_pegasus.py
similarity index 100%
rename from src/lfx/src/lfx/components/twelvelabs/twelvelabs_pegasus.py
rename to src/packages/core/lfx/components/twelvelabs/twelvelabs_pegasus.py
diff --git a/src/lfx/src/lfx/components/twelvelabs/video_embeddings.py b/src/packages/core/lfx/components/twelvelabs/video_embeddings.py
similarity index 100%
rename from src/lfx/src/lfx/components/twelvelabs/video_embeddings.py
rename to src/packages/core/lfx/components/twelvelabs/video_embeddings.py
diff --git a/src/lfx/src/lfx/components/twelvelabs/video_file.py b/src/packages/core/lfx/components/twelvelabs/video_file.py
similarity index 100%
rename from src/lfx/src/lfx/components/twelvelabs/video_file.py
rename to src/packages/core/lfx/components/twelvelabs/video_file.py
diff --git a/src/lfx/src/lfx/components/unstructured/__init__.py b/src/packages/core/lfx/components/unstructured/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/unstructured/__init__.py
rename to src/packages/core/lfx/components/unstructured/__init__.py
diff --git a/src/lfx/src/lfx/components/unstructured/unstructured.py b/src/packages/core/lfx/components/unstructured/unstructured.py
similarity index 100%
rename from src/lfx/src/lfx/components/unstructured/unstructured.py
rename to src/packages/core/lfx/components/unstructured/unstructured.py
diff --git a/src/lfx/src/lfx/components/upstash/__init__.py b/src/packages/core/lfx/components/upstash/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/upstash/__init__.py
rename to src/packages/core/lfx/components/upstash/__init__.py
diff --git a/src/lfx/src/lfx/components/upstash/upstash.py b/src/packages/core/lfx/components/upstash/upstash.py
similarity index 100%
rename from src/lfx/src/lfx/components/upstash/upstash.py
rename to src/packages/core/lfx/components/upstash/upstash.py
diff --git a/src/lfx/src/lfx/components/vectara/__init__.py b/src/packages/core/lfx/components/vectara/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/vectara/__init__.py
rename to src/packages/core/lfx/components/vectara/__init__.py
diff --git a/src/lfx/src/lfx/components/vectara/vectara.py b/src/packages/core/lfx/components/vectara/vectara.py
similarity index 100%
rename from src/lfx/src/lfx/components/vectara/vectara.py
rename to src/packages/core/lfx/components/vectara/vectara.py
diff --git a/src/lfx/src/lfx/components/vectara/vectara_rag.py b/src/packages/core/lfx/components/vectara/vectara_rag.py
similarity index 100%
rename from src/lfx/src/lfx/components/vectara/vectara_rag.py
rename to src/packages/core/lfx/components/vectara/vectara_rag.py
diff --git a/src/lfx/src/lfx/components/vectorstores/__init__.py b/src/packages/core/lfx/components/vectorstores/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/vectorstores/__init__.py
rename to src/packages/core/lfx/components/vectorstores/__init__.py
diff --git a/src/lfx/src/lfx/components/vectorstores/astradb.py b/src/packages/core/lfx/components/vectorstores/astradb.py
similarity index 100%
rename from src/lfx/src/lfx/components/vectorstores/astradb.py
rename to src/packages/core/lfx/components/vectorstores/astradb.py
diff --git a/src/lfx/src/lfx/components/vectorstores/astradb_graph.py b/src/packages/core/lfx/components/vectorstores/astradb_graph.py
similarity index 100%
rename from src/lfx/src/lfx/components/vectorstores/astradb_graph.py
rename to src/packages/core/lfx/components/vectorstores/astradb_graph.py
diff --git a/src/lfx/src/lfx/components/vectorstores/cassandra.py b/src/packages/core/lfx/components/vectorstores/cassandra.py
similarity index 100%
rename from src/lfx/src/lfx/components/vectorstores/cassandra.py
rename to src/packages/core/lfx/components/vectorstores/cassandra.py
diff --git a/src/lfx/src/lfx/components/vectorstores/cassandra_graph.py b/src/packages/core/lfx/components/vectorstores/cassandra_graph.py
similarity index 100%
rename from src/lfx/src/lfx/components/vectorstores/cassandra_graph.py
rename to src/packages/core/lfx/components/vectorstores/cassandra_graph.py
diff --git a/src/lfx/src/lfx/components/vectorstores/chroma.py b/src/packages/core/lfx/components/vectorstores/chroma.py
similarity index 100%
rename from src/lfx/src/lfx/components/vectorstores/chroma.py
rename to src/packages/core/lfx/components/vectorstores/chroma.py
diff --git a/src/lfx/src/lfx/components/vectorstores/clickhouse.py b/src/packages/core/lfx/components/vectorstores/clickhouse.py
similarity index 100%
rename from src/lfx/src/lfx/components/vectorstores/clickhouse.py
rename to src/packages/core/lfx/components/vectorstores/clickhouse.py
diff --git a/src/lfx/src/lfx/components/vectorstores/couchbase.py b/src/packages/core/lfx/components/vectorstores/couchbase.py
similarity index 100%
rename from src/lfx/src/lfx/components/vectorstores/couchbase.py
rename to src/packages/core/lfx/components/vectorstores/couchbase.py
diff --git a/src/lfx/src/lfx/components/vectorstores/elasticsearch.py b/src/packages/core/lfx/components/vectorstores/elasticsearch.py
similarity index 100%
rename from src/lfx/src/lfx/components/vectorstores/elasticsearch.py
rename to src/packages/core/lfx/components/vectorstores/elasticsearch.py
diff --git a/src/lfx/src/lfx/components/vectorstores/faiss.py b/src/packages/core/lfx/components/vectorstores/faiss.py
similarity index 100%
rename from src/lfx/src/lfx/components/vectorstores/faiss.py
rename to src/packages/core/lfx/components/vectorstores/faiss.py
diff --git a/src/lfx/src/lfx/components/vectorstores/graph_rag.py b/src/packages/core/lfx/components/vectorstores/graph_rag.py
similarity index 100%
rename from src/lfx/src/lfx/components/vectorstores/graph_rag.py
rename to src/packages/core/lfx/components/vectorstores/graph_rag.py
diff --git a/src/lfx/src/lfx/components/vectorstores/hcd.py b/src/packages/core/lfx/components/vectorstores/hcd.py
similarity index 100%
rename from src/lfx/src/lfx/components/vectorstores/hcd.py
rename to src/packages/core/lfx/components/vectorstores/hcd.py
diff --git a/src/lfx/src/lfx/components/vectorstores/local_db.py b/src/packages/core/lfx/components/vectorstores/local_db.py
similarity index 100%
rename from src/lfx/src/lfx/components/vectorstores/local_db.py
rename to src/packages/core/lfx/components/vectorstores/local_db.py
diff --git a/src/lfx/src/lfx/components/vectorstores/milvus.py b/src/packages/core/lfx/components/vectorstores/milvus.py
similarity index 100%
rename from src/lfx/src/lfx/components/vectorstores/milvus.py
rename to src/packages/core/lfx/components/vectorstores/milvus.py
diff --git a/src/lfx/src/lfx/components/vectorstores/mongodb_atlas.py b/src/packages/core/lfx/components/vectorstores/mongodb_atlas.py
similarity index 100%
rename from src/lfx/src/lfx/components/vectorstores/mongodb_atlas.py
rename to src/packages/core/lfx/components/vectorstores/mongodb_atlas.py
diff --git a/src/lfx/src/lfx/components/vectorstores/opensearch.py b/src/packages/core/lfx/components/vectorstores/opensearch.py
similarity index 100%
rename from src/lfx/src/lfx/components/vectorstores/opensearch.py
rename to src/packages/core/lfx/components/vectorstores/opensearch.py
diff --git a/src/lfx/src/lfx/components/vectorstores/pgvector.py b/src/packages/core/lfx/components/vectorstores/pgvector.py
similarity index 100%
rename from src/lfx/src/lfx/components/vectorstores/pgvector.py
rename to src/packages/core/lfx/components/vectorstores/pgvector.py
diff --git a/src/lfx/src/lfx/components/vectorstores/pinecone.py b/src/packages/core/lfx/components/vectorstores/pinecone.py
similarity index 100%
rename from src/lfx/src/lfx/components/vectorstores/pinecone.py
rename to src/packages/core/lfx/components/vectorstores/pinecone.py
diff --git a/src/lfx/src/lfx/components/vectorstores/qdrant.py b/src/packages/core/lfx/components/vectorstores/qdrant.py
similarity index 100%
rename from src/lfx/src/lfx/components/vectorstores/qdrant.py
rename to src/packages/core/lfx/components/vectorstores/qdrant.py
diff --git a/src/lfx/src/lfx/components/vectorstores/supabase.py b/src/packages/core/lfx/components/vectorstores/supabase.py
similarity index 100%
rename from src/lfx/src/lfx/components/vectorstores/supabase.py
rename to src/packages/core/lfx/components/vectorstores/supabase.py
diff --git a/src/lfx/src/lfx/components/vectorstores/upstash.py b/src/packages/core/lfx/components/vectorstores/upstash.py
similarity index 100%
rename from src/lfx/src/lfx/components/vectorstores/upstash.py
rename to src/packages/core/lfx/components/vectorstores/upstash.py
diff --git a/src/lfx/src/lfx/components/vectorstores/vectara.py b/src/packages/core/lfx/components/vectorstores/vectara.py
similarity index 100%
rename from src/lfx/src/lfx/components/vectorstores/vectara.py
rename to src/packages/core/lfx/components/vectorstores/vectara.py
diff --git a/src/lfx/src/lfx/components/vectorstores/vectara_rag.py b/src/packages/core/lfx/components/vectorstores/vectara_rag.py
similarity index 100%
rename from src/lfx/src/lfx/components/vectorstores/vectara_rag.py
rename to src/packages/core/lfx/components/vectorstores/vectara_rag.py
diff --git a/src/lfx/src/lfx/components/vectorstores/weaviate.py b/src/packages/core/lfx/components/vectorstores/weaviate.py
similarity index 100%
rename from src/lfx/src/lfx/components/vectorstores/weaviate.py
rename to src/packages/core/lfx/components/vectorstores/weaviate.py
diff --git a/src/lfx/src/lfx/components/vertexai/__init__.py b/src/packages/core/lfx/components/vertexai/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/vertexai/__init__.py
rename to src/packages/core/lfx/components/vertexai/__init__.py
diff --git a/src/lfx/src/lfx/components/vertexai/vertexai.py b/src/packages/core/lfx/components/vertexai/vertexai.py
similarity index 100%
rename from src/lfx/src/lfx/components/vertexai/vertexai.py
rename to src/packages/core/lfx/components/vertexai/vertexai.py
diff --git a/src/lfx/src/lfx/components/vertexai/vertexai_embeddings.py b/src/packages/core/lfx/components/vertexai/vertexai_embeddings.py
similarity index 100%
rename from src/lfx/src/lfx/components/vertexai/vertexai_embeddings.py
rename to src/packages/core/lfx/components/vertexai/vertexai_embeddings.py
diff --git a/src/lfx/src/lfx/components/weaviate/__init__.py b/src/packages/core/lfx/components/weaviate/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/weaviate/__init__.py
rename to src/packages/core/lfx/components/weaviate/__init__.py
diff --git a/src/lfx/src/lfx/components/weaviate/weaviate.py b/src/packages/core/lfx/components/weaviate/weaviate.py
similarity index 100%
rename from src/lfx/src/lfx/components/weaviate/weaviate.py
rename to src/packages/core/lfx/components/weaviate/weaviate.py
diff --git a/src/lfx/src/lfx/components/wikipedia/__init__.py b/src/packages/core/lfx/components/wikipedia/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/wikipedia/__init__.py
rename to src/packages/core/lfx/components/wikipedia/__init__.py
diff --git a/src/lfx/src/lfx/components/wikipedia/wikidata.py b/src/packages/core/lfx/components/wikipedia/wikidata.py
similarity index 100%
rename from src/lfx/src/lfx/components/wikipedia/wikidata.py
rename to src/packages/core/lfx/components/wikipedia/wikidata.py
diff --git a/src/lfx/src/lfx/components/wikipedia/wikipedia.py b/src/packages/core/lfx/components/wikipedia/wikipedia.py
similarity index 100%
rename from src/lfx/src/lfx/components/wikipedia/wikipedia.py
rename to src/packages/core/lfx/components/wikipedia/wikipedia.py
diff --git a/src/lfx/src/lfx/components/wolframalpha/__init__.py b/src/packages/core/lfx/components/wolframalpha/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/wolframalpha/__init__.py
rename to src/packages/core/lfx/components/wolframalpha/__init__.py
diff --git a/src/lfx/src/lfx/components/wolframalpha/wolfram_alpha_api.py b/src/packages/core/lfx/components/wolframalpha/wolfram_alpha_api.py
similarity index 100%
rename from src/lfx/src/lfx/components/wolframalpha/wolfram_alpha_api.py
rename to src/packages/core/lfx/components/wolframalpha/wolfram_alpha_api.py
diff --git a/src/lfx/src/lfx/components/xai/__init__.py b/src/packages/core/lfx/components/xai/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/xai/__init__.py
rename to src/packages/core/lfx/components/xai/__init__.py
diff --git a/src/lfx/src/lfx/components/xai/xai.py b/src/packages/core/lfx/components/xai/xai.py
similarity index 100%
rename from src/lfx/src/lfx/components/xai/xai.py
rename to src/packages/core/lfx/components/xai/xai.py
diff --git a/src/lfx/src/lfx/components/yahoosearch/__init__.py b/src/packages/core/lfx/components/yahoosearch/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/yahoosearch/__init__.py
rename to src/packages/core/lfx/components/yahoosearch/__init__.py
diff --git a/src/lfx/src/lfx/components/yahoosearch/yahoo.py b/src/packages/core/lfx/components/yahoosearch/yahoo.py
similarity index 100%
rename from src/lfx/src/lfx/components/yahoosearch/yahoo.py
rename to src/packages/core/lfx/components/yahoosearch/yahoo.py
diff --git a/src/lfx/src/lfx/components/youtube/__init__.py b/src/packages/core/lfx/components/youtube/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/youtube/__init__.py
rename to src/packages/core/lfx/components/youtube/__init__.py
diff --git a/src/lfx/src/lfx/components/youtube/channel.py b/src/packages/core/lfx/components/youtube/channel.py
similarity index 100%
rename from src/lfx/src/lfx/components/youtube/channel.py
rename to src/packages/core/lfx/components/youtube/channel.py
diff --git a/src/lfx/src/lfx/components/youtube/comments.py b/src/packages/core/lfx/components/youtube/comments.py
similarity index 100%
rename from src/lfx/src/lfx/components/youtube/comments.py
rename to src/packages/core/lfx/components/youtube/comments.py
diff --git a/src/lfx/src/lfx/components/youtube/playlist.py b/src/packages/core/lfx/components/youtube/playlist.py
similarity index 100%
rename from src/lfx/src/lfx/components/youtube/playlist.py
rename to src/packages/core/lfx/components/youtube/playlist.py
diff --git a/src/lfx/src/lfx/components/youtube/search.py b/src/packages/core/lfx/components/youtube/search.py
similarity index 100%
rename from src/lfx/src/lfx/components/youtube/search.py
rename to src/packages/core/lfx/components/youtube/search.py
diff --git a/src/lfx/src/lfx/components/youtube/trending.py b/src/packages/core/lfx/components/youtube/trending.py
similarity index 100%
rename from src/lfx/src/lfx/components/youtube/trending.py
rename to src/packages/core/lfx/components/youtube/trending.py
diff --git a/src/lfx/src/lfx/components/youtube/video_details.py b/src/packages/core/lfx/components/youtube/video_details.py
similarity index 100%
rename from src/lfx/src/lfx/components/youtube/video_details.py
rename to src/packages/core/lfx/components/youtube/video_details.py
diff --git a/src/lfx/src/lfx/components/youtube/youtube_transcripts.py b/src/packages/core/lfx/components/youtube/youtube_transcripts.py
similarity index 100%
rename from src/lfx/src/lfx/components/youtube/youtube_transcripts.py
rename to src/packages/core/lfx/components/youtube/youtube_transcripts.py
diff --git a/src/lfx/src/lfx/components/zep/__init__.py b/src/packages/core/lfx/components/zep/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/components/zep/__init__.py
rename to src/packages/core/lfx/components/zep/__init__.py
diff --git a/src/lfx/src/lfx/components/zep/zep.py b/src/packages/core/lfx/components/zep/zep.py
similarity index 100%
rename from src/lfx/src/lfx/components/zep/zep.py
rename to src/packages/core/lfx/components/zep/zep.py
diff --git a/src/lfx/src/lfx/constants.py b/src/packages/core/lfx/constants.py
similarity index 100%
rename from src/lfx/src/lfx/constants.py
rename to src/packages/core/lfx/constants.py
diff --git a/src/lfx/src/lfx/custom/__init__.py b/src/packages/core/lfx/custom/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/custom/__init__.py
rename to src/packages/core/lfx/custom/__init__.py
diff --git a/src/lfx/src/lfx/custom/attributes.py b/src/packages/core/lfx/custom/attributes.py
similarity index 100%
rename from src/lfx/src/lfx/custom/attributes.py
rename to src/packages/core/lfx/custom/attributes.py
diff --git a/src/lfx/src/lfx/custom/code_parser/__init__.py b/src/packages/core/lfx/custom/code_parser/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/custom/code_parser/__init__.py
rename to src/packages/core/lfx/custom/code_parser/__init__.py
diff --git a/src/lfx/src/lfx/custom/code_parser/code_parser.py b/src/packages/core/lfx/custom/code_parser/code_parser.py
similarity index 100%
rename from src/lfx/src/lfx/custom/code_parser/code_parser.py
rename to src/packages/core/lfx/custom/code_parser/code_parser.py
diff --git a/src/backend/tests/unit/components/__init__.py b/src/packages/core/lfx/custom/custom_component/__init__.py
similarity index 100%
rename from src/backend/tests/unit/components/__init__.py
rename to src/packages/core/lfx/custom/custom_component/__init__.py
diff --git a/src/lfx/src/lfx/custom/custom_component/base_component.py b/src/packages/core/lfx/custom/custom_component/base_component.py
similarity index 100%
rename from src/lfx/src/lfx/custom/custom_component/base_component.py
rename to src/packages/core/lfx/custom/custom_component/base_component.py
diff --git a/src/lfx/src/lfx/custom/custom_component/component.py b/src/packages/core/lfx/custom/custom_component/component.py
similarity index 100%
rename from src/lfx/src/lfx/custom/custom_component/component.py
rename to src/packages/core/lfx/custom/custom_component/component.py
diff --git a/src/lfx/src/lfx/custom/custom_component/component_with_cache.py b/src/packages/core/lfx/custom/custom_component/component_with_cache.py
similarity index 100%
rename from src/lfx/src/lfx/custom/custom_component/component_with_cache.py
rename to src/packages/core/lfx/custom/custom_component/component_with_cache.py
diff --git a/src/lfx/src/lfx/custom/custom_component/custom_component.py b/src/packages/core/lfx/custom/custom_component/custom_component.py
similarity index 100%
rename from src/lfx/src/lfx/custom/custom_component/custom_component.py
rename to src/packages/core/lfx/custom/custom_component/custom_component.py
diff --git a/src/lfx/src/lfx/custom/dependency_analyzer.py b/src/packages/core/lfx/custom/dependency_analyzer.py
similarity index 100%
rename from src/lfx/src/lfx/custom/dependency_analyzer.py
rename to src/packages/core/lfx/custom/dependency_analyzer.py
diff --git a/src/lfx/src/lfx/custom/directory_reader/__init__.py b/src/packages/core/lfx/custom/directory_reader/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/custom/directory_reader/__init__.py
rename to src/packages/core/lfx/custom/directory_reader/__init__.py
diff --git a/src/lfx/src/lfx/custom/directory_reader/directory_reader.py b/src/packages/core/lfx/custom/directory_reader/directory_reader.py
similarity index 100%
rename from src/lfx/src/lfx/custom/directory_reader/directory_reader.py
rename to src/packages/core/lfx/custom/directory_reader/directory_reader.py
diff --git a/src/lfx/src/lfx/custom/directory_reader/utils.py b/src/packages/core/lfx/custom/directory_reader/utils.py
similarity index 100%
rename from src/lfx/src/lfx/custom/directory_reader/utils.py
rename to src/packages/core/lfx/custom/directory_reader/utils.py
diff --git a/src/lfx/src/lfx/custom/eval.py b/src/packages/core/lfx/custom/eval.py
similarity index 100%
rename from src/lfx/src/lfx/custom/eval.py
rename to src/packages/core/lfx/custom/eval.py
diff --git a/src/lfx/src/lfx/custom/schema.py b/src/packages/core/lfx/custom/schema.py
similarity index 100%
rename from src/lfx/src/lfx/custom/schema.py
rename to src/packages/core/lfx/custom/schema.py
diff --git a/src/lfx/src/lfx/custom/tree_visitor.py b/src/packages/core/lfx/custom/tree_visitor.py
similarity index 100%
rename from src/lfx/src/lfx/custom/tree_visitor.py
rename to src/packages/core/lfx/custom/tree_visitor.py
diff --git a/src/lfx/src/lfx/custom/utils.py b/src/packages/core/lfx/custom/utils.py
similarity index 100%
rename from src/lfx/src/lfx/custom/utils.py
rename to src/packages/core/lfx/custom/utils.py
diff --git a/src/lfx/src/lfx/custom/validate.py b/src/packages/core/lfx/custom/validate.py
similarity index 100%
rename from src/lfx/src/lfx/custom/validate.py
rename to src/packages/core/lfx/custom/validate.py
diff --git a/src/lfx/src/lfx/events/__init__.py b/src/packages/core/lfx/events/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/events/__init__.py
rename to src/packages/core/lfx/events/__init__.py
diff --git a/src/lfx/src/lfx/events/event_manager.py b/src/packages/core/lfx/events/event_manager.py
similarity index 100%
rename from src/lfx/src/lfx/events/event_manager.py
rename to src/packages/core/lfx/events/event_manager.py
diff --git a/src/backend/tests/unit/components/agents/__init__.py b/src/packages/core/lfx/exceptions/__init__.py
similarity index 100%
rename from src/backend/tests/unit/components/agents/__init__.py
rename to src/packages/core/lfx/exceptions/__init__.py
diff --git a/src/lfx/src/lfx/exceptions/component.py b/src/packages/core/lfx/exceptions/component.py
similarity index 100%
rename from src/lfx/src/lfx/exceptions/component.py
rename to src/packages/core/lfx/exceptions/component.py
diff --git a/src/lfx/src/lfx/field_typing/__init__.py b/src/packages/core/lfx/field_typing/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/field_typing/__init__.py
rename to src/packages/core/lfx/field_typing/__init__.py
diff --git a/src/lfx/src/lfx/field_typing/constants.py b/src/packages/core/lfx/field_typing/constants.py
similarity index 100%
rename from src/lfx/src/lfx/field_typing/constants.py
rename to src/packages/core/lfx/field_typing/constants.py
diff --git a/src/lfx/src/lfx/field_typing/range_spec.py b/src/packages/core/lfx/field_typing/range_spec.py
similarity index 100%
rename from src/lfx/src/lfx/field_typing/range_spec.py
rename to src/packages/core/lfx/field_typing/range_spec.py
diff --git a/src/lfx/src/lfx/graph/__init__.py b/src/packages/core/lfx/graph/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/graph/__init__.py
rename to src/packages/core/lfx/graph/__init__.py
diff --git a/src/backend/tests/unit/components/bundles/__init__.py b/src/packages/core/lfx/graph/edge/__init__.py
similarity index 100%
rename from src/backend/tests/unit/components/bundles/__init__.py
rename to src/packages/core/lfx/graph/edge/__init__.py
diff --git a/src/lfx/src/lfx/graph/edge/base.py b/src/packages/core/lfx/graph/edge/base.py
similarity index 100%
rename from src/lfx/src/lfx/graph/edge/base.py
rename to src/packages/core/lfx/graph/edge/base.py
diff --git a/src/lfx/src/lfx/graph/edge/schema.py b/src/packages/core/lfx/graph/edge/schema.py
similarity index 100%
rename from src/lfx/src/lfx/graph/edge/schema.py
rename to src/packages/core/lfx/graph/edge/schema.py
diff --git a/src/lfx/src/lfx/graph/edge/utils.py b/src/packages/core/lfx/graph/edge/utils.py
similarity index 100%
rename from src/lfx/src/lfx/graph/edge/utils.py
rename to src/packages/core/lfx/graph/edge/utils.py
diff --git a/src/backend/tests/unit/components/bundles/langwatch/__init__.py b/src/packages/core/lfx/graph/graph/__init__.py
similarity index 100%
rename from src/backend/tests/unit/components/bundles/langwatch/__init__.py
rename to src/packages/core/lfx/graph/graph/__init__.py
diff --git a/src/lfx/src/lfx/graph/graph/ascii.py b/src/packages/core/lfx/graph/graph/ascii.py
similarity index 100%
rename from src/lfx/src/lfx/graph/graph/ascii.py
rename to src/packages/core/lfx/graph/graph/ascii.py
diff --git a/src/lfx/src/lfx/graph/graph/base.py b/src/packages/core/lfx/graph/graph/base.py
similarity index 100%
rename from src/lfx/src/lfx/graph/graph/base.py
rename to src/packages/core/lfx/graph/graph/base.py
diff --git a/src/lfx/src/lfx/graph/graph/constants.py b/src/packages/core/lfx/graph/graph/constants.py
similarity index 100%
rename from src/lfx/src/lfx/graph/graph/constants.py
rename to src/packages/core/lfx/graph/graph/constants.py
diff --git a/src/lfx/src/lfx/graph/graph/runnable_vertices_manager.py b/src/packages/core/lfx/graph/graph/runnable_vertices_manager.py
similarity index 100%
rename from src/lfx/src/lfx/graph/graph/runnable_vertices_manager.py
rename to src/packages/core/lfx/graph/graph/runnable_vertices_manager.py
diff --git a/src/lfx/src/lfx/graph/graph/schema.py b/src/packages/core/lfx/graph/graph/schema.py
similarity index 100%
rename from src/lfx/src/lfx/graph/graph/schema.py
rename to src/packages/core/lfx/graph/graph/schema.py
diff --git a/src/lfx/src/lfx/graph/graph/state_model.py b/src/packages/core/lfx/graph/graph/state_model.py
similarity index 100%
rename from src/lfx/src/lfx/graph/graph/state_model.py
rename to src/packages/core/lfx/graph/graph/state_model.py
diff --git a/src/lfx/src/lfx/graph/graph/utils.py b/src/packages/core/lfx/graph/graph/utils.py
similarity index 100%
rename from src/lfx/src/lfx/graph/graph/utils.py
rename to src/packages/core/lfx/graph/graph/utils.py
diff --git a/src/lfx/src/lfx/graph/schema.py b/src/packages/core/lfx/graph/schema.py
similarity index 100%
rename from src/lfx/src/lfx/graph/schema.py
rename to src/packages/core/lfx/graph/schema.py
diff --git a/src/backend/tests/unit/components/bundles/youtube/__init__.py b/src/packages/core/lfx/graph/state/__init__.py
similarity index 100%
rename from src/backend/tests/unit/components/bundles/youtube/__init__.py
rename to src/packages/core/lfx/graph/state/__init__.py
diff --git a/src/lfx/src/lfx/graph/state/model.py b/src/packages/core/lfx/graph/state/model.py
similarity index 100%
rename from src/lfx/src/lfx/graph/state/model.py
rename to src/packages/core/lfx/graph/state/model.py
diff --git a/src/lfx/src/lfx/graph/utils.py b/src/packages/core/lfx/graph/utils.py
similarity index 100%
rename from src/lfx/src/lfx/graph/utils.py
rename to src/packages/core/lfx/graph/utils.py
diff --git a/src/backend/tests/unit/components/data/__init__.py b/src/packages/core/lfx/graph/vertex/__init__.py
similarity index 100%
rename from src/backend/tests/unit/components/data/__init__.py
rename to src/packages/core/lfx/graph/vertex/__init__.py
diff --git a/src/lfx/src/lfx/graph/vertex/base.py b/src/packages/core/lfx/graph/vertex/base.py
similarity index 100%
rename from src/lfx/src/lfx/graph/vertex/base.py
rename to src/packages/core/lfx/graph/vertex/base.py
diff --git a/src/lfx/src/lfx/graph/vertex/constants.py b/src/packages/core/lfx/graph/vertex/constants.py
similarity index 100%
rename from src/lfx/src/lfx/graph/vertex/constants.py
rename to src/packages/core/lfx/graph/vertex/constants.py
diff --git a/src/lfx/src/lfx/graph/vertex/exceptions.py b/src/packages/core/lfx/graph/vertex/exceptions.py
similarity index 100%
rename from src/lfx/src/lfx/graph/vertex/exceptions.py
rename to src/packages/core/lfx/graph/vertex/exceptions.py
diff --git a/src/lfx/src/lfx/graph/vertex/param_handler.py b/src/packages/core/lfx/graph/vertex/param_handler.py
similarity index 100%
rename from src/lfx/src/lfx/graph/vertex/param_handler.py
rename to src/packages/core/lfx/graph/vertex/param_handler.py
diff --git a/src/lfx/src/lfx/graph/vertex/schema.py b/src/packages/core/lfx/graph/vertex/schema.py
similarity index 100%
rename from src/lfx/src/lfx/graph/vertex/schema.py
rename to src/packages/core/lfx/graph/vertex/schema.py
diff --git a/src/lfx/src/lfx/graph/vertex/utils.py b/src/packages/core/lfx/graph/vertex/utils.py
similarity index 100%
rename from src/lfx/src/lfx/graph/vertex/utils.py
rename to src/packages/core/lfx/graph/vertex/utils.py
diff --git a/src/lfx/src/lfx/graph/vertex/vertex_types.py b/src/packages/core/lfx/graph/vertex/vertex_types.py
similarity index 100%
rename from src/lfx/src/lfx/graph/vertex/vertex_types.py
rename to src/packages/core/lfx/graph/vertex/vertex_types.py
diff --git a/src/lfx/src/lfx/helpers/__init__.py b/src/packages/core/lfx/helpers/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/helpers/__init__.py
rename to src/packages/core/lfx/helpers/__init__.py
diff --git a/src/lfx/src/lfx/helpers/base_model.py b/src/packages/core/lfx/helpers/base_model.py
similarity index 100%
rename from src/lfx/src/lfx/helpers/base_model.py
rename to src/packages/core/lfx/helpers/base_model.py
diff --git a/src/lfx/src/lfx/helpers/custom.py b/src/packages/core/lfx/helpers/custom.py
similarity index 100%
rename from src/lfx/src/lfx/helpers/custom.py
rename to src/packages/core/lfx/helpers/custom.py
diff --git a/src/lfx/src/lfx/helpers/data.py b/src/packages/core/lfx/helpers/data.py
similarity index 100%
rename from src/lfx/src/lfx/helpers/data.py
rename to src/packages/core/lfx/helpers/data.py
diff --git a/src/lfx/src/lfx/helpers/flow.py b/src/packages/core/lfx/helpers/flow.py
similarity index 100%
rename from src/lfx/src/lfx/helpers/flow.py
rename to src/packages/core/lfx/helpers/flow.py
diff --git a/src/lfx/src/lfx/inputs/__init__.py b/src/packages/core/lfx/inputs/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/inputs/__init__.py
rename to src/packages/core/lfx/inputs/__init__.py
diff --git a/src/lfx/src/lfx/inputs/constants.py b/src/packages/core/lfx/inputs/constants.py
similarity index 100%
rename from src/lfx/src/lfx/inputs/constants.py
rename to src/packages/core/lfx/inputs/constants.py
diff --git a/src/lfx/src/lfx/inputs/input_mixin.py b/src/packages/core/lfx/inputs/input_mixin.py
similarity index 100%
rename from src/lfx/src/lfx/inputs/input_mixin.py
rename to src/packages/core/lfx/inputs/input_mixin.py
diff --git a/src/lfx/src/lfx/inputs/inputs.py b/src/packages/core/lfx/inputs/inputs.py
similarity index 100%
rename from src/lfx/src/lfx/inputs/inputs.py
rename to src/packages/core/lfx/inputs/inputs.py
diff --git a/src/lfx/src/lfx/inputs/validators.py b/src/packages/core/lfx/inputs/validators.py
similarity index 100%
rename from src/lfx/src/lfx/inputs/validators.py
rename to src/packages/core/lfx/inputs/validators.py
diff --git a/src/lfx/src/lfx/interface/__init__.py b/src/packages/core/lfx/interface/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/interface/__init__.py
rename to src/packages/core/lfx/interface/__init__.py
diff --git a/src/lfx/src/lfx/interface/components.py b/src/packages/core/lfx/interface/components.py
similarity index 100%
rename from src/lfx/src/lfx/interface/components.py
rename to src/packages/core/lfx/interface/components.py
diff --git a/src/lfx/src/lfx/interface/importing/__init__.py b/src/packages/core/lfx/interface/importing/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/interface/importing/__init__.py
rename to src/packages/core/lfx/interface/importing/__init__.py
diff --git a/src/lfx/src/lfx/interface/importing/utils.py b/src/packages/core/lfx/interface/importing/utils.py
similarity index 100%
rename from src/lfx/src/lfx/interface/importing/utils.py
rename to src/packages/core/lfx/interface/importing/utils.py
diff --git a/src/lfx/src/lfx/interface/initialize/__init__.py b/src/packages/core/lfx/interface/initialize/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/interface/initialize/__init__.py
rename to src/packages/core/lfx/interface/initialize/__init__.py
diff --git a/src/lfx/src/lfx/interface/initialize/loading.py b/src/packages/core/lfx/interface/initialize/loading.py
similarity index 100%
rename from src/lfx/src/lfx/interface/initialize/loading.py
rename to src/packages/core/lfx/interface/initialize/loading.py
diff --git a/src/lfx/src/lfx/interface/listing.py b/src/packages/core/lfx/interface/listing.py
similarity index 100%
rename from src/lfx/src/lfx/interface/listing.py
rename to src/packages/core/lfx/interface/listing.py
diff --git a/src/lfx/src/lfx/interface/run.py b/src/packages/core/lfx/interface/run.py
similarity index 100%
rename from src/lfx/src/lfx/interface/run.py
rename to src/packages/core/lfx/interface/run.py
diff --git a/src/lfx/src/lfx/interface/utils.py b/src/packages/core/lfx/interface/utils.py
similarity index 100%
rename from src/lfx/src/lfx/interface/utils.py
rename to src/packages/core/lfx/interface/utils.py
diff --git a/src/lfx/src/lfx/io/__init__.py b/src/packages/core/lfx/io/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/io/__init__.py
rename to src/packages/core/lfx/io/__init__.py
diff --git a/src/lfx/src/lfx/io/schema.py b/src/packages/core/lfx/io/schema.py
similarity index 100%
rename from src/lfx/src/lfx/io/schema.py
rename to src/packages/core/lfx/io/schema.py
diff --git a/src/lfx/src/lfx/load/__init__.py b/src/packages/core/lfx/load/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/load/__init__.py
rename to src/packages/core/lfx/load/__init__.py
diff --git a/src/lfx/src/lfx/load/load.py b/src/packages/core/lfx/load/load.py
similarity index 100%
rename from src/lfx/src/lfx/load/load.py
rename to src/packages/core/lfx/load/load.py
diff --git a/src/lfx/src/lfx/load/utils.py b/src/packages/core/lfx/load/utils.py
similarity index 100%
rename from src/lfx/src/lfx/load/utils.py
rename to src/packages/core/lfx/load/utils.py
diff --git a/src/lfx/src/lfx/log/__init__.py b/src/packages/core/lfx/log/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/log/__init__.py
rename to src/packages/core/lfx/log/__init__.py
diff --git a/src/lfx/src/lfx/log/logger.py b/src/packages/core/lfx/log/logger.py
similarity index 100%
rename from src/lfx/src/lfx/log/logger.py
rename to src/packages/core/lfx/log/logger.py
diff --git a/src/lfx/src/lfx/memory/__init__.py b/src/packages/core/lfx/memory/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/memory/__init__.py
rename to src/packages/core/lfx/memory/__init__.py
diff --git a/src/lfx/src/lfx/memory/stubs.py b/src/packages/core/lfx/memory/stubs.py
similarity index 100%
rename from src/lfx/src/lfx/memory/stubs.py
rename to src/packages/core/lfx/memory/stubs.py
diff --git a/src/lfx/src/lfx/processing/__init__.py b/src/packages/core/lfx/processing/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/processing/__init__.py
rename to src/packages/core/lfx/processing/__init__.py
diff --git a/src/lfx/src/lfx/processing/process.py b/src/packages/core/lfx/processing/process.py
similarity index 100%
rename from src/lfx/src/lfx/processing/process.py
rename to src/packages/core/lfx/processing/process.py
diff --git a/src/lfx/src/lfx/processing/utils.py b/src/packages/core/lfx/processing/utils.py
similarity index 100%
rename from src/lfx/src/lfx/processing/utils.py
rename to src/packages/core/lfx/processing/utils.py
diff --git a/src/lfx/src/lfx/py.typed b/src/packages/core/lfx/py.typed
similarity index 100%
rename from src/lfx/src/lfx/py.typed
rename to src/packages/core/lfx/py.typed
diff --git a/src/lfx/src/lfx/schema/__init__.py b/src/packages/core/lfx/schema/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/schema/__init__.py
rename to src/packages/core/lfx/schema/__init__.py
diff --git a/src/lfx/src/lfx/schema/artifact.py b/src/packages/core/lfx/schema/artifact.py
similarity index 100%
rename from src/lfx/src/lfx/schema/artifact.py
rename to src/packages/core/lfx/schema/artifact.py
diff --git a/src/lfx/src/lfx/schema/content_block.py b/src/packages/core/lfx/schema/content_block.py
similarity index 100%
rename from src/lfx/src/lfx/schema/content_block.py
rename to src/packages/core/lfx/schema/content_block.py
diff --git a/src/lfx/src/lfx/schema/content_types.py b/src/packages/core/lfx/schema/content_types.py
similarity index 100%
rename from src/lfx/src/lfx/schema/content_types.py
rename to src/packages/core/lfx/schema/content_types.py
diff --git a/src/lfx/src/lfx/schema/data.py b/src/packages/core/lfx/schema/data.py
similarity index 100%
rename from src/lfx/src/lfx/schema/data.py
rename to src/packages/core/lfx/schema/data.py
diff --git a/src/lfx/src/lfx/schema/dataframe.py b/src/packages/core/lfx/schema/dataframe.py
similarity index 100%
rename from src/lfx/src/lfx/schema/dataframe.py
rename to src/packages/core/lfx/schema/dataframe.py
diff --git a/src/lfx/src/lfx/schema/dotdict.py b/src/packages/core/lfx/schema/dotdict.py
similarity index 100%
rename from src/lfx/src/lfx/schema/dotdict.py
rename to src/packages/core/lfx/schema/dotdict.py
diff --git a/src/lfx/src/lfx/schema/encoders.py b/src/packages/core/lfx/schema/encoders.py
similarity index 100%
rename from src/lfx/src/lfx/schema/encoders.py
rename to src/packages/core/lfx/schema/encoders.py
diff --git a/src/lfx/src/lfx/schema/graph.py b/src/packages/core/lfx/schema/graph.py
similarity index 100%
rename from src/lfx/src/lfx/schema/graph.py
rename to src/packages/core/lfx/schema/graph.py
diff --git a/src/lfx/src/lfx/schema/image.py b/src/packages/core/lfx/schema/image.py
similarity index 100%
rename from src/lfx/src/lfx/schema/image.py
rename to src/packages/core/lfx/schema/image.py
diff --git a/src/lfx/src/lfx/schema/json_schema.py b/src/packages/core/lfx/schema/json_schema.py
similarity index 100%
rename from src/lfx/src/lfx/schema/json_schema.py
rename to src/packages/core/lfx/schema/json_schema.py
diff --git a/src/lfx/src/lfx/schema/log.py b/src/packages/core/lfx/schema/log.py
similarity index 100%
rename from src/lfx/src/lfx/schema/log.py
rename to src/packages/core/lfx/schema/log.py
diff --git a/src/lfx/src/lfx/schema/message.py b/src/packages/core/lfx/schema/message.py
similarity index 100%
rename from src/lfx/src/lfx/schema/message.py
rename to src/packages/core/lfx/schema/message.py
diff --git a/src/lfx/src/lfx/schema/openai_responses_schemas.py b/src/packages/core/lfx/schema/openai_responses_schemas.py
similarity index 100%
rename from src/lfx/src/lfx/schema/openai_responses_schemas.py
rename to src/packages/core/lfx/schema/openai_responses_schemas.py
diff --git a/src/lfx/src/lfx/schema/properties.py b/src/packages/core/lfx/schema/properties.py
similarity index 100%
rename from src/lfx/src/lfx/schema/properties.py
rename to src/packages/core/lfx/schema/properties.py
diff --git a/src/lfx/src/lfx/schema/schema.py b/src/packages/core/lfx/schema/schema.py
similarity index 100%
rename from src/lfx/src/lfx/schema/schema.py
rename to src/packages/core/lfx/schema/schema.py
diff --git a/src/lfx/src/lfx/schema/serialize.py b/src/packages/core/lfx/schema/serialize.py
similarity index 100%
rename from src/lfx/src/lfx/schema/serialize.py
rename to src/packages/core/lfx/schema/serialize.py
diff --git a/src/lfx/src/lfx/schema/table.py b/src/packages/core/lfx/schema/table.py
similarity index 100%
rename from src/lfx/src/lfx/schema/table.py
rename to src/packages/core/lfx/schema/table.py
diff --git a/src/lfx/src/lfx/schema/validators.py b/src/packages/core/lfx/schema/validators.py
similarity index 100%
rename from src/lfx/src/lfx/schema/validators.py
rename to src/packages/core/lfx/schema/validators.py
diff --git a/src/lfx/src/lfx/serialization/__init__.py b/src/packages/core/lfx/serialization/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/serialization/__init__.py
rename to src/packages/core/lfx/serialization/__init__.py
diff --git a/src/lfx/src/lfx/serialization/constants.py b/src/packages/core/lfx/serialization/constants.py
similarity index 100%
rename from src/lfx/src/lfx/serialization/constants.py
rename to src/packages/core/lfx/serialization/constants.py
diff --git a/src/lfx/src/lfx/serialization/serialization.py b/src/packages/core/lfx/serialization/serialization.py
similarity index 100%
rename from src/lfx/src/lfx/serialization/serialization.py
rename to src/packages/core/lfx/serialization/serialization.py
diff --git a/src/lfx/src/lfx/services/__init__.py b/src/packages/core/lfx/services/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/services/__init__.py
rename to src/packages/core/lfx/services/__init__.py
diff --git a/src/lfx/src/lfx/services/base.py b/src/packages/core/lfx/services/base.py
similarity index 100%
rename from src/lfx/src/lfx/services/base.py
rename to src/packages/core/lfx/services/base.py
diff --git a/src/lfx/src/lfx/services/cache/__init__.py b/src/packages/core/lfx/services/cache/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/services/cache/__init__.py
rename to src/packages/core/lfx/services/cache/__init__.py
diff --git a/src/lfx/src/lfx/services/cache/base.py b/src/packages/core/lfx/services/cache/base.py
similarity index 100%
rename from src/lfx/src/lfx/services/cache/base.py
rename to src/packages/core/lfx/services/cache/base.py
diff --git a/src/lfx/src/lfx/services/cache/service.py b/src/packages/core/lfx/services/cache/service.py
similarity index 100%
rename from src/lfx/src/lfx/services/cache/service.py
rename to src/packages/core/lfx/services/cache/service.py
diff --git a/src/lfx/src/lfx/services/cache/utils.py b/src/packages/core/lfx/services/cache/utils.py
similarity index 100%
rename from src/lfx/src/lfx/services/cache/utils.py
rename to src/packages/core/lfx/services/cache/utils.py
diff --git a/src/lfx/src/lfx/services/chat/__init__.py b/src/packages/core/lfx/services/chat/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/services/chat/__init__.py
rename to src/packages/core/lfx/services/chat/__init__.py
diff --git a/src/lfx/src/lfx/services/chat/config.py b/src/packages/core/lfx/services/chat/config.py
similarity index 100%
rename from src/lfx/src/lfx/services/chat/config.py
rename to src/packages/core/lfx/services/chat/config.py
diff --git a/src/lfx/src/lfx/services/chat/schema.py b/src/packages/core/lfx/services/chat/schema.py
similarity index 100%
rename from src/lfx/src/lfx/services/chat/schema.py
rename to src/packages/core/lfx/services/chat/schema.py
diff --git a/src/lfx/src/lfx/services/deps.py b/src/packages/core/lfx/services/deps.py
similarity index 100%
rename from src/lfx/src/lfx/services/deps.py
rename to src/packages/core/lfx/services/deps.py
diff --git a/src/lfx/src/lfx/services/factory.py b/src/packages/core/lfx/services/factory.py
similarity index 100%
rename from src/lfx/src/lfx/services/factory.py
rename to src/packages/core/lfx/services/factory.py
diff --git a/src/lfx/src/lfx/services/initialize.py b/src/packages/core/lfx/services/initialize.py
similarity index 100%
rename from src/lfx/src/lfx/services/initialize.py
rename to src/packages/core/lfx/services/initialize.py
diff --git a/src/lfx/src/lfx/services/interfaces.py b/src/packages/core/lfx/services/interfaces.py
similarity index 100%
rename from src/lfx/src/lfx/services/interfaces.py
rename to src/packages/core/lfx/services/interfaces.py
diff --git a/src/lfx/src/lfx/services/manager.py b/src/packages/core/lfx/services/manager.py
similarity index 100%
rename from src/lfx/src/lfx/services/manager.py
rename to src/packages/core/lfx/services/manager.py
diff --git a/src/lfx/src/lfx/services/schema.py b/src/packages/core/lfx/services/schema.py
similarity index 100%
rename from src/lfx/src/lfx/services/schema.py
rename to src/packages/core/lfx/services/schema.py
diff --git a/src/lfx/src/lfx/services/session.py b/src/packages/core/lfx/services/session.py
similarity index 100%
rename from src/lfx/src/lfx/services/session.py
rename to src/packages/core/lfx/services/session.py
diff --git a/src/lfx/src/lfx/services/settings/__init__.py b/src/packages/core/lfx/services/settings/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/services/settings/__init__.py
rename to src/packages/core/lfx/services/settings/__init__.py
diff --git a/src/lfx/src/lfx/services/settings/auth.py b/src/packages/core/lfx/services/settings/auth.py
similarity index 100%
rename from src/lfx/src/lfx/services/settings/auth.py
rename to src/packages/core/lfx/services/settings/auth.py
diff --git a/src/lfx/src/lfx/services/settings/base.py b/src/packages/core/lfx/services/settings/base.py
similarity index 100%
rename from src/lfx/src/lfx/services/settings/base.py
rename to src/packages/core/lfx/services/settings/base.py
diff --git a/src/lfx/src/lfx/services/settings/constants.py b/src/packages/core/lfx/services/settings/constants.py
similarity index 100%
rename from src/lfx/src/lfx/services/settings/constants.py
rename to src/packages/core/lfx/services/settings/constants.py
diff --git a/src/lfx/src/lfx/services/settings/factory.py b/src/packages/core/lfx/services/settings/factory.py
similarity index 100%
rename from src/lfx/src/lfx/services/settings/factory.py
rename to src/packages/core/lfx/services/settings/factory.py
diff --git a/src/lfx/src/lfx/services/settings/feature_flags.py b/src/packages/core/lfx/services/settings/feature_flags.py
similarity index 100%
rename from src/lfx/src/lfx/services/settings/feature_flags.py
rename to src/packages/core/lfx/services/settings/feature_flags.py
diff --git a/src/lfx/src/lfx/services/settings/service.py b/src/packages/core/lfx/services/settings/service.py
similarity index 100%
rename from src/lfx/src/lfx/services/settings/service.py
rename to src/packages/core/lfx/services/settings/service.py
diff --git a/src/lfx/src/lfx/services/settings/utils.py b/src/packages/core/lfx/services/settings/utils.py
similarity index 100%
rename from src/lfx/src/lfx/services/settings/utils.py
rename to src/packages/core/lfx/services/settings/utils.py
diff --git a/src/lfx/src/lfx/services/shared_component_cache/__init__.py b/src/packages/core/lfx/services/shared_component_cache/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/services/shared_component_cache/__init__.py
rename to src/packages/core/lfx/services/shared_component_cache/__init__.py
diff --git a/src/lfx/src/lfx/services/shared_component_cache/factory.py b/src/packages/core/lfx/services/shared_component_cache/factory.py
similarity index 100%
rename from src/lfx/src/lfx/services/shared_component_cache/factory.py
rename to src/packages/core/lfx/services/shared_component_cache/factory.py
diff --git a/src/lfx/src/lfx/services/shared_component_cache/service.py b/src/packages/core/lfx/services/shared_component_cache/service.py
similarity index 100%
rename from src/lfx/src/lfx/services/shared_component_cache/service.py
rename to src/packages/core/lfx/services/shared_component_cache/service.py
diff --git a/src/lfx/src/lfx/services/storage/__init__.py b/src/packages/core/lfx/services/storage/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/services/storage/__init__.py
rename to src/packages/core/lfx/services/storage/__init__.py
diff --git a/src/lfx/src/lfx/services/storage/local.py b/src/packages/core/lfx/services/storage/local.py
similarity index 100%
rename from src/lfx/src/lfx/services/storage/local.py
rename to src/packages/core/lfx/services/storage/local.py
diff --git a/src/lfx/src/lfx/services/storage/service.py b/src/packages/core/lfx/services/storage/service.py
similarity index 100%
rename from src/lfx/src/lfx/services/storage/service.py
rename to src/packages/core/lfx/services/storage/service.py
diff --git a/src/lfx/src/lfx/services/tracing/__init__.py b/src/packages/core/lfx/services/tracing/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/services/tracing/__init__.py
rename to src/packages/core/lfx/services/tracing/__init__.py
diff --git a/src/lfx/src/lfx/services/tracing/service.py b/src/packages/core/lfx/services/tracing/service.py
similarity index 100%
rename from src/lfx/src/lfx/services/tracing/service.py
rename to src/packages/core/lfx/services/tracing/service.py
diff --git a/src/lfx/src/lfx/settings.py b/src/packages/core/lfx/settings.py
similarity index 100%
rename from src/lfx/src/lfx/settings.py
rename to src/packages/core/lfx/settings.py
diff --git a/src/lfx/src/lfx/template/__init__.py b/src/packages/core/lfx/template/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/template/__init__.py
rename to src/packages/core/lfx/template/__init__.py
diff --git a/src/backend/tests/unit/components/embeddings/__init__.py b/src/packages/core/lfx/template/field/__init__.py
similarity index 100%
rename from src/backend/tests/unit/components/embeddings/__init__.py
rename to src/packages/core/lfx/template/field/__init__.py
diff --git a/src/lfx/src/lfx/template/field/base.py b/src/packages/core/lfx/template/field/base.py
similarity index 100%
rename from src/lfx/src/lfx/template/field/base.py
rename to src/packages/core/lfx/template/field/base.py
diff --git a/src/lfx/src/lfx/template/field/prompt.py b/src/packages/core/lfx/template/field/prompt.py
similarity index 100%
rename from src/lfx/src/lfx/template/field/prompt.py
rename to src/packages/core/lfx/template/field/prompt.py
diff --git a/src/lfx/src/lfx/template/frontend_node/__init__.py b/src/packages/core/lfx/template/frontend_node/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/template/frontend_node/__init__.py
rename to src/packages/core/lfx/template/frontend_node/__init__.py
diff --git a/src/lfx/src/lfx/template/frontend_node/base.py b/src/packages/core/lfx/template/frontend_node/base.py
similarity index 100%
rename from src/lfx/src/lfx/template/frontend_node/base.py
rename to src/packages/core/lfx/template/frontend_node/base.py
diff --git a/src/lfx/src/lfx/template/frontend_node/constants.py b/src/packages/core/lfx/template/frontend_node/constants.py
similarity index 100%
rename from src/lfx/src/lfx/template/frontend_node/constants.py
rename to src/packages/core/lfx/template/frontend_node/constants.py
diff --git a/src/lfx/src/lfx/template/frontend_node/custom_components.py b/src/packages/core/lfx/template/frontend_node/custom_components.py
similarity index 100%
rename from src/lfx/src/lfx/template/frontend_node/custom_components.py
rename to src/packages/core/lfx/template/frontend_node/custom_components.py
diff --git a/src/backend/tests/unit/components/inputs/__init__.py b/src/packages/core/lfx/template/template/__init__.py
similarity index 100%
rename from src/backend/tests/unit/components/inputs/__init__.py
rename to src/packages/core/lfx/template/template/__init__.py
diff --git a/src/lfx/src/lfx/template/template/base.py b/src/packages/core/lfx/template/template/base.py
similarity index 100%
rename from src/lfx/src/lfx/template/template/base.py
rename to src/packages/core/lfx/template/template/base.py
diff --git a/src/lfx/src/lfx/template/utils.py b/src/packages/core/lfx/template/utils.py
similarity index 100%
rename from src/lfx/src/lfx/template/utils.py
rename to src/packages/core/lfx/template/utils.py
diff --git a/src/lfx/src/lfx/type_extraction.py b/src/packages/core/lfx/type_extraction.py
similarity index 100%
rename from src/lfx/src/lfx/type_extraction.py
rename to src/packages/core/lfx/type_extraction.py
diff --git a/src/lfx/src/lfx/type_extraction/__init__.py b/src/packages/core/lfx/type_extraction/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/type_extraction/__init__.py
rename to src/packages/core/lfx/type_extraction/__init__.py
diff --git a/src/lfx/src/lfx/type_extraction/type_extraction.py b/src/packages/core/lfx/type_extraction/type_extraction.py
similarity index 100%
rename from src/lfx/src/lfx/type_extraction/type_extraction.py
rename to src/packages/core/lfx/type_extraction/type_extraction.py
diff --git a/src/lfx/src/lfx/utils/__init__.py b/src/packages/core/lfx/utils/__init__.py
similarity index 100%
rename from src/lfx/src/lfx/utils/__init__.py
rename to src/packages/core/lfx/utils/__init__.py
diff --git a/src/lfx/src/lfx/utils/async_helpers.py b/src/packages/core/lfx/utils/async_helpers.py
similarity index 100%
rename from src/lfx/src/lfx/utils/async_helpers.py
rename to src/packages/core/lfx/utils/async_helpers.py
diff --git a/src/lfx/src/lfx/utils/component_utils.py b/src/packages/core/lfx/utils/component_utils.py
similarity index 100%
rename from src/lfx/src/lfx/utils/component_utils.py
rename to src/packages/core/lfx/utils/component_utils.py
diff --git a/src/lfx/src/lfx/utils/concurrency.py b/src/packages/core/lfx/utils/concurrency.py
similarity index 100%
rename from src/lfx/src/lfx/utils/concurrency.py
rename to src/packages/core/lfx/utils/concurrency.py
diff --git a/src/lfx/src/lfx/utils/connection_string_parser.py b/src/packages/core/lfx/utils/connection_string_parser.py
similarity index 100%
rename from src/lfx/src/lfx/utils/connection_string_parser.py
rename to src/packages/core/lfx/utils/connection_string_parser.py
diff --git a/src/lfx/src/lfx/utils/constants.py b/src/packages/core/lfx/utils/constants.py
similarity index 100%
rename from src/lfx/src/lfx/utils/constants.py
rename to src/packages/core/lfx/utils/constants.py
diff --git a/src/lfx/src/lfx/utils/data_structure.py b/src/packages/core/lfx/utils/data_structure.py
similarity index 100%
rename from src/lfx/src/lfx/utils/data_structure.py
rename to src/packages/core/lfx/utils/data_structure.py
diff --git a/src/lfx/src/lfx/utils/exceptions.py b/src/packages/core/lfx/utils/exceptions.py
similarity index 100%
rename from src/lfx/src/lfx/utils/exceptions.py
rename to src/packages/core/lfx/utils/exceptions.py
diff --git a/src/lfx/src/lfx/utils/helpers.py b/src/packages/core/lfx/utils/helpers.py
similarity index 100%
rename from src/lfx/src/lfx/utils/helpers.py
rename to src/packages/core/lfx/utils/helpers.py
diff --git a/src/lfx/src/lfx/utils/image.py b/src/packages/core/lfx/utils/image.py
similarity index 100%
rename from src/lfx/src/lfx/utils/image.py
rename to src/packages/core/lfx/utils/image.py
diff --git a/src/lfx/src/lfx/utils/lazy_load.py b/src/packages/core/lfx/utils/lazy_load.py
similarity index 100%
rename from src/lfx/src/lfx/utils/lazy_load.py
rename to src/packages/core/lfx/utils/lazy_load.py
diff --git a/src/lfx/src/lfx/utils/request_utils.py b/src/packages/core/lfx/utils/request_utils.py
similarity index 100%
rename from src/lfx/src/lfx/utils/request_utils.py
rename to src/packages/core/lfx/utils/request_utils.py
diff --git a/src/lfx/src/lfx/utils/schemas.py b/src/packages/core/lfx/utils/schemas.py
similarity index 100%
rename from src/lfx/src/lfx/utils/schemas.py
rename to src/packages/core/lfx/utils/schemas.py
diff --git a/src/lfx/src/lfx/utils/util.py b/src/packages/core/lfx/utils/util.py
similarity index 100%
rename from src/lfx/src/lfx/utils/util.py
rename to src/packages/core/lfx/utils/util.py
diff --git a/src/lfx/src/lfx/utils/util_strings.py b/src/packages/core/lfx/utils/util_strings.py
similarity index 100%
rename from src/lfx/src/lfx/utils/util_strings.py
rename to src/packages/core/lfx/utils/util_strings.py
diff --git a/src/lfx/src/lfx/utils/version.py b/src/packages/core/lfx/utils/version.py
similarity index 100%
rename from src/lfx/src/lfx/utils/version.py
rename to src/packages/core/lfx/utils/version.py
diff --git a/src/lfx/pyproject.toml b/src/packages/core/pyproject.toml
similarity index 94%
rename from src/lfx/pyproject.toml
rename to src/packages/core/pyproject.toml
index 14b2cb895d75..9ad33001bc0c 100644
--- a/src/lfx/pyproject.toml
+++ b/src/packages/core/pyproject.toml
@@ -47,7 +47,7 @@ requires = ["hatchling"]
build-backend = "hatchling.build"
[tool.hatch.build.targets.wheel]
-packages = ["src/lfx"]
+packages = ["lfx"]
[tool.ruff]
line-length = 120
@@ -85,16 +85,16 @@ external = ["RUF027"]
"S101",
"SLF001",
]
-"src/lfx/base/*" = [
+"src/packages/core/base/*" = [
"SLF001",
]
-"src/lfx/components/*" = [
+"src/packages/core/components/*" = [
"SLF001",
]
-"src/lfx/custom/*" = [
+"src/packages/core/custom/*" = [
"SLF001",
]
-"src/lfx/graph/*" = [
+"src/packages/core/graph/*" = [
"SLF001",
]
diff --git a/src/lfx/tests/__init__.py b/src/packages/core/tests/__init__.py
similarity index 100%
rename from src/lfx/tests/__init__.py
rename to src/packages/core/tests/__init__.py
diff --git a/src/lfx/tests/conftest.py b/src/packages/core/tests/conftest.py
similarity index 100%
rename from src/lfx/tests/conftest.py
rename to src/packages/core/tests/conftest.py
diff --git a/src/backend/tests/data/BasicChatwithPromptandHistory.json b/src/packages/core/tests/data/BasicChatwithPromptandHistory.json
similarity index 100%
rename from src/backend/tests/data/BasicChatwithPromptandHistory.json
rename to src/packages/core/tests/data/BasicChatwithPromptandHistory.json
diff --git a/src/backend/tests/data/ChatInputTest.json b/src/packages/core/tests/data/ChatInputTest.json
similarity index 100%
rename from src/backend/tests/data/ChatInputTest.json
rename to src/packages/core/tests/data/ChatInputTest.json
diff --git a/src/lfx/tests/data/LoopTest.json b/src/packages/core/tests/data/LoopTest.json
similarity index 100%
rename from src/lfx/tests/data/LoopTest.json
rename to src/packages/core/tests/data/LoopTest.json
diff --git a/src/backend/tests/data/MemoryChatbotNoLLM.json b/src/packages/core/tests/data/MemoryChatbotNoLLM.json
similarity index 100%
rename from src/backend/tests/data/MemoryChatbotNoLLM.json
rename to src/packages/core/tests/data/MemoryChatbotNoLLM.json
diff --git a/src/backend/tests/data/Openapi.json b/src/packages/core/tests/data/Openapi.json
similarity index 100%
rename from src/backend/tests/data/Openapi.json
rename to src/packages/core/tests/data/Openapi.json
diff --git a/src/backend/tests/data/SimpleAPITest.json b/src/packages/core/tests/data/SimpleAPITest.json
similarity index 100%
rename from src/backend/tests/data/SimpleAPITest.json
rename to src/packages/core/tests/data/SimpleAPITest.json
diff --git a/src/backend/tests/data/TwoOutputsTest.json b/src/packages/core/tests/data/TwoOutputsTest.json
similarity index 100%
rename from src/backend/tests/data/TwoOutputsTest.json
rename to src/packages/core/tests/data/TwoOutputsTest.json
diff --git a/src/backend/tests/data/Vector_store.json b/src/packages/core/tests/data/Vector_store.json
similarity index 100%
rename from src/backend/tests/data/Vector_store.json
rename to src/packages/core/tests/data/Vector_store.json
diff --git a/src/backend/tests/data/WebhookTest.json b/src/packages/core/tests/data/WebhookTest.json
similarity index 100%
rename from src/backend/tests/data/WebhookTest.json
rename to src/packages/core/tests/data/WebhookTest.json
diff --git a/src/backend/tests/unit/components/knowledge_bases/__init__.py b/src/packages/core/tests/data/__init__.py
similarity index 100%
rename from src/backend/tests/unit/components/knowledge_bases/__init__.py
rename to src/packages/core/tests/data/__init__.py
diff --git a/src/backend/tests/data/basic_example.json b/src/packages/core/tests/data/basic_example.json
similarity index 100%
rename from src/backend/tests/data/basic_example.json
rename to src/packages/core/tests/data/basic_example.json
diff --git a/src/lfx/tests/data/complex_chat_flow.py b/src/packages/core/tests/data/complex_chat_flow.py
similarity index 100%
rename from src/lfx/tests/data/complex_chat_flow.py
rename to src/packages/core/tests/data/complex_chat_flow.py
diff --git a/src/backend/tests/data/complex_example.json b/src/packages/core/tests/data/complex_example.json
similarity index 100%
rename from src/backend/tests/data/complex_example.json
rename to src/packages/core/tests/data/complex_example.json
diff --git a/src/backend/tests/data/component.py b/src/packages/core/tests/data/component.py
similarity index 100%
rename from src/backend/tests/data/component.py
rename to src/packages/core/tests/data/component.py
diff --git a/src/backend/tests/data/component_multiple_outputs.py b/src/packages/core/tests/data/component_multiple_outputs.py
similarity index 100%
rename from src/backend/tests/data/component_multiple_outputs.py
rename to src/packages/core/tests/data/component_multiple_outputs.py
diff --git a/src/backend/tests/data/component_nested_call.py b/src/packages/core/tests/data/component_nested_call.py
similarity index 100%
rename from src/backend/tests/data/component_nested_call.py
rename to src/packages/core/tests/data/component_nested_call.py
diff --git a/src/backend/tests/data/component_with_templatefield.py b/src/packages/core/tests/data/component_with_templatefield.py
similarity index 100%
rename from src/backend/tests/data/component_with_templatefield.py
rename to src/packages/core/tests/data/component_with_templatefield.py
diff --git a/src/backend/tests/data/debug_incoming_24k.raw b/src/packages/core/tests/data/debug_incoming_24k.raw
similarity index 100%
rename from src/backend/tests/data/debug_incoming_24k.raw
rename to src/packages/core/tests/data/debug_incoming_24k.raw
diff --git a/src/backend/tests/data/dynamic_output_component.py b/src/packages/core/tests/data/dynamic_output_component.py
similarity index 100%
rename from src/backend/tests/data/dynamic_output_component.py
rename to src/packages/core/tests/data/dynamic_output_component.py
diff --git a/src/backend/tests/data/env_variable_test.json b/src/packages/core/tests/data/env_variable_test.json
similarity index 100%
rename from src/backend/tests/data/env_variable_test.json
rename to src/packages/core/tests/data/env_variable_test.json
diff --git a/src/backend/tests/data/grouped_chat.json b/src/packages/core/tests/data/grouped_chat.json
similarity index 100%
rename from src/backend/tests/data/grouped_chat.json
rename to src/packages/core/tests/data/grouped_chat.json
diff --git a/src/backend/tests/data/one_group_chat.json b/src/packages/core/tests/data/one_group_chat.json
similarity index 100%
rename from src/backend/tests/data/one_group_chat.json
rename to src/packages/core/tests/data/one_group_chat.json
diff --git a/src/lfx/tests/data/simple_chat_no_llm.json b/src/packages/core/tests/data/simple_chat_no_llm.json
similarity index 100%
rename from src/lfx/tests/data/simple_chat_no_llm.json
rename to src/packages/core/tests/data/simple_chat_no_llm.json
diff --git a/src/lfx/tests/data/simple_chat_no_llm.py b/src/packages/core/tests/data/simple_chat_no_llm.py
similarity index 100%
rename from src/lfx/tests/data/simple_chat_no_llm.py
rename to src/packages/core/tests/data/simple_chat_no_llm.py
diff --git a/src/backend/tests/data/vector_store_grouped.json b/src/packages/core/tests/data/vector_store_grouped.json
similarity index 100%
rename from src/backend/tests/data/vector_store_grouped.json
rename to src/packages/core/tests/data/vector_store_grouped.json
diff --git a/src/lfx/tests/unit/__init__.py b/src/packages/core/tests/unit/__init__.py
similarity index 100%
rename from src/lfx/tests/unit/__init__.py
rename to src/packages/core/tests/unit/__init__.py
diff --git a/src/lfx/tests/unit/cli/__init__.py b/src/packages/core/tests/unit/cli/__init__.py
similarity index 100%
rename from src/lfx/tests/unit/cli/__init__.py
rename to src/packages/core/tests/unit/cli/__init__.py
diff --git a/src/lfx/tests/unit/cli/test_common.py b/src/packages/core/tests/unit/cli/test_common.py
similarity index 100%
rename from src/lfx/tests/unit/cli/test_common.py
rename to src/packages/core/tests/unit/cli/test_common.py
diff --git a/src/lfx/tests/unit/cli/test_run_command.py b/src/packages/core/tests/unit/cli/test_run_command.py
similarity index 100%
rename from src/lfx/tests/unit/cli/test_run_command.py
rename to src/packages/core/tests/unit/cli/test_run_command.py
diff --git a/src/lfx/tests/unit/cli/test_run_real_flows.py b/src/packages/core/tests/unit/cli/test_run_real_flows.py
similarity index 100%
rename from src/lfx/tests/unit/cli/test_run_real_flows.py
rename to src/packages/core/tests/unit/cli/test_run_real_flows.py
diff --git a/src/lfx/tests/unit/cli/test_script_loader.py b/src/packages/core/tests/unit/cli/test_script_loader.py
similarity index 100%
rename from src/lfx/tests/unit/cli/test_script_loader.py
rename to src/packages/core/tests/unit/cli/test_script_loader.py
diff --git a/src/lfx/tests/unit/cli/test_serve.py b/src/packages/core/tests/unit/cli/test_serve.py
similarity index 100%
rename from src/lfx/tests/unit/cli/test_serve.py
rename to src/packages/core/tests/unit/cli/test_serve.py
diff --git a/src/lfx/tests/unit/cli/test_serve_app.py b/src/packages/core/tests/unit/cli/test_serve_app.py
similarity index 100%
rename from src/lfx/tests/unit/cli/test_serve_app.py
rename to src/packages/core/tests/unit/cli/test_serve_app.py
diff --git a/src/lfx/tests/unit/cli/test_serve_app_streaming.py b/src/packages/core/tests/unit/cli/test_serve_app_streaming.py
similarity index 100%
rename from src/lfx/tests/unit/cli/test_serve_app_streaming.py
rename to src/packages/core/tests/unit/cli/test_serve_app_streaming.py
diff --git a/src/lfx/tests/unit/cli/test_serve_components.py b/src/packages/core/tests/unit/cli/test_serve_components.py
similarity index 100%
rename from src/lfx/tests/unit/cli/test_serve_components.py
rename to src/packages/core/tests/unit/cli/test_serve_components.py
diff --git a/src/lfx/tests/unit/cli/test_serve_simple.py b/src/packages/core/tests/unit/cli/test_serve_simple.py
similarity index 100%
rename from src/lfx/tests/unit/cli/test_serve_simple.py
rename to src/packages/core/tests/unit/cli/test_serve_simple.py
diff --git a/src/lfx/tests/unit/cli/test_validation.py b/src/packages/core/tests/unit/cli/test_validation.py
similarity index 100%
rename from src/lfx/tests/unit/cli/test_validation.py
rename to src/packages/core/tests/unit/cli/test_validation.py
diff --git a/src/backend/tests/unit/components/languagemodels/__init__.py b/src/packages/core/tests/unit/custom/__init__.py
similarity index 100%
rename from src/backend/tests/unit/components/languagemodels/__init__.py
rename to src/packages/core/tests/unit/custom/__init__.py
diff --git a/src/backend/tests/unit/components/logic/__init__.py b/src/packages/core/tests/unit/custom/component/__init__.py
similarity index 100%
rename from src/backend/tests/unit/components/logic/__init__.py
rename to src/packages/core/tests/unit/custom/component/__init__.py
diff --git a/src/lfx/tests/unit/custom/component/test_component_instance_attributes.py b/src/packages/core/tests/unit/custom/component/test_component_instance_attributes.py
similarity index 100%
rename from src/lfx/tests/unit/custom/component/test_component_instance_attributes.py
rename to src/packages/core/tests/unit/custom/component/test_component_instance_attributes.py
diff --git a/src/lfx/tests/unit/custom/component/test_component_to_tool.py b/src/packages/core/tests/unit/custom/component/test_component_to_tool.py
similarity index 100%
rename from src/lfx/tests/unit/custom/component/test_component_to_tool.py
rename to src/packages/core/tests/unit/custom/component/test_component_to_tool.py
diff --git a/src/lfx/tests/unit/custom/component/test_componet_set_functionality.py b/src/packages/core/tests/unit/custom/component/test_componet_set_functionality.py
similarity index 100%
rename from src/lfx/tests/unit/custom/component/test_componet_set_functionality.py
rename to src/packages/core/tests/unit/custom/component/test_componet_set_functionality.py
diff --git a/src/lfx/tests/unit/custom/component/test_dynamic_imports.py b/src/packages/core/tests/unit/custom/component/test_dynamic_imports.py
similarity index 100%
rename from src/lfx/tests/unit/custom/component/test_dynamic_imports.py
rename to src/packages/core/tests/unit/custom/component/test_dynamic_imports.py
diff --git a/src/lfx/tests/unit/custom/component/test_validate.py b/src/packages/core/tests/unit/custom/component/test_validate.py
similarity index 100%
rename from src/lfx/tests/unit/custom/component/test_validate.py
rename to src/packages/core/tests/unit/custom/component/test_validate.py
diff --git a/src/backend/tests/unit/components/models/__init__.py b/src/packages/core/tests/unit/custom/custom_component/__init__.py
similarity index 100%
rename from src/backend/tests/unit/components/models/__init__.py
rename to src/packages/core/tests/unit/custom/custom_component/__init__.py
diff --git a/src/lfx/tests/unit/custom/custom_component/test_component.py b/src/packages/core/tests/unit/custom/custom_component/test_component.py
similarity index 100%
rename from src/lfx/tests/unit/custom/custom_component/test_component.py
rename to src/packages/core/tests/unit/custom/custom_component/test_component.py
diff --git a/src/lfx/tests/unit/custom/custom_component/test_component_events.py b/src/packages/core/tests/unit/custom/custom_component/test_component_events.py
similarity index 100%
rename from src/lfx/tests/unit/custom/custom_component/test_component_events.py
rename to src/packages/core/tests/unit/custom/custom_component/test_component_events.py
diff --git a/src/lfx/tests/unit/custom/custom_component/test_update_outputs.py b/src/packages/core/tests/unit/custom/custom_component/test_update_outputs.py
similarity index 100%
rename from src/lfx/tests/unit/custom/custom_component/test_update_outputs.py
rename to src/packages/core/tests/unit/custom/custom_component/test_update_outputs.py
diff --git a/src/lfx/tests/unit/custom/test_utils_metadata.py b/src/packages/core/tests/unit/custom/test_utils_metadata.py
similarity index 100%
rename from src/lfx/tests/unit/custom/test_utils_metadata.py
rename to src/packages/core/tests/unit/custom/test_utils_metadata.py
diff --git a/src/backend/tests/unit/components/outputs/__init__.py b/src/packages/core/tests/unit/events/__init__.py
similarity index 100%
rename from src/backend/tests/unit/components/outputs/__init__.py
rename to src/packages/core/tests/unit/events/__init__.py
diff --git a/src/lfx/tests/unit/events/test_event_manager.py b/src/packages/core/tests/unit/events/test_event_manager.py
similarity index 100%
rename from src/lfx/tests/unit/events/test_event_manager.py
rename to src/packages/core/tests/unit/events/test_event_manager.py
diff --git a/src/backend/tests/unit/components/processing/__init__.py b/src/packages/core/tests/unit/graph/__init__.py
similarity index 100%
rename from src/backend/tests/unit/components/processing/__init__.py
rename to src/packages/core/tests/unit/graph/__init__.py
diff --git a/src/backend/tests/unit/components/prompts/__init__.py b/src/packages/core/tests/unit/graph/edge/__init__.py
similarity index 100%
rename from src/backend/tests/unit/components/prompts/__init__.py
rename to src/packages/core/tests/unit/graph/edge/__init__.py
diff --git a/src/lfx/tests/unit/graph/edge/test_edge_base.py b/src/packages/core/tests/unit/graph/edge/test_edge_base.py
similarity index 100%
rename from src/lfx/tests/unit/graph/edge/test_edge_base.py
rename to src/packages/core/tests/unit/graph/edge/test_edge_base.py
diff --git a/src/backend/tests/unit/components/prototypes/__init__.py b/src/packages/core/tests/unit/graph/graph/__init__.py
similarity index 100%
rename from src/backend/tests/unit/components/prototypes/__init__.py
rename to src/packages/core/tests/unit/graph/graph/__init__.py
diff --git a/src/backend/tests/unit/components/search/__init__.py b/src/packages/core/tests/unit/graph/graph/state/__init__.py
similarity index 100%
rename from src/backend/tests/unit/components/search/__init__.py
rename to src/packages/core/tests/unit/graph/graph/state/__init__.py
diff --git a/src/lfx/tests/unit/graph/graph/state/test_state_model.py b/src/packages/core/tests/unit/graph/graph/state/test_state_model.py
similarity index 100%
rename from src/lfx/tests/unit/graph/graph/state/test_state_model.py
rename to src/packages/core/tests/unit/graph/graph/state/test_state_model.py
diff --git a/src/lfx/tests/unit/graph/graph/test_base.py b/src/packages/core/tests/unit/graph/graph/test_base.py
similarity index 100%
rename from src/lfx/tests/unit/graph/graph/test_base.py
rename to src/packages/core/tests/unit/graph/graph/test_base.py
diff --git a/src/lfx/tests/unit/graph/graph/test_callback_graph.py b/src/packages/core/tests/unit/graph/graph/test_callback_graph.py
similarity index 100%
rename from src/lfx/tests/unit/graph/graph/test_callback_graph.py
rename to src/packages/core/tests/unit/graph/graph/test_callback_graph.py
diff --git a/src/lfx/tests/unit/graph/graph/test_cycles.py b/src/packages/core/tests/unit/graph/graph/test_cycles.py
similarity index 100%
rename from src/lfx/tests/unit/graph/graph/test_cycles.py
rename to src/packages/core/tests/unit/graph/graph/test_cycles.py
diff --git a/src/lfx/tests/unit/graph/graph/test_graph_state_model.py b/src/packages/core/tests/unit/graph/graph/test_graph_state_model.py
similarity index 100%
rename from src/lfx/tests/unit/graph/graph/test_graph_state_model.py
rename to src/packages/core/tests/unit/graph/graph/test_graph_state_model.py
diff --git a/src/lfx/tests/unit/graph/graph/test_runnable_vertices_manager.py b/src/packages/core/tests/unit/graph/graph/test_runnable_vertices_manager.py
similarity index 100%
rename from src/lfx/tests/unit/graph/graph/test_runnable_vertices_manager.py
rename to src/packages/core/tests/unit/graph/graph/test_runnable_vertices_manager.py
diff --git a/src/lfx/tests/unit/graph/graph/test_utils.py b/src/packages/core/tests/unit/graph/graph/test_utils.py
similarity index 100%
rename from src/lfx/tests/unit/graph/graph/test_utils.py
rename to src/packages/core/tests/unit/graph/graph/test_utils.py
diff --git a/src/lfx/tests/unit/graph/test_graph.py b/src/packages/core/tests/unit/graph/test_graph.py
similarity index 100%
rename from src/lfx/tests/unit/graph/test_graph.py
rename to src/packages/core/tests/unit/graph/test_graph.py
diff --git a/src/backend/tests/unit/components/tools/__init__.py b/src/packages/core/tests/unit/graph/vertex/__init__.py
similarity index 100%
rename from src/backend/tests/unit/components/tools/__init__.py
rename to src/packages/core/tests/unit/graph/vertex/__init__.py
diff --git a/src/lfx/tests/unit/graph/vertex/test_vertex_base.py b/src/packages/core/tests/unit/graph/vertex/test_vertex_base.py
similarity index 100%
rename from src/lfx/tests/unit/graph/vertex/test_vertex_base.py
rename to src/packages/core/tests/unit/graph/vertex/test_vertex_base.py
diff --git a/src/backend/tests/unit/components/vectorstores/__init__.py b/src/packages/core/tests/unit/inputs/__init__.py
similarity index 100%
rename from src/backend/tests/unit/components/vectorstores/__init__.py
rename to src/packages/core/tests/unit/inputs/__init__.py
diff --git a/src/lfx/tests/unit/inputs/test_inputs_schema.py b/src/packages/core/tests/unit/inputs/test_inputs_schema.py
similarity index 100%
rename from src/lfx/tests/unit/inputs/test_inputs_schema.py
rename to src/packages/core/tests/unit/inputs/test_inputs_schema.py
diff --git a/src/backend/tests/unit/custom/__init__.py b/src/packages/core/tests/unit/memory/__init__.py
similarity index 100%
rename from src/backend/tests/unit/custom/__init__.py
rename to src/packages/core/tests/unit/memory/__init__.py
diff --git a/src/lfx/tests/unit/memory/test_memory.py b/src/packages/core/tests/unit/memory/test_memory.py
similarity index 100%
rename from src/lfx/tests/unit/memory/test_memory.py
rename to src/packages/core/tests/unit/memory/test_memory.py
diff --git a/src/backend/tests/unit/custom/component/__init__.py b/src/packages/core/tests/unit/schema/__init__.py
similarity index 100%
rename from src/backend/tests/unit/custom/component/__init__.py
rename to src/packages/core/tests/unit/schema/__init__.py
diff --git a/src/lfx/tests/unit/schema/test_content_block.py b/src/packages/core/tests/unit/schema/test_content_block.py
similarity index 100%
rename from src/lfx/tests/unit/schema/test_content_block.py
rename to src/packages/core/tests/unit/schema/test_content_block.py
diff --git a/src/lfx/tests/unit/schema/test_content_types.py b/src/packages/core/tests/unit/schema/test_content_types.py
similarity index 100%
rename from src/lfx/tests/unit/schema/test_content_types.py
rename to src/packages/core/tests/unit/schema/test_content_types.py
diff --git a/src/lfx/tests/unit/schema/test_dotdict.py b/src/packages/core/tests/unit/schema/test_dotdict.py
similarity index 100%
rename from src/lfx/tests/unit/schema/test_dotdict.py
rename to src/packages/core/tests/unit/schema/test_dotdict.py
diff --git a/src/lfx/tests/unit/schema/test_image.py b/src/packages/core/tests/unit/schema/test_image.py
similarity index 100%
rename from src/lfx/tests/unit/schema/test_image.py
rename to src/packages/core/tests/unit/schema/test_image.py
diff --git a/src/lfx/tests/unit/schema/test_schema_data.py b/src/packages/core/tests/unit/schema/test_schema_data.py
similarity index 100%
rename from src/lfx/tests/unit/schema/test_schema_data.py
rename to src/packages/core/tests/unit/schema/test_schema_data.py
diff --git a/src/lfx/tests/unit/schema/test_schema_data_set.py b/src/packages/core/tests/unit/schema/test_schema_data_set.py
similarity index 100%
rename from src/lfx/tests/unit/schema/test_schema_data_set.py
rename to src/packages/core/tests/unit/schema/test_schema_data_set.py
diff --git a/src/lfx/tests/unit/schema/test_schema_dataframe.py b/src/packages/core/tests/unit/schema/test_schema_dataframe.py
similarity index 100%
rename from src/lfx/tests/unit/schema/test_schema_dataframe.py
rename to src/packages/core/tests/unit/schema/test_schema_dataframe.py
diff --git a/src/lfx/tests/unit/schema/test_schema_message.py b/src/packages/core/tests/unit/schema/test_schema_message.py
similarity index 100%
rename from src/lfx/tests/unit/schema/test_schema_message.py
rename to src/packages/core/tests/unit/schema/test_schema_message.py
diff --git a/src/lfx/tests/unit/schema/test_table.py b/src/packages/core/tests/unit/schema/test_table.py
similarity index 100%
rename from src/lfx/tests/unit/schema/test_table.py
rename to src/packages/core/tests/unit/schema/test_table.py
diff --git a/src/lfx/tests/unit/test_data_class.py b/src/packages/core/tests/unit/test_data_class.py
similarity index 100%
rename from src/lfx/tests/unit/test_data_class.py
rename to src/packages/core/tests/unit/test_data_class.py
diff --git a/src/lfx/tests/unit/test_import_utils.py b/src/packages/core/tests/unit/test_import_utils.py
similarity index 100%
rename from src/lfx/tests/unit/test_import_utils.py
rename to src/packages/core/tests/unit/test_import_utils.py
diff --git a/src/packages/core/uv.lock b/src/packages/core/uv.lock
new file mode 100644
index 000000000000..5ba74124c72a
--- /dev/null
+++ b/src/packages/core/uv.lock
@@ -0,0 +1,1598 @@
+version = 1
+revision = 3
+requires-python = ">=3.10, <3.14"
+resolution-markers = [
+ "python_full_version >= '3.12'",
+ "python_full_version == '3.11.*'",
+ "python_full_version < '3.11'",
+]
+
+[[package]]
+name = "aiofile"
+version = "3.9.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "caio" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/67/e2/d7cb819de8df6b5c1968a2756c3cb4122d4fa2b8fc768b53b7c9e5edb646/aiofile-3.9.0.tar.gz", hash = "sha256:e5ad718bb148b265b6df1b3752c4d1d83024b93da9bd599df74b9d9ffcf7919b", size = 17943, upload-time = "2024-10-08T10:39:35.846Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/50/25/da1f0b4dd970e52bf5a36c204c107e11a0c6d3ed195eba0bfbc664c312b2/aiofile-3.9.0-py3-none-any.whl", hash = "sha256:ce2f6c1571538cbdfa0143b04e16b208ecb0e9cb4148e528af8a640ed51cc8aa", size = 19539, upload-time = "2024-10-08T10:39:32.955Z" },
+]
+
+[[package]]
+name = "aiofiles"
+version = "24.1.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/0b/03/a88171e277e8caa88a4c77808c20ebb04ba74cc4681bf1e9416c862de237/aiofiles-24.1.0.tar.gz", hash = "sha256:22a075c9e5a3810f0c2e48f3008c94d68c65d763b9b03857924c99e57355166c", size = 30247, upload-time = "2024-06-24T11:02:03.584Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/a5/45/30bb92d442636f570cb5651bc661f52b610e2eec3f891a5dc3a4c3667db0/aiofiles-24.1.0-py3-none-any.whl", hash = "sha256:b4ec55f4195e3eb5d7abd1bf7e061763e864dd4954231fb8539a0ef8bb8260e5", size = 15896, upload-time = "2024-06-24T11:02:01.529Z" },
+]
+
+[[package]]
+name = "annotated-types"
+version = "0.7.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/ee/67/531ea369ba64dcff5ec9c3402f9f51bf748cec26dde048a2f973a4eea7f5/annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89", size = 16081, upload-time = "2024-05-20T21:33:25.928Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/78/b6/6307fbef88d9b5ee7421e68d78a9f162e0da4900bc5f5793f6d3d0e34fb8/annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53", size = 13643, upload-time = "2024-05-20T21:33:24.1Z" },
+]
+
+[[package]]
+name = "anyio"
+version = "4.10.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "exceptiongroup", marker = "python_full_version < '3.11'" },
+ { name = "idna" },
+ { name = "sniffio" },
+ { name = "typing-extensions", marker = "python_full_version < '3.13'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/f1/b4/636b3b65173d3ce9a38ef5f0522789614e590dab6a8d505340a4efe4c567/anyio-4.10.0.tar.gz", hash = "sha256:3f3fae35c96039744587aa5b8371e7e8e603c0702999535961dd336026973ba6", size = 213252, upload-time = "2025-08-04T08:54:26.451Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/6f/12/e5e0282d673bb9746bacfb6e2dba8719989d3660cdb2ea79aee9a9651afb/anyio-4.10.0-py3-none-any.whl", hash = "sha256:60e474ac86736bbfd6f210f7a61218939c318f43f9972497381f1c5e930ed3d1", size = 107213, upload-time = "2025-08-04T08:54:24.882Z" },
+]
+
+[[package]]
+name = "asgi-lifespan"
+version = "2.1.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "sniffio" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/6a/da/e7908b54e0f8043725a990bf625f2041ecf6bfe8eb7b19407f1c00b630f7/asgi-lifespan-2.1.0.tar.gz", hash = "sha256:5e2effaf0bfe39829cf2d64e7ecc47c7d86d676a6599f7afba378c31f5e3a308", size = 15627, upload-time = "2023-03-28T17:35:49.126Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/2f/f5/c36551e93acba41a59939ae6a0fb77ddb3f2e8e8caa716410c65f7341f72/asgi_lifespan-2.1.0-py3-none-any.whl", hash = "sha256:ed840706680e28428c01e14afb3875d7d76d3206f3d5b2f2294e059b5c23804f", size = 10895, upload-time = "2023-03-28T17:35:47.772Z" },
+]
+
+[[package]]
+name = "asyncer"
+version = "0.0.8"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "anyio" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/ff/67/7ea59c3e69eaeee42e7fc91a5be67ca5849c8979acac2b920249760c6af2/asyncer-0.0.8.tar.gz", hash = "sha256:a589d980f57e20efb07ed91d0dbe67f1d2fd343e7142c66d3a099f05c620739c", size = 18217, upload-time = "2024-08-24T23:15:36.449Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/8a/04/15b6ca6b7842eda2748bda0a0af73f2d054e9344320f8bba01f994294bcb/asyncer-0.0.8-py3-none-any.whl", hash = "sha256:5920d48fc99c8f8f0f1576e1882f5022885589c5fcbc46ce4224ec3e53776eeb", size = 9209, upload-time = "2024-08-24T23:15:35.317Z" },
+]
+
+[[package]]
+name = "attrs"
+version = "25.3.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/5a/b0/1367933a8532ee6ff8d63537de4f1177af4bff9f3e829baf7331f595bb24/attrs-25.3.0.tar.gz", hash = "sha256:75d7cefc7fb576747b2c81b4442d4d4a1ce0900973527c011d1030fd3bf4af1b", size = 812032, upload-time = "2025-03-13T11:10:22.779Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/77/06/bb80f5f86020c4551da315d78b3ab75e8228f89f0162f2c3a819e407941a/attrs-25.3.0-py3-none-any.whl", hash = "sha256:427318ce031701fea540783410126f03899a97ffc6f61596ad581ac2e40e3bc3", size = 63815, upload-time = "2025-03-13T11:10:21.14Z" },
+]
+
+[[package]]
+name = "backports-asyncio-runner"
+version = "1.2.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/8e/ff/70dca7d7cb1cbc0edb2c6cc0c38b65cba36cccc491eca64cabd5fe7f8670/backports_asyncio_runner-1.2.0.tar.gz", hash = "sha256:a5aa7b2b7d8f8bfcaa2b57313f70792df84e32a2a746f585213373f900b42162", size = 69893, upload-time = "2025-07-02T02:27:15.685Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/a0/59/76ab57e3fe74484f48a53f8e337171b4a2349e506eabe136d7e01d059086/backports_asyncio_runner-1.2.0-py3-none-any.whl", hash = "sha256:0da0a936a8aeb554eccb426dc55af3ba63bcdc69fa1a600b5bb305413a4477b5", size = 12313, upload-time = "2025-07-02T02:27:14.263Z" },
+]
+
+[[package]]
+name = "blockbuster"
+version = "1.5.25"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "forbiddenfruit", marker = "implementation_name == 'cpython'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/7f/bc/57c49465decaeeedd58ce2d970b4cdfd93a74ba9993abff2dc498a31c283/blockbuster-1.5.25.tar.gz", hash = "sha256:b72f1d2aefdeecd2a820ddf1e1c8593bf00b96e9fdc4cd2199ebafd06f7cb8f0", size = 36058, upload-time = "2025-07-14T16:00:20.766Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/0b/01/dccc277c014f171f61a6047bb22c684e16c7f2db6bb5c8cce1feaf41ec55/blockbuster-1.5.25-py3-none-any.whl", hash = "sha256:cb06229762273e0f5f3accdaed3d2c5a3b61b055e38843de202311ede21bb0f5", size = 13196, upload-time = "2025-07-14T16:00:19.396Z" },
+]
+
+[[package]]
+name = "cachetools"
+version = "6.2.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/9d/61/e4fad8155db4a04bfb4734c7c8ff0882f078f24294d42798b3568eb63bff/cachetools-6.2.0.tar.gz", hash = "sha256:38b328c0889450f05f5e120f56ab68c8abaf424e1275522b138ffc93253f7e32", size = 30988, upload-time = "2025-08-25T18:57:30.924Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/6c/56/3124f61d37a7a4e7cc96afc5492c78ba0cb551151e530b54669ddd1436ef/cachetools-6.2.0-py3-none-any.whl", hash = "sha256:1c76a8960c0041fcc21097e357f882197c79da0dbff766e7317890a65d7d8ba6", size = 11276, upload-time = "2025-08-25T18:57:29.684Z" },
+]
+
+[[package]]
+name = "caio"
+version = "0.9.24"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/98/04/ec9b6864135032fd454f6cd1d9444e0bb01040196ad0cd776c061fc92c6b/caio-0.9.24.tar.gz", hash = "sha256:5bcdecaea02a9aa8e3acf0364eff8ad9903d57d70cdb274a42270126290a77f1", size = 27174, upload-time = "2025-04-23T16:31:19.191Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/5c/59/62e1fe2f11790d04cf6c54d1872444eab70ae4bad948277ed9f8532a7dcd/caio-0.9.24-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d80322126a97ba572412b17b2f086ff95195de2c4261deb19db6bfcdc9ef7540", size = 42066, upload-time = "2025-04-23T16:31:01.306Z" },
+ { url = "https://files.pythonhosted.org/packages/66/fb/134f5014937c454571c2510685ace79c5c1bb399446b3d2acd21e85930fc/caio-0.9.24-cp310-cp310-manylinux_2_34_aarch64.whl", hash = "sha256:37bc172349686139e8dc97fff7662c67b1837e18a67b99e8ef25585f2893d013", size = 79534, upload-time = "2025-04-23T16:31:03.111Z" },
+ { url = "https://files.pythonhosted.org/packages/85/dc/222f6c525f8e23850315ea82ad3ca01721ef9628d63daf98a3b6736efa75/caio-0.9.24-cp310-cp310-manylinux_2_34_x86_64.whl", hash = "sha256:ad7f0902bf952237e120606252c14ab3cb05995c9f79f39154b5248744864832", size = 77712, upload-time = "2025-04-23T16:31:04.468Z" },
+ { url = "https://files.pythonhosted.org/packages/f8/9d/4f9f58ef6b708e0bf67c6af0c1b3d21d4b1b6dc1a4c2d741793cf4ac8e5d/caio-0.9.24-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:925b9e3748ce1a79386dfb921c0aee450e43225534551abd1398b1c08f9ba29f", size = 42073, upload-time = "2025-04-23T16:31:05.853Z" },
+ { url = "https://files.pythonhosted.org/packages/57/89/6e6830c4920f47c0aabffd920893777595893eef9577a965e7511566a214/caio-0.9.24-cp311-cp311-manylinux_2_34_aarch64.whl", hash = "sha256:3b4dc0a8fb9a58ab40f967ad5a8a858cc0bfb2348a580b4142595849457f9c9a", size = 80116, upload-time = "2025-04-23T16:31:06.671Z" },
+ { url = "https://files.pythonhosted.org/packages/d5/58/25e43b2a46a802da39efa6d5e98a8dd9e2b92ec997d6c2ea1de216bf3f35/caio-0.9.24-cp311-cp311-manylinux_2_34_x86_64.whl", hash = "sha256:fa74d111b3b165bfad2e333367976bdf118bcf505a1cb44d3bcddea2849e3297", size = 78274, upload-time = "2025-04-23T16:31:07.553Z" },
+ { url = "https://files.pythonhosted.org/packages/5f/76/b33a89dc2516aae045ef509cf2febe7ffb2a36c4eebb8f301a7ef2093385/caio-0.9.24-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:7ae3566228383175265a7583107f21a7cb044a752ea29ba84fce7c1a49a05903", size = 42212, upload-time = "2025-04-23T16:31:08.457Z" },
+ { url = "https://files.pythonhosted.org/packages/a9/8c/cb62483e69309bbad503c2ace29c4ac3466558a20e9aed840d313e1dcacd/caio-0.9.24-cp312-cp312-manylinux_2_34_aarch64.whl", hash = "sha256:a306b0dda91cb4ca3170f066c114597f8ea41b3da578574a9d2b54f86963de68", size = 81517, upload-time = "2025-04-23T16:31:09.686Z" },
+ { url = "https://files.pythonhosted.org/packages/64/80/8a8cdfd4b47e06d1e9de6d5431c2603e0741282fa06f757f10c04e619d8f/caio-0.9.24-cp312-cp312-manylinux_2_34_x86_64.whl", hash = "sha256:8ee158e56128d865fb7d57a9c9c22fca4e8aa8d8664859c977a36fff3ccb3609", size = 80216, upload-time = "2025-04-23T16:31:10.98Z" },
+ { url = "https://files.pythonhosted.org/packages/66/35/06e77837fc5455d330c5502460fc3743989d4ff840b61aa79af3a7ec5b19/caio-0.9.24-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:1d47ef8d76aca74c17cb07339a441c5530fc4b8dd9222dfb1e1abd7f9f9b814f", size = 42214, upload-time = "2025-04-23T16:31:12.272Z" },
+ { url = "https://files.pythonhosted.org/packages/e0/e2/c16aeaea4b2103e04fdc2e7088ede6313e1971704c87fcd681b58ab1c6b4/caio-0.9.24-cp313-cp313-manylinux_2_34_aarch64.whl", hash = "sha256:d15fc746c4bf0077d75df05939d1e97c07ccaa8e580681a77021d6929f65d9f4", size = 81557, upload-time = "2025-04-23T16:31:13.526Z" },
+ { url = "https://files.pythonhosted.org/packages/78/3b/adeb0cffe98dbe60661f316ec0060037a5209a5ed8be38ac8e79fdbc856d/caio-0.9.24-cp313-cp313-manylinux_2_34_x86_64.whl", hash = "sha256:9368eae0a9badd5f31264896c51b47431d96c0d46f1979018fb1d20c49f56156", size = 80242, upload-time = "2025-04-23T16:31:14.365Z" },
+]
+
+[[package]]
+name = "certifi"
+version = "2025.8.3"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/dc/67/960ebe6bf230a96cda2e0abcf73af550ec4f090005363542f0765df162e0/certifi-2025.8.3.tar.gz", hash = "sha256:e564105f78ded564e3ae7c923924435e1daa7463faeab5bb932bc53ffae63407", size = 162386, upload-time = "2025-08-03T03:07:47.08Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/e5/48/1549795ba7742c948d2ad169c1c8cdbae65bc450d6cd753d124b17c8cd32/certifi-2025.8.3-py3-none-any.whl", hash = "sha256:f6c12493cfb1b06ba2ff328595af9350c65d6644968e5d3a2ffd78699af217a5", size = 161216, upload-time = "2025-08-03T03:07:45.777Z" },
+]
+
+[[package]]
+name = "chardet"
+version = "5.2.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/f3/0d/f7b6ab21ec75897ed80c17d79b15951a719226b9fababf1e40ea74d69079/chardet-5.2.0.tar.gz", hash = "sha256:1b3b6ff479a8c414bc3fa2c0852995695c4a026dcd6d0633b2dd092ca39c1cf7", size = 2069618, upload-time = "2023-08-01T19:23:02.662Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/38/6f/f5fbc992a329ee4e0f288c1fe0e2ad9485ed064cac731ed2fe47dcc38cbf/chardet-5.2.0-py3-none-any.whl", hash = "sha256:e1cf59446890a00105fe7b7912492ea04b6e6f06d4b742b2c788469e34c82970", size = 199385, upload-time = "2023-08-01T19:23:00.661Z" },
+]
+
+[[package]]
+name = "charset-normalizer"
+version = "3.4.3"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/83/2d/5fd176ceb9b2fc619e63405525573493ca23441330fcdaee6bef9460e924/charset_normalizer-3.4.3.tar.gz", hash = "sha256:6fce4b8500244f6fcb71465d4a4930d132ba9ab8e71a7859e6a5d59851068d14", size = 122371, upload-time = "2025-08-09T07:57:28.46Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/d6/98/f3b8013223728a99b908c9344da3aa04ee6e3fa235f19409033eda92fb78/charset_normalizer-3.4.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:fb7f67a1bfa6e40b438170ebdc8158b78dc465a5a67b6dde178a46987b244a72", size = 207695, upload-time = "2025-08-09T07:55:36.452Z" },
+ { url = "https://files.pythonhosted.org/packages/21/40/5188be1e3118c82dcb7c2a5ba101b783822cfb413a0268ed3be0468532de/charset_normalizer-3.4.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:cc9370a2da1ac13f0153780040f465839e6cccb4a1e44810124b4e22483c93fe", size = 147153, upload-time = "2025-08-09T07:55:38.467Z" },
+ { url = "https://files.pythonhosted.org/packages/37/60/5d0d74bc1e1380f0b72c327948d9c2aca14b46a9efd87604e724260f384c/charset_normalizer-3.4.3-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:07a0eae9e2787b586e129fdcbe1af6997f8d0e5abaa0bc98c0e20e124d67e601", size = 160428, upload-time = "2025-08-09T07:55:40.072Z" },
+ { url = "https://files.pythonhosted.org/packages/85/9a/d891f63722d9158688de58d050c59dc3da560ea7f04f4c53e769de5140f5/charset_normalizer-3.4.3-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:74d77e25adda8581ffc1c720f1c81ca082921329452eba58b16233ab1842141c", size = 157627, upload-time = "2025-08-09T07:55:41.706Z" },
+ { url = "https://files.pythonhosted.org/packages/65/1a/7425c952944a6521a9cfa7e675343f83fd82085b8af2b1373a2409c683dc/charset_normalizer-3.4.3-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d0e909868420b7049dafd3a31d45125b31143eec59235311fc4c57ea26a4acd2", size = 152388, upload-time = "2025-08-09T07:55:43.262Z" },
+ { url = "https://files.pythonhosted.org/packages/f0/c9/a2c9c2a355a8594ce2446085e2ec97fd44d323c684ff32042e2a6b718e1d/charset_normalizer-3.4.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:c6f162aabe9a91a309510d74eeb6507fab5fff92337a15acbe77753d88d9dcf0", size = 150077, upload-time = "2025-08-09T07:55:44.903Z" },
+ { url = "https://files.pythonhosted.org/packages/3b/38/20a1f44e4851aa1c9105d6e7110c9d020e093dfa5836d712a5f074a12bf7/charset_normalizer-3.4.3-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:4ca4c094de7771a98d7fbd67d9e5dbf1eb73efa4f744a730437d8a3a5cf994f0", size = 161631, upload-time = "2025-08-09T07:55:46.346Z" },
+ { url = "https://files.pythonhosted.org/packages/a4/fa/384d2c0f57edad03d7bec3ebefb462090d8905b4ff5a2d2525f3bb711fac/charset_normalizer-3.4.3-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:02425242e96bcf29a49711b0ca9f37e451da7c70562bc10e8ed992a5a7a25cc0", size = 159210, upload-time = "2025-08-09T07:55:47.539Z" },
+ { url = "https://files.pythonhosted.org/packages/33/9e/eca49d35867ca2db336b6ca27617deed4653b97ebf45dfc21311ce473c37/charset_normalizer-3.4.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:78deba4d8f9590fe4dae384aeff04082510a709957e968753ff3c48399f6f92a", size = 153739, upload-time = "2025-08-09T07:55:48.744Z" },
+ { url = "https://files.pythonhosted.org/packages/2a/91/26c3036e62dfe8de8061182d33be5025e2424002125c9500faff74a6735e/charset_normalizer-3.4.3-cp310-cp310-win32.whl", hash = "sha256:d79c198e27580c8e958906f803e63cddb77653731be08851c7df0b1a14a8fc0f", size = 99825, upload-time = "2025-08-09T07:55:50.305Z" },
+ { url = "https://files.pythonhosted.org/packages/e2/c6/f05db471f81af1fa01839d44ae2a8bfeec8d2a8b4590f16c4e7393afd323/charset_normalizer-3.4.3-cp310-cp310-win_amd64.whl", hash = "sha256:c6e490913a46fa054e03699c70019ab869e990270597018cef1d8562132c2669", size = 107452, upload-time = "2025-08-09T07:55:51.461Z" },
+ { url = "https://files.pythonhosted.org/packages/7f/b5/991245018615474a60965a7c9cd2b4efbaabd16d582a5547c47ee1c7730b/charset_normalizer-3.4.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:b256ee2e749283ef3ddcff51a675ff43798d92d746d1a6e4631bf8c707d22d0b", size = 204483, upload-time = "2025-08-09T07:55:53.12Z" },
+ { url = "https://files.pythonhosted.org/packages/c7/2a/ae245c41c06299ec18262825c1569c5d3298fc920e4ddf56ab011b417efd/charset_normalizer-3.4.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:13faeacfe61784e2559e690fc53fa4c5ae97c6fcedb8eb6fb8d0a15b475d2c64", size = 145520, upload-time = "2025-08-09T07:55:54.712Z" },
+ { url = "https://files.pythonhosted.org/packages/3a/a4/b3b6c76e7a635748c4421d2b92c7b8f90a432f98bda5082049af37ffc8e3/charset_normalizer-3.4.3-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:00237675befef519d9af72169d8604a067d92755e84fe76492fef5441db05b91", size = 158876, upload-time = "2025-08-09T07:55:56.024Z" },
+ { url = "https://files.pythonhosted.org/packages/e2/e6/63bb0e10f90a8243c5def74b5b105b3bbbfb3e7bb753915fe333fb0c11ea/charset_normalizer-3.4.3-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:585f3b2a80fbd26b048a0be90c5aae8f06605d3c92615911c3a2b03a8a3b796f", size = 156083, upload-time = "2025-08-09T07:55:57.582Z" },
+ { url = "https://files.pythonhosted.org/packages/87/df/b7737ff046c974b183ea9aa111b74185ac8c3a326c6262d413bd5a1b8c69/charset_normalizer-3.4.3-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0e78314bdc32fa80696f72fa16dc61168fda4d6a0c014e0380f9d02f0e5d8a07", size = 150295, upload-time = "2025-08-09T07:55:59.147Z" },
+ { url = "https://files.pythonhosted.org/packages/61/f1/190d9977e0084d3f1dc169acd060d479bbbc71b90bf3e7bf7b9927dec3eb/charset_normalizer-3.4.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:96b2b3d1a83ad55310de8c7b4a2d04d9277d5591f40761274856635acc5fcb30", size = 148379, upload-time = "2025-08-09T07:56:00.364Z" },
+ { url = "https://files.pythonhosted.org/packages/4c/92/27dbe365d34c68cfe0ca76f1edd70e8705d82b378cb54ebbaeabc2e3029d/charset_normalizer-3.4.3-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:939578d9d8fd4299220161fdd76e86c6a251987476f5243e8864a7844476ba14", size = 160018, upload-time = "2025-08-09T07:56:01.678Z" },
+ { url = "https://files.pythonhosted.org/packages/99/04/baae2a1ea1893a01635d475b9261c889a18fd48393634b6270827869fa34/charset_normalizer-3.4.3-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:fd10de089bcdcd1be95a2f73dbe6254798ec1bda9f450d5828c96f93e2536b9c", size = 157430, upload-time = "2025-08-09T07:56:02.87Z" },
+ { url = "https://files.pythonhosted.org/packages/2f/36/77da9c6a328c54d17b960c89eccacfab8271fdaaa228305330915b88afa9/charset_normalizer-3.4.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1e8ac75d72fa3775e0b7cb7e4629cec13b7514d928d15ef8ea06bca03ef01cae", size = 151600, upload-time = "2025-08-09T07:56:04.089Z" },
+ { url = "https://files.pythonhosted.org/packages/64/d4/9eb4ff2c167edbbf08cdd28e19078bf195762e9bd63371689cab5ecd3d0d/charset_normalizer-3.4.3-cp311-cp311-win32.whl", hash = "sha256:6cf8fd4c04756b6b60146d98cd8a77d0cdae0e1ca20329da2ac85eed779b6849", size = 99616, upload-time = "2025-08-09T07:56:05.658Z" },
+ { url = "https://files.pythonhosted.org/packages/f4/9c/996a4a028222e7761a96634d1820de8a744ff4327a00ada9c8942033089b/charset_normalizer-3.4.3-cp311-cp311-win_amd64.whl", hash = "sha256:31a9a6f775f9bcd865d88ee350f0ffb0e25936a7f930ca98995c05abf1faf21c", size = 107108, upload-time = "2025-08-09T07:56:07.176Z" },
+ { url = "https://files.pythonhosted.org/packages/e9/5e/14c94999e418d9b87682734589404a25854d5f5d0408df68bc15b6ff54bb/charset_normalizer-3.4.3-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:e28e334d3ff134e88989d90ba04b47d84382a828c061d0d1027b1b12a62b39b1", size = 205655, upload-time = "2025-08-09T07:56:08.475Z" },
+ { url = "https://files.pythonhosted.org/packages/7d/a8/c6ec5d389672521f644505a257f50544c074cf5fc292d5390331cd6fc9c3/charset_normalizer-3.4.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0cacf8f7297b0c4fcb74227692ca46b4a5852f8f4f24b3c766dd94a1075c4884", size = 146223, upload-time = "2025-08-09T07:56:09.708Z" },
+ { url = "https://files.pythonhosted.org/packages/fc/eb/a2ffb08547f4e1e5415fb69eb7db25932c52a52bed371429648db4d84fb1/charset_normalizer-3.4.3-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c6fd51128a41297f5409deab284fecbe5305ebd7e5a1f959bee1c054622b7018", size = 159366, upload-time = "2025-08-09T07:56:11.326Z" },
+ { url = "https://files.pythonhosted.org/packages/82/10/0fd19f20c624b278dddaf83b8464dcddc2456cb4b02bb902a6da126b87a1/charset_normalizer-3.4.3-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:3cfb2aad70f2c6debfbcb717f23b7eb55febc0bb23dcffc0f076009da10c6392", size = 157104, upload-time = "2025-08-09T07:56:13.014Z" },
+ { url = "https://files.pythonhosted.org/packages/16/ab/0233c3231af734f5dfcf0844aa9582d5a1466c985bbed6cedab85af9bfe3/charset_normalizer-3.4.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1606f4a55c0fd363d754049cdf400175ee96c992b1f8018b993941f221221c5f", size = 151830, upload-time = "2025-08-09T07:56:14.428Z" },
+ { url = "https://files.pythonhosted.org/packages/ae/02/e29e22b4e02839a0e4a06557b1999d0a47db3567e82989b5bb21f3fbbd9f/charset_normalizer-3.4.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:027b776c26d38b7f15b26a5da1044f376455fb3766df8fc38563b4efbc515154", size = 148854, upload-time = "2025-08-09T07:56:16.051Z" },
+ { url = "https://files.pythonhosted.org/packages/05/6b/e2539a0a4be302b481e8cafb5af8792da8093b486885a1ae4d15d452bcec/charset_normalizer-3.4.3-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:42e5088973e56e31e4fa58eb6bd709e42fc03799c11c42929592889a2e54c491", size = 160670, upload-time = "2025-08-09T07:56:17.314Z" },
+ { url = "https://files.pythonhosted.org/packages/31/e7/883ee5676a2ef217a40ce0bffcc3d0dfbf9e64cbcfbdf822c52981c3304b/charset_normalizer-3.4.3-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:cc34f233c9e71701040d772aa7490318673aa7164a0efe3172b2981218c26d93", size = 158501, upload-time = "2025-08-09T07:56:18.641Z" },
+ { url = "https://files.pythonhosted.org/packages/c1/35/6525b21aa0db614cf8b5792d232021dca3df7f90a1944db934efa5d20bb1/charset_normalizer-3.4.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:320e8e66157cc4e247d9ddca8e21f427efc7a04bbd0ac8a9faf56583fa543f9f", size = 153173, upload-time = "2025-08-09T07:56:20.289Z" },
+ { url = "https://files.pythonhosted.org/packages/50/ee/f4704bad8201de513fdc8aac1cabc87e38c5818c93857140e06e772b5892/charset_normalizer-3.4.3-cp312-cp312-win32.whl", hash = "sha256:fb6fecfd65564f208cbf0fba07f107fb661bcd1a7c389edbced3f7a493f70e37", size = 99822, upload-time = "2025-08-09T07:56:21.551Z" },
+ { url = "https://files.pythonhosted.org/packages/39/f5/3b3836ca6064d0992c58c7561c6b6eee1b3892e9665d650c803bd5614522/charset_normalizer-3.4.3-cp312-cp312-win_amd64.whl", hash = "sha256:86df271bf921c2ee3818f0522e9a5b8092ca2ad8b065ece5d7d9d0e9f4849bcc", size = 107543, upload-time = "2025-08-09T07:56:23.115Z" },
+ { url = "https://files.pythonhosted.org/packages/65/ca/2135ac97709b400c7654b4b764daf5c5567c2da45a30cdd20f9eefe2d658/charset_normalizer-3.4.3-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:14c2a87c65b351109f6abfc424cab3927b3bdece6f706e4d12faaf3d52ee5efe", size = 205326, upload-time = "2025-08-09T07:56:24.721Z" },
+ { url = "https://files.pythonhosted.org/packages/71/11/98a04c3c97dd34e49c7d247083af03645ca3730809a5509443f3c37f7c99/charset_normalizer-3.4.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:41d1fc408ff5fdfb910200ec0e74abc40387bccb3252f3f27c0676731df2b2c8", size = 146008, upload-time = "2025-08-09T07:56:26.004Z" },
+ { url = "https://files.pythonhosted.org/packages/60/f5/4659a4cb3c4ec146bec80c32d8bb16033752574c20b1252ee842a95d1a1e/charset_normalizer-3.4.3-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:1bb60174149316da1c35fa5233681f7c0f9f514509b8e399ab70fea5f17e45c9", size = 159196, upload-time = "2025-08-09T07:56:27.25Z" },
+ { url = "https://files.pythonhosted.org/packages/86/9e/f552f7a00611f168b9a5865a1414179b2c6de8235a4fa40189f6f79a1753/charset_normalizer-3.4.3-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:30d006f98569de3459c2fc1f2acde170b7b2bd265dc1943e87e1a4efe1b67c31", size = 156819, upload-time = "2025-08-09T07:56:28.515Z" },
+ { url = "https://files.pythonhosted.org/packages/7e/95/42aa2156235cbc8fa61208aded06ef46111c4d3f0de233107b3f38631803/charset_normalizer-3.4.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:416175faf02e4b0810f1f38bcb54682878a4af94059a1cd63b8747244420801f", size = 151350, upload-time = "2025-08-09T07:56:29.716Z" },
+ { url = "https://files.pythonhosted.org/packages/c2/a9/3865b02c56f300a6f94fc631ef54f0a8a29da74fb45a773dfd3dcd380af7/charset_normalizer-3.4.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:6aab0f181c486f973bc7262a97f5aca3ee7e1437011ef0c2ec04b5a11d16c927", size = 148644, upload-time = "2025-08-09T07:56:30.984Z" },
+ { url = "https://files.pythonhosted.org/packages/77/d9/cbcf1a2a5c7d7856f11e7ac2d782aec12bdfea60d104e60e0aa1c97849dc/charset_normalizer-3.4.3-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:fdabf8315679312cfa71302f9bd509ded4f2f263fb5b765cf1433b39106c3cc9", size = 160468, upload-time = "2025-08-09T07:56:32.252Z" },
+ { url = "https://files.pythonhosted.org/packages/f6/42/6f45efee8697b89fda4d50580f292b8f7f9306cb2971d4b53f8914e4d890/charset_normalizer-3.4.3-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:bd28b817ea8c70215401f657edef3a8aa83c29d447fb0b622c35403780ba11d5", size = 158187, upload-time = "2025-08-09T07:56:33.481Z" },
+ { url = "https://files.pythonhosted.org/packages/70/99/f1c3bdcfaa9c45b3ce96f70b14f070411366fa19549c1d4832c935d8e2c3/charset_normalizer-3.4.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:18343b2d246dc6761a249ba1fb13f9ee9a2bcd95decc767319506056ea4ad4dc", size = 152699, upload-time = "2025-08-09T07:56:34.739Z" },
+ { url = "https://files.pythonhosted.org/packages/a3/ad/b0081f2f99a4b194bcbb1934ef3b12aa4d9702ced80a37026b7607c72e58/charset_normalizer-3.4.3-cp313-cp313-win32.whl", hash = "sha256:6fb70de56f1859a3f71261cbe41005f56a7842cc348d3aeb26237560bfa5e0ce", size = 99580, upload-time = "2025-08-09T07:56:35.981Z" },
+ { url = "https://files.pythonhosted.org/packages/9a/8f/ae790790c7b64f925e5c953b924aaa42a243fb778fed9e41f147b2a5715a/charset_normalizer-3.4.3-cp313-cp313-win_amd64.whl", hash = "sha256:cf1ebb7d78e1ad8ec2a8c4732c7be2e736f6e5123a4146c5b89c9d1f585f8cef", size = 107366, upload-time = "2025-08-09T07:56:37.339Z" },
+ { url = "https://files.pythonhosted.org/packages/8a/1f/f041989e93b001bc4e44bb1669ccdcf54d3f00e628229a85b08d330615c5/charset_normalizer-3.4.3-py3-none-any.whl", hash = "sha256:ce571ab16d890d23b5c278547ba694193a45011ff86a9162a71307ed9f86759a", size = 53175, upload-time = "2025-08-09T07:57:26.864Z" },
+]
+
+[[package]]
+name = "click"
+version = "8.2.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "colorama", marker = "sys_platform == 'win32'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/60/6c/8ca2efa64cf75a977a0d7fac081354553ebe483345c734fb6b6515d96bbc/click-8.2.1.tar.gz", hash = "sha256:27c491cc05d968d271d5a1db13e3b5a184636d9d930f148c50b038f0d0646202", size = 286342, upload-time = "2025-05-20T23:19:49.832Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/85/32/10bb5764d90a8eee674e9dc6f4db6a0ab47c8c4d0d83c27f7c39ac415a4d/click-8.2.1-py3-none-any.whl", hash = "sha256:61a3265b914e850b85317d0b3109c7f8cd35a670f963866005d6ef1d5175a12b", size = 102215, upload-time = "2025-05-20T23:19:47.796Z" },
+]
+
+[[package]]
+name = "colorama"
+version = "0.4.6"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697, upload-time = "2022-10-25T02:36:22.414Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335, upload-time = "2022-10-25T02:36:20.889Z" },
+]
+
+[[package]]
+name = "coverage"
+version = "7.10.6"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/14/70/025b179c993f019105b79575ac6edb5e084fb0f0e63f15cdebef4e454fb5/coverage-7.10.6.tar.gz", hash = "sha256:f644a3ae5933a552a29dbb9aa2f90c677a875f80ebea028e5a52a4f429044b90", size = 823736, upload-time = "2025-08-29T15:35:16.668Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/a8/1d/2e64b43d978b5bd184e0756a41415597dfef30fcbd90b747474bd749d45f/coverage-7.10.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:70e7bfbd57126b5554aa482691145f798d7df77489a177a6bef80de78860a356", size = 217025, upload-time = "2025-08-29T15:32:57.169Z" },
+ { url = "https://files.pythonhosted.org/packages/23/62/b1e0f513417c02cc10ef735c3ee5186df55f190f70498b3702d516aad06f/coverage-7.10.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e41be6f0f19da64af13403e52f2dec38bbc2937af54df8ecef10850ff8d35301", size = 217419, upload-time = "2025-08-29T15:32:59.908Z" },
+ { url = "https://files.pythonhosted.org/packages/e7/16/b800640b7a43e7c538429e4d7223e0a94fd72453a1a048f70bf766f12e96/coverage-7.10.6-cp310-cp310-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:c61fc91ab80b23f5fddbee342d19662f3d3328173229caded831aa0bd7595460", size = 244180, upload-time = "2025-08-29T15:33:01.608Z" },
+ { url = "https://files.pythonhosted.org/packages/fb/6f/5e03631c3305cad187eaf76af0b559fff88af9a0b0c180d006fb02413d7a/coverage-7.10.6-cp310-cp310-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:10356fdd33a7cc06e8051413140bbdc6f972137508a3572e3f59f805cd2832fd", size = 245992, upload-time = "2025-08-29T15:33:03.239Z" },
+ { url = "https://files.pythonhosted.org/packages/eb/a1/f30ea0fb400b080730125b490771ec62b3375789f90af0bb68bfb8a921d7/coverage-7.10.6-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:80b1695cf7c5ebe7b44bf2521221b9bb8cdf69b1f24231149a7e3eb1ae5fa2fb", size = 247851, upload-time = "2025-08-29T15:33:04.603Z" },
+ { url = "https://files.pythonhosted.org/packages/02/8e/cfa8fee8e8ef9a6bb76c7bef039f3302f44e615d2194161a21d3d83ac2e9/coverage-7.10.6-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:2e4c33e6378b9d52d3454bd08847a8651f4ed23ddbb4a0520227bd346382bbc6", size = 245891, upload-time = "2025-08-29T15:33:06.176Z" },
+ { url = "https://files.pythonhosted.org/packages/93/a9/51be09b75c55c4f6c16d8d73a6a1d46ad764acca0eab48fa2ffaef5958fe/coverage-7.10.6-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:c8a3ec16e34ef980a46f60dc6ad86ec60f763c3f2fa0db6d261e6e754f72e945", size = 243909, upload-time = "2025-08-29T15:33:07.74Z" },
+ { url = "https://files.pythonhosted.org/packages/e9/a6/ba188b376529ce36483b2d585ca7bdac64aacbe5aa10da5978029a9c94db/coverage-7.10.6-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:7d79dabc0a56f5af990cc6da9ad1e40766e82773c075f09cc571e2076fef882e", size = 244786, upload-time = "2025-08-29T15:33:08.965Z" },
+ { url = "https://files.pythonhosted.org/packages/d0/4c/37ed872374a21813e0d3215256180c9a382c3f5ced6f2e5da0102fc2fd3e/coverage-7.10.6-cp310-cp310-win32.whl", hash = "sha256:86b9b59f2b16e981906e9d6383eb6446d5b46c278460ae2c36487667717eccf1", size = 219521, upload-time = "2025-08-29T15:33:10.599Z" },
+ { url = "https://files.pythonhosted.org/packages/8e/36/9311352fdc551dec5b973b61f4e453227ce482985a9368305880af4f85dd/coverage-7.10.6-cp310-cp310-win_amd64.whl", hash = "sha256:e132b9152749bd33534e5bd8565c7576f135f157b4029b975e15ee184325f528", size = 220417, upload-time = "2025-08-29T15:33:11.907Z" },
+ { url = "https://files.pythonhosted.org/packages/d4/16/2bea27e212c4980753d6d563a0803c150edeaaddb0771a50d2afc410a261/coverage-7.10.6-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c706db3cabb7ceef779de68270150665e710b46d56372455cd741184f3868d8f", size = 217129, upload-time = "2025-08-29T15:33:13.575Z" },
+ { url = "https://files.pythonhosted.org/packages/2a/51/e7159e068831ab37e31aac0969d47b8c5ee25b7d307b51e310ec34869315/coverage-7.10.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8e0c38dc289e0508ef68ec95834cb5d2e96fdbe792eaccaa1bccac3966bbadcc", size = 217532, upload-time = "2025-08-29T15:33:14.872Z" },
+ { url = "https://files.pythonhosted.org/packages/e7/c0/246ccbea53d6099325d25cd208df94ea435cd55f0db38099dd721efc7a1f/coverage-7.10.6-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:752a3005a1ded28f2f3a6e8787e24f28d6abe176ca64677bcd8d53d6fe2ec08a", size = 247931, upload-time = "2025-08-29T15:33:16.142Z" },
+ { url = "https://files.pythonhosted.org/packages/7d/fb/7435ef8ab9b2594a6e3f58505cc30e98ae8b33265d844007737946c59389/coverage-7.10.6-cp311-cp311-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:689920ecfd60f992cafca4f5477d55720466ad2c7fa29bb56ac8d44a1ac2b47a", size = 249864, upload-time = "2025-08-29T15:33:17.434Z" },
+ { url = "https://files.pythonhosted.org/packages/51/f8/d9d64e8da7bcddb094d511154824038833c81e3a039020a9d6539bf303e9/coverage-7.10.6-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ec98435796d2624d6905820a42f82149ee9fc4f2d45c2c5bc5a44481cc50db62", size = 251969, upload-time = "2025-08-29T15:33:18.822Z" },
+ { url = "https://files.pythonhosted.org/packages/43/28/c43ba0ef19f446d6463c751315140d8f2a521e04c3e79e5c5fe211bfa430/coverage-7.10.6-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:b37201ce4a458c7a758ecc4efa92fa8ed783c66e0fa3c42ae19fc454a0792153", size = 249659, upload-time = "2025-08-29T15:33:20.407Z" },
+ { url = "https://files.pythonhosted.org/packages/79/3e/53635bd0b72beaacf265784508a0b386defc9ab7fad99ff95f79ce9db555/coverage-7.10.6-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:2904271c80898663c810a6b067920a61dd8d38341244a3605bd31ab55250dad5", size = 247714, upload-time = "2025-08-29T15:33:21.751Z" },
+ { url = "https://files.pythonhosted.org/packages/4c/55/0964aa87126624e8c159e32b0bc4e84edef78c89a1a4b924d28dd8265625/coverage-7.10.6-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:5aea98383463d6e1fa4e95416d8de66f2d0cb588774ee20ae1b28df826bcb619", size = 248351, upload-time = "2025-08-29T15:33:23.105Z" },
+ { url = "https://files.pythonhosted.org/packages/eb/ab/6cfa9dc518c6c8e14a691c54e53a9433ba67336c760607e299bfcf520cb1/coverage-7.10.6-cp311-cp311-win32.whl", hash = "sha256:e3fb1fa01d3598002777dd259c0c2e6d9d5e10e7222976fc8e03992f972a2cba", size = 219562, upload-time = "2025-08-29T15:33:24.717Z" },
+ { url = "https://files.pythonhosted.org/packages/5b/18/99b25346690cbc55922e7cfef06d755d4abee803ef335baff0014268eff4/coverage-7.10.6-cp311-cp311-win_amd64.whl", hash = "sha256:f35ed9d945bece26553d5b4c8630453169672bea0050a564456eb88bdffd927e", size = 220453, upload-time = "2025-08-29T15:33:26.482Z" },
+ { url = "https://files.pythonhosted.org/packages/d8/ed/81d86648a07ccb124a5cf1f1a7788712b8d7216b593562683cd5c9b0d2c1/coverage-7.10.6-cp311-cp311-win_arm64.whl", hash = "sha256:99e1a305c7765631d74b98bf7dbf54eeea931f975e80f115437d23848ee8c27c", size = 219127, upload-time = "2025-08-29T15:33:27.777Z" },
+ { url = "https://files.pythonhosted.org/packages/26/06/263f3305c97ad78aab066d116b52250dd316e74fcc20c197b61e07eb391a/coverage-7.10.6-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:5b2dd6059938063a2c9fee1af729d4f2af28fd1a545e9b7652861f0d752ebcea", size = 217324, upload-time = "2025-08-29T15:33:29.06Z" },
+ { url = "https://files.pythonhosted.org/packages/e9/60/1e1ded9a4fe80d843d7d53b3e395c1db3ff32d6c301e501f393b2e6c1c1f/coverage-7.10.6-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:388d80e56191bf846c485c14ae2bc8898aa3124d9d35903fef7d907780477634", size = 217560, upload-time = "2025-08-29T15:33:30.748Z" },
+ { url = "https://files.pythonhosted.org/packages/b8/25/52136173c14e26dfed8b106ed725811bb53c30b896d04d28d74cb64318b3/coverage-7.10.6-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:90cb5b1a4670662719591aa92d0095bb41714970c0b065b02a2610172dbf0af6", size = 249053, upload-time = "2025-08-29T15:33:32.041Z" },
+ { url = "https://files.pythonhosted.org/packages/cb/1d/ae25a7dc58fcce8b172d42ffe5313fc267afe61c97fa872b80ee72d9515a/coverage-7.10.6-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:961834e2f2b863a0e14260a9a273aff07ff7818ab6e66d2addf5628590c628f9", size = 251802, upload-time = "2025-08-29T15:33:33.625Z" },
+ { url = "https://files.pythonhosted.org/packages/f5/7a/1f561d47743710fe996957ed7c124b421320f150f1d38523d8d9102d3e2a/coverage-7.10.6-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:bf9a19f5012dab774628491659646335b1928cfc931bf8d97b0d5918dd58033c", size = 252935, upload-time = "2025-08-29T15:33:34.909Z" },
+ { url = "https://files.pythonhosted.org/packages/6c/ad/8b97cd5d28aecdfde792dcbf646bac141167a5cacae2cd775998b45fabb5/coverage-7.10.6-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:99c4283e2a0e147b9c9cc6bc9c96124de9419d6044837e9799763a0e29a7321a", size = 250855, upload-time = "2025-08-29T15:33:36.922Z" },
+ { url = "https://files.pythonhosted.org/packages/33/6a/95c32b558d9a61858ff9d79580d3877df3eb5bc9eed0941b1f187c89e143/coverage-7.10.6-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:282b1b20f45df57cc508c1e033403f02283adfb67d4c9c35a90281d81e5c52c5", size = 248974, upload-time = "2025-08-29T15:33:38.175Z" },
+ { url = "https://files.pythonhosted.org/packages/0d/9c/8ce95dee640a38e760d5b747c10913e7a06554704d60b41e73fdea6a1ffd/coverage-7.10.6-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:8cdbe264f11afd69841bd8c0d83ca10b5b32853263ee62e6ac6a0ab63895f972", size = 250409, upload-time = "2025-08-29T15:33:39.447Z" },
+ { url = "https://files.pythonhosted.org/packages/04/12/7a55b0bdde78a98e2eb2356771fd2dcddb96579e8342bb52aa5bc52e96f0/coverage-7.10.6-cp312-cp312-win32.whl", hash = "sha256:a517feaf3a0a3eca1ee985d8373135cfdedfbba3882a5eab4362bda7c7cf518d", size = 219724, upload-time = "2025-08-29T15:33:41.172Z" },
+ { url = "https://files.pythonhosted.org/packages/36/4a/32b185b8b8e327802c9efce3d3108d2fe2d9d31f153a0f7ecfd59c773705/coverage-7.10.6-cp312-cp312-win_amd64.whl", hash = "sha256:856986eadf41f52b214176d894a7de05331117f6035a28ac0016c0f63d887629", size = 220536, upload-time = "2025-08-29T15:33:42.524Z" },
+ { url = "https://files.pythonhosted.org/packages/08/3a/d5d8dc703e4998038c3099eaf77adddb00536a3cec08c8dcd556a36a3eb4/coverage-7.10.6-cp312-cp312-win_arm64.whl", hash = "sha256:acf36b8268785aad739443fa2780c16260ee3fa09d12b3a70f772ef100939d80", size = 219171, upload-time = "2025-08-29T15:33:43.974Z" },
+ { url = "https://files.pythonhosted.org/packages/bd/e7/917e5953ea29a28c1057729c1d5af9084ab6d9c66217523fd0e10f14d8f6/coverage-7.10.6-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ffea0575345e9ee0144dfe5701aa17f3ba546f8c3bb48db62ae101afb740e7d6", size = 217351, upload-time = "2025-08-29T15:33:45.438Z" },
+ { url = "https://files.pythonhosted.org/packages/eb/86/2e161b93a4f11d0ea93f9bebb6a53f113d5d6e416d7561ca41bb0a29996b/coverage-7.10.6-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:95d91d7317cde40a1c249d6b7382750b7e6d86fad9d8eaf4fa3f8f44cf171e80", size = 217600, upload-time = "2025-08-29T15:33:47.269Z" },
+ { url = "https://files.pythonhosted.org/packages/0e/66/d03348fdd8df262b3a7fb4ee5727e6e4936e39e2f3a842e803196946f200/coverage-7.10.6-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:3e23dd5408fe71a356b41baa82892772a4cefcf758f2ca3383d2aa39e1b7a003", size = 248600, upload-time = "2025-08-29T15:33:48.953Z" },
+ { url = "https://files.pythonhosted.org/packages/73/dd/508420fb47d09d904d962f123221bc249f64b5e56aa93d5f5f7603be475f/coverage-7.10.6-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:0f3f56e4cb573755e96a16501a98bf211f100463d70275759e73f3cbc00d4f27", size = 251206, upload-time = "2025-08-29T15:33:50.697Z" },
+ { url = "https://files.pythonhosted.org/packages/e9/1f/9020135734184f439da85c70ea78194c2730e56c2d18aee6e8ff1719d50d/coverage-7.10.6-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:db4a1d897bbbe7339946ffa2fe60c10cc81c43fab8b062d3fcb84188688174a4", size = 252478, upload-time = "2025-08-29T15:33:52.303Z" },
+ { url = "https://files.pythonhosted.org/packages/a4/a4/3d228f3942bb5a2051fde28c136eea23a761177dc4ff4ef54533164ce255/coverage-7.10.6-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:d8fd7879082953c156d5b13c74aa6cca37f6a6f4747b39538504c3f9c63d043d", size = 250637, upload-time = "2025-08-29T15:33:53.67Z" },
+ { url = "https://files.pythonhosted.org/packages/36/e3/293dce8cdb9a83de971637afc59b7190faad60603b40e32635cbd15fbf61/coverage-7.10.6-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:28395ca3f71cd103b8c116333fa9db867f3a3e1ad6a084aa3725ae002b6583bc", size = 248529, upload-time = "2025-08-29T15:33:55.022Z" },
+ { url = "https://files.pythonhosted.org/packages/90/26/64eecfa214e80dd1d101e420cab2901827de0e49631d666543d0e53cf597/coverage-7.10.6-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:61c950fc33d29c91b9e18540e1aed7d9f6787cc870a3e4032493bbbe641d12fc", size = 250143, upload-time = "2025-08-29T15:33:56.386Z" },
+ { url = "https://files.pythonhosted.org/packages/3e/70/bd80588338f65ea5b0d97e424b820fb4068b9cfb9597fbd91963086e004b/coverage-7.10.6-cp313-cp313-win32.whl", hash = "sha256:160c00a5e6b6bdf4e5984b0ef21fc860bc94416c41b7df4d63f536d17c38902e", size = 219770, upload-time = "2025-08-29T15:33:58.063Z" },
+ { url = "https://files.pythonhosted.org/packages/a7/14/0b831122305abcc1060c008f6c97bbdc0a913ab47d65070a01dc50293c2b/coverage-7.10.6-cp313-cp313-win_amd64.whl", hash = "sha256:628055297f3e2aa181464c3808402887643405573eb3d9de060d81531fa79d32", size = 220566, upload-time = "2025-08-29T15:33:59.766Z" },
+ { url = "https://files.pythonhosted.org/packages/83/c6/81a83778c1f83f1a4a168ed6673eeedc205afb562d8500175292ca64b94e/coverage-7.10.6-cp313-cp313-win_arm64.whl", hash = "sha256:df4ec1f8540b0bcbe26ca7dd0f541847cc8a108b35596f9f91f59f0c060bfdd2", size = 219195, upload-time = "2025-08-29T15:34:01.191Z" },
+ { url = "https://files.pythonhosted.org/packages/d7/1c/ccccf4bf116f9517275fa85047495515add43e41dfe8e0bef6e333c6b344/coverage-7.10.6-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:c9a8b7a34a4de3ed987f636f71881cd3b8339f61118b1aa311fbda12741bff0b", size = 218059, upload-time = "2025-08-29T15:34:02.91Z" },
+ { url = "https://files.pythonhosted.org/packages/92/97/8a3ceff833d27c7492af4f39d5da6761e9ff624831db9e9f25b3886ddbca/coverage-7.10.6-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:8dd5af36092430c2b075cee966719898f2ae87b636cefb85a653f1d0ba5d5393", size = 218287, upload-time = "2025-08-29T15:34:05.106Z" },
+ { url = "https://files.pythonhosted.org/packages/92/d8/50b4a32580cf41ff0423777a2791aaf3269ab60c840b62009aec12d3970d/coverage-7.10.6-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:b0353b0f0850d49ada66fdd7d0c7cdb0f86b900bb9e367024fd14a60cecc1e27", size = 259625, upload-time = "2025-08-29T15:34:06.575Z" },
+ { url = "https://files.pythonhosted.org/packages/7e/7e/6a7df5a6fb440a0179d94a348eb6616ed4745e7df26bf2a02bc4db72c421/coverage-7.10.6-cp313-cp313t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:d6b9ae13d5d3e8aeca9ca94198aa7b3ebbc5acfada557d724f2a1f03d2c0b0df", size = 261801, upload-time = "2025-08-29T15:34:08.006Z" },
+ { url = "https://files.pythonhosted.org/packages/3a/4c/a270a414f4ed5d196b9d3d67922968e768cd971d1b251e1b4f75e9362f75/coverage-7.10.6-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:675824a363cc05781b1527b39dc2587b8984965834a748177ee3c37b64ffeafb", size = 264027, upload-time = "2025-08-29T15:34:09.806Z" },
+ { url = "https://files.pythonhosted.org/packages/9c/8b/3210d663d594926c12f373c5370bf1e7c5c3a427519a8afa65b561b9a55c/coverage-7.10.6-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:692d70ea725f471a547c305f0d0fc6a73480c62fb0da726370c088ab21aed282", size = 261576, upload-time = "2025-08-29T15:34:11.585Z" },
+ { url = "https://files.pythonhosted.org/packages/72/d0/e1961eff67e9e1dba3fc5eb7a4caf726b35a5b03776892da8d79ec895775/coverage-7.10.6-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:851430a9a361c7a8484a36126d1d0ff8d529d97385eacc8dfdc9bfc8c2d2cbe4", size = 259341, upload-time = "2025-08-29T15:34:13.159Z" },
+ { url = "https://files.pythonhosted.org/packages/3a/06/d6478d152cd189b33eac691cba27a40704990ba95de49771285f34a5861e/coverage-7.10.6-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:d9369a23186d189b2fc95cc08b8160ba242057e887d766864f7adf3c46b2df21", size = 260468, upload-time = "2025-08-29T15:34:14.571Z" },
+ { url = "https://files.pythonhosted.org/packages/ed/73/737440247c914a332f0b47f7598535b29965bf305e19bbc22d4c39615d2b/coverage-7.10.6-cp313-cp313t-win32.whl", hash = "sha256:92be86fcb125e9bda0da7806afd29a3fd33fdf58fba5d60318399adf40bf37d0", size = 220429, upload-time = "2025-08-29T15:34:16.394Z" },
+ { url = "https://files.pythonhosted.org/packages/bd/76/b92d3214740f2357ef4a27c75a526eb6c28f79c402e9f20a922c295c05e2/coverage-7.10.6-cp313-cp313t-win_amd64.whl", hash = "sha256:6b3039e2ca459a70c79523d39347d83b73f2f06af5624905eba7ec34d64d80b5", size = 221493, upload-time = "2025-08-29T15:34:17.835Z" },
+ { url = "https://files.pythonhosted.org/packages/fc/8e/6dcb29c599c8a1f654ec6cb68d76644fe635513af16e932d2d4ad1e5ac6e/coverage-7.10.6-cp313-cp313t-win_arm64.whl", hash = "sha256:3fb99d0786fe17b228eab663d16bee2288e8724d26a199c29325aac4b0319b9b", size = 219757, upload-time = "2025-08-29T15:34:19.248Z" },
+ { url = "https://files.pythonhosted.org/packages/44/0c/50db5379b615854b5cf89146f8f5bd1d5a9693d7f3a987e269693521c404/coverage-7.10.6-py3-none-any.whl", hash = "sha256:92c4ecf6bf11b2e85fd4d8204814dc26e6a19f0c9d938c207c5cb0eadfcabbe3", size = 208986, upload-time = "2025-08-29T15:35:14.506Z" },
+]
+
+[[package]]
+name = "defusedxml"
+version = "0.7.1"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/0f/d5/c66da9b79e5bdb124974bfe172b4daf3c984ebd9c2a06e2b8a4dc7331c72/defusedxml-0.7.1.tar.gz", hash = "sha256:1bb3032db185915b62d7c6209c5a8792be6a32ab2fedacc84e01b52c51aa3e69", size = 75520, upload-time = "2021-03-08T10:59:26.269Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/07/6c/aa3f2f849e01cb6a001cd8554a88d4c77c5c1a31c95bdf1cf9301e6d9ef4/defusedxml-0.7.1-py2.py3-none-any.whl", hash = "sha256:a352e7e428770286cc899e2542b6cdaedb2b4953ff269a210103ec58f6198a61", size = 25604, upload-time = "2021-03-08T10:59:24.45Z" },
+]
+
+[[package]]
+name = "docstring-parser"
+version = "0.17.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/b2/9d/c3b43da9515bd270df0f80548d9944e389870713cc1fe2b8fb35fe2bcefd/docstring_parser-0.17.0.tar.gz", hash = "sha256:583de4a309722b3315439bb31d64ba3eebada841f2e2cee23b99df001434c912", size = 27442, upload-time = "2025-07-21T07:35:01.868Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/55/e2/2537ebcff11c1ee1ff17d8d0b6f4db75873e3b0fb32c2d4a2ee31ecb310a/docstring_parser-0.17.0-py3-none-any.whl", hash = "sha256:cf2569abd23dce8099b300f9b4fa8191e9582dda731fd533daf54c4551658708", size = 36896, upload-time = "2025-07-21T07:35:00.684Z" },
+]
+
+[[package]]
+name = "emoji"
+version = "2.14.1"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/cb/7d/01cddcbb6f5cc0ba72e00ddf9b1fa206c802d557fd0a20b18e130edf1336/emoji-2.14.1.tar.gz", hash = "sha256:f8c50043d79a2c1410ebfae833ae1868d5941a67a6cd4d18377e2eb0bd79346b", size = 597182, upload-time = "2025-01-16T06:31:24.983Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/91/db/a0335710caaa6d0aebdaa65ad4df789c15d89b7babd9a30277838a7d9aac/emoji-2.14.1-py3-none-any.whl", hash = "sha256:35a8a486c1460addb1499e3bf7929d3889b2e2841a57401903699fef595e942b", size = 590617, upload-time = "2025-01-16T06:31:23.526Z" },
+]
+
+[[package]]
+name = "exceptiongroup"
+version = "1.3.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "typing-extensions", marker = "python_full_version < '3.11'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/0b/9f/a65090624ecf468cdca03533906e7c69ed7588582240cfe7cc9e770b50eb/exceptiongroup-1.3.0.tar.gz", hash = "sha256:b241f5885f560bc56a59ee63ca4c6a8bfa46ae4ad651af316d4e81817bb9fd88", size = 29749, upload-time = "2025-05-10T17:42:51.123Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/36/f4/c6e662dade71f56cd2f3735141b265c3c79293c109549c1e6933b0651ffc/exceptiongroup-1.3.0-py3-none-any.whl", hash = "sha256:4d111e6e0c13d0644cad6ddaa7ed0261a0b36971f6d23e7ec9b4b9097da78a10", size = 16674, upload-time = "2025-05-10T17:42:49.33Z" },
+]
+
+[[package]]
+name = "fastapi"
+version = "0.116.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "pydantic" },
+ { name = "starlette" },
+ { name = "typing-extensions" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/78/d7/6c8b3bfe33eeffa208183ec037fee0cce9f7f024089ab1c5d12ef04bd27c/fastapi-0.116.1.tar.gz", hash = "sha256:ed52cbf946abfd70c5a0dccb24673f0670deeb517a88b3544d03c2a6bf283143", size = 296485, upload-time = "2025-07-11T16:22:32.057Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/e5/47/d63c60f59a59467fda0f93f46335c9d18526d7071f025cb5b89d5353ea42/fastapi-0.116.1-py3-none-any.whl", hash = "sha256:c46ac7c312df840f0c9e220f7964bada936781bc4e2e6eb71f1c4d7553786565", size = 95631, upload-time = "2025-07-11T16:22:30.485Z" },
+]
+
+[[package]]
+name = "forbiddenfruit"
+version = "0.1.4"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/e6/79/d4f20e91327c98096d605646bdc6a5ffedae820f38d378d3515c42ec5e60/forbiddenfruit-0.1.4.tar.gz", hash = "sha256:e3f7e66561a29ae129aac139a85d610dbf3dd896128187ed5454b6421f624253", size = 43756, upload-time = "2021-01-16T21:03:35.401Z" }
+
+[[package]]
+name = "h11"
+version = "0.16.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/01/ee/02a2c011bdab74c6fb3c75474d40b3052059d95df7e73351460c8588d963/h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1", size = 101250, upload-time = "2025-04-24T03:35:25.427Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/04/4b/29cac41a4d98d144bf5f6d33995617b185d14b22401f75ca86f384e87ff1/h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86", size = 37515, upload-time = "2025-04-24T03:35:24.344Z" },
+]
+
+[[package]]
+name = "h2"
+version = "4.3.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "hpack" },
+ { name = "hyperframe" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/1d/17/afa56379f94ad0fe8defd37d6eb3f89a25404ffc71d4d848893d270325fc/h2-4.3.0.tar.gz", hash = "sha256:6c59efe4323fa18b47a632221a1888bd7fde6249819beda254aeca909f221bf1", size = 2152026, upload-time = "2025-08-23T18:12:19.778Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/69/b2/119f6e6dcbd96f9069ce9a2665e0146588dc9f88f29549711853645e736a/h2-4.3.0-py3-none-any.whl", hash = "sha256:c438f029a25f7945c69e0ccf0fb951dc3f73a5f6412981daee861431b70e2bdd", size = 61779, upload-time = "2025-08-23T18:12:17.779Z" },
+]
+
+[[package]]
+name = "hpack"
+version = "4.1.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/2c/48/71de9ed269fdae9c8057e5a4c0aa7402e8bb16f2c6e90b3aa53327b113f8/hpack-4.1.0.tar.gz", hash = "sha256:ec5eca154f7056aa06f196a557655c5b009b382873ac8d1e66e79e87535f1dca", size = 51276, upload-time = "2025-01-22T21:44:58.347Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/07/c6/80c95b1b2b94682a72cbdbfb85b81ae2daffa4291fbfa1b1464502ede10d/hpack-4.1.0-py3-none-any.whl", hash = "sha256:157ac792668d995c657d93111f46b4535ed114f0c9c8d672271bbec7eae1b496", size = 34357, upload-time = "2025-01-22T21:44:56.92Z" },
+]
+
+[[package]]
+name = "httpcore"
+version = "1.0.9"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "certifi" },
+ { name = "h11" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/06/94/82699a10bca87a5556c9c59b5963f2d039dbd239f25bc2a63907a05a14cb/httpcore-1.0.9.tar.gz", hash = "sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8", size = 85484, upload-time = "2025-04-24T22:06:22.219Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/7e/f5/f66802a942d491edb555dd61e3a9961140fd64c90bce1eafd741609d334d/httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55", size = 78784, upload-time = "2025-04-24T22:06:20.566Z" },
+]
+
+[[package]]
+name = "httpx"
+version = "0.28.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "anyio" },
+ { name = "certifi" },
+ { name = "httpcore" },
+ { name = "idna" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/b1/df/48c586a5fe32a0f01324ee087459e112ebb7224f646c0b5023f5e79e9956/httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc", size = 141406, upload-time = "2024-12-06T15:37:23.222Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/2a/39/e50c7c3a983047577ee07d2a9e53faf5a69493943ec3f6a384bdc792deb2/httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad", size = 73517, upload-time = "2024-12-06T15:37:21.509Z" },
+]
+
+[package.optional-dependencies]
+http2 = [
+ { name = "h2" },
+]
+
+[[package]]
+name = "hyperframe"
+version = "6.1.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/02/e7/94f8232d4a74cc99514c13a9f995811485a6903d48e5d952771ef6322e30/hyperframe-6.1.0.tar.gz", hash = "sha256:f630908a00854a7adeabd6382b43923a4c4cd4b821fcb527e6ab9e15382a3b08", size = 26566, upload-time = "2025-01-22T21:41:49.302Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/48/30/47d0bf6072f7252e6521f3447ccfa40b421b6824517f82854703d0f5a98b/hyperframe-6.1.0-py3-none-any.whl", hash = "sha256:b03380493a519fce58ea5af42e4a42317bf9bd425596f7a0835ffce80f1a42e5", size = 13007, upload-time = "2025-01-22T21:41:47.295Z" },
+]
+
+[[package]]
+name = "hypothesis"
+version = "6.138.15"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "attrs" },
+ { name = "exceptiongroup", marker = "python_full_version < '3.11'" },
+ { name = "sortedcontainers" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/3b/68/adc338edec178cf6c08b4843ea2b2d639d47bed4b06ea9331433b71acc0a/hypothesis-6.138.15.tar.gz", hash = "sha256:6b0e1aa182eacde87110995a3543530d69ef411f642162a656efcd46c2823ad1", size = 466116, upload-time = "2025-09-08T05:34:15.956Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/39/49/911eb0cd17884a7a6f510e78acf0a70592e414d194695a0c7c1db91645b2/hypothesis-6.138.15-py3-none-any.whl", hash = "sha256:b7cf743d461c319eb251a13c8e1dcf00f4ef7085e4ab5bf5abf102b2a5ffd694", size = 533621, upload-time = "2025-09-08T05:34:12.272Z" },
+]
+
+[[package]]
+name = "idna"
+version = "3.10"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/f1/70/7703c29685631f5a7590aa73f1f1d3fa9a380e654b86af429e0934a32f7d/idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9", size = 190490, upload-time = "2024-09-15T18:07:39.745Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/76/c6/c88e154df9c4e1a2a66ccf0005a88dfb2650c1dffb6f5ce603dfbd452ce3/idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3", size = 70442, upload-time = "2024-09-15T18:07:37.964Z" },
+]
+
+[[package]]
+name = "iniconfig"
+version = "2.1.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/f2/97/ebf4da567aa6827c909642694d71c9fcf53e5b504f2d96afea02718862f3/iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7", size = 4793, upload-time = "2025-03-19T20:09:59.721Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/2c/e1/e6716421ea10d38022b952c159d5161ca1193197fb744506875fbb87ea7b/iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760", size = 6050, upload-time = "2025-03-19T20:10:01.071Z" },
+]
+
+[[package]]
+name = "json-repair"
+version = "0.50.1"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/91/71/6d57ed93e43e98cdd124e82ab6231c6817f06a10743e7ae4bc6f66d03a02/json_repair-0.50.1.tar.gz", hash = "sha256:4ee69bc4be7330fbb90a3f19e890852c5fe1ceacec5ed1d2c25cdeeebdfaec76", size = 34864, upload-time = "2025-09-06T05:43:34.331Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/ad/be/b1e05740d9c6f333dab67910f3894e2e2416c1ef00f9f7e20a327ab1f396/json_repair-0.50.1-py3-none-any.whl", hash = "sha256:9b78358bb7572a6e0b8effe7a8bd8cb959a3e311144842b1d2363fe39e2f13c5", size = 26020, upload-time = "2025-09-06T05:43:32.718Z" },
+]
+
+[[package]]
+name = "jsonpatch"
+version = "1.33"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "jsonpointer" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/42/78/18813351fe5d63acad16aec57f94ec2b70a09e53ca98145589e185423873/jsonpatch-1.33.tar.gz", hash = "sha256:9fcd4009c41e6d12348b4a0ff2563ba56a2923a7dfee731d004e212e1ee5030c", size = 21699, upload-time = "2023-06-26T12:07:29.144Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/73/07/02e16ed01e04a374e644b575638ec7987ae846d25ad97bcc9945a3ee4b0e/jsonpatch-1.33-py2.py3-none-any.whl", hash = "sha256:0ae28c0cd062bbd8b8ecc26d7d164fbbea9652a1a3693f3b956c1eae5145dade", size = 12898, upload-time = "2023-06-16T21:01:28.466Z" },
+]
+
+[[package]]
+name = "jsonpointer"
+version = "3.0.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/6a/0a/eebeb1fa92507ea94016a2a790b93c2ae41a7e18778f85471dc54475ed25/jsonpointer-3.0.0.tar.gz", hash = "sha256:2b2d729f2091522d61c3b31f82e11870f60b68f43fbc705cb76bf4b832af59ef", size = 9114, upload-time = "2024-06-10T19:24:42.462Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/71/92/5e77f98553e9e75130c78900d000368476aed74276eb8ae8796f65f00918/jsonpointer-3.0.0-py2.py3-none-any.whl", hash = "sha256:13e088adc14fca8b6aa8177c044e12701e6ad4b28ff10e65f2267a90109c9942", size = 7595, upload-time = "2024-06-10T19:24:40.698Z" },
+]
+
+[[package]]
+name = "langchain-core"
+version = "0.3.76"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "jsonpatch" },
+ { name = "langsmith" },
+ { name = "packaging" },
+ { name = "pydantic" },
+ { name = "pyyaml" },
+ { name = "tenacity" },
+ { name = "typing-extensions" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/4f/4d/5e2ea7754ee0a1f524c412801c6ba9ad49318ecb58b0d524903c3d9efe0a/langchain_core-0.3.76.tar.gz", hash = "sha256:71136a122dd1abae2c289c5809d035cf12b5f2bb682d8a4c1078cd94feae7419", size = 573568, upload-time = "2025-09-10T14:49:39.863Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/77/b5/501c0ffcb09c734457ceaa86bc7b1dd37b6a261147bd653add03b838aacb/langchain_core-0.3.76-py3-none-any.whl", hash = "sha256:46e0eb48c7ac532432d51f8ca1ece1804c82afe9ae3dcf027b867edadf82b3ec", size = 447508, upload-time = "2025-09-10T14:49:38.179Z" },
+]
+
+[[package]]
+name = "langsmith"
+version = "0.4.27"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "httpx" },
+ { name = "orjson", marker = "platform_python_implementation != 'PyPy'" },
+ { name = "packaging" },
+ { name = "pydantic" },
+ { name = "requests" },
+ { name = "requests-toolbelt" },
+ { name = "zstandard" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/62/6f/7d88228b7614fa0204e58b8b8c46e6f564659ee07a525c8aeae77a05598a/langsmith-0.4.27.tar.gz", hash = "sha256:6e8bbc425797202952d4e849431e6276e7985b44536ec0582eb96eaf9129c393", size = 956062, upload-time = "2025-09-08T19:01:49.677Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/2d/26/99bc52e1c47fb4b995aece85a5313349a5e2559e4143ee2345d8bd1446ff/langsmith-0.4.27-py3-none-any.whl", hash = "sha256:23708e6478d1c74ac0e428bbc92df6704993e34305fb62a0c64d2fefc35bd67f", size = 384752, upload-time = "2025-09-08T19:01:47.362Z" },
+]
+
+[[package]]
+name = "lfx"
+version = "0.1.12"
+source = { editable = "." }
+dependencies = [
+ { name = "aiofile" },
+ { name = "aiofiles" },
+ { name = "asyncer" },
+ { name = "cachetools" },
+ { name = "chardet" },
+ { name = "defusedxml" },
+ { name = "docstring-parser" },
+ { name = "emoji" },
+ { name = "fastapi" },
+ { name = "httpx", extra = ["http2"] },
+ { name = "json-repair" },
+ { name = "langchain-core" },
+ { name = "loguru" },
+ { name = "nanoid" },
+ { name = "networkx", version = "3.4.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" },
+ { name = "networkx", version = "3.5", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" },
+ { name = "orjson" },
+ { name = "pandas" },
+ { name = "passlib" },
+ { name = "pillow" },
+ { name = "platformdirs" },
+ { name = "pydantic" },
+ { name = "pydantic-settings" },
+ { name = "python-dotenv" },
+ { name = "rich" },
+ { name = "structlog" },
+ { name = "tomli" },
+ { name = "typer" },
+ { name = "typing-extensions" },
+ { name = "uvicorn" },
+]
+
+[package.dev-dependencies]
+dev = [
+ { name = "asgi-lifespan" },
+ { name = "blockbuster" },
+ { name = "coverage" },
+ { name = "hypothesis" },
+ { name = "pytest" },
+ { name = "pytest-asyncio" },
+ { name = "ruff" },
+]
+
+[package.metadata]
+requires-dist = [
+ { name = "aiofile", specifier = ">=3.8.0" },
+ { name = "aiofiles", specifier = ">=24.1.0" },
+ { name = "asyncer", specifier = ">=0.0.8" },
+ { name = "cachetools", specifier = ">=5.5.2" },
+ { name = "chardet", specifier = ">=5.2.0" },
+ { name = "defusedxml", specifier = ">=0.7.1" },
+ { name = "docstring-parser", specifier = ">=0.16" },
+ { name = "emoji", specifier = ">=2.14.1" },
+ { name = "fastapi", specifier = ">=0.115.13" },
+ { name = "httpx", extras = ["http2"], specifier = ">=0.24.0" },
+ { name = "json-repair", specifier = ">=0.30.3" },
+ { name = "langchain-core", specifier = ">=0.3.66" },
+ { name = "loguru", specifier = ">=0.7.3" },
+ { name = "nanoid", specifier = ">=2.0.0" },
+ { name = "networkx", specifier = ">=3.4.2" },
+ { name = "orjson", specifier = ">=3.10.15" },
+ { name = "pandas", specifier = ">=2.0.0" },
+ { name = "passlib", specifier = ">=1.7.4" },
+ { name = "pillow", specifier = ">=10.0.0" },
+ { name = "platformdirs", specifier = ">=4.3.8" },
+ { name = "pydantic", specifier = ">=2.0.0" },
+ { name = "pydantic-settings", specifier = ">=2.10.1" },
+ { name = "python-dotenv", specifier = ">=1.0.0" },
+ { name = "rich", specifier = ">=13.0.0" },
+ { name = "structlog" },
+ { name = "tomli", specifier = ">=2.2.1" },
+ { name = "typer", specifier = ">=0.16.0" },
+ { name = "typing-extensions", specifier = ">=4.14.0" },
+ { name = "uvicorn", specifier = ">=0.34.3" },
+]
+
+[package.metadata.requires-dev]
+dev = [
+ { name = "asgi-lifespan", specifier = ">=2.1.0" },
+ { name = "blockbuster", specifier = ">=1.5.25" },
+ { name = "coverage", specifier = ">=7.9.2" },
+ { name = "hypothesis", specifier = ">=6.136.3" },
+ { name = "pytest", specifier = ">=8.4.1" },
+ { name = "pytest-asyncio", specifier = ">=0.26.0" },
+ { name = "ruff", specifier = ">=0.9.10" },
+]
+
+[[package]]
+name = "loguru"
+version = "0.7.3"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "colorama", marker = "sys_platform == 'win32'" },
+ { name = "win32-setctime", marker = "sys_platform == 'win32'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/3a/05/a1dae3dffd1116099471c643b8924f5aa6524411dc6c63fdae648c4f1aca/loguru-0.7.3.tar.gz", hash = "sha256:19480589e77d47b8d85b2c827ad95d49bf31b0dcde16593892eb51dd18706eb6", size = 63559, upload-time = "2024-12-06T11:20:56.608Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/0c/29/0348de65b8cc732daa3e33e67806420b2ae89bdce2b04af740289c5c6c8c/loguru-0.7.3-py3-none-any.whl", hash = "sha256:31a33c10c8e1e10422bfd431aeb5d351c7cf7fa671e3c4df004162264b28220c", size = 61595, upload-time = "2024-12-06T11:20:54.538Z" },
+]
+
+[[package]]
+name = "markdown-it-py"
+version = "4.0.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "mdurl" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/5b/f5/4ec618ed16cc4f8fb3b701563655a69816155e79e24a17b651541804721d/markdown_it_py-4.0.0.tar.gz", hash = "sha256:cb0a2b4aa34f932c007117b194e945bd74e0ec24133ceb5bac59009cda1cb9f3", size = 73070, upload-time = "2025-08-11T12:57:52.854Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/94/54/e7d793b573f298e1c9013b8c4dade17d481164aa517d1d7148619c2cedbf/markdown_it_py-4.0.0-py3-none-any.whl", hash = "sha256:87327c59b172c5011896038353a81343b6754500a08cd7a4973bb48c6d578147", size = 87321, upload-time = "2025-08-11T12:57:51.923Z" },
+]
+
+[[package]]
+name = "mdurl"
+version = "0.1.2"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/d6/54/cfe61301667036ec958cb99bd3efefba235e65cdeb9c84d24a8293ba1d90/mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba", size = 8729, upload-time = "2022-08-14T12:40:10.846Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/b3/38/89ba8ad64ae25be8de66a6d463314cf1eb366222074cfda9ee839c56a4b4/mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8", size = 9979, upload-time = "2022-08-14T12:40:09.779Z" },
+]
+
+[[package]]
+name = "nanoid"
+version = "2.0.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/b7/9d/0250bf5935d88e214df469d35eccc0f6ff7e9db046fc8a9aeb4b2a192775/nanoid-2.0.0.tar.gz", hash = "sha256:5a80cad5e9c6e9ae3a41fa2fb34ae189f7cb420b2a5d8f82bd9d23466e4efa68", size = 3290, upload-time = "2018-11-20T14:45:51.578Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/2e/0d/8630f13998638dc01e187fadd2e5c6d42d127d08aeb4943d231664d6e539/nanoid-2.0.0-py3-none-any.whl", hash = "sha256:90aefa650e328cffb0893bbd4c236cfd44c48bc1f2d0b525ecc53c3187b653bb", size = 5844, upload-time = "2018-11-20T14:45:50.165Z" },
+]
+
+[[package]]
+name = "networkx"
+version = "3.4.2"
+source = { registry = "https://pypi.org/simple" }
+resolution-markers = [
+ "python_full_version < '3.11'",
+]
+sdist = { url = "https://files.pythonhosted.org/packages/fd/1d/06475e1cd5264c0b870ea2cc6fdb3e37177c1e565c43f56ff17a10e3937f/networkx-3.4.2.tar.gz", hash = "sha256:307c3669428c5362aab27c8a1260aa8f47c4e91d3891f48be0141738d8d053e1", size = 2151368, upload-time = "2024-10-21T12:39:38.695Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/b9/54/dd730b32ea14ea797530a4479b2ed46a6fb250f682a9cfb997e968bf0261/networkx-3.4.2-py3-none-any.whl", hash = "sha256:df5d4365b724cf81b8c6a7312509d0c22386097011ad1abe274afd5e9d3bbc5f", size = 1723263, upload-time = "2024-10-21T12:39:36.247Z" },
+]
+
+[[package]]
+name = "networkx"
+version = "3.5"
+source = { registry = "https://pypi.org/simple" }
+resolution-markers = [
+ "python_full_version >= '3.12'",
+ "python_full_version == '3.11.*'",
+]
+sdist = { url = "https://files.pythonhosted.org/packages/6c/4f/ccdb8ad3a38e583f214547fd2f7ff1fc160c43a75af88e6aec213404b96a/networkx-3.5.tar.gz", hash = "sha256:d4c6f9cf81f52d69230866796b82afbccdec3db7ae4fbd1b65ea750feed50037", size = 2471065, upload-time = "2025-05-29T11:35:07.804Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/eb/8d/776adee7bbf76365fdd7f2552710282c79a4ead5d2a46408c9043a2b70ba/networkx-3.5-py3-none-any.whl", hash = "sha256:0030d386a9a06dee3565298b4a734b68589749a544acbb6c412dc9e2489ec6ec", size = 2034406, upload-time = "2025-05-29T11:35:04.961Z" },
+]
+
+[[package]]
+name = "numpy"
+version = "2.2.6"
+source = { registry = "https://pypi.org/simple" }
+resolution-markers = [
+ "python_full_version < '3.11'",
+]
+sdist = { url = "https://files.pythonhosted.org/packages/76/21/7d2a95e4bba9dc13d043ee156a356c0a8f0c6309dff6b21b4d71a073b8a8/numpy-2.2.6.tar.gz", hash = "sha256:e29554e2bef54a90aa5cc07da6ce955accb83f21ab5de01a62c8478897b264fd", size = 20276440, upload-time = "2025-05-17T22:38:04.611Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/9a/3e/ed6db5be21ce87955c0cbd3009f2803f59fa08df21b5df06862e2d8e2bdd/numpy-2.2.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b412caa66f72040e6d268491a59f2c43bf03eb6c96dd8f0307829feb7fa2b6fb", size = 21165245, upload-time = "2025-05-17T21:27:58.555Z" },
+ { url = "https://files.pythonhosted.org/packages/22/c2/4b9221495b2a132cc9d2eb862e21d42a009f5a60e45fc44b00118c174bff/numpy-2.2.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8e41fd67c52b86603a91c1a505ebaef50b3314de0213461c7a6e99c9a3beff90", size = 14360048, upload-time = "2025-05-17T21:28:21.406Z" },
+ { url = "https://files.pythonhosted.org/packages/fd/77/dc2fcfc66943c6410e2bf598062f5959372735ffda175b39906d54f02349/numpy-2.2.6-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:37e990a01ae6ec7fe7fa1c26c55ecb672dd98b19c3d0e1d1f326fa13cb38d163", size = 5340542, upload-time = "2025-05-17T21:28:30.931Z" },
+ { url = "https://files.pythonhosted.org/packages/7a/4f/1cb5fdc353a5f5cc7feb692db9b8ec2c3d6405453f982435efc52561df58/numpy-2.2.6-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:5a6429d4be8ca66d889b7cf70f536a397dc45ba6faeb5f8c5427935d9592e9cf", size = 6878301, upload-time = "2025-05-17T21:28:41.613Z" },
+ { url = "https://files.pythonhosted.org/packages/eb/17/96a3acd228cec142fcb8723bd3cc39c2a474f7dcf0a5d16731980bcafa95/numpy-2.2.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:efd28d4e9cd7d7a8d39074a4d44c63eda73401580c5c76acda2ce969e0a38e83", size = 14297320, upload-time = "2025-05-17T21:29:02.78Z" },
+ { url = "https://files.pythonhosted.org/packages/b4/63/3de6a34ad7ad6646ac7d2f55ebc6ad439dbbf9c4370017c50cf403fb19b5/numpy-2.2.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc7b73d02efb0e18c000e9ad8b83480dfcd5dfd11065997ed4c6747470ae8915", size = 16801050, upload-time = "2025-05-17T21:29:27.675Z" },
+ { url = "https://files.pythonhosted.org/packages/07/b6/89d837eddef52b3d0cec5c6ba0456c1bf1b9ef6a6672fc2b7873c3ec4e2e/numpy-2.2.6-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:74d4531beb257d2c3f4b261bfb0fc09e0f9ebb8842d82a7b4209415896adc680", size = 15807034, upload-time = "2025-05-17T21:29:51.102Z" },
+ { url = "https://files.pythonhosted.org/packages/01/c8/dc6ae86e3c61cfec1f178e5c9f7858584049b6093f843bca541f94120920/numpy-2.2.6-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:8fc377d995680230e83241d8a96def29f204b5782f371c532579b4f20607a289", size = 18614185, upload-time = "2025-05-17T21:30:18.703Z" },
+ { url = "https://files.pythonhosted.org/packages/5b/c5/0064b1b7e7c89137b471ccec1fd2282fceaae0ab3a9550f2568782d80357/numpy-2.2.6-cp310-cp310-win32.whl", hash = "sha256:b093dd74e50a8cba3e873868d9e93a85b78e0daf2e98c6797566ad8044e8363d", size = 6527149, upload-time = "2025-05-17T21:30:29.788Z" },
+ { url = "https://files.pythonhosted.org/packages/a3/dd/4b822569d6b96c39d1215dbae0582fd99954dcbcf0c1a13c61783feaca3f/numpy-2.2.6-cp310-cp310-win_amd64.whl", hash = "sha256:f0fd6321b839904e15c46e0d257fdd101dd7f530fe03fd6359c1ea63738703f3", size = 12904620, upload-time = "2025-05-17T21:30:48.994Z" },
+ { url = "https://files.pythonhosted.org/packages/da/a8/4f83e2aa666a9fbf56d6118faaaf5f1974d456b1823fda0a176eff722839/numpy-2.2.6-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f9f1adb22318e121c5c69a09142811a201ef17ab257a1e66ca3025065b7f53ae", size = 21176963, upload-time = "2025-05-17T21:31:19.36Z" },
+ { url = "https://files.pythonhosted.org/packages/b3/2b/64e1affc7972decb74c9e29e5649fac940514910960ba25cd9af4488b66c/numpy-2.2.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c820a93b0255bc360f53eca31a0e676fd1101f673dda8da93454a12e23fc5f7a", size = 14406743, upload-time = "2025-05-17T21:31:41.087Z" },
+ { url = "https://files.pythonhosted.org/packages/4a/9f/0121e375000b5e50ffdd8b25bf78d8e1a5aa4cca3f185d41265198c7b834/numpy-2.2.6-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:3d70692235e759f260c3d837193090014aebdf026dfd167834bcba43e30c2a42", size = 5352616, upload-time = "2025-05-17T21:31:50.072Z" },
+ { url = "https://files.pythonhosted.org/packages/31/0d/b48c405c91693635fbe2dcd7bc84a33a602add5f63286e024d3b6741411c/numpy-2.2.6-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:481b49095335f8eed42e39e8041327c05b0f6f4780488f61286ed3c01368d491", size = 6889579, upload-time = "2025-05-17T21:32:01.712Z" },
+ { url = "https://files.pythonhosted.org/packages/52/b8/7f0554d49b565d0171eab6e99001846882000883998e7b7d9f0d98b1f934/numpy-2.2.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b64d8d4d17135e00c8e346e0a738deb17e754230d7e0810ac5012750bbd85a5a", size = 14312005, upload-time = "2025-05-17T21:32:23.332Z" },
+ { url = "https://files.pythonhosted.org/packages/b3/dd/2238b898e51bd6d389b7389ffb20d7f4c10066d80351187ec8e303a5a475/numpy-2.2.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba10f8411898fc418a521833e014a77d3ca01c15b0c6cdcce6a0d2897e6dbbdf", size = 16821570, upload-time = "2025-05-17T21:32:47.991Z" },
+ { url = "https://files.pythonhosted.org/packages/83/6c/44d0325722cf644f191042bf47eedad61c1e6df2432ed65cbe28509d404e/numpy-2.2.6-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:bd48227a919f1bafbdda0583705e547892342c26fb127219d60a5c36882609d1", size = 15818548, upload-time = "2025-05-17T21:33:11.728Z" },
+ { url = "https://files.pythonhosted.org/packages/ae/9d/81e8216030ce66be25279098789b665d49ff19eef08bfa8cb96d4957f422/numpy-2.2.6-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:9551a499bf125c1d4f9e250377c1ee2eddd02e01eac6644c080162c0c51778ab", size = 18620521, upload-time = "2025-05-17T21:33:39.139Z" },
+ { url = "https://files.pythonhosted.org/packages/6a/fd/e19617b9530b031db51b0926eed5345ce8ddc669bb3bc0044b23e275ebe8/numpy-2.2.6-cp311-cp311-win32.whl", hash = "sha256:0678000bb9ac1475cd454c6b8c799206af8107e310843532b04d49649c717a47", size = 6525866, upload-time = "2025-05-17T21:33:50.273Z" },
+ { url = "https://files.pythonhosted.org/packages/31/0a/f354fb7176b81747d870f7991dc763e157a934c717b67b58456bc63da3df/numpy-2.2.6-cp311-cp311-win_amd64.whl", hash = "sha256:e8213002e427c69c45a52bbd94163084025f533a55a59d6f9c5b820774ef3303", size = 12907455, upload-time = "2025-05-17T21:34:09.135Z" },
+ { url = "https://files.pythonhosted.org/packages/82/5d/c00588b6cf18e1da539b45d3598d3557084990dcc4331960c15ee776ee41/numpy-2.2.6-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:41c5a21f4a04fa86436124d388f6ed60a9343a6f767fced1a8a71c3fbca038ff", size = 20875348, upload-time = "2025-05-17T21:34:39.648Z" },
+ { url = "https://files.pythonhosted.org/packages/66/ee/560deadcdde6c2f90200450d5938f63a34b37e27ebff162810f716f6a230/numpy-2.2.6-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:de749064336d37e340f640b05f24e9e3dd678c57318c7289d222a8a2f543e90c", size = 14119362, upload-time = "2025-05-17T21:35:01.241Z" },
+ { url = "https://files.pythonhosted.org/packages/3c/65/4baa99f1c53b30adf0acd9a5519078871ddde8d2339dc5a7fde80d9d87da/numpy-2.2.6-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:894b3a42502226a1cac872f840030665f33326fc3dac8e57c607905773cdcde3", size = 5084103, upload-time = "2025-05-17T21:35:10.622Z" },
+ { url = "https://files.pythonhosted.org/packages/cc/89/e5a34c071a0570cc40c9a54eb472d113eea6d002e9ae12bb3a8407fb912e/numpy-2.2.6-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:71594f7c51a18e728451bb50cc60a3ce4e6538822731b2933209a1f3614e9282", size = 6625382, upload-time = "2025-05-17T21:35:21.414Z" },
+ { url = "https://files.pythonhosted.org/packages/f8/35/8c80729f1ff76b3921d5c9487c7ac3de9b2a103b1cd05e905b3090513510/numpy-2.2.6-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f2618db89be1b4e05f7a1a847a9c1c0abd63e63a1607d892dd54668dd92faf87", size = 14018462, upload-time = "2025-05-17T21:35:42.174Z" },
+ { url = "https://files.pythonhosted.org/packages/8c/3d/1e1db36cfd41f895d266b103df00ca5b3cbe965184df824dec5c08c6b803/numpy-2.2.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd83c01228a688733f1ded5201c678f0c53ecc1006ffbc404db9f7a899ac6249", size = 16527618, upload-time = "2025-05-17T21:36:06.711Z" },
+ { url = "https://files.pythonhosted.org/packages/61/c6/03ed30992602c85aa3cd95b9070a514f8b3c33e31124694438d88809ae36/numpy-2.2.6-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:37c0ca431f82cd5fa716eca9506aefcabc247fb27ba69c5062a6d3ade8cf8f49", size = 15505511, upload-time = "2025-05-17T21:36:29.965Z" },
+ { url = "https://files.pythonhosted.org/packages/b7/25/5761d832a81df431e260719ec45de696414266613c9ee268394dd5ad8236/numpy-2.2.6-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fe27749d33bb772c80dcd84ae7e8df2adc920ae8297400dabec45f0dedb3f6de", size = 18313783, upload-time = "2025-05-17T21:36:56.883Z" },
+ { url = "https://files.pythonhosted.org/packages/57/0a/72d5a3527c5ebffcd47bde9162c39fae1f90138c961e5296491ce778e682/numpy-2.2.6-cp312-cp312-win32.whl", hash = "sha256:4eeaae00d789f66c7a25ac5f34b71a7035bb474e679f410e5e1a94deb24cf2d4", size = 6246506, upload-time = "2025-05-17T21:37:07.368Z" },
+ { url = "https://files.pythonhosted.org/packages/36/fa/8c9210162ca1b88529ab76b41ba02d433fd54fecaf6feb70ef9f124683f1/numpy-2.2.6-cp312-cp312-win_amd64.whl", hash = "sha256:c1f9540be57940698ed329904db803cf7a402f3fc200bfe599334c9bd84a40b2", size = 12614190, upload-time = "2025-05-17T21:37:26.213Z" },
+ { url = "https://files.pythonhosted.org/packages/f9/5c/6657823f4f594f72b5471f1db1ab12e26e890bb2e41897522d134d2a3e81/numpy-2.2.6-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:0811bb762109d9708cca4d0b13c4f67146e3c3b7cf8d34018c722adb2d957c84", size = 20867828, upload-time = "2025-05-17T21:37:56.699Z" },
+ { url = "https://files.pythonhosted.org/packages/dc/9e/14520dc3dadf3c803473bd07e9b2bd1b69bc583cb2497b47000fed2fa92f/numpy-2.2.6-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:287cc3162b6f01463ccd86be154f284d0893d2b3ed7292439ea97eafa8170e0b", size = 14143006, upload-time = "2025-05-17T21:38:18.291Z" },
+ { url = "https://files.pythonhosted.org/packages/4f/06/7e96c57d90bebdce9918412087fc22ca9851cceaf5567a45c1f404480e9e/numpy-2.2.6-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:f1372f041402e37e5e633e586f62aa53de2eac8d98cbfb822806ce4bbefcb74d", size = 5076765, upload-time = "2025-05-17T21:38:27.319Z" },
+ { url = "https://files.pythonhosted.org/packages/73/ed/63d920c23b4289fdac96ddbdd6132e9427790977d5457cd132f18e76eae0/numpy-2.2.6-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:55a4d33fa519660d69614a9fad433be87e5252f4b03850642f88993f7b2ca566", size = 6617736, upload-time = "2025-05-17T21:38:38.141Z" },
+ { url = "https://files.pythonhosted.org/packages/85/c5/e19c8f99d83fd377ec8c7e0cf627a8049746da54afc24ef0a0cb73d5dfb5/numpy-2.2.6-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f92729c95468a2f4f15e9bb94c432a9229d0d50de67304399627a943201baa2f", size = 14010719, upload-time = "2025-05-17T21:38:58.433Z" },
+ { url = "https://files.pythonhosted.org/packages/19/49/4df9123aafa7b539317bf6d342cb6d227e49f7a35b99c287a6109b13dd93/numpy-2.2.6-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1bc23a79bfabc5d056d106f9befb8d50c31ced2fbc70eedb8155aec74a45798f", size = 16526072, upload-time = "2025-05-17T21:39:22.638Z" },
+ { url = "https://files.pythonhosted.org/packages/b2/6c/04b5f47f4f32f7c2b0e7260442a8cbcf8168b0e1a41ff1495da42f42a14f/numpy-2.2.6-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:e3143e4451880bed956e706a3220b4e5cf6172ef05fcc397f6f36a550b1dd868", size = 15503213, upload-time = "2025-05-17T21:39:45.865Z" },
+ { url = "https://files.pythonhosted.org/packages/17/0a/5cd92e352c1307640d5b6fec1b2ffb06cd0dabe7d7b8227f97933d378422/numpy-2.2.6-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:b4f13750ce79751586ae2eb824ba7e1e8dba64784086c98cdbbcc6a42112ce0d", size = 18316632, upload-time = "2025-05-17T21:40:13.331Z" },
+ { url = "https://files.pythonhosted.org/packages/f0/3b/5cba2b1d88760ef86596ad0f3d484b1cbff7c115ae2429678465057c5155/numpy-2.2.6-cp313-cp313-win32.whl", hash = "sha256:5beb72339d9d4fa36522fc63802f469b13cdbe4fdab4a288f0c441b74272ebfd", size = 6244532, upload-time = "2025-05-17T21:43:46.099Z" },
+ { url = "https://files.pythonhosted.org/packages/cb/3b/d58c12eafcb298d4e6d0d40216866ab15f59e55d148a5658bb3132311fcf/numpy-2.2.6-cp313-cp313-win_amd64.whl", hash = "sha256:b0544343a702fa80c95ad5d3d608ea3599dd54d4632df855e4c8d24eb6ecfa1c", size = 12610885, upload-time = "2025-05-17T21:44:05.145Z" },
+ { url = "https://files.pythonhosted.org/packages/6b/9e/4bf918b818e516322db999ac25d00c75788ddfd2d2ade4fa66f1f38097e1/numpy-2.2.6-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:0bca768cd85ae743b2affdc762d617eddf3bcf8724435498a1e80132d04879e6", size = 20963467, upload-time = "2025-05-17T21:40:44Z" },
+ { url = "https://files.pythonhosted.org/packages/61/66/d2de6b291507517ff2e438e13ff7b1e2cdbdb7cb40b3ed475377aece69f9/numpy-2.2.6-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:fc0c5673685c508a142ca65209b4e79ed6740a4ed6b2267dbba90f34b0b3cfda", size = 14225144, upload-time = "2025-05-17T21:41:05.695Z" },
+ { url = "https://files.pythonhosted.org/packages/e4/25/480387655407ead912e28ba3a820bc69af9adf13bcbe40b299d454ec011f/numpy-2.2.6-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:5bd4fc3ac8926b3819797a7c0e2631eb889b4118a9898c84f585a54d475b7e40", size = 5200217, upload-time = "2025-05-17T21:41:15.903Z" },
+ { url = "https://files.pythonhosted.org/packages/aa/4a/6e313b5108f53dcbf3aca0c0f3e9c92f4c10ce57a0a721851f9785872895/numpy-2.2.6-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:fee4236c876c4e8369388054d02d0e9bb84821feb1a64dd59e137e6511a551f8", size = 6712014, upload-time = "2025-05-17T21:41:27.321Z" },
+ { url = "https://files.pythonhosted.org/packages/b7/30/172c2d5c4be71fdf476e9de553443cf8e25feddbe185e0bd88b096915bcc/numpy-2.2.6-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e1dda9c7e08dc141e0247a5b8f49cf05984955246a327d4c48bda16821947b2f", size = 14077935, upload-time = "2025-05-17T21:41:49.738Z" },
+ { url = "https://files.pythonhosted.org/packages/12/fb/9e743f8d4e4d3c710902cf87af3512082ae3d43b945d5d16563f26ec251d/numpy-2.2.6-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f447e6acb680fd307f40d3da4852208af94afdfab89cf850986c3ca00562f4fa", size = 16600122, upload-time = "2025-05-17T21:42:14.046Z" },
+ { url = "https://files.pythonhosted.org/packages/12/75/ee20da0e58d3a66f204f38916757e01e33a9737d0b22373b3eb5a27358f9/numpy-2.2.6-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:389d771b1623ec92636b0786bc4ae56abafad4a4c513d36a55dce14bd9ce8571", size = 15586143, upload-time = "2025-05-17T21:42:37.464Z" },
+ { url = "https://files.pythonhosted.org/packages/76/95/bef5b37f29fc5e739947e9ce5179ad402875633308504a52d188302319c8/numpy-2.2.6-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:8e9ace4a37db23421249ed236fdcdd457d671e25146786dfc96835cd951aa7c1", size = 18385260, upload-time = "2025-05-17T21:43:05.189Z" },
+ { url = "https://files.pythonhosted.org/packages/09/04/f2f83279d287407cf36a7a8053a5abe7be3622a4363337338f2585e4afda/numpy-2.2.6-cp313-cp313t-win32.whl", hash = "sha256:038613e9fb8c72b0a41f025a7e4c3f0b7a1b5d768ece4796b674c8f3fe13efff", size = 6377225, upload-time = "2025-05-17T21:43:16.254Z" },
+ { url = "https://files.pythonhosted.org/packages/67/0e/35082d13c09c02c011cf21570543d202ad929d961c02a147493cb0c2bdf5/numpy-2.2.6-cp313-cp313t-win_amd64.whl", hash = "sha256:6031dd6dfecc0cf9f668681a37648373bddd6421fff6c66ec1624eed0180ee06", size = 12771374, upload-time = "2025-05-17T21:43:35.479Z" },
+ { url = "https://files.pythonhosted.org/packages/9e/3b/d94a75f4dbf1ef5d321523ecac21ef23a3cd2ac8b78ae2aac40873590229/numpy-2.2.6-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:0b605b275d7bd0c640cad4e5d30fa701a8d59302e127e5f79138ad62762c3e3d", size = 21040391, upload-time = "2025-05-17T21:44:35.948Z" },
+ { url = "https://files.pythonhosted.org/packages/17/f4/09b2fa1b58f0fb4f7c7963a1649c64c4d315752240377ed74d9cd878f7b5/numpy-2.2.6-pp310-pypy310_pp73-macosx_14_0_x86_64.whl", hash = "sha256:7befc596a7dc9da8a337f79802ee8adb30a552a94f792b9c9d18c840055907db", size = 6786754, upload-time = "2025-05-17T21:44:47.446Z" },
+ { url = "https://files.pythonhosted.org/packages/af/30/feba75f143bdc868a1cc3f44ccfa6c4b9ec522b36458e738cd00f67b573f/numpy-2.2.6-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce47521a4754c8f4593837384bd3424880629f718d87c5d44f8ed763edd63543", size = 16643476, upload-time = "2025-05-17T21:45:11.871Z" },
+ { url = "https://files.pythonhosted.org/packages/37/48/ac2a9584402fb6c0cd5b5d1a91dcf176b15760130dd386bbafdbfe3640bf/numpy-2.2.6-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:d042d24c90c41b54fd506da306759e06e568864df8ec17ccc17e9e884634fd00", size = 12812666, upload-time = "2025-05-17T21:45:31.426Z" },
+]
+
+[[package]]
+name = "numpy"
+version = "2.3.3"
+source = { registry = "https://pypi.org/simple" }
+resolution-markers = [
+ "python_full_version >= '3.12'",
+ "python_full_version == '3.11.*'",
+]
+sdist = { url = "https://files.pythonhosted.org/packages/d0/19/95b3d357407220ed24c139018d2518fab0a61a948e68286a25f1a4d049ff/numpy-2.3.3.tar.gz", hash = "sha256:ddc7c39727ba62b80dfdbedf400d1c10ddfa8eefbd7ec8dcb118be8b56d31029", size = 20576648, upload-time = "2025-09-09T16:54:12.543Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/7a/45/e80d203ef6b267aa29b22714fb558930b27960a0c5ce3c19c999232bb3eb/numpy-2.3.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0ffc4f5caba7dfcbe944ed674b7eef683c7e94874046454bb79ed7ee0236f59d", size = 21259253, upload-time = "2025-09-09T15:56:02.094Z" },
+ { url = "https://files.pythonhosted.org/packages/52/18/cf2c648fccf339e59302e00e5f2bc87725a3ce1992f30f3f78c9044d7c43/numpy-2.3.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e7e946c7170858a0295f79a60214424caac2ffdb0063d4d79cb681f9aa0aa569", size = 14450980, upload-time = "2025-09-09T15:56:05.926Z" },
+ { url = "https://files.pythonhosted.org/packages/93/fb/9af1082bec870188c42a1c239839915b74a5099c392389ff04215dcee812/numpy-2.3.3-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:cd4260f64bc794c3390a63bf0728220dd1a68170c169088a1e0dfa2fde1be12f", size = 5379709, upload-time = "2025-09-09T15:56:07.95Z" },
+ { url = "https://files.pythonhosted.org/packages/75/0f/bfd7abca52bcbf9a4a65abc83fe18ef01ccdeb37bfb28bbd6ad613447c79/numpy-2.3.3-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:f0ddb4b96a87b6728df9362135e764eac3cfa674499943ebc44ce96c478ab125", size = 6913923, upload-time = "2025-09-09T15:56:09.443Z" },
+ { url = "https://files.pythonhosted.org/packages/79/55/d69adad255e87ab7afda1caf93ca997859092afeb697703e2f010f7c2e55/numpy-2.3.3-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:afd07d377f478344ec6ca2b8d4ca08ae8bd44706763d1efb56397de606393f48", size = 14589591, upload-time = "2025-09-09T15:56:11.234Z" },
+ { url = "https://files.pythonhosted.org/packages/10/a2/010b0e27ddeacab7839957d7a8f00e91206e0c2c47abbb5f35a2630e5387/numpy-2.3.3-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bc92a5dedcc53857249ca51ef29f5e5f2f8c513e22cfb90faeb20343b8c6f7a6", size = 16938714, upload-time = "2025-09-09T15:56:14.637Z" },
+ { url = "https://files.pythonhosted.org/packages/1c/6b/12ce8ede632c7126eb2762b9e15e18e204b81725b81f35176eac14dc5b82/numpy-2.3.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:7af05ed4dc19f308e1d9fc759f36f21921eb7bbfc82843eeec6b2a2863a0aefa", size = 16370592, upload-time = "2025-09-09T15:56:17.285Z" },
+ { url = "https://files.pythonhosted.org/packages/b4/35/aba8568b2593067bb6a8fe4c52babb23b4c3b9c80e1b49dff03a09925e4a/numpy-2.3.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:433bf137e338677cebdd5beac0199ac84712ad9d630b74eceeb759eaa45ddf30", size = 18884474, upload-time = "2025-09-09T15:56:20.943Z" },
+ { url = "https://files.pythonhosted.org/packages/45/fa/7f43ba10c77575e8be7b0138d107e4f44ca4a1ef322cd16980ea3e8b8222/numpy-2.3.3-cp311-cp311-win32.whl", hash = "sha256:eb63d443d7b4ffd1e873f8155260d7f58e7e4b095961b01c91062935c2491e57", size = 6599794, upload-time = "2025-09-09T15:56:23.258Z" },
+ { url = "https://files.pythonhosted.org/packages/0a/a2/a4f78cb2241fe5664a22a10332f2be886dcdea8784c9f6a01c272da9b426/numpy-2.3.3-cp311-cp311-win_amd64.whl", hash = "sha256:ec9d249840f6a565f58d8f913bccac2444235025bbb13e9a4681783572ee3caa", size = 13088104, upload-time = "2025-09-09T15:56:25.476Z" },
+ { url = "https://files.pythonhosted.org/packages/79/64/e424e975adbd38282ebcd4891661965b78783de893b381cbc4832fb9beb2/numpy-2.3.3-cp311-cp311-win_arm64.whl", hash = "sha256:74c2a948d02f88c11a3c075d9733f1ae67d97c6bdb97f2bb542f980458b257e7", size = 10460772, upload-time = "2025-09-09T15:56:27.679Z" },
+ { url = "https://files.pythonhosted.org/packages/51/5d/bb7fc075b762c96329147799e1bcc9176ab07ca6375ea976c475482ad5b3/numpy-2.3.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:cfdd09f9c84a1a934cde1eec2267f0a43a7cd44b2cca4ff95b7c0d14d144b0bf", size = 20957014, upload-time = "2025-09-09T15:56:29.966Z" },
+ { url = "https://files.pythonhosted.org/packages/6b/0e/c6211bb92af26517acd52125a237a92afe9c3124c6a68d3b9f81b62a0568/numpy-2.3.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:cb32e3cf0f762aee47ad1ddc6672988f7f27045b0783c887190545baba73aa25", size = 14185220, upload-time = "2025-09-09T15:56:32.175Z" },
+ { url = "https://files.pythonhosted.org/packages/22/f2/07bb754eb2ede9073f4054f7c0286b0d9d2e23982e090a80d478b26d35ca/numpy-2.3.3-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:396b254daeb0a57b1fe0ecb5e3cff6fa79a380fa97c8f7781a6d08cd429418fe", size = 5113918, upload-time = "2025-09-09T15:56:34.175Z" },
+ { url = "https://files.pythonhosted.org/packages/81/0a/afa51697e9fb74642f231ea36aca80fa17c8fb89f7a82abd5174023c3960/numpy-2.3.3-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:067e3d7159a5d8f8a0b46ee11148fc35ca9b21f61e3c49fbd0a027450e65a33b", size = 6647922, upload-time = "2025-09-09T15:56:36.149Z" },
+ { url = "https://files.pythonhosted.org/packages/5d/f5/122d9cdb3f51c520d150fef6e87df9279e33d19a9611a87c0d2cf78a89f4/numpy-2.3.3-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1c02d0629d25d426585fb2e45a66154081b9fa677bc92a881ff1d216bc9919a8", size = 14281991, upload-time = "2025-09-09T15:56:40.548Z" },
+ { url = "https://files.pythonhosted.org/packages/51/64/7de3c91e821a2debf77c92962ea3fe6ac2bc45d0778c1cbe15d4fce2fd94/numpy-2.3.3-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d9192da52b9745f7f0766531dcfa978b7763916f158bb63bdb8a1eca0068ab20", size = 16641643, upload-time = "2025-09-09T15:56:43.343Z" },
+ { url = "https://files.pythonhosted.org/packages/30/e4/961a5fa681502cd0d68907818b69f67542695b74e3ceaa513918103b7e80/numpy-2.3.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:cd7de500a5b66319db419dc3c345244404a164beae0d0937283b907d8152e6ea", size = 16056787, upload-time = "2025-09-09T15:56:46.141Z" },
+ { url = "https://files.pythonhosted.org/packages/99/26/92c912b966e47fbbdf2ad556cb17e3a3088e2e1292b9833be1dfa5361a1a/numpy-2.3.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:93d4962d8f82af58f0b2eb85daaf1b3ca23fe0a85d0be8f1f2b7bb46034e56d7", size = 18579598, upload-time = "2025-09-09T15:56:49.844Z" },
+ { url = "https://files.pythonhosted.org/packages/17/b6/fc8f82cb3520768718834f310c37d96380d9dc61bfdaf05fe5c0b7653e01/numpy-2.3.3-cp312-cp312-win32.whl", hash = "sha256:5534ed6b92f9b7dca6c0a19d6df12d41c68b991cef051d108f6dbff3babc4ebf", size = 6320800, upload-time = "2025-09-09T15:56:52.499Z" },
+ { url = "https://files.pythonhosted.org/packages/32/ee/de999f2625b80d043d6d2d628c07d0d5555a677a3cf78fdf868d409b8766/numpy-2.3.3-cp312-cp312-win_amd64.whl", hash = "sha256:497d7cad08e7092dba36e3d296fe4c97708c93daf26643a1ae4b03f6294d30eb", size = 12786615, upload-time = "2025-09-09T15:56:54.422Z" },
+ { url = "https://files.pythonhosted.org/packages/49/6e/b479032f8a43559c383acb20816644f5f91c88f633d9271ee84f3b3a996c/numpy-2.3.3-cp312-cp312-win_arm64.whl", hash = "sha256:ca0309a18d4dfea6fc6262a66d06c26cfe4640c3926ceec90e57791a82b6eee5", size = 10195936, upload-time = "2025-09-09T15:56:56.541Z" },
+ { url = "https://files.pythonhosted.org/packages/7d/b9/984c2b1ee61a8b803bf63582b4ac4242cf76e2dbd663efeafcb620cc0ccb/numpy-2.3.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f5415fb78995644253370985342cd03572ef8620b934da27d77377a2285955bf", size = 20949588, upload-time = "2025-09-09T15:56:59.087Z" },
+ { url = "https://files.pythonhosted.org/packages/a6/e4/07970e3bed0b1384d22af1e9912527ecbeb47d3b26e9b6a3bced068b3bea/numpy-2.3.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d00de139a3324e26ed5b95870ce63be7ec7352171bc69a4cf1f157a48e3eb6b7", size = 14177802, upload-time = "2025-09-09T15:57:01.73Z" },
+ { url = "https://files.pythonhosted.org/packages/35/c7/477a83887f9de61f1203bad89cf208b7c19cc9fef0cebef65d5a1a0619f2/numpy-2.3.3-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:9dc13c6a5829610cc07422bc74d3ac083bd8323f14e2827d992f9e52e22cd6a6", size = 5106537, upload-time = "2025-09-09T15:57:03.765Z" },
+ { url = "https://files.pythonhosted.org/packages/52/47/93b953bd5866a6f6986344d045a207d3f1cfbad99db29f534ea9cee5108c/numpy-2.3.3-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:d79715d95f1894771eb4e60fb23f065663b2298f7d22945d66877aadf33d00c7", size = 6640743, upload-time = "2025-09-09T15:57:07.921Z" },
+ { url = "https://files.pythonhosted.org/packages/23/83/377f84aaeb800b64c0ef4de58b08769e782edcefa4fea712910b6f0afd3c/numpy-2.3.3-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:952cfd0748514ea7c3afc729a0fc639e61655ce4c55ab9acfab14bda4f402b4c", size = 14278881, upload-time = "2025-09-09T15:57:11.349Z" },
+ { url = "https://files.pythonhosted.org/packages/9a/a5/bf3db6e66c4b160d6ea10b534c381a1955dfab34cb1017ea93aa33c70ed3/numpy-2.3.3-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5b83648633d46f77039c29078751f80da65aa64d5622a3cd62aaef9d835b6c93", size = 16636301, upload-time = "2025-09-09T15:57:14.245Z" },
+ { url = "https://files.pythonhosted.org/packages/a2/59/1287924242eb4fa3f9b3a2c30400f2e17eb2707020d1c5e3086fe7330717/numpy-2.3.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:b001bae8cea1c7dfdb2ae2b017ed0a6f2102d7a70059df1e338e307a4c78a8ae", size = 16053645, upload-time = "2025-09-09T15:57:16.534Z" },
+ { url = "https://files.pythonhosted.org/packages/e6/93/b3d47ed882027c35e94ac2320c37e452a549f582a5e801f2d34b56973c97/numpy-2.3.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8e9aced64054739037d42fb84c54dd38b81ee238816c948c8f3ed134665dcd86", size = 18578179, upload-time = "2025-09-09T15:57:18.883Z" },
+ { url = "https://files.pythonhosted.org/packages/20/d9/487a2bccbf7cc9d4bfc5f0f197761a5ef27ba870f1e3bbb9afc4bbe3fcc2/numpy-2.3.3-cp313-cp313-win32.whl", hash = "sha256:9591e1221db3f37751e6442850429b3aabf7026d3b05542d102944ca7f00c8a8", size = 6312250, upload-time = "2025-09-09T15:57:21.296Z" },
+ { url = "https://files.pythonhosted.org/packages/1b/b5/263ebbbbcede85028f30047eab3d58028d7ebe389d6493fc95ae66c636ab/numpy-2.3.3-cp313-cp313-win_amd64.whl", hash = "sha256:f0dadeb302887f07431910f67a14d57209ed91130be0adea2f9793f1a4f817cf", size = 12783269, upload-time = "2025-09-09T15:57:23.034Z" },
+ { url = "https://files.pythonhosted.org/packages/fa/75/67b8ca554bbeaaeb3fac2e8bce46967a5a06544c9108ec0cf5cece559b6c/numpy-2.3.3-cp313-cp313-win_arm64.whl", hash = "sha256:3c7cf302ac6e0b76a64c4aecf1a09e51abd9b01fc7feee80f6c43e3ab1b1dbc5", size = 10195314, upload-time = "2025-09-09T15:57:25.045Z" },
+ { url = "https://files.pythonhosted.org/packages/11/d0/0d1ddec56b162042ddfafeeb293bac672de9b0cfd688383590090963720a/numpy-2.3.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:eda59e44957d272846bb407aad19f89dc6f58fecf3504bd144f4c5cf81a7eacc", size = 21048025, upload-time = "2025-09-09T15:57:27.257Z" },
+ { url = "https://files.pythonhosted.org/packages/36/9e/1996ca6b6d00415b6acbdd3c42f7f03ea256e2c3f158f80bd7436a8a19f3/numpy-2.3.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:823d04112bc85ef5c4fda73ba24e6096c8f869931405a80aa8b0e604510a26bc", size = 14301053, upload-time = "2025-09-09T15:57:30.077Z" },
+ { url = "https://files.pythonhosted.org/packages/05/24/43da09aa764c68694b76e84b3d3f0c44cb7c18cdc1ba80e48b0ac1d2cd39/numpy-2.3.3-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:40051003e03db4041aa325da2a0971ba41cf65714e65d296397cc0e32de6018b", size = 5229444, upload-time = "2025-09-09T15:57:32.733Z" },
+ { url = "https://files.pythonhosted.org/packages/bc/14/50ffb0f22f7218ef8af28dd089f79f68289a7a05a208db9a2c5dcbe123c1/numpy-2.3.3-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:6ee9086235dd6ab7ae75aba5662f582a81ced49f0f1c6de4260a78d8f2d91a19", size = 6738039, upload-time = "2025-09-09T15:57:34.328Z" },
+ { url = "https://files.pythonhosted.org/packages/55/52/af46ac0795e09657d45a7f4db961917314377edecf66db0e39fa7ab5c3d3/numpy-2.3.3-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:94fcaa68757c3e2e668ddadeaa86ab05499a70725811e582b6a9858dd472fb30", size = 14352314, upload-time = "2025-09-09T15:57:36.255Z" },
+ { url = "https://files.pythonhosted.org/packages/a7/b1/dc226b4c90eb9f07a3fff95c2f0db3268e2e54e5cce97c4ac91518aee71b/numpy-2.3.3-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:da1a74b90e7483d6ce5244053399a614b1d6b7bc30a60d2f570e5071f8959d3e", size = 16701722, upload-time = "2025-09-09T15:57:38.622Z" },
+ { url = "https://files.pythonhosted.org/packages/9d/9d/9d8d358f2eb5eced14dba99f110d83b5cd9a4460895230f3b396ad19a323/numpy-2.3.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:2990adf06d1ecee3b3dcbb4977dfab6e9f09807598d647f04d385d29e7a3c3d3", size = 16132755, upload-time = "2025-09-09T15:57:41.16Z" },
+ { url = "https://files.pythonhosted.org/packages/b6/27/b3922660c45513f9377b3fb42240bec63f203c71416093476ec9aa0719dc/numpy-2.3.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:ed635ff692483b8e3f0fcaa8e7eb8a75ee71aa6d975388224f70821421800cea", size = 18651560, upload-time = "2025-09-09T15:57:43.459Z" },
+ { url = "https://files.pythonhosted.org/packages/5b/8e/3ab61a730bdbbc201bb245a71102aa609f0008b9ed15255500a99cd7f780/numpy-2.3.3-cp313-cp313t-win32.whl", hash = "sha256:a333b4ed33d8dc2b373cc955ca57babc00cd6f9009991d9edc5ddbc1bac36bcd", size = 6442776, upload-time = "2025-09-09T15:57:45.793Z" },
+ { url = "https://files.pythonhosted.org/packages/1c/3a/e22b766b11f6030dc2decdeff5c2fb1610768055603f9f3be88b6d192fb2/numpy-2.3.3-cp313-cp313t-win_amd64.whl", hash = "sha256:4384a169c4d8f97195980815d6fcad04933a7e1ab3b530921c3fef7a1c63426d", size = 12927281, upload-time = "2025-09-09T15:57:47.492Z" },
+ { url = "https://files.pythonhosted.org/packages/7b/42/c2e2bc48c5e9b2a83423f99733950fbefd86f165b468a3d85d52b30bf782/numpy-2.3.3-cp313-cp313t-win_arm64.whl", hash = "sha256:75370986cc0bc66f4ce5110ad35aae6d182cc4ce6433c40ad151f53690130bf1", size = 10265275, upload-time = "2025-09-09T15:57:49.647Z" },
+ { url = "https://files.pythonhosted.org/packages/b8/f2/7e0a37cfced2644c9563c529f29fa28acbd0960dde32ece683aafa6f4949/numpy-2.3.3-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:1e02c7159791cd481e1e6d5ddd766b62a4d5acf8df4d4d1afe35ee9c5c33a41e", size = 21131019, upload-time = "2025-09-09T15:58:42.838Z" },
+ { url = "https://files.pythonhosted.org/packages/1a/7e/3291f505297ed63831135a6cc0f474da0c868a1f31b0dd9a9f03a7a0d2ed/numpy-2.3.3-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:dca2d0fc80b3893ae72197b39f69d55a3cd8b17ea1b50aa4c62de82419936150", size = 14376288, upload-time = "2025-09-09T15:58:45.425Z" },
+ { url = "https://files.pythonhosted.org/packages/bf/4b/ae02e985bdeee73d7b5abdefeb98aef1207e96d4c0621ee0cf228ddfac3c/numpy-2.3.3-pp311-pypy311_pp73-macosx_14_0_arm64.whl", hash = "sha256:99683cbe0658f8271b333a1b1b4bb3173750ad59c0c61f5bbdc5b318918fffe3", size = 5305425, upload-time = "2025-09-09T15:58:48.6Z" },
+ { url = "https://files.pythonhosted.org/packages/8b/eb/9df215d6d7250db32007941500dc51c48190be25f2401d5b2b564e467247/numpy-2.3.3-pp311-pypy311_pp73-macosx_14_0_x86_64.whl", hash = "sha256:d9d537a39cc9de668e5cd0e25affb17aec17b577c6b3ae8a3d866b479fbe88d0", size = 6819053, upload-time = "2025-09-09T15:58:50.401Z" },
+ { url = "https://files.pythonhosted.org/packages/57/62/208293d7d6b2a8998a4a1f23ac758648c3c32182d4ce4346062018362e29/numpy-2.3.3-pp311-pypy311_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8596ba2f8af5f93b01d97563832686d20206d303024777f6dfc2e7c7c3f1850e", size = 14420354, upload-time = "2025-09-09T15:58:52.704Z" },
+ { url = "https://files.pythonhosted.org/packages/ed/0c/8e86e0ff7072e14a71b4c6af63175e40d1e7e933ce9b9e9f765a95b4e0c3/numpy-2.3.3-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e1ec5615b05369925bd1125f27df33f3b6c8bc10d788d5999ecd8769a1fa04db", size = 16760413, upload-time = "2025-09-09T15:58:55.027Z" },
+ { url = "https://files.pythonhosted.org/packages/af/11/0cc63f9f321ccf63886ac203336777140011fb669e739da36d8db3c53b98/numpy-2.3.3-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:2e267c7da5bf7309670523896df97f93f6e469fb931161f483cd6882b3b1a5dc", size = 12971844, upload-time = "2025-09-09T15:58:57.359Z" },
+]
+
+[[package]]
+name = "orjson"
+version = "3.11.3"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/be/4d/8df5f83256a809c22c4d6792ce8d43bb503be0fb7a8e4da9025754b09658/orjson-3.11.3.tar.gz", hash = "sha256:1c0603b1d2ffcd43a411d64797a19556ef76958aef1c182f22dc30860152a98a", size = 5482394, upload-time = "2025-08-26T17:46:43.171Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/9b/64/4a3cef001c6cd9c64256348d4c13a7b09b857e3e1cbb5185917df67d8ced/orjson-3.11.3-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:29cb1f1b008d936803e2da3d7cba726fc47232c45df531b29edf0b232dd737e7", size = 238600, upload-time = "2025-08-26T17:44:36.875Z" },
+ { url = "https://files.pythonhosted.org/packages/10/ce/0c8c87f54f79d051485903dc46226c4d3220b691a151769156054df4562b/orjson-3.11.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:97dceed87ed9139884a55db8722428e27bd8452817fbf1869c58b49fecab1120", size = 123526, upload-time = "2025-08-26T17:44:39.574Z" },
+ { url = "https://files.pythonhosted.org/packages/ef/d0/249497e861f2d438f45b3ab7b7b361484237414945169aa285608f9f7019/orjson-3.11.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:58533f9e8266cb0ac298e259ed7b4d42ed3fa0b78ce76860626164de49e0d467", size = 128075, upload-time = "2025-08-26T17:44:40.672Z" },
+ { url = "https://files.pythonhosted.org/packages/e5/64/00485702f640a0fd56144042a1ea196469f4a3ae93681871564bf74fa996/orjson-3.11.3-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0c212cfdd90512fe722fa9bd620de4d46cda691415be86b2e02243242ae81873", size = 130483, upload-time = "2025-08-26T17:44:41.788Z" },
+ { url = "https://files.pythonhosted.org/packages/64/81/110d68dba3909171bf3f05619ad0cf187b430e64045ae4e0aa7ccfe25b15/orjson-3.11.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5ff835b5d3e67d9207343effb03760c00335f8b5285bfceefd4dc967b0e48f6a", size = 132539, upload-time = "2025-08-26T17:44:43.12Z" },
+ { url = "https://files.pythonhosted.org/packages/79/92/dba25c22b0ddfafa1e6516a780a00abac28d49f49e7202eb433a53c3e94e/orjson-3.11.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f5aa4682912a450c2db89cbd92d356fef47e115dffba07992555542f344d301b", size = 135390, upload-time = "2025-08-26T17:44:44.199Z" },
+ { url = "https://files.pythonhosted.org/packages/44/1d/ca2230fd55edbd87b58a43a19032d63a4b180389a97520cc62c535b726f9/orjson-3.11.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d7d18dd34ea2e860553a579df02041845dee0af8985dff7f8661306f95504ddf", size = 132966, upload-time = "2025-08-26T17:44:45.719Z" },
+ { url = "https://files.pythonhosted.org/packages/6e/b9/96bbc8ed3e47e52b487d504bd6861798977445fbc410da6e87e302dc632d/orjson-3.11.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d8b11701bc43be92ea42bd454910437b355dfb63696c06fe953ffb40b5f763b4", size = 131349, upload-time = "2025-08-26T17:44:46.862Z" },
+ { url = "https://files.pythonhosted.org/packages/c4/3c/418fbd93d94b0df71cddf96b7fe5894d64a5d890b453ac365120daec30f7/orjson-3.11.3-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:90368277087d4af32d38bd55f9da2ff466d25325bf6167c8f382d8ee40cb2bbc", size = 404087, upload-time = "2025-08-26T17:44:48.079Z" },
+ { url = "https://files.pythonhosted.org/packages/5b/a9/2bfd58817d736c2f63608dec0c34857339d423eeed30099b126562822191/orjson-3.11.3-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:fd7ff459fb393358d3a155d25b275c60b07a2c83dcd7ea962b1923f5a1134569", size = 146067, upload-time = "2025-08-26T17:44:49.302Z" },
+ { url = "https://files.pythonhosted.org/packages/33/ba/29023771f334096f564e48d82ed855a0ed3320389d6748a9c949e25be734/orjson-3.11.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f8d902867b699bcd09c176a280b1acdab57f924489033e53d0afe79817da37e6", size = 135506, upload-time = "2025-08-26T17:44:50.558Z" },
+ { url = "https://files.pythonhosted.org/packages/39/62/b5a1eca83f54cb3aa11a9645b8a22f08d97dbd13f27f83aae7c6666a0a05/orjson-3.11.3-cp310-cp310-win32.whl", hash = "sha256:bb93562146120bb51e6b154962d3dadc678ed0fce96513fa6bc06599bb6f6edc", size = 136352, upload-time = "2025-08-26T17:44:51.698Z" },
+ { url = "https://files.pythonhosted.org/packages/e3/c0/7ebfaa327d9a9ed982adc0d9420dbce9a3fec45b60ab32c6308f731333fa/orjson-3.11.3-cp310-cp310-win_amd64.whl", hash = "sha256:976c6f1975032cc327161c65d4194c549f2589d88b105a5e3499429a54479770", size = 131539, upload-time = "2025-08-26T17:44:52.974Z" },
+ { url = "https://files.pythonhosted.org/packages/cd/8b/360674cd817faef32e49276187922a946468579fcaf37afdfb6c07046e92/orjson-3.11.3-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:9d2ae0cc6aeb669633e0124531f342a17d8e97ea999e42f12a5ad4adaa304c5f", size = 238238, upload-time = "2025-08-26T17:44:54.214Z" },
+ { url = "https://files.pythonhosted.org/packages/05/3d/5fa9ea4b34c1a13be7d9046ba98d06e6feb1d8853718992954ab59d16625/orjson-3.11.3-cp311-cp311-macosx_15_0_arm64.whl", hash = "sha256:ba21dbb2493e9c653eaffdc38819b004b7b1b246fb77bfc93dc016fe664eac91", size = 127713, upload-time = "2025-08-26T17:44:55.596Z" },
+ { url = "https://files.pythonhosted.org/packages/e5/5f/e18367823925e00b1feec867ff5f040055892fc474bf5f7875649ecfa586/orjson-3.11.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:00f1a271e56d511d1569937c0447d7dce5a99a33ea0dec76673706360a051904", size = 123241, upload-time = "2025-08-26T17:44:57.185Z" },
+ { url = "https://files.pythonhosted.org/packages/0f/bd/3c66b91c4564759cf9f473251ac1650e446c7ba92a7c0f9f56ed54f9f0e6/orjson-3.11.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b67e71e47caa6680d1b6f075a396d04fa6ca8ca09aafb428731da9b3ea32a5a6", size = 127895, upload-time = "2025-08-26T17:44:58.349Z" },
+ { url = "https://files.pythonhosted.org/packages/82/b5/dc8dcd609db4766e2967a85f63296c59d4722b39503e5b0bf7fd340d387f/orjson-3.11.3-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d7d012ebddffcce8c85734a6d9e5f08180cd3857c5f5a3ac70185b43775d043d", size = 130303, upload-time = "2025-08-26T17:44:59.491Z" },
+ { url = "https://files.pythonhosted.org/packages/48/c2/d58ec5fd1270b2aa44c862171891adc2e1241bd7dab26c8f46eb97c6c6f1/orjson-3.11.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dd759f75d6b8d1b62012b7f5ef9461d03c804f94d539a5515b454ba3a6588038", size = 132366, upload-time = "2025-08-26T17:45:00.654Z" },
+ { url = "https://files.pythonhosted.org/packages/73/87/0ef7e22eb8dd1ef940bfe3b9e441db519e692d62ed1aae365406a16d23d0/orjson-3.11.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6890ace0809627b0dff19cfad92d69d0fa3f089d3e359a2a532507bb6ba34efb", size = 135180, upload-time = "2025-08-26T17:45:02.424Z" },
+ { url = "https://files.pythonhosted.org/packages/bb/6a/e5bf7b70883f374710ad74faf99bacfc4b5b5a7797c1d5e130350e0e28a3/orjson-3.11.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f9d4a5e041ae435b815e568537755773d05dac031fee6a57b4ba70897a44d9d2", size = 132741, upload-time = "2025-08-26T17:45:03.663Z" },
+ { url = "https://files.pythonhosted.org/packages/bd/0c/4577fd860b6386ffaa56440e792af01c7882b56d2766f55384b5b0e9d39b/orjson-3.11.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:2d68bf97a771836687107abfca089743885fb664b90138d8761cce61d5625d55", size = 131104, upload-time = "2025-08-26T17:45:04.939Z" },
+ { url = "https://files.pythonhosted.org/packages/66/4b/83e92b2d67e86d1c33f2ea9411742a714a26de63641b082bdbf3d8e481af/orjson-3.11.3-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:bfc27516ec46f4520b18ef645864cee168d2a027dbf32c5537cb1f3e3c22dac1", size = 403887, upload-time = "2025-08-26T17:45:06.228Z" },
+ { url = "https://files.pythonhosted.org/packages/6d/e5/9eea6a14e9b5ceb4a271a1fd2e1dec5f2f686755c0fab6673dc6ff3433f4/orjson-3.11.3-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:f66b001332a017d7945e177e282a40b6997056394e3ed7ddb41fb1813b83e824", size = 145855, upload-time = "2025-08-26T17:45:08.338Z" },
+ { url = "https://files.pythonhosted.org/packages/45/78/8d4f5ad0c80ba9bf8ac4d0fc71f93a7d0dc0844989e645e2074af376c307/orjson-3.11.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:212e67806525d2561efbfe9e799633b17eb668b8964abed6b5319b2f1cfbae1f", size = 135361, upload-time = "2025-08-26T17:45:09.625Z" },
+ { url = "https://files.pythonhosted.org/packages/0b/5f/16386970370178d7a9b438517ea3d704efcf163d286422bae3b37b88dbb5/orjson-3.11.3-cp311-cp311-win32.whl", hash = "sha256:6e8e0c3b85575a32f2ffa59de455f85ce002b8bdc0662d6b9c2ed6d80ab5d204", size = 136190, upload-time = "2025-08-26T17:45:10.962Z" },
+ { url = "https://files.pythonhosted.org/packages/09/60/db16c6f7a41dd8ac9fb651f66701ff2aeb499ad9ebc15853a26c7c152448/orjson-3.11.3-cp311-cp311-win_amd64.whl", hash = "sha256:6be2f1b5d3dc99a5ce5ce162fc741c22ba9f3443d3dd586e6a1211b7bc87bc7b", size = 131389, upload-time = "2025-08-26T17:45:12.285Z" },
+ { url = "https://files.pythonhosted.org/packages/3e/2a/bb811ad336667041dea9b8565c7c9faf2f59b47eb5ab680315eea612ef2e/orjson-3.11.3-cp311-cp311-win_arm64.whl", hash = "sha256:fafb1a99d740523d964b15c8db4eabbfc86ff29f84898262bf6e3e4c9e97e43e", size = 126120, upload-time = "2025-08-26T17:45:13.515Z" },
+ { url = "https://files.pythonhosted.org/packages/3d/b0/a7edab2a00cdcb2688e1c943401cb3236323e7bfd2839815c6131a3742f4/orjson-3.11.3-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:8c752089db84333e36d754c4baf19c0e1437012242048439c7e80eb0e6426e3b", size = 238259, upload-time = "2025-08-26T17:45:15.093Z" },
+ { url = "https://files.pythonhosted.org/packages/e1/c6/ff4865a9cc398a07a83342713b5932e4dc3cb4bf4bc04e8f83dedfc0d736/orjson-3.11.3-cp312-cp312-macosx_15_0_arm64.whl", hash = "sha256:9b8761b6cf04a856eb544acdd82fc594b978f12ac3602d6374a7edb9d86fd2c2", size = 127633, upload-time = "2025-08-26T17:45:16.417Z" },
+ { url = "https://files.pythonhosted.org/packages/6e/e6/e00bea2d9472f44fe8794f523e548ce0ad51eb9693cf538a753a27b8bda4/orjson-3.11.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8b13974dc8ac6ba22feaa867fc19135a3e01a134b4f7c9c28162fed4d615008a", size = 123061, upload-time = "2025-08-26T17:45:17.673Z" },
+ { url = "https://files.pythonhosted.org/packages/54/31/9fbb78b8e1eb3ac605467cb846e1c08d0588506028b37f4ee21f978a51d4/orjson-3.11.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f83abab5bacb76d9c821fd5c07728ff224ed0e52d7a71b7b3de822f3df04e15c", size = 127956, upload-time = "2025-08-26T17:45:19.172Z" },
+ { url = "https://files.pythonhosted.org/packages/36/88/b0604c22af1eed9f98d709a96302006915cfd724a7ebd27d6dd11c22d80b/orjson-3.11.3-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e6fbaf48a744b94091a56c62897b27c31ee2da93d826aa5b207131a1e13d4064", size = 130790, upload-time = "2025-08-26T17:45:20.586Z" },
+ { url = "https://files.pythonhosted.org/packages/0e/9d/1c1238ae9fffbfed51ba1e507731b3faaf6b846126a47e9649222b0fd06f/orjson-3.11.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bc779b4f4bba2847d0d2940081a7b6f7b5877e05408ffbb74fa1faf4a136c424", size = 132385, upload-time = "2025-08-26T17:45:22.036Z" },
+ { url = "https://files.pythonhosted.org/packages/a3/b5/c06f1b090a1c875f337e21dd71943bc9d84087f7cdf8c6e9086902c34e42/orjson-3.11.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bd4b909ce4c50faa2192da6bb684d9848d4510b736b0611b6ab4020ea6fd2d23", size = 135305, upload-time = "2025-08-26T17:45:23.4Z" },
+ { url = "https://files.pythonhosted.org/packages/a0/26/5f028c7d81ad2ebbf84414ba6d6c9cac03f22f5cd0d01eb40fb2d6a06b07/orjson-3.11.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:524b765ad888dc5518bbce12c77c2e83dee1ed6b0992c1790cc5fb49bb4b6667", size = 132875, upload-time = "2025-08-26T17:45:25.182Z" },
+ { url = "https://files.pythonhosted.org/packages/fe/d4/b8df70d9cfb56e385bf39b4e915298f9ae6c61454c8154a0f5fd7efcd42e/orjson-3.11.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:84fd82870b97ae3cdcea9d8746e592b6d40e1e4d4527835fc520c588d2ded04f", size = 130940, upload-time = "2025-08-26T17:45:27.209Z" },
+ { url = "https://files.pythonhosted.org/packages/da/5e/afe6a052ebc1a4741c792dd96e9f65bf3939d2094e8b356503b68d48f9f5/orjson-3.11.3-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:fbecb9709111be913ae6879b07bafd4b0785b44c1eb5cac8ac76da048b3885a1", size = 403852, upload-time = "2025-08-26T17:45:28.478Z" },
+ { url = "https://files.pythonhosted.org/packages/f8/90/7bbabafeb2ce65915e9247f14a56b29c9334003536009ef5b122783fe67e/orjson-3.11.3-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:9dba358d55aee552bd868de348f4736ca5a4086d9a62e2bfbbeeb5629fe8b0cc", size = 146293, upload-time = "2025-08-26T17:45:29.86Z" },
+ { url = "https://files.pythonhosted.org/packages/27/b3/2d703946447da8b093350570644a663df69448c9d9330e5f1d9cce997f20/orjson-3.11.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:eabcf2e84f1d7105f84580e03012270c7e97ecb1fb1618bda395061b2a84a049", size = 135470, upload-time = "2025-08-26T17:45:31.243Z" },
+ { url = "https://files.pythonhosted.org/packages/38/70/b14dcfae7aff0e379b0119c8a812f8396678919c431efccc8e8a0263e4d9/orjson-3.11.3-cp312-cp312-win32.whl", hash = "sha256:3782d2c60b8116772aea8d9b7905221437fdf53e7277282e8d8b07c220f96cca", size = 136248, upload-time = "2025-08-26T17:45:32.567Z" },
+ { url = "https://files.pythonhosted.org/packages/35/b8/9e3127d65de7fff243f7f3e53f59a531bf6bb295ebe5db024c2503cc0726/orjson-3.11.3-cp312-cp312-win_amd64.whl", hash = "sha256:79b44319268af2eaa3e315b92298de9a0067ade6e6003ddaef72f8e0bedb94f1", size = 131437, upload-time = "2025-08-26T17:45:34.949Z" },
+ { url = "https://files.pythonhosted.org/packages/51/92/a946e737d4d8a7fd84a606aba96220043dcc7d6988b9e7551f7f6d5ba5ad/orjson-3.11.3-cp312-cp312-win_arm64.whl", hash = "sha256:0e92a4e83341ef79d835ca21b8bd13e27c859e4e9e4d7b63defc6e58462a3710", size = 125978, upload-time = "2025-08-26T17:45:36.422Z" },
+ { url = "https://files.pythonhosted.org/packages/fc/79/8932b27293ad35919571f77cb3693b5906cf14f206ef17546052a241fdf6/orjson-3.11.3-cp313-cp313-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:af40c6612fd2a4b00de648aa26d18186cd1322330bd3a3cc52f87c699e995810", size = 238127, upload-time = "2025-08-26T17:45:38.146Z" },
+ { url = "https://files.pythonhosted.org/packages/1c/82/cb93cd8cf132cd7643b30b6c5a56a26c4e780c7a145db6f83de977b540ce/orjson-3.11.3-cp313-cp313-macosx_15_0_arm64.whl", hash = "sha256:9f1587f26c235894c09e8b5b7636a38091a9e6e7fe4531937534749c04face43", size = 127494, upload-time = "2025-08-26T17:45:39.57Z" },
+ { url = "https://files.pythonhosted.org/packages/a4/b8/2d9eb181a9b6bb71463a78882bcac1027fd29cf62c38a40cc02fc11d3495/orjson-3.11.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:61dcdad16da5bb486d7227a37a2e789c429397793a6955227cedbd7252eb5a27", size = 123017, upload-time = "2025-08-26T17:45:40.876Z" },
+ { url = "https://files.pythonhosted.org/packages/b4/14/a0e971e72d03b509190232356d54c0f34507a05050bd026b8db2bf2c192c/orjson-3.11.3-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:11c6d71478e2cbea0a709e8a06365fa63da81da6498a53e4c4f065881d21ae8f", size = 127898, upload-time = "2025-08-26T17:45:42.188Z" },
+ { url = "https://files.pythonhosted.org/packages/8e/af/dc74536722b03d65e17042cc30ae586161093e5b1f29bccda24765a6ae47/orjson-3.11.3-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ff94112e0098470b665cb0ed06efb187154b63649403b8d5e9aedeb482b4548c", size = 130742, upload-time = "2025-08-26T17:45:43.511Z" },
+ { url = "https://files.pythonhosted.org/packages/62/e6/7a3b63b6677bce089fe939353cda24a7679825c43a24e49f757805fc0d8a/orjson-3.11.3-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ae8b756575aaa2a855a75192f356bbda11a89169830e1439cfb1a3e1a6dde7be", size = 132377, upload-time = "2025-08-26T17:45:45.525Z" },
+ { url = "https://files.pythonhosted.org/packages/fc/cd/ce2ab93e2e7eaf518f0fd15e3068b8c43216c8a44ed82ac2b79ce5cef72d/orjson-3.11.3-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c9416cc19a349c167ef76135b2fe40d03cea93680428efee8771f3e9fb66079d", size = 135313, upload-time = "2025-08-26T17:45:46.821Z" },
+ { url = "https://files.pythonhosted.org/packages/d0/b4/f98355eff0bd1a38454209bbc73372ce351ba29933cb3e2eba16c04b9448/orjson-3.11.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b822caf5b9752bc6f246eb08124c3d12bf2175b66ab74bac2ef3bbf9221ce1b2", size = 132908, upload-time = "2025-08-26T17:45:48.126Z" },
+ { url = "https://files.pythonhosted.org/packages/eb/92/8f5182d7bc2a1bed46ed960b61a39af8389f0ad476120cd99e67182bfb6d/orjson-3.11.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:414f71e3bdd5573893bf5ecdf35c32b213ed20aa15536fe2f588f946c318824f", size = 130905, upload-time = "2025-08-26T17:45:49.414Z" },
+ { url = "https://files.pythonhosted.org/packages/1a/60/c41ca753ce9ffe3d0f67b9b4c093bdd6e5fdb1bc53064f992f66bb99954d/orjson-3.11.3-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:828e3149ad8815dc14468f36ab2a4b819237c155ee1370341b91ea4c8672d2ee", size = 403812, upload-time = "2025-08-26T17:45:51.085Z" },
+ { url = "https://files.pythonhosted.org/packages/dd/13/e4a4f16d71ce1868860db59092e78782c67082a8f1dc06a3788aef2b41bc/orjson-3.11.3-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ac9e05f25627ffc714c21f8dfe3a579445a5c392a9c8ae7ba1d0e9fb5333f56e", size = 146277, upload-time = "2025-08-26T17:45:52.851Z" },
+ { url = "https://files.pythonhosted.org/packages/8d/8b/bafb7f0afef9344754a3a0597a12442f1b85a048b82108ef2c956f53babd/orjson-3.11.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e44fbe4000bd321d9f3b648ae46e0196d21577cf66ae684a96ff90b1f7c93633", size = 135418, upload-time = "2025-08-26T17:45:54.806Z" },
+ { url = "https://files.pythonhosted.org/packages/60/d4/bae8e4f26afb2c23bea69d2f6d566132584d1c3a5fe89ee8c17b718cab67/orjson-3.11.3-cp313-cp313-win32.whl", hash = "sha256:2039b7847ba3eec1f5886e75e6763a16e18c68a63efc4b029ddf994821e2e66b", size = 136216, upload-time = "2025-08-26T17:45:57.182Z" },
+ { url = "https://files.pythonhosted.org/packages/88/76/224985d9f127e121c8cad882cea55f0ebe39f97925de040b75ccd4b33999/orjson-3.11.3-cp313-cp313-win_amd64.whl", hash = "sha256:29be5ac4164aa8bdcba5fa0700a3c9c316b411d8ed9d39ef8a882541bd452fae", size = 131362, upload-time = "2025-08-26T17:45:58.56Z" },
+ { url = "https://files.pythonhosted.org/packages/e2/cf/0dce7a0be94bd36d1346be5067ed65ded6adb795fdbe3abd234c8d576d01/orjson-3.11.3-cp313-cp313-win_arm64.whl", hash = "sha256:18bd1435cb1f2857ceb59cfb7de6f92593ef7b831ccd1b9bfb28ca530e539dce", size = 125989, upload-time = "2025-08-26T17:45:59.95Z" },
+]
+
+[[package]]
+name = "packaging"
+version = "25.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/a1/d4/1fc4078c65507b51b96ca8f8c3ba19e6a61c8253c72794544580a7b6c24d/packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f", size = 165727, upload-time = "2025-04-19T11:48:59.673Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/20/12/38679034af332785aac8774540895e234f4d07f7545804097de4b666afd8/packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484", size = 66469, upload-time = "2025-04-19T11:48:57.875Z" },
+]
+
+[[package]]
+name = "pandas"
+version = "2.3.2"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "numpy", version = "2.2.6", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" },
+ { name = "numpy", version = "2.3.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" },
+ { name = "python-dateutil" },
+ { name = "pytz" },
+ { name = "tzdata" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/79/8e/0e90233ac205ad182bd6b422532695d2b9414944a280488105d598c70023/pandas-2.3.2.tar.gz", hash = "sha256:ab7b58f8f82706890924ccdfb5f48002b83d2b5a3845976a9fb705d36c34dcdb", size = 4488684, upload-time = "2025-08-21T10:28:29.257Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/2e/16/a8eeb70aad84ccbf14076793f90e0031eded63c1899aeae9fdfbf37881f4/pandas-2.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:52bc29a946304c360561974c6542d1dd628ddafa69134a7131fdfd6a5d7a1a35", size = 11539648, upload-time = "2025-08-21T10:26:36.236Z" },
+ { url = "https://files.pythonhosted.org/packages/47/f1/c5bdaea13bf3708554d93e948b7ea74121ce6e0d59537ca4c4f77731072b/pandas-2.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:220cc5c35ffaa764dd5bb17cf42df283b5cb7fdf49e10a7b053a06c9cb48ee2b", size = 10786923, upload-time = "2025-08-21T10:26:40.518Z" },
+ { url = "https://files.pythonhosted.org/packages/bb/10/811fa01476d29ffed692e735825516ad0e56d925961819e6126b4ba32147/pandas-2.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42c05e15111221384019897df20c6fe893b2f697d03c811ee67ec9e0bb5a3424", size = 11726241, upload-time = "2025-08-21T10:26:43.175Z" },
+ { url = "https://files.pythonhosted.org/packages/c4/6a/40b043b06e08df1ea1b6d20f0e0c2f2c4ec8c4f07d1c92948273d943a50b/pandas-2.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cc03acc273c5515ab69f898df99d9d4f12c4d70dbfc24c3acc6203751d0804cf", size = 12349533, upload-time = "2025-08-21T10:26:46.611Z" },
+ { url = "https://files.pythonhosted.org/packages/e2/ea/2e081a2302e41a9bca7056659fdd2b85ef94923723e41665b42d65afd347/pandas-2.3.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d25c20a03e8870f6339bcf67281b946bd20b86f1a544ebbebb87e66a8d642cba", size = 13202407, upload-time = "2025-08-21T10:26:49.068Z" },
+ { url = "https://files.pythonhosted.org/packages/f4/12/7ff9f6a79e2ee8869dcf70741ef998b97ea20050fe25f83dc759764c1e32/pandas-2.3.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:21bb612d148bb5860b7eb2c10faacf1a810799245afd342cf297d7551513fbb6", size = 13837212, upload-time = "2025-08-21T10:26:51.832Z" },
+ { url = "https://files.pythonhosted.org/packages/d8/df/5ab92fcd76455a632b3db34a746e1074d432c0cdbbd28d7cd1daba46a75d/pandas-2.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:b62d586eb25cb8cb70a5746a378fc3194cb7f11ea77170d59f889f5dfe3cec7a", size = 11338099, upload-time = "2025-08-21T10:26:54.382Z" },
+ { url = "https://files.pythonhosted.org/packages/7a/59/f3e010879f118c2d400902d2d871c2226cef29b08c09fb8dc41111730400/pandas-2.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1333e9c299adcbb68ee89a9bb568fc3f20f9cbb419f1dd5225071e6cddb2a743", size = 11563308, upload-time = "2025-08-21T10:26:56.656Z" },
+ { url = "https://files.pythonhosted.org/packages/38/18/48f10f1cc5c397af59571d638d211f494dba481f449c19adbd282aa8f4ca/pandas-2.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:76972bcbd7de8e91ad5f0ca884a9f2c477a2125354af624e022c49e5bd0dfff4", size = 10820319, upload-time = "2025-08-21T10:26:59.162Z" },
+ { url = "https://files.pythonhosted.org/packages/95/3b/1e9b69632898b048e223834cd9702052bcf06b15e1ae716eda3196fb972e/pandas-2.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b98bdd7c456a05eef7cd21fd6b29e3ca243591fe531c62be94a2cc987efb5ac2", size = 11790097, upload-time = "2025-08-21T10:27:02.204Z" },
+ { url = "https://files.pythonhosted.org/packages/8b/ef/0e2ffb30b1f7fbc9a588bd01e3c14a0d96854d09a887e15e30cc19961227/pandas-2.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1d81573b3f7db40d020983f78721e9bfc425f411e616ef019a10ebf597aedb2e", size = 12397958, upload-time = "2025-08-21T10:27:05.409Z" },
+ { url = "https://files.pythonhosted.org/packages/23/82/e6b85f0d92e9afb0e7f705a51d1399b79c7380c19687bfbf3d2837743249/pandas-2.3.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:e190b738675a73b581736cc8ec71ae113d6c3768d0bd18bffa5b9a0927b0b6ea", size = 13225600, upload-time = "2025-08-21T10:27:07.791Z" },
+ { url = "https://files.pythonhosted.org/packages/e8/f1/f682015893d9ed51611948bd83683670842286a8edd4f68c2c1c3b231eef/pandas-2.3.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:c253828cb08f47488d60f43c5fc95114c771bbfff085da54bfc79cb4f9e3a372", size = 13879433, upload-time = "2025-08-21T10:27:10.347Z" },
+ { url = "https://files.pythonhosted.org/packages/a7/e7/ae86261695b6c8a36d6a4c8d5f9b9ede8248510d689a2f379a18354b37d7/pandas-2.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:9467697b8083f9667b212633ad6aa4ab32436dcbaf4cd57325debb0ddef2012f", size = 11336557, upload-time = "2025-08-21T10:27:12.983Z" },
+ { url = "https://files.pythonhosted.org/packages/ec/db/614c20fb7a85a14828edd23f1c02db58a30abf3ce76f38806155d160313c/pandas-2.3.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:3fbb977f802156e7a3f829e9d1d5398f6192375a3e2d1a9ee0803e35fe70a2b9", size = 11587652, upload-time = "2025-08-21T10:27:15.888Z" },
+ { url = "https://files.pythonhosted.org/packages/99/b0/756e52f6582cade5e746f19bad0517ff27ba9c73404607c0306585c201b3/pandas-2.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1b9b52693123dd234b7c985c68b709b0b009f4521000d0525f2b95c22f15944b", size = 10717686, upload-time = "2025-08-21T10:27:18.486Z" },
+ { url = "https://files.pythonhosted.org/packages/37/4c/dd5ccc1e357abfeee8353123282de17997f90ff67855f86154e5a13b81e5/pandas-2.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0bd281310d4f412733f319a5bc552f86d62cddc5f51d2e392c8787335c994175", size = 11278722, upload-time = "2025-08-21T10:27:21.149Z" },
+ { url = "https://files.pythonhosted.org/packages/d3/a4/f7edcfa47e0a88cda0be8b068a5bae710bf264f867edfdf7b71584ace362/pandas-2.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:96d31a6b4354e3b9b8a2c848af75d31da390657e3ac6f30c05c82068b9ed79b9", size = 11987803, upload-time = "2025-08-21T10:27:23.767Z" },
+ { url = "https://files.pythonhosted.org/packages/f6/61/1bce4129f93ab66f1c68b7ed1c12bac6a70b1b56c5dab359c6bbcd480b52/pandas-2.3.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:df4df0b9d02bb873a106971bb85d448378ef14b86ba96f035f50bbd3688456b4", size = 12766345, upload-time = "2025-08-21T10:27:26.6Z" },
+ { url = "https://files.pythonhosted.org/packages/8e/46/80d53de70fee835531da3a1dae827a1e76e77a43ad22a8cd0f8142b61587/pandas-2.3.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:213a5adf93d020b74327cb2c1b842884dbdd37f895f42dcc2f09d451d949f811", size = 13439314, upload-time = "2025-08-21T10:27:29.213Z" },
+ { url = "https://files.pythonhosted.org/packages/28/30/8114832daff7489f179971dbc1d854109b7f4365a546e3ea75b6516cea95/pandas-2.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:8c13b81a9347eb8c7548f53fd9a4f08d4dfe996836543f805c987bafa03317ae", size = 10983326, upload-time = "2025-08-21T10:27:31.901Z" },
+ { url = "https://files.pythonhosted.org/packages/27/64/a2f7bf678af502e16b472527735d168b22b7824e45a4d7e96a4fbb634b59/pandas-2.3.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:0c6ecbac99a354a051ef21c5307601093cb9e0f4b1855984a084bfec9302699e", size = 11531061, upload-time = "2025-08-21T10:27:34.647Z" },
+ { url = "https://files.pythonhosted.org/packages/54/4c/c3d21b2b7769ef2f4c2b9299fcadd601efa6729f1357a8dbce8dd949ed70/pandas-2.3.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:c6f048aa0fd080d6a06cc7e7537c09b53be6642d330ac6f54a600c3ace857ee9", size = 10668666, upload-time = "2025-08-21T10:27:37.203Z" },
+ { url = "https://files.pythonhosted.org/packages/50/e2/f775ba76ecfb3424d7f5862620841cf0edb592e9abd2d2a5387d305fe7a8/pandas-2.3.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0064187b80a5be6f2f9c9d6bdde29372468751dfa89f4211a3c5871854cfbf7a", size = 11332835, upload-time = "2025-08-21T10:27:40.188Z" },
+ { url = "https://files.pythonhosted.org/packages/8f/52/0634adaace9be2d8cac9ef78f05c47f3a675882e068438b9d7ec7ef0c13f/pandas-2.3.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4ac8c320bded4718b298281339c1a50fb00a6ba78cb2a63521c39bec95b0209b", size = 12057211, upload-time = "2025-08-21T10:27:43.117Z" },
+ { url = "https://files.pythonhosted.org/packages/0b/9d/2df913f14b2deb9c748975fdb2491da1a78773debb25abbc7cbc67c6b549/pandas-2.3.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:114c2fe4f4328cf98ce5716d1532f3ab79c5919f95a9cfee81d9140064a2e4d6", size = 12749277, upload-time = "2025-08-21T10:27:45.474Z" },
+ { url = "https://files.pythonhosted.org/packages/87/af/da1a2417026bd14d98c236dba88e39837182459d29dcfcea510b2ac9e8a1/pandas-2.3.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:48fa91c4dfb3b2b9bfdb5c24cd3567575f4e13f9636810462ffed8925352be5a", size = 13415256, upload-time = "2025-08-21T10:27:49.885Z" },
+ { url = "https://files.pythonhosted.org/packages/22/3c/f2af1ce8840ef648584a6156489636b5692c162771918aa95707c165ad2b/pandas-2.3.2-cp313-cp313-win_amd64.whl", hash = "sha256:12d039facec710f7ba305786837d0225a3444af7bbd9c15c32ca2d40d157ed8b", size = 10982579, upload-time = "2025-08-21T10:28:08.435Z" },
+ { url = "https://files.pythonhosted.org/packages/f3/98/8df69c4097a6719e357dc249bf437b8efbde808038268e584421696cbddf/pandas-2.3.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:c624b615ce97864eb588779ed4046186f967374185c047070545253a52ab2d57", size = 12028163, upload-time = "2025-08-21T10:27:52.232Z" },
+ { url = "https://files.pythonhosted.org/packages/0e/23/f95cbcbea319f349e10ff90db488b905c6883f03cbabd34f6b03cbc3c044/pandas-2.3.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:0cee69d583b9b128823d9514171cabb6861e09409af805b54459bd0c821a35c2", size = 11391860, upload-time = "2025-08-21T10:27:54.673Z" },
+ { url = "https://files.pythonhosted.org/packages/ad/1b/6a984e98c4abee22058aa75bfb8eb90dce58cf8d7296f8bc56c14bc330b0/pandas-2.3.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2319656ed81124982900b4c37f0e0c58c015af9a7bbc62342ba5ad07ace82ba9", size = 11309830, upload-time = "2025-08-21T10:27:56.957Z" },
+ { url = "https://files.pythonhosted.org/packages/15/d5/f0486090eb18dd8710bf60afeaf638ba6817047c0c8ae5c6a25598665609/pandas-2.3.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b37205ad6f00d52f16b6d09f406434ba928c1a1966e2771006a9033c736d30d2", size = 11883216, upload-time = "2025-08-21T10:27:59.302Z" },
+ { url = "https://files.pythonhosted.org/packages/10/86/692050c119696da19e20245bbd650d8dfca6ceb577da027c3a73c62a047e/pandas-2.3.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:837248b4fc3a9b83b9c6214699a13f069dc13510a6a6d7f9ba33145d2841a012", size = 12699743, upload-time = "2025-08-21T10:28:02.447Z" },
+ { url = "https://files.pythonhosted.org/packages/cd/d7/612123674d7b17cf345aad0a10289b2a384bff404e0463a83c4a3a59d205/pandas-2.3.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:d2c3554bd31b731cd6490d94a28f3abb8dd770634a9e06eb6d2911b9827db370", size = 13186141, upload-time = "2025-08-21T10:28:05.377Z" },
+]
+
+[[package]]
+name = "passlib"
+version = "1.7.4"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/b6/06/9da9ee59a67fae7761aab3ccc84fa4f3f33f125b370f1ccdb915bf967c11/passlib-1.7.4.tar.gz", hash = "sha256:defd50f72b65c5402ab2c573830a6978e5f202ad0d984793c8dde2c4152ebe04", size = 689844, upload-time = "2020-10-08T19:00:52.121Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/3b/a4/ab6b7589382ca3df236e03faa71deac88cae040af60c071a78d254a62172/passlib-1.7.4-py2.py3-none-any.whl", hash = "sha256:aa6bca462b8d8bda89c70b382f0c298a20b5560af6cbfa2dce410c0a2fb669f1", size = 525554, upload-time = "2020-10-08T19:00:49.856Z" },
+]
+
+[[package]]
+name = "pillow"
+version = "11.3.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/f3/0d/d0d6dea55cd152ce3d6767bb38a8fc10e33796ba4ba210cbab9354b6d238/pillow-11.3.0.tar.gz", hash = "sha256:3828ee7586cd0b2091b6209e5ad53e20d0649bbe87164a459d0676e035e8f523", size = 47113069, upload-time = "2025-07-01T09:16:30.666Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/4c/5d/45a3553a253ac8763f3561371432a90bdbe6000fbdcf1397ffe502aa206c/pillow-11.3.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:1b9c17fd4ace828b3003dfd1e30bff24863e0eb59b535e8f80194d9cc7ecf860", size = 5316554, upload-time = "2025-07-01T09:13:39.342Z" },
+ { url = "https://files.pythonhosted.org/packages/7c/c8/67c12ab069ef586a25a4a79ced553586748fad100c77c0ce59bb4983ac98/pillow-11.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:65dc69160114cdd0ca0f35cb434633c75e8e7fad4cf855177a05bf38678f73ad", size = 4686548, upload-time = "2025-07-01T09:13:41.835Z" },
+ { url = "https://files.pythonhosted.org/packages/2f/bd/6741ebd56263390b382ae4c5de02979af7f8bd9807346d068700dd6d5cf9/pillow-11.3.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7107195ddc914f656c7fc8e4a5e1c25f32e9236ea3ea860f257b0436011fddd0", size = 5859742, upload-time = "2025-07-03T13:09:47.439Z" },
+ { url = "https://files.pythonhosted.org/packages/ca/0b/c412a9e27e1e6a829e6ab6c2dca52dd563efbedf4c9c6aa453d9a9b77359/pillow-11.3.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:cc3e831b563b3114baac7ec2ee86819eb03caa1a2cef0b481a5675b59c4fe23b", size = 7633087, upload-time = "2025-07-03T13:09:51.796Z" },
+ { url = "https://files.pythonhosted.org/packages/59/9d/9b7076aaf30f5dd17e5e5589b2d2f5a5d7e30ff67a171eb686e4eecc2adf/pillow-11.3.0-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f1f182ebd2303acf8c380a54f615ec883322593320a9b00438eb842c1f37ae50", size = 5963350, upload-time = "2025-07-01T09:13:43.865Z" },
+ { url = "https://files.pythonhosted.org/packages/f0/16/1a6bf01fb622fb9cf5c91683823f073f053005c849b1f52ed613afcf8dae/pillow-11.3.0-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4445fa62e15936a028672fd48c4c11a66d641d2c05726c7ec1f8ba6a572036ae", size = 6631840, upload-time = "2025-07-01T09:13:46.161Z" },
+ { url = "https://files.pythonhosted.org/packages/7b/e6/6ff7077077eb47fde78739e7d570bdcd7c10495666b6afcd23ab56b19a43/pillow-11.3.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:71f511f6b3b91dd543282477be45a033e4845a40278fa8dcdbfdb07109bf18f9", size = 6074005, upload-time = "2025-07-01T09:13:47.829Z" },
+ { url = "https://files.pythonhosted.org/packages/c3/3a/b13f36832ea6d279a697231658199e0a03cd87ef12048016bdcc84131601/pillow-11.3.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:040a5b691b0713e1f6cbe222e0f4f74cd233421e105850ae3b3c0ceda520f42e", size = 6708372, upload-time = "2025-07-01T09:13:52.145Z" },
+ { url = "https://files.pythonhosted.org/packages/6c/e4/61b2e1a7528740efbc70b3d581f33937e38e98ef3d50b05007267a55bcb2/pillow-11.3.0-cp310-cp310-win32.whl", hash = "sha256:89bd777bc6624fe4115e9fac3352c79ed60f3bb18651420635f26e643e3dd1f6", size = 6277090, upload-time = "2025-07-01T09:13:53.915Z" },
+ { url = "https://files.pythonhosted.org/packages/a9/d3/60c781c83a785d6afbd6a326ed4d759d141de43aa7365725cbcd65ce5e54/pillow-11.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:19d2ff547c75b8e3ff46f4d9ef969a06c30ab2d4263a9e287733aa8b2429ce8f", size = 6985988, upload-time = "2025-07-01T09:13:55.699Z" },
+ { url = "https://files.pythonhosted.org/packages/9f/28/4f4a0203165eefb3763939c6789ba31013a2e90adffb456610f30f613850/pillow-11.3.0-cp310-cp310-win_arm64.whl", hash = "sha256:819931d25e57b513242859ce1876c58c59dc31587847bf74cfe06b2e0cb22d2f", size = 2422899, upload-time = "2025-07-01T09:13:57.497Z" },
+ { url = "https://files.pythonhosted.org/packages/db/26/77f8ed17ca4ffd60e1dcd220a6ec6d71210ba398cfa33a13a1cd614c5613/pillow-11.3.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:1cd110edf822773368b396281a2293aeb91c90a2db00d78ea43e7e861631b722", size = 5316531, upload-time = "2025-07-01T09:13:59.203Z" },
+ { url = "https://files.pythonhosted.org/packages/cb/39/ee475903197ce709322a17a866892efb560f57900d9af2e55f86db51b0a5/pillow-11.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9c412fddd1b77a75aa904615ebaa6001f169b26fd467b4be93aded278266b288", size = 4686560, upload-time = "2025-07-01T09:14:01.101Z" },
+ { url = "https://files.pythonhosted.org/packages/d5/90/442068a160fd179938ba55ec8c97050a612426fae5ec0a764e345839f76d/pillow-11.3.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7d1aa4de119a0ecac0a34a9c8bde33f34022e2e8f99104e47a3ca392fd60e37d", size = 5870978, upload-time = "2025-07-03T13:09:55.638Z" },
+ { url = "https://files.pythonhosted.org/packages/13/92/dcdd147ab02daf405387f0218dcf792dc6dd5b14d2573d40b4caeef01059/pillow-11.3.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:91da1d88226663594e3f6b4b8c3c8d85bd504117d043740a8e0ec449087cc494", size = 7641168, upload-time = "2025-07-03T13:10:00.37Z" },
+ { url = "https://files.pythonhosted.org/packages/6e/db/839d6ba7fd38b51af641aa904e2960e7a5644d60ec754c046b7d2aee00e5/pillow-11.3.0-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:643f189248837533073c405ec2f0bb250ba54598cf80e8c1e043381a60632f58", size = 5973053, upload-time = "2025-07-01T09:14:04.491Z" },
+ { url = "https://files.pythonhosted.org/packages/f2/2f/d7675ecae6c43e9f12aa8d58b6012683b20b6edfbdac7abcb4e6af7a3784/pillow-11.3.0-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:106064daa23a745510dabce1d84f29137a37224831d88eb4ce94bb187b1d7e5f", size = 6640273, upload-time = "2025-07-01T09:14:06.235Z" },
+ { url = "https://files.pythonhosted.org/packages/45/ad/931694675ede172e15b2ff03c8144a0ddaea1d87adb72bb07655eaffb654/pillow-11.3.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:cd8ff254faf15591e724dc7c4ddb6bf4793efcbe13802a4ae3e863cd300b493e", size = 6082043, upload-time = "2025-07-01T09:14:07.978Z" },
+ { url = "https://files.pythonhosted.org/packages/3a/04/ba8f2b11fc80d2dd462d7abec16351b45ec99cbbaea4387648a44190351a/pillow-11.3.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:932c754c2d51ad2b2271fd01c3d121daaa35e27efae2a616f77bf164bc0b3e94", size = 6715516, upload-time = "2025-07-01T09:14:10.233Z" },
+ { url = "https://files.pythonhosted.org/packages/48/59/8cd06d7f3944cc7d892e8533c56b0acb68399f640786313275faec1e3b6f/pillow-11.3.0-cp311-cp311-win32.whl", hash = "sha256:b4b8f3efc8d530a1544e5962bd6b403d5f7fe8b9e08227c6b255f98ad82b4ba0", size = 6274768, upload-time = "2025-07-01T09:14:11.921Z" },
+ { url = "https://files.pythonhosted.org/packages/f1/cc/29c0f5d64ab8eae20f3232da8f8571660aa0ab4b8f1331da5c2f5f9a938e/pillow-11.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:1a992e86b0dd7aeb1f053cd506508c0999d710a8f07b4c791c63843fc6a807ac", size = 6986055, upload-time = "2025-07-01T09:14:13.623Z" },
+ { url = "https://files.pythonhosted.org/packages/c6/df/90bd886fabd544c25addd63e5ca6932c86f2b701d5da6c7839387a076b4a/pillow-11.3.0-cp311-cp311-win_arm64.whl", hash = "sha256:30807c931ff7c095620fe04448e2c2fc673fcbb1ffe2a7da3fb39613489b1ddd", size = 2423079, upload-time = "2025-07-01T09:14:15.268Z" },
+ { url = "https://files.pythonhosted.org/packages/40/fe/1bc9b3ee13f68487a99ac9529968035cca2f0a51ec36892060edcc51d06a/pillow-11.3.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:fdae223722da47b024b867c1ea0be64e0df702c5e0a60e27daad39bf960dd1e4", size = 5278800, upload-time = "2025-07-01T09:14:17.648Z" },
+ { url = "https://files.pythonhosted.org/packages/2c/32/7e2ac19b5713657384cec55f89065fb306b06af008cfd87e572035b27119/pillow-11.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:921bd305b10e82b4d1f5e802b6850677f965d8394203d182f078873851dada69", size = 4686296, upload-time = "2025-07-01T09:14:19.828Z" },
+ { url = "https://files.pythonhosted.org/packages/8e/1e/b9e12bbe6e4c2220effebc09ea0923a07a6da1e1f1bfbc8d7d29a01ce32b/pillow-11.3.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:eb76541cba2f958032d79d143b98a3a6b3ea87f0959bbe256c0b5e416599fd5d", size = 5871726, upload-time = "2025-07-03T13:10:04.448Z" },
+ { url = "https://files.pythonhosted.org/packages/8d/33/e9200d2bd7ba00dc3ddb78df1198a6e80d7669cce6c2bdbeb2530a74ec58/pillow-11.3.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:67172f2944ebba3d4a7b54f2e95c786a3a50c21b88456329314caaa28cda70f6", size = 7644652, upload-time = "2025-07-03T13:10:10.391Z" },
+ { url = "https://files.pythonhosted.org/packages/41/f1/6f2427a26fc683e00d985bc391bdd76d8dd4e92fac33d841127eb8fb2313/pillow-11.3.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:97f07ed9f56a3b9b5f49d3661dc9607484e85c67e27f3e8be2c7d28ca032fec7", size = 5977787, upload-time = "2025-07-01T09:14:21.63Z" },
+ { url = "https://files.pythonhosted.org/packages/e4/c9/06dd4a38974e24f932ff5f98ea3c546ce3f8c995d3f0985f8e5ba48bba19/pillow-11.3.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:676b2815362456b5b3216b4fd5bd89d362100dc6f4945154ff172e206a22c024", size = 6645236, upload-time = "2025-07-01T09:14:23.321Z" },
+ { url = "https://files.pythonhosted.org/packages/40/e7/848f69fb79843b3d91241bad658e9c14f39a32f71a301bcd1d139416d1be/pillow-11.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3e184b2f26ff146363dd07bde8b711833d7b0202e27d13540bfe2e35a323a809", size = 6086950, upload-time = "2025-07-01T09:14:25.237Z" },
+ { url = "https://files.pythonhosted.org/packages/0b/1a/7cff92e695a2a29ac1958c2a0fe4c0b2393b60aac13b04a4fe2735cad52d/pillow-11.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:6be31e3fc9a621e071bc17bb7de63b85cbe0bfae91bb0363c893cbe67247780d", size = 6723358, upload-time = "2025-07-01T09:14:27.053Z" },
+ { url = "https://files.pythonhosted.org/packages/26/7d/73699ad77895f69edff76b0f332acc3d497f22f5d75e5360f78cbcaff248/pillow-11.3.0-cp312-cp312-win32.whl", hash = "sha256:7b161756381f0918e05e7cb8a371fff367e807770f8fe92ecb20d905d0e1c149", size = 6275079, upload-time = "2025-07-01T09:14:30.104Z" },
+ { url = "https://files.pythonhosted.org/packages/8c/ce/e7dfc873bdd9828f3b6e5c2bbb74e47a98ec23cc5c74fc4e54462f0d9204/pillow-11.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:a6444696fce635783440b7f7a9fc24b3ad10a9ea3f0ab66c5905be1c19ccf17d", size = 6986324, upload-time = "2025-07-01T09:14:31.899Z" },
+ { url = "https://files.pythonhosted.org/packages/16/8f/b13447d1bf0b1f7467ce7d86f6e6edf66c0ad7cf44cf5c87a37f9bed9936/pillow-11.3.0-cp312-cp312-win_arm64.whl", hash = "sha256:2aceea54f957dd4448264f9bf40875da0415c83eb85f55069d89c0ed436e3542", size = 2423067, upload-time = "2025-07-01T09:14:33.709Z" },
+ { url = "https://files.pythonhosted.org/packages/1e/93/0952f2ed8db3a5a4c7a11f91965d6184ebc8cd7cbb7941a260d5f018cd2d/pillow-11.3.0-cp313-cp313-ios_13_0_arm64_iphoneos.whl", hash = "sha256:1c627742b539bba4309df89171356fcb3cc5a9178355b2727d1b74a6cf155fbd", size = 2128328, upload-time = "2025-07-01T09:14:35.276Z" },
+ { url = "https://files.pythonhosted.org/packages/4b/e8/100c3d114b1a0bf4042f27e0f87d2f25e857e838034e98ca98fe7b8c0a9c/pillow-11.3.0-cp313-cp313-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:30b7c02f3899d10f13d7a48163c8969e4e653f8b43416d23d13d1bbfdc93b9f8", size = 2170652, upload-time = "2025-07-01T09:14:37.203Z" },
+ { url = "https://files.pythonhosted.org/packages/aa/86/3f758a28a6e381758545f7cdb4942e1cb79abd271bea932998fc0db93cb6/pillow-11.3.0-cp313-cp313-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:7859a4cc7c9295f5838015d8cc0a9c215b77e43d07a25e460f35cf516df8626f", size = 2227443, upload-time = "2025-07-01T09:14:39.344Z" },
+ { url = "https://files.pythonhosted.org/packages/01/f4/91d5b3ffa718df2f53b0dc109877993e511f4fd055d7e9508682e8aba092/pillow-11.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ec1ee50470b0d050984394423d96325b744d55c701a439d2bd66089bff963d3c", size = 5278474, upload-time = "2025-07-01T09:14:41.843Z" },
+ { url = "https://files.pythonhosted.org/packages/f9/0e/37d7d3eca6c879fbd9dba21268427dffda1ab00d4eb05b32923d4fbe3b12/pillow-11.3.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7db51d222548ccfd274e4572fdbf3e810a5e66b00608862f947b163e613b67dd", size = 4686038, upload-time = "2025-07-01T09:14:44.008Z" },
+ { url = "https://files.pythonhosted.org/packages/ff/b0/3426e5c7f6565e752d81221af9d3676fdbb4f352317ceafd42899aaf5d8a/pillow-11.3.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:2d6fcc902a24ac74495df63faad1884282239265c6839a0a6416d33faedfae7e", size = 5864407, upload-time = "2025-07-03T13:10:15.628Z" },
+ { url = "https://files.pythonhosted.org/packages/fc/c1/c6c423134229f2a221ee53f838d4be9d82bab86f7e2f8e75e47b6bf6cd77/pillow-11.3.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f0f5d8f4a08090c6d6d578351a2b91acf519a54986c055af27e7a93feae6d3f1", size = 7639094, upload-time = "2025-07-03T13:10:21.857Z" },
+ { url = "https://files.pythonhosted.org/packages/ba/c9/09e6746630fe6372c67c648ff9deae52a2bc20897d51fa293571977ceb5d/pillow-11.3.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c37d8ba9411d6003bba9e518db0db0c58a680ab9fe5179f040b0463644bc9805", size = 5973503, upload-time = "2025-07-01T09:14:45.698Z" },
+ { url = "https://files.pythonhosted.org/packages/d5/1c/a2a29649c0b1983d3ef57ee87a66487fdeb45132df66ab30dd37f7dbe162/pillow-11.3.0-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:13f87d581e71d9189ab21fe0efb5a23e9f28552d5be6979e84001d3b8505abe8", size = 6642574, upload-time = "2025-07-01T09:14:47.415Z" },
+ { url = "https://files.pythonhosted.org/packages/36/de/d5cc31cc4b055b6c6fd990e3e7f0f8aaf36229a2698501bcb0cdf67c7146/pillow-11.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:023f6d2d11784a465f09fd09a34b150ea4672e85fb3d05931d89f373ab14abb2", size = 6084060, upload-time = "2025-07-01T09:14:49.636Z" },
+ { url = "https://files.pythonhosted.org/packages/d5/ea/502d938cbaeec836ac28a9b730193716f0114c41325db428e6b280513f09/pillow-11.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:45dfc51ac5975b938e9809451c51734124e73b04d0f0ac621649821a63852e7b", size = 6721407, upload-time = "2025-07-01T09:14:51.962Z" },
+ { url = "https://files.pythonhosted.org/packages/45/9c/9c5e2a73f125f6cbc59cc7087c8f2d649a7ae453f83bd0362ff7c9e2aee2/pillow-11.3.0-cp313-cp313-win32.whl", hash = "sha256:a4d336baed65d50d37b88ca5b60c0fa9d81e3a87d4a7930d3880d1624d5b31f3", size = 6273841, upload-time = "2025-07-01T09:14:54.142Z" },
+ { url = "https://files.pythonhosted.org/packages/23/85/397c73524e0cd212067e0c969aa245b01d50183439550d24d9f55781b776/pillow-11.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:0bce5c4fd0921f99d2e858dc4d4d64193407e1b99478bc5cacecba2311abde51", size = 6978450, upload-time = "2025-07-01T09:14:56.436Z" },
+ { url = "https://files.pythonhosted.org/packages/17/d2/622f4547f69cd173955194b78e4d19ca4935a1b0f03a302d655c9f6aae65/pillow-11.3.0-cp313-cp313-win_arm64.whl", hash = "sha256:1904e1264881f682f02b7f8167935cce37bc97db457f8e7849dc3a6a52b99580", size = 2423055, upload-time = "2025-07-01T09:14:58.072Z" },
+ { url = "https://files.pythonhosted.org/packages/dd/80/a8a2ac21dda2e82480852978416cfacd439a4b490a501a288ecf4fe2532d/pillow-11.3.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:4c834a3921375c48ee6b9624061076bc0a32a60b5532b322cc0ea64e639dd50e", size = 5281110, upload-time = "2025-07-01T09:14:59.79Z" },
+ { url = "https://files.pythonhosted.org/packages/44/d6/b79754ca790f315918732e18f82a8146d33bcd7f4494380457ea89eb883d/pillow-11.3.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:5e05688ccef30ea69b9317a9ead994b93975104a677a36a8ed8106be9260aa6d", size = 4689547, upload-time = "2025-07-01T09:15:01.648Z" },
+ { url = "https://files.pythonhosted.org/packages/49/20/716b8717d331150cb00f7fdd78169c01e8e0c219732a78b0e59b6bdb2fd6/pillow-11.3.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:1019b04af07fc0163e2810167918cb5add8d74674b6267616021ab558dc98ced", size = 5901554, upload-time = "2025-07-03T13:10:27.018Z" },
+ { url = "https://files.pythonhosted.org/packages/74/cf/a9f3a2514a65bb071075063a96f0a5cf949c2f2fce683c15ccc83b1c1cab/pillow-11.3.0-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f944255db153ebb2b19c51fe85dd99ef0ce494123f21b9db4877ffdfc5590c7c", size = 7669132, upload-time = "2025-07-03T13:10:33.01Z" },
+ { url = "https://files.pythonhosted.org/packages/98/3c/da78805cbdbee9cb43efe8261dd7cc0b4b93f2ac79b676c03159e9db2187/pillow-11.3.0-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1f85acb69adf2aaee8b7da124efebbdb959a104db34d3a2cb0f3793dbae422a8", size = 6005001, upload-time = "2025-07-01T09:15:03.365Z" },
+ { url = "https://files.pythonhosted.org/packages/6c/fa/ce044b91faecf30e635321351bba32bab5a7e034c60187fe9698191aef4f/pillow-11.3.0-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:05f6ecbeff5005399bb48d198f098a9b4b6bdf27b8487c7f38ca16eeb070cd59", size = 6668814, upload-time = "2025-07-01T09:15:05.655Z" },
+ { url = "https://files.pythonhosted.org/packages/7b/51/90f9291406d09bf93686434f9183aba27b831c10c87746ff49f127ee80cb/pillow-11.3.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:a7bc6e6fd0395bc052f16b1a8670859964dbd7003bd0af2ff08342eb6e442cfe", size = 6113124, upload-time = "2025-07-01T09:15:07.358Z" },
+ { url = "https://files.pythonhosted.org/packages/cd/5a/6fec59b1dfb619234f7636d4157d11fb4e196caeee220232a8d2ec48488d/pillow-11.3.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:83e1b0161c9d148125083a35c1c5a89db5b7054834fd4387499e06552035236c", size = 6747186, upload-time = "2025-07-01T09:15:09.317Z" },
+ { url = "https://files.pythonhosted.org/packages/49/6b/00187a044f98255225f172de653941e61da37104a9ea60e4f6887717e2b5/pillow-11.3.0-cp313-cp313t-win32.whl", hash = "sha256:2a3117c06b8fb646639dce83694f2f9eac405472713fcb1ae887469c0d4f6788", size = 6277546, upload-time = "2025-07-01T09:15:11.311Z" },
+ { url = "https://files.pythonhosted.org/packages/e8/5c/6caaba7e261c0d75bab23be79f1d06b5ad2a2ae49f028ccec801b0e853d6/pillow-11.3.0-cp313-cp313t-win_amd64.whl", hash = "sha256:857844335c95bea93fb39e0fa2726b4d9d758850b34075a7e3ff4f4fa3aa3b31", size = 6985102, upload-time = "2025-07-01T09:15:13.164Z" },
+ { url = "https://files.pythonhosted.org/packages/f3/7e/b623008460c09a0cb38263c93b828c666493caee2eb34ff67f778b87e58c/pillow-11.3.0-cp313-cp313t-win_arm64.whl", hash = "sha256:8797edc41f3e8536ae4b10897ee2f637235c94f27404cac7297f7b607dd0716e", size = 2424803, upload-time = "2025-07-01T09:15:15.695Z" },
+ { url = "https://files.pythonhosted.org/packages/6f/8b/209bd6b62ce8367f47e68a218bffac88888fdf2c9fcf1ecadc6c3ec1ebc7/pillow-11.3.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:3cee80663f29e3843b68199b9d6f4f54bd1d4a6b59bdd91bceefc51238bcb967", size = 5270556, upload-time = "2025-07-01T09:16:09.961Z" },
+ { url = "https://files.pythonhosted.org/packages/2e/e6/231a0b76070c2cfd9e260a7a5b504fb72da0a95279410fa7afd99d9751d6/pillow-11.3.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:b5f56c3f344f2ccaf0dd875d3e180f631dc60a51b314295a3e681fe8cf851fbe", size = 4654625, upload-time = "2025-07-01T09:16:11.913Z" },
+ { url = "https://files.pythonhosted.org/packages/13/f4/10cf94fda33cb12765f2397fc285fa6d8eb9c29de7f3185165b702fc7386/pillow-11.3.0-pp310-pypy310_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:e67d793d180c9df62f1f40aee3accca4829d3794c95098887edc18af4b8b780c", size = 4874207, upload-time = "2025-07-03T13:11:10.201Z" },
+ { url = "https://files.pythonhosted.org/packages/72/c9/583821097dc691880c92892e8e2d41fe0a5a3d6021f4963371d2f6d57250/pillow-11.3.0-pp310-pypy310_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d000f46e2917c705e9fb93a3606ee4a819d1e3aa7a9b442f6444f07e77cf5e25", size = 6583939, upload-time = "2025-07-03T13:11:15.68Z" },
+ { url = "https://files.pythonhosted.org/packages/3b/8e/5c9d410f9217b12320efc7c413e72693f48468979a013ad17fd690397b9a/pillow-11.3.0-pp310-pypy310_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:527b37216b6ac3a12d7838dc3bd75208ec57c1c6d11ef01902266a5a0c14fc27", size = 4957166, upload-time = "2025-07-01T09:16:13.74Z" },
+ { url = "https://files.pythonhosted.org/packages/62/bb/78347dbe13219991877ffb3a91bf09da8317fbfcd4b5f9140aeae020ad71/pillow-11.3.0-pp310-pypy310_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:be5463ac478b623b9dd3937afd7fb7ab3d79dd290a28e2b6df292dc75063eb8a", size = 5581482, upload-time = "2025-07-01T09:16:16.107Z" },
+ { url = "https://files.pythonhosted.org/packages/d9/28/1000353d5e61498aaeaaf7f1e4b49ddb05f2c6575f9d4f9f914a3538b6e1/pillow-11.3.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:8dc70ca24c110503e16918a658b869019126ecfe03109b754c402daff12b3d9f", size = 6984596, upload-time = "2025-07-01T09:16:18.07Z" },
+ { url = "https://files.pythonhosted.org/packages/9e/e3/6fa84033758276fb31da12e5fb66ad747ae83b93c67af17f8c6ff4cc8f34/pillow-11.3.0-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:7c8ec7a017ad1bd562f93dbd8505763e688d388cde6e4a010ae1486916e713e6", size = 5270566, upload-time = "2025-07-01T09:16:19.801Z" },
+ { url = "https://files.pythonhosted.org/packages/5b/ee/e8d2e1ab4892970b561e1ba96cbd59c0d28cf66737fc44abb2aec3795a4e/pillow-11.3.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:9ab6ae226de48019caa8074894544af5b53a117ccb9d3b3dcb2871464c829438", size = 4654618, upload-time = "2025-07-01T09:16:21.818Z" },
+ { url = "https://files.pythonhosted.org/packages/f2/6d/17f80f4e1f0761f02160fc433abd4109fa1548dcfdca46cfdadaf9efa565/pillow-11.3.0-pp311-pypy311_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:fe27fb049cdcca11f11a7bfda64043c37b30e6b91f10cb5bab275806c32f6ab3", size = 4874248, upload-time = "2025-07-03T13:11:20.738Z" },
+ { url = "https://files.pythonhosted.org/packages/de/5f/c22340acd61cef960130585bbe2120e2fd8434c214802f07e8c03596b17e/pillow-11.3.0-pp311-pypy311_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:465b9e8844e3c3519a983d58b80be3f668e2a7a5db97f2784e7079fbc9f9822c", size = 6583963, upload-time = "2025-07-03T13:11:26.283Z" },
+ { url = "https://files.pythonhosted.org/packages/31/5e/03966aedfbfcbb4d5f8aa042452d3361f325b963ebbadddac05b122e47dd/pillow-11.3.0-pp311-pypy311_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5418b53c0d59b3824d05e029669efa023bbef0f3e92e75ec8428f3799487f361", size = 4957170, upload-time = "2025-07-01T09:16:23.762Z" },
+ { url = "https://files.pythonhosted.org/packages/cc/2d/e082982aacc927fc2cab48e1e731bdb1643a1406acace8bed0900a61464e/pillow-11.3.0-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:504b6f59505f08ae014f724b6207ff6222662aab5cc9542577fb084ed0676ac7", size = 5581505, upload-time = "2025-07-01T09:16:25.593Z" },
+ { url = "https://files.pythonhosted.org/packages/34/e7/ae39f538fd6844e982063c3a5e4598b8ced43b9633baa3a85ef33af8c05c/pillow-11.3.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:c84d689db21a1c397d001aa08241044aa2069e7587b398c8cc63020390b1c1b8", size = 6984598, upload-time = "2025-07-01T09:16:27.732Z" },
+]
+
+[[package]]
+name = "platformdirs"
+version = "4.4.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/23/e8/21db9c9987b0e728855bd57bff6984f67952bea55d6f75e055c46b5383e8/platformdirs-4.4.0.tar.gz", hash = "sha256:ca753cf4d81dc309bc67b0ea38fd15dc97bc30ce419a7f58d13eb3bf14c4febf", size = 21634, upload-time = "2025-08-26T14:32:04.268Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/40/4b/2028861e724d3bd36227adfa20d3fd24c3fc6d52032f4a93c133be5d17ce/platformdirs-4.4.0-py3-none-any.whl", hash = "sha256:abd01743f24e5287cd7a5db3752faf1a2d65353f38ec26d98e25a6db65958c85", size = 18654, upload-time = "2025-08-26T14:32:02.735Z" },
+]
+
+[[package]]
+name = "pluggy"
+version = "1.6.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/f9/e2/3e91f31a7d2b083fe6ef3fa267035b518369d9511ffab804f839851d2779/pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3", size = 69412, upload-time = "2025-05-15T12:30:07.975Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/54/20/4d324d65cc6d9205fabedc306948156824eb9f0ee1633355a8f7ec5c66bf/pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746", size = 20538, upload-time = "2025-05-15T12:30:06.134Z" },
+]
+
+[[package]]
+name = "pydantic"
+version = "2.11.7"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "annotated-types" },
+ { name = "pydantic-core" },
+ { name = "typing-extensions" },
+ { name = "typing-inspection" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/00/dd/4325abf92c39ba8623b5af936ddb36ffcfe0beae70405d456ab1fb2f5b8c/pydantic-2.11.7.tar.gz", hash = "sha256:d989c3c6cb79469287b1569f7447a17848c998458d49ebe294e975b9baf0f0db", size = 788350, upload-time = "2025-06-14T08:33:17.137Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/6a/c0/ec2b1c8712ca690e5d61979dee872603e92b8a32f94cc1b72d53beab008a/pydantic-2.11.7-py3-none-any.whl", hash = "sha256:dde5df002701f6de26248661f6835bbe296a47bf73990135c7d07ce741b9623b", size = 444782, upload-time = "2025-06-14T08:33:14.905Z" },
+]
+
+[[package]]
+name = "pydantic-core"
+version = "2.33.2"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "typing-extensions" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/ad/88/5f2260bdfae97aabf98f1778d43f69574390ad787afb646292a638c923d4/pydantic_core-2.33.2.tar.gz", hash = "sha256:7cb8bc3605c29176e1b105350d2e6474142d7c1bd1d9327c4a9bdb46bf827acc", size = 435195, upload-time = "2025-04-23T18:33:52.104Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/e5/92/b31726561b5dae176c2d2c2dc43a9c5bfba5d32f96f8b4c0a600dd492447/pydantic_core-2.33.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2b3d326aaef0c0399d9afffeb6367d5e26ddc24d351dbc9c636840ac355dc5d8", size = 2028817, upload-time = "2025-04-23T18:30:43.919Z" },
+ { url = "https://files.pythonhosted.org/packages/a3/44/3f0b95fafdaca04a483c4e685fe437c6891001bf3ce8b2fded82b9ea3aa1/pydantic_core-2.33.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0e5b2671f05ba48b94cb90ce55d8bdcaaedb8ba00cc5359f6810fc918713983d", size = 1861357, upload-time = "2025-04-23T18:30:46.372Z" },
+ { url = "https://files.pythonhosted.org/packages/30/97/e8f13b55766234caae05372826e8e4b3b96e7b248be3157f53237682e43c/pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0069c9acc3f3981b9ff4cdfaf088e98d83440a4c7ea1bc07460af3d4dc22e72d", size = 1898011, upload-time = "2025-04-23T18:30:47.591Z" },
+ { url = "https://files.pythonhosted.org/packages/9b/a3/99c48cf7bafc991cc3ee66fd544c0aae8dc907b752f1dad2d79b1b5a471f/pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d53b22f2032c42eaaf025f7c40c2e3b94568ae077a606f006d206a463bc69572", size = 1982730, upload-time = "2025-04-23T18:30:49.328Z" },
+ { url = "https://files.pythonhosted.org/packages/de/8e/a5b882ec4307010a840fb8b58bd9bf65d1840c92eae7534c7441709bf54b/pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0405262705a123b7ce9f0b92f123334d67b70fd1f20a9372b907ce1080c7ba02", size = 2136178, upload-time = "2025-04-23T18:30:50.907Z" },
+ { url = "https://files.pythonhosted.org/packages/e4/bb/71e35fc3ed05af6834e890edb75968e2802fe98778971ab5cba20a162315/pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4b25d91e288e2c4e0662b8038a28c6a07eaac3e196cfc4ff69de4ea3db992a1b", size = 2736462, upload-time = "2025-04-23T18:30:52.083Z" },
+ { url = "https://files.pythonhosted.org/packages/31/0d/c8f7593e6bc7066289bbc366f2235701dcbebcd1ff0ef8e64f6f239fb47d/pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6bdfe4b3789761f3bcb4b1ddf33355a71079858958e3a552f16d5af19768fef2", size = 2005652, upload-time = "2025-04-23T18:30:53.389Z" },
+ { url = "https://files.pythonhosted.org/packages/d2/7a/996d8bd75f3eda405e3dd219ff5ff0a283cd8e34add39d8ef9157e722867/pydantic_core-2.33.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:efec8db3266b76ef9607c2c4c419bdb06bf335ae433b80816089ea7585816f6a", size = 2113306, upload-time = "2025-04-23T18:30:54.661Z" },
+ { url = "https://files.pythonhosted.org/packages/ff/84/daf2a6fb2db40ffda6578a7e8c5a6e9c8affb251a05c233ae37098118788/pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:031c57d67ca86902726e0fae2214ce6770bbe2f710dc33063187a68744a5ecac", size = 2073720, upload-time = "2025-04-23T18:30:56.11Z" },
+ { url = "https://files.pythonhosted.org/packages/77/fb/2258da019f4825128445ae79456a5499c032b55849dbd5bed78c95ccf163/pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:f8de619080e944347f5f20de29a975c2d815d9ddd8be9b9b7268e2e3ef68605a", size = 2244915, upload-time = "2025-04-23T18:30:57.501Z" },
+ { url = "https://files.pythonhosted.org/packages/d8/7a/925ff73756031289468326e355b6fa8316960d0d65f8b5d6b3a3e7866de7/pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:73662edf539e72a9440129f231ed3757faab89630d291b784ca99237fb94db2b", size = 2241884, upload-time = "2025-04-23T18:30:58.867Z" },
+ { url = "https://files.pythonhosted.org/packages/0b/b0/249ee6d2646f1cdadcb813805fe76265745c4010cf20a8eba7b0e639d9b2/pydantic_core-2.33.2-cp310-cp310-win32.whl", hash = "sha256:0a39979dcbb70998b0e505fb1556a1d550a0781463ce84ebf915ba293ccb7e22", size = 1910496, upload-time = "2025-04-23T18:31:00.078Z" },
+ { url = "https://files.pythonhosted.org/packages/66/ff/172ba8f12a42d4b552917aa65d1f2328990d3ccfc01d5b7c943ec084299f/pydantic_core-2.33.2-cp310-cp310-win_amd64.whl", hash = "sha256:b0379a2b24882fef529ec3b4987cb5d003b9cda32256024e6fe1586ac45fc640", size = 1955019, upload-time = "2025-04-23T18:31:01.335Z" },
+ { url = "https://files.pythonhosted.org/packages/3f/8d/71db63483d518cbbf290261a1fc2839d17ff89fce7089e08cad07ccfce67/pydantic_core-2.33.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:4c5b0a576fb381edd6d27f0a85915c6daf2f8138dc5c267a57c08a62900758c7", size = 2028584, upload-time = "2025-04-23T18:31:03.106Z" },
+ { url = "https://files.pythonhosted.org/packages/24/2f/3cfa7244ae292dd850989f328722d2aef313f74ffc471184dc509e1e4e5a/pydantic_core-2.33.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e799c050df38a639db758c617ec771fd8fb7a5f8eaaa4b27b101f266b216a246", size = 1855071, upload-time = "2025-04-23T18:31:04.621Z" },
+ { url = "https://files.pythonhosted.org/packages/b3/d3/4ae42d33f5e3f50dd467761304be2fa0a9417fbf09735bc2cce003480f2a/pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dc46a01bf8d62f227d5ecee74178ffc448ff4e5197c756331f71efcc66dc980f", size = 1897823, upload-time = "2025-04-23T18:31:06.377Z" },
+ { url = "https://files.pythonhosted.org/packages/f4/f3/aa5976e8352b7695ff808599794b1fba2a9ae2ee954a3426855935799488/pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a144d4f717285c6d9234a66778059f33a89096dfb9b39117663fd8413d582dcc", size = 1983792, upload-time = "2025-04-23T18:31:07.93Z" },
+ { url = "https://files.pythonhosted.org/packages/d5/7a/cda9b5a23c552037717f2b2a5257e9b2bfe45e687386df9591eff7b46d28/pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:73cf6373c21bc80b2e0dc88444f41ae60b2f070ed02095754eb5a01df12256de", size = 2136338, upload-time = "2025-04-23T18:31:09.283Z" },
+ { url = "https://files.pythonhosted.org/packages/2b/9f/b8f9ec8dd1417eb9da784e91e1667d58a2a4a7b7b34cf4af765ef663a7e5/pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3dc625f4aa79713512d1976fe9f0bc99f706a9dee21dfd1810b4bbbf228d0e8a", size = 2730998, upload-time = "2025-04-23T18:31:11.7Z" },
+ { url = "https://files.pythonhosted.org/packages/47/bc/cd720e078576bdb8255d5032c5d63ee5c0bf4b7173dd955185a1d658c456/pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:881b21b5549499972441da4758d662aeea93f1923f953e9cbaff14b8b9565aef", size = 2003200, upload-time = "2025-04-23T18:31:13.536Z" },
+ { url = "https://files.pythonhosted.org/packages/ca/22/3602b895ee2cd29d11a2b349372446ae9727c32e78a94b3d588a40fdf187/pydantic_core-2.33.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bdc25f3681f7b78572699569514036afe3c243bc3059d3942624e936ec93450e", size = 2113890, upload-time = "2025-04-23T18:31:15.011Z" },
+ { url = "https://files.pythonhosted.org/packages/ff/e6/e3c5908c03cf00d629eb38393a98fccc38ee0ce8ecce32f69fc7d7b558a7/pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:fe5b32187cbc0c862ee201ad66c30cf218e5ed468ec8dc1cf49dec66e160cc4d", size = 2073359, upload-time = "2025-04-23T18:31:16.393Z" },
+ { url = "https://files.pythonhosted.org/packages/12/e7/6a36a07c59ebefc8777d1ffdaf5ae71b06b21952582e4b07eba88a421c79/pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:bc7aee6f634a6f4a95676fcb5d6559a2c2a390330098dba5e5a5f28a2e4ada30", size = 2245883, upload-time = "2025-04-23T18:31:17.892Z" },
+ { url = "https://files.pythonhosted.org/packages/16/3f/59b3187aaa6cc0c1e6616e8045b284de2b6a87b027cce2ffcea073adf1d2/pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:235f45e5dbcccf6bd99f9f472858849f73d11120d76ea8707115415f8e5ebebf", size = 2241074, upload-time = "2025-04-23T18:31:19.205Z" },
+ { url = "https://files.pythonhosted.org/packages/e0/ed/55532bb88f674d5d8f67ab121a2a13c385df382de2a1677f30ad385f7438/pydantic_core-2.33.2-cp311-cp311-win32.whl", hash = "sha256:6368900c2d3ef09b69cb0b913f9f8263b03786e5b2a387706c5afb66800efd51", size = 1910538, upload-time = "2025-04-23T18:31:20.541Z" },
+ { url = "https://files.pythonhosted.org/packages/fe/1b/25b7cccd4519c0b23c2dd636ad39d381abf113085ce4f7bec2b0dc755eb1/pydantic_core-2.33.2-cp311-cp311-win_amd64.whl", hash = "sha256:1e063337ef9e9820c77acc768546325ebe04ee38b08703244c1309cccc4f1bab", size = 1952909, upload-time = "2025-04-23T18:31:22.371Z" },
+ { url = "https://files.pythonhosted.org/packages/49/a9/d809358e49126438055884c4366a1f6227f0f84f635a9014e2deb9b9de54/pydantic_core-2.33.2-cp311-cp311-win_arm64.whl", hash = "sha256:6b99022f1d19bc32a4c2a0d544fc9a76e3be90f0b3f4af413f87d38749300e65", size = 1897786, upload-time = "2025-04-23T18:31:24.161Z" },
+ { url = "https://files.pythonhosted.org/packages/18/8a/2b41c97f554ec8c71f2a8a5f85cb56a8b0956addfe8b0efb5b3d77e8bdc3/pydantic_core-2.33.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:a7ec89dc587667f22b6a0b6579c249fca9026ce7c333fc142ba42411fa243cdc", size = 2009000, upload-time = "2025-04-23T18:31:25.863Z" },
+ { url = "https://files.pythonhosted.org/packages/a1/02/6224312aacb3c8ecbaa959897af57181fb6cf3a3d7917fd44d0f2917e6f2/pydantic_core-2.33.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3c6db6e52c6d70aa0d00d45cdb9b40f0433b96380071ea80b09277dba021ddf7", size = 1847996, upload-time = "2025-04-23T18:31:27.341Z" },
+ { url = "https://files.pythonhosted.org/packages/d6/46/6dcdf084a523dbe0a0be59d054734b86a981726f221f4562aed313dbcb49/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e61206137cbc65e6d5256e1166f88331d3b6238e082d9f74613b9b765fb9025", size = 1880957, upload-time = "2025-04-23T18:31:28.956Z" },
+ { url = "https://files.pythonhosted.org/packages/ec/6b/1ec2c03837ac00886ba8160ce041ce4e325b41d06a034adbef11339ae422/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eb8c529b2819c37140eb51b914153063d27ed88e3bdc31b71198a198e921e011", size = 1964199, upload-time = "2025-04-23T18:31:31.025Z" },
+ { url = "https://files.pythonhosted.org/packages/2d/1d/6bf34d6adb9debd9136bd197ca72642203ce9aaaa85cfcbfcf20f9696e83/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c52b02ad8b4e2cf14ca7b3d918f3eb0ee91e63b3167c32591e57c4317e134f8f", size = 2120296, upload-time = "2025-04-23T18:31:32.514Z" },
+ { url = "https://files.pythonhosted.org/packages/e0/94/2bd0aaf5a591e974b32a9f7123f16637776c304471a0ab33cf263cf5591a/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:96081f1605125ba0855dfda83f6f3df5ec90c61195421ba72223de35ccfb2f88", size = 2676109, upload-time = "2025-04-23T18:31:33.958Z" },
+ { url = "https://files.pythonhosted.org/packages/f9/41/4b043778cf9c4285d59742281a769eac371b9e47e35f98ad321349cc5d61/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f57a69461af2a5fa6e6bbd7a5f60d3b7e6cebb687f55106933188e79ad155c1", size = 2002028, upload-time = "2025-04-23T18:31:39.095Z" },
+ { url = "https://files.pythonhosted.org/packages/cb/d5/7bb781bf2748ce3d03af04d5c969fa1308880e1dca35a9bd94e1a96a922e/pydantic_core-2.33.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:572c7e6c8bb4774d2ac88929e3d1f12bc45714ae5ee6d9a788a9fb35e60bb04b", size = 2100044, upload-time = "2025-04-23T18:31:41.034Z" },
+ { url = "https://files.pythonhosted.org/packages/fe/36/def5e53e1eb0ad896785702a5bbfd25eed546cdcf4087ad285021a90ed53/pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:db4b41f9bd95fbe5acd76d89920336ba96f03e149097365afe1cb092fceb89a1", size = 2058881, upload-time = "2025-04-23T18:31:42.757Z" },
+ { url = "https://files.pythonhosted.org/packages/01/6c/57f8d70b2ee57fc3dc8b9610315949837fa8c11d86927b9bb044f8705419/pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:fa854f5cf7e33842a892e5c73f45327760bc7bc516339fda888c75ae60edaeb6", size = 2227034, upload-time = "2025-04-23T18:31:44.304Z" },
+ { url = "https://files.pythonhosted.org/packages/27/b9/9c17f0396a82b3d5cbea4c24d742083422639e7bb1d5bf600e12cb176a13/pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:5f483cfb75ff703095c59e365360cb73e00185e01aaea067cd19acffd2ab20ea", size = 2234187, upload-time = "2025-04-23T18:31:45.891Z" },
+ { url = "https://files.pythonhosted.org/packages/b0/6a/adf5734ffd52bf86d865093ad70b2ce543415e0e356f6cacabbc0d9ad910/pydantic_core-2.33.2-cp312-cp312-win32.whl", hash = "sha256:9cb1da0f5a471435a7bc7e439b8a728e8b61e59784b2af70d7c169f8dd8ae290", size = 1892628, upload-time = "2025-04-23T18:31:47.819Z" },
+ { url = "https://files.pythonhosted.org/packages/43/e4/5479fecb3606c1368d496a825d8411e126133c41224c1e7238be58b87d7e/pydantic_core-2.33.2-cp312-cp312-win_amd64.whl", hash = "sha256:f941635f2a3d96b2973e867144fde513665c87f13fe0e193c158ac51bfaaa7b2", size = 1955866, upload-time = "2025-04-23T18:31:49.635Z" },
+ { url = "https://files.pythonhosted.org/packages/0d/24/8b11e8b3e2be9dd82df4b11408a67c61bb4dc4f8e11b5b0fc888b38118b5/pydantic_core-2.33.2-cp312-cp312-win_arm64.whl", hash = "sha256:cca3868ddfaccfbc4bfb1d608e2ccaaebe0ae628e1416aeb9c4d88c001bb45ab", size = 1888894, upload-time = "2025-04-23T18:31:51.609Z" },
+ { url = "https://files.pythonhosted.org/packages/46/8c/99040727b41f56616573a28771b1bfa08a3d3fe74d3d513f01251f79f172/pydantic_core-2.33.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:1082dd3e2d7109ad8b7da48e1d4710c8d06c253cbc4a27c1cff4fbcaa97a9e3f", size = 2015688, upload-time = "2025-04-23T18:31:53.175Z" },
+ { url = "https://files.pythonhosted.org/packages/3a/cc/5999d1eb705a6cefc31f0b4a90e9f7fc400539b1a1030529700cc1b51838/pydantic_core-2.33.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f517ca031dfc037a9c07e748cefd8d96235088b83b4f4ba8939105d20fa1dcd6", size = 1844808, upload-time = "2025-04-23T18:31:54.79Z" },
+ { url = "https://files.pythonhosted.org/packages/6f/5e/a0a7b8885c98889a18b6e376f344da1ef323d270b44edf8174d6bce4d622/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a9f2c9dd19656823cb8250b0724ee9c60a82f3cdf68a080979d13092a3b0fef", size = 1885580, upload-time = "2025-04-23T18:31:57.393Z" },
+ { url = "https://files.pythonhosted.org/packages/3b/2a/953581f343c7d11a304581156618c3f592435523dd9d79865903272c256a/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2b0a451c263b01acebe51895bfb0e1cc842a5c666efe06cdf13846c7418caa9a", size = 1973859, upload-time = "2025-04-23T18:31:59.065Z" },
+ { url = "https://files.pythonhosted.org/packages/e6/55/f1a813904771c03a3f97f676c62cca0c0a4138654107c1b61f19c644868b/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ea40a64d23faa25e62a70ad163571c0b342b8bf66d5fa612ac0dec4f069d916", size = 2120810, upload-time = "2025-04-23T18:32:00.78Z" },
+ { url = "https://files.pythonhosted.org/packages/aa/c3/053389835a996e18853ba107a63caae0b9deb4a276c6b472931ea9ae6e48/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0fb2d542b4d66f9470e8065c5469ec676978d625a8b7a363f07d9a501a9cb36a", size = 2676498, upload-time = "2025-04-23T18:32:02.418Z" },
+ { url = "https://files.pythonhosted.org/packages/eb/3c/f4abd740877a35abade05e437245b192f9d0ffb48bbbbd708df33d3cda37/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdac5d6ffa1b5a83bca06ffe7583f5576555e6c8b3a91fbd25ea7780f825f7d", size = 2000611, upload-time = "2025-04-23T18:32:04.152Z" },
+ { url = "https://files.pythonhosted.org/packages/59/a7/63ef2fed1837d1121a894d0ce88439fe3e3b3e48c7543b2a4479eb99c2bd/pydantic_core-2.33.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:04a1a413977ab517154eebb2d326da71638271477d6ad87a769102f7c2488c56", size = 2107924, upload-time = "2025-04-23T18:32:06.129Z" },
+ { url = "https://files.pythonhosted.org/packages/04/8f/2551964ef045669801675f1cfc3b0d74147f4901c3ffa42be2ddb1f0efc4/pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:c8e7af2f4e0194c22b5b37205bfb293d166a7344a5b0d0eaccebc376546d77d5", size = 2063196, upload-time = "2025-04-23T18:32:08.178Z" },
+ { url = "https://files.pythonhosted.org/packages/26/bd/d9602777e77fc6dbb0c7db9ad356e9a985825547dce5ad1d30ee04903918/pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:5c92edd15cd58b3c2d34873597a1e20f13094f59cf88068adb18947df5455b4e", size = 2236389, upload-time = "2025-04-23T18:32:10.242Z" },
+ { url = "https://files.pythonhosted.org/packages/42/db/0e950daa7e2230423ab342ae918a794964b053bec24ba8af013fc7c94846/pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:65132b7b4a1c0beded5e057324b7e16e10910c106d43675d9bd87d4f38dde162", size = 2239223, upload-time = "2025-04-23T18:32:12.382Z" },
+ { url = "https://files.pythonhosted.org/packages/58/4d/4f937099c545a8a17eb52cb67fe0447fd9a373b348ccfa9a87f141eeb00f/pydantic_core-2.33.2-cp313-cp313-win32.whl", hash = "sha256:52fb90784e0a242bb96ec53f42196a17278855b0f31ac7c3cc6f5c1ec4811849", size = 1900473, upload-time = "2025-04-23T18:32:14.034Z" },
+ { url = "https://files.pythonhosted.org/packages/a0/75/4a0a9bac998d78d889def5e4ef2b065acba8cae8c93696906c3a91f310ca/pydantic_core-2.33.2-cp313-cp313-win_amd64.whl", hash = "sha256:c083a3bdd5a93dfe480f1125926afcdbf2917ae714bdb80b36d34318b2bec5d9", size = 1955269, upload-time = "2025-04-23T18:32:15.783Z" },
+ { url = "https://files.pythonhosted.org/packages/f9/86/1beda0576969592f1497b4ce8e7bc8cbdf614c352426271b1b10d5f0aa64/pydantic_core-2.33.2-cp313-cp313-win_arm64.whl", hash = "sha256:e80b087132752f6b3d714f041ccf74403799d3b23a72722ea2e6ba2e892555b9", size = 1893921, upload-time = "2025-04-23T18:32:18.473Z" },
+ { url = "https://files.pythonhosted.org/packages/a4/7d/e09391c2eebeab681df2b74bfe6c43422fffede8dc74187b2b0bf6fd7571/pydantic_core-2.33.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:61c18fba8e5e9db3ab908620af374db0ac1baa69f0f32df4f61ae23f15e586ac", size = 1806162, upload-time = "2025-04-23T18:32:20.188Z" },
+ { url = "https://files.pythonhosted.org/packages/f1/3d/847b6b1fed9f8ed3bb95a9ad04fbd0b212e832d4f0f50ff4d9ee5a9f15cf/pydantic_core-2.33.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95237e53bb015f67b63c91af7518a62a8660376a6a0db19b89acc77a4d6199f5", size = 1981560, upload-time = "2025-04-23T18:32:22.354Z" },
+ { url = "https://files.pythonhosted.org/packages/6f/9a/e73262f6c6656262b5fdd723ad90f518f579b7bc8622e43a942eec53c938/pydantic_core-2.33.2-cp313-cp313t-win_amd64.whl", hash = "sha256:c2fc0a768ef76c15ab9238afa6da7f69895bb5d1ee83aeea2e3509af4472d0b9", size = 1935777, upload-time = "2025-04-23T18:32:25.088Z" },
+ { url = "https://files.pythonhosted.org/packages/30/68/373d55e58b7e83ce371691f6eaa7175e3a24b956c44628eb25d7da007917/pydantic_core-2.33.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5c4aa4e82353f65e548c476b37e64189783aa5384903bfea4f41580f255fddfa", size = 2023982, upload-time = "2025-04-23T18:32:53.14Z" },
+ { url = "https://files.pythonhosted.org/packages/a4/16/145f54ac08c96a63d8ed6442f9dec17b2773d19920b627b18d4f10a061ea/pydantic_core-2.33.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d946c8bf0d5c24bf4fe333af284c59a19358aa3ec18cb3dc4370080da1e8ad29", size = 1858412, upload-time = "2025-04-23T18:32:55.52Z" },
+ { url = "https://files.pythonhosted.org/packages/41/b1/c6dc6c3e2de4516c0bb2c46f6a373b91b5660312342a0cf5826e38ad82fa/pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:87b31b6846e361ef83fedb187bb5b4372d0da3f7e28d85415efa92d6125d6e6d", size = 1892749, upload-time = "2025-04-23T18:32:57.546Z" },
+ { url = "https://files.pythonhosted.org/packages/12/73/8cd57e20afba760b21b742106f9dbdfa6697f1570b189c7457a1af4cd8a0/pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa9d91b338f2df0508606f7009fde642391425189bba6d8c653afd80fd6bb64e", size = 2067527, upload-time = "2025-04-23T18:32:59.771Z" },
+ { url = "https://files.pythonhosted.org/packages/e3/d5/0bb5d988cc019b3cba4a78f2d4b3854427fc47ee8ec8e9eaabf787da239c/pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2058a32994f1fde4ca0480ab9d1e75a0e8c87c22b53a3ae66554f9af78f2fe8c", size = 2108225, upload-time = "2025-04-23T18:33:04.51Z" },
+ { url = "https://files.pythonhosted.org/packages/f1/c5/00c02d1571913d496aabf146106ad8239dc132485ee22efe08085084ff7c/pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:0e03262ab796d986f978f79c943fc5f620381be7287148b8010b4097f79a39ec", size = 2069490, upload-time = "2025-04-23T18:33:06.391Z" },
+ { url = "https://files.pythonhosted.org/packages/22/a8/dccc38768274d3ed3a59b5d06f59ccb845778687652daa71df0cab4040d7/pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:1a8695a8d00c73e50bff9dfda4d540b7dee29ff9b8053e38380426a85ef10052", size = 2237525, upload-time = "2025-04-23T18:33:08.44Z" },
+ { url = "https://files.pythonhosted.org/packages/d4/e7/4f98c0b125dda7cf7ccd14ba936218397b44f50a56dd8c16a3091df116c3/pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:fa754d1850735a0b0e03bcffd9d4b4343eb417e47196e4485d9cca326073a42c", size = 2238446, upload-time = "2025-04-23T18:33:10.313Z" },
+ { url = "https://files.pythonhosted.org/packages/ce/91/2ec36480fdb0b783cd9ef6795753c1dea13882f2e68e73bce76ae8c21e6a/pydantic_core-2.33.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:a11c8d26a50bfab49002947d3d237abe4d9e4b5bdc8846a63537b6488e197808", size = 2066678, upload-time = "2025-04-23T18:33:12.224Z" },
+ { url = "https://files.pythonhosted.org/packages/7b/27/d4ae6487d73948d6f20dddcd94be4ea43e74349b56eba82e9bdee2d7494c/pydantic_core-2.33.2-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:dd14041875d09cc0f9308e37a6f8b65f5585cf2598a53aa0123df8b129d481f8", size = 2025200, upload-time = "2025-04-23T18:33:14.199Z" },
+ { url = "https://files.pythonhosted.org/packages/f1/b8/b3cb95375f05d33801024079b9392a5ab45267a63400bf1866e7ce0f0de4/pydantic_core-2.33.2-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:d87c561733f66531dced0da6e864f44ebf89a8fba55f31407b00c2f7f9449593", size = 1859123, upload-time = "2025-04-23T18:33:16.555Z" },
+ { url = "https://files.pythonhosted.org/packages/05/bc/0d0b5adeda59a261cd30a1235a445bf55c7e46ae44aea28f7bd6ed46e091/pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2f82865531efd18d6e07a04a17331af02cb7a651583c418df8266f17a63c6612", size = 1892852, upload-time = "2025-04-23T18:33:18.513Z" },
+ { url = "https://files.pythonhosted.org/packages/3e/11/d37bdebbda2e449cb3f519f6ce950927b56d62f0b84fd9cb9e372a26a3d5/pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bfb5112df54209d820d7bf9317c7a6c9025ea52e49f46b6a2060104bba37de7", size = 2067484, upload-time = "2025-04-23T18:33:20.475Z" },
+ { url = "https://files.pythonhosted.org/packages/8c/55/1f95f0a05ce72ecb02a8a8a1c3be0579bbc29b1d5ab68f1378b7bebc5057/pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:64632ff9d614e5eecfb495796ad51b0ed98c453e447a76bcbeeb69615079fc7e", size = 2108896, upload-time = "2025-04-23T18:33:22.501Z" },
+ { url = "https://files.pythonhosted.org/packages/53/89/2b2de6c81fa131f423246a9109d7b2a375e83968ad0800d6e57d0574629b/pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:f889f7a40498cc077332c7ab6b4608d296d852182211787d4f3ee377aaae66e8", size = 2069475, upload-time = "2025-04-23T18:33:24.528Z" },
+ { url = "https://files.pythonhosted.org/packages/b8/e9/1f7efbe20d0b2b10f6718944b5d8ece9152390904f29a78e68d4e7961159/pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:de4b83bb311557e439b9e186f733f6c645b9417c84e2eb8203f3f820a4b988bf", size = 2239013, upload-time = "2025-04-23T18:33:26.621Z" },
+ { url = "https://files.pythonhosted.org/packages/3c/b2/5309c905a93811524a49b4e031e9851a6b00ff0fb668794472ea7746b448/pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:82f68293f055f51b51ea42fafc74b6aad03e70e191799430b90c13d643059ebb", size = 2238715, upload-time = "2025-04-23T18:33:28.656Z" },
+ { url = "https://files.pythonhosted.org/packages/32/56/8a7ca5d2cd2cda1d245d34b1c9a942920a718082ae8e54e5f3e5a58b7add/pydantic_core-2.33.2-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:329467cecfb529c925cf2bbd4d60d2c509bc2fb52a20c1045bf09bb70971a9c1", size = 2066757, upload-time = "2025-04-23T18:33:30.645Z" },
+]
+
+[[package]]
+name = "pydantic-settings"
+version = "2.10.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "pydantic" },
+ { name = "python-dotenv" },
+ { name = "typing-inspection" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/68/85/1ea668bbab3c50071ca613c6ab30047fb36ab0da1b92fa8f17bbc38fd36c/pydantic_settings-2.10.1.tar.gz", hash = "sha256:06f0062169818d0f5524420a360d632d5857b83cffd4d42fe29597807a1614ee", size = 172583, upload-time = "2025-06-24T13:26:46.841Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/58/f0/427018098906416f580e3cf1366d3b1abfb408a0652e9f31600c24a1903c/pydantic_settings-2.10.1-py3-none-any.whl", hash = "sha256:a60952460b99cf661dc25c29c0ef171721f98bfcb52ef8d9ea4c943d7c8cc796", size = 45235, upload-time = "2025-06-24T13:26:45.485Z" },
+]
+
+[[package]]
+name = "pygments"
+version = "2.19.2"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/b0/77/a5b8c569bf593b0140bde72ea885a803b82086995367bf2037de0159d924/pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887", size = 4968631, upload-time = "2025-06-21T13:39:12.283Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/c7/21/705964c7812476f378728bdf590ca4b771ec72385c533964653c68e86bdc/pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b", size = 1225217, upload-time = "2025-06-21T13:39:07.939Z" },
+]
+
+[[package]]
+name = "pytest"
+version = "8.4.2"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "colorama", marker = "sys_platform == 'win32'" },
+ { name = "exceptiongroup", marker = "python_full_version < '3.11'" },
+ { name = "iniconfig" },
+ { name = "packaging" },
+ { name = "pluggy" },
+ { name = "pygments" },
+ { name = "tomli", marker = "python_full_version < '3.11'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/a3/5c/00a0e072241553e1a7496d638deababa67c5058571567b92a7eaa258397c/pytest-8.4.2.tar.gz", hash = "sha256:86c0d0b93306b961d58d62a4db4879f27fe25513d4b969df351abdddb3c30e01", size = 1519618, upload-time = "2025-09-04T14:34:22.711Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/a8/a4/20da314d277121d6534b3a980b29035dcd51e6744bd79075a6ce8fa4eb8d/pytest-8.4.2-py3-none-any.whl", hash = "sha256:872f880de3fc3a5bdc88a11b39c9710c3497a547cfa9320bc3c5e62fbf272e79", size = 365750, upload-time = "2025-09-04T14:34:20.226Z" },
+]
+
+[[package]]
+name = "pytest-asyncio"
+version = "1.2.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "backports-asyncio-runner", marker = "python_full_version < '3.11'" },
+ { name = "pytest" },
+ { name = "typing-extensions", marker = "python_full_version < '3.13'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/42/86/9e3c5f48f7b7b638b216e4b9e645f54d199d7abbbab7a64a13b4e12ba10f/pytest_asyncio-1.2.0.tar.gz", hash = "sha256:c609a64a2a8768462d0c99811ddb8bd2583c33fd33cf7f21af1c142e824ffb57", size = 50119, upload-time = "2025-09-12T07:33:53.816Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/04/93/2fa34714b7a4ae72f2f8dad66ba17dd9a2c793220719e736dda28b7aec27/pytest_asyncio-1.2.0-py3-none-any.whl", hash = "sha256:8e17ae5e46d8e7efe51ab6494dd2010f4ca8dae51652aa3c8d55acf50bfb2e99", size = 15095, upload-time = "2025-09-12T07:33:52.639Z" },
+]
+
+[[package]]
+name = "python-dateutil"
+version = "2.9.0.post0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "six" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/66/c0/0c8b6ad9f17a802ee498c46e004a0eb49bc148f2fd230864601a86dcf6db/python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3", size = 342432, upload-time = "2024-03-01T18:36:20.211Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/ec/57/56b9bcc3c9c6a792fcbaf139543cee77261f3651ca9da0c93f5c1221264b/python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427", size = 229892, upload-time = "2024-03-01T18:36:18.57Z" },
+]
+
+[[package]]
+name = "python-dotenv"
+version = "1.1.1"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/f6/b0/4bc07ccd3572a2f9df7e6782f52b0c6c90dcbb803ac4a167702d7d0dfe1e/python_dotenv-1.1.1.tar.gz", hash = "sha256:a8a6399716257f45be6a007360200409fce5cda2661e3dec71d23dc15f6189ab", size = 41978, upload-time = "2025-06-24T04:21:07.341Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/5f/ed/539768cf28c661b5b068d66d96a2f155c4971a5d55684a514c1a0e0dec2f/python_dotenv-1.1.1-py3-none-any.whl", hash = "sha256:31f23644fe2602f88ff55e1f5c79ba497e01224ee7737937930c448e4d0e24dc", size = 20556, upload-time = "2025-06-24T04:21:06.073Z" },
+]
+
+[[package]]
+name = "pytz"
+version = "2025.2"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/f8/bf/abbd3cdfb8fbc7fb3d4d38d320f2441b1e7cbe29be4f23797b4a2b5d8aac/pytz-2025.2.tar.gz", hash = "sha256:360b9e3dbb49a209c21ad61809c7fb453643e048b38924c765813546746e81c3", size = 320884, upload-time = "2025-03-25T02:25:00.538Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/81/c4/34e93fe5f5429d7570ec1fa436f1986fb1f00c3e0f43a589fe2bbcd22c3f/pytz-2025.2-py2.py3-none-any.whl", hash = "sha256:5ddf76296dd8c44c26eb8f4b6f35488f3ccbf6fbbd7adee0b7262d43f0ec2f00", size = 509225, upload-time = "2025-03-25T02:24:58.468Z" },
+]
+
+[[package]]
+name = "pyyaml"
+version = "6.0.2"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/54/ed/79a089b6be93607fa5cdaedf301d7dfb23af5f25c398d5ead2525b063e17/pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e", size = 130631, upload-time = "2024-08-06T20:33:50.674Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/9b/95/a3fac87cb7158e231b5a6012e438c647e1a87f09f8e0d123acec8ab8bf71/PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086", size = 184199, upload-time = "2024-08-06T20:31:40.178Z" },
+ { url = "https://files.pythonhosted.org/packages/c7/7a/68bd47624dab8fd4afbfd3c48e3b79efe09098ae941de5b58abcbadff5cb/PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf", size = 171758, upload-time = "2024-08-06T20:31:42.173Z" },
+ { url = "https://files.pythonhosted.org/packages/49/ee/14c54df452143b9ee9f0f29074d7ca5516a36edb0b4cc40c3f280131656f/PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237", size = 718463, upload-time = "2024-08-06T20:31:44.263Z" },
+ { url = "https://files.pythonhosted.org/packages/4d/61/de363a97476e766574650d742205be468921a7b532aa2499fcd886b62530/PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b", size = 719280, upload-time = "2024-08-06T20:31:50.199Z" },
+ { url = "https://files.pythonhosted.org/packages/6b/4e/1523cb902fd98355e2e9ea5e5eb237cbc5f3ad5f3075fa65087aa0ecb669/PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed", size = 751239, upload-time = "2024-08-06T20:31:52.292Z" },
+ { url = "https://files.pythonhosted.org/packages/b7/33/5504b3a9a4464893c32f118a9cc045190a91637b119a9c881da1cf6b7a72/PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180", size = 695802, upload-time = "2024-08-06T20:31:53.836Z" },
+ { url = "https://files.pythonhosted.org/packages/5c/20/8347dcabd41ef3a3cdc4f7b7a2aff3d06598c8779faa189cdbf878b626a4/PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68", size = 720527, upload-time = "2024-08-06T20:31:55.565Z" },
+ { url = "https://files.pythonhosted.org/packages/be/aa/5afe99233fb360d0ff37377145a949ae258aaab831bde4792b32650a4378/PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99", size = 144052, upload-time = "2024-08-06T20:31:56.914Z" },
+ { url = "https://files.pythonhosted.org/packages/b5/84/0fa4b06f6d6c958d207620fc60005e241ecedceee58931bb20138e1e5776/PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e", size = 161774, upload-time = "2024-08-06T20:31:58.304Z" },
+ { url = "https://files.pythonhosted.org/packages/f8/aa/7af4e81f7acba21a4c6be026da38fd2b872ca46226673c89a758ebdc4fd2/PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774", size = 184612, upload-time = "2024-08-06T20:32:03.408Z" },
+ { url = "https://files.pythonhosted.org/packages/8b/62/b9faa998fd185f65c1371643678e4d58254add437edb764a08c5a98fb986/PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee", size = 172040, upload-time = "2024-08-06T20:32:04.926Z" },
+ { url = "https://files.pythonhosted.org/packages/ad/0c/c804f5f922a9a6563bab712d8dcc70251e8af811fce4524d57c2c0fd49a4/PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c", size = 736829, upload-time = "2024-08-06T20:32:06.459Z" },
+ { url = "https://files.pythonhosted.org/packages/51/16/6af8d6a6b210c8e54f1406a6b9481febf9c64a3109c541567e35a49aa2e7/PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317", size = 764167, upload-time = "2024-08-06T20:32:08.338Z" },
+ { url = "https://files.pythonhosted.org/packages/75/e4/2c27590dfc9992f73aabbeb9241ae20220bd9452df27483b6e56d3975cc5/PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85", size = 762952, upload-time = "2024-08-06T20:32:14.124Z" },
+ { url = "https://files.pythonhosted.org/packages/9b/97/ecc1abf4a823f5ac61941a9c00fe501b02ac3ab0e373c3857f7d4b83e2b6/PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4", size = 735301, upload-time = "2024-08-06T20:32:16.17Z" },
+ { url = "https://files.pythonhosted.org/packages/45/73/0f49dacd6e82c9430e46f4a027baa4ca205e8b0a9dce1397f44edc23559d/PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e", size = 756638, upload-time = "2024-08-06T20:32:18.555Z" },
+ { url = "https://files.pythonhosted.org/packages/22/5f/956f0f9fc65223a58fbc14459bf34b4cc48dec52e00535c79b8db361aabd/PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5", size = 143850, upload-time = "2024-08-06T20:32:19.889Z" },
+ { url = "https://files.pythonhosted.org/packages/ed/23/8da0bbe2ab9dcdd11f4f4557ccaf95c10b9811b13ecced089d43ce59c3c8/PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44", size = 161980, upload-time = "2024-08-06T20:32:21.273Z" },
+ { url = "https://files.pythonhosted.org/packages/86/0c/c581167fc46d6d6d7ddcfb8c843a4de25bdd27e4466938109ca68492292c/PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab", size = 183873, upload-time = "2024-08-06T20:32:25.131Z" },
+ { url = "https://files.pythonhosted.org/packages/a8/0c/38374f5bb272c051e2a69281d71cba6fdb983413e6758b84482905e29a5d/PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725", size = 173302, upload-time = "2024-08-06T20:32:26.511Z" },
+ { url = "https://files.pythonhosted.org/packages/c3/93/9916574aa8c00aa06bbac729972eb1071d002b8e158bd0e83a3b9a20a1f7/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5", size = 739154, upload-time = "2024-08-06T20:32:28.363Z" },
+ { url = "https://files.pythonhosted.org/packages/95/0f/b8938f1cbd09739c6da569d172531567dbcc9789e0029aa070856f123984/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425", size = 766223, upload-time = "2024-08-06T20:32:30.058Z" },
+ { url = "https://files.pythonhosted.org/packages/b9/2b/614b4752f2e127db5cc206abc23a8c19678e92b23c3db30fc86ab731d3bd/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476", size = 767542, upload-time = "2024-08-06T20:32:31.881Z" },
+ { url = "https://files.pythonhosted.org/packages/d4/00/dd137d5bcc7efea1836d6264f049359861cf548469d18da90cd8216cf05f/PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48", size = 731164, upload-time = "2024-08-06T20:32:37.083Z" },
+ { url = "https://files.pythonhosted.org/packages/c9/1f/4f998c900485e5c0ef43838363ba4a9723ac0ad73a9dc42068b12aaba4e4/PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b", size = 756611, upload-time = "2024-08-06T20:32:38.898Z" },
+ { url = "https://files.pythonhosted.org/packages/df/d1/f5a275fdb252768b7a11ec63585bc38d0e87c9e05668a139fea92b80634c/PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4", size = 140591, upload-time = "2024-08-06T20:32:40.241Z" },
+ { url = "https://files.pythonhosted.org/packages/0c/e8/4f648c598b17c3d06e8753d7d13d57542b30d56e6c2dedf9c331ae56312e/PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8", size = 156338, upload-time = "2024-08-06T20:32:41.93Z" },
+ { url = "https://files.pythonhosted.org/packages/ef/e3/3af305b830494fa85d95f6d95ef7fa73f2ee1cc8ef5b495c7c3269fb835f/PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba", size = 181309, upload-time = "2024-08-06T20:32:43.4Z" },
+ { url = "https://files.pythonhosted.org/packages/45/9f/3b1c20a0b7a3200524eb0076cc027a970d320bd3a6592873c85c92a08731/PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1", size = 171679, upload-time = "2024-08-06T20:32:44.801Z" },
+ { url = "https://files.pythonhosted.org/packages/7c/9a/337322f27005c33bcb656c655fa78325b730324c78620e8328ae28b64d0c/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133", size = 733428, upload-time = "2024-08-06T20:32:46.432Z" },
+ { url = "https://files.pythonhosted.org/packages/a3/69/864fbe19e6c18ea3cc196cbe5d392175b4cf3d5d0ac1403ec3f2d237ebb5/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484", size = 763361, upload-time = "2024-08-06T20:32:51.188Z" },
+ { url = "https://files.pythonhosted.org/packages/04/24/b7721e4845c2f162d26f50521b825fb061bc0a5afcf9a386840f23ea19fa/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5", size = 759523, upload-time = "2024-08-06T20:32:53.019Z" },
+ { url = "https://files.pythonhosted.org/packages/2b/b2/e3234f59ba06559c6ff63c4e10baea10e5e7df868092bf9ab40e5b9c56b6/PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc", size = 726660, upload-time = "2024-08-06T20:32:54.708Z" },
+ { url = "https://files.pythonhosted.org/packages/fe/0f/25911a9f080464c59fab9027482f822b86bf0608957a5fcc6eaac85aa515/PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652", size = 751597, upload-time = "2024-08-06T20:32:56.985Z" },
+ { url = "https://files.pythonhosted.org/packages/14/0d/e2c3b43bbce3cf6bd97c840b46088a3031085179e596d4929729d8d68270/PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183", size = 140527, upload-time = "2024-08-06T20:33:03.001Z" },
+ { url = "https://files.pythonhosted.org/packages/fa/de/02b54f42487e3d3c6efb3f89428677074ca7bf43aae402517bc7cca949f3/PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563", size = 156446, upload-time = "2024-08-06T20:33:04.33Z" },
+]
+
+[[package]]
+name = "requests"
+version = "2.32.5"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "certifi" },
+ { name = "charset-normalizer" },
+ { name = "idna" },
+ { name = "urllib3" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/c9/74/b3ff8e6c8446842c3f5c837e9c3dfcfe2018ea6ecef224c710c85ef728f4/requests-2.32.5.tar.gz", hash = "sha256:dbba0bac56e100853db0ea71b82b4dfd5fe2bf6d3754a8893c3af500cec7d7cf", size = 134517, upload-time = "2025-08-18T20:46:02.573Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/1e/db/4254e3eabe8020b458f1a747140d32277ec7a271daf1d235b70dc0b4e6e3/requests-2.32.5-py3-none-any.whl", hash = "sha256:2462f94637a34fd532264295e186976db0f5d453d1cdd31473c85a6a161affb6", size = 64738, upload-time = "2025-08-18T20:46:00.542Z" },
+]
+
+[[package]]
+name = "requests-toolbelt"
+version = "1.0.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "requests" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/f3/61/d7545dafb7ac2230c70d38d31cbfe4cc64f7144dc41f6e4e4b78ecd9f5bb/requests-toolbelt-1.0.0.tar.gz", hash = "sha256:7681a0a3d047012b5bdc0ee37d7f8f07ebe76ab08caeccfc3921ce23c88d5bc6", size = 206888, upload-time = "2023-05-01T04:11:33.229Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/3f/51/d4db610ef29373b879047326cbf6fa98b6c1969d6f6dc423279de2b1be2c/requests_toolbelt-1.0.0-py2.py3-none-any.whl", hash = "sha256:cccfdd665f0a24fcf4726e690f65639d272bb0637b9b92dfd91a5568ccf6bd06", size = 54481, upload-time = "2023-05-01T04:11:28.427Z" },
+]
+
+[[package]]
+name = "rich"
+version = "14.1.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "markdown-it-py" },
+ { name = "pygments" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/fe/75/af448d8e52bf1d8fa6a9d089ca6c07ff4453d86c65c145d0a300bb073b9b/rich-14.1.0.tar.gz", hash = "sha256:e497a48b844b0320d45007cdebfeaeed8db2a4f4bcf49f15e455cfc4af11eaa8", size = 224441, upload-time = "2025-07-25T07:32:58.125Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/e3/30/3c4d035596d3cf444529e0b2953ad0466f6049528a879d27534700580395/rich-14.1.0-py3-none-any.whl", hash = "sha256:536f5f1785986d6dbdea3c75205c473f970777b4a0d6c6dd1b696aa05a3fa04f", size = 243368, upload-time = "2025-07-25T07:32:56.73Z" },
+]
+
+[[package]]
+name = "ruff"
+version = "0.13.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/6e/1a/1f4b722862840295bcaba8c9e5261572347509548faaa99b2d57ee7bfe6a/ruff-0.13.0.tar.gz", hash = "sha256:5b4b1ee7eb35afae128ab94459b13b2baaed282b1fb0f472a73c82c996c8ae60", size = 5372863, upload-time = "2025-09-10T16:25:37.917Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/ac/fe/6f87b419dbe166fd30a991390221f14c5b68946f389ea07913e1719741e0/ruff-0.13.0-py3-none-linux_armv6l.whl", hash = "sha256:137f3d65d58ee828ae136a12d1dc33d992773d8f7644bc6b82714570f31b2004", size = 12187826, upload-time = "2025-09-10T16:24:39.5Z" },
+ { url = "https://files.pythonhosted.org/packages/e4/25/c92296b1fc36d2499e12b74a3fdb230f77af7bdf048fad7b0a62e94ed56a/ruff-0.13.0-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:21ae48151b66e71fd111b7d79f9ad358814ed58c339631450c66a4be33cc28b9", size = 12933428, upload-time = "2025-09-10T16:24:43.866Z" },
+ { url = "https://files.pythonhosted.org/packages/44/cf/40bc7221a949470307d9c35b4ef5810c294e6cfa3caafb57d882731a9f42/ruff-0.13.0-py3-none-macosx_11_0_arm64.whl", hash = "sha256:64de45f4ca5441209e41742d527944635a05a6e7c05798904f39c85bafa819e3", size = 12095543, upload-time = "2025-09-10T16:24:46.638Z" },
+ { url = "https://files.pythonhosted.org/packages/f1/03/8b5ff2a211efb68c63a1d03d157e924997ada87d01bebffbd13a0f3fcdeb/ruff-0.13.0-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2b2c653ae9b9d46e0ef62fc6fbf5b979bda20a0b1d2b22f8f7eb0cde9f4963b8", size = 12312489, upload-time = "2025-09-10T16:24:49.556Z" },
+ { url = "https://files.pythonhosted.org/packages/37/fc/2336ef6d5e9c8d8ea8305c5f91e767d795cd4fc171a6d97ef38a5302dadc/ruff-0.13.0-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4cec632534332062bc9eb5884a267b689085a1afea9801bf94e3ba7498a2d207", size = 11991631, upload-time = "2025-09-10T16:24:53.439Z" },
+ { url = "https://files.pythonhosted.org/packages/39/7f/f6d574d100fca83d32637d7f5541bea2f5e473c40020bbc7fc4a4d5b7294/ruff-0.13.0-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dcd628101d9f7d122e120ac7c17e0a0f468b19bc925501dbe03c1cb7f5415b24", size = 13720602, upload-time = "2025-09-10T16:24:56.392Z" },
+ { url = "https://files.pythonhosted.org/packages/fd/c8/a8a5b81d8729b5d1f663348d11e2a9d65a7a9bd3c399763b1a51c72be1ce/ruff-0.13.0-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:afe37db8e1466acb173bb2a39ca92df00570e0fd7c94c72d87b51b21bb63efea", size = 14697751, upload-time = "2025-09-10T16:24:59.89Z" },
+ { url = "https://files.pythonhosted.org/packages/57/f5/183ec292272ce7ec5e882aea74937f7288e88ecb500198b832c24debc6d3/ruff-0.13.0-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0f96a8d90bb258d7d3358b372905fe7333aaacf6c39e2408b9f8ba181f4b6ef2", size = 14095317, upload-time = "2025-09-10T16:25:03.025Z" },
+ { url = "https://files.pythonhosted.org/packages/9f/8d/7f9771c971724701af7926c14dab31754e7b303d127b0d3f01116faef456/ruff-0.13.0-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:94b5e3d883e4f924c5298e3f2ee0f3085819c14f68d1e5b6715597681433f153", size = 13144418, upload-time = "2025-09-10T16:25:06.272Z" },
+ { url = "https://files.pythonhosted.org/packages/a8/a6/7985ad1778e60922d4bef546688cd8a25822c58873e9ff30189cfe5dc4ab/ruff-0.13.0-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:03447f3d18479df3d24917a92d768a89f873a7181a064858ea90a804a7538991", size = 13370843, upload-time = "2025-09-10T16:25:09.965Z" },
+ { url = "https://files.pythonhosted.org/packages/64/1c/bafdd5a7a05a50cc51d9f5711da704942d8dd62df3d8c70c311e98ce9f8a/ruff-0.13.0-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:fbc6b1934eb1c0033da427c805e27d164bb713f8e273a024a7e86176d7f462cf", size = 13321891, upload-time = "2025-09-10T16:25:12.969Z" },
+ { url = "https://files.pythonhosted.org/packages/bc/3e/7817f989cb9725ef7e8d2cee74186bf90555279e119de50c750c4b7a72fe/ruff-0.13.0-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:a8ab6a3e03665d39d4a25ee199d207a488724f022db0e1fe4002968abdb8001b", size = 12119119, upload-time = "2025-09-10T16:25:16.621Z" },
+ { url = "https://files.pythonhosted.org/packages/58/07/9df080742e8d1080e60c426dce6e96a8faf9a371e2ce22eef662e3839c95/ruff-0.13.0-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:d2a5c62f8ccc6dd2fe259917482de7275cecc86141ee10432727c4816235bc41", size = 11961594, upload-time = "2025-09-10T16:25:19.49Z" },
+ { url = "https://files.pythonhosted.org/packages/6a/f4/ae1185349197d26a2316840cb4d6c3fba61d4ac36ed728bf0228b222d71f/ruff-0.13.0-py3-none-musllinux_1_2_i686.whl", hash = "sha256:b7b85ca27aeeb1ab421bc787009831cffe6048faae08ad80867edab9f2760945", size = 12933377, upload-time = "2025-09-10T16:25:22.371Z" },
+ { url = "https://files.pythonhosted.org/packages/b6/39/e776c10a3b349fc8209a905bfb327831d7516f6058339a613a8d2aaecacd/ruff-0.13.0-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:79ea0c44a3032af768cabfd9616e44c24303af49d633b43e3a5096e009ebe823", size = 13418555, upload-time = "2025-09-10T16:25:25.681Z" },
+ { url = "https://files.pythonhosted.org/packages/46/09/dca8df3d48e8b3f4202bf20b1658898e74b6442ac835bfe2c1816d926697/ruff-0.13.0-py3-none-win32.whl", hash = "sha256:4e473e8f0e6a04e4113f2e1de12a5039579892329ecc49958424e5568ef4f768", size = 12141613, upload-time = "2025-09-10T16:25:28.664Z" },
+ { url = "https://files.pythonhosted.org/packages/61/21/0647eb71ed99b888ad50e44d8ec65d7148babc0e242d531a499a0bbcda5f/ruff-0.13.0-py3-none-win_amd64.whl", hash = "sha256:48e5c25c7a3713eea9ce755995767f4dcd1b0b9599b638b12946e892123d1efb", size = 13258250, upload-time = "2025-09-10T16:25:31.773Z" },
+ { url = "https://files.pythonhosted.org/packages/e1/a3/03216a6a86c706df54422612981fb0f9041dbb452c3401501d4a22b942c9/ruff-0.13.0-py3-none-win_arm64.whl", hash = "sha256:ab80525317b1e1d38614addec8ac954f1b3e662de9d59114ecbf771d00cf613e", size = 12312357, upload-time = "2025-09-10T16:25:35.595Z" },
+]
+
+[[package]]
+name = "shellingham"
+version = "1.5.4"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/58/15/8b3609fd3830ef7b27b655beb4b4e9c62313a4e8da8c676e142cc210d58e/shellingham-1.5.4.tar.gz", hash = "sha256:8dbca0739d487e5bd35ab3ca4b36e11c4078f3a234bfce294b0a0291363404de", size = 10310, upload-time = "2023-10-24T04:13:40.426Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/e0/f9/0595336914c5619e5f28a1fb793285925a8cd4b432c9da0a987836c7f822/shellingham-1.5.4-py2.py3-none-any.whl", hash = "sha256:7ecfff8f2fd72616f7481040475a65b2bf8af90a56c89140852d1120324e8686", size = 9755, upload-time = "2023-10-24T04:13:38.866Z" },
+]
+
+[[package]]
+name = "six"
+version = "1.17.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/94/e7/b2c673351809dca68a0e064b6af791aa332cf192da575fd474ed7d6f16a2/six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81", size = 34031, upload-time = "2024-12-04T17:35:28.174Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/b7/ce/149a00dd41f10bc29e5921b496af8b574d8413afcd5e30dfa0ed46c2cc5e/six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274", size = 11050, upload-time = "2024-12-04T17:35:26.475Z" },
+]
+
+[[package]]
+name = "sniffio"
+version = "1.3.1"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/a2/87/a6771e1546d97e7e041b6ae58d80074f81b7d5121207425c964ddf5cfdbd/sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc", size = 20372, upload-time = "2024-02-25T23:20:04.057Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/e9/44/75a9c9421471a6c4805dbf2356f7c181a29c1879239abab1ea2cc8f38b40/sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2", size = 10235, upload-time = "2024-02-25T23:20:01.196Z" },
+]
+
+[[package]]
+name = "sortedcontainers"
+version = "2.4.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/e8/c4/ba2f8066cceb6f23394729afe52f3bf7adec04bf9ed2c820b39e19299111/sortedcontainers-2.4.0.tar.gz", hash = "sha256:25caa5a06cc30b6b83d11423433f65d1f9d76c4c6a0c90e3379eaa43b9bfdb88", size = 30594, upload-time = "2021-05-16T22:03:42.897Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/32/46/9cb0e58b2deb7f82b84065f37f3bffeb12413f947f9388e4cac22c4621ce/sortedcontainers-2.4.0-py2.py3-none-any.whl", hash = "sha256:a163dcaede0f1c021485e957a39245190e74249897e2ae4b2aa38595db237ee0", size = 29575, upload-time = "2021-05-16T22:03:41.177Z" },
+]
+
+[[package]]
+name = "starlette"
+version = "0.47.3"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "anyio" },
+ { name = "typing-extensions", marker = "python_full_version < '3.13'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/15/b9/cc3017f9a9c9b6e27c5106cc10cc7904653c3eec0729793aec10479dd669/starlette-0.47.3.tar.gz", hash = "sha256:6bc94f839cc176c4858894f1f8908f0ab79dfec1a6b8402f6da9be26ebea52e9", size = 2584144, upload-time = "2025-08-24T13:36:42.122Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/ce/fd/901cfa59aaa5b30a99e16876f11abe38b59a1a2c51ffb3d7142bb6089069/starlette-0.47.3-py3-none-any.whl", hash = "sha256:89c0778ca62a76b826101e7c709e70680a1699ca7da6b44d38eb0a7e61fe4b51", size = 72991, upload-time = "2025-08-24T13:36:40.887Z" },
+]
+
+[[package]]
+name = "structlog"
+version = "25.4.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "typing-extensions", marker = "python_full_version < '3.11'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/79/b9/6e672db4fec07349e7a8a8172c1a6ae235c58679ca29c3f86a61b5e59ff3/structlog-25.4.0.tar.gz", hash = "sha256:186cd1b0a8ae762e29417095664adf1d6a31702160a46dacb7796ea82f7409e4", size = 1369138, upload-time = "2025-06-02T08:21:12.971Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/a0/4a/97ee6973e3a73c74c8120d59829c3861ea52210667ec3e7a16045c62b64d/structlog-25.4.0-py3-none-any.whl", hash = "sha256:fe809ff5c27e557d14e613f45ca441aabda051d119ee5a0102aaba6ce40eed2c", size = 68720, upload-time = "2025-06-02T08:21:11.43Z" },
+]
+
+[[package]]
+name = "tenacity"
+version = "9.1.2"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/0a/d4/2b0cd0fe285e14b36db076e78c93766ff1d529d70408bd1d2a5a84f1d929/tenacity-9.1.2.tar.gz", hash = "sha256:1169d376c297e7de388d18b4481760d478b0e99a777cad3a9c86e556f4b697cb", size = 48036, upload-time = "2025-04-02T08:25:09.966Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/e5/30/643397144bfbfec6f6ef821f36f33e57d35946c44a2352d3c9f0ae847619/tenacity-9.1.2-py3-none-any.whl", hash = "sha256:f77bf36710d8b73a50b2dd155c97b870017ad21afe6ab300326b0371b3b05138", size = 28248, upload-time = "2025-04-02T08:25:07.678Z" },
+]
+
+[[package]]
+name = "tomli"
+version = "2.2.1"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/18/87/302344fed471e44a87289cf4967697d07e532f2421fdaf868a303cbae4ff/tomli-2.2.1.tar.gz", hash = "sha256:cd45e1dc79c835ce60f7404ec8119f2eb06d38b1deba146f07ced3bbc44505ff", size = 17175, upload-time = "2024-11-27T22:38:36.873Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/43/ca/75707e6efa2b37c77dadb324ae7d9571cb424e61ea73fad7c56c2d14527f/tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249", size = 131077, upload-time = "2024-11-27T22:37:54.956Z" },
+ { url = "https://files.pythonhosted.org/packages/c7/16/51ae563a8615d472fdbffc43a3f3d46588c264ac4f024f63f01283becfbb/tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6", size = 123429, upload-time = "2024-11-27T22:37:56.698Z" },
+ { url = "https://files.pythonhosted.org/packages/f1/dd/4f6cd1e7b160041db83c694abc78e100473c15d54620083dbd5aae7b990e/tomli-2.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ece47d672db52ac607a3d9599a9d48dcb2f2f735c6c2d1f34130085bb12b112a", size = 226067, upload-time = "2024-11-27T22:37:57.63Z" },
+ { url = "https://files.pythonhosted.org/packages/a9/6b/c54ede5dc70d648cc6361eaf429304b02f2871a345bbdd51e993d6cdf550/tomli-2.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6972ca9c9cc9f0acaa56a8ca1ff51e7af152a9f87fb64623e31d5c83700080ee", size = 236030, upload-time = "2024-11-27T22:37:59.344Z" },
+ { url = "https://files.pythonhosted.org/packages/1f/47/999514fa49cfaf7a92c805a86c3c43f4215621855d151b61c602abb38091/tomli-2.2.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c954d2250168d28797dd4e3ac5cf812a406cd5a92674ee4c8f123c889786aa8e", size = 240898, upload-time = "2024-11-27T22:38:00.429Z" },
+ { url = "https://files.pythonhosted.org/packages/73/41/0a01279a7ae09ee1573b423318e7934674ce06eb33f50936655071d81a24/tomli-2.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8dd28b3e155b80f4d54beb40a441d366adcfe740969820caf156c019fb5c7ec4", size = 229894, upload-time = "2024-11-27T22:38:02.094Z" },
+ { url = "https://files.pythonhosted.org/packages/55/18/5d8bc5b0a0362311ce4d18830a5d28943667599a60d20118074ea1b01bb7/tomli-2.2.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e59e304978767a54663af13c07b3d1af22ddee3bb2fb0618ca1593e4f593a106", size = 245319, upload-time = "2024-11-27T22:38:03.206Z" },
+ { url = "https://files.pythonhosted.org/packages/92/a3/7ade0576d17f3cdf5ff44d61390d4b3febb8a9fc2b480c75c47ea048c646/tomli-2.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:33580bccab0338d00994d7f16f4c4ec25b776af3ffaac1ed74e0b3fc95e885a8", size = 238273, upload-time = "2024-11-27T22:38:04.217Z" },
+ { url = "https://files.pythonhosted.org/packages/72/6f/fa64ef058ac1446a1e51110c375339b3ec6be245af9d14c87c4a6412dd32/tomli-2.2.1-cp311-cp311-win32.whl", hash = "sha256:465af0e0875402f1d226519c9904f37254b3045fc5084697cefb9bdde1ff99ff", size = 98310, upload-time = "2024-11-27T22:38:05.908Z" },
+ { url = "https://files.pythonhosted.org/packages/6a/1c/4a2dcde4a51b81be3530565e92eda625d94dafb46dbeb15069df4caffc34/tomli-2.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:2d0f2fdd22b02c6d81637a3c95f8cd77f995846af7414c5c4b8d0545afa1bc4b", size = 108309, upload-time = "2024-11-27T22:38:06.812Z" },
+ { url = "https://files.pythonhosted.org/packages/52/e1/f8af4c2fcde17500422858155aeb0d7e93477a0d59a98e56cbfe75070fd0/tomli-2.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4a8f6e44de52d5e6c657c9fe83b562f5f4256d8ebbfe4ff922c495620a7f6cea", size = 132762, upload-time = "2024-11-27T22:38:07.731Z" },
+ { url = "https://files.pythonhosted.org/packages/03/b8/152c68bb84fc00396b83e7bbddd5ec0bd3dd409db4195e2a9b3e398ad2e3/tomli-2.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8d57ca8095a641b8237d5b079147646153d22552f1c637fd3ba7f4b0b29167a8", size = 123453, upload-time = "2024-11-27T22:38:09.384Z" },
+ { url = "https://files.pythonhosted.org/packages/c8/d6/fc9267af9166f79ac528ff7e8c55c8181ded34eb4b0e93daa767b8841573/tomli-2.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e340144ad7ae1533cb897d406382b4b6fede8890a03738ff1683af800d54192", size = 233486, upload-time = "2024-11-27T22:38:10.329Z" },
+ { url = "https://files.pythonhosted.org/packages/5c/51/51c3f2884d7bab89af25f678447ea7d297b53b5a3b5730a7cb2ef6069f07/tomli-2.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db2b95f9de79181805df90bedc5a5ab4c165e6ec3fe99f970d0e302f384ad222", size = 242349, upload-time = "2024-11-27T22:38:11.443Z" },
+ { url = "https://files.pythonhosted.org/packages/ab/df/bfa89627d13a5cc22402e441e8a931ef2108403db390ff3345c05253935e/tomli-2.2.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40741994320b232529c802f8bc86da4e1aa9f413db394617b9a256ae0f9a7f77", size = 252159, upload-time = "2024-11-27T22:38:13.099Z" },
+ { url = "https://files.pythonhosted.org/packages/9e/6e/fa2b916dced65763a5168c6ccb91066f7639bdc88b48adda990db10c8c0b/tomli-2.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:400e720fe168c0f8521520190686ef8ef033fb19fc493da09779e592861b78c6", size = 237243, upload-time = "2024-11-27T22:38:14.766Z" },
+ { url = "https://files.pythonhosted.org/packages/b4/04/885d3b1f650e1153cbb93a6a9782c58a972b94ea4483ae4ac5cedd5e4a09/tomli-2.2.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:02abe224de6ae62c19f090f68da4e27b10af2b93213d36cf44e6e1c5abd19fdd", size = 259645, upload-time = "2024-11-27T22:38:15.843Z" },
+ { url = "https://files.pythonhosted.org/packages/9c/de/6b432d66e986e501586da298e28ebeefd3edc2c780f3ad73d22566034239/tomli-2.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b82ebccc8c8a36f2094e969560a1b836758481f3dc360ce9a3277c65f374285e", size = 244584, upload-time = "2024-11-27T22:38:17.645Z" },
+ { url = "https://files.pythonhosted.org/packages/1c/9a/47c0449b98e6e7d1be6cbac02f93dd79003234ddc4aaab6ba07a9a7482e2/tomli-2.2.1-cp312-cp312-win32.whl", hash = "sha256:889f80ef92701b9dbb224e49ec87c645ce5df3fa2cc548664eb8a25e03127a98", size = 98875, upload-time = "2024-11-27T22:38:19.159Z" },
+ { url = "https://files.pythonhosted.org/packages/ef/60/9b9638f081c6f1261e2688bd487625cd1e660d0a85bd469e91d8db969734/tomli-2.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:7fc04e92e1d624a4a63c76474610238576942d6b8950a2d7f908a340494e67e4", size = 109418, upload-time = "2024-11-27T22:38:20.064Z" },
+ { url = "https://files.pythonhosted.org/packages/04/90/2ee5f2e0362cb8a0b6499dc44f4d7d48f8fff06d28ba46e6f1eaa61a1388/tomli-2.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f4039b9cbc3048b2416cc57ab3bda989a6fcf9b36cf8937f01a6e731b64f80d7", size = 132708, upload-time = "2024-11-27T22:38:21.659Z" },
+ { url = "https://files.pythonhosted.org/packages/c0/ec/46b4108816de6b385141f082ba99e315501ccd0a2ea23db4a100dd3990ea/tomli-2.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:286f0ca2ffeeb5b9bd4fcc8d6c330534323ec51b2f52da063b11c502da16f30c", size = 123582, upload-time = "2024-11-27T22:38:22.693Z" },
+ { url = "https://files.pythonhosted.org/packages/a0/bd/b470466d0137b37b68d24556c38a0cc819e8febe392d5b199dcd7f578365/tomli-2.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a92ef1a44547e894e2a17d24e7557a5e85a9e1d0048b0b5e7541f76c5032cb13", size = 232543, upload-time = "2024-11-27T22:38:24.367Z" },
+ { url = "https://files.pythonhosted.org/packages/d9/e5/82e80ff3b751373f7cead2815bcbe2d51c895b3c990686741a8e56ec42ab/tomli-2.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9316dc65bed1684c9a98ee68759ceaed29d229e985297003e494aa825ebb0281", size = 241691, upload-time = "2024-11-27T22:38:26.081Z" },
+ { url = "https://files.pythonhosted.org/packages/05/7e/2a110bc2713557d6a1bfb06af23dd01e7dde52b6ee7dadc589868f9abfac/tomli-2.2.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e85e99945e688e32d5a35c1ff38ed0b3f41f43fad8df0bdf79f72b2ba7bc5272", size = 251170, upload-time = "2024-11-27T22:38:27.921Z" },
+ { url = "https://files.pythonhosted.org/packages/64/7b/22d713946efe00e0adbcdfd6d1aa119ae03fd0b60ebed51ebb3fa9f5a2e5/tomli-2.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ac065718db92ca818f8d6141b5f66369833d4a80a9d74435a268c52bdfa73140", size = 236530, upload-time = "2024-11-27T22:38:29.591Z" },
+ { url = "https://files.pythonhosted.org/packages/38/31/3a76f67da4b0cf37b742ca76beaf819dca0ebef26d78fc794a576e08accf/tomli-2.2.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:d920f33822747519673ee656a4b6ac33e382eca9d331c87770faa3eef562aeb2", size = 258666, upload-time = "2024-11-27T22:38:30.639Z" },
+ { url = "https://files.pythonhosted.org/packages/07/10/5af1293da642aded87e8a988753945d0cf7e00a9452d3911dd3bb354c9e2/tomli-2.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a198f10c4d1b1375d7687bc25294306e551bf1abfa4eace6650070a5c1ae2744", size = 243954, upload-time = "2024-11-27T22:38:31.702Z" },
+ { url = "https://files.pythonhosted.org/packages/5b/b9/1ed31d167be802da0fc95020d04cd27b7d7065cc6fbefdd2f9186f60d7bd/tomli-2.2.1-cp313-cp313-win32.whl", hash = "sha256:d3f5614314d758649ab2ab3a62d4f2004c825922f9e370b29416484086b264ec", size = 98724, upload-time = "2024-11-27T22:38:32.837Z" },
+ { url = "https://files.pythonhosted.org/packages/c7/32/b0963458706accd9afcfeb867c0f9175a741bf7b19cd424230714d722198/tomli-2.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:a38aa0308e754b0e3c67e344754dff64999ff9b513e691d0e786265c93583c69", size = 109383, upload-time = "2024-11-27T22:38:34.455Z" },
+ { url = "https://files.pythonhosted.org/packages/6e/c2/61d3e0f47e2b74ef40a68b9e6ad5984f6241a942f7cd3bbfbdbd03861ea9/tomli-2.2.1-py3-none-any.whl", hash = "sha256:cb55c73c5f4408779d0cf3eef9f762b9c9f147a77de7b258bef0a5628adc85cc", size = 14257, upload-time = "2024-11-27T22:38:35.385Z" },
+]
+
+[[package]]
+name = "typer"
+version = "0.17.4"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "click" },
+ { name = "rich" },
+ { name = "shellingham" },
+ { name = "typing-extensions" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/92/e8/2a73ccf9874ec4c7638f172efc8972ceab13a0e3480b389d6ed822f7a822/typer-0.17.4.tar.gz", hash = "sha256:b77dc07d849312fd2bb5e7f20a7af8985c7ec360c45b051ed5412f64d8dc1580", size = 103734, upload-time = "2025-09-05T18:14:40.746Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/93/72/6b3e70d32e89a5cbb6a4513726c1ae8762165b027af569289e19ec08edd8/typer-0.17.4-py3-none-any.whl", hash = "sha256:015534a6edaa450e7007eba705d5c18c3349dcea50a6ad79a5ed530967575824", size = 46643, upload-time = "2025-09-05T18:14:39.166Z" },
+]
+
+[[package]]
+name = "typing-extensions"
+version = "4.15.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/72/94/1a15dd82efb362ac84269196e94cf00f187f7ed21c242792a923cdb1c61f/typing_extensions-4.15.0.tar.gz", hash = "sha256:0cea48d173cc12fa28ecabc3b837ea3cf6f38c6d1136f85cbaaf598984861466", size = 109391, upload-time = "2025-08-25T13:49:26.313Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/18/67/36e9267722cc04a6b9f15c7f3441c2363321a3ea07da7ae0c0707beb2a9c/typing_extensions-4.15.0-py3-none-any.whl", hash = "sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548", size = 44614, upload-time = "2025-08-25T13:49:24.86Z" },
+]
+
+[[package]]
+name = "typing-inspection"
+version = "0.4.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "typing-extensions" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/f8/b1/0c11f5058406b3af7609f121aaa6b609744687f1d158b3c3a5bf4cc94238/typing_inspection-0.4.1.tar.gz", hash = "sha256:6ae134cc0203c33377d43188d4064e9b357dba58cff3185f22924610e70a9d28", size = 75726, upload-time = "2025-05-21T18:55:23.885Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/17/69/cd203477f944c353c31bade965f880aa1061fd6bf05ded0726ca845b6ff7/typing_inspection-0.4.1-py3-none-any.whl", hash = "sha256:389055682238f53b04f7badcb49b989835495a96700ced5dab2d8feae4b26f51", size = 14552, upload-time = "2025-05-21T18:55:22.152Z" },
+]
+
+[[package]]
+name = "tzdata"
+version = "2025.2"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/95/32/1a225d6164441be760d75c2c42e2780dc0873fe382da3e98a2e1e48361e5/tzdata-2025.2.tar.gz", hash = "sha256:b60a638fcc0daffadf82fe0f57e53d06bdec2f36c4df66280ae79bce6bd6f2b9", size = 196380, upload-time = "2025-03-23T13:54:43.652Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/5c/23/c7abc0ca0a1526a0774eca151daeb8de62ec457e77262b66b359c3c7679e/tzdata-2025.2-py2.py3-none-any.whl", hash = "sha256:1a403fada01ff9221ca8044d701868fa132215d84beb92242d9acd2147f667a8", size = 347839, upload-time = "2025-03-23T13:54:41.845Z" },
+]
+
+[[package]]
+name = "urllib3"
+version = "2.5.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/15/22/9ee70a2574a4f4599c47dd506532914ce044817c7752a79b6a51286319bc/urllib3-2.5.0.tar.gz", hash = "sha256:3fc47733c7e419d4bc3f6b3dc2b4f890bb743906a30d56ba4a5bfa4bbff92760", size = 393185, upload-time = "2025-06-18T14:07:41.644Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/a7/c2/fe1e52489ae3122415c51f387e221dd0773709bad6c6cdaa599e8a2c5185/urllib3-2.5.0-py3-none-any.whl", hash = "sha256:e6b01673c0fa6a13e374b50871808eb3bf7046c4b125b216f6bf1cc604cff0dc", size = 129795, upload-time = "2025-06-18T14:07:40.39Z" },
+]
+
+[[package]]
+name = "uvicorn"
+version = "0.35.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "click" },
+ { name = "h11" },
+ { name = "typing-extensions", marker = "python_full_version < '3.11'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/5e/42/e0e305207bb88c6b8d3061399c6a961ffe5fbb7e2aa63c9234df7259e9cd/uvicorn-0.35.0.tar.gz", hash = "sha256:bc662f087f7cf2ce11a1d7fd70b90c9f98ef2e2831556dd078d131b96cc94a01", size = 78473, upload-time = "2025-06-28T16:15:46.058Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/d2/e2/dc81b1bd1dcfe91735810265e9d26bc8ec5da45b4c0f6237e286819194c3/uvicorn-0.35.0-py3-none-any.whl", hash = "sha256:197535216b25ff9b785e29a0b79199f55222193d47f820816e7da751e9bc8d4a", size = 66406, upload-time = "2025-06-28T16:15:44.816Z" },
+]
+
+[[package]]
+name = "win32-setctime"
+version = "1.2.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/b3/8f/705086c9d734d3b663af0e9bb3d4de6578d08f46b1b101c2442fd9aecaa2/win32_setctime-1.2.0.tar.gz", hash = "sha256:ae1fdf948f5640aae05c511ade119313fb6a30d7eabe25fef9764dca5873c4c0", size = 4867, upload-time = "2024-12-07T15:28:28.314Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/e1/07/c6fe3ad3e685340704d314d765b7912993bcb8dc198f0e7a89382d37974b/win32_setctime-1.2.0-py3-none-any.whl", hash = "sha256:95d644c4e708aba81dc3704a116d8cbc974d70b3bdb8be1d150e36be6e9d1390", size = 4083, upload-time = "2024-12-07T15:28:26.465Z" },
+]
+
+[[package]]
+name = "zstandard"
+version = "0.24.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/09/1b/c20b2ef1d987627765dcd5bf1dadb8ef6564f00a87972635099bb76b7a05/zstandard-0.24.0.tar.gz", hash = "sha256:fe3198b81c00032326342d973e526803f183f97aa9e9a98e3f897ebafe21178f", size = 905681, upload-time = "2025-08-17T18:36:36.352Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/63/9d/d1ca1e7bff6a7938e81180322c053c080ae9e31b0e3b393434deae7a1ae5/zstandard-0.24.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:af1394c2c5febc44e0bbf0fc6428263fa928b50d1b1982ce1d870dc793a8e5f4", size = 795228, upload-time = "2025-08-17T18:21:12.444Z" },
+ { url = "https://files.pythonhosted.org/packages/d3/ba/a40ddfbbb9f0773127701a802338f215211b018f9222b9fab1e2d498f9cd/zstandard-0.24.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5e941654cef13a1d53634ec30933722eda11f44f99e1d0bc62bbce3387580d50", size = 640522, upload-time = "2025-08-17T18:21:14.133Z" },
+ { url = "https://files.pythonhosted.org/packages/3e/7c/edeee3ef8d469a1345edd86f8d123a3825d60df033bcbbd16df417bdb9e7/zstandard-0.24.0-cp310-cp310-manylinux2010_i686.manylinux2014_i686.manylinux_2_12_i686.manylinux_2_17_i686.whl", hash = "sha256:561123d05681197c0e24eb8ab3cfdaf299e2b59c293d19dad96e1610ccd8fbc6", size = 5344625, upload-time = "2025-08-17T18:21:16.067Z" },
+ { url = "https://files.pythonhosted.org/packages/bf/2c/2f76e5058435d96ab0187303d4e9663372893cdcc95d64fdb60824951162/zstandard-0.24.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:0f6d9a146e07458cb41423ca2d783aefe3a3a97fe72838973c13b8f1ecc7343a", size = 5055074, upload-time = "2025-08-17T18:21:18.483Z" },
+ { url = "https://files.pythonhosted.org/packages/e4/87/3962530a568d38e64f287e11b9a38936d873617120589611c49c29af94a8/zstandard-0.24.0-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:bf02f915fa7934ea5dfc8d96757729c99a8868b7c340b97704795d6413cf5fe6", size = 5401308, upload-time = "2025-08-17T18:21:20.859Z" },
+ { url = "https://files.pythonhosted.org/packages/f1/69/85e65f0fb05b4475130888cf7934ff30ac14b5979527e8f1ccb6f56e21ec/zstandard-0.24.0-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:35f13501a8accf834457d8e40e744568287a215818778bc4d79337af2f3f0d97", size = 5448948, upload-time = "2025-08-17T18:21:23.015Z" },
+ { url = "https://files.pythonhosted.org/packages/2b/2f/1b607274bf20ea8bcd13bea3edc0a48f984c438c09d0a050b9667dadcaed/zstandard-0.24.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:92be52ca4e6e604f03d5daa079caec9e04ab4cbf6972b995aaebb877d3d24e13", size = 5555870, upload-time = "2025-08-17T18:21:24.985Z" },
+ { url = "https://files.pythonhosted.org/packages/a0/9a/fadd5ffded6ab113b26704658a40444865b914de072fb460b6b51aa5fa2f/zstandard-0.24.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0c9c3cba57f5792532a3df3f895980d47d78eda94b0e5b800651b53e96e0b604", size = 5044917, upload-time = "2025-08-17T18:21:27.082Z" },
+ { url = "https://files.pythonhosted.org/packages/2a/0d/c5edc3b00e070d0b4156993bd7bef9cba58c5f2571bd0003054cbe90005c/zstandard-0.24.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:dd91b0134a32dfcd8be504e8e46de44ad0045a569efc25101f2a12ccd41b5759", size = 5571834, upload-time = "2025-08-17T18:21:29.239Z" },
+ { url = "https://files.pythonhosted.org/packages/1f/7e/9e353ed08c3d7a93050bbadbebe2f5f783b13393e0e8e08e970ef3396390/zstandard-0.24.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d6975f2d903bc354916a17b91a7aaac7299603f9ecdb788145060dde6e573a16", size = 4959108, upload-time = "2025-08-17T18:21:31.228Z" },
+ { url = "https://files.pythonhosted.org/packages/af/28/135dffba375ab1f4d2c569de804647eba8bd682f36d3c01b5a012c560ff2/zstandard-0.24.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:7ac6e4d727521d86d20ec291a3f4e64a478e8a73eaee80af8f38ec403e77a409", size = 5265997, upload-time = "2025-08-17T18:21:33.369Z" },
+ { url = "https://files.pythonhosted.org/packages/cc/7a/702e7cbc51c39ce104c198ea6d069fb6a918eb24c5709ac79fe9371f7a55/zstandard-0.24.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:87ae1684bc3c02d5c35884b3726525eda85307073dbefe68c3c779e104a59036", size = 5440015, upload-time = "2025-08-17T18:21:35.023Z" },
+ { url = "https://files.pythonhosted.org/packages/77/40/4a2d0faa2ae6f4c847c7f77ec626abed80873035891c4a4349b735a36fb4/zstandard-0.24.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:7de5869e616d426b56809be7dc6dba4d37b95b90411ccd3de47f421a42d4d42c", size = 5819056, upload-time = "2025-08-17T18:21:39.661Z" },
+ { url = "https://files.pythonhosted.org/packages/3e/fc/580504a2d7c71411a8e403b83f2388ee083819a68e0e740bf974e78839f8/zstandard-0.24.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:388aad2d693707f4a0f6cc687eb457b33303d6b57ecf212c8ff4468c34426892", size = 5362621, upload-time = "2025-08-17T18:21:42.605Z" },
+ { url = "https://files.pythonhosted.org/packages/70/66/97f6b38eeda955eaa6b5e7cfc0528039bfcb9eb8338016aacf6d83d8a75e/zstandard-0.24.0-cp310-cp310-win32.whl", hash = "sha256:962ea3aecedcc944f8034812e23d7200d52c6e32765b8da396eeb8b8ffca71ce", size = 435575, upload-time = "2025-08-17T18:21:45.477Z" },
+ { url = "https://files.pythonhosted.org/packages/68/a2/5814bdd22d879b10fcc5dc37366e39603767063f06ae970f2a657f76ddac/zstandard-0.24.0-cp310-cp310-win_amd64.whl", hash = "sha256:869bf13f66b124b13be37dd6e08e4b728948ff9735308694e0b0479119e08ea7", size = 505115, upload-time = "2025-08-17T18:21:44.011Z" },
+ { url = "https://files.pythonhosted.org/packages/01/1f/5c72806f76043c0ef9191a2b65281dacdf3b65b0828eb13bb2c987c4fb90/zstandard-0.24.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:addfc23e3bd5f4b6787b9ca95b2d09a1a67ad5a3c318daaa783ff90b2d3a366e", size = 795228, upload-time = "2025-08-17T18:21:46.978Z" },
+ { url = "https://files.pythonhosted.org/packages/0b/ba/3059bd5cd834666a789251d14417621b5c61233bd46e7d9023ea8bc1043a/zstandard-0.24.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6b005bcee4be9c3984b355336283afe77b2defa76ed6b89332eced7b6fa68b68", size = 640520, upload-time = "2025-08-17T18:21:48.162Z" },
+ { url = "https://files.pythonhosted.org/packages/57/07/f0e632bf783f915c1fdd0bf68614c4764cae9dd46ba32cbae4dd659592c3/zstandard-0.24.0-cp311-cp311-manylinux2010_i686.manylinux2014_i686.manylinux_2_12_i686.manylinux_2_17_i686.whl", hash = "sha256:3f96a9130171e01dbb6c3d4d9925d604e2131a97f540e223b88ba45daf56d6fb", size = 5347682, upload-time = "2025-08-17T18:21:50.266Z" },
+ { url = "https://files.pythonhosted.org/packages/a6/4c/63523169fe84773a7462cd090b0989cb7c7a7f2a8b0a5fbf00009ba7d74d/zstandard-0.24.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:cd0d3d16e63873253bad22b413ec679cf6586e51b5772eb10733899832efec42", size = 5057650, upload-time = "2025-08-17T18:21:52.634Z" },
+ { url = "https://files.pythonhosted.org/packages/c6/16/49013f7ef80293f5cebf4c4229535a9f4c9416bbfd238560edc579815dbe/zstandard-0.24.0-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:b7a8c30d9bf4bd5e4dcfe26900bef0fcd9749acde45cdf0b3c89e2052fda9a13", size = 5404893, upload-time = "2025-08-17T18:21:54.54Z" },
+ { url = "https://files.pythonhosted.org/packages/4d/38/78e8bcb5fc32a63b055f2b99e0be49b506f2351d0180173674f516cf8a7a/zstandard-0.24.0-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:52cd7d9fa0a115c9446abb79b06a47171b7d916c35c10e0c3aa6f01d57561382", size = 5452389, upload-time = "2025-08-17T18:21:56.822Z" },
+ { url = "https://files.pythonhosted.org/packages/55/8a/81671f05619edbacd49bd84ce6899a09fc8299be20c09ae92f6618ccb92d/zstandard-0.24.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:a0f6fc2ea6e07e20df48752e7700e02e1892c61f9a6bfbacaf2c5b24d5ad504b", size = 5558888, upload-time = "2025-08-17T18:21:58.68Z" },
+ { url = "https://files.pythonhosted.org/packages/49/cc/e83feb2d7d22d1f88434defbaeb6e5e91f42a4f607b5d4d2d58912b69d67/zstandard-0.24.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e46eb6702691b24ddb3e31e88b4a499e31506991db3d3724a85bd1c5fc3cfe4e", size = 5048038, upload-time = "2025-08-17T18:22:00.642Z" },
+ { url = "https://files.pythonhosted.org/packages/08/c3/7a5c57ff49ef8943877f85c23368c104c2aea510abb339a2dc31ad0a27c3/zstandard-0.24.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d5e3b9310fd7f0d12edc75532cd9a56da6293840c84da90070d692e0bb15f186", size = 5573833, upload-time = "2025-08-17T18:22:02.402Z" },
+ { url = "https://files.pythonhosted.org/packages/f9/00/64519983cd92535ba4bdd4ac26ac52db00040a52d6c4efb8d1764abcc343/zstandard-0.24.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:76cdfe7f920738ea871f035568f82bad3328cbc8d98f1f6988264096b5264efd", size = 4961072, upload-time = "2025-08-17T18:22:04.384Z" },
+ { url = "https://files.pythonhosted.org/packages/72/ab/3a08a43067387d22994fc87c3113636aa34ccd2914a4d2d188ce365c5d85/zstandard-0.24.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:3f2fe35ec84908dddf0fbf66b35d7c2878dbe349552dd52e005c755d3493d61c", size = 5268462, upload-time = "2025-08-17T18:22:06.095Z" },
+ { url = "https://files.pythonhosted.org/packages/49/cf/2abb3a1ad85aebe18c53e7eca73223f1546ddfa3bf4d2fb83fc5a064c5ca/zstandard-0.24.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:aa705beb74ab116563f4ce784fa94771f230c05d09ab5de9c397793e725bb1db", size = 5443319, upload-time = "2025-08-17T18:22:08.572Z" },
+ { url = "https://files.pythonhosted.org/packages/40/42/0dd59fc2f68f1664cda11c3b26abdf987f4e57cb6b6b0f329520cd074552/zstandard-0.24.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:aadf32c389bb7f02b8ec5c243c38302b92c006da565e120dfcb7bf0378f4f848", size = 5822355, upload-time = "2025-08-17T18:22:10.537Z" },
+ { url = "https://files.pythonhosted.org/packages/99/c0/ea4e640fd4f7d58d6f87a1e7aca11fb886ac24db277fbbb879336c912f63/zstandard-0.24.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:e40cd0fc734aa1d4bd0e7ad102fd2a1aefa50ce9ef570005ffc2273c5442ddc3", size = 5365257, upload-time = "2025-08-17T18:22:13.159Z" },
+ { url = "https://files.pythonhosted.org/packages/27/a9/92da42a5c4e7e4003271f2e1f0efd1f37cfd565d763ad3604e9597980a1c/zstandard-0.24.0-cp311-cp311-win32.whl", hash = "sha256:cda61c46343809ecda43dc620d1333dd7433a25d0a252f2dcc7667f6331c7b61", size = 435559, upload-time = "2025-08-17T18:22:17.29Z" },
+ { url = "https://files.pythonhosted.org/packages/e2/8e/2c8e5c681ae4937c007938f954a060fa7c74f36273b289cabdb5ef0e9a7e/zstandard-0.24.0-cp311-cp311-win_amd64.whl", hash = "sha256:3b95fc06489aa9388400d1aab01a83652bc040c9c087bd732eb214909d7fb0dd", size = 505070, upload-time = "2025-08-17T18:22:14.808Z" },
+ { url = "https://files.pythonhosted.org/packages/52/10/a2f27a66bec75e236b575c9f7b0d7d37004a03aa2dcde8e2decbe9ed7b4d/zstandard-0.24.0-cp311-cp311-win_arm64.whl", hash = "sha256:ad9fd176ff6800a0cf52bcf59c71e5de4fa25bf3ba62b58800e0f84885344d34", size = 461507, upload-time = "2025-08-17T18:22:15.964Z" },
+ { url = "https://files.pythonhosted.org/packages/26/e9/0bd281d9154bba7fc421a291e263911e1d69d6951aa80955b992a48289f6/zstandard-0.24.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:a2bda8f2790add22773ee7a4e43c90ea05598bffc94c21c40ae0a9000b0133c3", size = 795710, upload-time = "2025-08-17T18:22:19.189Z" },
+ { url = "https://files.pythonhosted.org/packages/36/26/b250a2eef515caf492e2d86732e75240cdac9d92b04383722b9753590c36/zstandard-0.24.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:cc76de75300f65b8eb574d855c12518dc25a075dadb41dd18f6322bda3fe15d5", size = 640336, upload-time = "2025-08-17T18:22:20.466Z" },
+ { url = "https://files.pythonhosted.org/packages/79/bf/3ba6b522306d9bf097aac8547556b98a4f753dc807a170becaf30dcd6f01/zstandard-0.24.0-cp312-cp312-manylinux2010_i686.manylinux2014_i686.manylinux_2_12_i686.manylinux_2_17_i686.whl", hash = "sha256:d2b3b4bda1a025b10fe0269369475f420177f2cb06e0f9d32c95b4873c9f80b8", size = 5342533, upload-time = "2025-08-17T18:22:22.326Z" },
+ { url = "https://files.pythonhosted.org/packages/ea/ec/22bc75bf054e25accdf8e928bc68ab36b4466809729c554ff3a1c1c8bce6/zstandard-0.24.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:9b84c6c210684286e504022d11ec294d2b7922d66c823e87575d8b23eba7c81f", size = 5062837, upload-time = "2025-08-17T18:22:24.416Z" },
+ { url = "https://files.pythonhosted.org/packages/48/cc/33edfc9d286e517fb5b51d9c3210e5bcfce578d02a675f994308ca587ae1/zstandard-0.24.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:c59740682a686bf835a1a4d8d0ed1eefe31ac07f1c5a7ed5f2e72cf577692b00", size = 5393855, upload-time = "2025-08-17T18:22:26.786Z" },
+ { url = "https://files.pythonhosted.org/packages/73/36/59254e9b29da6215fb3a717812bf87192d89f190f23817d88cb8868c47ac/zstandard-0.24.0-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:6324fde5cf5120fbf6541d5ff3c86011ec056e8d0f915d8e7822926a5377193a", size = 5451058, upload-time = "2025-08-17T18:22:28.885Z" },
+ { url = "https://files.pythonhosted.org/packages/9a/c7/31674cb2168b741bbbe71ce37dd397c9c671e73349d88ad3bca9e9fae25b/zstandard-0.24.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:51a86bd963de3f36688553926a84e550d45d7f9745bd1947d79472eca27fcc75", size = 5546619, upload-time = "2025-08-17T18:22:31.115Z" },
+ { url = "https://files.pythonhosted.org/packages/e6/01/1a9f22239f08c00c156f2266db857545ece66a6fc0303d45c298564bc20b/zstandard-0.24.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d82ac87017b734f2fb70ff93818c66f0ad2c3810f61040f077ed38d924e19980", size = 5046676, upload-time = "2025-08-17T18:22:33.077Z" },
+ { url = "https://files.pythonhosted.org/packages/a7/91/6c0cf8fa143a4988a0361380ac2ef0d7cb98a374704b389fbc38b5891712/zstandard-0.24.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:92ea7855d5bcfb386c34557516c73753435fb2d4a014e2c9343b5f5ba148b5d8", size = 5576381, upload-time = "2025-08-17T18:22:35.391Z" },
+ { url = "https://files.pythonhosted.org/packages/e2/77/1526080e22e78871e786ccf3c84bf5cec9ed25110a9585507d3c551da3d6/zstandard-0.24.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3adb4b5414febf074800d264ddf69ecade8c658837a83a19e8ab820e924c9933", size = 4953403, upload-time = "2025-08-17T18:22:37.266Z" },
+ { url = "https://files.pythonhosted.org/packages/6e/d0/a3a833930bff01eab697eb8abeafb0ab068438771fa066558d96d7dafbf9/zstandard-0.24.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:6374feaf347e6b83ec13cc5dcfa70076f06d8f7ecd46cc71d58fac798ff08b76", size = 5267396, upload-time = "2025-08-17T18:22:39.757Z" },
+ { url = "https://files.pythonhosted.org/packages/f3/5e/90a0db9a61cd4769c06374297ecfcbbf66654f74cec89392519deba64d76/zstandard-0.24.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:13fc548e214df08d896ee5f29e1f91ee35db14f733fef8eabea8dca6e451d1e2", size = 5433269, upload-time = "2025-08-17T18:22:42.131Z" },
+ { url = "https://files.pythonhosted.org/packages/ce/58/fc6a71060dd67c26a9c5566e0d7c99248cbe5abfda6b3b65b8f1a28d59f7/zstandard-0.24.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:0a416814608610abf5488889c74e43ffa0343ca6cf43957c6b6ec526212422da", size = 5814203, upload-time = "2025-08-17T18:22:44.017Z" },
+ { url = "https://files.pythonhosted.org/packages/5c/6a/89573d4393e3ecbfa425d9a4e391027f58d7810dec5cdb13a26e4cdeef5c/zstandard-0.24.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:0d66da2649bb0af4471699aeb7a83d6f59ae30236fb9f6b5d20fb618ef6c6777", size = 5359622, upload-time = "2025-08-17T18:22:45.802Z" },
+ { url = "https://files.pythonhosted.org/packages/60/ff/2cbab815d6f02a53a9d8d8703bc727d8408a2e508143ca9af6c3cca2054b/zstandard-0.24.0-cp312-cp312-win32.whl", hash = "sha256:ff19efaa33e7f136fe95f9bbcc90ab7fb60648453b03f95d1de3ab6997de0f32", size = 435968, upload-time = "2025-08-17T18:22:49.493Z" },
+ { url = "https://files.pythonhosted.org/packages/ce/a3/8f96b8ddb7ad12344218fbd0fd2805702dafd126ae9f8a1fb91eef7b33da/zstandard-0.24.0-cp312-cp312-win_amd64.whl", hash = "sha256:bc05f8a875eb651d1cc62e12a4a0e6afa5cd0cc231381adb830d2e9c196ea895", size = 505195, upload-time = "2025-08-17T18:22:47.193Z" },
+ { url = "https://files.pythonhosted.org/packages/a3/4a/bfca20679da63bfc236634ef2e4b1b4254203098b0170e3511fee781351f/zstandard-0.24.0-cp312-cp312-win_arm64.whl", hash = "sha256:b04c94718f7a8ed7cdd01b162b6caa1954b3c9d486f00ecbbd300f149d2b2606", size = 461605, upload-time = "2025-08-17T18:22:48.317Z" },
+ { url = "https://files.pythonhosted.org/packages/ec/ef/db949de3bf81ed122b8ee4db6a8d147a136fe070e1015f5a60d8a3966748/zstandard-0.24.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e4ebb000c0fe24a6d0f3534b6256844d9dbf042fdf003efe5cf40690cf4e0f3e", size = 795700, upload-time = "2025-08-17T18:22:50.851Z" },
+ { url = "https://files.pythonhosted.org/packages/99/56/fc04395d6f5eabd2fe6d86c0800d198969f3038385cb918bfbe94f2b0c62/zstandard-0.24.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:498f88f5109666c19531f0243a90d2fdd2252839cd6c8cc6e9213a3446670fa8", size = 640343, upload-time = "2025-08-17T18:22:51.999Z" },
+ { url = "https://files.pythonhosted.org/packages/9b/0f/0b0e0d55f2f051d5117a0d62f4f9a8741b3647440c0ee1806b7bd47ed5ae/zstandard-0.24.0-cp313-cp313-manylinux2010_i686.manylinux2014_i686.manylinux_2_12_i686.manylinux_2_17_i686.whl", hash = "sha256:0a9e95ceb180ccd12a8b3437bac7e8a8a089c9094e39522900a8917745542184", size = 5342571, upload-time = "2025-08-17T18:22:53.734Z" },
+ { url = "https://files.pythonhosted.org/packages/5d/43/d74e49f04fbd62d4b5d89aeb7a29d693fc637c60238f820cd5afe6ca8180/zstandard-0.24.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:bcf69e0bcddbf2adcfafc1a7e864edcc204dd8171756d3a8f3340f6f6cc87b7b", size = 5062723, upload-time = "2025-08-17T18:22:55.624Z" },
+ { url = "https://files.pythonhosted.org/packages/8e/97/df14384d4d6a004388e6ed07ded02933b5c7e0833a9150c57d0abc9545b7/zstandard-0.24.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:10e284748a7e7fbe2815ca62a9d6e84497d34cfdd0143fa9e8e208efa808d7c4", size = 5393282, upload-time = "2025-08-17T18:22:57.655Z" },
+ { url = "https://files.pythonhosted.org/packages/7e/09/8f5c520e59a4d41591b30b7568595eda6fd71c08701bb316d15b7ed0613a/zstandard-0.24.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:1bda8a85e5b9d5e73af2e61b23609a8cc1598c1b3b2473969912979205a1ff25", size = 5450895, upload-time = "2025-08-17T18:22:59.749Z" },
+ { url = "https://files.pythonhosted.org/packages/d9/3d/02aba892327a67ead8cba160ee835cfa1fc292a9dcb763639e30c07da58b/zstandard-0.24.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:1b14bc92af065d0534856bf1b30fc48753163ea673da98857ea4932be62079b1", size = 5546353, upload-time = "2025-08-17T18:23:01.457Z" },
+ { url = "https://files.pythonhosted.org/packages/6a/6e/96c52afcde44da6a5313a1f6c356349792079808f12d8b69a7d1d98ef353/zstandard-0.24.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:b4f20417a4f511c656762b001ec827500cbee54d1810253c6ca2df2c0a307a5f", size = 5046404, upload-time = "2025-08-17T18:23:03.418Z" },
+ { url = "https://files.pythonhosted.org/packages/da/b6/eefee6b92d341a7db7cd1b3885d42d30476a093720fb5c181e35b236d695/zstandard-0.24.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:337572a7340e1d92fd7fb5248c8300d0e91071002d92e0b8cabe8d9ae7b58159", size = 5576095, upload-time = "2025-08-17T18:23:05.331Z" },
+ { url = "https://files.pythonhosted.org/packages/a3/29/743de3131f6239ba6611e17199581e6b5e0f03f268924d42468e29468ca0/zstandard-0.24.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:df4be1cf6e8f0f2bbe2a3eabfff163ef592c84a40e1a20a8d7db7f27cfe08fc2", size = 4953448, upload-time = "2025-08-17T18:23:07.225Z" },
+ { url = "https://files.pythonhosted.org/packages/c9/11/bd36ef49fba82e307d69d93b5abbdcdc47d6a0bcbc7ffbbfe0ef74c2fec5/zstandard-0.24.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:6885ae4b33aee8835dbdb4249d3dfec09af55e705d74d9b660bfb9da51baaa8b", size = 5267388, upload-time = "2025-08-17T18:23:09.127Z" },
+ { url = "https://files.pythonhosted.org/packages/c0/23/a4cfe1b871d3f1ce1f88f5c68d7e922e94be0043f3ae5ed58c11578d1e21/zstandard-0.24.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:663848a8bac4fdbba27feea2926049fdf7b55ec545d5b9aea096ef21e7f0b079", size = 5433383, upload-time = "2025-08-17T18:23:11.343Z" },
+ { url = "https://files.pythonhosted.org/packages/77/26/f3fb85f00e732cca617d4b9cd1ffa6484f613ea07fad872a8bdc3a0ce753/zstandard-0.24.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:05d27c953f2e0a3ecc8edbe91d6827736acc4c04d0479672e0400ccdb23d818c", size = 5813988, upload-time = "2025-08-17T18:23:13.194Z" },
+ { url = "https://files.pythonhosted.org/packages/3d/8c/d7e3b424b73f3ce66e754595cbcb6d94ff49790c9ac37d50e40e8145cd44/zstandard-0.24.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:77b8b7b98893eaf47da03d262816f01f251c2aa059c063ed8a45c50eada123a5", size = 5359756, upload-time = "2025-08-17T18:23:15.021Z" },
+ { url = "https://files.pythonhosted.org/packages/90/6c/f1f0e11f1b295138f9da7e7ae22dcd9a1bb96a9544fa3b31507e431288f5/zstandard-0.24.0-cp313-cp313-win32.whl", hash = "sha256:cf7fbb4e54136e9a03c7ed7691843c4df6d2ecc854a2541f840665f4f2bb2edd", size = 435957, upload-time = "2025-08-17T18:23:18.835Z" },
+ { url = "https://files.pythonhosted.org/packages/9f/03/ab8b82ae5eb49eca4d3662705399c44442666cc1ce45f44f2d263bb1ae31/zstandard-0.24.0-cp313-cp313-win_amd64.whl", hash = "sha256:d64899cc0f33a8f446f1e60bffc21fa88b99f0e8208750d9144ea717610a80ce", size = 505171, upload-time = "2025-08-17T18:23:16.44Z" },
+ { url = "https://files.pythonhosted.org/packages/db/12/89a2ecdea4bc73a934a30b66a7cfac5af352beac94d46cf289e103b65c34/zstandard-0.24.0-cp313-cp313-win_arm64.whl", hash = "sha256:57be3abb4313e0dd625596376bbb607f40059d801d51c1a1da94d7477e63b255", size = 461596, upload-time = "2025-08-17T18:23:17.603Z" },
+]
diff --git a/src/backend/langflow/version/__init__.py b/src/packages/langflow/version/__init__.py
similarity index 100%
rename from src/backend/langflow/version/__init__.py
rename to src/packages/langflow/version/__init__.py
diff --git a/src/backend/langflow/version/version.py b/src/packages/langflow/version/version.py
similarity index 100%
rename from src/backend/langflow/version/version.py
rename to src/packages/langflow/version/version.py
diff --git a/uv.lock b/uv.lock
index 781257a679cd..0af54810eba0 100644
--- a/uv.lock
+++ b/uv.lock
@@ -5260,7 +5260,7 @@ requires-dist = [
{ name = "langchain-pinecone", specifier = ">=0.2.8" },
{ name = "langchain-sambanova", specifier = "==0.1.0" },
{ name = "langchain-unstructured", specifier = "==0.1.5" },
- { name = "langflow-base", editable = "src/backend/base" },
+ { name = "langflow-base", editable = "src/packages/base" },
{ name = "langfuse", specifier = "==2.53.9" },
{ name = "langsmith", specifier = ">=0.3.42,<1.0.0" },
{ name = "langwatch", specifier = ">=0.2.11,<0.3.0" },
@@ -5376,7 +5376,7 @@ dev = [
[[package]]
name = "langflow-base"
version = "0.6.0"
-source = { editable = "src/backend/base" }
+source = { editable = "src/packages/base" }
dependencies = [
{ name = "aiofile" },
{ name = "aiofiles" },
@@ -5551,7 +5551,7 @@ requires-dist = [
{ name = "langchain-experimental", specifier = ">=0.3.4,<1.0.0" },
{ name = "langchain-ibm", specifier = ">=0.3.8" },
{ name = "langchainhub", specifier = "~=0.1.15" },
- { name = "lfx", editable = "src/lfx" },
+ { name = "lfx", editable = "src/packages/core" },
{ name = "llama-cpp-python", marker = "extra == 'all'", specifier = ">=0.2.0" },
{ name = "llama-cpp-python", marker = "extra == 'local'", specifier = ">=0.2.0" },
{ name = "loguru", specifier = ">=0.7.1,<1.0.0" },
@@ -5790,7 +5790,7 @@ wheels = [
[[package]]
name = "lfx"
version = "0.1.12"
-source = { editable = "src/lfx" }
+source = { editable = "src/packages/core" }
dependencies = [
{ name = "aiofile" },
{ name = "aiofiles" },