diff --git a/.devcontainer/README.md b/.devcontainer/README.md
index 3712d1bcd71e..bde30c8fca48 100644
--- a/.devcontainer/README.md
+++ b/.devcontainer/README.md
@@ -33,7 +33,7 @@ You now need to manually build the frontend. Open a new Terminal and run command
 make build_frontend
 ```

-This will take a short period of time, you should have a message similar to `Building frontend static files` and the command will complete successfully.
+This takes a short time. You should see a message similar to `Building frontend static files`, and the command will complete successfully.

 Installation is now complete.

@@ -57,4 +57,4 @@ The service will start, and you will may notice a dialog in the lower right indi
 ╰───────────────────────────────────────────────────────────────────────╯
 ```

-At this point you can connect to the service via the port, or if the dialog is gone you can find the "Forwarded Address" on the "Ports" tab (which is next the "Terminal" tab). If there is no port forwarded, you can click the "Forward a Port" button on the "Ports" tab, and forward `7860`.
\ No newline at end of file
+At this point you can connect to the service via the port, or, if the dialog is gone, you can find the "Forwarded Address" on the "Ports" tab (which is next to the "Terminal" tab). If there is no port forwarded, you can click the "Forward a Port" button on the "Ports" tab and forward `7860`.
\ No newline at end of file
diff --git a/.github/workflows/cross-platform-test.md b/.github/workflows/cross-platform-test.md
index bacc40036634..597949c739d1 100644
--- a/.github/workflows/cross-platform-test.md
+++ b/.github/workflows/cross-platform-test.md
@@ -122,7 +122,7 @@ cross-platform-test.yml

 **Key Benefits:**
 - **Single File**: No complex workflow chains or parameter passing issues
-- **Unified Logic**: Same test matrix for all use cases 
+- **Unified Logic**: Same test matrix for all use cases
 - **Smart Routing**: Automatically determines install method based on trigger type
 - **Context-Aware**: Summary messages adapt to manual vs programmatic usage

@@ -154,11 +154,11 @@ build-if-needed:
 test-installation:
   steps:
     - name: Determine install method
-      # workflow_dispatch: maps boolean to install method 
+      # workflow_dispatch: maps boolean to install method
       # workflow_call: always uses wheel method
     - name: Install from PyPI
       if: steps.install-method.outputs.method == 'pypi'
-    - name: Install from wheels 
+    - name: Install from wheels
       if: steps.install-method.outputs.method == 'wheel'
 ```
diff --git a/.github/workflows/nightly_build.yml b/.github/workflows/nightly_build.yml
index e8fad2d1d3de..f22eb85e170e 100644
--- a/.github/workflows/nightly_build.yml
+++ b/.github/workflows/nightly_build.yml
@@ -23,6 +23,7 @@ jobs:
     outputs:
       main_tag: ${{ steps.generate_main_tag.outputs.main_tag }}
       base_tag: ${{ steps.set_base_tag.outputs.base_tag }}
+      lfx_tag: ${{ steps.generate_lfx_tag.outputs.lfx_tag }}
     steps:
       - name: Checkout code
         uses: actions/checkout@v4
@@ -62,6 +63,14 @@
           echo "base_tag=$BASE_TAG" >> $GITHUB_OUTPUT
           echo "base_tag=$BASE_TAG"

+      - name: Generate LFX nightly tag
+        id: generate_lfx_tag
+        run: |
+          # NOTE: This outputs the tag with the `v` prefix.
+ LFX_TAG="$(uv run ./scripts/ci/lfx_nightly_tag.py)" + echo "lfx_tag=$LFX_TAG" >> $GITHUB_OUTPUT + echo "lfx_tag=$LFX_TAG" + - name: Commit tag id: commit_tag run: | @@ -72,13 +81,17 @@ jobs: MAIN_TAG="${{ steps.generate_main_tag.outputs.main_tag }}" BASE_TAG="${{ steps.generate_base_tag.outputs.base_tag }}" + LFX_TAG="${{ steps.generate_lfx_tag.outputs.lfx_tag }}" echo "Updating base project version to $BASE_TAG and updating main project version to $MAIN_TAG" uv run ./scripts/ci/update_pyproject_combined.py main $MAIN_TAG $BASE_TAG + echo "Updating LFX project version to $LFX_TAG" + uv run ./scripts/ci/update_lfx_version.py $LFX_TAG uv lock cd src/backend/base && uv lock && cd ../../.. + cd src/lfx && uv lock && cd ../.. - git add pyproject.toml src/backend/base/pyproject.toml uv.lock src/backend/base/uv.lock + git add pyproject.toml src/backend/base/pyproject.toml src/lfx/pyproject.toml uv.lock src/backend/base/uv.lock src/lfx/uv.lock git commit -m "Update version and project name" echo "Tagging main with $MAIN_TAG" @@ -149,6 +162,31 @@ jobs: OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }} + lfx-tests: + if: github.repository == 'langflow-ai/langflow' + name: Run LFX Tests + needs: create-nightly-tag + runs-on: ubuntu-latest + strategy: + matrix: + python-version: ["3.10", "3.11", "3.12", "3.13"] + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ needs.create-nightly-tag.outputs.main_tag }} + - name: Setup Environment + uses: astral-sh/setup-uv@v6 + with: + enable-cache: true + cache-dependency-glob: "uv.lock" + python-version: ${{ matrix.python-version }} + prune-cache: false + - name: Install LFX dependencies + run: uv sync --dev --package lfx + - name: Run LFX tests + run: cd src/lfx && uv run pytest tests/unit -v + # Not making nightly builds dependent on integration test success # due to inherent flakiness of 3rd party integrations # Revisit when https://github.com/langflow-ai/langflow/pull/3607 is merged. 
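The `Generate LFX nightly tag` step above deliberately emits the tag with a leading `v` (see the NOTE in its `run` block), while the downstream version-update scripts strip that prefix before writing it into `pyproject.toml`. As a point of reference for why this round-trips safely: PEP 440, as implemented by the `packaging` library that `lfx_nightly_tag.py` uses, accepts a leading `v` and normalizes it away. A quick illustrative check (the version number here is made up):

```python
from packaging.version import Version

# PEP 440 treats a leading "v" as valid input and normalizes it away.
tag = Version("v1.5.0.dev10")

assert str(tag) == "1.5.0.dev10"    # normalized form, no "v" prefix
assert tag.base_version == "1.5.0"  # what the nightly scripts compare against
assert tag.dev == 10                # the build number the nightly script increments
assert tag.is_devrelease
```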
@@ -163,13 +201,15 @@ jobs: release-nightly-build: if: github.repository == 'langflow-ai/langflow' name: Run Nightly Langflow Build - needs: [frontend-tests, backend-unit-tests, create-nightly-tag] + needs: [frontend-tests, backend-unit-tests, lfx-tests, create-nightly-tag] uses: ./.github/workflows/release_nightly.yml with: build_docker_base: true build_docker_main: true + build_lfx: true nightly_tag_main: ${{ needs.create-nightly-tag.outputs.main_tag }} nightly_tag_base: ${{ needs.create-nightly-tag.outputs.base_tag }} + nightly_tag_lfx: ${{ needs.create-nightly-tag.outputs.lfx_tag }} secrets: inherit # slack-notification: diff --git a/.github/workflows/python_test.yml b/.github/workflows/python_test.yml index f9faf7625a46..c2a8afd9ac51 100644 --- a/.github/workflows/python_test.yml +++ b/.github/workflows/python_test.yml @@ -131,6 +131,28 @@ jobs: PYLEAK_LOG_LEVEL: debug # enable pyleak logging DO_NOT_TRACK: true # disable telemetry reporting + lfx-tests: + name: LFX Tests - Python ${{ matrix.python-version }} + runs-on: ubuntu-latest + strategy: + matrix: + python-version: ${{ fromJson(inputs.python-versions || '["3.10", "3.11", "3.12", "3.13"]' ) }} + steps: + - uses: actions/checkout@v4 + with: + ref: ${{ inputs.ref || github.ref }} + - name: "Setup Environment" + uses: astral-sh/setup-uv@v6 + with: + enable-cache: true + cache-dependency-glob: "uv.lock" + python-version: ${{ matrix.python-version }} + prune-cache: false + - name: Run lfx tests + run: make lfx_tests + env: + DO_NOT_TRACK: true # disable telemetry reporting + test-cli: name: Test CLI - Python ${{ matrix.python-version }} runs-on: ubuntu-latest diff --git a/.github/workflows/release-lfx.yml b/.github/workflows/release-lfx.yml new file mode 100644 index 000000000000..3ccff33baf5c --- /dev/null +++ b/.github/workflows/release-lfx.yml @@ -0,0 +1,389 @@ +name: LFX Release +run-name: LFX Release ${{ github.event.inputs.version || 'dev' }} by @${{ github.actor }} + +on: + workflow_dispatch: + inputs: + version: + description: "Version to release (e.g., 0.1.0)" + required: true + type: string + publish_pypi: + description: "Publish to PyPI" + required: true + type: boolean + default: true + build_docker: + description: "Build and publish Docker images" + required: true + type: boolean + default: true + pre_release: + description: "Mark as pre-release" + required: false + type: boolean + default: false + create_github_release: + description: "Create GitHub release" + required: true + type: boolean + default: true + +env: + PYTHON_VERSION: "3.13" + +permissions: + contents: write + packages: write + +jobs: + validate-version: + name: Validate Version + runs-on: ubuntu-latest + outputs: + should_release: ${{ steps.check.outputs.should_release }} + current_version: ${{ steps.check.outputs.current_version }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Setup Environment + uses: astral-sh/setup-uv@v6 + with: + enable-cache: true + cache-dependency-glob: "uv.lock" + python-version: ${{ env.PYTHON_VERSION }} + prune-cache: false + + - name: Check version + id: check + run: | + cd src/lfx + # Use uv tree to get package info, consistent with nightly workflow + name=$(uv tree | grep 'lfx' | head -n 1 | awk '{print $1}') + version=$(uv tree | grep 'lfx' | head -n 1 | awk '{print $2}') + + # Strip leading 'v' if present + version=$(echo $version | sed 's/^v//') + echo "current_version=$version" >> $GITHUB_OUTPUT + + if [ "$version" != "${{ github.event.inputs.version }}" ]; then + echo "❌ Version mismatch: 
package has $version but input is ${{ github.event.inputs.version }}" + echo "Please update the version in pyproject.toml first" + echo "should_release=false" >> $GITHUB_OUTPUT + exit 1 + fi + + # Check if version already exists on PyPI + if curl -s "https://pypi.org/pypi/lfx/json" | jq -r '.releases | keys[]' | grep -q "^${{ github.event.inputs.version }}$"; then + echo "❌ Version ${{ github.event.inputs.version }} already exists on PyPI" + echo "should_release=false" >> $GITHUB_OUTPUT + exit 1 + fi + + echo "✅ Version ${{ github.event.inputs.version }} is valid and not yet released" + echo "should_release=true" >> $GITHUB_OUTPUT + + run-tests: + name: Run Tests + needs: validate-version + if: needs.validate-version.outputs.should_release == 'true' + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + python-version: ["3.10", "3.11", "3.12", "3.13"] + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Setup Environment + uses: astral-sh/setup-uv@v6 + with: + enable-cache: true + cache-dependency-glob: "uv.lock" + python-version: ${{ matrix.python-version }} + prune-cache: false + + - name: Run LFX tests + run: | + cd src/lfx + make test + + - name: Test CLI installation + run: | + cd src/lfx + uv pip install . + uv run lfx --help + uv run lfx run --help + uv run lfx serve --help + + release-lfx: + name: Build and Release LFX + needs: [validate-version, run-tests] + runs-on: ubuntu-latest + outputs: + version: ${{ steps.check-version.outputs.version }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Setup Environment + uses: astral-sh/setup-uv@v6 + with: + enable-cache: true + cache-dependency-glob: "uv.lock" + python-version: ${{ env.PYTHON_VERSION }} + prune-cache: false + + - name: Install LFX dependencies + run: uv sync --dev --package lfx + + - name: Verify Version + id: check-version + run: | + cd src/lfx + # Use uv tree to get package info, consistent with nightly workflow + name=$(uv tree | grep 'lfx' | head -n 1 | awk '{print $1}') + version=$(uv tree | grep 'lfx' | head -n 1 | awk '{print $2}') + + # Verify package name + if [ "$name" != "lfx" ]; then + echo "Package name $name does not match lfx. Exiting the workflow." + exit 1 + fi + + # Strip leading 'v' if present + version=$(echo $version | sed 's/^v//') + + # Verify version matches input + if [ "$version" != "${{ github.event.inputs.version }}" ]; then + echo "Version $version does not match input ${{ github.event.inputs.version }}. Exiting the workflow." 
+ exit 1 + fi + + echo "version=$version" >> $GITHUB_OUTPUT + + - name: Build distribution + run: | + cd src/lfx + rm -rf dist/ + uv build --wheel --out-dir dist + + - name: Check build artifacts + run: | + cd src/lfx + ls -la dist/ + # Verify wheel contents + unzip -l dist/*.whl | grep -E "(lfx/__main__.py|lfx/cli/run.py|lfx/cli/commands.py)" + + - name: Test installation from wheel + run: | + cd src/lfx + uv pip install dist/*.whl --force-reinstall + uv run lfx --help + echo "LFX CLI test completed successfully" + + - name: Upload artifacts + uses: actions/upload-artifact@v4 + with: + name: lfx-dist + path: src/lfx/dist/ + retention-days: 5 + + - name: Publish to PyPI + if: github.event.inputs.publish_pypi == 'true' + env: + UV_PUBLISH_TOKEN: ${{ secrets.PYPI_API_TOKEN }} + run: | + cd src/lfx + uv publish dist/*.whl + + build-docker: + name: Build Docker Images + needs: [validate-version, run-tests] + if: github.event.inputs.build_docker == 'true' + runs-on: ubuntu-latest + strategy: + matrix: + variant: [production, alpine] + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Login to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Login to GitHub Container Registry + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Prepare Docker metadata + id: meta + uses: docker/metadata-action@v5 + with: + images: | + langflowai/lfx + ghcr.io/langflow-ai/lfx + tags: | + type=raw,value=${{ github.event.inputs.version }}${{ matrix.variant == 'alpine' && '-alpine' || '' }} + type=raw,value=latest${{ matrix.variant == 'alpine' && '-alpine' || '' }},enable=${{ github.event.inputs.pre_release == 'false' }} + labels: | + org.opencontainers.image.title=LFX + org.opencontainers.image.description=Langflow Executor - CLI tool for running Langflow AI workflows + org.opencontainers.image.vendor=Langflow + org.opencontainers.image.version=${{ github.event.inputs.version }} + + - name: Build and push Docker image + uses: docker/build-push-action@v5 + with: + context: . 
+ file: src/lfx/docker/Dockerfile${{ matrix.variant == 'alpine' && '.alpine' || '' }} + platforms: linux/amd64,linux/arm64 + push: true + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + cache-from: type=gha + cache-to: type=gha,mode=max + build-args: | + LFX_VERSION=${{ github.event.inputs.version }} + + create-release: + name: Create GitHub Release + needs: [release-lfx, build-docker] + if: always() && github.event.inputs.create_github_release == 'true' && needs.release-lfx.result == 'success' + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Download artifacts + uses: actions/download-artifact@v4 + with: + name: lfx-dist + path: dist/ + + - name: Generate release notes + id: notes + run: | + cat > release_notes.md << EOF + # LFX ${{ github.event.inputs.version }} + + ## 🚀 Installation + + ### PyPI + \`\`\`bash + pip install lfx==${{ github.event.inputs.version }} + # or + uv pip install lfx==${{ github.event.inputs.version }} + # or run without installing + uvx lfx@${{ github.event.inputs.version }} --help + \`\`\` + + ### Docker + \`\`\`bash + # Standard image + docker pull langflowai/lfx:${{ github.event.inputs.version }} + + # Alpine image (smaller) + docker pull langflowai/lfx:${{ github.event.inputs.version }}-alpine + + # Run a flow + docker run --rm -v \$(pwd):/app/data langflowai/lfx:${{ github.event.inputs.version }} lfx run flow.json --input-value "Hello" + \`\`\` + + ## 📦 What's New + + + + ## 📋 Checksums + + \`\`\` + $(cd dist && sha256sum *) + \`\`\` + + --- + + **Full Changelog**: https://github.com/${{ github.repository }}/compare/v${{ needs.validate-version.outputs.current_version }}...lfx-v${{ github.event.inputs.version }} + EOF + + - name: Create Release + uses: softprops/action-gh-release@v2 + with: + tag_name: lfx-v${{ github.event.inputs.version }} + name: LFX ${{ github.event.inputs.version }} + body_path: release_notes.md + draft: false + prerelease: ${{ github.event.inputs.pre_release }} + files: | + dist/* + generate_release_notes: true + + test-release: + name: Test Release + needs: [release-lfx, build-docker] + if: always() && (needs.release-lfx.result == 'success' || needs.build-docker.result == 'success') + runs-on: ubuntu-latest + steps: + - name: Wait for PyPI propagation + if: needs.release-lfx.result == 'success' + run: sleep 60 + + - name: Test PyPI installation + if: needs.release-lfx.result == 'success' + run: | + # Test installation using uv + uv pip install lfx==${{ github.event.inputs.version }} + uv run lfx --help + + - name: Test Docker image + if: needs.build-docker.result == 'success' + run: | + # Test standard image + docker run --rm langflowai/lfx:${{ github.event.inputs.version }} lfx --help + + # Test alpine image + docker run --rm langflowai/lfx:${{ github.event.inputs.version }}-alpine lfx --help + + # Test with a simple flow + cat > test_flow.json << 'EOF' + { + "nodes": [], + "edges": [] + } + EOF + + docker run --rm -v $(pwd):/app/data langflowai/lfx:${{ github.event.inputs.version }} \ + lfx run /app/data/test_flow.json --input-value "test" || true + + notify: + name: Notify Release Status + needs: [create-release, test-release] + if: always() + runs-on: ubuntu-latest + steps: + - name: Notify success + if: needs.create-release.result == 'success' + run: | + echo "✅ LFX ${{ github.event.inputs.version }} released successfully!" 
+ echo "PyPI: https://pypi.org/project/lfx/${{ github.event.inputs.version }}/" + echo "Docker Hub: https://hub.docker.com/r/langflowai/lfx/tags" + echo "GitHub Release: https://github.com/${{ github.repository }}/releases/tag/lfx-v${{ github.event.inputs.version }}" + + - name: Notify failure + if: needs.create-release.result != 'success' + run: | + echo "❌ LFX ${{ github.event.inputs.version }} release failed!" + exit 1 \ No newline at end of file diff --git a/.github/workflows/release_nightly.yml b/.github/workflows/release_nightly.yml index 80e6df568423..33ef6ced6011 100644 --- a/.github/workflows/release_nightly.yml +++ b/.github/workflows/release_nightly.yml @@ -19,6 +19,11 @@ on: required: false type: boolean default: false + build_lfx: + description: "Build and release LFX package" + required: false + type: boolean + default: false nightly_tag_main: description: "Tag for the nightly main build" required: true @@ -27,6 +32,10 @@ on: description: "Tag for the nightly base build" required: true type: string + nightly_tag_lfx: + description: "Tag for the nightly LFX build" + required: false + type: string workflow_call: inputs: build_docker_base: @@ -44,6 +53,11 @@ on: required: false type: boolean default: false + build_lfx: + description: "Build and release LFX package" + required: false + type: boolean + default: false nightly_tag_main: description: "Tag for the nightly main build" required: true @@ -52,14 +66,84 @@ on: description: "Tag for the nightly base build" required: true type: string + nightly_tag_lfx: + description: "Tag for the nightly LFX build" + required: false + type: string env: POETRY_VERSION: "1.8.3" PYTHON_VERSION: "3.13" jobs: + build-nightly-lfx: + name: Build LFX Nightly + if: ${{ inputs.build_lfx == true }} + runs-on: ubuntu-latest + outputs: + version: ${{ steps.verify.outputs.version }} + defaults: + run: + shell: bash + steps: + - name: Check out the code at a specific ref + uses: actions/checkout@v4 + with: + ref: ${{ inputs.nightly_tag_main }} + persist-credentials: true + - name: "Setup Environment" + uses: astral-sh/setup-uv@v6 + with: + enable-cache: true + cache-dependency-glob: "uv.lock" + python-version: ${{ env.PYTHON_VERSION }} + prune-cache: false + - name: Install LFX dependencies + run: uv sync --dev --package lfx + + - name: Verify Nightly Name and Version + id: verify + run: | + cd src/lfx + name=$(uv tree | grep 'lfx' | head -n 1 | awk '{print $1}') + version=$(uv tree | grep 'lfx' | head -n 1 | awk '{print $2}') + if [ "$name" != "lfx-nightly" ]; then + echo "Name $name does not match lfx-nightly. Exiting the workflow." + exit 1 + fi + if [ "$version" != "${{ inputs.nightly_tag_lfx }}" ]; then + echo "Version $version does not match nightly tag ${{ inputs.nightly_tag_lfx }}. Exiting the workflow." 
+ exit 1 + fi + # Strip the leading `v` from the version + version=$(echo $version | sed 's/^v//') + echo "version=$version" >> $GITHUB_OUTPUT + + - name: Build LFX for distribution + run: | + cd src/lfx + rm -rf dist/ + uv build --wheel --out-dir dist + + - name: Test LFX CLI + run: | + cd src/lfx + uv pip install dist/*.whl --force-reinstall + uv run lfx --help + echo "LFX CLI test completed successfully" + + # PyPI publishing moved to after cross-platform testing + + - name: Upload LFX Artifact + uses: actions/upload-artifact@v4 + with: + name: dist-nightly-lfx + path: src/lfx/dist + build-nightly-base: name: Build Langflow Nightly Base + needs: [build-nightly-lfx] + if: always() && (needs.build-nightly-lfx.result == 'success' || inputs.build_lfx == false) runs-on: ubuntu-latest defaults: run: @@ -83,6 +167,10 @@ jobs: - name: Install the project run: uv sync + - name: Wait for PyPI Propagation + if: ${{ inputs.build_lfx == true }} + run: sleep 300 # wait for 5 minutes to ensure PyPI propagation of LFX + - name: Verify Nightly Name and Version id: verify run: | @@ -100,13 +188,13 @@ jobs: version=$(echo $version | sed 's/^v//') echo "version=$version" >> $GITHUB_OUTPUT - - name: Build project for distribution + - name: Build Langflow Base for distribution run: | rm -rf src/backend/base/dist rm -rf dist make build base=true args="--wheel" - - name: Test CLI + - name: Test Langflow Base CLI run: | # TODO: Unsure why the whl is not built in src/backend/base/dist mkdir src/backend/base/dist @@ -180,9 +268,9 @@ jobs: if: needs.build-nightly-base.outputs.skipped == 'false' run: sleep 300 # wait for 5 minutes to ensure PyPI propagation of base - - name: Build project for distribution + - name: Build Langflow Main for distribution run: make build main=true args="--no-sources --wheel" - - name: Test CLI + - name: Test Langflow Main CLI run: | uv pip install dist/*.whl uv run python -m langflow run --host localhost --port 7860 --backend-only & @@ -216,9 +304,35 @@ jobs: base-artifact-name: "dist-nightly-base" main-artifact-name: "dist-nightly-main" + publish-nightly-lfx: + name: Publish LFX Nightly to PyPI + needs: [build-nightly-lfx, test-cross-platform] + if: ${{ inputs.build_lfx == true }} + runs-on: ubuntu-latest + steps: + - name: Check out the code + uses: actions/checkout@v4 + - name: Download LFX artifact + uses: actions/download-artifact@v4 + with: + name: dist-nightly-lfx + path: src/lfx/dist + - name: Setup Environment + uses: astral-sh/setup-uv@v6 + with: + enable-cache: false + python-version: "3.13" + - name: Publish LFX to PyPI + env: + POETRY_PYPI_TOKEN_PYPI: ${{ secrets.PYPI_API_TOKEN }} + UV_PUBLISH_TOKEN: ${{ secrets.PYPI_API_TOKEN }} + run: | + make lfx_publish + publish-nightly-base: name: Publish Langflow Base Nightly to PyPI - needs: [build-nightly-base, test-cross-platform] + needs: [build-nightly-base, test-cross-platform, publish-nightly-lfx] + if: always() && needs.build-nightly-base.result == 'success' && needs.test-cross-platform.result == 'success' && (needs.publish-nightly-lfx.result == 'success' || inputs.build_lfx == false) runs-on: ubuntu-latest steps: - name: Checkout code diff --git a/.gitignore b/.gitignore index 5896b9850389..e9fcace844db 100644 --- a/.gitignore +++ b/.gitignore @@ -277,4 +277,7 @@ src/frontend/temp .dspy_cache/ *.db -*.mcp.json \ No newline at end of file +*.mcp.json + +news-aggregated.json +CLAUDE.md \ No newline at end of file diff --git a/.vscode/launch.json b/.vscode/launch.json index 1bc163fb5815..9ab4589d1249 100644 --- 
a/.vscode/launch.json +++ b/.vscode/launch.json @@ -19,7 +19,9 @@ "--loop", "asyncio", "--reload-include", - "./src/backend/*" + "./src/backend/*", + "--reload-include", + "./src/lfx/*" ], "jinja": true, "justMyCode": false, diff --git a/Makefile b/Makefile index bff5575926c7..5dd8c3d326e2 100644 --- a/Makefile +++ b/Makefile @@ -140,6 +140,12 @@ unit_tests: ## run unit tests unit_tests_looponfail: @make unit_tests args="-f" +lfx_tests: ## run lfx package unit tests + @echo 'Running LFX Package Tests...' + @cd src/lfx && \ + uv sync && \ + uv run pytest tests/unit -v $(args) + integration_tests: uv run pytest src/backend/tests/integration \ --instafail -ra \ @@ -387,6 +393,50 @@ endif publish_testpypi: ## build the frontend static files and package the project and publish it to PyPI @echo 'Publishing the project' +###################### +# LFX PACKAGE +###################### + +lfx_build: ## build the LFX package + @echo 'Building LFX package' + @cd src/lfx && make build + +lfx_publish: ## publish LFX package to PyPI + @echo 'Publishing LFX package' + @cd src/lfx && make publish + +lfx_publish_testpypi: ## publish LFX package to test PyPI + @echo 'Publishing LFX package to test PyPI' + @cd src/lfx && make publish_test + +lfx_test: ## run LFX tests + @echo 'Running LFX tests' + @cd src/lfx && make test + +lfx_format: ## format LFX code + @echo 'Formatting LFX code' + @cd src/lfx && make format + +lfx_lint: ## lint LFX code + @echo 'Linting LFX code' + @cd src/lfx && make lint + +lfx_clean: ## clean LFX build artifacts + @echo 'Cleaning LFX build artifacts' + @cd src/lfx && make clean + +lfx_docker_build: ## build LFX production Docker image + @echo 'Building LFX Docker image' + @cd src/lfx && make docker_build + +lfx_docker_dev: ## start LFX development environment + @echo 'Starting LFX development environment' + @cd src/lfx && make docker_dev + +lfx_docker_test: ## run LFX tests in Docker + @echo 'Running LFX tests in Docker' + @cd src/lfx && make docker_test + # example make alembic-revision message="Add user table" alembic-revision: ## generate a new migration @echo 'Generating a new Alembic revision' diff --git a/docker/frontend/default.conf.template b/docker/frontend/default.conf.template index efd23b956983..6430c6a823aa 100644 --- a/docker/frontend/default.conf.template +++ b/docker/frontend/default.conf.template @@ -36,7 +36,7 @@ http { add_header Cache-Control "no-cache, no-store, must-revalidate"; etag on; } - + location /api { proxy_pass ${BACKEND_URL}; } diff --git a/docs/.yarnrc.yml b/docs/.yarnrc.yml index 789221eb7f02..8b757b29a176 100644 --- a/docs/.yarnrc.yml +++ b/docs/.yarnrc.yml @@ -1 +1 @@ -nodeLinker: node-modules \ No newline at end of file +nodeLinker: node-modules \ No newline at end of file diff --git a/docs/src/plugins/scroll-tracking/index.js b/docs/src/plugins/scroll-tracking/index.js index 66ad81c83f99..166f558c89a5 100644 --- a/docs/src/plugins/scroll-tracking/index.js +++ b/docs/src/plugins/scroll-tracking/index.js @@ -35,7 +35,7 @@ function pluginScrollTracking(context, options = {}) { const config = { selectors: options.selectors || DEFAULT_SELECTORS }; - + const configScript = ` window.__SCROLL_TRACKING_CONFIG__ = ${JSON.stringify(config)}; `; @@ -52,4 +52,4 @@ function pluginScrollTracking(context, options = {}) { }; } -module.exports = pluginScrollTracking; \ No newline at end of file +module.exports = pluginScrollTracking; diff --git a/docs/src/plugins/scroll-tracking/scroll-tracking.js b/docs/src/plugins/scroll-tracking/scroll-tracking.js index 
dd712d8c32fa..2fd185417f05 100644 --- a/docs/src/plugins/scroll-tracking/scroll-tracking.js +++ b/docs/src/plugins/scroll-tracking/scroll-tracking.js @@ -7,18 +7,18 @@ const propertyHelpers = { // Extract language from code elements - try multiple approaches codeLanguage: (element) => { // Method 1: Look for data-ch-lang attribute - const codeElement = element.querySelector('[data-ch-lang]') || + const codeElement = element.querySelector('[data-ch-lang]') || element.closest('[data-ch-lang]'); if (codeElement) { const lang = codeElement.getAttribute('data-ch-lang'); if (lang && lang !== 'text') return lang; } - + // Method 2: Look for active tab in the same container - const container = element.closest('.theme-code-block') || + const container = element.closest('.theme-code-block') || element.parentElement?.closest('[class*="code"]') || element.parentElement; - + if (container) { const activeTab = container.querySelector('li[role="tab"][aria-selected="true"]'); if (activeTab) { @@ -28,7 +28,7 @@ const propertyHelpers = { } } } - + // Method 3: Look for any tab as fallback if (container) { const anyTab = container.querySelector('li[role="tab"]'); @@ -39,7 +39,7 @@ const propertyHelpers = { } } } - + return null; } }; @@ -71,9 +71,9 @@ function getScrollDepthPercentage() { ); const windowHeight = window.innerHeight; const scrollableHeight = documentHeight - windowHeight; - + if (scrollableHeight <= 0) return 100; - + return Math.min(100, Math.round((scrollTop / scrollableHeight) * 100)); } @@ -82,11 +82,11 @@ function getScrollDepthPercentage() { */ function getElementProperties(element, baseProperties = {}) { const properties = {}; - + // Process base properties, handling helper function references Object.keys(baseProperties).forEach(key => { const value = baseProperties[key]; - + if (typeof value === 'function') { // Direct function (for programmatic config) try { @@ -116,32 +116,32 @@ function getElementProperties(element, baseProperties = {}) { properties[key] = value; } }); - + // Add common properties properties.page_path = window.location.pathname; properties.page_url = window.location.href; properties.scroll_depth = getScrollDepthPercentage(); - + // Add element-specific properties if (element.tagName) { properties.tag_name = element.tagName.toLowerCase(); } - + if (element.id) { properties.element_id = element.id; } - + if (element.className) { properties.element_class = element.className; } - + // For headings, add text content and level if (element.tagName && element.tagName.match(/^H[1-6]$/)) { properties.heading_level = element.tagName.toLowerCase(); properties.heading_text = element.textContent?.trim().substring(0, 200); // Limit text length to 200 chars properties.text = element.textContent?.trim().substring(0, 200); // Add 'text' property as requested } - + return properties; } @@ -153,25 +153,25 @@ function setupElementTracking(config) { console.warn('IntersectionObserver not supported, element tracking disabled'); return; } - + const observer = new IntersectionObserver((entries) => { entries.forEach(entry => { // Fire event every time element comes into view (not just first time) if (entry.isIntersecting) { // Find matching selector config - const selectorConfig = config.selectors.find(sc => + const selectorConfig = config.selectors.find(sc => entry.target.matches(sc.selector) ); - + if (selectorConfig) { // For code blocks on mobile, add a small delay to ensure DOM has updated const isMobile = window.innerWidth <= 768; const isCodeBlock = 
entry.target.matches('.ch-codeblock'); const delay = (isMobile && isCodeBlock) ? 100 : 0; - + setTimeout(() => { const properties = getElementProperties(entry.target, selectorConfig.properties || {}); - + if (window.analytics && typeof window.analytics.track === 'function') { window.analytics.track(selectorConfig.eventName, properties); } @@ -183,7 +183,7 @@ function setupElementTracking(config) { threshold: 0.1, // Element needs to be 10% visible rootMargin: '0px' }); - + // Function to observe elements for a given selector const observeElementsForSelector = (selectorConfig) => { const elements = document.querySelectorAll(selectorConfig.selector); @@ -194,15 +194,15 @@ function setupElementTracking(config) { } }); }; - + // Observe all existing elements matching the selectors config.selectors.forEach(observeElementsForSelector); - + // Also scan after a delay for dynamically rendered content setTimeout(() => { config.selectors.forEach(observeElementsForSelector); }, 1000); - + // Set up mutation observer for dynamically added elements if (window.MutationObserver) { const mutationObserver = new MutationObserver((mutations) => { @@ -218,7 +218,7 @@ function setupElementTracking(config) { node._scrollTrackingObserved = true; } } - + // Check children const childElements = node.querySelectorAll ? node.querySelectorAll(selectorConfig.selector) : []; childElements.forEach(child => { @@ -232,16 +232,16 @@ function setupElementTracking(config) { }); }); }); - + mutationObserver.observe(document.body, { childList: true, subtree: true }); - + // Store mutation observer for cleanup observer._mutationObserver = mutationObserver; } - + return observer; } @@ -252,17 +252,17 @@ function setupElementTracking(config) { function initializeScrollTracking(userConfig = {}) { // Only run on client side and prevent duplicate initialization if (!ExecutionEnvironment.canUseDOM || isScrollTrackingInitialized) return; - + // Merge default config with injected config and user config const injectedConfig = window.__SCROLL_TRACKING_CONFIG__ || {}; const config = { ...defaultConfig, ...injectedConfig, ...userConfig }; - + // Set up element intersection tracking const observer = setupElementTracking(config); - + // Mark as initialized isScrollTrackingInitialized = true; - + // Store observer for cleanup window._scrollTrackingObserver = observer; } @@ -276,18 +276,18 @@ function cleanupScrollTracking() { if (window._scrollTrackingObserver._mutationObserver) { window._scrollTrackingObserver._mutationObserver.disconnect(); } - + // Clean up intersection observer window._scrollTrackingObserver.disconnect(); window._scrollTrackingObserver = null; } - + // Clear tracking flags from elements document.querySelectorAll('[data-scroll-tracked]').forEach(el => { delete el._scrollTrackingObserved; el.removeAttribute('data-scroll-tracked'); }); - + isScrollTrackingInitialized = false; } @@ -310,7 +310,7 @@ if (ExecutionEnvironment.canUseDOM) { // Document is fully loaded initWhenReady(); } - + // Re-initialize on route changes for SPA navigation window.addEventListener('popstate', () => { cleanupScrollTracking(); @@ -328,4 +328,4 @@ export function onRouteDidUpdate({location, previousLocation}) { cleanupScrollTracking(); setTimeout(() => initializeScrollTracking(), 100); } -} \ No newline at end of file +} diff --git a/docs/static/logos/botmessage.svg b/docs/static/logos/botmessage.svg index ab468da41574..e83cd11571d1 100644 --- a/docs/static/logos/botmessage.svg +++ b/docs/static/logos/botmessage.svg @@ -5,4 +5,4 @@ - \ No newline 
at end of file + \ No newline at end of file diff --git a/docs/tailwind.config.js b/docs/tailwind.config.js index 52bf19c57c31..e0136d225b9f 100644 --- a/docs/tailwind.config.js +++ b/docs/tailwind.config.js @@ -14,4 +14,4 @@ module.exports = { preflight: false, // This is important to prevent Tailwind from conflicting with Docusaurus styles }, darkMode: ['class', '[data-theme="dark"]'], // This helps with Docusaurus dark mode -} \ No newline at end of file +} diff --git a/pyproject.toml b/pyproject.toml index 52460dfff421..f8eca371534f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -185,9 +185,14 @@ dev = [ [tool.uv.sources] langflow-base = { workspace = true } langflow = { workspace = true } +lfx = { workspace = true } [tool.uv.workspace] -members = ["src/backend/base", "."] +members = [ + "src/backend/base", + ".", + "src/lfx", +] [tool.hatch.build.targets.wheel] packages = ["src/backend/langflow"] @@ -256,13 +261,21 @@ ignore-regex = '.*(Stati Uniti|Tense=Pres).*' timeout = 150 timeout_method = "signal" minversion = "6.0" -testpaths = ["src/backend/tests"] +testpaths = ["src/backend/tests", "src/lfx/tests"] console_output_style = "progress" filterwarnings = ["ignore::DeprecationWarning", "ignore::ResourceWarning"] log_cli = true log_cli_format = "%(asctime)s [%(levelname)8s] %(message)s (%(filename)s:%(lineno)s)" log_cli_date_format = "%Y-%m-%d %H:%M:%S" -markers = ["async_test", "api_key_required", "no_blockbuster", "benchmark"] +markers = [ + "async_test", + "api_key_required", + "no_blockbuster", + "benchmark", + "unit: Unit tests", + "integration: Integration tests", + "slow: Slow-running tests" +] asyncio_mode = "auto" asyncio_default_fixture_loop_scope = "function" addopts = "-p no:benchmark" @@ -312,7 +325,7 @@ ignore = [ "D10", # Missing docstrings "PLW1641", # Object does not implement `__hash__` method (mutable objects shouldn't be hashable) # Rules that are TODOs - "ANN", + "ANN" ] # Preview rules that are not yet activated diff --git a/scripts/aws/bin/cdk.ts b/scripts/aws/bin/cdk.ts index 82b96f649b0b..856f6a267d6e 100644 --- a/scripts/aws/bin/cdk.ts +++ b/scripts/aws/bin/cdk.ts @@ -19,4 +19,4 @@ new LangflowAppStack(app, 'LangflowAppStack', { // env: { account: '123456789012', region: 'us-east-1' }, /* For more information, see https://docs.aws.amazon.com/cdk/latest/guide/environments.html */ -}); \ No newline at end of file +}); diff --git a/scripts/aws/lib/construct/backend.ts b/scripts/aws/lib/construct/backend.ts index cba31f988cc1..393a54c4c530 100644 --- a/scripts/aws/lib/construct/backend.ts +++ b/scripts/aws/lib/construct/backend.ts @@ -27,7 +27,7 @@ interface BackEndProps { } export class BackEndCluster extends Construct { - + constructor(scope: Construct, id: string, props:BackEndProps) { super(scope, id) const backendServiceName = 'backend' @@ -76,7 +76,7 @@ export class BackEndCluster extends Construct { "password": ecs.Secret.fromSecretsManager(secretsDB, 'password'), }, }); - + const backendService = new ecs.FargateService(this, 'BackEndService', { cluster: props.cluster, serviceName: backendServiceName, @@ -87,4 +87,4 @@ export class BackEndCluster extends Construct { }); props.albTG.addTarget(backendService); } -} \ No newline at end of file +} diff --git a/scripts/aws/lib/construct/frontend.ts b/scripts/aws/lib/construct/frontend.ts index 85eec2c93f58..5d3c1f32e62d 100644 --- a/scripts/aws/lib/construct/frontend.ts +++ b/scripts/aws/lib/construct/frontend.ts @@ -25,7 +25,7 @@ export class Web extends Construct { readonly distribution; 
constructor(scope: Construct, id: string, props:WebProps) { super(scope, id) - + const commonBucketProps: s3.BucketProps = { blockPublicAccess: s3.BlockPublicAccess.BLOCK_ALL, encryption: s3.BucketEncryption.S3_MANAGED, @@ -37,7 +37,7 @@ export class Web extends Construct { // CDKにて 静的WebサイトをホストするためのAmazon S3バケットを作成 const websiteBucket = new s3.Bucket(this, 'LangflowWebsiteBucket', commonBucketProps); - + const originAccessIdentity = new cloudfront.OriginAccessIdentity( this, 'OriginAccessIdentity', @@ -63,12 +63,12 @@ export class Web extends Construct { const s3SpaOrigin = new origins.S3Origin(websiteBucket); const ApiSpaOrigin = new origins.LoadBalancerV2Origin(props.alb,{ protocolPolicy: cloudfront.OriginProtocolPolicy.HTTP_ONLY - }); + }); const albBehaviorOptions = { origin: ApiSpaOrigin, allowedMethods: cloudfront.AllowedMethods.ALLOW_ALL, - + viewerProtocolPolicy: cloudfront.ViewerProtocolPolicy.ALLOW_ALL, cachePolicy: cloudfront.CachePolicy.CACHING_DISABLED, originRequestPolicy: cloudfront.OriginRequestPolicy.ALL_VIEWER_EXCEPT_HOST_HEADER @@ -126,7 +126,7 @@ export class Web extends Construct { // VITE_AXIOS_BASE_URL: `https://${this.distribution.domainName}` }, }); - + // distribution から backendへのinbound 許可 const alb_listen_port=80 props.albSG.addIngressRule(ec2.Peer.anyIpv4(), ec2.Port.tcp(alb_listen_port)) @@ -139,4 +139,4 @@ export class Web extends Construct { }); } -} \ No newline at end of file +} diff --git a/scripts/aws/lib/construct/iam.ts b/scripts/aws/lib/construct/iam.ts index 13949bda5cf8..79469ddc6716 100644 --- a/scripts/aws/lib/construct/iam.ts +++ b/scripts/aws/lib/construct/iam.ts @@ -65,7 +65,7 @@ export class EcsIAM extends Construct { // KendraとBedrockのアクセス権付与 this.backendTaskRole.attachInlinePolicy(RagAccessPolicy); - // BackEnd Task ExecutionRole + // BackEnd Task ExecutionRole this.backendTaskExecutionRole = new iam.Role(this, 'backendTaskExecutionRole', { assumedBy: new iam.ServicePrincipal('ecs-tasks.amazonaws.com'), managedPolicies: [ @@ -75,8 +75,8 @@ export class EcsIAM extends Construct { }, ], }); - + this.backendTaskExecutionRole.attachInlinePolicy(SecretsManagerPolicy); this.backendTaskExecutionRole.attachInlinePolicy(RagAccessPolicy); } -} \ No newline at end of file +} diff --git a/scripts/aws/lib/construct/index.ts b/scripts/aws/lib/construct/index.ts index 91e2d2c0a817..872b9be08757 100644 --- a/scripts/aws/lib/construct/index.ts +++ b/scripts/aws/lib/construct/index.ts @@ -4,4 +4,4 @@ export * from './iam'; export * from './frontend'; export * from './backend'; export * from './network'; -export * from './kendra'; \ No newline at end of file +export * from './kendra'; diff --git a/scripts/aws/lib/construct/kendra.ts b/scripts/aws/lib/construct/kendra.ts index 80f60ebadd69..4cf04335c7bd 100644 --- a/scripts/aws/lib/construct/kendra.ts +++ b/scripts/aws/lib/construct/kendra.ts @@ -138,4 +138,4 @@ export class Rag extends Construct { }) ); } -} \ No newline at end of file +} diff --git a/scripts/aws/lib/construct/network.ts b/scripts/aws/lib/construct/network.ts index 1abd78ddf58d..be651969ad43 100644 --- a/scripts/aws/lib/construct/network.ts +++ b/scripts/aws/lib/construct/network.ts @@ -59,7 +59,7 @@ export class Network extends Construct { internetFacing: true, //インターネットからのアクセスを許可するかどうか指定 loadBalancerName: 'langflow-alb', securityGroup: this.albSG, //作成したセキュリティグループを割り当てる - vpc:this.vpc, + vpc:this.vpc, }) const listener = this.alb.addListener('Listener', { port: alb_listen_port }); @@ -110,4 +110,4 @@ export class Network extends Construct { 
}); } -} \ No newline at end of file +} diff --git a/scripts/ci/lfx_nightly_tag.py b/scripts/ci/lfx_nightly_tag.py new file mode 100644 index 000000000000..d54c6dda5b6e --- /dev/null +++ b/scripts/ci/lfx_nightly_tag.py @@ -0,0 +1,70 @@ +"""Script to generate nightly tags for LFX package.""" + +import packaging.version +import requests +from packaging.version import Version + +PYPI_LFX_URL = "https://pypi.org/pypi/lfx/json" +PYPI_LFX_NIGHTLY_URL = "https://pypi.org/pypi/lfx-nightly/json" + + +def get_latest_published_version(*, is_nightly: bool) -> Version: + url = PYPI_LFX_NIGHTLY_URL if is_nightly else PYPI_LFX_URL + + res = requests.get(url, timeout=10) + if res.status_code == requests.codes.not_found: + msg = "Package not found on PyPI" + raise requests.RequestException(msg) + + try: + version_str = res.json()["info"]["version"] + except (KeyError, ValueError) as e: + msg = "Got unexpected response from PyPI" + raise requests.RequestException(msg) from e + return Version(version_str) + + +def create_lfx_tag(): + # Since LFX has never been released, we'll use the version from pyproject.toml as base + from pathlib import Path + + import tomllib + + # Read version from pyproject.toml + lfx_pyproject_path = Path(__file__).parent.parent.parent / "src" / "lfx" / "pyproject.toml" + pyproject_data = tomllib.loads(lfx_pyproject_path.read_text()) + + current_version_str = pyproject_data["project"]["version"] + current_version = Version(current_version_str) + + try: + current_nightly_version = get_latest_published_version(is_nightly=True) + nightly_base_version = current_nightly_version.base_version + except (requests.RequestException, KeyError, ValueError): + # If LFX nightly doesn't exist on PyPI yet, this is the first nightly + current_nightly_version = None + nightly_base_version = None + + build_number = "0" + latest_base_version = current_version.base_version + + if current_nightly_version and latest_base_version == nightly_base_version: + # If the latest version is the same as the nightly version, increment the build number + build_number = str(current_nightly_version.dev + 1) + + new_nightly_version = latest_base_version + ".dev" + build_number + + # Prepend "v" to the version, if DNE. + # This is an update to the nightly version format. + if not new_nightly_version.startswith("v"): + new_nightly_version = "v" + new_nightly_version + + # Verify if version is PEP440 compliant. 
+ packaging.version.Version(new_nightly_version) + + return new_nightly_version + + +if __name__ == "__main__": + tag = create_lfx_tag() + print(tag) diff --git a/scripts/ci/update_lf_base_dependency.py b/scripts/ci/update_lf_base_dependency.py index e3e5d1aabd20..90a2a1e71b19 100755 --- a/scripts/ci/update_lf_base_dependency.py +++ b/scripts/ci/update_lf_base_dependency.py @@ -7,7 +7,7 @@ import packaging.version BASE_DIR = Path(__file__).parent.parent.parent -ARGUMENT_NUMBER = 2 +ARGUMENT_NUMBER = 3 def update_base_dep(pyproject_path: str, new_version: str) -> None: @@ -15,13 +15,35 @@ def update_base_dep(pyproject_path: str, new_version: str) -> None: filepath = BASE_DIR / pyproject_path content = filepath.read_text(encoding="utf-8") - replacement = f'langflow-base-nightly = "{new_version}"' + # Updated pattern to handle PEP 440 version suffixes and both ~= and == version specifiers + pattern = re.compile(r'("langflow-base(?:~=|==)[\d.]+(?:\.(?:post|dev|a|b|rc)\d+)*")') + replacement = f'"langflow-base-nightly=={new_version}"' - # Updates the pattern for poetry - pattern = re.compile(r'langflow-base = \{ path = "\./src/backend/base", develop = true \}') + # Check if the pattern is found if not pattern.search(content): - msg = f'langflow-base poetry dependency not found in "{filepath}"' + msg = f'langflow-base dependency not found in "{filepath}"' raise ValueError(msg) + + # Replace the matched pattern with the new one + content = pattern.sub(replacement, content) + filepath.write_text(content, encoding="utf-8") + + +def update_lfx_dep_in_base(pyproject_path: str, lfx_version: str) -> None: + """Update the LFX dependency in langflow-base pyproject.toml to use nightly version.""" + filepath = BASE_DIR / pyproject_path + content = filepath.read_text(encoding="utf-8") + + # Updated pattern to handle PEP 440 version suffixes and both ~= and == version specifiers + pattern = re.compile(r'("lfx(?:~=|==)[\d.]+(?:\.(?:post|dev|a|b|rc)\d+)*")') + replacement = f'"lfx-nightly=={lfx_version}"' + + # Check if the pattern is found + if not pattern.search(content): + msg = f'LFX dependency not found in "{filepath}"' + raise ValueError(msg) + + # Replace the matched pattern with the new one content = pattern.sub(replacement, content) filepath.write_text(content, encoding="utf-8") @@ -36,16 +58,24 @@ def verify_pep440(version): def main() -> None: if len(sys.argv) != ARGUMENT_NUMBER: - msg = "New version not specified" + msg = "Usage: update_lf_base_dependency.py " raise ValueError(msg) base_version = sys.argv[1] + lfx_version = sys.argv[2] - # Strip "v" prefix from version if present + # Strip "v" prefix from versions if present base_version = base_version.removeprefix("v") + lfx_version = lfx_version.removeprefix("v") verify_pep440(base_version) + verify_pep440(lfx_version) + + # Update langflow-base dependency in main project update_base_dep("pyproject.toml", base_version) + # Update LFX dependency in langflow-base + update_lfx_dep_in_base("src/backend/base/pyproject.toml", lfx_version) + if __name__ == "__main__": main() diff --git a/scripts/ci/update_lfx_version.py b/scripts/ci/update_lfx_version.py new file mode 100644 index 000000000000..ead0bd6cea19 --- /dev/null +++ b/scripts/ci/update_lfx_version.py @@ -0,0 +1,48 @@ +"""Script to update LFX version for nightly builds.""" + +import sys +from pathlib import Path + +from update_pyproject_name import update_pyproject_name +from update_pyproject_version import update_pyproject_version + +# Add the current directory to the path so we can import the 
other scripts +current_dir = Path(__file__).resolve().parent +sys.path.append(str(current_dir)) + + +def update_lfx_for_nightly(lfx_tag: str): + """Update LFX package for nightly build. + + Args: + lfx_tag: The nightly tag for LFX (e.g., "v0.1.0.dev0") + """ + lfx_pyproject_path = "src/lfx/pyproject.toml" + + # Update name to lfx-nightly + update_pyproject_name(lfx_pyproject_path, "lfx-nightly") + + # Update version (strip 'v' prefix if present) + version = lfx_tag.lstrip("v") + update_pyproject_version(lfx_pyproject_path, version) + + print(f"Updated LFX package to lfx-nightly version {version}") + + +def main(): + """Update LFX for nightly builds. + + Usage: + update_lfx_version.py + """ + expected_args = 2 + if len(sys.argv) != expected_args: + print("Usage: update_lfx_version.py ") + sys.exit(1) + + lfx_tag = sys.argv[1] + update_lfx_for_nightly(lfx_tag) + + +if __name__ == "__main__": + main() diff --git a/scripts/ci/update_pyproject_combined.py b/scripts/ci/update_pyproject_combined.py index 20c6e3dedfc1..4ad73bbfa7db 100755 --- a/scripts/ci/update_pyproject_combined.py +++ b/scripts/ci/update_pyproject_combined.py @@ -3,6 +3,7 @@ import sys from pathlib import Path +from update_lf_base_dependency import update_lfx_dep_in_base from update_pyproject_name import update_pyproject_name from update_pyproject_name import update_uv_dep as update_name_uv_dep from update_pyproject_version import update_pyproject_version @@ -17,28 +18,33 @@ def main(): """Universal update script that handles both base and main updates in a single run. Usage: - update_pyproject_combined.py main + update_pyproject_combined.py main """ - arg_count = 4 + arg_count = 5 if len(sys.argv) != arg_count: print("Usage:") - print(" update_pyproject_combined.py main ") + print(" update_pyproject_combined.py main ") sys.exit(1) mode = sys.argv[1] if mode != "main": print("Only 'main' mode is supported") - print("Usage: update_pyproject_combined.py main ") + print("Usage: update_pyproject_combined.py main ") sys.exit(1) main_tag = sys.argv[2] base_tag = sys.argv[3] + lfx_tag = sys.argv[4] # First handle base package updates update_pyproject_name("src/backend/base/pyproject.toml", "langflow-base-nightly") update_name_uv_dep("pyproject.toml", "langflow-base-nightly") update_pyproject_version("src/backend/base/pyproject.toml", base_tag) + # Update LFX dependency in langflow-base + lfx_version = lfx_tag.lstrip("v") + update_lfx_dep_in_base("src/backend/base/pyproject.toml", lfx_version) + # Then handle main package updates update_pyproject_name("pyproject.toml", "langflow-nightly") update_name_uv_dep("pyproject.toml", "langflow-nightly") diff --git a/scripts/ci/update_starter_projects.py b/scripts/ci/update_starter_projects.py index b001ebfc82e5..cb56b0f198ac 100644 --- a/scripts/ci/update_starter_projects.py +++ b/scripts/ci/update_starter_projects.py @@ -11,10 +11,11 @@ update_project_file, update_projects_components_with_latest_component_versions, ) -from langflow.interface.components import get_and_cache_all_types_dict -from langflow.services.deps import get_settings_service from langflow.services.utils import initialize_services +from lfx.interface.components import get_and_cache_all_types_dict +from lfx.services.deps import get_settings_service + async def main(): """Updates the starter projects with the latest component versions. 
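The dependency-rewriting helpers added above (`update_base_dep` and `update_lfx_dep_in_base`) hinge on a single regex that must tolerate both `~=` and `==` specifiers as well as PEP 440 suffixes. A minimal sketch of the substitution in isolation, using the same pattern compiled in the diff and an illustrative dependency line:

```python
import re

# Same pattern update_lfx_dep_in_base() compiles in the diff above.
pattern = re.compile(r'("lfx(?:~=|==)[\d.]+(?:\.(?:post|dev|a|b|rc)\d+)*")')

content = 'dependencies = [\n    "lfx~=0.1.0",\n]'
updated = pattern.sub('"lfx-nightly==0.1.0.dev0"', content)
print(updated)
# dependencies = [
#     "lfx-nightly==0.1.0.dev0",
# ]
```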
diff --git a/scripts/gcp/walkthroughtutorial.md b/scripts/gcp/walkthroughtutorial.md index 83ea3086a2a0..8e8f4733312b 100644 --- a/scripts/gcp/walkthroughtutorial.md +++ b/scripts/gcp/walkthroughtutorial.md @@ -1,6 +1,6 @@ # Deploy Langflow on Google Cloud Platform -**Duration**: 45 minutes +**Duration**: 45 minutes **Author**: [Robert Wilkins III](https://www.linkedin.com/in/robertwilkinsiii) ## Introduction @@ -27,8 +27,8 @@ In the next step, you'll configure the GCP environment and deploy Langflow. ## Configure the GCP environment and deploy Langflow Run the deploy_langflow_gcp.sh script to configure the GCP environment and deploy Langflow: -```sh -gcloud config set project +```sh +gcloud config set project bash ./deploy_langflow_gcp.sh ``` diff --git a/scripts/gcp/walkthroughtutorial_spot.md b/scripts/gcp/walkthroughtutorial_spot.md index 3792bc1caffb..cd0165f99472 100644 --- a/scripts/gcp/walkthroughtutorial_spot.md +++ b/scripts/gcp/walkthroughtutorial_spot.md @@ -1,6 +1,6 @@ # Deploy Langflow on Google Cloud Platform -**Duration**: 45 minutes +**Duration**: 45 minutes **Author**: [Robert Wilkins III](https://www.linkedin.com/in/robertwilkinsiii) ## Introduction @@ -27,8 +27,8 @@ In the next step, you'll configure the GCP environment and deploy Langflow. ## Configure the GCP environment and deploy Langflow Run the deploy_langflow_gcp_spot.sh script to configure the GCP environment and deploy Langflow: -```sh -gcloud config set project +```sh +gcloud config set project bash ./deploy_langflow_gcp_spot.sh ``` diff --git a/scripts/generate_coverage_config.md b/scripts/generate_coverage_config.md index 03c1bb333357..7301ad9de2b3 100644 --- a/scripts/generate_coverage_config.md +++ b/scripts/generate_coverage_config.md @@ -26,7 +26,7 @@ The script runs automatically in CI before backend tests via `.github/workflows/ ## Files affected - **Input**: `src/frontend/src/utils/styleUtils.ts` (SIDEBAR_BUNDLES) -- **Input**: `src/backend/base/langflow/components/**/*.py` (legacy components) +- **Input**: `src/backend/base/langflow/components/**/*.py` (legacy components) - **Output**: `src/backend/.coveragerc` (auto-generated, in .gitignore) ## Benefits diff --git a/scripts/release-lfx.sh b/scripts/release-lfx.sh new file mode 100755 index 000000000000..a6a95b248514 --- /dev/null +++ b/scripts/release-lfx.sh @@ -0,0 +1,221 @@ +#!/bin/bash +set -e + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' # No Color + +# Default values +DRY_RUN=false + +# Function to print colored output +print_info() { + echo -e "${GREEN}[INFO]${NC} $1" +} + +print_error() { + echo -e "${RED}[ERROR]${NC} $1" +} + +print_warning() { + echo -e "${YELLOW}[WARNING]${NC} $1" +} + +print_dry_run() { + echo -e "${BLUE}[DRY RUN]${NC} $1" +} + +# Function to show usage +show_usage() { + echo "Usage: $0 [OPTIONS] [VERSION]" + echo "" + echo "Options:" + echo " --dry-run Run the script without making actual changes" + echo " --help Show this help message" + echo "" + echo "Arguments:" + echo " VERSION The new version to release (e.g., 0.1.0)" + echo "" + echo "Examples:" + echo " $0 0.1.0 # Release version 0.1.0" + echo " $0 --dry-run 0.1.0 # Dry run for version 0.1.0" + echo " $0 --dry-run # Dry run with interactive version prompt" +} + +# Parse command line arguments +while [[ $# -gt 0 ]]; do + case $1 in + --dry-run) + DRY_RUN=true + shift + ;; + --help|-h) + show_usage + exit 0 + ;; + *) + if [ -z "$NEW_VERSION" ]; then + NEW_VERSION=$1 + fi + shift + ;; + esac +done 
+ +# Check if we're in the right directory +if [ ! -f "src/lfx/pyproject.toml" ]; then + print_error "This script must be run from the root of the langflow repository" + exit 1 +fi + +# Get current version +CURRENT_VERSION=$(grep '^version = ' src/lfx/pyproject.toml | cut -d'"' -f2) +print_info "Current LFX version: $CURRENT_VERSION" + +if [ "$DRY_RUN" = true ]; then + print_dry_run "Running in dry run mode - no changes will be made" +fi + +# Check for uncommitted changes (skip in dry run) +if [ "$DRY_RUN" = false ]; then + if ! git diff-index --quiet HEAD --; then + print_warning "You have uncommitted changes. Please commit or stash them before releasing." + exit 1 + fi +else + if ! git diff-index --quiet HEAD --; then + print_warning "Uncommitted changes detected (ignored in dry run mode)" + fi +fi + +# Get new version from argument or prompt +if [ -z "$NEW_VERSION" ]; then + echo -n "Enter new version (current: $CURRENT_VERSION): " + read NEW_VERSION +fi + +# Validate version format +if ! [[ $NEW_VERSION =~ ^[0-9]+\.[0-9]+\.[0-9]+(-[a-zA-Z0-9]+)?$ ]]; then + print_error "Invalid version format. Use semantic versioning (e.g., 0.1.0 or 0.1.0-alpha)" + exit 1 +fi + +print_info "Preparing to release LFX version $NEW_VERSION" + +# Update version in pyproject.toml +if [ "$DRY_RUN" = true ]; then + print_dry_run "Would update version in pyproject.toml to $NEW_VERSION" +else + print_info "Updating version in pyproject.toml..." + sed -i.bak "s/^version = \".*\"/version = \"$NEW_VERSION\"/" src/lfx/pyproject.toml + rm src/lfx/pyproject.toml.bak +fi + +# Update version in Dockerfiles if they have ARG LFX_VERSION +if grep -q "ARG LFX_VERSION" src/lfx/docker/Dockerfile 2>/dev/null; then + if [ "$DRY_RUN" = true ]; then + print_dry_run "Would update version in Dockerfiles to $NEW_VERSION" + else + print_info "Updating version in Dockerfiles..." + sed -i.bak "s/ARG LFX_VERSION=.*/ARG LFX_VERSION=$NEW_VERSION/" src/lfx/docker/Dockerfile* + rm src/lfx/docker/Dockerfile*.bak + fi +fi + +# Run tests +print_info "Running tests..." +cd src/lfx +if ! make test; then + print_error "Tests failed!" + if [ "$DRY_RUN" = false ]; then + print_info "Rolling back changes..." + git checkout -- . + fi + exit 1 +fi +cd ../.. + +# Build package to verify +print_info "Building package..." +cd src/lfx +if ! uv build; then + print_error "Build failed!" + if [ "$DRY_RUN" = false ]; then + print_info "Rolling back changes..." + cd ../.. + git checkout -- . + fi + exit 1 +fi +cd ../.. +if [ "$DRY_RUN" = true ]; then + print_dry_run "Skipping cleanup of build artifacts in dry run mode" +else + # Clean up build artifacts + rm -rf src/lfx/dist/ +fi + +# Create git commit +if [ "$DRY_RUN" = true ]; then + print_dry_run "Would create git commit: 'chore(lfx): bump version to $NEW_VERSION'" +else + print_info "Creating git commit..." + git add src/lfx/pyproject.toml src/lfx/docker/Dockerfile* 2>/dev/null || true + git commit -m "chore(lfx): bump version to $NEW_VERSION + +- Update version in pyproject.toml +- Prepare for PyPI and Docker release" +fi + +# Create git tag +TAG_NAME="lfx-v$NEW_VERSION" +if [ "$DRY_RUN" = true ]; then + print_dry_run "Would create git tag: $TAG_NAME" +else + print_info "Creating git tag: $TAG_NAME" + git tag -a "$TAG_NAME" -m "LFX Release $NEW_VERSION" +fi + +if [ "$DRY_RUN" = true ]; then + print_info "✅ Dry run complete!" 
+ echo "" + echo "Dry run performed:" + echo "✅ Validated version format" + echo "✅ Ran tests successfully" + echo "✅ Built package successfully" + echo "" + echo "What would happen in a real run:" + echo "1. Update version in pyproject.toml to $NEW_VERSION" + echo "2. Update version in Dockerfiles (if applicable)" + echo "3. Create git commit with message: 'chore(lfx): bump version to $NEW_VERSION'" + echo "4. Create git tag: $TAG_NAME" + echo "" + echo "To perform the actual release, run without --dry-run:" + echo " $0 $NEW_VERSION" +else + print_info "✅ Release preparation complete!" + echo "" + echo "Next steps:" + echo "1. Push the commit and tag:" + echo " git push origin HEAD" + echo " git push origin $TAG_NAME" + echo "" + echo "2. Go to GitHub Actions and run the 'LFX Release' workflow:" + echo " https://github.com/langflow-ai/langflow/actions/workflows/release-lfx.yml" + echo "" + echo "3. Enter version: $NEW_VERSION" + echo "" + echo "4. Select options:" + echo " - Publish to PyPI: Yes" + echo " - Build Docker images: Yes" + echo " - Create GitHub release: Yes" + echo "" + echo "The workflow will:" + echo "- Run tests on all Python versions" + echo "- Build and publish to PyPI" + echo "- Build and push Docker images (standard and alpine)" + echo "- Create a GitHub release with artifacts" +fi \ No newline at end of file diff --git a/src/backend/base/langflow/__init__.py b/src/backend/base/langflow/__init__.py index e69de29bb2d1..8a28fc5d3557 100644 --- a/src/backend/base/langflow/__init__.py +++ b/src/backend/base/langflow/__init__.py @@ -0,0 +1,215 @@ +"""Langflow backwards compatibility layer. + +This module provides backwards compatibility by forwarding imports from +langflow.* to lfx.* to maintain compatibility with existing code that +references the old langflow module structure. 
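+
+Illustrative usage (editor's sketch; assumes the lfx package is installed, as
+the module mappings set up below require):
+
+    from langflow.schema.data import Data         # served by this shim
+    from lfx.schema.data import Data as LfxData
+
+    assert Data is LfxData  # forwarding preserves class identity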
+""" + +import importlib +import importlib.util +import sys +from types import ModuleType +from typing import Any + + +class LangflowCompatibilityModule(ModuleType): + """A module that forwards attribute access to the corresponding lfx module.""" + + def __init__(self, name: str, lfx_module_name: str): + super().__init__(name) + self._lfx_module_name = lfx_module_name + self._lfx_module = None + + def _get_lfx_module(self): + """Lazily import and cache the lfx module.""" + if self._lfx_module is None: + try: + self._lfx_module = importlib.import_module(self._lfx_module_name) + except ImportError as e: + msg = f"Cannot import {self._lfx_module_name} for backwards compatibility with {self.__name__}" + raise ImportError(msg) from e + return self._lfx_module + + def __getattr__(self, name: str) -> Any: + """Forward attribute access to the lfx module with caching.""" + lfx_module = self._get_lfx_module() + try: + attr = getattr(lfx_module, name) + except AttributeError as e: + msg = f"module '{self.__name__}' has no attribute '{name}'" + raise AttributeError(msg) from e + else: + # Cache the attribute in our __dict__ for faster subsequent access + setattr(self, name, attr) + return attr + + def __dir__(self): + """Return directory of the lfx module.""" + try: + lfx_module = self._get_lfx_module() + return dir(lfx_module) + except ImportError: + return [] + + +def _setup_compatibility_modules(): + """Set up comprehensive compatibility modules for langflow.base imports.""" + # First, set up the base attribute on this module (langflow) + current_module = sys.modules[__name__] + + # Define all the modules we need to support + module_mappings = { + # Core base module + "langflow.base": "lfx.base", + # Inputs module - critical for class identity + "langflow.inputs": "lfx.inputs", + "langflow.inputs.inputs": "lfx.inputs.inputs", + # Schema modules - also critical for class identity + "langflow.schema": "lfx.schema", + "langflow.schema.data": "lfx.schema.data", + "langflow.schema.serialize": "lfx.schema.serialize", + # Template modules + "langflow.template": "lfx.template", + "langflow.template.field": "lfx.template.field", + "langflow.template.field.base": "lfx.template.field.base", + # Components modules + "langflow.components": "lfx.components", + "langflow.components.helpers": "lfx.components.helpers", + "langflow.components.helpers.calculator_core": "lfx.components.helpers.calculator_core", + "langflow.components.helpers.create_list": "lfx.components.helpers.create_list", + "langflow.components.helpers.current_date": "lfx.components.helpers.current_date", + "langflow.components.helpers.id_generator": "lfx.components.helpers.id_generator", + "langflow.components.helpers.memory": "lfx.components.helpers.memory", + "langflow.components.helpers.output_parser": "lfx.components.helpers.output_parser", + "langflow.components.helpers.store_message": "lfx.components.helpers.store_message", + # Individual modules that exist in lfx + "langflow.base.agents": "lfx.base.agents", + "langflow.base.chains": "lfx.base.chains", + "langflow.base.data": "lfx.base.data", + "langflow.base.data.utils": "lfx.base.data.utils", + "langflow.base.document_transformers": "lfx.base.document_transformers", + "langflow.base.embeddings": "lfx.base.embeddings", + "langflow.base.flow_processing": "lfx.base.flow_processing", + "langflow.base.io": "lfx.base.io", + "langflow.base.io.chat": "lfx.base.io.chat", + "langflow.base.io.text": "lfx.base.io.text", + "langflow.base.langchain_utilities": "lfx.base.langchain_utilities", + 
"langflow.base.memory": "lfx.base.memory", + "langflow.base.models": "lfx.base.models", + "langflow.base.models.google_generative_ai_constants": "lfx.base.models.google_generative_ai_constants", + "langflow.base.models.openai_constants": "lfx.base.models.openai_constants", + "langflow.base.models.anthropic_constants": "lfx.base.models.anthropic_constants", + "langflow.base.models.aiml_constants": "lfx.base.models.aiml_constants", + "langflow.base.models.aws_constants": "lfx.base.models.aws_constants", + "langflow.base.models.groq_constants": "lfx.base.models.groq_constants", + "langflow.base.models.novita_constants": "lfx.base.models.novita_constants", + "langflow.base.models.ollama_constants": "lfx.base.models.ollama_constants", + "langflow.base.models.sambanova_constants": "lfx.base.models.sambanova_constants", + "langflow.base.prompts": "lfx.base.prompts", + "langflow.base.prompts.api_utils": "lfx.base.prompts.api_utils", + "langflow.base.prompts.utils": "lfx.base.prompts.utils", + "langflow.base.textsplitters": "lfx.base.textsplitters", + "langflow.base.tools": "lfx.base.tools", + "langflow.base.vectorstores": "lfx.base.vectorstores", + } + + # Create compatibility modules for each mapping + for langflow_name, lfx_name in module_mappings.items(): + if langflow_name not in sys.modules: + # Check if the lfx module exists + try: + spec = importlib.util.find_spec(lfx_name) + if spec is not None: + # Create compatibility module + compat_module = LangflowCompatibilityModule(langflow_name, lfx_name) + sys.modules[langflow_name] = compat_module + + # Set up the module hierarchy + parts = langflow_name.split(".") + if len(parts) > 1: + parent_name = ".".join(parts[:-1]) + parent_module = sys.modules.get(parent_name) + if parent_module is not None: + setattr(parent_module, parts[-1], compat_module) + + # Special handling for top-level modules + if langflow_name == "langflow.base": + current_module.base = compat_module + elif langflow_name == "langflow.inputs": + current_module.inputs = compat_module + elif langflow_name == "langflow.schema": + current_module.schema = compat_module + elif langflow_name == "langflow.template": + current_module.template = compat_module + elif langflow_name == "langflow.components": + current_module.components = compat_module + except (ImportError, ValueError): + # Skip modules that don't exist in lfx + continue + + # Handle modules that exist only in langflow (like knowledge_bases) + # These need special handling because they're not in lfx yet + langflow_only_modules = { + "langflow.base.data.kb_utils": "langflow.base.data.kb_utils", + "langflow.base.knowledge_bases": "langflow.base.knowledge_bases", + "langflow.components.knowledge_bases": "langflow.components.knowledge_bases", + } + + for langflow_name in langflow_only_modules: + if langflow_name not in sys.modules: + try: + # Try to find the actual physical module file + from pathlib import Path + + base_dir = Path(__file__).parent + + if langflow_name == "langflow.base.data.kb_utils": + kb_utils_file = base_dir / "base" / "data" / "kb_utils.py" + if kb_utils_file.exists(): + spec = importlib.util.spec_from_file_location(langflow_name, kb_utils_file) + if spec is not None and spec.loader is not None: + module = importlib.util.module_from_spec(spec) + sys.modules[langflow_name] = module + spec.loader.exec_module(module) + + # Also add to parent module + parent_module = sys.modules.get("langflow.base.data") + if parent_module is not None: + parent_module.kb_utils = module + + elif langflow_name == 
"langflow.base.knowledge_bases": + kb_dir = base_dir / "base" / "knowledge_bases" + kb_init_file = kb_dir / "__init__.py" + if kb_init_file.exists(): + spec = importlib.util.spec_from_file_location(langflow_name, kb_init_file) + if spec is not None and spec.loader is not None: + module = importlib.util.module_from_spec(spec) + sys.modules[langflow_name] = module + spec.loader.exec_module(module) + + # Also add to parent module + parent_module = sys.modules.get("langflow.base") + if parent_module is not None: + parent_module.knowledge_bases = module + + elif langflow_name == "langflow.components.knowledge_bases": + components_kb_dir = base_dir / "components" / "knowledge_bases" + components_kb_init_file = components_kb_dir / "__init__.py" + if components_kb_init_file.exists(): + spec = importlib.util.spec_from_file_location(langflow_name, components_kb_init_file) + if spec is not None and spec.loader is not None: + module = importlib.util.module_from_spec(spec) + sys.modules[langflow_name] = module + spec.loader.exec_module(module) + + # Also add to parent module + parent_module = sys.modules.get("langflow.components") + if parent_module is not None: + parent_module.knowledge_bases = module + except (ImportError, AttributeError): + # If direct file loading fails, skip silently + continue + + +# Set up all the compatibility modules +_setup_compatibility_modules() diff --git a/src/backend/base/langflow/__main__.py b/src/backend/base/langflow/__main__.py index 804c19134d3d..538b541a951f 100644 --- a/src/backend/base/langflow/__main__.py +++ b/src/backend/base/langflow/__main__.py @@ -18,6 +18,8 @@ from fastapi import HTTPException from httpx import HTTPError from jose import JWTError +from lfx.log.logger import configure, logger +from lfx.services.settings.constants import DEFAULT_SUPERUSER, DEFAULT_SUPERUSER_PASSWORD from multiprocess import cpu_count from multiprocess.context import Process from packaging import version as pkg_version @@ -29,11 +31,9 @@ from langflow.cli.progress import create_langflow_progress from langflow.initial_setup.setup import get_or_create_default_folder -from langflow.logging.logger import configure, logger from langflow.main import setup_app from langflow.services.auth.utils import check_key, get_current_user_by_jwt from langflow.services.deps import get_db_service, get_settings_service, session_scope -from langflow.services.settings.constants import DEFAULT_SUPERUSER, DEFAULT_SUPERUSER_PASSWORD from langflow.services.utils import initialize_services from langflow.utils.version import fetch_latest_version, get_version_info from langflow.utils.version import is_pre_release as langflow_is_pre_release @@ -43,6 +43,20 @@ app = typer.Typer(no_args_is_help=True) +# Add LFX commands as a sub-app +try: + from lfx.cli.commands import serve_command + from lfx.cli.run import run as lfx_run + + lfx_app = typer.Typer(name="lfx", help="Langflow Executor commands") + lfx_app.command(name="serve", help="Serve a flow as an API", no_args_is_help=True)(serve_command) + lfx_app.command(name="run", help="Run a flow directly", no_args_is_help=True)(lfx_run) + + app.add_typer(lfx_app, name="lfx") +except ImportError: + # LFX not available, skip adding the sub-app + pass + class ProcessManager: """Manages the lifecycle of the backend process.""" diff --git a/src/backend/base/langflow/alembic/versions/006b3990db50_add_unique_constraints.py b/src/backend/base/langflow/alembic/versions/006b3990db50_add_unique_constraints.py index efb4c5321902..85f2f97242e5 100644 --- 
a/src/backend/base/langflow/alembic/versions/006b3990db50_add_unique_constraints.py +++ b/src/backend/base/langflow/alembic/versions/006b3990db50_add_unique_constraints.py @@ -6,17 +6,16 @@ """ -from typing import Sequence, Union +from collections.abc import Sequence import sqlalchemy as sa from alembic import op -from sqlalchemy.engine.reflection import Inspector # revision identifiers, used by Alembic. revision: str = "006b3990db50" -down_revision: Union[str, None] = "1ef9c4f3765d" -branch_labels: Union[str, Sequence[str], None] = None -depends_on: Union[str, Sequence[str], None] = None +down_revision: str | None = "1ef9c4f3765d" +branch_labels: str | Sequence[str] | None = None +depends_on: str | Sequence[str] | None = None def upgrade() -> None: @@ -38,7 +37,6 @@ def upgrade() -> None: batch_op.create_unique_constraint("uq_user_id", ["id"]) except Exception as e: print(e) - pass # ### end Alembic commands ### @@ -62,5 +60,4 @@ def downgrade() -> None: batch_op.drop_constraint("uq_apikey_id", type_="unique") except Exception as e: print(e) - pass # ### end Alembic commands ### diff --git a/src/backend/base/langflow/alembic/versions/012fb73ac359_add_folder_table.py b/src/backend/base/langflow/alembic/versions/012fb73ac359_add_folder_table.py index 8000ce23756c..d488885a907c 100644 --- a/src/backend/base/langflow/alembic/versions/012fb73ac359_add_folder_table.py +++ b/src/backend/base/langflow/alembic/versions/012fb73ac359_add_folder_table.py @@ -6,18 +6,17 @@ """ -from typing import Sequence, Union +from collections.abc import Sequence import sqlalchemy as sa import sqlmodel from alembic import op -from sqlalchemy.engine.reflection import Inspector # revision identifiers, used by Alembic. revision: str = "012fb73ac359" -down_revision: Union[str, None] = "c153816fd85f" -branch_labels: Union[str, Sequence[str], None] = None -depends_on: Union[str, Sequence[str], None] = None +down_revision: str | None = "c153816fd85f" +branch_labels: str | Sequence[str] | None = None +depends_on: str | Sequence[str] | None = None def upgrade() -> None: diff --git a/src/backend/base/langflow/alembic/versions/0ae3a2674f32_update_the_columns_that_need_to_change_.py b/src/backend/base/langflow/alembic/versions/0ae3a2674f32_update_the_columns_that_need_to_change_.py index ea8eddad76c9..3449e8f22d6c 100644 --- a/src/backend/base/langflow/alembic/versions/0ae3a2674f32_update_the_columns_that_need_to_change_.py +++ b/src/backend/base/langflow/alembic/versions/0ae3a2674f32_update_the_columns_that_need_to_change_.py @@ -5,21 +5,20 @@ Create Date: 2024-10-04 17:30:12.924809 """ -from typing import Sequence, Union + +from collections.abc import Sequence import sqlalchemy as sa -import sqlmodel from alembic import op -from sqlalchemy.dialects import sqlite -from sqlalchemy.engine.reflection import Inspector from langflow.utils import migration # revision identifiers, used by Alembic. 
-revision: str = '0ae3a2674f32' -down_revision: Union[str, None] = 'd2d475a1f7c0' -branch_labels: Union[str, Sequence[str], None] = None -depends_on: Union[str, Sequence[str], None] = None +revision: str = "0ae3a2674f32" +down_revision: str | None = "d2d475a1f7c0" +branch_labels: str | Sequence[str] | None = None +depends_on: str | Sequence[str] | None = None + def upgrade() -> None: conn = op.get_bind() @@ -31,18 +30,14 @@ def upgrade() -> None: columns = inspector.get_columns("vertex_build") params_column = next((column for column in columns if column["name"] == "params"), None) if params_column is not None and isinstance(params_column["type"], sa.VARCHAR): - batch_op.alter_column( - "params", existing_type=sa.VARCHAR(), type_=sa.Text(), existing_nullable=True - ) + batch_op.alter_column("params", existing_type=sa.VARCHAR(), type_=sa.Text(), existing_nullable=True) with op.batch_alter_table("message", schema=None) as batch_op: if migration.column_exists(table_name="message", column_name="text", conn=conn): columns = inspector.get_columns("message") text_column = next((column for column in columns if column["name"] == "text"), None) if text_column is not None and isinstance(text_column["type"], sa.VARCHAR): - batch_op.alter_column( - "text", existing_type=sa.VARCHAR(), type_=sa.Text(), existing_nullable=True - ) + batch_op.alter_column("text", existing_type=sa.VARCHAR(), type_=sa.Text(), existing_nullable=True) # ### end Alembic commands ### @@ -56,16 +51,12 @@ def downgrade() -> None: columns = inspector.get_columns("message") text_column = next((column for column in columns if column["name"] == "text"), None) if text_column is not None and isinstance(text_column["type"], sa.VARCHAR): - batch_op.alter_column( - "text", existing_type=sa.VARCHAR(), type_=sa.Text(), existing_nullable=True - ) + batch_op.alter_column("text", existing_type=sa.VARCHAR(), type_=sa.Text(), existing_nullable=True) with op.batch_alter_table("vertex_build", schema=None) as batch_op: if migration.column_exists(table_name="vertex_build", column_name="params", conn=conn): columns = inspector.get_columns("vertex_build") params_column = next((column for column in columns if column["name"] == "params"), None) if params_column is not None and isinstance(params_column["type"], sa.VARCHAR): - batch_op.alter_column( - "params", existing_type=sa.VARCHAR(), type_=sa.Text(), existing_nullable=True - ) + batch_op.alter_column("params", existing_type=sa.VARCHAR(), type_=sa.Text(), existing_nullable=True) # ### end Alembic commands ### diff --git a/src/backend/base/langflow/alembic/versions/0b8757876a7c_.py b/src/backend/base/langflow/alembic/versions/0b8757876a7c_.py index e53b61c87448..085b7995539f 100644 --- a/src/backend/base/langflow/alembic/versions/0b8757876a7c_.py +++ b/src/backend/base/langflow/alembic/versions/0b8757876a7c_.py @@ -6,16 +6,13 @@ """ -from typing import Sequence, Union - -import sqlalchemy as sa -from alembic import op +from collections.abc import Sequence # revision identifiers, used by Alembic. 
revision: str = "0b8757876a7c" -down_revision: Union[str, None] = "006b3990db50" -branch_labels: Union[str, Sequence[str], None] = None -depends_on: Union[str, Sequence[str], None] = None +down_revision: str | None = "006b3990db50" +branch_labels: str | Sequence[str] | None = None +depends_on: str | Sequence[str] | None = None def upgrade() -> None: diff --git a/src/backend/base/langflow/alembic/versions/0d60fcbd4e8e_create_vertex_builds_table.py b/src/backend/base/langflow/alembic/versions/0d60fcbd4e8e_create_vertex_builds_table.py index db13294288b1..f3d6fd235e5b 100644 --- a/src/backend/base/langflow/alembic/versions/0d60fcbd4e8e_create_vertex_builds_table.py +++ b/src/backend/base/langflow/alembic/versions/0d60fcbd4e8e_create_vertex_builds_table.py @@ -6,7 +6,7 @@ """ -from typing import Sequence, Union +from collections.abc import Sequence import sqlalchemy as sa import sqlmodel @@ -16,9 +16,9 @@ # revision identifiers, used by Alembic. revision: str = "0d60fcbd4e8e" -down_revision: Union[str, None] = "90be8e2ed91e" -branch_labels: Union[str, Sequence[str], None] = None -depends_on: Union[str, Sequence[str], None] = None +down_revision: str | None = "90be8e2ed91e" +branch_labels: str | Sequence[str] | None = None +depends_on: str | Sequence[str] | None = None def upgrade() -> None: @@ -41,11 +41,9 @@ def upgrade() -> None: ), sa.PrimaryKeyConstraint("build_id"), ) - pass def downgrade() -> None: conn = op.get_bind() if migration.table_exists("vertex_build", conn): op.drop_table("vertex_build") - pass diff --git a/src/backend/base/langflow/alembic/versions/1a110b568907_replace_credential_table_with_variable.py b/src/backend/base/langflow/alembic/versions/1a110b568907_replace_credential_table_with_variable.py index 3443d2ba9d59..0435d4309fd0 100644 --- a/src/backend/base/langflow/alembic/versions/1a110b568907_replace_credential_table_with_variable.py +++ b/src/backend/base/langflow/alembic/versions/1a110b568907_replace_credential_table_with_variable.py @@ -6,18 +6,17 @@ """ -from typing import Sequence, Union +from collections.abc import Sequence import sqlalchemy as sa import sqlmodel from alembic import op -from sqlalchemy.engine.reflection import Inspector # revision identifiers, used by Alembic. revision: str = "1a110b568907" -down_revision: Union[str, None] = "63b9c451fd30" -branch_labels: Union[str, Sequence[str], None] = None -depends_on: Union[str, Sequence[str], None] = None +down_revision: str | None = "63b9c451fd30" +branch_labels: str | Sequence[str] | None = None +depends_on: str | Sequence[str] | None = None def upgrade() -> None: diff --git a/src/backend/base/langflow/alembic/versions/1b8b740a6fa3_remove_fk_constraint_in_message_.py b/src/backend/base/langflow/alembic/versions/1b8b740a6fa3_remove_fk_constraint_in_message_.py index 495c0a1b4346..2196c35bf53e 100644 --- a/src/backend/base/langflow/alembic/versions/1b8b740a6fa3_remove_fk_constraint_in_message_.py +++ b/src/backend/base/langflow/alembic/versions/1b8b740a6fa3_remove_fk_constraint_in_message_.py @@ -1,25 +1,25 @@ """remove fk constraint in message transaction and vertex build - Revision ID: 1b8b740a6fa3 Revises: f3b2d1f1002d Create Date: 2025-04-10 10:17:32.493181 """ -from typing import Sequence, Union -from alembic import op +from collections.abc import Sequence + import sqlalchemy as sa import sqlmodel +from alembic import op from sqlalchemy.engine.reflection import Inspector -from langflow.utils import migration +from langflow.utils import migration # revision identifiers, used by Alembic. 
-revision: str = '1b8b740a6fa3' -down_revision: Union[str, None] = 'f3b2d1f1002d' -branch_labels: Union[str, Sequence[str], None] = None -depends_on: Union[str, Sequence[str], None] = None +revision: str = "1b8b740a6fa3" +down_revision: str | None = "f3b2d1f1002d" +branch_labels: str | Sequence[str] | None = None +depends_on: str | Sequence[str] | None = None NAMING_CONVENTION = { "ix": "ix_%(column_0_label)s", @@ -29,6 +29,7 @@ "pk": "pk_%(table_name)s", } + def constraint_exists(constraint_name: str, conn) -> bool: """Check if a constraint with the given name already exists in the database. @@ -46,12 +47,16 @@ def constraint_exists(constraint_name: str, conn) -> bool: # Check each table for the constraint for table in tables: - for constraint in inspector.get_pk_constraint(table).get("name"), *[c.get("name") for c in inspector.get_foreign_keys(table)]: + for constraint in ( + inspector.get_pk_constraint(table).get("name"), + *[c.get("name") for c in inspector.get_foreign_keys(table)], + ): if constraint == constraint_name: return True return False + def upgrade() -> None: conn = op.get_bind() @@ -67,7 +72,7 @@ def upgrade() -> None: # Check if PK constraint already exists if constraint_exists(pk_name, conn): # Use a different PK name if it already exists - pk_name = f"pk_temp_vertex_build" + pk_name = "pk_temp_vertex_build" # Create temp table with same schema but no FK constraint op.create_table( @@ -85,7 +90,7 @@ def upgrade() -> None: # Copy data - use a window function to ensure build_id uniqueness across SQLite, PostgreSQL and MySQL # Filter out rows where the original 'id' (vertex id) is NULL, as the new table requires it. - op.execute(f''' + op.execute(f""" INSERT INTO "{temp_table_name}" (timestamp, id, data, artifacts, params, build_id, flow_id, valid) SELECT timestamp, id, data, artifacts, params, build_id, flow_id, valid FROM ( @@ -95,7 +100,7 @@ def upgrade() -> None: WHERE id IS NOT NULL -- Ensure vertex id is not NULL ) sub WHERE rn = 1 - ''') + """) # Drop original table and rename temp table op.drop_table("vertex_build") @@ -110,7 +115,7 @@ def upgrade() -> None: # Check if PK constraint already exists if constraint_exists(pk_name, conn): # Use a different PK name if it already exists - pk_name = f"pk_temp_transaction" + pk_name = "pk_temp_transaction" # Create temp table with same schema but no FK constraint op.create_table( @@ -128,12 +133,12 @@ def upgrade() -> None: ) # Copy data - explicitly list columns and filter out rows where id is NULL - op.execute(f''' + op.execute(f""" INSERT INTO "{temp_table_name}" (timestamp, vertex_id, target_id, inputs, outputs, status, id, flow_id, error) SELECT timestamp, vertex_id, target_id, inputs, outputs, status, id, flow_id, error FROM "transaction" WHERE id IS NOT NULL - ''') + """) # Drop original table and rename temp table op.drop_table("transaction") @@ -148,7 +153,7 @@ def upgrade() -> None: # Check if PK constraint already exists if constraint_exists(pk_name, conn): # Use a different PK name if it already exists - pk_name = f"pk_temp_message" + pk_name = "pk_temp_message" # Create temp table with same schema but no FK constraint op.create_table( @@ -170,12 +175,12 @@ def upgrade() -> None: ) # Copy data - explicitly list columns and filter out rows where id is NULL - op.execute(f''' + op.execute(f""" INSERT INTO "{temp_table_name}" (timestamp, sender, sender_name, session_id, text, id, flow_id, files, error, edit, properties, category, content_blocks) SELECT timestamp, sender, sender_name, session_id, text, id, flow_id, 
files, error, edit, properties, category, content_blocks FROM "message" WHERE id IS NOT NULL - ''') + """) # Drop original table and rename temp table op.drop_table("message") @@ -196,7 +201,7 @@ def downgrade() -> None: # Check if constraints already exist if constraint_exists(pk_name, conn): - pk_name = f"pk_temp_vertex_build" + pk_name = "pk_temp_vertex_build" if constraint_exists(fk_name, conn): fk_name = f"fk_vertex_build_flow_id_flow_{revision[:8]}" @@ -223,7 +228,7 @@ def downgrade() -> None: # Copy data - use a window function to ensure build_id uniqueness. # Filter out rows where build_id is NULL (PK constraint) # No need to filter by 'id' here as the target column allows NULLs. - op.execute(f''' + op.execute(f""" INSERT INTO "{temp_table_name}" (timestamp, id, data, artifacts, params, build_id, flow_id, valid) SELECT timestamp, id, data, artifacts, params, build_id, flow_id, valid FROM ( @@ -233,7 +238,7 @@ def downgrade() -> None: WHERE build_id IS NOT NULL -- Ensure primary key is not NULL ) sub WHERE rn = 1 - ''') + """) # Drop original table and rename temp table op.drop_table("vertex_build") @@ -248,7 +253,7 @@ def downgrade() -> None: # Check if constraints already exist if constraint_exists(pk_name, conn): - pk_name = f"pk_temp_transaction" + pk_name = "pk_temp_transaction" if constraint_exists(fk_name, conn): fk_name = f"fk_transaction_flow_id_flow_{revision[:8]}" @@ -274,12 +279,12 @@ def downgrade() -> None: ) # Copy data - explicitly list columns and filter out rows where id is NULL - op.execute(f''' + op.execute(f""" INSERT INTO "{temp_table_name}" (timestamp, vertex_id, target_id, inputs, outputs, status, id, flow_id, error) SELECT timestamp, vertex_id, target_id, inputs, outputs, status, id, flow_id, error FROM "transaction" WHERE id IS NOT NULL - ''') + """) # Drop original table and rename temp table op.drop_table("transaction") @@ -294,7 +299,7 @@ def downgrade() -> None: # Check if constraints already exist if constraint_exists(pk_name, conn): - pk_name = f"pk_temp_message" + pk_name = "pk_temp_message" if constraint_exists(fk_name, conn): fk_name = f"fk_message_flow_id_flow_{revision[:8]}" @@ -324,12 +329,12 @@ def downgrade() -> None: ) # Copy data - explicitly list columns and filter out rows where id is NULL - op.execute(f''' + op.execute(f""" INSERT INTO "{temp_table_name}" (timestamp, sender, sender_name, session_id, text, id, flow_id, files, error, edit, properties, category, content_blocks) SELECT timestamp, sender, sender_name, session_id, text, id, flow_id, files, error, edit, properties, category, content_blocks FROM "message" WHERE id IS NOT NULL - ''') + """) # Drop original table and rename temp table op.drop_table("message") diff --git a/src/backend/base/langflow/alembic/versions/1c79524817ed_add_unique_constraints_per_user_in_.py b/src/backend/base/langflow/alembic/versions/1c79524817ed_add_unique_constraints_per_user_in_.py index c1ddf82e378b..d8dc7f3ad96a 100644 --- a/src/backend/base/langflow/alembic/versions/1c79524817ed_add_unique_constraints_per_user_in_.py +++ b/src/backend/base/langflow/alembic/versions/1c79524817ed_add_unique_constraints_per_user_in_.py @@ -6,17 +6,16 @@ """ -from typing import Sequence, Union +from collections.abc import Sequence import sqlalchemy as sa from alembic import op -from sqlalchemy.engine.reflection import Inspector # revision identifiers, used by Alembic. 
revision: str = "1c79524817ed" -down_revision: Union[str, None] = "3bb0ddf32dfb" -branch_labels: Union[str, Sequence[str], None] = None -depends_on: Union[str, Sequence[str], None] = None +down_revision: str | None = "3bb0ddf32dfb" +branch_labels: str | Sequence[str] | None = None +depends_on: str | Sequence[str] | None = None def upgrade() -> None: diff --git a/src/backend/base/langflow/alembic/versions/1d90f8a0efe1_update_description_columns_type.py b/src/backend/base/langflow/alembic/versions/1d90f8a0efe1_update_description_columns_type.py index dc32c97d316f..8a5be6eb2d46 100644 --- a/src/backend/base/langflow/alembic/versions/1d90f8a0efe1_update_description_columns_type.py +++ b/src/backend/base/langflow/alembic/versions/1d90f8a0efe1_update_description_columns_type.py @@ -6,19 +6,18 @@ """ -from typing import Sequence, Union +from collections.abc import Sequence import sqlalchemy as sa from alembic import op -from sqlalchemy.engine.reflection import Inspector from langflow.utils import migration # revision identifiers, used by Alembic. revision: str = "4522eb831f5c" -down_revision: Union[str, None] = "0d60fcbd4e8e" -branch_labels: Union[str, Sequence[str], None] = None -depends_on: Union[str, Sequence[str], None] = None +down_revision: str | None = "0d60fcbd4e8e" +branch_labels: str | Sequence[str] | None = None +depends_on: str | Sequence[str] | None = None def upgrade() -> None: diff --git a/src/backend/base/langflow/alembic/versions/1eab2c3eb45e_event_error.py b/src/backend/base/langflow/alembic/versions/1eab2c3eb45e_event_error.py index 6a711552c0d1..294fcf2cc709 100644 --- a/src/backend/base/langflow/alembic/versions/1eab2c3eb45e_event_error.py +++ b/src/backend/base/langflow/alembic/versions/1eab2c3eb45e_event_error.py @@ -5,18 +5,17 @@ Create Date: 2024-10-24 12:03:24.118937 """ -from typing import Sequence, Union + +from collections.abc import Sequence import sqlalchemy as sa from alembic import op -from sqlalchemy.dialects import sqlite -from sqlalchemy.engine.reflection import Inspector # revision identifiers, used by Alembic. -revision: str = '1eab2c3eb45e' -down_revision: Union[str, None] = 'eb5e72293a8e' -branch_labels: Union[str, Sequence[str], None] = None -depends_on: Union[str, Sequence[str], None] = None +revision: str = "1eab2c3eb45e" +down_revision: str | None = "eb5e72293a8e" +branch_labels: str | Sequence[str] | None = None +depends_on: str | Sequence[str] | None = None def upgrade() -> None: @@ -25,13 +24,13 @@ def upgrade() -> None: table_names = inspector.get_table_names() # noqa column_names = [column["name"] for column in inspector.get_columns("message")] # ### commands auto generated by Alembic - please adjust! 
### - with op.batch_alter_table('message', schema=None) as batch_op: + with op.batch_alter_table("message", schema=None) as batch_op: if "properties" not in column_names: - batch_op.add_column(sa.Column('properties', sa.JSON(), nullable=True)) + batch_op.add_column(sa.Column("properties", sa.JSON(), nullable=True)) if "category" not in column_names: - batch_op.add_column(sa.Column('category', sa.Text(), nullable=True)) + batch_op.add_column(sa.Column("category", sa.Text(), nullable=True)) if "content_blocks" not in column_names: - batch_op.add_column(sa.Column('content_blocks', sa.JSON(), nullable=True)) + batch_op.add_column(sa.Column("content_blocks", sa.JSON(), nullable=True)) # ### end Alembic commands ### @@ -42,12 +41,12 @@ def downgrade() -> None: table_names = inspector.get_table_names() # noqa column_names = [column["name"] for column in inspector.get_columns("message")] # ### commands auto generated by Alembic - please adjust! ### - with op.batch_alter_table('message', schema=None) as batch_op: + with op.batch_alter_table("message", schema=None) as batch_op: if "content_blocks" in column_names: - batch_op.drop_column('content_blocks') + batch_op.drop_column("content_blocks") if "category" in column_names: - batch_op.drop_column('category') + batch_op.drop_column("category") if "properties" in column_names: - batch_op.drop_column('properties') + batch_op.drop_column("properties") # ### end Alembic commands ### diff --git a/src/backend/base/langflow/alembic/versions/1ef9c4f3765d_.py b/src/backend/base/langflow/alembic/versions/1ef9c4f3765d_.py index 5607df8d3fe1..2967a507949f 100644 --- a/src/backend/base/langflow/alembic/versions/1ef9c4f3765d_.py +++ b/src/backend/base/langflow/alembic/versions/1ef9c4f3765d_.py @@ -1,26 +1,21 @@ -""" - - -Revision ID: 1ef9c4f3765d +"""Revision ID: 1ef9c4f3765d Revises: fd531f8868b1 Create Date: 2023-12-04 15:00:27.968998 """ -from typing import Sequence, Union +from collections.abc import Sequence import sqlalchemy as sa -import sqlmodel from alembic import op -from sqlalchemy.engine.reflection import Inspector from langflow.utils import migration # revision identifiers, used by Alembic. revision: str = "1ef9c4f3765d" -down_revision: Union[str, None] = "fd531f8868b1" -branch_labels: Union[str, Sequence[str], None] = None -depends_on: Union[str, Sequence[str], None] = None +down_revision: str | None = "fd531f8868b1" +branch_labels: str | Sequence[str] | None = None +depends_on: str | Sequence[str] | None = None def upgrade() -> None: diff --git a/src/backend/base/langflow/alembic/versions/1f4d6df60295_add_default_fields_column.py b/src/backend/base/langflow/alembic/versions/1f4d6df60295_add_default_fields_column.py index f2617463b811..afa1836bfbd2 100644 --- a/src/backend/base/langflow/alembic/versions/1f4d6df60295_add_default_fields_column.py +++ b/src/backend/base/langflow/alembic/versions/1f4d6df60295_add_default_fields_column.py @@ -6,17 +6,16 @@ """ -from typing import Sequence, Union +from collections.abc import Sequence import sqlalchemy as sa from alembic import op -from sqlalchemy.engine.reflection import Inspector # revision identifiers, used by Alembic. 
revision: str = "1f4d6df60295" -down_revision: Union[str, None] = "6e7b581b5648" -branch_labels: Union[str, Sequence[str], None] = None -depends_on: Union[str, Sequence[str], None] = None +down_revision: str | None = "6e7b581b5648" +branch_labels: str | Sequence[str] | None = None +depends_on: str | Sequence[str] | None = None def upgrade() -> None: diff --git a/src/backend/base/langflow/alembic/versions/260dbcc8b680_adds_tables.py b/src/backend/base/langflow/alembic/versions/260dbcc8b680_adds_tables.py index 9a3275d7c8df..49e4df320c54 100644 --- a/src/backend/base/langflow/alembic/versions/260dbcc8b680_adds_tables.py +++ b/src/backend/base/langflow/alembic/versions/260dbcc8b680_adds_tables.py @@ -6,18 +6,17 @@ """ -from typing import Sequence, Union +from collections.abc import Sequence import sqlalchemy as sa import sqlmodel from alembic import op -from sqlalchemy.engine.reflection import Inspector # revision identifiers, used by Alembic. revision: str = "260dbcc8b680" -down_revision: Union[str, None] = None -branch_labels: Union[str, Sequence[str], None] = None -depends_on: Union[str, Sequence[str], None] = None +down_revision: str | None = None +branch_labels: str | Sequence[str] | None = None +depends_on: str | Sequence[str] | None = None def upgrade() -> None: diff --git a/src/backend/base/langflow/alembic/versions/29fe8f1f806b_add_missing_index.py b/src/backend/base/langflow/alembic/versions/29fe8f1f806b_add_missing_index.py index 5d10247499c0..0478eeac6f59 100644 --- a/src/backend/base/langflow/alembic/versions/29fe8f1f806b_add_missing_index.py +++ b/src/backend/base/langflow/alembic/versions/29fe8f1f806b_add_missing_index.py @@ -6,16 +6,15 @@ """ -from typing import Sequence, Union +from collections.abc import Sequence import sqlalchemy as sa from alembic import op -from sqlalchemy.engine.reflection import Inspector revision: str = "29fe8f1f806b" -down_revision: Union[str, None] = "012fb73ac359" -branch_labels: Union[str, Sequence[str], None] = None -depends_on: Union[str, Sequence[str], None] = None +down_revision: str | None = "012fb73ac359" +branch_labels: str | Sequence[str] | None = None +depends_on: str | Sequence[str] | None = None def upgrade() -> None: diff --git a/src/backend/base/langflow/alembic/versions/2ac71eb9c3ae_adds_credential_table.py b/src/backend/base/langflow/alembic/versions/2ac71eb9c3ae_adds_credential_table.py index baf792201f19..ceb69e64cb24 100644 --- a/src/backend/base/langflow/alembic/versions/2ac71eb9c3ae_adds_credential_table.py +++ b/src/backend/base/langflow/alembic/versions/2ac71eb9c3ae_adds_credential_table.py @@ -6,18 +6,17 @@ """ -from typing import Sequence, Union +from collections.abc import Sequence import sqlalchemy as sa import sqlmodel from alembic import op -from sqlalchemy.engine.reflection import Inspector # revision identifiers, used by Alembic. 
revision: str = "2ac71eb9c3ae" -down_revision: Union[str, None] = "7d2162acc8b2" -branch_labels: Union[str, Sequence[str], None] = None -depends_on: Union[str, Sequence[str], None] = None +down_revision: str | None = "7d2162acc8b2" +branch_labels: str | Sequence[str] | None = None +depends_on: str | Sequence[str] | None = None def upgrade() -> None: @@ -41,7 +40,6 @@ def upgrade() -> None: except Exception as e: print(e) - pass # ### end Alembic commands ### @@ -51,5 +49,4 @@ def downgrade() -> None: op.drop_table("credential") except Exception as e: print(e) - pass # ### end Alembic commands ### diff --git a/src/backend/base/langflow/alembic/versions/3bb0ddf32dfb_add_unique_constraints_per_user_in_flow_.py b/src/backend/base/langflow/alembic/versions/3bb0ddf32dfb_add_unique_constraints_per_user_in_flow_.py index 944f27c88bcb..ccfacdcf0bdf 100644 --- a/src/backend/base/langflow/alembic/versions/3bb0ddf32dfb_add_unique_constraints_per_user_in_flow_.py +++ b/src/backend/base/langflow/alembic/versions/3bb0ddf32dfb_add_unique_constraints_per_user_in_flow_.py @@ -6,17 +6,16 @@ """ -from typing import Sequence, Union +from collections.abc import Sequence import sqlalchemy as sa from alembic import op -from sqlalchemy.engine.reflection import Inspector # revision identifiers, used by Alembic. revision: str = "3bb0ddf32dfb" -down_revision: Union[str, None] = "a72f5cf9c2f9" -branch_labels: Union[str, Sequence[str], None] = None -depends_on: Union[str, Sequence[str], None] = None +down_revision: str | None = "a72f5cf9c2f9" +branch_labels: str | Sequence[str] | None = None +depends_on: str | Sequence[str] | None = None def upgrade() -> None: diff --git a/src/backend/base/langflow/alembic/versions/4e5980a44eaa_fix_date_times_again.py b/src/backend/base/langflow/alembic/versions/4e5980a44eaa_fix_date_times_again.py index f5e52926b005..5948b1739f8f 100644 --- a/src/backend/base/langflow/alembic/versions/4e5980a44eaa_fix_date_times_again.py +++ b/src/backend/base/langflow/alembic/versions/4e5980a44eaa_fix_date_times_again.py @@ -10,10 +10,9 @@ import sqlalchemy as sa from alembic import op +from lfx.log.logger import logger from sqlalchemy.dialects import postgresql -from langflow.logging.logger import logger - # revision identifiers, used by Alembic. 
revision: str = "4e5980a44eaa" down_revision: str | None = "79e675cb6752" diff --git a/src/backend/base/langflow/alembic/versions/58b28437a398_modify_nullable.py b/src/backend/base/langflow/alembic/versions/58b28437a398_modify_nullable.py index 4d9b1825b4f8..b7cac3b4e6db 100644 --- a/src/backend/base/langflow/alembic/versions/58b28437a398_modify_nullable.py +++ b/src/backend/base/langflow/alembic/versions/58b28437a398_modify_nullable.py @@ -10,8 +10,7 @@ import sqlalchemy as sa from alembic import op - -from langflow.logging.logger import logger +from lfx.log.logger import logger down_revision: str | None = "4e5980a44eaa" branch_labels: str | Sequence[str] | None = None diff --git a/src/backend/base/langflow/alembic/versions/5ace73a7f223_new_remove_table_upgrade_op.py b/src/backend/base/langflow/alembic/versions/5ace73a7f223_new_remove_table_upgrade_op.py index f2b8f9bff1cc..95b42ac5d460 100644 --- a/src/backend/base/langflow/alembic/versions/5ace73a7f223_new_remove_table_upgrade_op.py +++ b/src/backend/base/langflow/alembic/versions/5ace73a7f223_new_remove_table_upgrade_op.py @@ -6,25 +6,19 @@ """ -from typing import Sequence, Union +from collections.abc import Sequence -from alembic import op import sqlalchemy as sa -import sqlmodel -from sqlalchemy.engine.reflection import Inspector -from langflow.utils import migration -from sqlalchemy.dialects import sqlite -from langflow.utils import migration +from alembic import op # revision identifiers, used by Alembic. revision: str = "5ace73a7f223" -down_revision: Union[str, None] = "0ae3a2674f32" -branch_labels: Union[str, Sequence[str], None] = None -depends_on: Union[str, Sequence[str], None] = None +down_revision: str | None = "0ae3a2674f32" +branch_labels: str | Sequence[str] | None = None +depends_on: str | Sequence[str] | None = None def upgrade() -> None: - with op.batch_alter_table("message", schema=None) as batch_op: batch_op.alter_column("text", existing_type=sa.TEXT(), nullable=True) @@ -35,5 +29,5 @@ def downgrade() -> None: # ### commands auto generated by Alembic - please adjust! ### with op.batch_alter_table("message", schema=None) as batch_op: batch_op.alter_column("text", existing_type=sa.TEXT(), nullable=False) - + # ### end Alembic commands ### diff --git a/src/backend/base/langflow/alembic/versions/631faacf5da2_add_webhook_columns.py b/src/backend/base/langflow/alembic/versions/631faacf5da2_add_webhook_columns.py index 8f90648ef4f6..a5cbd579e5fc 100644 --- a/src/backend/base/langflow/alembic/versions/631faacf5da2_add_webhook_columns.py +++ b/src/backend/base/langflow/alembic/versions/631faacf5da2_add_webhook_columns.py @@ -6,17 +6,16 @@ """ -from typing import Sequence, Union +from collections.abc import Sequence import sqlalchemy as sa from alembic import op -from sqlalchemy.engine.reflection import Inspector # revision identifiers, used by Alembic. 
revision: str = "631faacf5da2" -down_revision: Union[str, None] = "1c79524817ed" -branch_labels: Union[str, Sequence[str], None] = None -depends_on: Union[str, Sequence[str], None] = None +down_revision: str | None = "1c79524817ed" +branch_labels: str | Sequence[str] | None = None +depends_on: str | Sequence[str] | None = None def upgrade() -> None: diff --git a/src/backend/base/langflow/alembic/versions/63b9c451fd30_add_icon_and_icon_bg_color_to_flow.py b/src/backend/base/langflow/alembic/versions/63b9c451fd30_add_icon_and_icon_bg_color_to_flow.py index 3b4822d10d2b..ba2008ead7d6 100644 --- a/src/backend/base/langflow/alembic/versions/63b9c451fd30_add_icon_and_icon_bg_color_to_flow.py +++ b/src/backend/base/langflow/alembic/versions/63b9c451fd30_add_icon_and_icon_bg_color_to_flow.py @@ -6,18 +6,17 @@ """ -from typing import Sequence, Union +from collections.abc import Sequence import sqlalchemy as sa import sqlmodel from alembic import op -from sqlalchemy.engine.reflection import Inspector # revision identifiers, used by Alembic. revision: str = "63b9c451fd30" -down_revision: Union[str, None] = "bc2f01c40e4a" -branch_labels: Union[str, Sequence[str], None] = None -depends_on: Union[str, Sequence[str], None] = None +down_revision: str | None = "bc2f01c40e4a" +branch_labels: str | Sequence[str] | None = None +depends_on: str | Sequence[str] | None = None def upgrade() -> None: diff --git a/src/backend/base/langflow/alembic/versions/66f72f04a1de_add_mcp_support_with_project_settings_.py b/src/backend/base/langflow/alembic/versions/66f72f04a1de_add_mcp_support_with_project_settings_.py index ca5f3d82e533..1b0d844a064a 100644 --- a/src/backend/base/langflow/alembic/versions/66f72f04a1de_add_mcp_support_with_project_settings_.py +++ b/src/backend/base/langflow/alembic/versions/66f72f04a1de_add_mcp_support_with_project_settings_.py @@ -5,20 +5,18 @@ Create Date: 2025-04-24 18:42:15.828332 """ -from typing import Sequence, Union -from alembic import op +from collections.abc import Sequence + import sqlalchemy as sa import sqlmodel -from sqlalchemy.engine.reflection import Inspector -from langflow.utils import migration - +from alembic import op # revision identifiers, used by Alembic. -revision: str = '66f72f04a1de' -down_revision: Union[str, None] = 'e56d87f8994a' -branch_labels: Union[str, Sequence[str], None] = None -depends_on: Union[str, Sequence[str], None] = None +revision: str = "66f72f04a1de" +down_revision: str | None = "e56d87f8994a" +branch_labels: str | Sequence[str] | None = None +depends_on: str | Sequence[str] | None = None def upgrade() -> None: @@ -26,13 +24,13 @@ def upgrade() -> None: inspector = sa.inspect(conn) # type: ignore column_names = [column["name"] for column in inspector.get_columns("flow")] # ### commands auto generated by Alembic - please adjust! 
### - with op.batch_alter_table('flow', schema=None) as batch_op: - if 'mcp_enabled' not in column_names: - batch_op.add_column(sa.Column('mcp_enabled', sa.Boolean(), nullable=True)) - if 'action_name' not in column_names: - batch_op.add_column(sa.Column('action_name', sqlmodel.sql.sqltypes.AutoString(), nullable=True)) - if 'action_description' not in column_names: - batch_op.add_column(sa.Column('action_description', sa.Text(), nullable=True)) + with op.batch_alter_table("flow", schema=None) as batch_op: + if "mcp_enabled" not in column_names: + batch_op.add_column(sa.Column("mcp_enabled", sa.Boolean(), nullable=True)) + if "action_name" not in column_names: + batch_op.add_column(sa.Column("action_name", sqlmodel.sql.sqltypes.AutoString(), nullable=True)) + if "action_description" not in column_names: + batch_op.add_column(sa.Column("action_description", sa.Text(), nullable=True)) # ### end Alembic commands ### @@ -43,12 +41,12 @@ def downgrade() -> None: column_names = [column["name"] for column in inspector.get_columns("flow")] # ### commands auto generated by Alembic - please adjust! ### - with op.batch_alter_table('flow', schema=None) as batch_op: - if 'action_description' in column_names: - batch_op.drop_column('action_description') - if 'action_name' in column_names: - batch_op.drop_column('action_name') - if 'mcp_enabled' in column_names: - batch_op.drop_column('mcp_enabled') + with op.batch_alter_table("flow", schema=None) as batch_op: + if "action_description" in column_names: + batch_op.drop_column("action_description") + if "action_name" in column_names: + batch_op.drop_column("action_name") + if "mcp_enabled" in column_names: + batch_op.drop_column("mcp_enabled") # ### end Alembic commands ### diff --git a/src/backend/base/langflow/alembic/versions/67cc006d50bf_add_profile_image_column.py b/src/backend/base/langflow/alembic/versions/67cc006d50bf_add_profile_image_column.py index e7ae54f7c2ea..f333ee3183c1 100644 --- a/src/backend/base/langflow/alembic/versions/67cc006d50bf_add_profile_image_column.py +++ b/src/backend/base/langflow/alembic/versions/67cc006d50bf_add_profile_image_column.py @@ -6,18 +6,17 @@ """ -from typing import Sequence, Union +from collections.abc import Sequence import sqlalchemy as sa import sqlmodel from alembic import op -from sqlalchemy.engine.reflection import Inspector # revision identifiers, used by Alembic. revision: str = "67cc006d50bf" -down_revision: Union[str, None] = "260dbcc8b680" -branch_labels: Union[str, Sequence[str], None] = None -depends_on: Union[str, Sequence[str], None] = None +down_revision: str | None = "260dbcc8b680" +branch_labels: str | Sequence[str] | None = None +depends_on: str | Sequence[str] | None = None def upgrade() -> None: diff --git a/src/backend/base/langflow/alembic/versions/6e7b581b5648_fix_nullable.py b/src/backend/base/langflow/alembic/versions/6e7b581b5648_fix_nullable.py index a60ddf728ccd..0825796d4d62 100644 --- a/src/backend/base/langflow/alembic/versions/6e7b581b5648_fix_nullable.py +++ b/src/backend/base/langflow/alembic/versions/6e7b581b5648_fix_nullable.py @@ -6,17 +6,16 @@ """ -from typing import Sequence, Union +from collections.abc import Sequence import sqlalchemy as sa from alembic import op -from sqlalchemy.engine.reflection import Inspector # revision identifiers, used by Alembic. 
revision: str = "6e7b581b5648" -down_revision: Union[str, None] = "58b28437a398" -branch_labels: Union[str, Sequence[str], None] = None -depends_on: Union[str, Sequence[str], None] = None +down_revision: str | None = "58b28437a398" +branch_labels: str | Sequence[str] | None = None +depends_on: str | Sequence[str] | None = None def upgrade() -> None: diff --git a/src/backend/base/langflow/alembic/versions/7843803a87b5_store_updates.py b/src/backend/base/langflow/alembic/versions/7843803a87b5_store_updates.py index d58ceef11c41..6b3244237494 100644 --- a/src/backend/base/langflow/alembic/versions/7843803a87b5_store_updates.py +++ b/src/backend/base/langflow/alembic/versions/7843803a87b5_store_updates.py @@ -6,18 +6,17 @@ """ -from typing import Sequence, Union +from collections.abc import Sequence import sqlalchemy as sa import sqlmodel from alembic import op -from sqlalchemy.engine.reflection import Inspector # revision identifiers, used by Alembic. revision: str = "7843803a87b5" -down_revision: Union[str, None] = "eb5866d51fd2" -branch_labels: Union[str, Sequence[str], None] = None -depends_on: Union[str, Sequence[str], None] = None +down_revision: str | None = "eb5866d51fd2" +branch_labels: str | Sequence[str] | None = None +depends_on: str | Sequence[str] | None = None def upgrade() -> None: @@ -51,5 +50,4 @@ def downgrade() -> None: batch_op.drop_column("is_component") except Exception as e: print(e) - pass # ### end Alembic commands ### diff --git a/src/backend/base/langflow/alembic/versions/79e675cb6752_change_datetime_type.py b/src/backend/base/langflow/alembic/versions/79e675cb6752_change_datetime_type.py index 4c1619cbb379..d52a6f74334c 100644 --- a/src/backend/base/langflow/alembic/versions/79e675cb6752_change_datetime_type.py +++ b/src/backend/base/langflow/alembic/versions/79e675cb6752_change_datetime_type.py @@ -10,10 +10,9 @@ import sqlalchemy as sa from alembic import op +from lfx.log.logger import logger from sqlalchemy.dialects import postgresql -from langflow.logging.logger import logger - # revision identifiers, used by Alembic. revision: str = "79e675cb6752" down_revision: str | None = "e3bc869fa272" diff --git a/src/backend/base/langflow/alembic/versions/7d2162acc8b2_adds_updated_at_and_folder_cols.py b/src/backend/base/langflow/alembic/versions/7d2162acc8b2_adds_updated_at_and_folder_cols.py index 743d6a2eaf63..91fa2d625897 100644 --- a/src/backend/base/langflow/alembic/versions/7d2162acc8b2_adds_updated_at_and_folder_cols.py +++ b/src/backend/base/langflow/alembic/versions/7d2162acc8b2_adds_updated_at_and_folder_cols.py @@ -6,18 +6,17 @@ """ -from typing import Sequence, Union +from collections.abc import Sequence import sqlalchemy as sa import sqlmodel from alembic import op -from sqlalchemy.engine.reflection import Inspector # revision identifiers, used by Alembic. 
revision: str = "7d2162acc8b2" -down_revision: Union[str, None] = "f5ee9749d1a6" -branch_labels: Union[str, Sequence[str], None] = None -depends_on: Union[str, Sequence[str], None] = None +down_revision: str | None = "f5ee9749d1a6" +branch_labels: str | Sequence[str] | None = None +depends_on: str | Sequence[str] | None = None def upgrade() -> None: @@ -34,7 +33,6 @@ def upgrade() -> None: except Exception as e: print(e) - pass try: with op.batch_alter_table("flow", schema=None) as batch_op: if "updated_at" not in flow_columns: @@ -44,8 +42,6 @@ def upgrade() -> None: except Exception as e: print(e) - pass - # ### end Alembic commands ### @@ -62,13 +58,11 @@ def downgrade() -> None: batch_op.drop_column("updated_at") except Exception as e: print(e) - pass try: with op.batch_alter_table("apikey", schema=None) as batch_op: batch_op.alter_column("name", existing_type=sa.VARCHAR(), nullable=True) except Exception as e: print(e) - pass # ### end Alembic commands ### diff --git a/src/backend/base/langflow/alembic/versions/90be8e2ed91e_create_transactions_table.py b/src/backend/base/langflow/alembic/versions/90be8e2ed91e_create_transactions_table.py index 1c3edd87715b..ce955c70b60a 100644 --- a/src/backend/base/langflow/alembic/versions/90be8e2ed91e_create_transactions_table.py +++ b/src/backend/base/langflow/alembic/versions/90be8e2ed91e_create_transactions_table.py @@ -6,7 +6,7 @@ """ -from typing import Sequence, Union +from collections.abc import Sequence import sqlalchemy as sa import sqlmodel @@ -16,9 +16,9 @@ # revision identifiers, used by Alembic. revision: str = "90be8e2ed91e" -down_revision: Union[str, None] = "325180f0c4e1" -branch_labels: Union[str, Sequence[str], None] = None -depends_on: Union[str, Sequence[str], None] = None +down_revision: str | None = "325180f0c4e1" +branch_labels: str | Sequence[str] | None = None +depends_on: str | Sequence[str] | None = None def upgrade() -> None: @@ -41,11 +41,9 @@ def upgrade() -> None: ), sa.PrimaryKeyConstraint("id"), ) - pass def downgrade() -> None: conn = op.get_bind() if migration.table_exists("transaction", conn): op.drop_table("transaction") - pass diff --git a/src/backend/base/langflow/alembic/versions/93e2705fa8d6_add_column_save_path_to_flow.py b/src/backend/base/langflow/alembic/versions/93e2705fa8d6_add_column_save_path_to_flow.py index 3c8848dd5b06..ff75ded14553 100644 --- a/src/backend/base/langflow/alembic/versions/93e2705fa8d6_add_column_save_path_to_flow.py +++ b/src/backend/base/langflow/alembic/versions/93e2705fa8d6_add_column_save_path_to_flow.py @@ -5,20 +5,18 @@ Create Date: 2025-02-25 13:08:11.263504 """ -from typing import Sequence, Union -from alembic import op +from collections.abc import Sequence + import sqlalchemy as sa import sqlmodel -from sqlalchemy.engine.reflection import Inspector -from langflow.utils import migration - +from alembic import op # revision identifiers, used by Alembic. 
-revision: str = '93e2705fa8d6' -down_revision: Union[str, None] = 'dd9e0804ebd1' -branch_labels: Union[str, Sequence[str], None] = None -depends_on: Union[str, Sequence[str], None] = None +revision: str = "93e2705fa8d6" +down_revision: str | None = "dd9e0804ebd1" +branch_labels: str | Sequence[str] | None = None +depends_on: str | Sequence[str] | None = None def upgrade() -> None: diff --git a/src/backend/base/langflow/alembic/versions/a72f5cf9c2f9_add_endpoint_name_col.py b/src/backend/base/langflow/alembic/versions/a72f5cf9c2f9_add_endpoint_name_col.py index ee34c0756131..e0270569f040 100644 --- a/src/backend/base/langflow/alembic/versions/a72f5cf9c2f9_add_endpoint_name_col.py +++ b/src/backend/base/langflow/alembic/versions/a72f5cf9c2f9_add_endpoint_name_col.py @@ -6,18 +6,17 @@ """ -from typing import Sequence, Union +from collections.abc import Sequence import sqlalchemy as sa import sqlmodel from alembic import op -from sqlalchemy.engine.reflection import Inspector # revision identifiers, used by Alembic. revision: str = "a72f5cf9c2f9" -down_revision: Union[str, None] = "29fe8f1f806b" -branch_labels: Union[str, Sequence[str], None] = None -depends_on: Union[str, Sequence[str], None] = None +down_revision: str | None = "29fe8f1f806b" +branch_labels: str | Sequence[str] | None = None +depends_on: str | Sequence[str] | None = None def upgrade() -> None: diff --git a/src/backend/base/langflow/alembic/versions/b2fa308044b5_add_unique_constraints.py b/src/backend/base/langflow/alembic/versions/b2fa308044b5_add_unique_constraints.py index a1575eeffa3b..b0bec0f0850e 100644 --- a/src/backend/base/langflow/alembic/versions/b2fa308044b5_add_unique_constraints.py +++ b/src/backend/base/langflow/alembic/versions/b2fa308044b5_add_unique_constraints.py @@ -11,8 +11,7 @@ import sqlalchemy as sa import sqlmodel from alembic import op - -from langflow.logging.logger import logger +from lfx.log.logger import logger # revision identifiers, used by Alembic. revision: str = "b2fa308044b5" diff --git a/src/backend/base/langflow/alembic/versions/bc2f01c40e4a_new_fixes.py b/src/backend/base/langflow/alembic/versions/bc2f01c40e4a_new_fixes.py index 872497b8d515..378cf0b82e47 100644 --- a/src/backend/base/langflow/alembic/versions/bc2f01c40e4a_new_fixes.py +++ b/src/backend/base/langflow/alembic/versions/bc2f01c40e4a_new_fixes.py @@ -6,19 +6,18 @@ """ -from typing import Sequence, Union import warnings +from collections.abc import Sequence import sqlalchemy as sa import sqlmodel from alembic import op -from sqlalchemy.engine.reflection import Inspector # revision identifiers, used by Alembic. 
revision: str = "bc2f01c40e4a" -down_revision: Union[str, None] = "b2fa308044b5" -branch_labels: Union[str, Sequence[str], None] = None -depends_on: Union[str, Sequence[str], None] = None +down_revision: str | None = "b2fa308044b5" +branch_labels: str | Sequence[str] | None = None +depends_on: str | Sequence[str] | None = None def upgrade() -> None: diff --git a/src/backend/base/langflow/alembic/versions/c153816fd85f_set_name_and_value_to_not_nullable.py b/src/backend/base/langflow/alembic/versions/c153816fd85f_set_name_and_value_to_not_nullable.py index d41e494e0597..c42e3ffd9e30 100644 --- a/src/backend/base/langflow/alembic/versions/c153816fd85f_set_name_and_value_to_not_nullable.py +++ b/src/backend/base/langflow/alembic/versions/c153816fd85f_set_name_and_value_to_not_nullable.py @@ -6,17 +6,16 @@ """ -from typing import Sequence, Union +from collections.abc import Sequence import sqlalchemy as sa from alembic import op -from sqlalchemy.engine.reflection import Inspector # revision identifiers, used by Alembic. revision: str = "c153816fd85f" -down_revision: Union[str, None] = "1f4d6df60295" -branch_labels: Union[str, Sequence[str], None] = None -depends_on: Union[str, Sequence[str], None] = None +down_revision: str | None = "1f4d6df60295" +branch_labels: str | Sequence[str] | None = None +depends_on: str | Sequence[str] | None = None def upgrade() -> None: diff --git a/src/backend/base/langflow/alembic/versions/d066bfd22890_add_message_table.py b/src/backend/base/langflow/alembic/versions/d066bfd22890_add_message_table.py index e985a7b40946..b24a0a6c3a1c 100644 --- a/src/backend/base/langflow/alembic/versions/d066bfd22890_add_message_table.py +++ b/src/backend/base/langflow/alembic/versions/d066bfd22890_add_message_table.py @@ -6,7 +6,7 @@ """ -from typing import Sequence, Union +from collections.abc import Sequence import sqlalchemy as sa import sqlmodel @@ -16,9 +16,9 @@ # revision identifiers, used by Alembic. revision: str = "325180f0c4e1" -down_revision: Union[str, None] = "631faacf5da2" -branch_labels: Union[str, Sequence[str], None] = None -depends_on: Union[str, Sequence[str], None] = None +down_revision: str | None = "631faacf5da2" +branch_labels: str | Sequence[str] | None = None +depends_on: str | Sequence[str] | None = None def upgrade() -> None: diff --git a/src/backend/base/langflow/alembic/versions/d2d475a1f7c0_add_tags_column_to_flow.py b/src/backend/base/langflow/alembic/versions/d2d475a1f7c0_add_tags_column_to_flow.py index d314930ff96a..03c56a686f56 100644 --- a/src/backend/base/langflow/alembic/versions/d2d475a1f7c0_add_tags_column_to_flow.py +++ b/src/backend/base/langflow/alembic/versions/d2d475a1f7c0_add_tags_column_to_flow.py @@ -5,28 +5,27 @@ Create Date: 2024-10-03 13:33:59.517261 """ -from typing import Sequence, Union + +from collections.abc import Sequence import sqlalchemy as sa -import sqlmodel from alembic import op -from sqlalchemy.engine.reflection import Inspector from langflow.utils import migration # revision identifiers, used by Alembic. -revision: str = 'd2d475a1f7c0' -down_revision: Union[str, None] = 'd3dbf656a499' -branch_labels: Union[str, Sequence[str], None] = None -depends_on: Union[str, Sequence[str], None] = None +revision: str = "d2d475a1f7c0" +down_revision: str | None = "d3dbf656a499" +branch_labels: str | Sequence[str] | None = None +depends_on: str | Sequence[str] | None = None def upgrade() -> None: conn = op.get_bind() # ### commands auto generated by Alembic - please adjust! 
-    with op.batch_alter_table('flow', schema=None) as batch_op:
-        if not migration.column_exists(table_name='flow', column_name='tags', conn=conn):
-            batch_op.add_column(sa.Column('tags', sa.JSON(), nullable=True))
+    with op.batch_alter_table("flow", schema=None) as batch_op:
+        if not migration.column_exists(table_name="flow", column_name="tags", conn=conn):
+            batch_op.add_column(sa.Column("tags", sa.JSON(), nullable=True))

     # ### end Alembic commands ###
@@ -34,8 +33,8 @@ def upgrade() -> None:
 def downgrade() -> None:
     conn = op.get_bind()
     # ### commands auto generated by Alembic - please adjust! ###
-    with op.batch_alter_table('flow', schema=None) as batch_op:
-        if migration.column_exists(table_name='flow', column_name='tags', conn=conn):
-            batch_op.drop_column('tags')
+    with op.batch_alter_table("flow", schema=None) as batch_op:
+        if migration.column_exists(table_name="flow", column_name="tags", conn=conn):
+            batch_op.drop_column("tags")

     # ### end Alembic commands ###
diff --git a/src/backend/base/langflow/alembic/versions/d3dbf656a499_add_gradient_column_in_flow.py b/src/backend/base/langflow/alembic/versions/d3dbf656a499_add_gradient_column_in_flow.py
index b40c63d464fb..8954fd268c16 100644
--- a/src/backend/base/langflow/alembic/versions/d3dbf656a499_add_gradient_column_in_flow.py
+++ b/src/backend/base/langflow/alembic/versions/d3dbf656a499_add_gradient_column_in_flow.py
@@ -5,28 +5,28 @@
 Create Date: 2024-09-27 09:35:19.424089

 """
-from typing import Sequence, Union
+
+from collections.abc import Sequence

 import sqlalchemy as sa
 import sqlmodel
 from alembic import op
-from sqlalchemy.engine.reflection import Inspector

 from langflow.utils import migration

 # revision identifiers, used by Alembic.
-revision: str = 'd3dbf656a499'
-down_revision: Union[str, None] = 'e5a65ecff2cd'
-branch_labels: Union[str, Sequence[str], None] = None
-depends_on: Union[str, Sequence[str], None] = None
+revision: str = "d3dbf656a499"
+down_revision: str | None = "e5a65ecff2cd"
+branch_labels: str | Sequence[str] | None = None
+depends_on: str | Sequence[str] | None = None


 def upgrade() -> None:
     conn = op.get_bind()
     # ### commands auto generated by Alembic - please adjust! ###
-    with op.batch_alter_table('flow', schema=None) as batch_op:
-        if not migration.column_exists(table_name='flow', column_name='gradient', conn=conn):
-            batch_op.add_column(sa.Column('gradient', sqlmodel.sql.sqltypes.AutoString(), nullable=True))
+    with op.batch_alter_table("flow", schema=None) as batch_op:
+        if not migration.column_exists(table_name="flow", column_name="gradient", conn=conn):
+            batch_op.add_column(sa.Column("gradient", sqlmodel.sql.sqltypes.AutoString(), nullable=True))

     # ### end Alembic commands ###
@@ -34,8 +34,8 @@ def upgrade() -> None:
 def downgrade() -> None:
     conn = op.get_bind()
     # ### commands auto generated by Alembic - please adjust! ###
-    with op.batch_alter_table('flow', schema=None) as batch_op:
-        if migration.column_exists(table_name='flow', column_name='gradient', conn=conn):
-            batch_op.drop_column('gradient')
+    with op.batch_alter_table("flow", schema=None) as batch_op:
+        if migration.column_exists(table_name="flow", column_name="gradient", conn=conn):
+            batch_op.drop_column("gradient")

     # ### end Alembic commands ###
diff --git a/src/backend/base/langflow/alembic/versions/d9a6ea21edcd_rename_default_folder.py b/src/backend/base/langflow/alembic/versions/d9a6ea21edcd_rename_default_folder.py
index 631dcfb62a4c..500bbf15a8ba 100644
--- a/src/backend/base/langflow/alembic/versions/d9a6ea21edcd_rename_default_folder.py
+++ b/src/backend/base/langflow/alembic/versions/d9a6ea21edcd_rename_default_folder.py
@@ -5,20 +5,17 @@
 Create Date: 2025-07-02 09:42:46.891585

 """
-from typing import Sequence, Union
-from alembic import op
-import sqlalchemy as sa
-import sqlmodel
-from sqlalchemy.engine.reflection import Inspector
-from langflow.utils import migration
+
+from collections.abc import Sequence

+import sqlalchemy as sa
+from alembic import op

 # revision identifiers, used by Alembic.
-revision: str = 'd9a6ea21edcd'
-down_revision: Union[str, None] = '66f72f04a1de'
-branch_labels: Union[str, Sequence[str], None] = None
-depends_on: Union[str, Sequence[str], None] = None
+revision: str = "d9a6ea21edcd"
+down_revision: str | None = "66f72f04a1de"
+branch_labels: str | Sequence[str] | None = None
+depends_on: str | Sequence[str] | None = None


 def upgrade() -> None:
diff --git a/src/backend/base/langflow/alembic/versions/dd9e0804ebd1_add_v2_file_table.py b/src/backend/base/langflow/alembic/versions/dd9e0804ebd1_add_v2_file_table.py
index 2f9575b99f53..e7bf28fcbbb4 100644
--- a/src/backend/base/langflow/alembic/versions/dd9e0804ebd1_add_v2_file_table.py
+++ b/src/backend/base/langflow/alembic/versions/dd9e0804ebd1_add_v2_file_table.py
@@ -5,19 +5,20 @@
 Create Date: 2025-02-03 11:47:16.101523

 """
-from typing import Sequence, Union
-from alembic import op
+
+from collections.abc import Sequence
+
 import sqlalchemy as sa
 import sqlmodel
-from langflow.utils import migration
+from alembic import op

+from langflow.utils import migration

 # revision identifiers, used by Alembic.
-revision: str = 'dd9e0804ebd1'
-down_revision: Union[str, None] = 'e3162c1804e6'
-branch_labels: Union[str, Sequence[str], None] = None
-depends_on: Union[str, Sequence[str], None] = None
+revision: str = "dd9e0804ebd1"
+down_revision: str | None = "e3162c1804e6"
+branch_labels: str | Sequence[str] | None = None
+depends_on: str | Sequence[str] | None = None


 def upgrade() -> None:
diff --git a/src/backend/base/langflow/alembic/versions/e3162c1804e6_add_persistent_locked_state.py b/src/backend/base/langflow/alembic/versions/e3162c1804e6_add_persistent_locked_state.py
index 9bfa34eb968e..fde5951758a4 100644
--- a/src/backend/base/langflow/alembic/versions/e3162c1804e6_add_persistent_locked_state.py
+++ b/src/backend/base/langflow/alembic/versions/e3162c1804e6_add_persistent_locked_state.py
@@ -5,18 +5,18 @@
 Create Date: 2024-11-07 14:50:35.201760

 """
-from typing import Sequence, Union
+
+from collections.abc import Sequence

 import sqlalchemy as sa
-import sqlmodel
 from alembic import op
 from sqlalchemy.engine.reflection import Inspector

 # revision identifiers, used by Alembic.
-revision: str = 'e3162c1804e6'
-down_revision: Union[str, None] = '1eab2c3eb45e'
-branch_labels: Union[str, Sequence[str], None] = None
-depends_on: Union[str, Sequence[str], None] = None
+revision: str = "e3162c1804e6"
+down_revision: str | None = "1eab2c3eb45e"
+branch_labels: str | Sequence[str] | None = None
+depends_on: str | Sequence[str] | None = None


 def upgrade() -> None:
@@ -25,9 +25,9 @@ def upgrade() -> None:
     table_names = inspector.get_table_names()  # noqa
     column_names = [column["name"] for column in inspector.get_columns("flow")]
     # ### commands auto generated by Alembic - please adjust! ###
-    with op.batch_alter_table('flow', schema=None) as batch_op:
+    with op.batch_alter_table("flow", schema=None) as batch_op:
         if "locked" not in column_names:
-            batch_op.add_column(sa.Column('locked', sa.Boolean(), nullable=True))
+            batch_op.add_column(sa.Column("locked", sa.Boolean(), nullable=True))

     # ### end Alembic commands ###
@@ -37,7 +37,7 @@ def downgrade() -> None:
     table_names = inspector.get_table_names()  # noqa
     column_names = [column["name"] for column in inspector.get_columns("flow")]
     # ### commands auto generated by Alembic - please adjust! ###
-    with op.batch_alter_table('flow', schema=None) as batch_op:
+    with op.batch_alter_table("flow", schema=None) as batch_op:
         if "locked" in column_names:
-            batch_op.drop_column('locked')
+            batch_op.drop_column("locked")

     # ### end Alembic commands ###
diff --git a/src/backend/base/langflow/alembic/versions/e3bc869fa272_fix_nullable.py b/src/backend/base/langflow/alembic/versions/e3bc869fa272_fix_nullable.py
index 2d806acf5ba4..904f977d98e1 100644
--- a/src/backend/base/langflow/alembic/versions/e3bc869fa272_fix_nullable.py
+++ b/src/backend/base/langflow/alembic/versions/e3bc869fa272_fix_nullable.py
@@ -6,17 +6,16 @@

 """

-from typing import Sequence, Union
+from collections.abc import Sequence

 import sqlalchemy as sa
 from alembic import op
-from sqlalchemy.engine.reflection import Inspector

 # revision identifiers, used by Alembic.
 revision: str = "e3bc869fa272"
-down_revision: Union[str, None] = "1a110b568907"
-branch_labels: Union[str, Sequence[str], None] = None
-depends_on: Union[str, Sequence[str], None] = None
+down_revision: str | None = "1a110b568907"
+branch_labels: str | Sequence[str] | None = None
+depends_on: str | Sequence[str] | None = None


 def upgrade() -> None:
diff --git a/src/backend/base/langflow/alembic/versions/e56d87f8994a_add_optins_column_to_user.py b/src/backend/base/langflow/alembic/versions/e56d87f8994a_add_optins_column_to_user.py
index 599c8138d126..bab5fb83c588 100644
--- a/src/backend/base/langflow/alembic/versions/e56d87f8994a_add_optins_column_to_user.py
+++ b/src/backend/base/langflow/alembic/versions/e56d87f8994a_add_optins_column_to_user.py
@@ -5,34 +5,33 @@
 Create Date: 2025-04-09 15:57:46.904977

 """
-from typing import Sequence, Union
-from alembic import op
+
+from collections.abc import Sequence

 import sqlalchemy as sa
-import sqlmodel
-from sqlalchemy.engine.reflection import Inspector
-from langflow.utils import migration
+from alembic import op

+from langflow.utils import migration

 # revision identifiers, used by Alembic.
-revision: str = 'e56d87f8994a'
-down_revision: Union[str, None] = '1b8b740a6fa3'
-branch_labels: Union[str, Sequence[str], None] = None
-depends_on: Union[str, Sequence[str], None] = None
+revision: str = "e56d87f8994a"
+down_revision: str | None = "1b8b740a6fa3"
+branch_labels: str | Sequence[str] | None = None
+depends_on: str | Sequence[str] | None = None


 def upgrade() -> None:
     conn = op.get_bind()
     # ### commands auto generated by Alembic - please adjust! ###
-    if not migration.column_exists(table_name='user', column_name='optins', conn=conn):
-        with op.batch_alter_table('user', schema=None) as batch_op:
-            batch_op.add_column(sa.Column('optins', sa.JSON(), nullable=True))
+    if not migration.column_exists(table_name="user", column_name="optins", conn=conn):
+        with op.batch_alter_table("user", schema=None) as batch_op:
+            batch_op.add_column(sa.Column("optins", sa.JSON(), nullable=True))

     # ### end Alembic commands ###


 def downgrade() -> None:
     conn = op.get_bind()
     # ### commands auto generated by Alembic - please adjust! ###
-    with op.batch_alter_table('user', schema=None) as batch_op:
-        batch_op.drop_column('optins')
+    with op.batch_alter_table("user", schema=None) as batch_op:
+        batch_op.drop_column("optins")

     # ### end Alembic commands ###
diff --git a/src/backend/base/langflow/alembic/versions/e5a65ecff2cd_nullable_in_vertex_build.py b/src/backend/base/langflow/alembic/versions/e5a65ecff2cd_nullable_in_vertex_build.py
index b22ee1cb0733..f9505345d0a5 100644
--- a/src/backend/base/langflow/alembic/versions/e5a65ecff2cd_nullable_in_vertex_build.py
+++ b/src/backend/base/langflow/alembic/versions/e5a65ecff2cd_nullable_in_vertex_build.py
@@ -6,19 +6,18 @@

 """

-from typing import Sequence, Union
+from collections.abc import Sequence

 import sqlalchemy as sa
 from alembic import op
-from sqlalchemy.engine.reflection import Inspector

 from langflow.utils import migration

 # revision identifiers, used by Alembic.
 revision: str = "e5a65ecff2cd"
-down_revision: Union[str, None] = "4522eb831f5c"
-branch_labels: Union[str, Sequence[str], None] = None
-depends_on: Union[str, Sequence[str], None] = None
+down_revision: str | None = "4522eb831f5c"
+branch_labels: str | Sequence[str] | None = None
+depends_on: str | Sequence[str] | None = None


 def upgrade() -> None:
diff --git a/src/backend/base/langflow/alembic/versions/eb5866d51fd2_change_columns_to_be_nullable.py b/src/backend/base/langflow/alembic/versions/eb5866d51fd2_change_columns_to_be_nullable.py
index acb09cd5f4c4..d9687ed3503a 100644
--- a/src/backend/base/langflow/alembic/versions/eb5866d51fd2_change_columns_to_be_nullable.py
+++ b/src/backend/base/langflow/alembic/versions/eb5866d51fd2_change_columns_to_be_nullable.py
@@ -6,22 +6,21 @@

 """

-from typing import Sequence, Union
+from collections.abc import Sequence

 from alembic import op

 # revision identifiers, used by Alembic.
 revision: str = "eb5866d51fd2"
-down_revision: Union[str, None] = "67cc006d50bf"
-branch_labels: Union[str, Sequence[str], None] = None
-depends_on: Union[str, Sequence[str], None] = None
+down_revision: str | None = "67cc006d50bf"
+branch_labels: str | Sequence[str] | None = None
+depends_on: str | Sequence[str] | None = None


 def upgrade() -> None:
     # ### commands auto generated by Alembic - please adjust! ###
     connection = op.get_bind()  # noqa
-    pass
     # ### end Alembic commands ###
diff --git a/src/backend/base/langflow/alembic/versions/eb5e72293a8e_add_error_and_edit_flags_to_message.py b/src/backend/base/langflow/alembic/versions/eb5e72293a8e_add_error_and_edit_flags_to_message.py
index 496ab2a6abbe..3e928477f3ff 100644
--- a/src/backend/base/langflow/alembic/versions/eb5e72293a8e_add_error_and_edit_flags_to_message.py
+++ b/src/backend/base/langflow/alembic/versions/eb5e72293a8e_add_error_and_edit_flags_to_message.py
@@ -6,17 +6,16 @@

 """

-from typing import Sequence, Union
+from collections.abc import Sequence

 import sqlalchemy as sa
 from alembic import op
-from sqlalchemy.engine.reflection import Inspector

 # revision identifiers, used by Alembic.
 revision: str = "eb5e72293a8e"
-down_revision: Union[str, None] = "5ace73a7f223"
-branch_labels: Union[str, Sequence[str], None] = None
-depends_on: Union[str, Sequence[str], None] = None
+down_revision: str | None = "5ace73a7f223"
+branch_labels: str | Sequence[str] | None = None
+depends_on: str | Sequence[str] | None = None


 def upgrade() -> None:
diff --git a/src/backend/base/langflow/alembic/versions/f3b2d1f1002d_add_column_access_type_to_flow.py b/src/backend/base/langflow/alembic/versions/f3b2d1f1002d_add_column_access_type_to_flow.py
index c621d249e718..190fe42c14ca 100644
--- a/src/backend/base/langflow/alembic/versions/f3b2d1f1002d_add_column_access_type_to_flow.py
+++ b/src/backend/base/langflow/alembic/versions/f3b2d1f1002d_add_column_access_type_to_flow.py
@@ -5,33 +5,37 @@
 Create Date: 2025-02-05 14:35:29.658101

 """
-from typing import Sequence, Union
-from alembic import op
+
+from collections.abc import Sequence
+
 import sqlalchemy as sa
-from langflow.utils import migration
+from alembic import op

+from langflow.utils import migration

 # revision identifiers, used by Alembic.
-revision: str = 'f3b2d1f1002d'
-down_revision: Union[str, None] = '93e2705fa8d6'
-branch_labels: Union[str, Sequence[str], None] = None
-depends_on: Union[str, Sequence[str], None] = None
+revision: str = "f3b2d1f1002d"
+down_revision: str | None = "93e2705fa8d6"
+branch_labels: str | Sequence[str] | None = None
+depends_on: str | Sequence[str] | None = None


 def upgrade() -> None:
     conn = op.get_bind()
-    access_type_enum = sa.Enum('PRIVATE', 'PUBLIC', name='access_type_enum')
+    access_type_enum = sa.Enum("PRIVATE", "PUBLIC", name="access_type_enum")
     access_type_enum.create(conn, checkfirst=True)

-    with op.batch_alter_table('flow', schema=None) as batch_op:
-        if not migration.column_exists(table_name='flow', column_name='access_type', conn=conn):
-            batch_op.add_column(sa.Column('access_type', access_type_enum, server_default=sa.text("'PRIVATE'"), nullable=False))
+    with op.batch_alter_table("flow", schema=None) as batch_op:
+        if not migration.column_exists(table_name="flow", column_name="access_type", conn=conn):
+            batch_op.add_column(
+                sa.Column("access_type", access_type_enum, server_default=sa.text("'PRIVATE'"), nullable=False)
+            )
+

 def downgrade() -> None:
     conn = op.get_bind()
-    with op.batch_alter_table('flow', schema=None) as batch_op:
-        if migration.column_exists(table_name='flow', column_name='access_type', conn=conn):
-            batch_op.drop_column('access_type')
+    with op.batch_alter_table("flow", schema=None) as batch_op:
+        if migration.column_exists(table_name="flow", column_name="access_type", conn=conn):
+            batch_op.drop_column("access_type")

-    access_type_enum = sa.Enum('PRIVATE', 'PUBLIC', name='access_type_enum')
+    access_type_enum = sa.Enum("PRIVATE", "PUBLIC", name="access_type_enum")
     access_type_enum.drop(conn, checkfirst=True)
diff --git a/src/backend/base/langflow/alembic/versions/f5ee9749d1a6_user_id_can_be_null_in_flow.py b/src/backend/base/langflow/alembic/versions/f5ee9749d1a6_user_id_can_be_null_in_flow.py
index 842c558571b8..6f5ec2a4c487 100644
--- a/src/backend/base/langflow/alembic/versions/f5ee9749d1a6_user_id_can_be_null_in_flow.py
+++ b/src/backend/base/langflow/alembic/versions/f5ee9749d1a6_user_id_can_be_null_in_flow.py
@@ -6,16 +6,16 @@

 """

-from typing import Sequence, Union
+from collections.abc import Sequence

 import sqlalchemy as sa
 from alembic import op

 # revision identifiers, used by Alembic.
revision: str = "f5ee9749d1a6" -down_revision: Union[str, None] = "7843803a87b5" -branch_labels: Union[str, Sequence[str], None] = None -depends_on: Union[str, Sequence[str], None] = None +down_revision: str | None = "7843803a87b5" +branch_labels: str | Sequence[str] | None = None +depends_on: str | Sequence[str] | None = None def upgrade() -> None: @@ -25,7 +25,6 @@ def upgrade() -> None: batch_op.alter_column("user_id", existing_type=sa.CHAR(length=32), nullable=True) except Exception as e: print(e) - pass # ### end Alembic commands ### @@ -37,6 +36,5 @@ def downgrade() -> None: batch_op.alter_column("user_id", existing_type=sa.CHAR(length=32), nullable=False) except Exception as e: print(e) - pass # ### end Alembic commands ### diff --git a/src/backend/base/langflow/alembic/versions/fd531f8868b1_fix_credential_table.py b/src/backend/base/langflow/alembic/versions/fd531f8868b1_fix_credential_table.py index e180b713c77a..b462954bc86f 100644 --- a/src/backend/base/langflow/alembic/versions/fd531f8868b1_fix_credential_table.py +++ b/src/backend/base/langflow/alembic/versions/fd531f8868b1_fix_credential_table.py @@ -6,17 +6,16 @@ """ -from typing import Optional, Sequence, Union +from collections.abc import Sequence import sqlalchemy as sa from alembic import op -from sqlalchemy.engine.reflection import Inspector # revision identifiers, used by Alembic. revision: str = "fd531f8868b1" -down_revision: Union[str, None] = "2ac71eb9c3ae" -branch_labels: Union[str, Sequence[str], None] = None -depends_on: Union[str, Sequence[str], None] = None +down_revision: str | None = "2ac71eb9c3ae" +branch_labels: str | Sequence[str] | None = None +depends_on: str | Sequence[str] | None = None def upgrade() -> None: @@ -35,7 +34,6 @@ def upgrade() -> None: batch_op.create_foreign_key("fk_credential_user_id", "user", ["user_id"], ["id"]) except Exception as e: print(e) - pass # ### end Alembic commands ### @@ -45,7 +43,7 @@ def downgrade() -> None: conn = op.get_bind() inspector = sa.inspect(conn) # type: ignore tables = inspector.get_table_names() - foreign_keys_names: list[Optional[str]] = [] + foreign_keys_names: list[str | None] = [] if "credential" in tables: foreign_keys = inspector.get_foreign_keys("credential") foreign_keys_names = [fk["name"] for fk in foreign_keys] @@ -55,6 +53,5 @@ def downgrade() -> None: batch_op.drop_constraint("fk_credential_user_id", type_="foreignkey") except Exception as e: print(e) - pass # ### end Alembic commands ### diff --git a/src/backend/base/langflow/api/build.py b/src/backend/base/langflow/api/build.py index a35980c8fe42..570e565041d6 100644 --- a/src/backend/base/langflow/api/build.py +++ b/src/backend/base/langflow/api/build.py @@ -6,6 +6,10 @@ from collections.abc import AsyncIterator from fastapi import BackgroundTasks, HTTPException, Response +from lfx.graph.graph.base import Graph +from lfx.graph.utils import log_vertex_build +from lfx.log.logger import logger +from lfx.schema.schema import InputValueRequest from sqlmodel import select from langflow.api.disconnect import DisconnectHandlerStreamingResponse @@ -19,12 +23,9 @@ get_top_level_vertices, parse_exception, ) -from langflow.api.v1.schemas import FlowDataRequest, InputValueRequest, ResultDataResponse, VertexBuildResponse +from langflow.api.v1.schemas import FlowDataRequest, ResultDataResponse, VertexBuildResponse from langflow.events.event_manager import EventManager from langflow.exceptions.component import ComponentBuildError -from langflow.graph.graph.base import Graph -from langflow.graph.utils import 
-from langflow.logging.logger import logger
 from langflow.schema.message import ErrorMessage
 from langflow.schema.schema import OutputValue
 from langflow.services.database.models.flow.model import Flow
diff --git a/src/backend/base/langflow/api/health_check_router.py b/src/backend/base/langflow/api/health_check_router.py
index 02c4c387bdab..84968b1af243 100644
--- a/src/backend/base/langflow/api/health_check_router.py
+++ b/src/backend/base/langflow/api/health_check_router.py
@@ -1,11 +1,11 @@
 import uuid

 from fastapi import APIRouter, HTTPException, status
+from lfx.log.logger import logger
 from pydantic import BaseModel
 from sqlmodel import select

 from langflow.api.utils import DbSession
-from langflow.logging.logger import logger
 from langflow.services.database.models.flow.model import Flow
 from langflow.services.deps import get_chat_service
diff --git a/src/backend/base/langflow/api/limited_background_tasks.py b/src/backend/base/langflow/api/limited_background_tasks.py
index b09bc31db82b..e316524620a6 100644
--- a/src/backend/base/langflow/api/limited_background_tasks.py
+++ b/src/backend/base/langflow/api/limited_background_tasks.py
@@ -1,6 +1,6 @@
 from fastapi import BackgroundTasks
+from lfx.graph.utils import log_vertex_build

-from langflow.graph.utils import log_vertex_build
 from langflow.services.deps import get_settings_service
diff --git a/src/backend/base/langflow/api/log_router.py b/src/backend/base/langflow/api/log_router.py
index 3b3af73cd9b2..67492f3670df 100644
--- a/src/backend/base/langflow/api/log_router.py
+++ b/src/backend/base/langflow/api/log_router.py
@@ -5,8 +5,7 @@

 from fastapi import APIRouter, HTTPException, Query, Request
 from fastapi.responses import JSONResponse, StreamingResponse
-
-from langflow.logging.logger import log_buffer
+from lfx.log.logger import log_buffer

 log_router = APIRouter(tags=["Log"])
diff --git a/src/backend/base/langflow/api/utils.py b/src/backend/base/langflow/api/utils.py
index 0982ee02c88d..55e1fd32e39c 100644
--- a/src/backend/base/langflow/api/utils.py
+++ b/src/backend/base/langflow/api/utils.py
@@ -8,11 +8,11 @@

 from fastapi import Depends, HTTPException, Query
 from fastapi_pagination import Params
+from lfx.graph.graph.base import Graph
+from lfx.log.logger import logger
 from sqlalchemy import delete
 from sqlmodel.ext.asyncio.session import AsyncSession

-from langflow.graph.graph.base import Graph
-from langflow.logging.logger import logger
 from langflow.services.auth.utils import get_current_active_user, get_current_active_user_mcp
 from langflow.services.database.models.flow.model import Flow
 from langflow.services.database.models.message.model import MessageTable
diff --git a/src/backend/base/langflow/api/v1/base.py b/src/backend/base/langflow/api/v1/base.py
index 879637b88935..d6beb81c18f0 100644
--- a/src/backend/base/langflow/api/v1/base.py
+++ b/src/backend/base/langflow/api/v1/base.py
@@ -1,7 +1,6 @@
+from lfx.template.frontend_node.base import FrontendNode
 from pydantic import BaseModel, field_validator, model_serializer

-from langflow.template.frontend_node.base import FrontendNode
-

 class CacheResponse(BaseModel):
     data: dict
diff --git a/src/backend/base/langflow/api/v1/callback.py b/src/backend/base/langflow/api/v1/callback.py
index 2459bf5d6f67..5628d2be123c 100644
--- a/src/backend/base/langflow/api/v1/callback.py
+++ b/src/backend/base/langflow/api/v1/callback.py
@@ -1,19 +1,16 @@
 from __future__ import annotations

-from typing import TYPE_CHECKING, Any
+from typing import Any
 from uuid import UUID

 from langchain_core.agents import AgentAction, AgentFinish
 from langchain_core.callbacks.base import AsyncCallbackHandler
+from lfx.log.logger import logger
+from lfx.utils.util import remove_ansi_escape_codes
 from typing_extensions import override

 from langflow.api.v1.schemas import ChatResponse, PromptResponse
-from langflow.logging.logger import logger
-from langflow.services.deps import get_chat_service, get_socket_service
-from langflow.utils.util import remove_ansi_escape_codes
-
-if TYPE_CHECKING:
-    from langflow.services.socket.service import SocketIOService
+from langflow.services.deps import get_chat_service

 # https://github.com/hwchase17/chat-langchain/blob/master/callback.py
@@ -28,9 +25,7 @@ def ignore_chain(self) -> bool:
     def __init__(self, session_id: str):
         self.chat_service = get_chat_service()
         self.client_id = session_id
-        self.socketio_service: SocketIOService = get_socket_service()
         self.sid = session_id
-        # self.socketio_service = self.chat_service.active_connections[self.client_id]

     @override
     async def on_llm_new_token(self, token: str, **kwargs: Any) -> None:  # type: ignore[misc]
diff --git a/src/backend/base/langflow/api/v1/chat.py b/src/backend/base/langflow/api/v1/chat.py
index 693901961a71..e3c9f5c9e28c 100644
--- a/src/backend/base/langflow/api/v1/chat.py
+++ b/src/backend/base/langflow/api/v1/chat.py
@@ -8,6 +8,11 @@

 from fastapi import APIRouter, BackgroundTasks, Body, Depends, HTTPException, Request, status
 from fastapi.responses import StreamingResponse
+from lfx.graph.graph.base import Graph
+from lfx.graph.utils import log_vertex_build
+from lfx.log.logger import logger
+from lfx.schema.schema import InputValueRequest, OutputValue
+from lfx.services.cache.utils import CacheMiss

 from langflow.api.build import cancel_flow_build, get_flow_events_response, start_flow_build
 from langflow.api.limited_background_tasks import LimitVertexBuildBackgroundTasks
@@ -26,18 +31,12 @@
 from langflow.api.v1.schemas import (
     CancelFlowResponse,
     FlowDataRequest,
-    InputValueRequest,
     ResultDataResponse,
     StreamData,
     VertexBuildResponse,
     VerticesOrderResponse,
 )
 from langflow.exceptions.component import ComponentBuildError
-from langflow.graph.graph.base import Graph
-from langflow.graph.utils import log_vertex_build
-from langflow.logging.logger import logger
-from langflow.schema.schema import OutputValue
-from langflow.services.cache.utils import CacheMiss
 from langflow.services.chat.service import ChatService
 from langflow.services.database.models.flow.model import Flow
 from langflow.services.deps import (
@@ -51,7 +50,7 @@
 from langflow.services.telemetry.schema import ComponentPayload, PlaygroundPayload

 if TYPE_CHECKING:
-    from langflow.graph.vertex.vertex_types import InterfaceVertex
+    from lfx.graph.vertex.vertex_types import InterfaceVertex

 router = APIRouter(tags=["Chat"])
diff --git a/src/backend/base/langflow/api/v1/endpoints.py b/src/backend/base/langflow/api/v1/endpoints.py
index 9ddc7a02a4ee..4a86bc06ad8e 100644
--- a/src/backend/base/langflow/api/v1/endpoints.py
+++ b/src/backend/base/langflow/api/v1/endpoints.py
@@ -11,6 +11,18 @@

 from fastapi import APIRouter, BackgroundTasks, Body, Depends, HTTPException, Request, UploadFile, status
 from fastapi.encoders import jsonable_encoder
 from fastapi.responses import StreamingResponse
+from lfx.custom.custom_component.component import Component
+from lfx.custom.utils import (
+    add_code_field_to_build_config,
+    build_custom_component_template,
+    get_instance_name,
+    update_component_build_config,
+)
+from lfx.graph.graph.base import Graph
+from lfx.graph.schema import RunOutputs
+from lfx.log.logger import logger
+from lfx.schema.schema import InputValueRequest
+from lfx.services.settings.service import SettingsService
 from sqlmodel import select

 from langflow.api.utils import CurrentActiveUser, DbSession, parse_value
@@ -18,28 +30,17 @@
     ConfigResponse,
     CustomComponentRequest,
     CustomComponentResponse,
-    InputValueRequest,
     RunResponse,
     SimplifiedAPIRequest,
     TaskStatusResponse,
     UpdateCustomComponentRequest,
     UploadFileResponse,
 )
-from langflow.custom.custom_component.component import Component
-from langflow.custom.utils import (
-    add_code_field_to_build_config,
-    build_custom_component_template,
-    get_instance_name,
-    update_component_build_config,
-)
 from langflow.events.event_manager import create_stream_tokens_event_manager
 from langflow.exceptions.api import APIException, InvalidChatInputError
 from langflow.exceptions.serialization import SerializationError
-from langflow.graph.graph.base import Graph
-from langflow.graph.schema import RunOutputs
 from langflow.helpers.flow import get_flow_by_id_or_endpoint_name
 from langflow.interface.initialize.loading import update_params_with_load_from_db_fields
-from langflow.logging.logger import logger
 from langflow.processing.process import process_tweaks, run_graph_internal
 from langflow.schema.graph import Tweaks
 from langflow.services.auth.utils import api_key_security, get_current_active_user, get_webhook_user
@@ -54,7 +55,6 @@

 if TYPE_CHECKING:
     from langflow.events.event_manager import EventManager
-    from langflow.services.settings.service import SettingsService

 router = APIRouter(tags=["Base"])
@@ -723,9 +723,9 @@ async def custom_component_update(
         for field_name, field_dict in template.items()
         if isinstance(field_dict, dict) and field_dict.get("load_from_db") and field_dict.get("value")
     ]
-
-    params = await update_params_with_load_from_db_fields(cc_instance, params, load_from_db_fields)
-    cc_instance.set_attributes(params)
+    if isinstance(cc_instance, Component):
+        params = await update_params_with_load_from_db_fields(cc_instance, params, load_from_db_fields)
+        cc_instance.set_attributes(params)
     updated_build_config = code_request.get_template()
     await update_component_build_config(
         cc_instance,
diff --git a/src/backend/base/langflow/api/v1/files.py b/src/backend/base/langflow/api/v1/files.py
index 6909d87f56bd..b50015535261 100644
--- a/src/backend/base/langflow/api/v1/files.py
+++ b/src/backend/base/langflow/api/v1/files.py
@@ -8,12 +8,12 @@

 from fastapi import APIRouter, Depends, HTTPException, UploadFile
 from fastapi.responses import StreamingResponse
+from lfx.services.settings.service import SettingsService

 from langflow.api.utils import CurrentActiveUser, DbSession
 from langflow.api.v1.schemas import UploadFileResponse
 from langflow.services.database.models.flow.model import Flow
 from langflow.services.deps import get_settings_service, get_storage_service
-from langflow.services.settings.service import SettingsService
 from langflow.services.storage.service import StorageService
 from langflow.services.storage.utils import build_content_type_from_extension
diff --git a/src/backend/base/langflow/api/v1/flows.py b/src/backend/base/langflow/api/v1/flows.py
index 04faa6898788..6caa0f7bc05b 100644
--- a/src/backend/base/langflow/api/v1/flows.py
+++ b/src/backend/base/langflow/api/v1/flows.py
@@ -16,6 +16,7 @@
 from fastapi.responses import StreamingResponse
 from fastapi_pagination import Page, Params
 from fastapi_pagination.ext.sqlmodel import apaginate
+from lfx.log import logger
 from sqlmodel import and_, col, select
 from sqlmodel.ext.asyncio.session import AsyncSession

@@ -23,7 +24,6 @@
 from langflow.api.v1.schemas import FlowListCreate
 from langflow.helpers.user import get_user_by_flow_id_or_endpoint_name
 from langflow.initial_setup.constants import STARTER_FOLDER_NAME
-from langflow.logging import logger
 from langflow.services.database.models.flow.model import (
     AccessTypeEnum,
     Flow,
diff --git a/src/backend/base/langflow/api/v1/knowledge_bases.py b/src/backend/base/langflow/api/v1/knowledge_bases.py
index d2375b9b14cc..b5bbbd57d95c 100644
--- a/src/backend/base/langflow/api/v1/knowledge_bases.py
+++ b/src/backend/base/langflow/api/v1/knowledge_bases.py
@@ -6,10 +6,10 @@
 import pandas as pd
 from fastapi import APIRouter, HTTPException
 from langchain_chroma import Chroma
+from lfx.log import logger
 from pydantic import BaseModel

 from langflow.api.utils import CurrentActiveUser
-from langflow.logging import logger
 from langflow.services.deps import get_settings_service

 router = APIRouter(tags=["Knowledge Bases"], prefix="/knowledge_bases")
diff --git a/src/backend/base/langflow/api/v1/mcp.py b/src/backend/base/langflow/api/v1/mcp.py
index 7d6b5d55e142..d4ed8b18c643 100644
--- a/src/backend/base/langflow/api/v1/mcp.py
+++ b/src/backend/base/langflow/api/v1/mcp.py
@@ -4,6 +4,7 @@
 from anyio import BrokenResourceError
 from fastapi import APIRouter, HTTPException, Request, Response
 from fastapi.responses import HTMLResponse, StreamingResponse
+from lfx.log.logger import logger
 from mcp import types
 from mcp.server import NotificationOptions, Server
 from mcp.server.sse import SseServerTransport
@@ -17,7 +18,6 @@
     handle_mcp_errors,
     handle_read_resource,
 )
-from langflow.logging.logger import logger
 from langflow.services.deps import get_settings_service

 router = APIRouter(prefix="/mcp", tags=["mcp"])
diff --git a/src/backend/base/langflow/api/v1/mcp_projects.py b/src/backend/base/langflow/api/v1/mcp_projects.py
index 2a8150ccbeb0..dfed41aca62c 100644
--- a/src/backend/base/langflow/api/v1/mcp_projects.py
+++ b/src/backend/base/langflow/api/v1/mcp_projects.py
@@ -14,6 +14,11 @@
 from anyio import BrokenResourceError
 from fastapi import APIRouter, Depends, HTTPException, Request, Response
 from fastapi.responses import HTMLResponse
+from lfx.base.mcp.constants import MAX_MCP_SERVER_NAME_LENGTH
+from lfx.base.mcp.util import sanitize_mcp_name
+from lfx.log import logger
+from lfx.services.deps import get_settings_service, session_scope
+from lfx.services.settings.feature_flags import FEATURE_FLAGS
 from mcp import types
 from mcp.server import NotificationOptions, Server
 from mcp.server.sse import SseServerTransport
@@ -36,16 +41,11 @@
     MCPProjectUpdateRequest,
     MCPSettings,
 )
-from langflow.base.mcp.constants import MAX_MCP_SERVER_NAME_LENGTH
-from langflow.base.mcp.util import sanitize_mcp_name
-from langflow.logging import logger
 from langflow.services.auth.mcp_encryption import decrypt_auth_settings, encrypt_auth_settings
 from langflow.services.database.models import Flow, Folder
 from langflow.services.database.models.api_key.crud import check_key, create_api_key
 from langflow.services.database.models.api_key.model import ApiKeyCreate
 from langflow.services.database.models.user.model import User
-from langflow.services.deps import get_settings_service, session_scope
-from langflow.services.settings.feature_flags import FEATURE_FLAGS

 router = APIRouter(prefix="/mcp/project", tags=["mcp_projects"])
@@ -249,6 +249,15 @@ async def handle_project_sse(
     current_user: Annotated[User, Depends(verify_project_auth_conditional)],
 ):
     """Handle SSE connections for a specific project."""
+    # Verify project exists and user has access
+    async with session_scope() as session:
+        project = (
+            await session.exec(select(Folder).where(Folder.id == project_id, Folder.user_id == current_user.id))
+        ).first()
+
+        if not project:
+            raise HTTPException(status_code=404, detail="Project not found")
+
     # Get project-specific SSE transport and MCP server
     sse = get_project_sse(project_id)
     project_server = get_project_mcp_server(project_id)
diff --git a/src/backend/base/langflow/api/v1/mcp_utils.py b/src/backend/base/langflow/api/v1/mcp_utils.py
index ae8e05ccb06f..86b8e46c56fe 100644
--- a/src/backend/base/langflow/api/v1/mcp_utils.py
+++ b/src/backend/base/langflow/api/v1/mcp_utils.py
@@ -12,15 +12,15 @@
 from urllib.parse import quote, unquote, urlparse
 from uuid import uuid4

+from lfx.base.mcp.constants import MAX_MCP_TOOL_NAME_LENGTH
+from lfx.base.mcp.util import get_flow_snake_case, get_unique_name, sanitize_mcp_name
+from lfx.log.logger import logger
 from mcp import types
 from sqlmodel import select

 from langflow.api.v1.endpoints import simple_run_flow
 from langflow.api.v1.schemas import SimplifiedAPIRequest
-from langflow.base.mcp.constants import MAX_MCP_TOOL_NAME_LENGTH
-from langflow.base.mcp.util import get_flow_snake_case, get_unique_name, sanitize_mcp_name
 from langflow.helpers.flow import json_schema_from_flow
-from langflow.logging.logger import logger
 from langflow.schema.message import Message
 from langflow.services.database.models import Flow
 from langflow.services.database.models.user.model import User
diff --git a/src/backend/base/langflow/api/v1/openai_responses.py b/src/backend/base/langflow/api/v1/openai_responses.py
index ca0c280b3a4d..bc3ed66a4dde 100644
--- a/src/backend/base/langflow/api/v1/openai_responses.py
+++ b/src/backend/base/langflow/api/v1/openai_responses.py
@@ -7,20 +7,20 @@

 from fastapi import APIRouter, BackgroundTasks, Depends, HTTPException, Request
 from fastapi.responses import StreamingResponse
-from loguru import logger
+from lfx.log.logger import logger
+from lfx.schema.openai_responses_schemas import create_openai_error

 from langflow.api.v1.endpoints import consume_and_yield, run_flow_generator, simple_run_flow
 from langflow.api.v1.schemas import SimplifiedAPIRequest
 from langflow.events.event_manager import create_stream_tokens_event_manager
 from langflow.helpers.flow import get_flow_by_id_or_endpoint_name
-from langflow.schema.content_types import ToolContent
-from langflow.schema.openai_responses_schemas import (
+from langflow.schema import (
     OpenAIErrorResponse,
     OpenAIResponsesRequest,
     OpenAIResponsesResponse,
     OpenAIResponsesStreamChunk,
-    create_openai_error,
 )
+from langflow.schema.content_types import ToolContent
 from langflow.services.auth.utils import api_key_security
 from langflow.services.database.models.flow.model import FlowRead
 from langflow.services.database.models.user.model import UserRead
diff --git a/src/backend/base/langflow/api/v1/schemas.py b/src/backend/base/langflow/api/v1/schemas.py
index fcf9e1e1f839..0fb3ddaade42 100644
--- a/src/backend/base/langflow/api/v1/schemas.py
+++ b/src/backend/base/langflow/api/v1/schemas.py
@@ -4,6 +4,9 @@
 from typing import Any, Literal
 from uuid import UUID

+from lfx.graph.schema import RunOutputs
+from lfx.services.settings.base import Settings
+from lfx.services.settings.feature_flags import FEATURE_FLAGS, FeatureFlags
 from pydantic import (
     BaseModel,
     ConfigDict,
@@ -14,7 +17,6 @@
     model_serializer,
 )

-from langflow.graph.schema import RunOutputs
 from langflow.schema.dotdict import dotdict
 from langflow.schema.graph import Tweaks
 from langflow.schema.schema import InputType, OutputType, OutputValue
@@ -23,8 +25,6 @@
 from langflow.services.database.models.base import orjson_dumps
 from langflow.services.database.models.flow.model import FlowCreate, FlowRead
 from langflow.services.database.models.user.model import UserRead
-from langflow.services.settings.base import Settings
-from langflow.services.settings.feature_flags import FEATURE_FLAGS, FeatureFlags
 from langflow.services.tracing.schema import Log
@@ -335,41 +335,6 @@ class VerticesBuiltResponse(BaseModel):
     vertices: list[VertexBuildResponse]


-class InputValueRequest(BaseModel):
-    components: list[str] | None = []
-    input_value: str | None = None
-    session: str | None = None
-    type: InputType | None = Field(
-        "any",
-        description="Defines on which components the input value should be applied. "
-        "'any' applies to all input components.",
-    )
-
-    # add an example
-    model_config = ConfigDict(
-        json_schema_extra={
-            "examples": [
-                {
-                    "components": ["components_id", "Component Name"],
-                    "input_value": "input_value",
-                    "session": "session_id",
-                },
-                {"components": ["Component Name"], "input_value": "input_value"},
-                {"input_value": "input_value"},
-                {
-                    "components": ["Component Name"],
-                    "input_value": "input_value",
-                    "session": "session_id",
-                },
-                {"input_value": "input_value", "session": "session_id"},
-                {"type": "chat", "input_value": "input_value"},
-                {"type": "json", "input_value": '{"key": "value"}'},
-            ]
-        },
-        extra="forbid",
-    )
-
-
 class SimplifiedAPIRequest(BaseModel):
     input_value: str | None = Field(default=None, description="The input value")
     input_type: InputType | None = Field(default="chat", description="The input type")
diff --git a/src/backend/base/langflow/api/v1/starter_projects.py b/src/backend/base/langflow/api/v1/starter_projects.py
index 8e8b99a84ade..c51ab02af371 100644
--- a/src/backend/base/langflow/api/v1/starter_projects.py
+++ b/src/backend/base/langflow/api/v1/starter_projects.py
@@ -1,17 +1,75 @@
+from typing import Any
+
 from fastapi import APIRouter, Depends, HTTPException
+from pydantic import BaseModel

-from langflow.graph.graph.schema import GraphDump
 from langflow.services.auth.utils import get_current_active_user

 router = APIRouter(prefix="/starter-projects", tags=["Flows"])


+# Pydantic models for API schema compatibility
+class ViewPort(BaseModel):
+    x: float
+    y: float
+    zoom: float
+
+
+class NodeData(BaseModel):
+    # This is a simplified version - the actual NodeData has many more fields
+    # but we only need the basic structure for the API schema
+    model_config = {"extra": "allow"}  # Allow extra fields
+
+
+class EdgeData(BaseModel):
+    # This is a simplified version - the actual EdgeData has many more fields
+    # but we only need the basic structure for the API schema
+    model_config = {"extra": "allow"}  # Allow extra fields
+
+
+class GraphData(BaseModel):
+    nodes: list[dict[str, Any]]  # Use dict to be flexible with the complex NodeData structure
+    edges: list[dict[str, Any]]  # Use dict to be flexible with the complex EdgeData structure
+    viewport: ViewPort | None = None
+
+
+class GraphDumpResponse(BaseModel):
+    data: GraphData
+    is_component: bool | None = None
+    name: str | None = None
+    description: str | None = None
+    endpoint_name: str | None = None
+
+
 @router.get("/", dependencies=[Depends(get_current_active_user)], status_code=200)
-async def get_starter_projects() -> list[GraphDump]:
+async def get_starter_projects() -> list[GraphDumpResponse]:
     """Get a list of starter projects."""
     from langflow.initial_setup.load import get_starter_projects_dump

     try:
-        return get_starter_projects_dump()
+        # Get the raw data from lfx GraphDump
+        raw_data = get_starter_projects_dump()
+
+        # Convert TypedDict GraphDump to Pydantic GraphDumpResponse
+        results = []
+        for item in raw_data:
+            # Create GraphData
+            graph_data = GraphData(
+                nodes=item.get("data", {}).get("nodes", []),
+                edges=item.get("data", {}).get("edges", []),
+                viewport=item.get("data", {}).get("viewport"),
+            )
+
+            # Create GraphDumpResponse
+            graph_dump = GraphDumpResponse(
+                data=graph_data,
+                is_component=item.get("is_component"),
+                name=item.get("name"),
+                description=item.get("description"),
+                endpoint_name=item.get("endpoint_name"),
+            )
+            results.append(graph_dump)
+
     except Exception as exc:
         raise HTTPException(status_code=500, detail=str(exc)) from exc
+    return results
diff --git a/src/backend/base/langflow/api/v1/store.py b/src/backend/base/langflow/api/v1/store.py
index 39b1bea7b6fe..3f61dcf334d4 100644
--- a/src/backend/base/langflow/api/v1/store.py
+++ b/src/backend/base/langflow/api/v1/store.py
@@ -2,9 +2,9 @@
 from uuid import UUID

 from fastapi import APIRouter, Depends, HTTPException, Query
+from lfx.log.logger import logger

 from langflow.api.utils import CurrentActiveUser, check_langflow_version
-from langflow.logging.logger import logger
 from langflow.services.auth import utils as auth_utils
 from langflow.services.deps import get_settings_service, get_store_service
 from langflow.services.store.exceptions import CustomError
diff --git a/src/backend/base/langflow/api/v1/validate.py b/src/backend/base/langflow/api/v1/validate.py
index a7b829a0921d..9150b3245290 100644
--- a/src/backend/base/langflow/api/v1/validate.py
+++ b/src/backend/base/langflow/api/v1/validate.py
@@ -1,10 +1,10 @@
 from fastapi import APIRouter, HTTPException
+from lfx.base.prompts.api_utils import process_prompt_template
+from lfx.custom.validate import validate_code
+from lfx.log.logger import logger

 from langflow.api.utils import CurrentActiveUser
 from langflow.api.v1.base import Code, CodeValidationResponse, PromptValidationResponse, ValidatePromptRequest
-from langflow.base.prompts.api_utils import process_prompt_template
-from langflow.logging.logger import logger
-from langflow.utils.validate import validate_code

 # build router
 router = APIRouter(prefix="/validate", tags=["Validate"])
diff --git a/src/backend/base/langflow/api/v1/voice_mode.py b/src/backend/base/langflow/api/v1/voice_mode.py
index ec4a07611a4d..ec585a243a34 100644
--- a/src/backend/base/langflow/api/v1/voice_mode.py
+++ b/src/backend/base/langflow/api/v1/voice_mode.py
@@ -18,14 +18,14 @@
 from cryptography.fernet import InvalidToken
 from elevenlabs import ElevenLabs
 from fastapi import APIRouter, BackgroundTasks
+from lfx.log import logger
+from lfx.schema.schema import InputValueRequest
 from openai import OpenAI
 from sqlalchemy import select
 from starlette.websockets import WebSocket, WebSocketDisconnect

 from langflow.api.utils import CurrentActiveUser, DbSession
 from langflow.api.v1.chat import build_flow_and_stream
-from langflow.api.v1.schemas import InputValueRequest
-from langflow.logging import logger
 from langflow.memory import aadd_messagetables
 from langflow.schema.properties import Properties
 from langflow.services.auth.utils import get_current_user_for_websocket
diff --git a/src/backend/base/langflow/api/v2/files.py b/src/backend/base/langflow/api/v2/files.py
index afbab6151cf1..d95390c83e05 100644
--- a/src/backend/base/langflow/api/v2/files.py
+++ b/src/backend/base/langflow/api/v2/files.py
@@ -11,11 +11,11 @@

 from fastapi import APIRouter, Depends, File, HTTPException, UploadFile
 from fastapi.responses import StreamingResponse
+from lfx.log.logger import logger
 from sqlmodel import col, select

 from langflow.api.schemas import UploadFileResponse
 from langflow.api.utils import CurrentActiveUser, DbSession
-from langflow.logging.logger import logger
 from langflow.services.database.models.file.model import File as UserFile
 from langflow.services.deps import get_settings_service, get_storage_service
 from langflow.services.storage.service import StorageService
diff --git a/src/backend/base/langflow/api/v2/mcp.py b/src/backend/base/langflow/api/v2/mcp.py
index 4a00cf8adbe6..984de2f6c69b 100644
--- a/src/backend/base/langflow/api/v2/mcp.py
+++ b/src/backend/base/langflow/api/v2/mcp.py
@@ -3,11 +3,11 @@
 from io import BytesIO

 from fastapi import APIRouter, Depends, HTTPException, UploadFile
+from lfx.base.mcp.util import update_tools
+from lfx.log import logger

 from langflow.api.utils import CurrentActiveUser, DbSession
 from langflow.api.v2.files import MCP_SERVERS_FILE, delete_file, download_file, get_file_by_name, upload_user_file
-from langflow.base.mcp.util import update_tools
-from langflow.logging import logger
 from langflow.services.deps import get_settings_service, get_storage_service

 router = APIRouter(tags=["MCP"], prefix="/mcp")
diff --git a/src/backend/base/langflow/base/__init__.py b/src/backend/base/langflow/base/__init__.py
index e69de29bb2d1..90bbd217df28 100644
--- a/src/backend/base/langflow/base/__init__.py
+++ b/src/backend/base/langflow/base/__init__.py
@@ -0,0 +1,11 @@
+"""Backwards compatibility module for langflow.base.
+
+This module imports from lfx.base to maintain compatibility with existing code
+that expects to import from langflow.base.
+"""
+
+# Import all base modules from lfx for backwards compatibility
+from lfx.base import *  # noqa: F403
+
+# Import langflow-specific modules that aren't in lfx.base
+from . import knowledge_bases  # noqa: F401
diff --git a/src/backend/base/langflow/base/agents/__init__.py b/src/backend/base/langflow/base/agents/__init__.py
index e69de29bb2d1..550236949dcf 100644
--- a/src/backend/base/langflow/base/agents/__init__.py
+++ b/src/backend/base/langflow/base/agents/__init__.py
@@ -0,0 +1,3 @@
+"""Backwards compatibility module for langflow.base.agents."""
+
+from lfx.base.agents import *  # noqa: F403
diff --git a/src/backend/base/langflow/base/data/__init__.py b/src/backend/base/langflow/base/data/__init__.py
index 8a92e12b04e9..94a3527470e3 100644
--- a/src/backend/base/langflow/base/data/__init__.py
+++ b/src/backend/base/langflow/base/data/__init__.py
@@ -1,5 +1,3 @@
-from .base_file import BaseFileComponent
+"""Backwards compatibility module for langflow.base.data."""

-__all__ = [
-    "BaseFileComponent",
-]
+from lfx.base.data import *  # noqa: F403
diff --git a/src/backend/base/langflow/base/data/utils.py b/src/backend/base/langflow/base/data/utils.py
index 24a7061bdd41..9aa123c5cb1d 100644
--- a/src/backend/base/langflow/base/data/utils.py
+++ b/src/backend/base/langflow/base/data/utils.py
@@ -1,198 +1,3 @@
-import unicodedata
-from collections.abc import Callable
-from concurrent import futures
-from pathlib import Path
+"""Backwards compatibility module for langflow.base.data.utils."""

-import chardet
-import orjson
-import yaml
-from defusedxml import ElementTree
-
-from langflow.schema.data import Data
-
-# Types of files that can be read simply by file.read()
-# and have 100% to be completely readable
-TEXT_FILE_TYPES = [
-    "txt",
-    "md",
-    "mdx",
-    "csv",
-    "json",
-    "yaml",
-    "yml",
-    "xml",
-    "html",
-    "htm",
-    "pdf",
-    "docx",
-    "py",
-    "sh",
-    "sql",
-    "js",
-    "ts",
-    "tsx",
-]
-
-IMG_FILE_TYPES = ["jpg", "jpeg", "png", "bmp", "image"]
-
-
-def normalize_text(text):
-    return unicodedata.normalize("NFKD", text)
-
-
-def is_hidden(path: Path) -> bool:
-    return path.name.startswith(".")
-
-
-def format_directory_path(path: str) -> str:
-    """Format a directory path to ensure it's properly escaped and valid.
-
-    Args:
-        path (str): The input path string.
-
-    Returns:
-        str: A properly formatted path string.
-    """
-    return path.replace("\n", "\\n")
-
-
-# Ignoring FBT001 because the DirectoryComponent in 1.0.19
-# calls this function without keyword arguments
-def retrieve_file_paths(
-    path: str,
-    load_hidden: bool,  # noqa: FBT001
-    recursive: bool,  # noqa: FBT001
-    depth: int,
-    types: list[str] = TEXT_FILE_TYPES,
-) -> list[str]:
-    path = format_directory_path(path)
-    path_obj = Path(path)
-    if not path_obj.exists() or not path_obj.is_dir():
-        msg = f"Path {path} must exist and be a directory."
-        raise ValueError(msg)
-
-    def match_types(p: Path) -> bool:
-        return any(p.suffix == f".{t}" for t in types) if types else True
-
-    def is_not_hidden(p: Path) -> bool:
-        return not is_hidden(p) or load_hidden
-
-    def walk_level(directory: Path, max_depth: int):
-        directory = directory.resolve()
-        prefix_length = len(directory.parts)
-        for p in directory.rglob("*" if recursive else "[!.]*"):
-            if len(p.parts) - prefix_length <= max_depth:
-                yield p
-
-    glob = "**/*" if recursive else "*"
-    paths = walk_level(path_obj, depth) if depth else path_obj.glob(glob)
-    return [str(p) for p in paths if p.is_file() and match_types(p) and is_not_hidden(p)]
-
-
-def partition_file_to_data(file_path: str, *, silent_errors: bool) -> Data | None:
-    # Use the partition function to load the file
-    from unstructured.partition.auto import partition
-
-    try:
-        elements = partition(file_path)
-    except Exception as e:
-        if not silent_errors:
-            msg = f"Error loading file {file_path}: {e}"
-            raise ValueError(msg) from e
-        return None
-
-    # Create a Data
-    text = "\n\n".join([str(el) for el in elements])
-    metadata = elements.metadata if hasattr(elements, "metadata") else {}
-    metadata["file_path"] = file_path
-    return Data(text=text, data=metadata)
-
-
-def read_text_file(file_path: str) -> str:
-    file_path_ = Path(file_path)
-    raw_data = file_path_.read_bytes()
-    result = chardet.detect(raw_data)
-    encoding = result["encoding"]
-
-    if encoding in {"Windows-1252", "Windows-1254", "MacRoman"}:
-        encoding = "utf-8"
-
-    return file_path_.read_text(encoding=encoding)
-
-
-def read_docx_file(file_path: str) -> str:
-    from docx import Document
-
-    doc = Document(file_path)
-    return "\n\n".join([p.text for p in doc.paragraphs])
-
-
-def parse_pdf_to_text(file_path: str) -> str:
-    from pypdf import PdfReader
-
-    with Path(file_path).open("rb") as f, PdfReader(f) as reader:
-        return "\n\n".join([page.extract_text() for page in reader.pages])
-
-
-def parse_text_file_to_data(file_path: str, *, silent_errors: bool) -> Data | None:
-    try:
-        if file_path.endswith(".pdf"):
-            text = parse_pdf_to_text(file_path)
-        elif file_path.endswith(".docx"):
-            text = read_docx_file(file_path)
-        else:
-            text = read_text_file(file_path)
-
-        # if file is json, yaml, or xml, we can parse it
-        if file_path.endswith(".json"):
-            loaded_json = orjson.loads(text)
-            if isinstance(loaded_json, dict):
-                loaded_json = {k: normalize_text(v) if isinstance(v, str) else v for k, v in loaded_json.items()}
-            elif isinstance(loaded_json, list):
-                loaded_json = [normalize_text(item) if isinstance(item, str) else item for item in loaded_json]
-            text = orjson.dumps(loaded_json).decode("utf-8")
-
-        elif file_path.endswith((".yaml", ".yml")):
-            text = yaml.safe_load(text)
-        elif file_path.endswith(".xml"):
-            xml_element = ElementTree.fromstring(text)
-            text = ElementTree.tostring(xml_element, encoding="unicode")
-    except Exception as e:
-        if not silent_errors:
-            msg = f"Error loading file {file_path}: {e}"
-            raise ValueError(msg) from e
-        return None
-
-    return Data(data={"file_path": file_path, "text": text})
-
-
-# ! Removing unstructured dependency until
-# ! 3.12 is supported
-# def get_elements(
-#     file_paths: List[str],
-#     silent_errors: bool,
-#     max_concurrency: int,
-#     use_multithreading: bool,
-# ) -> List[Optional[Data]]:
-#     if use_multithreading:
-#         data = parallel_load_data(file_paths, silent_errors, max_concurrency)
-#     else:
-#         data = [partition_file_to_data(file_path, silent_errors) for file_path in file_paths]
-#     data = list(filter(None, data))
-#     return data
-
-
-def parallel_load_data(
-    file_paths: list[str],
-    *,
-    silent_errors: bool,
-    max_concurrency: int,
-    load_function: Callable = parse_text_file_to_data,
-) -> list[Data | None]:
-    with futures.ThreadPoolExecutor(max_workers=max_concurrency) as executor:
-        loaded_files = executor.map(
-            lambda file_path: load_function(file_path, silent_errors=silent_errors),
-            file_paths,
-        )
-        # loaded_files is an iterator, so we need to convert it to a list
-        return list(loaded_files)
+from lfx.base.data.utils import *  # noqa: F403
diff --git a/src/backend/base/langflow/base/embeddings/__init__.py b/src/backend/base/langflow/base/embeddings/__init__.py
index e69de29bb2d1..1bb6cf8fe620 100644
--- a/src/backend/base/langflow/base/embeddings/__init__.py
+++ b/src/backend/base/langflow/base/embeddings/__init__.py
@@ -0,0 +1,3 @@
+"""Backwards compatibility module for langflow.base.embeddings."""
+
+from lfx.base.embeddings import *  # noqa: F403
diff --git a/src/backend/base/langflow/base/io/__init__.py b/src/backend/base/langflow/base/io/__init__.py
index e69de29bb2d1..8e62b1ac2195 100644
--- a/src/backend/base/langflow/base/io/__init__.py
+++ b/src/backend/base/langflow/base/io/__init__.py
@@ -0,0 +1,7 @@
+"""Backwards compatibility module for langflow.base.io.
+
+This module imports from lfx.base.io to maintain compatibility.
+"""
+
+# Import all io modules from lfx for backwards compatibility
+from lfx.base.io import *  # noqa: F403
diff --git a/src/backend/base/langflow/base/io/chat.py b/src/backend/base/langflow/base/io/chat.py
index a0abe06171db..2fbb72590583 100644
--- a/src/backend/base/langflow/base/io/chat.py
+++ b/src/backend/base/langflow/base/io/chat.py
@@ -1,20 +1,7 @@
-from langflow.custom.custom_component.component import Component
+"""Backwards compatibility module for langflow.base.io.chat.

+This module imports from lfx.base.io.chat to maintain compatibility.
+"""

-class ChatComponent(Component):
-    display_name = "Chat Component"
-    description = "Use as base for chat components."
- - def get_properties_from_source_component(self): - if hasattr(self, "_vertex") and hasattr(self._vertex, "incoming_edges") and self._vertex.incoming_edges: - source_id = self._vertex.incoming_edges[0].source_id - source_vertex = self.graph.get_vertex(source_id) - component = source_vertex.custom_component - source = component.display_name - icon = component.icon - possible_attributes = ["model_name", "model_id", "model"] - for attribute in possible_attributes: - if hasattr(component, attribute) and getattr(component, attribute): - return getattr(component, attribute), icon, source, component._id - return source, icon, component.display_name, component._id - return None, None, None, None +# Import all chat modules from lfx for backwards compatibility +from lfx.base.io.chat import * # noqa: F403 diff --git a/src/backend/base/langflow/base/io/text.py b/src/backend/base/langflow/base/io/text.py index b9cb03797857..4e85e27196d5 100644 --- a/src/backend/base/langflow/base/io/text.py +++ b/src/backend/base/langflow/base/io/text.py @@ -1,22 +1,3 @@ -from langflow.custom.custom_component.component import Component +"""Backwards compatibility module for langflow.base.io.text.""" - -class TextComponent(Component): - display_name = "Text Component" - description = "Used to pass text to the next component." - - def build_config(self): - return { - "input_value": { - "display_name": "Value", - "input_types": ["Message", "Data"], - "info": "Text or Data to be passed.", - }, - "data_template": { - "display_name": "Data Template", - "multiline": True, - "info": "Template to convert Data to Text. " - "If left empty, it will be dynamically set to the Data's text key.", - "advanced": True, - }, - } +from lfx.base.io.text import * # noqa: F403 diff --git a/src/backend/base/langflow/base/knowledge_bases/__init__.py b/src/backend/base/langflow/base/knowledge_bases/__init__.py index e69de29bb2d1..45b973ed435b 100644 --- a/src/backend/base/langflow/base/knowledge_bases/__init__.py +++ b/src/backend/base/langflow/base/knowledge_bases/__init__.py @@ -0,0 +1,3 @@ +from .knowledge_base_utils import compute_bm25, compute_tfidf, get_knowledge_bases + +__all__ = ["compute_bm25", "compute_tfidf", "get_knowledge_bases"] diff --git a/src/backend/base/langflow/base/memory/__init__.py b/src/backend/base/langflow/base/memory/__init__.py index e69de29bb2d1..5d18a796fd8e 100644 --- a/src/backend/base/langflow/base/memory/__init__.py +++ b/src/backend/base/langflow/base/memory/__init__.py @@ -0,0 +1,3 @@ +"""Backwards compatibility module for langflow.base.memory.""" + +from lfx.base.memory import * # noqa: F403 diff --git a/src/backend/base/langflow/base/models/__init__.py b/src/backend/base/langflow/base/models/__init__.py index 921f10336a9f..9b4c2d260b64 100644 --- a/src/backend/base/langflow/base/models/__init__.py +++ b/src/backend/base/langflow/base/models/__init__.py @@ -1,3 +1,3 @@ -from .model import LCModelComponent +"""Backwards compatibility module for langflow.base.models.""" -__all__ = ["LCModelComponent"] +from lfx.base.models import * # noqa: F403 diff --git a/src/backend/base/langflow/base/models/openai_constants.py b/src/backend/base/langflow/base/models/openai_constants.py index 8eb028e7219e..e20b003aeb32 100644 --- a/src/backend/base/langflow/base/models/openai_constants.py +++ b/src/backend/base/langflow/base/models/openai_constants.py @@ -1,121 +1,7 @@ -from .model_metadata import create_model_metadata +# Forward import for OpenAI constants +from lfx.base.models.openai_constants import ( + 
OPENAI_EMBEDDING_MODEL_NAMES, + OPENAI_MODEL_NAMES, +) -# Unified model metadata - single source of truth -OPENAI_MODELS_DETAILED = [ - # GPT-5 Series - create_model_metadata( - provider="OpenAI", - name="gpt-5", - icon="OpenAI", - tool_calling=True, - reasoning=True, - ), - create_model_metadata( - provider="OpenAI", - name="gpt-5-mini", - icon="OpenAI", - tool_calling=True, - reasoning=True, - ), - create_model_metadata( - provider="OpenAI", - name="gpt-5-nano", - icon="OpenAI", - tool_calling=True, - reasoning=True, - ), - create_model_metadata( - provider="OpenAI", - name="gpt-5-chat-latest", - icon="OpenAI", - tool_calling=False, - reasoning=True, - ), - # Regular OpenAI Models - create_model_metadata(provider="OpenAI", name="gpt-4o-mini", icon="OpenAI", tool_calling=True), - create_model_metadata(provider="OpenAI", name="gpt-4o", icon="OpenAI", tool_calling=True), - create_model_metadata(provider="OpenAI", name="gpt-4.1", icon="OpenAI", tool_calling=True), - create_model_metadata(provider="OpenAI", name="gpt-4.1-mini", icon="OpenAI", tool_calling=True), - create_model_metadata(provider="OpenAI", name="gpt-4.1-nano", icon="OpenAI", tool_calling=True), - create_model_metadata( - provider="OpenAI", name="gpt-4.5-preview", icon="OpenAI", tool_calling=True, preview=True, not_supported=True - ), - create_model_metadata(provider="OpenAI", name="gpt-4-turbo", icon="OpenAI", tool_calling=True), - create_model_metadata( - provider="OpenAI", name="gpt-4-turbo-preview", icon="OpenAI", tool_calling=True, preview=True - ), - create_model_metadata(provider="OpenAI", name="gpt-4", icon="OpenAI", tool_calling=True), - create_model_metadata(provider="OpenAI", name="gpt-3.5-turbo", icon="OpenAI", tool_calling=True), - # Reasoning Models - create_model_metadata(provider="OpenAI", name="o1", icon="OpenAI", reasoning=True), - create_model_metadata(provider="OpenAI", name="o1-mini", icon="OpenAI", reasoning=True, not_supported=True), - create_model_metadata(provider="OpenAI", name="o1-pro", icon="OpenAI", reasoning=True, not_supported=True), - create_model_metadata(provider="OpenAI", name="o3-mini", icon="OpenAI", reasoning=True), - create_model_metadata(provider="OpenAI", name="o3", icon="OpenAI", reasoning=True), - create_model_metadata(provider="OpenAI", name="o3-pro", icon="OpenAI", reasoning=True), - create_model_metadata(provider="OpenAI", name="o4-mini", icon="OpenAI", reasoning=True), - create_model_metadata(provider="OpenAI", name="o4-mini-high", icon="OpenAI", reasoning=True), - # Search Models - create_model_metadata( - provider="OpenAI", - name="gpt-4o-mini-search-preview", - icon="OpenAI", - tool_calling=True, - search=True, - preview=True, - ), - create_model_metadata( - provider="OpenAI", - name="gpt-4o-search-preview", - icon="OpenAI", - tool_calling=True, - search=True, - preview=True, - ), - # Not Supported Models - create_model_metadata( - provider="OpenAI", name="computer-use-preview", icon="OpenAI", not_supported=True, preview=True - ), - create_model_metadata( - provider="OpenAI", name="gpt-4o-audio-preview", icon="OpenAI", not_supported=True, preview=True - ), - create_model_metadata( - provider="OpenAI", name="gpt-4o-realtime-preview", icon="OpenAI", not_supported=True, preview=True - ), - create_model_metadata( - provider="OpenAI", name="gpt-4o-mini-audio-preview", icon="OpenAI", not_supported=True, preview=True - ), - create_model_metadata( - provider="OpenAI", name="gpt-4o-mini-realtime-preview", icon="OpenAI", not_supported=True, preview=True - ), -] -OPENAI_CHAT_MODEL_NAMES = [ 
- metadata["name"] - for metadata in OPENAI_MODELS_DETAILED - if not metadata.get("not_supported", False) - and not metadata.get("reasoning", False) - and not metadata.get("search", False) -] - -OPENAI_REASONING_MODEL_NAMES = [ - metadata["name"] - for metadata in OPENAI_MODELS_DETAILED - if metadata.get("reasoning", False) and not metadata.get("not_supported", False) -] - -OPENAI_SEARCH_MODEL_NAMES = [ - metadata["name"] - for metadata in OPENAI_MODELS_DETAILED - if metadata.get("search", False) and not metadata.get("not_supported", False) -] - -NOT_SUPPORTED_MODELS = [metadata["name"] for metadata in OPENAI_MODELS_DETAILED if metadata.get("not_supported", False)] - -OPENAI_EMBEDDING_MODEL_NAMES = [ - "text-embedding-3-small", - "text-embedding-3-large", - "text-embedding-ada-002", -] - -# Backwards compatibility -MODEL_NAMES = OPENAI_CHAT_MODEL_NAMES +__all__ = ["OPENAI_EMBEDDING_MODEL_NAMES", "OPENAI_MODEL_NAMES"] diff --git a/src/backend/base/langflow/base/prompts/__init__.py b/src/backend/base/langflow/base/prompts/__init__.py index e69de29bb2d1..68ed65ecfb27 100644 --- a/src/backend/base/langflow/base/prompts/__init__.py +++ b/src/backend/base/langflow/base/prompts/__init__.py @@ -0,0 +1,3 @@ +"""Backwards compatibility module for langflow.base.prompts.""" + +from lfx.base.prompts import * # noqa: F403 diff --git a/src/backend/base/langflow/base/prompts/api_utils.py b/src/backend/base/langflow/base/prompts/api_utils.py index c07a3c5ac497..c5518ce0cc02 100644 --- a/src/backend/base/langflow/base/prompts/api_utils.py +++ b/src/backend/base/langflow/base/prompts/api_utils.py @@ -1,224 +1,3 @@ -from collections import defaultdict -from typing import Any +"""Backwards compatibility module for langflow.base.prompts.api_utils.""" -from fastapi import HTTPException -from langchain_core.prompts import PromptTemplate - -from langflow.inputs.inputs import DefaultPromptField -from langflow.interface.utils import extract_input_variables_from_prompt -from langflow.logging.logger import logger - -_INVALID_CHARACTERS = { - " ", - ",", - ".", - ":", - ";", - "!", - "?", - "/", - "\\", - "(", - ")", - "[", - "]", -} - -_INVALID_NAMES = { - "code", - "input_variables", - "output_parser", - "partial_variables", - "template", - "template_format", - "validate_template", -} - - -def _is_json_like(var): - if var.startswith("{{") and var.endswith("}}"): - # If it is a double brance variable - # we don't want to validate any of its content - return True - # the above doesn't work on all cases because the json string can be multiline - # or indented which can add \n or spaces at the start or end of the string - # test_case_3 new_var == '\n{{\n "test": "hello",\n "text": "world"\n}}\n' - # what we can do is to remove the \n and spaces from the start and end of the string - # and then check if the string starts with {{ and ends with }} - var = var.strip() - var = var.replace("\n", "") - var = var.replace(" ", "") - # Now it should be a valid json string - return var.startswith("{{") and var.endswith("}}") - - -def _fix_variable(var, invalid_chars, wrong_variables): - if not var: - return var, invalid_chars, wrong_variables - new_var = var - - # Handle variables starting with a number - if var[0].isdigit(): - invalid_chars.append(var[0]) - new_var, invalid_chars, wrong_variables = _fix_variable(var[1:], invalid_chars, wrong_variables) - - # Temporarily replace {{ and }} to avoid treating them as invalid - new_var = new_var.replace("{{", "ᴛᴇᴍᴘᴏᴘᴇɴ").replace("}}", "ᴛᴇᴍᴘᴄʟᴏsᴇ") # noqa: RUF001 - - # Remove invalid 
characters - for char in new_var: - if char in _INVALID_CHARACTERS: - invalid_chars.append(char) - new_var = new_var.replace(char, "") - if var not in wrong_variables: # Avoid duplicating entries - wrong_variables.append(var) - - # Restore {{ and }} - new_var = new_var.replace("ᴛᴇᴍᴘᴏᴘᴇɴ", "{{").replace("ᴛᴇᴍᴘᴄʟᴏsᴇ", "}}") # noqa: RUF001 - - return new_var, invalid_chars, wrong_variables - - -def _check_variable(var, invalid_chars, wrong_variables, empty_variables): - if any(char in invalid_chars for char in var): - wrong_variables.append(var) - elif var == "": - empty_variables.append(var) - return wrong_variables, empty_variables - - -def _check_for_errors(input_variables, fixed_variables, wrong_variables, empty_variables) -> None: - if any(var for var in input_variables if var not in fixed_variables): - error_message = ( - f"Error: Input variables contain invalid characters or formats. \n" - f"Invalid variables: {', '.join(wrong_variables)}.\n" - f"Empty variables: {', '.join(empty_variables)}. \n" - f"Fixed variables: {', '.join(fixed_variables)}." - ) - raise ValueError(error_message) - - -def _check_input_variables(input_variables): - invalid_chars = [] - fixed_variables = [] - wrong_variables = [] - empty_variables = [] - variables_to_check = [] - - for var in input_variables: - # First, let's check if the variable is a JSON string - # because if it is, it won't be considered a variable - # and we don't need to validate it - if _is_json_like(var): - continue - - new_var, wrong_variables, empty_variables = _fix_variable(var, invalid_chars, wrong_variables) - wrong_variables, empty_variables = _check_variable(var, _INVALID_CHARACTERS, wrong_variables, empty_variables) - fixed_variables.append(new_var) - variables_to_check.append(var) - - _check_for_errors(variables_to_check, fixed_variables, wrong_variables, empty_variables) - - return fixed_variables - - -def validate_prompt(prompt_template: str, *, silent_errors: bool = False) -> list[str]: - input_variables = extract_input_variables_from_prompt(prompt_template) - - # Check if there are invalid characters in the input_variables - input_variables = _check_input_variables(input_variables) - if any(var in _INVALID_NAMES for var in input_variables): - msg = f"Invalid input variables. None of the variables can be named {', '.join(input_variables)}. 
" - raise ValueError(msg) - - try: - PromptTemplate(template=prompt_template, input_variables=input_variables) - except Exception as exc: - msg = f"Invalid prompt: {exc}" - logger.exception(msg) - if not silent_errors: - raise ValueError(msg) from exc - - return input_variables - - -def get_old_custom_fields(custom_fields, name): - try: - if len(custom_fields) == 1 and name == "": - # If there is only one custom field and the name is empty string - # then we are dealing with the first prompt request after the node was created - name = next(iter(custom_fields.keys())) - - old_custom_fields = custom_fields[name] - if not old_custom_fields: - old_custom_fields = [] - - old_custom_fields = old_custom_fields.copy() - except KeyError: - old_custom_fields = [] - custom_fields[name] = [] - return old_custom_fields - - -def add_new_variables_to_template(input_variables, custom_fields, template, name) -> None: - for variable in input_variables: - try: - template_field = DefaultPromptField(name=variable, display_name=variable) - if variable in template: - # Set the new field with the old value - template_field.value = template[variable]["value"] - - template[variable] = template_field.to_dict() - - # Check if variable is not already in the list before appending - if variable not in custom_fields[name]: - custom_fields[name].append(variable) - - except Exception as exc: - raise HTTPException(status_code=500, detail=str(exc)) from exc - - -def remove_old_variables_from_template(old_custom_fields, input_variables, custom_fields, template, name) -> None: - for variable in old_custom_fields: - if variable not in input_variables: - try: - # Remove the variable from custom_fields associated with the given name - if variable in custom_fields[name]: - custom_fields[name].remove(variable) - - # Remove the variable from the template - template.pop(variable, None) - - except Exception as exc: - raise HTTPException(status_code=500, detail=str(exc)) from exc - - -def update_input_variables_field(input_variables, template) -> None: - if "input_variables" in template: - template["input_variables"]["value"] = input_variables - - -def process_prompt_template( - template: str, name: str, custom_fields: dict[str, list[str]] | None, frontend_node_template: dict[str, Any] -): - """Process and validate prompt template, update template and custom fields.""" - # Validate the prompt template and extract input variables - input_variables = validate_prompt(template) - - # Initialize custom_fields if None - if custom_fields is None: - custom_fields = defaultdict(list) - - # Retrieve old custom fields - old_custom_fields = get_old_custom_fields(custom_fields, name) - - # Add new variables to the template - add_new_variables_to_template(input_variables, custom_fields, frontend_node_template, name) - - # Remove old variables from the template - remove_old_variables_from_template(old_custom_fields, input_variables, custom_fields, frontend_node_template, name) - - # Update the input variables field in the template - update_input_variables_field(input_variables, frontend_node_template) - - return input_variables +from lfx.base.prompts.api_utils import * # noqa: F403 diff --git a/src/backend/base/langflow/base/textsplitters/__init__.py b/src/backend/base/langflow/base/textsplitters/__init__.py index e69de29bb2d1..42754662bf97 100644 --- a/src/backend/base/langflow/base/textsplitters/__init__.py +++ b/src/backend/base/langflow/base/textsplitters/__init__.py @@ -0,0 +1,3 @@ +"""Backwards compatibility module for 
langflow.base.textsplitters.""" + +from lfx.base.textsplitters import * # noqa: F403 diff --git a/src/backend/base/langflow/base/tools/__init__.py b/src/backend/base/langflow/base/tools/__init__.py index e69de29bb2d1..2df45fb2a7e8 100644 --- a/src/backend/base/langflow/base/tools/__init__.py +++ b/src/backend/base/langflow/base/tools/__init__.py @@ -0,0 +1,3 @@ +"""Backwards compatibility module for langflow.base.tools.""" + +from lfx.base.tools import * # noqa: F403 diff --git a/src/backend/base/langflow/base/vectorstores/__init__.py b/src/backend/base/langflow/base/vectorstores/__init__.py index e69de29bb2d1..ed810b81d11c 100644 --- a/src/backend/base/langflow/base/vectorstores/__init__.py +++ b/src/backend/base/langflow/base/vectorstores/__init__.py @@ -0,0 +1,3 @@ +"""Backwards compatibility module for langflow.base.vectorstores.""" + +from lfx.base.vectorstores import * # noqa: F403 diff --git a/src/backend/base/langflow/components/__init__.py b/src/backend/base/langflow/components/__init__.py index 7dfd9bf278b6..67f356a2f83f 100644 --- a/src/backend/base/langflow/components/__init__.py +++ b/src/backend/base/langflow/components/__init__.py @@ -2,272 +2,20 @@ from __future__ import annotations -from typing import TYPE_CHECKING, Any +from typing import Any -from langflow.components._importing import import_mod +from lfx.components import __all__ as _lfx_all -if TYPE_CHECKING: - from langflow.components import ( - Notion, - agentql, - agents, - aiml, - amazon, - anthropic, - apify, - arxiv, - assemblyai, - azure, - baidu, - bing, - cleanlab, - cloudflare, - cohere, - composio, - confluence, - crewai, - custom_component, - data, - datastax, - deepseek, - docling, - duckduckgo, - embeddings, - exa, - firecrawl, - git, - glean, - google, - groq, - helpers, - homeassistant, - huggingface, - ibm, - icosacomputing, - input_output, - langchain_utilities, - langwatch, - lmstudio, - logic, - maritalk, - mem0, - mistral, - models, - needle, - notdiamond, - novita, - nvidia, - olivya, - ollama, - openai, - openrouter, - perplexity, - processing, - prototypes, - redis, - sambanova, - scrapegraph, - searchapi, - serpapi, - tavily, - tools, - twelvelabs, - unstructured, - vectorstores, - vertexai, - wikipedia, - wolframalpha, - xai, - yahoosearch, - youtube, - zep, - ) - -_dynamic_imports = { - "agents": "langflow.components.agents", - "data": "langflow.components.data", - "processing": "langflow.components.processing", - "vectorstores": "langflow.components.vectorstores", - "tools": "langflow.components.tools", - "models": "langflow.components.models", - "embeddings": "langflow.components.embeddings", - "helpers": "langflow.components.helpers", - "input_output": "langflow.components.input_output", - "logic": "langflow.components.logic", - "custom_component": "langflow.components.custom_component", - "prototypes": "langflow.components.prototypes", - "openai": "langflow.components.openai", - "anthropic": "langflow.components.anthropic", - "google": "langflow.components.google", - "azure": "langflow.components.azure", - "huggingface": "langflow.components.huggingface", - "ollama": "langflow.components.ollama", - "groq": "langflow.components.groq", - "cohere": "langflow.components.cohere", - "mistral": "langflow.components.mistral", - "deepseek": "langflow.components.deepseek", - "nvidia": "langflow.components.nvidia", - "amazon": "langflow.components.amazon", - "vertexai": "langflow.components.vertexai", - "xai": "langflow.components.xai", - "perplexity": "langflow.components.perplexity", - 
"openrouter": "langflow.components.openrouter", - "lmstudio": "langflow.components.lmstudio", - "sambanova": "langflow.components.sambanova", - "maritalk": "langflow.components.maritalk", - "novita": "langflow.components.novita", - "olivya": "langflow.components.olivya", - "notdiamond": "langflow.components.notdiamond", - "needle": "langflow.components.needle", - "cloudflare": "langflow.components.cloudflare", - "baidu": "langflow.components.baidu", - "aiml": "langflow.components.aiml", - "ibm": "langflow.components.ibm", - "langchain_utilities": "langflow.components.langchain_utilities", - "crewai": "langflow.components.crewai", - "composio": "langflow.components.composio", - "mem0": "langflow.components.mem0", - "datastax": "langflow.components.datastax", - "cleanlab": "langflow.components.cleanlab", - "langwatch": "langflow.components.langwatch", - "icosacomputing": "langflow.components.icosacomputing", - "homeassistant": "langflow.components.homeassistant", - "agentql": "langflow.components.agentql", - "assemblyai": "langflow.components.assemblyai", - "twelvelabs": "langflow.components.twelvelabs", - "docling": "langflow.components.docling", - "unstructured": "langflow.components.unstructured", - "redis": "langflow.components.redis", - "zep": "langflow.components.zep", - "bing": "langflow.components.bing", - "duckduckgo": "langflow.components.duckduckgo", - "serpapi": "langflow.components.serpapi", - "searchapi": "langflow.components.searchapi", - "tavily": "langflow.components.tavily", - "exa": "langflow.components.exa", - "glean": "langflow.components.glean", - "yahoosearch": "langflow.components.yahoosearch", - "apify": "langflow.components.apify", - "arxiv": "langflow.components.arxiv", - "confluence": "langflow.components.confluence", - "firecrawl": "langflow.components.firecrawl", - "git": "langflow.components.git", - "wikipedia": "langflow.components.wikipedia", - "youtube": "langflow.components.youtube", - "scrapegraph": "langflow.components.scrapegraph", - "Notion": "langflow.components.Notion", - "wolframalpha": "langflow.components.wolframalpha", -} - -__all__: list[str] = [ - "Notion", - "agentql", - "agents", - "aiml", - "amazon", - "anthropic", - "apify", - "arxiv", - "assemblyai", - "azure", - "baidu", - "bing", - "cleanlab", - "cloudflare", - "cohere", - "composio", - "confluence", - "crewai", - "custom_component", - "data", - "datastax", - "deepseek", - "docling", - "duckduckgo", - "embeddings", - "exa", - "firecrawl", - "git", - "glean", - "google", - "groq", - "helpers", - "homeassistant", - "huggingface", - "ibm", - "icosacomputing", - "input_output", - "langchain_utilities", - "langwatch", - "lmstudio", - "logic", - "maritalk", - "mem0", - "mistral", - "models", - "needle", - "notdiamond", - "novita", - "nvidia", - "olivya", - "ollama", - "openai", - "openrouter", - "perplexity", - "processing", - "prototypes", - "redis", - "sambanova", - "scrapegraph", - "searchapi", - "serpapi", - "tavily", - "tools", - "twelvelabs", - "unstructured", - "vectorstores", - "vertexai", - "wikipedia", - "wolframalpha", - "xai", - "yahoosearch", - "youtube", - "zep", -] +__all__: list[str] = list(_lfx_all) def __getattr__(attr_name: str) -> Any: - """Lazily import component modules on attribute access. - - Args: - attr_name (str): The attribute/module name to import. + """Forward attribute access to lfx.components.""" + from lfx import components - Returns: - Any: The imported module or attribute. 
- - Raises: - AttributeError: If the attribute is not a known component or cannot be imported. - """ - if attr_name not in _dynamic_imports: - msg = f"module '{__name__}' has no attribute '{attr_name}'" - raise AttributeError(msg) - try: - # Use import_mod as in LangChain, passing the module name and package - result = import_mod(attr_name, "__module__", __spec__.parent) - except (ModuleNotFoundError, ImportError, AttributeError) as e: - msg = f"Could not import '{attr_name}' from '{__name__}': {e}" - raise AttributeError(msg) from e - globals()[attr_name] = result # Cache for future access - return result + return getattr(components, attr_name) def __dir__() -> list[str]: - """Return list of available attributes for tab-completion and dir().""" + """Forward dir() to lfx.components.""" return list(__all__) - - -# Optional: Consistency check (can be removed in production) -_missing = set(__all__) - set(_dynamic_imports) -if _missing: - msg = f"Missing dynamic import mapping for: {', '.join(_missing)}" - raise ImportError(msg) diff --git a/src/backend/base/langflow/components/agents.py b/src/backend/base/langflow/components/agents.py new file mode 100644 index 000000000000..4459f382c781 --- /dev/null +++ b/src/backend/base/langflow/components/agents.py @@ -0,0 +1,6 @@ +"""Forward langflow.components.agents to lfx.components.agents.""" + +from lfx.components.agents import * # noqa: F403 +from lfx.components.agents import __all__ as _all + +__all__ = list(_all) diff --git a/src/backend/base/langflow/components/agents/__init__.py b/src/backend/base/langflow/components/agents/__init__.py deleted file mode 100644 index 33a483db3228..000000000000 --- a/src/backend/base/langflow/components/agents/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from .agent import AgentComponent -from .mcp_component import MCPToolsComponent - -__all__ = ["AgentComponent", "MCPToolsComponent"] diff --git a/src/backend/base/langflow/components/anthropic.py b/src/backend/base/langflow/components/anthropic.py new file mode 100644 index 000000000000..00e262faf5c0 --- /dev/null +++ b/src/backend/base/langflow/components/anthropic.py @@ -0,0 +1,6 @@ +"""Forward langflow.components.anthropic to lfx.components.anthropic.""" + +from lfx.components.anthropic import * # noqa: F403 +from lfx.components.anthropic import __all__ as _all + +__all__ = list(_all) diff --git a/src/backend/base/langflow/components/composio/slack_composio.py b/src/backend/base/langflow/components/composio/slack_composio.py deleted file mode 100644 index db7b6162c409..000000000000 --- a/src/backend/base/langflow/components/composio/slack_composio.py +++ /dev/null @@ -1,11 +0,0 @@ -from langflow.base.composio.composio_base import ComposioBaseComponent - - -class ComposioSlackAPIComponent(ComposioBaseComponent): - display_name: str = "Slack" - icon = "Slack" - documentation: str = "https://docs.composio.dev" - app_name = "slack" - - def set_default_tools(self): - """Set the default tools for Slack component.""" diff --git a/src/backend/base/langflow/components/data.py b/src/backend/base/langflow/components/data.py new file mode 100644 index 000000000000..be1a765c2867 --- /dev/null +++ b/src/backend/base/langflow/components/data.py @@ -0,0 +1,6 @@ +"""Forward langflow.components.data to lfx.components.data.""" + +from lfx.components.data import * # noqa: F403 +from lfx.components.data import __all__ as _all + +__all__ = list(_all) diff --git a/src/backend/base/langflow/components/data/__init__.py b/src/backend/base/langflow/components/data/__init__.py deleted 
file mode 100644 index 6e90f042685e..000000000000 --- a/src/backend/base/langflow/components/data/__init__.py +++ /dev/null @@ -1,25 +0,0 @@ -from .api_request import APIRequestComponent -from .csv_to_data import CSVToDataComponent -from .directory import DirectoryComponent -from .file import FileComponent -from .json_to_data import JSONToDataComponent -from .news_search import NewsSearchComponent -from .rss import RSSReaderComponent -from .sql_executor import SQLComponent -from .url import URLComponent -from .web_search import WebSearchComponent -from .webhook import WebhookComponent - -__all__ = [ - "APIRequestComponent", - "CSVToDataComponent", - "DirectoryComponent", - "FileComponent", - "JSONToDataComponent", - "NewsSearchComponent", - "RSSReaderComponent", - "SQLComponent", - "URLComponent", - "WebSearchComponent", - "WebhookComponent", -] diff --git a/src/backend/base/langflow/components/helpers.py b/src/backend/base/langflow/components/helpers.py new file mode 100644 index 000000000000..47b9962b3b67 --- /dev/null +++ b/src/backend/base/langflow/components/helpers.py @@ -0,0 +1,6 @@ +"""Forward langflow.components.helpers to lfx.components.helpers.""" + +from lfx.components.helpers import * # noqa: F403 +from lfx.components.helpers import __all__ as _all + +__all__ = list(_all) diff --git a/src/backend/base/langflow/components/knowledge_bases/ingestion.py b/src/backend/base/langflow/components/knowledge_bases/ingestion.py index 7a2078a8e231..d3c082411b6d 100644 --- a/src/backend/base/langflow/components/knowledge_bases/ingestion.py +++ b/src/backend/base/langflow/components/knowledge_bases/ingestion.py @@ -9,28 +9,27 @@ from dataclasses import asdict, dataclass, field from datetime import datetime, timezone from pathlib import Path -from typing import TYPE_CHECKING, Any +from typing import Any import pandas as pd from cryptography.fernet import InvalidToken from langchain_chroma import Chroma -from loguru import logger - -from langflow.base.knowledge_bases.knowledge_base_utils import get_knowledge_bases -from langflow.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES -from langflow.components.processing.converter import convert_to_dataframe -from langflow.custom import Component -from langflow.io import BoolInput, DropdownInput, HandleInput, IntInput, Output, SecretStrInput, StrInput, TableInput -from langflow.schema.data import Data -from langflow.schema.dotdict import dotdict # noqa: TC001 -from langflow.schema.table import EditMode +from lfx.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES +from lfx.components.processing.converter import convert_to_dataframe +from lfx.custom import Component +from lfx.inputs.inputs import HandleInput +from lfx.io import BoolInput, DropdownInput, IntInput, Output, SecretStrInput, StrInput, TableInput +from lfx.log.logger import logger +from lfx.schema.data import Data +from lfx.schema.dataframe import DataFrame # noqa: TC002 +from lfx.schema.dotdict import dotdict # noqa: TC002 +from lfx.schema.table import EditMode + +from langflow.base.knowledge_bases import get_knowledge_bases from langflow.services.auth.utils import decrypt_api_key, encrypt_api_key from langflow.services.database.models.user.crud import get_user_by_id from langflow.services.deps import get_settings_service, get_variable_service, session_scope -if TYPE_CHECKING: - from langflow.schema.dataframe import DataFrame - HUGGINGFACE_MODEL_NAMES = ["sentence-transformers/all-MiniLM-L6-v2", "sentence-transformers/all-mpnet-base-v2"] COHERE_MODEL_NAMES = 
["embed-english-v3.0", "embed-multilingual-v3.0"] diff --git a/src/backend/base/langflow/components/knowledge_bases/retrieval.py b/src/backend/base/langflow/components/knowledge_bases/retrieval.py index 6c5fa387393a..f3df20855a7c 100644 --- a/src/backend/base/langflow/components/knowledge_bases/retrieval.py +++ b/src/backend/base/langflow/components/knowledge_bases/retrieval.py @@ -4,17 +4,18 @@ from cryptography.fernet import InvalidToken from langchain_chroma import Chroma -from loguru import logger +from lfx.custom import Component +from lfx.io import BoolInput, DropdownInput, IntInput, MessageTextInput, Output, SecretStrInput +from lfx.log.logger import logger +from lfx.schema.data import Data +from lfx.schema.dataframe import DataFrame +from lfx.services.deps import get_settings_service from pydantic import SecretStr -from langflow.base.knowledge_bases.knowledge_base_utils import get_knowledge_bases -from langflow.custom import Component -from langflow.io import BoolInput, DropdownInput, IntInput, MessageTextInput, Output, SecretStrInput -from langflow.schema.data import Data -from langflow.schema.dataframe import DataFrame +from langflow.base.knowledge_bases import get_knowledge_bases from langflow.services.auth.utils import decrypt_api_key from langflow.services.database.models.user.crud import get_user_by_id -from langflow.services.deps import get_settings_service, session_scope +from langflow.services.deps import session_scope settings = get_settings_service().settings knowledge_directory = settings.knowledge_bases_dir diff --git a/src/backend/base/langflow/components/openai.py b/src/backend/base/langflow/components/openai.py new file mode 100644 index 000000000000..d5f9fa7a866d --- /dev/null +++ b/src/backend/base/langflow/components/openai.py @@ -0,0 +1,6 @@ +"""Forward langflow.components.openai to lfx.components.openai.""" + +from lfx.components.openai import * # noqa: F403 +from lfx.components.openai import __all__ as _all + +__all__ = list(_all) diff --git a/src/backend/base/langflow/components/processing/__init__.py b/src/backend/base/langflow/components/processing/__init__.py index 40cd21fb9bb1..ab8dc841845c 100644 --- a/src/backend/base/langflow/components/processing/__init__.py +++ b/src/backend/base/langflow/components/processing/__init__.py @@ -1,117 +1 @@ -"""Processing components for LangFlow.""" - -from __future__ import annotations - -from typing import TYPE_CHECKING, Any - -from langflow.components._importing import import_mod - -if TYPE_CHECKING: - from langflow.components.processing.alter_metadata import AlterMetadataComponent - from langflow.components.processing.batch_run import BatchRunComponent - from langflow.components.processing.combine_text import CombineTextComponent - from langflow.components.processing.converter import TypeConverterComponent - from langflow.components.processing.create_data import CreateDataComponent - from langflow.components.processing.data_operations import DataOperationsComponent - from langflow.components.processing.data_to_dataframe import DataToDataFrameComponent - from langflow.components.processing.dataframe_operations import DataFrameOperationsComponent - from langflow.components.processing.extract_key import ExtractDataKeyComponent - from langflow.components.processing.filter_data import FilterDataComponent - from langflow.components.processing.filter_data_values import DataFilterComponent - from langflow.components.processing.json_cleaner import JSONCleaner - from langflow.components.processing.lambda_filter import 
LambdaFilterComponent - from langflow.components.processing.llm_router import LLMRouterComponent - from langflow.components.processing.merge_data import MergeDataComponent - from langflow.components.processing.message_to_data import MessageToDataComponent - from langflow.components.processing.parse_data import ParseDataComponent - from langflow.components.processing.parse_dataframe import ParseDataFrameComponent - from langflow.components.processing.parse_json_data import ParseJSONDataComponent - from langflow.components.processing.parser import ParserComponent - from langflow.components.processing.prompt import PromptComponent - from langflow.components.processing.python_repl_core import PythonREPLComponent - from langflow.components.processing.regex import RegexExtractorComponent - from langflow.components.processing.save_file import SaveToFileComponent - from langflow.components.processing.select_data import SelectDataComponent - from langflow.components.processing.split_text import SplitTextComponent - from langflow.components.processing.structured_output import StructuredOutputComponent - from langflow.components.processing.update_data import UpdateDataComponent - -_dynamic_imports = { - "AlterMetadataComponent": "alter_metadata", - "BatchRunComponent": "batch_run", - "CombineTextComponent": "combine_text", - "TypeConverterComponent": "converter", - "CreateDataComponent": "create_data", - "DataOperationsComponent": "data_operations", - "DataToDataFrameComponent": "data_to_dataframe", - "DataFrameOperationsComponent": "dataframe_operations", - "ExtractDataKeyComponent": "extract_key", - "FilterDataComponent": "filter_data", - "DataFilterComponent": "filter_data_values", - "JSONCleaner": "json_cleaner", - "LambdaFilterComponent": "lambda_filter", - "LLMRouterComponent": "llm_router", - "MergeDataComponent": "merge_data", - "MessageToDataComponent": "message_to_data", - "ParseDataComponent": "parse_data", - "ParseDataFrameComponent": "parse_dataframe", - "ParseJSONDataComponent": "parse_json_data", - "ParserComponent": "parser", - "PromptComponent": "prompt", - "PythonREPLComponent": "python_repl_core", - "RegexExtractorComponent": "regex", - "SaveToFileComponent": "save_file", - "SelectDataComponent": "select_data", - "SplitTextComponent": "split_text", - "StructuredOutputComponent": "structured_output", - "UpdateDataComponent": "update_data", -} - -__all__ = [ - "AlterMetadataComponent", - "BatchRunComponent", - "CombineTextComponent", - "CreateDataComponent", - "DataFilterComponent", - "DataFrameOperationsComponent", - "DataOperationsComponent", - "DataToDataFrameComponent", - "ExtractDataKeyComponent", - "FilterDataComponent", - "JSONCleaner", - "LLMRouterComponent", - "LambdaFilterComponent", - "MergeDataComponent", - "MessageToDataComponent", - "ParseDataComponent", - "ParseDataFrameComponent", - "ParseJSONDataComponent", - "ParserComponent", - "PromptComponent", - "PythonREPLComponent", - "RegexExtractorComponent", - "SaveToFileComponent", - "SelectDataComponent", - "SplitTextComponent", - "StructuredOutputComponent", - "TypeConverterComponent", - "UpdateDataComponent", -] - - -def __getattr__(attr_name: str) -> Any: - """Lazily import processing components on attribute access.""" - if attr_name not in _dynamic_imports: - msg = f"module '{__name__}' has no attribute '{attr_name}'" - raise AttributeError(msg) - try: - result = import_mod(attr_name, _dynamic_imports[attr_name], __spec__.parent) - except (ModuleNotFoundError, ImportError, AttributeError) as e: - msg = f"Could not 
import '{attr_name}' from '{__name__}': {e}" - raise AttributeError(msg) from e - globals()[attr_name] = result - return result - - -def __dir__() -> list[str]: - return list(__all__) +# Processing components diff --git a/src/backend/base/langflow/components/processing/converter.py b/src/backend/base/langflow/components/processing/converter.py index 023facfb771d..880b96d8631e 100644 --- a/src/backend/base/langflow/components/processing/converter.py +++ b/src/backend/base/langflow/components/processing/converter.py @@ -1,150 +1,4 @@ -from typing import Any +# Forward import for converter utilities +from lfx.components.processing.converter import convert_to_dataframe -from langflow.custom import Component -from langflow.io import HandleInput, Output, TabInput -from langflow.schema import Data, DataFrame, Message - - -def convert_to_message(v) -> Message: - """Convert input to Message type. - - Args: - v: Input to convert (Message, Data, DataFrame, or dict) - - Returns: - Message: Converted Message object - """ - return v if isinstance(v, Message) else v.to_message() - - -def convert_to_data(v: DataFrame | Data | Message | dict) -> Data: - """Convert input to Data type. - - Args: - v: Input to convert (Message, Data, DataFrame, or dict) - - Returns: - Data: Converted Data object - """ - if isinstance(v, dict): - return Data(v) - if isinstance(v, Message): - return v.to_data() - return v if isinstance(v, Data) else v.to_data() - - -def convert_to_dataframe(v: DataFrame | Data | Message | dict) -> DataFrame: - """Convert input to DataFrame type. - - Args: - v: Input to convert (Message, Data, DataFrame, or dict) - - Returns: - DataFrame: Converted DataFrame object - """ - if isinstance(v, dict): - return DataFrame([v]) - return v if isinstance(v, DataFrame) else v.to_dataframe() - - -class TypeConverterComponent(Component): - display_name = "Type Convert" - description = "Convert between different types (Message, Data, DataFrame)" - documentation: str = "https://docs.langflow.org/components-processing#type-convert" - icon = "repeat" - - inputs = [ - HandleInput( - name="input_data", - display_name="Input", - input_types=["Message", "Data", "DataFrame"], - info="Accept Message, Data or DataFrame as input", - required=True, - ), - TabInput( - name="output_type", - display_name="Output Type", - options=["Message", "Data", "DataFrame"], - info="Select the desired output data type", - real_time_refresh=True, - value="Message", - ), - ] - - outputs = [ - Output( - display_name="Message Output", - name="message_output", - method="convert_to_message", - ) - ] - - def update_outputs(self, frontend_node: dict, field_name: str, field_value: Any) -> dict: - """Dynamically show only the relevant output based on the selected output type.""" - if field_name == "output_type": - # Start with empty outputs - frontend_node["outputs"] = [] - - # Add only the selected output type - if field_value == "Message": - frontend_node["outputs"].append( - Output( - display_name="Message Output", - name="message_output", - method="convert_to_message", - ).to_dict() - ) - elif field_value == "Data": - frontend_node["outputs"].append( - Output( - display_name="Data Output", - name="data_output", - method="convert_to_data", - ).to_dict() - ) - elif field_value == "DataFrame": - frontend_node["outputs"].append( - Output( - display_name="DataFrame Output", - name="dataframe_output", - method="convert_to_dataframe", - ).to_dict() - ) - - return frontend_node - - def convert_to_message(self) -> Message: - """Convert input to 
Message type.""" - input_value = self.input_data[0] if isinstance(self.input_data, list) else self.input_data - - # Handle string input by converting to Message first - if isinstance(input_value, str): - input_value = Message(text=input_value) - - result = convert_to_message(input_value) - self.status = result - return result - - def convert_to_data(self) -> Data: - """Convert input to Data type.""" - input_value = self.input_data[0] if isinstance(self.input_data, list) else self.input_data - - # Handle string input by converting to Message first - if isinstance(input_value, str): - input_value = Message(text=input_value) - - result = convert_to_data(input_value) - self.status = result - return result - - def convert_to_dataframe(self) -> DataFrame: - """Convert input to DataFrame type.""" - input_value = self.input_data[0] if isinstance(self.input_data, list) else self.input_data - - # Handle string input by converting to Message first - if isinstance(input_value, str): - input_value = Message(text=input_value) - - result = convert_to_dataframe(input_value) - self.status = result - return result +__all__ = ["convert_to_dataframe"] diff --git a/src/backend/base/langflow/components/serper/__init__.py b/src/backend/base/langflow/components/serper/__init__.py deleted file mode 100644 index d7779b8d7934..000000000000 --- a/src/backend/base/langflow/components/serper/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .google_serper_api_core import GoogleSerperAPICore - -__all__ = ["GoogleSerperAPICore"] diff --git a/src/backend/base/langflow/components/serper/google_serper_api_core.py b/src/backend/base/langflow/components/serper/google_serper_api_core.py deleted file mode 100644 index cf86fd069204..000000000000 --- a/src/backend/base/langflow/components/serper/google_serper_api_core.py +++ /dev/null @@ -1,74 +0,0 @@ -from langchain_community.utilities.google_serper import GoogleSerperAPIWrapper - -from langflow.custom.custom_component.component import Component -from langflow.io import IntInput, MultilineInput, Output, SecretStrInput -from langflow.schema.dataframe import DataFrame -from langflow.schema.message import Message - - -class GoogleSerperAPICore(Component): - display_name = "Serper Google Search API" - description = "Calls the Serper.dev Google Search API and fetches the results." 
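The `converter.py` change above narrows a full component module down to `convert_to_dataframe`, the single helper that other backend modules (such as the knowledge-base ingestion component earlier in this diff) still import. A short usage sketch based on the implementation removed above (a hypothetical snippet; assumes `lfx` is installed):

```python
# convert_to_dataframe accepts DataFrame, Data, Message, or dict. Per the
# removed implementation: a plain dict is wrapped as a single-row DataFrame,
# while other inputs pass through or are converted via their .to_dataframe().
from lfx.components.processing.converter import convert_to_dataframe

df = convert_to_dataframe({"title": "Langflow", "link": "https://example.com"})
print(df)  # one-row DataFrame with 'title' and 'link' columns
```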
- icon = "Serper" - - inputs = [ - SecretStrInput( - name="serper_api_key", - display_name="Serper API Key", - required=True, - ), - MultilineInput( - name="input_value", - display_name="Input", - tool_mode=True, - ), - IntInput( - name="k", - display_name="Number of results", - value=4, - required=True, - ), - ] - - outputs = [ - Output( - display_name="Results", - name="results", - type_=DataFrame, - method="search_serper", - ), - ] - - def search_serper(self) -> DataFrame: - try: - wrapper = self._build_wrapper() - results = wrapper.results(query=self.input_value) - list_results = results.get("organic", []) - - # Convert results to DataFrame using list comprehension - df_data = [ - { - "title": result.get("title", ""), - "link": result.get("link", ""), - "snippet": result.get("snippet", ""), - } - for result in list_results - ] - - return DataFrame(df_data) - except (ValueError, KeyError, ConnectionError) as e: - error_message = f"Error occurred while searching: {e!s}" - self.status = error_message - # Return DataFrame with error as a list of dictionaries - return DataFrame([{"error": error_message}]) - - def text_search_serper(self) -> Message: - search_results = self.search_serper() - text_result = search_results.to_string(index=False) if not search_results.empty else "No results found." - return Message(text=text_result) - - def _build_wrapper(self): - return GoogleSerperAPIWrapper(serper_api_key=self.serper_api_key, k=self.k) - - def build(self): - return self.search_serper diff --git a/src/backend/base/langflow/custom/__init__.py b/src/backend/base/langflow/custom/__init__.py index 55a2b1973425..8923f43bb651 100644 --- a/src/backend/base/langflow/custom/__init__.py +++ b/src/backend/base/langflow/custom/__init__.py @@ -1,4 +1,27 @@ -from langflow.custom.custom_component.component import Component -from langflow.custom.custom_component.custom_component import CustomComponent +from lfx import custom as custom +from lfx.custom import custom_component as custom_component +from lfx.custom import utils as utils +from lfx.custom.custom_component.component import Component, get_component_toolkit +from lfx.custom.custom_component.custom_component import CustomComponent -__all__ = ["Component", "CustomComponent"] +# Import commonly used functions +from lfx.custom.utils import build_custom_component_template +from lfx.custom.validate import create_class, create_function, extract_class_name, extract_function_name + +# Import the validate module +from . 
import validate + +__all__ = [ + "Component", + "CustomComponent", + "build_custom_component_template", + "create_class", + "create_function", + "custom", + "custom_component", + "extract_class_name", + "extract_function_name", + "get_component_toolkit", + "utils", + "validate", +] diff --git a/src/backend/base/langflow/custom/custom_component/__init__.py b/src/backend/base/langflow/custom/custom_component/__init__.py index e69de29bb2d1..d64b694ebc67 100644 --- a/src/backend/base/langflow/custom/custom_component/__init__.py +++ b/src/backend/base/langflow/custom/custom_component/__init__.py @@ -0,0 +1,4 @@ +from lfx.custom.custom_component import component, custom_component +from lfx.custom.custom_component.component import Component + +__all__ = ["Component", "component", "custom_component"] diff --git a/src/backend/base/langflow/custom/custom_component/component.py b/src/backend/base/langflow/custom/custom_component/component.py index ecc96f769004..b1ce64e42346 100644 --- a/src/backend/base/langflow/custom/custom_component/component.py +++ b/src/backend/base/langflow/custom/custom_component/component.py @@ -1,1742 +1,24 @@ -from __future__ import annotations +"""Component module for langflow - imports from lfx. -import ast -import asyncio -import inspect -import json -from collections.abc import AsyncIterator, Iterator -from copy import deepcopy -from textwrap import dedent -from typing import TYPE_CHECKING, Any, ClassVar, NamedTuple, get_type_hints -from uuid import UUID +This maintains backward compatibility while using the lfx implementation. +""" -import nanoid -import pandas as pd -import yaml -from langchain_core.tools import StructuredTool -from pydantic import BaseModel, ValidationError - -from langflow.base.tools.constants import ( - TOOL_OUTPUT_DISPLAY_NAME, - TOOL_OUTPUT_NAME, - TOOLS_METADATA_INFO, - TOOLS_METADATA_INPUT_NAME, +from lfx.custom.custom_component.component import ( + BACKWARDS_COMPATIBLE_ATTRIBUTES, + CONFIG_ATTRIBUTES, + Component, + PlaceholderGraph, + get_component_toolkit, ) -from langflow.custom.tree_visitor import RequiredInputsVisitor -from langflow.exceptions.component import StreamingError -from langflow.field_typing import Tool # noqa: TC001 Needed by _add_toolkit_output - -# Lazy import to avoid circular dependency -# from langflow.graph.state.model import create_state_model -# Lazy import to avoid circular dependency -# from langflow.graph.utils import has_chat_output -from langflow.helpers.custom import format_type -from langflow.memory import astore_message, aupdate_messages, delete_message -from langflow.schema.artifact import get_artifact_type, post_process_raw -from langflow.schema.data import Data -from langflow.schema.message import ErrorMessage, Message -from langflow.schema.properties import Source -from langflow.services.tracing.schema import Log -from langflow.template.field.base import UNDEFINED, Input, Output -from langflow.template.frontend_node.custom_components import ComponentFrontendNode -from langflow.utils.async_helpers import run_until_complete -from langflow.utils.util import find_closest_match - -from .custom_component import CustomComponent - -if TYPE_CHECKING: - from collections.abc import Callable - - from langflow.base.tools.component_tool import ComponentToolkit - from langflow.events.event_manager import EventManager - from langflow.graph.edge.schema import EdgeData - from langflow.graph.vertex.base import Vertex - from langflow.inputs.inputs import InputTypes - from langflow.schema.dataframe import DataFrame - from 
langflow.schema.log import LoggableType - - -_ComponentToolkit = None - - -def _get_component_toolkit(): - global _ComponentToolkit # noqa: PLW0603 - if _ComponentToolkit is None: - from langflow.base.tools.component_tool import ComponentToolkit - - _ComponentToolkit = ComponentToolkit - return _ComponentToolkit - - -BACKWARDS_COMPATIBLE_ATTRIBUTES = ["user_id", "vertex", "tracing_service"] -CONFIG_ATTRIBUTES = ["_display_name", "_description", "_icon", "_name", "_metadata"] - - -class PlaceholderGraph(NamedTuple): - """A placeholder graph structure for components, providing backwards compatibility. - - and enabling component execution without a full graph object. - - This lightweight structure contains essential information typically found in a complete graph, - allowing components to function in isolation or in simplified contexts. - - Attributes: - flow_id (str | None): Unique identifier for the flow, if applicable. - user_id (str | None): Identifier of the user associated with the flow, if any. - session_id (str | None): Identifier for the current session, if applicable. - context (dict): Additional contextual information for the component's execution. - flow_name (str | None): Name of the flow, if available. - """ - - flow_id: str | None - user_id: str | None - session_id: str | None - context: dict - flow_name: str | None - - -class Component(CustomComponent): - inputs: list[InputTypes] = [] - outputs: list[Output] = [] - selected_output: str | None = None - code_class_base_inheritance: ClassVar[str] = "Component" - - def __init__(self, **kwargs) -> None: - # Initialize instance-specific attributes first - if overlap := self._there_is_overlap_in_inputs_and_outputs(): - msg = f"Inputs and outputs have overlapping names: {overlap}" - raise ValueError(msg) - self._output_logs: dict[str, list[Log]] = {} - self._current_output: str = "" - self._metadata: dict = {} - self._ctx: dict = {} - self._code: str | None = None - self._logs: list[Log] = [] - - # Initialize component-specific collections - self._inputs: dict[str, InputTypes] = {} - self._outputs_map: dict[str, Output] = {} - self._results: dict[str, Any] = {} - self._attributes: dict[str, Any] = {} - self._edges: list[EdgeData] = [] - self._components: list[Component] = [] - self._event_manager: EventManager | None = None - self._state_model = None - - # Process input kwargs - inputs = {} - config = {} - for key, value in kwargs.items(): - if key.startswith("_"): - config[key] = value - elif key in CONFIG_ATTRIBUTES: - config[key[1:]] = value - else: - inputs[key] = value - - self._parameters = inputs or {} - self.set_attributes(self._parameters) - - # Store original inputs and config for reference - self.__inputs = inputs - self.__config = config or {} - - # Add unique ID if not provided - if "_id" not in self.__config: - self.__config |= {"_id": f"{self.__class__.__name__}-{nanoid.generate(size=5)}"} - - # Initialize base class - super().__init__(**self.__config) - - # Post-initialization setup - if hasattr(self, "_trace_type"): - self.trace_type = self._trace_type - if not hasattr(self, "trace_type"): - self.trace_type = "chain" - - # Setup inputs and outputs - self._reset_all_output_values() - if self.inputs is not None: - self.map_inputs(self.inputs) - self.map_outputs() - - # Final setup - self._set_output_types(list(self._outputs_map.values())) - self.set_class_code() - - def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source: - source_dict = {} - if id_: - source_dict["id"] = id_ - 
if display_name: - source_dict["display_name"] = display_name - if source: - # Handle case where source is a ChatOpenAI and other models objects - if hasattr(source, "model_name"): - source_dict["source"] = source.model_name - elif hasattr(source, "model"): - source_dict["source"] = str(source.model) - else: - source_dict["source"] = str(source) - return Source(**source_dict) - - def get_incoming_edge_by_target_param(self, target_param: str) -> str | None: - """Get the source vertex ID for an incoming edge that targets a specific parameter. - - This method delegates to the underlying vertex to find an incoming edge that connects - to the specified target parameter. - - Args: - target_param (str): The name of the target parameter to find an incoming edge for - - Returns: - str | None: The ID of the source vertex if an incoming edge is found, None otherwise - """ - if self._vertex is None: - msg = "Vertex not found. Please build the graph first." - raise ValueError(msg) - return self._vertex.get_incoming_edge_by_target_param(target_param) - - @property - def enabled_tools(self) -> list[str] | None: - """Dynamically determine which tools should be enabled. - - This property can be overridden by subclasses to provide custom tool filtering. - By default, it returns None, which means all tools are enabled. - - Returns: - list[str] | None: List of tool names or tags to enable, or None to enable all tools. - """ - # Default implementation returns None (all tools enabled) - # Subclasses can override this to provide custom filtering - return None - - def _there_is_overlap_in_inputs_and_outputs(self) -> set[str]: - """Check the `.name` of inputs and outputs to see if there is overlap. - - Returns: - set[str]: Set of names that overlap between inputs and outputs. - """ - # Create sets of input and output names for O(1) lookup - input_names = {input_.name for input_ in self.inputs if input_.name is not None} - output_names = {output.name for output in self.outputs} - - # Return the intersection of the sets - return input_names & output_names - - def get_base_args(self): - """Get the base arguments required for component initialization. - - Returns: - dict: A dictionary containing the base arguments: - - _user_id: The ID of the current user - - _session_id: The ID of the current session - - _tracing_service: The tracing service instance for logging/monitoring - """ - return { - "_user_id": self.user_id, - "_session_id": self.graph.session_id, - "_tracing_service": self._tracing_service, - } - - @property - def ctx(self): - if not hasattr(self, "graph") or self.graph is None: - msg = "Graph not found. Please build the graph first." - raise ValueError(msg) - return self.graph.context - - def add_to_ctx(self, key: str, value: Any, *, overwrite: bool = False) -> None: - """Add a key-value pair to the context. - - Args: - key (str): The key to add. - value (Any): The value to associate with the key. - overwrite (bool, optional): Whether to overwrite the existing value. Defaults to False. - - Raises: - ValueError: If the graph is not built. - """ - if not hasattr(self, "graph") or self.graph is None: - msg = "Graph not found. Please build the graph first." - raise ValueError(msg) - if key in self.graph.context and not overwrite: - msg = f"Key {key} already exists in context. Set overwrite=True to overwrite." - raise ValueError(msg) - self.graph.context.update({key: value}) - - def update_ctx(self, value_dict: dict[str, Any]) -> None: - """Update the context with a dictionary of values. 
- - Args: - value_dict (dict[str, Any]): The dictionary of values to update. - - Raises: - ValueError: If the graph is not built. - """ - if not hasattr(self, "graph") or self.graph is None: - msg = "Graph not found. Please build the graph first." - raise ValueError(msg) - if not isinstance(value_dict, dict): - msg = "Value dict must be a dictionary" - raise TypeError(msg) - - self.graph.context.update(value_dict) - - def _pre_run_setup(self): - pass - - def set_event_manager(self, event_manager: EventManager | None = None) -> None: - self._event_manager = event_manager - - def _reset_all_output_values(self) -> None: - if isinstance(self._outputs_map, dict): - for output in self._outputs_map.values(): - output.value = UNDEFINED - - def _build_state_model(self): - if self._state_model: - return self._state_model - name = self.name or self.__class__.__name__ - model_name = f"{name}StateModel" - fields = {} - for output in self._outputs_map.values(): - fields[output.name] = getattr(self, output.method) - # Lazy import to avoid circular dependency - from langflow.graph.state.model import create_state_model - - self._state_model = create_state_model(model_name=model_name, **fields) - return self._state_model - - def get_state_model_instance_getter(self): - state_model = self._build_state_model() - - def _instance_getter(_): - return state_model() - - _instance_getter.__annotations__["return"] = state_model - return _instance_getter - - def __deepcopy__(self, memo: dict) -> Component: - if id(self) in memo: - return memo[id(self)] - kwargs = deepcopy(self.__config, memo) - kwargs["inputs"] = deepcopy(self.__inputs, memo) - new_component = type(self)(**kwargs) - new_component._code = self._code - new_component._outputs_map = self._outputs_map - new_component._inputs = self._inputs - new_component._edges = self._edges - new_component._components = self._components - new_component._parameters = self._parameters - new_component._attributes = self._attributes - new_component._output_logs = self._output_logs - new_component._logs = self._logs # type: ignore[attr-defined] - memo[id(self)] = new_component - return new_component - - def set_class_code(self) -> None: - # Get the source code of the calling class - if self._code: - return - try: - module = inspect.getmodule(self.__class__) - if module is None: - msg = "Could not find module for class" - raise ValueError(msg) - - class_code = inspect.getsource(module) - self._code = class_code - except (OSError, TypeError) as e: - msg = f"Could not find source code for {self.__class__.__name__}" - raise ValueError(msg) from e - - def set(self, **kwargs): - """Connects the component to other components or sets parameters and attributes. - - Args: - **kwargs: Keyword arguments representing the connections, parameters, and attributes. - - Returns: - None - - Raises: - KeyError: If the specified input name does not exist. - """ - for key, value in kwargs.items(): - self._process_connection_or_parameters(key, value) - return self - - def list_inputs(self): - """Returns a list of input names.""" - return [_input.name for _input in self.inputs] - - def list_outputs(self): - """Returns a list of output names.""" - return [_output.name for _output in self._outputs_map.values()] - - async def run(self): - """Executes the component's logic and returns the result. - - Returns: - The result of executing the component's logic. - """ - return await self._run() - - def set_vertex(self, vertex: Vertex) -> None: - """Sets the vertex for the component. 
- - Args: - vertex (Vertex): The vertex to set. - - Returns: - None - """ - self._vertex = vertex - - def get_input(self, name: str) -> Any: - """Retrieves the value of the input with the specified name. - - Args: - name (str): The name of the input. - - Returns: - Any: The value of the input. - - Raises: - ValueError: If the input with the specified name is not found. - """ - if name in self._inputs: - return self._inputs[name] - msg = f"Input {name} not found in {self.__class__.__name__}" - raise ValueError(msg) - - def get_output(self, name: str) -> Any: - """Retrieves the output with the specified name. - - Args: - name (str): The name of the output to retrieve. - - Returns: - Any: The output value. - - Raises: - ValueError: If the output with the specified name is not found. - """ - if name in self._outputs_map: - return self._outputs_map[name] - msg = f"Output {name} not found in {self.__class__.__name__}" - raise ValueError(msg) - - def set_on_output(self, name: str, **kwargs) -> None: - output = self.get_output(name) - for key, value in kwargs.items(): - if not hasattr(output, key): - msg = f"Output {name} does not have a method {key}" - raise ValueError(msg) - setattr(output, key, value) - - def set_output_value(self, name: str, value: Any) -> None: - if name in self._outputs_map: - self._outputs_map[name].value = value - else: - msg = f"Output {name} not found in {self.__class__.__name__}" - raise ValueError(msg) - - def map_outputs(self) -> None: - """Maps the given list of outputs to the component. - - Args: - outputs (List[Output]): The list of outputs to be mapped. - - Raises: - ValueError: If the output name is None. - - Returns: - None - """ - # override outputs (generated from the class code) with vertex outputs - # if they exist (generated from the frontend) - outputs = [] - if self._vertex and self._vertex.outputs: - for output in self._vertex.outputs: - try: - output_ = Output(**output) - outputs.append(output_) - except ValidationError as e: - msg = f"Invalid output: {e}" - raise ValueError(msg) from e - else: - outputs = self.outputs - for output in outputs: - if output.name is None: - msg = "Output name cannot be None." - raise ValueError(msg) - # Deepcopy is required to avoid modifying the original component; - # allows each instance of each component to modify its own output - self._outputs_map[output.name] = deepcopy(output) - - def map_inputs(self, inputs: list[InputTypes]) -> None: - """Maps the given inputs to the component. - - Args: - inputs (List[InputTypes]): A list of InputTypes objects representing the inputs. - - Raises: - ValueError: If the input name is None. - - """ - for input_ in inputs: - if input_.name is None: - msg = self.build_component_error_message("Input name cannot be None") - raise ValueError(msg) - try: - self._inputs[input_.name] = deepcopy(input_) - except TypeError: - self._inputs[input_.name] = input_ - - def validate(self, params: dict) -> None: - """Validates the component parameters. - - Args: - params (dict): A dictionary containing the component parameters. - - Raises: - ValueError: If the inputs are not valid. - ValueError: If the outputs are not valid. 
- """ - self._validate_inputs(params) - self._validate_outputs() - - async def run_and_validate_update_outputs(self, frontend_node: dict, field_name: str, field_value: Any): - frontend_node = self.update_outputs(frontend_node, field_name, field_value) - if field_name == "tool_mode" or frontend_node.get("tool_mode"): - is_tool_mode = field_value or frontend_node.get("tool_mode") - frontend_node["outputs"] = [self._build_tool_output()] if is_tool_mode else frontend_node["outputs"] - if is_tool_mode: - frontend_node.setdefault("template", {}) - frontend_node["tool_mode"] = True - tools_metadata_input = await self._build_tools_metadata_input() - frontend_node["template"][TOOLS_METADATA_INPUT_NAME] = tools_metadata_input.to_dict() - self._append_tool_to_outputs_map() - elif "template" in frontend_node: - frontend_node["template"].pop(TOOLS_METADATA_INPUT_NAME, None) - self.tools_metadata = frontend_node.get("template", {}).get(TOOLS_METADATA_INPUT_NAME, {}).get("value") - return self._validate_frontend_node(frontend_node) - - def _validate_frontend_node(self, frontend_node: dict): - # Check if all outputs are either Output or a valid Output model - for index, output in enumerate(frontend_node["outputs"]): - if isinstance(output, dict): - try: - output_ = Output(**output) - self._set_output_return_type(output_) - output_dict = output_.model_dump() - except ValidationError as e: - msg = f"Invalid output: {e}" - raise ValueError(msg) from e - elif isinstance(output, Output): - # we need to serialize it - self._set_output_return_type(output) - output_dict = output.model_dump() - else: - msg = f"Invalid output type: {type(output)}" - raise TypeError(msg) - frontend_node["outputs"][index] = output_dict - return frontend_node - - def update_outputs(self, frontend_node: dict, field_name: str, field_value: Any) -> dict: # noqa: ARG002 - """Default implementation for updating outputs based on field changes. - - Subclasses can override this to modify outputs based on field_name and field_value. 
- """ - return frontend_node - - def _set_output_types(self, outputs: list[Output]) -> None: - for output in outputs: - self._set_output_return_type(output) - - def _set_output_return_type(self, output: Output) -> None: - if output.method is None: - msg = f"Output {output.name} does not have a method" - raise ValueError(msg) - return_types = self._get_method_return_type(output.method) - output.add_types(return_types) - - def _set_output_required_inputs(self) -> None: - for output in self.outputs: - if not output.method: - continue - method = getattr(self, output.method, None) - if not method or not callable(method): - continue - try: - source_code = inspect.getsource(method) - ast_tree = ast.parse(dedent(source_code)) - except Exception: # noqa: BLE001 - ast_tree = ast.parse(dedent(self._code or "")) - - visitor = RequiredInputsVisitor(self._inputs) - visitor.visit(ast_tree) - output.required_inputs = sorted(visitor.required_inputs) - - def get_output_by_method(self, method: Callable): - # method is a callable and output.method is a string - # we need to find the output that has the same method - output = next((output for output in self._outputs_map.values() if output.method == method.__name__), None) - if output is None: - method_name = method.__name__ if hasattr(method, "__name__") else str(method) - msg = f"Output with method {method_name} not found" - raise ValueError(msg) - return output - - def _inherits_from_component(self, method: Callable): - # check if the method is a method from a class that inherits from Component - # and that it is an output of that class - return hasattr(method, "__self__") and isinstance(method.__self__, Component) - - def _method_is_valid_output(self, method: Callable): - # check if the method is a method from a class that inherits from Component - # and that it is an output of that class - return ( - hasattr(method, "__self__") - and isinstance(method.__self__, Component) - and method.__self__.get_output_by_method(method) - ) - - def _build_error_string_from_matching_pairs(self, matching_pairs: list[tuple[Output, Input]]): - text = "" - for output, input_ in matching_pairs: - text += f"{output.name}[{','.join(output.types)}]->{input_.name}[{','.join(input_.input_types or [])}]\n" - return text - - def _find_matching_output_method(self, input_name: str, value: Component): - """Find the output method from the given component and input name. - - Find the output method from the given component (`value`) that matches the specified input (`input_name`) - in the current component. - This method searches through all outputs of the provided component to find outputs whose types match - the input types of the specified input in the current component. If exactly one matching output is found, - it returns the corresponding method. If multiple matching outputs are found, it raises an error indicating - ambiguity. If no matching outputs are found, it raises an error indicating that no suitable output was found. - - Args: - input_name (str): The name of the input in the current component to match. - value (Component): The component whose outputs are to be considered. - - Returns: - Callable: The method corresponding to the matching output. - - Raises: - ValueError: If multiple matching outputs are found, if no matching outputs are found, - or if the output method is invalid. 
- """ - # Retrieve all outputs from the given component - outputs = value._outputs_map.values() - # Prepare to collect matching output-input pairs - matching_pairs = [] - # Get the input object from the current component - input_ = self._inputs[input_name] - # Iterate over outputs to find matches based on types - matching_pairs = [ - (output, input_) - for output in outputs - for output_type in output.types - # Check if the output type matches the input's accepted types - if input_.input_types and output_type in input_.input_types - ] - # If multiple matches are found, raise an error indicating ambiguity - if len(matching_pairs) > 1: - matching_pairs_str = self._build_error_string_from_matching_pairs(matching_pairs) - msg = self.build_component_error_message( - f"There are multiple outputs from {value.display_name} that can connect to inputs: {matching_pairs_str}" - ) - raise ValueError(msg) - # If no matches are found, raise an error indicating no suitable output - if not matching_pairs: - msg = self.build_input_error_message(input_name, f"No matching output from {value.display_name} found") - raise ValueError(msg) - # Get the matching output and input pair - output, input_ = matching_pairs[0] - # Ensure that the output method is a valid method name (string) - if not isinstance(output.method, str): - msg = self.build_component_error_message( - f"Method {output.method} is not a valid output of {value.display_name}" - ) - raise TypeError(msg) - return getattr(value, output.method) - - def _process_connection_or_parameter(self, key, value) -> None: - # Special handling for Loop components: check if we're setting a loop-enabled output - if self._is_loop_connection(key, value): - self._process_loop_connection(key, value) - return - - input_ = self._get_or_create_input(key) - # We need to check if callable AND if it is a method from a class that inherits from Component - if isinstance(value, Component): - # We need to find the Output that can connect to an input of the current component - # if there's more than one output that matches, we need to raise an error - # because we don't know which one to connect to - value = self._find_matching_output_method(key, value) - if callable(value) and self._inherits_from_component(value): - try: - self._method_is_valid_output(value) - except ValueError as e: - msg = f"Method {value.__name__} is not a valid output of {value.__self__.__class__.__name__}" - raise ValueError(msg) from e - self._connect_to_component(key, value, input_) - else: - self._set_parameter_or_attribute(key, value) - - def _is_loop_connection(self, key: str, value) -> bool: - """Check if this is a loop feedback connection. - - A loop connection occurs when: - 1. The key matches an output name of this component - 2. That output has allows_loop=True - 3. The value is a callable method from another component - """ - # Check if key matches a loop-enabled output - if key not in self._outputs_map: - return False - - output = self._outputs_map[key] - if not getattr(output, "allows_loop", False): - return False - - # Check if value is a callable method from a Component - return callable(value) and self._inherits_from_component(value) - - def _process_loop_connection(self, key: str, value) -> None: - """Process a loop feedback connection. - - Creates a special edge that connects the source component's output - to this Loop component's loop-enabled output (not an input). 
- """ - try: - self._method_is_valid_output(value) - except ValueError as e: - msg = f"Method {value.__name__} is not a valid output of {value.__self__.__class__.__name__}" - raise ValueError(msg) from e - - source_component = value.__self__ - self._components.append(source_component) - source_output = source_component.get_output_by_method(value) - target_output = self._outputs_map[key] - - # Create special loop feedback edge - self._add_loop_edge(source_component, source_output, target_output) - - def _add_loop_edge(self, source_component, source_output, target_output) -> None: - """Add a special loop feedback edge that targets an output instead of an input.""" - self._edges.append( - { - "source": source_component._id, - "target": self._id, - "data": { - "sourceHandle": { - "dataType": source_component.name or source_component.__class__.__name__, - "id": source_component._id, - "name": source_output.name, - "output_types": source_output.types, - }, - "targetHandle": { - # Special loop edge structure - targets an output, not an input - "dataType": self.name or self.__class__.__name__, - "id": self._id, - "name": target_output.name, - "output_types": target_output.types, - }, - }, - } - ) - - def _process_connection_or_parameters(self, key, value) -> None: - # if value is a list of components, we need to process each component - # Note this update make sure it is not a list str | int | float | bool | type(None) - if isinstance(value, list) and not any( - isinstance(val, str | int | float | bool | type(None) | Message | Data | StructuredTool) for val in value - ): - for val in value: - self._process_connection_or_parameter(key, val) - else: - self._process_connection_or_parameter(key, value) - - def _get_or_create_input(self, key): - try: - return self._inputs[key] - except KeyError: - input_ = self._get_fallback_input(name=key, display_name=key) - self._inputs[key] = input_ - self.inputs.append(input_) - return input_ - - def _connect_to_component(self, key, value, input_) -> None: - component = value.__self__ - self._components.append(component) - output = component.get_output_by_method(value) - self._add_edge(component, key, output, input_) - - def _add_edge(self, component, key, output, input_) -> None: - self._edges.append( - { - "source": component._id, - "target": self._id, - "data": { - "sourceHandle": { - "dataType": component.name or component.__class__.__name__, - "id": component._id, - "name": output.name, - "output_types": output.types, - }, - "targetHandle": { - "fieldName": key, - "id": self._id, - "inputTypes": input_.input_types, - "type": input_.field_type, - }, - }, - } - ) - - def _set_parameter_or_attribute(self, key, value) -> None: - if isinstance(value, Component): - methods = ", ".join([f"'{output.method}'" for output in value.outputs]) - msg = f"You set {value.display_name} as value for `{key}`. 
You should pass one of the following: {methods}" - raise TypeError(msg) - self._set_input_value(key, value) - self._parameters[key] = value - self._attributes[key] = value - - def __call__(self, **kwargs): - self.set(**kwargs) - - return run_until_complete(self.run()) - - async def _run(self): - # Resolve callable inputs - for key, _input in self._inputs.items(): - if asyncio.iscoroutinefunction(_input.value): - self._inputs[key].value = await _input.value() - elif callable(_input.value): - self._inputs[key].value = await asyncio.to_thread(_input.value) - - self.set_attributes({}) - - return await self.build_results() - - def __getattr__(self, name: str) -> Any: - if "_attributes" in self.__dict__ and name in self.__dict__["_attributes"]: - # It is a dict of attributes that are not inputs or outputs all the raw data it should have the loop input. - return self.__dict__["_attributes"][name] - if "_inputs" in self.__dict__ and name in self.__dict__["_inputs"]: - return self.__dict__["_inputs"][name].value - if "_outputs_map" in self.__dict__ and name in self.__dict__["_outputs_map"]: - return self.__dict__["_outputs_map"][name] - if name in BACKWARDS_COMPATIBLE_ATTRIBUTES: - return self.__dict__[f"_{name}"] - if name.startswith("_") and name[1:] in BACKWARDS_COMPATIBLE_ATTRIBUTES: - return self.__dict__[name] - if name == "graph": - # If it got up to here it means it was going to raise - session_id = self._session_id if hasattr(self, "_session_id") else None - user_id = self._user_id if hasattr(self, "_user_id") else None - flow_name = self._flow_name if hasattr(self, "_flow_name") else None - flow_id = self._flow_id if hasattr(self, "_flow_id") else None - return PlaceholderGraph( - flow_id=flow_id, user_id=str(user_id), session_id=session_id, context={}, flow_name=flow_name - ) - msg = f"Attribute {name} not found in {self.__class__.__name__}" - raise AttributeError(msg) - - def _set_input_value(self, name: str, value: Any) -> None: - if name in self._inputs: - input_value = self._inputs[name].value - if isinstance(input_value, Component): - methods = ", ".join([f"'{output.method}'" for output in input_value.outputs]) - msg = self.build_input_error_message( - name, - f"You set {input_value.display_name} as value. You should pass one of the following: {methods}", - ) - raise ValueError(msg) - if callable(input_value) and hasattr(input_value, "__self__"): - msg = self.build_input_error_message( - name, f"Input is connected to {input_value.__self__.display_name}.{input_value.__name__}" - ) - raise ValueError(msg) - try: - self._inputs[name].value = value - except Exception as e: - msg = f"Error setting input value for {name}: {e}" - raise ValueError(msg) from e - if hasattr(self._inputs[name], "load_from_db"): - self._inputs[name].load_from_db = False - else: - msg = self.build_component_error_message(f"Input {name} not found") - raise ValueError(msg) - - def _validate_outputs(self) -> None: - # Raise Error if some rule isn't met - if self.selected_output is not None and self.selected_output not in self._outputs_map: - output_names = ", ".join(list(self._outputs_map.keys())) - msg = f"selected_output '{self.selected_output}' is not valid. 
Must be one of: {output_names}" - raise ValueError(msg) - - def _map_parameters_on_frontend_node(self, frontend_node: ComponentFrontendNode) -> None: - for name, value in self._parameters.items(): - frontend_node.set_field_value_in_template(name, value) - - def _map_parameters_on_template(self, template: dict) -> None: - for name, value in self._parameters.items(): - try: - template[name]["value"] = value - except KeyError as e: - close_match = find_closest_match(name, list(template.keys())) - if close_match: - msg = f"Parameter '{name}' not found in {self.__class__.__name__}. Did you mean '{close_match}'?" - raise ValueError(msg) from e - msg = f"Parameter {name} not found in {self.__class__.__name__}. " - raise ValueError(msg) from e - - def _get_method_return_type(self, method_name: str) -> list[str]: - method = getattr(self, method_name) - return_type = get_type_hints(method).get("return") - if return_type is None: - return [] - extracted_return_types = self._extract_return_type(return_type) - return [format_type(extracted_return_type) for extracted_return_type in extracted_return_types] - - def _update_template(self, frontend_node: dict): - return frontend_node - - def to_frontend_node(self): - # ! This part here is clunky but we need it like this for - # ! backwards compatibility. We can change how prompt component - # ! works and then update this later - field_config = self.get_template_config(self) - frontend_node = ComponentFrontendNode.from_inputs(**field_config) - for key in self._inputs: - frontend_node.set_field_load_from_db_in_template(key, value=False) - self._map_parameters_on_frontend_node(frontend_node) - - frontend_node_dict = frontend_node.to_dict(keep_name=False) - frontend_node_dict = self._update_template(frontend_node_dict) - self._map_parameters_on_template(frontend_node_dict["template"]) - - frontend_node = ComponentFrontendNode.from_dict(frontend_node_dict) - if not self._code: - self.set_class_code() - code_field = Input( - dynamic=True, - required=True, - placeholder="", - multiline=True, - value=self._code, - password=False, - name="code", - advanced=True, - field_type="code", - is_list=False, - ) - frontend_node.template.add_field(code_field) - - for output in frontend_node.outputs: - if output.types: - continue - return_types = self._get_method_return_type(output.method) - output.add_types(return_types) - - frontend_node.validate_component() - frontend_node.set_base_classes_from_outputs() - - # Get the node dictionary and add selected_output if specified - node_dict = frontend_node.to_dict(keep_name=False) - if self.selected_output is not None: - node_dict["selected_output"] = self.selected_output - - return { - "data": { - "node": node_dict, - "type": self.name or self.__class__.__name__, - "id": self._id, - }, - "id": self._id, - } - - def _validate_inputs(self, params: dict) -> None: - # Params keys are the `name` attribute of the Input objects - """Validates and assigns input values from the provided parameters dictionary. - - For each parameter matching a defined input, sets the input's value and updates the parameter - dictionary with the validated value. - """ - for key, value in params.copy().items(): - if key not in self._inputs: - continue - input_ = self._inputs[key] - # BaseInputMixin has a `validate_assignment=True` - - input_.value = value - params[input_.name] = input_.value - - def set_attributes(self, params: dict) -> None: - """Sets component attributes from the given parameters, preventing conflicts with reserved attribute names. 
- - Raises: - ValueError: If a parameter name matches a reserved attribute not managed in _attributes and its - value differs from the current attribute value. - """ - self._validate_inputs(params) - attributes = {} - for key, value in params.items(): - if key in self.__dict__ and key not in self._attributes and value != getattr(self, key): - msg = ( - f"{self.__class__.__name__} defines an input parameter named '{key}' " - f"that is a reserved word and cannot be used." - ) - raise ValueError(msg) - attributes[key] = value - for key, input_obj in self._inputs.items(): - if key not in attributes and key not in self._attributes: - attributes[key] = input_obj.value or None - - self._attributes.update(attributes) - - def _set_outputs(self, outputs: list[dict]) -> None: - self.outputs = [Output(**output) for output in outputs] - for output in self.outputs: - setattr(self, output.name, output) - self._outputs_map[output.name] = output - - def get_trace_as_inputs(self): - predefined_inputs = { - input_.name: input_.value - for input_ in self.inputs - if hasattr(input_, "trace_as_input") and input_.trace_as_input - } - # Runtime inputs - runtime_inputs = {name: input_.value for name, input_ in self._inputs.items() if hasattr(input_, "value")} - return {**predefined_inputs, **runtime_inputs} - - def get_trace_as_metadata(self): - def safe_list_values(items): - return [v if isinstance(v, str | int | float | bool) or v is None else str(v) for v in items] - - def safe_value(val): - if isinstance(val, str | int | float | bool) or val is None: - return val - if isinstance(val, list | tuple): - return safe_list_values(val) - try: - return json.dumps(val) - except (TypeError, ValueError): - return str(val) - - return { - input_.name: safe_value(getattr(self, input_.name, input_.value)) - for input_ in self.inputs - if getattr(input_, "trace_as_metadata", False) - } - - async def _build_with_tracing(self): - inputs = self.get_trace_as_inputs() - metadata = self.get_trace_as_metadata() - async with self._tracing_service.trace_component(self, self.trace_name, inputs, metadata): - results, artifacts = await self._build_results() - self._tracing_service.set_outputs(self.trace_name, results) - - return results, artifacts - - async def _build_without_tracing(self): - return await self._build_results() - - async def build_results(self): - """Build the results of the component.""" - if hasattr(self, "graph"): - session_id = self.graph.session_id - elif hasattr(self, "_session_id"): - session_id = self._session_id - else: - session_id = None - try: - if self._tracing_service: - return await self._build_with_tracing() - return await self._build_without_tracing() - except StreamingError as e: - await self.send_error( - exception=e.cause, - session_id=session_id, - trace_name=getattr(self, "trace_name", None), - source=e.source, - ) - raise e.cause # noqa: B904 - except Exception as e: - await self.send_error( - exception=e, - session_id=session_id, - source=Source(id=self._id, display_name=self.display_name, source=self.display_name), - trace_name=getattr(self, "trace_name", None), - ) - raise - - async def _build_results(self) -> tuple[dict, dict]: - results, artifacts = {}, {} - - self._pre_run_setup_if_needed() - self._handle_tool_mode() - - for output in self._get_outputs_to_process(): - self._current_output = output.name - result = await self._get_output_result(output) - results[output.name] = result - artifacts[output.name] = self._build_artifact(result) - self._log_output(output) - - 
self._finalize_results(results, artifacts) - return results, artifacts - - def _pre_run_setup_if_needed(self): - if hasattr(self, "_pre_run_setup"): - self._pre_run_setup() - - def _handle_tool_mode(self): - if ( - hasattr(self, "outputs") and any(getattr(_input, "tool_mode", False) for _input in self.inputs) - ) or self.add_tool_output: - self._append_tool_to_outputs_map() - - def _should_process_output(self, output): - """Determines whether a given output should be processed based on vertex edge configuration. - - Returns True if the component has no vertex or outgoing edges, or if the output's name is among - the vertex's source edge names. - """ - if not self._vertex or not self._vertex.outgoing_edges: - return True - return output.name in self._vertex.edges_source_names - - def _get_outputs_to_process(self): - """Returns a list of outputs to process, ordered according to self.outputs. - - Outputs are included only if they should be processed, as determined by _should_process_output. - First processes outputs in the order defined by self.outputs, then processes any remaining outputs - from _outputs_map that weren't in self.outputs. - - Returns: - list: Outputs to be processed in the defined order. - - Raises: - ValueError: If an output name in self.outputs is not present in _outputs_map. - """ - result = [] - processed_names = set() - - # First process outputs in the order defined by self.outputs - for output in self.outputs: - output_obj = self._outputs_map.get(output.name, deepcopy(output)) - if self._should_process_output(output_obj): - result.append(output_obj) - processed_names.add(output_obj.name) - - # Then process any remaining outputs from _outputs_map - for name, output_obj in self._outputs_map.items(): - if name not in processed_names and self._should_process_output(output_obj): - result.append(output_obj) - - return result - - async def _get_output_result(self, output): - """Computes and returns the result for a given output, applying caching and output options. - - If the output is cached and a value is already defined, returns the cached value. Otherwise, - invokes the associated output method asynchronously, applies output options, updates the cache, - and returns the result. Raises a ValueError if the output method is not defined, or a TypeError - if the method invocation fails. - """ - if output.cache and output.value != UNDEFINED: - return output.value - - if output.method is None: - msg = f'Output "{output.name}" does not have a method defined.' - raise ValueError(msg) - - method = getattr(self, output.method) - try: - result = await method() if inspect.iscoroutinefunction(method) else await asyncio.to_thread(method) - except TypeError as e: - msg = f'Error running method "{output.method}": {e}' - raise TypeError(msg) from e - - if ( - self._vertex is not None - and isinstance(result, Message) - and result.flow_id is None - and self._vertex.graph.flow_id is not None - ): - result.set_flow_id(self._vertex.graph.flow_id) - result = output.apply_options(result) - output.value = result - - return result - - async def resolve_output(self, output_name: str) -> Any: - """Resolves and returns the value for a specified output by name. - - If output caching is enabled and a value is already available, returns the cached value; - otherwise, computes and returns the output result. Raises a KeyError if the output name - does not exist. - """ - output = self._outputs_map.get(output_name) - if output is None: - msg = ( - f"Sorry, an output named '{output_name}' could not be found. 
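A caching sketch for the `resolve_output`/`_get_output_result` pair above; `component` is a hypothetical built instance exposing an output named `text`:

```python
import asyncio


async def demo(component) -> None:
    first = await component.resolve_output("text")   # runs the bound output method
    second = await component.resolve_output("text")  # returned from output.value instead
    assert first is second  # holds while the output keeps cache=True (the default)


# asyncio.run(demo(my_component))  # `my_component` is hypothetical
```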
" - "Please ensure that the output is correctly configured and try again." - ) - raise KeyError(msg) - if output.cache and output.value != UNDEFINED: - return output.value - return await self._get_output_result(output) - - def _build_artifact(self, result): - """Builds an artifact dictionary containing a string representation, raw data, and type for a result. - - The artifact includes a human-readable representation, the processed raw result, and its determined type. - """ - custom_repr = self.custom_repr() - if custom_repr is None and isinstance(result, dict | Data | str): - custom_repr = result - if not isinstance(custom_repr, str): - custom_repr = str(custom_repr) - - raw = self._process_raw_result(result) - artifact_type = get_artifact_type(self.status or raw, result) - raw, artifact_type = post_process_raw(raw, artifact_type) - return {"repr": custom_repr, "raw": raw, "type": artifact_type} - - def _process_raw_result(self, result): - return self.extract_data(result) - - def extract_data(self, result): - """Extract the data from the result. this is where the self.status is set.""" - if isinstance(result, Message): - self.status = result.get_text() - return ( - self.status if self.status is not None else "No text available" - ) # Provide a default message if .text_key is missing - if hasattr(result, "data"): - return result.data - if hasattr(result, "model_dump"): - return result.model_dump() - if isinstance(result, Data | dict | str): - return result.data if isinstance(result, Data) else result - - if self.status: - return self.status - return result - - def _log_output(self, output): - self._output_logs[output.name] = self._logs - self._logs = [] - self._current_output = "" - - def _finalize_results(self, results, artifacts): - self._artifacts = artifacts - self._results = results - if self._tracing_service: - self._tracing_service.set_outputs(self.trace_name, results) - - def custom_repr(self): - if self.repr_value == "": - self.repr_value = self.status - if isinstance(self.repr_value, dict): - return yaml.dump(self.repr_value) - if isinstance(self.repr_value, str): - return self.repr_value - if isinstance(self.repr_value, BaseModel) and not isinstance(self.repr_value, Data): - return str(self.repr_value) - return self.repr_value - - def build_inputs(self): - """Builds the inputs for the custom component. - - Returns: - List[Input]: The list of inputs. - """ - # This function is similar to build_config, but it will process the inputs - # and return them as a dict with keys being the Input.name and values being the Input.model_dump() - self.inputs = self.template_config.get("inputs", []) - if not self.inputs: - return {} - return {_input.name: _input.model_dump(by_alias=True, exclude_none=True) for _input in self.inputs} - - def _get_field_order(self): - try: - inputs = self.template_config["inputs"] - return [field.name for field in inputs] - except KeyError: - return [] - - def build(self, **kwargs) -> None: - self.set_attributes(kwargs) - - def _get_fallback_input(self, **kwargs): - return Input(**kwargs) - - async def to_toolkit(self) -> list[Tool]: - """Convert component to a list of tools. - - This is a template method that defines the skeleton of the toolkit creation - algorithm. Subclasses can override _get_tools() to provide custom tool - implementations while maintaining the metadata update functionality. - - Returns: - list[Tool]: A list of tools with updated metadata. 
Each tool contains: - - name: The name of the tool - - description: A description of what the tool does - - tags: List of tags associated with the tool - """ - # Get tools from subclass implementation - tools = await self._get_tools() - - if hasattr(self, TOOLS_METADATA_INPUT_NAME): - tools = self._filter_tools_by_status(tools=tools, metadata=self.tools_metadata) - return self._update_tools_with_metadata(tools=tools, metadata=self.tools_metadata) - - # If no metadata exists yet, filter based on enabled_tools - return self._filter_tools_by_status(tools=tools, metadata=None) - - async def _get_tools(self) -> list[Tool]: - """Get the list of tools for this component. - - This method can be overridden by subclasses to provide custom tool implementations. - The default implementation uses ComponentToolkit. - - Returns: - list[Tool]: List of tools provided by this component - """ - component_toolkit: type[ComponentToolkit] = _get_component_toolkit() - return component_toolkit(component=self).get_tools(callbacks=self.get_langchain_callbacks()) - - def _extract_tools_tags(self, tools_metadata: list[dict]) -> list[str]: - """Extract the first tag from each tool's metadata.""" - return [tool["tags"][0] for tool in tools_metadata if tool["tags"]] - - def _update_tools_with_metadata(self, tools: list[Tool], metadata: DataFrame | None) -> list[Tool]: - """Update tools with provided metadata.""" - component_toolkit: type[ComponentToolkit] = _get_component_toolkit() - return component_toolkit(component=self, metadata=metadata).update_tools_metadata(tools=tools) - - def check_for_tool_tag_change(self, old_tags: list[str], new_tags: list[str]) -> bool: - # First check length - if different lengths, they can't be equal - if len(old_tags) != len(new_tags): - return True - # Use set comparison for O(n) average case complexity, earlier the old_tags.sort() != new_tags.sort() was used - return set(old_tags) != set(new_tags) - - def _filter_tools_by_status(self, tools: list[Tool], metadata: pd.DataFrame | None) -> list[Tool]: - """Filter tools based on their status in metadata. - - Args: - tools (list[Tool]): List of tools to filter. - metadata (list[dict] | None): Tools metadata containing status information. - - Returns: - list[Tool]: Filtered list of tools. 
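The `_get_tools` seam above lets a subclass supply its own tools while `to_toolkit` keeps the metadata and status-filtering behavior. A sketch using LangChain's `StructuredTool.from_function` and the assumed `langflow.custom.Component` import path:

```python
from langchain_core.tools import StructuredTool

from langflow.custom import Component


def shout(text: str) -> str:
    """Upper-case the input text."""
    return text.upper()


class MyToolComponent(Component):
    async def _get_tools(self) -> list[StructuredTool]:
        # to_toolkit() will still apply metadata updates and status filtering.
        return [StructuredTool.from_function(func=shout)]
```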
- """ - # Convert metadata to a list of dicts if it's a DataFrame - metadata_dict = None # Initialize as None to avoid lint issues with empty dict - if isinstance(metadata, pd.DataFrame): - metadata_dict = metadata.to_dict(orient="records") - - # If metadata is None or empty, use enabled_tools - if not metadata_dict: - enabled = self.enabled_tools - return ( - tools - if enabled is None - else [ - tool for tool in tools if any(enabled_name in [tool.name, *tool.tags] for enabled_name in enabled) - ] - ) - - # Ensure metadata is a list of dicts - if not isinstance(metadata_dict, list): - return tools - - # Create a mapping of tool names to their status - tool_status = {item["name"]: item.get("status", True) for item in metadata_dict} - return [tool for tool in tools if tool_status.get(tool.name, True)] - - def _build_tool_data(self, tool: Tool) -> dict: - if tool.metadata is None: - tool.metadata = {} - return { - "name": tool.name, - "description": tool.description, - "tags": tool.tags if hasattr(tool, "tags") and tool.tags else [tool.name], - "status": True, # Initialize all tools with status True - "display_name": tool.metadata.get("display_name", tool.name), - "display_description": tool.metadata.get("display_description", tool.description), - "readonly": tool.metadata.get("readonly", False), - "args": tool.args, - # "args_schema": tool.args_schema, - } - - async def _build_tools_metadata_input(self): - try: - from langflow.io import ToolsInput - except ImportError as e: - msg = "Failed to import ToolsInput from langflow.io" - raise ImportError(msg) from e - placeholder = None - tools = [] - try: - tools = await self._get_tools() - placeholder = "Loading actions..." if len(tools) == 0 else "" - except (TimeoutError, asyncio.TimeoutError): - placeholder = "Timeout loading actions" - except (ConnectionError, OSError, ValueError): - placeholder = "Error loading actions" - # Always use the latest tool data - tool_data = [self._build_tool_data(tool) for tool in tools] - # print(tool_data) - if hasattr(self, TOOLS_METADATA_INPUT_NAME): - old_tags = self._extract_tools_tags(self.tools_metadata) - new_tags = self._extract_tools_tags(tool_data) - if self.check_for_tool_tag_change(old_tags, new_tags): - # If enabled tools are set, update status based on them - enabled = self.enabled_tools - if enabled is not None: - for item in tool_data: - item["status"] = any(enabled_name in [item["name"], *item["tags"]] for enabled_name in enabled) - self.tools_metadata = tool_data - else: - # Preserve existing status values - existing_status = {item["name"]: item.get("status", True) for item in self.tools_metadata} - for item in tool_data: - item["status"] = existing_status.get(item["name"], True) - tool_data = self.tools_metadata - else: - # If enabled tools are set, update status based on them - enabled = self.enabled_tools - if enabled is not None: - for item in tool_data: - item["status"] = any(enabled_name in [item["name"], *item["tags"]] for enabled_name in enabled) - self.tools_metadata = tool_data - - return ToolsInput( - name=TOOLS_METADATA_INPUT_NAME, - placeholder=placeholder, - display_name="Actions", - info=TOOLS_METADATA_INFO, - value=tool_data, - ) - - def get_project_name(self): - if hasattr(self, "_tracing_service") and self._tracing_service: - return self._tracing_service.project_name - return "Langflow" - - def log(self, message: LoggableType | list[LoggableType], name: str | None = None) -> None: - """Logs a message. 
- - Args: - message (LoggableType | list[LoggableType]): The message to log. - name (str, optional): The name of the log. Defaults to None. - """ - if name is None: - name = f"Log {len(self._logs) + 1}" - log = Log(message=message, type=get_artifact_type(message), name=name) - self._logs.append(log) - if self._tracing_service and self._vertex: - self._tracing_service.add_log(trace_name=self.trace_name, log=log) - if self._event_manager is not None and self._current_output: - data = log.model_dump() - data["output"] = self._current_output - data["component_id"] = self._id - self._event_manager.on_log(data=data) - - def _append_tool_output(self) -> None: - if next((output for output in self.outputs if output.name == TOOL_OUTPUT_NAME), None) is None: - self.outputs.append( - Output( - name=TOOL_OUTPUT_NAME, - display_name=TOOL_OUTPUT_DISPLAY_NAME, - method="to_toolkit", - types=["Tool"], - ) - ) - - def is_connected_to_chat_output(self) -> bool: - # Lazy import to avoid circular dependency - from langflow.graph.utils import has_chat_output - - return has_chat_output(self.graph.get_vertex_neighbors(self._vertex)) - - def _should_skip_message(self, message: Message) -> bool: - """Check if the message should be skipped based on vertex configuration and message type.""" - return ( - self._vertex is not None - and not (self._vertex.is_output or self._vertex.is_input) - and not self.is_connected_to_chat_output() - and not isinstance(message, ErrorMessage) - ) - - async def send_message(self, message: Message, id_: str | None = None): - if self._should_skip_message(message): - return message - if (hasattr(self, "graph") and self.graph.session_id) and (message is not None and not message.session_id): - session_id = ( - UUID(self.graph.session_id) if isinstance(self.graph.session_id, str) else self.graph.session_id - ) - message.session_id = session_id - if hasattr(message, "flow_id") and isinstance(message.flow_id, str): - message.flow_id = UUID(message.flow_id) - stored_message = await self._store_message(message) - - self._stored_message_id = stored_message.id - try: - complete_message = "" - if ( - self._should_stream_message(stored_message, message) - and message is not None - and isinstance(message.text, AsyncIterator | Iterator) - ): - complete_message = await self._stream_message(message.text, stored_message) - stored_message.text = complete_message - stored_message = await self._update_stored_message(stored_message) - else: - # Only send message event for non-streaming messages - await self._send_message_event(stored_message, id_=id_) - except Exception: - # remove the message from the database - await delete_message(stored_message.id) - raise - self.status = stored_message - return stored_message - - async def _store_message(self, message: Message) -> Message: - flow_id: str | None = None - if hasattr(self, "graph"): - # Convert UUID to str if needed - flow_id = str(self.graph.flow_id) if self.graph.flow_id else None - stored_messages = await astore_message(message, flow_id=flow_id) - if len(stored_messages) != 1: - msg = "Only one message can be stored at a time." 
- raise ValueError(msg) - stored_message = stored_messages[0] - return await Message.create(**stored_message.model_dump()) - - async def _send_message_event(self, message: Message, id_: str | None = None, category: str | None = None) -> None: - if hasattr(self, "_event_manager") and self._event_manager: - data_dict = message.model_dump()["data"] if hasattr(message, "data") else message.model_dump() - if id_ and not data_dict.get("id"): - data_dict["id"] = id_ - category = category or data_dict.get("category", None) - - def _send_event(): - match category: - case "error": - self._event_manager.on_error(data=data_dict) - case "remove_message": - # Check if id exists in data_dict before accessing it - if "id" in data_dict: - self._event_manager.on_remove_message(data={"id": data_dict["id"]}) - else: - # If no id, try to get it from the message object or id_ parameter - message_id = getattr(message, "id", None) or id_ - if message_id: - self._event_manager.on_remove_message(data={"id": message_id}) - case _: - self._event_manager.on_message(data=data_dict) - - await asyncio.to_thread(_send_event) - - def _should_stream_message(self, stored_message: Message, original_message: Message) -> bool: - return bool( - hasattr(self, "_event_manager") - and self._event_manager - and stored_message.id - and not isinstance(original_message.text, str) - ) - - async def _update_stored_message(self, message: Message) -> Message: - """Update the stored message.""" - if hasattr(self, "_vertex") and self._vertex is not None and hasattr(self._vertex, "graph"): - flow_id = ( - UUID(self._vertex.graph.flow_id) - if isinstance(self._vertex.graph.flow_id, str) - else self._vertex.graph.flow_id - ) - - message.flow_id = flow_id - - message_tables = await aupdate_messages(message) - if not message_tables: - msg = "Failed to update message" - raise ValueError(msg) - message_table = message_tables[0] - return await Message.create(**message_table.model_dump()) - - async def _stream_message(self, iterator: AsyncIterator | Iterator, message: Message) -> str: - if not isinstance(iterator, AsyncIterator | Iterator): - msg = "The message must be an iterator or an async iterator." 
- raise TypeError(msg) - - if isinstance(iterator, AsyncIterator): - return await self._handle_async_iterator(iterator, message.id, message) - try: - complete_message = "" - first_chunk = True - for chunk in iterator: - complete_message = await self._process_chunk( - chunk.content, complete_message, message.id, message, first_chunk=first_chunk - ) - first_chunk = False - except Exception as e: - raise StreamingError(cause=e, source=message.properties.source) from e - else: - return complete_message - - async def _handle_async_iterator(self, iterator: AsyncIterator, message_id: str, message: Message) -> str: - complete_message = "" - first_chunk = True - async for chunk in iterator: - complete_message = await self._process_chunk( - chunk.content, complete_message, message_id, message, first_chunk=first_chunk - ) - first_chunk = False - return complete_message - - async def _process_chunk( - self, chunk: str, complete_message: str, message_id: str, message: Message, *, first_chunk: bool = False - ) -> str: - complete_message += chunk - if self._event_manager: - if first_chunk: - # Send the initial message only on the first chunk - msg_copy = message.model_copy() - msg_copy.text = complete_message - await self._send_message_event(msg_copy, id_=message_id) - await asyncio.to_thread( - self._event_manager.on_token, - data={ - "chunk": chunk, - "id": str(message_id), - }, - ) - return complete_message - - async def send_error( - self, - exception: Exception, - session_id: str, - trace_name: str, - source: Source, - ) -> Message | None: - """Send an error message to the frontend.""" - flow_id = self.graph.flow_id if hasattr(self, "graph") else None - if not session_id: - return None - error_message = ErrorMessage( - flow_id=flow_id, - exception=exception, - session_id=session_id, - trace_name=trace_name, - source=source, - ) - await self.send_message(error_message) - return error_message - - def _append_tool_to_outputs_map(self): - self._outputs_map[TOOL_OUTPUT_NAME] = self._build_tool_output() - # add a new input for the tool schema - # self.inputs.append(self._build_tool_schema()) - - def _build_tool_output(self) -> Output: - return Output(name=TOOL_OUTPUT_NAME, display_name=TOOL_OUTPUT_DISPLAY_NAME, method="to_toolkit", types=["Tool"]) - - def get_input_display_name(self, input_name: str) -> str: - """Get the display name of an input. - - This is a public utility method that subclasses can use to get user-friendly - display names for inputs when building error messages or UI elements. - - Usage: - msg = f"Input {self.get_input_display_name(input_name)} not found" - - Args: - input_name (str): The name of the input. - - Returns: - str: The display name of the input, or the input name if not found. - """ - if input_name in self._inputs: - return getattr(self._inputs[input_name], "display_name", input_name) - return input_name - - def get_output_display_name(self, output_name: str) -> str: - """Get the display name of an output. - - This is a public utility method that subclasses can use to get user-friendly - display names for outputs when building error messages or UI elements. - - Args: - output_name (str): The name of the output. - - Returns: - str: The display name of the output, or the output name if not found. - """ - if output_name in self._outputs_map: - return getattr(self._outputs_map[output_name], "display_name", output_name) - return output_name - - def build_input_error_message(self, input_name: str, message: str) -> str: - """Build an error message for an input. 
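A minimal sketch of the chunk objects consumed by the streaming path above; only `.content` is read, mirroring LangChain message chunks:

```python
from dataclasses import dataclass


@dataclass
class Chunk:
    content: str  # the only attribute _process_chunk reads


async def token_stream():
    # Setting a Message's text to an (async) iterator like this one is what
    # routes send_message() through the streaming branch: the first chunk
    # emits a message event, and every chunk emits a token event.
    for piece in ("Hel", "lo", "!"):
        yield Chunk(piece)
```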
- - This is a public utility method that subclasses can use to create consistent, - user-friendly error messages that reference inputs by their display names. - The input name is placed at the beginning to ensure it's visible even if the message is truncated. - - Args: - input_name (str): The name of the input. - message (str): The error message. - - Returns: - str: The formatted error message with display name. - """ - display_name = self.get_input_display_name(input_name) - return f"[Input: {display_name}] {message}" - - def build_output_error_message(self, output_name: str, message: str) -> str: - """Build an error message for an output. - - This is a public utility method that subclasses can use to create consistent, - user-friendly error messages that reference outputs by their display names. - The output name is placed at the beginning to ensure it's visible even if the message is truncated. - - Args: - output_name (str): The name of the output. - message (str): The error message. - - Returns: - str: The formatted error message with display name. - """ - display_name = self.get_output_display_name(output_name) - return f"[Output: {display_name}] {message}" - - def build_component_error_message(self, message: str) -> str: - """Build an error message for the component. - - This is a public utility method that subclasses can use to create consistent, - user-friendly error messages that reference the component by its display name. - The component name is placed at the beginning to ensure it's visible even if the message is truncated. - - Args: - message (str): The error message. - Returns: - str: The formatted error message with component display name. - """ - return f"[Component: {self.display_name or self.__class__.__name__}] {message}" +# For backwards compatibility - some code might still use the private function +_get_component_toolkit = get_component_toolkit + +__all__ = [ + "BACKWARDS_COMPATIBLE_ATTRIBUTES", + "CONFIG_ATTRIBUTES", + "Component", + "PlaceholderGraph", + "_get_component_toolkit", + "get_component_toolkit", +] diff --git a/src/backend/base/langflow/custom/custom_component/component_with_cache.py b/src/backend/base/langflow/custom/custom_component/component_with_cache.py index 74f93528ffae..325d9597c3ad 100644 --- a/src/backend/base/langflow/custom/custom_component/component_with_cache.py +++ b/src/backend/base/langflow/custom/custom_component/component_with_cache.py @@ -1,8 +1 @@ -from langflow.custom.custom_component.component import Component -from langflow.services.deps import get_shared_component_cache_service - - -class ComponentWithCache(Component): - def __init__(self, **data) -> None: - super().__init__(**data) - self._shared_component_cache = get_shared_component_cache_service() +from lfx.custom.custom_component.component_with_cache import * # noqa: F403 diff --git a/src/backend/base/langflow/custom/custom_component/custom_component.py b/src/backend/base/langflow/custom/custom_component/custom_component.py index cf54f6f9aebc..a10ead38ea11 100644 --- a/src/backend/base/langflow/custom/custom_component/custom_component.py +++ b/src/backend/base/langflow/custom/custom_component/custom_component.py @@ -1,552 +1 @@ -from __future__ import annotations - -import uuid -from collections.abc import Callable, Sequence -from pathlib import Path -from typing import TYPE_CHECKING, Any, ClassVar - -import yaml -from cachetools import TTLCache -from langchain_core.documents import Document -from loguru import logger -from pydantic import BaseModel - -from 
langflow.custom.custom_component.base_component import BaseComponent -from langflow.helpers.flow import list_flows, load_flow, run_flow -from langflow.schema.data import Data -from langflow.services.deps import get_storage_service, get_variable_service, session_scope -from langflow.services.storage.service import StorageService -from langflow.template.utils import update_frontend_node_with_template_values -from langflow.type_extraction.type_extraction import post_process_type -from langflow.utils import validate -from langflow.utils.async_helpers import run_until_complete - -if TYPE_CHECKING: - from langchain.callbacks.base import BaseCallbackHandler - - from langflow.graph.graph.base import Graph - from langflow.graph.vertex.base import Vertex - from langflow.schema.dotdict import dotdict - from langflow.schema.schema import OutputValue - from langflow.services.storage.service import StorageService - from langflow.services.tracing.schema import Log - from langflow.services.tracing.service import TracingService - - -class CustomComponent(BaseComponent): - """Represents a custom component in Langflow. - - Attributes: - name (Optional[str]): This attribute helps the frontend apply styles to known components. - display_name (Optional[str]): The display name of the custom component. - description (Optional[str]): The description of the custom component. - code (Optional[str]): The code of the custom component. - field_config (dict): The field configuration of the custom component. - code_class_base_inheritance (ClassVar[str]): The base class name for the custom component. - function_entrypoint_name (ClassVar[str]): The name of the function entrypoint for the custom component. - function (Optional[Callable]): The function associated with the custom component. - repr_value (Optional[Any]): The representation value of the custom component. - user_id (Optional[Union[UUID, str]]): The user ID associated with the custom component. - status (Optional[Any]): The status of the custom component. - _tree (Optional[dict]): The code tree of the custom component. - """ - - # True constants that should be shared (using ClassVar) - _code_class_base_inheritance: ClassVar[str] = "CustomComponent" - function_entrypoint_name: ClassVar[str] = "build" - name: str | None = None - """The name of the component used to styles. Defaults to None.""" - display_name: str | None = None - """The display name of the component. Defaults to None.""" - description: str | None = None - """The description of the component. Defaults to None.""" - icon: str | None = None - """The icon of the component. It should be an emoji. Defaults to None.""" - priority: int | None = None - """The priority of the component in the category. Lower priority means it will be displayed first. Defaults to None. - """ - - def __init__(self, **data) -> None: - """Initializes a new instance of the CustomComponent class. - - Args: - **data: Additional keyword arguments to initialize the custom component. 
- """ - # Initialize instance-specific attributes first - self.is_input: bool | None = None - self.is_output: bool | None = None - self.add_tool_output: bool = False - self.field_config: dict = {} - self.field_order: list[str] | None = None - self.frozen: bool = False - self.build_parameters: dict | None = None - self._vertex: Vertex | None = None - self.function: Callable | None = None - self.repr_value: Any = "" - self.status: Any | None = None - - # Initialize collections with empty defaults - self._flows_data: list[Data] | None = None - self._outputs: list[OutputValue] = [] - self._logs: list[Log] = [] - self._output_logs: dict[str, list[Log] | Log] = {} - self._tracing_service: TracingService | None = None - self._tree: dict | None = None - - # Initialize additional instance state - self.cache: TTLCache = TTLCache(maxsize=1024, ttl=60) - self._results: dict = {} - self._artifacts: dict = {} - - # Call parent's init after setting up our attributes - super().__init__(**data) - - def set_attributes(self, parameters: dict) -> None: - pass - - def set_parameters(self, parameters: dict) -> None: - self._parameters = parameters - self.set_attributes(self._parameters) - - @property - def trace_name(self) -> str: - if hasattr(self, "_id") and self._id is None: - msg = "Component id is not set" - raise ValueError(msg) - if hasattr(self, "_id"): - return f"{self.display_name} ({self._id})" - return f"{self.display_name}" - - def stop(self, output_name: str | None = None) -> None: - if not output_name and self._vertex and len(self._vertex.outputs) == 1: - output_name = self._vertex.outputs[0]["name"] - elif not output_name: - msg = "You must specify an output name to call stop" - raise ValueError(msg) - if not self._vertex: - msg = "Vertex is not set" - raise ValueError(msg) - try: - self.graph.mark_branch(vertex_id=self._vertex.id, output_name=output_name, state="INACTIVE") - except Exception as e: - msg = f"Error stopping {self.display_name}: {e}" - raise ValueError(msg) from e - - def start(self, output_name: str | None = None) -> None: - if not output_name and self._vertex and len(self._vertex.outputs) == 1: - output_name = self._vertex.outputs[0]["name"] - elif not output_name: - msg = "You must specify an output name to call start" - raise ValueError(msg) - if not self._vertex: - msg = "Vertex is not set" - raise ValueError(msg) - try: - self.graph.mark_branch(vertex_id=self._vertex.id, output_name=output_name, state="ACTIVE") - except Exception as e: - msg = f"Error starting {self.display_name}: {e}" - raise ValueError(msg) from e - - @staticmethod - def resolve_path(path: str) -> str: - """Resolves the path to an absolute path.""" - if not path: - return path - path_object = Path(path) - - if path_object.parts and path_object.parts[0] == "~": - path_object = path_object.expanduser() - elif path_object.is_relative_to("."): - path_object = path_object.resolve() - return str(path_object) - - def get_full_path(self, path: str) -> str: - storage_svc: StorageService = get_storage_service() - - flow_id, file_name = path.split("/", 1) - return storage_svc.build_full_path(flow_id, file_name) - - @property - def graph(self): - return self._vertex.graph - - @property - def user_id(self): - if hasattr(self, "_user_id") and self._user_id: - return self._user_id - return self.graph.user_id - - @property - def flow_id(self): - return self.graph.flow_id - - @property - def flow_name(self): - return self.graph.flow_name - - def _get_field_order(self): - return self.field_order or 
list(self.field_config.keys()) - - def custom_repr(self): - """Returns the custom representation of the custom component. - - Returns: - str: The custom representation of the custom component. - """ - if self.repr_value == "": - self.repr_value = self.status - if isinstance(self.repr_value, dict): - return yaml.dump(self.repr_value) - if isinstance(self.repr_value, str): - return self.repr_value - if isinstance(self.repr_value, BaseModel) and not isinstance(self.repr_value, Data): - return str(self.repr_value) - return self.repr_value - - def build_config(self): - """Builds the configuration for the custom component. - - Returns: - dict: The configuration for the custom component. - """ - return self.field_config - - def update_build_config( - self, - build_config: dotdict, - field_value: Any, - field_name: str | None = None, - ): - """Updates the build configuration for the custom component. - - Do not call directly as implementation can be a coroutine. - """ - build_config[field_name]["value"] = field_value - return build_config - - @property - def tree(self): - """Gets the code tree of the custom component. - - Returns: - dict: The code tree of the custom component. - """ - return self.get_code_tree(self._code or "") - - def to_data(self, data: Any, *, keys: list[str] | None = None, silent_errors: bool = False) -> list[Data]: - """Converts input data into a list of Data objects. - - Args: - data (Any): The input data to be converted. It can be a single item or a sequence of items. - If the input data is a Langchain Document, text_key and data_key are ignored. - - keys (List[str], optional): The keys to access the text and data values in each item. - It should be a list of strings where the first element is the text key and the second element - is the data key. - Defaults to None, in which case the default keys "text" and "data" are used. - silent_errors (bool, optional): Whether to suppress errors when the specified keys are not found - in the data. - - Returns: - List[Data]: A list of Data objects. - - Raises: - ValueError: If the input data is not of a valid type or if the specified keys are not found in the data. - - """ - if not keys: - keys = [] - data_objects = [] - if not isinstance(data, Sequence): - data = [data] - for item in data: - data_dict = {} - if isinstance(item, Document): - data_dict = item.metadata - data_dict["text"] = item.page_content - elif isinstance(item, BaseModel): - model_dump = item.model_dump() - for key in keys: - if silent_errors: - data_dict[key] = model_dump.get(key, "") - else: - try: - data_dict[key] = model_dump[key] - except KeyError as e: - msg = f"Key {key} not found in {item}" - raise ValueError(msg) from e - - elif isinstance(item, str): - data_dict = {"text": item} - elif isinstance(item, dict): - data_dict = item.copy() - else: - msg = f"Invalid data type: {type(item)}" - raise TypeError(msg) - - data_objects.append(Data(data=data_dict)) - - return data_objects - - def get_method_return_type(self, method_name: str): - build_method = self.get_method(method_name) - if not build_method or not build_method.get("has_return"): - return [] - return_type = build_method["return_type"] - - return self._extract_return_type(return_type) - - def create_references_from_data(self, data: list[Data], *, include_data: bool = False) -> str: - """Create references from a list of data. - - Args: - data (List[dict]): A list of data, where each record is a dictionary. - include_data (bool, optional): Whether to include data in the references. Defaults to False. 
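An illustrative call to `to_data` above (`component` is a hypothetical `CustomComponent` instance):

```python
# Strings become {"text": ...}; dicts are copied as-is into Data.data.
rows = component.to_data(["hello", {"text": "world", "lang": "en"}])
# -> [Data(data={"text": "hello"}), Data(data={"text": "world", "lang": "en"})]
```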
- - Returns: - str: A string containing the references in markdown format. - """ - if not data: - return "" - markdown_string = "---\n" - for value in data: - markdown_string += f"- Text: {value.get_text()}" - if include_data: - markdown_string += f" Data: {value.data}" - markdown_string += "\n" - return markdown_string - - @property - def get_function_entrypoint_args(self) -> list: - """Gets the arguments of the function entrypoint for the custom component. - - Returns: - list: The arguments of the function entrypoint. - """ - build_method = self.get_method(self._function_entrypoint_name) - if not build_method: - return [] - - args = build_method["args"] - for arg in args: - if not arg.get("type") and arg.get("name") != "self": - # Set the type to Data - arg["type"] = "Data" - return args - - def get_method(self, method_name: str): - """Gets the build method for the custom component. - - Returns: - dict: The build method for the custom component. - """ - if not self._code: - return {} - - component_classes = [ - cls for cls in self.tree["classes"] if "Component" in cls["bases"] or "CustomComponent" in cls["bases"] - ] - if not component_classes: - return {} - - # Assume the first Component class is the one we're interested in - component_class = component_classes[0] - build_methods = [method for method in component_class["methods"] if method["name"] == (method_name)] - - return build_methods[0] if build_methods else {} - - @property - def _get_function_entrypoint_return_type(self) -> list[Any]: - """Gets the return type of the function entrypoint for the custom component. - - Returns: - List[Any]: The return type of the function entrypoint. - """ - return self.get_method_return_type(self._function_entrypoint_name) - - def _extract_return_type(self, return_type: Any) -> list[Any]: - return post_process_type(return_type) - - @property - def get_main_class_name(self): - """Gets the main class name of the custom component. - - Returns: - str: The main class name of the custom component. - """ - if not self._code: - return "" - - base_name = self._code_class_base_inheritance - method_name = self._function_entrypoint_name - - classes = [] - for item in self.tree.get("classes", []): - if base_name in item["bases"]: - method_names = [method["name"] for method in item["methods"]] - if method_name in method_names: - classes.append(item["name"]) - - # Get just the first item - return next(iter(classes), "") - - @property - def template_config(self): - """Gets the template configuration for the custom component. - - Returns: - dict: The template configuration for the custom component. - """ - if not self._template_config: - self._template_config = self.build_template_config() - return self._template_config - - def variables(self, name: str, field: str): - """DEPRECATED - This is kept for backward compatibility. Use get_variables instead.""" - return run_until_complete(self.get_variables(name, field)) - - async def get_variables(self, name: str, field: str): - """DEPRECATED - This is kept for backward compatibility. Use get_variable instead.""" - async with session_scope() as session: - return await self.get_variable(name, field, session) - - async def get_variable(self, name: str, field: str, session): - """Returns the variable for the current user with the specified name. - - Raises: - ValueError: If the user id is not set. - - Returns: - The variable for the current user with the specified name. 
- """ - if hasattr(self, "_user_id") and not self.user_id: - msg = f"User id is not set for {self.__class__.__name__}" - raise ValueError(msg) - - # Check graph context for request-level variable overrides first - if hasattr(self, "graph") and self.graph and hasattr(self.graph, "context"): - context = self.graph.context - if context and "request_variables" in context: - request_variables = context["request_variables"] - if name in request_variables: - logger.debug(f"Found context override for variable '{name}': {request_variables[name]}") - return request_variables[name] - - variable_service = get_variable_service() # Get service instance - # Retrieve and decrypt the variable by name for the current user - if isinstance(self.user_id, str): - user_id = uuid.UUID(self.user_id) - elif isinstance(self.user_id, uuid.UUID): - user_id = self.user_id - else: - msg = f"Invalid user id: {self.user_id}" - raise TypeError(msg) - return await variable_service.get_variable(user_id=user_id, name=name, field=field, session=session) - - async def list_key_names(self): - """Lists the names of the variables for the current user. - - Raises: - ValueError: If the user id is not set. - - Returns: - List[str]: The names of the variables for the current user. - """ - if hasattr(self, "_user_id") and not self.user_id: - msg = f"User id is not set for {self.__class__.__name__}" - raise ValueError(msg) - variable_service = get_variable_service() - - async with session_scope() as session: - return await variable_service.list_variables(user_id=self.user_id, session=session) - - def index(self, value: int = 0): - """Returns a function that returns the value at the given index in the iterable. - - Args: - value (int): The index value. - - Returns: - Callable: A function that returns the value at the given index. - """ - - def get_index(iterable: list[Any]): - return iterable[value] if iterable else iterable - - return get_index - - def get_function(self): - """Gets the function associated with the custom component. - - Returns: - Callable: The function associated with the custom component. - """ - return validate.create_function(self._code, self._function_entrypoint_name) - - async def load_flow(self, flow_id: str, tweaks: dict | None = None) -> Graph: - if not self.user_id: - msg = "Session is invalid" - raise ValueError(msg) - return await load_flow(user_id=str(self.user_id), flow_id=flow_id, tweaks=tweaks) - - async def run_flow( - self, - inputs: dict | list[dict] | None = None, - flow_id: str | None = None, - flow_name: str | None = None, - output_type: str | None = "chat", - tweaks: dict | None = None, - ) -> Any: - return await run_flow( - inputs=inputs, - output_type=output_type, - flow_id=flow_id, - flow_name=flow_name, - tweaks=tweaks, - user_id=str(self.user_id), - run_id=self.graph.run_id, - ) - - def list_flows(self) -> list[Data]: - """DEPRECATED - This is kept for backward compatibility. Using alist_flows instead is recommended.""" - return run_until_complete(self.alist_flows()) - - async def alist_flows(self) -> list[Data]: - if not self.user_id: - msg = "Session is invalid" - raise ValueError(msg) - try: - return await list_flows(user_id=str(self.user_id)) - except Exception as e: - msg = f"Error listing flows: {e}" - raise ValueError(msg) from e - - def build(self, *args: Any, **kwargs: Any) -> Any: - """Builds the custom component. - - Args: - *args: The positional arguments. - **kwargs: The keyword arguments. - - Returns: - Any: The result of the build process. 
- """ - raise NotImplementedError - - def post_code_processing(self, new_frontend_node: dict, current_frontend_node: dict): - """DEPRECATED - Kept for backward compatibility. Use update_frontend_node instead.""" - run_until_complete(self.update_frontend_node(new_frontend_node, current_frontend_node)) - - async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict): - """Updates the given new frontend node with values from the current frontend node. - - This function is called after the code validation is done. - """ - return update_frontend_node_with_template_values( - frontend_node=new_frontend_node, raw_frontend_node=current_frontend_node - ) - - def get_langchain_callbacks(self) -> list[BaseCallbackHandler]: - if self._tracing_service: - return self._tracing_service.get_langchain_callbacks() - return [] +from lfx.custom.custom_component.custom_component import * # noqa: F403 diff --git a/src/backend/base/langflow/custom/utils.py b/src/backend/base/langflow/custom/utils.py index 5d7fcdc852f0..d58d1c6550e1 100644 --- a/src/backend/base/langflow/custom/utils.py +++ b/src/backend/base/langflow/custom/utils.py @@ -1,872 +1 @@ -# mypy: ignore-errors -from __future__ import annotations - -import ast -import asyncio -import contextlib -import hashlib -import inspect -import re -import traceback -from pathlib import Path -from typing import TYPE_CHECKING, Any - -from fastapi import HTTPException -from pydantic import BaseModel - -from langflow.custom.custom_component.component import Component -from langflow.custom.dependency_analyzer import analyze_component_dependencies -from langflow.custom.directory_reader.utils import ( - abuild_custom_component_list_from_path, - build_custom_component_list_from_path, - merge_nested_dicts_with_renaming, -) -from langflow.custom.eval import eval_custom_component_code -from langflow.custom.schema import MissingDefault -from langflow.field_typing.range_spec import RangeSpec -from langflow.helpers.custom import format_type -from langflow.logging.logger import logger -from langflow.schema.dotdict import dotdict -from langflow.template.field.base import Input -from langflow.template.frontend_node.custom_components import ComponentFrontendNode, CustomComponentFrontendNode -from langflow.type_extraction.type_extraction import extract_inner_type -from langflow.utils import validate -from langflow.utils.util import get_base_classes - -if TYPE_CHECKING: - from uuid import UUID - - from langflow.custom.custom_component.custom_component import CustomComponent - - -def _generate_code_hash(source_code: str, modname: str) -> str: - """Generate a hash of the component source code. 
diff --git a/src/backend/base/langflow/custom/utils.py b/src/backend/base/langflow/custom/utils.py
index 5d7fcdc852f0..d58d1c6550e1 100644
--- a/src/backend/base/langflow/custom/utils.py
+++ b/src/backend/base/langflow/custom/utils.py
@@ -1,872 +1 @@
-# mypy: ignore-errors
-from __future__ import annotations
-
-import ast
-import asyncio
-import contextlib
-import hashlib
-import inspect
-import re
-import traceback
-from pathlib import Path
-from typing import TYPE_CHECKING, Any
-
-from fastapi import HTTPException
-from pydantic import BaseModel
-
-from langflow.custom.custom_component.component import Component
-from langflow.custom.dependency_analyzer import analyze_component_dependencies
-from langflow.custom.directory_reader.utils import (
-    abuild_custom_component_list_from_path,
-    build_custom_component_list_from_path,
-    merge_nested_dicts_with_renaming,
-)
-from langflow.custom.eval import eval_custom_component_code
-from langflow.custom.schema import MissingDefault
-from langflow.field_typing.range_spec import RangeSpec
-from langflow.helpers.custom import format_type
-from langflow.logging.logger import logger
-from langflow.schema.dotdict import dotdict
-from langflow.template.field.base import Input
-from langflow.template.frontend_node.custom_components import ComponentFrontendNode, CustomComponentFrontendNode
-from langflow.type_extraction.type_extraction import extract_inner_type
-from langflow.utils import validate
-from langflow.utils.util import get_base_classes
-
-if TYPE_CHECKING:
-    from uuid import UUID
-
-    from langflow.custom.custom_component.custom_component import CustomComponent
-
-
-def _generate_code_hash(source_code: str, modname: str) -> str:
-    """Generate a hash of the component source code.
-
-    Args:
-        source_code: The source code string
-        modname: The module name for context
-
-    Returns:
-        SHA256 hash of the source code
-
-    Raises:
-        ValueError: If source_code is empty or None
-        UnicodeEncodeError: If source_code cannot be encoded
-        TypeError: If source_code is not a string
-    """
-    if not source_code:
-        msg = f"Empty source code for {modname}"
-        raise ValueError(msg)
-
-    # Generate SHA256 hash of the source code
-    return hashlib.sha256(source_code.encode("utf-8")).hexdigest()[:12]  # First 12 chars for brevity
-
-
-class UpdateBuildConfigError(Exception):
-    pass
-
-
-def add_output_types(frontend_node: CustomComponentFrontendNode, return_types: list[str]) -> None:
-    """Add output types to the frontend node."""
-    for return_type in return_types:
-        if return_type is None:
-            raise HTTPException(
-                status_code=400,
-                detail={
-                    "error": ("Invalid return type. Please check your code and try again."),
-                    "traceback": traceback.format_exc(),
-                },
-            )
-        if return_type is str:
-            return_type_ = "Text"
-        elif hasattr(return_type, "__name__"):
-            return_type_ = return_type.__name__
-        elif hasattr(return_type, "__class__"):
-            return_type_ = return_type.__class__.__name__
-        else:
-            return_type_ = str(return_type)
-
-        frontend_node.add_output_type(return_type_)
-
-
-def reorder_fields(frontend_node: CustomComponentFrontendNode, field_order: list[str]) -> None:
-    """Reorder fields in the frontend node based on the specified field_order."""
-    if not field_order:
-        return
-
-    # Create a dictionary for O(1) lookup time.
-    field_dict = {field.name: field for field in frontend_node.template.fields}
-    reordered_fields = [field_dict[name] for name in field_order if name in field_dict]
-    # Add any fields that are not in the field_order list
-    reordered_fields.extend(field for field in frontend_node.template.fields if field.name not in field_order)
-    frontend_node.template.fields = reordered_fields
-    frontend_node.field_order = field_order
-
-
-def add_base_classes(frontend_node: CustomComponentFrontendNode, return_types: list[str]) -> None:
-    """Add base classes to the frontend node."""
-    for return_type_instance in return_types:
-        if return_type_instance is None:
-            raise HTTPException(
-                status_code=400,
-                detail={
-                    "error": ("Invalid return type. Please check your code and try again."),
-                    "traceback": traceback.format_exc(),
-                },
-            )
-
-        base_classes = get_base_classes(return_type_instance)
-        if return_type_instance is str:
-            base_classes.append("Text")
-
-        for base_class in base_classes:
-            frontend_node.add_base_class(base_class)
-
-
-def extract_type_from_optional(field_type):
-    """Extract the type from a string formatted as "Optional[]".
-
-    Parameters:
-    field_type (str): The string from which to extract the type.
-
-    Returns:
-    str: The extracted type, or an empty string if no type was found.
- """ - if "optional" not in field_type.lower(): - return field_type - match = re.search(r"\[(.*?)\]$", field_type) - return match[1] if match else field_type - - -def get_field_properties(extra_field): - """Get the properties of an extra field.""" - field_name = extra_field["name"] - field_type = extra_field.get("type", "str") - field_value = extra_field.get("default", "") - # a required field is a field that does not contain - # optional in field_type - # and a field that does not have a default value - field_required = "optional" not in field_type.lower() and isinstance(field_value, MissingDefault) - field_value = field_value if not isinstance(field_value, MissingDefault) else None - - if not field_required: - field_type = extract_type_from_optional(field_type) - if field_value is not None: - with contextlib.suppress(Exception): - field_value = ast.literal_eval(field_value) - return field_name, field_type, field_value, field_required - - -def process_type(field_type: str): - if field_type.startswith(("list", "List")): - return extract_inner_type(field_type) - - # field_type is a string can be Prompt or Code too - # so we just need to lower if it is the case - lowercase_type = field_type.lower() - if lowercase_type in {"prompt", "code"}: - return lowercase_type - return field_type - - -def add_new_custom_field( - *, - frontend_node: CustomComponentFrontendNode, - field_name: str, - field_type: str, - field_value: Any, - field_required: bool, - field_config: dict, -): - # Check field_config if any of the keys are in it - # if it is, update the value - display_name = field_config.pop("display_name", None) - if not field_type: - if "type" in field_config and field_config["type"] is not None: - field_type = field_config.pop("type") - elif "field_type" in field_config and field_config["field_type"] is not None: - field_type = field_config.pop("field_type") - field_contains_list = "list" in field_type.lower() - field_type = process_type(field_type) - field_value = field_config.pop("value", field_value) - field_advanced = field_config.pop("advanced", False) - - if field_type == "Dict": - field_type = "dict" - - if field_type == "bool" and field_value is None: - field_value = False - - if field_type == "SecretStr": - field_config["password"] = True - field_config["load_from_db"] = True - field_config["input_types"] = ["Text"] - - # If options is a list, then it's a dropdown or multiselect - # If options is None, then it's a list of strings - is_list = isinstance(field_config.get("options"), list) - field_config["is_list"] = is_list or field_config.get("list", False) or field_contains_list - - if "name" in field_config: - logger.warning("The 'name' key in field_config is used to build the object and can't be changed.") - required = field_config.pop("required", field_required) - placeholder = field_config.pop("placeholder", "") - - new_field = Input( - name=field_name, - field_type=field_type, - value=field_value, - show=True, - required=required, - advanced=field_advanced, - placeholder=placeholder, - display_name=display_name, - **sanitize_field_config(field_config), - ) - frontend_node.template.upsert_field(field_name, new_field) - if isinstance(frontend_node.custom_fields, dict): - frontend_node.custom_fields[field_name] = None - - return frontend_node - - -def add_extra_fields(frontend_node, field_config, function_args) -> None: - """Add extra fields to the frontend node.""" - if not function_args: - return - field_config_ = field_config.copy() - function_args_names = [arg["name"] for arg in 
function_args] - # If kwargs is in the function_args and not all field_config keys are in function_args - # then we need to add the extra fields - - for extra_field in function_args: - if "name" not in extra_field or extra_field["name"] in { - "self", - "kwargs", - "args", - }: - continue - - field_name, field_type, field_value, field_required = get_field_properties(extra_field) - config = field_config_.pop(field_name, {}) - frontend_node = add_new_custom_field( - frontend_node=frontend_node, - field_name=field_name, - field_type=field_type, - field_value=field_value, - field_required=field_required, - field_config=config, - ) - if "kwargs" in function_args_names and not all(key in function_args_names for key in field_config): - for field_name, config in field_config_.items(): - if "name" not in config or field_name == "code": - continue - config_ = config.model_dump() if isinstance(config, BaseModel) else config - field_name_, field_type, field_value, field_required = get_field_properties(extra_field=config_) - frontend_node = add_new_custom_field( - frontend_node=frontend_node, - field_name=field_name_, - field_type=field_type, - field_value=field_value, - field_required=field_required, - field_config=config_, - ) - - -def get_field_dict(field: Input | dict): - """Get the field dictionary from a Input or a dict.""" - if isinstance(field, Input): - return dotdict(field.model_dump(by_alias=True, exclude_none=True)) - return field - - -def run_build_inputs( - custom_component: Component, -): - """Run the build inputs of a custom component.""" - try: - return custom_component.build_inputs() - # add_extra_fields(frontend_node, field_config, field_config.values()) - except Exception as exc: - logger.exception("Error running build inputs") - raise HTTPException(status_code=500, detail=str(exc)) from exc - - -def get_component_instance(custom_component: CustomComponent | Component, user_id: str | UUID | None = None): - """Returns an instance of a custom component, evaluating its code if necessary. - - If the input is already an instance of `Component` or `CustomComponent`, it is returned directly. - Otherwise, the function evaluates the component's code to create and return an instance. Raises an - HTTP 400 error if the code is missing, invalid, or instantiation fails. - """ - # Fast path: avoid repeated str comparisons - - code = custom_component._code - if not isinstance(code, str): - # Only two failure cases: None, or other non-str - error = "Code is None" if code is None else "Invalid code type" - msg = f"Invalid type conversion: {error}. Please check your code and try again." - logger.error(msg) - raise HTTPException(status_code=400, detail={"error": msg}) - - # Only now, try to process expensive exception/log traceback only *if needed* - try: - custom_class = eval_custom_component_code(code) - except Exception as exc: - # Only generate traceback if an error occurs (save time on success) - tb = traceback.format_exc() - logger.error("Error while evaluating custom component code\n%s", tb) - raise HTTPException( - status_code=400, - detail={ - "error": "Invalid type conversion. 
Please check your code and try again.", - "traceback": tb, - }, - ) from exc - - try: - return custom_class(_user_id=user_id, _code=code) - except Exception as exc: - tb = traceback.format_exc() - logger.error("Error while instantiating custom component\n%s", tb) - # Only log inner traceback if present in 'detail' - detail_tb = getattr(exc, "detail", {}).get("traceback", None) - if detail_tb is not None: - logger.error(detail_tb) - raise - - -def is_a_preimported_component(custom_component: CustomComponent): - """Check if the component is a preimported component.""" - klass = type(custom_component) - # This avoids double type lookups, and may speed up the common-case short-circuit - return issubclass(klass, Component) and klass is not Component - - -def run_build_config( - custom_component: CustomComponent, - user_id: str | UUID | None = None, -) -> tuple[dict, CustomComponent]: - """Builds the field configuration dictionary for a custom component. - - If the input is an instance of a subclass of Component (excluding Component itself), returns its - build configuration and the instance. Otherwise, evaluates the component's code to create an instance, - calls its build_config method, and processes any RangeSpec objects in the configuration. Raises an - HTTP 400 error if the code is missing or invalid, or if instantiation or configuration building fails. - - Returns: - A tuple containing the field configuration dictionary and the component instance. - """ - # Check if the instance's class is a subclass of Component (but not Component itself) - # If we have a Component that is a subclass of Component, that means - # we have imported it - # If not, it means the component was loaded through LANGFLOW_COMPONENTS_PATH - # and loaded from a file - if is_a_preimported_component(custom_component): - return custom_component.build_config(), custom_component - - if custom_component._code is None: - error = "Code is None" - elif not isinstance(custom_component._code, str): - error = "Invalid code type" - else: - try: - custom_class = eval_custom_component_code(custom_component._code) - except Exception as exc: - logger.exception("Error while evaluating custom component code") - raise HTTPException( - status_code=400, - detail={ - "error": ("Invalid type conversion. Please check your code and try again."), - "traceback": traceback.format_exc(), - }, - ) from exc - - try: - custom_instance = custom_class(_user_id=user_id) - build_config: dict = custom_instance.build_config() - - for field_name, field in build_config.copy().items(): - # Allow user to build Input as well - # as a dict with the same keys as Input - field_dict = get_field_dict(field) - # Let's check if "rangeSpec" is a RangeSpec object - if "rangeSpec" in field_dict and isinstance(field_dict["rangeSpec"], RangeSpec): - field_dict["rangeSpec"] = field_dict["rangeSpec"].model_dump() - build_config[field_name] = field_dict - - except Exception as exc: - logger.exception("Error while building field config") - if hasattr(exc, "detail") and "traceback" in exc.detail: - logger.error(exc.detail["traceback"]) - raise - return build_config, custom_instance - - msg = f"Invalid type conversion: {error}. Please check your code and try again." 
- logger.error(msg) - raise HTTPException( - status_code=400, - detail={"error": msg}, - ) - - -def add_code_field(frontend_node: CustomComponentFrontendNode, raw_code): - code_field = Input( - dynamic=True, - required=True, - placeholder="", - multiline=True, - value=raw_code, - password=False, - name="code", - advanced=True, - field_type="code", - is_list=False, - ) - frontend_node.template.add_field(code_field) - - return frontend_node - - -def add_code_field_to_build_config(build_config: dict, raw_code: str): - build_config["code"] = Input( - dynamic=True, - required=True, - placeholder="", - multiline=True, - value=raw_code, - password=False, - name="code", - advanced=True, - field_type="code", - is_list=False, - ).model_dump() - return build_config - - -def get_module_name_from_display_name(display_name: str): - """Get the module name from the display name.""" - # Convert display name to snake_case for Python module name - # e.g., "Custom Component" -> "custom_component" - # Remove extra spaces and convert to lowercase - cleaned_name = re.sub(r"\s+", " ", display_name.strip()) - # Replace spaces with underscores and convert to lowercase - module_name = cleaned_name.replace(" ", "_").lower() - # Remove any non-alphanumeric characters except underscores - return re.sub(r"[^a-z0-9_]", "", module_name) - - -def build_custom_component_template_from_inputs( - custom_component: Component | CustomComponent, user_id: str | UUID | None = None, module_name: str | None = None -): - # The List of Inputs fills the role of the build_config and the entrypoint_args - """Builds a frontend node template from a custom component using its input-based configuration. - - This function generates a frontend node template by extracting input fields from the component, - adding the code field, determining output types from method return types, validating the component, - setting base classes, and reordering fields. Returns the frontend node as a dictionary along with - the component instance. - - Returns: - A tuple containing the frontend node dictionary and the component instance. - """ - ctype_name = custom_component.__class__.__name__ - if ctype_name in _COMPONENT_TYPE_NAMES: - cc_instance = get_component_instance(custom_component, user_id=user_id) - - field_config = cc_instance.get_template_config(cc_instance) - frontend_node = ComponentFrontendNode.from_inputs(**field_config) - - else: - frontend_node = ComponentFrontendNode.from_inputs(**custom_component.template_config) - cc_instance = custom_component - frontend_node = add_code_field(frontend_node, custom_component._code) - # But we now need to calculate the return_type of the methods in the outputs - for output in frontend_node.outputs: - if output.types: - continue - return_types = cc_instance.get_method_return_type(output.method) - return_types = [format_type(return_type) for return_type in return_types] - output.add_types(return_types) - - # Validate that there is not name overlap between inputs and outputs - frontend_node.validate_component() - # ! 
This should be removed when we have a better way to handle this - frontend_node.set_base_classes_from_outputs() - reorder_fields(frontend_node, cc_instance._get_field_order()) - frontend_node = build_component_metadata(frontend_node, cc_instance, module_name, ctype_name) - - return frontend_node.to_dict(keep_name=False), cc_instance - - -def build_component_metadata( - frontend_node: CustomComponentFrontendNode, custom_component: CustomComponent, module_name: str, ctype_name: str -): - """Build the metadata for a custom component.""" - if module_name: - frontend_node.metadata["module"] = module_name - else: - module_name = get_module_name_from_display_name(frontend_node.display_name) - frontend_node.metadata["module"] = f"custom_components.{module_name}" - - # Generate code hash for cache invalidation and debugging - try: - code_hash = _generate_code_hash(custom_component._code, module_name) - if code_hash: - frontend_node.metadata["code_hash"] = code_hash - except Exception as exc: # noqa: BLE001 - logger.debug(f"Error generating code hash for {custom_component.__class__.__name__}", exc_info=exc) - - # Analyze component dependencies - try: - dependency_info = analyze_component_dependencies(custom_component._code) - frontend_node.metadata["dependencies"] = dependency_info - except (SyntaxError, TypeError, ValueError, ImportError) as exc: - logger.warning(f"Failed to analyze dependencies for component {ctype_name}: {exc}") - # Set minimal dependency info on failure - frontend_node.metadata["dependencies"] = { - "total_dependencies": 0, - "dependencies": [], - } - - return frontend_node - - -def build_custom_component_template( - custom_component: CustomComponent, - user_id: str | UUID | None = None, - module_name: str | None = None, -) -> tuple[dict[str, Any], CustomComponent | Component]: - """Builds a frontend node template and instance for a custom component. - - If the component uses input-based configuration, delegates to the appropriate builder. Otherwise, - constructs a frontend node from the component's template configuration, adds extra fields, code, - base classes, and output types, reorders fields, and returns the resulting template dictionary - along with the component instance. - - Raises: - HTTPException: If the component is missing required attributes or if any error occurs during - template construction. - """ - try: - has_template_config = hasattr(custom_component, "template_config") - except Exception as exc: - raise HTTPException( - status_code=400, - detail={ - "error": (f"Error building Component: {exc}"), - "traceback": traceback.format_exc(), - }, - ) from exc - if not has_template_config: - raise HTTPException( - status_code=400, - detail={ - "error": ("Error building Component. 
Please check if you are importing Component correctly."), - }, - ) - try: - if "inputs" in custom_component.template_config: - return build_custom_component_template_from_inputs( - custom_component, user_id=user_id, module_name=module_name - ) - frontend_node = CustomComponentFrontendNode(**custom_component.template_config) - - field_config, custom_instance = run_build_config( - custom_component, - user_id=user_id, - ) - - entrypoint_args = custom_component.get_function_entrypoint_args - - add_extra_fields(frontend_node, field_config, entrypoint_args) - - frontend_node = add_code_field(frontend_node, custom_component._code) - - add_base_classes(frontend_node, custom_component._get_function_entrypoint_return_type) - add_output_types(frontend_node, custom_component._get_function_entrypoint_return_type) - - reorder_fields(frontend_node, custom_instance._get_field_order()) - - if module_name: - frontend_node = build_component_metadata( - frontend_node, custom_component, module_name, custom_component.__class__.__name__ - ) - - return frontend_node.to_dict(keep_name=False), custom_instance - except Exception as exc: - if isinstance(exc, HTTPException): - raise - raise HTTPException( - status_code=400, - detail={ - "error": (f"Error building Component: {exc}"), - "traceback": traceback.format_exc(), - }, - ) from exc - - -def create_component_template( - component: dict | None = None, - component_extractor: Component | CustomComponent | None = None, - module_name: str | None = None, -): - """Creates a component template and instance from either a component dictionary or an existing component extractor. - - If a component dictionary is provided, a new Component instance is created from its code. If a component - extractor is provided, it is used directly. The function returns the generated template and the component - instance. Output types are set on the template if missing. 
- """ - component_output_types = [] - if component_extractor is None and component is not None: - component_code = component["code"] - component_output_types = component["output_types"] - - component_extractor = Component(_code=component_code) - - component_template, component_instance = build_custom_component_template( - component_extractor, module_name=module_name - ) - if not component_template["output_types"] and component_output_types: - component_template["output_types"] = component_output_types - - return component_template, component_instance - - -def build_custom_components(components_paths: list[str]): - """Build custom components from the specified paths.""" - if not components_paths: - return {} - - logger.info(f"Building custom components from {components_paths}") - - custom_components_from_file: dict = {} - processed_paths = set() - for path in components_paths: - path_str = str(path) - if path_str in processed_paths: - continue - - custom_component_dict = build_custom_component_list_from_path(path_str) - if custom_component_dict: - category = next(iter(custom_component_dict)) - logger.debug(f"Loading {len(custom_component_dict[category])} component(s) from category {category}") - custom_components_from_file = merge_nested_dicts_with_renaming( - custom_components_from_file, custom_component_dict - ) - processed_paths.add(path_str) - - return custom_components_from_file - - -async def abuild_custom_components(components_paths: list[str]): - """Build custom components from the specified paths.""" - if not components_paths: - return {} - - await logger.adebug(f"Building custom components from {components_paths}") - custom_components_from_file: dict = {} - processed_paths = set() - for path in components_paths: - path_str = str(path) - if path_str in processed_paths: - continue - - custom_component_dict = await abuild_custom_component_list_from_path(path_str) - if custom_component_dict: - category = next(iter(custom_component_dict)) - await logger.adebug(f"Loading {len(custom_component_dict[category])} component(s) from category {category}") - custom_components_from_file = merge_nested_dicts_with_renaming( - custom_components_from_file, custom_component_dict - ) - processed_paths.add(path_str) - - return custom_components_from_file - - -def sanitize_field_config(field_config: dict | Input): - # If any of the already existing keys are in field_config, remove them - field_dict = field_config.to_dict() if isinstance(field_config, Input) else field_config - for key in [ - "name", - "field_type", - "value", - "required", - "placeholder", - "display_name", - "advanced", - "show", - ]: - field_dict.pop(key, None) - - # Remove field_type and type because they were extracted already - field_dict.pop("field_type", None) - field_dict.pop("type", None) - - return field_dict - - -def build_component(component): - """Build a single component.""" - component_template, component_instance = create_component_template(component) - component_name = get_instance_name(component_instance) - return component_name, component_template - - -def get_function(code): - """Get the function.""" - function_name = validate.extract_function_name(code) - - return validate.create_function(code, function_name) - - -def get_instance_name(instance): - name = instance.__class__.__name__ - if hasattr(instance, "name") and instance.name: - name = instance.name - return name - - -async def update_component_build_config( - component: CustomComponent, - build_config: dotdict, - field_value: Any, - field_name: str | None = 
None, -): - if inspect.iscoroutinefunction(component.update_build_config): - return await component.update_build_config(build_config, field_value, field_name) - return await asyncio.to_thread(component.update_build_config, build_config, field_value, field_name) - - -async def get_all_types_dict(components_paths: list[str]): - """Get all types dictionary with full component loading.""" - # This is the async version of the existing function - return await abuild_custom_components(components_paths=components_paths) - - -async def get_single_component_dict(component_type: str, component_name: str, components_paths: list[str]): - """Get a single component dictionary.""" - # For example, if components are loaded by importing Python modules: - for base_path in components_paths: - module_path = Path(base_path) / component_type / f"{component_name}.py" - if module_path.exists(): - # Try to import the module - module_name = f"langflow.components.{component_type}.{component_name}" - try: - # This is a simplified example - actual implementation may vary - import importlib.util - - spec = importlib.util.spec_from_file_location(module_name, module_path) - if spec and spec.loader: - module = importlib.util.module_from_spec(spec) - spec.loader.exec_module(module) - if hasattr(module, "template"): - return module.template - except ImportError as e: - await logger.aerror(f"Import error loading component {module_path}: {e!s}") - except AttributeError as e: - await logger.aerror(f"Attribute error loading component {module_path}: {e!s}") - except ValueError as e: - await logger.aerror(f"Value error loading component {module_path}: {e!s}") - except (KeyError, IndexError) as e: - await logger.aerror(f"Data structure error loading component {module_path}: {e!s}") - except RuntimeError as e: - await logger.aerror(f"Runtime error loading component {module_path}: {e!s}") - await logger.adebug("Full traceback for runtime error", exc_info=True) - except OSError as e: - await logger.aerror(f"OS error loading component {module_path}: {e!s}") - - # If we get here, the component wasn't found or couldn't be loaded - return None - - -async def load_custom_component(component_name: str, components_paths: list[str]): - """Load a custom component by name. 
- - Args: - component_name: Name of the component to load - components_paths: List of paths to search for components - """ - from langflow.interface.custom_component import get_custom_component_from_name - - try: - # First try to get the component from the registered components - component_class = get_custom_component_from_name(component_name) - if component_class: - # Define the function locally if it's not imported - def get_custom_component_template(component_cls): - """Get template for a custom component class.""" - # This is a simplified implementation - adjust as needed - if hasattr(component_cls, "get_template"): - return component_cls.get_template() - if hasattr(component_cls, "template"): - return component_cls.template - return None - - return get_custom_component_template(component_class) - - # If not found in registered components, search in the provided paths - for path in components_paths: - # Try to find the component in different category directories - base_path = Path(path) - if base_path.exists() and base_path.is_dir(): - # Search for the component in all subdirectories - for category_dir in base_path.iterdir(): - if category_dir.is_dir(): - component_file = category_dir / f"{component_name}.py" - if component_file.exists(): - # Try to import the module - module_name = f"langflow.components.{category_dir.name}.{component_name}" - try: - import importlib.util - - spec = importlib.util.spec_from_file_location(module_name, component_file) - if spec and spec.loader: - module = importlib.util.module_from_spec(spec) - spec.loader.exec_module(module) - if hasattr(module, "template"): - return module.template - if hasattr(module, "get_template"): - return module.get_template() - except ImportError as e: - await logger.aerror(f"Import error loading component {component_file}: {e!s}") - await logger.adebug("Import error traceback", exc_info=True) - except AttributeError as e: - await logger.aerror(f"Attribute error loading component {component_file}: {e!s}") - await logger.adebug("Attribute error traceback", exc_info=True) - except (ValueError, TypeError) as e: - await logger.aerror(f"Value/Type error loading component {component_file}: {e!s}") - await logger.adebug("Value/Type error traceback", exc_info=True) - except (KeyError, IndexError) as e: - await logger.aerror(f"Data structure error loading component {component_file}: {e!s}") - await logger.adebug("Data structure error traceback", exc_info=True) - except RuntimeError as e: - await logger.aerror(f"Runtime error loading component {component_file}: {e!s}") - await logger.adebug("Runtime error traceback", exc_info=True) - except OSError as e: - await logger.aerror(f"OS error loading component {component_file}: {e!s}") - await logger.adebug("OS error traceback", exc_info=True) - - except ImportError as e: - await logger.aerror(f"Import error loading custom component {component_name}: {e!s}") - return None - except AttributeError as e: - await logger.aerror(f"Attribute error loading custom component {component_name}: {e!s}") - return None - except ValueError as e: - await logger.aerror(f"Value error loading custom component {component_name}: {e!s}") - return None - except (KeyError, IndexError) as e: - await logger.aerror(f"Data structure error loading custom component {component_name}: {e!s}") - return None - except RuntimeError as e: - await logger.aerror(f"Runtime error loading custom component {component_name}: {e!s}") - logger.debug("Full traceback for runtime error", exc_info=True) - return None - - # If we get here, the 
component wasn't found in any of the paths - await logger.awarning(f"Component {component_name} not found in any of the provided paths") - return None - - -_COMPONENT_TYPE_NAMES = {"Component", "CustomComponent"} +from lfx.custom.utils import * # noqa: F403 diff --git a/src/backend/base/langflow/custom/validate.py b/src/backend/base/langflow/custom/validate.py new file mode 100644 index 000000000000..61768ffe9240 --- /dev/null +++ b/src/backend/base/langflow/custom/validate.py @@ -0,0 +1 @@ +from lfx.custom.validate import * # noqa: F403 diff --git a/src/backend/base/langflow/events/event_manager.py b/src/backend/base/langflow/events/event_manager.py index ab19813c6e13..989ad8d14864 100644 --- a/src/backend/base/langflow/events/event_manager.py +++ b/src/backend/base/langflow/events/event_manager.py @@ -1,108 +1,18 @@ -from __future__ import annotations - -import inspect -import json -import time -import uuid -from functools import partial -from typing import TYPE_CHECKING - -from fastapi.encoders import jsonable_encoder -from typing_extensions import Protocol - -from langflow.logging.logger import logger -from langflow.schema.playground_events import create_event_by_type - -if TYPE_CHECKING: - import asyncio - - from langflow.schema.log import LoggableType - - -class EventCallback(Protocol): - def __call__(self, *, manager: EventManager, event_type: str, data: LoggableType): ... - - -class PartialEventCallback(Protocol): - def __call__(self, *, data: LoggableType): ... - - -class EventManager: - def __init__(self, queue: asyncio.Queue): - self.queue = queue - self.events: dict[str, PartialEventCallback] = {} - - @staticmethod - def _validate_callback(callback: EventCallback) -> None: - if not callable(callback): - msg = "Callback must be callable" - raise TypeError(msg) - # Check if it has `self, event_type and data` - sig = inspect.signature(callback) - parameters = ["manager", "event_type", "data"] - if len(sig.parameters) != len(parameters): - msg = "Callback must have exactly 3 parameters" - raise ValueError(msg) - if not all(param.name in parameters for param in sig.parameters.values()): - msg = "Callback must have exactly 3 parameters: manager, event_type, and data" - raise ValueError(msg) - - def register_event( - self, - name: str, - event_type: str, - callback: EventCallback | None = None, - ) -> None: - if not name: - msg = "Event name cannot be empty" - raise ValueError(msg) - if not name.startswith("on_"): - msg = "Event name must start with 'on_'" - raise ValueError(msg) - if callback is None: - callback_ = partial(self.send_event, event_type=event_type) - else: - callback_ = partial(callback, manager=self, event_type=event_type) - self.events[name] = callback_ - - def send_event(self, *, event_type: str, data: LoggableType): - try: - if isinstance(data, dict) and event_type in {"message", "error", "warning", "info", "token"}: - data = create_event_by_type(event_type, **data) - except TypeError as e: - logger.debug(f"Error creating playground event: {e}") - except Exception: - raise - jsonable_data = jsonable_encoder(data) - json_data = {"event": event_type, "data": jsonable_data} - event_id = f"{event_type}-{uuid.uuid4()}" - str_data = json.dumps(json_data) + "\n\n" - self.queue.put_nowait((event_id, str_data.encode("utf-8"), time.time())) - - def noop(self, *, data: LoggableType) -> None: - pass - - def __getattr__(self, name: str) -> PartialEventCallback: - return self.events.get(name, self.noop) - - -def create_default_event_manager(queue): - manager = EventManager(queue) 
- manager.register_event("on_token", "token") - manager.register_event("on_vertices_sorted", "vertices_sorted") - manager.register_event("on_error", "error") - manager.register_event("on_end", "end") - manager.register_event("on_message", "add_message") - manager.register_event("on_remove_message", "remove_message") - manager.register_event("on_end_vertex", "end_vertex") - manager.register_event("on_build_start", "build_start") - manager.register_event("on_build_end", "build_end") - return manager - - -def create_stream_tokens_event_manager(queue): - manager = EventManager(queue) - manager.register_event("on_message", "add_message") - manager.register_event("on_token", "token") - manager.register_event("on_end", "end") - return manager +# Backwards compatibility module for langflow.events.event_manager +# This module redirects imports to the new lfx.events.event_manager module + +from lfx.events.event_manager import ( + EventCallback, + EventManager, + PartialEventCallback, + create_default_event_manager, + create_stream_tokens_event_manager, +) + +__all__ = [ + "EventCallback", + "EventManager", + "PartialEventCallback", + "create_default_event_manager", + "create_stream_tokens_event_manager", +] diff --git a/src/backend/base/langflow/field_typing/__init__.py b/src/backend/base/langflow/field_typing/__init__.py index 8eb159179626..d4e633e88885 100644 --- a/src/backend/base/langflow/field_typing/__init__.py +++ b/src/backend/base/langflow/field_typing/__init__.py @@ -1,6 +1,6 @@ from typing import Any -from .constants import ( +from lfx.field_typing.constants import ( AgentExecutor, BaseChatMemory, BaseChatModel, @@ -29,17 +29,17 @@ Tool, VectorStore, ) -from .range_spec import RangeSpec +from lfx.field_typing.range_spec import RangeSpec def _import_input_class(): - from langflow.template.field.base import Input + from lfx.template.field.base import Input return Input def _import_output_class(): - from langflow.template.field.base import Output + from lfx.template.field.base import Output return Output @@ -48,9 +48,10 @@ def __getattr__(name: str) -> Any: # This is to avoid circular imports if name == "Input": return _import_input_class() - return RangeSpec if name == "Output": return _import_output_class() + if name == "RangeSpec": + return RangeSpec # The other names should work as if they were imported from constants # Import the constants module langflow.field_typing.constants from . 
diff --git a/src/backend/base/langflow/field_typing/__init__.py b/src/backend/base/langflow/field_typing/__init__.py
index 8eb159179626..d4e633e88885 100644
--- a/src/backend/base/langflow/field_typing/__init__.py
+++ b/src/backend/base/langflow/field_typing/__init__.py
@@ -1,6 +1,6 @@
 from typing import Any
 
-from .constants import (
+from lfx.field_typing.constants import (
     AgentExecutor,
     BaseChatMemory,
     BaseChatModel,
@@ -29,17 +29,17 @@
     Tool,
     VectorStore,
 )
-from .range_spec import RangeSpec
+from lfx.field_typing.range_spec import RangeSpec
 
 
 def _import_input_class():
-    from langflow.template.field.base import Input
+    from lfx.template.field.base import Input
 
     return Input
 
 
 def _import_output_class():
-    from langflow.template.field.base import Output
+    from lfx.template.field.base import Output
 
     return Output
 
@@ -48,9 +48,10 @@ def __getattr__(name: str) -> Any:
     # This is to avoid circular imports
     if name == "Input":
         return _import_input_class()
-    return RangeSpec
     if name == "Output":
         return _import_output_class()
+    if name == "RangeSpec":
+        return RangeSpec
     # The other names should work as if they were imported from constants
     # Import the constants module langflow.field_typing.constants
     from . import constants
@@ -77,7 +78,6 @@ def __getattr__(name: str) -> Any:
     "Data",
     "Document",
     "Embeddings",
-    "Input",
     "LanguageModel",
     "NestedDict",
     "Object",
diff --git a/src/backend/base/langflow/field_typing/constants.py b/src/backend/base/langflow/field_typing/constants.py
index 1ec8187ae8ce..99dae42c653b 100644
--- a/src/backend/base/langflow/field_typing/constants.py
+++ b/src/backend/base/langflow/field_typing/constants.py
@@ -1,135 +1,100 @@
+# Re-export everything from lfx.field_typing.constants for backward compatibility
+# Import additional types
 from collections.abc import Callable
-from typing import Text, TypeAlias, TypeVar
+from typing import Text
 
-from langchain.agents.agent import AgentExecutor
-from langchain.chains.base import Chain
-from langchain.memory.chat_memory import BaseChatMemory
-from langchain_core.chat_history import BaseChatMessageHistory
-from langchain_core.document_loaders import BaseLoader
-from langchain_core.documents import Document
-from langchain_core.documents.compressor import BaseDocumentCompressor
-from langchain_core.embeddings import Embeddings
-from langchain_core.language_models import BaseLanguageModel, BaseLLM
-from langchain_core.language_models.chat_models import BaseChatModel
-from langchain_core.memory import BaseMemory
-from langchain_core.output_parsers import BaseLLMOutputParser, BaseOutputParser
-from langchain_core.prompts import BasePromptTemplate, ChatPromptTemplate, PromptTemplate
-from langchain_core.retrievers import BaseRetriever
-from langchain_core.tools import BaseTool, Tool
-from langchain_core.vectorstores import VectorStore, VectorStoreRetriever
-from langchain_text_splitters import TextSplitter
-
-from langflow.schema.data import Data
-from langflow.schema.dataframe import DataFrame
-from langflow.schema.message import Message
-
-NestedDict: TypeAlias = dict[str, str | dict]
-LanguageModel = TypeVar("LanguageModel", BaseLanguageModel, BaseLLM, BaseChatModel)
-ToolEnabledLanguageModel = TypeVar("ToolEnabledLanguageModel", BaseLanguageModel, BaseLLM, BaseChatModel)
-Memory = TypeVar("Memory", bound=BaseChatMessageHistory)
-
-Retriever = TypeVar(
-    "Retriever",
+from lfx.field_typing.constants import (
+    CUSTOM_COMPONENT_SUPPORTED_TYPES,
+    DEFAULT_IMPORT_STRING,
+    LANGCHAIN_BASE_TYPES,
+    # Import all the langchain types that may be needed
+    AgentExecutor,
+    BaseChatMemory,
+    BaseChatMessageHistory,
+    BaseChatModel,
+    BaseDocumentCompressor,
+    BaseLanguageModel,
+    BaseLLM,
+    BaseLLMOutputParser,
+    BaseLoader,
+    BaseMemory,
+    BaseOutputParser,
+    BasePromptTemplate,
     BaseRetriever,
+    BaseTool,
+    Chain,
+    ChatPromptTemplate,
+    Code,
+    Document,
+    Embeddings,
+    LanguageModel,
+    Memory,
+    NestedDict,
+    Object,
+    OutputParser,
+    PromptTemplate,
+    Retriever,
+    TextSplitter,
+    Tool,
+    ToolEnabledLanguageModel,
+    VectorStore,
     VectorStoreRetriever,
 )
-OutputParser = TypeVar(
-    "OutputParser",
-    BaseOutputParser,
-    BaseLLMOutputParser,
-)
+# Import lfx schema types
+from lfx.schema.data import Data
+from lfx.schema.dataframe import DataFrame
 
-class Object:
-    pass
-
-
-class Code:
-    pass
-
+# Import Message from langflow.schema for backward compatibility
+from langflow.schema.message import Message
 
-LANGCHAIN_BASE_TYPES = {
-    "Chain": Chain,
-    "AgentExecutor": AgentExecutor,
-    "BaseTool": BaseTool,
-    "Tool": Tool,
-    "BaseLLM": BaseLLM,
-    "BaseLanguageModel": BaseLanguageModel,
-    "PromptTemplate": PromptTemplate,
-    "ChatPromptTemplate": ChatPromptTemplate,
-    "BasePromptTemplate": BasePromptTemplate,
-    "BaseLoader": BaseLoader,
"Document": Document, - "TextSplitter": TextSplitter, - "VectorStore": VectorStore, - "Embeddings": Embeddings, - "BaseRetriever": BaseRetriever, - "BaseOutputParser": BaseOutputParser, - "BaseMemory": BaseMemory, - "BaseChatMemory": BaseChatMemory, - "BaseChatModel": BaseChatModel, - "Memory": Memory, - "BaseDocumentCompressor": BaseDocumentCompressor, -} -# Langchain base types plus Python base types +# Add Message and DataFrame to CUSTOM_COMPONENT_SUPPORTED_TYPES CUSTOM_COMPONENT_SUPPORTED_TYPES = { - **LANGCHAIN_BASE_TYPES, - "NestedDict": NestedDict, - "Data": Data, + **CUSTOM_COMPONENT_SUPPORTED_TYPES, "Message": Message, - "Text": Text, # noqa: UP019 - "Object": Object, - "Callable": Callable, - "LanguageModel": LanguageModel, - "Retriever": Retriever, "DataFrame": DataFrame, } -DEFAULT_IMPORT_STRING = """from langchain.agents.agent import AgentExecutor -from langchain.chains.base import Chain -from langchain.memory.chat_memory import BaseChatMemory -from langchain_core.chat_history import BaseChatMessageHistory -from langchain_core.document_loaders import BaseLoader -from langchain_core.documents import Document -from langchain_core.embeddings import Embeddings -from langchain_core.language_models import BaseLanguageModel, BaseLLM -from langchain_core.language_models.chat_models import BaseChatModel -from langchain_core.memory import BaseMemory -from langchain_core.output_parsers import BaseLLMOutputParser, BaseOutputParser -from langchain_core.prompts import BasePromptTemplate, ChatPromptTemplate, PromptTemplate -from langchain_core.retrievers import BaseRetriever -from langchain_core.documents.compressor import BaseDocumentCompressor -from langchain_core.tools import BaseTool, Tool -from langchain_core.vectorstores import VectorStore, VectorStoreRetriever -from langchain_text_splitters import TextSplitter - -from langflow.io import ( - BoolInput, - CodeInput, - DataFrameInput, - DataInput, - DefaultPromptField, - DictInput, - DropdownInput, - FileInput, - FloatInput, - HandleInput, - IntInput, - LinkInput, - MessageInput, - MessageTextInput, - MultilineInput, - MultilineSecretInput, - MultiselectInput, - NestedDictInput, - Output, - PromptInput, - SecretStrInput, - SliderInput, - StrInput, - TableInput, -) -from langflow.schema.data import Data -from langflow.schema.dataframe import DataFrame -from langflow.schema.message import Message -""" +__all__ = [ + "CUSTOM_COMPONENT_SUPPORTED_TYPES", + "DEFAULT_IMPORT_STRING", + "LANGCHAIN_BASE_TYPES", + # Langchain types + "AgentExecutor", + "BaseChatMemory", + "BaseChatMessageHistory", + "BaseChatModel", + "BaseDocumentCompressor", + "BaseLLM", + "BaseLLMOutputParser", + "BaseLanguageModel", + "BaseLoader", + "BaseMemory", + "BaseOutputParser", + "BasePromptTemplate", + "BaseRetriever", + "BaseTool", + # Additional types + "Callable", + "Chain", + "ChatPromptTemplate", + "Code", + "Data", + "DataFrame", + "Document", + "Embeddings", + "LanguageModel", + "Memory", + "Message", + "NestedDict", + "Object", + "OutputParser", + "PromptTemplate", + "Retriever", + "Text", + "TextSplitter", + "Tool", + "ToolEnabledLanguageModel", + "VectorStore", + "VectorStoreRetriever", +] diff --git a/src/backend/base/langflow/field_typing/range_spec.py b/src/backend/base/langflow/field_typing/range_spec.py index eabd1c07a163..8b733d899eb0 100644 --- a/src/backend/base/langflow/field_typing/range_spec.py +++ b/src/backend/base/langflow/field_typing/range_spec.py @@ -1,33 +1,3 @@ -from typing import Literal +from lfx.field_typing.range_spec import RangeSpec 
-from pydantic import BaseModel, field_validator
-
-
-class RangeSpec(BaseModel):
-    step_type: Literal["int", "float"] = "float"
-    min: float = -1.0
-    max: float = 1.0
-    step: float = 0.1
-
-    @field_validator("max")
-    @classmethod
-    def max_must_be_greater_than_min(cls, v, values):
-        if "min" in values.data and v <= values.data["min"]:
-            msg = "Max must be greater than min"
-            raise ValueError(msg)
-        return v
-
-    @field_validator("step")
-    @classmethod
-    def step_must_be_positive(cls, v, values):
-        if v <= 0:
-            msg = "Step must be positive"
-            raise ValueError(msg)
-        if values.data["step_type"] == "int" and isinstance(v, float) and not v.is_integer():
-            msg = "When step_type is int, step must be an integer"
-            raise ValueError(msg)
-        return v
-
-    @classmethod
-    def set_step_type(cls, step_type: Literal["int", "float"], range_spec: "RangeSpec") -> "RangeSpec":
-        return cls(min=range_spec.min, max=range_spec.max, step=range_spec.step, step_type=step_type)
+__all__ = ["RangeSpec"]
diff --git a/src/backend/base/langflow/graph/__init__.py b/src/backend/base/langflow/graph/__init__.py
index d68fd432b323..925d4636868d 100644
--- a/src/backend/base/langflow/graph/__init__.py
+++ b/src/backend/base/langflow/graph/__init__.py
@@ -1,6 +1,6 @@
-from langflow.graph.edge.base import Edge
-from langflow.graph.graph.base import Graph
-from langflow.graph.vertex.base import Vertex
-from langflow.graph.vertex.vertex_types import CustomComponentVertex, InterfaceVertex, StateVertex
+from lfx.graph.edge.base import Edge
+from lfx.graph.graph.base import Graph
+from lfx.graph.vertex.base import Vertex
+from lfx.graph.vertex.vertex_types import CustomComponentVertex, InterfaceVertex, StateVertex
 
 __all__ = ["CustomComponentVertex", "Edge", "Graph", "InterfaceVertex", "StateVertex", "Vertex"]
diff --git a/src/backend/base/langflow/graph/utils.py b/src/backend/base/langflow/graph/utils.py
deleted file mode 100644
index db3bfc91998f..000000000000
--- a/src/backend/base/langflow/graph/utils.py
+++ /dev/null
@@ -1,229 +0,0 @@
-from __future__ import annotations
-
-from collections.abc import Generator
-from enum import Enum
-from typing import TYPE_CHECKING, Any
-from uuid import UUID
-
-import pandas as pd
-
-from langflow.interface.utils import extract_input_variables_from_prompt
-from langflow.logging.logger import logger
-from langflow.schema.data import Data
-from langflow.schema.message import Message
-from langflow.serialization.serialization import get_max_items_length, get_max_text_length, serialize
-from langflow.services.database.models.transactions.crud import log_transaction as crud_log_transaction
-from langflow.services.database.models.transactions.model import TransactionBase
-from langflow.services.database.models.vertex_builds.crud import log_vertex_build as crud_log_vertex_build
-from langflow.services.database.models.vertex_builds.model import VertexBuildBase
-from langflow.services.database.utils import session_getter
-from langflow.services.deps import get_db_service, get_settings_service
-
-if TYPE_CHECKING:
-    from langflow.api.v1.schemas import ResultDataResponse
-    from langflow.graph.vertex.base import Vertex
-
-
-class UnbuiltObject:
-    pass
-
-
-class UnbuiltResult:
-    pass
-
-
-class ArtifactType(str, Enum):
-    TEXT = "text"
-    RECORD = "record"
-    OBJECT = "object"
-    ARRAY = "array"
-    STREAM = "stream"
-    UNKNOWN = "unknown"
-    MESSAGE = "message"
-
-
-def validate_prompt(prompt: str):
-    """Validate prompt."""
-    if extract_input_variables_from_prompt(prompt):
-        return prompt
-
-    return
fix_prompt(prompt) - - -def fix_prompt(prompt: str): - """Fix prompt.""" - return prompt + " {input}" - - -def flatten_list(list_of_lists: list[list | Any]) -> list: - """Flatten list of lists.""" - new_list = [] - for item in list_of_lists: - if isinstance(item, list): - new_list.extend(item) - else: - new_list.append(item) - return new_list - - -def get_artifact_type(value, build_result) -> str: - result = ArtifactType.UNKNOWN - match value: - case Data(): - result = ArtifactType.RECORD - - case str(): - result = ArtifactType.TEXT - - case dict(): - result = ArtifactType.OBJECT - - case list(): - result = ArtifactType.ARRAY - - case Message(): - result = ArtifactType.MESSAGE - - if result == ArtifactType.UNKNOWN and ( - isinstance(build_result, Generator) or (isinstance(value, Message) and isinstance(value.text, Generator)) - ): - result = ArtifactType.STREAM - - return result.value - - -def post_process_raw(raw, artifact_type: str): - if artifact_type == ArtifactType.STREAM.value: - raw = "" - - return raw - - -def _vertex_to_primitive_dict(target: Vertex) -> dict: - """Cleans the parameters of the target vertex.""" - # Removes all keys that the values aren't python types like str, int, bool, etc. - params = { - key: value for key, value in target.params.items() if isinstance(value, str | int | bool | float | list | dict) - } - # if it is a list we need to check if the contents are python types - for key, value in params.items(): - if isinstance(value, list): - params[key] = [item for item in value if isinstance(item, str | int | bool | float | list | dict)] - return params - - -async def log_transaction( - flow_id: str | UUID, source: Vertex, status, target: Vertex | None = None, error=None -) -> None: - """Asynchronously logs a transaction record for a vertex in a flow if transaction storage is enabled. - - Serializes the source vertex's primitive parameters and result, handling pandas DataFrames as needed, - and records transaction details including inputs, outputs, status, error, and flow ID in the database. - If the flow ID is not provided, attempts to retrieve it from the source vertex's graph. - Logs warnings and errors on serialization or database failures. 
- """ - try: - if not get_settings_service().settings.transactions_storage_enabled: - return - if not flow_id: - if source.graph.flow_id: - flow_id = source.graph.flow_id - else: - return - inputs = _vertex_to_primitive_dict(source) - - # Convert the result to a serializable format - if source.result: - try: - result_dict = source.result.model_dump() - for key, value in result_dict.items(): - if isinstance(value, pd.DataFrame): - result_dict[key] = value.to_dict() - outputs = result_dict - except Exception as e: # noqa: BLE001 - await logger.awarning(f"Error serializing result: {e!s}") - outputs = None - else: - outputs = None - - transaction = TransactionBase( - vertex_id=source.id, - target_id=target.id if target else None, - inputs=serialize(inputs, max_length=get_max_text_length(), max_items=get_max_items_length()), - outputs=serialize(outputs, max_length=get_max_text_length(), max_items=get_max_items_length()), - status=status, - error=error, - flow_id=flow_id if isinstance(flow_id, UUID) else UUID(flow_id), - ) - async with session_getter(get_db_service()) as session: - with session.no_autoflush: - inserted = await crud_log_transaction(session, transaction) - if inserted: - await logger.adebug(f"Logged transaction: {inserted.id}") - except Exception as exc: # noqa: BLE001 - await logger.aerror(f"Error logging transaction: {exc!s}") - - -async def log_vertex_build( - *, - flow_id: str | UUID, - vertex_id: str, - valid: bool, - params: Any, - data: ResultDataResponse | dict, - artifacts: dict | None = None, -) -> None: - """Asynchronously logs a vertex build record to the database if vertex build storage is enabled. - - Serializes the provided data and artifacts with configurable length and item limits before storing. - Converts parameters to string if present. Handles exceptions by logging errors. 
- """ - try: - if not get_settings_service().settings.vertex_builds_storage_enabled: - return - try: - if isinstance(flow_id, str): - flow_id = UUID(flow_id) - except ValueError: - msg = f"Invalid flow_id passed to log_vertex_build: {flow_id!r}(type: {type(flow_id)})" - raise ValueError(msg) from None - - vertex_build = VertexBuildBase( - flow_id=flow_id, - id=vertex_id, - valid=valid, - params=str(params) if params else None, - data=serialize(data, max_length=get_max_text_length(), max_items=get_max_items_length()), - artifacts=serialize(artifacts, max_length=get_max_text_length(), max_items=get_max_items_length()), - ) - async with session_getter(get_db_service()) as session: - inserted = await crud_log_vertex_build(session, vertex_build) - await logger.adebug(f"Logged vertex build: {inserted.build_id}") - except Exception: # noqa: BLE001 - await logger.aexception("Error logging vertex build") - - -def rewrite_file_path(file_path: str): - file_path = file_path.replace("\\", "/") - - if ":" in file_path: - file_path = file_path.split(":", 1)[-1] - - file_path_split = [part for part in file_path.split("/") if part] - - if len(file_path_split) > 1: - consistent_file_path = f"{file_path_split[-2]}/{file_path_split[-1]}" - else: - consistent_file_path = "/".join(file_path_split) - - return [consistent_file_path] - - -def has_output_vertex(vertices: dict[Vertex, int]): - return any(vertex.is_output for vertex in vertices) - - -def has_chat_output(vertices: dict[Vertex, int]): - from langflow.graph.schema import InterfaceComponentTypes - - return any(InterfaceComponentTypes.ChatOutput in vertex.id for vertex in vertices) diff --git a/src/backend/base/langflow/helpers/data.py b/src/backend/base/langflow/helpers/data.py index e6217addf165..10c77a404f0f 100644 --- a/src/backend/base/langflow/helpers/data.py +++ b/src/backend/base/langflow/helpers/data.py @@ -5,9 +5,9 @@ import orjson from fastapi.encoders import jsonable_encoder from langchain_core.documents import Document +from lfx.schema.data import Data +from lfx.schema.dataframe import DataFrame -from langflow.schema.data import Data -from langflow.schema.dataframe import DataFrame from langflow.schema.message import Message diff --git a/src/backend/base/langflow/helpers/flow.py b/src/backend/base/langflow/helpers/flow.py index c7a31c0521d9..46f4b3810f33 100644 --- a/src/backend/base/langflow/helpers/flow.py +++ b/src/backend/base/langflow/helpers/flow.py @@ -4,10 +4,10 @@ from uuid import UUID from fastapi import HTTPException +from lfx.log.logger import logger from pydantic.v1 import BaseModel, Field, create_model from sqlmodel import select -from langflow.logging.logger import logger from langflow.schema.schema import INPUT_FIELD_NAME from langflow.services.database.models.flow.model import Flow, FlowRead from langflow.services.deps import get_settings_service, session_scope @@ -15,9 +15,10 @@ if TYPE_CHECKING: from collections.abc import Awaitable, Callable - from langflow.graph.graph.base import Graph - from langflow.graph.schema import RunOutputs - from langflow.graph.vertex.base import Vertex + from lfx.graph.graph.base import Graph + from lfx.graph.schema import RunOutputs + from lfx.graph.vertex.base import Vertex + from langflow.schema.data import Data INPUT_TYPE_MAP = { @@ -46,7 +47,8 @@ async def list_flows(*, user_id: str | None = None) -> list[Data]: async def load_flow( user_id: str, flow_id: str | None = None, flow_name: str | None = None, tweaks: dict | None = None ) -> Graph: - from langflow.graph.graph.base import Graph + 
from lfx.graph.graph.base import Graph + from langflow.processing.process import process_tweaks if not flow_id and not flow_name: @@ -180,7 +182,7 @@ async def flow_function({func_args}): tweaks = {{ {arg_mappings} }} from langflow.helpers.flow import run_flow from langchain_core.tools import ToolException - from langflow.base.flow_processing.utils import build_data_from_result_data, format_flow_output_data + from lfx.base.flow_processing.utils import build_data_from_result_data, format_flow_output_data try: run_outputs = await run_flow( tweaks={{key: {{'input_value': value}} for key, value in tweaks.items()}}, @@ -318,7 +320,7 @@ async def generate_unique_flow_name(flow_name, user_id, session): def json_schema_from_flow(flow: Flow) -> dict: """Generate JSON schema from flow input nodes.""" - from langflow.graph.graph.base import Graph + from lfx.graph.graph.base import Graph # Get the flow's data which contains the nodes and their configurations flow_data = flow.data or {} diff --git a/src/backend/base/langflow/helpers/user.py b/src/backend/base/langflow/helpers/user.py index df0e3a1c6116..268b0dac08b4 100644 --- a/src/backend/base/langflow/helpers/user.py +++ b/src/backend/base/langflow/helpers/user.py @@ -5,11 +5,11 @@ from langflow.services.database.models.flow.model import Flow from langflow.services.database.models.user.model import User, UserRead -from langflow.services.deps import get_db_service +from langflow.services.deps import session_scope async def get_user_by_flow_id_or_endpoint_name(flow_id_or_name: str) -> UserRead | None: - async with get_db_service().with_session() as session: + async with session_scope() as session: try: flow_id = UUID(flow_id_or_name) flow = await session.get(Flow, flow_id) diff --git a/src/backend/base/langflow/initial_setup/setup.py b/src/backend/base/langflow/initial_setup/setup.py index 493cd721d14f..02ff93e8a2dd 100644 --- a/src/backend/base/langflow/initial_setup/setup.py +++ b/src/backend/base/langflow/initial_setup/setup.py @@ -19,28 +19,28 @@ import sqlalchemy as sa from aiofile import async_open from emoji import demojize, purely_emoji -from sqlalchemy.exc import NoResultFound -from sqlalchemy.orm import selectinload -from sqlmodel import col, select -from sqlmodel.ext.asyncio.session import AsyncSession - -from langflow.base.constants import ( +from lfx.base.constants import ( FIELD_FORMAT_ATTRIBUTES, NODE_FORMAT_ATTRIBUTES, ORJSON_OPTIONS, SKIPPED_COMPONENTS, SKIPPED_FIELD_ATTRIBUTES, ) +from lfx.log.logger import logger +from lfx.template.field.prompt import DEFAULT_PROMPT_INTUT_TYPES +from lfx.utils.util import escape_json_dump +from sqlalchemy.exc import NoResultFound +from sqlalchemy.orm import selectinload +from sqlmodel import col, select +from sqlmodel.ext.asyncio.session import AsyncSession + from langflow.initial_setup.constants import STARTER_FOLDER_DESCRIPTION, STARTER_FOLDER_NAME -from langflow.logging.logger import logger from langflow.services.auth.utils import create_super_user from langflow.services.database.models.flow.model import Flow, FlowCreate from langflow.services.database.models.folder.constants import DEFAULT_FOLDER_NAME from langflow.services.database.models.folder.model import Folder, FolderCreate, FolderRead from langflow.services.database.models.user.crud import get_user_by_username from langflow.services.deps import get_settings_service, get_storage_service, get_variable_service, session_scope -from langflow.template.field.prompt import DEFAULT_PROMPT_INTUT_TYPES -from langflow.utils.util import 
escape_json_dump # In the folder ./starter_projects we have a few JSON files that represent # starter projects. We want to load these into the database so that users diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Basic Prompt Chaining.json b/src/backend/base/langflow/initial_setup/starter_projects/Basic Prompt Chaining.json index bf8b1f250559..6c65ddfd4fd8 100644 --- a/src/backend/base/langflow/initial_setup/starter_projects/Basic Prompt Chaining.json +++ b/src/backend/base/langflow/initial_setup/starter_projects/Basic Prompt Chaining.json @@ -362,17 +362,17 @@ "legacy": false, "lf_version": "1.5.0", "metadata": { - "code_hash": "192913db3453", + "code_hash": "715a37648834", "dependencies": { "dependencies": [ { - "name": "langflow", + "name": "lfx", "version": null } ], "total_dependencies": 1 }, - "module": "langflow.components.input_output.chat.ChatInput" + "module": "lfx.components.input_output.chat.ChatInput" }, "output_types": [], "outputs": [ @@ -452,7 +452,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.inputs.inputs import BoolInput\nfrom langflow.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n documentation: str = \"https://docs.langflow.org/components-io#chat-input\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. 
If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\n \"background_color\": background_color,\n \"text_color\": text_color,\n \"icon\": icon,\n },\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n" + "value": "from lfx.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom lfx.base.io.chat import ChatComponent\nfrom lfx.inputs.inputs import BoolInput\nfrom lfx.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom lfx.schema.message import Message\nfrom lfx.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n documentation: str = \"https://docs.langflow.org/components-io#chat-input\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. 
If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\n \"background_color\": background_color,\n \"text_color\": text_color,\n \"icon\": icon,\n },\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n" }, "files": { "_input_type": "FileInput", @@ -672,7 +672,7 @@ "legacy": false, "lf_version": "1.5.0", "metadata": { - "code_hash": "6f74e04e39d5", + "code_hash": "9619107fecd1", "dependencies": { "dependencies": [ { @@ -684,13 +684,13 @@ "version": "0.116.1" }, { - "name": "langflow", + "name": "lfx", "version": null } ], "total_dependencies": 3 }, - "module": "langflow.components.input_output.chat_output.ChatOutput" + "module": "lfx.components.input_output.chat_output.ChatOutput" }, "output_types": [], "outputs": [ @@ -790,7 +790,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs.inputs import BoolInput, DropdownInput, HandleInput, MessageTextInput\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.template.field.base import Output\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n documentation: str = \"https://docs.langflow.org/components-io#chat-output\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n 
advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + 
json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n" + "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom lfx.base.io.chat import ChatComponent\nfrom lfx.helpers.data import safe_convert\nfrom lfx.inputs.inputs import BoolInput, DropdownInput, HandleInput, MessageTextInput\nfrom lfx.schema.data import Data\nfrom lfx.schema.dataframe import DataFrame\nfrom lfx.schema.message import Message\nfrom lfx.schema.properties import Source\nfrom lfx.template.field.base import Output\nfrom lfx.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n documentation: str = \"https://docs.langflow.org/components-io#chat-output\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, 
got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n" }, "data_template": { "_input_type": "MessageTextInput", @@ -1233,7 +1233,7 @@ "beta": false, "conditional_paths": [], "custom_fields": {}, - "description": "Runs a language model given a specified provider. ", + "description": "Runs a language model given a specified provider.", "display_name": "Language Model", "documentation": "", "edited": false, @@ -1251,12 +1251,35 @@ "legacy": false, "lf_version": "1.5.0", "metadata": { + "code_hash": "bb5f8714781b", + "dependencies": { + "dependencies": [ + { + "name": "langchain_anthropic", + "version": "0.3.14" + }, + { + "name": "langchain_google_genai", + "version": "2.0.6" + }, + { + "name": "langchain_openai", + "version": "0.3.23" + }, + { + "name": "lfx", + "version": null + } + ], + "total_dependencies": 4 + }, "keywords": [ "model", "llm", "language model", "large language model" - ] + ], + "module": "lfx.components.models.language_model.LanguageModelComponent" }, "minimized": false, "output_types": [], @@ -1266,11 +1289,8 @@ "cache": true, "display_name": "Model Response", "group_outputs": false, - "hidden": null, "method": "text_response", "name": "text_output", - "options": null, - "required_inputs": null, "selected": "Message", "tool_mode": true, "types": [ @@ -1283,11 +1303,8 @@ "cache": true, "display_name": "Language Model", "group_outputs": false, - "hidden": null, "method": "build_model", "name": "model_output", - "options": null, - "required_inputs": null, "selected": "LanguageModel", "tool_mode": true, "types": [ @@ -1334,7 +1351,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from typing import Any\n\nfrom langchain_anthropic import ChatAnthropic\nfrom langchain_google_genai import ChatGoogleGenerativeAI\nfrom langchain_openai import ChatOpenAI\n\nfrom langflow.base.models.anthropic_constants import ANTHROPIC_MODELS\nfrom langflow.base.models.google_generative_ai_constants import GOOGLE_GENERATIVE_AI_MODELS\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import OPENAI_CHAT_MODEL_NAMES, OPENAI_REASONING_MODEL_NAMES\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs.inputs import BoolInput\nfrom langflow.io import DropdownInput, MessageInput, MultilineInput, SecretStrInput, SliderInput\nfrom langflow.schema.dotdict import dotdict\n\n\nclass LanguageModelComponent(LCModelComponent):\n display_name = \"Language Model\"\n description = \"Runs a language model given a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-models\"\n icon = \"brain-circuit\"\n category = \"models\"\n priority = 0 # Set priority to 0 to make it appear first\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n 
options=[\"OpenAI\", \"Anthropic\", \"Google\"],\n value=\"OpenAI\",\n info=\"Select the model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Anthropic\"}, {\"icon\": \"GoogleGenerativeAI\"}],\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n options=OPENAI_CHAT_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES,\n value=OPENAI_CHAT_MODEL_NAMES[0],\n info=\"Select the model to use\",\n real_time_refresh=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=False,\n show=True,\n real_time_refresh=True,\n ),\n MessageInput(\n name=\"input_value\",\n display_name=\"Input\",\n info=\"The input text to send to the model\",\n ),\n MultilineInput(\n name=\"system_message\",\n display_name=\"System Message\",\n info=\"A system message that helps set the behavior of the assistant\",\n advanced=False,\n ),\n BoolInput(\n name=\"stream\",\n display_name=\"Stream\",\n info=\"Whether to stream the response\",\n value=False,\n advanced=True,\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n info=\"Controls randomness in responses\",\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n advanced=True,\n ),\n ]\n\n def build_model(self) -> LanguageModel:\n provider = self.provider\n model_name = self.model_name\n temperature = self.temperature\n stream = self.stream\n\n if provider == \"OpenAI\":\n if not self.api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n raise ValueError(msg)\n\n if model_name in OPENAI_REASONING_MODEL_NAMES:\n # reasoning models do not support temperature (yet)\n temperature = None\n\n return ChatOpenAI(\n model_name=model_name,\n temperature=temperature,\n streaming=stream,\n openai_api_key=self.api_key,\n )\n if provider == \"Anthropic\":\n if not self.api_key:\n msg = \"Anthropic API key is required when using Anthropic provider\"\n raise ValueError(msg)\n return ChatAnthropic(\n model=model_name,\n temperature=temperature,\n streaming=stream,\n anthropic_api_key=self.api_key,\n )\n if provider == \"Google\":\n if not self.api_key:\n msg = \"Google API key is required when using Google provider\"\n raise ValueError(msg)\n return ChatGoogleGenerativeAI(\n model=model_name,\n temperature=temperature,\n streaming=stream,\n google_api_key=self.api_key,\n )\n msg = f\"Unknown provider: {provider}\"\n raise ValueError(msg)\n\n def update_build_config(self, build_config: dotdict, field_value: Any, field_name: str | None = None) -> dotdict:\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model_name\"][\"options\"] = OPENAI_CHAT_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES\n build_config[\"model_name\"][\"value\"] = OPENAI_CHAT_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n elif field_value == \"Anthropic\":\n build_config[\"model_name\"][\"options\"] = ANTHROPIC_MODELS\n build_config[\"model_name\"][\"value\"] = ANTHROPIC_MODELS[0]\n build_config[\"api_key\"][\"display_name\"] = \"Anthropic API Key\"\n elif field_value == \"Google\":\n build_config[\"model_name\"][\"options\"] = GOOGLE_GENERATIVE_AI_MODELS\n build_config[\"model_name\"][\"value\"] = GOOGLE_GENERATIVE_AI_MODELS[0]\n build_config[\"api_key\"][\"display_name\"] = \"Google API Key\"\n elif field_name == \"model_name\" and field_value.startswith(\"o1\") and self.provider == \"OpenAI\":\n # Hide system_message for o1 models - currently unsupported\n if 
\"system_message\" in build_config:\n build_config[\"system_message\"][\"show\"] = False\n elif field_name == \"model_name\" and not field_value.startswith(\"o1\") and \"system_message\" in build_config:\n build_config[\"system_message\"][\"show\"] = True\n return build_config\n" + "value": "from typing import Any\n\nfrom langchain_anthropic import ChatAnthropic\nfrom langchain_google_genai import ChatGoogleGenerativeAI\nfrom langchain_openai import ChatOpenAI\n\nfrom lfx.base.models.anthropic_constants import ANTHROPIC_MODELS\nfrom lfx.base.models.google_generative_ai_constants import GOOGLE_GENERATIVE_AI_MODELS\nfrom lfx.base.models.model import LCModelComponent\nfrom lfx.base.models.openai_constants import OPENAI_CHAT_MODEL_NAMES, OPENAI_REASONING_MODEL_NAMES\nfrom lfx.field_typing import LanguageModel\nfrom lfx.field_typing.range_spec import RangeSpec\nfrom lfx.inputs.inputs import BoolInput\nfrom lfx.io import DropdownInput, MessageInput, MultilineInput, SecretStrInput, SliderInput\nfrom lfx.schema.dotdict import dotdict\n\n\nclass LanguageModelComponent(LCModelComponent):\n display_name = \"Language Model\"\n description = \"Runs a language model given a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-models\"\n icon = \"brain-circuit\"\n category = \"models\"\n priority = 0 # Set priority to 0 to make it appear first\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Anthropic\", \"Google\"],\n value=\"OpenAI\",\n info=\"Select the model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Anthropic\"}, {\"icon\": \"GoogleGenerativeAI\"}],\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n options=OPENAI_CHAT_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES,\n value=OPENAI_CHAT_MODEL_NAMES[0],\n info=\"Select the model to use\",\n real_time_refresh=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=False,\n show=True,\n real_time_refresh=True,\n ),\n MessageInput(\n name=\"input_value\",\n display_name=\"Input\",\n info=\"The input text to send to the model\",\n ),\n MultilineInput(\n name=\"system_message\",\n display_name=\"System Message\",\n info=\"A system message that helps set the behavior of the assistant\",\n advanced=False,\n ),\n BoolInput(\n name=\"stream\",\n display_name=\"Stream\",\n info=\"Whether to stream the response\",\n value=False,\n advanced=True,\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n info=\"Controls randomness in responses\",\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n advanced=True,\n ),\n ]\n\n def build_model(self) -> LanguageModel:\n provider = self.provider\n model_name = self.model_name\n temperature = self.temperature\n stream = self.stream\n\n if provider == \"OpenAI\":\n if not self.api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n raise ValueError(msg)\n\n if model_name in OPENAI_REASONING_MODEL_NAMES:\n # reasoning models do not support temperature (yet)\n temperature = None\n\n return ChatOpenAI(\n model_name=model_name,\n temperature=temperature,\n streaming=stream,\n openai_api_key=self.api_key,\n )\n if provider == \"Anthropic\":\n if not self.api_key:\n msg = \"Anthropic API key is required when using Anthropic provider\"\n raise ValueError(msg)\n return ChatAnthropic(\n model=model_name,\n temperature=temperature,\n 
streaming=stream,\n anthropic_api_key=self.api_key,\n )\n if provider == \"Google\":\n if not self.api_key:\n msg = \"Google API key is required when using Google provider\"\n raise ValueError(msg)\n return ChatGoogleGenerativeAI(\n model=model_name,\n temperature=temperature,\n streaming=stream,\n google_api_key=self.api_key,\n )\n msg = f\"Unknown provider: {provider}\"\n raise ValueError(msg)\n\n def update_build_config(self, build_config: dotdict, field_value: Any, field_name: str | None = None) -> dotdict:\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model_name\"][\"options\"] = OPENAI_CHAT_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES\n build_config[\"model_name\"][\"value\"] = OPENAI_CHAT_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n elif field_value == \"Anthropic\":\n build_config[\"model_name\"][\"options\"] = ANTHROPIC_MODELS\n build_config[\"model_name\"][\"value\"] = ANTHROPIC_MODELS[0]\n build_config[\"api_key\"][\"display_name\"] = \"Anthropic API Key\"\n elif field_value == \"Google\":\n build_config[\"model_name\"][\"options\"] = GOOGLE_GENERATIVE_AI_MODELS\n build_config[\"model_name\"][\"value\"] = GOOGLE_GENERATIVE_AI_MODELS[0]\n build_config[\"api_key\"][\"display_name\"] = \"Google API Key\"\n elif field_name == \"model_name\" and field_value.startswith(\"o1\") and self.provider == \"OpenAI\":\n # Hide system_message for o1 models - currently unsupported\n if \"system_message\" in build_config:\n build_config[\"system_message\"][\"show\"] = False\n elif field_name == \"model_name\" and not field_value.startswith(\"o1\") and \"system_message\" in build_config:\n build_config[\"system_message\"][\"show\"] = True\n return build_config\n" }, "input_value": { "_input_type": "MessageInput", @@ -1374,11 +1391,20 @@ "gpt-4.1", "gpt-4.1-mini", "gpt-4.1-nano", - "gpt-4.5-preview", "gpt-4-turbo", "gpt-4-turbo-preview", "gpt-4", - "gpt-3.5-turbo" + "gpt-3.5-turbo", + "gpt-5", + "gpt-5-mini", + "gpt-5-nano", + "gpt-5-chat-latest", + "o1", + "o3-mini", + "o3", + "o3-pro", + "o4-mini", + "o4-mini-high" ], "options_metadata": [], "placeholder": "", @@ -1529,7 +1555,7 @@ "beta": false, "conditional_paths": [], "custom_fields": {}, - "description": "Runs a language model given a specified provider. 
", + "description": "Runs a language model given a specified provider.", "display_name": "Language Model", "documentation": "", "edited": false, @@ -1547,12 +1573,35 @@ "legacy": false, "lf_version": "1.5.0", "metadata": { + "code_hash": "bb5f8714781b", + "dependencies": { + "dependencies": [ + { + "name": "langchain_anthropic", + "version": "0.3.14" + }, + { + "name": "langchain_google_genai", + "version": "2.0.6" + }, + { + "name": "langchain_openai", + "version": "0.3.23" + }, + { + "name": "lfx", + "version": null + } + ], + "total_dependencies": 4 + }, "keywords": [ "model", "llm", "language model", "large language model" - ] + ], + "module": "lfx.components.models.language_model.LanguageModelComponent" }, "minimized": false, "output_types": [], @@ -1562,11 +1611,8 @@ "cache": true, "display_name": "Model Response", "group_outputs": false, - "hidden": null, "method": "text_response", "name": "text_output", - "options": null, - "required_inputs": null, "selected": "Message", "tool_mode": true, "types": [ @@ -1579,11 +1625,8 @@ "cache": true, "display_name": "Language Model", "group_outputs": false, - "hidden": null, "method": "build_model", "name": "model_output", - "options": null, - "required_inputs": null, "selected": "LanguageModel", "tool_mode": true, "types": [ @@ -1630,7 +1673,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from typing import Any\n\nfrom langchain_anthropic import ChatAnthropic\nfrom langchain_google_genai import ChatGoogleGenerativeAI\nfrom langchain_openai import ChatOpenAI\n\nfrom langflow.base.models.anthropic_constants import ANTHROPIC_MODELS\nfrom langflow.base.models.google_generative_ai_constants import GOOGLE_GENERATIVE_AI_MODELS\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import OPENAI_CHAT_MODEL_NAMES, OPENAI_REASONING_MODEL_NAMES\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs.inputs import BoolInput\nfrom langflow.io import DropdownInput, MessageInput, MultilineInput, SecretStrInput, SliderInput\nfrom langflow.schema.dotdict import dotdict\n\n\nclass LanguageModelComponent(LCModelComponent):\n display_name = \"Language Model\"\n description = \"Runs a language model given a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-models\"\n icon = \"brain-circuit\"\n category = \"models\"\n priority = 0 # Set priority to 0 to make it appear first\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Anthropic\", \"Google\"],\n value=\"OpenAI\",\n info=\"Select the model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Anthropic\"}, {\"icon\": \"GoogleGenerativeAI\"}],\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n options=OPENAI_CHAT_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES,\n value=OPENAI_CHAT_MODEL_NAMES[0],\n info=\"Select the model to use\",\n real_time_refresh=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=False,\n show=True,\n real_time_refresh=True,\n ),\n MessageInput(\n name=\"input_value\",\n display_name=\"Input\",\n info=\"The input text to send to the model\",\n ),\n MultilineInput(\n name=\"system_message\",\n display_name=\"System Message\",\n info=\"A system message that helps set the behavior of the assistant\",\n advanced=False,\n ),\n 
BoolInput(\n name=\"stream\",\n display_name=\"Stream\",\n info=\"Whether to stream the response\",\n value=False,\n advanced=True,\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n info=\"Controls randomness in responses\",\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n advanced=True,\n ),\n ]\n\n def build_model(self) -> LanguageModel:\n provider = self.provider\n model_name = self.model_name\n temperature = self.temperature\n stream = self.stream\n\n if provider == \"OpenAI\":\n if not self.api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n raise ValueError(msg)\n\n if model_name in OPENAI_REASONING_MODEL_NAMES:\n # reasoning models do not support temperature (yet)\n temperature = None\n\n return ChatOpenAI(\n model_name=model_name,\n temperature=temperature,\n streaming=stream,\n openai_api_key=self.api_key,\n )\n if provider == \"Anthropic\":\n if not self.api_key:\n msg = \"Anthropic API key is required when using Anthropic provider\"\n raise ValueError(msg)\n return ChatAnthropic(\n model=model_name,\n temperature=temperature,\n streaming=stream,\n anthropic_api_key=self.api_key,\n )\n if provider == \"Google\":\n if not self.api_key:\n msg = \"Google API key is required when using Google provider\"\n raise ValueError(msg)\n return ChatGoogleGenerativeAI(\n model=model_name,\n temperature=temperature,\n streaming=stream,\n google_api_key=self.api_key,\n )\n msg = f\"Unknown provider: {provider}\"\n raise ValueError(msg)\n\n def update_build_config(self, build_config: dotdict, field_value: Any, field_name: str | None = None) -> dotdict:\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model_name\"][\"options\"] = OPENAI_CHAT_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES\n build_config[\"model_name\"][\"value\"] = OPENAI_CHAT_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n elif field_value == \"Anthropic\":\n build_config[\"model_name\"][\"options\"] = ANTHROPIC_MODELS\n build_config[\"model_name\"][\"value\"] = ANTHROPIC_MODELS[0]\n build_config[\"api_key\"][\"display_name\"] = \"Anthropic API Key\"\n elif field_value == \"Google\":\n build_config[\"model_name\"][\"options\"] = GOOGLE_GENERATIVE_AI_MODELS\n build_config[\"model_name\"][\"value\"] = GOOGLE_GENERATIVE_AI_MODELS[0]\n build_config[\"api_key\"][\"display_name\"] = \"Google API Key\"\n elif field_name == \"model_name\" and field_value.startswith(\"o1\") and self.provider == \"OpenAI\":\n # Hide system_message for o1 models - currently unsupported\n if \"system_message\" in build_config:\n build_config[\"system_message\"][\"show\"] = False\n elif field_name == \"model_name\" and not field_value.startswith(\"o1\") and \"system_message\" in build_config:\n build_config[\"system_message\"][\"show\"] = True\n return build_config\n" + "value": "from typing import Any\n\nfrom langchain_anthropic import ChatAnthropic\nfrom langchain_google_genai import ChatGoogleGenerativeAI\nfrom langchain_openai import ChatOpenAI\n\nfrom lfx.base.models.anthropic_constants import ANTHROPIC_MODELS\nfrom lfx.base.models.google_generative_ai_constants import GOOGLE_GENERATIVE_AI_MODELS\nfrom lfx.base.models.model import LCModelComponent\nfrom lfx.base.models.openai_constants import OPENAI_CHAT_MODEL_NAMES, OPENAI_REASONING_MODEL_NAMES\nfrom lfx.field_typing import LanguageModel\nfrom lfx.field_typing.range_spec import RangeSpec\nfrom lfx.inputs.inputs import BoolInput\nfrom lfx.io import DropdownInput, 
MessageInput, MultilineInput, SecretStrInput, SliderInput\nfrom lfx.schema.dotdict import dotdict\n\n\nclass LanguageModelComponent(LCModelComponent):\n display_name = \"Language Model\"\n description = \"Runs a language model given a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-models\"\n icon = \"brain-circuit\"\n category = \"models\"\n priority = 0 # Set priority to 0 to make it appear first\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Anthropic\", \"Google\"],\n value=\"OpenAI\",\n info=\"Select the model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Anthropic\"}, {\"icon\": \"GoogleGenerativeAI\"}],\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n options=OPENAI_CHAT_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES,\n value=OPENAI_CHAT_MODEL_NAMES[0],\n info=\"Select the model to use\",\n real_time_refresh=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=False,\n show=True,\n real_time_refresh=True,\n ),\n MessageInput(\n name=\"input_value\",\n display_name=\"Input\",\n info=\"The input text to send to the model\",\n ),\n MultilineInput(\n name=\"system_message\",\n display_name=\"System Message\",\n info=\"A system message that helps set the behavior of the assistant\",\n advanced=False,\n ),\n BoolInput(\n name=\"stream\",\n display_name=\"Stream\",\n info=\"Whether to stream the response\",\n value=False,\n advanced=True,\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n info=\"Controls randomness in responses\",\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n advanced=True,\n ),\n ]\n\n def build_model(self) -> LanguageModel:\n provider = self.provider\n model_name = self.model_name\n temperature = self.temperature\n stream = self.stream\n\n if provider == \"OpenAI\":\n if not self.api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n raise ValueError(msg)\n\n if model_name in OPENAI_REASONING_MODEL_NAMES:\n # reasoning models do not support temperature (yet)\n temperature = None\n\n return ChatOpenAI(\n model_name=model_name,\n temperature=temperature,\n streaming=stream,\n openai_api_key=self.api_key,\n )\n if provider == \"Anthropic\":\n if not self.api_key:\n msg = \"Anthropic API key is required when using Anthropic provider\"\n raise ValueError(msg)\n return ChatAnthropic(\n model=model_name,\n temperature=temperature,\n streaming=stream,\n anthropic_api_key=self.api_key,\n )\n if provider == \"Google\":\n if not self.api_key:\n msg = \"Google API key is required when using Google provider\"\n raise ValueError(msg)\n return ChatGoogleGenerativeAI(\n model=model_name,\n temperature=temperature,\n streaming=stream,\n google_api_key=self.api_key,\n )\n msg = f\"Unknown provider: {provider}\"\n raise ValueError(msg)\n\n def update_build_config(self, build_config: dotdict, field_value: Any, field_name: str | None = None) -> dotdict:\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model_name\"][\"options\"] = OPENAI_CHAT_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES\n build_config[\"model_name\"][\"value\"] = OPENAI_CHAT_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n elif field_value == \"Anthropic\":\n build_config[\"model_name\"][\"options\"] = ANTHROPIC_MODELS\n 
build_config[\"model_name\"][\"value\"] = ANTHROPIC_MODELS[0]\n build_config[\"api_key\"][\"display_name\"] = \"Anthropic API Key\"\n elif field_value == \"Google\":\n build_config[\"model_name\"][\"options\"] = GOOGLE_GENERATIVE_AI_MODELS\n build_config[\"model_name\"][\"value\"] = GOOGLE_GENERATIVE_AI_MODELS[0]\n build_config[\"api_key\"][\"display_name\"] = \"Google API Key\"\n elif field_name == \"model_name\" and field_value.startswith(\"o1\") and self.provider == \"OpenAI\":\n # Hide system_message for o1 models - currently unsupported\n if \"system_message\" in build_config:\n build_config[\"system_message\"][\"show\"] = False\n elif field_name == \"model_name\" and not field_value.startswith(\"o1\") and \"system_message\" in build_config:\n build_config[\"system_message\"][\"show\"] = True\n return build_config\n" }, "input_value": { "_input_type": "MessageInput", @@ -1670,11 +1713,20 @@ "gpt-4.1", "gpt-4.1-mini", "gpt-4.1-nano", - "gpt-4.5-preview", "gpt-4-turbo", "gpt-4-turbo-preview", "gpt-4", - "gpt-3.5-turbo" + "gpt-3.5-turbo", + "gpt-5", + "gpt-5-mini", + "gpt-5-nano", + "gpt-5-chat-latest", + "o1", + "o3-mini", + "o3", + "o3-pro", + "o4-mini", + "o4-mini-high" ], "options_metadata": [], "placeholder": "", @@ -1825,7 +1877,7 @@ "beta": false, "conditional_paths": [], "custom_fields": {}, - "description": "Runs a language model given a specified provider. ", + "description": "Runs a language model given a specified provider.", "display_name": "Language Model", "documentation": "", "edited": false, @@ -1842,12 +1894,35 @@ "icon": "brain-circuit", "legacy": false, "metadata": { + "code_hash": "bb5f8714781b", + "dependencies": { + "dependencies": [ + { + "name": "langchain_anthropic", + "version": "0.3.14" + }, + { + "name": "langchain_google_genai", + "version": "2.0.6" + }, + { + "name": "langchain_openai", + "version": "0.3.23" + }, + { + "name": "lfx", + "version": null + } + ], + "total_dependencies": 4 + }, "keywords": [ "model", "llm", "language model", "large language model" - ] + ], + "module": "lfx.components.models.language_model.LanguageModelComponent" }, "minimized": false, "output_types": [], @@ -1857,11 +1932,8 @@ "cache": true, "display_name": "Model Response", "group_outputs": false, - "hidden": null, "method": "text_response", "name": "text_output", - "options": null, - "required_inputs": null, "selected": "Message", "tool_mode": true, "types": [ @@ -1874,11 +1946,8 @@ "cache": true, "display_name": "Language Model", "group_outputs": false, - "hidden": null, "method": "build_model", "name": "model_output", - "options": null, - "required_inputs": null, "selected": "LanguageModel", "tool_mode": true, "types": [ @@ -1925,7 +1994,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from typing import Any\n\nfrom langchain_anthropic import ChatAnthropic\nfrom langchain_google_genai import ChatGoogleGenerativeAI\nfrom langchain_openai import ChatOpenAI\n\nfrom langflow.base.models.anthropic_constants import ANTHROPIC_MODELS\nfrom langflow.base.models.google_generative_ai_constants import GOOGLE_GENERATIVE_AI_MODELS\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import OPENAI_CHAT_MODEL_NAMES, OPENAI_REASONING_MODEL_NAMES\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs.inputs import BoolInput\nfrom langflow.io import DropdownInput, MessageInput, MultilineInput, SecretStrInput, SliderInput\nfrom langflow.schema.dotdict 
import dotdict\n\n\nclass LanguageModelComponent(LCModelComponent):\n display_name = \"Language Model\"\n description = \"Runs a language model given a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-models\"\n icon = \"brain-circuit\"\n category = \"models\"\n priority = 0 # Set priority to 0 to make it appear first\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Anthropic\", \"Google\"],\n value=\"OpenAI\",\n info=\"Select the model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Anthropic\"}, {\"icon\": \"GoogleGenerativeAI\"}],\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n options=OPENAI_CHAT_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES,\n value=OPENAI_CHAT_MODEL_NAMES[0],\n info=\"Select the model to use\",\n real_time_refresh=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=False,\n show=True,\n real_time_refresh=True,\n ),\n MessageInput(\n name=\"input_value\",\n display_name=\"Input\",\n info=\"The input text to send to the model\",\n ),\n MultilineInput(\n name=\"system_message\",\n display_name=\"System Message\",\n info=\"A system message that helps set the behavior of the assistant\",\n advanced=False,\n ),\n BoolInput(\n name=\"stream\",\n display_name=\"Stream\",\n info=\"Whether to stream the response\",\n value=False,\n advanced=True,\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n info=\"Controls randomness in responses\",\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n advanced=True,\n ),\n ]\n\n def build_model(self) -> LanguageModel:\n provider = self.provider\n model_name = self.model_name\n temperature = self.temperature\n stream = self.stream\n\n if provider == \"OpenAI\":\n if not self.api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n raise ValueError(msg)\n\n if model_name in OPENAI_REASONING_MODEL_NAMES:\n # reasoning models do not support temperature (yet)\n temperature = None\n\n return ChatOpenAI(\n model_name=model_name,\n temperature=temperature,\n streaming=stream,\n openai_api_key=self.api_key,\n )\n if provider == \"Anthropic\":\n if not self.api_key:\n msg = \"Anthropic API key is required when using Anthropic provider\"\n raise ValueError(msg)\n return ChatAnthropic(\n model=model_name,\n temperature=temperature,\n streaming=stream,\n anthropic_api_key=self.api_key,\n )\n if provider == \"Google\":\n if not self.api_key:\n msg = \"Google API key is required when using Google provider\"\n raise ValueError(msg)\n return ChatGoogleGenerativeAI(\n model=model_name,\n temperature=temperature,\n streaming=stream,\n google_api_key=self.api_key,\n )\n msg = f\"Unknown provider: {provider}\"\n raise ValueError(msg)\n\n def update_build_config(self, build_config: dotdict, field_value: Any, field_name: str | None = None) -> dotdict:\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model_name\"][\"options\"] = OPENAI_CHAT_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES\n build_config[\"model_name\"][\"value\"] = OPENAI_CHAT_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n elif field_value == \"Anthropic\":\n build_config[\"model_name\"][\"options\"] = ANTHROPIC_MODELS\n build_config[\"model_name\"][\"value\"] = ANTHROPIC_MODELS[0]\n build_config[\"api_key\"][\"display_name\"] = 
\"Anthropic API Key\"\n elif field_value == \"Google\":\n build_config[\"model_name\"][\"options\"] = GOOGLE_GENERATIVE_AI_MODELS\n build_config[\"model_name\"][\"value\"] = GOOGLE_GENERATIVE_AI_MODELS[0]\n build_config[\"api_key\"][\"display_name\"] = \"Google API Key\"\n elif field_name == \"model_name\" and field_value.startswith(\"o1\") and self.provider == \"OpenAI\":\n # Hide system_message for o1 models - currently unsupported\n if \"system_message\" in build_config:\n build_config[\"system_message\"][\"show\"] = False\n elif field_name == \"model_name\" and not field_value.startswith(\"o1\") and \"system_message\" in build_config:\n build_config[\"system_message\"][\"show\"] = True\n return build_config\n" + "value": "from typing import Any\n\nfrom langchain_anthropic import ChatAnthropic\nfrom langchain_google_genai import ChatGoogleGenerativeAI\nfrom langchain_openai import ChatOpenAI\n\nfrom lfx.base.models.anthropic_constants import ANTHROPIC_MODELS\nfrom lfx.base.models.google_generative_ai_constants import GOOGLE_GENERATIVE_AI_MODELS\nfrom lfx.base.models.model import LCModelComponent\nfrom lfx.base.models.openai_constants import OPENAI_CHAT_MODEL_NAMES, OPENAI_REASONING_MODEL_NAMES\nfrom lfx.field_typing import LanguageModel\nfrom lfx.field_typing.range_spec import RangeSpec\nfrom lfx.inputs.inputs import BoolInput\nfrom lfx.io import DropdownInput, MessageInput, MultilineInput, SecretStrInput, SliderInput\nfrom lfx.schema.dotdict import dotdict\n\n\nclass LanguageModelComponent(LCModelComponent):\n display_name = \"Language Model\"\n description = \"Runs a language model given a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-models\"\n icon = \"brain-circuit\"\n category = \"models\"\n priority = 0 # Set priority to 0 to make it appear first\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Anthropic\", \"Google\"],\n value=\"OpenAI\",\n info=\"Select the model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Anthropic\"}, {\"icon\": \"GoogleGenerativeAI\"}],\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n options=OPENAI_CHAT_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES,\n value=OPENAI_CHAT_MODEL_NAMES[0],\n info=\"Select the model to use\",\n real_time_refresh=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=False,\n show=True,\n real_time_refresh=True,\n ),\n MessageInput(\n name=\"input_value\",\n display_name=\"Input\",\n info=\"The input text to send to the model\",\n ),\n MultilineInput(\n name=\"system_message\",\n display_name=\"System Message\",\n info=\"A system message that helps set the behavior of the assistant\",\n advanced=False,\n ),\n BoolInput(\n name=\"stream\",\n display_name=\"Stream\",\n info=\"Whether to stream the response\",\n value=False,\n advanced=True,\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n info=\"Controls randomness in responses\",\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n advanced=True,\n ),\n ]\n\n def build_model(self) -> LanguageModel:\n provider = self.provider\n model_name = self.model_name\n temperature = self.temperature\n stream = self.stream\n\n if provider == \"OpenAI\":\n if not self.api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n raise ValueError(msg)\n\n if model_name in 
OPENAI_REASONING_MODEL_NAMES:\n # reasoning models do not support temperature (yet)\n temperature = None\n\n return ChatOpenAI(\n model_name=model_name,\n temperature=temperature,\n streaming=stream,\n openai_api_key=self.api_key,\n )\n if provider == \"Anthropic\":\n if not self.api_key:\n msg = \"Anthropic API key is required when using Anthropic provider\"\n raise ValueError(msg)\n return ChatAnthropic(\n model=model_name,\n temperature=temperature,\n streaming=stream,\n anthropic_api_key=self.api_key,\n )\n if provider == \"Google\":\n if not self.api_key:\n msg = \"Google API key is required when using Google provider\"\n raise ValueError(msg)\n return ChatGoogleGenerativeAI(\n model=model_name,\n temperature=temperature,\n streaming=stream,\n google_api_key=self.api_key,\n )\n msg = f\"Unknown provider: {provider}\"\n raise ValueError(msg)\n\n def update_build_config(self, build_config: dotdict, field_value: Any, field_name: str | None = None) -> dotdict:\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model_name\"][\"options\"] = OPENAI_CHAT_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES\n build_config[\"model_name\"][\"value\"] = OPENAI_CHAT_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n elif field_value == \"Anthropic\":\n build_config[\"model_name\"][\"options\"] = ANTHROPIC_MODELS\n build_config[\"model_name\"][\"value\"] = ANTHROPIC_MODELS[0]\n build_config[\"api_key\"][\"display_name\"] = \"Anthropic API Key\"\n elif field_value == \"Google\":\n build_config[\"model_name\"][\"options\"] = GOOGLE_GENERATIVE_AI_MODELS\n build_config[\"model_name\"][\"value\"] = GOOGLE_GENERATIVE_AI_MODELS[0]\n build_config[\"api_key\"][\"display_name\"] = \"Google API Key\"\n elif field_name == \"model_name\" and field_value.startswith(\"o1\") and self.provider == \"OpenAI\":\n # Hide system_message for o1 models - currently unsupported\n if \"system_message\" in build_config:\n build_config[\"system_message\"][\"show\"] = False\n elif field_name == \"model_name\" and not field_value.startswith(\"o1\") and \"system_message\" in build_config:\n build_config[\"system_message\"][\"show\"] = True\n return build_config\n" }, "input_value": { "_input_type": "MessageInput", @@ -1965,11 +2034,20 @@ "gpt-4.1", "gpt-4.1-mini", "gpt-4.1-nano", - "gpt-4.5-preview", "gpt-4-turbo", "gpt-4-turbo-preview", "gpt-4", - "gpt-3.5-turbo" + "gpt-3.5-turbo", + "gpt-5", + "gpt-5-mini", + "gpt-5-nano", + "gpt-5-chat-latest", + "o1", + "o3-mini", + "o3", + "o3-pro", + "o4-mini", + "o4-mini-high" ], "options_metadata": [], "placeholder": "", diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Basic Prompting.json b/src/backend/base/langflow/initial_setup/starter_projects/Basic Prompting.json index d26cb5250bae..6c340d548c21 100644 --- a/src/backend/base/langflow/initial_setup/starter_projects/Basic Prompting.json +++ b/src/backend/base/langflow/initial_setup/starter_projects/Basic Prompting.json @@ -117,17 +117,17 @@ "legacy": false, "lf_version": "1.4.2", "metadata": { - "code_hash": "192913db3453", + "code_hash": "715a37648834", "dependencies": { "dependencies": [ { - "name": "langflow", + "name": "lfx", "version": null } ], "total_dependencies": 1 }, - "module": "langflow.components.input_output.chat.ChatInput" + "module": "lfx.components.input_output.chat.ChatInput" }, "output_types": [], "outputs": [ @@ -207,7 +207,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from langflow.base.data.utils import IMG_FILE_TYPES, 
TEXT_FILE_TYPES\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.inputs.inputs import BoolInput\nfrom langflow.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n documentation: str = \"https://docs.langflow.org/components-io#chat-input\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\n \"background_color\": background_color,\n \"text_color\": text_color,\n \"icon\": icon,\n },\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n" + "value": "from lfx.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom lfx.base.io.chat import ChatComponent\nfrom lfx.inputs.inputs import BoolInput\nfrom lfx.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom lfx.schema.message import Message\nfrom lfx.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n documentation: str = 
\"https://docs.langflow.org/components-io#chat-input\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\n \"background_color\": background_color,\n \"text_color\": text_color,\n \"icon\": icon,\n },\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n" }, "files": { "advanced": true, @@ -624,7 +624,7 @@ "legacy": false, "lf_version": "1.4.2", "metadata": { - "code_hash": "6f74e04e39d5", + "code_hash": "9619107fecd1", "dependencies": { "dependencies": [ { @@ -636,13 +636,13 @@ "version": "0.116.1" }, { - "name": "langflow", + "name": "lfx", "version": null } ], "total_dependencies": 3 }, - "module": "langflow.components.input_output.chat_output.ChatOutput" + "module": "lfx.components.input_output.chat_output.ChatOutput" }, "output_types": [], "outputs": [ @@ -742,7 +742,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs.inputs import BoolInput, DropdownInput, HandleInput, MessageTextInput\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.template.field.base 
import Output\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n documentation: str = \"https://docs.langflow.org/components-io#chat-output\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, 
\"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n" + "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom lfx.base.io.chat import ChatComponent\nfrom lfx.helpers.data import safe_convert\nfrom lfx.inputs.inputs import BoolInput, DropdownInput, HandleInput, MessageTextInput\nfrom lfx.schema.data import Data\nfrom lfx.schema.dataframe import DataFrame\nfrom lfx.schema.message import Message\nfrom lfx.schema.properties import Source\nfrom lfx.template.field.base import Output\nfrom lfx.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n documentation: str = \"https://docs.langflow.org/components-io#chat-output\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n 
display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, 
option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n" }, "data_template": { "_input_type": "MessageTextInput", @@ -927,7 +927,7 @@ "beta": false, "conditional_paths": [], "custom_fields": {}, - "description": "Runs a language model given a specified provider. ", + "description": "Runs a language model given a specified provider.", "display_name": "Language Model", "documentation": "", "edited": false, @@ -944,12 +944,35 @@ "icon": "brain-circuit", "legacy": false, "metadata": { + "code_hash": "bb5f8714781b", + "dependencies": { + "dependencies": [ + { + "name": "langchain_anthropic", + "version": "0.3.14" + }, + { + "name": "langchain_google_genai", + "version": "2.0.6" + }, + { + "name": "langchain_openai", + "version": "0.3.23" + }, + { + "name": "lfx", + "version": null + } + ], + "total_dependencies": 4 + }, "keywords": [ "model", "llm", "language model", "large language model" - ] + ], + "module": "lfx.components.models.language_model.LanguageModelComponent" }, "minimized": false, "output_types": [], @@ -959,11 +982,8 @@ "cache": true, "display_name": "Model Response", "group_outputs": false, - "hidden": null, "method": "text_response", "name": "text_output", - "options": null, - "required_inputs": null, "selected": "Message", "tool_mode": true, "types": [ @@ -976,11 +996,8 @@ "cache": true, "display_name": "Language Model", "group_outputs": false, - "hidden": null, "method": "build_model", "name": "model_output", - "options": null, - "required_inputs": null, "selected": "LanguageModel", "tool_mode": true, "types": [ @@ -1027,7 +1044,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from typing import Any\n\nfrom langchain_anthropic import ChatAnthropic\nfrom langchain_google_genai import ChatGoogleGenerativeAI\nfrom langchain_openai import ChatOpenAI\n\nfrom langflow.base.models.anthropic_constants import ANTHROPIC_MODELS\nfrom langflow.base.models.google_generative_ai_constants import GOOGLE_GENERATIVE_AI_MODELS\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import OPENAI_CHAT_MODEL_NAMES, OPENAI_REASONING_MODEL_NAMES\nfrom langflow.field_typing import 
LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs.inputs import BoolInput\nfrom langflow.io import DropdownInput, MessageInput, MultilineInput, SecretStrInput, SliderInput\nfrom langflow.schema.dotdict import dotdict\n\n\nclass LanguageModelComponent(LCModelComponent):\n display_name = \"Language Model\"\n description = \"Runs a language model given a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-models\"\n icon = \"brain-circuit\"\n category = \"models\"\n priority = 0 # Set priority to 0 to make it appear first\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Anthropic\", \"Google\"],\n value=\"OpenAI\",\n info=\"Select the model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Anthropic\"}, {\"icon\": \"GoogleGenerativeAI\"}],\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n options=OPENAI_CHAT_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES,\n value=OPENAI_CHAT_MODEL_NAMES[0],\n info=\"Select the model to use\",\n real_time_refresh=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=False,\n show=True,\n real_time_refresh=True,\n ),\n MessageInput(\n name=\"input_value\",\n display_name=\"Input\",\n info=\"The input text to send to the model\",\n ),\n MultilineInput(\n name=\"system_message\",\n display_name=\"System Message\",\n info=\"A system message that helps set the behavior of the assistant\",\n advanced=False,\n ),\n BoolInput(\n name=\"stream\",\n display_name=\"Stream\",\n info=\"Whether to stream the response\",\n value=False,\n advanced=True,\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n info=\"Controls randomness in responses\",\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n advanced=True,\n ),\n ]\n\n def build_model(self) -> LanguageModel:\n provider = self.provider\n model_name = self.model_name\n temperature = self.temperature\n stream = self.stream\n\n if provider == \"OpenAI\":\n if not self.api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n raise ValueError(msg)\n\n if model_name in OPENAI_REASONING_MODEL_NAMES:\n # reasoning models do not support temperature (yet)\n temperature = None\n\n return ChatOpenAI(\n model_name=model_name,\n temperature=temperature,\n streaming=stream,\n openai_api_key=self.api_key,\n )\n if provider == \"Anthropic\":\n if not self.api_key:\n msg = \"Anthropic API key is required when using Anthropic provider\"\n raise ValueError(msg)\n return ChatAnthropic(\n model=model_name,\n temperature=temperature,\n streaming=stream,\n anthropic_api_key=self.api_key,\n )\n if provider == \"Google\":\n if not self.api_key:\n msg = \"Google API key is required when using Google provider\"\n raise ValueError(msg)\n return ChatGoogleGenerativeAI(\n model=model_name,\n temperature=temperature,\n streaming=stream,\n google_api_key=self.api_key,\n )\n msg = f\"Unknown provider: {provider}\"\n raise ValueError(msg)\n\n def update_build_config(self, build_config: dotdict, field_value: Any, field_name: str | None = None) -> dotdict:\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model_name\"][\"options\"] = OPENAI_CHAT_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES\n build_config[\"model_name\"][\"value\"] = OPENAI_CHAT_MODEL_NAMES[0]\n 
build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n elif field_value == \"Anthropic\":\n build_config[\"model_name\"][\"options\"] = ANTHROPIC_MODELS\n build_config[\"model_name\"][\"value\"] = ANTHROPIC_MODELS[0]\n build_config[\"api_key\"][\"display_name\"] = \"Anthropic API Key\"\n elif field_value == \"Google\":\n build_config[\"model_name\"][\"options\"] = GOOGLE_GENERATIVE_AI_MODELS\n build_config[\"model_name\"][\"value\"] = GOOGLE_GENERATIVE_AI_MODELS[0]\n build_config[\"api_key\"][\"display_name\"] = \"Google API Key\"\n elif field_name == \"model_name\" and field_value.startswith(\"o1\") and self.provider == \"OpenAI\":\n # Hide system_message for o1 models - currently unsupported\n if \"system_message\" in build_config:\n build_config[\"system_message\"][\"show\"] = False\n elif field_name == \"model_name\" and not field_value.startswith(\"o1\") and \"system_message\" in build_config:\n build_config[\"system_message\"][\"show\"] = True\n return build_config\n" + "value": "from typing import Any\n\nfrom langchain_anthropic import ChatAnthropic\nfrom langchain_google_genai import ChatGoogleGenerativeAI\nfrom langchain_openai import ChatOpenAI\n\nfrom lfx.base.models.anthropic_constants import ANTHROPIC_MODELS\nfrom lfx.base.models.google_generative_ai_constants import GOOGLE_GENERATIVE_AI_MODELS\nfrom lfx.base.models.model import LCModelComponent\nfrom lfx.base.models.openai_constants import OPENAI_CHAT_MODEL_NAMES, OPENAI_REASONING_MODEL_NAMES\nfrom lfx.field_typing import LanguageModel\nfrom lfx.field_typing.range_spec import RangeSpec\nfrom lfx.inputs.inputs import BoolInput\nfrom lfx.io import DropdownInput, MessageInput, MultilineInput, SecretStrInput, SliderInput\nfrom lfx.schema.dotdict import dotdict\n\n\nclass LanguageModelComponent(LCModelComponent):\n display_name = \"Language Model\"\n description = \"Runs a language model given a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-models\"\n icon = \"brain-circuit\"\n category = \"models\"\n priority = 0 # Set priority to 0 to make it appear first\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Anthropic\", \"Google\"],\n value=\"OpenAI\",\n info=\"Select the model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Anthropic\"}, {\"icon\": \"GoogleGenerativeAI\"}],\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n options=OPENAI_CHAT_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES,\n value=OPENAI_CHAT_MODEL_NAMES[0],\n info=\"Select the model to use\",\n real_time_refresh=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=False,\n show=True,\n real_time_refresh=True,\n ),\n MessageInput(\n name=\"input_value\",\n display_name=\"Input\",\n info=\"The input text to send to the model\",\n ),\n MultilineInput(\n name=\"system_message\",\n display_name=\"System Message\",\n info=\"A system message that helps set the behavior of the assistant\",\n advanced=False,\n ),\n BoolInput(\n name=\"stream\",\n display_name=\"Stream\",\n info=\"Whether to stream the response\",\n value=False,\n advanced=True,\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n info=\"Controls randomness in responses\",\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n advanced=True,\n ),\n ]\n\n def build_model(self) -> LanguageModel:\n provider = 
self.provider\n model_name = self.model_name\n temperature = self.temperature\n stream = self.stream\n\n if provider == \"OpenAI\":\n if not self.api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n raise ValueError(msg)\n\n if model_name in OPENAI_REASONING_MODEL_NAMES:\n # reasoning models do not support temperature (yet)\n temperature = None\n\n return ChatOpenAI(\n model_name=model_name,\n temperature=temperature,\n streaming=stream,\n openai_api_key=self.api_key,\n )\n if provider == \"Anthropic\":\n if not self.api_key:\n msg = \"Anthropic API key is required when using Anthropic provider\"\n raise ValueError(msg)\n return ChatAnthropic(\n model=model_name,\n temperature=temperature,\n streaming=stream,\n anthropic_api_key=self.api_key,\n )\n if provider == \"Google\":\n if not self.api_key:\n msg = \"Google API key is required when using Google provider\"\n raise ValueError(msg)\n return ChatGoogleGenerativeAI(\n model=model_name,\n temperature=temperature,\n streaming=stream,\n google_api_key=self.api_key,\n )\n msg = f\"Unknown provider: {provider}\"\n raise ValueError(msg)\n\n def update_build_config(self, build_config: dotdict, field_value: Any, field_name: str | None = None) -> dotdict:\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model_name\"][\"options\"] = OPENAI_CHAT_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES\n build_config[\"model_name\"][\"value\"] = OPENAI_CHAT_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n elif field_value == \"Anthropic\":\n build_config[\"model_name\"][\"options\"] = ANTHROPIC_MODELS\n build_config[\"model_name\"][\"value\"] = ANTHROPIC_MODELS[0]\n build_config[\"api_key\"][\"display_name\"] = \"Anthropic API Key\"\n elif field_value == \"Google\":\n build_config[\"model_name\"][\"options\"] = GOOGLE_GENERATIVE_AI_MODELS\n build_config[\"model_name\"][\"value\"] = GOOGLE_GENERATIVE_AI_MODELS[0]\n build_config[\"api_key\"][\"display_name\"] = \"Google API Key\"\n elif field_name == \"model_name\" and field_value.startswith(\"o1\") and self.provider == \"OpenAI\":\n # Hide system_message for o1 models - currently unsupported\n if \"system_message\" in build_config:\n build_config[\"system_message\"][\"show\"] = False\n elif field_name == \"model_name\" and not field_value.startswith(\"o1\") and \"system_message\" in build_config:\n build_config[\"system_message\"][\"show\"] = True\n return build_config\n" }, "input_value": { "_input_type": "MessageInput", @@ -1067,11 +1084,20 @@ "gpt-4.1", "gpt-4.1-mini", "gpt-4.1-nano", - "gpt-4.5-preview", "gpt-4-turbo", "gpt-4-turbo-preview", "gpt-4", - "gpt-3.5-turbo" + "gpt-3.5-turbo", + "gpt-5", + "gpt-5-mini", + "gpt-5-nano", + "gpt-5-chat-latest", + "o1", + "o3-mini", + "o3", + "o3-pro", + "o4-mini", + "o4-mini-high" ], "options_metadata": [], "placeholder": "", diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Blog Writer.json b/src/backend/base/langflow/initial_setup/starter_projects/Blog Writer.json index e790e75432c7..d6c45b65fc6c 100644 --- a/src/backend/base/langflow/initial_setup/starter_projects/Blog Writer.json +++ b/src/backend/base/langflow/initial_setup/starter_projects/Blog Writer.json @@ -352,17 +352,17 @@ "legacy": false, "lf_version": "1.4.2", "metadata": { - "code_hash": "efdcba3771af", + "code_hash": "3dd28ea591b9", "dependencies": { "dependencies": [ { - "name": "langflow", + "name": "lfx", "version": null } ], "total_dependencies": 1 }, - "module": 
"langflow.components.input_output.text.TextInputComponent" + "module": "lfx.components.input_output.text.TextInputComponent" }, "output_types": [], "outputs": [ @@ -400,7 +400,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from langflow.base.io.text import TextComponent\nfrom langflow.io import MultilineInput, Output\nfrom langflow.schema.message import Message\n\n\nclass TextInputComponent(TextComponent):\n display_name = \"Text Input\"\n description = \"Get user text inputs.\"\n documentation: str = \"https://docs.langflow.org/components-io#text-input\"\n icon = \"type\"\n name = \"TextInput\"\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Text\",\n info=\"Text to be passed as input.\",\n ),\n ]\n outputs = [\n Output(display_name=\"Output Text\", name=\"text\", method=\"text_response\"),\n ]\n\n def text_response(self) -> Message:\n return Message(\n text=self.input_value,\n )\n" + "value": "from lfx.base.io.text import TextComponent\nfrom lfx.io import MultilineInput, Output\nfrom lfx.schema.message import Message\n\n\nclass TextInputComponent(TextComponent):\n display_name = \"Text Input\"\n description = \"Get user text inputs.\"\n documentation: str = \"https://docs.langflow.org/components-io#text-input\"\n icon = \"type\"\n name = \"TextInput\"\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Text\",\n info=\"Text to be passed as input.\",\n ),\n ]\n outputs = [\n Output(display_name=\"Output Text\", name=\"text\", method=\"text_response\"),\n ]\n\n def text_response(self) -> Message:\n return Message(\n text=self.input_value,\n )\n" }, "input_value": { "_input_type": "MultilineInput", @@ -477,7 +477,7 @@ "legacy": false, "lf_version": "1.4.2", "metadata": { - "code_hash": "6f74e04e39d5", + "code_hash": "9619107fecd1", "dependencies": { "dependencies": [ { @@ -489,13 +489,13 @@ "version": "0.116.1" }, { - "name": "langflow", + "name": "lfx", "version": null } ], "total_dependencies": 3 }, - "module": "langflow.components.input_output.chat_output.ChatOutput" + "module": "lfx.components.input_output.chat_output.ChatOutput" }, "output_types": [], "outputs": [ @@ -593,7 +593,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs.inputs import BoolInput, DropdownInput, HandleInput, MessageTextInput\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.template.field.base import Output\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n documentation: str = \"https://docs.langflow.org/components-io#chat-output\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n 
advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n 
json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n" + "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom lfx.base.io.chat import ChatComponent\nfrom lfx.helpers.data import safe_convert\nfrom lfx.inputs.inputs import BoolInput, DropdownInput, HandleInput, MessageTextInput\nfrom lfx.schema.data import Data\nfrom lfx.schema.dataframe import DataFrame\nfrom lfx.schema.message import Message\nfrom lfx.schema.properties import Source\nfrom lfx.template.field.base import Output\nfrom lfx.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n documentation: str = \"https://docs.langflow.org/components-io#chat-output\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, 
got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n" }, "data_template": { "advanced": true, @@ -817,17 +817,17 @@ "legacy": false, "lf_version": "1.4.2", "metadata": { - "code_hash": "556209520650", + "code_hash": "bf19ee6feee3", "dependencies": { "dependencies": [ { - "name": "langflow", + "name": "lfx", "version": null } ], "total_dependencies": 1 }, - "module": "langflow.components.processing.parser.ParserComponent" + "module": "lfx.components.processing.parser.ParserComponent" }, "minimized": false, "output_types": [], @@ -867,7 +867,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from langflow.custom.custom_component.component import Component\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs.inputs import BoolInput, HandleInput, MessageTextInput, MultilineInput, TabInput\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.template.field.base import Output\n\n\nclass ParserComponent(Component):\n display_name = \"Parser\"\n description = \"Extracts text using a template.\"\n documentation: str = \"https://docs.langflow.org/components-processing#parser\"\n icon = \"braces\"\n\n inputs = [\n HandleInput(\n name=\"input_data\",\n display_name=\"Data or DataFrame\",\n input_types=[\"DataFrame\", \"Data\"],\n info=\"Accepts either a DataFrame or a Data object.\",\n required=True,\n ),\n TabInput(\n name=\"mode\",\n display_name=\"Mode\",\n options=[\"Parser\", \"Stringify\"],\n value=\"Parser\",\n info=\"Convert into raw string instead of using a template.\",\n real_time_refresh=True,\n ),\n MultilineInput(\n name=\"pattern\",\n display_name=\"Template\",\n info=(\n \"Use variables within curly brackets to extract column values for DataFrames \"\n \"or key values for Data.\"\n \"For example: `Name: {Name}, Age: {Age}, Country: {Country}`\"\n ),\n value=\"Text: {text}\", # Example default\n dynamic=True,\n show=True,\n required=True,\n ),\n MessageTextInput(\n name=\"sep\",\n display_name=\"Separator\",\n advanced=True,\n value=\"\\n\",\n info=\"String used to separate rows/items.\",\n ),\n ]\n\n outputs = [\n Output(\n display_name=\"Parsed Text\",\n name=\"parsed_text\",\n info=\"Formatted text output.\",\n method=\"parse_combined_text\",\n ),\n ]\n\n def update_build_config(self, build_config, field_value, field_name=None):\n \"\"\"Dynamically hide/show `template` and enforce requirement based on `stringify`.\"\"\"\n if field_name == \"mode\":\n build_config[\"pattern\"][\"show\"] = self.mode == \"Parser\"\n build_config[\"pattern\"][\"required\"] = self.mode == \"Parser\"\n if field_value:\n clean_data = BoolInput(\n name=\"clean_data\",\n display_name=\"Clean Data\",\n info=(\n \"Enable to clean the data by removing empty rows and lines \"\n \"in each cell of the DataFrame/ 
Data object.\"\n ),\n value=True,\n advanced=True,\n required=False,\n )\n build_config[\"clean_data\"] = clean_data.to_dict()\n else:\n build_config.pop(\"clean_data\", None)\n\n return build_config\n\n def _clean_args(self):\n \"\"\"Prepare arguments based on input type.\"\"\"\n input_data = self.input_data\n\n match input_data:\n case list() if all(isinstance(item, Data) for item in input_data):\n msg = \"List of Data objects is not supported.\"\n raise ValueError(msg)\n case DataFrame():\n return input_data, None\n case Data():\n return None, input_data\n case dict() if \"data\" in input_data:\n try:\n if \"columns\" in input_data: # Likely a DataFrame\n return DataFrame.from_dict(input_data), None\n # Likely a Data object\n return None, Data(**input_data)\n except (TypeError, ValueError, KeyError) as e:\n msg = f\"Invalid structured input provided: {e!s}\"\n raise ValueError(msg) from e\n case _:\n msg = f\"Unsupported input type: {type(input_data)}. Expected DataFrame or Data.\"\n raise ValueError(msg)\n\n def parse_combined_text(self) -> Message:\n \"\"\"Parse all rows/items into a single text or convert input to string if `stringify` is enabled.\"\"\"\n # Early return for stringify option\n if self.mode == \"Stringify\":\n return self.convert_to_string()\n\n df, data = self._clean_args()\n\n lines = []\n if df is not None:\n for _, row in df.iterrows():\n formatted_text = self.pattern.format(**row.to_dict())\n lines.append(formatted_text)\n elif data is not None:\n formatted_text = self.pattern.format(**data.data)\n lines.append(formatted_text)\n\n combined_text = self.sep.join(lines)\n self.status = combined_text\n return Message(text=combined_text)\n\n def convert_to_string(self) -> Message:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n result = \"\"\n if isinstance(self.input_data, list):\n result = \"\\n\".join([safe_convert(item, clean_data=self.clean_data or False) for item in self.input_data])\n else:\n result = safe_convert(self.input_data or False)\n self.log(f\"Converted to string with length: {len(result)}\")\n\n message = Message(text=result)\n self.status = message\n return message\n" + "value": "from lfx.custom.custom_component.component import Component\nfrom lfx.helpers.data import safe_convert\nfrom lfx.inputs.inputs import BoolInput, HandleInput, MessageTextInput, MultilineInput, TabInput\nfrom lfx.schema.data import Data\nfrom lfx.schema.dataframe import DataFrame\nfrom lfx.schema.message import Message\nfrom lfx.template.field.base import Output\n\n\nclass ParserComponent(Component):\n display_name = \"Parser\"\n description = \"Extracts text using a template.\"\n documentation: str = \"https://docs.langflow.org/components-processing#parser\"\n icon = \"braces\"\n\n inputs = [\n HandleInput(\n name=\"input_data\",\n display_name=\"Data or DataFrame\",\n input_types=[\"DataFrame\", \"Data\"],\n info=\"Accepts either a DataFrame or a Data object.\",\n required=True,\n ),\n TabInput(\n name=\"mode\",\n display_name=\"Mode\",\n options=[\"Parser\", \"Stringify\"],\n value=\"Parser\",\n info=\"Convert into raw string instead of using a template.\",\n real_time_refresh=True,\n ),\n MultilineInput(\n name=\"pattern\",\n display_name=\"Template\",\n info=(\n \"Use variables within curly brackets to extract column values for DataFrames \"\n \"or key values for Data.\"\n \"For example: `Name: {Name}, Age: {Age}, Country: {Country}`\"\n ),\n value=\"Text: {text}\", # Example default\n dynamic=True,\n show=True,\n required=True,\n ),\n 
MessageTextInput(\n name=\"sep\",\n display_name=\"Separator\",\n advanced=True,\n value=\"\\n\",\n info=\"String used to separate rows/items.\",\n ),\n ]\n\n outputs = [\n Output(\n display_name=\"Parsed Text\",\n name=\"parsed_text\",\n info=\"Formatted text output.\",\n method=\"parse_combined_text\",\n ),\n ]\n\n def update_build_config(self, build_config, field_value, field_name=None):\n \"\"\"Dynamically hide/show `template` and enforce requirement based on `stringify`.\"\"\"\n if field_name == \"mode\":\n build_config[\"pattern\"][\"show\"] = self.mode == \"Parser\"\n build_config[\"pattern\"][\"required\"] = self.mode == \"Parser\"\n if field_value:\n clean_data = BoolInput(\n name=\"clean_data\",\n display_name=\"Clean Data\",\n info=(\n \"Enable to clean the data by removing empty rows and lines \"\n \"in each cell of the DataFrame/ Data object.\"\n ),\n value=True,\n advanced=True,\n required=False,\n )\n build_config[\"clean_data\"] = clean_data.to_dict()\n else:\n build_config.pop(\"clean_data\", None)\n\n return build_config\n\n def _clean_args(self):\n \"\"\"Prepare arguments based on input type.\"\"\"\n input_data = self.input_data\n\n match input_data:\n case list() if all(isinstance(item, Data) for item in input_data):\n msg = \"List of Data objects is not supported.\"\n raise ValueError(msg)\n case DataFrame():\n return input_data, None\n case Data():\n return None, input_data\n case dict() if \"data\" in input_data:\n try:\n if \"columns\" in input_data: # Likely a DataFrame\n return DataFrame.from_dict(input_data), None\n # Likely a Data object\n return None, Data(**input_data)\n except (TypeError, ValueError, KeyError) as e:\n msg = f\"Invalid structured input provided: {e!s}\"\n raise ValueError(msg) from e\n case _:\n msg = f\"Unsupported input type: {type(input_data)}. 
Expected DataFrame or Data.\"\n raise ValueError(msg)\n\n def parse_combined_text(self) -> Message:\n \"\"\"Parse all rows/items into a single text or convert input to string if `stringify` is enabled.\"\"\"\n # Early return for stringify option\n if self.mode == \"Stringify\":\n return self.convert_to_string()\n\n df, data = self._clean_args()\n\n lines = []\n if df is not None:\n for _, row in df.iterrows():\n formatted_text = self.pattern.format(**row.to_dict())\n lines.append(formatted_text)\n elif data is not None:\n formatted_text = self.pattern.format(**data.data)\n lines.append(formatted_text)\n\n combined_text = self.sep.join(lines)\n self.status = combined_text\n return Message(text=combined_text)\n\n def convert_to_string(self) -> Message:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n result = \"\"\n if isinstance(self.input_data, list):\n result = \"\\n\".join([safe_convert(item, clean_data=self.clean_data or False) for item in self.input_data])\n else:\n result = safe_convert(self.input_data or False)\n self.log(f\"Converted to string with length: {len(result)}\")\n\n message = Message(text=result)\n self.status = message\n return message\n" }, "input_data": { "_input_type": "HandleInput", @@ -1013,7 +1013,7 @@ "legacy": false, "lf_version": "1.4.2", "metadata": { - "code_hash": "252132357639", + "code_hash": "cdb7d379306e", "dependencies": { "dependencies": [ { @@ -1029,13 +1029,13 @@ "version": "0.3.21" }, { - "name": "langflow", + "name": "lfx", "version": null } ], "total_dependencies": 4 }, - "module": "langflow.components.data.url.URLComponent" + "module": "lfx.components.data.url.URLComponent" }, "minimized": false, "output_types": [], @@ -1125,7 +1125,7 @@ "show": true, "title_case": false, "type": "code", - "value": "import re\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom langchain_community.document_loaders import RecursiveUrlLoader\n\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.helpers.data import safe_convert\nfrom langflow.io import BoolInput, DropdownInput, IntInput, MessageTextInput, Output, SliderInput, TableInput\nfrom langflow.logging.logger import logger\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.services.deps import get_settings_service\n\n# Constants\nDEFAULT_TIMEOUT = 30\nDEFAULT_MAX_DEPTH = 1\nDEFAULT_FORMAT = \"Text\"\nURL_REGEX = re.compile(\n r\"^(https?:\\/\\/)?\" r\"(www\\.)?\" r\"([a-zA-Z0-9.-]+)\" r\"(\\.[a-zA-Z]{2,})?\" r\"(:\\d+)?\" r\"(\\/[^\\s]*)?$\",\n re.IGNORECASE,\n)\n\n\nclass URLComponent(Component):\n \"\"\"A component that loads and parses content from web pages recursively.\n\n This component allows fetching content from one or more URLs, with options to:\n - Control crawl depth\n - Prevent crawling outside the root domain\n - Use async loading for better performance\n - Extract either raw HTML or clean text\n - Configure request headers and timeouts\n \"\"\"\n\n display_name = \"URL\"\n description = \"Fetch content from one or more web pages, following links recursively.\"\n documentation: str = \"https://docs.langflow.org/components-data#url\"\n icon = \"layout-template\"\n name = \"URLComponent\"\n\n inputs = [\n MessageTextInput(\n name=\"urls\",\n display_name=\"URLs\",\n info=\"Enter one or more URLs to crawl recursively, by clicking the '+' button.\",\n is_list=True,\n tool_mode=True,\n placeholder=\"Enter a URL...\",\n 
list_add_label=\"Add URL\",\n input_types=[],\n ),\n SliderInput(\n name=\"max_depth\",\n display_name=\"Depth\",\n info=(\n \"Controls how many 'clicks' away from the initial page the crawler will go:\\n\"\n \"- depth 1: only the initial page\\n\"\n \"- depth 2: initial page + all pages linked directly from it\\n\"\n \"- depth 3: initial page + direct links + links found on those direct link pages\\n\"\n \"Note: This is about link traversal, not URL path depth.\"\n ),\n value=DEFAULT_MAX_DEPTH,\n range_spec=RangeSpec(min=1, max=5, step=1),\n required=False,\n min_label=\" \",\n max_label=\" \",\n min_label_icon=\"None\",\n max_label_icon=\"None\",\n # slider_input=True\n ),\n BoolInput(\n name=\"prevent_outside\",\n display_name=\"Prevent Outside\",\n info=(\n \"If enabled, only crawls URLs within the same domain as the root URL. \"\n \"This helps prevent the crawler from going to external websites.\"\n ),\n value=True,\n required=False,\n advanced=True,\n ),\n BoolInput(\n name=\"use_async\",\n display_name=\"Use Async\",\n info=(\n \"If enabled, uses asynchronous loading which can be significantly faster \"\n \"but might use more system resources.\"\n ),\n value=True,\n required=False,\n advanced=True,\n ),\n DropdownInput(\n name=\"format\",\n display_name=\"Output Format\",\n info=\"Output Format. Use 'Text' to extract the text from the HTML or 'HTML' for the raw HTML content.\",\n options=[\"Text\", \"HTML\"],\n value=DEFAULT_FORMAT,\n advanced=True,\n ),\n IntInput(\n name=\"timeout\",\n display_name=\"Timeout\",\n info=\"Timeout for the request in seconds.\",\n value=DEFAULT_TIMEOUT,\n required=False,\n advanced=True,\n ),\n TableInput(\n name=\"headers\",\n display_name=\"Headers\",\n info=\"The headers to send with the request\",\n table_schema=[\n {\n \"name\": \"key\",\n \"display_name\": \"Header\",\n \"type\": \"str\",\n \"description\": \"Header name\",\n },\n {\n \"name\": \"value\",\n \"display_name\": \"Value\",\n \"type\": \"str\",\n \"description\": \"Header value\",\n },\n ],\n value=[{\"key\": \"User-Agent\", \"value\": get_settings_service().settings.user_agent}],\n advanced=True,\n input_types=[\"DataFrame\"],\n ),\n BoolInput(\n name=\"filter_text_html\",\n display_name=\"Filter Text/HTML\",\n info=\"If enabled, filters out text/css content type from the results.\",\n value=True,\n required=False,\n advanced=True,\n ),\n BoolInput(\n name=\"continue_on_failure\",\n display_name=\"Continue on Failure\",\n info=\"If enabled, continues crawling even if some requests fail.\",\n value=True,\n required=False,\n advanced=True,\n ),\n BoolInput(\n name=\"check_response_status\",\n display_name=\"Check Response Status\",\n info=\"If enabled, checks the response status of the request.\",\n value=False,\n required=False,\n advanced=True,\n ),\n BoolInput(\n name=\"autoset_encoding\",\n display_name=\"Autoset Encoding\",\n info=\"If enabled, automatically sets the encoding of the request.\",\n value=True,\n required=False,\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Extracted Pages\", name=\"page_results\", method=\"fetch_content\"),\n Output(display_name=\"Raw Content\", name=\"raw_results\", method=\"fetch_content_as_message\", tool_mode=False),\n ]\n\n @staticmethod\n def validate_url(url: str) -> bool:\n \"\"\"Validates if the given string matches URL pattern.\n\n Args:\n url: The URL string to validate\n\n Returns:\n bool: True if the URL is valid, False otherwise\n \"\"\"\n return bool(URL_REGEX.match(url))\n\n def ensure_url(self, url: str) -> 
str:\n \"\"\"Ensures the given string is a valid URL.\n\n Args:\n url: The URL string to validate and normalize\n\n Returns:\n str: The normalized URL\n\n Raises:\n ValueError: If the URL is invalid\n \"\"\"\n url = url.strip()\n if not url.startswith((\"http://\", \"https://\")):\n url = \"https://\" + url\n\n if not self.validate_url(url):\n msg = f\"Invalid URL: {url}\"\n raise ValueError(msg)\n\n return url\n\n def _create_loader(self, url: str) -> RecursiveUrlLoader:\n \"\"\"Creates a RecursiveUrlLoader instance with the configured settings.\n\n Args:\n url: The URL to load\n\n Returns:\n RecursiveUrlLoader: Configured loader instance\n \"\"\"\n headers_dict = {header[\"key\"]: header[\"value\"] for header in self.headers}\n extractor = (lambda x: x) if self.format == \"HTML\" else (lambda x: BeautifulSoup(x, \"lxml\").get_text())\n\n return RecursiveUrlLoader(\n url=url,\n max_depth=self.max_depth,\n prevent_outside=self.prevent_outside,\n use_async=self.use_async,\n extractor=extractor,\n timeout=self.timeout,\n headers=headers_dict,\n check_response_status=self.check_response_status,\n continue_on_failure=self.continue_on_failure,\n base_url=url, # Add base_url to ensure consistent domain crawling\n autoset_encoding=self.autoset_encoding, # Enable automatic encoding detection\n exclude_dirs=[], # Allow customization of excluded directories\n link_regex=None, # Allow customization of link filtering\n )\n\n def fetch_url_contents(self) -> list[dict]:\n \"\"\"Load documents from the configured URLs.\n\n Returns:\n List[Data]: List of Data objects containing the fetched content\n\n Raises:\n ValueError: If no valid URLs are provided or if there's an error loading documents\n \"\"\"\n try:\n urls = list({self.ensure_url(url) for url in self.urls if url.strip()})\n logger.debug(f\"URLs: {urls}\")\n if not urls:\n msg = \"No valid URLs provided.\"\n raise ValueError(msg)\n\n all_docs = []\n for url in urls:\n logger.debug(f\"Loading documents from {url}\")\n\n try:\n loader = self._create_loader(url)\n docs = loader.load()\n\n if not docs:\n logger.warning(f\"No documents found for {url}\")\n continue\n\n logger.debug(f\"Found {len(docs)} documents from {url}\")\n all_docs.extend(docs)\n\n except requests.exceptions.RequestException as e:\n logger.exception(f\"Error loading documents from {url}: {e}\")\n continue\n\n if not all_docs:\n msg = \"No documents were successfully loaded from any URL\"\n raise ValueError(msg)\n\n # data = [Data(text=doc.page_content, **doc.metadata) for doc in all_docs]\n data = [\n {\n \"text\": safe_convert(doc.page_content, clean_data=True),\n \"url\": doc.metadata.get(\"source\", \"\"),\n \"title\": doc.metadata.get(\"title\", \"\"),\n \"description\": doc.metadata.get(\"description\", \"\"),\n \"content_type\": doc.metadata.get(\"content_type\", \"\"),\n \"language\": doc.metadata.get(\"language\", \"\"),\n }\n for doc in all_docs\n ]\n except Exception as e:\n error_msg = e.message if hasattr(e, \"message\") else e\n msg = f\"Error loading documents: {error_msg!s}\"\n logger.exception(msg)\n raise ValueError(msg) from e\n return data\n\n def fetch_content(self) -> DataFrame:\n \"\"\"Convert the documents to a DataFrame.\"\"\"\n return DataFrame(data=self.fetch_url_contents())\n\n def fetch_content_as_message(self) -> Message:\n \"\"\"Convert the documents to a Message.\"\"\"\n url_contents = self.fetch_url_contents()\n return Message(text=\"\\n\\n\".join([x[\"text\"] for x in url_contents]), data={\"data\": url_contents})\n" + "value": "import 
importlib\nimport re\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom langchain_community.document_loaders import RecursiveUrlLoader\n\nfrom lfx.custom.custom_component.component import Component\nfrom lfx.field_typing.range_spec import RangeSpec\nfrom lfx.helpers.data import safe_convert\nfrom lfx.io import BoolInput, DropdownInput, IntInput, MessageTextInput, Output, SliderInput, TableInput\nfrom lfx.log.logger import logger\nfrom lfx.schema.dataframe import DataFrame\nfrom lfx.schema.message import Message\nfrom lfx.utils.request_utils import get_user_agent\n\n# Constants\nDEFAULT_TIMEOUT = 30\nDEFAULT_MAX_DEPTH = 1\nDEFAULT_FORMAT = \"Text\"\n\n\nURL_REGEX = re.compile(\n r\"^(https?:\\/\\/)?\" r\"(www\\.)?\" r\"([a-zA-Z0-9.-]+)\" r\"(\\.[a-zA-Z]{2,})?\" r\"(:\\d+)?\" r\"(\\/[^\\s]*)?$\",\n re.IGNORECASE,\n)\n\nUSER_AGENT = None\n# Check if langflow is installed using importlib.util.find_spec(name))\nif importlib.util.find_spec(\"langflow\"):\n langflow_installed = True\n USER_AGENT = get_user_agent()\nelse:\n langflow_installed = False\n USER_AGENT = \"lfx\"\n\n\nclass URLComponent(Component):\n \"\"\"A component that loads and parses content from web pages recursively.\n\n This component allows fetching content from one or more URLs, with options to:\n - Control crawl depth\n - Prevent crawling outside the root domain\n - Use async loading for better performance\n - Extract either raw HTML or clean text\n - Configure request headers and timeouts\n \"\"\"\n\n display_name = \"URL\"\n description = \"Fetch content from one or more web pages, following links recursively.\"\n documentation: str = \"https://docs.langflow.org/components-data#url\"\n icon = \"layout-template\"\n name = \"URLComponent\"\n\n inputs = [\n MessageTextInput(\n name=\"urls\",\n display_name=\"URLs\",\n info=\"Enter one or more URLs to crawl recursively, by clicking the '+' button.\",\n is_list=True,\n tool_mode=True,\n placeholder=\"Enter a URL...\",\n list_add_label=\"Add URL\",\n input_types=[],\n ),\n SliderInput(\n name=\"max_depth\",\n display_name=\"Depth\",\n info=(\n \"Controls how many 'clicks' away from the initial page the crawler will go:\\n\"\n \"- depth 1: only the initial page\\n\"\n \"- depth 2: initial page + all pages linked directly from it\\n\"\n \"- depth 3: initial page + direct links + links found on those direct link pages\\n\"\n \"Note: This is about link traversal, not URL path depth.\"\n ),\n value=DEFAULT_MAX_DEPTH,\n range_spec=RangeSpec(min=1, max=5, step=1),\n required=False,\n min_label=\" \",\n max_label=\" \",\n min_label_icon=\"None\",\n max_label_icon=\"None\",\n # slider_input=True\n ),\n BoolInput(\n name=\"prevent_outside\",\n display_name=\"Prevent Outside\",\n info=(\n \"If enabled, only crawls URLs within the same domain as the root URL. \"\n \"This helps prevent the crawler from going to external websites.\"\n ),\n value=True,\n required=False,\n advanced=True,\n ),\n BoolInput(\n name=\"use_async\",\n display_name=\"Use Async\",\n info=(\n \"If enabled, uses asynchronous loading which can be significantly faster \"\n \"but might use more system resources.\"\n ),\n value=True,\n required=False,\n advanced=True,\n ),\n DropdownInput(\n name=\"format\",\n display_name=\"Output Format\",\n info=\"Output Format. 
Use 'Text' to extract the text from the HTML or 'HTML' for the raw HTML content.\",\n options=[\"Text\", \"HTML\"],\n value=DEFAULT_FORMAT,\n advanced=True,\n ),\n IntInput(\n name=\"timeout\",\n display_name=\"Timeout\",\n info=\"Timeout for the request in seconds.\",\n value=DEFAULT_TIMEOUT,\n required=False,\n advanced=True,\n ),\n TableInput(\n name=\"headers\",\n display_name=\"Headers\",\n info=\"The headers to send with the request\",\n table_schema=[\n {\n \"name\": \"key\",\n \"display_name\": \"Header\",\n \"type\": \"str\",\n \"description\": \"Header name\",\n },\n {\n \"name\": \"value\",\n \"display_name\": \"Value\",\n \"type\": \"str\",\n \"description\": \"Header value\",\n },\n ],\n value=[{\"key\": \"User-Agent\", \"value\": USER_AGENT}],\n advanced=True,\n input_types=[\"DataFrame\"],\n ),\n BoolInput(\n name=\"filter_text_html\",\n display_name=\"Filter Text/HTML\",\n info=\"If enabled, filters out text/css content type from the results.\",\n value=True,\n required=False,\n advanced=True,\n ),\n BoolInput(\n name=\"continue_on_failure\",\n display_name=\"Continue on Failure\",\n info=\"If enabled, continues crawling even if some requests fail.\",\n value=True,\n required=False,\n advanced=True,\n ),\n BoolInput(\n name=\"check_response_status\",\n display_name=\"Check Response Status\",\n info=\"If enabled, checks the response status of the request.\",\n value=False,\n required=False,\n advanced=True,\n ),\n BoolInput(\n name=\"autoset_encoding\",\n display_name=\"Autoset Encoding\",\n info=\"If enabled, automatically sets the encoding of the request.\",\n value=True,\n required=False,\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Extracted Pages\", name=\"page_results\", method=\"fetch_content\"),\n Output(display_name=\"Raw Content\", name=\"raw_results\", method=\"fetch_content_as_message\", tool_mode=False),\n ]\n\n @staticmethod\n def validate_url(url: str) -> bool:\n \"\"\"Validates if the given string matches URL pattern.\n\n Args:\n url: The URL string to validate\n\n Returns:\n bool: True if the URL is valid, False otherwise\n \"\"\"\n return bool(URL_REGEX.match(url))\n\n def ensure_url(self, url: str) -> str:\n \"\"\"Ensures the given string is a valid URL.\n\n Args:\n url: The URL string to validate and normalize\n\n Returns:\n str: The normalized URL\n\n Raises:\n ValueError: If the URL is invalid\n \"\"\"\n url = url.strip()\n if not url.startswith((\"http://\", \"https://\")):\n url = \"https://\" + url\n\n if not self.validate_url(url):\n msg = f\"Invalid URL: {url}\"\n raise ValueError(msg)\n\n return url\n\n def _create_loader(self, url: str) -> RecursiveUrlLoader:\n \"\"\"Creates a RecursiveUrlLoader instance with the configured settings.\n\n Args:\n url: The URL to load\n\n Returns:\n RecursiveUrlLoader: Configured loader instance\n \"\"\"\n headers_dict = {header[\"key\"]: header[\"value\"] for header in self.headers if header[\"value\"] is not None}\n extractor = (lambda x: x) if self.format == \"HTML\" else (lambda x: BeautifulSoup(x, \"lxml\").get_text())\n\n return RecursiveUrlLoader(\n url=url,\n max_depth=self.max_depth,\n prevent_outside=self.prevent_outside,\n use_async=self.use_async,\n extractor=extractor,\n timeout=self.timeout,\n headers=headers_dict,\n check_response_status=self.check_response_status,\n continue_on_failure=self.continue_on_failure,\n base_url=url, # Add base_url to ensure consistent domain crawling\n autoset_encoding=self.autoset_encoding, # Enable automatic encoding detection\n exclude_dirs=[], # 
Allow customization of excluded directories\n link_regex=None, # Allow customization of link filtering\n )\n\n def fetch_url_contents(self) -> list[dict]:\n \"\"\"Load documents from the configured URLs.\n\n Returns:\n List[Data]: List of Data objects containing the fetched content\n\n Raises:\n ValueError: If no valid URLs are provided or if there's an error loading documents\n \"\"\"\n try:\n urls = list({self.ensure_url(url) for url in self.urls if url.strip()})\n logger.debug(f\"URLs: {urls}\")\n if not urls:\n msg = \"No valid URLs provided.\"\n raise ValueError(msg)\n\n all_docs = []\n for url in urls:\n logger.debug(f\"Loading documents from {url}\")\n\n try:\n loader = self._create_loader(url)\n docs = loader.load()\n\n if not docs:\n logger.warning(f\"No documents found for {url}\")\n continue\n\n logger.debug(f\"Found {len(docs)} documents from {url}\")\n all_docs.extend(docs)\n\n except requests.exceptions.RequestException as e:\n logger.exception(f\"Error loading documents from {url}: {e}\")\n continue\n\n if not all_docs:\n msg = \"No documents were successfully loaded from any URL\"\n raise ValueError(msg)\n\n # data = [Data(text=doc.page_content, **doc.metadata) for doc in all_docs]\n data = [\n {\n \"text\": safe_convert(doc.page_content, clean_data=True),\n \"url\": doc.metadata.get(\"source\", \"\"),\n \"title\": doc.metadata.get(\"title\", \"\"),\n \"description\": doc.metadata.get(\"description\", \"\"),\n \"content_type\": doc.metadata.get(\"content_type\", \"\"),\n \"language\": doc.metadata.get(\"language\", \"\"),\n }\n for doc in all_docs\n ]\n except Exception as e:\n error_msg = e.message if hasattr(e, \"message\") else e\n msg = f\"Error loading documents: {error_msg!s}\"\n logger.exception(msg)\n raise ValueError(msg) from e\n return data\n\n def fetch_content(self) -> DataFrame:\n \"\"\"Convert the documents to a DataFrame.\"\"\"\n return DataFrame(data=self.fetch_url_contents())\n\n def fetch_content_as_message(self) -> Message:\n \"\"\"Convert the documents to a Message.\"\"\"\n url_contents = self.fetch_url_contents()\n return Message(text=\"\\n\\n\".join([x[\"text\"] for x in url_contents]), data={\"data\": url_contents})\n" }, "continue_on_failure": { "_input_type": "BoolInput", @@ -1382,7 +1382,7 @@ "beta": false, "conditional_paths": [], "custom_fields": {}, - "description": "Runs a language model given a specified provider. 
", + "description": "Runs a language model given a specified provider.", "display_name": "Language Model", "documentation": "", "edited": false, @@ -1400,12 +1400,35 @@ "legacy": false, "lf_version": "1.4.2", "metadata": { + "code_hash": "bb5f8714781b", + "dependencies": { + "dependencies": [ + { + "name": "langchain_anthropic", + "version": "0.3.14" + }, + { + "name": "langchain_google_genai", + "version": "2.0.6" + }, + { + "name": "langchain_openai", + "version": "0.3.23" + }, + { + "name": "lfx", + "version": null + } + ], + "total_dependencies": 4 + }, "keywords": [ "model", "llm", "language model", "large language model" - ] + ], + "module": "lfx.components.models.language_model.LanguageModelComponent" }, "minimized": false, "output_types": [], @@ -1477,7 +1500,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from typing import Any\n\nfrom langchain_anthropic import ChatAnthropic\nfrom langchain_google_genai import ChatGoogleGenerativeAI\nfrom langchain_openai import ChatOpenAI\n\nfrom langflow.base.models.anthropic_constants import ANTHROPIC_MODELS\nfrom langflow.base.models.google_generative_ai_constants import GOOGLE_GENERATIVE_AI_MODELS\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import OPENAI_CHAT_MODEL_NAMES, OPENAI_REASONING_MODEL_NAMES\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs.inputs import BoolInput\nfrom langflow.io import DropdownInput, MessageInput, MultilineInput, SecretStrInput, SliderInput\nfrom langflow.schema.dotdict import dotdict\n\n\nclass LanguageModelComponent(LCModelComponent):\n display_name = \"Language Model\"\n description = \"Runs a language model given a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-models\"\n icon = \"brain-circuit\"\n category = \"models\"\n priority = 0 # Set priority to 0 to make it appear first\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Anthropic\", \"Google\"],\n value=\"OpenAI\",\n info=\"Select the model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Anthropic\"}, {\"icon\": \"GoogleGenerativeAI\"}],\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n options=OPENAI_CHAT_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES,\n value=OPENAI_CHAT_MODEL_NAMES[0],\n info=\"Select the model to use\",\n real_time_refresh=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=False,\n show=True,\n real_time_refresh=True,\n ),\n MessageInput(\n name=\"input_value\",\n display_name=\"Input\",\n info=\"The input text to send to the model\",\n ),\n MultilineInput(\n name=\"system_message\",\n display_name=\"System Message\",\n info=\"A system message that helps set the behavior of the assistant\",\n advanced=False,\n ),\n BoolInput(\n name=\"stream\",\n display_name=\"Stream\",\n info=\"Whether to stream the response\",\n value=False,\n advanced=True,\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n info=\"Controls randomness in responses\",\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n advanced=True,\n ),\n ]\n\n def build_model(self) -> LanguageModel:\n provider = self.provider\n model_name = self.model_name\n temperature = self.temperature\n stream = self.stream\n\n if provider == 
\"OpenAI\":\n if not self.api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n raise ValueError(msg)\n\n if model_name in OPENAI_REASONING_MODEL_NAMES:\n # reasoning models do not support temperature (yet)\n temperature = None\n\n return ChatOpenAI(\n model_name=model_name,\n temperature=temperature,\n streaming=stream,\n openai_api_key=self.api_key,\n )\n if provider == \"Anthropic\":\n if not self.api_key:\n msg = \"Anthropic API key is required when using Anthropic provider\"\n raise ValueError(msg)\n return ChatAnthropic(\n model=model_name,\n temperature=temperature,\n streaming=stream,\n anthropic_api_key=self.api_key,\n )\n if provider == \"Google\":\n if not self.api_key:\n msg = \"Google API key is required when using Google provider\"\n raise ValueError(msg)\n return ChatGoogleGenerativeAI(\n model=model_name,\n temperature=temperature,\n streaming=stream,\n google_api_key=self.api_key,\n )\n msg = f\"Unknown provider: {provider}\"\n raise ValueError(msg)\n\n def update_build_config(self, build_config: dotdict, field_value: Any, field_name: str | None = None) -> dotdict:\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model_name\"][\"options\"] = OPENAI_CHAT_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES\n build_config[\"model_name\"][\"value\"] = OPENAI_CHAT_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n elif field_value == \"Anthropic\":\n build_config[\"model_name\"][\"options\"] = ANTHROPIC_MODELS\n build_config[\"model_name\"][\"value\"] = ANTHROPIC_MODELS[0]\n build_config[\"api_key\"][\"display_name\"] = \"Anthropic API Key\"\n elif field_value == \"Google\":\n build_config[\"model_name\"][\"options\"] = GOOGLE_GENERATIVE_AI_MODELS\n build_config[\"model_name\"][\"value\"] = GOOGLE_GENERATIVE_AI_MODELS[0]\n build_config[\"api_key\"][\"display_name\"] = \"Google API Key\"\n elif field_name == \"model_name\" and field_value.startswith(\"o1\") and self.provider == \"OpenAI\":\n # Hide system_message for o1 models - currently unsupported\n if \"system_message\" in build_config:\n build_config[\"system_message\"][\"show\"] = False\n elif field_name == \"model_name\" and not field_value.startswith(\"o1\") and \"system_message\" in build_config:\n build_config[\"system_message\"][\"show\"] = True\n return build_config\n" + "value": "from typing import Any\n\nfrom langchain_anthropic import ChatAnthropic\nfrom langchain_google_genai import ChatGoogleGenerativeAI\nfrom langchain_openai import ChatOpenAI\n\nfrom lfx.base.models.anthropic_constants import ANTHROPIC_MODELS\nfrom lfx.base.models.google_generative_ai_constants import GOOGLE_GENERATIVE_AI_MODELS\nfrom lfx.base.models.model import LCModelComponent\nfrom lfx.base.models.openai_constants import OPENAI_CHAT_MODEL_NAMES, OPENAI_REASONING_MODEL_NAMES\nfrom lfx.field_typing import LanguageModel\nfrom lfx.field_typing.range_spec import RangeSpec\nfrom lfx.inputs.inputs import BoolInput\nfrom lfx.io import DropdownInput, MessageInput, MultilineInput, SecretStrInput, SliderInput\nfrom lfx.schema.dotdict import dotdict\n\n\nclass LanguageModelComponent(LCModelComponent):\n display_name = \"Language Model\"\n description = \"Runs a language model given a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-models\"\n icon = \"brain-circuit\"\n category = \"models\"\n priority = 0 # Set priority to 0 to make it appear first\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n 
options=[\"OpenAI\", \"Anthropic\", \"Google\"],\n value=\"OpenAI\",\n info=\"Select the model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Anthropic\"}, {\"icon\": \"GoogleGenerativeAI\"}],\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n options=OPENAI_CHAT_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES,\n value=OPENAI_CHAT_MODEL_NAMES[0],\n info=\"Select the model to use\",\n real_time_refresh=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=False,\n show=True,\n real_time_refresh=True,\n ),\n MessageInput(\n name=\"input_value\",\n display_name=\"Input\",\n info=\"The input text to send to the model\",\n ),\n MultilineInput(\n name=\"system_message\",\n display_name=\"System Message\",\n info=\"A system message that helps set the behavior of the assistant\",\n advanced=False,\n ),\n BoolInput(\n name=\"stream\",\n display_name=\"Stream\",\n info=\"Whether to stream the response\",\n value=False,\n advanced=True,\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n info=\"Controls randomness in responses\",\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n advanced=True,\n ),\n ]\n\n def build_model(self) -> LanguageModel:\n provider = self.provider\n model_name = self.model_name\n temperature = self.temperature\n stream = self.stream\n\n if provider == \"OpenAI\":\n if not self.api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n raise ValueError(msg)\n\n if model_name in OPENAI_REASONING_MODEL_NAMES:\n # reasoning models do not support temperature (yet)\n temperature = None\n\n return ChatOpenAI(\n model_name=model_name,\n temperature=temperature,\n streaming=stream,\n openai_api_key=self.api_key,\n )\n if provider == \"Anthropic\":\n if not self.api_key:\n msg = \"Anthropic API key is required when using Anthropic provider\"\n raise ValueError(msg)\n return ChatAnthropic(\n model=model_name,\n temperature=temperature,\n streaming=stream,\n anthropic_api_key=self.api_key,\n )\n if provider == \"Google\":\n if not self.api_key:\n msg = \"Google API key is required when using Google provider\"\n raise ValueError(msg)\n return ChatGoogleGenerativeAI(\n model=model_name,\n temperature=temperature,\n streaming=stream,\n google_api_key=self.api_key,\n )\n msg = f\"Unknown provider: {provider}\"\n raise ValueError(msg)\n\n def update_build_config(self, build_config: dotdict, field_value: Any, field_name: str | None = None) -> dotdict:\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model_name\"][\"options\"] = OPENAI_CHAT_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES\n build_config[\"model_name\"][\"value\"] = OPENAI_CHAT_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n elif field_value == \"Anthropic\":\n build_config[\"model_name\"][\"options\"] = ANTHROPIC_MODELS\n build_config[\"model_name\"][\"value\"] = ANTHROPIC_MODELS[0]\n build_config[\"api_key\"][\"display_name\"] = \"Anthropic API Key\"\n elif field_value == \"Google\":\n build_config[\"model_name\"][\"options\"] = GOOGLE_GENERATIVE_AI_MODELS\n build_config[\"model_name\"][\"value\"] = GOOGLE_GENERATIVE_AI_MODELS[0]\n build_config[\"api_key\"][\"display_name\"] = \"Google API Key\"\n elif field_name == \"model_name\" and field_value.startswith(\"o1\") and self.provider == \"OpenAI\":\n # Hide system_message for o1 models - currently unsupported\n if 
\"system_message\" in build_config:\n build_config[\"system_message\"][\"show\"] = False\n elif field_name == \"model_name\" and not field_value.startswith(\"o1\") and \"system_message\" in build_config:\n build_config[\"system_message\"][\"show\"] = True\n return build_config\n" }, "input_value": { "_input_type": "MessageTextInput", @@ -1517,11 +1540,20 @@ "gpt-4.1", "gpt-4.1-mini", "gpt-4.1-nano", - "gpt-4.5-preview", "gpt-4-turbo", "gpt-4-turbo-preview", "gpt-4", - "gpt-3.5-turbo" + "gpt-3.5-turbo", + "gpt-5", + "gpt-5-mini", + "gpt-5-nano", + "gpt-5-chat-latest", + "o1", + "o3-mini", + "o3", + "o3-pro", + "o4-mini", + "o4-mini-high" ], "options_metadata": [], "placeholder": "", diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Custom Component Generator.json b/src/backend/base/langflow/initial_setup/starter_projects/Custom Component Generator.json index 612808657039..5e61fe49c048 100644 --- a/src/backend/base/langflow/initial_setup/starter_projects/Custom Component Generator.json +++ b/src/backend/base/langflow/initial_setup/starter_projects/Custom Component Generator.json @@ -7,7 +7,7 @@ "data": { "sourceHandle": { "dataType": "ChatInput", - "id": "ChatInput-u8rae", + "id": "ChatInput-eo1g2", "name": "message", "output_types": [ "Message" @@ -15,7 +15,7 @@ }, "targetHandle": { "fieldName": "USER_INPUT", - "id": "Prompt-7Jzfo", + "id": "Prompt-cMwv1", "inputTypes": [ "Message", "Text" @@ -23,12 +23,12 @@ "type": "str" } }, - "id": "reactflow__edge-ChatInput-u8rae{œdataTypeœ:œChatInputœ,œidœ:œChatInput-u8raeœ,œnameœ:œmessageœ,œoutput_typesœ:[œMessageœ]}-Prompt-7Jzfo{œfieldNameœ:œUSER_INPUTœ,œidœ:œPrompt-7Jzfoœ,œinputTypesœ:[œMessageœ,œTextœ],œtypeœ:œstrœ}", + "id": "reactflow__edge-ChatInput-eo1g2{œdataTypeœ:œChatInputœ,œidœ:œChatInput-eo1g2œ,œnameœ:œmessageœ,œoutput_typesœ:[œMessageœ]}-Prompt-cMwv1{œfieldNameœ:œUSER_INPUTœ,œidœ:œPrompt-cMwv1œ,œinputTypesœ:[œMessageœ,œTextœ],œtypeœ:œstrœ}", "selected": false, - "source": "ChatInput-u8rae", - "sourceHandle": "{œdataTypeœ: œChatInputœ, œidœ: œChatInput-u8raeœ, œnameœ: œmessageœ, œoutput_typesœ: [œMessageœ]}", - "target": "Prompt-7Jzfo", - "targetHandle": "{œfieldNameœ: œUSER_INPUTœ, œidœ: œPrompt-7Jzfoœ, œinputTypesœ: [œMessageœ, œTextœ], œtypeœ: œstrœ}" + "source": "ChatInput-eo1g2", + "sourceHandle": "{œdataTypeœ: œChatInputœ, œidœ: œChatInput-eo1g2œ, œnameœ: œmessageœ, œoutput_typesœ: [œMessageœ]}", + "target": "Prompt-cMwv1", + "targetHandle": "{œfieldNameœ: œUSER_INPUTœ, œidœ: œPrompt-cMwv1œ, œinputTypesœ: [œMessageœ, œTextœ], œtypeœ: œstrœ}" }, { "animated": false, @@ -36,7 +36,7 @@ "data": { "sourceHandle": { "dataType": "Memory", - "id": "Memory-U33nr", + "id": "Memory-4gSCw", "name": "messages_text", "output_types": [ "Message" @@ -44,7 +44,7 @@ }, "targetHandle": { "fieldName": "CHAT_HISTORY", - "id": "Prompt-7Jzfo", + "id": "Prompt-cMwv1", "inputTypes": [ "Message", "Text" @@ -52,12 +52,12 @@ "type": "str" } }, - "id": "reactflow__edge-Memory-U33nr{œdataTypeœ:œMemoryœ,œidœ:œMemory-U33nrœ,œnameœ:œmessages_textœ,œoutput_typesœ:[œMessageœ]}-Prompt-7Jzfo{œfieldNameœ:œCHAT_HISTORYœ,œidœ:œPrompt-7Jzfoœ,œinputTypesœ:[œMessageœ,œTextœ],œtypeœ:œstrœ}", + "id": "reactflow__edge-Memory-4gSCw{œdataTypeœ:œMemoryœ,œidœ:œMemory-4gSCwœ,œnameœ:œmessages_textœ,œoutput_typesœ:[œMessageœ]}-Prompt-cMwv1{œfieldNameœ:œCHAT_HISTORYœ,œidœ:œPrompt-cMwv1œ,œinputTypesœ:[œMessageœ,œTextœ],œtypeœ:œstrœ}", "selected": false, - "source": "Memory-U33nr", - "sourceHandle": "{œdataTypeœ: œMemoryœ, œidœ: œMemory-U33nrœ, œnameœ: 
œmessages_textœ, œoutput_typesœ: [œMessageœ]}", - "target": "Prompt-7Jzfo", - "targetHandle": "{œfieldNameœ: œCHAT_HISTORYœ, œidœ: œPrompt-7Jzfoœ, œinputTypesœ: [œMessageœ, œTextœ], œtypeœ: œstrœ}" + "source": "Memory-4gSCw", + "sourceHandle": "{œdataTypeœ: œMemoryœ, œidœ: œMemory-4gSCwœ, œnameœ: œmessages_textœ, œoutput_typesœ: [œMessageœ]}", + "target": "Prompt-cMwv1", + "targetHandle": "{œfieldNameœ: œCHAT_HISTORYœ, œidœ: œPrompt-cMwv1œ, œinputTypesœ: [œMessageœ, œTextœ], œtypeœ: œstrœ}" }, { "animated": false, @@ -65,7 +65,7 @@ "data": { "sourceHandle": { "dataType": "URL", - "id": "URL-LiTXv", + "id": "URL-h1gAB", "name": "raw_results", "output_types": [ "Message" @@ -73,7 +73,7 @@ }, "targetHandle": { "fieldName": "EXAMPLE_COMPONENTS", - "id": "Prompt-7Jzfo", + "id": "Prompt-cMwv1", "inputTypes": [ "Message", "Text" @@ -81,12 +81,12 @@ "type": "str" } }, - "id": "reactflow__edge-URL-LiTXv{œdataTypeœ:œURLœ,œidœ:œURL-LiTXvœ,œnameœ:œraw_resultsœ,œoutput_typesœ:[œMessageœ]}-Prompt-7Jzfo{œfieldNameœ:œEXAMPLE_COMPONENTSœ,œidœ:œPrompt-7Jzfoœ,œinputTypesœ:[œMessageœ,œTextœ],œtypeœ:œstrœ}", + "id": "reactflow__edge-URL-h1gAB{œdataTypeœ:œURLœ,œidœ:œURL-h1gABœ,œnameœ:œraw_resultsœ,œoutput_typesœ:[œMessageœ]}-Prompt-cMwv1{œfieldNameœ:œEXAMPLE_COMPONENTSœ,œidœ:œPrompt-cMwv1œ,œinputTypesœ:[œMessageœ,œTextœ],œtypeœ:œstrœ}", "selected": false, - "source": "URL-LiTXv", - "sourceHandle": "{œdataTypeœ: œURLœ, œidœ: œURL-LiTXvœ, œnameœ: œraw_resultsœ, œoutput_typesœ: [œMessageœ]}", - "target": "Prompt-7Jzfo", - "targetHandle": "{œfieldNameœ: œEXAMPLE_COMPONENTSœ, œidœ: œPrompt-7Jzfoœ, œinputTypesœ: [œMessageœ, œTextœ], œtypeœ: œstrœ}" + "source": "URL-h1gAB", + "sourceHandle": "{œdataTypeœ: œURLœ, œidœ: œURL-h1gABœ, œnameœ: œraw_resultsœ, œoutput_typesœ: [œMessageœ]}", + "target": "Prompt-cMwv1", + "targetHandle": "{œfieldNameœ: œEXAMPLE_COMPONENTSœ, œidœ: œPrompt-cMwv1œ, œinputTypesœ: [œMessageœ, œTextœ], œtypeœ: œstrœ}" }, { "animated": false, @@ -94,7 +94,7 @@ "data": { "sourceHandle": { "dataType": "URL", - "id": "URL-E6QCv", + "id": "URL-G5J7i", "name": "raw_results", "output_types": [ "Message" @@ -102,7 +102,7 @@ }, "targetHandle": { "fieldName": "CUSTOM_COMPONENT_CODE", - "id": "Prompt-7Jzfo", + "id": "Prompt-cMwv1", "inputTypes": [ "Message", "Text" @@ -110,12 +110,12 @@ "type": "str" } }, - "id": "reactflow__edge-URL-E6QCv{œdataTypeœ:œURLœ,œidœ:œURL-E6QCvœ,œnameœ:œraw_resultsœ,œoutput_typesœ:[œMessageœ]}-Prompt-7Jzfo{œfieldNameœ:œCUSTOM_COMPONENT_CODEœ,œidœ:œPrompt-7Jzfoœ,œinputTypesœ:[œMessageœ,œTextœ],œtypeœ:œstrœ}", + "id": "reactflow__edge-URL-G5J7i{œdataTypeœ:œURLœ,œidœ:œURL-G5J7iœ,œnameœ:œraw_resultsœ,œoutput_typesœ:[œMessageœ]}-Prompt-cMwv1{œfieldNameœ:œCUSTOM_COMPONENT_CODEœ,œidœ:œPrompt-cMwv1œ,œinputTypesœ:[œMessageœ,œTextœ],œtypeœ:œstrœ}", "selected": false, - "source": "URL-E6QCv", - "sourceHandle": "{œdataTypeœ: œURLœ, œidœ: œURL-E6QCvœ, œnameœ: œraw_resultsœ, œoutput_typesœ: [œMessageœ]}", - "target": "Prompt-7Jzfo", - "targetHandle": "{œfieldNameœ: œCUSTOM_COMPONENT_CODEœ, œidœ: œPrompt-7Jzfoœ, œinputTypesœ: [œMessageœ, œTextœ], œtypeœ: œstrœ}" + "source": "URL-G5J7i", + "sourceHandle": "{œdataTypeœ: œURLœ, œidœ: œURL-G5J7iœ, œnameœ: œraw_resultsœ, œoutput_typesœ: [œMessageœ]}", + "target": "Prompt-cMwv1", + "targetHandle": "{œfieldNameœ: œCUSTOM_COMPONENT_CODEœ, œidœ: œPrompt-cMwv1œ, œinputTypesœ: [œMessageœ, œTextœ], œtypeœ: œstrœ}" }, { "animated": false, @@ -123,7 +123,7 @@ "data": { "sourceHandle": { "dataType": "Prompt", - "id": "Prompt-7Jzfo", + "id": "Prompt-cMwv1", 
"name": "prompt", "output_types": [ "Message" @@ -131,19 +131,19 @@ }, "targetHandle": { "fieldName": "input_value", - "id": "LanguageModelComponent-muTzI", + "id": "LanguageModelComponent-SCqm9", "inputTypes": [ "Message" ], "type": "str" } }, - "id": "reactflow__edge-Prompt-7Jzfo{œdataTypeœ:œPromptœ,œidœ:œPrompt-7Jzfoœ,œnameœ:œpromptœ,œoutput_typesœ:[œMessageœ]}-LanguageModelComponent-muTzI{œfieldNameœ:œinput_valueœ,œidœ:œLanguageModelComponent-muTzIœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}", + "id": "reactflow__edge-Prompt-cMwv1{œdataTypeœ:œPromptœ,œidœ:œPrompt-cMwv1œ,œnameœ:œpromptœ,œoutput_typesœ:[œMessageœ]}-LanguageModelComponent-SCqm9{œfieldNameœ:œinput_valueœ,œidœ:œLanguageModelComponent-SCqm9œ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}", "selected": false, - "source": "Prompt-7Jzfo", - "sourceHandle": "{œdataTypeœ: œPromptœ, œidœ: œPrompt-7Jzfoœ, œnameœ: œpromptœ, œoutput_typesœ: [œMessageœ]}", - "target": "LanguageModelComponent-muTzI", - "targetHandle": "{œfieldNameœ: œinput_valueœ, œidœ: œLanguageModelComponent-muTzIœ, œinputTypesœ: [œMessageœ], œtypeœ: œstrœ}" + "source": "Prompt-cMwv1", + "sourceHandle": "{œdataTypeœ: œPromptœ, œidœ: œPrompt-cMwv1œ, œnameœ: œpromptœ, œoutput_typesœ: [œMessageœ]}", + "target": "LanguageModelComponent-SCqm9", + "targetHandle": "{œfieldNameœ: œinput_valueœ, œidœ: œLanguageModelComponent-SCqm9œ, œinputTypesœ: [œMessageœ], œtypeœ: œstrœ}" }, { "animated": false, @@ -151,7 +151,7 @@ "data": { "sourceHandle": { "dataType": "LanguageModelComponent", - "id": "LanguageModelComponent-muTzI", + "id": "LanguageModelComponent-SCqm9", "name": "text_output", "output_types": [ "Message" @@ -159,7 +159,7 @@ }, "targetHandle": { "fieldName": "input_value", - "id": "ChatOutput-qF9Bn", + "id": "ChatOutput-VoIob", "inputTypes": [ "Data", "DataFrame", @@ -168,12 +168,12 @@ "type": "other" } }, - "id": "reactflow__edge-LanguageModelComponent-muTzI{œdataTypeœ:œLanguageModelComponentœ,œidœ:œLanguageModelComponent-muTzIœ,œnameœ:œtext_outputœ,œoutput_typesœ:[œMessageœ]}-ChatOutput-qF9Bn{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-qF9Bnœ,œinputTypesœ:[œDataœ,œDataFrameœ,œMessageœ],œtypeœ:œotherœ}", + "id": "reactflow__edge-LanguageModelComponent-SCqm9{œdataTypeœ:œLanguageModelComponentœ,œidœ:œLanguageModelComponent-SCqm9œ,œnameœ:œtext_outputœ,œoutput_typesœ:[œMessageœ]}-ChatOutput-VoIob{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-VoIobœ,œinputTypesœ:[œDataœ,œDataFrameœ,œMessageœ],œtypeœ:œotherœ}", "selected": false, - "source": "LanguageModelComponent-muTzI", - "sourceHandle": "{œdataTypeœ: œLanguageModelComponentœ, œidœ: œLanguageModelComponent-muTzIœ, œnameœ: œtext_outputœ, œoutput_typesœ: [œMessageœ]}", - "target": "ChatOutput-qF9Bn", - "targetHandle": "{œfieldNameœ: œinput_valueœ, œidœ: œChatOutput-qF9Bnœ, œinputTypesœ: [œDataœ, œDataFrameœ, œMessageœ], œtypeœ: œotherœ}" + "source": "LanguageModelComponent-SCqm9", + "sourceHandle": "{œdataTypeœ: œLanguageModelComponentœ, œidœ: œLanguageModelComponent-SCqm9œ, œnameœ: œtext_outputœ, œoutput_typesœ: [œMessageœ]}", + "target": "ChatOutput-VoIob", + "targetHandle": "{œfieldNameœ: œinput_valueœ, œidœ: œChatOutput-VoIobœ, œinputTypesœ: [œDataœ, œDataFrameœ, œMessageœ], œtypeœ: œotherœ}" }, { "animated": false, @@ -181,7 +181,7 @@ "data": { "sourceHandle": { "dataType": "URL", - "id": "URL-Gj8oh", + "id": "URL-gEE5N", "name": "raw_results", "output_types": [ "Message" @@ -189,7 +189,7 @@ }, "targetHandle": { "fieldName": "BASE_COMPONENT_CODE", - "id": "Prompt-7Jzfo", + "id": "Prompt-cMwv1", "inputTypes": [ "Message", "Text" @@ 
-197,12 +197,12 @@ "type": "str" } }, - "id": "reactflow__edge-URL-Gj8oh{œdataTypeœ:œURLœ,œidœ:œURL-Gj8ohœ,œnameœ:œraw_resultsœ,œoutput_typesœ:[œMessageœ]}-Prompt-7Jzfo{œfieldNameœ:œBASE_COMPONENT_CODEœ,œidœ:œPrompt-7Jzfoœ,œinputTypesœ:[œMessageœ,œTextœ],œtypeœ:œstrœ}", + "id": "reactflow__edge-URL-gEE5N{œdataTypeœ:œURLœ,œidœ:œURL-gEE5Nœ,œnameœ:œraw_resultsœ,œoutput_typesœ:[œMessageœ]}-Prompt-cMwv1{œfieldNameœ:œBASE_COMPONENT_CODEœ,œidœ:œPrompt-cMwv1œ,œinputTypesœ:[œMessageœ,œTextœ],œtypeœ:œstrœ}", "selected": false, - "source": "URL-Gj8oh", - "sourceHandle": "{œdataTypeœ: œURLœ, œidœ: œURL-Gj8ohœ, œnameœ: œraw_resultsœ, œoutput_typesœ: [œMessageœ]}", - "target": "Prompt-7Jzfo", - "targetHandle": "{œfieldNameœ: œBASE_COMPONENT_CODEœ, œidœ: œPrompt-7Jzfoœ, œinputTypesœ: [œMessageœ, œTextœ], œtypeœ: œstrœ}" + "source": "URL-gEE5N", + "sourceHandle": "{œdataTypeœ: œURLœ, œidœ: œURL-gEE5Nœ, œnameœ: œraw_resultsœ, œoutput_typesœ: [œMessageœ]}", + "target": "Prompt-cMwv1", + "targetHandle": "{œfieldNameœ: œBASE_COMPONENT_CODEœ, œidœ: œPrompt-cMwv1œ, œinputTypesœ: [œMessageœ, œTextœ], œtypeœ: œstrœ}" } ], "nodes": [ @@ -210,7 +210,7 @@ "data": { "description": "Retrieves stored chat messages from Langflow tables or an external memory.", "display_name": "Chat Memory", - "id": "Memory-U33nr", + "id": "Memory-4gSCw", "node": { "base_classes": [ "Data", @@ -235,19 +235,19 @@ "frozen": false, "icon": "message-square-more", "legacy": false, - "lf_version": "1.4.3", + "lf_version": "1.6.0", "metadata": { - "code_hash": "464cc8b8fdd2", + "code_hash": "6c35f0cd5b52", "dependencies": { "dependencies": [ { - "name": "langflow", + "name": "lfx", "version": null } ], "total_dependencies": 1 }, - "module": "langflow.components.helpers.memory.MemoryComponent" + "module": "lfx.components.helpers.memory.MemoryComponent" }, "output_types": [], "outputs": [ @@ -299,7 +299,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from typing import Any, cast\n\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.helpers.data import data_to_text\nfrom langflow.inputs.inputs import DropdownInput, HandleInput, IntInput, MessageTextInput, MultilineInput, TabInput\nfrom langflow.memory import aget_messages, astore_message\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\nfrom langflow.template.field.base import Output\nfrom langflow.utils.component_utils import set_current_fields, set_field_display\nfrom langflow.utils.constants import MESSAGE_SENDER_AI, MESSAGE_SENDER_NAME_AI, MESSAGE_SENDER_USER\n\n\nclass MemoryComponent(Component):\n display_name = \"Message History\"\n description = \"Stores or retrieves stored chat messages from Langflow tables or an external memory.\"\n documentation: str = \"https://docs.langflow.org/components-helpers#message-history\"\n icon = \"message-square-more\"\n name = \"Memory\"\n default_keys = [\"mode\", \"memory\"]\n mode_config = {\n \"Store\": [\"message\", \"memory\", \"sender\", \"sender_name\", \"session_id\"],\n \"Retrieve\": [\"n_messages\", \"order\", \"template\", \"memory\"],\n }\n\n inputs = [\n TabInput(\n name=\"mode\",\n display_name=\"Mode\",\n options=[\"Retrieve\", \"Store\"],\n value=\"Retrieve\",\n info=\"Operation mode: Store messages or Retrieve messages.\",\n real_time_refresh=True,\n ),\n MessageTextInput(\n name=\"message\",\n display_name=\"Message\",\n info=\"The chat message to be stored.\",\n 
tool_mode=True,\n dynamic=True,\n show=False,\n ),\n HandleInput(\n name=\"memory\",\n display_name=\"External Memory\",\n input_types=[\"Memory\"],\n info=\"Retrieve messages from an external memory. If empty, it will use the Langflow tables.\",\n advanced=True,\n ),\n DropdownInput(\n name=\"sender_type\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER, \"Machine and User\"],\n value=\"Machine and User\",\n info=\"Filter by sender type.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender\",\n display_name=\"Sender\",\n info=\"The sender of the message. Might be Machine or User. \"\n \"If empty, the current sender parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Filter by sender name.\",\n advanced=True,\n show=False,\n ),\n IntInput(\n name=\"n_messages\",\n display_name=\"Number of Messages\",\n value=100,\n info=\"Number of messages to retrieve.\",\n advanced=True,\n show=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n value=\"\",\n advanced=True,\n ),\n DropdownInput(\n name=\"order\",\n display_name=\"Order\",\n options=[\"Ascending\", \"Descending\"],\n value=\"Ascending\",\n info=\"Order of the messages.\",\n advanced=True,\n tool_mode=True,\n required=True,\n ),\n MultilineInput(\n name=\"template\",\n display_name=\"Template\",\n info=\"The template to use for formatting the data. \"\n \"It can contain the keys {text}, {sender} or any other key in the message data.\",\n value=\"{sender_name}: {text}\",\n advanced=True,\n show=False,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Message\", name=\"messages_text\", method=\"retrieve_messages_as_text\", dynamic=True),\n Output(display_name=\"Dataframe\", name=\"dataframe\", method=\"retrieve_messages_dataframe\", dynamic=True),\n ]\n\n def update_outputs(self, frontend_node: dict, field_name: str, field_value: Any) -> dict:\n \"\"\"Dynamically show only the relevant output based on the selected output type.\"\"\"\n if field_name == \"mode\":\n # Start with empty outputs\n frontend_node[\"outputs\"] = []\n if field_value == \"Store\":\n frontend_node[\"outputs\"] = [\n Output(\n display_name=\"Stored Messages\",\n name=\"stored_messages\",\n method=\"store_message\",\n hidden=True,\n dynamic=True,\n )\n ]\n if field_value == \"Retrieve\":\n frontend_node[\"outputs\"] = [\n Output(\n display_name=\"Messages\", name=\"messages_text\", method=\"retrieve_messages_as_text\", dynamic=True\n ),\n Output(\n display_name=\"Dataframe\", name=\"dataframe\", method=\"retrieve_messages_dataframe\", dynamic=True\n ),\n ]\n return frontend_node\n\n async def store_message(self) -> Message:\n message = Message(text=self.message) if isinstance(self.message, str) else self.message\n\n message.session_id = self.session_id or message.session_id\n message.sender = self.sender or message.sender or MESSAGE_SENDER_AI\n message.sender_name = self.sender_name or message.sender_name or MESSAGE_SENDER_NAME_AI\n\n stored_messages: list[Message] = []\n\n if self.memory:\n self.memory.session_id = message.session_id\n lc_message = message.to_lc_message()\n await self.memory.aadd_messages([lc_message])\n\n stored_messages = await self.memory.aget_messages() or []\n\n stored_messages = [Message.from_lc_message(m) for m in stored_messages] if stored_messages else []\n\n if message.sender:\n 
stored_messages = [m for m in stored_messages if m.sender == message.sender]\n else:\n await astore_message(message, flow_id=self.graph.flow_id)\n stored_messages = (\n await aget_messages(\n session_id=message.session_id, sender_name=message.sender_name, sender=message.sender\n )\n or []\n )\n\n if not stored_messages:\n msg = \"No messages were stored. Please ensure that the session ID and sender are properly set.\"\n raise ValueError(msg)\n\n stored_message = stored_messages[0]\n self.status = stored_message\n return stored_message\n\n async def retrieve_messages(self) -> Data:\n sender_type = self.sender_type\n sender_name = self.sender_name\n session_id = self.session_id\n n_messages = self.n_messages\n order = \"DESC\" if self.order == \"Descending\" else \"ASC\"\n\n if sender_type == \"Machine and User\":\n sender_type = None\n\n if self.memory and not hasattr(self.memory, \"aget_messages\"):\n memory_name = type(self.memory).__name__\n err_msg = f\"External Memory object ({memory_name}) must have 'aget_messages' method.\"\n raise AttributeError(err_msg)\n # Check if n_messages is None or 0\n if n_messages == 0:\n stored = []\n elif self.memory:\n # override session_id\n self.memory.session_id = session_id\n\n stored = await self.memory.aget_messages()\n # langchain memories are supposed to return messages in ascending order\n\n if order == \"DESC\":\n stored = stored[::-1]\n if n_messages:\n stored = stored[-n_messages:] if order == \"ASC\" else stored[:n_messages]\n stored = [Message.from_lc_message(m) for m in stored]\n if sender_type:\n expected_type = MESSAGE_SENDER_AI if sender_type == MESSAGE_SENDER_AI else MESSAGE_SENDER_USER\n stored = [m for m in stored if m.type == expected_type]\n else:\n # For internal memory, we always fetch the last N messages by ordering by DESC\n stored = await aget_messages(\n sender=sender_type,\n sender_name=sender_name,\n session_id=session_id,\n limit=10000,\n order=order,\n )\n if n_messages:\n stored = stored[-n_messages:] if order == \"ASC\" else stored[:n_messages]\n\n # self.status = stored\n return cast(\"Data\", stored)\n\n async def retrieve_messages_as_text(self) -> Message:\n stored_text = data_to_text(self.template, await self.retrieve_messages())\n # self.status = stored_text\n return Message(text=stored_text)\n\n async def retrieve_messages_dataframe(self) -> DataFrame:\n \"\"\"Convert the retrieved messages into a DataFrame.\n\n Returns:\n DataFrame: A DataFrame containing the message data.\n \"\"\"\n messages = await self.retrieve_messages()\n return DataFrame(messages)\n\n def update_build_config(\n self,\n build_config: dotdict,\n field_value: Any, # noqa: ARG002\n field_name: str | None = None, # noqa: ARG002\n ) -> dotdict:\n return set_current_fields(\n build_config=build_config,\n action_fields=self.mode_config,\n selected_action=build_config[\"mode\"][\"value\"],\n default_fields=self.default_keys,\n func=set_field_display,\n )\n" + "value": "from typing import Any, cast\n\nfrom lfx.custom.custom_component.component import Component\nfrom lfx.helpers.data import data_to_text\nfrom lfx.inputs.inputs import DropdownInput, HandleInput, IntInput, MessageTextInput, MultilineInput, TabInput\nfrom lfx.memory import aget_messages, astore_message\nfrom lfx.schema.data import Data\nfrom lfx.schema.dataframe import DataFrame\nfrom lfx.schema.dotdict import dotdict\nfrom lfx.schema.message import Message\nfrom lfx.template.field.base import Output\nfrom lfx.utils.component_utils import set_current_fields, set_field_display\nfrom 
lfx.utils.constants import MESSAGE_SENDER_AI, MESSAGE_SENDER_NAME_AI, MESSAGE_SENDER_USER\n\n\nclass MemoryComponent(Component):\n display_name = \"Message History\"\n description = \"Stores or retrieves stored chat messages from Langflow tables or an external memory.\"\n documentation: str = \"https://docs.langflow.org/components-helpers#message-history\"\n icon = \"message-square-more\"\n name = \"Memory\"\n default_keys = [\"mode\", \"memory\"]\n mode_config = {\n \"Store\": [\"message\", \"memory\", \"sender\", \"sender_name\", \"session_id\"],\n \"Retrieve\": [\"n_messages\", \"order\", \"template\", \"memory\"],\n }\n\n inputs = [\n TabInput(\n name=\"mode\",\n display_name=\"Mode\",\n options=[\"Retrieve\", \"Store\"],\n value=\"Retrieve\",\n info=\"Operation mode: Store messages or Retrieve messages.\",\n real_time_refresh=True,\n ),\n MessageTextInput(\n name=\"message\",\n display_name=\"Message\",\n info=\"The chat message to be stored.\",\n tool_mode=True,\n dynamic=True,\n show=False,\n ),\n HandleInput(\n name=\"memory\",\n display_name=\"External Memory\",\n input_types=[\"Memory\"],\n info=\"Retrieve messages from an external memory. If empty, it will use the Langflow tables.\",\n advanced=True,\n ),\n DropdownInput(\n name=\"sender_type\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER, \"Machine and User\"],\n value=\"Machine and User\",\n info=\"Filter by sender type.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender\",\n display_name=\"Sender\",\n info=\"The sender of the message. Might be Machine or User. \"\n \"If empty, the current sender parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Filter by sender name.\",\n advanced=True,\n show=False,\n ),\n IntInput(\n name=\"n_messages\",\n display_name=\"Number of Messages\",\n value=100,\n info=\"Number of messages to retrieve.\",\n advanced=True,\n show=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n value=\"\",\n advanced=True,\n ),\n DropdownInput(\n name=\"order\",\n display_name=\"Order\",\n options=[\"Ascending\", \"Descending\"],\n value=\"Ascending\",\n info=\"Order of the messages.\",\n advanced=True,\n tool_mode=True,\n required=True,\n ),\n MultilineInput(\n name=\"template\",\n display_name=\"Template\",\n info=\"The template to use for formatting the data. 
\"\n \"It can contain the keys {text}, {sender} or any other key in the message data.\",\n value=\"{sender_name}: {text}\",\n advanced=True,\n show=False,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Message\", name=\"messages_text\", method=\"retrieve_messages_as_text\", dynamic=True),\n Output(display_name=\"Dataframe\", name=\"dataframe\", method=\"retrieve_messages_dataframe\", dynamic=True),\n ]\n\n def update_outputs(self, frontend_node: dict, field_name: str, field_value: Any) -> dict:\n \"\"\"Dynamically show only the relevant output based on the selected output type.\"\"\"\n if field_name == \"mode\":\n # Start with empty outputs\n frontend_node[\"outputs\"] = []\n if field_value == \"Store\":\n frontend_node[\"outputs\"] = [\n Output(\n display_name=\"Stored Messages\",\n name=\"stored_messages\",\n method=\"store_message\",\n hidden=True,\n dynamic=True,\n )\n ]\n if field_value == \"Retrieve\":\n frontend_node[\"outputs\"] = [\n Output(\n display_name=\"Messages\", name=\"messages_text\", method=\"retrieve_messages_as_text\", dynamic=True\n ),\n Output(\n display_name=\"Dataframe\", name=\"dataframe\", method=\"retrieve_messages_dataframe\", dynamic=True\n ),\n ]\n return frontend_node\n\n async def store_message(self) -> Message:\n message = Message(text=self.message) if isinstance(self.message, str) else self.message\n\n message.session_id = self.session_id or message.session_id\n message.sender = self.sender or message.sender or MESSAGE_SENDER_AI\n message.sender_name = self.sender_name or message.sender_name or MESSAGE_SENDER_NAME_AI\n\n stored_messages: list[Message] = []\n\n if self.memory:\n self.memory.session_id = message.session_id\n lc_message = message.to_lc_message()\n await self.memory.aadd_messages([lc_message])\n\n stored_messages = await self.memory.aget_messages() or []\n\n stored_messages = [Message.from_lc_message(m) for m in stored_messages] if stored_messages else []\n\n if message.sender:\n stored_messages = [m for m in stored_messages if m.sender == message.sender]\n else:\n await astore_message(message, flow_id=self.graph.flow_id)\n stored_messages = (\n await aget_messages(\n session_id=message.session_id, sender_name=message.sender_name, sender=message.sender\n )\n or []\n )\n\n if not stored_messages:\n msg = \"No messages were stored. 
Please ensure that the session ID and sender are properly set.\"\n raise ValueError(msg)\n\n stored_message = stored_messages[0]\n self.status = stored_message\n return stored_message\n\n async def retrieve_messages(self) -> Data:\n sender_type = self.sender_type\n sender_name = self.sender_name\n session_id = self.session_id\n n_messages = self.n_messages\n order = \"DESC\" if self.order == \"Descending\" else \"ASC\"\n\n if sender_type == \"Machine and User\":\n sender_type = None\n\n if self.memory and not hasattr(self.memory, \"aget_messages\"):\n memory_name = type(self.memory).__name__\n err_msg = f\"External Memory object ({memory_name}) must have 'aget_messages' method.\"\n raise AttributeError(err_msg)\n # Check if n_messages is None or 0\n if n_messages == 0:\n stored = []\n elif self.memory:\n # override session_id\n self.memory.session_id = session_id\n\n stored = await self.memory.aget_messages()\n # langchain memories are supposed to return messages in ascending order\n\n if order == \"DESC\":\n stored = stored[::-1]\n if n_messages:\n stored = stored[-n_messages:] if order == \"ASC\" else stored[:n_messages]\n stored = [Message.from_lc_message(m) for m in stored]\n if sender_type:\n expected_type = MESSAGE_SENDER_AI if sender_type == MESSAGE_SENDER_AI else MESSAGE_SENDER_USER\n stored = [m for m in stored if m.type == expected_type]\n else:\n # For internal memory, we always fetch the last N messages by ordering by DESC\n stored = await aget_messages(\n sender=sender_type,\n sender_name=sender_name,\n session_id=session_id,\n limit=10000,\n order=order,\n )\n if n_messages:\n stored = stored[-n_messages:] if order == \"ASC\" else stored[:n_messages]\n\n # self.status = stored\n return cast(\"Data\", stored)\n\n async def retrieve_messages_as_text(self) -> Message:\n stored_text = data_to_text(self.template, await self.retrieve_messages())\n # self.status = stored_text\n return Message(text=stored_text)\n\n async def retrieve_messages_dataframe(self) -> DataFrame:\n \"\"\"Convert the retrieved messages into a DataFrame.\n\n Returns:\n DataFrame: A DataFrame containing the message data.\n \"\"\"\n messages = await self.retrieve_messages()\n return DataFrame(messages)\n\n def update_build_config(\n self,\n build_config: dotdict,\n field_value: Any, # noqa: ARG002\n field_name: str | None = None, # noqa: ARG002\n ) -> dotdict:\n return set_current_fields(\n build_config=build_config,\n action_fields=self.mode_config,\n selected_action=build_config[\"mode\"][\"value\"],\n default_fields=self.default_keys,\n func=set_field_display,\n )\n" }, "memory": { "_input_type": "HandleInput", @@ -523,7 +523,7 @@ }, "dragging": false, "height": 262, - "id": "Memory-U33nr", + "id": "Memory-4gSCw", "measured": { "height": 262, "width": 320 @@ -544,7 +544,7 @@ "data": { "description": "Create a prompt template with dynamic variables.", "display_name": "Prompt", - "id": "Prompt-7Jzfo", + "id": "Prompt-cMwv1", "node": { "base_classes": [ "Message" @@ -570,7 +570,7 @@ "frozen": false, "icon": "braces", "legacy": false, - "lf_version": "1.4.3", + "lf_version": "1.6.0", "metadata": { "code_hash": "3bf0b511e227", "module": "langflow.components.prompts.prompt.PromptComponent" @@ -776,7 +776,7 @@ }, "dragging": false, "height": 685, - "id": "Prompt-7Jzfo", + "id": "Prompt-cMwv1", "measured": { "height": 685, "width": 320 @@ -795,7 +795,7 @@ }, { "data": { - "id": "note-x5NN0", + "id": "note-yh0aK", "node": { "description": "# 🛠️ Custom Component Generator 🚀\n\nHi! 
I'm here to help you create custom components for Langflow. Think of me as your technical partner who can help turn your ideas into working components! \n\n## 🎯 How to Work With Me\n\n1. Add your **Anthropic API Key** to the **Language Model** Component\n\n2. 💭 Tell Me What You Want to Build.\nSimply describe what you want your component to do in plain English. For example:\n- \"I need a component that sends Slack messages\"\n- \"I want to create a tool that can process CSV files\"\n- \"I need something that can translate text\"\n\n\nReady to build something awesome? 🚀 Let's get started!", "display_name": "", @@ -806,7 +806,7 @@ }, "dragging": false, "height": 605, - "id": "note-x5NN0", + "id": "note-yh0aK", "measured": { "height": 605, "width": 626 @@ -830,7 +830,7 @@ }, { "data": { - "id": "URL-Gj8oh", + "id": "URL-gEE5N", "node": { "base_classes": [ "Data", @@ -850,7 +850,7 @@ "frozen": false, "icon": "layout-template", "legacy": false, - "lf_version": "1.4.3", + "lf_version": "1.6.0", "metadata": {}, "output_types": [], "outputs": [ @@ -1167,7 +1167,7 @@ }, "dragging": false, "height": 365, - "id": "URL-Gj8oh", + "id": "URL-gEE5N", "measured": { "height": 365, "width": 320 @@ -1186,7 +1186,7 @@ }, { "data": { - "id": "URL-LiTXv", + "id": "URL-h1gAB", "node": { "base_classes": [ "Data", @@ -1206,7 +1206,7 @@ "frozen": false, "icon": "layout-template", "legacy": false, - "lf_version": "1.4.3", + "lf_version": "1.6.0", "metadata": {}, "output_types": [], "outputs": [ @@ -1529,7 +1529,7 @@ }, "dragging": false, "height": 661, - "id": "URL-LiTXv", + "id": "URL-h1gAB", "measured": { "height": 661, "width": 320 @@ -1548,7 +1548,7 @@ }, { "data": { - "id": "URL-E6QCv", + "id": "URL-G5J7i", "node": { "base_classes": [ "Data", @@ -1568,7 +1568,7 @@ "frozen": false, "icon": "layout-template", "legacy": false, - "lf_version": "1.4.3", + "lf_version": "1.6.0", "metadata": {}, "output_types": [], "outputs": [ @@ -1885,7 +1885,7 @@ }, "dragging": false, "height": 365, - "id": "URL-E6QCv", + "id": "URL-G5J7i", "measured": { "height": 365, "width": 320 @@ -1904,7 +1904,7 @@ }, { "data": { - "id": "ChatInput-u8rae", + "id": "ChatInput-eo1g2", "node": { "base_classes": [ "Message" @@ -1932,19 +1932,19 @@ "icon": "MessagesSquare", "key": "ChatInput", "legacy": false, - "lf_version": "1.4.3", + "lf_version": "1.6.0", "metadata": { - "code_hash": "192913db3453", + "code_hash": "715a37648834", "dependencies": { "dependencies": [ { - "name": "langflow", + "name": "lfx", "version": null } ], "total_dependencies": 1 }, - "module": "langflow.components.input_output.chat.ChatInput" + "module": "lfx.components.input_output.chat.ChatInput" }, "minimized": true, "output_types": [], @@ -2030,7 +2030,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.inputs.inputs import BoolInput\nfrom langflow.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n documentation: str = \"https://docs.langflow.org/components-io#chat-input\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n 
display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\n \"background_color\": background_color,\n \"text_color\": text_color,\n \"icon\": icon,\n },\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n" + "value": "from lfx.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom lfx.base.io.chat import ChatComponent\nfrom lfx.inputs.inputs import BoolInput\nfrom lfx.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom lfx.schema.message import Message\nfrom lfx.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n documentation: str = \"https://docs.langflow.org/components-io#chat-input\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of 
the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\n \"background_color\": background_color,\n \"text_color\": text_color,\n \"icon\": icon,\n },\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n" }, "files": { "_input_type": "FileInput", @@ -2216,7 +2216,7 @@ "type": "ChatInput" }, "dragging": false, - "id": "ChatInput-u8rae", + "id": "ChatInput-eo1g2", "measured": { "height": 48, "width": 192 @@ -2230,7 +2230,7 @@ }, { "data": { - "id": "ChatOutput-qF9Bn", + "id": "ChatOutput-VoIob", "node": { "base_classes": [ "Message" @@ -2260,7 +2260,7 @@ "key": "ChatOutput", "legacy": false, "metadata": { - "code_hash": "6f74e04e39d5", + "code_hash": "9619107fecd1", "dependencies": { "dependencies": [ { @@ -2272,13 +2272,13 @@ "version": "0.116.1" }, { - "name": "langflow", + "name": "lfx", "version": null } ], "total_dependencies": 3 }, - "module": "langflow.components.input_output.chat_output.ChatOutput" + "module": "lfx.components.input_output.chat_output.ChatOutput" }, "minimized": true, "output_types": [], @@ -2382,7 +2382,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs.inputs import BoolInput, DropdownInput, HandleInput, MessageTextInput\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.template.field.base import Output\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n documentation: str = \"https://docs.langflow.org/components-io#chat-output\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n 
HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = 
stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n" + "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom lfx.base.io.chat import ChatComponent\nfrom lfx.helpers.data import safe_convert\nfrom lfx.inputs.inputs import BoolInput, DropdownInput, HandleInput, MessageTextInput\nfrom lfx.schema.data import Data\nfrom lfx.schema.dataframe import DataFrame\nfrom lfx.schema.message import Message\nfrom lfx.schema.properties import Source\nfrom lfx.template.field.base import Output\nfrom lfx.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n documentation: str = \"https://docs.langflow.org/components-io#chat-output\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the 
chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data 
| DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n" }, "data_template": { "_input_type": "MessageTextInput", @@ -2546,9 +2546,9 @@ "type": "ChatOutput" }, "dragging": false, - "id": "ChatOutput-qF9Bn", + "id": "ChatOutput-VoIob", "measured": { - "height": 165, + "height": 166, "width": 320 }, "position": { @@ -2560,7 +2560,7 @@ }, { "data": { - "id": "LanguageModelComponent-muTzI", + "id": "LanguageModelComponent-SCqm9", "node": { "base_classes": [ "LanguageModel", @@ -2570,7 +2570,7 @@ "category": "models", "conditional_paths": [], "custom_fields": {}, - "description": "Runs a language model given a specified provider. ", + "description": "Runs a language model given a specified provider.", "display_name": "Language Model", "documentation": "", "edited": false, @@ -2586,14 +2586,38 @@ "frozen": false, "icon": "brain-circuit", "key": "LanguageModelComponent", + "last_updated": "2025-09-02T13:04:22.208Z", "legacy": false, "metadata": { + "code_hash": "bb5f8714781b", + "dependencies": { + "dependencies": [ + { + "name": "langchain_anthropic", + "version": "0.3.14" + }, + { + "name": "langchain_google_genai", + "version": "2.0.6" + }, + { + "name": "langchain_openai", + "version": "0.3.23" + }, + { + "name": "lfx", + "version": null + } + ], + "total_dependencies": 4 + }, "keywords": [ "model", "llm", "language model", "large language model" - ] + ], + "module": "lfx.components.models.language_model.LanguageModelComponent" }, "minimized": false, "output_types": [], @@ -2670,7 +2694,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from typing import Any\n\nfrom langchain_anthropic import ChatAnthropic\nfrom langchain_google_genai import ChatGoogleGenerativeAI\nfrom langchain_openai import ChatOpenAI\n\nfrom langflow.base.models.anthropic_constants import ANTHROPIC_MODELS\nfrom langflow.base.models.google_generative_ai_constants import GOOGLE_GENERATIVE_AI_MODELS\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import OPENAI_CHAT_MODEL_NAMES, OPENAI_REASONING_MODEL_NAMES\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs.inputs import BoolInput\nfrom langflow.io import DropdownInput, MessageInput, MultilineInput, SecretStrInput, SliderInput\nfrom langflow.schema.dotdict import dotdict\n\n\nclass LanguageModelComponent(LCModelComponent):\n display_name = \"Language Model\"\n description = \"Runs a language model given a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-models\"\n icon = 
\"brain-circuit\"\n category = \"models\"\n priority = 0 # Set priority to 0 to make it appear first\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Anthropic\", \"Google\"],\n value=\"OpenAI\",\n info=\"Select the model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Anthropic\"}, {\"icon\": \"GoogleGenerativeAI\"}],\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n options=OPENAI_CHAT_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES,\n value=OPENAI_CHAT_MODEL_NAMES[0],\n info=\"Select the model to use\",\n real_time_refresh=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=False,\n show=True,\n real_time_refresh=True,\n ),\n MessageInput(\n name=\"input_value\",\n display_name=\"Input\",\n info=\"The input text to send to the model\",\n ),\n MultilineInput(\n name=\"system_message\",\n display_name=\"System Message\",\n info=\"A system message that helps set the behavior of the assistant\",\n advanced=False,\n ),\n BoolInput(\n name=\"stream\",\n display_name=\"Stream\",\n info=\"Whether to stream the response\",\n value=False,\n advanced=True,\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n info=\"Controls randomness in responses\",\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n advanced=True,\n ),\n ]\n\n def build_model(self) -> LanguageModel:\n provider = self.provider\n model_name = self.model_name\n temperature = self.temperature\n stream = self.stream\n\n if provider == \"OpenAI\":\n if not self.api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n raise ValueError(msg)\n\n if model_name in OPENAI_REASONING_MODEL_NAMES:\n # reasoning models do not support temperature (yet)\n temperature = None\n\n return ChatOpenAI(\n model_name=model_name,\n temperature=temperature,\n streaming=stream,\n openai_api_key=self.api_key,\n )\n if provider == \"Anthropic\":\n if not self.api_key:\n msg = \"Anthropic API key is required when using Anthropic provider\"\n raise ValueError(msg)\n return ChatAnthropic(\n model=model_name,\n temperature=temperature,\n streaming=stream,\n anthropic_api_key=self.api_key,\n )\n if provider == \"Google\":\n if not self.api_key:\n msg = \"Google API key is required when using Google provider\"\n raise ValueError(msg)\n return ChatGoogleGenerativeAI(\n model=model_name,\n temperature=temperature,\n streaming=stream,\n google_api_key=self.api_key,\n )\n msg = f\"Unknown provider: {provider}\"\n raise ValueError(msg)\n\n def update_build_config(self, build_config: dotdict, field_value: Any, field_name: str | None = None) -> dotdict:\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model_name\"][\"options\"] = OPENAI_CHAT_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES\n build_config[\"model_name\"][\"value\"] = OPENAI_CHAT_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n elif field_value == \"Anthropic\":\n build_config[\"model_name\"][\"options\"] = ANTHROPIC_MODELS\n build_config[\"model_name\"][\"value\"] = ANTHROPIC_MODELS[0]\n build_config[\"api_key\"][\"display_name\"] = \"Anthropic API Key\"\n elif field_value == \"Google\":\n build_config[\"model_name\"][\"options\"] = GOOGLE_GENERATIVE_AI_MODELS\n build_config[\"model_name\"][\"value\"] = GOOGLE_GENERATIVE_AI_MODELS[0]\n build_config[\"api_key\"][\"display_name\"] = 
\"Google API Key\"\n elif field_name == \"model_name\" and field_value.startswith(\"o1\") and self.provider == \"OpenAI\":\n # Hide system_message for o1 models - currently unsupported\n if \"system_message\" in build_config:\n build_config[\"system_message\"][\"show\"] = False\n elif field_name == \"model_name\" and not field_value.startswith(\"o1\") and \"system_message\" in build_config:\n build_config[\"system_message\"][\"show\"] = True\n return build_config\n" + "value": "from typing import Any\n\nfrom langchain_anthropic import ChatAnthropic\nfrom langchain_google_genai import ChatGoogleGenerativeAI\nfrom langchain_openai import ChatOpenAI\n\nfrom lfx.base.models.anthropic_constants import ANTHROPIC_MODELS\nfrom lfx.base.models.google_generative_ai_constants import GOOGLE_GENERATIVE_AI_MODELS\nfrom lfx.base.models.model import LCModelComponent\nfrom lfx.base.models.openai_constants import OPENAI_CHAT_MODEL_NAMES, OPENAI_REASONING_MODEL_NAMES\nfrom lfx.field_typing import LanguageModel\nfrom lfx.field_typing.range_spec import RangeSpec\nfrom lfx.inputs.inputs import BoolInput\nfrom lfx.io import DropdownInput, MessageInput, MultilineInput, SecretStrInput, SliderInput\nfrom lfx.schema.dotdict import dotdict\n\n\nclass LanguageModelComponent(LCModelComponent):\n display_name = \"Language Model\"\n description = \"Runs a language model given a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-models\"\n icon = \"brain-circuit\"\n category = \"models\"\n priority = 0 # Set priority to 0 to make it appear first\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Anthropic\", \"Google\"],\n value=\"OpenAI\",\n info=\"Select the model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Anthropic\"}, {\"icon\": \"GoogleGenerativeAI\"}],\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n options=OPENAI_CHAT_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES,\n value=OPENAI_CHAT_MODEL_NAMES[0],\n info=\"Select the model to use\",\n real_time_refresh=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=False,\n show=True,\n real_time_refresh=True,\n ),\n MessageInput(\n name=\"input_value\",\n display_name=\"Input\",\n info=\"The input text to send to the model\",\n ),\n MultilineInput(\n name=\"system_message\",\n display_name=\"System Message\",\n info=\"A system message that helps set the behavior of the assistant\",\n advanced=False,\n ),\n BoolInput(\n name=\"stream\",\n display_name=\"Stream\",\n info=\"Whether to stream the response\",\n value=False,\n advanced=True,\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n info=\"Controls randomness in responses\",\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n advanced=True,\n ),\n ]\n\n def build_model(self) -> LanguageModel:\n provider = self.provider\n model_name = self.model_name\n temperature = self.temperature\n stream = self.stream\n\n if provider == \"OpenAI\":\n if not self.api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n raise ValueError(msg)\n\n if model_name in OPENAI_REASONING_MODEL_NAMES:\n # reasoning models do not support temperature (yet)\n temperature = None\n\n return ChatOpenAI(\n model_name=model_name,\n temperature=temperature,\n streaming=stream,\n openai_api_key=self.api_key,\n )\n if provider == \"Anthropic\":\n if 
not self.api_key:\n msg = \"Anthropic API key is required when using Anthropic provider\"\n raise ValueError(msg)\n return ChatAnthropic(\n model=model_name,\n temperature=temperature,\n streaming=stream,\n anthropic_api_key=self.api_key,\n )\n if provider == \"Google\":\n if not self.api_key:\n msg = \"Google API key is required when using Google provider\"\n raise ValueError(msg)\n return ChatGoogleGenerativeAI(\n model=model_name,\n temperature=temperature,\n streaming=stream,\n google_api_key=self.api_key,\n )\n msg = f\"Unknown provider: {provider}\"\n raise ValueError(msg)\n\n def update_build_config(self, build_config: dotdict, field_value: Any, field_name: str | None = None) -> dotdict:\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model_name\"][\"options\"] = OPENAI_CHAT_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES\n build_config[\"model_name\"][\"value\"] = OPENAI_CHAT_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n elif field_value == \"Anthropic\":\n build_config[\"model_name\"][\"options\"] = ANTHROPIC_MODELS\n build_config[\"model_name\"][\"value\"] = ANTHROPIC_MODELS[0]\n build_config[\"api_key\"][\"display_name\"] = \"Anthropic API Key\"\n elif field_value == \"Google\":\n build_config[\"model_name\"][\"options\"] = GOOGLE_GENERATIVE_AI_MODELS\n build_config[\"model_name\"][\"value\"] = GOOGLE_GENERATIVE_AI_MODELS[0]\n build_config[\"api_key\"][\"display_name\"] = \"Google API Key\"\n elif field_name == \"model_name\" and field_value.startswith(\"o1\") and self.provider == \"OpenAI\":\n # Hide system_message for o1 models - currently unsupported\n if \"system_message\" in build_config:\n build_config[\"system_message\"][\"show\"] = False\n elif field_name == \"model_name\" and not field_value.startswith(\"o1\") and \"system_message\" in build_config:\n build_config[\"system_message\"][\"show\"] = True\n return build_config\n" }, "input_value": { "_input_type": "MessageInput", @@ -2722,7 +2746,7 @@ "tool_mode": false, "trace_as_metadata": true, "type": "str", - "value": "claude-opus-4-20250514" + "value": "claude-sonnet-4-20250514" }, "provider": { "_input_type": "DropdownInput", @@ -2839,30 +2863,30 @@ "type": "LanguageModelComponent" }, "dragging": false, - "id": "LanguageModelComponent-muTzI", + "id": "LanguageModelComponent-SCqm9", "measured": { - "height": 449, + "height": 451, "width": 320 }, "position": { "x": 2595.812486589649, "y": 559.0945152239169 }, - "selected": false, + "selected": true, "type": "genericNode" } ], "viewport": { - "x": -169.88117007333017, - "y": -14.61264877721112, - "zoom": 0.7334501147704762 + "x": -392.63239740741324, + "y": 71.00664626754826, + "zoom": 0.5114416779009183 } }, "description": "Generates well-structured code for custom components following Langflow's specifications.", "endpoint_name": null, - "id": "81b54c06-58c5-4e91-a228-b5aaf7ffa66d", + "id": "a1bdea51-5dc4-44af-a8d2-d07cdafa8f31", "is_component": false, - "last_tested_version": "1.4.3", + "last_tested_version": "1.6.0", "name": "Custom Component Generator", "tags": [ "coding", diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Document Q&A.json b/src/backend/base/langflow/initial_setup/starter_projects/Document Q&A.json index b4e240a86bad..0f6e55a07f82 100644 --- a/src/backend/base/langflow/initial_setup/starter_projects/Document Q&A.json +++ b/src/backend/base/langflow/initial_setup/starter_projects/Document Q&A.json @@ -147,17 +147,17 @@ "legacy": false, "lf_version": "1.4.3", 
"metadata": { - "code_hash": "192913db3453", + "code_hash": "715a37648834", "dependencies": { "dependencies": [ { - "name": "langflow", + "name": "lfx", "version": null } ], "total_dependencies": 1 }, - "module": "langflow.components.input_output.chat.ChatInput" + "module": "lfx.components.input_output.chat.ChatInput" }, "output_types": [], "outputs": [ @@ -237,7 +237,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.inputs.inputs import BoolInput\nfrom langflow.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n documentation: str = \"https://docs.langflow.org/components-io#chat-input\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. 
If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\n \"background_color\": background_color,\n \"text_color\": text_color,\n \"icon\": icon,\n },\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n" + "value": "from lfx.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom lfx.base.io.chat import ChatComponent\nfrom lfx.inputs.inputs import BoolInput\nfrom lfx.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom lfx.schema.message import Message\nfrom lfx.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n documentation: str = \"https://docs.langflow.org/components-io#chat-input\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. 
If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\n \"background_color\": background_color,\n \"text_color\": text_color,\n \"icon\": icon,\n },\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n" }, "files": { "advanced": true, @@ -451,7 +451,7 @@ "legacy": false, "lf_version": "1.4.3", "metadata": { - "code_hash": "6f74e04e39d5", + "code_hash": "9619107fecd1", "dependencies": { "dependencies": [ { @@ -463,13 +463,13 @@ "version": "0.116.1" }, { - "name": "langflow", + "name": "lfx", "version": null } ], "total_dependencies": 3 }, - "module": "langflow.components.input_output.chat_output.ChatOutput" + "module": "lfx.components.input_output.chat_output.ChatOutput" }, "output_types": [], "outputs": [ @@ -569,7 +569,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs.inputs import BoolInput, DropdownInput, HandleInput, MessageTextInput\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.template.field.base import Output\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n documentation: str = \"https://docs.langflow.org/components-io#chat-output\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n 
advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + 
json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n" + "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom lfx.base.io.chat import ChatComponent\nfrom lfx.helpers.data import safe_convert\nfrom lfx.inputs.inputs import BoolInput, DropdownInput, HandleInput, MessageTextInput\nfrom lfx.schema.data import Data\nfrom lfx.schema.dataframe import DataFrame\nfrom lfx.schema.message import Message\nfrom lfx.schema.properties import Source\nfrom lfx.template.field.base import Output\nfrom lfx.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n documentation: str = \"https://docs.langflow.org/components-io#chat-output\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, 
got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n" }, "data_template": { "_input_type": "MessageTextInput", @@ -950,7 +950,7 @@ "beta": false, "conditional_paths": [], "custom_fields": {}, - "description": "Runs a language model given a specified provider. ", + "description": "Runs a language model given a specified provider.", "display_name": "Language Model", "documentation": "", "edited": false, @@ -968,12 +968,35 @@ "legacy": false, "lf_version": "1.4.3", "metadata": { + "code_hash": "bb5f8714781b", + "dependencies": { + "dependencies": [ + { + "name": "langchain_anthropic", + "version": "0.3.14" + }, + { + "name": "langchain_google_genai", + "version": "2.0.6" + }, + { + "name": "langchain_openai", + "version": "0.3.23" + }, + { + "name": "lfx", + "version": null + } + ], + "total_dependencies": 4 + }, "keywords": [ "model", "llm", "language model", "large language model" - ] + ], + "module": "lfx.components.models.language_model.LanguageModelComponent" }, "minimized": false, "output_types": [], @@ -985,8 +1008,6 @@ "group_outputs": false, "method": "text_response", "name": "text_output", - "options": null, - "required_inputs": null, "selected": "Message", "tool_mode": true, "types": [ @@ -1001,8 +1022,6 @@ "group_outputs": false, "method": "build_model", "name": "model_output", - "options": null, - "required_inputs": null, "selected": "LanguageModel", "tool_mode": true, "types": [ @@ -1049,7 +1068,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from typing import Any\n\nfrom langchain_anthropic import ChatAnthropic\nfrom langchain_google_genai import ChatGoogleGenerativeAI\nfrom langchain_openai import ChatOpenAI\n\nfrom langflow.base.models.anthropic_constants import ANTHROPIC_MODELS\nfrom langflow.base.models.google_generative_ai_constants import GOOGLE_GENERATIVE_AI_MODELS\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import OPENAI_CHAT_MODEL_NAMES, OPENAI_REASONING_MODEL_NAMES\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs.inputs import BoolInput\nfrom langflow.io import DropdownInput, MessageInput, MultilineInput, SecretStrInput, SliderInput\nfrom langflow.schema.dotdict import dotdict\n\n\nclass LanguageModelComponent(LCModelComponent):\n display_name = \"Language Model\"\n description = \"Runs a language model given a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-models\"\n icon = \"brain-circuit\"\n category = \"models\"\n priority = 0 # Set priority to 0 to make it appear first\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Anthropic\", \"Google\"],\n value=\"OpenAI\",\n info=\"Select the model provider\",\n real_time_refresh=True,\n 
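Both copies of `_serialize_data` above share one pattern worth isolating: normalize an arbitrary payload with FastAPI's `jsonable_encoder`, pretty-print it with `orjson.OPT_INDENT_2`, and wrap the result in a Markdown fence so chat UIs render it as code. A minimal sketch assuming `fastapi` and `orjson` are installed; `payload` is an illustrative stand-in for the component's `data.data`:

```python
import datetime

import orjson
from fastapi.encoders import jsonable_encoder


def to_markdown_json(payload: object) -> str:
    # jsonable_encoder turns datetimes, pydantic models, sets, etc.
    # into JSON-native equivalents before serialization.
    serializable = jsonable_encoder(payload)
    # orjson.dumps returns bytes; OPT_INDENT_2 enables pretty printing.
    json_bytes = orjson.dumps(serializable, option=orjson.OPT_INDENT_2)
    # The component inlines the fence literally; built here to keep this
    # example fence-safe.
    fence = "`" * 3
    return fence + "json\n" + json_bytes.decode("utf-8") + "\n" + fence


print(to_markdown_json({"text": "hi", "ts": datetime.datetime(2025, 1, 1)}))
```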
options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Anthropic\"}, {\"icon\": \"GoogleGenerativeAI\"}],\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n options=OPENAI_CHAT_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES,\n value=OPENAI_CHAT_MODEL_NAMES[0],\n info=\"Select the model to use\",\n real_time_refresh=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=False,\n show=True,\n real_time_refresh=True,\n ),\n MessageInput(\n name=\"input_value\",\n display_name=\"Input\",\n info=\"The input text to send to the model\",\n ),\n MultilineInput(\n name=\"system_message\",\n display_name=\"System Message\",\n info=\"A system message that helps set the behavior of the assistant\",\n advanced=False,\n ),\n BoolInput(\n name=\"stream\",\n display_name=\"Stream\",\n info=\"Whether to stream the response\",\n value=False,\n advanced=True,\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n info=\"Controls randomness in responses\",\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n advanced=True,\n ),\n ]\n\n def build_model(self) -> LanguageModel:\n provider = self.provider\n model_name = self.model_name\n temperature = self.temperature\n stream = self.stream\n\n if provider == \"OpenAI\":\n if not self.api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n raise ValueError(msg)\n\n if model_name in OPENAI_REASONING_MODEL_NAMES:\n # reasoning models do not support temperature (yet)\n temperature = None\n\n return ChatOpenAI(\n model_name=model_name,\n temperature=temperature,\n streaming=stream,\n openai_api_key=self.api_key,\n )\n if provider == \"Anthropic\":\n if not self.api_key:\n msg = \"Anthropic API key is required when using Anthropic provider\"\n raise ValueError(msg)\n return ChatAnthropic(\n model=model_name,\n temperature=temperature,\n streaming=stream,\n anthropic_api_key=self.api_key,\n )\n if provider == \"Google\":\n if not self.api_key:\n msg = \"Google API key is required when using Google provider\"\n raise ValueError(msg)\n return ChatGoogleGenerativeAI(\n model=model_name,\n temperature=temperature,\n streaming=stream,\n google_api_key=self.api_key,\n )\n msg = f\"Unknown provider: {provider}\"\n raise ValueError(msg)\n\n def update_build_config(self, build_config: dotdict, field_value: Any, field_name: str | None = None) -> dotdict:\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model_name\"][\"options\"] = OPENAI_CHAT_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES\n build_config[\"model_name\"][\"value\"] = OPENAI_CHAT_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n elif field_value == \"Anthropic\":\n build_config[\"model_name\"][\"options\"] = ANTHROPIC_MODELS\n build_config[\"model_name\"][\"value\"] = ANTHROPIC_MODELS[0]\n build_config[\"api_key\"][\"display_name\"] = \"Anthropic API Key\"\n elif field_value == \"Google\":\n build_config[\"model_name\"][\"options\"] = GOOGLE_GENERATIVE_AI_MODELS\n build_config[\"model_name\"][\"value\"] = GOOGLE_GENERATIVE_AI_MODELS[0]\n build_config[\"api_key\"][\"display_name\"] = \"Google API Key\"\n elif field_name == \"model_name\" and field_value.startswith(\"o1\") and self.provider == \"OpenAI\":\n # Hide system_message for o1 models - currently unsupported\n if \"system_message\" in build_config:\n build_config[\"system_message\"][\"show\"] = False\n elif field_name == \"model_name\" and not 
field_value.startswith(\"o1\") and \"system_message\" in build_config:\n build_config[\"system_message\"][\"show\"] = True\n return build_config\n" + "value": "from typing import Any\n\nfrom langchain_anthropic import ChatAnthropic\nfrom langchain_google_genai import ChatGoogleGenerativeAI\nfrom langchain_openai import ChatOpenAI\n\nfrom lfx.base.models.anthropic_constants import ANTHROPIC_MODELS\nfrom lfx.base.models.google_generative_ai_constants import GOOGLE_GENERATIVE_AI_MODELS\nfrom lfx.base.models.model import LCModelComponent\nfrom lfx.base.models.openai_constants import OPENAI_CHAT_MODEL_NAMES, OPENAI_REASONING_MODEL_NAMES\nfrom lfx.field_typing import LanguageModel\nfrom lfx.field_typing.range_spec import RangeSpec\nfrom lfx.inputs.inputs import BoolInput\nfrom lfx.io import DropdownInput, MessageInput, MultilineInput, SecretStrInput, SliderInput\nfrom lfx.schema.dotdict import dotdict\n\n\nclass LanguageModelComponent(LCModelComponent):\n display_name = \"Language Model\"\n description = \"Runs a language model given a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-models\"\n icon = \"brain-circuit\"\n category = \"models\"\n priority = 0 # Set priority to 0 to make it appear first\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Anthropic\", \"Google\"],\n value=\"OpenAI\",\n info=\"Select the model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Anthropic\"}, {\"icon\": \"GoogleGenerativeAI\"}],\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n options=OPENAI_CHAT_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES,\n value=OPENAI_CHAT_MODEL_NAMES[0],\n info=\"Select the model to use\",\n real_time_refresh=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=False,\n show=True,\n real_time_refresh=True,\n ),\n MessageInput(\n name=\"input_value\",\n display_name=\"Input\",\n info=\"The input text to send to the model\",\n ),\n MultilineInput(\n name=\"system_message\",\n display_name=\"System Message\",\n info=\"A system message that helps set the behavior of the assistant\",\n advanced=False,\n ),\n BoolInput(\n name=\"stream\",\n display_name=\"Stream\",\n info=\"Whether to stream the response\",\n value=False,\n advanced=True,\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n info=\"Controls randomness in responses\",\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n advanced=True,\n ),\n ]\n\n def build_model(self) -> LanguageModel:\n provider = self.provider\n model_name = self.model_name\n temperature = self.temperature\n stream = self.stream\n\n if provider == \"OpenAI\":\n if not self.api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n raise ValueError(msg)\n\n if model_name in OPENAI_REASONING_MODEL_NAMES:\n # reasoning models do not support temperature (yet)\n temperature = None\n\n return ChatOpenAI(\n model_name=model_name,\n temperature=temperature,\n streaming=stream,\n openai_api_key=self.api_key,\n )\n if provider == \"Anthropic\":\n if not self.api_key:\n msg = \"Anthropic API key is required when using Anthropic provider\"\n raise ValueError(msg)\n return ChatAnthropic(\n model=model_name,\n temperature=temperature,\n streaming=stream,\n anthropic_api_key=self.api_key,\n )\n if provider == \"Google\":\n if not self.api_key:\n msg = \"Google API key is 
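The `update_build_config` hook above follows one rule when the provider dropdown changes: swap the model list, reset the selection to its first entry, and relabel the API-key field. The same table-driven shape, with placeholder model lists standing in for the constants imported from `lfx.base.models`:

```python
# Placeholder model lists; the component imports the real constants.
MODEL_OPTIONS = {
    "OpenAI": ["gpt-4o", "gpt-4o-mini"],
    "Anthropic": ["claude-3-5-sonnet-latest"],
    "Google": ["gemini-1.5-pro"],
}


def on_provider_change(build_config: dict, provider: str) -> dict:
    options = MODEL_OPTIONS[provider]
    build_config["model_name"]["options"] = options
    build_config["model_name"]["value"] = options[0]  # reset to first entry
    build_config["api_key"]["display_name"] = f"{provider} API Key"
    return build_config


cfg = {"model_name": {}, "api_key": {}}
print(on_provider_change(cfg, "Anthropic")["api_key"]["display_name"])
# -> Anthropic API Key
```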
required when using Google provider\"\n raise ValueError(msg)\n return ChatGoogleGenerativeAI(\n model=model_name,\n temperature=temperature,\n streaming=stream,\n google_api_key=self.api_key,\n )\n msg = f\"Unknown provider: {provider}\"\n raise ValueError(msg)\n\n def update_build_config(self, build_config: dotdict, field_value: Any, field_name: str | None = None) -> dotdict:\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model_name\"][\"options\"] = OPENAI_CHAT_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES\n build_config[\"model_name\"][\"value\"] = OPENAI_CHAT_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n elif field_value == \"Anthropic\":\n build_config[\"model_name\"][\"options\"] = ANTHROPIC_MODELS\n build_config[\"model_name\"][\"value\"] = ANTHROPIC_MODELS[0]\n build_config[\"api_key\"][\"display_name\"] = \"Anthropic API Key\"\n elif field_value == \"Google\":\n build_config[\"model_name\"][\"options\"] = GOOGLE_GENERATIVE_AI_MODELS\n build_config[\"model_name\"][\"value\"] = GOOGLE_GENERATIVE_AI_MODELS[0]\n build_config[\"api_key\"][\"display_name\"] = \"Google API Key\"\n elif field_name == \"model_name\" and field_value.startswith(\"o1\") and self.provider == \"OpenAI\":\n # Hide system_message for o1 models - currently unsupported\n if \"system_message\" in build_config:\n build_config[\"system_message\"][\"show\"] = False\n elif field_name == \"model_name\" and not field_value.startswith(\"o1\") and \"system_message\" in build_config:\n build_config[\"system_message\"][\"show\"] = True\n return build_config\n" }, "input_value": { "_input_type": "MessageInput", @@ -1089,11 +1108,20 @@ "gpt-4.1", "gpt-4.1-mini", "gpt-4.1-nano", - "gpt-4.5-preview", "gpt-4-turbo", "gpt-4-turbo-preview", "gpt-4", - "gpt-3.5-turbo" + "gpt-3.5-turbo", + "gpt-5", + "gpt-5-mini", + "gpt-5-nano", + "gpt-5-chat-latest", + "o1", + "o3-mini", + "o3", + "o3-pro", + "o4-mini", + "o4-mini-high" ], "options_metadata": [], "placeholder": "", @@ -1302,7 +1330,7 @@ "show": true, "title_case": false, "type": "code", - "value": "\"\"\"Enhanced file component with clearer structure and Docling isolation.\n\nNotes:\n-----\n- Functionality is preserved with minimal behavioral changes.\n- ALL Docling parsing/export runs in a separate OS process to prevent memory\n growth and native library state from impacting the main Langflow process.\n- Standard text/structured parsing continues to use existing BaseFileComponent\n utilities (and optional threading via `parallel_load_data`).\n\"\"\"\n\nfrom __future__ import annotations\n\nimport json\nimport subprocess\nimport sys\nimport textwrap\nfrom copy import deepcopy\nfrom typing import TYPE_CHECKING, Any\n\nfrom langflow.base.data.base_file import BaseFileComponent\nfrom langflow.base.data.utils import TEXT_FILE_TYPES, parallel_load_data, parse_text_file_to_data\nfrom langflow.io import (\n BoolInput,\n DropdownInput,\n FileInput,\n IntInput,\n MessageTextInput,\n Output,\n StrInput,\n)\nfrom langflow.schema.data import Data\nfrom langflow.schema.message import Message\n\nif TYPE_CHECKING:\n from langflow.schema import DataFrame\n\n\nclass FileComponent(BaseFileComponent):\n \"\"\"File component with optional Docling processing (isolated in a subprocess).\"\"\"\n\n display_name = \"File\"\n description = \"Loads content from files with optional advanced document processing and export using Docling.\"\n documentation: str = \"https://docs.langflow.org/components-data#file\"\n icon = \"file-text\"\n name 
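`build_model` above repeats the same steps per provider: require an API key, then construct the provider's chat model with provider-specific keyword names. One way to see the shape is a registry keyed by provider; the constructor keywords are taken verbatim from the component code above, and the three `langchain_*` packages are assumed installed. This is a sketch of the dispatch pattern, not the component's actual method:

```python
from langchain_anthropic import ChatAnthropic
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_openai import ChatOpenAI

# Each factory mirrors the keyword names used in build_model above.
_FACTORIES = {
    "OpenAI": lambda key, model, temp, stream: ChatOpenAI(
        model_name=model, temperature=temp, streaming=stream, openai_api_key=key
    ),
    "Anthropic": lambda key, model, temp, stream: ChatAnthropic(
        model=model, temperature=temp, streaming=stream, anthropic_api_key=key
    ),
    "Google": lambda key, model, temp, stream: ChatGoogleGenerativeAI(
        model=model, temperature=temp, streaming=stream, google_api_key=key
    ),
}


def build_model(provider: str, api_key: str, model: str, temperature: float | None, stream: bool):
    if not api_key:
        msg = f"{provider} API key is required when using {provider} provider"
        raise ValueError(msg)
    try:
        factory = _FACTORIES[provider]
    except KeyError:
        msg = f"Unknown provider: {provider}"
        raise ValueError(msg) from None
    return factory(api_key, model, temperature, stream)
```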
= \"File\"\n\n # Docling-supported/compatible extensions; TEXT_FILE_TYPES are supported by the base loader.\n VALID_EXTENSIONS = [\n \"adoc\",\n \"asciidoc\",\n \"asc\",\n \"bmp\",\n \"csv\",\n \"dotx\",\n \"dotm\",\n \"docm\",\n \"docx\",\n \"htm\",\n \"html\",\n \"jpeg\",\n \"json\",\n \"md\",\n \"pdf\",\n \"png\",\n \"potx\",\n \"ppsx\",\n \"pptm\",\n \"potm\",\n \"ppsm\",\n \"pptx\",\n \"tiff\",\n \"txt\",\n \"xls\",\n \"xlsx\",\n \"xhtml\",\n \"xml\",\n \"webp\",\n *TEXT_FILE_TYPES,\n ]\n\n # Fixed export settings used when markdown export is requested.\n EXPORT_FORMAT = \"Markdown\"\n IMAGE_MODE = \"placeholder\"\n\n # ---- Inputs / Outputs (kept as close to original as possible) -------------------\n _base_inputs = deepcopy(BaseFileComponent._base_inputs)\n for input_item in _base_inputs:\n if isinstance(input_item, FileInput) and input_item.name == \"path\":\n input_item.real_time_refresh = True\n break\n\n inputs = [\n *_base_inputs,\n BoolInput(\n name=\"advanced_mode\",\n display_name=\"Advanced Parser\",\n value=False,\n real_time_refresh=True,\n info=(\n \"Enable advanced document processing and export with Docling for PDFs, images, and office documents. \"\n \"Available only for single file processing.\"\n ),\n show=False,\n ),\n DropdownInput(\n name=\"pipeline\",\n display_name=\"Pipeline\",\n info=\"Docling pipeline to use\",\n options=[\"standard\", \"vlm\"],\n value=\"standard\",\n advanced=True,\n ),\n DropdownInput(\n name=\"ocr_engine\",\n display_name=\"OCR Engine\",\n info=\"OCR engine to use. Only available when pipeline is set to 'standard'.\",\n options=[\"\", \"easyocr\"],\n value=\"\",\n show=False,\n advanced=True,\n ),\n StrInput(\n name=\"md_image_placeholder\",\n display_name=\"Image placeholder\",\n info=\"Specify the image placeholder for markdown exports.\",\n value=\"\",\n advanced=True,\n show=False,\n ),\n StrInput(\n name=\"md_page_break_placeholder\",\n display_name=\"Page break placeholder\",\n info=\"Add this placeholder between pages in the markdown output.\",\n value=\"\",\n advanced=True,\n show=False,\n ),\n MessageTextInput(\n name=\"doc_key\",\n display_name=\"Doc Key\",\n info=\"The key to use for the DoclingDocument column.\",\n value=\"doc\",\n advanced=True,\n show=False,\n ),\n # Deprecated input retained for backward-compatibility.\n BoolInput(\n name=\"use_multithreading\",\n display_name=\"[Deprecated] Use Multithreading\",\n advanced=True,\n value=True,\n info=\"Set 'Processing Concurrency' greater than 1 to enable multithreading.\",\n ),\n IntInput(\n name=\"concurrency_multithreading\",\n display_name=\"Processing Concurrency\",\n advanced=True,\n info=\"When multiple files are being processed, the number of files to process concurrently.\",\n value=1,\n ),\n BoolInput(\n name=\"markdown\",\n display_name=\"Markdown Export\",\n info=\"Export processed documents to Markdown format. 
Only available when advanced mode is enabled.\",\n value=False,\n show=False,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Raw Content\", name=\"message\", method=\"load_files_message\"),\n ]\n\n # ------------------------------ UI helpers --------------------------------------\n\n def _path_value(self, template: dict) -> list[str]:\n \"\"\"Return the list of currently selected file paths from the template.\"\"\"\n return template.get(\"path\", {}).get(\"file_path\", [])\n\n def update_build_config(\n self,\n build_config: dict[str, Any],\n field_value: Any,\n field_name: str | None = None,\n ) -> dict[str, Any]:\n \"\"\"Show/hide Advanced Parser and related fields based on selection context.\"\"\"\n if field_name == \"path\":\n paths = self._path_value(build_config)\n file_path = paths[0] if paths else \"\"\n file_count = len(field_value) if field_value else 0\n\n # Advanced mode only for single (non-tabular) file\n allow_advanced = file_count == 1 and not file_path.endswith((\".csv\", \".xlsx\", \".parquet\"))\n build_config[\"advanced_mode\"][\"show\"] = allow_advanced\n if not allow_advanced:\n build_config[\"advanced_mode\"][\"value\"] = False\n for f in (\"pipeline\", \"ocr_engine\", \"doc_key\", \"md_image_placeholder\", \"md_page_break_placeholder\"):\n if f in build_config:\n build_config[f][\"show\"] = False\n\n elif field_name == \"advanced_mode\":\n for f in (\"pipeline\", \"ocr_engine\", \"doc_key\", \"md_image_placeholder\", \"md_page_break_placeholder\"):\n if f in build_config:\n build_config[f][\"show\"] = bool(field_value)\n\n return build_config\n\n def update_outputs(self, frontend_node: dict[str, Any], field_name: str, field_value: Any) -> dict[str, Any]: # noqa: ARG002\n \"\"\"Dynamically show outputs based on file count/type and advanced mode.\"\"\"\n if field_name not in [\"path\", \"advanced_mode\"]:\n return frontend_node\n\n template = frontend_node.get(\"template\", {})\n paths = self._path_value(template)\n if not paths:\n return frontend_node\n\n frontend_node[\"outputs\"] = []\n if len(paths) == 1:\n file_path = paths[0] if field_name == \"path\" else frontend_node[\"template\"][\"path\"][\"file_path\"][0]\n if file_path.endswith((\".csv\", \".xlsx\", \".parquet\")):\n frontend_node[\"outputs\"].append(\n Output(display_name=\"Structured Content\", name=\"dataframe\", method=\"load_files_structured\"),\n )\n elif file_path.endswith(\".json\"):\n frontend_node[\"outputs\"].append(\n Output(display_name=\"Structured Content\", name=\"json\", method=\"load_files_json\"),\n )\n\n advanced_mode = frontend_node.get(\"template\", {}).get(\"advanced_mode\", {}).get(\"value\", False)\n if advanced_mode:\n frontend_node[\"outputs\"].append(\n Output(display_name=\"Structured Output\", name=\"advanced\", method=\"load_files_advanced\"),\n )\n frontend_node[\"outputs\"].append(\n Output(display_name=\"Markdown\", name=\"markdown\", method=\"load_files_markdown\"),\n )\n frontend_node[\"outputs\"].append(\n Output(display_name=\"File Path\", name=\"path\", method=\"load_files_path\"),\n )\n else:\n frontend_node[\"outputs\"].append(\n Output(display_name=\"Raw Content\", name=\"message\", method=\"load_files_message\"),\n )\n frontend_node[\"outputs\"].append(\n Output(display_name=\"File Path\", name=\"path\", method=\"load_files_path\"),\n )\n else:\n # Multiple files => DataFrame output; advanced parser disabled\n frontend_node[\"outputs\"].append(Output(display_name=\"Files\", name=\"dataframe\", method=\"load_files\"))\n\n return frontend_node\n\n # 
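`update_outputs` above boils down to a routing table over file count, extension, and the Advanced Parser flag. A dependency-free sketch that returns just the output names a given selection would expose (`outputs_for` is an illustrative helper, not part of the component API):

```python
TABULAR = (".csv", ".xlsx", ".parquet")


def outputs_for(paths: list[str], advanced_mode: bool) -> list[str]:
    if len(paths) != 1:
        return ["dataframe"]  # multiple files -> single DataFrame output
    outputs: list[str] = []
    path = paths[0].lower()
    if path.endswith(TABULAR):
        outputs.append("dataframe")  # structured content
    elif path.endswith(".json"):
        outputs.append("json")
    if advanced_mode:
        outputs += ["advanced", "markdown", "path"]
    else:
        outputs += ["message", "path"]
    return outputs


print(outputs_for(["report.pdf"], advanced_mode=True))   # advanced, markdown, path
print(outputs_for(["a.txt", "b.txt"], advanced_mode=False))  # dataframe
```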
------------------------------ Core processing ----------------------------------\n\n def _is_docling_compatible(self, file_path: str) -> bool:\n \"\"\"Lightweight extension gate for Docling-compatible types.\"\"\"\n docling_exts = (\n \".adoc\",\n \".asciidoc\",\n \".asc\",\n \".bmp\",\n \".csv\",\n \".dotx\",\n \".dotm\",\n \".docm\",\n \".docx\",\n \".htm\",\n \".html\",\n \".jpeg\",\n \".json\",\n \".md\",\n \".pdf\",\n \".png\",\n \".potx\",\n \".ppsx\",\n \".pptm\",\n \".potm\",\n \".ppsm\",\n \".pptx\",\n \".tiff\",\n \".txt\",\n \".xls\",\n \".xlsx\",\n \".xhtml\",\n \".xml\",\n \".webp\",\n )\n return file_path.lower().endswith(docling_exts)\n\n def _process_docling_in_subprocess(self, file_path: str) -> Data | None:\n \"\"\"Run Docling in a separate OS process and map the result to a Data object.\n\n We avoid multiprocessing pickling by launching `python -c \"
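As the `_process_docling_in_subprocess` docstring above explains, Docling runs in a separate OS process launched with `python -c`, exchanging JSON instead of pickled objects, so native-library memory is reclaimed when the child exits. A stdlib-only sketch of that pattern under those assumptions; the worker body is a placeholder, not the actual Docling invocation:

```python
import json
import subprocess
import sys
import textwrap

# Placeholder worker: reads a JSON request on stdin, writes a JSON
# result on stdout. The real worker would import and run Docling here.
WORKER = textwrap.dedent(
    """
    import json, sys
    payload = json.load(sys.stdin)
    # ... heavy native parsing would happen here ...
    json.dump({"ok": True, "file": payload["file_path"]}, sys.stdout)
    """
)


def run_isolated(file_path: str, timeout: float = 300.0) -> dict:
    proc = subprocess.run(
        [sys.executable, "-c", WORKER],          # fresh interpreter per job
        input=json.dumps({"file_path": file_path}),
        capture_output=True,
        text=True,
        timeout=timeout,
        check=False,
    )
    if proc.returncode != 0:
        raise RuntimeError(f"worker failed: {proc.stderr.strip()}")
    return json.loads(proc.stdout)
```

Compared with `multiprocessing`, the `python -c` launch avoids pickling component objects and keeps the child from inheriting the parent's module state, which is the isolation property the docstring is after.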