diff --git a/.env.example b/.env.example
index e4e9713b442d..d0ce231b1358 100644
--- a/.env.example
+++ b/.env.example
@@ -104,7 +104,7 @@ LANGFLOW_STORE_ENVIRONMENT_VARIABLES=
 # Should enable the MCP composer feature in MCP projects
 # Values: true, false
 # Default: true
-LANGFLOW_FEATURE_MCP_COMPOSER=
+LANGFLOW_MCP_COMPOSER_ENABLED=

 # STORE_URL
 # Example: LANGFLOW_STORE_URL=https://api.langflow.store
diff --git a/.gitignore b/.gitignore
index e9fcace844db..919e95c80e57 100644
--- a/.gitignore
+++ b/.gitignore
@@ -280,4 +280,6 @@ src/frontend/temp
 *.mcp.json
 news-aggregated.json
-CLAUDE.md
\ No newline at end of file
+CLAUDE.md
+
+member_servers.json
\ No newline at end of file
diff --git a/docker/build_and_push.Dockerfile b/docker/build_and_push.Dockerfile
index cb93b2c9d5dd..407f08f95c92 100644
--- a/docker/build_and_push.Dockerfile
+++ b/docker/build_and_push.Dockerfile
@@ -96,3 +96,4 @@ ENV LANGFLOW_HOST=0.0.0.0
 ENV LANGFLOW_PORT=7860

 CMD ["langflow", "run"]
+
diff --git a/docs/docs/Integrations/Cleanlab/eval_and_remediate_cleanlab.json b/docs/docs/Integrations/Cleanlab/eval_and_remediate_cleanlab.json
index 8ab161aae3af..09c6a31dc0ec 100644
--- a/docs/docs/Integrations/Cleanlab/eval_and_remediate_cleanlab.json
+++ b/docs/docs/Integrations/Cleanlab/eval_and_remediate_cleanlab.json
@@ -892,7 +892,7 @@
         "tool_mode": false,
         "trace_as_metadata": true,
         "type": "str",
-        "value": "gpt-4o"
+        "value": "gpt-4.1"
       },
       "openai_api_base": {
         "_input_type": "StrInput",
@@ -1200,7 +1200,7 @@
         "tool_mode": false,
         "trace_as_metadata": true,
         "type": "str",
-        "value": "gpt-4o-mini"
+        "value": "gpt-4.1"
       },
       "prompt": {
         "_input_type": "MessageTextInput",
diff --git a/docs/docs/Integrations/Notion/Conversational_Notion_Agent.json b/docs/docs/Integrations/Notion/Conversational_Notion_Agent.json
index 8c0126b599e2..edf1dd06ee75 100644
--- a/docs/docs/Integrations/Notion/Conversational_Notion_Agent.json
+++ b/docs/docs/Integrations/Notion/Conversational_Notion_Agent.json
@@ -2811,7 +2811,7 @@
         "tool_mode": false,
         "trace_as_metadata": true,
         "type": "str",
-        "value": "gpt-4o"
+        "value": "gpt-4.1"
       },
       "openai_api_base": {
         "_input_type": "StrInput",
diff --git a/docs/docs/Integrations/Notion/Meeting_Notes_Agent.json b/docs/docs/Integrations/Notion/Meeting_Notes_Agent.json
index e567567bab6e..bc46e70bc113 100644
--- a/docs/docs/Integrations/Notion/Meeting_Notes_Agent.json
+++ b/docs/docs/Integrations/Notion/Meeting_Notes_Agent.json
@@ -1093,7 +1093,7 @@
         "placeholder": "",
         "show": true,
         "name": "model_name",
-        "value": "gpt-4o",
+        "value": "gpt-4.1",
         "display_name": "Model Name",
         "advanced": true,
         "dynamic": false,
@@ -1878,7 +1878,7 @@
         "placeholder": "",
         "show": true,
         "name": "model_name",
-        "value": "gpt-4o",
+        "value": "gpt-4.1",
         "display_name": "Model Name",
         "advanced": true,
         "dynamic": false,
diff --git a/pyproject.toml b/pyproject.toml
index 6f042c452ae2..5fa6b18a15dc 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -122,13 +122,15 @@ dependencies = [
     "cleanlab-tlm>=1.1.2",
     'gassist>=0.0.1; sys_platform == "win32"',
     "twelvelabs>=0.4.7",
-    "docling_core>=2.36.1",
+    "docling>=2.36.1",
     "filelock>=3.18.0",
     "jigsawstack==0.2.7",
     "structlog>=25.4.0",
     "aiosqlite==0.21.0",
     "fastparquet>=2024.11.0",
     "traceloop-sdk>=0.43.1",
+    "easyocr>=1.7.2",
+    "opencv-python>=4.11",
 ]

 [dependency-groups]
@@ -179,7 +181,6 @@ dev = [
     "pytest-timeout>=2.3.1",
     "pyyaml>=6.0.2",
     "pyleak>=0.1.14",
-    "docling>=2.36.1"
 ]

 [tool.uv.sources]
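The renamed environment variable corresponds to the new `settings.mcp_composer_enabled` field referenced throughout this patch. A minimal sketch of how such a field typically maps onto a `LANGFLOW_`-prefixed variable, assuming a pydantic-settings model (the class here is illustrative, not Langflow's actual Settings):

from pydantic_settings import BaseSettings, SettingsConfigDict


class Settings(BaseSettings):
    # Illustrative only: the real Settings class defines many more fields.
    model_config = SettingsConfigDict(env_prefix="LANGFLOW_")

    # LANGFLOW_MCP_COMPOSER_ENABLED=true|false in .env populates this field.
    mcp_composer_enabled: bool = True


settings = Settings()  # reads the environment on instantiation
print(settings.mcp_composer_enabled)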
538b541a951f..34b776773bed 100644 --- a/src/backend/base/langflow/__main__.py +++ b/src/backend/base/langflow/__main__.py @@ -683,7 +683,7 @@ async def _create_superuser(username: str, password: str, auth_token: str | None if settings_service.auth_settings.AUTO_LOGIN: # Force default credentials for AUTO_LOGIN mode username = DEFAULT_SUPERUSER - password = DEFAULT_SUPERUSER_PASSWORD + password = DEFAULT_SUPERUSER_PASSWORD.get_secret_value() else: # Production mode - prompt for credentials if not provided if not username: @@ -711,7 +711,7 @@ async def _create_superuser(username: str, password: str, auth_token: str | None raise typer.Exit(1) typer.echo(f"AUTO_LOGIN enabled. Creating default superuser '{username}'...") - typer.echo(f"Note: Default credentials are {DEFAULT_SUPERUSER}/{DEFAULT_SUPERUSER_PASSWORD}") + # Do not echo the default password to avoid exposing it in logs. # AUTO_LOGIN is false - production mode elif is_first_setup: typer.echo("No superusers found. Creating first superuser...") diff --git a/src/backend/base/langflow/alembic/versions/d37bc4322900_drop_single_constraint_on_files_name_.py b/src/backend/base/langflow/alembic/versions/d37bc4322900_drop_single_constraint_on_files_name_.py new file mode 100644 index 000000000000..6dd28e44f8d0 --- /dev/null +++ b/src/backend/base/langflow/alembic/versions/d37bc4322900_drop_single_constraint_on_files_name_.py @@ -0,0 +1,222 @@ +"""Drop single constraint on file.name column + +Revision ID: d37bc4322900 +Revises: 0882f9657f22 +Create Date: 2025-09-15 11:11:37.610294 + +""" +import logging + +from typing import Sequence, Union + +import sqlalchemy as sa +from alembic import op + +logger = logging.getLogger(__name__) + +# revision identifiers, used by Alembic. +revision: str = "d37bc4322900" +down_revision: Union[str, None] = "0882f9657f22" +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + """Remove single UNIQUE constraint on name column while preserving composite constraint.""" + conn = op.get_bind() + inspector = sa.inspect(conn) + + # Check if file table exists + table_names = inspector.get_table_names() + if "file" not in table_names: + logger.info("file table does not exist, skipping") + return + + db_dialect = conn.dialect.name + logger.info(f"Running migration on {db_dialect} database") + + try: + if db_dialect == "sqlite": + # SQLite: Recreate table without single UNIQUE constraint + logger.info("SQLite: Recreating table to remove single UNIQUE constraint on name") + + # Guard against schema drift: ensure expected columns before destructive rebuild + res = conn.execute(sa.text('PRAGMA table_info("file")')) + cols = [row[1] for row in res] + expected = ['id', 'user_id', 'name', 'path', 'size', 'provider', 'created_at', 'updated_at'] + if set(cols) != set(expected): + raise RuntimeError(f"SQLite: Unexpected columns on file table: {cols}. 
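The `.get_secret_value()` change matters because `DEFAULT_SUPERUSER_PASSWORD` is evidently a pydantic `SecretStr`, which masks itself when interpolated into strings. A small sketch of the difference (the constant is redefined here purely for illustration):

from pydantic import SecretStr

DEFAULT_SUPERUSER_PASSWORD = SecretStr("langflow")

# Interpolation masks the value, so a password built this way would be wrong:
print(f"{DEFAULT_SUPERUSER_PASSWORD}")                 # **********
# get_secret_value() yields the underlying string:
print(DEFAULT_SUPERUSER_PASSWORD.get_secret_value())   # langflow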
diff --git a/src/backend/base/langflow/alembic/versions/d37bc4322900_drop_single_constraint_on_files_name_.py b/src/backend/base/langflow/alembic/versions/d37bc4322900_drop_single_constraint_on_files_name_.py
new file mode 100644
index 000000000000..6dd28e44f8d0
--- /dev/null
+++ b/src/backend/base/langflow/alembic/versions/d37bc4322900_drop_single_constraint_on_files_name_.py
@@ -0,0 +1,222 @@
+"""Drop single constraint on file.name column
+
+Revision ID: d37bc4322900
+Revises: 0882f9657f22
+Create Date: 2025-09-15 11:11:37.610294
+
+"""
+import logging
+
+from typing import Sequence, Union
+
+import sqlalchemy as sa
+from alembic import op
+
+logger = logging.getLogger(__name__)
+
+# revision identifiers, used by Alembic.
+revision: str = "d37bc4322900"
+down_revision: Union[str, None] = "0882f9657f22"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+    """Remove single UNIQUE constraint on name column while preserving composite constraint."""
+    conn = op.get_bind()
+    inspector = sa.inspect(conn)
+
+    # Check if file table exists
+    table_names = inspector.get_table_names()
+    if "file" not in table_names:
+        logger.info("file table does not exist, skipping")
+        return
+
+    db_dialect = conn.dialect.name
+    logger.info(f"Running migration on {db_dialect} database")
+
+    try:
+        if db_dialect == "sqlite":
+            # SQLite: Recreate table without single UNIQUE constraint
+            logger.info("SQLite: Recreating table to remove single UNIQUE constraint on name")
+
+            # Guard against schema drift: ensure expected columns before destructive rebuild
+            res = conn.execute(sa.text('PRAGMA table_info("file")'))
+            cols = [row[1] for row in res]
+            expected = ['id', 'user_id', 'name', 'path', 'size', 'provider', 'created_at', 'updated_at']
+            if set(cols) != set(expected):
+                raise RuntimeError(f"SQLite: Unexpected columns on file table: {cols}. Aborting migration to avoid data loss.")
+
+            # Create the new table without the single UNIQUE(name) constraint
+            op.execute("""
+                CREATE TABLE file_new (
+                    id CHAR(32) NOT NULL,
+                    user_id CHAR(32) NOT NULL,
+                    name VARCHAR NOT NULL,
+                    path VARCHAR NOT NULL,
+                    size INTEGER NOT NULL,
+                    provider VARCHAR,
+                    created_at DATETIME NOT NULL,
+                    updated_at DATETIME NOT NULL,
+                    PRIMARY KEY (id),
+                    CONSTRAINT file_name_user_id_key UNIQUE (name, user_id),
+                    FOREIGN KEY(user_id) REFERENCES user (id)
+                )
+            """)
+
+            # Copy data from old table to new table
+            op.execute("""
+                INSERT INTO file_new (id, user_id, name, path, size, provider, created_at, updated_at)
+                SELECT id, user_id, name, path, size, provider, created_at, updated_at
+                FROM file
+            """)
+
+            # Drop old table and rename new table
+            op.execute("PRAGMA foreign_keys=OFF")
+            try:
+                op.execute("DROP TABLE file")
+                op.execute("ALTER TABLE file_new RENAME TO file")
+            finally:
+                op.execute("PRAGMA foreign_keys=ON")
+
+            logger.info("SQLite: Successfully recreated file table without single UNIQUE constraint on name")
+
+        elif db_dialect == "postgresql":
+            # PostgreSQL: Find and drop single-column unique constraints on 'name'
+            logger.info("PostgreSQL: Finding and dropping single UNIQUE constraints and indexes on name")
+
+            # Determine target schema
+            schema = sa.inspect(conn).default_schema_name or "public"
+
+            # Get constraint names that are single-column unique on 'name'
+            result = conn.execute(sa.text("""
+                SELECT conname
+                FROM pg_constraint c
+                JOIN pg_class t ON c.conrelid = t.oid
+                JOIN pg_namespace n ON t.relnamespace = n.oid
+                WHERE t.relname = 'file'
+                AND n.nspname = :schema
+                AND c.contype = 'u'
+                AND array_length(c.conkey, 1) = 1
+                AND EXISTS (
+                    SELECT 1 FROM pg_attribute a
+                    WHERE a.attrelid = t.oid
+                    AND a.attnum = c.conkey[1]
+                    AND a.attname = 'name'
+                )
+            """), {"schema": schema})
+
+            constraints_to_drop = [row[0] for row in result.fetchall()]
+
+            if constraints_to_drop:
+                for constraint_name in constraints_to_drop:
+                    op.drop_constraint(constraint_name, "file", type_="unique", schema=schema)
+                    logger.info(f"PostgreSQL: Dropped constraint {constraint_name}")
+            else:
+                logger.info("PostgreSQL: No single UNIQUE constraints found on name column")
+
+            # Also drop any single-column UNIQUE indexes on name not backed by constraints
+            idx_result = conn.execute(sa.text("""
+                SELECT i.relname
+                FROM pg_class t
+                JOIN pg_namespace n ON n.oid = t.relnamespace
+                JOIN pg_index ix ON ix.indrelid = t.oid
+                JOIN pg_class i ON i.oid = ix.indexrelid
+                WHERE t.relname = 'file'
+                AND n.nspname = :schema
+                AND ix.indisunique = TRUE
+                AND array_length(ix.indkey, 1) = 1
+                AND NOT EXISTS (SELECT 1 FROM pg_constraint c WHERE c.conindid = ix.indexrelid)
+                AND (SELECT a.attname FROM pg_attribute a
+                     WHERE a.attrelid = t.oid AND a.attnum = ix.indkey[1]) = 'name'
+            """), {"schema": schema})
+            for (index_name,) in idx_result.fetchall():
+                op.drop_index(index_name, table_name="file", schema=schema)
+                logger.info(f"PostgreSQL: Dropped unique index {index_name}")
+
+        else:
+            raise ValueError(f"Unsupported database dialect: {db_dialect}")
+
+    except Exception as e:
+        logger.error(f"Error during constraint removal: {e}")
+        raise
+
+
+def downgrade() -> None:
+    """Add back the single unique constraint on name column."""
+    conn = op.get_bind()
+    inspector = sa.inspect(conn)
+
+    # Check if file table exists
+    table_names = inspector.get_table_names()
+    if "file" not in table_names:
+        logger.info("file table does not exist, skipping downgrade")
+        return
+
+    db_dialect = conn.dialect.name
+
+    try:
+        # Pre-check for duplicates that would violate UNIQUE(name)
+        dup = conn.execute(sa.text("SELECT name FROM file GROUP BY name HAVING COUNT(*) > 1 LIMIT 1")).first()
+        if dup:
+            raise RuntimeError(
+                "Downgrade aborted: duplicates in file.name would violate UNIQUE(name). "
+                "Deduplicate before downgrading."
+            )
+        if db_dialect == "sqlite":
+            # Add the same column validation as upgrade
+            res = conn.execute(sa.text('PRAGMA table_info("file")'))
+            cols = [row[1] for row in res]
+            expected = ['id', 'user_id', 'name', 'path', 'size', 'provider', 'created_at', 'updated_at']
+            if set(cols) != set(expected):
+                raise RuntimeError(f"SQLite: Unexpected columns on file table: {cols}. Aborting downgrade.")
+            # SQLite: Recreate table with both constraints
+            logger.info("SQLite: Recreating table with both constraints")
+
+            op.execute("""
+                CREATE TABLE file_new (
+                    id CHAR(32) NOT NULL,
+                    user_id CHAR(32) NOT NULL,
+                    name VARCHAR NOT NULL,
+                    path VARCHAR NOT NULL,
+                    size INTEGER NOT NULL,
+                    provider VARCHAR,
+                    created_at DATETIME NOT NULL,
+                    updated_at DATETIME NOT NULL,
+                    PRIMARY KEY (id),
+                    CONSTRAINT file_name_user_id_key UNIQUE (name, user_id),
+                    FOREIGN KEY(user_id) REFERENCES user (id),
+                    UNIQUE (name)
+                )
+            """)
+
+            # Copy data
+            op.execute("""
+                INSERT INTO file_new (id, user_id, name, path, size, provider, created_at, updated_at)
+                SELECT id, user_id, name, path, size, provider, created_at, updated_at
+                FROM file
+            """)
+
+            # Replace table
+            op.execute("PRAGMA foreign_keys=OFF")
+            try:
+                op.execute("DROP TABLE file")
+                op.execute("ALTER TABLE file_new RENAME TO file")
+            finally:
+                op.execute("PRAGMA foreign_keys=ON")
+
+            logger.info("SQLite: Restored single unique constraint on name column")
+
+        elif db_dialect == "postgresql":
+            # PostgreSQL: Add constraint back
+            schema = sa.inspect(conn).default_schema_name or "public"
+            op.create_unique_constraint("file_name_unique", "file", ["name"], schema=schema)
+            logger.info("PostgreSQL: Added back single unique constraint on 'name' column")
+
+        else:
+            logger.info(f"Downgrade not supported for dialect: {db_dialect}")
+
+    except Exception as e:
+        logger.error(f"Error during downgrade: {e}")
+        if "constraint" not in str(e).lower():
+            raise
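The effect of this migration is easiest to see on a scratch SQLite database: after the rebuild, the composite UNIQUE (name, user_id) still rejects duplicates within one user while allowing the same file name across users. A minimal sketch against a simplified version of the `file` schema:

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute(
    """CREATE TABLE file (
        id INTEGER PRIMARY KEY,
        user_id TEXT NOT NULL,
        name TEXT NOT NULL,
        CONSTRAINT file_name_user_id_key UNIQUE (name, user_id)
    )"""
)
conn.execute("INSERT INTO file (user_id, name) VALUES ('u1', 'report.csv')")
# Same name for a different user is now allowed (no single-column UNIQUE):
conn.execute("INSERT INTO file (user_id, name) VALUES ('u2', 'report.csv')")
try:
    # Duplicate within the same user still violates the composite constraint:
    conn.execute("INSERT INTO file (user_id, name) VALUES ('u1', 'report.csv')")
except sqlite3.IntegrityError as e:
    print(e)  # UNIQUE constraint failed: file.name, file.user_id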
diff --git a/src/backend/base/langflow/api/build.py b/src/backend/base/langflow/api/build.py
index 570e565041d6..bee09b71eda1 100644
--- a/src/backend/base/langflow/api/build.py
+++ b/src/backend/base/langflow/api/build.py
@@ -345,9 +345,14 @@ async def _build_vertex(vertex_id: str, graph: Graph, event_manager: EventManage
         result_data_response.duration = duration
         result_data_response.timedelta = timedelta
         vertex.add_build_time(timedelta)
-        inactivated_vertices = list(graph.inactivated_vertices)
+        # Capture both inactivated and conditionally excluded vertices
+        inactivated_vertices = list(graph.inactivated_vertices.union(graph.conditionally_excluded_vertices))
         graph.reset_inactivated_vertices()
         graph.reset_activated_vertices()
+
+        # Note: Do not reset conditionally_excluded_vertices each iteration.
+        # This is handled by the ConditionalRouter component.
+
         # graph.stop_vertex tells us if the user asked
         # to stop the build of the graph at a certain vertex
         # if it is in next_vertices_ids, we need to remove other
@@ -407,7 +412,7 @@ async def build_vertices(
     try:
         vertex_build_response: VertexBuildResponse = await _build_vertex(vertex_id, graph, event_manager)
    except asyncio.CancelledError as exc:
-        await logger.aerror(f"Build cancelled: {exc}")
+        await logger.ainfo(f"Build cancelled: {exc}")
        raise

    # send built event or error event
diff --git a/src/backend/base/langflow/api/v1/auth_helpers.py b/src/backend/base/langflow/api/v1/auth_helpers.py
new file mode 100644
index 000000000000..1c7450f31e68
--- /dev/null
+++ b/src/backend/base/langflow/api/v1/auth_helpers.py
@@ -0,0 +1,75 @@
+from typing import Any
+
+from pydantic import SecretStr
+
+from langflow.services.auth.mcp_encryption import decrypt_auth_settings, encrypt_auth_settings
+from langflow.services.database.models.folder.model import Folder
+
+
+def handle_auth_settings_update(
+    existing_project: Folder,
+    new_auth_settings: dict | Any | None,
+) -> dict[str, bool]:
+    """Handle auth settings update including encryption/decryption and MCP Composer logic.
+
+    Args:
+        existing_project: The project being updated (modified in-place)
+        new_auth_settings: New auth settings (could be dict, Pydantic model, or None)
+
+    Returns:
+        Dict containing:
+        - should_start_composer: bool
+        - should_stop_composer: bool
+        - should_handle_composer: bool
+    """
+    # Get current auth type before update
+    current_auth_type = None
+    decrypted_current = None
+    if existing_project.auth_settings:
+        current_auth_type = existing_project.auth_settings.get("auth_type")
+        # Only decrypt if we need access to sensitive fields (for preserving masked values)
+        if current_auth_type in ["oauth", "apikey"]:
+            decrypted_current = decrypt_auth_settings(existing_project.auth_settings)
+
+    if new_auth_settings is None:
+        # Explicitly set to None - clear auth settings
+        existing_project.auth_settings = None
+        # If we were using OAuth, stop the composer
+        return {"should_start_composer": False, "should_stop_composer": current_auth_type == "oauth"}
+
+    # Handle different input types (dict vs Pydantic model)
+    if isinstance(new_auth_settings, dict):
+        auth_dict = new_auth_settings.copy()
+    else:
+        # Pydantic model - use python mode to get raw values without SecretStr masking
+        auth_dict = new_auth_settings.model_dump(mode="python", exclude_none=True)
+
+        # Handle SecretStr fields
+        secret_fields = ["api_key", "oauth_client_secret"]
+        for field in secret_fields:
+            field_val = getattr(new_auth_settings, field, None)
+            if isinstance(field_val, SecretStr):
+                auth_dict[field] = field_val.get_secret_value()
+
+    new_auth_type = auth_dict.get("auth_type")
+
+    # Handle masked secret fields from frontend
+    # If frontend sends back "*******" for a secret field, preserve the existing value
+    if decrypted_current:
+        secret_fields = ["oauth_client_secret", "api_key"]
+        for field in secret_fields:
+            if field in auth_dict and auth_dict[field] == "*******" and field in decrypted_current:
+                auth_dict[field] = decrypted_current[field]
+
+    # Encrypt and store the auth settings
+    existing_project.auth_settings = encrypt_auth_settings(auth_dict)
+
+    # Determine MCP Composer actions
+    should_start_composer = new_auth_type == "oauth"
+    should_stop_composer = current_auth_type == "oauth" and new_auth_type != "oauth"
+    should_handle_composer = current_auth_type == "oauth" or new_auth_type == "oauth"
+
+    return {
+        "should_start_composer": should_start_composer,
+        "should_stop_composer": should_stop_composer,
+        "should_handle_composer": should_handle_composer,
+    }
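The masked-secret handling in `handle_auth_settings_update` can be exercised in isolation: when the frontend echoes back the `"*******"` placeholder, the previously stored value wins. A reduced sketch of that rule, with plain dicts standing in for the encrypted settings:

MASK = "*******"
SECRET_FIELDS = ("oauth_client_secret", "api_key")


def merge_masked_secrets(incoming: dict, decrypted_current: dict) -> dict:
    """Replace masked placeholders from the frontend with stored values."""
    merged = incoming.copy()
    for field in SECRET_FIELDS:
        if merged.get(field) == MASK and field in decrypted_current:
            merged[field] = decrypted_current[field]
    return merged


current = {"auth_type": "apikey", "api_key": "sk-stored-value"}
update = {"auth_type": "apikey", "api_key": "*******"}
print(merge_masked_secrets(update, current))  # keeps "sk-stored-value"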
diff --git a/src/backend/base/langflow/api/v1/mcp_projects.py b/src/backend/base/langflow/api/v1/mcp_projects.py
index ff9e8f7a45ad..714fa54cb08b 100644
--- a/src/backend/base/langflow/api/v1/mcp_projects.py
+++ b/src/backend/base/langflow/api/v1/mcp_projects.py
@@ -8,24 +8,27 @@
 from ipaddress import ip_address
 from pathlib import Path
 from subprocess import CalledProcessError
-from typing import Annotated, Any
+from typing import Annotated, Any, cast
 from uuid import UUID

 from anyio import BrokenResourceError
-from fastapi import APIRouter, Depends, HTTPException, Request, Response
+from fastapi import APIRouter, Depends, HTTPException, Request, Response, status
 from fastapi.responses import HTMLResponse
 from lfx.base.mcp.constants import MAX_MCP_SERVER_NAME_LENGTH
 from lfx.base.mcp.util import sanitize_mcp_name
 from lfx.log import logger
 from lfx.services.deps import get_settings_service, session_scope
-from lfx.services.settings.feature_flags import FEATURE_FLAGS
+from lfx.services.mcp_composer.service import MCPComposerError, MCPComposerService
+from lfx.services.schema import ServiceType
 from mcp import types
 from mcp.server import NotificationOptions, Server
 from mcp.server.sse import SseServerTransport
 from sqlalchemy.orm import selectinload
 from sqlmodel import select
+from sqlmodel.ext.asyncio.session import AsyncSession

 from langflow.api.utils import CurrentActiveMCPUser
+from langflow.api.v1.auth_helpers import handle_auth_settings_update
 from langflow.api.v1.mcp_utils import (
     current_request_variables_ctx,
     current_user_ctx,
@@ -43,31 +46,44 @@
     MCPSettings,
 )
 from langflow.services.auth.mcp_encryption import decrypt_auth_settings, encrypt_auth_settings
+from langflow.services.auth.utils import AUTO_LOGIN_WARNING
 from langflow.services.database.models import Flow, Folder
 from langflow.services.database.models.api_key.crud import check_key, create_api_key
-from langflow.services.database.models.api_key.model import ApiKeyCreate
+from langflow.services.database.models.api_key.model import ApiKey, ApiKeyCreate
+from langflow.services.database.models.user.crud import get_user_by_username
 from langflow.services.database.models.user.model import User
+from langflow.services.deps import get_service

 router = APIRouter(prefix="/mcp/project", tags=["mcp_projects"])


 async def verify_project_auth(
+    db: AsyncSession,
     project_id: UUID,
-    query_param: str | None = None,
-    header_param: str | None = None,
+    query_param: str,
+    header_param: str,
 ) -> User:
-    """Custom authentication for MCP project endpoints when API key is required.
+    """MCP-specific user authentication that allows fallback to username lookup when not using API key auth.

-    This is only used when MCP composer is enabled and project requires API key auth.
+    This function provides authentication for MCP endpoints when using MCP Composer and no API key is provided,
+    or checks if the API key is valid.
     """
-    async with session_scope() as session:
-        # First, get the project to check its auth settings
-        project = (await session.exec(select(Folder).where(Folder.id == project_id))).first()
+    settings_service = get_settings_service()
+    result: ApiKey | User | None

-        if not project:
-            raise HTTPException(status_code=404, detail="Project not found")
+    project = (await db.exec(select(Folder).where(Folder.id == project_id))).first()

-        # For MCP composer enabled, only use API key
+    if not project:
+        raise HTTPException(status_code=404, detail="Project not found")
+
+    auth_settings: AuthSettings | None = None
+    # Check if this project requires API key only authentication
+    if project.auth_settings:
+        auth_settings = AuthSettings(**project.auth_settings)
+
+    if (not auth_settings and not settings_service.auth_settings.AUTO_LOGIN) or (
+        auth_settings and auth_settings.auth_type == "apikey"
+    ):
         api_key = query_param or header_param
         if not api_key:
             raise HTTPException(
@@ -76,20 +92,36 @@
             )

         # Validate the API key
-        user = await check_key(session, api_key)
+        user = await check_key(db, api_key)
         if not user:
             raise HTTPException(status_code=401, detail="Invalid API key")

         # Verify user has access to the project
         project_access = (
-            await session.exec(select(Folder).where(Folder.id == project_id, Folder.user_id == user.id))
+            await db.exec(select(Folder).where(Folder.id == project_id, Folder.user_id == user.id))
         ).first()

         if not project_access:
-            raise HTTPException(status_code=403, detail="Access denied to this project")
+            raise HTTPException(status_code=404, detail="Project not found")

         return user

+    # Get the first user
+    if not settings_service.auth_settings.SUPERUSER:
+        raise HTTPException(
+            status_code=status.HTTP_400_BAD_REQUEST,
+            detail="Missing superuser username in auth settings",
+        )
+    # For MCP endpoints, always fall back to username lookup when no API key is provided
+    result = await get_user_by_username(db, settings_service.auth_settings.SUPERUSER)
+    if result:
+        await logger.awarning(AUTO_LOGIN_WARNING)
+        return result
+    raise HTTPException(
+        status_code=status.HTTP_403_FORBIDDEN,
+        detail="Invalid user",
+    )
+

 # Smart authentication dependency that chooses method based on project settings
 async def verify_project_auth_conditional(
@@ -108,16 +140,6 @@
     if not project:
         raise HTTPException(status_code=404, detail="Project not found")

-    # Check if this project requires API key only authentication
-    if FEATURE_FLAGS.mcp_composer and project.auth_settings:
-        auth_settings = AuthSettings(**project.auth_settings)
-        if auth_settings.auth_type == "apikey":
-            # For MCP composer projects with API key auth, use custom API key validation
-            api_key_header_value = request.headers.get("x-api-key")
-            api_key_query_value = request.query_params.get("x-api-key")
-            return await verify_project_auth(project_id, api_key_query_value, api_key_header_value)
-
-    # For all other cases, use standard MCP authentication (allows JWT + API keys)
     # Extract token
     token: str | None = None
     auth_header = request.headers.get("authorization")
@@ -128,6 +150,11 @@
     api_key_query_value = request.query_params.get("x-api-key")
     api_key_header_value = request.headers.get("x-api-key")

+    # Check if this project requires API key only authentication
+    if get_settings_service().settings.mcp_composer_enabled:
+        return await verify_project_auth(session, project_id, api_key_query_value, api_key_header_value)
+
+    # For all other cases, use standard MCP authentication (allows JWT + API keys)
     # Call the MCP auth function directly
     from langflow.services.auth.utils import get_current_user_mcp
@@ -153,8 +180,11 @@
 project_sse_transports = {}


-def get_project_sse(project_id: UUID) -> SseServerTransport:
+def get_project_sse(project_id: UUID | None) -> SseServerTransport:
     """Get or create an SSE transport for a specific project."""
+    if not project_id:
+        raise HTTPException(status_code=400, detail="Project ID is required to start MCP server")
+
     project_id_str = str(project_id)
     if project_id_str not in project_sse_transports:
         project_sse_transports[project_id_str] = SseServerTransport(f"/api/v1/mcp/project/{project_id_str}/")
@@ -167,7 +197,7 @@
     current_user: CurrentActiveMCPUser,
     *,
     mcp_enabled: bool = True,
-) -> MCPProjectResponse:
+) -> MCPProjectResponse | None:
     """List all tools in a project that are enabled for MCP."""
     tools: list[MCPSettings] = []
     try:
@@ -221,14 +251,19 @@
                 await logger.awarning(msg)
                 continue

-        # Get project-level auth settings and decrypt sensitive fields
+        # Get project-level auth settings but mask sensitive fields for security
         auth_settings = None
         if project.auth_settings:
-            from langflow.api.v1.schemas import AuthSettings
-
-            # Decrypt sensitive fields before returning
+            # Decrypt to get the settings structure
             decrypted_settings = decrypt_auth_settings(project.auth_settings)
-            auth_settings = AuthSettings(**decrypted_settings) if decrypted_settings else None
+            if decrypted_settings:
+                # Mask sensitive fields before sending to frontend
+                masked_settings = decrypted_settings.copy()
+                if masked_settings.get("oauth_client_secret"):
+                    masked_settings["oauth_client_secret"] = "*******"  # noqa: S105
+                if masked_settings.get("api_key"):
+                    masked_settings["api_key"] = "*******"
+                auth_settings = AuthSettings(**masked_settings)

     except Exception as e:
         msg = f"Error listing project tools: {e!s}"
@@ -362,7 +397,11 @@
     request: MCPProjectUpdateRequest,
     current_user: CurrentActiveMCPUser,
 ):
-    """Update the MCP settings of all flows in a project and project-level auth settings."""
+    """Update the MCP settings of all flows in a project and project-level auth settings.
+
+    On MCP Composer failure, this endpoint should return with a 200 status code and an error message in
+    the body of the response to display to the user.
+    """
     try:
         async with session_scope() as session:
             # Fetch the project first to verify it exists and belongs to the current user
@@ -377,32 +416,21 @@
             if not project:
                 raise HTTPException(status_code=404, detail="Project not found")

-            # Update project-level auth settings with encryption
-            if "auth_settings" in request.model_fields_set:
-                if request.auth_settings is None:
-                    # Explicitly set to None - clear auth settings
-                    project.auth_settings = None
-                else:
-                    # Use python mode to get raw values without SecretStr masking
-                    auth_model = request.auth_settings
-                    auth_dict = auth_model.model_dump(mode="python", exclude_none=True)
-
-                    # Extract actual secret values before encryption
-                    from pydantic import SecretStr
+            # Track if MCP Composer needs to be started or stopped
+            should_handle_mcp_composer = False
+            should_start_composer = False
+            should_stop_composer = False

-                    # Handle api_key if it's a SecretStr
-                    api_key_val = getattr(auth_model, "api_key", None)
-                    if isinstance(api_key_val, SecretStr):
-                        auth_dict["api_key"] = api_key_val.get_secret_value()
-
-                    # Handle oauth_client_secret if it's a SecretStr
-                    client_secret_val = getattr(auth_model, "oauth_client_secret", None)
-                    if isinstance(client_secret_val, SecretStr):
-                        auth_dict["oauth_client_secret"] = client_secret_val.get_secret_value()
+            # Update project-level auth settings with encryption
+            if "auth_settings" in request.model_fields_set and request.auth_settings is not None:
+                auth_result = handle_auth_settings_update(
+                    existing_project=project,
+                    new_auth_settings=request.auth_settings,
+                )

-                    # Encrypt and store
-                    encrypted_settings = encrypt_auth_settings(auth_dict)
-                    project.auth_settings = encrypted_settings
+                should_handle_mcp_composer = auth_result["should_handle_composer"]
+                should_start_composer = auth_result["should_start_composer"]
+                should_stop_composer = auth_result["should_stop_composer"]

             session.add(project)

@@ -426,7 +454,69 @@
             await session.commit()

-        return {"message": f"Updated MCP settings for {len(updated_flows)} flows and project auth settings"}
+        response: dict[str, Any] = {
+            "message": f"Updated MCP settings for {len(updated_flows)} flows and project auth settings"
+        }
+
+        if should_handle_mcp_composer:
+            if should_start_composer:
+                await logger.adebug(
+                    f"Auth settings changed to OAuth for project {project.name} ({project_id}), "
+                    "starting MCP Composer"
+                )
+
+                if should_use_mcp_composer(project):
+                    try:
+                        auth_config = await _get_mcp_composer_auth_config(project)
+                        await get_or_start_mcp_composer(auth_config, project.name, project_id)
+                        composer_sse_url = await get_composer_sse_url(project)
+                        response["result"] = {
+                            "project_id": str(project_id),
+                            "sse_url": composer_sse_url,
+                            "uses_composer": True,
+                        }
+                    except MCPComposerError as e:
+                        response["result"] = {
+                            "project_id": str(project_id),
+                            "uses_composer": True,
+                            "error_message": e.message,
+                        }
+                    except Exception as e:
+                        # Unexpected errors
+                        await logger.aerror(f"Failed to get mcp composer URL for project {project_id}: {e}")
+                        raise HTTPException(status_code=500, detail=str(e)) from e
+                else:
+                    # This shouldn't happen - we determined we should start composer but now we can't use it
+                    await logger.aerror(
+                        f"PATCH: OAuth set but MCP Composer is disabled in settings for project {project_id}"
+                    )
+                    response["result"] = {
+                        "project_id": str(project_id),
+                        "uses_composer": False,
+                        "error_message": "OAuth authentication is set but MCP Composer is disabled in settings",
+                    }
+
+            elif should_stop_composer:
+                await logger.adebug(
+                    f"Auth settings changed from OAuth for project {project.name} ({project_id}), "
+                    "stopping MCP Composer"
+                )
+                mcp_composer_service: MCPComposerService = cast(
+                    MCPComposerService, get_service(ServiceType.MCP_COMPOSER_SERVICE)
+                )
+                await mcp_composer_service.stop_project_composer(str(project_id))
+
+                # Provide the direct SSE URL since we're no longer using composer
+                sse_url = await get_project_sse_url(project_id)
+                if not sse_url:
+                    raise HTTPException(status_code=500, detail="Failed to get direct SSE URL")
+
+                response["result"] = {
+                    "project_id": str(project_id),
+                    "sse_url": sse_url,
+                    "uses_composer": False,
+                }
+
+        return response

     except Exception as e:
         msg = f"Error updating project MCP settings: {e!s}"
@@ -500,110 +590,98 @@
     removed_servers: list[str] = []  # Track removed servers for reinstallation
     try:
-        # Verify project exists and user has access
-        async with session_scope() as session:
-            project = (
-                await session.exec(select(Folder).where(Folder.id == project_id, Folder.user_id == current_user.id))
-            ).first()
-
-            if not project:
-                raise HTTPException(status_code=404, detail="Project not found")
+        project = await verify_project_access(project_id, current_user)

-            # Check if project requires API key authentication and generate if needed
-            generated_api_key = None
-
-            # Determine if we need to generate an API key based on feature flag
-            should_generate_api_key = False
-            if not FEATURE_FLAGS.mcp_composer:
-                # When MCP_COMPOSER is disabled, only generate API key if autologin is disabled
-                # (matches frontend !isAutoLogin check)
-                settings_service = get_settings_service()
-                should_generate_api_key = not settings_service.auth_settings.AUTO_LOGIN
-            elif project.auth_settings:
-                # When MCP_COMPOSER is enabled, only generate if auth_type is "apikey"
-                auth_settings = AuthSettings(**project.auth_settings) if project.auth_settings else AuthSettings()
-                should_generate_api_key = auth_settings.auth_type == "apikey"
+        # Check if project requires API key authentication and generate if needed
+        generated_api_key = None

-            if should_generate_api_key:
-                # Generate API key with specific name format
-                api_key_name = f"MCP Project {project.name} - {body.client}"
-                api_key_create = ApiKeyCreate(name=api_key_name)
-                unmasked_api_key = await create_api_key(session, api_key_create, current_user.id)
-                generated_api_key = unmasked_api_key.api_key
+        # Determine if we need to generate an API key
+        should_generate_api_key = False
+        if not get_settings_service().settings.mcp_composer_enabled:
+            # When MCP_COMPOSER is disabled, check auth settings or fallback to auto_login setting
+            settings_service = get_settings_service()
+            if project.auth_settings:
+                # Project has auth settings - check if it requires API key
+                if project.auth_settings.get("auth_type") == "apikey":
+                    should_generate_api_key = True
+            elif not settings_service.auth_settings.AUTO_LOGIN:
+                # No project auth settings but auto_login is disabled - generate API key
+                should_generate_api_key = True
+        elif project.auth_settings:
+            # When MCP_COMPOSER is enabled, only generate if auth_type is "apikey"
+            if project.auth_settings.get("auth_type") == "apikey":
+                should_generate_api_key = True

         # Get settings service to build the SSE URL
         settings_service = get_settings_service()
-        host = getattr(settings_service.settings, "host", "localhost")
-        port = getattr(settings_service.settings, "port", 3000)
-        base_url = f"http://{host}:{port}".rstrip("/")
-        sse_url = f"{base_url}/api/v1/mcp/project/{project_id}/sse"
+        settings = settings_service.settings
+        host = settings.host or None
+        port = settings.port or None
+        if not host or not port:
+            raise HTTPException(status_code=500, detail="Host and port are not set in settings")

         # Determine command and args based on operating system
         os_type = platform.system()
-        command = "uvx"

-        # Check if running on WSL (will appear as Linux but with Microsoft in release info)
-        is_wsl = os_type == "Linux" and "microsoft" in platform.uname().release.lower()
+        use_mcp_composer = should_use_mcp_composer(project)

-        if is_wsl:
-            await logger.adebug("WSL detected, using Windows-specific configuration")
+        if use_mcp_composer:
+            try:
+                auth_config = await _get_mcp_composer_auth_config(project)
+                await get_or_start_mcp_composer(auth_config, project.name, project_id)
+                sse_url = await get_composer_sse_url(project)
+            except MCPComposerError as e:
+                await logger.aerror(
+                    f"Failed to start MCP Composer for project '{project.name}' ({project_id}): {e.message}"
+                )
+                raise HTTPException(status_code=500, detail=e.message) from e
+            except Exception as e:
+                error_msg = f"Failed to start MCP Composer for project '{project.name}' ({project_id}): {e!s}"
+                await logger.aerror(error_msg)
+                error_detail = "Failed to start MCP Composer. See logs for details."
+                raise HTTPException(status_code=500, detail=error_detail) from e
+
+            # For OAuth/MCP Composer, use the special format
+            command = "uvx"
+            args = [
+                "mcp-composer",
+                "--mode",
+                "stdio",
+                "--sse-url",
+                sse_url,
+                "--disable-composer-tools",
+                "--client_auth_type",
+                "oauth",
+            ]
+        else:
+            # For non-OAuth (API key or no auth), use mcp-proxy
+            sse_url = await get_project_sse_url(project_id)
+            command = "uvx"
+            args = ["mcp-proxy"]

-        # If we're in WSL and the host is localhost, we might need to adjust the URL
-        # so Windows applications can reach the WSL service
-        if host in {"localhost", "127.0.0.1"}:
-            try:
-                # Try to get the WSL IP address for host.docker.internal or similar access
-                # This might vary depending on WSL version and configuration
-                proc = await create_subprocess_exec(
-                    "/usr/bin/hostname",
-                    "-I",
-                    stdout=asyncio.subprocess.PIPE,
-                    stderr=asyncio.subprocess.PIPE,
-                )
-                stdout, _ = await proc.communicate()
+            # Check if we need to add Langflow API key headers
+            # Necessary only when Project API Key Authentication is enabled
+            # Generate a Langflow API key for auto-install if needed
+            # Only add API key headers for projects with "apikey" auth type (not "none" or OAuth)
+            if should_generate_api_key:
+                async with session_scope() as api_key_session:
+                    api_key_create = ApiKeyCreate(name=f"MCP Server {project.name}")
+                    api_key_response = await create_api_key(api_key_session, api_key_create, current_user.id)
+                    langflow_api_key = api_key_response.api_key
+                    args.extend(["--headers", "x-api-key", langflow_api_key])

-                if proc.returncode == 0 and stdout.strip():
-                    wsl_ip = stdout.decode().strip().split()[0]  # Get first IP address
-                    await logger.adebug("Using WSL IP for external access: %s", wsl_ip)
-                    # Replace the localhost with the WSL IP in the URL
-                    sse_url = sse_url.replace(f"http://{host}:{port}", f"http://{wsl_ip}:{port}")
-            except OSError as e:
-                await logger.awarning("Failed to get WSL IP address: %s. Using default URL.", str(e))
-
-        # Base args
-        args = ["mcp-composer"] if FEATURE_FLAGS.mcp_composer else ["mcp-proxy"]
-
-        # Add authentication args based on MCP_COMPOSER feature flag and auth settings
-        if not FEATURE_FLAGS.mcp_composer:
-            # When MCP_COMPOSER is disabled, only use headers format if API key was generated
-            # (when autologin is disabled)
-            if generated_api_key:
-                args.extend(["--headers", "x-api-key", generated_api_key])
-        elif project.auth_settings:
-            # Decrypt sensitive fields before using them
-            decrypted_settings = decrypt_auth_settings(project.auth_settings)
-            auth_settings = AuthSettings(**decrypted_settings) if decrypted_settings else AuthSettings()
-            args.extend(["--auth_type", auth_settings.auth_type])
-
-            # When MCP_COMPOSER is enabled, only add headers if auth_type is "apikey"
-            auth_settings = AuthSettings(**project.auth_settings)
-            if auth_settings.auth_type == "apikey" and generated_api_key:
-                args.extend(["--headers", "x-api-key", generated_api_key])
-            # If no auth_settings or auth_type is "none", don't add any auth headers
-
-        # Add the SSE URL
-        if FEATURE_FLAGS.mcp_composer:
-            args.extend(["--sse-url", sse_url])
-        else:
+            # Add the SSE URL for mcp-proxy
             args.append(sse_url)

-        if os_type == "Windows":
+        if os_type == "Windows" and not use_mcp_composer:
+            # Only wrap in cmd for Windows when using mcp-proxy
             command = "cmd"
             args = ["/c", "uvx", *args]
             await logger.adebug("Windows detected, using cmd command")

         name = project.name
+        server_name = f"lf-{sanitize_mcp_name(name)[: (MAX_MCP_SERVER_NAME_LENGTH - 4)]}"

         # Create the MCP configuration
         server_config: dict[str, Any] = {
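Concretely, for an API-key project the installer ends up writing an mcp-proxy entry like the following into the client's config file (the key, URL, and project name below are placeholders):

mcp_config = {
    "mcpServers": {
        # "lf-" prefix plus the sanitized, length-capped project name.
        "lf-my-project": {
            "command": "uvx",
            "args": [
                "mcp-proxy",
                "--headers", "x-api-key", "<generated-langflow-api-key>",
                "http://localhost:7860/api/v1/mcp/project/<project-id>/sse",
            ],
        }
    }
}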
@@ -611,74 +689,23 @@
             "command": command,
             "args": args,
         }

-        mcp_config = {
-            "mcpServers": {f"lf-{sanitize_mcp_name(name)[: (MAX_MCP_SERVER_NAME_LENGTH - 4)]}": server_config}
-        }
+        mcp_config = {"mcpServers": {server_name: server_config}}

-        server_name = f"lf-{sanitize_mcp_name(name)[: (MAX_MCP_SERVER_NAME_LENGTH - 4)]}"
         await logger.adebug("Installing MCP config for project: %s (server name: %s)", project.name, server_name)

-        # Determine the config file path based on the client and OS
-        if body.client.lower() == "cursor":
-            config_path = Path.home() / ".cursor" / "mcp.json"
-        elif body.client.lower() == "windsurf":
-            config_path = Path.home() / ".codeium" / "windsurf" / "mcp_config.json"
-        elif body.client.lower() == "claude":
-            if os_type == "Darwin":  # macOS
-                config_path = Path.home() / "Library" / "Application Support" / "Claude" / "claude_desktop_config.json"
-            elif os_type == "Windows" or is_wsl:  # Windows or WSL (Claude runs on Windows host)
-                if is_wsl:
-                    # In WSL, we need to access the Windows APPDATA directory
-                    try:
-                        # First try to get the Windows username
-                        proc = await create_subprocess_exec(
-                            "/mnt/c/Windows/System32/cmd.exe",
-                            "/c",
-                            "echo %USERNAME%",
-                            stdout=asyncio.subprocess.PIPE,
-                            stderr=asyncio.subprocess.PIPE,
-                        )
-                        stdout, stderr = await proc.communicate()
-
-                        if proc.returncode == 0 and stdout.strip():
-                            windows_username = stdout.decode().strip()
-                            config_path = Path(
-                                f"/mnt/c/Users/{windows_username}/AppData/Roaming/Claude/claude_desktop_config.json"
-                            )
-                        else:
-                            # Fallback: try to find the Windows user directory
-                            users_dir = Path("/mnt/c/Users")
-                            if users_dir.exists():
-                                # Get the first non-system user directory
-                                user_dirs = [
-                                    d
-                                    for d in users_dir.iterdir()
-                                    if d.is_dir() and not d.name.startswith(("Default", "Public", "All Users"))
-                                ]
-                                if user_dirs:
-                                    config_path = (
-                                        user_dirs[0] / "AppData" / "Roaming" / "Claude" / "claude_desktop_config.json"
-                                    )
-                                else:
-                                    raise HTTPException(
-                                        status_code=400, detail="Could not find Windows user directory in WSL"
-                                    )
-                            else:
-                                raise HTTPException(
-                                    status_code=400, detail="Windows C: drive not mounted at /mnt/c in WSL"
-                                )
-                    except (OSError, CalledProcessError) as e:
-                        await logger.awarning("Failed to determine Windows user path in WSL: %s", str(e))
-                        raise HTTPException(
-                            status_code=400, detail=f"Could not determine Windows Claude config path in WSL: {e!s}"
-                        ) from e
-                else:
-                    # Regular Windows
-                    config_path = Path(os.environ["APPDATA"]) / "Claude" / "claude_desktop_config.json"
-            else:
-                raise HTTPException(status_code=400, detail="Unsupported operating system for Claude configuration")
-        else:
-            raise HTTPException(status_code=400, detail="Unsupported client")
+        # Get the config file path and check if client is available
+        try:
+            config_path = await get_config_path(body.client.lower())
+        except ValueError as e:
+            raise HTTPException(status_code=400, detail=str(e)) from e

+        # Check if the client application is available (config directory exists)
+        if not config_path.parent.exists():
+            raise HTTPException(
+                status_code=400,
+                detail=f"{body.client.capitalize()} is not installed on this system. "
+                f"Please install {body.client.capitalize()} first.",
+            )

         # Create parent directories if they don't exist
         config_path.parent.mkdir(parents=True, exist_ok=True)
@@ -697,12 +724,10 @@
         if "mcpServers" not in existing_config:
             existing_config["mcpServers"] = {}

-        # Remove any existing servers with the same SSE URL (for reinstalling)
-        project_sse_url = await get_project_sse_url(project_id)
-        existing_config, removed_servers = remove_server_by_sse_url(existing_config, project_sse_url)
+        existing_config, removed_servers = remove_server_by_sse_url(existing_config, sse_url)

         if removed_servers:
-            logger.info("Removed existing MCP servers with same SSE URL for reinstall: %s", removed_servers)
+            await logger.adebug("Removed existing MCP servers with same SSE URL for reinstall: %s", removed_servers)

         # Merge new config with existing config
         existing_config["mcpServers"].update(mcp_config["mcpServers"])
@@ -711,6 +736,8 @@
         with config_path.open("w") as f:
             json.dump(existing_config, f, indent=2)

+    except HTTPException:
+        raise
     except Exception as e:
         msg = f"Error installing MCP configuration: {e!s}"
         await logger.aexception(msg)
@@ -721,12 +748,59 @@
     if removed_servers:
         message += f" (replaced existing servers: {', '.join(removed_servers)})"
     if generated_api_key:
-        auth_type = "API key" if FEATURE_FLAGS.mcp_composer else "legacy API key"
+        auth_type = "API key" if get_settings_service().settings.mcp_composer_enabled else "legacy API key"
         message += f" with {auth_type} authentication (key name: 'MCP Project {project.name} - {body.client}')"
-    await logger.ainfo(message)
+    await logger.adebug(message)
     return {"message": message}


+@router.get("/{project_id}/composer-url")
+async def get_project_composer_url(
+    project_id: UUID,
+    current_user: CurrentActiveMCPUser,
+):
+    """Get the MCP Composer URL for a specific project.
+
+    On failure, this endpoint should return with a 200 status code and an error message in
+    the body of the response to display to the user.
+    """
+    try:
+        project = await verify_project_access(project_id, current_user)
+        if not should_use_mcp_composer(project):
+            return {
+                "project_id": str(project_id),
+                "uses_composer": False,
+                "error_message": (
+                    "MCP Composer is only available for projects with MCP Composer enabled and OAuth authentication"
+                ),
+            }
+
+        auth_config = await _get_mcp_composer_auth_config(project)
+
+        try:
+            await get_or_start_mcp_composer(auth_config, project.name, project_id)
+            composer_sse_url = await get_composer_sse_url(project)
+            return {"project_id": str(project_id), "sse_url": composer_sse_url, "uses_composer": True}
+        except MCPComposerError as e:
+            return {"project_id": str(project_id), "uses_composer": True, "error_message": e.message}
+        except Exception as e:  # noqa: BLE001
+            await logger.aerror(f"Unexpected error getting composer URL: {e}")
+            return {
+                "project_id": str(project_id),
+                "uses_composer": True,
+                "error_message": "Failed to start MCP Composer. See logs for details.",
+            }
+
+    except Exception as e:  # noqa: BLE001
+        msg = f"Error getting composer URL for project {project_id}: {e!s}"
+        await logger.aerror(msg)
+        return {
+            "project_id": str(project_id),
+            "uses_composer": True,
+            "error_message": "Failed to get MCP Composer URL. See logs for details.",
+        }


 @router.get("/{project_id}/installed")
 async def check_installed_mcp_servers(
     project_id: UUID,
@@ -743,127 +817,60 @@
         if not project:
             raise HTTPException(status_code=404, detail="Project not found")

-        # Generate the SSE URL for this project
-        project_sse_url = await get_project_sse_url(project_id)
+        project = await verify_project_access(project_id, current_user)
+        if should_use_mcp_composer(project):
+            project_sse_url = await get_composer_sse_url(project)
+        else:
+            project_sse_url = await get_project_sse_url(project_id)

         await logger.adebug(
             "Checking for installed MCP servers for project: %s (SSE URL: %s)", project.name, project_sse_url
         )

-        # Check configurations for different clients
+        # Define supported clients
+        clients = ["cursor", "windsurf", "claude"]
         results = []

-        # Check Cursor configuration
-        cursor_config_path = Path.home() / ".cursor" / "mcp.json"
-        await logger.adebug(
-            "Checking Cursor config at: %s (exists: %s)", cursor_config_path, cursor_config_path.exists()
-        )
-        if cursor_config_path.exists():
+        for client_name in clients:
             try:
-                with cursor_config_path.open("r") as f:
-                    cursor_config = json.load(f)
-                    if config_contains_sse_url(cursor_config, project_sse_url):
-                        await logger.adebug("Found Cursor config with matching SSE URL: %s", project_sse_url)
-                        results.append("cursor")
-                    else:
-                        await logger.adebug(
-                            "Cursor config exists but no server with SSE URL: %s (available servers: %s)",
-                            project_sse_url,
-                            list(cursor_config.get("mcpServers", {}).keys()),
-                        )
-            except json.JSONDecodeError:
-                await logger.awarning("Failed to parse Cursor config JSON at: %s", cursor_config_path)
+                # Get config path for this client
+                config_path = await get_config_path(client_name)
+                available = config_path.parent.exists()
+                installed = False

-        # Check Windsurf configuration
-        windsurf_config_path = Path.home() / ".codeium" / "windsurf" / "mcp_config.json"
-        await logger.adebug(
-            "Checking Windsurf config at: %s (exists: %s)", windsurf_config_path, windsurf_config_path.exists()
-        )
-        if windsurf_config_path.exists():
-            try:
-                with windsurf_config_path.open("r") as f:
-                    windsurf_config = json.load(f)
-                    if config_contains_sse_url(windsurf_config, project_sse_url):
-                        await logger.adebug("Found Windsurf config with matching SSE URL: %s", project_sse_url)
-                        results.append("windsurf")
-                    else:
-                        await logger.adebug(
-                            "Windsurf config exists but no server with SSE URL: %s (available servers: %s)",
-                            project_sse_url,
-                            list(windsurf_config.get("mcpServers", {}).keys()),
-                        )
-            except json.JSONDecodeError:
-                await logger.awarning("Failed to parse Windsurf config JSON at: %s", windsurf_config_path)
-
-        # Check Claude configuration
-        claude_config_path = None
-        os_type = platform.system()
-        is_wsl = os_type == "Linux" and "microsoft" in platform.uname().release.lower()
+                await logger.adebug("Checking %s config at: %s (exists: %s)", client_name, config_path, available)

-        if os_type == "Darwin":  # macOS
-            claude_config_path = (
-                Path.home() / "Library" / "Application Support" / "Claude" / "claude_desktop_config.json"
-            )
-        elif os_type == "Windows" or is_wsl:  # Windows or WSL (Claude runs on Windows host)
-            if is_wsl:
-                # In WSL, we need to access the Windows APPDATA directory
-                try:
-                    # First try to get the Windows username
-                    proc = await create_subprocess_exec(
-                        "/mnt/c/Windows/System32/cmd.exe",
-                        "/c",
-                        "echo %USERNAME%",
-                        stdout=asyncio.subprocess.PIPE,
-                        stderr=asyncio.subprocess.PIPE,
-                    )
-                    stdout, stderr = await proc.communicate()
-
-                    if proc.returncode == 0 and stdout.strip():
-                        windows_username = stdout.decode().strip()
-                        claude_config_path = Path(
-                            f"/mnt/c/Users/{windows_username}/AppData/Roaming/Claude/claude_desktop_config.json"
-                        )
-                    else:
-                        # Fallback: try to find the Windows user directory
-                        users_dir = Path("/mnt/c/Users")
-                        if users_dir.exists():
-                            # Get the first non-system user directory
-                            user_dirs = [
-                                d
-                                for d in users_dir.iterdir()
-                                if d.is_dir() and not d.name.startswith(("Default", "Public", "All Users"))
-                            ]
-                            if user_dirs:
-                                claude_config_path = (
-                                    user_dirs[0] / "AppData" / "Roaming" / "Claude" / "claude_desktop_config.json"
-                                )
-                except (OSError, CalledProcessError) as e:
-                    await logger.awarning(
-                        "Failed to determine Windows user path in WSL for checking Claude config: %s", str(e)
-                    )
-                    # Don't set claude_config_path, so it will be skipped
-            else:
-                # Regular Windows
-                claude_config_path = Path(os.environ["APPDATA"]) / "Claude" / "claude_desktop_config.json"
+                # If config file exists, check if project is installed
+                if available:
+                    try:
+                        with config_path.open("r") as f:
+                            config_data = json.load(f)
+                            if config_contains_sse_url(config_data, project_sse_url):
+                                await logger.adebug(
+                                    "Found %s config with matching SSE URL: %s", client_name, project_sse_url
+                                )
+                                installed = True
+                            else:
+                                await logger.adebug(
+                                    "%s config exists but no server with SSE URL: %s (available servers: %s)",
+                                    client_name,
+                                    project_sse_url,
+                                    list(config_data.get("mcpServers", {}).keys()),
+                                )
+                    except json.JSONDecodeError:
+                        await logger.awarning("Failed to parse %s config JSON at: %s", client_name, config_path)
+                        # available is True but installed remains False due to parse error
+                else:
+                    await logger.adebug("%s config path not found or doesn't exist: %s", client_name, config_path)

-        if claude_config_path and claude_config_path.exists():
-            await logger.adebug("Checking Claude config at: %s", claude_config_path)
-            try:
-                with claude_config_path.open("r") as f:
-                    claude_config = json.load(f)
-                    if config_contains_sse_url(claude_config, project_sse_url):
-                        await logger.adebug("Found Claude config with matching SSE URL: %s", project_sse_url)
-                        results.append("claude")
-                    else:
-                        await logger.adebug(
-                            "Claude config exists but no server with SSE URL: %s (available servers: %s)",
-                            project_sse_url,
-                            list(claude_config.get("mcpServers", {}).keys()),
-                        )
-            except json.JSONDecodeError:
-                await logger.awarning("Failed to parse Claude config JSON at: %s", claude_config_path)
-        else:
-            await logger.adebug("Claude config path not found or doesn't exist: %s", claude_config_path)
+                # Add result for this client
+                results.append({"name": client_name, "installed": installed, "available": available})
+
+            except Exception as e:  # noqa: BLE001
+                # If there's an error getting config path or checking the client,
+                # mark it as not available and not installed
+                await logger.awarning("Error checking %s configuration: %s", client_name, str(e))
+                results.append({"name": client_name, "installed": False, "available": False})

     except Exception as e:
         msg = f"Error checking MCP configuration: {e!s}"
@@ -878,7 +885,7 @@ def config_contains_sse_url(config_data: dict, sse_url: str) -> bool:
     for server_name, server_config in mcp_servers.items():
         args = server_config.get("args", [])
         # The SSE URL is typically the last argument in mcp-proxy configurations
-        if args and args[-1] == sse_url:
+        if args and sse_url in args:
             logger.debug("Found matching SSE URL in server: %s", server_name)
             return True
     return False
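The switch from `args[-1] == sse_url` to `sse_url in args` is what makes detection work for composer-style entries, where the URL is followed by further flags rather than sitting last. A compact check of that case:

def config_contains_sse_url(config_data: dict, sse_url: str) -> bool:
    for server_config in config_data.get("mcpServers", {}).values():
        args = server_config.get("args", [])
        if args and sse_url in args:
            return True
    return False


composer_entry = {"mcpServers": {"lf-demo": {"args": [
    "mcp-composer", "--mode", "stdio",
    "--sse-url", "http://localhost:9000/sse", "--disable-composer-tools",
]}}}
# The URL is not the last argument, so the old `args[-1]` check would miss it:
assert config_contains_sse_url(composer_entry, "http://localhost:9000/sse")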
Using default URL.", str(e)) - return project_sse_url + return url async def get_config_path(client: str) -> Path: @@ -969,7 +993,7 @@ async def get_config_path(client: str) -> Path: msg = "Could not find valid Windows user directory in WSL" raise ValueError(msg) except (OSError, CalledProcessError) as e: - logger.warning("Failed to determine Windows user path in WSL: %s", str(e)) + await logger.awarning("Failed to determine Windows user path in WSL: %s", str(e)) msg = f"Could not determine Windows Claude config path in WSL: {e!s}" raise ValueError(msg) from e # Regular Windows @@ -1009,6 +1033,31 @@ def remove_server_by_sse_url(config_data: dict, sse_url: str) -> tuple[dict, lis return config_data, removed_servers +async def _get_mcp_composer_auth_config(project) -> dict: + """Get MCP Composer authentication configuration from project settings. + + Args: + project: The project object containing auth_settings + + Returns: + dict: The decrypted authentication configuration + + Raises: + HTTPException: If MCP Composer is not enabled or auth config is missing + """ + auth_config = None + if project.auth_settings: + decrypted_settings = decrypt_auth_settings(project.auth_settings) + if decrypted_settings: + auth_config = decrypted_settings + + if not auth_config: + error_message = "Auth config is missing. Please check your settings and try again." + raise ValueError(error_message) + + return auth_config + + # Project-specific MCP server instance for handling project-specific tools class ProjectMCPServer: def __init__(self, project_id: UUID): @@ -1053,29 +1102,165 @@ async def handle_call_project_tool(name: str, arguments: dict) -> list[types.Tex project_mcp_servers = {} -def get_project_mcp_server(project_id: UUID) -> ProjectMCPServer: +def get_project_mcp_server(project_id: UUID | None) -> ProjectMCPServer: """Get or create an MCP server for a specific project.""" + if project_id is None: + error_message = "Project ID cannot be None when getting project MCP server" + raise ValueError(error_message) + project_id_str = str(project_id) if project_id_str not in project_mcp_servers: project_mcp_servers[project_id_str] = ProjectMCPServer(project_id) return project_mcp_servers[project_id_str] +async def register_project_with_composer(project: Folder): + """Register a project with MCP Composer by starting a dedicated composer instance.""" + try: + mcp_composer_service: MCPComposerService = cast( + MCPComposerService, get_service(ServiceType.MCP_COMPOSER_SERVICE) + ) + + settings = get_settings_service().settings + if not settings.host or not settings.port: + error_msg = "Langflow host and port must be set in settings to register project with MCP Composer" + raise ValueError(error_msg) + + if not project.id: + error_msg = "Project must have an ID to register with MCP Composer" + raise ValueError(error_msg) + + sse_url = await get_project_sse_url(project.id) + auth_config = await _get_mcp_composer_auth_config(project) + + error_message = await mcp_composer_service.start_project_composer( + project_id=str(project.id), + sse_url=sse_url, + auth_config=auth_config, + ) + if error_message is not None: + raise RuntimeError(error_message) + + await logger.adebug(f"Registered project {project.name} ({project.id}) with MCP Composer") + + except Exception as e: # noqa: BLE001 + await logger.awarning(f"Failed to register project {project.id} with MCP Composer: {e}") + + async def init_mcp_servers(): """Initialize MCP servers for all projects.""" try: + settings_service = get_settings_service() + async with 
+ + async def init_mcp_servers(): """Initialize MCP servers for all projects.""" try: + settings_service = get_settings_service() + async with session_scope() as session: projects = (await session.exec(select(Folder))).all() for project in projects: try: + # Auto-enable API key auth for projects without auth settings or with "none" auth + # when AUTO_LOGIN is false + if not settings_service.auth_settings.AUTO_LOGIN: + should_update_to_apikey = False + + if not project.auth_settings: + # No auth settings at all + should_update_to_apikey = True + # Check if existing auth settings have auth_type "none" + elif project.auth_settings.get("auth_type") == "none": + should_update_to_apikey = True + + if should_update_to_apikey: + default_auth = {"auth_type": "apikey"} + project.auth_settings = encrypt_auth_settings(default_auth) + session.add(project) + await logger.ainfo( + f"Auto-enabled API key authentication for existing project {project.name} " + f"({project.id}) due to AUTO_LOGIN=false" + ) + + # WARN: If oauth projects exist in the database and the MCP Composer is disabled, + # these projects will be reset to "apikey" or "none" authentication, erasing all oauth settings. + if ( + not settings_service.settings.mcp_composer_enabled + and project.auth_settings + and project.auth_settings.get("auth_type") == "oauth" + ): + # Reset OAuth projects to appropriate auth type based on AUTO_LOGIN setting + fallback_auth_type = "apikey" if not settings_service.auth_settings.AUTO_LOGIN else "none" + clean_auth = AuthSettings(auth_type=fallback_auth_type) + project.auth_settings = clean_auth.model_dump(exclude_none=True) + session.add(project) + await logger.adebug( + f"Updated OAuth project {project.name} ({project.id}) to use {fallback_auth_type} " + f"authentication because MCP Composer is disabled" + ) + get_project_sse(project.id) get_project_mcp_server(project.id) + + # Only register with MCP Composer if OAuth authentication is configured + if get_settings_service().settings.mcp_composer_enabled and project.auth_settings: + auth_type = project.auth_settings.get("auth_type") + if auth_type == "oauth": + await logger.adebug( + f"Starting MCP Composer for OAuth project {project.name} ({project.id}) on startup" + ) + await register_project_with_composer(project) + except Exception as e: # noqa: BLE001 msg = f"Failed to initialize MCP server for project {project.id}: {e}" await logger.aexception(msg) # Continue to next project even if this one fails + # Commit any auth settings updates + await session.commit() + except Exception as e: # noqa: BLE001 msg = f"Failed to initialize MCP servers: {e}" await logger.aexception(msg) + + +async def verify_project_access(project_id: UUID, current_user: CurrentActiveMCPUser) -> Folder: + """Verify project exists and user has access.""" + async with session_scope() as session: + project = ( + await session.exec(select(Folder).where(Folder.id == project_id, Folder.user_id == current_user.id)) + ).first() + + if not project: + raise HTTPException(status_code=404, detail="Project not found") + + return project + + +def should_use_mcp_composer(project: Folder) -> bool: + """Check if project uses OAuth authentication and MCP Composer is enabled.""" + # If MCP Composer is disabled globally, never use it regardless of project settings + if not get_settings_service().settings.mcp_composer_enabled: + return False + + return project.auth_settings is not None and project.auth_settings.get("auth_type", "") == "oauth" + + +async def get_or_start_mcp_composer(auth_config: dict, project_name: str, project_id: UUID) -> None: + """Start the project's MCP Composer instance if it is not already running. 
+ + Raises: + MCPComposerError: If MCP Composer fails to start + """ + from lfx.services.mcp_composer.service import MCPComposerConfigError + + mcp_composer_service: MCPComposerService = cast(MCPComposerService, get_service(ServiceType.MCP_COMPOSER_SERVICE)) + + # Prepare current auth config for comparison + settings = get_settings_service().settings + if not settings.host or not settings.port: + error_msg = "Langflow host and port must be set in settings to register project with MCP Composer" + raise ValueError(error_msg) + + sse_url = await get_project_sse_url(project_id) + if not auth_config: + error_msg = f"Auth config is required to start MCP Composer for project {project_name}" + raise MCPComposerConfigError(error_msg, str(project_id)) + + await mcp_composer_service.start_project_composer(str(project_id), sse_url, auth_config) diff --git a/src/backend/base/langflow/api/v1/projects.py b/src/backend/base/langflow/api/v1/projects.py index 9cf5600ed240..4dd8be2974e1 100644 --- a/src/backend/base/langflow/api/v1/projects.py +++ b/src/backend/base/langflow/api/v1/projects.py @@ -2,26 +2,31 @@ import json import zipfile from datetime import datetime, timezone -from typing import Annotated +from typing import Annotated, cast from urllib.parse import quote from uuid import UUID import orjson -from fastapi import APIRouter, Depends, File, HTTPException, Response, UploadFile, status +from fastapi import APIRouter, BackgroundTasks, Depends, File, HTTPException, Response, UploadFile, status from fastapi.encoders import jsonable_encoder from fastapi.responses import StreamingResponse from fastapi_pagination import Params from fastapi_pagination.ext.sqlmodel import apaginate +from lfx.services.mcp_composer.service import MCPComposerService from sqlalchemy import or_, update from sqlalchemy.orm import selectinload from sqlmodel import select from langflow.api.utils import CurrentActiveUser, DbSession, cascade_delete_flow, custom_params, remove_api_keys +from langflow.api.v1.auth_helpers import handle_auth_settings_update from langflow.api.v1.flows import create_flows +from langflow.api.v1.mcp_projects import register_project_with_composer from langflow.api.v1.schemas import FlowListCreate from langflow.helpers.flow import generate_unique_flow_name from langflow.helpers.folders import generate_unique_folder_name from langflow.initial_setup.constants import STARTER_FOLDER_NAME +from langflow.logging import logger +from langflow.services.auth.mcp_encryption import encrypt_auth_settings from langflow.services.database.models.flow.model import Flow, FlowCreate, FlowRead from langflow.services.database.models.folder.constants import DEFAULT_FOLDER_NAME from langflow.services.database.models.folder.model import ( @@ -32,6 +37,8 @@ FolderUpdate, ) from langflow.services.database.models.folder.pagination_model import FolderWithPaginatedFlows +from langflow.services.deps import get_service, get_settings_service +from langflow.services.schema import ServiceType router = APIRouter(prefix="/projects", tags=["Projects"]) @@ -70,6 +77,17 @@ async def create_project( else: new_project.name = f"{new_project.name} (1)" + settings_service = get_settings_service() + + # If AUTO_LOGIN is false, automatically enable API key authentication + if not settings_service.auth_settings.AUTO_LOGIN and not new_project.auth_settings: + default_auth = {"auth_type": "apikey"} + new_project.auth_settings = encrypt_auth_settings(default_auth) + await logger.adebug( + f"Auto-enabled API key authentication for project {new_project.name} 
" + f"({new_project.id}) due to AUTO_LOGIN=false" + ) + session.add(new_project) await session.commit() await session.refresh(new_project) @@ -87,7 +105,6 @@ async def create_project( ) await session.exec(update_statement_flows) await session.commit() - except Exception as e: raise HTTPException(status_code=500, detail=str(e)) from e @@ -178,6 +195,7 @@ async def update_project( project_id: UUID, project: FolderUpdate, # Assuming FolderUpdate is a Pydantic model defining updatable fields current_user: CurrentActiveUser, + background_tasks: BackgroundTasks, ): try: existing_project = ( @@ -189,27 +207,69 @@ async def update_project( if not existing_project: raise HTTPException(status_code=404, detail="Project not found") + result = await session.exec( + select(Flow.id, Flow.is_component).where(Flow.folder_id == existing_project.id, Flow.user_id == current_user.id) + ) + flows_and_components = result.all() + + project.flows = [flow_id for flow_id, is_component in flows_and_components if not is_component] + project.components = [flow_id for flow_id, is_component in flows_and_components if is_component] + try: + # Track if MCP Composer needs to be started or stopped + should_start_mcp_composer = False + should_stop_mcp_composer = False + + # Check if auth_settings is being updated + if "auth_settings" in project.model_fields_set: # Check if auth_settings was explicitly provided + auth_result = handle_auth_settings_update( + existing_project=existing_project, + new_auth_settings=project.auth_settings, + ) + + should_start_mcp_composer = auth_result["should_start_composer"] + should_stop_mcp_composer = auth_result["should_stop_composer"] + + # Handle other updates if project.name and project.name != existing_project.name: existing_project.name = project.name - session.add(existing_project) - await session.commit() - await session.refresh(existing_project) - return existing_project - project_data = existing_project.model_dump(exclude_unset=True) - for key, value in project_data.items(): - if key not in {"components", "flows"}: - setattr(existing_project, key, value) + if project.description is not None: + existing_project.description = project.description + + if project.parent_id is not None: + existing_project.parent_id = project.parent_id + session.add(existing_project) await session.commit() await session.refresh(existing_project) + # Start MCP Composer if auth changed to OAuth + if should_start_mcp_composer: + await logger.adebug( + f"Auth settings changed to OAuth for project {existing_project.name} ({existing_project.id}), " + "starting MCP Composer" + ) + background_tasks.add_task(register_project_with_composer, existing_project) + + # Stop MCP Composer if auth changed FROM OAuth to something else + elif should_stop_mcp_composer: + await logger.ainfo( + f"Auth settings changed from OAuth for project {existing_project.name} ({existing_project.id}), " + "stopping MCP Composer" + ) + + # Get the MCP Composer service and stop the project's composer + mcp_composer_service: MCPComposerService = cast( + MCPComposerService, get_service(ServiceType.MCP_COMPOSER_SERVICE) + ) + await mcp_composer_service.stop_project_composer(str(existing_project.id)) + concat_project_components = project.components + project.flows flows_ids = (await session.exec(select(Flow.id).where(Flow.folder_id == existing_project.id))).all() - excluded_flows = list(set(flows_ids) - set(concat_project_components)) + excluded_flows = list(set(flows_ids) - set(project.flows)) my_collection_project = (await 
session.exec(select(Folder).where(Folder.name == DEFAULT_FOLDER_NAME))).first() if my_collection_project: @@ -256,6 +316,18 @@ async def delete_project( if not project: raise HTTPException(status_code=404, detail="Project not found") + # Check if project has OAuth authentication and stop MCP Composer if needed + if project.auth_settings and project.auth_settings.get("auth_type") == "oauth": + try: + mcp_composer_service: MCPComposerService = cast( + MCPComposerService, get_service(ServiceType.MCP_COMPOSER_SERVICE) + ) + await mcp_composer_service.stop_project_composer(str(project_id)) + await logger.adebug(f"Stopped MCP Composer for deleted OAuth project {project.name} ({project_id})") + except Exception as e: # noqa: BLE001 + # Log but don't fail the deletion if MCP Composer cleanup fails + await logger.aerror(f"Failed to stop MCP Composer for deleted project {project_id}: {e}") + try: await session.delete(project) await session.commit() @@ -338,10 +410,21 @@ async def upload_file( new_project = Folder.model_validate(project, from_attributes=True) new_project.id = None new_project.user_id = current_user.id + + settings_service = get_settings_service() + + # If AUTO_LOGIN is false, automatically enable API key authentication + if not settings_service.auth_settings.AUTO_LOGIN and not new_project.auth_settings: + default_auth = {"auth_type": "apikey"} + new_project.auth_settings = encrypt_auth_settings(default_auth) + await logger.adebug( + f"Auto-enabled API key authentication for uploaded project {new_project.name} " + f"({new_project.id}) due to AUTO_LOGIN=false" + ) + session.add(new_project) await session.commit() await session.refresh(new_project) - del data["folder_name"] del data["folder_description"] diff --git a/src/backend/base/langflow/api/v1/schemas.py b/src/backend/base/langflow/api/v1/schemas.py index 0fb3ddaade42..665cec360b39 100644 --- a/src/backend/base/langflow/api/v1/schemas.py +++ b/src/backend/base/langflow/api/v1/schemas.py @@ -373,6 +373,7 @@ class ConfigResponse(BaseModel): public_flow_expiration: int event_delivery: Literal["polling", "streaming", "direct"] webhook_auth_enable: bool + voice_mode_available: bool @classmethod def from_settings(cls, settings: Settings, auth_settings) -> "ConfigResponse": @@ -398,6 +399,7 @@ def from_settings(cls, settings: Settings, auth_settings) -> "ConfigResponse": public_flow_cleanup_interval=settings.public_flow_cleanup_interval, public_flow_expiration=settings.public_flow_expiration, event_delivery=settings.event_delivery, + voice_mode_available=settings.voice_mode_available, webhook_auth_enable=auth_settings.WEBHOOK_AUTH_ENABLE, ) diff --git a/src/backend/base/langflow/api/v2/files.py b/src/backend/base/langflow/api/v2/files.py index d95390c83e05..14591c2966ce 100644 --- a/src/backend/base/langflow/api/v2/files.py +++ b/src/backend/base/langflow/api/v2/files.py @@ -27,6 +27,11 @@ SAMPLE_DATA_DIR = Path(__file__).parent / "sample_data" +async def get_mcp_file(current_user: CurrentActiveUser, *, extension: bool = False) -> str: + # Create a unique MCP servers file with the user id appended + return f"{MCP_SERVERS_FILE}_{current_user.id!s}" + (".json" if extension else "") + + async def byte_stream_generator(file_input, chunk_size: int = 8192) -> AsyncGenerator[bytes, None]: """Convert bytes object or stream into an async generator that yields chunks.""" if isinstance(file_input, bytes): @@ -115,9 +120,12 @@ async def upload_user_file( root_filename, file_extension = new_filename, "" # Special handling for the MCP servers config 
file: always keep the same root filename - if root_filename == MCP_SERVERS_FILE: + mcp_file = await get_mcp_file(current_user) + mcp_file_ext = await get_mcp_file(current_user, extension=True) + + if new_filename == mcp_file_ext: # If a record already exists, delete it so the new upload replaces it - existing_mcp_file = await get_file_by_name(root_filename, current_user, session) + existing_mcp_file = await get_file_by_name(mcp_file, current_user, session) if existing_mcp_file: await delete_file(existing_mcp_file.id, current_user, session, storage_service) unique_filename = new_filename @@ -255,7 +263,9 @@ async def list_files( full_list = list(results) # Filter out the per-user MCP servers config file - return [file for file in full_list if file.name != MCP_SERVERS_FILE] + mcp_file = await get_mcp_file(current_user) + + return [file for file in full_list if file.name != mcp_file] except Exception as e: raise HTTPException(status_code=500, detail=f"Error listing files: {e}") from e
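
The api/v2/mcp.py changes below key each user's MCP server configuration to a per-user file and rename any legacy shared file on first read. A rough sketch of the naming scheme shown in get_mcp_file above (illustrative synchronous helper; the real async helpers live in langflow.api.v2.files):

    MCP_SERVERS_FILE = "_mcp_servers"  # legacy shared root filename

    def mcp_config_name(user_id: str, *, extension: bool = False) -> str:
        # Per-user variant: "_mcp_servers_<user-id>", plus ".json" when uploading.
        return f"{MCP_SERVERS_FILE}_{user_id}" + (".json" if extension else "")

    # e.g. mcp_config_name("3fa85f64", extension=True) == "_mcp_servers_3fa85f64.json"
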
diff --git a/src/backend/base/langflow/api/v2/mcp.py b/src/backend/base/langflow/api/v2/mcp.py index 984de2f6c69b..db3ae933de41 100644 --- a/src/backend/base/langflow/api/v2/mcp.py +++ b/src/backend/base/langflow/api/v2/mcp.py @@ -3,12 +3,21 @@ from io import BytesIO from fastapi import APIRouter, Depends, HTTPException, UploadFile +from lfx.base.agents.utils import safe_cache_get, safe_cache_set from lfx.base.mcp.util import update_tools -from lfx.log import logger from langflow.api.utils import CurrentActiveUser, DbSession -from langflow.api.v2.files import MCP_SERVERS_FILE, delete_file, download_file, get_file_by_name, upload_user_file -from langflow.services.deps import get_settings_service, get_storage_service +from langflow.api.v2.files import ( + MCP_SERVERS_FILE, + delete_file, + download_file, + edit_file_name, + get_file_by_name, + get_mcp_file, + upload_user_file, +) +from langflow.logging import logger +from langflow.services.deps import get_settings_service, get_shared_component_cache_service, get_storage_service router = APIRouter(tags=["MCP"], prefix="/mcp") @@ -24,7 +33,8 @@ async def upload_server_config( content_bytes = content_str.encode("utf-8") # Convert to bytes file_obj = BytesIO(content_bytes) # Use BytesIO for binary data - upload_file = UploadFile(file=file_obj, filename=MCP_SERVERS_FILE + ".json", size=len(content_str)) + mcp_file = await get_mcp_file(current_user, extension=True) + upload_file = UploadFile(file=file_obj, filename=mcp_file, size=len(content_str)) return await upload_user_file( file=upload_file, @@ -41,8 +51,14 @@ async def get_server_list( storage_service=Depends(get_storage_service), settings_service=Depends(get_settings_service), ): + # Backwards compatibility with the old file name format + mcp_file = await get_mcp_file(current_user) + old_format_config_file = await get_file_by_name(MCP_SERVERS_FILE, current_user, session) + if old_format_config_file: + await edit_file_name(old_format_config_file.id, mcp_file, current_user, session) + # Read the server configuration from a file using the files api - server_config_file = await get_file_by_name(MCP_SERVERS_FILE, current_user, session) + server_config_file = await get_file_by_name(mcp_file, current_user, session) # Attempt to download the configuration file content try: @@ -69,9 +85,10 @@ ) # Fetch and download again - server_config_file = await get_file_by_name(MCP_SERVERS_FILE, current_user, session) + mcp_file = await get_mcp_file(current_user) + server_config_file = await get_file_by_name(mcp_file, current_user, session) if not server_config_file: - raise HTTPException(status_code=500, detail="Failed to create _mcp_servers.json") from None + raise HTTPException(status_code=500, detail="Failed to create MCP Servers configuration file") from None server_config_bytes = await download_file( server_config_file.id, @@ -225,8 +242,10 @@ async def update_server( server_list["mcpServers"][server_name] = server_config # Remove the existing file - server_config_file = await get_file_by_name(MCP_SERVERS_FILE, current_user, session) + mcp_file = await get_mcp_file(current_user) + server_config_file = await get_file_by_name(mcp_file, current_user, session) + + # Delete the existing file so the updated configuration replaces it if server_config_file: await delete_file(server_config_file.id, current_user, session, storage_service) @@ -235,6 +254,14 @@ server_list, current_user, session, storage_service=storage_service, settings_service=settings_service ) + shared_component_cache_service = get_shared_component_cache_service() + # Clear the servers cache + servers = safe_cache_get(shared_component_cache_service, "servers", {}) + if isinstance(servers, dict): + if server_name in servers: + del servers[server_name] + safe_cache_set(shared_component_cache_service, "servers", servers) + return await get_server( server_name, current_user, diff --git a/src/backend/base/langflow/base/__init__.py b/src/backend/base/langflow/base/__init__.py index 90bbd217df28..01ad56cc12cd 100644 --- a/src/backend/base/langflow/base/__init__.py +++ b/src/backend/base/langflow/base/__init__.py @@ -6,6 +6,3 @@ # Import all base modules from lfx for backwards compatibility from lfx.base import * # noqa: F403 - -# Import langflow-specific modules that aren't in lfx.base -from . import knowledge_bases # noqa: F401 diff --git a/src/backend/base/langflow/initial_setup/setup.py b/src/backend/base/langflow/initial_setup/setup.py index 02ff93e8a2dd..2a4b32c43a2b 100644 --- a/src/backend/base/langflow/initial_setup/setup.py +++ b/src/backend/base/langflow/initial_setup/setup.py @@ -39,7 +39,6 @@ from langflow.services.database.models.flow.model import Flow, FlowCreate from langflow.services.database.models.folder.constants import DEFAULT_FOLDER_NAME from langflow.services.database.models.folder.model import Folder, FolderCreate, FolderRead -from langflow.services.database.models.user.crud import get_user_by_username from langflow.services.deps import get_settings_service, get_storage_service, get_variable_service, session_scope # In the folder ./starter_projects we have a few JSON files that represent @@ -727,20 +726,21 @@ async def load_flows_from_directory() -> None: """On langflow startup, this loads all flows from the directory specified in the settings. All flows are uploaded into the default folder for the superuser. - Note that this feature currently only works if AUTO_LOGIN is enabled in the settings. 
""" settings_service = get_settings_service() flows_path = settings_service.settings.load_flows_path if not flows_path: return - if not settings_service.auth_settings.AUTO_LOGIN: - await logger.awarning("AUTO_LOGIN is disabled, not loading flows from directory") - return async with session_scope() as session: - user = await get_user_by_username(session, settings_service.auth_settings.SUPERUSER) + # Find superuser by role instead of username to avoid issues with credential reset + from langflow.services.database.models.user.model import User + + stmt = select(User).where(User.is_superuser == True) # noqa: E712 + result = await session.exec(stmt) + user = result.first() if user is None: - msg = "Superuser not found in the database" + msg = "No superuser found in the database" raise NoResultFound(msg) # Ensure that the default folder exists for this user @@ -793,13 +793,16 @@ async def load_bundles_from_urls() -> tuple[list[TemporaryDirectory], list[str]] bundle_urls = settings_service.settings.bundle_urls if not bundle_urls: return [], [] - if not settings_service.auth_settings.AUTO_LOGIN: - await logger.awarning("AUTO_LOGIN is disabled, not loading flows from URLs") async with session_scope() as session: - user = await get_user_by_username(session, settings_service.auth_settings.SUPERUSER) + # Find superuser by role instead of username to avoid issues with credential reset + from langflow.services.database.models.user.model import User + + stmt = select(User).where(User.is_superuser == True) # noqa: E712 + result = await session.exec(stmt) + user = result.first() if user is None: - msg = "Superuser not found in the database" + msg = "No superuser found in the database" raise NoResultFound(msg) user_id = user.id @@ -816,11 +819,7 @@ async def load_bundles_from_urls() -> tuple[list[TemporaryDirectory], list[str]] for filename in zfile.namelist(): path = Path(filename) for dir_name in dir_names: - if ( - settings_service.auth_settings.AUTO_LOGIN - and path.is_relative_to(f"{dir_name}flows/") - and path.suffix == ".json" - ): + if path.is_relative_to(f"{dir_name}flows/") and path.suffix == ".json": file_content = zfile.read(filename) await upsert_flow_from_file(file_content, path.stem, session, user_id) elif path.is_relative_to(f"{dir_name}components/"): @@ -1002,12 +1001,16 @@ async def create_or_update_starter_projects(all_types_dict: dict) -> None: await logger.adebug(f"Successfully created {successfully_created_projects} starter projects") -async def initialize_super_user_if_needed() -> None: +async def initialize_auto_login_default_superuser() -> None: settings_service = get_settings_service() if not settings_service.auth_settings.AUTO_LOGIN: return - username = settings_service.auth_settings.SUPERUSER - password = settings_service.auth_settings.SUPERUSER_PASSWORD + # In AUTO_LOGIN mode, always use the default credentials for initial bootstrapping + # without persisting the password in memory after setup. + from lfx.services.settings.constants import DEFAULT_SUPERUSER, DEFAULT_SUPERUSER_PASSWORD + + username = DEFAULT_SUPERUSER + password = DEFAULT_SUPERUSER_PASSWORD.get_secret_value() if not username or not password: msg = "SUPERUSER and SUPERUSER_PASSWORD must be set in the settings if AUTO_LOGIN is true." 
raise ValueError(msg) diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Basic Prompt Chaining.json b/src/backend/base/langflow/initial_setup/starter_projects/Basic Prompt Chaining.json index 6c65ddfd4fd8..771fee5c6470 100644 --- a/src/backend/base/langflow/initial_setup/starter_projects/Basic Prompt Chaining.json +++ b/src/backend/base/langflow/initial_setup/starter_projects/Basic Prompt Chaining.json @@ -362,7 +362,7 @@ "legacy": false, "lf_version": "1.5.0", "metadata": { - "code_hash": "715a37648834", + "code_hash": "46a90558cb44", "dependencies": { "dependencies": [ { @@ -394,48 +394,6 @@ "pinned": false, "template": { "_type": "Component", - "background_color": { - "_input_type": "MessageTextInput", - "advanced": true, - "display_name": "Background Color", - "dynamic": false, - "info": "The background color of the icon.", - "input_types": [ - "Message" - ], - "list": false, - "load_from_db": false, - "name": "background_color", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "trace_as_input": true, - "trace_as_metadata": true, - "type": "str", - "value": "" - }, - "chat_icon": { - "_input_type": "MessageTextInput", - "advanced": true, - "display_name": "Icon", - "dynamic": false, - "info": "The icon of the message.", - "input_types": [ - "Message" - ], - "list": false, - "load_from_db": false, - "name": "chat_icon", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "trace_as_input": true, - "trace_as_metadata": true, - "type": "str", - "value": "" - }, "code": { "advanced": true, "dynamic": true, @@ -452,7 +410,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from lfx.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom lfx.base.io.chat import ChatComponent\nfrom lfx.inputs.inputs import BoolInput\nfrom lfx.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom lfx.schema.message import Message\nfrom lfx.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n documentation: str = \"https://docs.langflow.org/components-io#chat-input\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. 
If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\n \"background_color\": background_color,\n \"text_color\": text_color,\n \"icon\": icon,\n },\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n" + "value": "from lfx.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom lfx.base.io.chat import ChatComponent\nfrom lfx.inputs.inputs import BoolInput\nfrom lfx.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom lfx.schema.message import Message\nfrom lfx.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n documentation: str = \"https://docs.langflow.org/components-io#chat-input\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. 
If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n" }, "files": { "_input_type": "FileInput", @@ -460,6 +418,9 @@ "display_name": "Files", "dynamic": false, "fileTypes": [ + "csv", + "json", + "pdf", "txt", "md", "mdx", @@ -594,27 +555,6 @@ "trace_as_metadata": true, "type": "bool", "value": true - }, - "text_color": { - "_input_type": "MessageTextInput", - "advanced": true, - "display_name": "Text Color", - "dynamic": false, - "info": "The text color of the name", - "input_types": [ - "Message" - ], - "list": false, - "load_from_db": false, - "name": "text_color", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "trace_as_input": true, - "trace_as_metadata": true, - "type": "str", - "value": "" } } }, @@ -672,7 +612,7 @@ "legacy": false, "lf_version": "1.5.0", "metadata": { - "code_hash": "9619107fecd1", + "code_hash": "ccda4dbe4ae1", "dependencies": { "dependencies": [ { @@ -712,56 +652,12 @@ "pinned": false, "template": { "_type": "Component", - "background_color": { - "_input_type": "MessageTextInput", - "advanced": true, - "display_name": "Background Color", - "dynamic": false, - "info": "The background color of the icon.", - "input_types": [ - "Message" - ], - "list": false, - "load_from_db": false, - "name": "background_color", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_input": true, - "trace_as_metadata": true, - "type": "str", - "value": "" - }, - "chat_icon": { - "_input_type": "MessageTextInput", - "advanced": true, - "display_name": "Icon", - "dynamic": false, - "info": "The icon of the message.", - "input_types": [ - "Message" - ], - "list": false, - "load_from_db": false, - "name": "chat_icon", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_input": true, - "trace_as_metadata": true, - "type": "str", - "value": "" - }, "clean_data": { "_input_type": "BoolInput", "advanced": true, "display_name": "Basic Clean Data", "dynamic": false, - "info": "Whether to clean the data", + "info": "Whether to clean data before converting to string.", "list": false, "list_add_label": "Add More", "name": "clean_data", @@ -790,7 +686,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom lfx.base.io.chat import ChatComponent\nfrom lfx.helpers.data import safe_convert\nfrom lfx.inputs.inputs import BoolInput, DropdownInput, HandleInput, MessageTextInput\nfrom lfx.schema.data import Data\nfrom lfx.schema.dataframe import DataFrame\nfrom lfx.schema.message import Message\nfrom 
lfx.schema.properties import Source\nfrom lfx.template.field.base import Output\nfrom lfx.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n documentation: str = \"https://docs.langflow.org/components-io#chat-output\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n 
message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n" + "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom lfx.base.io.chat import ChatComponent\nfrom lfx.helpers.data import safe_convert\nfrom lfx.inputs.inputs import BoolInput, DropdownInput, HandleInput, MessageTextInput\nfrom lfx.schema.data import Data\nfrom lfx.schema.dataframe import DataFrame\nfrom lfx.schema.message import Message\nfrom lfx.schema.properties import Source\nfrom lfx.template.field.base import Output\nfrom lfx.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n documentation: str = \"https://docs.langflow.org/components-io#chat-output\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n 
advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. If left empty, it will be dynamically set to the Data's text key.\",\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n advanced=True,\n info=\"Whether to clean data before converting to string.\",\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message 
| Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n clean_data: bool = getattr(self, \"clean_data\", False)\n return \"\\n\".join([safe_convert(item, clean_data=clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n" }, "data_template": { "_input_type": "MessageTextInput", @@ -917,28 +813,6 @@ "trace_as_metadata": true, "type": "bool", "value": true - }, - "text_color": { - "_input_type": "MessageTextInput", - "advanced": true, - "display_name": "Text Color", - "dynamic": false, - "info": "The text color of the name", - "input_types": [ - "Message" - ], - "list": false, - "load_from_db": false, - "name": "text_color", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_input": true, - "trace_as_metadata": true, - "type": "str", - "value": "" } }, "tool_mode": false diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Basic Prompting.json b/src/backend/base/langflow/initial_setup/starter_projects/Basic Prompting.json index 6c340d548c21..18b8204b07a5 100644 --- a/src/backend/base/langflow/initial_setup/starter_projects/Basic Prompting.json +++ b/src/backend/base/langflow/initial_setup/starter_projects/Basic Prompting.json @@ -117,7 +117,7 @@ "legacy": false, "lf_version": "1.4.2", "metadata": { - "code_hash": "715a37648834", + "code_hash": "46a90558cb44", "dependencies": { "dependencies": [ { @@ -149,48 +149,6 @@ "pinned": false, "template": { "_type": "Component", - "background_color": { - "_input_type": "MessageTextInput", - "advanced": true, - "display_name": "Background Color", - "dynamic": false, - "info": "The background color of the icon.", - "input_types": [ - "Message" - ], - "list": false, - "load_from_db": false, - "name": "background_color", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "trace_as_input": true, - "trace_as_metadata": true, - "type": "str", - "value": "" - }, - "chat_icon": { - "_input_type": "MessageTextInput", - "advanced": true, - "display_name": "Icon", - "dynamic": false, - "info": "The icon of the message.", - "input_types": [ - "Message" - ], - "list": false, - "load_from_db": false, - "name": "chat_icon", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "trace_as_input": true, - "trace_as_metadata": true, - "type": "str", - "value": "" - }, "code": { "advanced": true, "dynamic": true, @@ -207,13 +165,16 @@ "show": true, "title_case": false, "type": "code", - "value": "from lfx.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom lfx.base.io.chat import ChatComponent\nfrom lfx.inputs.inputs import BoolInput\nfrom lfx.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom lfx.schema.message import Message\nfrom lfx.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n 
MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n documentation: str = \"https://docs.langflow.org/components-io#chat-input\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\n \"background_color\": background_color,\n \"text_color\": text_color,\n \"icon\": icon,\n },\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n" + "value": "from lfx.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom lfx.base.io.chat import ChatComponent\nfrom lfx.inputs.inputs import BoolInput\nfrom lfx.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom lfx.schema.message import Message\nfrom lfx.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n documentation: str = \"https://docs.langflow.org/components-io#chat-input\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the 
message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n" }, "files": { "advanced": true, "display_name": "Files", "dynamic": false, "fileTypes": [ + "csv", + "json", + "pdf", "txt", "md", "mdx", @@ -343,27 +304,6 @@ "trace_as_metadata": true, "type": "bool", "value": true - }, - "text_color": { - "_input_type": "MessageTextInput", - "advanced": true, - "display_name": "Text Color", - "dynamic": false, - "info": "The text color of the name", - "input_types": [ - "Message" - ], - "list": false, - "load_from_db": false, - "name": "text_color", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "trace_as_input": true, - "trace_as_metadata": true, - "type": "str", - "value": "" } } }, @@ -624,7 +564,7 @@ "legacy": false, "lf_version": "1.4.2", "metadata": { - "code_hash": "9619107fecd1", + "code_hash": "ccda4dbe4ae1", "dependencies": { "dependencies": [ { @@ -664,56 +604,12 @@ "pinned": false, "template": { "_type": "Component", - "background_color": { - "_input_type": "MessageTextInput", - "advanced": true, - "display_name": "Background Color", - "dynamic": false, - "info": "The background color of the icon.", - "input_types": [ - "Message" - ], - "list": false, - "load_from_db": false, - "name": "background_color", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_input": true, - "trace_as_metadata": true, - "type": "str", - "value": "" - }, - "chat_icon": { - "_input_type": "MessageTextInput", - "advanced": true, - "display_name": "Icon", - "dynamic": false, - "info": "The icon of the message.", - "input_types": [ - "Message" - ], - "list": false, - "load_from_db": false, - "name": "chat_icon", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_input": true, - "trace_as_metadata": true, - "type": "str", - "value": "" - }, "clean_data": { "_input_type": "BoolInput", "advanced": true, "display_name": "Basic Clean Data", "dynamic": false, - "info": "Whether to clean the data", + "info": "Whether to clean data before converting to string.", "list": false, "list_add_label": "Add More", "name": "clean_data", @@ 
-742,7 +638,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom lfx.base.io.chat import ChatComponent\nfrom lfx.helpers.data import safe_convert\nfrom lfx.inputs.inputs import BoolInput, DropdownInput, HandleInput, MessageTextInput\nfrom lfx.schema.data import Data\nfrom lfx.schema.dataframe import DataFrame\nfrom lfx.schema.message import Message\nfrom lfx.schema.properties import Source\nfrom lfx.template.field.base import Output\nfrom lfx.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n documentation: str = \"https://docs.langflow.org/components-io#chat-output\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, 
got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n" + "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom lfx.base.io.chat import ChatComponent\nfrom lfx.helpers.data import safe_convert\nfrom lfx.inputs.inputs import BoolInput, DropdownInput, HandleInput, MessageTextInput\nfrom lfx.schema.data import Data\nfrom lfx.schema.dataframe import DataFrame\nfrom lfx.schema.message import Message\nfrom lfx.schema.properties import Source\nfrom lfx.template.field.base import Output\nfrom lfx.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n documentation: str = \"https://docs.langflow.org/components-io#chat-output\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n advanced=True,\n info=\"Whether to clean data before converting to string.\",\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n clean_data: bool = getattr(self, \"clean_data\", False)\n return \"\\n\".join([safe_convert(item, 
clean_data=clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n" }, "data_template": { "_input_type": "MessageTextInput", @@ -869,28 +765,6 @@ "trace_as_metadata": true, "type": "bool", "value": true - }, - "text_color": { - "_input_type": "MessageTextInput", - "advanced": true, - "display_name": "Text Color", - "dynamic": false, - "info": "The text color of the name", - "input_types": [ - "Message" - ], - "list": false, - "load_from_db": false, - "name": "text_color", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_input": true, - "trace_as_metadata": true, - "type": "str", - "value": "" } }, "tool_mode": false diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Blog Writer.json b/src/backend/base/langflow/initial_setup/starter_projects/Blog Writer.json index d6c45b65fc6c..090b27c8618b 100644 --- a/src/backend/base/langflow/initial_setup/starter_projects/Blog Writer.json +++ b/src/backend/base/langflow/initial_setup/starter_projects/Blog Writer.json @@ -477,7 +477,7 @@ "legacy": false, "lf_version": "1.4.2", "metadata": { - "code_hash": "9619107fecd1", + "code_hash": "ccda4dbe4ae1", "dependencies": { "dependencies": [ { @@ -517,54 +517,12 @@ "pinned": false, "template": { "_type": "Component", - "background_color": { - "_input_type": "MessageTextInput", - "advanced": true, - "display_name": "Background Color", - "dynamic": false, - "info": "The background color of the icon.", - "input_types": [ - "Message" - ], - "list": false, - "load_from_db": false, - "name": "background_color", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "trace_as_input": true, - "trace_as_metadata": true, - "type": "str", - "value": "" - }, - "chat_icon": { - "_input_type": "MessageTextInput", - "advanced": true, - "display_name": "Icon", - "dynamic": false, - "info": "The icon of the message.", - "input_types": [ - "Message" - ], - "list": false, - "load_from_db": false, - "name": "chat_icon", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "trace_as_input": true, - "trace_as_metadata": true, - "type": "str", - "value": "" - }, "clean_data": { "_input_type": "BoolInput", "advanced": true, "display_name": "Basic Clean Data", "dynamic": false, - "info": "Whether to clean the data", + "info": "Whether to clean data before converting to string.", "list": false, "list_add_label": "Add More", "name": "clean_data", @@ -593,7 +551,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom lfx.base.io.chat import ChatComponent\nfrom lfx.helpers.data import safe_convert\nfrom lfx.inputs.inputs import BoolInput, DropdownInput, HandleInput, MessageTextInput\nfrom lfx.schema.data import Data\nfrom lfx.schema.dataframe import DataFrame\nfrom lfx.schema.message import Message\nfrom lfx.schema.properties import Source\nfrom lfx.template.field.base import Output\nfrom lfx.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n documentation: str = \"https://docs.langflow.org/components-io#chat-output\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n 
minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = 
stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n" + "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom lfx.base.io.chat import ChatComponent\nfrom lfx.helpers.data import safe_convert\nfrom lfx.inputs.inputs import BoolInput, DropdownInput, HandleInput, MessageTextInput\nfrom lfx.schema.data import Data\nfrom lfx.schema.dataframe import DataFrame\nfrom lfx.schema.message import Message\nfrom lfx.schema.properties import Source\nfrom lfx.template.field.base import Output\nfrom lfx.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n documentation: str = \"https://docs.langflow.org/components-io#chat-output\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n 
info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. If left empty, it will be dynamically set to the Data's text key.\",\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n advanced=True,\n info=\"Whether to clean data before converting to string.\",\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | 
Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n clean_data: bool = getattr(self, \"clean_data\", False)\n return \"\\n\".join([safe_convert(item, clean_data=clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n" }, "data_template": { "advanced": true, @@ -710,27 +668,6 @@ "trace_as_metadata": true, "type": "bool", "value": true - }, - "text_color": { - "_input_type": "MessageTextInput", - "advanced": true, - "display_name": "Text Color", - "dynamic": false, - "info": "The text color of the name", - "input_types": [ - "Message" - ], - "list": false, - "load_from_db": false, - "name": "text_color", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "trace_as_input": true, - "trace_as_metadata": true, - "type": "str", - "value": "" } } }, diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Custom Component Generator.json b/src/backend/base/langflow/initial_setup/starter_projects/Custom Component Generator.json index 5e61fe49c048..3c5188ccd352 100644 --- a/src/backend/base/langflow/initial_setup/starter_projects/Custom Component Generator.json +++ b/src/backend/base/langflow/initial_setup/starter_projects/Custom Component Generator.json @@ -237,7 +237,7 @@ "legacy": false, "lf_version": "1.6.0", "metadata": { - "code_hash": "6c35f0cd5b52", + "code_hash": "efd11fac2416", "dependencies": { "dependencies": [ { @@ -299,7 +299,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from typing import Any, cast\n\nfrom lfx.custom.custom_component.component import Component\nfrom lfx.helpers.data import data_to_text\nfrom lfx.inputs.inputs import DropdownInput, HandleInput, IntInput, MessageTextInput, MultilineInput, TabInput\nfrom lfx.memory import aget_messages, astore_message\nfrom lfx.schema.data import Data\nfrom lfx.schema.dataframe import DataFrame\nfrom lfx.schema.dotdict import dotdict\nfrom lfx.schema.message import Message\nfrom lfx.template.field.base import Output\nfrom lfx.utils.component_utils import set_current_fields, set_field_display\nfrom lfx.utils.constants import MESSAGE_SENDER_AI, MESSAGE_SENDER_NAME_AI, MESSAGE_SENDER_USER\n\n\nclass MemoryComponent(Component):\n display_name = \"Message History\"\n description = \"Stores or retrieves stored chat messages from Langflow tables or an external memory.\"\n documentation: str = \"https://docs.langflow.org/components-helpers#message-history\"\n icon = \"message-square-more\"\n name = \"Memory\"\n default_keys = [\"mode\", \"memory\"]\n mode_config = {\n \"Store\": [\"message\", \"memory\", \"sender\", \"sender_name\", \"session_id\"],\n \"Retrieve\": [\"n_messages\", \"order\", \"template\", \"memory\"],\n }\n\n inputs = [\n TabInput(\n name=\"mode\",\n display_name=\"Mode\",\n options=[\"Retrieve\", \"Store\"],\n value=\"Retrieve\",\n info=\"Operation mode: Store messages or Retrieve messages.\",\n real_time_refresh=True,\n ),\n MessageTextInput(\n name=\"message\",\n display_name=\"Message\",\n info=\"The chat message to be stored.\",\n tool_mode=True,\n dynamic=True,\n show=False,\n ),\n HandleInput(\n name=\"memory\",\n display_name=\"External Memory\",\n input_types=[\"Memory\"],\n info=\"Retrieve messages from an external memory. 
If empty, it will use the Langflow tables.\",\n advanced=True,\n ),\n DropdownInput(\n name=\"sender_type\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER, \"Machine and User\"],\n value=\"Machine and User\",\n info=\"Filter by sender type.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender\",\n display_name=\"Sender\",\n info=\"The sender of the message. Might be Machine or User. \"\n \"If empty, the current sender parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Filter by sender name.\",\n advanced=True,\n show=False,\n ),\n IntInput(\n name=\"n_messages\",\n display_name=\"Number of Messages\",\n value=100,\n info=\"Number of messages to retrieve.\",\n advanced=True,\n show=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n value=\"\",\n advanced=True,\n ),\n DropdownInput(\n name=\"order\",\n display_name=\"Order\",\n options=[\"Ascending\", \"Descending\"],\n value=\"Ascending\",\n info=\"Order of the messages.\",\n advanced=True,\n tool_mode=True,\n required=True,\n ),\n MultilineInput(\n name=\"template\",\n display_name=\"Template\",\n info=\"The template to use for formatting the data. \"\n \"It can contain the keys {text}, {sender} or any other key in the message data.\",\n value=\"{sender_name}: {text}\",\n advanced=True,\n show=False,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Message\", name=\"messages_text\", method=\"retrieve_messages_as_text\", dynamic=True),\n Output(display_name=\"Dataframe\", name=\"dataframe\", method=\"retrieve_messages_dataframe\", dynamic=True),\n ]\n\n def update_outputs(self, frontend_node: dict, field_name: str, field_value: Any) -> dict:\n \"\"\"Dynamically show only the relevant output based on the selected output type.\"\"\"\n if field_name == \"mode\":\n # Start with empty outputs\n frontend_node[\"outputs\"] = []\n if field_value == \"Store\":\n frontend_node[\"outputs\"] = [\n Output(\n display_name=\"Stored Messages\",\n name=\"stored_messages\",\n method=\"store_message\",\n hidden=True,\n dynamic=True,\n )\n ]\n if field_value == \"Retrieve\":\n frontend_node[\"outputs\"] = [\n Output(\n display_name=\"Messages\", name=\"messages_text\", method=\"retrieve_messages_as_text\", dynamic=True\n ),\n Output(\n display_name=\"Dataframe\", name=\"dataframe\", method=\"retrieve_messages_dataframe\", dynamic=True\n ),\n ]\n return frontend_node\n\n async def store_message(self) -> Message:\n message = Message(text=self.message) if isinstance(self.message, str) else self.message\n\n message.session_id = self.session_id or message.session_id\n message.sender = self.sender or message.sender or MESSAGE_SENDER_AI\n message.sender_name = self.sender_name or message.sender_name or MESSAGE_SENDER_NAME_AI\n\n stored_messages: list[Message] = []\n\n if self.memory:\n self.memory.session_id = message.session_id\n lc_message = message.to_lc_message()\n await self.memory.aadd_messages([lc_message])\n\n stored_messages = await self.memory.aget_messages() or []\n\n stored_messages = [Message.from_lc_message(m) for m in stored_messages] if stored_messages else []\n\n if message.sender:\n stored_messages = [m for m in stored_messages if m.sender == message.sender]\n else:\n await astore_message(message, flow_id=self.graph.flow_id)\n stored_messages = (\n await aget_messages(\n 
session_id=message.session_id, sender_name=message.sender_name, sender=message.sender\n )\n or []\n )\n\n if not stored_messages:\n msg = \"No messages were stored. Please ensure that the session ID and sender are properly set.\"\n raise ValueError(msg)\n\n stored_message = stored_messages[0]\n self.status = stored_message\n return stored_message\n\n async def retrieve_messages(self) -> Data:\n sender_type = self.sender_type\n sender_name = self.sender_name\n session_id = self.session_id\n n_messages = self.n_messages\n order = \"DESC\" if self.order == \"Descending\" else \"ASC\"\n\n if sender_type == \"Machine and User\":\n sender_type = None\n\n if self.memory and not hasattr(self.memory, \"aget_messages\"):\n memory_name = type(self.memory).__name__\n err_msg = f\"External Memory object ({memory_name}) must have 'aget_messages' method.\"\n raise AttributeError(err_msg)\n # Check if n_messages is None or 0\n if n_messages == 0:\n stored = []\n elif self.memory:\n # override session_id\n self.memory.session_id = session_id\n\n stored = await self.memory.aget_messages()\n # langchain memories are supposed to return messages in ascending order\n\n if order == \"DESC\":\n stored = stored[::-1]\n if n_messages:\n stored = stored[-n_messages:] if order == \"ASC\" else stored[:n_messages]\n stored = [Message.from_lc_message(m) for m in stored]\n if sender_type:\n expected_type = MESSAGE_SENDER_AI if sender_type == MESSAGE_SENDER_AI else MESSAGE_SENDER_USER\n stored = [m for m in stored if m.type == expected_type]\n else:\n # For internal memory, we always fetch the last N messages by ordering by DESC\n stored = await aget_messages(\n sender=sender_type,\n sender_name=sender_name,\n session_id=session_id,\n limit=10000,\n order=order,\n )\n if n_messages:\n stored = stored[-n_messages:] if order == \"ASC\" else stored[:n_messages]\n\n # self.status = stored\n return cast(\"Data\", stored)\n\n async def retrieve_messages_as_text(self) -> Message:\n stored_text = data_to_text(self.template, await self.retrieve_messages())\n # self.status = stored_text\n return Message(text=stored_text)\n\n async def retrieve_messages_dataframe(self) -> DataFrame:\n \"\"\"Convert the retrieved messages into a DataFrame.\n\n Returns:\n DataFrame: A DataFrame containing the message data.\n \"\"\"\n messages = await self.retrieve_messages()\n return DataFrame(messages)\n\n def update_build_config(\n self,\n build_config: dotdict,\n field_value: Any, # noqa: ARG002\n field_name: str | None = None, # noqa: ARG002\n ) -> dotdict:\n return set_current_fields(\n build_config=build_config,\n action_fields=self.mode_config,\n selected_action=build_config[\"mode\"][\"value\"],\n default_fields=self.default_keys,\n func=set_field_display,\n )\n" + "value": "from typing import Any, cast\n\nfrom lfx.custom.custom_component.component import Component\nfrom lfx.helpers.data import data_to_text\nfrom lfx.inputs.inputs import DropdownInput, HandleInput, IntInput, MessageTextInput, MultilineInput, TabInput\nfrom lfx.memory import aget_messages, astore_message\nfrom lfx.schema.data import Data\nfrom lfx.schema.dataframe import DataFrame\nfrom lfx.schema.dotdict import dotdict\nfrom lfx.schema.message import Message\nfrom lfx.template.field.base import Output\nfrom lfx.utils.component_utils import set_current_fields, set_field_display\nfrom lfx.utils.constants import MESSAGE_SENDER_AI, MESSAGE_SENDER_NAME_AI, MESSAGE_SENDER_USER\n\n\nclass MemoryComponent(Component):\n display_name = \"Message History\"\n description = \"Stores or 
retrieves stored chat messages from Langflow tables or an external memory.\"\n documentation: str = \"https://docs.langflow.org/components-helpers#message-history\"\n icon = \"message-square-more\"\n name = \"Memory\"\n default_keys = [\"mode\", \"memory\", \"session_id\"]\n mode_config = {\n \"Store\": [\"message\", \"memory\", \"sender\", \"sender_name\", \"session_id\"],\n \"Retrieve\": [\"n_messages\", \"order\", \"template\", \"memory\", \"session_id\"],\n }\n\n inputs = [\n TabInput(\n name=\"mode\",\n display_name=\"Mode\",\n options=[\"Retrieve\", \"Store\"],\n value=\"Retrieve\",\n info=\"Operation mode: Store messages or Retrieve messages.\",\n real_time_refresh=True,\n ),\n MessageTextInput(\n name=\"message\",\n display_name=\"Message\",\n info=\"The chat message to be stored.\",\n tool_mode=True,\n dynamic=True,\n show=False,\n ),\n HandleInput(\n name=\"memory\",\n display_name=\"External Memory\",\n input_types=[\"Memory\"],\n info=\"Retrieve messages from an external memory. If empty, it will use the Langflow tables.\",\n advanced=True,\n ),\n DropdownInput(\n name=\"sender_type\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER, \"Machine and User\"],\n value=\"Machine and User\",\n info=\"Filter by sender type.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender\",\n display_name=\"Sender\",\n info=\"The sender of the message. Might be Machine or User. \"\n \"If empty, the current sender parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Filter by sender name.\",\n advanced=True,\n show=False,\n ),\n IntInput(\n name=\"n_messages\",\n display_name=\"Number of Messages\",\n value=100,\n info=\"Number of messages to retrieve.\",\n advanced=True,\n show=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n value=\"\",\n advanced=True,\n ),\n DropdownInput(\n name=\"order\",\n display_name=\"Order\",\n options=[\"Ascending\", \"Descending\"],\n value=\"Ascending\",\n info=\"Order of the messages.\",\n advanced=True,\n tool_mode=True,\n required=True,\n ),\n MultilineInput(\n name=\"template\",\n display_name=\"Template\",\n info=\"The template to use for formatting the data. 
\"\n \"It can contain the keys {text}, {sender} or any other key in the message data.\",\n value=\"{sender_name}: {text}\",\n advanced=True,\n show=False,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Message\", name=\"messages_text\", method=\"retrieve_messages_as_text\", dynamic=True),\n Output(display_name=\"Dataframe\", name=\"dataframe\", method=\"retrieve_messages_dataframe\", dynamic=True),\n ]\n\n def update_outputs(self, frontend_node: dict, field_name: str, field_value: Any) -> dict:\n \"\"\"Dynamically show only the relevant output based on the selected output type.\"\"\"\n if field_name == \"mode\":\n # Start with empty outputs\n frontend_node[\"outputs\"] = []\n if field_value == \"Store\":\n frontend_node[\"outputs\"] = [\n Output(\n display_name=\"Stored Messages\",\n name=\"stored_messages\",\n method=\"store_message\",\n hidden=True,\n dynamic=True,\n )\n ]\n if field_value == \"Retrieve\":\n frontend_node[\"outputs\"] = [\n Output(\n display_name=\"Messages\", name=\"messages_text\", method=\"retrieve_messages_as_text\", dynamic=True\n ),\n Output(\n display_name=\"Dataframe\", name=\"dataframe\", method=\"retrieve_messages_dataframe\", dynamic=True\n ),\n ]\n return frontend_node\n\n async def store_message(self) -> Message:\n message = Message(text=self.message) if isinstance(self.message, str) else self.message\n\n message.session_id = self.session_id or message.session_id\n message.sender = self.sender or message.sender or MESSAGE_SENDER_AI\n message.sender_name = self.sender_name or message.sender_name or MESSAGE_SENDER_NAME_AI\n\n stored_messages: list[Message] = []\n\n if self.memory:\n self.memory.session_id = message.session_id\n lc_message = message.to_lc_message()\n await self.memory.aadd_messages([lc_message])\n\n stored_messages = await self.memory.aget_messages() or []\n\n stored_messages = [Message.from_lc_message(m) for m in stored_messages] if stored_messages else []\n\n if message.sender:\n stored_messages = [m for m in stored_messages if m.sender == message.sender]\n else:\n await astore_message(message, flow_id=self.graph.flow_id)\n stored_messages = (\n await aget_messages(\n session_id=message.session_id, sender_name=message.sender_name, sender=message.sender\n )\n or []\n )\n\n if not stored_messages:\n msg = \"No messages were stored. 
Please ensure that the session ID and sender are properly set.\"\n raise ValueError(msg)\n\n stored_message = stored_messages[0]\n self.status = stored_message\n return stored_message\n\n async def retrieve_messages(self) -> Data:\n sender_type = self.sender_type\n sender_name = self.sender_name\n session_id = self.session_id\n n_messages = self.n_messages\n order = \"DESC\" if self.order == \"Descending\" else \"ASC\"\n\n if sender_type == \"Machine and User\":\n sender_type = None\n\n if self.memory and not hasattr(self.memory, \"aget_messages\"):\n memory_name = type(self.memory).__name__\n err_msg = f\"External Memory object ({memory_name}) must have 'aget_messages' method.\"\n raise AttributeError(err_msg)\n # Check if n_messages is None or 0\n if n_messages == 0:\n stored = []\n elif self.memory:\n # override session_id\n self.memory.session_id = session_id\n\n stored = await self.memory.aget_messages()\n # langchain memories are supposed to return messages in ascending order\n\n if n_messages:\n stored = stored[-n_messages:] # Get last N messages first\n\n if order == \"DESC\":\n stored = stored[::-1] # Then reverse if needed\n\n stored = [Message.from_lc_message(m) for m in stored]\n if sender_type:\n expected_type = MESSAGE_SENDER_AI if sender_type == MESSAGE_SENDER_AI else MESSAGE_SENDER_USER\n stored = [m for m in stored if m.type == expected_type]\n else:\n # For internal memory, we always fetch the last N messages by ordering by DESC\n stored = await aget_messages(\n sender=sender_type,\n sender_name=sender_name,\n session_id=session_id,\n limit=10000,\n order=order,\n )\n if n_messages:\n stored = stored[-n_messages:] # Get last N messages\n\n # self.status = stored\n return cast(\"Data\", stored)\n\n async def retrieve_messages_as_text(self) -> Message:\n stored_text = data_to_text(self.template, await self.retrieve_messages())\n # self.status = stored_text\n return Message(text=stored_text)\n\n async def retrieve_messages_dataframe(self) -> DataFrame:\n \"\"\"Convert the retrieved messages into a DataFrame.\n\n Returns:\n DataFrame: A DataFrame containing the message data.\n \"\"\"\n messages = await self.retrieve_messages()\n return DataFrame(messages)\n\n def update_build_config(\n self,\n build_config: dotdict,\n field_value: Any, # noqa: ARG002\n field_name: str | None = None, # noqa: ARG002\n ) -> dotdict:\n return set_current_fields(\n build_config=build_config,\n action_fields=self.mode_config,\n selected_action=build_config[\"mode\"][\"value\"],\n default_fields=self.default_keys,\n func=set_field_display,\n )\n" }, "memory": { "_input_type": "HandleInput", @@ -1934,7 +1934,7 @@ "legacy": false, "lf_version": "1.6.0", "metadata": { - "code_hash": "715a37648834", + "code_hash": "46a90558cb44", "dependencies": { "dependencies": [ { @@ -1968,52 +1968,6 @@ "score": 0.0020353564437605998, "template": { "_type": "Component", - "background_color": { - "_input_type": "MessageTextInput", - "advanced": true, - "display_name": "Background Color", - "dynamic": false, - "info": "The background color of the icon.", - "input_types": [ - "Message" - ], - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "name": "background_color", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_input": true, - "trace_as_metadata": true, - "type": "str", - "value": "" - }, - "chat_icon": { - "_input_type": "MessageTextInput", - "advanced": true, - "display_name": "Icon", - "dynamic": false, - "info": "The icon 
of the message.", - "input_types": [ - "Message" - ], - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "name": "chat_icon", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_input": true, - "trace_as_metadata": true, - "type": "str", - "value": "" - }, "code": { "advanced": true, "dynamic": true, @@ -2030,7 +1984,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from lfx.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom lfx.base.io.chat import ChatComponent\nfrom lfx.inputs.inputs import BoolInput\nfrom lfx.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom lfx.schema.message import Message\nfrom lfx.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n documentation: str = \"https://docs.langflow.org/components-io#chat-input\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. 
If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\n \"background_color\": background_color,\n \"text_color\": text_color,\n \"icon\": icon,\n },\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n" + "value": "from lfx.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom lfx.base.io.chat import ChatComponent\nfrom lfx.inputs.inputs import BoolInput\nfrom lfx.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom lfx.schema.message import Message\nfrom lfx.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n documentation: str = \"https://docs.langflow.org/components-io#chat-input\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. 
If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n" }, "files": { "_input_type": "FileInput", @@ -2038,6 +1992,9 @@ "display_name": "Files", "dynamic": false, "fileTypes": [ + "csv", + "json", + "pdf", "txt", "md", "mdx", @@ -2184,29 +2141,6 @@ "trace_as_metadata": true, "type": "bool", "value": true - }, - "text_color": { - "_input_type": "MessageTextInput", - "advanced": true, - "display_name": "Text Color", - "dynamic": false, - "info": "The text color of the name", - "input_types": [ - "Message" - ], - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "name": "text_color", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_input": true, - "trace_as_metadata": true, - "type": "str", - "value": "" } }, "tool_mode": false @@ -2260,7 +2194,7 @@ "key": "ChatOutput", "legacy": false, "metadata": { - "code_hash": "9619107fecd1", + "code_hash": "ccda4dbe4ae1", "dependencies": { "dependencies": [ { @@ -2302,58 +2236,12 @@ "score": 0.003169567463043492, "template": { "_type": "Component", - "background_color": { - "_input_type": "MessageTextInput", - "advanced": true, - "display_name": "Background Color", - "dynamic": false, - "info": "The background color of the icon.", - "input_types": [ - "Message" - ], - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "name": "background_color", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_input": true, - "trace_as_metadata": true, - "type": "str", - "value": "" - }, - "chat_icon": { - "_input_type": "MessageTextInput", - "advanced": true, - "display_name": "Icon", - "dynamic": false, - "info": "The icon of the message.", - "input_types": [ - "Message" - ], - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "name": "chat_icon", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_input": true, - "trace_as_metadata": true, - "type": "str", - "value": "" - }, "clean_data": { "_input_type": "BoolInput", "advanced": true, "display_name": "Basic Clean Data", "dynamic": false, - "info": "Whether to clean the data", + "info": "Whether to clean data before converting to string.", "list": false, "list_add_label": "Add More", "name": "clean_data", @@ -2382,7 +2270,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom lfx.base.io.chat import ChatComponent\nfrom lfx.helpers.data import safe_convert\nfrom lfx.inputs.inputs import BoolInput, DropdownInput, 
HandleInput, MessageTextInput\nfrom lfx.schema.data import Data\nfrom lfx.schema.dataframe import DataFrame\nfrom lfx.schema.message import Message\nfrom lfx.schema.properties import Source\nfrom lfx.template.field.base import Output\nfrom lfx.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n documentation: str = \"https://docs.langflow.org/components-io#chat-output\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = 
Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n" + "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom lfx.base.io.chat import ChatComponent\nfrom lfx.helpers.data import safe_convert\nfrom lfx.inputs.inputs import BoolInput, DropdownInput, HandleInput, MessageTextInput\nfrom lfx.schema.data import Data\nfrom lfx.schema.dataframe import DataFrame\nfrom lfx.schema.message import Message\nfrom lfx.schema.properties import Source\nfrom lfx.template.field.base import Output\nfrom lfx.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n documentation: str = \"https://docs.langflow.org/components-io#chat-output\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n 
required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. If left empty, it will be dynamically set to the Data's text key.\",\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n advanced=True,\n info=\"Whether to clean data before converting to string.\",\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | 
Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n clean_data: bool = getattr(self, \"clean_data\", False)\n return \"\\n\".join([safe_convert(item, clean_data=clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n" }, "data_template": { "_input_type": "MessageTextInput", @@ -2515,29 +2403,6 @@ "trace_as_metadata": true, "type": "bool", "value": true - }, - "text_color": { - "_input_type": "MessageTextInput", - "advanced": true, - "display_name": "Text Color", - "dynamic": false, - "info": "The text color of the name", - "input_types": [ - "Message" - ], - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "name": "text_color", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_input": true, - "trace_as_metadata": true, - "type": "str", - "value": "" } }, "tool_mode": false diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Document Q&A.json b/src/backend/base/langflow/initial_setup/starter_projects/Document Q&A.json index 0f6e55a07f82..c27dd7e4943b 100644 --- a/src/backend/base/langflow/initial_setup/starter_projects/Document Q&A.json +++ b/src/backend/base/langflow/initial_setup/starter_projects/Document Q&A.json @@ -147,7 +147,7 @@ "legacy": false, "lf_version": "1.4.3", "metadata": { - "code_hash": "715a37648834", + "code_hash": "46a90558cb44", "dependencies": { "dependencies": [ { @@ -179,48 +179,6 @@ "pinned": false, "template": { "_type": "Component", - "background_color": { - "_input_type": "MessageTextInput", - "advanced": true, - "display_name": "Background Color", - "dynamic": false, - "info": "The background color of the icon.", - "input_types": [ - "Message" - ], - "list": false, - "load_from_db": false, - "name": "background_color", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "trace_as_input": true, - "trace_as_metadata": true, - "type": "str", - "value": "" - }, - "chat_icon": { - "_input_type": "MessageTextInput", - "advanced": true, - "display_name": "Icon", - "dynamic": false, - "info": "The icon of the message.", - "input_types": [ - "Message" - ], - "list": false, - "load_from_db": false, - "name": "chat_icon", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "trace_as_input": true, - "trace_as_metadata": true, - "type": "str", - "value": "" - }, "code": { "advanced": true, "dynamic": true, @@ -237,13 +195,16 @@ "show": true, "title_case": false, "type": "code", - "value": "from lfx.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom lfx.base.io.chat import ChatComponent\nfrom lfx.inputs.inputs import BoolInput\nfrom lfx.io import (\n DropdownInput,\n FileInput,\n 
MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom lfx.schema.message import Message\nfrom lfx.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n documentation: str = \"https://docs.langflow.org/components-io#chat-input\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\n \"background_color\": background_color,\n \"text_color\": text_color,\n \"icon\": icon,\n },\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n" + "value": "from lfx.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom lfx.base.io.chat import ChatComponent\nfrom lfx.inputs.inputs import BoolInput\nfrom lfx.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom lfx.schema.message import Message\nfrom lfx.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n documentation: str = \"https://docs.langflow.org/components-io#chat-input\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n 
value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n" }, "files": { "advanced": true, "display_name": "Files", "dynamic": false, "fileTypes": [ + "csv", + "json", + "pdf", "txt", "md", "mdx", @@ -373,27 +334,6 @@ "trace_as_metadata": true, "type": "bool", "value": true - }, - "text_color": { - "_input_type": "MessageTextInput", - "advanced": true, - "display_name": "Text Color", - "dynamic": false, - "info": "The text color of the name", - "input_types": [ - "Message" - ], - "list": false, - "load_from_db": false, - "name": "text_color", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "trace_as_input": true, - "trace_as_metadata": true, - "type": "str", - "value": "" } } }, @@ -451,7 +391,7 @@ "legacy": false, "lf_version": "1.4.3", "metadata": { - "code_hash": "9619107fecd1", + "code_hash": "ccda4dbe4ae1", "dependencies": { "dependencies": [ { @@ -491,56 +431,12 @@ "pinned": false, "template": { "_type": "Component", - "background_color": { - "_input_type": "MessageTextInput", - "advanced": true, - "display_name": "Background Color", - "dynamic": false, - "info": "The background color of the icon.", - "input_types": [ - "Message" - ], - "list": false, - "load_from_db": false, - "name": "background_color", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_input": true, - "trace_as_metadata": true, - "type": "str", - "value": "" - }, - "chat_icon": { - "_input_type": "MessageTextInput", - "advanced": true, - "display_name": "Icon", - "dynamic": false, - "info": "The icon of the message.", - "input_types": [ - "Message" - ], - "list": false, - "load_from_db": false, - "name": "chat_icon", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_input": true, - "trace_as_metadata": true, - "type": "str", - "value": "" - }, "clean_data": { "_input_type": "BoolInput", "advanced": true, "display_name": "Basic Clean Data", "dynamic": false, 
- "info": "Whether to clean the data", + "info": "Whether to clean data before converting to string.", "list": false, "list_add_label": "Add More", "name": "clean_data", @@ -569,7 +465,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom lfx.base.io.chat import ChatComponent\nfrom lfx.helpers.data import safe_convert\nfrom lfx.inputs.inputs import BoolInput, DropdownInput, HandleInput, MessageTextInput\nfrom lfx.schema.data import Data\nfrom lfx.schema.dataframe import DataFrame\nfrom lfx.schema.message import Message\nfrom lfx.schema.properties import Source\nfrom lfx.template.field.base import Output\nfrom lfx.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n documentation: str = \"https://docs.langflow.org/components-io#chat-output\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, 
got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n" + "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom lfx.base.io.chat import ChatComponent\nfrom lfx.helpers.data import safe_convert\nfrom lfx.inputs.inputs import BoolInput, DropdownInput, HandleInput, MessageTextInput\nfrom lfx.schema.data import Data\nfrom lfx.schema.dataframe import DataFrame\nfrom lfx.schema.message import Message\nfrom lfx.schema.properties import Source\nfrom lfx.template.field.base import Output\nfrom lfx.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n documentation: str = \"https://docs.langflow.org/components-io#chat-output\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n advanced=True,\n info=\"Whether to clean data before converting to string.\",\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n clean_data: bool = getattr(self, \"clean_data\", False)\n return \"\\n\".join([safe_convert(item, 
clean_data=clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n" }, "data_template": { "_input_type": "MessageTextInput", @@ -696,28 +592,6 @@ "trace_as_metadata": true, "type": "bool", "value": true - }, - "text_color": { - "_input_type": "MessageTextInput", - "advanced": true, - "display_name": "Text Color", - "dynamic": false, - "info": "The text color of the name", - "input_types": [ - "Message" - ], - "list": false, - "load_from_db": false, - "name": "text_color", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_input": true, - "trace_as_metadata": true, - "type": "str", - "value": "" } }, "tool_mode": false @@ -1270,7 +1144,7 @@ "beta": false, "conditional_paths": [], "custom_fields": {}, - "description": "Loads content from one or more files as a DataFrame.", + "description": "Loads content from one or more files.", "display_name": "File", "documentation": "", "edited": false, @@ -1290,8 +1164,17 @@ "legacy": false, "lf_version": "1.4.3", "metadata": { - "code_hash": "0c57c835f136", - "module": "langflow.components.data.file.FileComponent" + "code_hash": "9a1d497f4f91", + "dependencies": { + "dependencies": [ + { + "name": "lfx", + "version": null + } + ], + "total_dependencies": 1 + }, + "module": "lfx.components.data.file.FileComponent" }, "minimized": false, "output_types": [], @@ -1314,6 +1197,25 @@ "pinned": false, "template": { "_type": "Component", + "advanced_mode": { + "_input_type": "BoolInput", + "advanced": false, + "display_name": "Advanced Parser", + "dynamic": false, + "info": "Enable advanced document processing and export with Docling for PDFs, images, and office documents. 
Available only for single file processing. Note that advanced document processing can consume significant resources.", + "list": false, + "list_add_label": "Add More", + "name": "advanced_mode", + "placeholder": "", + "real_time_refresh": true, + "required": false, + "show": false, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "type": "bool", + "value": false + }, "code": { "advanced": true, "dynamic": true, @@ -1330,7 +1232,7 @@ "show": true, "title_case": false, "type": "code", - "value": "\"\"\"Enhanced file component with clearer structure and Docling isolation.\n\nNotes:\n-----\n- Functionality is preserved with minimal behavioral changes.\n- ALL Docling parsing/export runs in a separate OS process to prevent memory\n growth and native library state from impacting the main Langflow process.\n- Standard text/structured parsing continues to use existing BaseFileComponent\n utilities (and optional threading via `parallel_load_data`).\n\"\"\"\n\nfrom __future__ import annotations\n\nimport json\nimport subprocess\nimport sys\nimport textwrap\nfrom copy import deepcopy\nfrom typing import Any\n\nfrom lfx.base.data.base_file import BaseFileComponent\nfrom lfx.base.data.utils import TEXT_FILE_TYPES, parallel_load_data, parse_text_file_to_data\nfrom lfx.inputs.inputs import DropdownInput, MessageTextInput, StrInput\nfrom lfx.io import BoolInput, FileInput, IntInput, Output\nfrom lfx.schema import DataFrame # noqa: TC001\nfrom lfx.schema.data import Data\nfrom lfx.schema.message import Message\n\n\nclass FileComponent(BaseFileComponent):\n \"\"\"File component with optional Docling processing (isolated in a subprocess).\"\"\"\n\n display_name = \"File\"\n description = \"Loads content from files with optional advanced document processing and export using Docling.\"\n documentation: str = \"https://docs.langflow.org/components-data#file\"\n icon = \"file-text\"\n name = \"File\"\n\n # Docling-supported/compatible extensions; TEXT_FILE_TYPES are supported by the base loader.\n VALID_EXTENSIONS = [\n \"adoc\",\n \"asciidoc\",\n \"asc\",\n \"bmp\",\n \"csv\",\n \"dotx\",\n \"dotm\",\n \"docm\",\n \"docx\",\n \"htm\",\n \"html\",\n \"jpeg\",\n \"json\",\n \"md\",\n \"pdf\",\n \"png\",\n \"potx\",\n \"ppsx\",\n \"pptm\",\n \"potm\",\n \"ppsm\",\n \"pptx\",\n \"tiff\",\n \"txt\",\n \"xls\",\n \"xlsx\",\n \"xhtml\",\n \"xml\",\n \"webp\",\n *TEXT_FILE_TYPES,\n ]\n\n # Fixed export settings used when markdown export is requested.\n EXPORT_FORMAT = \"Markdown\"\n IMAGE_MODE = \"placeholder\"\n\n _base_inputs = deepcopy(BaseFileComponent.get_base_inputs())\n\n for input_item in _base_inputs:\n if isinstance(input_item, FileInput) and input_item.name == \"path\":\n input_item.real_time_refresh = True\n break\n\n inputs = [\n *_base_inputs,\n BoolInput(\n name=\"advanced_mode\",\n display_name=\"Advanced Parser\",\n value=False,\n real_time_refresh=True,\n info=(\n \"Enable advanced document processing and export with Docling for PDFs, images, and office documents. \"\n \"Available only for single file processing.\"\n ),\n show=False,\n ),\n DropdownInput(\n name=\"pipeline\",\n display_name=\"Pipeline\",\n info=\"Docling pipeline to use\",\n options=[\"standard\", \"vlm\"],\n value=\"standard\",\n advanced=True,\n ),\n DropdownInput(\n name=\"ocr_engine\",\n display_name=\"OCR Engine\",\n info=\"OCR engine to use. 
Only available when pipeline is set to 'standard'.\",\n options=[\"\", \"easyocr\"],\n value=\"\",\n show=False,\n advanced=True,\n ),\n StrInput(\n name=\"md_image_placeholder\",\n display_name=\"Image placeholder\",\n info=\"Specify the image placeholder for markdown exports.\",\n value=\"\",\n advanced=True,\n show=False,\n ),\n StrInput(\n name=\"md_page_break_placeholder\",\n display_name=\"Page break placeholder\",\n info=\"Add this placeholder between pages in the markdown output.\",\n value=\"\",\n advanced=True,\n show=False,\n ),\n MessageTextInput(\n name=\"doc_key\",\n display_name=\"Doc Key\",\n info=\"The key to use for the DoclingDocument column.\",\n value=\"doc\",\n advanced=True,\n show=False,\n ),\n # Deprecated input retained for backward-compatibility.\n BoolInput(\n name=\"use_multithreading\",\n display_name=\"[Deprecated] Use Multithreading\",\n advanced=True,\n value=True,\n info=\"Set 'Processing Concurrency' greater than 1 to enable multithreading.\",\n ),\n IntInput(\n name=\"concurrency_multithreading\",\n display_name=\"Processing Concurrency\",\n advanced=True,\n info=\"When multiple files are being processed, the number of files to process concurrently.\",\n value=1,\n ),\n BoolInput(\n name=\"markdown\",\n display_name=\"Markdown Export\",\n info=\"Export processed documents to Markdown format. Only available when advanced mode is enabled.\",\n value=False,\n show=False,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Raw Content\", name=\"message\", method=\"load_files_message\"),\n ]\n\n # ------------------------------ UI helpers --------------------------------------\n\n def _path_value(self, template: dict) -> list[str]:\n \"\"\"Return the list of currently selected file paths from the template.\"\"\"\n return template.get(\"path\", {}).get(\"file_path\", [])\n\n def update_build_config(\n self,\n build_config: dict[str, Any],\n field_value: Any,\n field_name: str | None = None,\n ) -> dict[str, Any]:\n \"\"\"Show/hide Advanced Parser and related fields based on selection context.\"\"\"\n if field_name == \"path\":\n paths = self._path_value(build_config)\n file_path = paths[0] if paths else \"\"\n file_count = len(field_value) if field_value else 0\n\n # Advanced mode only for single (non-tabular) file\n allow_advanced = file_count == 1 and not file_path.endswith((\".csv\", \".xlsx\", \".parquet\"))\n build_config[\"advanced_mode\"][\"show\"] = allow_advanced\n if not allow_advanced:\n build_config[\"advanced_mode\"][\"value\"] = False\n for f in (\"pipeline\", \"ocr_engine\", \"doc_key\", \"md_image_placeholder\", \"md_page_break_placeholder\"):\n if f in build_config:\n build_config[f][\"show\"] = False\n\n elif field_name == \"advanced_mode\":\n for f in (\"pipeline\", \"ocr_engine\", \"doc_key\", \"md_image_placeholder\", \"md_page_break_placeholder\"):\n if f in build_config:\n build_config[f][\"show\"] = bool(field_value)\n\n return build_config\n\n def update_outputs(self, frontend_node: dict[str, Any], field_name: str, field_value: Any) -> dict[str, Any]: # noqa: ARG002\n \"\"\"Dynamically show outputs based on file count/type and advanced mode.\"\"\"\n if field_name not in [\"path\", \"advanced_mode\"]:\n return frontend_node\n\n template = frontend_node.get(\"template\", {})\n paths = self._path_value(template)\n if not paths:\n return frontend_node\n\n frontend_node[\"outputs\"] = []\n if len(paths) == 1:\n file_path = paths[0] if field_name == \"path\" else frontend_node[\"template\"][\"path\"][\"file_path\"][0]\n if 
file_path.endswith((\".csv\", \".xlsx\", \".parquet\")):\n frontend_node[\"outputs\"].append(\n Output(display_name=\"Structured Content\", name=\"dataframe\", method=\"load_files_structured\"),\n )\n elif file_path.endswith(\".json\"):\n frontend_node[\"outputs\"].append(\n Output(display_name=\"Structured Content\", name=\"json\", method=\"load_files_json\"),\n )\n\n advanced_mode = frontend_node.get(\"template\", {}).get(\"advanced_mode\", {}).get(\"value\", False)\n if advanced_mode:\n frontend_node[\"outputs\"].append(\n Output(display_name=\"Structured Output\", name=\"advanced\", method=\"load_files_advanced\"),\n )\n frontend_node[\"outputs\"].append(\n Output(display_name=\"Markdown\", name=\"markdown\", method=\"load_files_markdown\"),\n )\n frontend_node[\"outputs\"].append(\n Output(display_name=\"File Path\", name=\"path\", method=\"load_files_path\"),\n )\n else:\n frontend_node[\"outputs\"].append(\n Output(display_name=\"Raw Content\", name=\"message\", method=\"load_files_message\"),\n )\n frontend_node[\"outputs\"].append(\n Output(display_name=\"File Path\", name=\"path\", method=\"load_files_path\"),\n )\n else:\n # Multiple files => DataFrame output; advanced parser disabled\n frontend_node[\"outputs\"].append(Output(display_name=\"Files\", name=\"dataframe\", method=\"load_files\"))\n\n return frontend_node\n\n # ------------------------------ Core processing ----------------------------------\n\n def _is_docling_compatible(self, file_path: str) -> bool:\n \"\"\"Lightweight extension gate for Docling-compatible types.\"\"\"\n docling_exts = (\n \".adoc\",\n \".asciidoc\",\n \".asc\",\n \".bmp\",\n \".csv\",\n \".dotx\",\n \".dotm\",\n \".docm\",\n \".docx\",\n \".htm\",\n \".html\",\n \".jpeg\",\n \".json\",\n \".md\",\n \".pdf\",\n \".png\",\n \".potx\",\n \".ppsx\",\n \".pptm\",\n \".potm\",\n \".ppsm\",\n \".pptx\",\n \".tiff\",\n \".txt\",\n \".xls\",\n \".xlsx\",\n \".xhtml\",\n \".xml\",\n \".webp\",\n )\n return file_path.lower().endswith(docling_exts)\n\n def _process_docling_in_subprocess(self, file_path: str) -> Data | None:\n \"\"\"Run Docling in a separate OS process and map the result to a Data object.\n\n We avoid multiprocessing pickling by launching `python -c \"