47 commits
841b365
backend updated
deon-sanchez Dec 12, 2025
76e271b
Merge branch 'main' of https://github.com/langflow-ai/langflow into l…
deon-sanchez Dec 16, 2025
cd81575
[autofix.ci] apply automated fixes
autofix-ci[bot] Dec 16, 2025
114591e
updated to use provider env variable
deon-sanchez Dec 16, 2025
cf8d0c9
Merge branch 'lfoss-3056' of https://github.com/langflow-ai/langflow …
deon-sanchez Dec 16, 2025
53f365d
[autofix.ci] apply automated fixes
autofix-ci[bot] Dec 16, 2025
3c3fc1e
added doc url link
deon-sanchez Dec 16, 2025
8a2f85f
Merge branch 'lfoss-3056' of https://github.com/langflow-ai/langflow …
deon-sanchez Dec 16, 2025
f6666e1
added doc prop
deon-sanchez Dec 16, 2025
f32664a
Merge branch 'main' of https://github.com/langflow-ai/langflow into l…
deon-sanchez Dec 16, 2025
f845cd2
merge main
deon-sanchez Dec 17, 2025
cab466e
fixed disabled empty state
deon-sanchez Dec 17, 2025
25e9aab
[autofix.ci] apply automated fixes
autofix-ci[bot] Dec 17, 2025
7a75235
updated be components with new pattern
deon-sanchez Dec 17, 2025
197113f
fixed conflicts
deon-sanchez Dec 17, 2025
bd7adeb
[autofix.ci] apply automated fixes
autofix-ci[bot] Dec 17, 2025
2d43c6e
[autofix.ci] apply automated fixes (attempt 2/3)
autofix-ci[bot] Dec 17, 2025
ba86269
[autofix.ci] apply automated fixes (attempt 3/3)
autofix-ci[bot] Dec 17, 2025
0dc9bd1
updated templates
deon-sanchez Dec 17, 2025
8757085
Merge branch 'lfoss-3056' of https://github.com/langflow-ai/langflow …
deon-sanchez Dec 17, 2025
0f22c83
[autofix.ci] apply automated fixes
autofix-ci[bot] Dec 17, 2025
d17c9ed
filter based upon provider and models
deon-sanchez Dec 17, 2025
f32901c
Merge branch 'lfoss-3056' of https://github.com/langflow-ai/langflow …
deon-sanchez Dec 17, 2025
72f56a8
updated provider mapping and cache clearing logic
deon-sanchez Dec 17, 2025
1b371ca
narrowed exception handling to OSError and ValueError
deon-sanchez Dec 17, 2025
c149b66
Merge branch 'main' of https://github.com/langflow-ai/langflow into l…
deon-sanchez Dec 17, 2025
5093169
removed agentic flows creation and moved agentic initialization after…
deon-sanchez Dec 18, 2025
b63c348
[autofix.ci] apply automated fixes
autofix-ci[bot] Dec 18, 2025
f304bfe
fix: Update model provider filtering and embedding model detection
deon-sanchez Dec 18, 2025
ecb222d
Merge branch 'lfoss-3056' of https://github.com/langflow-ai/langflow …
deon-sanchez Dec 18, 2025
17de889
removed console.log
deon-sanchez Dec 18, 2025
8402040
[autofix.ci] apply automated fixes
autofix-ci[bot] Dec 18, 2025
6d23009
updated models showed
deon-sanchez Dec 18, 2025
088d139
Merge branch 'lfoss-3056' of https://github.com/langflow-ai/langflow …
deon-sanchez Dec 18, 2025
d1d1cc1
[autofix.ci] apply automated fixes
autofix-ci[bot] Dec 18, 2025
130ec8d
[autofix.ci] apply automated fixes (attempt 2/3)
autofix-ci[bot] Dec 18, 2025
ea13598
[autofix.ci] apply automated fixes (attempt 3/3)
autofix-ci[bot] Dec 18, 2025
32552eb
refactor: Simplify embedding model detection logic and add constant f…
deon-sanchez Dec 18, 2025
9d2fe3b
Merge branch 'lfoss-3056' of https://github.com/langflow-ai/langflow …
deon-sanchez Dec 18, 2025
a7ada62
Merge branch 'main' into lfoss-3056
deon-sanchez Dec 18, 2025
4f3918e
Merge branch 'main' of https://github.com/langflow-ai/langflow into l…
deon-sanchez Dec 19, 2025
fffa68d
merge templates
deon-sanchez Dec 29, 2025
b7323be
merge updated templates
deon-sanchez Dec 29, 2025
25de840
Merge branch 'main' of https://github.com/langflow-ai/langflow into l…
deon-sanchez Dec 29, 2025
23699ef
[autofix.ci] apply automated fixes
autofix-ci[bot] Dec 29, 2025
be513a2
[autofix.ci] apply automated fixes (attempt 2/3)
autofix-ci[bot] Dec 29, 2025
9f5561f
[autofix.ci] apply automated fixes (attempt 3/3)
autofix-ci[bot] Dec 29, 2025
15 changes: 11 additions & 4 deletions .env.example
@@ -26,17 +26,17 @@ LANGFLOW_DATABASE_URL=sqlite:///./langflow.db
LANGFLOW_ALEMBIC_LOG_TO_STDOUT=False


# mem0 creates a directory
# mem0 creates a directory
# for chat history, vector stores, and other artifacts
# its default path is ~/.mem0.
# we can change this path with
# we can change this path with
# environment variable "MEM0_DIR"
# Example: MEM0_DIR=/tmp/.mem0

# composio creates a cache directory
# composio creates a cache directory
# for file uploads/downloads.
# its default path is ~/.composio
# we can change this path with
# we can change this path with
# environment variable "COMPOSIO_CACHE_DIR"
# Example: COMPOSIO_CACHE_DIR=/tmp/.composio

@@ -140,6 +140,13 @@ LANGFLOW_STORE_ENVIRONMENT_VARIABLES=
# Default: true
LANGFLOW_MCP_COMPOSER_ENABLED=

# Live Model Data from models.dev API
# When enabled, fetches model data from models.dev API instead of static constants
# Only includes supported providers. Falls back to static if API fails.
# Values: true, false
# Default: false
LFX_USE_LIVE_MODEL_DATA=

# STORE_URL
# Example: LANGFLOW_STORE_URL=https://api.langflow.store
# LANGFLOW_STORE_URL=
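The new LFX_USE_LIVE_MODEL_DATA flag switches model data between the models.dev API and the bundled static constants. As a rough illustration of how such a boolean toggle is typically read, here is a minimal Python sketch; the helper name use_live_model_data is hypothetical, and the PR's actual wiring inside lfx is not shown in this hunk.

# Hedged sketch only: `use_live_model_data` is a hypothetical helper, not the PR's code.
import os

def use_live_model_data() -> bool:
    """True when LFX_USE_LIVE_MODEL_DATA is set to a truthy value."""
    return os.getenv("LFX_USE_LIVE_MODEL_DATA", "false").strip().lower() in {"1", "true", "yes"}

if use_live_model_data():
    print("Fetching model data from the models.dev API (static constants remain the fallback).")
else:
    print("Using the static model constants.")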
@@ -74,18 +74,29 @@ export const useGetModelProviders: useQueryFunctionType<
};

// Helper function to map provider names to icon names
// Icon keys must match lazyIconImports.ts in frontend/src/icons/
const getProviderIcon = (providerName: string): string => {
const iconMap: Record<string, string> = {
OpenAI: "OpenAI",
Anthropic: "Anthropic",
"Google Generative AI": "Google",
Groq: "Groq",
"Amazon Bedrock": "Bedrock",
NVIDIA: "NVIDIA",
"Google Generative AI": "GoogleGenerativeAI",
"Google Vertex AI": "VertexAI",
"Mistral AI": "Mistral",
Cohere: "Cohere",
"Azure OpenAI": "AzureOpenAI",
SambaNova: "SambaNova",
Groq: "Groq",
"Together AI": "Bot",
"Fireworks AI": "Bot",
DeepSeek: "DeepSeek",
xAI: "xAI",
Alibaba: "Bot",
Nvidia: "NVIDIA",
"Amazon Bedrock": "AWS",
"Azure OpenAI": "Azure",
Cerebras: "Bot",
Ollama: "Ollama",
"Ollama Cloud": "Ollama",
"IBM Watsonx": "WatsonxAI",
SambaNova: "SambaNova",
};

return iconMap[providerName] || "Bot";
2 changes: 1 addition & 1 deletion src/frontend/src/modals/modelProviderModal/index.tsx
@@ -252,7 +252,7 @@ const ModelProviderModal = ({
<div className="flex flex-row w-full overflow-hidden">
<div
className={cn(
"flex border-r p-2 flex-col transition-all duration-300 h-[513px] ease-in-out",
"flex border-r p-2 flex-col transition-all duration-300 h-[513px] ease-in-out overflow-y-auto",
selectedProvider ? "w-1/3" : "w-full",
)}
>
28 changes: 28 additions & 0 deletions src/lfx/src/lfx/base/models/__init__.py
@@ -1,13 +1,41 @@
from .model import LCModelComponent
from .model_metadata import ModelCost, ModelLimits, ModelMetadata, ModelModalities, create_model_metadata
from .models_dev_client import (
clear_cache as clear_live_models_cache,
)
from .models_dev_client import (
fetch_models_dev_data,
get_live_models_detailed,
get_models_by_provider,
get_provider_metadata_from_api,
search_models,
)
from .unified_models import (
get_model_provider_variable_mapping,
get_model_providers,
get_unified_models_detailed,
refresh_live_model_data,
)

__all__ = [
# Core components
"LCModelComponent",
# Unified models API
"get_model_provider_variable_mapping",
"get_model_providers",
"get_unified_models_detailed",
"refresh_live_model_data",
# Model metadata types
"ModelCost",
"ModelLimits",
"ModelMetadata",
"ModelModalities",
"create_model_metadata",
# Live models API (models.dev)
"clear_live_models_cache",
"fetch_models_dev_data",
"get_live_models_detailed",
"get_models_by_provider",
"get_provider_metadata_from_api",
"search_models",
]

Check failure on line 41 in src/lfx/src/lfx/base/models/__init__.py
GitHub Actions / Ruff Style Check (3.13)
Ruff (RUF022): src/lfx/src/lfx/base/models/__init__.py:20:11: RUF022 `__all__` is not sorted
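The expanded __init__.py re-exports both the unified models API and the live models.dev client, so consumers can import everything from lfx.base.models. Below is a hedged usage sketch: only the names come from the diff above, and the zero-argument call forms are assumptions, since the signatures are not visible in this hunk. (The Ruff failure concerns only the ordering of __all__, not these exports.)

# Hedged sketch: names are from the diff above; argument lists are assumed, not shown in this hunk.
from lfx.base.models import (
    clear_live_models_cache,
    get_model_provider_variable_mapping,
    get_model_providers,
    get_unified_models_detailed,
)

providers = get_model_providers()                # assumed: list of supported provider names
models = get_unified_models_detailed()           # assumed: ModelMetadata entries, live or static
env_map = get_model_provider_variable_mapping()  # assumed: provider -> API-key env variable
clear_live_models_cache()                        # assumed zero-arg: drops the cached models.dev payload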
107 changes: 102 additions & 5 deletions src/lfx/src/lfx/base/models/model_metadata.py
@@ -1,47 +1,144 @@
from typing import TypedDict


class ModelCost(TypedDict, total=False):
"""Cost information per million tokens."""

input: float # Cost per million input tokens
output: float # Cost per million output tokens
reasoning: float # Cost per million reasoning tokens (if applicable)
cache_read: float # Cost per million cached read tokens
cache_write: float # Cost per million cached write tokens
input_audio: float # Cost per million audio input tokens
output_audio: float # Cost per million audio output tokens


class ModelLimits(TypedDict, total=False):
"""Token limits for the model."""

context: int # Maximum context window size
output: int # Maximum output tokens


class ModelModalities(TypedDict, total=False):
"""Input/output modalities supported by the model."""

input: list[str] # Supported input types (e.g., ["text", "image", "audio"])
output: list[str] # Supported output types (e.g., ["text", "image"])


class ModelMetadata(TypedDict, total=False):
"""Simple model metadata structure."""
"""Model metadata structure with extended fields from models.dev API."""

provider: str # Provider name (e.g., "anthropic", "groq", "openai")
# Core identification
provider: str # Provider name (e.g., "Anthropic", "OpenAI")
provider_id: str # Provider ID from API (e.g., "anthropic", "openai")
name: str # Model name/ID
display_name: str # Human-readable model name
icon: str # Icon name for UI

# Capabilities
tool_calling: bool # Whether model supports tool calling (defaults to False)
reasoning: bool # Reasoning models (defaults to False)
search: bool # Search models (defaults to False)
structured_output: bool # Whether model supports structured output
temperature: bool # Whether model supports temperature parameter
attachment: bool # Whether model supports file attachments

# Status flags
preview: bool # Whether model is in preview/beta (defaults to False)
not_supported: bool # Whether model is not supported or deprecated (defaults to False)
not_supported: bool # Whether model is not supported (defaults to False)
deprecated: bool # Whether model is deprecated (defaults to False)
default: bool # Whether model is a default/recommended option (defaults to False)
model_type: str # Type of model (defaults to "llm" or "embeddings")
open_weights: bool # Whether model has open weights

# Model classification
model_type: str # Type of model ("llm", "embeddings", "image", "audio", "video")

# Extended metadata from models.dev
cost: ModelCost # Pricing information
limits: ModelLimits # Token limits
modalities: ModelModalities # Supported input/output modalities
knowledge_cutoff: str # Knowledge cutoff date (e.g., "2024-04")
release_date: str # Model release date
last_updated: str # Last update date

# Provider metadata
api_base: str # Base API URL for the provider
env_vars: list[str] # Environment variables for API keys
documentation_url: str # Link to provider documentation


def create_model_metadata(
provider: str,
name: str,
icon: str,
*,
provider_id: str | None = None,
display_name: str | None = None,
tool_calling: bool = False,
reasoning: bool = False,
search: bool = False,
structured_output: bool = False,
temperature: bool = True,
attachment: bool = False,
preview: bool = False,
not_supported: bool = False,
deprecated: bool = False,
default: bool = False,
open_weights: bool = False,
model_type: str = "llm",
cost: ModelCost | None = None,
limits: ModelLimits | None = None,
modalities: ModelModalities | None = None,
knowledge_cutoff: str | None = None,
release_date: str | None = None,
last_updated: str | None = None,
api_base: str | None = None,
env_vars: list[str] | None = None,
documentation_url: str | None = None,
) -> ModelMetadata:
"""Helper function to create ModelMetadata with explicit defaults."""
return ModelMetadata(
metadata = ModelMetadata(
provider=provider,
name=name,
icon=icon,
tool_calling=tool_calling,
reasoning=reasoning,
search=search,
structured_output=structured_output,
temperature=temperature,
attachment=attachment,
preview=preview,
not_supported=not_supported,
deprecated=deprecated,
default=default,
open_weights=open_weights,
model_type=model_type,
)

# Add optional fields if provided
if provider_id is not None:
metadata["provider_id"] = provider_id
if display_name is not None:
metadata["display_name"] = display_name
if cost is not None:
metadata["cost"] = cost
if limits is not None:
metadata["limits"] = limits
if modalities is not None:
metadata["modalities"] = modalities
if knowledge_cutoff is not None:
metadata["knowledge_cutoff"] = knowledge_cutoff
if release_date is not None:
metadata["release_date"] = release_date
if last_updated is not None:
metadata["last_updated"] = last_updated
if api_base is not None:
metadata["api_base"] = api_base
if env_vars is not None:
metadata["env_vars"] = env_vars
if documentation_url is not None:
metadata["documentation_url"] = documentation_url

return metadata
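The full signature of create_model_metadata is visible in this hunk, so the call below is grounded in the diff; only the concrete provider, model name, and numbers are illustrative values, not taken from the PR.

# Illustrative call; the signature matches the diff above, the values do not come from the PR.
from lfx.base.models import ModelCost, ModelLimits, create_model_metadata

meta = create_model_metadata(
    provider="OpenAI",
    name="gpt-4o",
    icon="OpenAI",
    provider_id="openai",
    display_name="GPT-4o",
    tool_calling=True,
    structured_output=True,
    cost=ModelCost(input=2.5, output=10.0),        # illustrative prices per million tokens
    limits=ModelLimits(context=128_000, output=16_384),
    env_vars=["OPENAI_API_KEY"],
)

# Optional fields are added only when supplied, so presence checks work as expected:
assert "cost" in meta
assert "knowledge_cutoff" not in meta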