diff --git a/docs/docs/Components/bundles-burncloud.mdx b/docs/docs/Components/bundles-burncloud.mdx
new file mode 100644
index 000000000000..c5326dc60a8c
--- /dev/null
+++ b/docs/docs/Components/bundles-burncloud.mdx
@@ -0,0 +1,52 @@
+---
+title: BurnCloud
+slug: /bundles-burncloud
+description: Use BurnCloud's OpenAI-compatible models inside Langflow.
+---
+
+import Icon from "@site/src/components/icon";
+
+[**Bundles**](/components-bundle-components) contain custom components that support specific third-party integrations with Langflow.
+
+This page describes the components that are available in the **BurnCloud** bundle.
+
+For more information about BurnCloud features and API limits, see the [BurnCloud documentation](https://burncloud.com/).
+
+## BurnCloud text generation
+
+The **BurnCloud** component generates text through BurnCloud's OpenAI-compatible API gateway. It works with the same chat-completions schema as OpenAI, while letting you point to BurnCloud-hosted models or a private BurnCloud deployment.
+
+It can output either a **Model Response** ([`Message`](/data-types#message)) or a **Language Model** ([`LanguageModel`](/data-types#languagemodel)). The **Language Model** output is an instance of [`ChatOpenAI`](https://python.langchain.com/docs/integrations/chat/openai) configured to target BurnCloud's `/v1` endpoints.
+
+Use the **Language Model** output when you want to pass a BurnCloud model into another LLM-driven component, such as **Agent**, **Smart Function**, or **Prompt Template** components.
+
+### BurnCloud parameters
+
+import PartialParams from '@site/docs/_partial-hidden-params.mdx';
+
+<PartialParams />
+
+| Name | Type | Description |
+|------|------|-------------|
+| api_key | SecretString | Input parameter. Your BurnCloud API key. Required for authentication and for fetching the latest model list. |
+| base_url | String | Input parameter. Override the default `https://ai.burncloud.com` base URL if you host BurnCloud privately. The component appends `/v1` automatically when needed. (Advanced) |
+| model_name | String | Input parameter. BurnCloud model to use. Options update dynamically after you provide a valid API key and click **Refresh**. Defaults to `gpt-4o-mini`. |
+| temperature | Float | Input parameter. Controls randomness. Range: `[0, 2]`. Defaults to `0.7`. (Advanced) |
+| top_p | Float | Input parameter. Alternative sampling control that limits the cumulative probability mass of candidate tokens. Range: `[0, 1]`. Defaults to `1.0`. (Advanced) |
+| max_tokens | Integer | Input parameter. Maximum number of tokens to generate. Leave empty to let BurnCloud decide. (Advanced) |
+| input_value | String | Input parameter. The prompt or chat content you want to send to the model. |
+| system_message | String | Input parameter. Sets the assistant's persona or high-level instructions. |
+| stream | Boolean | Input parameter. Streams partial results when enabled. |
+| output_parser | OutputParser | Input parameter. (Advanced) Parse the model response before passing it downstream. |
+| model_output | LanguageModel | Output parameter. A `ChatOpenAI` instance configured for BurnCloud. |
+| text_output | Message | Output parameter. The generated response from the selected BurnCloud model. |
+
+### Use BurnCloud in a flow
+
+1. Sign up for a [BurnCloud account](https://burncloud.com/) and generate an API key in the BurnCloud dashboard.
+2. In Langflow, open **Bundles** and drag the **BurnCloud** component into your flow.
+3. Paste your API key into **BurnCloud API Key**. Optionally set **Base URL** if your organization hosts BurnCloud privately.
+4. Click **Refresh** next to **Model** to load the latest BurnCloud-hosted model list, then pick the model you need.
+5. Configure sampling parameters such as **Temperature**, **Top P**, and **Max Output Tokens** (if required) along with your **System Message** and **Prompt**.
+6. Connect **Chat Input** → **BurnCloud** → **Chat Output** (or feed the **Language Model** output into downstream components like **Agent** or **Smart Function**).
+7. Click **Playground** to test requests and validate the connection before deploying the flow.
diff --git a/docs/sidebars.js b/docs/sidebars.js
index f74fce3845c4..ac160a4312ef 100644
--- a/docs/sidebars.js
+++ b/docs/sidebars.js
@@ -304,6 +304,7 @@ module.exports = {
"Components/bundles-azure",
"Components/bundles-baidu",
"Components/bundles-bing",
+ "Components/bundles-burncloud",
"Components/bundles-cassandra",
"Components/bundles-chroma",
"Components/bundles-cleanlab",
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Instagram Copywriter.json b/src/backend/base/langflow/initial_setup/starter_projects/Instagram Copywriter.json
index 429fd1aa1f3a..dbba4c321505 100644
--- a/src/backend/base/langflow/initial_setup/starter_projects/Instagram Copywriter.json
+++ b/src/backend/base/langflow/initial_setup/starter_projects/Instagram Copywriter.json
@@ -2089,6 +2089,7 @@
"name": "agent_llm",
"options": [
"Anthropic",
+ "BurnCloud",
"Google Generative AI",
"OpenAI",
"IBM watsonx.ai",
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Invoice Summarizer.json b/src/backend/base/langflow/initial_setup/starter_projects/Invoice Summarizer.json
index a6961dc4075b..035ffd6e8858 100644
--- a/src/backend/base/langflow/initial_setup/starter_projects/Invoice Summarizer.json
+++ b/src/backend/base/langflow/initial_setup/starter_projects/Invoice Summarizer.json
@@ -1261,6 +1261,7 @@
"name": "agent_llm",
"options": [
"Anthropic",
+ "BurnCloud",
"Google Generative AI",
"OpenAI",
"IBM watsonx.ai",
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Market Research.json b/src/backend/base/langflow/initial_setup/starter_projects/Market Research.json
index 3030476ac494..c4a7342125f1 100644
--- a/src/backend/base/langflow/initial_setup/starter_projects/Market Research.json
+++ b/src/backend/base/langflow/initial_setup/starter_projects/Market Research.json
@@ -1388,6 +1388,7 @@
"options": [
"OpenAI",
"Anthropic",
+ "BurnCloud",
"Google"
],
"options_metadata": [
@@ -1657,6 +1658,7 @@
"name": "agent_llm",
"options": [
"Anthropic",
+ "BurnCloud",
"Google Generative AI",
"OpenAI",
"IBM watsonx.ai",
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/News Aggregator.json b/src/backend/base/langflow/initial_setup/starter_projects/News Aggregator.json
index 86f5b837c67a..fe9951b92261 100644
--- a/src/backend/base/langflow/initial_setup/starter_projects/News Aggregator.json
+++ b/src/backend/base/langflow/initial_setup/starter_projects/News Aggregator.json
@@ -1285,6 +1285,7 @@
"name": "agent_llm",
"options": [
"Anthropic",
+ "BurnCloud",
"Google Generative AI",
"OpenAI",
"IBM watsonx.ai",
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Nvidia Remix.json b/src/backend/base/langflow/initial_setup/starter_projects/Nvidia Remix.json
index 3865c1b604c3..f88228ae6619 100644
--- a/src/backend/base/langflow/initial_setup/starter_projects/Nvidia Remix.json
+++ b/src/backend/base/langflow/initial_setup/starter_projects/Nvidia Remix.json
@@ -917,6 +917,7 @@
"name": "agent_llm",
"options": [
"Anthropic",
+ "BurnCloud",
"Google Generative AI",
"OpenAI",
"IBM watsonx.ai",
diff --git "a/src/backend/base/langflow/initial_setup/starter_projects/Pok\303\251dex Agent.json" "b/src/backend/base/langflow/initial_setup/starter_projects/Pok\303\251dex Agent.json"
index 0aa70a9673eb..765c1a677640 100644
--- "a/src/backend/base/langflow/initial_setup/starter_projects/Pok\303\251dex Agent.json"
+++ "b/src/backend/base/langflow/initial_setup/starter_projects/Pok\303\251dex Agent.json"
@@ -1346,6 +1346,7 @@
"name": "agent_llm",
"options": [
"Anthropic",
+ "BurnCloud",
"Google Generative AI",
"OpenAI",
"IBM watsonx.ai",
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Price Deal Finder.json b/src/backend/base/langflow/initial_setup/starter_projects/Price Deal Finder.json
index f92da5f22245..57ec476d4ee1 100644
--- a/src/backend/base/langflow/initial_setup/starter_projects/Price Deal Finder.json
+++ b/src/backend/base/langflow/initial_setup/starter_projects/Price Deal Finder.json
@@ -1713,6 +1713,7 @@
"name": "agent_llm",
"options": [
"Anthropic",
+ "BurnCloud",
"Google Generative AI",
"OpenAI",
"IBM watsonx.ai",
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Research Agent.json b/src/backend/base/langflow/initial_setup/starter_projects/Research Agent.json
index 43176657d025..945914519180 100644
--- a/src/backend/base/langflow/initial_setup/starter_projects/Research Agent.json
+++ b/src/backend/base/langflow/initial_setup/starter_projects/Research Agent.json
@@ -2098,6 +2098,7 @@
"options": [
"OpenAI",
"Anthropic",
+ "BurnCloud",
"Google"
],
"options_metadata": [
@@ -2686,6 +2687,7 @@
"name": "agent_llm",
"options": [
"Anthropic",
+ "BurnCloud",
"Google Generative AI",
"OpenAI",
"IBM watsonx.ai",
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/SaaS Pricing.json b/src/backend/base/langflow/initial_setup/starter_projects/SaaS Pricing.json
index 588ea74573d0..5fc13ff24c87 100644
--- a/src/backend/base/langflow/initial_setup/starter_projects/SaaS Pricing.json
+++ b/src/backend/base/langflow/initial_setup/starter_projects/SaaS Pricing.json
@@ -975,6 +975,7 @@
"name": "agent_llm",
"options": [
"Anthropic",
+ "BurnCloud",
"Google Generative AI",
"OpenAI",
"IBM watsonx.ai",
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Search agent.json b/src/backend/base/langflow/initial_setup/starter_projects/Search agent.json
index 0da07cb025c7..83cf3eda2d13 100644
--- a/src/backend/base/langflow/initial_setup/starter_projects/Search agent.json
+++ b/src/backend/base/langflow/initial_setup/starter_projects/Search agent.json
@@ -1052,6 +1052,7 @@
"name": "agent_llm",
"options": [
"Anthropic",
+ "BurnCloud",
"Google Generative AI",
"OpenAI",
"IBM watsonx.ai",
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Sequential Tasks Agents.json b/src/backend/base/langflow/initial_setup/starter_projects/Sequential Tasks Agents.json
index 39b2def3b663..fa1ad9ed6143 100644
--- a/src/backend/base/langflow/initial_setup/starter_projects/Sequential Tasks Agents.json
+++ b/src/backend/base/langflow/initial_setup/starter_projects/Sequential Tasks Agents.json
@@ -464,6 +464,7 @@
"name": "agent_llm",
"options": [
"Anthropic",
+ "BurnCloud",
"Google Generative AI",
"OpenAI",
"IBM watsonx.ai",
@@ -1197,6 +1198,7 @@
"name": "agent_llm",
"options": [
"Anthropic",
+ "BurnCloud",
"Google Generative AI",
"OpenAI",
"IBM watsonx.ai",
@@ -2701,6 +2703,7 @@
"name": "agent_llm",
"options": [
"Anthropic",
+ "BurnCloud",
"Google Generative AI",
"OpenAI",
"IBM watsonx.ai",
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Simple Agent.json b/src/backend/base/langflow/initial_setup/starter_projects/Simple Agent.json
index 2bd6e42ec0e1..92e9c8171ffd 100644
--- a/src/backend/base/langflow/initial_setup/starter_projects/Simple Agent.json
+++ b/src/backend/base/langflow/initial_setup/starter_projects/Simple Agent.json
@@ -1037,6 +1037,7 @@
"name": "agent_llm",
"options": [
"Anthropic",
+ "BurnCloud",
"Google Generative AI",
"OpenAI",
"IBM watsonx.ai",
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Social Media Agent.json b/src/backend/base/langflow/initial_setup/starter_projects/Social Media Agent.json
index d41eb3b7aafc..3814720b9c5a 100644
--- a/src/backend/base/langflow/initial_setup/starter_projects/Social Media Agent.json
+++ b/src/backend/base/langflow/initial_setup/starter_projects/Social Media Agent.json
@@ -1395,6 +1395,7 @@
"name": "agent_llm",
"options": [
"Anthropic",
+ "BurnCloud",
"Google Generative AI",
"OpenAI",
"IBM watsonx.ai",
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Travel Planning Agents.json b/src/backend/base/langflow/initial_setup/starter_projects/Travel Planning Agents.json
index 1ed3e2deb857..58032097e4ba 100644
--- a/src/backend/base/langflow/initial_setup/starter_projects/Travel Planning Agents.json
+++ b/src/backend/base/langflow/initial_setup/starter_projects/Travel Planning Agents.json
@@ -1770,6 +1770,7 @@
"name": "agent_llm",
"options": [
"Anthropic",
+ "BurnCloud",
"Google Generative AI",
"OpenAI",
"IBM watsonx.ai",
@@ -2496,6 +2497,7 @@
"name": "agent_llm",
"options": [
"Anthropic",
+ "BurnCloud",
"Google Generative AI",
"OpenAI",
"IBM watsonx.ai",
@@ -3222,6 +3224,7 @@
"name": "agent_llm",
"options": [
"Anthropic",
+ "BurnCloud",
"Google Generative AI",
"OpenAI",
"IBM watsonx.ai",
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Youtube Analysis.json b/src/backend/base/langflow/initial_setup/starter_projects/Youtube Analysis.json
index 08830841ac6a..88b3d73e70b1 100644
--- a/src/backend/base/langflow/initial_setup/starter_projects/Youtube Analysis.json
+++ b/src/backend/base/langflow/initial_setup/starter_projects/Youtube Analysis.json
@@ -866,6 +866,7 @@
"name": "agent_llm",
"options": [
"Anthropic",
+ "BurnCloud",
"Google Generative AI",
"OpenAI",
"IBM watsonx.ai",
diff --git a/src/frontend/src/icons/BurnCloud/BurnCloudIcon.jsx b/src/frontend/src/icons/BurnCloud/BurnCloudIcon.jsx
new file mode 100644
index 000000000000..5970ecee78c0
--- /dev/null
+++ b/src/frontend/src/icons/BurnCloud/BurnCloudIcon.jsx
@@ -0,0 +1,36 @@
+import { useId } from "react";
+
+const BurnCloudIconSVG = ({ isDark = false, ...props }) => {
+ const gradientId = `burncloud-gradient-${useId()}`;
+ const start = isDark ? "#f9cf69" : "#f7b52c";
+ const end = isDark ? "#ff7a3a" : "#e95513";
+ return (
+
+ );
+};
+
+export default BurnCloudIconSVG;
diff --git a/src/frontend/src/icons/BurnCloud/BurnCloudIcon.svg b/src/frontend/src/icons/BurnCloud/BurnCloudIcon.svg
new file mode 100644
index 000000000000..b676b7dce1ff
--- /dev/null
+++ b/src/frontend/src/icons/BurnCloud/BurnCloudIcon.svg
@@ -0,0 +1,12 @@
+
diff --git a/src/frontend/src/icons/BurnCloud/index.tsx b/src/frontend/src/icons/BurnCloud/index.tsx
new file mode 100644
index 000000000000..b9747dbbe3e1
--- /dev/null
+++ b/src/frontend/src/icons/BurnCloud/index.tsx
@@ -0,0 +1,9 @@
+import type React from "react";
+import { forwardRef } from "react";
+import BurnCloudIconSVG from "./BurnCloudIcon";
+
+export const BurnCloudIcon = forwardRef<SVGSVGElement, React.PropsWithChildren<{}>>(
+  (props, ref) => <BurnCloudIconSVG ref={ref} {...props} />,
+);
+
+export default BurnCloudIcon;
diff --git a/src/frontend/src/icons/eagerIconImports.ts b/src/frontend/src/icons/eagerIconImports.ts
index b91674da9876..9c3fbd8aaf89 100644
--- a/src/frontend/src/icons/eagerIconImports.ts
+++ b/src/frontend/src/icons/eagerIconImports.ts
@@ -14,6 +14,7 @@ import { AthenaIcon } from "@/icons/athena/index";
import { BingIcon } from "@/icons/Bing";
import { BotMessageSquareIcon } from "@/icons/BotMessageSquare";
import { BWPythonIcon } from "@/icons/BW python";
+import { BurnCloudIcon } from "@/icons/BurnCloud";
import { CassandraIcon } from "@/icons/Cassandra";
import { ChromaIcon } from "@/icons/ChromaIcon";
import { ClickhouseIcon } from "@/icons/Clickhouse";
@@ -133,6 +134,7 @@ export const eagerIconsMapping = {
AWSInverted: AWSInvertedIcon,
Azure: AzureIcon,
Bing: BingIcon,
+ BurnCloud: BurnCloudIcon,
BotMessageSquare: BotMessageSquareIcon,
BWPython: BWPythonIcon,
Cassandra: CassandraIcon,
diff --git a/src/frontend/src/icons/lazyIconImports.ts b/src/frontend/src/icons/lazyIconImports.ts
index 65fd76eda1c1..1eeb7c296b66 100644
--- a/src/frontend/src/icons/lazyIconImports.ts
+++ b/src/frontend/src/icons/lazyIconImports.ts
@@ -117,6 +117,10 @@ export const lazyIconsMapping = {
import("@/icons/Brightdata").then((mod) => ({
default: mod.BrightdataIcon,
})),
+ BurnCloud: () =>
+ import("@/icons/BurnCloud").then((mod) => ({
+ default: mod.BurnCloudIcon,
+ })),
BWPython: () =>
import("@/icons/BW python").then((mod) => ({ default: mod.BWPythonIcon })),
Cassandra: () =>
diff --git a/src/frontend/src/utils/styleUtils.ts b/src/frontend/src/utils/styleUtils.ts
index b88ae2464294..4152be1e73bf 100644
--- a/src/frontend/src/utils/styleUtils.ts
+++ b/src/frontend/src/utils/styleUtils.ts
@@ -252,6 +252,7 @@ export const SIDEBAR_BUNDLES = [
{ display_name: "Azure", name: "azure", icon: "Azure" },
{ display_name: "Baidu", name: "baidu", icon: "BaiduQianfan" },
{ display_name: "Bing", name: "bing", icon: "Bing" },
+ { display_name: "BurnCloud", name: "BurnCloud", icon: "BurnCloud" },
{ display_name: "Cassandra", name: "cassandra", icon: "Cassandra" },
{ display_name: "Chroma", name: "chroma", icon: "Chroma" },
{ display_name: "ClickHouse", name: "clickhouse", icon: "Clickhouse" },
diff --git a/src/lfx/src/lfx/base/models/model_input_constants.py b/src/lfx/src/lfx/base/models/model_input_constants.py
index b9d21d41a6ad..26f367c0393e 100644
--- a/src/lfx/src/lfx/base/models/model_input_constants.py
+++ b/src/lfx/src/lfx/base/models/model_input_constants.py
@@ -262,6 +262,21 @@ def _get_sambanova_inputs_and_fields():
except ImportError:
pass
+try:
+ from lfx.components.BurnCloud.burncloud import BurnCloudModel
+
+ burncloud_inputs = get_filtered_inputs(BurnCloudModel)
+ MODEL_PROVIDERS_DICT["BurnCloud"] = {
+ "fields": create_input_fields_dict(burncloud_inputs, ""),
+ "inputs": burncloud_inputs,
+ "prefix": "",
+ "component_class": BurnCloudModel(),
+ "icon": BurnCloudModel.icon,
+ "is_active": True,
+ }
+except ImportError:
+ pass
+
try:
from lfx.components.nvidia.nvidia import NVIDIAModelComponent
@@ -373,6 +388,13 @@ def _get_sambanova_inputs_and_fields():
MODELS_METADATA = {name: {"icon": prov["icon"]} for name, prov in ACTIVE_MODEL_PROVIDERS_DICT.items()}
-MODEL_PROVIDERS_LIST = ["Anthropic", "Google Generative AI", "OpenAI", "IBM watsonx.ai", "Ollama"]
+MODEL_PROVIDERS_LIST = [
+ "Anthropic",
+ "BurnCloud",
+ "Google Generative AI",
+ "OpenAI",
+ "IBM watsonx.ai",
+ "Ollama",
+]
MODEL_OPTIONS_METADATA = [MODELS_METADATA[key] for key in MODEL_PROVIDERS_LIST if key in MODELS_METADATA]
diff --git a/src/lfx/src/lfx/components/BurnCloud/__init__.py b/src/lfx/src/lfx/components/BurnCloud/__init__.py
new file mode 100644
index 000000000000..03b90208b28e
--- /dev/null
+++ b/src/lfx/src/lfx/components/BurnCloud/__init__.py
@@ -0,0 +1,32 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, Any
+
+from lfx.components._importing import import_mod
+
+if TYPE_CHECKING: # pragma: no cover
+ from .burncloud import BurnCloudModel
+
+_dynamic_imports = {
+ "BurnCloudModel": "burncloud",
+}
+
+__all__ = ["BurnCloudModel"]
+
+
+def __getattr__(attr_name: str) -> Any:
+ """Lazily import BurnCloud components on attribute access."""
+ if attr_name not in _dynamic_imports:
+ msg = f"module '{__name__}' has no attribute '{attr_name}'"
+ raise AttributeError(msg)
+ try:
+ result = import_mod(attr_name, _dynamic_imports[attr_name], __spec__.parent)
+ except (ModuleNotFoundError, ImportError, AttributeError) as e: # pragma: no cover - thin wrapper
+ msg = f"Could not import '{attr_name}' from '{__name__}': {e}"
+ raise AttributeError(msg) from e
+ globals()[attr_name] = result
+ return result
+
+
+def __dir__() -> list[str]: # pragma: no cover
+ return list(__all__)
diff --git a/src/lfx/src/lfx/components/BurnCloud/burncloud.py b/src/lfx/src/lfx/components/BurnCloud/burncloud.py
new file mode 100644
index 000000000000..97dd9280fc05
--- /dev/null
+++ b/src/lfx/src/lfx/components/BurnCloud/burncloud.py
@@ -0,0 +1,157 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, Any
+
+import httpx
+from langchain_openai import ChatOpenAI
+from pydantic.v1 import SecretStr
+
+from lfx.base.models.model import LCModelComponent
+from lfx.field_typing.range_spec import RangeSpec
+from lfx.inputs.inputs import DropdownInput, IntInput, MessageTextInput, SecretStrInput, SliderInput
+
+LanguageModel = Any
+if TYPE_CHECKING: # pragma: no cover - import only for static analysis
+ from lfx.field_typing import LanguageModel as _LanguageModel
+
+ LanguageModel = _LanguageModel
+
+DEFAULT_BURNCLOUD_BASE_URL = "https://ai.burncloud.com"
+DEFAULT_BURNCLOUD_MODELS = [
+ "gpt-4o-mini",
+ "gpt-4o",
+ "deepseek-v3",
+ "deepseek-r1",
+ "gemini-2.5-pro",
+ "o3",
+ "o4-mini",
+ "qwen3-235b-a22b",
+ "qwen3-235b-a22b-instruct-2507",
+ "llama-4-maverick",
+ "gemini-2.5-flash",
+ "gemini-2.5-flash-nothink",
+ "claude-sonnet-4-20250514",
+ "claude-opus-4-20250514",
+ "doubao-1.5-pro-256k",
+ "grok-4",
+]
+REQUEST_TIMEOUT_SECONDS = 10.0
+
+
+class BurnCloudModel(LCModelComponent):
+ display_name = "BurnCloud"
+ description = "Generate text using BurnCloud's OpenAI-compatible API gateway."
+ icon = "BurnCloud"
+ name = "BurnCloudModel"
+
+ inputs = [
+ *LCModelComponent.get_base_inputs(),
+ SecretStrInput(
+ name="api_key",
+ display_name="BurnCloud API Key",
+ info="Your BurnCloud API key for authentication.",
+ required=True,
+ real_time_refresh=True,
+ ),
+ MessageTextInput(
+ name="base_url",
+ display_name="Base URL",
+ info="Override the BurnCloud API base URL if you use a private deployment.",
+ value=DEFAULT_BURNCLOUD_BASE_URL,
+ advanced=True,
+ real_time_refresh=True,
+ ),
+ DropdownInput(
+ name="model_name",
+ display_name="Model",
+ info="Select one of the available BurnCloud-hosted models.",
+ options=DEFAULT_BURNCLOUD_MODELS,
+ value=DEFAULT_BURNCLOUD_MODELS[0],
+ refresh_button=True,
+ real_time_refresh=True,
+ combobox=True,
+ ),
+ SliderInput(
+ name="temperature",
+ display_name="Temperature",
+ value=0.7,
+ info="Controls randomness. Lower values make outputs more deterministic.",
+ range_spec=RangeSpec(min=0, max=2, step=0.01),
+ advanced=True,
+ ),
+ SliderInput(
+ name="top_p",
+ display_name="Top P",
+ value=1.0,
+ info="Alternative sampling parameter that limits the probability mass of candidate tokens.",
+ range_spec=RangeSpec(min=0, max=1, step=0.01),
+ advanced=True,
+ ),
+ IntInput(
+ name="max_tokens",
+ display_name="Max Output Tokens",
+ info="The maximum number of tokens to generate in the response.",
+ advanced=True,
+ ),
+ ]
+
+ def _build_api_base(self) -> str:
+ base = (self.base_url or DEFAULT_BURNCLOUD_BASE_URL).rstrip("/")
+ if not base.endswith("/v1"):
+ base = f"{base}/v1"
+ return base
+
+ def _build_headers(self) -> dict[str, str]:
+ return {
+ "Authorization": f"Bearer {self.api_key}",
+ "Content-Type": "application/json",
+ }
+
+ def get_models(self) -> list[str]:
+ if not self.api_key:
+ return DEFAULT_BURNCLOUD_MODELS.copy()
+
+ try:
+ with httpx.Client(timeout=REQUEST_TIMEOUT_SECONDS) as client:
+ response = client.get(
+ f"{self._build_api_base()}/models",
+ headers=self._build_headers(),
+ )
+ response.raise_for_status()
+ payload = response.json()
+ models = [item["id"] for item in payload.get("data", []) if item.get("id")]
+ return models or DEFAULT_BURNCLOUD_MODELS.copy()
+ except (httpx.HTTPError, ValueError, KeyError) as exc:
+ self.log(f"Error fetching BurnCloud models: {exc}", "warning")
+ return DEFAULT_BURNCLOUD_MODELS.copy()
+
+ def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None):
+ if field_name in {"api_key", "base_url", "model_name"} and field_value:
+ model_options = self.get_models()
+ build_config.setdefault("model_name", {})
+ build_config["model_name"]["options"] = model_options
+ if model_options:
+ build_config["model_name"].setdefault("value", model_options[0])
+ return build_config
+
+ def build_model(self) -> LanguageModel:
+ if not self.api_key:
+ msg = "BurnCloud API key is required."
+ raise ValueError(msg)
+ if not self.model_name:
+ msg = "Select a BurnCloud model before running the component."
+ raise ValueError(msg)
+
+ kwargs = {
+ "model": self.model_name,
+ "openai_api_key": SecretStr(self.api_key).get_secret_value(),
+ "openai_api_base": self._build_api_base(),
+ "temperature": self.temperature if self.temperature is not None else 0.7,
+ "top_p": self.top_p if self.top_p is not None else 1.0,
+ "streaming": self.stream,
+ }
+
+ if self.max_tokens:
+ kwargs["max_tokens"] = int(self.max_tokens)
+
+ return ChatOpenAI(**kwargs)
diff --git a/src/lfx/src/lfx/components/__init__.py b/src/lfx/src/lfx/components/__init__.py
index ed13e3c130a8..e216bfcbaf32 100644
--- a/src/lfx/src/lfx/components/__init__.py
+++ b/src/lfx/src/lfx/components/__init__.py
@@ -8,6 +8,7 @@
# These imports are only for type checking and match _dynamic_imports
from lfx.components import (
FAISS,
+ BurnCloud,
Notion,
agentql,
aiml,
@@ -123,6 +124,7 @@
"azure": "__module__",
"baidu": "__module__",
"bing": "__module__",
+ "BurnCloud": "__module__",
"cassandra": "__module__",
"chains": "__module__",
"chroma": "__module__",
@@ -244,6 +246,7 @@ def _discover_components_from_module(module_name):
# Static base __all__ with module names
__all__ = [
"FAISS",
+ "BurnCloud",
"Notion",
"agentql",
"aiml",