diff --git a/docs/docs/API-Reference/api-files.mdx b/docs/docs/API-Reference/api-files.mdx
index 48b8abfb4856..06ff46bd565e 100644
--- a/docs/docs/API-Reference/api-files.mdx
+++ b/docs/docs/API-Reference/api-files.mdx
@@ -293,6 +293,14 @@ This component loads files into flows from your local machine or Langflow file m
If the file path is valid, the flow runs successfully.
+:::tip Need a complete example?
+For a comprehensive guide on integrating file uploads with external frontends like Streamlit, see:
+
+- [Complete Streamlit integration example](/streamlit-integration-example): Shows how to build a full application with file uploads and chat interface
+- [Vector store API integration](/vector-store-api-integration): Detailed guide for injecting files into vector store flows
+- [API examples with file uploads](/api-reference-api-examples): Complete Python examples for file upload workflows
+:::
+
### List files (v2)
List all files associated with your user account.
diff --git a/docs/docs/API-Reference/api-reference-api-examples.mdx b/docs/docs/API-Reference/api-reference-api-examples.mdx
index 83b79c583280..44b0c3d51519 100644
--- a/docs/docs/API-Reference/api-reference-api-examples.mdx
+++ b/docs/docs/API-Reference/api-reference-api-examples.mdx
@@ -26,7 +26,10 @@ The quickstart demonstrates how to get automatically generated code snippets for
While individual options vary by endpoint, all Langflow API requests share some commonalities, like a URL, method, parameters, and authentication.
-As an example of a Langflow API request, the following curl command calls the `/v1/run` endpoint, and it passes a runtime override (`tweaks`) to the flow's **Chat Output** component:
+As examples of Langflow API requests, the following snippets show different ways to call the `/v1/run` endpoint, including cURL, Python with `requests`, and asynchronous Python with `httpx`. Each one passes a runtime override (`tweaks`) to the flow's **Chat Output** component:
+
+#### cURL example
+
```bash
curl --request POST \
@@ -45,6 +48,93 @@ curl --request POST \
}'
```
+
+#### Python example with requests
+
+```python
+import requests
+import json
+
+# Set up API configuration
+LANGFLOW_API_KEY = "your_api_key_here"
+LANGFLOW_SERVER_URL = "http://localhost:7860"
+FLOW_ID = "your_flow_id_here"
+
+headers = {
+ "Content-Type": "application/json",
+ "x-api-key": LANGFLOW_API_KEY
+}
+
+data = {
+ "input_value": "hello world!",
+ "output_type": "chat",
+ "input_type": "chat",
+ "tweaks": {
+ "ChatOutput-6zcZt": {
+ "should_store_message": True
+ }
+ }
+}
+
+response = requests.post(
+ f"{LANGFLOW_SERVER_URL}/api/v1/run/{FLOW_ID}",
+ headers=headers,
+ json=data
+)
+
+if response.status_code == 200:
+ result = response.json()
+ print("Flow executed successfully!")
+ print(json.dumps(result, indent=2))
+else:
+ print(f"Error: {response.status_code} - {response.text}")
+```
+
+#### Asynchronous Python example with httpx
+
+```python
+import httpx
+import asyncio
+
+async def run_langflow_flow():
+ async with httpx.AsyncClient() as client:
+ headers = {
+ "Content-Type": "application/json",
+ "x-api-key": LANGFLOW_API_KEY
+ }
+
+ data = {
+ "input_value": "hello world!",
+ "output_type": "chat",
+ "input_type": "chat",
+ "tweaks": {
+ "ChatOutput-6zcZt": {
+ "should_store_message": True
+ }
+ }
+ }
+
+ response = await client.post(
+ f"{LANGFLOW_SERVER_URL}/api/v1/run/{FLOW_ID}",
+ headers=headers,
+ json=data
+ )
+
+ if response.status_code == 200:
+ result = response.json()
+ return result
+ else:
+ raise Exception(f"API request failed: {response.status_code} - {response.text}")
+
+# Run the async function
+result = asyncio.run(run_langflow_flow())
+print(result)
+```
+
+
+
+
### Base URL
By default, local deployments serve the Langflow API at `http://localhost:7860/api`.
@@ -118,6 +208,281 @@ Commonly used values in Langflow API requests include your [Langflow server URL]
You can retrieve flow IDs from the [**API access** pane](/concepts-publish#api-access), in a flow's URL, and with [`GET /flows`](/api-flows#read-flows).
+## Complete File Upload Workflow
+
+When integrating Langflow with external frontends (like Streamlit, React, or mobile apps), you need to handle file uploads through a specific workflow. Here's how to implement the complete process:
+
+### Overview
+
+The file upload workflow consists of these steps:
+1. **Get your user ID** (for v2 files API)
+2. **Upload files** to Langflow's file management system
+3. **Inject file paths** into flow components using tweaks
+4. **Execute the flow** with uploaded files
+5. **Handle the response** in your application
+
+### Complete Python Example
+
+Here's a comprehensive example showing the entire workflow:
+
+```python
+import requests
+import json
+import time
+from typing import List, Dict, Optional, Any
+
+class LangflowFileUploadClient:
+ """Complete Langflow client for file upload workflows"""
+
+ def __init__(self, base_url: str, api_key: str):
+ self.base_url = base_url.rstrip('/')
+ self.api_key = api_key
+ self.headers = {
+ "Content-Type": "application/json",
+ "x-api-key": api_key
+ }
+ self.file_headers = {"x-api-key": api_key}
+
+ def get_user_id(self) -> str:
+ """Get current user ID for v2 file operations"""
+ response = requests.get(
+ f"{self.base_url}/api/v1/users/whoami",
+ headers=self.headers
+ )
+ response.raise_for_status()
+ return response.json()["id"]
+
+ def upload_file(self, file_path: str, file_name: Optional[str] = None) -> Dict[str, Any]:
+ """Upload a single file to Langflow"""
+ if file_name is None:
+ file_name = file_path.split('/')[-1]
+
+ with open(file_path, 'rb') as f:
+ files = {"file": (file_name, f)}
+ response = requests.post(
+ f"{self.base_url}/api/v2/files",
+ files=files,
+ headers=self.file_headers
+ )
+
+ response.raise_for_status()
+ return response.json()
+
+ def upload_files(self, file_paths: List[str]) -> List[Dict[str, Any]]:
+ """Upload multiple files to Langflow"""
+ results = []
+ for file_path in file_paths:
+ try:
+ result = self.upload_file(file_path)
+ results.append(result)
+            print(f"✅ Uploaded: {result['name']} -> {result['path']}")
+ except Exception as e:
+            print(f"❌ Failed to upload {file_path}: {str(e)}")
+ continue
+ return results
+
+ def run_flow_with_files(
+ self,
+ flow_id: str,
+ file_paths: List[str],
+ input_text: str,
+ file_component_id: str,
+ session_id: Optional[str] = None,
+ **component_tweaks
+ ) -> Dict[str, Any]:
+ """Execute flow with injected file paths"""
+
+ # Prepare tweaks for file injection
+ tweaks = {
+ file_component_id: {
+ "path": file_paths
+ }
+ }
+
+ # Add any additional component tweaks
+ tweaks.update(component_tweaks)
+
+ # Prepare request data
+ data = {
+ "input_value": input_text,
+ "input_type": "text",
+ "output_type": "chat",
+ "tweaks": tweaks
+ }
+
+ if session_id:
+ data["session_id"] = session_id
+
+ # Execute flow
+ response = requests.post(
+ f"{self.base_url}/api/v1/run/{flow_id}",
+ headers=self.headers,
+ json=data
+ )
+
+ response.raise_for_status()
+ return response.json()
+
+ def extract_chat_response(self, flow_response: Dict[str, Any]) -> str:
+ """Extract the main chat response from flow output"""
+ try:
+ outputs = flow_response.get("outputs", [])
+ if outputs:
+ output_data = outputs[0].get("outputs", [])
+ if output_data:
+ result = output_data[0].get("results", {})
+ if "message" in result:
+ return result["message"].get("text", "No response generated")
+ else:
+ return str(result)
+ return "No response from flow"
+ except Exception as e:
+ return f"Error extracting response: {str(e)}"
+
+def complete_workflow_example():
+ """Complete example: upload files and run flow with vector store processing"""
+
+ # Configuration
+ BASE_URL = "http://localhost:7860"
+ API_KEY = "your_api_key_here"
+ FLOW_ID = "your_vector_store_flow_id"
+ FILE_COMPONENT_ID = "File-abc123" # From your flow's API access pane
+
+ # Initialize client
+ client = LangflowFileUploadClient(BASE_URL, API_KEY)
+
+ # Step 1: Upload documents
+    print("📁 Step 1: Uploading files...")
+ file_paths = [
+ "/path/to/document1.pdf",
+ "/path/to/document2.txt",
+ "/path/to/document3.csv"
+ ]
+
+ uploaded_files = client.upload_files(file_paths)
+ uploaded_paths = [f["path"] for f in uploaded_files]
+
+ if not uploaded_paths:
+        print("❌ No files were uploaded successfully")
+ return
+
+    print(f"✅ Uploaded {len(uploaded_paths)} files")
+
+ # Step 2: Run flow with file injection
+    print("\n🤖 Step 2: Running flow with uploaded documents...")
+ query = "What are the main topics discussed in these documents?"
+
+ try:
+ response = client.run_flow_with_files(
+ flow_id=FLOW_ID,
+ file_paths=uploaded_paths,
+ input_text=query,
+ file_component_id=FILE_COMPONENT_ID,
+ session_id="document_chat_session"
+ )
+
+ # Step 3: Extract and display response
+ answer = client.extract_chat_response(response)
+        print(f"\n💬 Answer: {answer}")
+
+ return response
+
+ except Exception as e:
+        print(f"❌ Flow execution failed: {str(e)}")
+ return None
+
+# Run the complete example
+if __name__ == "__main__":
+ complete_workflow_example()
+```
+
+### Frontend Integration Examples
+
+#### React Example
+
+```javascript
+import React, { useState } from 'react';
+
+const LangflowFileUploader = () => {
+ const [files, setFiles] = useState([]);
+ const [isUploading, setIsUploading] = useState(false);
+ const [query, setQuery] = useState('');
+ const [response, setResponse] = useState('');
+
+ const uploadFiles = async (filesToUpload) => {
+ setIsUploading(true);
+ const formData = new FormData();
+
+ Array.from(filesToUpload).forEach(file => {
+ formData.append('file', file);
+ });
+
+ try {
+ const response = await fetch(`${process.env.REACT_APP_LANGFLOW_URL}/api/v2/files`, {
+ method: 'POST',
+ headers: {
+ 'x-api-key': process.env.REACT_APP_LANGFLOW_API_KEY
+ },
+ body: formData
+ });
+
+ const data = await response.json();
+ return Array.isArray(data) ? data : [data];
+ } finally {
+ setIsUploading(false);
+ }
+ };
+
+ const handleQuery = async () => {
+ if (files.length === 0) return;
+
+ const uploadedFiles = await uploadFiles(files);
+ const filePaths = uploadedFiles.map(f => f.path);
+
+ const response = await fetch(`${process.env.REACT_APP_LANGFLOW_URL}/api/v1/run/${FLOW_ID}`, {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json',
+ 'x-api-key': process.env.REACT_APP_LANGFLOW_API_KEY
+ },
+ body: JSON.stringify({
+ input_value: query,
+ input_type: 'text',
+ output_type: 'chat',
+ tweaks: {
+ [FILE_COMPONENT_ID]: {
+ path: filePaths
+ }
+ }
+ })
+ });
+
+ const result = await response.json();
+ const answer = extractChatResponse(result);
+ setResponse(answer);
+ };
+
+  return (
+    <div>
+      <input
+        type="file"
+        multiple
+        onChange={(e) => setFiles(e.target.files)}
+      />
+      <input
+        type="text"
+        value={query}
+        onChange={(e) => setQuery(e.target.value)}
+        placeholder="Ask about your documents..."
+      />
+      <button onClick={handleQuery} disabled={isUploading}>
+        {isUploading ? 'Uploading...' : 'Ask'}
+      </button>
+      {response && <div>{response}</div>}
+    </div>
+  );
+};
+```
+
## Try some Langflow API requests
Once you have your Langflow server URL, try calling these endpoints that return Langflow metadata.
diff --git a/docs/docs/Develop/streamlit-integration-example.mdx b/docs/docs/Develop/streamlit-integration-example.mdx
new file mode 100644
index 000000000000..2f384abd2314
--- /dev/null
+++ b/docs/docs/Develop/streamlit-integration-example.mdx
@@ -0,0 +1,373 @@
+---
+title: Streamlit integration example with file uploads
+slug: /streamlit-integration-example
+---
+
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+This guide demonstrates how to integrate Langflow with Streamlit for file uploads and flow execution. You'll learn how to upload files through your Streamlit interface and pass them to Langflow flows that process documents using vector stores.
+
+## Overview
+
+When integrating Langflow with Streamlit for file uploads, you need to:
+
+1. **Upload files to Langflow** via the API
+2. **Inject file paths into flow components** using tweaks
+3. **Execute the flow** with the uploaded files
+4. **Handle the response** in your Streamlit interface
+
+## Prerequisites
+
+- A Langflow deployment with API access
+- A flow that includes vector store components for document processing
+- Streamlit installed in your Python environment
+- Your Langflow API key
+
+## Complete Streamlit Integration Example
+
+Here's a complete example showing how to build a Streamlit interface that uploads files to Langflow and processes them through vector store flows:
+
+```python
+import streamlit as st
+import requests
+import json
+import time
+from typing import Optional, Dict, Any
+import os
+
+class LangflowClient:
+ def __init__(self, base_url: str, api_key: str):
+ self.base_url = base_url.rstrip('/')
+ self.api_key = api_key
+ self.headers = {
+ "Content-Type": "application/json",
+ "x-api-key": api_key
+ }
+
+ def upload_file(self, file, user_id: Optional[str] = None) -> Dict[str, Any]:
+ """Upload a file to Langflow's file management system"""
+ files = {"file": (file.name, file.getvalue(), file.type)}
+ headers = {"x-api-key": self.api_key}
+
+ response = requests.post(
+ f"{self.base_url}/api/v2/files",
+ files=files,
+ headers=headers
+ )
+
+ if response.status_code == 200:
+ return response.json()
+ else:
+ raise Exception(f"File upload failed: {response.text}")
+
+ def get_user_id(self) -> str:
+ """Get current user ID"""
+ response = requests.get(
+ f"{self.base_url}/api/v1/users/whoami",
+ headers=self.headers
+ )
+ if response.status_code == 200:
+ return response.json()["id"]
+ else:
+ raise Exception(f"Failed to get user ID: {response.text}")
+
+ def run_flow(
+ self,
+ flow_id: str,
+ input_value: str,
+ tweaks: Optional[Dict[str, Any]] = None,
+ session_id: Optional[str] = None
+ ) -> Dict[str, Any]:
+        """Execute a Langflow flow with optional tweaks for file injection"""
+ data = {
+ "input_value": input_value,
+ "output_type": "chat",
+ "input_type": "chat",
+ "tweaks": tweaks or {}
+ }
+
+ if session_id:
+ data["session_id"] = session_id
+
+ response = requests.post(
+ f"{self.base_url}/api/v1/run/{flow_id}",
+ headers=self.headers,
+ json=data
+ )
+
+ if response.status_code == 200:
+ return response.json()
+ else:
+ raise Exception(f"Flow execution failed: {response.text}")
+
+def main():
+ st.set_page_config(
+ page_title="Langflow Document Q&A",
+        page_icon="📄",
+ layout="wide"
+ )
+
+    st.title("📄 Langflow Document Q&A")
+ st.markdown("Upload documents and ask questions about them using Langflow!")
+
+ # Sidebar for configuration
+ with st.sidebar:
+ st.header("Configuration")
+
+ # Langflow configuration
+ langflow_url = st.text_input(
+ "Langflow Server URL",
+ value=os.getenv("LANGFLOW_URL", "http://localhost:7860"),
+ help="Your Langflow server URL"
+ )
+
+ api_key = st.text_input(
+ "Langflow API Key",
+ type="password",
+ help="Your Langflow API key for authentication"
+ )
+
+ # Flow configuration
+ flow_id = st.text_input(
+ "Flow ID",
+ help="ID of your Langflow flow (get from API access pane)"
+ )
+
+ # File component configuration
+ file_component_id = st.text_input(
+ "File Component ID",
+ help="ID of the File component in your flow (e.g., File-abc123)"
+ )
+
+ # Vector store component configuration
+ vector_component_id = st.text_input(
+ "Vector Store Component ID",
+ help="ID of the Vector Store component in your flow (e.g., Chromadb-xyz789)"
+ )
+
+ if st.button("Initialize Client"):
+ if not all([langflow_url, api_key, flow_id]):
+ st.error("Please fill in all required fields")
+ else:
+ st.session_state.client = LangflowClient(langflow_url, api_key)
+ st.session_state.flow_id = flow_id
+ st.session_state.file_component_id = file_component_id
+ st.session_state.vector_component_id = vector_component_id
+ st.success("Client initialized successfully!")
+
+ # Main interface
+ if 'client' not in st.session_state:
+ st.info("Please configure your Langflow connection in the sidebar")
+ return
+
+ # File upload section
+    st.header("📤 Upload Documents")
+ uploaded_files = st.file_uploader(
+ "Choose files to upload",
+ type=['pdf', 'txt', 'docx', 'csv'],
+ accept_multiple_files=True,
+ help="Upload documents to process with your Langflow flow"
+ )
+
+ if uploaded_files and st.button("Upload to Langflow"):
+ st.session_state.uploaded_file_paths = []
+ st.session_state.uploaded_file_names = []
+
+ with st.spinner("Uploading files..."):
+ for file in uploaded_files:
+ try:
+ file_data = st.session_state.client.upload_file(file)
+ file_path = file_data["path"]
+ file_name = file_data["name"]
+
+ st.session_state.uploaded_file_paths.append(file_path)
+ st.session_state.uploaded_file_names.append(file_name)
+
+                    st.success(f"✅ {file_name} uploaded successfully")
+
+ except Exception as e:
+ st.error(f"Failed to upload {file.name}: {str(e)}")
+
+ # Display uploaded files
+ if hasattr(st.session_state, 'uploaded_file_paths') and st.session_state.uploaded_file_paths:
+ st.subheader("Uploaded Files")
+ for i, (name, path) in enumerate(zip(st.session_state.uploaded_file_names, st.session_state.uploaded_file_paths)):
+            st.write(f"{i+1}. {name} → `{path}`")
+
+ # Chat interface
+    st.header("💬 Ask Questions")
+
+ if "messages" not in st.session_state:
+ st.session_state.messages = []
+
+ # Display chat history
+ for message in st.session_state.messages:
+ with st.chat_message(message["role"]):
+ st.write(message["content"])
+
+ # User input
+ if prompt := st.chat_input("Ask a question about your documents"):
+ # Add user message to chat history
+ st.session_state.messages.append({"role": "user", "content": prompt})
+ with st.chat_message("user"):
+ st.write(prompt)
+
+ # Prepare flow execution
+ if not hasattr(st.session_state, 'uploaded_file_paths') or not st.session_state.uploaded_file_paths:
+ st.error("Please upload some files first!")
+ return
+
+ # Prepare tweaks for file injection
+ tweaks = {}
+
+ # Inject file paths into File component
+ if st.session_state.file_component_id:
+ tweaks[st.session_state.file_component_id] = {
+ "path": st.session_state.uploaded_file_paths
+ }
+
+ # Execute flow
+ with st.chat_message("assistant"):
+ with st.spinner("Processing your question..."):
+ try:
+ # Run the flow
+ response = st.session_state.client.run_flow(
+ flow_id=st.session_state.flow_id,
+ input_value=prompt,
+ tweaks=tweaks,
+ session_id="streamlit_session"
+ )
+
+ # Extract the response text
+ outputs = response.get("outputs", [])
+ if outputs:
+ output_data = outputs[0].get("outputs", [])
+ if output_data:
+ result = output_data[0].get("results", {})
+ if "message" in result:
+ message_text = result["message"].get("text", "No response generated")
+ else:
+ message_text = str(result)
+ else:
+ message_text = "No output generated"
+ else:
+ message_text = "No response from flow"
+
+ st.write(message_text)
+
+ # Add assistant response to chat history
+ st.session_state.messages.append({"role": "assistant", "content": message_text})
+
+ except Exception as e:
+ error_message = f"Error executing flow: {str(e)}"
+ st.error(error_message)
+ st.session_state.messages.append({"role": "assistant", "content": error_message})
+
+ # Clear chat history
+ if st.button("Clear Chat History"):
+ st.session_state.messages = []
+ st.rerun()
+
+if __name__ == "__main__":
+ main()
+```
+
+## Flow Configuration Guide
+
+To use this Streamlit integration, you need to configure your Langflow flow with the correct component IDs:
+
+### 1. Identify Component IDs
+
+1. Open your Langflow flow in the visual editor
+2. Click on each component to see its ID in the properties panel
+3. Note the IDs for:
+ - **File component**: Usually labeled as `File-XXXXX`
+ - **Vector store component**: Usually labeled as `Chroma-XXXXX` or `AstraDB-XXXXX`
+
+### 2. Configure the Flow
+
+Ensure your flow includes:
+- A **File** component for loading documents
+- A **Vector Store** component (like ChromaDB, Pinecone, etc.)
+- Text processing components (Text Splitter, Embeddings, etc.)
+- An **Agent** or **Chat Output** component
+
+### 3. Connect Components
+
+Connect your components in this typical flow:
+```
+File → Text Splitter → Embeddings → Vector Store → Agent → Chat Output
+                              ↑
+                         Query Input
+```
+
+## Vector Store Integration Details
+
+The key to making this work is injecting the uploaded file paths into your flow's components using the `tweaks` parameter:
+
+### File Component Tweaks
+```python
+tweaks[file_component_id] = {
+ "path": ["/user_id/file_id.pdf", "/user_id/file_id2.pdf"]
+}
+```
+
+### Vector Store Component Tweaks (if needed)
+```python
+# Some vector stores might need explicit configuration
+tweaks[vector_component_id] = {
+ "collection_name": "my_documents",
+ "allow_reset": True
+}
+```
+
+## Complete Setup Instructions
+
+### 1. Install Dependencies
+
+```bash
+pip install streamlit requests python-multipart
+```
+
+### 2. Set Environment Variables
+
+```bash
+export LANGFLOW_URL="http://localhost:7860"
+export LANGFLOW_API_KEY="your_api_key_here"
+```
+
+### 3. Create a Langflow Flow
+
+1. Create a new flow with vector store capabilities
+2. Add a **File** component
+3. Add a **Vector Store** component (e.g., ChromaDB)
+4. Connect components: File → Vector Store → Chat Output
+5. Get component IDs from the API access pane
+
+### 4. Run Your Streamlit App
+
+```bash
+streamlit run your_streamlit_app.py
+```
+
+## Troubleshooting
+
+### Common Issues
+
+1. **File upload fails**: Check your API key and Langflow server URL
+2. **Flow execution fails**: Verify component IDs and flow structure
+3. **No response from vector store**: Ensure files are properly uploaded and component connections are correct
+
+### Debug Tips
+
+- Check the Langflow logs for detailed error messages
+- Use the Langflow playground to test your flow before integrating
+- Verify all component IDs match exactly (case-sensitive)
+- Ensure your vector store has enough resources for document processing
+
+## Next Steps
+
+- [Explore more Langflow API examples](../API-Reference/api-reference-api-examples)
+- [Learn about file management in Langflow](../concepts-file-management)
+- [Build custom Streamlit interfaces with different Langflow flows](https://github.com/langflow-ai/langflow/discussions)
\ No newline at end of file
diff --git a/docs/docs/Develop/vector-store-api-integration.mdx b/docs/docs/Develop/vector-store-api-integration.mdx
new file mode 100644
index 000000000000..a61cca7eb46f
--- /dev/null
+++ b/docs/docs/Develop/vector-store-api-integration.mdx
@@ -0,0 +1,429 @@
+---
+title: Vector store file injection via API
+slug: /vector-store-api-integration
+---
+
+This guide shows you how to inject uploaded files into Langflow flows that use vector stores, specifically focusing on the critical `tweaks` parameter that bridges external file uploads with your flow's vector store components.
+
+## Understanding Component Tweaks
+
+The `tweaks` parameter in the Langflow API is the key mechanism for dynamically configuring flow components at runtime. When working with vector stores, you use tweaks to:
+
+1. **Inject file paths** into File components
+2. **Configure vector store settings** (collection names, indexes, etc.)
+3. **Override component parameters** based on your uploaded files
+4. **Control the data flow** through your vector store pipeline
+
+## Finding Component IDs
+
+Before you can use tweaks, you need to identify the exact component IDs in your flow:
+
+1. **Open your flow** in the Langflow visual editor
+2. **Click on each component** to view its properties
+3. **Note the component ID** (usually displayed as `ComponentType-xxxxx`)
+4. **Use the API access pane** to get all component IDs at once
+
+### Using the API Access Pane
+
+1. Click the **API access** button in your flow toolbar
+2. **Select the "Input Schema" tab**
+3. **Copy the tweaks structure** - this shows you the exact format for component IDs
+
+The Input Schema will show you something like:
+
+```json
+{
+ "File-abc123": {
+ "path": "string"
+ },
+ "Chroma-xyz789": {
+ "collection_name": "string",
+ "allow_reset": "boolean"
+ }
+}
+```
+
+## Complete File Injection Examples
+
+### Basic File Upload with Vector Store
+
+Here's the complete workflow for injecting files into a vector store flow:
+
+```python
+import requests
+import json
+
+def inject_files_into_vector_store(
+ langflow_url: str,
+ api_key: str,
+ flow_id: str,
+ file_paths: list,
+ query: str
+):
+ """Complete example: upload files and inject into vector store flow"""
+
+ headers = {
+ "Content-Type": "application/json",
+ "x-api-key": api_key
+ }
+
+ # Step 1: Upload files to Langflow
+    print("📁 Uploading files...")
+ uploaded_files = []
+
+ for file_path in file_paths:
+ with open(file_path, 'rb') as f:
+ files = {"file": (file_path.split('/')[-1], f)}
+ response = requests.post(
+ f"{langflow_url}/api/v2/files",
+ files=files,
+ headers={"x-api-key": api_key}
+ )
+ response.raise_for_status()
+ uploaded_files.append(response.json())
+            print(f"✅ Uploaded: {response.json()['name']}")
+
+ # Step 2: Prepare file paths for injection
+ file_paths_for_flow = [f["path"] for f in uploaded_files]
+
+ # Step 3: Configure tweaks for your specific flow
+ # Adjust these component IDs based on your flow
+ tweaks = {
+ # Inject files into File component
+ "File-abc123": {
+ "path": file_paths_for_flow
+ },
+
+ # Configure vector store component
+ "Chroma-xyz789": {
+ "collection_name": "uploaded_documents",
+ "allow_reset": True,
+ "persist_directory": "./chroma_db"
+ },
+
+ # Override text splitter if needed
+ "TextSplitter-def456": {
+ "chunk_size": 1000,
+ "chunk_overlap": 200
+ },
+
+ # Configure embeddings component
+ "OpenAIEmbeddings-ghi789": {
+ "model": "text-embedding-ada-002"
+ }
+ }
+
+ # Step 4: Execute flow with injected files
+ data = {
+ "input_value": query,
+ "input_type": "text",
+ "output_type": "chat",
+ "tweaks": tweaks,
+ "session_id": "vector_store_session"
+ }
+
+    print(f"🤖 Running flow with {len(file_paths_for_flow)} files...")
+ response = requests.post(
+ f"{langflow_url}/api/v1/run/{flow_id}",
+ headers=headers,
+ json=data
+ )
+
+ if response.status_code == 200:
+ result = response.json()
+
+ # Extract the chat response
+ outputs = result.get("outputs", [])
+ if outputs and outputs[0].get("outputs"):
+ answer = outputs[0]["outputs"][0]["results"]["message"]["text"]
+            print(f"💬 Answer: {answer}")
+ return answer
+ else:
+            print("❌ No response generated")
+ return None
+ else:
+        print(f"❌ Error: {response.status_code} - {response.text}")
+ return None
+
+# Usage example
+if __name__ == "__main__":
+ response = inject_files_into_vector_store(
+ langflow_url="http://localhost:7860",
+ api_key="your_api_key",
+ flow_id="your_vector_flow_id",
+ file_paths=["document1.pdf", "document2.txt", "report3.csv"],
+ query="What are the key findings in these documents?"
+ )
+```
+
+## Advanced Vector Store Configuration
+
+### Different Vector Store Types
+
+Each vector store type has specific parameters you can configure via tweaks:
+
+#### ChromaDB Configuration
+
+```python
+chroma_tweaks = {
+ "Chroma-abc123": {
+ "persist_directory": "./my_chroma_db",
+ "collection_name": "custom_docs",
+ "allow_reset": True,
+ "distance_function": "cosine" # cosine, l2, ip
+ }
+}
+```
+
+#### Pinecone Configuration
+
+```python
+pinecone_tweaks = {
+ "Pinecone-xyz789": {
+ "index_name": "my-index",
+ "namespace": "documents",
+ "environment": "us-east1-gcp",
+ "create_index_if_not_exists": True
+ }
+}
+```
+
+#### FAISS Configuration
+
+```python
+faiss_tweaks = {
+ "FAISS-def456": {
+ "index_file": "./index.faiss",
+ "save_index_file": True,
+ "normalize_L2": True
+ }
+}
+```
+
+#### AstraDB Configuration
+
+```python
+astra_tweaks = {
+ "AstraDB-ghi789": {
+ "collection_name": "my_collection",
+ "namespace": "default_keyspace",
+ "keyspace": "my_keyspace",
+ "setup_mode": "sync"
+ }
+}
+```
+
+## Multi-Component Flow Example
+
+Here's a complete example for a complex RAG flow with multiple components:
+
+```python
+def run_advanced_rag_flow(
+ langflow_url: str,
+ api_key: str,
+ flow_id: str,
+ uploaded_files: list,
+ query: str
+):
+ """Example for complex RAG flow with multiple vector store components"""
+
+ # Component IDs for a typical RAG flow
+ component_ids = {
+ "file_loader": "File-abc123",
+ "text_splitter": "RecursiveTextSplitter-def456",
+ "embeddings": "OpenAIEmbeddings-ghi789",
+ "vector_store": "Chroma-jkl012",
+ "retriever": "VectorStoreRetriever-mno345",
+ "llm": "OpenAI-pqr678",
+ "output": "ChatOutput-stu901"
+ }
+
+ # Configure the complete flow via tweaks
+ tweaks = {
+ # File loading component
+ component_ids["file_loader"]: {
+ "path": [f["path"] for f in uploaded_files]
+ },
+
+ # Text processing
+ component_ids["text_splitter"]: {
+ "chunk_size": 1500,
+ "chunk_overlap": 200,
+ "separators": ["\n\n", "\n", " "]
+ },
+
+ # Embeddings configuration
+ component_ids["embeddings"]: {
+ "model": "text-embedding-3-small",
+ "dimensions": 1536,
+ "encoding_format": "float"
+ },
+
+ # Vector store settings
+ component_ids["vector_store"]: {
+ "persist_directory": "./rag_db",
+ "collection_name": "qa_documents",
+ "allow_reset": True,
+ "embedding_function": "text-embedding-3-small",
+ "get_or_create": True
+ },
+
+ # Retriever configuration
+ component_ids["retriever"]: {
+ "search_type": "similarity", # similarity, mmr, similarity_score_threshold
+ "search_kwargs": {
+ "k": 4, # Number of documents to retrieve
+ "score_threshold": 0.7 # Minimum similarity score
+ }
+ },
+
+ # LLM configuration (if using a model that supports tweaks)
+ component_ids["llm"]: {
+ "temperature": 0.1,
+ "max_tokens": 1000,
+ "model": "gpt-4"
+ }
+ }
+
+ # Execute the flow
+ data = {
+ "input_value": query,
+ "input_type": "text",
+ "output_type": "chat",
+ "tweaks": tweaks,
+ "session_id": "advanced_rag_session"
+ }
+
+ response = requests.post(
+ f"{langflow_url}/api/v1/run/{flow_id}",
+ headers={"x-api-key": api_key, "Content-Type": "application/json"},
+ json=data
+ )
+
+ return response.json()
+```
+
+## Dynamic Component Configuration
+
+### Runtime Component Detection
+
+```python
+def detect_flow_components(flow_id: str):
+ """Get component information from a flow"""
+
+ # First, get the flow details
+ response = requests.get(
+ f"{langflow_url}/api/v1/flows/{flow_id}",
+ headers={"x-api-key": api_key}
+ )
+
+ if response.status_code == 200:
+ flow_data = response.json()
+
+ # Extract component information
+ vertices = flow_data.get("data", {}).get("vertices", {})
+
+ component_map = {}
+ for vertex_id, vertex_data in vertices.items():
+ component_type = vertex_data.get("type", "")
+ display_name = vertex_data.get("template", {}).get("display_name", "")
+
+ # Map common component types
+ if "File" in component_type:
+ component_map["file"] = vertex_id
+ elif any(store in component_type for store in ["Chroma", "Pinecone", "FAISS", "Astra"]):
+ component_map["vector_store"] = vertex_id
+ elif "Embeddings" in component_type:
+ component_map["embeddings"] = vertex_id
+ elif "Splitter" in component_type or "Text" in component_type:
+ component_map["text_splitter"] = vertex_id
+ elif "Retriever" in component_type:
+ component_map["retriever"] = vertex_id
+ elif "LLM" in component_type or "OpenAI" in component_type or "Model" in component_type:
+ component_map["llm"] = vertex_id
+
+ return component_map
+
+ return {}
+
+# Usage
+component_map = detect_flow_components(flow_id)
+print("Detected components:", component_map)
+```
+
+## Error Handling and Debugging
+
+### Common Issues and Solutions
+
+```python
+def debug_flow_execution(response, tweaks):
+ """Debug flow execution issues"""
+
+ print("=== Flow Execution Debug ===")
+ print(f"Status Code: {response.status_code}")
+
+ if response.status_code != 200:
+ print(f"Error Response: {response.text}")
+ return False
+
+ result = response.json()
+
+ # Check for errors in the response
+ if "error" in result:
+ print(f"Flow Error: {result['error']}")
+ return False
+
+ # Check if outputs exist
+ outputs = result.get("outputs", [])
+ if not outputs:
+        print("⚠️ No outputs generated - check if components are properly connected")
+ return False
+
+ # Check individual component results
+ for i, output in enumerate(outputs):
+ print(f"Output {i}: {output.keys()}")
+ component_outputs = output.get("outputs", [])
+ for j, comp_output in enumerate(component_outputs):
+ if "errors" in comp_output:
+ print(f"Component {j} Error: {comp_output['errors']}")
+
+ return True
+
+# Usage
+response = requests.post(url, json=data)
+if not debug_flow_execution(response, tweaks):
+ print("Flow execution failed. Check the debug output above.")
+```
+
+## Best Practices
+
+### 1. Component ID Management
+
+- **Always verify component IDs** before deploying to production
+- **Use environment variables** for sensitive component configurations
+- **Document your component mapping** for team members
+
+### 2. Error Handling
+
+- **Always check response status codes**
+- **Implement retry logic** for network issues
+- **Log component failures** for debugging
+
+### 3. Performance Optimization
+
+- **Batch file uploads** when possible
+- **Use appropriate chunk sizes** for text splitting
+- **Monitor vector store memory usage**
+
+### 4. Security Considerations
+
+- **Never hardcode API keys** in your application code
+- **Use HTTPS** for all Langflow communications
+- **Validate file types** before uploading
+
+## Next Steps
+
+- [Complete Streamlit integration example](./streamlit-integration-example)
+- [Langflow API reference](../API-Reference/api-reference-api-examples)
+- [Vector store documentation](../Components/components-vector-stores)
+- [Best practices for RAG flows](https://github.com/langflow-ai/langflow/discussions)
\ No newline at end of file
diff --git a/docs/sidebars.js b/docs/sidebars.js
index e83764e849f8..db51e51186d1 100644
--- a/docs/sidebars.js
+++ b/docs/sidebars.js
@@ -160,6 +160,16 @@ module.exports = {
id: "Develop/configuration-cli",
label: "Use the Langflow CLI"
},
+ {
+ type: "doc",
+ id: "Develop/streamlit-integration-example",
+ label: "Streamlit integration example"
+ },
+ {
+ type: "doc",
+ id: "Develop/vector-store-api-integration",
+ label: "Vector store API integration"
+ },
],
},
{