From fcc445599dc551a6eab1371ed55449469bc30faf Mon Sep 17 00:00:00 2001 From: Richard Blythman Date: Tue, 11 Feb 2025 18:52:32 +0000 Subject: [PATCH] update modules section --- docs/NapthaModules/1-agents.md | 115 +++++++++--- docs/NapthaModules/2-tools.md | 184 +++++++++++-------- docs/NapthaModules/3-knowledge-bases.md | 235 +++++++++++++++--------- docs/NapthaModules/4-memories.md | 134 ++++++-------- docs/NapthaModules/5-personas.md | 121 +++++++----- docs/NapthaModules/6-orchestrators.md | 191 ++++++++++++------- docs/NapthaModules/7-environments.md | 212 +++++++++++++-------- 7 files changed, 742 insertions(+), 450 deletions(-) diff --git a/docs/NapthaModules/1-agents.md b/docs/NapthaModules/1-agents.md index e269ffce..3fd8ead7 100644 --- a/docs/NapthaModules/1-agents.md +++ b/docs/NapthaModules/1-agents.md @@ -1,5 +1,14 @@ # Agent Modules +In this section, we'll cover: + +- [🤖 What is an Agent Module?](#-what-is-an-agent-module) +- [📝 Agent Configurations](#-agent-configurations) +- [🐋 Agent Deployments](#-agent-deployments) +- [🚀 Running an Agent Module](#-running-an-agent-module) + +## 🤖 What is an Agent Module? + The core of the Agent Module is the loop or logic that an agent runs. Some examples of agents that use different loops include: - **Chat Agents** @@ -11,7 +20,7 @@ The core of the Agent Module is the loop or logic that an agent runs. Some examp The code for this loop is usually contained in the `run.py` file of the agent module (for a detailed breakdown of the structure of an agent module, see the [overview](/NapthaModules/0-overview) page). 
-## Agent Configurations +## 📝 Agent Configurations As well as the core loop, Agent Modules are configured by specifying: @@ -50,7 +59,7 @@ Or in the deployment.json file in the `configs` folder of the module: ] ``` -## Agent Deployments +## 🐋 Agent Deployments Agent deployments allow you to specify other modules that the agent module interacts with: @@ -93,15 +102,37 @@ Or in the deployment.json file: ``` -## Deploying and Running an Agent Module +## 🚀 Running an Agent Module ### Prerequisites Install the Naptha SDK using the [instructions here](https://github.com/NapthaAI/naptha-sdk/?tab=readme-ov-file#install). +### From the CLI + +The [Hello World Agent](https://github.com/NapthaAI/hello_world_agent) is the simplest example of an agent that prints hello. You can deploy the agent (without running) using: + +```bash +# usage: naptha create +naptha create agent:hello_world_agent +``` + +Run the agent: + +```bash +# usage: naptha run +naptha run agent:hello_world_agent -p "firstname=sam surname=altman" +``` + +Try running the [Simple Chat Agent](https://github.com/NapthaAI/simple_chat_agent) that uses the local LLM running on your node: + +```bash +naptha run agent:simple_chat_agent -p "tool_name='chat' tool_input_data='what is an ai agent?'" +``` + ### In Python -You can deploy andrun an agent in Python using: +You can deploy and run an agent in Python using: ```python from naptha_sdk.modules.agent import Agent @@ -135,26 +166,15 @@ agent_run_input = AgentRunInput( ) # Run the agent -response = await agent.call_agent_func(agent_run_input) +response = await agent.run(agent_run_input) ``` -Under the hood, `call_agent_func` makes a call to the worker node via API, which executes the agent module. This makes it possible for agents built using different agent frameworks to interoperate. +:::info +For details on how to run LLM inference within modules, see the [LLM Inference](/docs/NapthaInference/1-inference) page. 
+::: -### From the CLI -You can deploy the agent (without running) using: - -```bash -# usage: naptha create -naptha create agent:hello_world_agent -``` - -Run the agent: - -```bash -# usage: naptha run -naptha run agent:hello_world_agent -p "firstname=sam surname=altman" -``` +Under the hood, `Agent.run` makes a call to the worker node via API, which executes the agent module. This makes it possible for agents built using different agent frameworks to interoperate. ## Examples @@ -170,4 +190,57 @@ Check out these sample agent modules: ## Next Steps -- Learn about Orchestrator Modules: [Orchestrator Modules](/docs/NapthaModules/2-orchestrator) \ No newline at end of file +import CardGrid from '@site/src/components/CardGrid'; + +export const featureCards = [ + { + title: 'Create Your First Module', + description: 'Follow our tutorial to create your first agent module', + icon: '✨', + link: 'Tutorials/module-guide' + }, + { + title: 'Onboard your Agent from Other Frameworks', + description: 'Find out how to automatically create a Naptha module from other agent frameworks', + icon: '🔄', + link: 'Integrations' + }, + { + title: 'Run LLM Inference', + description: 'Learn how to make LLM calls within your agent module', + icon: '🧠', + link: 'NapthaInference/1-inference' + }, + { + title: 'Tool Modules', + description: 'Learn how to use Agents with Tool Modules', + icon: '🛠️', + link: 'NapthaModules/2-tools' + }, + { + title: 'Knowledge Base Modules', + description: 'Learn how to use Agents with Knowledge Base Modules', + icon: '📚', + link: 'NapthaModules/3-knowledge-bases' + }, + { + title: 'Memory Modules', + description: 'Learn how to use Agents with Memory Modules', + icon: '💭', + link: 'NapthaModules/4-memories' + }, + { + title: 'Persona Modules', + description: 'Learn how to use Agents with Persona Modules', + icon: '🎭', + link: 'NapthaModules/5-personas' + }, + { + title: 'Orchestrator Modules', + description: 'Learn how to use Agents within Orchestrator Modules', 
+ icon: '🎮', + link: 'NapthaModules/6-orchestrator' + } +]; + + diff --git a/docs/NapthaModules/2-tools.md b/docs/NapthaModules/2-tools.md index 5414fd5b..b6078845 100644 --- a/docs/NapthaModules/2-tools.md +++ b/docs/NapthaModules/2-tools.md @@ -1,5 +1,15 @@ # Tool Modules +In this section, we'll cover: + +- [🔧 What is a Tool Module?](#-what-is-a-tool-module) +- [📝 Tool Configurations](#-tool-configurations) +- [🐋 Tool Deployments](#-tool-deployments) +- [🚀 Running a Tool Module](#-running-a-tool-module) +- [🤖 Running an Agent that uses a Tool](#-running-an-agent-that-uses-a-tool) + +## 🔧 What is a Tool Module? + Tool modules extend agent capabilities by providing reusable functions that can be shared across different agents. Some examples of tool modules include: - **Web Search**: Access and retrieve information from the internet @@ -9,7 +19,7 @@ Tool modules extend agent capabilities by providing reusable functions that can ![Tool Integration](/img/tool-integration.png) -## Tool Configurations +## 📝 Tool Configurations You can configure a tool module by specifying: @@ -24,22 +34,7 @@ class ToolConfig(BaseModel): llm_config: Optional[LLMConfig] = None ``` -Or in the deployment.json file in the `configs` folder of the module: - -```json -# ToolConfig in deployment.json file -[ - { - ... - "config": { - "config_name": "tool_config", - "llm_config": {"config_name": "model_1"}, - } - } -] -``` - -## Tool Deployments +## 🚀 Tool Deployments Tool deployments allow you to specify the `node` that the tool will run on, and the `module` that the tool will use. 
The configuration of a tool deployment can be specified using the `ToolDeployment` class: @@ -53,64 +48,15 @@ class ToolDeployment(BaseModel): data_generation_config: Optional[DataGenerationConfig] = None ``` -Or in the deployment.json file: - -```json -# ToolDeployment in deployment.json file -[ - { - "node": {"name": "node.naptha.ai"}, - "module": {"name": "generate_image_tool"}, - "config": ..., - } -] -``` - -## Deploying and Running a Tool Module +## 🛠️ Running a Tool Module ### Prerequisites Install the Naptha SDK using the [instructions here](https://github.com/NapthaAI/naptha-sdk/?tab=readme-ov-file#install). -### In Python - -You can deploy and call a tool in Python using: - -```python -from naptha_sdk.client.naptha import Naptha -from naptha_sdk.modules.tool import Tool -from naptha_sdk.schemas import ToolRunInput +### Example -naptha = Naptha() - -tool_deployment = { - "node": {"name": "node.naptha.ai"}, - "module": {"name": "generate_image_tool"}, - ... -} - -tool = Tool() - -# Deploy the tool -response = await tool.create(tool_deployment) - -input_params = { - "tool_name": "generate_image_tool", - "tool_input_data": "generate an image of a cat", -} - -tool_run_input = ToolRunInput( - consumer_id=naptha.user.id, - inputs=input_params, - deployment=tool_deployment, - signature=sign_consumer_id(naptha.user.id, os.getenv("PRIVATE_KEY")) -) - -# Run the tool -response = await tool.call_tool_func(tool_run_input) -``` - -### From the CLI +The [Generate Image Tool](https://github.com/NapthaAI/generate_image_tool) is a simple example of a Tool module that will be used in the examples below. It is intended to demonstrate how agents can interact with a Tool module that allows them to generate images. 
You can deploy the tool (without running) using: @@ -126,19 +72,109 @@ Run the tool: naptha run tool:generate_image_tool -p "tool_name='generate_image_tool' tool_input_data='A beautiful image of a cat'" ``` -Run an Agent that interacts with the Tool: +The configuration of a tool module is specified in the `deployment.json` file in the `configs` folder of the module. + +```json +# ToolConfig in deployment.json file +[ + { + ... + "config": { + "config_name": "tool_config", + "llm_config": {"config_name": "model_1"}, + } + } +] +``` + +:::info +Details on how to store secrets such as API keys securely on the Naptha Hub coming soon. +::: + +## 🤖 Running an Agent that uses a Tool + +The [Generate Image Agent](https://github.com/NapthaAI/generate_image_agent) is an example of an Agent module that interacts with the [Generate Image Tool](https://github.com/NapthaAI/generate_image_tool). You can run the agent module using: ```bash -# usage: naptha run agent:generate_image_agent -p "" --tool_nodes "" +# usage: naptha run agent:generate_image_agent -p "" --tool_nodes "" naptha run agent:generate_image_agent -p "tool_name='generate_image_tool' tool_input_data='A beautiful image of a cat'" --tool_nodes "node.naptha.ai" ``` -## Examples +The name of the tool subdeployment that the agent uses is specified in the `configs/deployment.json`, and the full details of that tool subdeployment are loaded from the deployment with the same name in the `configs/tool_deployments.json` file. + + +To use a tool as a subdeployment for another module, you can create a tool_deployments.json file and specify use the `tool_deployments` field in the deployment.json file: + +```json +# AgentDeployment in deployment.json file +[ + { + "node": {"name": "node.naptha.ai"}, + "module": {"name": "generate_image_agent"}, + "config": ..., + "tool_deployments": [{"name": "tool_deployment_1"}], + ... 
+ } +] + +# ToolDeployment in tool_deployments.json file +[ + { + "name": "tool_deployment_1", + "module": {"name": "generate_image_tool"}, + "node": {"ip": "node.naptha.ai"}, + "config": { + "config_name": "tool_config_1", + "llm_config": {"config_name": "model_1"} + }, + } +] +``` + +There is a `GenerateImageAgent` class in the `run.py` [file](https://github.com/NapthaAI/generate_image_agent/blob/main/generate_image_agent/run.py#L16), which imports the `Tool` class and calls the `Tool.run` method: + +```python +from naptha_sdk.schemas import AgentDeployment, AgentRunInput, ToolRunInput +from naptha_sdk.modules.tool import Tool +from naptha_sdk.user import sign_consumer_id + +class GenerateImageAgent: + def __init__(self, deployment: AgentDeployment): + ... + # the arg below is loaded from configs/tool_deployments.json + self.tool = Tool(tool_deployment=self.deployment.tool_deployments[0]) + ... -Check out these sample tool modules: -- [Generate Image Tool](https://github.com/NapthaAI/generate_image_tool) + async def call_tool(self, module_run: AgentRunInput): + tool_run_input = ToolRunInput( + consumer_id=module_run.consumer_id, + inputs=module_run.inputs, + deployment=self.deployment.tool_deployments[0], + signature=sign_consumer_id(module_run.consumer_id, os.getenv("PRIVATE_KEY")) + ) + + tool_response = await self.tool.run(tool_run_input) +``` ## Need Help? 
- Join our [Community](https://naptha.ai/naptha-community) and post in the #support channel - Submit issues on [GitHub](https://github.com/NapthaAI) +## Next Steps + +import CardGrid from '@site/src/components/CardGrid'; + +export const featureCards = [ + { + title: 'Create Your First Tool Module', + description: 'Use the Naptha Learn Hub to create your first tool module', + icon: '✨', + link: 'https://naptha-ai-learn.vercel.app/learn/expert/tool-modules/introduction' + }, + { + title: 'Run LLM Inference', + description: 'Learn how to make LLM calls within your tool module', + icon: '🧠', + link: 'NapthaInference/1-inference' + } +]; diff --git a/docs/NapthaModules/3-knowledge-bases.md b/docs/NapthaModules/3-knowledge-bases.md index 67ea9fb3..d5ff893e 100644 --- a/docs/NapthaModules/3-knowledge-bases.md +++ b/docs/NapthaModules/3-knowledge-bases.md @@ -1,5 +1,15 @@ # Knowledge Base Modules +In this section, we'll cover: + +- [📚 What is a Knowledge Base Module?](#-what-is-a-knowledge-base-module) +- [📝 Knowledge Base Configurations](#-knowledge-base-configurations) +- [🐋 Knowledge Base Deployments](#-knowledge-base-deployments) +- [🚀 Running a Knowledge Base Module](#-running-a-knowledge-base-module) +- [🤖 Running an Agent that uses a Knowledge Base](#-running-an-agent-that-uses-a-knowledge-base) + +## 📚 What is a Knowledge Base Module? + Knowledge Base modules provide agents and other modules with access to structured information repositories. These modules enable agents to query, retrieve, and reason about domain-specific knowledge, making them more effective at specialized tasks. Knowledge Base modules can be used for: @@ -10,7 +20,7 @@ Knowledge Base modules can be used for: Naptha Nodes support the deployment of Knowledge Base modules. The state of these modules is stored in a local database (postgres) and file system on the Naptha Node. 
-## Knowledge Base Configurations +## 📝 Knowledge Base Configurations Knowledge Base modules are configured by specifying: @@ -28,12 +38,33 @@ class KBConfig(BaseModel): ``` :::info -The storage configuration schema can be found in the [Storage Provider](/docs/NapthaStorage/0-overview.md) section. +More details on the `StorageConfig` schema can be found in the [Storage Provider](/docs/NapthaStorage/0-overview.md) section. ::: +## 🐋 Knowledge Base Deployments +Knowledge Base deployments allow you to specify the `node` that the knowledge base will run on, and the `module` that the knowledge base will use. The configuration of a knowledge base deployment can be specified using the `KBDeployment` class: + +```python +#naptha_sdk/schemas.py +class KBDeployment(BaseModel): + node: Union[NodeConfig, NodeConfigUser, Dict] + name: Optional[str] = None + module: Optional[Dict] = None + config: Optional[KBConfig] = None +``` -Or in the deployment.json file in the `configs` folder of the module: +## 🚀 Running a Knowledge Base Module + +### Prerequisites + +Install the Naptha SDK using the [instructions here](https://github.com/NapthaAI/naptha-sdk/?tab=readme-ov-file#install). + +### Example + +The [Wikipedia Knowledge Base Module](https://github.com/NapthaAI/wikipedia_kb/tree/main) is a simple example of a Knowledge Base module. It is intended to demonstrate how agents can interact with a Knowledge Base that looks like Wikipedia. + +The configuration of a knowledge base module is specified in the `deployment.json` file in the `configs` folder of the module. ```json # KnowledgeBaseConfig in deployment.json file @@ -61,91 +92,20 @@ Or in the deployment.json file in the `configs` folder of the module: ] ``` -## Knowledge Base Deployments - -Knowledge Base deployments allow you to specify the `node` that the knowledge base will run on, and the `module` that the knowledge base will use. 
The configuration of a knowledge base deployment can be specified using the `KBDeployment` class: - -```python -#naptha_sdk/schemas.py -class KBDeployment(BaseModel): - node: Union[NodeConfig, NodeConfigUser, Dict] - name: Optional[str] = None - module: Optional[Dict] = None - config: Optional[KBConfig] = None -``` - -Or in the deployment.json file in the `configs` folder of the module: - -```json -# KnowledgeBaseDeployment in deployment.json file -[ - { - "node": {"name": "node.naptha.ai"}, - "module": {"name": "wikipedia_kb"}, - } -] -``` - -## Deploying and Running a Knowledge Base Module - -### Prerequisites - -Install the Naptha SDK using the [instructions here](https://github.com/NapthaAI/naptha-sdk/?tab=readme-ov-file#install). - -### In Python - -You can deploy and run a knowledge base in Python using: - -```python -from naptha_sdk.modules.kb import KnowledgeBase -from naptha_sdk.client.naptha import Naptha -from naptha_sdk.schemas import KBRunInput - -naptha = Naptha() - -kb_deployment = { - "node": {"name": "node.naptha.ai"}, - "module": {"name": "wikipedia_kb"}, - ... -} - -knowledge_base = KnowledgeBase() - -# Deploy the knowledge base -response = await knowledge_base.create(kb_deployment) - -input_params = { - "function_name": "init", - "function_input_data": None -} - -kb_run_input = KBRunInput( - consumer_id=naptha.user.id, - inputs=input_params, - deployment=kb_deployment, - signature=sign_consumer_id(naptha.user.id, os.getenv("PRIVATE_KEY")) -) - -# Run the knowledge base -response = await knowledge_base.call_kb_func(kb_run_input) -``` - -### From the CLI - -You can deploy the knowledge base (without running) using: +You can deploy the knowledge base (without running) using the CLI: ```bash # usage: naptha create naptha create kb:wikipedia_kb ``` -Initialize the knowledge base: +If you take a look at the wikipedia_kb module, you'll notice the `WikipediaKB` class in the `run.py` file has a number of methods. 
You can think of these methods as [endpoints of the Knowledge Base](https://github.com/NapthaAI/wikipedia_kb/blob/main/wikipedia_kb/run.py#L59), which will be called using the `run` command below. For example, you can initialize the content in the Knowledge Base using: ```bash naptha run kb:wikipedia_kb -p "function_name='init'" ``` -List content in the Knowledge Base: +You can list content in the Knowledge Base using: ```bash naptha run kb:wikipedia_kb -p '{ @@ -156,7 +116,7 @@ naptha run kb:wikipedia_kb -p '{ }' ``` -Add to the Knowledge Base: +You can add to the Knowledge Base using: ```bash naptha run kb:wikipedia_kb -p '{ @@ -169,7 +129,7 @@ naptha run kb:wikipedia_kb -p '{ }' ``` -Query the Knowledge Base Module: +You can query the Knowledge Base using: ```bash naptha run kb:wikipedia_kb -p '{ @@ -180,7 +140,7 @@ naptha run kb:wikipedia_kb -p '{ }' ``` -Delete a row from the Knowledge Base: +You can delete a row from the Knowledge Base using: ```bash naptha run kb:wikipedia_kb -p '{ @@ -193,7 +153,7 @@ naptha run kb:wikipedia_kb -p '{ }' ``` -Delete the entire Knowledge Base: +You can delete the entire Knowledge Base using: ```bash naptha run kb:wikipedia_kb -p '{ @@ -204,13 +164,95 @@ naptha run kb:wikipedia_kb -p '{ }' ``` -Run an Agent that interacts with the Knowledge Base: +The Wikipedia KB also instantiates the `StorageClient` class and calls the `execute` method with `CreateStorageRequest`, `ReadStorageRequest`, `DeleteStorageRequest`, `ListStorageRequest` and `UpdateStorageRequest` objects: + +```python +from naptha_sdk.schemas import KBDeployment +from naptha_sdk.storage.schemas import ReadStorageRequest +from naptha_sdk.storage.storage_client import StorageClient + +class WikipediaKB: + def __init__(self, deployment: KBDeployment): + ... 
+        # the arg is loaded from configs/deployment.json
+        self.storage_client = StorageClient(self.deployment.node)
+        self.storage_type = self.config.storage_config.storage_type
+        self.table_name = self.config.storage_config.path
+        self.schema = self.config.storage_config.storage_schema
+
+    async def run_query(self, input_data: Dict[str, Any], *args, **kwargs):
+        read_storage_request = ReadStorageRequest(
+            storage_type=self.storage_type,
+            path=self.table_name,
+            options={"condition": {"title": input_data["query"]}}
+        )
+
+        read_result = await self.storage_client.execute(read_storage_request)
+```
+
+## 🤖 Running an Agent that uses a Knowledge Base
+
+You can run an Agent that interacts with the Knowledge Base using:
+
 ```bash
-# usage: naptha run agent:wikipedia_agent -p "" --kb_nodes ""
+# usage: naptha run agent:wikipedia_agent -p "" --kb_nodes ""
 naptha run agent:wikipedia_agent -p "function_name='run_query' query='Elon Musk' question='Who is Elon Musk?'" --kb_nodes "node.naptha.ai"
 ```
 
+The name of the KB subdeployment that the agent uses is specified in the `configs/deployment.json`, and the full details of that KB subdeployment are loaded from the deployment with the same name in the `configs/kb_deployments.json` file.
+
+```json
+# AgentDeployment in configs/deployment.json file
+[
+  {
+    "node": {"name": "node.naptha.ai"},
+    "module": {"name": "wikipedia_agent"},
+    "config": ...,
+    "kb_deployments": [{"name": "kb_deployment_1"}],
+    ...
+  }
+]
+
+# KBDeployment in configs/kb_deployments.json file
+[
+  {
+    "name": "kb_deployment_1",
+    "module": {"name": "wikipedia_kb"},
+    "node": {"ip": "node.naptha.ai"},
+    "config": {
+      "llm_config": {"config_name": "model_1"},
+      "storage_config": ...
+ }, + } +] +``` + +There is a `WikipediaAgent` class in the `run.py` [file](https://github.com/NapthaAI/wikipedia_agent/blob/main/wikipedia_agent/run.py#L15), which imports the `KnowledgeBase` class and calls the `KnowledgeBase.run` method: + +```python +from naptha_sdk.modules.kb import KnowledgeBase +from naptha_sdk.schemas import AgentDeployment, AgentRunInput, KBRunInput +from naptha_sdk.user import sign_consumer_id + +class WikipediaAgent: + def __init__(self, deployment: AgentDeployment): + ... + # the arg below is loaded from configs/kb_deployments.json + self.wikipedia_kb = KnowledgeBase(kb_deployment=self.deployment.kb_deployments[0]) + ... + + async def run_wikipedia_agent(self, module_run: AgentRunInput): + kb_run_input = KBRunInput( + consumer_id=module_run.consumer_id, + inputs={"func_name": "run_query", "func_input_data": {"query": module_run.inputs.query}}, + deployment=self.deployment.kb_deployments[0], + signature=sign_consumer_id(module_run.consumer_id, os.getenv("PRIVATE_KEY")) + ) + + page = await self.wikipedia_kb.run(kb_run_input) +``` + ## Examples Check out these knowledge base implementations: @@ -220,4 +262,29 @@ Check out these knowledge base implementations: ## Need Help? 
- Join our [Community](https://naptha.ai/naptha-community) and post in the #support channel -- Submit issues on [GitHub](https://github.com/NapthaAI) \ No newline at end of file +- Submit issues on [GitHub](https://github.com/NapthaAI) + +## Next Steps + +import CardGrid from '@site/src/components/CardGrid'; + +export const featureCards = [ + { + title: 'Create Your First Knowledge Base Module', + description: 'Use the Naptha Learn Hub to create your first knowledge base module', + icon: '✨', + link: 'https://naptha-ai-learn.vercel.app/learn/expert/knowledge-base-modules/introduction' + }, + { + title: 'Interact with Storage Providers', + description: 'Learn how to use storage within your knowledge base module', + icon: '💾', + link: 'NapthaStorage/0-overview' + }, + { + title: 'Orchestrator Modules', + description: 'Learn how to use Knowledge Base Modules within Orchestrator Modules', + icon: '🎮', + link: 'NapthaModules/6-orchestrator' + } +]; diff --git a/docs/NapthaModules/4-memories.md b/docs/NapthaModules/4-memories.md index 085e77e1..0c020596 100644 --- a/docs/NapthaModules/4-memories.md +++ b/docs/NapthaModules/4-memories.md @@ -1,5 +1,15 @@ # Memory Modules +In this section, we'll cover: + +- [💭 What is a Memory Module?](#-what-is-a-memory-module) +- [📝 Memory Configurations](#-memory-configurations) +- [🐋 Memory Deployments](#-memory-deployments) +- [🚀 Running a Memory Module](#-running-a-memory-module) +- [🤖 Running an Agent that uses a Memory](#-running-an-agent-that-uses-a-memory) + +## 💭 What is a Memory Module? + Memory modules enable agent modules on Naptha to store, retrieve, and manage their experiences over time. These modules are crucial for maintaining context and learning from past interactions. You can create modules for different types of memories such as: @@ -10,7 +20,7 @@ You can create modules for different types of memories such as: Naptha Nodes support the deployment of Memory modules. 
The state of these modules is stored in a local database (postgres) and file system on the Naptha Node.
 
-## Memory Configurations
+## 📝 Memory Configurations
 
 You can configure a memory module by specifying:
 
@@ -27,18 +37,40 @@ class MemoryConfig(BaseModel):
 ```
 
 :::info
-The storage configuration schema can be found in the [Storage Provider](/docs/NapthaStorage/0-overview.md) section.
+More details on the `StorageConfig` schema can be found in the [Storage Provider](/docs/NapthaStorage/0-overview.md) section.
 :::
 
-Or in the deployment.json file in the `configs` folder of the module:
+## 🐋 Memory Deployments
+
+Memory deployments allow you to specify the `node` that the memory will run on, the `module` that the memory will use, and the `config` that the memory will use. The configuration of a memory deployment can be specified using the `MemoryDeployment` class:
+
+```python
+#naptha_sdk/schemas.py
+class MemoryDeployment(BaseModel):
+    node: Union[NodeConfig, NodeConfigUser, Dict]
+    name: Optional[str] = None
+    module: Optional[Dict] = None
+    config: Optional[MemoryConfig] = None
+```
+
+## 🚀 Running a Memory Module
+
+### Prerequisites
+
+Install the Naptha SDK using the [instructions here](https://github.com/NapthaAI/naptha-sdk/?tab=readme-ov-file#install).
+
+### Example
+
+The [Cognitive Memory module](https://github.com/NapthaAI/cognitive_memory) is a simple example of a Memory module. It is intended to demonstrate how agents can interact with a Memory module that allows them to store and retrieve cognitive steps such as reflections.
+
+The configuration of a memory module is specified in the `deployment.json` file in the `configs` folder of the module.
 
 ```json
-# MemoryConfig in deployment.json file
+# MemoryConfig in configs/deployment.json file
 [
   {
     ...
"config": { - "llm_config": {"config_name": "model_1"}, "storage_config": { "storage_type": "db", "path": "cognitive_memory", @@ -59,77 +91,6 @@ Or in the deployment.json file in the `configs` folder of the module: ] ``` -## Memory Deployments - -Memory deployments allow you to specify the `node` that the memory will run on, the `module` that the memory will use, and the `config` that the memory will use. The configuration of a memory deployment can be specified using the `MemoryDeployment` class: - -```python -#naptha_sdk/schemas.py -class MemoryDeployment(BaseModel): - node: Union[NodeConfig, NodeConfigUser, Dict] - name: Optional[str] = None - module: Optional[Dict] = None - config: Optional[MemoryConfig] = None -``` - -Or in the deployment.json file in the `configs` folder of the module: - -```json -# MemoryDeployment in deployment.json file -[ - { - "node": {"name": "node.naptha.ai"}, - "module": {"name": "wikipedia_kb"}, - } -] -``` - -## Deploying and Running a Memory Module - -### Prerequisites - -Install the Naptha SDK using the [instructions here](https://github.com/NapthaAI/naptha-sdk/?tab=readme-ov-file#install). - -### In Python - -You can deploy and run memory in Python using: - -```python -from naptha_sdk.modules.memory import Memory -from naptha_sdk.client.naptha import Naptha -from naptha_sdk.schemas import MemoryRunInput - -naptha = Naptha() - -memory_deployment = { - "node": {"name": "node.naptha.ai"}, - "module": {"name": "wikipedia_kb"}, - ... 
-} - -memory = Memory() - -# Deploy the memory -response = await memory.create(memory_deployment) - -input_params = { - "func_name": "init", - "func_input_data": None -} - -memory_run_input = MemoryRunInput( - consumer_id=naptha.user.id, - inputs=input_params, - deployment=memory_deployment, - signature=sign_consumer_id(naptha.user.id, os.getenv("PRIVATE_KEY")) -) - -# Run memory -response = await memory.call_memory_func(memory_run_input) -``` - -### From the CLI - You can deploy the memory (without running) using: ```bash @@ -137,13 +98,13 @@ You can deploy the memory (without running) using: naptha create memory:cognitive_memory ``` -Create a Memory Table: +There is a CognitiveMemory class in the `run.py` file that has a number of methods. You can think of these methods as [endpoints of the Memory](https://github.com/NapthaAI/cognitive_memory/blob/main/cognitive_memory/run.py#L34), which will be called using the `run` command below. For example, you can initialize the table in Memory using: ```bash naptha run memory:cognitive_memory -p "func_name='init'" ``` -Add to Memory: +You can add to the memory table using: ```bash naptha run memory:cognitive_memory -p '{ @@ -155,7 +116,7 @@ naptha run memory:cognitive_memory -p '{ }' ``` -Query Memory: +You can query the memory table using: ```bash naptha run memory:cognitive_memory -p '{ @@ -166,7 +127,7 @@ naptha run memory:cognitive_memory -p '{ }' ``` -Delete a row in Memory: +You can delete a row in the memory table using: ```bash naptha run memory:cognitive_memory -p '{ @@ -186,3 +147,16 @@ Check out these memory implementations: ## Need Help? 
- Join our [Community](https://naptha.ai/naptha-community) and post in the #support channel - Submit issues on [GitHub](https://github.com/NapthaAI) + +## Next Steps + +import CardGrid from '@site/src/components/CardGrid'; + +export const featureCards = [ + { + title: 'Interact with Storage Providers', + description: 'Learn how to use storage within your memory module', + icon: '💾', + link: 'NapthaStorage/0-overview' + } +]; diff --git a/docs/NapthaModules/5-personas.md b/docs/NapthaModules/5-personas.md index 0085de60..5255135e 100644 --- a/docs/NapthaModules/5-personas.md +++ b/docs/NapthaModules/5-personas.md @@ -1,80 +1,99 @@ # Persona Modules -Persona modules define the character and behavior of AI agents. These modules enable more natural and specialized agent interactions by providing personality traits, communication styles, and behavioral patterns. +In this section, we'll cover: -## Using Persona Modules on Naptha +- [🎭 What is a Persona Module?](#-what-is-a-persona-module) +- [📝 Persona Configurations](#-persona-configurations) +- [🤖 Running an Agent that uses a Persona](#-running-an-agent-that-uses-a-persona) -### Prerequisites +## 🎭 What is a Persona Module? -Install the Naptha SDK using the [instructions here](https://github.com/NapthaAI/naptha-sdk). +Persona modules define the character and behavior of AI agents. These modules enable more natural and specialized agent interactions by providing personality traits, communication styles, and behavioral patterns. 
-### List Available Personas +## 📝 Persona Configurations -You can explore available personas using the CLI: -```bash -naptha personas -``` +The persona module is specified in the `AgentConfig` class: -Or using the Python SDK: ```python -from naptha_sdk.client.naptha import Naptha - -async def list_personas(): - """List personas with new client""" - try: - async with Naptha() as naptha: - await naptha.hub.signin(os.getenv("HUB_USER"), os.getenv("HUB_PASS")) - personas = await naptha.hub.list_personas() - return personas - except Exception as e: - print(f"Failed to list personas: {str(e)}") - return None +#naptha_sdk/schemas.py +class AgentConfig(BaseModel): + config_name: Optional[str] = "agent_config" + llm_config: Optional[LLMConfig] = None + persona_module: Optional[Union[Dict, BaseModel]] = None + system_prompt: Optional[Union[Dict, BaseModel]] = None ``` -### Create a New Persona - -Via CLI: -```bash -naptha personas sam_altman_twitter -p "description='Persona for Sam Altman' parameters='{name: str, bio: str, openness: int}' module_url='https://huggingface.co/datasets/OpenAI/twitter_personas' module_entrypoint='data/sam.json'" +Or in the deployment.json file in the `configs` folder of the agent module: + +```json +# AgentConfig in deployment.json file +[ + { + ... + "config": { + "config_name": "agent_config", + "llm_config": {"config_name": "model_1"}, + "persona_module" : {"name": "richard_twitter"}, + "system_prompt": { + "role": "You are a helpful AI assistant.", + "persona": "" + } + } + } +] ``` -:::note -Make sure that the `module_url` points to the main repo (e.g., HuggingFace dataset, GitHub repo, or IPFS) and the `module_entrypoint` specifies the path to the file (JSON or YAML format). +:::info +The `name` of the `persona_module` should exactly match the name of the persona module in the `personas` registry on the Naptha Hub. The rest of the metadata will be loaded from the Hub. 
The data will be loaded into the `persona` field of the `system_prompt` dict. ::: -### Delete a Persona +## 🤖 Running an Agent that uses a Persona -```bash -naptha personas -d persona_name -``` +### Prerequisites -### Run an Agent with a Persona +Install the Naptha SDK using the [instructions here](https://github.com/NapthaAI/naptha-sdk). + +### Examples + +Below are examples of running the Simple Chat Agent with a [twitter/X persona](https://huggingface.co/datasets/NapthaAI/twitter_personas/blob/main/interstellarninja.json), generated from exported X data: -Via CLI: ```bash -# Using interstellarninja persona naptha run agent:simple_chat_agent -p "tool_name='chat' tool_input_data='who are you?'" --persona_modules "interstellarninja_twitter" - -# Using market agent persona -naptha run agent:simple_chat_agent -p "tool_name='chat' tool_input_data='who are you?'" --persona_modules "marketagents_aileenmay" ``` +and from a synthetically generated [market persona](https://huggingface.co/datasets/NapthaAI/market_agents_personas/blob/main/market_agents_personas/data/Aileen_May.yaml) based on census data: - -## Available Collections - -Browse our curated persona collections: -- [Social personas](https://huggingface.co/datasets/NapthaAI/twitter_personas) -- [Market personas](https://huggingface.co/datasets/NapthaAI/market_agents_personas) - -## Creating Your Own - -Want to create your own persona? Check out our [Quick Persona Guide](../Tutorials/quick-persona-guide.md) for step-by-step instructions. +```bash +naptha run agent:simple_chat_agent -p "tool_name='chat' tool_input_data='who are you?'" --persona_modules "marketagents_aileenmay" +``` ## Need Help? 
-- Join our [Discord Community](https://naptha.ai/naptha-community) +- Join our [Discord Community](https://naptha.ai/naptha-community) and post in the #support channel - Submit issues on [GitHub](https://github.com/NapthaAI) ## Next Steps -- Check out the tutorial for exporting your X data and creating a persona: [Creating a Persona](/docs/Tutorials/quick-persona-guide) \ No newline at end of file +import CardGrid from '@site/src/components/CardGrid'; + +export const featureCards = [ + { + title: 'Create a Persona', + description: 'Learn how to create a persona from your X data', + icon: '🧠', + link: 'Tutorials/quick-persona-guide' + }, + { + title: 'Browse Persona Datasets on Hugging Face', + description: 'Browse our persona datasets in the Naptha Organization on Hugging Face', + icon: '🤖', + link: 'https://huggingface.co/datasets/NapthaAI/twitter_personas' + }, + { + title: 'Synthetic Demographics', + description: 'Browse a synthetic demographics dataset created in our community', + icon: '🤖', + link: 'https://huggingface.co/datasets/sacrificialpancakes/synthetic_demographics_seed' + } +]; + + \ No newline at end of file diff --git a/docs/NapthaModules/6-orchestrators.md b/docs/NapthaModules/6-orchestrators.md index 44b74ced..da86ccb7 100644 --- a/docs/NapthaModules/6-orchestrators.md +++ b/docs/NapthaModules/6-orchestrators.md @@ -1,5 +1,14 @@ # Orchestrator Modules +In this section, we'll cover: + +- [🤖 What is an Orchestrator Module?](#-what-is-an-orchestrator-module) +- [📝 Orchestrator Configurations](#-orchestrator-configurations) +- [🐋 Orchestrator Deployments](#-orchestrator-deployments) +- [🚀 Running an Orchestrator Module](#-running-an-orchestrator-module) + +## 🎮 What is an Orchestrator Module? + Agent orchestrators are modules that manage the orchestration of agents, tools, environments, and personas, as defined through interaction patterns and workflows. 
Examples of agent orchestrators include: - Orchestration of numerous social agents e.g. agents that take part in debate or social simulations @@ -8,7 +17,7 @@ Agent orchestrators are modules that manage the orchestration of agents, tools, The code for the orchestration logic is usually contained in the `run.py` file of the orchestrator module (for a detailed breakdown of the structure of an orchestrator module, see the [overview](/NapthaModules/0-overview) page). -## Orchestrator Configurations +## 📝 Orchestrator Configurations As well as the core orchestration logic, Orchestrator Modules are configured by specifying: @@ -25,17 +34,7 @@ class OrchestratorConfig(BaseModel): max_rounds: Optional[int] = 5 ``` -Or in the deployment.json file in the `configs` folder of the module: - -```json - "config": { - "config_name": "orchestrator_config_1", - "llm_config": {"config_name": "model_1"}, - "max_rounds": 5, - } -``` - -## Orchestrator Deployments +## 🐋 Orchestrator Deployments Orchestrator deployments allow you to specify other modules that the orchestrator module interacts with: @@ -59,69 +58,102 @@ class OrchestratorDeployment(BaseModel): memory_deployments: Optional[List[MemoryDeployment]] = None ``` -Or in the deployment.json file: - -```json -# OrchestratorDeployment in deployment.json file -[ - { - "node": {"name": "node2.naptha.ai"}, - "module": {"name": "multiagent_chat"}, - "config": ..., - "agent_deployments": [{"name": "agent_deployment_1"}, {"name": "agent_deployment_2"}], - "environment_deployments": [{"name": "environment_deployment_1"}], - "kb_deployments": [{"name": "kb_deployment_1"}], - "memory_deployments": [{"name": "memory_deployment_1"}, {"name": "memory_deployment_2"}] - } -] -``` - -## Deploying and Running an Orchestrator Module +## 🚀 Running an Orchestrator Module ### Prerequisites Install the Naptha SDK using the [instructions here](https://github.com/NapthaAI/naptha-sdk/?tab=readme-ov-file#install). 
-### In Python - -You can run an orchestrator in Python using: - -```python -from naptha_sdk.client.naptha import Naptha -from naptha_sdk.modules.orchestrator import Orchestrator -from naptha_sdk.schemas import OrchestratorRunInput +### Example -naptha = Naptha() +The [Multiagent Chat Orchestrator](https://github.com/NapthaAI/multiagent_chat) is an example of an Orchestrator module that interacts with simple chat [Agent modules](https://github.com/NapthaAI/simple_chat_agent) and a groupchat [Knowledge Base module](https://github.com/NapthaAI/groupchat_kb). The orchestrator, agents and knowledge base can all run on different nodes. You can run the orchestrator module on hosted nodes using: -orchestrator_deployment = { - "node": {"name": "node2.naptha.ai"}, - "module": {"name": "multiagent_chat"}, - ... -} +The names of the Agent and KB subdeployments that the orchestrator uses are specified in the `configs/deployment.json`, and the full details of those subdeployments are loaded from the deployments with the same name in the `configs/agent_deployments.json` and `configs/kb_deployments.json` files. -orchestrator = Orchestrator() +```json +# OrchestratorDeployment in configs/deployment.json file +[ + { + "node": {"name": "node.naptha.ai"}, + "module": {"name": "multiagent_chat"}, + "config": ..., + "agent_deployments": [ + {"name": "agent_deployment_1"}, + {"name": "agent_deployment_2"} + ], + "kb_deployments": [{"name": "groupchat_kb_deployment_1"}] + ... + } +] -# Deploy the orchestrator -response = await orchestrator.create(orchestrator_deployment) +# AgentDeployments in configs/agent_deployments.json file +[ + { + "name": "agent_deployment_1", + "module": {"name": "simple_chat_agent"}, + "node": {"ip": "node.naptha.ai"}, + "config": { + "config_name": "agent_config_1", + "llm_config": {"config_name": "model_1"}, + "system_prompt": ... 
+ } + }, + { + "name": "agent_deployment_2", + "module": {"name": "simple_chat_agent"}, + "node": {"ip": "node.naptha.ai"}, + "config": { + "config_name": "agent_config_2", + "llm_config": {"config_name": "model_2"}, + "system_prompt": ... + } + } +] -input_params = { - "prompt": "i would like to count up to ten, one number at a time. ill start. one.", -} +# KBDeployment in configs/kb_deployments.json file +[ + { + "name": "groupchat_kb_deployment_1", + "module": {"name": "groupchat_kb"}, + "node": {"ip": "node.naptha.ai"}, + "config": { + "storage_config": ... + }, + } +] +``` -orchestrator_run_input = OrchestratorRunInput( - consumer_id=naptha.user.id, - inputs=input_params, - deployment=orchestrator_deployment, - signature=sign_consumer_id(naptha.user.id, os.getenv("PRIVATE_KEY")) -) +There is a `MultiAgentChat` class in the `run.py` [file](https://github.com/NapthaAI/multiagent_chat/blob/main/multiagent_chat/run.py#L24C7-L24C21), which imports the `Agent` and `KnowledgeBase` classes and calls the `Agent.run` and `KnowledgeBase.run` methods: -# Call the orchestrator -response = await orchestrator.call_orchestrator_func(orchestrator_run_input) +``` +from naptha_sdk.modules.agent import Agent +from naptha_sdk.modules.kb import KnowledgeBase +from naptha_sdk.schemas import OrchestratorRunInput, OrchestratorDeployment, KBRunInput, AgentRunInput +from naptha_sdk.user import sign_consumer_id + +class MultiAgentChat: + def __init__(self, deployment: OrchestratorDeployment): + self.orchestrator_deployment = orchestrator_deployment + self.agent_deployments = self.orchestrator_deployment.agent_deployments + self.agents = [ + Agent(deployment=self.agent_deployments[0], *args, **kwargs), + Agent(deployment=self.agent_deployments[1], *args, **kwargs) + ] + self.groupchat_kb = KnowledgeBase(kb_deployment=self.orchestrator_deployment.kb_deployments[0]) + + async def run_multiagent_chat(self, module_run: OrchestratorRunInput): + ... 
+        for round_num in range(self.orchestrator_deployment.config.max_rounds):
+            for agent_num, agent in enumerate(self.agents):
+                agent_run_input = AgentRunInput(
+                    consumer_id=module_run.consumer_id,
+                    inputs={"tool_name": "chat", "tool_input_data": messages},
+                    deployment=self.agent_deployments[agent_num],
+                    signature=sign_consumer_id(module_run.consumer_id, os.getenv("PRIVATE_KEY"))
+                )
+                response = await agent.call_agent_func(agent_run_input)
 ```
 
-Under the hood, `call_orchestrator_func` makes a call to the orchestrator node via API, which executes the orchestrator module.
-
-### From the CLI
 
 You can deploy the modules for an orchestrator (without running) using:
 
@@ -129,6 +161,8 @@ You can deploy the modules for an orchestrator (without running) using:
 naptha create orchestrator:multiagent_chat --agent_modules "agent:simple_chat_agent,agent:simple_chat_agent" --agent_nodes "node.naptha.ai,node1.naptha.ai" --kb_modules "kb:groupchat_kb" --kb_nodes "node.naptha.ai"
 ```
 
+Note that using the `--agent_nodes` and `--kb_nodes` flags overrides the corresponding values in the `deployment.json` file.
+
 You can run the orchestrator module on hosted nodes using:
 
 ```bash
@@ -144,4 +178,37 @@ Check out these sample agent orchestrator modules:
 
 ## Need Help?
 - Join our [Community](https://naptha.ai/naptha-community) and post in the #support channel
-- Submit issues on [GitHub](https://github.com/NapthaAI) \ No newline at end of file
+- Submit issues on [GitHub](https://github.com/NapthaAI)
+
+## Next Steps
+
+import CardGrid from '@site/src/components/CardGrid';
+
+export const featureCards = [
+  {
+    title: 'Create Your First Orchestrator Module',
+    description: 'Use the Naptha Learn Hub to create your first orchestrator module',
+    icon: '✨',
+    link: 'https://naptha-ai-learn.vercel.app/learn/builder/orchestration/introduction'
+  },
+  {
+    title: 'Run LLM Inference',
+    description: 'Learn how to make LLM calls within your orchestrator module',
+    icon: '🧠',
+    link: 'NapthaInference/1-inference'
+  },
+  {
+    title: 'Knowledge Base Modules',
+    description: 'Learn how to use Orchestrators with Knowledge Base Modules',
+    icon: '📚',
+    link: 'NapthaModules/3-knowledge-bases'
+  },
+  {
+    title: 'Environment Modules',
+    description: 'Learn how to use Orchestrators with Environment Modules',
+    icon: '🏢',
+    link: 'NapthaModules/7-environments'
+  }
+];
+
+
diff --git a/docs/NapthaModules/7-environments.md b/docs/NapthaModules/7-environments.md
index 2e7f8497..6d193e16 100644
--- a/docs/NapthaModules/7-environments.md
+++ b/docs/NapthaModules/7-environments.md
@@ -1,10 +1,15 @@
 # Environment Modules
 
-Environments in Naptha provide the necessary infrastructure doing reinforcement learning. They serve as the shared operational space where agents can:
+In this section, we'll cover:
 
-- Exchange information seamlessly
-- Maintain persistent state across executions \
-- Access shared resources and context
+- [🌳 What is an Environment Module?](#-what-is-an-environment-module)
+- [📝 Environment Configurations](#-environment-configurations)
+- [🐋 Environment Deployments](#-environment-deployments)
+- [🚀 Running an Environment Module](#-running-an-environment-module)
+
+## 🌳 What is an Environment Module?
+
+Environments in Naptha provide the necessary infrastructure for doing reinforcement learning.
 
 Environment modules can be things like:
 
@@ -16,7 +21,12 @@ Environment modules can be things like:
 
 Naptha Nodes support the deployment of Environment modules. The state of these modules is stored in a local database (postgres) and file system on the Naptha Node.
 
-## Environment Configurations
+## 📝 Environment Configurations
+
+Environment modules are configured by specifying:
+
+* **An LLM Configuration** - The language model that the environment uses to generate responses
+* **Storage Configuration** - The storage configuration that the environment uses to store and retrieve data
 
 The configuration of an environment module can be specified using the `EnvironmentConfig` class:
 
@@ -29,10 +39,33 @@ class EnvironmentConfig(BaseModel):
 ```
 
 :::info
-The storage configuration schema can be found in the [Storage Provider](/docs/NapthaStorage/0-overview.md) section.
+More details on the `StorageConfig` schema can be found in the [Storage Provider](/docs/NapthaStorage/0-overview.md) section.
 :::
 
-Or in the deployment.json file in the `configs` folder of the module:
+## 🐋 Environment Deployments
+
+Environment deployments allow you to specify the `node` that the environment will run on, and the `module` that the environment will use. The configuration of an environment deployment can be specified using the `EnvironmentDeployment` class:
+
+```python
+#naptha-sdk/schemas.py
+class EnvironmentDeployment(BaseModel):
+    node: Union[NodeConfig, NodeConfigUser, Dict]
+    name: Optional[str] = None
+    module: Optional[Dict] = None
+    config: Optional[EnvironmentConfig] = None
+```
+
+## 🚀 Running an Environment Module
+
+### Prerequisites
+
+Install the Naptha SDK using the [instructions here](https://github.com/NapthaAI/naptha-sdk).
+ +### Example + +The [Group Chat Environment Module](https://github.com/NapthaAI/chat_environment/tree/main) is a simple example of an Environment module. It is intended to demonstrate how agents can interact with an environment that looks like a group chat. + +The configuration of an environment module is specified in the `deployment.json` file in the `configs` folder of the module. ```json # EnvironmentConfig in deployment.json file @@ -55,98 +88,121 @@ Or in the deployment.json file in the `configs` folder of the module: ] ``` -## Environment Deployments +You can deploy the environment (without running) using: -Environment deployments allow you to specify the `node` that the environment will run on, and the `module` that the environment will use. The configuration of an environment deployment can be specified using the `EnvironmentDeployment` class: +```bash +# usage: naptha create +naptha create environment:chat_environment +``` + +If you take a look at the chat_environment module, you'll notice the `ChatMechanism` class in the `run.py` file has a [method](https://github.com/NapthaAI/chat_environment/blob/main/chat_environment/run.py#L44) called `step()`. This is used to step the environment forward. 
You can get the global state of the environment using: + +```bash +# usage: naptha run +naptha run environment:chat_environment -p "function_name='get_global_state'" +``` + +The Chat Environment also instantiates the `StorageClient` class and calls the `execute` method with `CreateStorageRequest`, `ReadStorageRequest`, `DeleteStorageRequest`, `ListStorageRequest` and `UpdateStorageRequest` objects: ```python -#naptha-sdk/schemas.py -class EnvironmentDeployment(BaseModel): - node: Union[NodeConfig, NodeConfigUser, Dict] - name: Optional[str] = None - module: Optional[Dict] = None - config: Optional[EnvironmentConfig] = None +from naptha_sdk.schemas import KBDeployment +from naptha_sdk.storage.schemas import ReadStorageRequest +from naptha_sdk.storage.storage_client import StorageClient + +class ChatMechanism: + def __init__(self, deployment: KBDeployment): + ... + # the arg is loaded from configs/deployment.json + self.storage_client = StorageClient(self.deployment.node) + self.storage_type = self.config.storage_config.storage_type + self.table_name = self.config.storage_config.path + self.schema = self.config.storage_config.storage_schema + + async def run_query(self, input_data: Dict[str, Any], *args, **kwargs): + read_storage_request = ReadStorageRequest( + storage_type=self.storage_type, + path=self.table_name, + options={"condition": {"title": input_data["query"]}} + ) + + read_result = await self.storage_client.execute(read_storage_request) ``` -Or in the deployment.json file: +# 🤖 Running an Agent that interacts with an Environment + +The name of the Environment subdeployment that the agent uses is specified in the `configs/deployment.json`, and the full details of that Environment subdeployment are loaded from the deployment with the same name in the `configs/environment_deployments.json` file. 
```json -# EnvironmentDeployment in deployment.json file +# AgentDeployment in configs/deployment.json file [ { - "node": {"name": "node2.naptha.ai"}, - "module": {"name": "groupchat_environment"}, + "node": {"name": "node.naptha.ai"}, + "module": {"name": "wikipedia_agent"}, "config": ..., + "environment_deployments": [{"name": "environment_deployment_1"}], + ... } ] -``` - -## Deploying and Running an Environment Module - -### Prerequisites - -Install the Naptha SDK using the [instructions here](https://github.com/NapthaAI/naptha-sdk). -### In Python +# EnvironmentDeployment in configs/environment_deployments.json file +[ + { + "name": "environment_deployment_1", + "node": {"name": "node.naptha.ai"}, + "module": {"name": "chat_environment"}, + "config": { + "llm_config": {"config_name": "model_1"}, + "storage_config": ... + }, + } +] +``` -You can deploy an environment module in Python using: +The `EnvironmentAgent` class in the `run.py` file of the agent module should import the `Environment` class and call the `Environment.run` method: ```python -from naptha_sdk.client.naptha import Naptha from naptha_sdk.modules.environment import Environment -from naptha_sdk.schemas import EnvironmentRunInput - -naptha = Naptha() - -environment_deployment = { - "module": {"name": "groupchat_environment"}, - "environment_node_url": "https://node.naptha.ai" -} - -environment = Environment() +from naptha_sdk.schemas import AgentDeployment, AgentRunInput, EnvironmentRunInput +from naptha_sdk.user import sign_consumer_id -# Deploy the environment -response = await environment.create(environment_deployment) - -input_params = { - "function_name": "get_global_state", -} - -# Run the environment -environment_run_input = EnvironmentRunInput( - consumer_id=naptha.user.id, - inputs=input_params, - deployment=environment_deployment, - signature=sign_consumer_id(naptha.user.id, os.getenv("PRIVATE_KEY")) -) - -response = await 
environment.call_environment_func(environment_run_input) -``` - -Under the hood, `call_environment_func` makes a call to the environment node via API, which executes the environment module. - -### From the CLI - -You can deploy the environment (without running) using: - -```bash -# usage: naptha create -naptha create environment:groupchat_environment +class EnvironmentAgent: + def __init__(self, deployment: AgentDeployment): + ... + # the arg below is loaded from configs/environment_deployments.json + self.environment = Environment(self.deployment.environment_deployments[0]) + + async def run_environment_agent(self, module_run: AgentRunInput): + environment_run_input = EnvironmentRunInput( + consumer_id=module_run.consumer_id, + inputs={"func_name": "step", "func_input_data": {"message": module_run.inputs.message}}, + deployment=self.deployment.environment_deployments[0], + signature=sign_consumer_id(module_run.consumer_id, os.getenv("PRIVATE_KEY")) + ) + + await self.environment.run(environment_run_input) ``` -Run the environment: - -```bash -# usage: naptha run -naptha run environment:groupchat_environment -p "function_name='get_global_state'" -``` - -## Examples - -Check out this environment implementation: -- [Group Chat Environment](https://github.com/NapthaAI/groupchat_environment) - ## Need Help? 
- Join our [Community](https://naptha.ai/naptha-community) and post in the #support channel - Submit issues on [GitHub](https://github.com/NapthaAI) +## Next Steps + +import CardGrid from '@site/src/components/CardGrid'; + +export const featureCards = [ + { + title: 'Create Your First Environment Module', + description: 'Use the Naptha Learn Hub to create your first environment module', + icon: '✨', + link: 'https://naptha-ai-learn.vercel.app/learn/builder/environment-modules/introduction' + }, + { + title: 'Interact with Storage Providers', + description: 'Learn how to use storage within your environment module', + icon: '💾', + link: 'NapthaStorage/0-overview' + }, +]; + + \ No newline at end of file