feat: implements separate adapter for ollama
hajdul88 committed Feb 18, 2025
commit d69c2080cb6876e71dc48d9b9e13bd13c2f43f5d
44 changes: 44 additions & 0 deletions cognee/infrastructure/llm/ollama/adapter.py
@@ -0,0 +1,44 @@
from typing import Type
from pydantic import BaseModel
import instructor
from cognee.infrastructure.llm.llm_interface import LLMInterface
from cognee.infrastructure.llm.config import get_llm_config
from openai import AsyncOpenAI


class OllamaAPIAdapter(LLMInterface):
    """Adapter for the Ollama API, using instructor with an OpenAI-compatible backend."""

    def __init__(self, endpoint: str, api_key: str, model: str, name: str, max_tokens: int):
        self.name = name
        self.model = model
        self.api_key = api_key
        self.endpoint = endpoint
        self.max_tokens = max_tokens

        self.aclient = instructor.from_openai(
            AsyncOpenAI(base_url=self.endpoint, api_key=self.api_key), mode=instructor.Mode.JSON
        )

    async def acreate_structured_output(
        self, text_input: str, system_prompt: str, response_model: Type[BaseModel]
    ) -> BaseModel:
        """Generate a structured output from the LLM using the provided text and system prompt."""

        response = await self.aclient.chat.completions.create(
            model=self.model,
            messages=[
                {
                    "role": "user",
                    "content": f"Use the given format to extract information from the following input: {text_input}",
                },
                {
                    "role": "system",
                    "content": system_prompt,
                },
            ],
            max_retries=5,
            response_model=response_model,
        )

        return response
Comment on lines +23 to +44
Contributor
🛠️ Refactor suggestion

Add error handling and configuration options.

The method should handle API errors explicitly and allow configuration of model parameters such as temperature and timeout.

     async def acreate_structured_output(
         self, text_input: str, system_prompt: str, response_model: Type[BaseModel]
     ) -> BaseModel:
-        """Generate a structured output from the LLM using the provided text and system prompt."""
+        """Generate a structured output from Ollama using the provided text and system prompt.
+
+        Args:
+            text_input: The input text to process
+            system_prompt: The system prompt to guide the model
+            response_model: Pydantic model for response structure
+
+        Returns:
+            BaseModel: Structured response matching response_model
+
+        Raises:
+            OpenAIError: If the API call fails
+            ValueError: If input validation fails
+        """
+        if not text_input or not system_prompt:
+            raise ValueError("text_input and system_prompt are required")
 
-        response = await self.aclient.chat.completions.create(
-            model=self.model,
-            messages=[
-                {
-                    "role": "user",
-                    "content": f"Use the given format to extract information from the following input: {text_input}",
-                },
-                {
-                    "role": "system",
-                    "content": system_prompt,
-                },
-            ],
-            max_retries=5,
-            response_model=response_model,
-        )
-
-        return response
+        try:
+            response = await self.aclient.chat.completions.create(
+                model=self.model,
+                messages=[
+                    {
+                        "role": "user",
+                        "content": f"Use the given format to extract information from the following input: {text_input}",
+                    },
+                    {
+                        "role": "system",
+                        "content": system_prompt,
+                    },
+                ],
+                max_retries=5,
+                response_model=response_model,
+                temperature=0.7,  # TODO: make configurable rather than hard-coded
+                timeout=30,  # Guard against hung requests
+            )
+            return response
+        except Exception as e:
+            # Requires a module-level import: from openai import OpenAIError
+            raise OpenAIError(f"Failed to generate structured output: {e}") from e
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change

    async def acreate_structured_output(
        self, text_input: str, system_prompt: str, response_model: Type[BaseModel]
    ) -> BaseModel:
        """Generate a structured output from Ollama using the provided text and system prompt.

        Args:
            text_input: The input text to process
            system_prompt: The system prompt to guide the model
            response_model: Pydantic model for response structure

        Returns:
            BaseModel: Structured response matching response_model

        Raises:
            OpenAIError: If the API call fails
            ValueError: If input validation fails
        """
        if not text_input or not system_prompt:
            raise ValueError("text_input and system_prompt are required")

        try:
            response = await self.aclient.chat.completions.create(
                model=self.model,
                messages=[
                    {
                        "role": "user",
                        "content": f"Use the given format to extract information from the following input: {text_input}",
                    },
                    {
                        "role": "system",
                        "content": system_prompt,
                    },
                ],
                max_retries=5,
                response_model=response_model,
                temperature=0.7,  # TODO: make configurable rather than hard-coded
                timeout=30,  # Guard against hung requests
            )
            return response
        except Exception as e:
            # Requires a module-level import: from openai import OpenAIError
            raise OpenAIError(f"Failed to generate structured output: {e}") from e