refactor: move graph_prompt_path to llm config
lxobr committed Apr 3, 2025
commit e0f7057bc533fd45b671cc8b3408bec5d5a2766a
2 changes: 0 additions & 2 deletions cognee/api/v1/cognify/cognify.py
@@ -111,7 +111,6 @@ async def get_default_tasks(  # TODO: Find out a better way to do this (Boris's
     chunker=TextChunker,
     chunk_size: int = None,
     ontology_file_path: Optional[str] = None,
-    graph_prompt_path: Optional[str] = None,
 ) -> list[Task]:
     if user is None:
         user = await get_default_user()
@@ -132,7 +131,6 @@ async def get_default_tasks(  # TODO: Find out a better way to do this (Boris's
             extract_graph_from_data,
             graph_model=graph_model,
             ontology_adapter=ontology_adapter,
-            graph_prompt_path=graph_prompt_path,
             task_config={"batch_size": 10},
         ),  # Generate knowledge graphs from the document chunks.
         Task(
2 changes: 2 additions & 0 deletions cognee/infrastructure/llm/config.py
@@ -15,6 +15,7 @@ class LLMConfig(BaseSettings):
     llm_streaming: bool = False
     llm_max_tokens: int = 16384
     transcription_model: str = "whisper-1"
+    graph_prompt_path: str = "generate_graph_prompt.txt"
 
     model_config = SettingsConfigDict(env_file=".env", extra="allow")
 
@@ -83,6 +84,7 @@ def to_dict(self) -> dict:
             "streaming": self.llm_streaming,
             "max_tokens": self.llm_max_tokens,
             "transcription_model": self.transcription_model,
+            "graph_prompt_path": self.graph_prompt_path,
         }
 
 
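Because LLMConfig is a pydantic BaseSettings class loaded from .env, the new graph_prompt_path field can be overridden through the environment instead of a function argument. A minimal sketch, assuming the default field-name-to-variable mapping (no env_prefix is visible in the diff); the prompt file name is purely illustrative:

import os

# Hypothetical prompt file; the variable must be set before the config is first loaded.
os.environ["GRAPH_PROMPT_PATH"] = "my_custom_graph_prompt.txt"

from cognee.infrastructure.llm.config import get_llm_config

print(get_llm_config().graph_prompt_path)  # expected: my_custom_graph_prompt.txt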
@@ -2,14 +2,14 @@
 from pydantic import BaseModel
 from cognee.infrastructure.llm.get_llm_client import get_llm_client
 from cognee.infrastructure.llm.prompts import render_prompt
+from cognee.infrastructure.llm.config import get_llm_config
 
 
-async def extract_content_graph(
-    content: str, response_model: Type[BaseModel], graph_prompt_path: Optional[str] = None
-):
+async def extract_content_graph(content: str, response_model: Type[BaseModel]):
     llm_client = get_llm_client()
+    llm_config = get_llm_config()
 
-    prompt_path = graph_prompt_path or "generate_graph_prompt.txt"
+    prompt_path = llm_config.graph_prompt_path
     system_prompt = render_prompt(prompt_path, {})
     content_graph = await llm_client.acreate_structured_output(
         content, system_prompt, response_model
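The call-site contract is now simpler: the prompt path no longer travels as a parameter and is read from the LLM config inside the function. A hedged usage sketch; the Person model and example text are illustrative, not from the repo, and extract_content_graph is assumed to be imported from the module shown above:

import asyncio

from pydantic import BaseModel


class Person(BaseModel):
    name: str
    occupation: str


async def demo() -> None:
    # Prompt selection now comes from LLMConfig.graph_prompt_path, not an argument.
    graph = await extract_content_graph("Ada Lovelace was a mathematician.", Person)
    print(graph)


asyncio.run(demo())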
6 changes: 1 addition & 5 deletions cognee/tasks/graph/extract_graph_from_data.py
@@ -54,13 +54,9 @@ async def extract_graph_from_data(
     data_chunks: list[DocumentChunk],
     graph_model: Type[BaseModel],
     ontology_adapter: OntologyResolver = OntologyResolver(),
-    graph_prompt_path: Optional[str] = None,
 ) -> List[DocumentChunk]:
     """Extracts and integrates a knowledge graph from the text content of document chunks using a specified graph model."""
     chunk_graphs = await asyncio.gather(
-        *[
-            extract_content_graph(chunk.text, graph_model, graph_prompt_path)
-            for chunk in data_chunks
-        ]
+        *[extract_content_graph(chunk.text, graph_model) for chunk in data_chunks]
     )
     return await integrate_chunk_graphs(data_chunks, chunk_graphs, graph_model, ontology_adapter)
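Correspondingly, callers of extract_graph_from_data drop the prompt-path keyword and pass only the chunks, the graph model, and optionally an ontology adapter. A hedged sketch of the updated call pattern; the wrapper function and its arguments are assumptions about the surrounding pipeline, not repo code:

from typing import Type

from pydantic import BaseModel

from cognee.tasks.graph.extract_graph_from_data import extract_graph_from_data


async def run_graph_extraction(chunks, graph_model: Type[BaseModel]):
    # No graph_prompt_path keyword anymore; the prompt path comes from LLMConfig.
    return await extract_graph_from_data(data_chunks=chunks, graph_model=graph_model)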