diff --git a/.github/workflows/test_eval_framework.yml b/.github/workflows/test_eval_framework.yml
index 54aa68dd00..a9eb321b33 100644
--- a/.github/workflows/test_eval_framework.yml
+++ b/.github/workflows/test_eval_framework.yml
@@ -14,7 +14,7 @@ jobs:
   run_eval_framework_test:
     uses: ./.github/workflows/reusable_python_example.yml
     with:
-      example-location: ./evals/eval_framework/run_eval.py
+      example-location: ./cognee/eval_framework/run_eval.py
     secrets:
       LLM_API_KEY: ${{ secrets.OPENAI_API_KEY }}
       OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
diff --git a/cognee/api/v1/cognify/cognify_v2.py b/cognee/api/v1/cognify/cognify_v2.py
index 50a3e081d1..2d5a55ef35 100644
--- a/cognee/api/v1/cognify/cognify_v2.py
+++ b/cognee/api/v1/cognify/cognify_v2.py
@@ -112,8 +112,8 @@ def generate_dataset_name(dataset_name: str) -> str:
     return dataset_name.replace(".", "_").replace(" ", "_")
 
 
-async def get_default_tasks(
-    user: User = None, graph_model: BaseModel = KnowledgeGraph
+async def get_default_tasks(  # TODO: Find out a better way to do this (Boris's comment)
+    user: User = None, graph_model: BaseModel = KnowledgeGraph, chunk_size=1024, chunker=TextChunker
 ) -> list[Task]:
     if user is None:
         user = await get_default_user()
@@ -126,7 +126,8 @@ async def get_default_tasks(
         Task(
             extract_chunks_from_documents,
             max_chunk_tokens=get_max_chunk_tokens(),
-            chunker=TextChunker,
+            chunker=chunker,
+            chunk_size=chunk_size,
         ),  # Extract text chunks based on the document type.
         Task(
             extract_graph_from_data, graph_model=graph_model, task_config={"batch_size": 10}
diff --git a/cognee/api/v1/search/search_v2.py b/cognee/api/v1/search/search_v2.py
index 49faa0dc5a..8afd8545c4 100644
--- a/cognee/api/v1/search/search_v2.py
+++ b/cognee/api/v1/search/search_v2.py
@@ -12,6 +12,7 @@ async def search(
     query_type: SearchType = SearchType.GRAPH_COMPLETION,
     user: User = None,
     datasets: Union[list[str], str, None] = None,
+    system_prompt_path: str = "answer_simple_question.txt",
 ) -> list:
     # We use lists from now on for datasets
     if isinstance(datasets, str):
@@ -23,6 +24,8 @@ async def search(
     if user is None:
         raise UserNotFoundError
 
-    filtered_search_results = await search_function(query_text, query_type, datasets, user)
+    filtered_search_results = await search_function(
+        query_text, query_type, datasets, user, system_prompt_path=system_prompt_path
+    )
 
     return filtered_search_results
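Taken together, the two API changes above let callers tune chunking and steer the answer prompt. A minimal end-to-end sketch (the sample text, query, and chunk size are illustrative; `answer_simple_question.txt` is the default prompt file):

```python
import asyncio

import cognee
from cognee.api.v1.cognify.cognify_v2 import get_default_tasks
from cognee.api.v1.search import SearchType


async def main():
    await cognee.add("Neo4j is a graph database supported by cognee.")

    # Build the default pipeline with a non-default chunk size.
    tasks = await get_default_tasks(chunk_size=2048)
    await cognee.cognify(tasks=tasks)

    # Route the completion through an explicitly chosen system prompt file.
    results = await cognee.search(
        query_text="Which graph databases does cognee support?",
        query_type=SearchType.GRAPH_COMPLETION,
        system_prompt_path="answer_simple_question.txt",
    )
    print(results)


asyncio.run(main())
```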
diff --git a/evals/eval_framework/__init__.py b/cognee/eval_framework/__init__.py
similarity index 100%
rename from evals/eval_framework/__init__.py
rename to cognee/eval_framework/__init__.py
diff --git a/evals/eval_framework/analysis/__init__.py b/cognee/eval_framework/analysis/__init__.py
similarity index 100%
rename from evals/eval_framework/analysis/__init__.py
rename to cognee/eval_framework/analysis/__init__.py
diff --git a/evals/eval_framework/analysis/dashboard_generator.py b/cognee/eval_framework/analysis/dashboard_generator.py
similarity index 100%
rename from evals/eval_framework/analysis/dashboard_generator.py
rename to cognee/eval_framework/analysis/dashboard_generator.py
diff --git a/evals/eval_framework/analysis/metrics_calculator.py b/cognee/eval_framework/analysis/metrics_calculator.py
similarity index 100%
rename from evals/eval_framework/analysis/metrics_calculator.py
rename to cognee/eval_framework/analysis/metrics_calculator.py
diff --git a/evals/eval_framework/answer_generation/__init__.py b/cognee/eval_framework/answer_generation/__init__.py
similarity index 100%
rename from evals/eval_framework/answer_generation/__init__.py
rename to cognee/eval_framework/answer_generation/__init__.py
diff --git a/evals/eval_framework/answer_generation/answer_generation_executor.py b/cognee/eval_framework/answer_generation/answer_generation_executor.py
similarity index 54%
rename from evals/eval_framework/answer_generation/answer_generation_executor.py
rename to cognee/eval_framework/answer_generation/answer_generation_executor.py
index a5b18c8e7e..f4fc5f4a2d 100644
--- a/evals/eval_framework/answer_generation/answer_generation_executor.py
+++ b/cognee/eval_framework/answer_generation/answer_generation_executor.py
@@ -3,20 +3,19 @@ from cognee.api.v1.search import SearchType
 
-question_answering_engine_options: Dict[str, Callable[[str], Awaitable[List[str]]]] = {
-    "cognee_graph_completion": lambda query: cognee.search(
-        query_type=SearchType.GRAPH_COMPLETION, query_text=query
+question_answering_engine_options: Dict[str, Callable[[str, str], Awaitable[List[str]]]] = {
+    "cognee_graph_completion": lambda query, system_prompt_path: cognee.search(
+        query_type=SearchType.GRAPH_COMPLETION,
+        query_text=query,
+        system_prompt_path=system_prompt_path,
     ),
-    "cognee_completion": lambda query: cognee.search(
-        query_type=SearchType.COMPLETION, query_text=query
+    "cognee_completion": lambda query, system_prompt_path: cognee.search(
+        query_type=SearchType.COMPLETION, query_text=query, system_prompt_path=system_prompt_path
     ),
-    "cognee_summaries": lambda query: cognee.search(
-        query_type=SearchType.SUMMARIES, query_text=query
+    "graph_summary_completion": lambda query, system_prompt_path: cognee.search(
+        query_type=SearchType.GRAPH_SUMMARY_COMPLETION,
+        query_text=query,
+        system_prompt_path=system_prompt_path,
     ),
-    "cognee_insights": lambda query: cognee.search(
-        query_type=SearchType.INSIGHTS, query_text=query
-    ),
-    "cognee_chunks": lambda query: cognee.search(query_type=SearchType.CHUNKS, query_text=query),
-    "cognee_code": lambda query: cognee.search(query_type=SearchType.CODE, query_text=query),
 }
@@ -25,13 +24,14 @@ async def question_answering_non_parallel(
         self,
         questions: List[Dict[str, str]],
-        answer_resolver: Callable[[str], Awaitable[List[str]]],
+        answer_resolver: Callable[[str, str], Awaitable[List[str]]],
+        system_prompt: str = "answer_simple_question.txt",
     ) -> List[Dict[str, str]]:
         answers = []
         for instance in questions:
             query_text = instance["question"]
             correct_answer = instance["answer"]
-            search_results = await answer_resolver(query_text)
+            search_results = await answer_resolver(query_text, system_prompt)
 
             answers.append(
                 {
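Each resolver in `question_answering_engine_options` is now awaited with two positional arguments instead of one. A small sketch of calling a resolver directly (the question text is made up):

```python
import asyncio

from cognee.eval_framework.answer_generation.answer_generation_executor import (
    question_answering_engine_options,
)


async def main():
    answer_resolver = question_answering_engine_options["cognee_graph_completion"]
    # Every resolver now takes (query, system_prompt_path) rather than just (query).
    search_results = await answer_resolver(
        "Which graph databases does cognee support?",
        "answer_simple_question.txt",
    )
    print(search_results)


asyncio.run(main())
```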
diff --git a/evals/eval_framework/answer_generation/run_question_answering_module.py b/cognee/eval_framework/answer_generation/run_question_answering_module.py
similarity index 83%
rename from evals/eval_framework/answer_generation/run_question_answering_module.py
rename to cognee/eval_framework/answer_generation/run_question_answering_module.py
index 3ad5e78aca..42b31d44b9 100644
--- a/evals/eval_framework/answer_generation/run_question_answering_module.py
+++ b/cognee/eval_framework/answer_generation/run_question_answering_module.py
@@ -1,6 +1,7 @@
 import logging
 import json
-from evals.eval_framework.answer_generation.answer_generation_executor import (
+from typing import List
+from cognee.eval_framework.answer_generation.answer_generation_executor import (
     AnswerGeneratorExecutor,
     question_answering_engine_options,
 )
@@ -30,7 +31,9 @@ async def create_and_insert_answers_table(questions_payload):
         await session.commit()
 
 
-async def run_question_answering(params: dict) -> None:
+async def run_question_answering(
+    params: dict, system_prompt="answer_simple_question.txt"
+) -> List[dict]:
     if params.get("answering_questions"):
         logging.info("Question answering started...")
         try:
@@ -46,9 +49,17 @@ async def run_question_answering(params: dict) -> None:
             answers = await answer_generator.question_answering_non_parallel(
                 questions=questions,
                 answer_resolver=question_answering_engine_options[params["qa_engine"]],
+                system_prompt=system_prompt,
             )
             with open(params["answers_path"], "w", encoding="utf-8") as f:
                 json.dump(answers, f, ensure_ascii=False, indent=4)
 
             await create_and_insert_answers_table(answers)
 
         logging.info("Question answering End...")
+
+        return answers
+    else:
+        logging.info(
+            "The question answering module was not executed as answering_questions is not enabled"
+        )
+        return []
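Assuming a params dict with the keys this module reads (the paths are hypothetical, and `questions_path` is presumed to be consumed in the elided middle of the function), the module can be driven like this:

```python
import asyncio

from cognee.eval_framework.answer_generation.run_question_answering_module import (
    run_question_answering,
)

# Hypothetical parameter values; the keys mirror the ones run_question_answering reads.
params = {
    "answering_questions": True,
    "qa_engine": "cognee_graph_completion",
    "questions_path": "questions.json",
    "answers_path": "answers.json",
}

answers = asyncio.run(run_question_answering(params, system_prompt="answer_simple_question.txt"))
print(f"Generated {len(answers)} answers")
```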
diff --git a/evals/eval_framework/benchmark_adapters/__init__.py b/cognee/eval_framework/benchmark_adapters/__init__.py
similarity index 100%
rename from evals/eval_framework/benchmark_adapters/__init__.py
rename to cognee/eval_framework/benchmark_adapters/__init__.py
diff --git a/evals/eval_framework/benchmark_adapters/base_benchmark_adapter.py b/cognee/eval_framework/benchmark_adapters/base_benchmark_adapter.py
similarity index 100%
rename from evals/eval_framework/benchmark_adapters/base_benchmark_adapter.py
rename to cognee/eval_framework/benchmark_adapters/base_benchmark_adapter.py
diff --git a/evals/eval_framework/benchmark_adapters/benchmark_adapters.py b/cognee/eval_framework/benchmark_adapters/benchmark_adapters.py
similarity index 59%
rename from evals/eval_framework/benchmark_adapters/benchmark_adapters.py
rename to cognee/eval_framework/benchmark_adapters/benchmark_adapters.py
index f040818e09..6242617746 100644
--- a/evals/eval_framework/benchmark_adapters/benchmark_adapters.py
+++ b/cognee/eval_framework/benchmark_adapters/benchmark_adapters.py
@@ -1,10 +1,10 @@
 from enum import Enum
 from typing import Type
-from evals.eval_framework.benchmark_adapters.hotpot_qa_adapter import HotpotQAAdapter
-from evals.eval_framework.benchmark_adapters.musique_adapter import MusiqueQAAdapter
-from evals.eval_framework.benchmark_adapters.dummy_adapter import DummyAdapter
-from evals.eval_framework.benchmark_adapters.twowikimultihop_adapter import TwoWikiMultihopAdapter
+from cognee.eval_framework.benchmark_adapters.hotpot_qa_adapter import HotpotQAAdapter
+from cognee.eval_framework.benchmark_adapters.musique_adapter import MusiqueQAAdapter
+from cognee.eval_framework.benchmark_adapters.dummy_adapter import DummyAdapter
+from cognee.eval_framework.benchmark_adapters.twowikimultihop_adapter import TwoWikiMultihopAdapter
 
 
 class BenchmarkAdapter(Enum):
diff --git a/evals/eval_framework/benchmark_adapters/dummy_adapter.py b/cognee/eval_framework/benchmark_adapters/dummy_adapter.py
similarity index 76%
rename from evals/eval_framework/benchmark_adapters/dummy_adapter.py
rename to cognee/eval_framework/benchmark_adapters/dummy_adapter.py
index c67440940a..69cc6e518e 100644
--- a/evals/eval_framework/benchmark_adapters/dummy_adapter.py
+++ b/cognee/eval_framework/benchmark_adapters/dummy_adapter.py
@@ -1,12 +1,12 @@
-from typing import Optional
+from typing import Optional, Any
 
-from evals.eval_framework.benchmark_adapters.base_benchmark_adapter import BaseBenchmarkAdapter
+from cognee.eval_framework.benchmark_adapters.base_benchmark_adapter import BaseBenchmarkAdapter
 
 
 class DummyAdapter(BaseBenchmarkAdapter):
     def load_corpus(
         self, limit: Optional[int] = None, seed: int = 42
-    ) -> tuple[list[str], list[dict[str, str]]]:
+    ) -> tuple[list[str], list[dict[str, Any]]]:
         corpus_list = [
             "The cognee is an AI memory engine that supports different vector and graph databases",
             "Neo4j is a graph database supported by cognee",
diff --git a/evals/eval_framework/benchmark_adapters/hotpot_qa_adapter.py b/cognee/eval_framework/benchmark_adapters/hotpot_qa_adapter.py
similarity index 97%
rename from evals/eval_framework/benchmark_adapters/hotpot_qa_adapter.py
rename to cognee/eval_framework/benchmark_adapters/hotpot_qa_adapter.py
index 3020a5bb17..d8e5a03c23 100644
--- a/evals/eval_framework/benchmark_adapters/hotpot_qa_adapter.py
+++ b/cognee/eval_framework/benchmark_adapters/hotpot_qa_adapter.py
@@ -3,7 +3,7 @@
 import json
 import random
 from typing import Optional, Any, List, Tuple
-from evals.eval_framework.benchmark_adapters.base_benchmark_adapter import BaseBenchmarkAdapter
+from cognee.eval_framework.benchmark_adapters.base_benchmark_adapter import BaseBenchmarkAdapter
 
 
 class HotpotQAAdapter(BaseBenchmarkAdapter):
diff --git a/evals/eval_framework/benchmark_adapters/musique_adapter.py b/cognee/eval_framework/benchmark_adapters/musique_adapter.py
similarity index 97%
rename from evals/eval_framework/benchmark_adapters/musique_adapter.py
rename to cognee/eval_framework/benchmark_adapters/musique_adapter.py
index 27cfe554b1..3be44edf84 100644
--- a/evals/eval_framework/benchmark_adapters/musique_adapter.py
+++ b/cognee/eval_framework/benchmark_adapters/musique_adapter.py
@@ -6,7 +6,7 @@
 import gdown
 
-from evals.eval_framework.benchmark_adapters.base_benchmark_adapter import BaseBenchmarkAdapter
+from cognee.eval_framework.benchmark_adapters.base_benchmark_adapter import BaseBenchmarkAdapter
 
 
 class MusiqueQAAdapter(BaseBenchmarkAdapter):
diff --git a/evals/eval_framework/benchmark_adapters/twowikimultihop_adapter.py b/cognee/eval_framework/benchmark_adapters/twowikimultihop_adapter.py
similarity index 91%
rename from evals/eval_framework/benchmark_adapters/twowikimultihop_adapter.py
rename to cognee/eval_framework/benchmark_adapters/twowikimultihop_adapter.py
index a6bb017fb8..ce5e3d57b1 100644
--- a/evals/eval_framework/benchmark_adapters/twowikimultihop_adapter.py
+++ b/cognee/eval_framework/benchmark_adapters/twowikimultihop_adapter.py
@@ -3,7 +3,7 @@
 import json
 import random
 from typing import Optional, Any, List, Tuple
-from evals.eval_framework.benchmark_adapters.hotpot_qa_adapter import HotpotQAAdapter
+from cognee.eval_framework.benchmark_adapters.hotpot_qa_adapter import HotpotQAAdapter
 
 
 class TwoWikiMultihopAdapter(HotpotQAAdapter):
diff --git a/evals/eval_framework/corpus_builder/__init__.py b/cognee/eval_framework/corpus_builder/__init__.py
similarity index 100%
rename from evals/eval_framework/corpus_builder/__init__.py
rename to cognee/eval_framework/corpus_builder/__init__.py
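With the adapters now importable from `cognee.eval_framework`, a custom benchmark is a matter of subclassing `BaseBenchmarkAdapter` and matching the `load_corpus` signature seen in `DummyAdapter` above; a sketch with made-up corpus content:

```python
from typing import Any, Optional

from cognee.eval_framework.benchmark_adapters.base_benchmark_adapter import (
    BaseBenchmarkAdapter,
)


class TinyAdapter(BaseBenchmarkAdapter):
    """Toy adapter: a fixed two-document corpus with a single QA pair."""

    def load_corpus(
        self, limit: Optional[int] = None, seed: int = 42
    ) -> tuple[list[str], list[dict[str, Any]]]:
        corpus = [
            "Neo4j is a graph database supported by cognee.",
            "cognee also supports several vector stores.",
        ]
        questions = [
            {"question": "Which graph database does cognee support?", "answer": "Neo4j"}
        ]
        return corpus[:limit], questions[:limit]
```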
diff --git a/evals/eval_framework/corpus_builder/corpus_builder_executor.py b/cognee/eval_framework/corpus_builder/corpus_builder_executor.py
similarity index 69%
rename from evals/eval_framework/corpus_builder/corpus_builder_executor.py
rename to cognee/eval_framework/corpus_builder/corpus_builder_executor.py
index 2dbefa80a1..2e4a7fd3d3 100644
--- a/evals/eval_framework/corpus_builder/corpus_builder_executor.py
+++ b/cognee/eval_framework/corpus_builder/corpus_builder_executor.py
@@ -2,8 +2,9 @@
 import logging
 from typing import Optional, Tuple, List, Dict, Union, Any, Callable, Awaitable
 
-from evals.eval_framework.benchmark_adapters.benchmark_adapters import BenchmarkAdapter
-from evals.eval_framework.corpus_builder.task_getters.TaskGetters import TaskGetters
+from cognee.eval_framework.corpus_builder.task_getters.TaskGetters import TaskGetters
+from cognee.eval_framework.benchmark_adapters.benchmark_adapters import BenchmarkAdapter
+from cognee.modules.chunking.TextChunker import TextChunker
 from cognee.modules.pipelines.tasks.Task import Task
 from cognee.shared.utils import setup_logging
@@ -31,12 +32,14 @@
     def load_corpus(self, limit: Optional[int] = None) -> Tuple[List[Dict], List[str]]:
         self.raw_corpus, self.questions = self.adapter.load_corpus(limit=limit)
         return self.raw_corpus, self.questions
 
-    async def build_corpus(self, limit: Optional[int] = None) -> List[str]:
+    async def build_corpus(
+        self, limit: Optional[int] = None, chunk_size=1024, chunker=TextChunker
+    ) -> List[str]:
         self.load_corpus(limit=limit)
-        await self.run_cognee()
+        await self.run_cognee(chunk_size=chunk_size, chunker=chunker)
         return self.questions
 
-    async def run_cognee(self) -> None:
+    async def run_cognee(self, chunk_size=1024, chunker=TextChunker) -> None:
         setup_logging(logging.ERROR)
 
         await cognee.prune.prune_data()
@@ -44,5 +47,5 @@ async def run_cognee(self) -> None:
 
         await cognee.add(self.raw_corpus)
 
-        tasks = await self.task_getter()
+        tasks = await self.task_getter(chunk_size=chunk_size, chunker=chunker)
         await cognee.cognify(tasks=tasks)
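A sketch of driving the executor directly. The `benchmark="Dummy"` value and constructor keywords are assumptions inferred from `run_corpus_builder` below; `get_default_tasks` satisfies the awaited `task_getter(chunk_size=..., chunker=...)` contract:

```python
import asyncio

from cognee.api.v1.cognify.cognify_v2 import get_default_tasks
from cognee.eval_framework.corpus_builder.corpus_builder_executor import CorpusBuilderExecutor
from cognee.modules.chunking.TextChunker import TextChunker

# Assumed constructor arguments, mirroring how run_corpus_builder wires the executor.
builder = CorpusBuilderExecutor(benchmark="Dummy", task_getter=get_default_tasks)

# Note: run_cognee prunes existing data before adding the benchmark corpus.
questions = asyncio.run(builder.build_corpus(limit=5, chunk_size=2048, chunker=TextChunker))
print(questions)
```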
diff --git a/evals/eval_framework/corpus_builder/run_corpus_builder.py b/cognee/eval_framework/corpus_builder/run_corpus_builder.py
similarity index 77%
rename from evals/eval_framework/corpus_builder/run_corpus_builder.py
rename to cognee/eval_framework/corpus_builder/run_corpus_builder.py
index b2a4366f63..2aff21249c 100644
--- a/evals/eval_framework/corpus_builder/run_corpus_builder.py
+++ b/cognee/eval_framework/corpus_builder/run_corpus_builder.py
@@ -1,14 +1,17 @@
 import logging
 import json
+from typing import List
+
 from cognee.infrastructure.files.storage import LocalStorage
-from evals.eval_framework.corpus_builder.corpus_builder_executor import CorpusBuilderExecutor
+from cognee.eval_framework.corpus_builder.corpus_builder_executor import CorpusBuilderExecutor
 from cognee.modules.data.models.questions_base import QuestionsBase
 from cognee.modules.data.models.questions_data import Questions
 from cognee.infrastructure.databases.relational.get_relational_engine import (
     get_relational_engine,
     get_relational_config,
 )
-from evals.eval_framework.corpus_builder.task_getters.TaskGetters import TaskGetters
+from cognee.modules.chunking.TextChunker import TextChunker
+from cognee.eval_framework.corpus_builder.task_getters.TaskGetters import TaskGetters
 
 
 async def create_and_insert_questions_table(questions_payload):
@@ -28,7 +31,7 @@ async def create_and_insert_questions_table(questions_payload):
         await session.commit()
 
 
-async def run_corpus_builder(params: dict) -> None:
+async def run_corpus_builder(params: dict, chunk_size=1024, chunker=TextChunker) -> List[dict]:
     if params.get("building_corpus_from_scratch"):
         logging.info("Corpus Builder started...")
 
@@ -42,7 +45,7 @@ async def run_corpus_builder(params: dict) -> None:
             task_getter=task_getter,
         )
         questions = await corpus_builder.build_corpus(
-            limit=params.get("number_of_samples_in_corpus")
+            limit=params.get("number_of_samples_in_corpus"), chunk_size=chunk_size, chunker=chunker
         )
         with open(params["questions_path"], "w", encoding="utf-8") as f:
             json.dump(questions, f, ensure_ascii=False, indent=4)
@@ -50,3 +53,5 @@ async def run_corpus_builder(params: dict) -> None:
         await create_and_insert_questions_table(questions_payload=questions)
 
         logging.info("Corpus Builder End...")
+
+        return questions
diff --git a/evals/eval_framework/corpus_builder/task_getters/TaskGetters.py b/cognee/eval_framework/corpus_builder/task_getters/TaskGetters.py
similarity index 88%
rename from evals/eval_framework/corpus_builder/task_getters/TaskGetters.py
rename to cognee/eval_framework/corpus_builder/task_getters/TaskGetters.py
index 39a1a0a235..d58115965c 100644
--- a/evals/eval_framework/corpus_builder/task_getters/TaskGetters.py
+++ b/cognee/eval_framework/corpus_builder/task_getters/TaskGetters.py
@@ -2,7 +2,7 @@
 from typing import Callable, Awaitable, List
 from cognee.api.v1.cognify.cognify_v2 import get_default_tasks
 from cognee.modules.pipelines.tasks.Task import Task
-from evals.eval_framework.corpus_builder.task_getters.get_cascade_graph_tasks import (
+from cognee.eval_framework.corpus_builder.task_getters.get_cascade_graph_tasks import (
     get_cascade_graph_tasks,
 )
diff --git a/evals/eval_framework/corpus_builder/task_getters/__init__.py b/cognee/eval_framework/corpus_builder/task_getters/__init__.py
similarity index 100%
rename from evals/eval_framework/corpus_builder/task_getters/__init__.py
rename to cognee/eval_framework/corpus_builder/task_getters/__init__.py
diff --git a/cognee/eval_framework/corpus_builder/task_getters/default_task_getter.py b/cognee/eval_framework/corpus_builder/task_getters/default_task_getter.py
new file mode 100644
index 0000000000..69f8eca2c5
--- /dev/null
+++ b/cognee/eval_framework/corpus_builder/task_getters/default_task_getter.py
@@ -0,0 +1,14 @@
+from cognee.api.v1.cognify.cognify_v2 import get_default_tasks
+from typing import List
+from cognee.eval_framework.corpus_builder.task_getters.base_task_getter import BaseTaskGetter
+from cognee.modules.pipelines.tasks.Task import Task
+from cognee.infrastructure.llm import get_max_chunk_tokens
+from cognee.modules.chunking.TextChunker import TextChunker
+
+
+class DefaultTaskGetter(BaseTaskGetter):
+    """Default task getter that retrieves tasks using the standard get_default_tasks function."""
+
+    async def get_tasks(self, chunk_size=1024, chunker=TextChunker) -> List[Task]:
+        """Retrieve default tasks asynchronously."""
+        return await get_default_tasks(chunk_size=chunk_size, chunker=chunker)
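The new getter can also be used standalone; a minimal sketch (the chunk size is illustrative):

```python
import asyncio

from cognee.eval_framework.corpus_builder.task_getters.default_task_getter import (
    DefaultTaskGetter,
)

# Fetch the default pipeline tasks with a non-default chunk size.
tasks = asyncio.run(DefaultTaskGetter().get_tasks(chunk_size=2048))
print(f"Prepared {len(tasks)} tasks")
```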
diff --git a/evals/eval_framework/corpus_builder/task_getters/get_cascade_graph_tasks.py b/cognee/eval_framework/corpus_builder/task_getters/get_cascade_graph_tasks.py
similarity index 100%
rename from evals/eval_framework/corpus_builder/task_getters/get_cascade_graph_tasks.py
rename to cognee/eval_framework/corpus_builder/task_getters/get_cascade_graph_tasks.py
diff --git a/evals/eval_framework/eval_config.py b/cognee/eval_framework/eval_config.py
similarity index 100%
rename from evals/eval_framework/eval_config.py
rename to cognee/eval_framework/eval_config.py
diff --git a/evals/eval_framework/evaluation/__init__.py b/cognee/eval_framework/evaluation/__init__.py
similarity index 100%
rename from evals/eval_framework/evaluation/__init__.py
rename to cognee/eval_framework/evaluation/__init__.py
diff --git a/evals/eval_framework/evaluation/base_eval_adapter.py b/cognee/eval_framework/evaluation/base_eval_adapter.py
similarity index 100%
rename from evals/eval_framework/evaluation/base_eval_adapter.py
rename to cognee/eval_framework/evaluation/base_eval_adapter.py
diff --git a/evals/eval_framework/evaluation/deep_eval_adapter.py b/cognee/eval_framework/evaluation/deep_eval_adapter.py
similarity index 88%
rename from evals/eval_framework/evaluation/deep_eval_adapter.py
rename to cognee/eval_framework/evaluation/deep_eval_adapter.py
index ec0cc41f32..84ae79f706 100644
--- a/evals/eval_framework/evaluation/deep_eval_adapter.py
+++ b/cognee/eval_framework/evaluation/deep_eval_adapter.py
@@ -1,9 +1,9 @@
 from deepeval.metrics import GEval
 from deepeval.test_case import LLMTestCase, LLMTestCaseParams
-from evals.eval_framework.eval_config import EvalConfig
-from evals.eval_framework.evaluation.base_eval_adapter import BaseEvalAdapter
-from evals.eval_framework.evaluation.metrics.exact_match import ExactMatchMetric
-from evals.eval_framework.evaluation.metrics.f1 import F1ScoreMetric
+from cognee.eval_framework.eval_config import EvalConfig
+from cognee.eval_framework.evaluation.base_eval_adapter import BaseEvalAdapter
+from cognee.eval_framework.evaluation.metrics.exact_match import ExactMatchMetric
+from cognee.eval_framework.evaluation.metrics.f1 import F1ScoreMetric
 from typing import Any, Dict, List
diff --git a/evals/eval_framework/evaluation/direct_llm_eval_adapter.py b/cognee/eval_framework/evaluation/direct_llm_eval_adapter.py
similarity index 93%
rename from evals/eval_framework/evaluation/direct_llm_eval_adapter.py
rename to cognee/eval_framework/evaluation/direct_llm_eval_adapter.py
index b911f88b0d..00ee7e101f 100644
--- a/evals/eval_framework/evaluation/direct_llm_eval_adapter.py
+++ b/cognee/eval_framework/evaluation/direct_llm_eval_adapter.py
@@ -1,9 +1,9 @@
 from typing import Any, Dict, List
 from pydantic import BaseModel
 from cognee.infrastructure.llm.get_llm_client import get_llm_client
-from evals.eval_framework.evaluation.base_eval_adapter import BaseEvalAdapter
+from cognee.eval_framework.evaluation.base_eval_adapter import BaseEvalAdapter
 from cognee.infrastructure.llm.prompts import read_query_prompt, render_prompt
-from evals.eval_framework.eval_config import EvalConfig
+from cognee.eval_framework.eval_config import EvalConfig
 
 
 class CorrectnessEvaluation(BaseModel):
diff --git a/evals/eval_framework/evaluation/evaluation_executor.py b/cognee/eval_framework/evaluation/evaluation_executor.py
similarity index 91%
rename from evals/eval_framework/evaluation/evaluation_executor.py
rename to cognee/eval_framework/evaluation/evaluation_executor.py
index becee8f4ec..dcee2281e0 100644
--- a/evals/eval_framework/evaluation/evaluation_executor.py
+++ b/cognee/eval_framework/evaluation/evaluation_executor.py
@@ -1,5 +1,5 @@
 from typing import List, Dict, Any, Union
-from evals.eval_framework.evaluation.evaluator_adapters import EvaluatorAdapter
+from cognee.eval_framework.evaluation.evaluator_adapters import EvaluatorAdapter
 
 
 class EvaluationExecutor:
diff --git a/evals/eval_framework/evaluation/evaluator_adapters.py b/cognee/eval_framework/evaluation/evaluator_adapters.py
similarity index 71%
rename from evals/eval_framework/evaluation/evaluator_adapters.py
rename to cognee/eval_framework/evaluation/evaluator_adapters.py
index 28b5462aa3..ec0032f691 100644
--- a/evals/eval_framework/evaluation/evaluator_adapters.py
+++ b/cognee/eval_framework/evaluation/evaluator_adapters.py
@@ -1,7 +1,7 @@
 from enum import Enum
 from typing import Type
-from evals.eval_framework.evaluation.deep_eval_adapter import DeepEvalAdapter
-from evals.eval_framework.evaluation.direct_llm_eval_adapter import DirectLLMEvalAdapter
+from cognee.eval_framework.evaluation.deep_eval_adapter import DeepEvalAdapter
+from cognee.eval_framework.evaluation.direct_llm_eval_adapter import DirectLLMEvalAdapter
 
 
 class EvaluatorAdapter(Enum):
diff --git a/cognee/eval_framework/evaluation/metrics/__init__.py b/cognee/eval_framework/evaluation/metrics/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/evals/eval_framework/evaluation/metrics/exact_match.py b/cognee/eval_framework/evaluation/metrics/exact_match.py
similarity index 100%
rename from evals/eval_framework/evaluation/metrics/exact_match.py
rename to cognee/eval_framework/evaluation/metrics/exact_match.py
diff --git a/evals/eval_framework/evaluation/metrics/f1.py b/cognee/eval_framework/evaluation/metrics/f1.py
similarity index 100%
rename from evals/eval_framework/evaluation/metrics/f1.py
rename to cognee/eval_framework/evaluation/metrics/f1.py
diff --git a/evals/eval_framework/evaluation/run_evaluation_module.py b/cognee/eval_framework/evaluation/run_evaluation_module.py
similarity index 75%
rename from evals/eval_framework/evaluation/run_evaluation_module.py
rename to cognee/eval_framework/evaluation/run_evaluation_module.py
index 76a7c5c560..14230f2244 100644
--- a/evals/eval_framework/evaluation/run_evaluation_module.py
+++ b/cognee/eval_framework/evaluation/run_evaluation_module.py
@@ -1,8 +1,9 @@
 import logging
 import json
-from evals.eval_framework.evaluation.evaluation_executor import EvaluationExecutor
-from evals.eval_framework.analysis.metrics_calculator import calculate_metrics_statistics
-from evals.eval_framework.analysis.dashboard_generator import create_dashboard
+from typing import List
+from cognee.eval_framework.evaluation.evaluation_executor import EvaluationExecutor
+from cognee.eval_framework.analysis.metrics_calculator import calculate_metrics_statistics
+from cognee.eval_framework.analysis.dashboard_generator import create_dashboard
 from cognee.infrastructure.files.storage import LocalStorage
 from cognee.infrastructure.databases.relational.get_relational_engine import (
     get_relational_engine,
@@ -50,13 +51,15 @@ async def execute_evaluation(params: dict) -> None:
     await create_and_insert_metrics_table(metrics)
 
     logging.info("Evaluation completed")
+    return metrics
 
 
-async def run_evaluation(params: dict) -> None:
+async def run_evaluation(params: dict) -> List[dict]:
     """Run each step of the evaluation pipeline based on configuration flags."""
     # Step 1: Evaluate answers if requested
     if params.get("evaluating_answers"):
-        await execute_evaluation(params)
+        metrics = await execute_evaluation(params)
     else:
         logging.info("Skipping evaluation as evaluating_answers is False")
+        metrics = []
@@ -67,18 +70,7 @@ async def run_evaluation(params: dict) -> None:
             json_data=params["metrics_path"], aggregate_output_path=params["aggregate_metrics_path"]
         )
         logging.info("Metrics calculation completed")
+        return metrics
     else:
         logging.info("Skipping metrics calculation as calculate_metrics is False")
-
-    # Step 3: Generate dashboard if requested
-    if params.get("dashboard"):
-        logging.info("Generating dashboard...")
-        create_dashboard(
-            metrics_path=params["metrics_path"],
-            aggregate_metrics_path=params["aggregate_metrics_path"],
-            output_file=params["dashboard_path"],
-            benchmark=params["benchmark"],
-        )
-        logging.info(f"Dashboard generated at {params['dashboard_path']}")
-    else:
-        logging.info("Skipping dashboard generation as dashboard is False")
+    return []
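A sketch of running the reworked evaluation entry point; the path values are hypothetical, and the keys mirror the ones `run_evaluation` reads:

```python
import asyncio

from cognee.eval_framework.evaluation.run_evaluation_module import run_evaluation

# Hypothetical paths; answers are assumed to have been produced beforehand.
params = {
    "evaluating_answers": True,
    "calculate_metrics": True,
    "metrics_path": "metrics.json",
    "aggregate_metrics_path": "aggregate_metrics.json",
}

metrics = asyncio.run(run_evaluation(params))
print(f"Collected {len(metrics)} metric entries")
```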
diff --git a/cognee/eval_framework/metrics_dashboard.py b/cognee/eval_framework/metrics_dashboard.py
new file mode 100644
index 0000000000..2c917740aa
--- /dev/null
+++ b/cognee/eval_framework/metrics_dashboard.py
@@ -0,0 +1,172 @@
+import json
+import plotly.graph_objects as go
+from typing import Dict, List, Tuple
+from collections import defaultdict
+
+
+def create_distribution_plots(metrics_data: Dict[str, List[float]]) -> List[str]:
+    """Create distribution histogram plots for each metric."""
+    figures = []
+    for metric, scores in metrics_data.items():
+        fig = go.Figure()
+        fig.add_trace(go.Histogram(x=scores, name=metric, nbinsx=10, marker_color="#1f77b4"))
+
+        fig.update_layout(
+            title=f"{metric} Score Distribution",
+            xaxis_title="Score",
+            yaxis_title="Count",
+            bargap=0.1,
+            template="seaborn",
+        )
+        figures.append(fig.to_html(full_html=False))
+    return figures
+
+
+def create_ci_plot(ci_results: Dict[str, Tuple[float, float, float]]) -> str:
+    """Create confidence interval bar plot."""
+    fig = go.Figure()
+    for metric, (mean_score, lower, upper) in ci_results.items():
+        fig.add_trace(
+            go.Bar(
+                x=[metric],
+                y=[mean_score],
+                error_y=dict(
+                    type="data",
+                    array=[upper - mean_score],
+                    arrayminus=[mean_score - lower],
+                    visible=True,
+                ),
+                name=metric,
+            )
+        )
+
+    fig.update_layout(
+        title="95% confidence interval for all the metrics",
+        xaxis_title="Metric",
+        yaxis_title="Score",
+        template="seaborn",
+    )
+    return fig.to_html(full_html=False)
+
+
+def generate_details_html(metrics_data: List[Dict]) -> List[str]:
+    """Generate HTML for detailed metric information."""
+    details_html = []
+    metric_details = {}
+
+    # Organize metrics by type
+    for entry in metrics_data:
+        for metric, values in entry["metrics"].items():
+            if metric not in metric_details:
+                metric_details[metric] = []
+            metric_details[metric].append(
+                {
+                    "question": entry["question"],
+                    "answer": entry["answer"],
+                    "golden_answer": entry["golden_answer"],
+                    "reason": values.get("reason", ""),
+                    "score": values["score"],
+                }
+            )
+
+    for metric, details in metric_details.items():
+        details_html.append(f"""
+            <h3>{metric}</h3>
+            <table>
+                <tr>
+                    <th>Question</th>
+                    <th>Answer</th>
+                    <th>Golden Answer</th>
+                    <th>Reason</th>
+                    <th>Score</th>
+                </tr>
+        """)
+        for item in details:
+            details_html.append(
+                f"<tr><td>{item['question']}</td>"
+                + f"<td>{item['answer']}</td>"
+                + f"<td>{item['golden_answer']}</td>"
+                + f"<td>{item['reason']}</td>"
+                + f"<td>{item['score']}</td>"
+                + f"</tr>"
+            )
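The plotting helpers in `metrics_dashboard.py` are self-contained and can be exercised with toy inputs; a sketch (scores and bounds are made up):

```python
from cognee.eval_framework.metrics_dashboard import create_ci_plot, create_distribution_plots

# Toy inputs matching the shapes the helpers expect: metric -> list of scores,
# and metric -> (mean, lower bound, upper bound).
dist_html = create_distribution_plots({"correctness": [0.4, 0.7, 0.9, 0.8]})
ci_html = create_ci_plot({"correctness": (0.70, 0.61, 0.79), "f1": (0.58, 0.50, 0.66)})

with open("dashboard_preview.html", "w", encoding="utf-8") as f:
    f.write(ci_html + "".join(dist_html))
```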