Skip to content

Commit c89eda0

Browse files
committed
fix: address function complexity and prompt optimization
- Bundle `_process_chunk` parameters into a plan tuple to fix R0913/R0917 (too many arguments)
- Improve the OpenAI prompt to request the translation only, without quotes or commentary
- Reduce the function argument count from 6 to 3 parameters
- Maintain all functionality while improving code organization
1 parent 52aca87 commit c89eda0

File tree

1 file changed

+8
-3
lines changed

1 file changed

+8
-3
lines changed

cognee/tasks/translation/translate_content.py

Lines changed: 8 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -478,7 +478,7 @@ async def translate(self, text: str, target_language: str) -> Optional[Tuple[str
478478
response = await client.chat.completions.create(
479479
model=getattr(type(self), "_model", "gpt-4o-mini"),
480480
messages=[
481-
{"role": "system", "content": f"You are a translation assistant. Translate the following text to {target_language}."},
481+
{"role": "system", "content": f"You are a translation assistant. Translate the user's text to {target_language}. Reply with only the translated text, no quotes or commentary."},
482482
{"role": "user", "content": text},
483483
],
484484
temperature=0,
@@ -673,7 +673,9 @@ def _build_provider_plan(translation_provider_name, fallback_input):
673673
return primary_key, fallback_providers
674674

675675

676-
async def _process_chunk(chunk, target_language, primary_key, fallback_providers, confidence_threshold, provider_cache):
676+
async def _process_chunk(chunk, plan, provider_cache):
677+
# Unpack plan: (target_language, primary_key, fallback_providers, confidence_threshold)
678+
target_language, primary_key, fallback_providers, confidence_threshold = plan
677679
try:
678680
provider = provider_cache.get(primary_key)
679681
if provider is None:
@@ -782,6 +784,9 @@ async def translate_content(*chunks: Any, **kwargs) -> Any:
782784
# Provider cache for this batch to reduce instantiation overhead
783785
provider_cache: Dict[str, Any] = {}
784786

787+
# Bundle plan parameters to reduce argument count
788+
plan = (target_language, primary_key, fallback_providers, confidence_threshold)
789+
785790
# Parse concurrency with error handling
786791
try:
787792
max_concurrency = int(os.getenv("COGNEE_TRANSLATION_MAX_CONCURRENCY", "8"))
@@ -792,7 +797,7 @@ async def translate_content(*chunks: Any, **kwargs) -> Any:
792797
sem = asyncio.Semaphore(max_concurrency)
793798
async def _wrapped(c):
794799
async with sem:
795-
return await _process_chunk(c, target_language, primary_key, fallback_providers, confidence_threshold, provider_cache)
800+
return await _process_chunk(c, plan, provider_cache)
796801
results = await asyncio.gather(*(_wrapped(c) for c in batch))
797802

798803
return results[0] if return_single else results

0 commit comments

Comments (0)