1+ import json
2+ import time
3+ from typing import Any , Dict , List , Optional
4+
5+ from nc_py_api import Nextcloud
6+ from langchain_core .callbacks .manager import CallbackManagerForLLMRun
7+ from langchain_core .language_models .llms import LLM
8+
def get_model_for(model_type: str, model_config: dict):
    """Build the model backing the given model type.

    Args:
        model_type: Kind of model requested; only 'llm' is supported.
        model_config: Provider configuration; a None config disables the model.

    Returns:
        A CustomLLM instance for 'llm' requests with a non-None config,
        otherwise None.
    """
    # A missing config or an unsupported type both yield no model.
    if model_config is None or model_type != 'llm':
        return None
    return CustomLLM()
17+
class CustomLLM(LLM):
    """A custom LangChain LLM that delegates generation to Nextcloud's
    TaskProcessing ``core:text2text`` provider over the OCS API.
    """

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        """Run the LLM on the given input.

        Schedules a ``core:text2text`` task on the Nextcloud server, then
        polls the task endpoint every 5 seconds until it reaches a terminal
        state.

        Args:
            prompt: The prompt to generate from.
            stop: Stop words to use when generating. Not forwarded to the
                provider by this implementation.
            run_manager: Callback manager for the run. Unused here.
            **kwargs: Arbitrary additional keyword arguments. Unused here.

        Returns:
            The model output as a string (the task's ``output.output`` field).

        Raises:
            RuntimeError: If the Nextcloud task ends in ``STATUS_FAILED``.
        """
        nc = Nextcloud()

        # NOTE(review): debug output of the raw prompt on stdout — consider
        # replacing with the `logging` module.
        print(json.dumps(prompt))

        response = nc.ocs("POST", "/ocs/v1.php/taskprocessing/schedule", json={
            "type": "core:text2text",
            "appId": "context_chat_backend",
            "input": {
                "input": prompt,
            },
        })

        task_id = response["task"]["id"]

        # Poll until the task reaches a terminal state.
        # NOTE(review): there is no overall timeout, so a task stuck in a
        # non-terminal state blocks this call indefinitely — confirm whether
        # an upper bound is wanted.
        while response['task']['status'] not in ('STATUS_SUCCESSFUL', 'STATUS_FAILED'):
            time.sleep(5)
            response = nc.ocs("GET", f"/ocs/v1.php/taskprocessing/task/{task_id}")
            print(json.dumps(response))

        if response['task']['status'] == 'STATUS_FAILED':
            raise RuntimeError('Nextcloud TaskProcessing Task failed')

        return response['task']['output']['output']

    @property
    def _identifying_params(self) -> Dict[str, Any]:
        """Return a dictionary of identifying parameters."""
        return {
            # The model name allows users to specify custom token counting
            # rules in LLM monitoring applications (e.g., in LangSmith users
            # can provide per token pricing for their model and monitor
            # costs for the given LLM.)
            "model_name": "NextcloudTextToTextProvider",
        }

    @property
    def _llm_type(self) -> str:
        """Get the type of language model used by this chat model. Used for logging purposes only."""
        # Fixed typo: was "nc_texttotetx". Safe to correct since this value
        # is used for logging only (see docstring above).
        return "nc_texttotext"