@@ -21,84 +21,82 @@ export async function chatWithAI({
   tokenLimit,
   debug = false,
 }: ChatWithAiArgs): Promise<string> {
-  while (true) {
-    try {
-      const model = Config.fast_llm_model;
-      const sendTokenLimit = tokenLimit - 1000;
+  try {
+    const model = Config.fast_llm_model;
+    const sendTokenLimit = tokenLimit - 1000;
 
-      const currentContext: LLMMessage[] = [
-        { role: "system", content: prompt },
-        { role: "system", content: `Permanent memory: ${permanentMemory}` },
-      ];
+    const currentContext: LLMMessage[] = [
+      { role: "system", content: prompt },
+      { role: "system", content: `Permanent memory: ${permanentMemory}` },
+    ];
 
-      let nextMessageToAddIndex = fullMessageHistory.length - 1;
-      let currentTokensUsed = 0;
-      const insertionIndex = currentContext.length;
+    let nextMessageToAddIndex = fullMessageHistory.length - 1;
+    let currentTokensUsed = 0;
+    const insertionIndex = currentContext.length;
 
-      currentTokensUsed = countMessageTokens(currentContext, model);
-      currentTokensUsed += countMessageTokens(
-        [{ role: "user", content: userInput }],
-        model
-      );
-
-      while (nextMessageToAddIndex >= 0) {
-        const messageToAdd = fullMessageHistory[nextMessageToAddIndex];
-        const tokensToAdd = countMessageTokens([messageToAdd], model);
+    currentTokensUsed = countMessageTokens(currentContext, model);
+    currentTokensUsed += countMessageTokens(
+      [{ role: "user", content: userInput }],
+      model
+    );
 
-        if (currentTokensUsed + tokensToAdd > sendTokenLimit) {
-          break;
-        }
+    while (nextMessageToAddIndex >= 0) {
+      const messageToAdd = fullMessageHistory[nextMessageToAddIndex];
+      const tokensToAdd = countMessageTokens([messageToAdd], model);
 
-        currentContext.splice(
-          insertionIndex,
-          0,
-          fullMessageHistory[nextMessageToAddIndex]
-        );
-        currentTokensUsed += tokensToAdd;
-        nextMessageToAddIndex -= 1;
+      if (currentTokensUsed + tokensToAdd > sendTokenLimit) {
+        break;
       }
 
-      currentContext.push({ role: "user", content: userInput });
-      const tokensRemaining = tokenLimit - currentTokensUsed;
+      currentContext.splice(
+        insertionIndex,
+        0,
+        fullMessageHistory[nextMessageToAddIndex]
+      );
+      currentTokensUsed += tokensToAdd;
+      nextMessageToAddIndex -= 1;
+    }
 
-      if (debug) {
-        console.log(`Token limit: ${tokenLimit}`);
-        console.log(`Send Token Count: ${currentTokensUsed}`);
-        console.log(`Tokens remaining for response: ${tokensRemaining}`);
-        console.log("------------ CONTEXT SENT TO AI ---------------");
-        for (const message of currentContext) {
-          if (message.role === "system" && message.content === prompt) {
-            continue;
-          }
-          console.log(
-            `${message.role.charAt(0).toUpperCase() + message.role.slice(1)}: ${
-              message.content
-            }`
-          );
-          console.log();
+    currentContext.push({ role: "user", content: userInput });
+    const tokensRemaining = tokenLimit - currentTokensUsed;
+
+    if (debug) {
+      console.log(`Token limit: ${tokenLimit}`);
+      console.log(`Send Token Count: ${currentTokensUsed}`);
+      console.log(`Tokens remaining for response: ${tokensRemaining}`);
+      console.log("------------ CONTEXT SENT TO AI ---------------");
+      for (const message of currentContext) {
+        if (message.role === "system" && message.content === prompt) {
+          continue;
         }
-        console.log("----------- END OF CONTEXT ----------------");
+        console.log(
+          `${message.role.charAt(0).toUpperCase() + message.role.slice(1)}: ${
+            message.content
+          }`
+        );
+        console.log();
       }
+      console.log("----------- END OF CONTEXT ----------------");
+    }
 
-      const assistantReply = await callLLMChatCompletion(
-        currentContext,
-        model,
-        undefined /* temperature */,
-        tokensRemaining
-      );
+    const assistantReply = await callLLMChatCompletion(
+      currentContext,
+      model,
+      undefined /* temperature */,
+      tokensRemaining
+    );
 
-      appendToFullMessageHistory([
-        { role: "user", content: userInput },
-        {
-          role: "assistant",
-          content: assistantReply,
-        },
-      ]);
+    appendToFullMessageHistory([
+      { role: "user", content: userInput },
+      {
+        role: "assistant",
+        content: assistantReply,
+      },
+    ]);
 
-      return assistantReply;
-    } catch (error) {
-      console.error("Error calling chat", error);
-      throw error;
-    }
+    return assistantReply;
+  } catch (error) {
+    console.error("Error calling chat", error);
+    throw error;
   }
 }
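
For context, the sketch below shows one way the reworked chatWithAI might be called. The argument field names (prompt, permanentMemory, fullMessageHistory, userInput, tokenLimit, debug) all appear in the diff above; the import paths, the concrete argument values, and the permanentMemory type are assumptions added purely for illustration, not part of this commit.

```ts
// Hypothetical call site — import paths and argument values are assumptions;
// only the field names come from the diff above.
import { chatWithAI } from "./chat"; // assumed module path
import type { LLMMessage } from "./llm"; // assumed location of the message type

async function runTurn(): Promise<void> {
  // History is owned by the caller; chatWithAI trims it to fit the send budget.
  const fullMessageHistory: LLMMessage[] = [];

  const reply = await chatWithAI({
    prompt: "You are a helpful autonomous agent.", // system prompt
    permanentMemory: "no relevant memories yet",   // interpolated into a system message; string type assumed
    fullMessageHistory,
    userInput: "Determine which command to run next.",
    tokenLimit: 4000, // total budget; the function reserves 1000 tokens for the reply
    debug: true,      // prints the exact context sent to the model
  });

  console.log(reply);
}
```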