forked from chatboxai/chatbox
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathclient.ts
More file actions
141 lines (133 loc) · 4.09 KB
/
client.ts
File metadata and controls
141 lines (133 loc) · 4.09 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
import { Message } from './types';
import * as wordCount from './utils';
import { createParser } from 'eventsource-parser'
// Payload handed to the streaming `onText` callback on every received delta.
export interface OnTextCallbackResult {
    // Accumulated response content so far (full text, not just the latest delta).
    text: string;
    // Aborts the in-flight fetch; after cancel, replay() returns without throwing.
    cancel: () => void;
}
/**
 * Replays a conversation against an OpenAI-compatible `/v1/chat/completions`
 * endpoint with `stream: true`, forwarding the growing reply text via SSE.
 *
 * Messages are trimmed from the oldest end (a leading `system` message is
 * always preserved) so the estimated token count stays within
 * `maxContextSize`.
 *
 * @param apiKey         Bearer token for the Authorization header.
 * @param host           Base URL, e.g. "https://api.openai.com".
 * @param maxContextSize Context budget in tokens, as a string; non-numeric
 *                       values mean "no limit".
 * @param maxTokens      `max_tokens` for the request, as a string; omitted
 *                       from the request when non-numeric.
 * @param modelName      Model identifier sent as `model`.
 * @param msgs           Conversation history; must be non-empty.
 * @param onText         Invoked with the accumulated text and a cancel handle
 *                       on every streamed delta.
 * @param onError        Invoked with the failure before it is rethrown
 *                       (never called for a deliberate cancel).
 * @returns The full generated text ('' if canceled before any delta arrived).
 * @throws Error when `msgs` is empty or the request/stream fails.
 */
export async function replay(
    apiKey: string,
    host: string,
    maxContextSize: string,
    maxTokens: string,
    modelName: string,
    msgs: Message[],
    onText?: (option: OnTextCallbackResult) => void,
    onError?: (error: Error) => void,
) {
    if (msgs.length === 0) {
        throw new Error('No messages to replay')
    }
    // Set a leading system message aside so context trimming can never drop it.
    const head = msgs[0].role === 'system' ? msgs[0] : undefined
    if (head) {
        msgs = msgs.slice(1)
    }

    const maxTokensNumber = Number(maxTokens)
    // A non-numeric context size disables trimming. (Previously NaN made the
    // `>` comparison below always false — same effect, now explicit.)
    const parsedMaxLen = Number(maxContextSize)
    const maxLen = Number.isFinite(parsedMaxLen) ? parsedMaxLen : Infinity

    // Walk backwards from the newest message, keeping as many as fit the budget.
    let totalLen = head ? wordCount.estimateTokens(head.content) : 0
    let prompts: Message[] = []
    for (let i = msgs.length - 1; i >= 0; i--) {
        const msg = msgs[i]
        // +200 is a fudge factor compensating for token-estimation error.
        const msgTokenSize: number = wordCount.estimateTokens(msg.content) + 200
        if (msgTokenSize + totalLen > maxLen) {
            break
        }
        prompts = [msg, ...prompts]
        totalLen += msgTokenSize
    }
    if (head) {
        prompts = [head, ...prompts]
    }

    // fetch has been canceled
    let hasCancel = false;
    // abort signal for fetch
    const controller = new AbortController();
    const cancel = () => {
        hasCancel = true;
        controller.abort();
    };

    let fullText = '';
    try {
        const messages = prompts.map(msg => ({ role: msg.role, content: msg.content }))
        const requestBody: Record<string, unknown> = {
            messages,
            model: modelName,
            stream: true,
        }
        // Omit max_tokens when it is not a valid number instead of letting
        // JSON.stringify serialize NaN as `"max_tokens": null`.
        if (Number.isFinite(maxTokensNumber)) {
            requestBody.max_tokens = maxTokensNumber
        }
        const response = await fetch(`${host}/v1/chat/completions`, {
            method: 'POST',
            headers: {
                'Authorization': `Bearer ${apiKey}`,
                'Content-Type': 'application/json',
            },
            body: JSON.stringify(requestBody),
            signal: controller.signal,
        });
        await handleSSE(response, (message) => {
            if (message === '[DONE]') {
                return;
            }
            const data = JSON.parse(message)
            if (data.error) {
                throw new Error(`Error from OpenAI: ${JSON.stringify(data)}`)
            }
            const text = data.choices[0]?.delta?.content
            if (text !== undefined) {
                fullText += text
                if (onText) {
                    onText({ text: fullText, cancel })
                }
            }
        })
    } catch (error) {
        // A deliberate cancellation must not surface as an error, otherwise
        // the partially streamed content would be overwritten by the caller.
        if (hasCancel) {
            return;
        }
        if (onError) {
            // Normalize unknown throwables so the `(error: Error)` callback
            // contract actually holds (was `error as any`).
            onError(error instanceof Error ? error : new Error(String(error)))
        }
        throw error
    }
    return fullText
}
/**
 * Consumes a server-sent-events response body, calling `onMessage` with the
 * data payload of each SSE event.
 *
 * @param response  The fetch Response carrying the SSE stream.
 * @param onMessage Receives each event's raw `data` string (e.g. a JSON
 *                  chunk or the literal "[DONE]").
 * @throws Error when the response is not ok / not 200 or has no body; the
 *         error body (if JSON) is included in the message.
 */
export async function handleSSE(response: Response, onMessage: (message: string) => void) {
    if (!response.ok) {
        const error = await response.json().catch(() => null)
        throw new Error(error ? JSON.stringify(error) : `${response.status} ${response.statusText}`)
    }
    if (response.status !== 200) {
        throw new Error(`Error from OpenAI: ${response.status} ${response.statusText}`)
    }
    if (!response.body) {
        throw new Error('No response body')
    }
    const parser = createParser((event) => {
        if (event.type === 'event') {
            onMessage(event.data)
        }
    })
    // Use ONE decoder for the whole stream with `stream: true`, so a
    // multi-byte UTF-8 character split across chunk boundaries decodes
    // correctly. (The original built a fresh TextDecoder per chunk and
    // decoded without the stream flag, corrupting split characters into
    // U+FFFD replacement characters.)
    const decoder = new TextDecoder()
    for await (const chunk of iterableStreamAsync(response.body)) {
        parser.feed(decoder.decode(chunk, { stream: true }))
    }
}
/**
 * Adapts a WHATWG ReadableStream into an async iterator of its chunks.
 * The reader lock is always released, whether iteration completes, throws,
 * or is abandoned early by the consumer.
 */
export async function* iterableStreamAsync(stream: ReadableStream): AsyncIterableIterator<Uint8Array> {
    // Acquiring a reader locks the stream to this consumer.
    const reader = stream.getReader();
    try {
        for (;;) {
            const result = await reader.read()
            if (result.done) {
                break
            }
            yield result.value
        }
    } finally {
        // Unlock so the stream can be read or canceled elsewhere afterwards.
        reader.releaseLock()
    }
}