
Commit c64ed0a

Added ability to pause and resume
1 parent 35173cd commit c64ed0a

6 files changed · +178 −99 lines

AutoGPT/utils/chat.ts

Lines changed: 64 additions & 66 deletions

@@ -21,84 +21,82 @@ export async function chatWithAI({
   tokenLimit,
   debug = false,
 }: ChatWithAiArgs): Promise<string> {
-  while (true) {
-    try {
-      const model = Config.fast_llm_model;
-      const sendTokenLimit = tokenLimit - 1000;
+  try {
+    const model = Config.fast_llm_model;
+    const sendTokenLimit = tokenLimit - 1000;

-      const currentContext: LLMMessage[] = [
-        { role: "system", content: prompt },
-        { role: "system", content: `Permanent memory: ${permanentMemory}` },
-      ];
+    const currentContext: LLMMessage[] = [
+      { role: "system", content: prompt },
+      { role: "system", content: `Permanent memory: ${permanentMemory}` },
+    ];

-      let nextMessageToAddIndex = fullMessageHistory.length - 1;
-      let currentTokensUsed = 0;
-      const insertionIndex = currentContext.length;
+    let nextMessageToAddIndex = fullMessageHistory.length - 1;
+    let currentTokensUsed = 0;
+    const insertionIndex = currentContext.length;

-      currentTokensUsed = countMessageTokens(currentContext, model);
-      currentTokensUsed += countMessageTokens(
-        [{ role: "user", content: userInput }],
-        model
-      );
-
-      while (nextMessageToAddIndex >= 0) {
-        const messageToAdd = fullMessageHistory[nextMessageToAddIndex];
-        const tokensToAdd = countMessageTokens([messageToAdd], model);
+    currentTokensUsed = countMessageTokens(currentContext, model);
+    currentTokensUsed += countMessageTokens(
+      [{ role: "user", content: userInput }],
+      model
+    );

-        if (currentTokensUsed + tokensToAdd > sendTokenLimit) {
-          break;
-        }
+    while (nextMessageToAddIndex >= 0) {
+      const messageToAdd = fullMessageHistory[nextMessageToAddIndex];
+      const tokensToAdd = countMessageTokens([messageToAdd], model);

-        currentContext.splice(
-          insertionIndex,
-          0,
-          fullMessageHistory[nextMessageToAddIndex]
-        );
-        currentTokensUsed += tokensToAdd;
-        nextMessageToAddIndex -= 1;
+      if (currentTokensUsed + tokensToAdd > sendTokenLimit) {
+        break;
       }

-      currentContext.push({ role: "user", content: userInput });
-      const tokensRemaining = tokenLimit - currentTokensUsed;
+      currentContext.splice(
+        insertionIndex,
+        0,
+        fullMessageHistory[nextMessageToAddIndex]
+      );
+      currentTokensUsed += tokensToAdd;
+      nextMessageToAddIndex -= 1;
+    }

-      if (debug) {
-        console.log(`Token limit: ${tokenLimit}`);
-        console.log(`Send Token Count: ${currentTokensUsed}`);
-        console.log(`Tokens remaining for response: ${tokensRemaining}`);
-        console.log("------------ CONTEXT SENT TO AI ---------------");
-        for (const message of currentContext) {
-          if (message.role === "system" && message.content === prompt) {
-            continue;
-          }
-          console.log(
-            `${message.role.charAt(0).toUpperCase() + message.role.slice(1)}: ${
-              message.content
-            }`
-          );
-          console.log();
+    currentContext.push({ role: "user", content: userInput });
+    const tokensRemaining = tokenLimit - currentTokensUsed;
+
+    if (debug) {
+      console.log(`Token limit: ${tokenLimit}`);
+      console.log(`Send Token Count: ${currentTokensUsed}`);
+      console.log(`Tokens remaining for response: ${tokensRemaining}`);
+      console.log("------------ CONTEXT SENT TO AI ---------------");
+      for (const message of currentContext) {
+        if (message.role === "system" && message.content === prompt) {
+          continue;
         }
-        console.log("----------- END OF CONTEXT ----------------");
+        console.log(
+          `${message.role.charAt(0).toUpperCase() + message.role.slice(1)}: ${
+            message.content
+          }`
+        );
+        console.log();
       }
+      console.log("----------- END OF CONTEXT ----------------");
+    }

-      const assistantReply = await callLLMChatCompletion(
-        currentContext,
-        model,
-        undefined /* temperature */,
-        tokensRemaining
-      );
+    const assistantReply = await callLLMChatCompletion(
+      currentContext,
+      model,
+      undefined /* temperature */,
+      tokensRemaining
+    );

-      appendToFullMessageHistory([
-        { role: "user", content: userInput },
-        {
-          role: "assistant",
-          content: assistantReply,
-        },
-      ]);
+    appendToFullMessageHistory([
+      { role: "user", content: userInput },
+      {
+        role: "assistant",
+        content: assistantReply,
+      },
+    ]);

-      return assistantReply;
-    } catch (error) {
-      console.error("Error calling chat", error);
-      throw error;
-    }
+    return assistantReply;
+  } catch (error) {
+    console.error("Error calling chat", error);
+    throw error;
   }
 }
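
Note: the context-packing logic itself is untouched here; the diff only unwraps it from a `while (true)` wrapper that never looped (every path ended in a return or a throw), so this is a pure cleanup. For reference, here is the packing strategy in isolation — a minimal sketch, not code from this repo: packContext and countTokens are hypothetical stand-ins for chatWithAI's inline logic and countMessageTokens.

interface LLMMessage {
  role: "system" | "user" | "assistant";
  content: string;
}

function countTokens(messages: LLMMessage[]): number {
  // Crude length-based stand-in for a model-aware tokenizer.
  return messages.reduce(
    (sum, m) => sum + Math.ceil(m.content.length / 4) + 4,
    0
  );
}

function packContext(
  prompt: string,
  history: LLMMessage[],
  userInput: string,
  sendTokenLimit: number
): LLMMessage[] {
  const context: LLMMessage[] = [{ role: "system", content: prompt }];
  const insertionIndex = context.length;
  let used =
    countTokens(context) + countTokens([{ role: "user", content: userInput }]);

  // Walk history newest-to-oldest, inserting each message right after the
  // system prompt until the next one would overflow the send budget. Inserting
  // at a fixed index while iterating backwards keeps chronological order.
  for (let i = history.length - 1; i >= 0; i--) {
    const cost = countTokens([history[i]]);
    if (used + cost > sendTokenLimit) break;
    context.splice(insertionIndex, 0, history[i]);
    used += cost;
  }

  context.push({ role: "user", content: userInput });
  return context;
}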

AutoGPT/utils/jsonParsingAssist.ts

Lines changed: 1 addition & 1 deletion

@@ -25,7 +25,7 @@ export async function fixAndParseJson(
   `;

   try {
-    return JSON.parse(jsonStr);
+    return JSON.parse(jsonStr.replaceAll('\n', ''));
   } catch (e) {
     try {
       const braceIndex = jsonStr.indexOf("{");
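
Note: this one-line change, together with the `ES2021.String` lib added to tsconfig.json below, works around model output that embeds raw newlines inside JSON string values, which JSON.parse rejects. A minimal sketch of the failure and the fix, with a hypothetical `raw` payload; the trade-off is that legitimate newlines inside values are dropped too.

// Hypothetical model output: a raw (unescaped) newline inside a string value.
const raw = '{ "thought": "line one\nline two" }';

// JSON.parse(raw) throws, because unescaped control characters are not
// valid inside JSON string literals. Stripping the newlines first parses:
const parsed = JSON.parse(raw.replaceAll("\n", ""));
console.log(parsed.thought); // "line oneline two" -- the newline is lost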

app/components/AIStateProvider.tsx

Lines changed: 40 additions & 19 deletions

@@ -9,39 +9,55 @@ interface AIState {
   aiInfo: AIInfoState;
 }

-const DEFAULT_AI_STATE = Object.freeze<AIState>({
-  setup: {
-    stage: "not_init",
-  },
-  aiInfo: {
-    name: "Entrepreneur-GPT",
-    description:
-      "an AI designed to autonomously develop and run businesses with the sole goal of increasing your net worth.",
-    goals: [
-      "Increase net worth",
-      "Grow Twitter Account",
-      "Develop and manage multiple businesses autonomously",
-    ],
-  },
-});
+const AI_INFO_STATE_KEY = "AI_INFO_STATE";
+function getDefaultAIState(): AIState {
+  const DEFAULT_AI_STATE = Object.freeze<AIState>({
+    setup: {
+      stage: "not_init",
+    },
+    aiInfo: {
+      name: "Entrepreneur-GPT",
+      description:
+        "an AI designed to autonomously develop and run businesses with the sole goal of increasing your net worth.",
+      goals: [
+        "Increase net worth",
+        "Grow Twitter Account",
+        "Develop and manage multiple businesses autonomously",
+      ],
+    },
+  });
+
+  if (typeof window === "undefined") {
+    // On the server, where `window` doesn't exist, just return the default state.
+    return DEFAULT_AI_STATE;
+  }
+
+  const savedAIInfoStateStr = window.localStorage.getItem(AI_INFO_STATE_KEY);
+  if (savedAIInfoStateStr) {
+    const savedAIInfoState = JSON.parse(savedAIInfoStateStr) as AIInfoState;
+    return { ...DEFAULT_AI_STATE, aiInfo: savedAIInfoState };
+  } else {
+    return DEFAULT_AI_STATE;
+  }
+}

 interface AIStateDispatchers {
   setupDispatcher: Dispatch<SetupReducerAction>;
   aiInfoDispatcher: Dispatch<AIInfoReducerAction>;
 }

-const AIStateContext = createContext<AIState>({ ...DEFAULT_AI_STATE });
+const AIStateContext = createContext<AIState>({ ...getDefaultAIState() });
 const AIStateDispatcherContext = createContext<AIStateDispatchers>({
   setupDispatcher: () => {},
   aiInfoDispatcher: () => {},
 });

 export function AIStateProvider({ children }: PropsWithChildren<{}>) {
   const [setupState, setupDispatcher] = useReducer(setupReducer, {
-    ...DEFAULT_AI_STATE.setup,
+    ...getDefaultAIState().setup,
   });
   const [aiInfoState, aiInfoDispatcher] = useReducer(aiInfoReducer, {
-    ...DEFAULT_AI_STATE.aiInfo,
+    ...getDefaultAIState().aiInfo,
   });

   useEffect(() => {
@@ -131,7 +147,12 @@ function aiInfoReducer(
   } else if (action.type === "set_description") {
     return { ...state, description: action.description };
   } else if (action.type === "set_goals") {
-    return { ...state, goals: action.goals };
+    // Setting goals is generally the last stage of AIInfoState setup,
+    // so we store the state in local storage at this point.
+    const newState = { ...state, goals: action.goals };
+    window.localStorage.setItem(AI_INFO_STATE_KEY, JSON.stringify(newState));
+
+    return newState;
   } else {
     return assertNever(action);
   }
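
Note: the net effect of this change is that the default state is now computed per call, hydrating aiInfo from localStorage when a saved copy exists, with a `typeof window` guard so server-side rendering (where localStorage is unavailable) still gets the static default. The round-trip, reduced to a sketch — loadSavedAIInfo and saveAIInfo are illustrative names, not functions in this codebase:

const AI_INFO_STATE_KEY = "AI_INFO_STATE";

interface AIInfoState {
  name: string;
  description: string;
  goals: string[];
}

function loadSavedAIInfo(fallback: AIInfoState): AIInfoState {
  if (typeof window === "undefined") return fallback; // SSR: no localStorage
  const saved = window.localStorage.getItem(AI_INFO_STATE_KEY);
  return saved ? (JSON.parse(saved) as AIInfoState) : fallback;
}

function saveAIInfo(state: AIInfoState): void {
  // Called when goals are set, the last step of AI-info setup.
  window.localStorage.setItem(AI_INFO_STATE_KEY, JSON.stringify(state));
}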

app/components/AutoGPTChatLoop.tsx

Lines changed: 39 additions & 12 deletions

@@ -6,6 +6,8 @@ import { chatWithAI } from "AutoGPT/utils/chat";
 import { generatePrompt } from "AutoGPT/utils/prompt";
 import { permanentMemory } from "AutoGPT/commandPlugins/MemoryCommandPlugins";
 import { executeCommand, getCommand } from "AutoGPT/commandPlugins/index";
+import { PauseButton, ResumeButton } from "./Buttons";
+import { useCallback } from "react";

 const USER_INPUT =
   "Determine which next command to use, and respond using the format specified above:";
@@ -16,27 +18,44 @@ export function AutoGPTChatLoop() {
   } = useAIState();

   const fullMessageHistory = useRef<LLMMessage[]>([]);
+  const userInput = useRef<string>(USER_INPUT);
+  const isChatInProgress = useRef<boolean>(false);
+  const prevMessageIndexRan = useRef<number>(-1); // Should be different from currMessageIndex
+
   const [currMessageIndex, setCurrMessageIndex] = useState<number>(0);
+  const [activities, setActivities] = useState<Activity[]>([]);
+  const [isPaused, setIsPaused] = useState<boolean>(false);

-  const activities = fullMessageHistory.current.map((message, index) =>
-    convertMessageToActivity(index, message)
-  );
+  const togglePause = useCallback(() => {
+    // Toggle paused
+    setIsPaused(!isPaused);

-  const userInput = useRef<string>(USER_INPUT);
-  const isChatInProgress = useRef<boolean>(false);
+    if (!isPaused) {
+      // Pausing: pre-bump the index now; the effect is gated while paused,
+      // so the new index triggers the chat again once we resume.
+      setCurrMessageIndex(currMessageIndex + 1);
+    }
+  }, [isPaused, currMessageIndex]);

   useEffect(() => {
-    if (isChatInProgress.current) {
-      // Already chat in progress
+    if (isChatInProgress.current || prevMessageIndexRan.current === currMessageIndex || isPaused) {
+      // A chat is already in progress, this index already ran, or we're paused
       return;
     }
+    const appendToFullMessageHistory = (messages: LLMMessage[]) => {
+      fullMessageHistory.current.push(...messages);
+      setActivities(
+        fullMessageHistory.current.map((msg, index) =>
+          convertMessageToActivity(index, msg)
+        )
+      );
+    };

     isChatInProgress.current = true;
+    prevMessageIndexRan.current = currMessageIndex;
     chatWithAI({
       prompt: generatePrompt(name, description, goals),
       fullMessageHistory: fullMessageHistory.current,
-      appendToFullMessageHistory: (messages) =>
-        fullMessageHistory.current.push(...messages),
+      appendToFullMessageHistory,
       permanentMemory,
       tokenLimit: 4000,
       userInput: userInput.current,
@@ -63,15 +82,23 @@ export function AutoGPTChatLoop() {
        } else {
          result = `Command ${commandName} threw the following error: ${args}`;
        }
-        fullMessageHistory.current.push({ role: "system", content: result });
+        appendToFullMessageHistory([{ role: "system", content: result }]);
        setCurrMessageIndex(currMessageIndex + 1);
      })
      .finally(() => {
        isChatInProgress.current = false;
      });
-  }, [currMessageIndex]);
+  }, [currMessageIndex, isPaused]);

-  return <ActivityFeed activities={activities} />;
+  return (
+    <>
+      <ActivityFeed activities={activities} />
+      <div className="w-full flex justify-center align-middle mt-8">
+        {!isPaused && <PauseButton onClick={togglePause} />}
+        {isPaused && <ResumeButton onClick={togglePause} />}
+      </div>
+    </>
+  );
 }

 function convertMessageToActivity(
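
Note: the pause mechanism rests on three guards in the effect — skip when a chat is in flight, when the current index has already run, or when paused — and resuming re-triggers by bumping the index (the commit pre-bumps it at pause time, which the paused gate holds until resume; same net effect as bumping on resume). Distilled into a hypothetical standalone hook, not part of this codebase; callers would memoize `step`:

import { useCallback, useEffect, useRef, useState } from "react";

export function usePausableLoop(step: () => Promise<void>) {
  const [isPaused, setIsPaused] = useState(false);
  const [tick, setTick] = useState(0); // plays the role of currMessageIndex
  const stepInProgress = useRef(false); // like isChatInProgress
  const lastTickRan = useRef(-1); // like prevMessageIndexRan

  useEffect(() => {
    // Skip when a step is running, this tick already ran, or we're paused.
    if (stepInProgress.current || lastTickRan.current === tick || isPaused) {
      return;
    }
    stepInProgress.current = true;
    lastTickRan.current = tick;
    step()
      .then(() => setTick((t) => t + 1)) // schedule the next iteration
      .finally(() => {
        stepInProgress.current = false;
      });
  }, [tick, isPaused, step]);

  const togglePause = useCallback(() => {
    if (isPaused) {
      // Resuming: bump the tick so the effect runs again for a fresh index.
      setTick((t) => t + 1);
    }
    setIsPaused(!isPaused);
  }, [isPaused]);

  return { isPaused, togglePause };
}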

app/components/Buttons.tsx

Lines changed: 33 additions & 0 deletions

@@ -0,0 +1,33 @@
+import { PauseCircleIcon, PlayCircleIcon } from "@heroicons/react/20/solid";
+
+export interface PauseButtonProps {
+  onClick: () => void;
+}
+export function PauseButton({ onClick }: PauseButtonProps) {
+  return (
+    <button
+      type="button"
+      className="inline-flex items-center gap-x-2 rounded-md bg-amber-600 px-3.5 py-2.5 text-sm font-semibold text-white shadow-sm hover:bg-amber-500 focus-visible:outline focus-visible:outline-2 focus-visible:outline-offset-2 focus-visible:outline-amber-600"
+      onClick={onClick}
+    >
+      <PauseCircleIcon className="-ml-0.5 h-5 w-5" aria-hidden="true" />
+      Pause chat
+    </button>
+  );
+}
+
+export interface ResumeButtonProps {
+  onClick: () => void;
+}
+export function ResumeButton({ onClick }: ResumeButtonProps) {
+  return (
+    <button
+      type="button"
+      className="inline-flex items-center gap-x-2 rounded-md bg-green-600 px-3.5 py-2.5 text-sm font-semibold text-white shadow-sm hover:bg-green-500 focus-visible:outline focus-visible:outline-2 focus-visible:outline-offset-2 focus-visible:outline-green-600"
+      onClick={onClick}
+    >
+      <PlayCircleIcon className="-ml-0.5 h-5 w-5" aria-hidden="true" />
+      Resume chat
+    </button>
+  );
+}

tsconfig.json

Lines changed: 1 addition & 1 deletion

@@ -2,7 +2,7 @@
   "exclude": ["./cypress", "./cypress.config.ts"],
   "include": ["remix.env.d.ts", "**/*.ts", "**/*.tsx"],
   "compilerOptions": {
-    "lib": ["DOM", "DOM.Iterable", "ES2019"],
+    "lib": ["DOM", "DOM.Iterable", "ES2019", "ES2021.String"],
     "types": ["vitest/globals", "@types/wicg-file-system-access"],
     "isolatedModules": true,
     "esModuleInterop": true,
