// Next.js API route support: https://nextjs.org/docs/api-routes/introduction
import { ApiReturnSchema, ApiSchema, OpenAIAPIError } from '@/types/apiTypes';
import type { NextApiRequest, NextApiResponse } from 'next'
- import { Configuration, OpenAIApi } from 'openai';
+ import { ChatCompletionRequestMessage, Configuration, OpenAIApi } from 'openai';
import { z } from 'zod';

const configuration = new Configuration({
  apiKey: process.env.OPENAI_API_KEY,
});


- const PROMPT_CONTEXT = `Make a math bot for school.
- This bot helps with schoolwork by taking in a mathematical problem and solving it, outputting the intermediate steps as well. Mathematical symbols in the input and outputs, as well as the steps, are all done in LaTeX.
- Q: Solve the following: $$\\int x^{2}dx$$
- A: $$\\int x^{2}dx = \\frac{1}{3} x^3 + C$$
- Q: Solve the following: $$\\int_{1}^{3}x^{2}dx$$
- A: $$\\int_{1}^{2}x^{2} = \\left[ \\frac{1}{2} x^3 \\right]_{1}^{2} = \\frac{2^3}{3} - \\frac{1^3}{3} = \\frac{7}{3}$$`
-
-
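+ // Few-shot chat context: a system message that defines the bot's role, followed by
+ // example user/assistant turns showing the expected LaTeX-formatted answers.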
+ const MESSAGES_CONTEXT: ChatCompletionRequestMessage[] = [
+   { role: "system", content: "You are a math bot built for high school and undergraduate calculus courses. You help with schoolwork by taking in a mathematical problem and solving it, outputting the intermediate steps as well. Mathematical symbols in the input and outputs, as well as the steps, are all done in LaTeX." },
+   { role: "user", content: "Solve the following: $$\\int x^{2}dx$$" },
+   { role: "assistant", content: "$$\\int x^{2}dx = \\frac{1}{3} x^3 + C$$" },
+   { role: "user", content: "Solve the following: $$\\int_{1}^{3}x^{2}dx$$" },
+   { role: "assistant", content: "$$\\int_{1}^{3}x^{2}dx = \\left[ \\frac{1}{3} x^3 \\right]_{1}^{3} = \\frac{3^3}{3} - \\frac{1^3}{3} = \\frac{26}{3}$$" },
+ ]
export default async function handler(
  req: NextApiRequest,
  res: NextApiResponse<ApiReturnSchema>
@@ -37,27 +36,22 @@ export default async function handler(
      res.status(400).send({ tag: 'error', error: `Bad request: ${parsed.error.toString()}` })
      return
    }
-     const total_prompt = PROMPT_CONTEXT + `\nQ: ${parsed.data.prompt}`;
-     console.log('Handling chatGPT: total prompt and context:', total_prompt)
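+     // Build the full message list by appending the user's prompt as the final user turn.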
+     const total_prompt: ChatCompletionRequestMessage[] = [...MESSAGES_CONTEXT, { "role": "user", "content": parsed.data.prompt }];
+     console.log('Handling chatGPT: total prompt and context:', total_prompt);

-     const completion = await openai.createCompletion({
-       model: 'text-davinci-003',
-       // append prompt to the end of the prompt context
-       prompt: total_prompt,
-       temperature: 0.3,
-       max_tokens: 500
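+     // gpt-3.5-turbo is served by the chat completions endpoint, which takes an
+     // array of messages rather than a single prompt string.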
+     const completion = await openai.createChatCompletion({
+       model: 'gpt-3.5-turbo',
+       messages: total_prompt,
+       temperature: 0.5,
    })
    console.log('Completion:', completion.data)
-     const answer = completion.data.choices[0].text;
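+     // Chat completions return the answer under message.content; optional chaining
+     // guards against a response with no message.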
+     const answer = completion.data.choices[0].message?.content;
+
    if (!answer) {
      return res.send({ tag: 'error', error: 'No answer found' })
    }
    console.log(`Got answer '${answer}'`)
-     if (answer.trim().startsWith('A:')) {
-       return res.send({ tag: 'success', promptReturn: answer.trim().slice(2).trim() })
-     } else {
-       return res.send({ tag: 'success', promptReturn: answer.trim() })
-     }
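+     // The chat-formatted context no longer uses the "Q:"/"A:" convention, so the
+     // answer can be returned directly without stripping an "A:" prefix.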
+     return res.send({ tag: 'success', promptReturn: answer.trim() })

  } catch (e) {
    const parsedError = OpenAIAPIError.safeParse(e);