use std::error::Error;

use async_openai::{
    config::OpenAIConfig,
    types::{
        ChatCompletionRequestAssistantMessageArgs, ChatCompletionRequestSystemMessageArgs,
        ChatCompletionRequestUserMessageArgs, CreateChatCompletionRequestArgs,
    },
    Client,
};

#[tokio::main]
async fn main() -> Result<(), Box<dyn Error>> {
    // This is the default host:port for Ollama's OpenAI-compatible endpoint.
    // It should match the configuration in docker-compose.yml.
    let api_base = "http://localhost:11434/v1";
    // An API key is required by the client but ignored by Ollama.
    let api_key = "ollama";

    let client = Client::with_config(
        OpenAIConfig::new()
            .with_api_key(api_key)
            .with_api_base(api_base),
    );

    // This should match whatever model has been pulled in the Ollama Docker container.
    let model = "llama3.2:1b";

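    // Build a multi-turn chat request: a system prompt, a prior user/assistant
    // exchange, and a follow-up user question, capped at 512 completion tokens.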
    let request = CreateChatCompletionRequestArgs::default()
        .max_tokens(512u32)
        .model(model)
        .messages([
            ChatCompletionRequestSystemMessageArgs::default()
                .content("You are a helpful assistant.")
                .build()?
                .into(),
            ChatCompletionRequestUserMessageArgs::default()
                .content("Who won the world series in 2020?")
                .build()?
                .into(),
            ChatCompletionRequestAssistantMessageArgs::default()
                .content("The Los Angeles Dodgers won the World Series in 2020.")
                .build()?
                .into(),
            ChatCompletionRequestUserMessageArgs::default()
                .content("Where was it played?")
                .build()?
                .into(),
        ])
        .build()?;

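    // Print the serialized request so the exact JSON sent to the server can be inspected.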
    println!("{}", serde_json::to_string(&request)?);

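    // Send the chat completion request to the local Ollama server.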
    let response = client.chat().create(request).await?;

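    // Print each returned choice with its index, role, and message content.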
    println!("\nResponse:\n");
    for choice in response.choices {
        println!(
            "{}: Role: {} Content: {:?}",
            choice.index, choice.message.role, choice.message.content
        );
    }

    Ok(())
}