diff --git a/async-openai-macros/Cargo.toml b/async-openai-macros/Cargo.toml index 87efe25e..2080967c 100644 --- a/async-openai-macros/Cargo.toml +++ b/async-openai-macros/Cargo.toml @@ -4,7 +4,7 @@ version = "0.1.0" authors = ["Himanshu Neema"] keywords = ["openai", "macros", "ai"] description = "Macros for async-openai" -edition = "2021" +edition = "2024" license = "MIT" homepage = "https://github.com/64bit/async-openai" repository = "https://github.com/64bit/async-openai" diff --git a/async-openai-wasm/Cargo.toml b/async-openai-wasm/Cargo.toml index bcdfb591..1f716564 100644 --- a/async-openai-wasm/Cargo.toml +++ b/async-openai-wasm/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "async-openai-wasm" -version = "0.28.2" +version = "0.29.3" authors = [ "Contributors of the async-openai crate", "ifsheldon " ] diff --git a/async-openai-wasm/README.md b/async-openai-wasm/README.md index 2fe8e455..9013651f 100644 --- a/async-openai-wasm/README.md +++ b/async-openai-wasm/README.md @@ -131,6 +131,22 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> { Scaled up for README, actual size 256x256 +## Dynamic Dispatch for Different Providers + +For any struct that implements the `Config` trait, you can wrap it in a smart pointer and cast the pointer to a `dyn Config` +trait object; your client can then accept any wrapped configuration type. + +For example, + +```rust +use async_openai::{Client, config::Config, config::OpenAIConfig}; + +let openai_config = OpenAIConfig::default(); +// You can use `std::sync::Arc` to wrap the config as well +let config = Box::new(openai_config) as Box<dyn Config>; +let client: Client<Box<dyn Config> > = Client::with_config(config); +``` + ## Contributing This repo will only accept issues and PRs related to WASM support. For other issues and PRs, please visit the original diff --git a/async-openai-wasm/src/client.rs b/async-openai-wasm/src/client.rs index c92be13c..570e67f2 100644 --- a/async-openai-wasm/src/client.rs +++ b/async-openai-wasm/src/client.rs @@ -68,97 +68,97 @@ impl<C: Config> Client<C> { // API groups /// To call [Models] group related APIs using this client. - pub fn models(&self) -> Models<C> { + pub fn models(&self) -> Models<'_, C> { Models::new(self) } /// To call [Completions] group related APIs using this client. - pub fn completions(&self) -> Completions<C> { + pub fn completions(&self) -> Completions<'_, C> { Completions::new(self) } /// To call [Chat] group related APIs using this client. - pub fn chat(&self) -> Chat<C> { + pub fn chat(&self) -> Chat<'_, C> { Chat::new(self) } /// To call [Images] group related APIs using this client. - pub fn images(&self) -> Images<C> { + pub fn images(&self) -> Images<'_, C> { Images::new(self) } /// To call [Moderations] group related APIs using this client. - pub fn moderations(&self) -> Moderations<C> { + pub fn moderations(&self) -> Moderations<'_, C> { Moderations::new(self) } /// To call [Files] group related APIs using this client. - pub fn files(&self) -> Files<C> { + pub fn files(&self) -> Files<'_, C> { Files::new(self) } /// To call [Uploads] group related APIs using this client. - pub fn uploads(&self) -> Uploads<C> { + pub fn uploads(&self) -> Uploads<'_, C> { Uploads::new(self) } /// To call [FineTuning] group related APIs using this client. - pub fn fine_tuning(&self) -> FineTuning<C> { + pub fn fine_tuning(&self) -> FineTuning<'_, C> { FineTuning::new(self) } /// To call [Embeddings] group related APIs using this client.
- pub fn embeddings(&self) -> Embeddings<C> { + pub fn embeddings(&self) -> Embeddings<'_, C> { Embeddings::new(self) } /// To call [Audio] group related APIs using this client. - pub fn audio(&self) -> Audio<C> { + pub fn audio(&self) -> Audio<'_, C> { Audio::new(self) } /// To call [Assistants] group related APIs using this client. - pub fn assistants(&self) -> Assistants<C> { + pub fn assistants(&self) -> Assistants<'_, C> { Assistants::new(self) } /// To call [Threads] group related APIs using this client. - pub fn threads(&self) -> Threads<C> { + pub fn threads(&self) -> Threads<'_, C> { Threads::new(self) } /// To call [VectorStores] group related APIs using this client. - pub fn vector_stores(&self) -> VectorStores<C> { + pub fn vector_stores(&self) -> VectorStores<'_, C> { VectorStores::new(self) } /// To call [Batches] group related APIs using this client. - pub fn batches(&self) -> Batches<C> { + pub fn batches(&self) -> Batches<'_, C> { Batches::new(self) } /// To call [AuditLogs] group related APIs using this client. - pub fn audit_logs(&self) -> AuditLogs<C> { + pub fn audit_logs(&self) -> AuditLogs<'_, C> { AuditLogs::new(self) } /// To call [Invites] group related APIs using this client. - pub fn invites(&self) -> Invites<C> { + pub fn invites(&self) -> Invites<'_, C> { Invites::new(self) } /// To call [Users] group related APIs using this client. - pub fn users(&self) -> Users<C> { + pub fn users(&self) -> Users<'_, C> { Users::new(self) } /// To call [Projects] group related APIs using this client. - pub fn projects(&self) -> Projects<C> { + pub fn projects(&self) -> Projects<'_, C> { Projects::new(self) } /// To call [Responses] group related APIs using this client. - pub fn responses(&self) -> Responses<C> { + pub fn responses(&self) -> Responses<'_, C> { Responses::new(self) } @@ -519,7 +519,7 @@ where *this.done = true; Poll::Ready(Some(Err(map_deserialization_error( e, - &message.data.as_bytes(), + message.data.as_bytes(), )))) } Ok(output) => Poll::Ready(Some(Ok(output))), diff --git a/async-openai-wasm/src/config.rs b/async-openai-wasm/src/config.rs index d5e10e34..22133d6f 100644 --- a/async-openai-wasm/src/config.rs +++ b/async-openai-wasm/src/config.rs @@ -15,7 +15,7 @@ pub const OPENAI_BETA_HEADER: &str = "OpenAI-Beta"; /// [crate::Client] relies on this for every API call on OpenAI /// or Azure OpenAI service -pub trait Config: Clone { +pub trait Config: Send + Sync { fn headers(&self) -> HeaderMap; fn url(&self, path: &str) -> String; fn query(&self) -> Vec<(&str, &str)>; @@ -25,6 +25,32 @@ pub trait Config: Send + Sync { fn api_key(&self) -> &SecretString; } +/// Macro to implement Config trait for pointer types with dyn objects +macro_rules!
impl_config_for_ptr { + ($t:ty) => { + impl Config for $t { + fn headers(&self) -> HeaderMap { + self.as_ref().headers() + } + fn url(&self, path: &str) -> String { + self.as_ref().url(path) + } + fn query(&self) -> Vec<(&str, &str)> { + self.as_ref().query() + } + fn api_base(&self) -> &str { + self.as_ref().api_base() + } + fn api_key(&self) -> &SecretString { + self.as_ref().api_key() + } + } + }; +} + +impl_config_for_ptr!(Box<dyn Config>); +impl_config_for_ptr!(std::sync::Arc<dyn Config>); + /// Configuration for OpenAI API #[derive(Clone, Debug, Deserialize)] #[serde(default)] @@ -211,3 +237,55 @@ impl Config for AzureConfig { vec![("api-version", &self.api_version)] } } + +#[cfg(test)] +mod test { + use super::*; + use crate::Client; + use crate::types::{ + ChatCompletionRequestMessage, ChatCompletionRequestUserMessage, CreateChatCompletionRequest, + }; + use std::sync::Arc; + #[test] + fn test_client_creation() { + unsafe { std::env::set_var("OPENAI_API_KEY", "test") } + let openai_config = OpenAIConfig::default(); + let config = Box::new(openai_config.clone()) as Box<dyn Config>; + let client = Client::with_config(config); + assert!(client.config().url("").ends_with("/v1")); + + let config = Arc::new(openai_config) as Arc<dyn Config>; + let client = Client::with_config(config); + assert!(client.config().url("").ends_with("/v1")); + let cloned_client = client.clone(); + assert!(cloned_client.config().url("").ends_with("/v1")); + } + + async fn dynamic_dispatch_compiles(client: &Client<Box<dyn Config>>) { + let _ = client.chat().create(CreateChatCompletionRequest { + model: "gpt-4o".to_string(), + messages: vec![ChatCompletionRequestMessage::User( + ChatCompletionRequestUserMessage { + content: "Hello, world!".into(), + ..Default::default() + }, + )], + ..Default::default() + }); + } + + #[tokio::test] + async fn test_dynamic_dispatch() { + let openai_config = OpenAIConfig::default(); + let azure_config = AzureConfig::default(); + + let azure_client = Client::with_config(Box::new(azure_config.clone()) as Box<dyn Config>); + let oai_client = Client::with_config(Box::new(openai_config.clone()) as Box<dyn Config>); + + let _ = dynamic_dispatch_compiles(&azure_client).await; + let _ = dynamic_dispatch_compiles(&oai_client).await; + + let _ = tokio::spawn(async move { dynamic_dispatch_compiles(&azure_client).await }); + let _ = tokio::spawn(async move { dynamic_dispatch_compiles(&oai_client).await }); + } +} diff --git a/async-openai-wasm/src/error.rs b/async-openai-wasm/src/error.rs index eea51c10..a1139c9f 100644 --- a/async-openai-wasm/src/error.rs +++ b/async-openai-wasm/src/error.rs @@ -1,5 +1,5 @@ //! Errors originating from API calls, parsing responses, and reading-or-writing to the file system.
-use serde::Deserialize; +use serde::{Deserialize, Serialize}; #[derive(Debug, thiserror::Error)] pub enum OpenAIError { @@ -28,7 +28,7 @@ } /// OpenAI API returns error object on failure -#[derive(Debug, Deserialize, Clone)] +#[derive(Debug, Serialize, Deserialize, Clone)] pub struct ApiError { pub message: String, pub r#type: Option<String>, @@ -62,9 +62,9 @@ impl std::fmt::Display for ApiError { } /// Wrapper to deserialize the error object nested in "error" JSON key -#[derive(Debug, Deserialize)] -pub(crate) struct WrappedError { - pub(crate) error: ApiError, +#[derive(Debug, Deserialize, Serialize)] +pub struct WrappedError { + pub error: ApiError, } pub(crate) fn map_deserialization_error(e: serde_json::Error, bytes: &[u8]) -> OpenAIError { diff --git a/async-openai-wasm/src/lib.rs b/async-openai-wasm/src/lib.rs index 014faf1a..eeecffe8 100644 --- a/async-openai-wasm/src/lib.rs +++ b/async-openai-wasm/src/lib.rs @@ -94,6 +94,22 @@ //! # }); //!``` //! +//! ## Dynamic Dispatch for Different Providers +//! +//! For any struct that implements the `Config` trait, you can wrap it in a smart pointer and cast the pointer to a `dyn Config` +//! trait object; your client can then accept any wrapped configuration type. +//! +//! For example, +//! ``` +//! use async_openai::{Client, config::Config, config::OpenAIConfig}; +//! unsafe { std::env::set_var("OPENAI_API_KEY", "only for doc test") } +//! +//! let openai_config = OpenAIConfig::default(); +//! // You can use `std::sync::Arc` to wrap the config as well +//! let config = Box::new(openai_config) as Box<dyn Config>; +//! let client: Client<Box<dyn Config> > = Client::with_config(config); +//! ``` +//! //! ## Microsoft Azure //! //! ``` diff --git a/async-openai-wasm/src/projects.rs b/async-openai-wasm/src/projects.rs index d9c731a6..714ce88a 100644 --- a/async-openai-wasm/src/projects.rs +++ b/async-openai-wasm/src/projects.rs @@ -20,17 +20,17 @@ impl<'c, C: Config> Projects<'c, C> { } // call [ProjectUsers] group APIs - pub fn users(&self, project_id: &str) -> ProjectUsers<C> { + pub fn users(&self, project_id: &str) -> ProjectUsers<'_, C> { ProjectUsers::new(self.client, project_id) } // call [ProjectServiceAccounts] group APIs - pub fn service_accounts(&self, project_id: &str) -> ProjectServiceAccounts<C> { + pub fn service_accounts(&self, project_id: &str) -> ProjectServiceAccounts<'_, C> { ProjectServiceAccounts::new(self.client, project_id) } // call [ProjectAPIKeys] group APIs - pub fn api_keys(&self, project_id: &str) -> ProjectAPIKeys<C> { + pub fn api_keys(&self, project_id: &str) -> ProjectAPIKeys<'_, C> { ProjectAPIKeys::new(self.client, project_id) } diff --git a/async-openai-wasm/src/responses.rs b/async-openai-wasm/src/responses.rs index 201e1d30..f86e1139 100644 --- a/async-openai-wasm/src/responses.rs +++ b/async-openai-wasm/src/responses.rs @@ -2,12 +2,12 @@ use crate::{ Client, config::Config, error::OpenAIError, - types::responses::{CreateResponse, Response}, + types::responses::{CreateResponse, Response, ResponseStream}, }; /// Given text input or a list of context items, the model will generate a response.
/// -/// Related guide: [Responses API](https://platform.openai.com/docs/guides/responses) +/// Related guide: [Responses](https://platform.openai.com/docs/api-reference/responses) pub struct Responses<'c, C: Config> { client: &'c Client<C>, } @@ -26,4 +26,30 @@ impl<'c, C: Config> Responses<'c, C> { pub async fn create(&self, request: CreateResponse) -> Result<Response, OpenAIError> { self.client.post("/responses", request).await } + + /// Creates a model response for the given input with streaming. + /// + /// Response events will be sent as server-sent events as they become available. + #[crate::byot( + T0 = serde::Serialize, + R = serde::de::DeserializeOwned, + stream = "true", + where_clause = "R: std::marker::Send + 'static" + )] + #[allow(unused_mut)] + pub async fn create_stream( + &self, + mut request: CreateResponse, + ) -> Result<ResponseStream, OpenAIError> { + #[cfg(not(feature = "byot"))] + { + if matches!(request.stream, Some(false)) { + return Err(OpenAIError::InvalidArgument( + "When stream is false, use Responses::create".into(), + )); + } + request.stream = Some(true); + } + Ok(self.client.post_stream("/responses", request).await) + } } diff --git a/async-openai-wasm/src/runs.rs b/async-openai-wasm/src/runs.rs index f9a678e2..2ef5ad60 100644 --- a/async-openai-wasm/src/runs.rs +++ b/async-openai-wasm/src/runs.rs @@ -28,7 +28,7 @@ impl<'c, C: Config> Runs<'c, C> { } /// [Steps] API group - pub fn steps(&self, run_id: &str) -> Steps<C> { + pub fn steps(&self, run_id: &str) -> Steps<'_, C> { Steps::new(self.client, &self.thread_id, run_id) } diff --git a/async-openai-wasm/src/threads.rs b/async-openai-wasm/src/threads.rs index cc2ff690..8517c930 100644 --- a/async-openai-wasm/src/threads.rs +++ b/async-openai-wasm/src/threads.rs @@ -21,12 +21,12 @@ impl<'c, C: Config> Threads<'c, C> { } /// Call [Messages] group API to manage message in [thread_id] thread. - pub fn messages(&self, thread_id: &str) -> Messages<C> { + pub fn messages(&self, thread_id: &str) -> Messages<'_, C> { Messages::new(self.client, thread_id) } /// Call [Runs] group API to manage runs in [thread_id] thread. - pub fn runs(&self, thread_id: &str) -> Runs<C> { + pub fn runs(&self, thread_id: &str) -> Runs<'_, C> { Runs::new(self.client, thread_id) } diff --git a/async-openai-wasm/src/types/assistant_stream.rs b/async-openai-wasm/src/types/assistant_stream.rs index 9cf43ced..5fac3f97 100644 --- a/async-openai-wasm/src/types/assistant_stream.rs +++ b/async-openai-wasm/src/types/assistant_stream.rs @@ -32,7 +32,7 @@ use super::{ pub enum AssistantStreamEvent { /// Occurs when a new [thread](https://platform.openai.com/docs/api-reference/threads/object) is created. #[serde(rename = "thread.created")] - TreadCreated(ThreadObject), + ThreadCreated(ThreadObject), /// Occurs when a new [run](https://platform.openai.com/docs/api-reference/runs/object) is created.
#[serde(rename = "thread.run.created")] ThreadRunCreated(RunObject), @@ -115,7 +115,7 @@ impl TryFrom for AssistantStreamEvent { match value.event.as_str() { "thread.created" => serde_json::from_str::(value.data.as_str()) .map_err(|e| map_deserialization_error(e, value.data.as_bytes())) - .map(AssistantStreamEvent::TreadCreated), + .map(AssistantStreamEvent::ThreadCreated), "thread.run.created" => serde_json::from_str::(value.data.as_str()) .map_err(|e| map_deserialization_error(e, value.data.as_bytes())) .map(AssistantStreamEvent::ThreadRunCreated), diff --git a/async-openai-wasm/src/types/audio.rs b/async-openai-wasm/src/types/audio.rs index 09d96458..dd963147 100644 --- a/async-openai-wasm/src/types/audio.rs +++ b/async-openai-wasm/src/types/audio.rs @@ -40,6 +40,7 @@ pub enum Voice { #[default] Alloy, Ash, + Ballad, Coral, Echo, Fable, diff --git a/async-openai-wasm/src/types/chat.rs b/async-openai-wasm/src/types/chat.rs index b734a9ab..9b879e99 100644 --- a/async-openai-wasm/src/types/chat.rs +++ b/async-openai-wasm/src/types/chat.rs @@ -11,9 +11,9 @@ use crate::error::OpenAIError; pub enum Prompt { String(String), StringArray(Vec), - // Minimum value is 0, maximum value is 50256 (inclusive). - IntegerArray(Vec), - ArrayOfIntegerArray(Vec>), + // Minimum value is 0, maximum value is 4_294_967_295 (inclusive). + IntegerArray(Vec), + ArrayOfIntegerArray(Vec>), } #[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] @@ -43,7 +43,9 @@ pub enum CompletionFinishReason { pub struct Choice { pub text: String, pub index: u32, + #[serde(skip_serializing_if = "Option::is_none")] pub logprobs: Option, + #[serde(skip_serializing_if = "Option::is_none")] pub finish_reason: Option, } @@ -85,7 +87,7 @@ pub struct FunctionCall { } /// Usage statistics for the completion request. -#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Default)] pub struct CompletionUsage { /// Number of tokens in the prompt. pub prompt_tokens: u32, @@ -94,13 +96,15 @@ pub struct CompletionUsage { /// Total number of tokens used in the request (prompt + completion). pub total_tokens: u32, /// Breakdown of tokens used in the prompt. + #[serde(skip_serializing_if = "Option::is_none")] pub prompt_tokens_details: Option, /// Breakdown of tokens used in a completion. + #[serde(skip_serializing_if = "Option::is_none")] pub completion_tokens_details: Option, } /// Breakdown of tokens used in a completion. -#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Default)] pub struct PromptTokensDetails { /// Audio input tokens present in the prompt. pub audio_tokens: Option, @@ -109,7 +113,7 @@ pub struct PromptTokensDetails { } /// Breakdown of tokens used in a completion. -#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Default)] pub struct CompletionTokensDetails { pub accepted_prediction_tokens: Option, /// Audio input tokens generated by the model. @@ -414,10 +418,13 @@ pub struct ChatCompletionResponseMessageAudio { #[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] pub struct ChatCompletionResponseMessage { /// The contents of the message. + #[serde(skip_serializing_if = "Option::is_none")] pub content: Option, /// The refusal message generated by the model. + #[serde(skip_serializing_if = "Option::is_none")] pub refusal: Option, /// The tool calls generated by the model, such as function calls. 
+ #[serde(skip_serializing_if = "Option::is_none")] pub tool_calls: Option<Vec<ChatCompletionMessageToolCall>>, /// The role of the author of this message. @@ -425,10 +432,12 @@ pub struct ChatCompletionResponseMessage { /// Deprecated and replaced by `tool_calls`. /// The name and arguments of a function that should be called, as generated by the model. + #[serde(skip_serializing_if = "Option::is_none")] #[deprecated] pub function_call: Option<FunctionCall>, /// If the audio output modality is requested, this object contains data about the audio response from the model. [Learn more](https://platform.openai.com/docs/guides/audio). + #[serde(skip_serializing_if = "Option::is_none")] pub audio: Option<ChatCompletionResponseMessageAudio>, /// Catching anything else that a provider wants to provide, for example, a `reasoning` field @@ -547,7 +556,7 @@ pub struct ChatCompletionNamedToolChoice { /// `required` means the model must call one or more tools. /// Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. /// -/// `none` is the default when no tools are present. `auto` is the default if tools are present.present. +/// `none` is the default when no tools are present. `auto` is the default if tools are present. #[derive(Clone, Serialize, Default, Debug, Deserialize, PartialEq)] #[serde(rename_all = "lowercase")] pub enum ChatCompletionToolChoiceOption { @@ -611,6 +620,9 @@ pub struct WebSearchOptions { pub enum ServiceTier { Auto, Default, + Flex, + Scale, + Priority, } #[derive(Clone, Serialize, Debug, Deserialize, PartialEq)] @@ -618,11 +630,14 @@ pub enum ServiceTierResponse { Scale, Default, + Flex, + Priority, } #[derive(Clone, Serialize, Debug, Deserialize, PartialEq)] #[serde(rename_all = "lowercase")] pub enum ReasoningEffort { + Minimal, Low, Medium, High, @@ -852,6 +867,7 @@ pub struct CreateChatCompletionRequest { /// This tool searches the web for relevant results to use in a response. /// Learn more about the [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat). + #[serde(skip_serializing_if = "Option::is_none")] pub web_search_options: Option<WebSearchOptions>, /// Deprecated in favor of `tool_choice`. @@ -933,8 +949,10 @@ pub struct ChatChoice { /// `length` if the maximum number of tokens specified in the request was reached, /// `content_filter` if content was omitted due to a flag from our content filters, /// `tool_calls` if the model called a tool, or `function_call` (deprecated) if the model called a function. + #[serde(skip_serializing_if = "Option::is_none")] pub finish_reason: Option<FinishReason>, /// Log probability information for the choice. + #[serde(skip_serializing_if = "Option::is_none")] pub logprobs: Option<ChatChoiceLogprobs>, } @@ -950,10 +968,12 @@ pub struct CreateChatCompletionResponse { /// The model used for the chat completion. pub model: String, /// The service tier used for processing the request. This field is only included if the `service_tier` parameter is specified in the request. + #[serde(skip_serializing_if = "Option::is_none")] pub service_tier: Option<ServiceTierResponse>, /// This fingerprint represents the backend configuration that the model runs with. /// /// Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. + #[serde(skip_serializing_if = "Option::is_none")] pub system_fingerprint: Option<String>, /// The object type, which is always `chat.completion`.
@@ -1026,8 +1046,10 @@ pub struct ChatChoiceStream { /// content filters, /// `tool_calls` if the model called a tool, or `function_call` /// (deprecated) if the model called a function. + #[serde(skip_serializing_if = "Option::is_none")] pub finish_reason: Option<FinishReason>, /// Log probability information for the choice. + #[serde(skip_serializing_if = "Option::is_none")] pub logprobs: Option<ChatChoiceLogprobs>, } diff --git a/async-openai-wasm/src/types/image.rs b/async-openai-wasm/src/types/image.rs index 03eabe91..f30b5dd2 100644 --- a/async-openai-wasm/src/types/image.rs +++ b/async-openai-wasm/src/types/image.rs @@ -57,6 +57,10 @@ pub enum ImageQuality { #[default] Standard, HD, + High, + Medium, + Low, + Auto, } #[derive(Debug, Serialize, Deserialize, Default, Clone, PartialEq)] @@ -67,6 +71,14 @@ pub enum ImageStyle { Natural, } +#[derive(Debug, Serialize, Deserialize, Default, Clone, PartialEq)] +#[serde(rename_all = "lowercase")] +pub enum ImageModeration { + #[default] + Auto, + Low, +} + #[derive(Debug, Clone, Serialize, Deserialize, Default, Builder, PartialEq)] #[builder(name = "CreateImageRequestArgs")] #[builder(pattern = "mutable")] @@ -110,6 +122,11 @@ pub struct CreateImageRequest { /// A unique identifier representing your end-user, which will help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/usage-policies/end-user-ids). #[serde(skip_serializing_if = "Option::is_none")] pub user: Option<String>, + + /// Control the content-moderation level for images generated by gpt-image-1. + /// Must be either `low` for less restrictive filtering or `auto` (default value). + #[serde(skip_serializing_if = "Option::is_none")] + pub moderation: Option<ImageModeration>, } #[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] diff --git a/async-openai-wasm/src/types/impls.rs b/async-openai-wasm/src/types/impls.rs index 8c733c60..8fade937 100644 --- a/async-openai-wasm/src/types/impls.rs +++ b/async-openai-wasm/src/types/impls.rs @@ -291,7 +291,7 @@ macro_rules! impl_from_for_integer_array { } impl_from_for_integer_array!(u32, EmbeddingInput); -impl_from_for_integer_array!(u16, Prompt); +impl_from_for_integer_array!(u32, Prompt); macro_rules! impl_from_for_array_of_integer_array { ($from_typ:ty, $to_typ:ty) => { @@ -388,7 +388,7 @@ macro_rules!
impl_from_for_array_of_integer_array { } impl_from_for_array_of_integer_array!(u32, EmbeddingInput); -impl_from_for_array_of_integer_array!(u16, Prompt); +impl_from_for_array_of_integer_array!(u32, Prompt); impl From<&str> for ChatCompletionFunctionCall { fn from(value: &str) -> Self { diff --git a/async-openai-wasm/src/types/realtime/response_resource.rs b/async-openai-wasm/src/types/realtime/response_resource.rs index 4a500890..a6c6c32f 100644 --- a/async-openai-wasm/src/types/realtime/response_resource.rs +++ b/async-openai-wasm/src/types/realtime/response_resource.rs @@ -40,6 +40,8 @@ pub enum ResponseStatusDetail { Incomplete { reason: IncompleteReason }, #[serde(rename = "failed")] Failed { error: Option }, + #[serde(rename = "cancelled")] + Cancelled { reason: String }, } #[derive(Debug, Serialize, Deserialize, Clone)] diff --git a/async-openai-wasm/src/types/realtime/server_event.rs b/async-openai-wasm/src/types/realtime/server_event.rs index 3ba5f552..8795f6e4 100644 --- a/async-openai-wasm/src/types/realtime/server_event.rs +++ b/async-openai-wasm/src/types/realtime/server_event.rs @@ -83,6 +83,17 @@ pub struct ConversationItemCreatedEvent { pub item: Item, } +#[derive(Debug, Serialize, Deserialize, Clone)] +/// Log probability information for a transcribed token. +pub struct LogProb { + /// Raw UTF-8 bytes for the token. + pub bytes: Vec<u8>, + /// The log probability of the token. + pub logprob: f64, + /// The token string. + pub token: String, +} + #[derive(Debug, Serialize, Deserialize, Clone)] pub struct ConversationItemInputAudioTranscriptionCompletedEvent { /// The unique ID of the server event. @@ -93,6 +104,22 @@ pub content_index: u32, /// The transcribed text. pub transcript: String, + /// Optional per-token log probability data. + pub logprobs: Option<Vec<LogProb>>, } + +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct ConversationItemInputAudioTranscriptionDeltaEvent { + /// The unique ID of the server event. + pub event_id: String, + /// The ID of the user message item. + pub item_id: String, + /// The index of the content part containing the audio. + pub content_index: u32, + /// The text delta. + pub delta: String, + /// Optional per-token log probability data. + pub logprobs: Option<Vec<LogProb>>, } #[derive(Debug, Serialize, Deserialize, Clone)] @@ -378,6 +405,9 @@ pub enum ServerEvent { ConversationItemInputAudioTranscriptionCompletedEvent, ), + #[serde(rename = "conversation.item.input_audio_transcription.delta")] + ConversationItemInputAudioTranscriptionDelta(ConversationItemInputAudioTranscriptionDeltaEvent), + /// Returned when input audio transcription is configured, and a transcription request for a user message failed.
#[serde(rename = "conversation.item.input_audio_transcription.failed")] ConversationItemInputAudioTranscriptionFailed( diff --git a/async-openai-wasm/src/types/realtime/session_resource.rs b/async-openai-wasm/src/types/realtime/session_resource.rs index 10472414..2fe1e5b1 100644 --- a/async-openai-wasm/src/types/realtime/session_resource.rs +++ b/async-openai-wasm/src/types/realtime/session_resource.rs @@ -4,18 +4,25 @@ use serde::{Deserialize, Serialize}; pub enum AudioFormat { #[serde(rename = "pcm16")] PCM16, - #[serde(rename = "g711-ulaw")] + #[serde(rename = "g711_law")] G711ULAW, - #[serde(rename = "g711-alaw")] + #[serde(rename = "g711_alaw")] G711ALAW, } -#[derive(Debug, Serialize, Deserialize, Clone)] +#[derive(Debug, Default, Serialize, Deserialize, Clone)] pub struct AudioTranscription { - /// Whether to enable input audio transcription. - pub enabled: bool, - /// The model to use for transcription (e.g., "whisper-1"). - pub model: String, + /// The language of the input audio. Supplying the input language in ISO-639-1 (e.g. en) format will improve accuracy and latency. + #[serde(skip_serializing_if = "Option::is_none")] + pub language: Option, + /// The model to use for transcription, current options are gpt-4o-transcribe, gpt-4o-mini-transcribe, and whisper-1. + #[serde(skip_serializing_if = "Option::is_none")] + pub model: Option, + /// An optional text to guide the model's style or continue a previous audio segment. + /// For whisper-1, the prompt is a list of keywords. For gpt-4o-transcribe models, + /// the prompt is a free text string, for example "expect words related to technology". + #[serde(skip_serializing_if = "Option::is_none")] + pub prompt: Option, } #[derive(Debug, Serialize, Deserialize, Clone)] @@ -30,6 +37,32 @@ pub enum TurnDetection { prefix_padding_ms: u32, /// Duration of silence to detect speech stop (in milliseconds). silence_duration_ms: u32, + + /// Whether or not to automatically generate a response when a VAD stop event occurs. + #[serde(skip_serializing_if = "Option::is_none")] + create_response: Option, + + /// Whether or not to automatically interrupt any ongoing response with output to + /// the default conversation (i.e. conversation of auto) when a VAD start event occurs. + #[serde(skip_serializing_if = "Option::is_none")] + interrupt_response: Option, + }, + + #[serde(rename = "semantic_vad")] + SemanticVAD { + /// The eagerness of the model to respond. + /// `low` will wait longer for the user to continue speaking, + /// `high`` will respond more quickly. `auto`` is the default and is equivalent to `medium` + eagerness: String, + + /// Whether or not to automatically generate a response when a VAD stop event occurs. + #[serde(skip_serializing_if = "Option::is_none", default)] + create_response: Option, + + /// Whether or not to automatically interrupt any ongoing response with output to + /// the default conversation (i.e. conversation of auto) when a VAD start event occurs. 
+ #[serde(skip_serializing_if = "Option::is_none", default)] + interrupt_response: Option<bool>, }, } @@ -78,8 +111,15 @@ pub enum ToolChoice { #[serde(rename_all = "lowercase")] pub enum RealtimeVoice { Alloy, - Shimmer, + Ash, + Ballad, + Coral, Echo, + Fable, + Onyx, + Nova, + Shimmer, + Verse, } #[derive(Debug, Serialize, Deserialize, Clone, Default)] diff --git a/async-openai-wasm/src/types/responses.rs b/async-openai-wasm/src/types/responses.rs index 726e601f..6aac8c8f 100644 --- a/async-openai-wasm/src/types/responses.rs +++ b/async-openai-wasm/src/types/responses.rs @@ -1,3 +1,4 @@ +use crate::OpenAIEventStream; use crate::error::OpenAIError; pub use crate::types::{ CompletionTokensDetails, ImageDetail, PromptTokensDetails, ReasoningEffort, @@ -155,6 +156,11 @@ pub struct CreateResponse { /// performance characteristics, and price points. pub model: String, + /// Whether to run the model response in the background. + /// boolean or null. + #[serde(skip_serializing_if = "Option::is_none")] + pub background: Option<bool>, + /// Specify additional output data to include in the model response. /// /// Supported values: @@ -188,6 +194,11 @@ #[serde(skip_serializing_if = "Option::is_none")] pub max_output_tokens: Option<u32>, + /// The maximum number of total calls to built-in tools that can be processed in a response. + /// This maximum number applies across all built-in tool calls, not per individual tool. + /// Any further attempts to call a tool by the model will be ignored. + pub max_tool_calls: Option<u32>, + /// Set of 16 key-value pairs that can be attached to an object. This can be /// useful for storing additional information about the object in a structured /// format, and querying for objects via API or the dashboard. @@ -206,6 +217,10 @@ #[serde(skip_serializing_if = "Option::is_none")] pub previous_response_id: Option<String>, + /// Reference to a prompt template and its variables. + #[serde(skip_serializing_if = "Option::is_none")] + pub prompt: Option<PromptConfig>, + /// **o-series models only**: Configuration options for reasoning models. #[serde(skip_serializing_if = "Option::is_none")] pub reasoning: Option<ReasoningConfig>, @@ -236,6 +251,11 @@ #[serde(skip_serializing_if = "Option::is_none")] pub store: Option<bool>, + /// If set to true, the model response data will be streamed to the client as it is + /// generated using server-sent events. + #[serde(skip_serializing_if = "Option::is_none")] + pub stream: Option<bool>, + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 /// will make the output more random, while lower values like 0.2 will make it /// more focused and deterministic. We generally recommend altering this or @@ -259,6 +279,11 @@ #[serde(skip_serializing_if = "Option::is_none")] pub tools: Option<Vec<ToolDefinition>>, + /// An integer between 0 and 20 specifying the number of most likely tokens to return + /// at each token position, each with an associated log probability. + #[serde(skip_serializing_if = "Option::is_none")] + pub top_logprobs: Option<u32>, // TODO add validation of range + /// An alternative to sampling with temperature, called nucleus sampling, /// where the model considers the results of the tokens with top_p probability /// mass. So 0.1 means only the tokens comprising the top 10% probability mass @@ -279,6 +304,23 @@ #[serde(skip_serializing_if = "Option::is_none")] pub user: Option<String>, } +/// Reference to a prompt template and its variables.
+#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +pub struct PromptConfig { + /// The unique identifier of the prompt template to use. + pub id: String, + + /// Optional version of the prompt template. + #[serde(skip_serializing_if = "Option::is_none")] + pub version: Option<String>, + + /// Optional map of values to substitute in for variables in your prompt. The substitution + /// values can either be strings, or other Response input types like images or files. + /// For now only supporting Strings. + #[serde(skip_serializing_if = "Option::is_none")] + pub variables: Option<HashMap<String, String>>, +} + /// Service tier request options. #[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)] #[serde(rename_all = "lowercase")] @@ -286,6 +328,8 @@ pub enum ServiceTier { Auto, Default, Flex, + Scale, + Priority, } /// Truncation strategies. @@ -499,7 +543,7 @@ pub enum ComparisonType { pub struct CompoundFilter { /// Type of operation #[serde(rename = "type")] - pub op: ComparisonType, + pub op: CompoundType, /// Array of filters to combine. Items can be ComparisonFilter or CompoundFilter. pub filters: Vec<serde_json::Value>, } @@ -940,6 +984,7 @@ pub enum FileSearchCallOutputStatus { Searching, Incomplete, Failed, + Completed, } /// A single result from a file search. @@ -1139,15 +1184,17 @@ pub struct ImageGenerationCallOutput { #[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] pub struct CodeInterpreterCallOutput { /// The code that was executed. - pub code: String, + #[serde(skip_serializing_if = "Option::is_none")] + pub code: Option<String>, /// Unique ID of the call. pub id: String, /// Status of the tool call. pub status: String, /// ID of the container used to run the code. pub container_id: String, - /// The results of the execution: logs or files. - pub results: Vec<CodeInterpreterResult>, + /// The outputs of the execution: logs or files. + #[serde(skip_serializing_if = "Option::is_none")] + pub outputs: Option<Vec<CodeInterpreterResult>>, } /// Individual result from a code interpreter: either logs or files. @@ -1322,6 +1369,12 @@ pub struct Response { /// The array of content items generated by the model. pub output: Vec<OutputContent>, + /// SDK-only convenience property that contains the aggregated text output from all + /// `output_text` items in the `output` array, if any are present. + /// Supported in the Python and JavaScript SDKs. + #[serde(skip_serializing_if = "Option::is_none")] + pub output_text: Option<String>, + /// Whether parallel tool calls were enabled. #[serde(skip_serializing_if = "Option::is_none")] pub parallel_tool_calls: Option<bool>, @@ -1334,6 +1387,10 @@ #[serde(skip_serializing_if = "Option::is_none")] pub reasoning: Option<ReasoningConfig>, + /// Whether to store the generated model response for later retrieval via API. + #[serde(skip_serializing_if = "Option::is_none")] + pub store: Option<bool>, + /// The service tier that actually processed this response.
#[serde(skip_serializing_if = "Option::is_none")] pub service_tier: Option<ServiceTier>, @@ -1382,3 +1439,733 @@ pub enum Status { InProgress, Incomplete, } + +/// Event types for streaming responses from the Responses API +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +#[serde(tag = "type")] +#[non_exhaustive] // Future-proof against breaking changes +pub enum ResponseEvent { + /// Response creation started + #[serde(rename = "response.created")] + ResponseCreated(ResponseCreated), + /// Processing in progress + #[serde(rename = "response.in_progress")] + ResponseInProgress(ResponseInProgress), + /// Response completed (different from done) + #[serde(rename = "response.completed")] + ResponseCompleted(ResponseCompleted), + /// Response failed + #[serde(rename = "response.failed")] + ResponseFailed(ResponseFailed), + /// Response incomplete + #[serde(rename = "response.incomplete")] + ResponseIncomplete(ResponseIncomplete), + /// Response queued + #[serde(rename = "response.queued")] + ResponseQueued(ResponseQueued), + /// Output item added + #[serde(rename = "response.output_item.added")] + ResponseOutputItemAdded(ResponseOutputItemAdded), + /// Content part added + #[serde(rename = "response.content_part.added")] + ResponseContentPartAdded(ResponseContentPartAdded), + /// Text delta update + #[serde(rename = "response.output_text.delta")] + ResponseOutputTextDelta(ResponseOutputTextDelta), + /// Text output completed + #[serde(rename = "response.output_text.done")] + ResponseOutputTextDone(ResponseOutputTextDone), + /// Refusal delta update + #[serde(rename = "response.refusal.delta")] + ResponseRefusalDelta(ResponseRefusalDelta), + /// Refusal completed + #[serde(rename = "response.refusal.done")] + ResponseRefusalDone(ResponseRefusalDone), + /// Content part completed + #[serde(rename = "response.content_part.done")] + ResponseContentPartDone(ResponseContentPartDone), + /// Output item completed + #[serde(rename = "response.output_item.done")] + ResponseOutputItemDone(ResponseOutputItemDone), + /// Function call arguments delta + #[serde(rename = "response.function_call_arguments.delta")] + ResponseFunctionCallArgumentsDelta(ResponseFunctionCallArgumentsDelta), + /// Function call arguments completed + #[serde(rename = "response.function_call_arguments.done")] + ResponseFunctionCallArgumentsDone(ResponseFunctionCallArgumentsDone), + /// File search call in progress + #[serde(rename = "response.file_search_call.in_progress")] + ResponseFileSearchCallInProgress(ResponseFileSearchCallInProgress), + /// File search call searching + #[serde(rename = "response.file_search_call.searching")] + ResponseFileSearchCallSearching(ResponseFileSearchCallSearching), + /// File search call completed + #[serde(rename = "response.file_search_call.completed")] + ResponseFileSearchCallCompleted(ResponseFileSearchCallCompleted), + /// Web search call in progress + #[serde(rename = "response.web_search_call.in_progress")] + ResponseWebSearchCallInProgress(ResponseWebSearchCallInProgress), + /// Web search call searching + #[serde(rename = "response.web_search_call.searching")] + ResponseWebSearchCallSearching(ResponseWebSearchCallSearching), + /// Web search call completed + #[serde(rename = "response.web_search_call.completed")] + ResponseWebSearchCallCompleted(ResponseWebSearchCallCompleted), + /// Reasoning summary part added + #[serde(rename = "response.reasoning_summary_part.added")] + ResponseReasoningSummaryPartAdded(ResponseReasoningSummaryPartAdded), + /// Reasoning summary part done +
#[serde(rename = "response.reasoning_summary_part.done")] + ResponseReasoningSummaryPartDone(ResponseReasoningSummaryPartDone), + /// Reasoning summary text delta + #[serde(rename = "response.reasoning_summary_text.delta")] + ResponseReasoningSummaryTextDelta(ResponseReasoningSummaryTextDelta), + /// Reasoning summary text done + #[serde(rename = "response.reasoning_summary_text.done")] + ResponseReasoningSummaryTextDone(ResponseReasoningSummaryTextDone), + /// Reasoning summary delta + #[serde(rename = "response.reasoning_summary.delta")] + ResponseReasoningSummaryDelta(ResponseReasoningSummaryDelta), + /// Reasoning summary done + #[serde(rename = "response.reasoning_summary.done")] + ResponseReasoningSummaryDone(ResponseReasoningSummaryDone), + /// Image generation call in progress + #[serde(rename = "response.image_generation_call.in_progress")] + ResponseImageGenerationCallInProgress(ResponseImageGenerationCallInProgress), + /// Image generation call generating + #[serde(rename = "response.image_generation_call.generating")] + ResponseImageGenerationCallGenerating(ResponseImageGenerationCallGenerating), + /// Image generation call partial image + #[serde(rename = "response.image_generation_call.partial_image")] + ResponseImageGenerationCallPartialImage(ResponseImageGenerationCallPartialImage), + /// Image generation call completed + #[serde(rename = "response.image_generation_call.completed")] + ResponseImageGenerationCallCompleted(ResponseImageGenerationCallCompleted), + /// MCP call arguments delta + #[serde(rename = "response.mcp_call_arguments.delta")] + ResponseMcpCallArgumentsDelta(ResponseMcpCallArgumentsDelta), + /// MCP call arguments done + #[serde(rename = "response.mcp_call_arguments.done")] + ResponseMcpCallArgumentsDone(ResponseMcpCallArgumentsDone), + /// MCP call completed + #[serde(rename = "response.mcp_call.completed")] + ResponseMcpCallCompleted(ResponseMcpCallCompleted), + /// MCP call failed + #[serde(rename = "response.mcp_call.failed")] + ResponseMcpCallFailed(ResponseMcpCallFailed), + /// MCP call in progress + #[serde(rename = "response.mcp_call.in_progress")] + ResponseMcpCallInProgress(ResponseMcpCallInProgress), + /// MCP list tools completed + #[serde(rename = "response.mcp_list_tools.completed")] + ResponseMcpListToolsCompleted(ResponseMcpListToolsCompleted), + /// MCP list tools failed + #[serde(rename = "response.mcp_list_tools.failed")] + ResponseMcpListToolsFailed(ResponseMcpListToolsFailed), + /// MCP list tools in progress + #[serde(rename = "response.mcp_list_tools.in_progress")] + ResponseMcpListToolsInProgress(ResponseMcpListToolsInProgress), + /// Code interpreter call in progress + #[serde(rename = "response.code_interpreter_call.in_progress")] + ResponseCodeInterpreterCallInProgress(ResponseCodeInterpreterCallInProgress), + /// Code interpreter call interpreting + #[serde(rename = "response.code_interpreter_call.interpreting")] + ResponseCodeInterpreterCallInterpreting(ResponseCodeInterpreterCallInterpreting), + /// Code interpreter call completed + #[serde(rename = "response.code_interpreter_call.completed")] + ResponseCodeInterpreterCallCompleted(ResponseCodeInterpreterCallCompleted), + /// Code interpreter call code delta + #[serde(rename = "response.code_interpreter_call_code.delta")] + ResponseCodeInterpreterCallCodeDelta(ResponseCodeInterpreterCallCodeDelta), + /// Code interpreter call code done + #[serde(rename = "response.code_interpreter_call_code.done")] + ResponseCodeInterpreterCallCodeDone(ResponseCodeInterpreterCallCodeDone), + 
/// Output text annotation added + #[serde(rename = "response.output_text.annotation.added")] + ResponseOutputTextAnnotationAdded(ResponseOutputTextAnnotationAdded), + /// Error occurred + #[serde(rename = "error")] + ResponseError(ResponseError), + + /// Unknown event type + #[serde(untagged)] + Unknown(serde_json::Value), +} + +/// Stream of response events +pub type ResponseStream = OpenAIEventStream<ResponseEvent>; + +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +#[non_exhaustive] +pub struct ResponseCreated { + pub sequence_number: u64, + pub response: ResponseMetadata, +} + +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +#[non_exhaustive] +pub struct ResponseInProgress { + pub sequence_number: u64, + pub response: ResponseMetadata, +} + +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +#[non_exhaustive] +pub struct ResponseOutputItemAdded { + pub sequence_number: u64, + pub output_index: u32, + pub item: OutputItem, +} + +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +#[non_exhaustive] +pub struct ResponseContentPartAdded { + pub sequence_number: u64, + pub item_id: String, + pub output_index: u32, + pub content_index: u32, + pub part: ContentPart, +} + +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +#[non_exhaustive] +pub struct ResponseOutputTextDelta { + pub sequence_number: u64, + pub item_id: String, + pub output_index: u32, + pub content_index: u32, + pub delta: String, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub logprobs: Option<serde_json::Value>, +} + +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +#[non_exhaustive] +pub struct ResponseContentPartDone { + pub sequence_number: u64, + pub item_id: String, + pub output_index: u32, + pub content_index: u32, + pub part: ContentPart, +} + +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +#[non_exhaustive] +pub struct ResponseOutputItemDone { + pub sequence_number: u64, + pub output_index: u32, + pub item: OutputItem, +} + +/// Response completed event +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +#[non_exhaustive] +pub struct ResponseCompleted { + pub sequence_number: u64, + pub response: ResponseMetadata, +} + +/// Response failed event +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +#[non_exhaustive] +pub struct ResponseFailed { + pub sequence_number: u64, + pub response: ResponseMetadata, +} + +/// Response incomplete event +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +#[non_exhaustive] +pub struct ResponseIncomplete { + pub sequence_number: u64, + pub response: ResponseMetadata, +} + +/// Response queued event +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +#[non_exhaustive] +pub struct ResponseQueued { + pub sequence_number: u64, + pub response: ResponseMetadata, +} + +/// Text output completed event +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +#[non_exhaustive] +pub struct ResponseOutputTextDone { + pub sequence_number: u64, + pub item_id: String, + pub output_index: u32, + pub content_index: u32, + pub text: String, + pub logprobs: Option<Vec<serde_json::Value>>, +} + +/// Refusal delta event +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +#[non_exhaustive] +pub struct ResponseRefusalDelta { + pub sequence_number: u64, + pub item_id: String, + pub output_index: u32, + pub content_index: u32, + pub delta: String, +} + +/// Refusal done event +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +#[non_exhaustive] +pub struct ResponseRefusalDone { + pub sequence_number: u64, + pub
item_id: String, + pub output_index: u32, + pub content_index: u32, + pub refusal: String, +} + +/// Function call arguments delta event +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +#[non_exhaustive] +pub struct ResponseFunctionCallArgumentsDelta { + pub sequence_number: u64, + pub item_id: String, + pub output_index: u32, + pub delta: String, +} + +/// Function call arguments done event +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +#[non_exhaustive] +pub struct ResponseFunctionCallArgumentsDone { + pub sequence_number: u64, + pub item_id: String, + pub output_index: u32, + pub arguments: String, +} + +/// Error event +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +#[non_exhaustive] +pub struct ResponseError { + pub sequence_number: u64, + pub code: Option<String>, + pub message: String, + pub param: Option<String>, +} + +/// File search call in progress event +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +#[non_exhaustive] +pub struct ResponseFileSearchCallInProgress { + pub sequence_number: u64, + pub output_index: u32, + pub item_id: String, +} + +/// File search call searching event +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +#[non_exhaustive] +pub struct ResponseFileSearchCallSearching { + pub sequence_number: u64, + pub output_index: u32, + pub item_id: String, +} + +/// File search call completed event +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +#[non_exhaustive] +pub struct ResponseFileSearchCallCompleted { + pub sequence_number: u64, + pub output_index: u32, + pub item_id: String, +} + +/// Web search call in progress event +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +#[non_exhaustive] +pub struct ResponseWebSearchCallInProgress { + pub sequence_number: u64, + pub output_index: u32, + pub item_id: String, +} + +/// Web search call searching event +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +#[non_exhaustive] +pub struct ResponseWebSearchCallSearching { + pub sequence_number: u64, + pub output_index: u32, + pub item_id: String, +} + +/// Web search call completed event +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +#[non_exhaustive] +pub struct ResponseWebSearchCallCompleted { + pub sequence_number: u64, + pub output_index: u32, + pub item_id: String, +} + +/// Reasoning summary part added event +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +#[non_exhaustive] +pub struct ResponseReasoningSummaryPartAdded { + pub sequence_number: u64, + pub item_id: String, + pub output_index: u32, + pub summary_index: u32, + pub part: serde_json::Value, // Could be more specific but using Value for flexibility +} + +/// Reasoning summary part done event +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +#[non_exhaustive] +pub struct ResponseReasoningSummaryPartDone { + pub sequence_number: u64, + pub item_id: String, + pub output_index: u32, + pub summary_index: u32, + pub part: serde_json::Value, +} + +/// Reasoning summary text delta event +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +#[non_exhaustive] +pub struct ResponseReasoningSummaryTextDelta { + pub sequence_number: u64, + pub item_id: String, + pub output_index: u32, + pub summary_index: u32, + pub delta: String, +} + +/// Reasoning summary text done event +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +#[non_exhaustive] +pub struct ResponseReasoningSummaryTextDone { + pub sequence_number: u64, + pub item_id: String, + pub output_index: u32, + pub summary_index: u32, +
pub text: String, +} + +/// Reasoning summary delta event +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +#[non_exhaustive] +pub struct ResponseReasoningSummaryDelta { + pub sequence_number: u64, + pub item_id: String, + pub output_index: u32, + pub summary_index: u32, + pub delta: serde_json::Value, +} + +/// Reasoning summary done event +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +#[non_exhaustive] +pub struct ResponseReasoningSummaryDone { + pub sequence_number: u64, + pub item_id: String, + pub output_index: u32, + pub summary_index: u32, + pub text: String, +} + +/// Image generation call in progress event +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +#[non_exhaustive] +pub struct ResponseImageGenerationCallInProgress { + pub sequence_number: u64, + pub output_index: u32, + pub item_id: String, +} + +/// Image generation call generating event +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +#[non_exhaustive] +pub struct ResponseImageGenerationCallGenerating { + pub sequence_number: u64, + pub output_index: u32, + pub item_id: String, +} + +/// Image generation call partial image event +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +#[non_exhaustive] +pub struct ResponseImageGenerationCallPartialImage { + pub sequence_number: u64, + pub output_index: u32, + pub item_id: String, + pub partial_image_index: u32, + pub partial_image_b64: String, +} + +/// Image generation call completed event +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +#[non_exhaustive] +pub struct ResponseImageGenerationCallCompleted { + pub sequence_number: u64, + pub output_index: u32, + pub item_id: String, +} + +/// MCP call arguments delta event +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +#[non_exhaustive] +pub struct ResponseMcpCallArgumentsDelta { + pub sequence_number: u64, + pub output_index: u32, + pub item_id: String, + pub delta: String, +} + +/// MCP call arguments done event +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +#[non_exhaustive] +pub struct ResponseMcpCallArgumentsDone { + pub sequence_number: u64, + pub output_index: u32, + pub item_id: String, + pub arguments: String, +} + +/// MCP call completed event +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +#[non_exhaustive] +pub struct ResponseMcpCallCompleted { + pub sequence_number: u64, + pub output_index: u32, + pub item_id: String, +} + +/// MCP call failed event +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +#[non_exhaustive] +pub struct ResponseMcpCallFailed { + pub sequence_number: u64, + pub output_index: u32, + pub item_id: String, +} + +/// MCP call in progress event +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +#[non_exhaustive] +pub struct ResponseMcpCallInProgress { + pub sequence_number: u64, + pub output_index: u32, + pub item_id: String, +} + +/// MCP list tools completed event +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +#[non_exhaustive] +pub struct ResponseMcpListToolsCompleted { + pub sequence_number: u64, + pub output_index: u32, + pub item_id: String, +} + +/// MCP list tools failed event +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +#[non_exhaustive] +pub struct ResponseMcpListToolsFailed { + pub sequence_number: u64, + pub output_index: u32, + pub item_id: String, +} + +/// MCP list tools in progress event +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +#[non_exhaustive] +pub struct ResponseMcpListToolsInProgress { + pub 
sequence_number: u64, + pub output_index: u32, + pub item_id: String, +} + +/// Code interpreter call in progress event +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +#[non_exhaustive] +pub struct ResponseCodeInterpreterCallInProgress { + pub sequence_number: u64, + pub output_index: u32, + pub item_id: String, +} + +/// Code interpreter call interpreting event +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +#[non_exhaustive] +pub struct ResponseCodeInterpreterCallInterpreting { + pub sequence_number: u64, + pub output_index: u32, + pub item_id: String, +} + +/// Code interpreter call completed event +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +#[non_exhaustive] +pub struct ResponseCodeInterpreterCallCompleted { + pub sequence_number: u64, + pub output_index: u32, + pub item_id: String, +} + +/// Code interpreter call code delta event +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +#[non_exhaustive] +pub struct ResponseCodeInterpreterCallCodeDelta { + pub sequence_number: u64, + pub output_index: u32, + pub item_id: String, + pub delta: String, +} + +/// Code interpreter call code done event +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +#[non_exhaustive] +pub struct ResponseCodeInterpreterCallCodeDone { + pub sequence_number: u64, + pub output_index: u32, + pub item_id: String, + pub code: String, +} + +/// Response metadata +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +#[non_exhaustive] +pub struct ResponseMetadata { + pub id: String, + #[serde(skip_serializing_if = "Option::is_none")] + pub object: Option<String>, + pub created_at: u64, + pub status: Status, + #[serde(skip_serializing_if = "Option::is_none")] + pub model: Option<String>, + #[serde(skip_serializing_if = "Option::is_none")] + pub usage: Option<Usage>, + #[serde(skip_serializing_if = "Option::is_none")] + pub error: Option<ErrorObject>, + #[serde(skip_serializing_if = "Option::is_none")] + pub incomplete_details: Option<IncompleteDetails>, + #[serde(skip_serializing_if = "Option::is_none")] + pub input: Option<Input>, + #[serde(skip_serializing_if = "Option::is_none")] + pub instructions: Option<String>, + #[serde(skip_serializing_if = "Option::is_none")] + pub max_output_tokens: Option<u32>, + /// Whether the model was run in background mode + #[serde(skip_serializing_if = "Option::is_none")] + pub background: Option<bool>, + /// The service tier that was actually used + #[serde(skip_serializing_if = "Option::is_none")] + pub service_tier: Option<ServiceTier>, + /// The effective value of top_logprobs parameter + #[serde(skip_serializing_if = "Option::is_none")] + pub top_logprobs: Option<u32>, + /// The effective value of max_tool_calls parameter + #[serde(skip_serializing_if = "Option::is_none")] + pub max_tool_calls: Option<u32>, + #[serde(skip_serializing_if = "Option::is_none")] + pub output: Option<Vec<OutputItem>>, + #[serde(skip_serializing_if = "Option::is_none")] + pub parallel_tool_calls: Option<bool>, + #[serde(skip_serializing_if = "Option::is_none")] + pub previous_response_id: Option<String>, + #[serde(skip_serializing_if = "Option::is_none")] + pub reasoning: Option<ReasoningConfig>, + #[serde(skip_serializing_if = "Option::is_none")] + pub store: Option<bool>, + #[serde(skip_serializing_if = "Option::is_none")] + pub temperature: Option<f32>, + #[serde(skip_serializing_if = "Option::is_none")] + pub text: Option<TextConfig>, + #[serde(skip_serializing_if = "Option::is_none")] + pub tool_choice: Option<ToolChoice>, + #[serde(skip_serializing_if = "Option::is_none")] + pub tools: Option<Vec<ToolDefinition>>, + #[serde(skip_serializing_if = "Option::is_none")] + pub top_p: Option<f32>, + #[serde(skip_serializing_if =
"Option::is_none")] + pub truncation: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub user: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub metadata: Option>, + /// Prompt cache key for improved performance + #[serde(skip_serializing_if = "Option::is_none")] + pub prompt_cache_key: Option, + /// Safety identifier for content filtering + #[serde(skip_serializing_if = "Option::is_none")] + pub safety_identifier: Option, +} + +/// Output item +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +#[serde(tag = "type")] +#[serde(rename_all = "snake_case")] +#[non_exhaustive] +pub enum OutputItem { + Message(OutputMessage), + FileSearchCall(FileSearchCallOutput), + FunctionCall(FunctionCall), + WebSearchCall(WebSearchCallOutput), + ComputerCall(ComputerCallOutput), + Reasoning(ReasoningItem), + ImageGenerationCall(ImageGenerationCallOutput), + CodeInterpreterCall(CodeInterpreterCallOutput), + LocalShellCall(LocalShellCallOutput), + McpCall(McpCallOutput), + McpListTools(McpListToolsOutput), + McpApprovalRequest(McpApprovalRequestOutput), + CustomToolCall(CustomToolCallOutput), +} + +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +#[non_exhaustive] +pub struct CustomToolCallOutput { + pub call_id: String, + pub input: String, + pub name: String, + pub id: String, +} + +/// Content part +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +#[non_exhaustive] +pub struct ContentPart { + #[serde(rename = "type")] + pub part_type: String, + pub text: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub annotations: Option>, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub logprobs: Option>, +} + +// ===== RESPONSE COLLECTOR ===== + +/// Collects streaming response events into a complete response +/// Output text annotation added event +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +#[non_exhaustive] +pub struct ResponseOutputTextAnnotationAdded { + pub sequence_number: u64, + pub item_id: String, + pub output_index: u32, + pub content_index: u32, + pub annotation_index: u32, + pub annotation: TextAnnotation, +} + +/// Text annotation object for output text +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +#[non_exhaustive] +pub struct TextAnnotation { + #[serde(rename = "type")] + pub annotation_type: String, + pub text: String, + pub start: u32, + pub end: u32, +} diff --git a/async-openai-wasm/src/util.rs b/async-openai-wasm/src/util.rs index fab17167..27908ab9 100644 --- a/async-openai-wasm/src/util.rs +++ b/async-openai-wasm/src/util.rs @@ -12,10 +12,7 @@ pub(crate) async fn create_file_part( InputSource::VecU8 { filename, vec } => (Body::from(vec), filename), }; - let file_part = reqwest::multipart::Part::stream(stream) - .file_name(file_name) - .mime_str("application/octet-stream") - .unwrap(); + let file_part = reqwest::multipart::Part::stream(stream).file_name(file_name); Ok(file_part) } diff --git a/async-openai-wasm/src/vector_stores.rs b/async-openai-wasm/src/vector_stores.rs index 88cd3fb0..caa5a93e 100644 --- a/async-openai-wasm/src/vector_stores.rs +++ b/async-openai-wasm/src/vector_stores.rs @@ -22,12 +22,12 @@ impl<'c, C: Config> VectorStores<'c, C> { } /// [VectorStoreFiles] API group - pub fn files(&self, vector_store_id: &str) -> VectorStoreFiles { + pub fn files(&self, vector_store_id: &str) -> VectorStoreFiles<'_, C> { VectorStoreFiles::new(self.client, vector_store_id) } /// [VectorStoreFileBatches] API group - pub fn file_batches(&self, vector_store_id: 
&str) -> VectorStoreFileBatches<C> { + pub fn file_batches(&self, vector_store_id: &str) -> VectorStoreFileBatches<'_, C> { VectorStoreFileBatches::new(self.client, vector_store_id) } diff --git a/justfile b/justfile new file mode 100644 index 00000000..34c0a8f1 --- /dev/null +++ b/justfile @@ -0,0 +1,2 @@ +check: + cargo check && cargo check --target wasm32-unknown-unknown \ No newline at end of file
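Taken together, the object-safe `Config` trait (now `Send + Sync`, with impls for `Box<dyn Config>` and `Arc<dyn Config>`) and the new `Responses::create_stream` entry point let a single client type stream response events from any provider. A minimal usage sketch of these changes (assuming a tokio runtime, the `futures` crate for `StreamExt`, a `Default` impl on `CreateResponse`, and an `Input::Text` variant as in this crate's `responses` types; the model name and prompt text are placeholders):

```rust
use async_openai::{
    config::{Config, OpenAIConfig},
    types::responses::{CreateResponse, Input, ResponseEvent},
    Client,
};
use futures::StreamExt;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Any provider works once its config is boxed as a `dyn Config` trait object.
    let config = Box::new(OpenAIConfig::default()) as Box<dyn Config>;
    let client: Client<Box<dyn Config>> = Client::with_config(config);

    let request = CreateResponse {
        model: "gpt-4o".to_string(), // placeholder model name
        input: Input::Text("Say hello.".to_string()), // placeholder prompt
        ..Default::default()
    };

    // `create_stream` forces `stream: Some(true)` in non-byot builds and
    // yields `ResponseEvent`s as the server-sent events arrive.
    let mut stream = client.responses().create_stream(request).await?;
    while let Some(event) = stream.next().await {
        match event? {
            ResponseEvent::ResponseOutputTextDelta(d) => print!("{}", d.delta),
            ResponseEvent::ResponseCompleted(_) => break,
            _ => {} // the enum is #[non_exhaustive]; ignore other events
        }
    }
    Ok(())
}
```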