Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
28 commits
Select commit Hold shift + click to select a range
75a7c7a
#191: Add simple ollama chat example (#336)
matthewhaynesonline Mar 9, 2025
c9acfed
fix: readme example link (#347)
attila-lin Mar 15, 2025
4c83fa4
feat: Gemini openai compatibility (#353)
DarshanVanol Apr 13, 2025
2f77716
Backoff when OpenAI returns 5xx (#354)
tinco Apr 13, 2025
aeb6d1f
chore: Release
64bit Apr 13, 2025
0e7a629
Implement vector store search, retrieve file content operations (#360)
cfraz89 Jun 1, 2025
ef6817f
[Completions API] Add web search options (#370)
adambenali Jun 1, 2025
939c4cd
Add instructions option to speech request (#374)
emchristiansen Jun 1, 2025
c2f3a6c
feat: Add responses API (#373)
samvrlewis Jun 2, 2025
761468d
chore: update readme; format code (#377)
64bit Jun 2, 2025
43c744b
chore: Release
64bit Jun 2, 2025
8a05a53
fix web search options; skip serializing if none (#379)
spencerbart Jun 3, 2025
de53c00
added copyright material links, Resolves #346 (#380)
DarshanVanol Jun 3, 2025
19c9ba0
add completed state (#384)
JensWalter Jun 7, 2025
7bb433a
feat: adds Default to CompletionUsage (#387)
paulhendricks Jun 7, 2025
2d07228
add flex service tier to chat completions (#385)
spencerbart Jun 7, 2025
097945b
chore: Release
64bit Jun 7, 2025
9b3ecda
Enable dyn dispatch by dyn Config objects (#383)
ifsheldon Jun 14, 2025
482344a
Add missing voice Ballad to enum (#388)
jregistr Jun 14, 2025
be059c2
feat: enhance realtime response types and audio transcription options…
codesoda Jun 29, 2025
7cb57e8
feat: change Prompt integer variants from u16 to u32 for future compa…
paulhendricks Jun 29, 2025
4b52f20
task: Add serialize impl for ApiError (#393)
tomharmon Jun 29, 2025
f9affae
refactor: adding missing fields from Responses API (#394)
paulhendricks Jun 29, 2025
22c3d5e
remove .mime_str(application/octet-stream) (#395)
64bit Jun 29, 2025
483c84f
chore: Release
64bit Jun 29, 2025
20c580e
Merge remote-tracking branch 'upstream' into kczimm/spiceai
kczimm Aug 15, 2025
2436235
add Scale and Priority to ServiceTier
kczimm Aug 15, 2025
3f7f4ee
Merge pull request #14 from spiceai/kczimm/service-tier-priority
kczimm Aug 15, 2025
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -5,4 +5,4 @@ default-members = ["async-openai", "async-openai-*"]
resolver = "2"

[workspace.package]
rust-version = "1.75"
rust-version = "1.75"
2 changes: 1 addition & 1 deletion async-openai-macros/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -16,4 +16,4 @@ proc-macro = true
[dependencies]
syn = { version = "2.0", features = ["full"] }
quote = "1.0"
proc-macro2 = "1.0"
proc-macro2 = "1.0"
2 changes: 1 addition & 1 deletion async-openai/Cargo.toml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
[package]
name = "async-openai"
version = "0.28.0"
version = "0.29.0"
authors = ["Himanshu Neema"]
categories = ["api-bindings", "web-programming", "asynchronous"]
keywords = ["openai", "async", "openapi", "ai"]
Expand Down
22 changes: 20 additions & 2 deletions async-openai/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,7 @@
- [x] Moderations
- [x] Organizations | Administration (partially implemented)
- [x] Realtime (Beta) (partially implemented)
- [x] Responses (partially implemented)
- [x] Uploads
- Bring your own custom types for Request or Response objects.
- SSE streaming on available APIs
Expand Down Expand Up @@ -140,13 +141,30 @@ This can be useful in many scenarios:
- To avoid verbose types.
- To escape deserialization errors.

Visit [examples/bring-your-own-type](https://github.com/64bit/async-openai/tree/main/examples/bring-your-own-type) directory to learn more.
Visit [examples/bring-your-own-type](https://github.com/64bit/async-openai/tree/main/examples/bring-your-own-type)
directory to learn more.

## Dynamic Dispatch for Different Providers

For any struct that implements `Config` trait, you can wrap it in a smart pointer and cast the pointer to `dyn Config`
trait object, then your client can accept any wrapped configuration type.

For example,

```rust
use async_openai::{Client, config::Config, config::OpenAIConfig};

let openai_config = OpenAIConfig::default();
// You can use `std::sync::Arc` to wrap the config as well
let config = Box::new(openai_config) as Box<dyn Config>;
let client: Client<Box<dyn Config>> = Client::with_config(config);
```

## Contributing

Thank you for taking the time to contribute and improve the project. I'd be happy to have you!

All forms of contributions, such as new features requests, bug fixes, issues, documentation, testing, comments, [examples](../examples) etc. are welcome.
All forms of contributions, such as new feature requests, bug fixes, issues, documentation, testing, comments, [examples](https://github.com/64bit/async-openai/tree/main/examples) etc. are welcome.

A good starting point would be to look at existing [open issues](https://github.com/64bit/async-openai/issues).

Expand Down
24 changes: 22 additions & 2 deletions async-openai/src/client.rs
Original file line number Diff line number Diff line change
Expand Up @@ -8,13 +8,13 @@ use serde::{de::DeserializeOwned, Deserialize, Serialize};

use crate::{
config::{Config, OpenAIConfig},
error::{map_deserialization_error, OpenAIError, WrappedError},
error::{map_deserialization_error, ApiError, OpenAIError, WrappedError},
file::Files,
image::Images,
moderation::Moderations,
traits::AsyncTryFrom,
Assistants, Audio, AuditLogs, Batches, Chat, Completions, Embeddings, FineTuning, Invites,
Models, Projects, Threads, Uploads, Users, VectorStores,
Models, Projects, Responses, Threads, Uploads, Users, VectorStores,
};

#[derive(Debug, Clone, Default)]
Expand Down Expand Up @@ -162,6 +162,11 @@ impl<C: Config> Client<C> {
Projects::new(self)
}

/// To call [Responses] group related APIs using this client.
///
/// Returns a lightweight accessor that borrows this client; each call
/// constructs a fresh `Responses` wrapper (no state is cached).
pub fn responses(&self) -> Responses<C> {
Responses::new(self)
}

pub fn config(&self) -> &C {
&self.config
}
Expand Down Expand Up @@ -345,6 +350,21 @@ impl<C: Config> Client<C> {
.map_err(OpenAIError::Reqwest)
.map_err(backoff::Error::Permanent)?;

if status.is_server_error() {
// OpenAI does not guarantee server errors are returned as JSON so we cannot deserialize them.
let message: String = String::from_utf8_lossy(&bytes).into_owned();
tracing::warn!("Server error: {status} - {message}");
return Err(backoff::Error::Transient {
err: OpenAIError::ApiError(ApiError {
message,
r#type: None,
param: None,
code: None,
}),
retry_after: None,
});
}

// Deserialize response body from either error object or actual response object
if !status.is_success() {
let wrapped_error: WrappedError = serde_json::from_slice(bytes.as_ref())
Expand Down
80 changes: 79 additions & 1 deletion async-openai/src/config.rs
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@ pub const OPENAI_BETA_HEADER: &str = "OpenAI-Beta";

/// [crate::Client] relies on this for every API call on OpenAI
/// or Azure OpenAI service
pub trait Config: Clone {
pub trait Config: Send + Sync {
fn headers(&self) -> HeaderMap;
fn url(&self, path: &str) -> String;
fn query(&self) -> Vec<(&str, &str)>;
Expand All @@ -27,6 +27,32 @@ pub trait Config: Clone {
fn api_key(&self) -> Arc<SecretString>;
}

/// Macro to implement Config trait for pointer types with dyn objects
macro_rules! impl_config_for_ptr {
($t:ty) => {
impl Config for $t {
fn headers(&self) -> HeaderMap {
self.as_ref().headers()
}
fn url(&self, path: &str) -> String {
self.as_ref().url(path)
}
fn query(&self) -> Vec<(&str, &str)> {
self.as_ref().query()
}
fn api_base(&self) -> &str {
self.as_ref().api_base()
}
fn api_key(&self) -> &SecretString {
self.as_ref().api_key()
}
}
};
}

impl_config_for_ptr!(Box<dyn Config>);
impl_config_for_ptr!(std::sync::Arc<dyn Config>);

/// Configuration for OpenAI API
#[derive(Clone, Debug, Deserialize)]
#[serde(default)]
Expand Down Expand Up @@ -239,3 +265,55 @@ impl Config for AzureConfig {
vec![("api-version", &self.api_version)]
}
}

#[cfg(test)]
mod test {
    use super::*;
    use crate::types::{
        ChatCompletionRequestMessage, ChatCompletionRequestUserMessage, CreateChatCompletionRequest,
    };
    use crate::Client;
    use std::sync::Arc;

    /// A `Client` must be constructible from both `Box<dyn Config>` and
    /// `Arc<dyn Config>`, and the Arc-backed client must remain cloneable.
    #[test]
    fn test_client_creation() {
        unsafe { std::env::set_var("OPENAI_API_KEY", "test") }
        let base_config = OpenAIConfig::default();

        // Boxed trait object.
        let boxed_config = Box::new(base_config.clone()) as Box<dyn Config>;
        let boxed_client = Client::with_config(boxed_config);
        assert!(boxed_client.config().url("").ends_with("/v1"));

        // Arc trait object; a cloned client shares the same configuration.
        let shared_config = Arc::new(base_config) as Arc<dyn Config>;
        let shared_client = Client::with_config(shared_config);
        assert!(shared_client.config().url("").ends_with("/v1"));
        let clone_of_client = shared_client.clone();
        assert!(clone_of_client.config().url("").ends_with("/v1"));
    }

    /// Compile-time exercise: a `Client<Box<dyn Config>>` can drive the chat
    /// API. The request future is built but deliberately never awaited, so no
    /// network traffic happens.
    async fn dynamic_dispatch_compiles(client: &Client<Box<dyn Config>>) {
        let request = CreateChatCompletionRequest {
            model: "gpt-4o".to_string(),
            messages: vec![ChatCompletionRequestMessage::User(
                ChatCompletionRequestUserMessage {
                    content: "Hello, world!".into(),
                    ..Default::default()
                },
            )],
            ..Default::default()
        };
        let _ = client.chat().create(request);
    }

    /// Both OpenAI and Azure configs must work behind `Box<dyn Config>`,
    /// including when the clients are moved into spawned tasks — this is what
    /// requires the `Config: Send + Sync` bound.
    #[tokio::test]
    async fn test_dynamic_dispatch() {
        let oai_cfg = OpenAIConfig::default();
        let azure_cfg = AzureConfig::default();

        let azure_client = Client::with_config(Box::new(azure_cfg.clone()) as Box<dyn Config>);
        let oai_client = Client::with_config(Box::new(oai_cfg.clone()) as Box<dyn Config>);

        let _ = dynamic_dispatch_compiles(&azure_client).await;
        let _ = dynamic_dispatch_compiles(&oai_client).await;

        let _ = tokio::spawn(async move { dynamic_dispatch_compiles(&azure_client).await });
        let _ = tokio::spawn(async move { dynamic_dispatch_compiles(&oai_client).await });
    }
}
10 changes: 5 additions & 5 deletions async-openai/src/error.rs
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
//! Errors originating from API calls, parsing responses, and reading-or-writing to the file system.
use serde::Deserialize;
use serde::{Deserialize, Serialize};

#[derive(Debug, thiserror::Error)]
pub enum OpenAIError {
Expand Down Expand Up @@ -28,7 +28,7 @@ pub enum OpenAIError {
}

/// OpenAI API returns error object on failure
#[derive(Debug, Deserialize, Clone)]
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct ApiError {
pub message: String,
pub r#type: Option<String>,
Expand Down Expand Up @@ -62,9 +62,9 @@ impl std::fmt::Display for ApiError {
}

/// Wrapper to deserialize the error object nested in "error" JSON key
#[derive(Debug, Deserialize)]
pub(crate) struct WrappedError {
pub(crate) error: ApiError,
#[derive(Debug, Deserialize, Serialize)]
pub struct WrappedError {
pub error: ApiError,
}

impl From<serde_json::Error> for OpenAIError {
Expand Down
18 changes: 18 additions & 0 deletions async-openai/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -94,6 +94,22 @@
//! # });
//!```
//!
//! ## Dynamic Dispatch for Different Providers
//!
//! For any struct that implements `Config` trait, you can wrap it in a smart pointer and cast the pointer to `dyn Config`
//! trait object, then your client can accept any wrapped configuration type.
//!
//! For example,
//! ```
//! use async_openai::{Client, config::Config, config::OpenAIConfig};
//! unsafe { std::env::set_var("OPENAI_API_KEY", "only for doc test") }
//!
//! let openai_config = OpenAIConfig::default();
//! // You can use `std::sync::Arc` to wrap the config as well
//! let config = Box::new(openai_config) as Box<dyn Config>;
//! let client: Client<Box<dyn Config> > = Client::with_config(config);
//! ```
//!
//! ## Microsoft Azure
//!
//! ```
Expand Down Expand Up @@ -146,6 +162,7 @@ mod project_api_keys;
mod project_service_accounts;
mod project_users;
mod projects;
mod responses;
mod runs;
mod steps;
mod threads;
Expand Down Expand Up @@ -177,6 +194,7 @@ pub use project_api_keys::ProjectAPIKeys;
pub use project_service_accounts::ProjectServiceAccounts;
pub use project_users::ProjectUsers;
pub use projects::Projects;
pub use responses::Responses;
pub use runs::Runs;
pub use steps::Steps;
pub use threads::Threads;
Expand Down
29 changes: 29 additions & 0 deletions async-openai/src/responses.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,29 @@
use crate::{
config::Config,
error::OpenAIError,
types::responses::{CreateResponse, Response},
Client,
};

/// Given text input or a list of context items, the model will generate a response.
///
/// Related guide: [Responses API](https://platform.openai.com/docs/guides/responses)
pub struct Responses<'c, C: Config> {
// Borrowed handle to the shared HTTP client; this wrapper holds no state of its own.
client: &'c Client<C>,
}

impl<'c, C: Config> Responses<'c, C> {
/// Constructs a new Responses client borrowing the given [`Client`].
pub fn new(client: &'c Client<C>) -> Self {
Self { client }
}

/// Creates a model response for the given input.
///
/// POSTs `request` to the `/responses` endpoint and deserializes the reply.
// NOTE(review): `byot` ("bring your own types", from async-openai-macros)
// presumably also generates a variant generic over any Serialize request /
// DeserializeOwned response — confirm against the macro's documentation.
#[crate::byot(
T0 = serde::Serialize,
R = serde::de::DeserializeOwned
)]
pub async fn create(&self, request: CreateResponse) -> Result<Response, OpenAIError> {
self.client.post("/responses", request).await
}
}
9 changes: 8 additions & 1 deletion async-openai/src/types/audio.rs
Original file line number Diff line number Diff line change
Expand Up @@ -40,6 +40,7 @@ pub enum Voice {
#[default]
Alloy,
Ash,
Ballad,
Coral,
Echo,
Fable,
Expand Down Expand Up @@ -188,10 +189,16 @@ pub struct CreateSpeechRequest {
/// One of the available [TTS models](https://platform.openai.com/docs/models/tts): `tts-1` or `tts-1-hd`
pub model: SpeechModel,

/// The voice to use when generating the audio. Supported voices are `alloy`, `ash`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage` and `shimmer`.
/// The voice to use when generating the audio. Supported voices are `alloy`, `ash`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer` and `verse`.

/// Previews of the voices are available in the [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options).
pub voice: Voice,

/// Control the voice of your generated audio with additional instructions.
/// Does not work with `tts-1` or `tts-1-hd`.
#[serde(skip_serializing_if = "Option::is_none")]
pub instructions: Option<String>,

/// The format to audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, `wav`, and `pcm`.
#[serde(skip_serializing_if = "Option::is_none")]
pub response_format: Option<SpeechResponseFormat>,
Expand Down
Loading