Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
43 commits
Select commit Hold shift + click to select a range
e61af22
Update to Assistants example (#146)
Strange-Knoll Nov 25, 2023
5d6c838
Add examples tool-call and tool-call-stream (#153)
frankfralick Nov 25, 2023
28d4005
add names (#150)
ifsheldon Nov 25, 2023
923d03a
Link to openai-func-enums (#152)
frankfralick Nov 25, 2023
136a463
In memory files (#154)
monadoid Nov 25, 2023
4e6df12
Spec, readme, and crate description updates (#156)
64bit Nov 25, 2023
131717f
chore: Release
64bit Nov 25, 2023
66f2672
Make tool choice lower case (#158)
dmweis Nov 26, 2023
8b8dc80
Fix: post_form to be Sendable (#157)
katya4oyu Nov 26, 2023
460f00a
chore: Release
64bit Nov 26, 2023
2ba35ff
Add support for rustls-webpki-roots (#168)
xutianyi1999 Dec 28, 2023
b5c83c0
Refactor `types` module (#170)
sharifhsn Jan 5, 2024
73c71cb
Sync updates from Spec (#171)
64bit Jan 5, 2024
a292c3c
add query param to list files (#172)
64bit Jan 5, 2024
8426647
chore: Release
64bit Jan 5, 2024
b85257b
Optional model in ModifyAssistantRequest (#174)
CakeCrusher Jan 7, 2024
ff955a5
update contribution guidelines (#182)
64bit Jan 18, 2024
f28303b
chore: Release
64bit Jan 18, 2024
30f8b6a
fix file test by providing query param
64bit Jan 18, 2024
589fde0
Added dimensions param to embedding request (#185)
vmg-dev Jan 30, 2024
2fdcd21
chore: Release
64bit Jan 30, 2024
b9a6270
fix: CreateTranscriptionRequest language field not convert (#188)
Taoaozw Feb 6, 2024
48d18e0
chore: Release
64bit Feb 6, 2024
21fe408
Add usage information to the run object (#195)
turingbuilder Mar 2, 2024
22284b3
Updates from Spec (#196)
64bit Mar 2, 2024
8cb3f94
chore: Release
64bit Mar 2, 2024
63f6f78
Add Client::build for full customizability during instantiation (#197)
GabrielBianconi Mar 2, 2024
f3916ad
Change std::sleep to tokio's sleep (#200)
sgopalan98 Mar 13, 2024
208bc08
chore: Release
64bit Mar 13, 2024
032f58f
add support for base64 embeddings (#190)
adri1wald Mar 16, 2024
e4a428f
Add vision-chat example (#203)
Gabriel2409 Mar 18, 2024
db4c213
Update Audio APIs from updated spec (#202)
emk Mar 24, 2024
6178070
Upgrade dependencies: Rust crates in Cargo.toml (#204)
Sagebati Mar 31, 2024
12436b3
cargo test working (#207)
64bit Mar 31, 2024
4496ce2
fix: cargo fmt and compiler warnings fixes (#208)
64bit Mar 31, 2024
f70ed12
chore: Release
64bit Mar 31, 2024
fbb9e1b
Merge branch 'upstream' into sync0.20
ifsheldon Apr 10, 2024
ccf1e9b
fixed problems due to code sync
ifsheldon Apr 10, 2024
36cc185
update worker dependency to resolve build issue
ifsheldon Apr 10, 2024
051edb4
update test to fix test compilation issue
ifsheldon Apr 10, 2024
b908cee
add conditional imports
ifsheldon Apr 10, 2024
e9d92a5
change default of InputSource and bring back builders of file-related…
ifsheldon Apr 10, 2024
37758be
update doc
ifsheldon Apr 10, 2024
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 5 additions & 5 deletions async-openai/Cargo.toml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
[package]
name = "async-openai"
version = "0.18.2"
version = "0.20.0"
authors = [
"Himanshu Neema"
]
Expand Down Expand Up @@ -33,16 +33,16 @@ native-tls-vendored = ["reqwest/native-tls-vendored"]

[dependencies]
backoff = {version = "0.4.0", features = ["futures"], optional = true }
base64 = "0.21.0"
base64 = "0.22.0"
futures = "0.3.26"
rand = "0.8.5"
reqwest = { version = "0.11.14", features = ["json", "stream", "multipart"],default-features = false }
reqwest-eventsource = "0.5.0"
reqwest = { version = "0.12.0", features = ["json", "stream", "multipart"],default-features = false }
reqwest-eventsource = "0.6.0"
serde = { version = "1.0.152", features = ["derive", "rc"] }
serde_json = "1.0.93"
thiserror = "1.0.38"
tracing = "0.1.37"
derive_builder = "0.12.0"
derive_builder = "0.20.0"
async-convert = "1.0.0"
secrecy = { version = "0.8.0", features=["serde"] }
tokio = { version = "1.25.0", features = ["fs", "macros"], optional = true }
Expand Down
9 changes: 2 additions & 7 deletions async-openai/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -24,14 +24,12 @@
- It's based on [OpenAI OpenAPI spec](https://github.com/openai/openai-openapi)
- Current features:
- [x] Assistants (Beta)
- [x] Audio (Whisper/TTS)
- [x] Audio
- [x] Chat
- [x] Completions (Legacy)
- [x] Edits (Deprecated)
- [x] Embeddings
- [x] Files
- [x] Fine-Tuning
- [x] Fine-Tunes (Deprecated)
- [x] Images
- [x] Microsoft Azure OpenAI Service
- [x] Models
Expand Down Expand Up @@ -125,7 +123,7 @@ All forms of contributions, such as new features requests, bug fixes, issues, do
A good starting point would be to look at existing [open issues](https://github.com/64bit/async-openai/issues).

To maintain quality of the project, a minimum of the following is a must for code contribution:
- **Documented**: Primary source of doc comments is description field from OpenAPI spec.
- **Names & Documentation**: All struct names, field names and doc comments are from OpenAPI spec. Nested objects in spec without names leave room for making an appropriate name.
- **Tested**: Examples are primary means of testing and should continue to work. For new features supporting example is required.
- **Scope**: Keep scope limited to APIs available in official documents such as [API Reference](https://platform.openai.com/docs/api-reference) or [OpenAPI spec](https://github.com/openai/openai-openapi/). Other LLMs or AI Providers offer OpenAI-compatible APIs, yet they may not always have full parity. In such cases, the OpenAI spec takes precedence.
- **Consistency**: Keep code style consistent across all the "APIs" that library exposes; it creates a great developer experience.
Expand All @@ -135,9 +133,6 @@ This project adheres to [Rust Code of Conduct](https://www.rust-lang.org/policie
## Complimentary Crates
- [openai-func-enums](https://github.com/frankfralick/openai-func-enums) provides procedural macros that make it easier to use this library with OpenAI API's tool calling feature. It also provides derive macros you can add to existing [clap](https://github.com/clap-rs/clap) application subcommands for natural language use of command line tools. It also supports openai's [parallel tool calls](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) and allows you to choose between running multiple tool calls concurrently or on their own OS threads.

## Complimentary Crates
- [openai-func-enums](https://github.com/frankfralick/openai-func-enums) provides procedural macros that make it easier to use this library with OpenAI API's tool calling feature. It also provides derive macros you can add to existing [clap](https://github.com/clap-rs/clap) application subcommands for natural language use of command line tools. It also supports openai's [parallel tool calls](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) and allows you to choose between running multiple tool calls concurrently or own their own OS threads.


## License

Expand Down
27 changes: 25 additions & 2 deletions async-openai/src/audio.rs
Original file line number Diff line number Diff line change
@@ -1,9 +1,12 @@
use bytes::Bytes;

use crate::{
config::Config,
error::OpenAIError,
types::{
CreateSpeechRequest, CreateSpeechResponse, CreateTranscriptionRequest,
CreateTranscriptionResponse, CreateTranslationRequest, CreateTranslationResponse,
CreateTranscriptionResponseJson, CreateTranscriptionResponseVerboseJson,
CreateTranslationRequest, CreateTranslationResponse,
},
Client,
};
Expand All @@ -23,12 +26,32 @@ impl<'c, C: Config> Audio<'c, C> {
pub async fn transcribe(
&self,
request: CreateTranscriptionRequest,
) -> Result<CreateTranscriptionResponse, OpenAIError> {
) -> Result<CreateTranscriptionResponseJson, OpenAIError> {
self.client
.post_form("/audio/transcriptions", request)
.await
}

/// Transcribes audio into the input language.
pub async fn transcribe_verbose_json(
&self,
request: CreateTranscriptionRequest,
) -> Result<CreateTranscriptionResponseVerboseJson, OpenAIError> {
self.client
.post_form("/audio/transcriptions", request)
.await
}

/// Transcribes audio into the input language.
pub async fn transcribe_raw(
&self,
request: CreateTranscriptionRequest,
) -> Result<Bytes, OpenAIError> {
self.client
.post_form_raw("/audio/transcriptions", request)
.await
}

/// Translates audio into English.
pub async fn translate(
&self,
Expand Down
50 changes: 36 additions & 14 deletions async-openai/src/client.rs
Original file line number Diff line number Diff line change
Expand Up @@ -14,10 +14,10 @@ use crate::{
config::{Config, OpenAIConfig},
error::{map_deserialization_error, OpenAIError, WrappedError},
moderation::Moderations,
edit::Edits,
file::Files,
image::Images,
Chat, Completions, Embeddings, Models, FineTunes, FineTuning, Assistants, Threads, Audio};
Assistants, Audio, Chat, Completions, Embeddings, FineTuning, Models, Threads,
};

#[derive(Debug, Clone)]
/// Client is a container for config, backoff and http_client
Expand All @@ -42,6 +42,21 @@ impl Client<OpenAIConfig> {
}

impl<C: Config> Client<C> {
/// Create client with a custom HTTP client, OpenAI config, and backoff.
pub fn build(
http_client: reqwest::Client,
config: C,
#[cfg(feature = "backoff")]
backoff: backoff::ExponentialBackoff,
) -> Self {
Self {
http_client,
config,
#[cfg(feature = "backoff")]
backoff,
}
}

/// Create client with [OpenAIConfig] or [crate::config::AzureConfig]
pub fn with_config(config: C) -> Self {
Self {
Expand Down Expand Up @@ -84,12 +99,6 @@ impl<C: Config> Client<C> {
Chat::new(self)
}

/// To call [Edits] group related APIs using this client.
#[deprecated(since = "0.15.0", note = "By OpenAI")]
pub fn edits(&self) -> Edits<C> {
Edits::new(self)
}

/// To call [Images] group related APIs using this client.
pub fn images(&self) -> Images<C> {
Images::new(self)
Expand All @@ -105,12 +114,6 @@ impl<C: Config> Client<C> {
Files::new(self)
}

/// To call [FineTunes] group related APIs using this client.
#[deprecated(since = "0.15.0", note = "By OpenAI")]
pub fn fine_tunes(&self) -> FineTunes<C> {
FineTunes::new(self)
}

/// To call [FineTuning] group related APIs using this client.
pub fn fine_tuning(&self) -> FineTuning<C> {
FineTuning::new(self)
Expand Down Expand Up @@ -230,6 +233,25 @@ impl<C: Config> Client<C> {
self.execute(request_maker).await
}

/// POST a form at {path} and return the response body
pub(crate) async fn post_form_raw<F>(&self, path: &str, form: F) -> Result<Bytes, OpenAIError>
where
reqwest::multipart::Form: async_convert::TryFrom<F, Error = OpenAIError>,
F: Clone,
{
let request_maker = || async {
Ok(self
.http_client
.post(self.config.url(path))
.query(&self.config.query())
.headers(self.config.headers())
.multipart(async_convert::TryFrom::try_from(form.clone()).await?)
.build()?)
};

self.execute_raw(request_maker).await
}

/// POST a form at {path} and deserialize the response body
pub(crate) async fn post_form<O, F>(&self, path: &str, form: F) -> Result<O, OpenAIError>
where
Expand Down
26 changes: 0 additions & 26 deletions async-openai/src/edit.rs

This file was deleted.

83 changes: 80 additions & 3 deletions async-openai/src/embedding.rs
Original file line number Diff line number Diff line change
@@ -1,7 +1,10 @@
use crate::{
config::Config,
error::OpenAIError,
types::{CreateEmbeddingRequest, CreateEmbeddingResponse},
types::{
CreateBase64EmbeddingResponse, CreateEmbeddingRequest, CreateEmbeddingResponse,
EncodingFormat,
},
Client,
};

Expand All @@ -23,14 +26,36 @@ impl<'c, C: Config> Embeddings<'c, C> {
&self,
request: CreateEmbeddingRequest,
) -> Result<CreateEmbeddingResponse, OpenAIError> {
if matches!(request.encoding_format, Some(EncodingFormat::Base64)) {
return Err(OpenAIError::InvalidArgument(
"When encoding_format is base64, use Embeddings::create_base64".into(),
));
}
self.client.post("/embeddings", request).await
}

/// Creates an embedding vector representing the input text.
///
/// The response will contain the embedding in base64 format.
pub async fn create_base64(
&self,
request: CreateEmbeddingRequest,
) -> Result<CreateBase64EmbeddingResponse, OpenAIError> {
if !matches!(request.encoding_format, Some(EncodingFormat::Base64)) {
return Err(OpenAIError::InvalidArgument(
"When encoding_format is not base64, use Embeddings::create".into(),
));
}

self.client.post("/embeddings", request).await
}
}

#[cfg(test)]
mod tests {
use crate::error::OpenAIError;
use crate::types::{CreateEmbeddingResponse, Embedding, EncodingFormat};
use crate::{types::CreateEmbeddingRequestArgs, Client};
use crate::types::{CreateEmbeddingResponse, Embedding};

#[tokio::test]
async fn test_embedding_string() {
Expand Down Expand Up @@ -122,9 +147,61 @@ mod tests {

assert!(response.is_ok());

let CreateEmbeddingResponse { mut data, ..} = response.unwrap();
let CreateEmbeddingResponse { mut data, .. } = response.unwrap();
assert_eq!(data.len(), 1);
let Embedding { embedding, .. } = data.pop().unwrap();
assert_eq!(embedding.len(), dimensions as usize);
}

#[tokio::test]
async fn test_cannot_use_base64_encoding_with_normal_create_request() {
let client = Client::new();

const MODEL: &str = "text-embedding-ada-002";
const INPUT: &str = "You shall not pass.";

let b64_request = CreateEmbeddingRequestArgs::default()
.model(MODEL)
.input(INPUT)
.encoding_format(EncodingFormat::Base64)
.build()
.unwrap();
let b64_response = client.embeddings().create(b64_request).await;
assert!(matches!(b64_response, Err(OpenAIError::InvalidArgument(_))));
}

#[tokio::test]
async fn test_embedding_create_base64() {
let client = Client::new();

const MODEL: &str = "text-embedding-ada-002";
const INPUT: &str = "CoLoop will eat the other qual research tools...";

let b64_request = CreateEmbeddingRequestArgs::default()
.model(MODEL)
.input(INPUT)
.encoding_format(EncodingFormat::Base64)
.build()
.unwrap();
let b64_response = client
.embeddings()
.create_base64(b64_request)
.await
.unwrap();
let b64_embedding = b64_response.data.into_iter().next().unwrap().embedding;
let b64_embedding: Vec<f32> = b64_embedding.into();

let request = CreateEmbeddingRequestArgs::default()
.model(MODEL)
.input(INPUT)
.build()
.unwrap();
let response = client.embeddings().create(request).await.unwrap();
let embedding = response.data.into_iter().next().unwrap().embedding;

assert_eq!(b64_embedding.len(), embedding.len());
for (b64, normal) in b64_embedding.iter().zip(embedding.iter()) {
assert!((b64 - normal).abs() < 1e-6);
}
}
}
1 change: 1 addition & 0 deletions async-openai/src/file.rs
Original file line number Diff line number Diff line change
Expand Up @@ -55,6 +55,7 @@ impl<'c, C: Config> Files<'c, C> {
}

#[cfg(test)]
#[cfg(not(feature = "wasm"))]
mod tests {
use crate::{types::CreateFileRequestArgs, Client};

Expand Down
Loading