Skip to content
Merged
Changes from 1 commit
Commits
Show all changes
25 commits
Select commit Hold shift + click to select a range
c9acfed
fix: readme example link (#347)
attila-lin Mar 15, 2025
4c83fa4
feat: Gemini openai compatibility (#353)
DarshanVanol Apr 13, 2025
2f77716
Backoff when OpenAI returns 5xx (#354)
tinco Apr 13, 2025
aeb6d1f
chore: Release
64bit Apr 13, 2025
0e7a629
Implement vector store search, retrieve file content operations (#360)
cfraz89 Jun 1, 2025
ef6817f
[Completions API] Add web search options (#370)
adambenali Jun 1, 2025
939c4cd
Add instructions option to speech request (#374)
emchristiansen Jun 1, 2025
c2f3a6c
feat: Add responses API (#373)
samvrlewis Jun 2, 2025
761468d
chore: update readme; format code (#377)
64bit Jun 2, 2025
43c744b
chore: Release
64bit Jun 2, 2025
8a05a53
fix web search options; skip serializing if none (#379)
spencerbart Jun 3, 2025
de53c00
added copyright material links, Resolves #346 (#380)
DarshanVanol Jun 3, 2025
19c9ba0
add completed state (#384)
JensWalter Jun 7, 2025
7bb433a
feat: adds Default to CompletionUsage (#387)
paulhendricks Jun 7, 2025
2d07228
add flex service tier to chat completions (#385)
spencerbart Jun 7, 2025
097945b
chore: Release
64bit Jun 7, 2025
9b3ecda
Enable dyn dispatch by dyn Config objects (#383)
ifsheldon Jun 14, 2025
482344a
Add missing voice Ballad to enum (#388)
jregistr Jun 14, 2025
be059c2
feat: enhance realtime response types and audio transcription options…
codesoda Jun 29, 2025
7cb57e8
feat: change Prompt integer variants from u16 to u32 for future compa…
paulhendricks Jun 29, 2025
4b52f20
task: Add serialize impl for ApiError (#393)
tomharmon Jun 29, 2025
f9affae
refactor: adding missing fields from Responses API (#394)
paulhendricks Jun 29, 2025
22c3d5e
remove .mime_str(application/octet-stream) (#395)
64bit Jun 29, 2025
483c84f
chore: Release
64bit Jun 29, 2025
3080208
sync upstream
gilljon Aug 8, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
Next Next commit
refactor: adding missing fields from Responses API (64bit#394)
  • Loading branch information
paulhendricks authored Jun 29, 2025
commit f9affaeddb730f89bedef527d25ae87b2ecf7e88
51 changes: 51 additions & 0 deletions async-openai/src/types/responses.rs
Original file line number Diff line number Diff line change
Expand Up @@ -155,6 +155,11 @@ pub struct CreateResponse {
/// performance characteristics, and price points.
pub model: String,

/// Whether to run the model response in the background.
/// boolean or null.
#[serde(skip_serializing_if = "Option::is_none")]
pub background: Option<bool>,

/// Specify additional output data to include in the model response.
///
/// Supported values:
Expand Down Expand Up @@ -188,6 +193,11 @@ pub struct CreateResponse {
#[serde(skip_serializing_if = "Option::is_none")]
pub max_output_tokens: Option<u32>,

/// The maximum number of total calls to built-in tools that can be processed in a response.
/// This maximum number applies across all built-in tool calls, not per individual tool.
/// Any further attempts to call a tool by the model will be ignored.
pub max_tool_calls: Option<u32>,

/// Set of 16 key-value pairs that can be attached to an object. This can be
/// useful for storing additional information about the object in a structured
/// format, and querying for objects via API or the dashboard.
Expand All @@ -206,6 +216,10 @@ pub struct CreateResponse {
#[serde(skip_serializing_if = "Option::is_none")]
pub previous_response_id: Option<String>,

/// Reference to a prompt template and its variables.
#[serde(skip_serializing_if = "Option::is_none")]
pub prompt: Option<PromptConfig>,

/// **o-series models only**: Configuration options for reasoning models.
#[serde(skip_serializing_if = "Option::is_none")]
pub reasoning: Option<ReasoningConfig>,
Expand Down Expand Up @@ -236,6 +250,11 @@ pub struct CreateResponse {
#[serde(skip_serializing_if = "Option::is_none")]
pub store: Option<bool>,

/// If set to true, the model response data will be streamed to the client as it is
/// generated using server-sent events.
#[serde(skip_serializing_if = "Option::is_none")]
pub stream: Option<bool>,

/// What sampling temperature to use, between 0 and 2. Higher values like 0.8
/// will make the output more random, while lower values like 0.2 will make it
/// more focused and deterministic. We generally recommend altering this or
Expand All @@ -259,6 +278,11 @@ pub struct CreateResponse {
#[serde(skip_serializing_if = "Option::is_none")]
pub tools: Option<Vec<ToolDefinition>>,

/// An integer between 0 and 20 specifying the number of most likely tokens to return
/// at each token position, each with an associated log probability.
#[serde(skip_serializing_if = "Option::is_none")]
pub top_logprobs: Option<u32>, // TODO add validation of range

/// An alternative to sampling with temperature, called nucleus sampling,
/// where the model considers the results of the tokens with top_p probability
/// mass. So 0.1 means only the tokens comprising the top 10% probability mass
Expand All @@ -279,6 +303,23 @@ pub struct CreateResponse {
pub user: Option<String>,
}

/// Reference to a prompt template and its variables.
/// Identifies a stored prompt template by id, optionally pinned to a version,
/// with string substitutions for the template's variables.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct PromptConfig {
    /// The unique identifier of the prompt template to use.
    pub id: String,

    /// Optional version of the prompt template.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub version: Option<String>,

    /// Optional map of values to substitute in for variables in your prompt. The substitution
    /// values can either be strings, or other Response input types like images or files.
    /// For now only supporting Strings.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub variables: Option<HashMap<String, String>>,
}

/// Service tier request options.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
#[serde(rename_all = "lowercase")]
Expand Down Expand Up @@ -1323,6 +1364,12 @@ pub struct Response {
/// The array of content items generated by the model.
pub output: Vec<OutputContent>,

/// SDK-only convenience property that contains the aggregated text output from all
/// `output_text` items in the `output` array, if any are present.
/// Supported in the Python and JavaScript SDKs.
#[serde(skip_serializing_if = "Option::is_none")]
pub output_text: Option<String>,

/// Whether parallel tool calls were enabled.
#[serde(skip_serializing_if = "Option::is_none")]
pub parallel_tool_calls: Option<bool>,
Expand All @@ -1335,6 +1382,10 @@ pub struct Response {
#[serde(skip_serializing_if = "Option::is_none")]
pub reasoning: Option<ReasoningConfig>,

/// Whether to store the generated model response for later retrieval via API.
#[serde(skip_serializing_if = "Option::is_none")]
pub store: Option<bool>,

/// The service tier that actually processed this response.
#[serde(skip_serializing_if = "Option::is_none")]
pub service_tier: Option<ServiceTier>,
Expand Down