10 changes: 5 additions & 5 deletions FirebaseAI/Sources/FirebaseAI.swift
@@ -42,7 +42,7 @@ public final class FirebaseAI: Sendable {
)
// Verify that the `FirebaseAI` instance is always configured with the production endpoint since
// this is the public API surface for creating an instance.
- assert(instance.apiConfig.service.endpoint == .firebaseVertexAIProd)
+ assert(instance.apiConfig.service.endpoint == .firebaseProxyProd)
assert(instance.apiConfig.version == .v1beta)
return instance
}
@@ -150,7 +150,7 @@ public final class FirebaseAI: Sendable {
let location: String?

static let defaultVertexAIAPIConfig = APIConfig(
- service: .vertexAI(endpoint: .firebaseVertexAIProd),
+ service: .vertexAI(endpoint: .firebaseProxyProd),
version: .v1beta
)

@@ -209,7 +209,7 @@ public final class FirebaseAI: Sendable {
switch apiConfig.service {
case .vertexAI:
return vertexAIModelResourceName(modelName: modelName)
- case .developer:
+ case .googleAI:
return developerModelResourceName(modelName: modelName)
}
}
@@ -233,10 +233,10 @@ public final class FirebaseAI: Sendable {

private func developerModelResourceName(modelName: String) -> String {
switch apiConfig.service.endpoint {
- case .firebaseVertexAIStaging, .firebaseVertexAIProd:
+ case .firebaseProxyStaging, .firebaseProxyProd:
let projectID = firebaseInfo.projectID
return "projects/\(projectID)/models/\(modelName)"
- case .generativeLanguage:
+ case .googleAIBypassProxy:
return "models/\(modelName)"
}
}
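Note (not part of the diff): to make the endpoint rename above concrete, here is a minimal standalone Swift sketch of the resource-name logic in `developerModelResourceName`; the local `Endpoint` enum stands in for `APIConfig.Service.Endpoint`, and the project ID and model name are hypothetical.

```swift
// Standalone sketch mirroring developerModelResourceName in the diff above.
// The Endpoint enum is a local stand-in; the project ID and model name are made up.
enum Endpoint {
  case firebaseProxyProd, firebaseProxyStaging, googleAIBypassProxy
}

func developerModelResourceName(projectID: String, modelName: String,
                                endpoint: Endpoint) -> String {
  switch endpoint {
  case .firebaseProxyProd, .firebaseProxyStaging:
    // Requests routed through the Firebase proxy use a project-scoped resource name.
    return "projects/\(projectID)/models/\(modelName)"
  case .googleAIBypassProxy:
    // Requests sent directly to the Gemini Developer API use the short form.
    return "models/\(modelName)"
  }
}

// Prints "projects/my-project/models/gemini-2.0-flash"
print(developerModelResourceName(projectID: "my-project",
                                 modelName: "gemini-2.0-flash",
                                 endpoint: .firebaseProxyProd))
```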
8 changes: 4 additions & 4 deletions FirebaseAI/Sources/GenerativeModel.swift
@@ -277,7 +277,7 @@ public final class GenerativeModel: Sendable {
let requestContent = switch apiConfig.service {
case .vertexAI:
content
- case .developer:
+ case .googleAI:
// The `role` defaults to "user" but is ignored in `countTokens`. However, it is erroneously
// counted towards the prompt and total token count when using the Developer API
// backend; set to `nil` to avoid token count discrepancies between `countTokens` and
@@ -290,10 +290,10 @@
// "models/model-name". This field is unaltered by the Firebase backend before forwarding the
// request to the Generative Language backend, which expects the form "models/model-name".
let generateContentRequestModelResourceName = switch apiConfig.service {
- case .vertexAI, .developer(endpoint: .generativeLanguage):
+ case .vertexAI, .googleAI(endpoint: .googleAIBypassProxy):
modelResourceName
- case .developer(endpoint: .firebaseVertexAIProd),
- .developer(endpoint: .firebaseVertexAIStaging):
+ case .googleAI(endpoint: .firebaseProxyProd),
+ .googleAI(endpoint: .firebaseProxyStaging):
"models/\(modelName)"
}

33 changes: 21 additions & 12 deletions FirebaseAI/Sources/Types/Internal/APIConfig.swift
@@ -13,7 +13,7 @@
// limitations under the License.

/// Configuration for the generative AI backend API used by this SDK.
- struct APIConfig: Sendable, Hashable {
+ struct APIConfig: Sendable, Hashable, Encodable {
/// The service to use for generative AI.
///
/// This controls which backend API is used by the SDK.
@@ -39,7 +39,7 @@ extension APIConfig {
/// See [Vertex AI and Google AI
/// differences](https://cloud.google.com/vertex-ai/generative-ai/docs/overview#how-gemini-vertex-different-gemini-aistudio)
/// for a comparison of the two [API services](https://google.aip.dev/9#api-service).
- enum Service: Hashable {
+ enum Service: Hashable, Encodable {
/// The Gemini Enterprise API provided by Vertex AI.
///
/// See the [Cloud
@@ -50,7 +50,7 @@
/// The Gemini Developer API provided by Google AI.
///
/// See the [Google AI docs](https://ai.google.dev/gemini-api/docs) for more details.
- case developer(endpoint: Endpoint)
+ case googleAI(endpoint: Endpoint)

/// The specific network address to use for API requests.
///
@@ -59,7 +59,7 @@
switch self {
case let .vertexAI(endpoint: endpoint):
return endpoint
- case let .developer(endpoint: endpoint):
+ case let .googleAI(endpoint: endpoint):
return endpoint
}
}
@@ -68,21 +68,30 @@

extension APIConfig.Service {
/// Network addresses for generative AI API services.
- enum Endpoint: String {
- /// The Firebase AI SDK production endpoint.
- case firebaseVertexAIProd = "https://firebasevertexai.googleapis.com"
+ enum Endpoint: String, Encodable {
+ /// The Firebase proxy production endpoint.
+ ///
+ /// This endpoint supports both Google AI and Vertex AI.
+ case firebaseProxyProd = "https://firebasevertexai.googleapis.com"

- /// The Firebase AI SDK staging endpoint; for SDK development and testing only.
- case firebaseVertexAIStaging = "https://staging-firebasevertexai.sandbox.googleapis.com"
+ /// The Firebase proxy staging endpoint; for SDK development and testing only.
+ ///
+ /// This endpoint supports both the Gemini Developer API (commonly referred to as Google AI)
+ /// and the Gemini API in Vertex AI (commonly referred to simply as Vertex AI).
+ case firebaseProxyStaging = "https://staging-firebasevertexai.sandbox.googleapis.com"

- /// The Gemini Developer API production endpoint; for SDK development and testing only.
- case generativeLanguage = "https://generativelanguage.googleapis.com"
+ /// The Gemini Developer API (Google AI) direct production endpoint; for SDK development and
+ /// testing only.
+ ///
+ /// This bypasses the Firebase proxy and directly connects to the Gemini Developer API
+ /// (Google AI) backend. This endpoint only supports the Gemini Developer API, not Vertex AI.
+ case googleAIBypassProxy = "https://generativelanguage.googleapis.com"
}
}

extension APIConfig {
/// Versions of the configured API service (`APIConfig.Service`).
- enum Version: String {
+ enum Version: String, Encodable {
/// The stable channel for version 1 of the API.
case v1

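As a reference for how the renamed cases combine, the sketch below pairs each service with the endpoints used elsewhere in this PR. This is an illustrative sketch only: `APIConfig` is internal to the SDK, so it compiles only inside the module, and per the doc comment above `.googleAIBypassProxy` supports only the Gemini Developer API, so it is never paired with `.vertexAI`.

```swift
// Vertex AI is always reached through the Firebase proxy.
let vertexAIConfig = APIConfig(service: .vertexAI(endpoint: .firebaseProxyProd),
                               version: .v1beta)

// The Gemini Developer API (Google AI) normally goes through the Firebase proxy too.
let googleAIConfig = APIConfig(service: .googleAI(endpoint: .firebaseProxyProd),
                               version: .v1beta)

// For SDK development and testing only, the proxy can be bypassed; this endpoint
// supports only the Gemini Developer API, not Vertex AI.
let googleAIDirectConfig = APIConfig(service: .googleAI(endpoint: .googleAIBypassProxy),
                                     version: .v1beta)
```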
@@ -71,7 +71,7 @@ extension CountTokensRequest: Encodable {
switch apiConfig.service {
case .vertexAI:
try encodeForVertexAI(to: encoder)
- case .developer:
+ case .googleAI:
try encodeForDeveloper(to: encoder)
}
}
4 changes: 2 additions & 2 deletions FirebaseAI/Sources/Types/Public/Backend.swift
@@ -25,15 +25,15 @@ public struct Backend {
/// for a list of supported locations.
public static func vertexAI(location: String = "us-central1") -> Backend {
return Backend(
- apiConfig: APIConfig(service: .vertexAI(endpoint: .firebaseVertexAIProd), version: .v1beta),
+ apiConfig: APIConfig(service: .vertexAI(endpoint: .firebaseProxyProd), version: .v1beta),
location: location
)
}

/// Initializes a `Backend` configured for the Google Developer API.
public static func googleAI() -> Backend {
return Backend(
- apiConfig: APIConfig(service: .developer(endpoint: .firebaseVertexAIProd), version: .v1beta),
+ apiConfig: APIConfig(service: .googleAI(endpoint: .firebaseProxyProd), version: .v1beta),
location: nil
)
}
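Caller-facing usage is unchanged by the internal renames. The sketch below assumes the SDK's public `FirebaseAI.firebaseAI(backend:)` entry point (not shown in this diff) and an illustrative model name.

```swift
import FirebaseAI

// Gemini Developer API (Google AI), routed through the Firebase proxy.
let googleAI = FirebaseAI.firebaseAI(backend: .googleAI())

// Gemini API in Vertex AI, defaulting to the "us-central1" location.
let vertexAI = FirebaseAI.firebaseAI(backend: .vertexAI())

// Model creation looks the same for both backends; the model name is illustrative.
let model = googleAI.generativeModel(modelName: "gemini-2.0-flash")
```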
@@ -60,7 +60,7 @@ struct CountTokensIntegrationTests {
switch config.apiConfig.service {
case .vertexAI:
#expect(response.totalBillableCharacters == 16)
- case .developer:
+ case .googleAI:
#expect(response.totalBillableCharacters == nil)
}
#expect(response.promptTokensDetails.count == 1)
@@ -71,7 +71,7 @@

@Test(
/* System instructions are not supported on the v1 Developer API. */
- arguments: InstanceConfig.allConfigsExceptDeveloperV1
+ arguments: InstanceConfig.allConfigsExceptGoogleAI_v1
)
func countTokens_text_systemInstruction(_ config: InstanceConfig) async throws {
let model = FirebaseAI.componentInstance(config).generativeModel(
@@ -87,7 +87,7 @@
switch config.apiConfig.service {
case .vertexAI:
#expect(response.totalBillableCharacters == 61)
- case .developer:
+ case .googleAI:
#expect(response.totalBillableCharacters == nil)
}
#expect(response.promptTokensDetails.count == 1)
@@ -98,7 +98,7 @@

@Test(arguments: [
/* System instructions are not supported on the v1 Developer API. */
- InstanceConfig.developerV1Spark,
+ InstanceConfig.googleAI_v1_freeTier_bypassProxy,
])
func countTokens_text_systemInstruction_unsupported(_ config: InstanceConfig) async throws {
let model = FirebaseAI.componentInstance(config).generativeModel(
@@ -120,7 +120,7 @@

@Test(
/* System instructions are not supported on the v1 Developer API. */
- arguments: InstanceConfig.allConfigsExceptDeveloperV1
+ arguments: InstanceConfig.allConfigsExceptGoogleAI_v1
)
func countTokens_jsonSchema(_ config: InstanceConfig) async throws {
let model = FirebaseAI.componentInstance(config).generativeModel(
@@ -144,7 +144,7 @@
case .vertexAI:
#expect(response.totalTokens == 65)
#expect(response.totalBillableCharacters == 170)
- case .developer:
+ case .googleAI:
// The Developer API erroneously ignores the `responseSchema` when counting tokens, resulting
// in a lower total count than Vertex AI.
#expect(response.totalTokens == 34)
@@ -78,7 +78,7 @@ struct GenerateContentIntegrationTests {
@Test(
"Generate an enum and provide a system instruction",
/* System instructions are not supported on the v1 Developer API. */
- arguments: InstanceConfig.allConfigsExceptDeveloperV1
+ arguments: InstanceConfig.allConfigsExceptGoogleAI_v1
)
func generateContentEnum(_ config: InstanceConfig) async throws {
let model = FirebaseAI.componentInstance(config).generativeModel(
@@ -118,9 +118,9 @@
// TODO(andrewheard): Vertex AI configs temporarily disabled due to empty SafetyRatings bug.
// InstanceConfig.vertexV1,
// InstanceConfig.vertexV1Beta,
- InstanceConfig.developerV1Beta,
- InstanceConfig.developerV1BetaStaging,
- InstanceConfig.developerV1BetaSpark,
+ InstanceConfig.googleAI_v1beta,
+ InstanceConfig.googleAI_v1beta_staging,
+ InstanceConfig.googleAI_v1beta_freeTier_bypassProxy,
])
func generateImage(_ config: InstanceConfig) async throws {
let generationConfig = GenerationConfig(
@@ -216,12 +216,7 @@

// MARK: - App Check Tests

- @Test(arguments: [
- InstanceConfig.vertexV1AppCheckNotConfigured,
- InstanceConfig.vertexV1BetaAppCheckNotConfigured,
- // App Check is not supported on the Generative Language Developer API endpoint since it
- // bypasses the Firebase AI SDK proxy.
- ])
+ @Test(arguments: InstanceConfig.appCheckNotConfiguredConfigs)
func generateContent_appCheckNotConfigured_shouldFail(_ config: InstanceConfig) async throws {
let model = FirebaseAI.componentInstance(config).generativeModel(
modelName: ModelNames.gemini2Flash
8 changes: 4 additions & 4 deletions FirebaseAI/Tests/TestApp/Tests/Integration/SchemaTests.swift
@@ -48,7 +48,7 @@ struct SchemaTests {
storage = Storage.storage()
}

- @Test(arguments: InstanceConfig.allConfigsExceptDeveloperV1)
+ @Test(arguments: InstanceConfig.allConfigsExceptGoogleAI_v1)
func generateContentSchemaItems(_ config: InstanceConfig) async throws {
let model = FirebaseAI.componentInstance(config).generativeModel(
modelName: ModelNames.gemini2FlashLite,
@@ -73,7 +73,7 @@
#expect(decodedJSON.count <= 5, "Expected at most 5 cities, but got \(decodedJSON.count)")
}

- @Test(arguments: InstanceConfig.allConfigsExceptDeveloperV1)
+ @Test(arguments: InstanceConfig.allConfigsExceptGoogleAI_v1)
func generateContentSchemaNumberRange(_ config: InstanceConfig) async throws {
let model = FirebaseAI.componentInstance(config).generativeModel(
modelName: ModelNames.gemini2FlashLite,
@@ -96,7 +96,7 @@
#expect(decodedNumber <= 120.0, "Expected a number <= 120, but got \(decodedNumber)")
}

- @Test(arguments: InstanceConfig.allConfigsExceptDeveloperV1)
+ @Test(arguments: InstanceConfig.allConfigsExceptGoogleAI_v1)
func generateContentSchemaNumberRangeMultiType(_ config: InstanceConfig) async throws {
struct ProductInfo: Codable {
let productName: String
@@ -149,7 +149,7 @@
#expect(rating <= 5, "Expected a rating <= 5, but got \(rating)")
}

- @Test(arguments: InstanceConfig.allConfigsExceptDeveloperV1)
+ @Test(arguments: InstanceConfig.allConfigsExceptGoogleAI_v1)
func generateContentAnyOfSchema(_ config: InstanceConfig) async throws {
struct MailingAddress: Decodable {
let streetAddress: String