Skip to content

Commit 84650c2

Browse files
refactor(ai): required argument for vararg functions (#7104)
This is a breaking change. See context in google-gemini/deprecated-generative-ai-android#116.

Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
1 parent e184a98 commit 84650c2

File tree

5 files changed

+100
-25
lines changed

5 files changed

+100
-25
lines changed

firebase-ai/CHANGELOG.md

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,10 @@
33
2.5 series models. (#6990)
44
* [feature] **Breaking Change**: Add support for Grounding with Google Search (#7042).
55
* **Action Required:** Update all references to `groundingAttributions`, `webSearchQueries`, and `retrievalQueries` in `GroundingMetadata` to be non-optional.
6+
* [changed] Require at least one argument for `generateContent()`, `generateContentStream()`, and
7+
`countTokens()`.
8+
* [feature] Add new overloads for `generateContent()`, `generateContentStream()`, and
9+
`countTokens()` that take a `List<Content>` parameter.
610

711
# 16.2.0
812
* [changed] Deprecate the `totalBillableCharacters` field (only usable with pre-2.0 models). (#7042)
@@ -34,3 +38,4 @@
3438

3539
Note: This feature is in Public Preview, which means that it is not subject to any SLA or
3640
deprecation policy and could change in backwards-incompatible ways.
41+

firebase-ai/api.txt

Lines changed: 9 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -53,14 +53,17 @@ package com.google.firebase.ai {
5353

5454
public final class GenerativeModel {
5555
method public suspend Object? countTokens(android.graphics.Bitmap prompt, kotlin.coroutines.Continuation<? super com.google.firebase.ai.type.CountTokensResponse>);
56-
method public suspend Object? countTokens(com.google.firebase.ai.type.Content[] prompt, kotlin.coroutines.Continuation<? super com.google.firebase.ai.type.CountTokensResponse>);
56+
method public suspend Object? countTokens(com.google.firebase.ai.type.Content prompt, com.google.firebase.ai.type.Content[] prompts, kotlin.coroutines.Continuation<? super com.google.firebase.ai.type.CountTokensResponse>);
5757
method public suspend Object? countTokens(String prompt, kotlin.coroutines.Continuation<? super com.google.firebase.ai.type.CountTokensResponse>);
58+
method public suspend Object? countTokens(java.util.List<com.google.firebase.ai.type.Content> prompt, kotlin.coroutines.Continuation<? super com.google.firebase.ai.type.CountTokensResponse>);
5859
method public suspend Object? generateContent(android.graphics.Bitmap prompt, kotlin.coroutines.Continuation<? super com.google.firebase.ai.type.GenerateContentResponse>);
59-
method public suspend Object? generateContent(com.google.firebase.ai.type.Content[] prompt, kotlin.coroutines.Continuation<? super com.google.firebase.ai.type.GenerateContentResponse>);
60+
method public suspend Object? generateContent(com.google.firebase.ai.type.Content prompt, com.google.firebase.ai.type.Content[] prompts, kotlin.coroutines.Continuation<? super com.google.firebase.ai.type.GenerateContentResponse>);
6061
method public suspend Object? generateContent(String prompt, kotlin.coroutines.Continuation<? super com.google.firebase.ai.type.GenerateContentResponse>);
62+
method public suspend Object? generateContent(java.util.List<com.google.firebase.ai.type.Content> prompt, kotlin.coroutines.Continuation<? super com.google.firebase.ai.type.GenerateContentResponse>);
6163
method public kotlinx.coroutines.flow.Flow<com.google.firebase.ai.type.GenerateContentResponse> generateContentStream(android.graphics.Bitmap prompt);
62-
method public kotlinx.coroutines.flow.Flow<com.google.firebase.ai.type.GenerateContentResponse> generateContentStream(com.google.firebase.ai.type.Content... prompt);
64+
method public kotlinx.coroutines.flow.Flow<com.google.firebase.ai.type.GenerateContentResponse> generateContentStream(com.google.firebase.ai.type.Content prompt, com.google.firebase.ai.type.Content... prompts);
6365
method public kotlinx.coroutines.flow.Flow<com.google.firebase.ai.type.GenerateContentResponse> generateContentStream(String prompt);
66+
method public kotlinx.coroutines.flow.Flow<com.google.firebase.ai.type.GenerateContentResponse> generateContentStream(java.util.List<com.google.firebase.ai.type.Content> prompt);
6467
method public com.google.firebase.ai.Chat startChat(java.util.List<com.google.firebase.ai.type.Content> history = emptyList());
6568
}
6669

@@ -89,10 +92,10 @@ package com.google.firebase.ai.java {
8992
}
9093

9194
public abstract class GenerativeModelFutures {
92-
method public abstract com.google.common.util.concurrent.ListenableFuture<com.google.firebase.ai.type.CountTokensResponse> countTokens(com.google.firebase.ai.type.Content... prompt);
95+
method public abstract com.google.common.util.concurrent.ListenableFuture<com.google.firebase.ai.type.CountTokensResponse> countTokens(com.google.firebase.ai.type.Content prompt, com.google.firebase.ai.type.Content... prompts);
9396
method public static final com.google.firebase.ai.java.GenerativeModelFutures from(com.google.firebase.ai.GenerativeModel model);
94-
method public abstract com.google.common.util.concurrent.ListenableFuture<com.google.firebase.ai.type.GenerateContentResponse> generateContent(com.google.firebase.ai.type.Content... prompt);
95-
method public abstract org.reactivestreams.Publisher<com.google.firebase.ai.type.GenerateContentResponse> generateContentStream(com.google.firebase.ai.type.Content... prompt);
97+
method public abstract com.google.common.util.concurrent.ListenableFuture<com.google.firebase.ai.type.GenerateContentResponse> generateContent(com.google.firebase.ai.type.Content prompt, com.google.firebase.ai.type.Content... prompts);
98+
method public abstract org.reactivestreams.Publisher<com.google.firebase.ai.type.GenerateContentResponse> generateContentStream(com.google.firebase.ai.type.Content prompt, com.google.firebase.ai.type.Content... prompts);
9699
method public abstract com.google.firebase.ai.GenerativeModel getGenerativeModel();
97100
method public abstract com.google.firebase.ai.java.ChatFutures startChat();
98101
method public abstract com.google.firebase.ai.java.ChatFutures startChat(java.util.List<com.google.firebase.ai.type.Content> history);

firebase-ai/src/main/kotlin/com/google/firebase/ai/Chat.kt

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -66,7 +66,8 @@ public class Chat(
6666
prompt.assertComesFromUser()
6767
attemptLock()
6868
try {
69-
val response = model.generateContent(*history.toTypedArray(), prompt)
69+
val fullPrompt = history + prompt
70+
val response = model.generateContent(fullPrompt.first(), *fullPrompt.drop(1).toTypedArray())
7071
history.add(prompt)
7172
history.add(response.candidates.first().content)
7273
return response
@@ -127,7 +128,8 @@ public class Chat(
127128
prompt.assertComesFromUser()
128129
attemptLock()
129130

130-
val flow = model.generateContentStream(*history.toTypedArray(), prompt)
131+
val fullPrompt = history + prompt
132+
val flow = model.generateContentStream(fullPrompt.first(), *fullPrompt.drop(1).toTypedArray())
131133
val bitmaps = LinkedList<Bitmap>()
132134
val inlineDataParts = LinkedList<InlineDataPart>()
133135
val text = StringBuilder()

firebase-ai/src/main/kotlin/com/google/firebase/ai/GenerativeModel.kt

Lines changed: 59 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -100,13 +100,48 @@ internal constructor(
100100
* @throws [FirebaseAIException] if the request failed.
101101
* @see [FirebaseAIException] for types of errors.
102102
*/
103-
public suspend fun generateContent(vararg prompt: Content): GenerateContentResponse =
103+
public suspend fun generateContent(
104+
prompt: Content,
105+
vararg prompts: Content
106+
): GenerateContentResponse =
104107
try {
105-
controller.generateContent(constructRequest(*prompt)).toPublic().validate()
108+
controller.generateContent(constructRequest(prompt, *prompts)).toPublic().validate()
106109
} catch (e: Throwable) {
107110
throw FirebaseAIException.from(e)
108111
}
109112

113+
/**
114+
* Generates new content from the input [Content] given to the model as a prompt.
115+
*
116+
* @param prompt The input(s) given to the model as a prompt.
117+
* @return The content generated by the model.
118+
* @throws [FirebaseAIException] if the request failed.
119+
* @see [FirebaseAIException] for types of errors.
120+
*/
121+
public suspend fun generateContent(prompt: List<Content>): GenerateContentResponse =
122+
try {
123+
controller.generateContent(constructRequest(prompt)).toPublic().validate()
124+
} catch (e: Throwable) {
125+
throw FirebaseAIException.from(e)
126+
}
127+
128+
/**
129+
* Generates new content as a stream from the input [Content] given to the model as a prompt.
130+
*
131+
* @param prompt The input(s) given to the model as a prompt.
132+
* @return A [Flow] which will emit responses as they are returned by the model.
133+
* @throws [FirebaseAIException] if the request failed.
134+
* @see [FirebaseAIException] for types of errors.
135+
*/
136+
public fun generateContentStream(
137+
prompt: Content,
138+
vararg prompts: Content
139+
): Flow<GenerateContentResponse> =
140+
controller
141+
.generateContentStream(constructRequest(prompt, *prompts))
142+
.catch { throw FirebaseAIException.from(it) }
143+
.map { it.toPublic().validate() }
144+
110145
/**
111146
* Generates new content as a stream from the input [Content] given to the model as a prompt.
112147
*
@@ -115,9 +150,9 @@ internal constructor(
115150
* @throws [FirebaseAIException] if the request failed.
116151
* @see [FirebaseAIException] for types of errors.
117152
*/
118-
public fun generateContentStream(vararg prompt: Content): Flow<GenerateContentResponse> =
153+
public fun generateContentStream(prompt: List<Content>): Flow<GenerateContentResponse> =
119154
controller
120-
.generateContentStream(constructRequest(*prompt))
155+
.generateContentStream(constructRequest(prompt))
121156
.catch { throw FirebaseAIException.from(it) }
122157
.map { it.toPublic().validate() }
123158

@@ -177,9 +212,25 @@ internal constructor(
177212
* @throws [FirebaseAIException] if the request failed.
178213
* @see [FirebaseAIException] for types of errors.
179214
*/
180-
public suspend fun countTokens(vararg prompt: Content): CountTokensResponse {
215+
public suspend fun countTokens(prompt: Content, vararg prompts: Content): CountTokensResponse {
216+
try {
217+
return controller.countTokens(constructCountTokensRequest(prompt, *prompts)).toPublic()
218+
} catch (e: Throwable) {
219+
throw FirebaseAIException.from(e)
220+
}
221+
}
222+
223+
/**
224+
* Counts the number of tokens in a prompt using the model's tokenizer.
225+
*
226+
* @param prompt The input(s) given to the model as a prompt.
227+
* @return The [CountTokensResponse] of running the model's tokenizer on the input.
228+
* @throws [FirebaseAIException] if the request failed.
229+
* @see [FirebaseAIException] for types of errors.
230+
*/
231+
public suspend fun countTokens(prompt: List<Content>): CountTokensResponse {
181232
try {
182-
return controller.countTokens(constructCountTokensRequest(*prompt)).toPublic()
233+
return controller.countTokens(constructCountTokensRequest(*prompt.toTypedArray())).toPublic()
183234
} catch (e: Throwable) {
184235
throw FirebaseAIException.from(e)
185236
}
@@ -232,6 +283,8 @@ internal constructor(
232283
systemInstruction?.copy(role = "system")?.toInternal(),
233284
)
234285

286+
private fun constructRequest(prompt: List<Content>) = constructRequest(*prompt.toTypedArray())
287+
235288
private fun constructCountTokensRequest(vararg prompt: Content) =
236289
when (generativeBackend.backend) {
237290
GenerativeBackendEnum.GOOGLE_AI -> CountTokensRequest.forGoogleAI(constructRequest(*prompt))

firebase-ai/src/main/kotlin/com/google/firebase/ai/java/GenerativeModelFutures.kt

Lines changed: 23 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -42,7 +42,8 @@ public abstract class GenerativeModelFutures internal constructor() {
4242
* @throws [FirebaseAIException] if the request failed.
4343
*/
4444
public abstract fun generateContent(
45-
vararg prompt: Content
45+
prompt: Content,
46+
vararg prompts: Content
4647
): ListenableFuture<GenerateContentResponse>
4748

4849
/**
@@ -53,7 +54,8 @@ public abstract class GenerativeModelFutures internal constructor() {
5354
* @throws [FirebaseAIException] if the request failed.
5455
*/
5556
public abstract fun generateContentStream(
56-
vararg prompt: Content
57+
prompt: Content,
58+
vararg prompts: Content
5759
): Publisher<GenerateContentResponse>
5860

5961
/**
@@ -63,7 +65,10 @@ public abstract class GenerativeModelFutures internal constructor() {
6365
* @return The [CountTokensResponse] of running the model's tokenizer on the input.
6466
* @throws [FirebaseAIException] if the request failed.
6567
*/
66-
public abstract fun countTokens(vararg prompt: Content): ListenableFuture<CountTokensResponse>
68+
public abstract fun countTokens(
69+
prompt: Content,
70+
vararg prompts: Content
71+
): ListenableFuture<CountTokensResponse>
6772

6873
/**
6974
* Creates a [ChatFutures] instance which internally tracks the ongoing conversation with the
@@ -83,15 +88,22 @@ public abstract class GenerativeModelFutures internal constructor() {
8388

8489
private class FuturesImpl(private val model: GenerativeModel) : GenerativeModelFutures() {
8590
override fun generateContent(
86-
vararg prompt: Content
91+
prompt: Content,
92+
vararg prompts: Content
8793
): ListenableFuture<GenerateContentResponse> =
88-
SuspendToFutureAdapter.launchFuture { model.generateContent(*prompt) }
89-
90-
override fun generateContentStream(vararg prompt: Content): Publisher<GenerateContentResponse> =
91-
model.generateContentStream(*prompt).asPublisher()
92-
93-
override fun countTokens(vararg prompt: Content): ListenableFuture<CountTokensResponse> =
94-
SuspendToFutureAdapter.launchFuture { model.countTokens(*prompt) }
94+
SuspendToFutureAdapter.launchFuture { model.generateContent(prompt, *prompts) }
95+
96+
override fun generateContentStream(
97+
prompt: Content,
98+
vararg prompts: Content
99+
): Publisher<GenerateContentResponse> =
100+
model.generateContentStream(prompt, *prompts).asPublisher()
101+
102+
override fun countTokens(
103+
prompt: Content,
104+
vararg prompts: Content
105+
): ListenableFuture<CountTokensResponse> =
106+
SuspendToFutureAdapter.launchFuture { model.countTokens(prompt, *prompts) }
95107

96108
override fun startChat(): ChatFutures = startChat(emptyList())
97109

0 commit comments

Comments
 (0)