#### firebaseml:v2beta
The following keys were deleted:
- schemas.GoogleCloudAiplatformV1beta1GenerationConfigThinkingConfig.properties.enableThinking.type (Total Keys: 1)
The following keys were added:
- schemas.GoogleCloudAiplatformV1beta1SpeechConfig.properties.languageCode.type (Total Keys: 1)
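In practice, these key changes mean a request built through the generated client can now set a language code on `speechConfig`, while `enableThinking` is gone from `thinkingConfig` and only `thinkingBudget` remains. A minimal sketch, assuming the `generateContent` method documented in the file below, a hypothetical project/model resource name, and default application credentials:

```python
# Sketch only: the method name, model path, and voice name are assumptions,
# not values taken from this PR.
from googleapiclient.discovery import build

service = build("firebaseml", "v2beta")

body = {
    "contents": [{"role": "user", "parts": [{"text": "Say hello."}]}],
    "generationConfig": {
        "speechConfig": {
            "languageCode": "en-US",  # newly added key in this revision
            "voiceConfig": {"prebuiltVoiceConfig": {"voiceName": "some-voice"}},
        },
        "thinkingConfig": {
            "thinkingBudget": 1024,  # "enableThinking" was removed from the schema
        },
    },
}

response = (
    service.projects()
    .locations()
    .publishers()
    .models()
    .generateContent(
        # Hypothetical resource name; substitute a real project, location, and model.
        model="projects/my-project/locations/us-central1/publishers/google/models/gemini-2.0-flash",
        body=body,
    )
    .execute()
)
print(response)
```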
docs/dyn/firebaseml_v2beta.projects.locations.publishers.models.html (17 additions & 17 deletions)
@@ -120,7 +120,7 @@ <h3>Method Details</h3>
 "mimeType": "A String", # Required. The IANA standard MIME type of the source data.
 },
 "functionCall": { # A predicted [FunctionCall] returned from the model that contains a string representing the [FunctionDeclaration.name] and a structured JSON object containing the parameters and their values. # Optional. A predicted [FunctionCall] returned from the model that contains a string representing the [FunctionDeclaration.name] with the parameters and their values.
-"args": { # Optional. Required. The function parameters and values in JSON object format. See [FunctionDeclaration.parameters] for parameter details.
+"args": { # Optional. The function parameters and values in JSON object format. See [FunctionDeclaration.parameters] for parameter details.
 "a_key": "", # Properties of the object.
 },
 "id": "A String", # Optional. The unique id of the function call. If populated, the client to execute the `function_call` and return the response with the matching `id`.
@@ -204,11 +204,12 @@ <h3>Method Details</h3>
 "modelRoutingPreference": "A String", # The model routing preference.
 },
 "manualMode": { # When manual routing is set, the specified model will be used directly. # Manual routing.
-"modelName": "A String", # The model name to use. Only the public LLM models are accepted. e.g. 'gemini-1.5-pro-001'.
+"modelName": "A String", # The model name to use. Only the public LLM models are accepted. See [Supported models](https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/inference#supported-models).
 },
 },
 "seed": 42, # Optional. Seed.
 "speechConfig": { # The speech generation config. # Optional. The speech generation config.
+"languageCode": "A String", # Optional. Language code (ISO 639. e.g. en-US) for the speech synthesization.
 "voiceConfig": { # The configuration for the voice to use. # The configuration for the speaker to use.
 "prebuiltVoiceConfig": { # The configuration for the prebuilt speaker to use. # The configuration for the prebuilt voice to use.
 "voiceName": "A String", # The name of the preset voice to use.
@@ -220,7 +221,6 @@ <h3>Method Details</h3>
 ],
 "temperature": 3.14, # Optional. Controls the randomness of predictions.
 "thinkingConfig": { # Config for thinking features. # Optional. Config for thinking features. An error will be returned if this field is set for models that don't support thinking.
-"enableThinking": True or False, # Optional. Indicates whether to enable thinking mode. If true, the model will enable thinking mode.
 "thinkingBudget": 42, # Optional. Indicates the thinking budget in tokens. This is only applied when enable_thinking is true.
 },
 "topK": 3.14, # Optional. If specified, top-k sampling will be used.
@@ -247,7 +247,7 @@ <h3>Method Details</h3>
 "mimeType": "A String", # Required. The IANA standard MIME type of the source data.
 },
 "functionCall": { # A predicted [FunctionCall] returned from the model that contains a string representing the [FunctionDeclaration.name] and a structured JSON object containing the parameters and their values. # Optional. A predicted [FunctionCall] returned from the model that contains a string representing the [FunctionDeclaration.name] with the parameters and their values.
-"args": { # Optional. Required. The function parameters and values in JSON object format. See [FunctionDeclaration.parameters] for parameter details.
+"args": { # Optional. The function parameters and values in JSON object format. See [FunctionDeclaration.parameters] for parameter details.
 "a_key": "", # Properties of the object.
 },
 "id": "A String", # Optional. The unique id of the function call. If populated, the client to execute the `function_call` and return the response with the matching `id`.
@@ -392,7 +392,7 @@ <h3>Method Details</h3>
 },
 "ranking": { # Config for ranking and reranking. # Optional. Config for ranking and reranking.
 "llmRanker": { # Config for LlmRanker. # Optional. Config for LlmRanker.
-"modelName": "A String", # Optional. The model name used for ranking. Format: `gemini-1.5-pro`
+"modelName": "A String", # Optional. The model name used for ranking. See [Supported models](https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/inference#supported-models).
 },
 "rankService": { # Config for Rank Service. # Optional. Config for Rank Service.
 "modelName": "A String", # Optional. The model name of the rank service. Format: `semantic-ranker-512@latest`
@@ -457,7 +457,7 @@ <h3>Method Details</h3>
 "mimeType": "A String", # Required. The IANA standard MIME type of the source data.
 },
 "functionCall": { # A predicted [FunctionCall] returned from the model that contains a string representing the [FunctionDeclaration.name] and a structured JSON object containing the parameters and their values. # Optional. A predicted [FunctionCall] returned from the model that contains a string representing the [FunctionDeclaration.name] with the parameters and their values.
-"args": { # Optional. Required. The function parameters and values in JSON object format. See [FunctionDeclaration.parameters] for parameter details.
+"args": { # Optional. The function parameters and values in JSON object format. See [FunctionDeclaration.parameters] for parameter details.
 "a_key": "", # Properties of the object.
 },
 "id": "A String", # Optional. The unique id of the function call. If populated, the client to execute the `function_call` and return the response with the matching `id`.
@@ -541,11 +541,12 @@ <h3>Method Details</h3>
 "modelRoutingPreference": "A String", # The model routing preference.
 },
 "manualMode": { # When manual routing is set, the specified model will be used directly. # Manual routing.
-"modelName": "A String", # The model name to use. Only the public LLM models are accepted. e.g. 'gemini-1.5-pro-001'.
+"modelName": "A String", # The model name to use. Only the public LLM models are accepted. See [Supported models](https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/inference#supported-models).
 },
 },
 "seed": 42, # Optional. Seed.
 "speechConfig": { # The speech generation config. # Optional. The speech generation config.
+"languageCode": "A String", # Optional. Language code (ISO 639. e.g. en-US) for the speech synthesization.
 "voiceConfig": { # The configuration for the voice to use. # The configuration for the speaker to use.
 "prebuiltVoiceConfig": { # The configuration for the prebuilt speaker to use. # The configuration for the prebuilt voice to use.
 "voiceName": "A String", # The name of the preset voice to use.
@@ -557,7 +558,6 @@ <h3>Method Details</h3>
 ],
 "temperature": 3.14, # Optional. Controls the randomness of predictions.
 "thinkingConfig": { # Config for thinking features. # Optional. Config for thinking features. An error will be returned if this field is set for models that don't support thinking.
-"enableThinking": True or False, # Optional. Indicates whether to enable thinking mode. If true, the model will enable thinking mode.
 "thinkingBudget": 42, # Optional. Indicates the thinking budget in tokens. This is only applied when enable_thinking is true.
 },
 "topK": 3.14, # Optional. If specified, top-k sampling will be used.
@@ -590,7 +590,7 @@ <h3>Method Details</h3>
 "mimeType": "A String", # Required. The IANA standard MIME type of the source data.
 },
 "functionCall": { # A predicted [FunctionCall] returned from the model that contains a string representing the [FunctionDeclaration.name] and a structured JSON object containing the parameters and their values. # Optional. A predicted [FunctionCall] returned from the model that contains a string representing the [FunctionDeclaration.name] with the parameters and their values.
-"args": { # Optional. Required. The function parameters and values in JSON object format. See [FunctionDeclaration.parameters] for parameter details.
+"args": { # Optional. The function parameters and values in JSON object format. See [FunctionDeclaration.parameters] for parameter details.
 "a_key": "", # Properties of the object.
 },
 "id": "A String", # Optional. The unique id of the function call. If populated, the client to execute the `function_call` and return the response with the matching `id`.
@@ -750,7 +750,7 @@ <h3>Method Details</h3>
 },
 "ranking": { # Config for ranking and reranking. # Optional. Config for ranking and reranking.
 "llmRanker": { # Config for LlmRanker. # Optional. Config for LlmRanker.
-"modelName": "A String", # Optional. The model name used for ranking. Format: `gemini-1.5-pro`
+"modelName": "A String", # Optional. The model name used for ranking. See [Supported models](https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/inference#supported-models).
 },
 "rankService": { # Config for Rank Service. # Optional. Config for Rank Service.
 "modelName": "A String", # Optional. The model name of the rank service. Format: `semantic-ranker-512@latest`
@@ -811,7 +811,7 @@ <h3>Method Details</h3>
 "mimeType": "A String", # Required. The IANA standard MIME type of the source data.
 },
 "functionCall": { # A predicted [FunctionCall] returned from the model that contains a string representing the [FunctionDeclaration.name] and a structured JSON object containing the parameters and their values. # Optional. A predicted [FunctionCall] returned from the model that contains a string representing the [FunctionDeclaration.name] with the parameters and their values.
-"args": { # Optional. Required. The function parameters and values in JSON object format. See [FunctionDeclaration.parameters] for parameter details.
+"args": { # Optional. The function parameters and values in JSON object format. See [FunctionDeclaration.parameters] for parameter details.
 "a_key": "", # Properties of the object.
 },
 "id": "A String", # Optional. The unique id of the function call. If populated, the client to execute the `function_call` and return the response with the matching `id`.
@@ -1008,7 +1008,7 @@ <h3>Method Details</h3>
 "mimeType": "A String", # Required. The IANA standard MIME type of the source data.
 },
 "functionCall": { # A predicted [FunctionCall] returned from the model that contains a string representing the [FunctionDeclaration.name] and a structured JSON object containing the parameters and their values. # Optional. A predicted [FunctionCall] returned from the model that contains a string representing the [FunctionDeclaration.name] with the parameters and their values.
-"args": { # Optional. Required. The function parameters and values in JSON object format. See [FunctionDeclaration.parameters] for parameter details.
+"args": { # Optional. The function parameters and values in JSON object format. See [FunctionDeclaration.parameters] for parameter details.
 "a_key": "", # Properties of the object.
 },
 "id": "A String", # Optional. The unique id of the function call. If populated, the client to execute the `function_call` and return the response with the matching `id`.
@@ -1092,11 +1092,12 @@ <h3>Method Details</h3>
 "modelRoutingPreference": "A String", # The model routing preference.
 },
 "manualMode": { # When manual routing is set, the specified model will be used directly. # Manual routing.
-"modelName": "A String", # The model name to use. Only the public LLM models are accepted. e.g. 'gemini-1.5-pro-001'.
+"modelName": "A String", # The model name to use. Only the public LLM models are accepted. See [Supported models](https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/inference#supported-models).
 },
 },
 "seed": 42, # Optional. Seed.
 "speechConfig": { # The speech generation config. # Optional. The speech generation config.
+"languageCode": "A String", # Optional. Language code (ISO 639. e.g. en-US) for the speech synthesization.
 "voiceConfig": { # The configuration for the voice to use. # The configuration for the speaker to use.
 "prebuiltVoiceConfig": { # The configuration for the prebuilt speaker to use. # The configuration for the prebuilt voice to use.
 "voiceName": "A String", # The name of the preset voice to use.
@@ -1108,7 +1109,6 @@ <h3>Method Details</h3>
 ],
 "temperature": 3.14, # Optional. Controls the randomness of predictions.
 "thinkingConfig": { # Config for thinking features. # Optional. Config for thinking features. An error will be returned if this field is set for models that don't support thinking.
-"enableThinking": True or False, # Optional. Indicates whether to enable thinking mode. If true, the model will enable thinking mode.
 "thinkingBudget": 42, # Optional. Indicates the thinking budget in tokens. This is only applied when enable_thinking is true.
 },
 "topK": 3.14, # Optional. If specified, top-k sampling will be used.
@@ -1141,7 +1141,7 @@ <h3>Method Details</h3>
 "mimeType": "A String", # Required. The IANA standard MIME type of the source data.
 },
 "functionCall": { # A predicted [FunctionCall] returned from the model that contains a string representing the [FunctionDeclaration.name] and a structured JSON object containing the parameters and their values. # Optional. A predicted [FunctionCall] returned from the model that contains a string representing the [FunctionDeclaration.name] with the parameters and their values.
-"args": { # Optional. Required. The function parameters and values in JSON object format. See [FunctionDeclaration.parameters] for parameter details.
+"args": { # Optional. The function parameters and values in JSON object format. See [FunctionDeclaration.parameters] for parameter details.
 "a_key": "", # Properties of the object.
 },
 "id": "A String", # Optional. The unique id of the function call. If populated, the client to execute the `function_call` and return the response with the matching `id`.
@@ -1301,7 +1301,7 @@ <h3>Method Details</h3>
 },
 "ranking": { # Config for ranking and reranking. # Optional. Config for ranking and reranking.
 "llmRanker": { # Config for LlmRanker. # Optional. Config for LlmRanker.
-"modelName": "A String", # Optional. The model name used for ranking. Format: `gemini-1.5-pro`
+"modelName": "A String", # Optional. The model name used for ranking. See [Supported models](https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/inference#supported-models).
 },
 "rankService": { # Config for Rank Service. # Optional. Config for Rank Service.
 "modelName": "A String", # Optional. The model name of the rank service. Format: `semantic-ranker-512@latest`
@@ -1362,7 +1362,7 @@ <h3>Method Details</h3>
 "mimeType": "A String", # Required. The IANA standard MIME type of the source data.
 },
 "functionCall": { # A predicted [FunctionCall] returned from the model that contains a string representing the [FunctionDeclaration.name] and a structured JSON object containing the parameters and their values. # Optional. A predicted [FunctionCall] returned from the model that contains a string representing the [FunctionDeclaration.name] with the parameters and their values.
-"args": { # Optional. Required. The function parameters and values in JSON object format. See [FunctionDeclaration.parameters] for parameter details.
+"args": { # Optional. The function parameters and values in JSON object format. See [FunctionDeclaration.parameters] for parameter details.
 "a_key": "", # Properties of the object.
 },
 "id": "A String", # Optional. The unique id of the function call. If populated, the client to execute the `function_call` and return the response with the matching `id`.