@@ -188,6 +188,81 @@ def generate_int(
188188 return series_list [0 ]._apply_nary_op (operator , series_list [1 :])
189189
190190
@log_adapter.method_logger(custom_base_name="bigquery_ai")
def generate_double(
    prompt: PROMPT_TYPE,
    *,
    connection_id: str | None = None,
    endpoint: str | None = None,
    request_type: Literal["dedicated", "shared", "unspecified"] = "unspecified",
    model_params: Mapping[Any, Any] | None = None,
) -> series.Series:
    """
    Returns the AI analysis based on the prompt, which can be any combination of text and unstructured data.

    **Examples:**

        >>> import bigframes.pandas as bpd
        >>> import bigframes.bigquery as bbq
        >>> bpd.options.display.progress_bar = None
        >>> animal = bpd.Series(["Kangaroo", "Rabbit", "Spider"])
        >>> bbq.ai.generate_double(("How many legs does a ", animal, " have?"))
        0    {'result': 2.0, 'full_response': '{"candidates...
        1    {'result': 4.0, 'full_response': '{"candidates...
        2    {'result': 8.0, 'full_response': '{"candidates...
        dtype: struct<result: double, full_response: extension<dbjson<JSONArrowType>>, status: string>[pyarrow]

        >>> bbq.ai.generate_double(("How many legs does a ", animal, " have?")).struct.field("result")
        0    2.0
        1    4.0
        2    8.0
        Name: result, dtype: Float64

    Args:
        prompt (Series | List[str|Series] | Tuple[str|Series, ...]):
            A mixture of Series and string literals that specifies the prompt to send to the model. The Series can be BigFrames Series
            or pandas Series.
        connection_id (str, optional):
            Specifies the connection to use to communicate with the model. For example, `myproject.us.myconnection`.
            If not provided, the connection from the current session will be used.
        endpoint (str, optional):
            Specifies the Vertex AI endpoint to use for the model. For example `"gemini-2.5-flash"`. You can specify any
            generally available or preview Gemini model. If you specify the model name, BigQuery ML automatically identifies and
            uses the full endpoint of the model. If you don't specify an ENDPOINT value, BigQuery ML selects a recent stable
            version of Gemini to use.
        request_type (Literal["dedicated", "shared", "unspecified"]):
            Specifies the type of inference request to send to the Gemini model. The request type determines what quota the request uses.
            * "dedicated": function only uses Provisioned Throughput quota. The function returns the error Provisioned throughput is not
              purchased or is not active if Provisioned Throughput quota isn't available.
            * "shared": the function only uses dynamic shared quota (DSQ), even if you have purchased Provisioned Throughput quota.
            * "unspecified": If you haven't purchased Provisioned Throughput quota, the function uses DSQ quota.
              If you have purchased Provisioned Throughput quota, the function uses the Provisioned Throughput quota first.
              If requests exceed the Provisioned Throughput quota, the overflow traffic uses DSQ quota.
        model_params (Mapping[Any, Any]):
            Provides additional parameters to the model. The MODEL_PARAMS value must conform to the generateContent request body format.

    Returns:
        bigframes.series.Series: A new struct Series with the result data. The struct contains these fields:
            * "result": a DOUBLE value containing the model's response to the prompt. The result is None if the request fails or is filtered by responsible AI.
            * "full_response": a JSON value containing the response from the projects.locations.endpoints.generateContent call to the model.
              The generated text is in the text element.
            * "status": a STRING value that contains the API response status for the corresponding row. This value is empty if the operation was successful.

    Raises:
        ValueError: If the prompt does not contain at least one Series.
    """

    prompt_context, series_list = _separate_context_and_series(prompt)

    # Use an explicit exception instead of `assert` so the check is not
    # stripped under `python -O` and the caller gets an actionable message.
    if not series_list:
        raise ValueError("The prompt must contain at least one Series.")

    operator = ai_ops.AIGenerateDouble(
        prompt_context=tuple(prompt_context),
        # The connection may come from the argument or fall back to the
        # session associated with the first Series in the prompt.
        connection_id=_resolve_connection_id(series_list[0], connection_id),
        endpoint=endpoint,
        request_type=request_type,
        # The underlying SQL operator expects model params serialized as JSON.
        model_params=json.dumps(model_params) if model_params else None,
    )

    return series_list[0]._apply_nary_op(operator, series_list[1:])
264+
265+
191266def _separate_context_and_series (
192267 prompt : PROMPT_TYPE ,
193268) -> Tuple [List [str | None ], List [series .Series ]]:
0 commit comments