|
35 | 35 | ] |
36 | 36 |
|
37 | 37 |
|
@log_adapter.method_logger(custom_base_name="bigquery_ai")
def generate(
    prompt: PROMPT_TYPE,
    *,
    connection_id: str | None = None,
    endpoint: str | None = None,
    request_type: Literal["dedicated", "shared", "unspecified"] = "unspecified",
    model_params: Mapping[Any, Any] | None = None,
    # TODO(b/446974666) Add output_schema parameter
) -> series.Series:
    """
    Returns the AI analysis based on the prompt, which can be any combination of text and unstructured data.

    **Examples:**

        >>> import bigframes.pandas as bpd
        >>> import bigframes.bigquery as bbq
        >>> bpd.options.display.progress_bar = None
        >>> country = bpd.Series(["Japan", "Canada"])
        >>> bbq.ai.generate(("What's the capital city of ", country, " one word only"))
        0    {'result': 'Tokyo\\n', 'full_response': '{"cand...
        1    {'result': 'Ottawa\\n', 'full_response': '{"can...
        dtype: struct<result: string, full_response: extension<dbjson<JSONArrowType>>, status: string>[pyarrow]

        >>> bbq.ai.generate(("What's the capital city of ", country, " one word only")).struct.field("result")
        0     Tokyo\\n
        1    Ottawa\\n
        Name: result, dtype: string

    Args:
        prompt (Series | List[str|Series] | Tuple[str|Series, ...]):
            A mixture of Series and string literals that specifies the prompt to send to the model. The Series can be BigFrames Series
            or pandas Series.
        connection_id (str, optional):
            Specifies the connection to use to communicate with the model. For example, `myproject.us.myconnection`.
            If not provided, the connection from the current session will be used.
        endpoint (str, optional):
            Specifies the Vertex AI endpoint to use for the model. For example `"gemini-2.5-flash"`. You can specify any
            generally available or preview Gemini model. If you specify the model name, BigQuery ML automatically identifies and
            uses the full endpoint of the model. If you don't specify an ENDPOINT value, BigQuery ML selects a recent stable
            version of Gemini to use.
        request_type (Literal["dedicated", "shared", "unspecified"]):
            Specifies the type of inference request to send to the Gemini model. The request type determines what quota the request uses.
            * "dedicated": function only uses Provisioned Throughput quota. The function returns the error Provisioned throughput is not
              purchased or is not active if Provisioned Throughput quota isn't available.
            * "shared": the function only uses dynamic shared quota (DSQ), even if you have purchased Provisioned Throughput quota.
            * "unspecified": If you haven't purchased Provisioned Throughput quota, the function uses DSQ quota.
              If you have purchased Provisioned Throughput quota, the function uses the Provisioned Throughput quota first.
              If requests exceed the Provisioned Throughput quota, the overflow traffic uses DSQ quota.
        model_params (Mapping[Any, Any]):
            Provides additional parameters to the model. The MODEL_PARAMS value must conform to the generateContent request body format.

    Returns:
        bigframes.series.Series: A new struct Series with the result data. The struct contains these fields:
            * "result": a STRING value containing the model's response to the prompt. The result is None if the request fails or is filtered by responsible AI.
            * "full_response": a JSON value containing the response from the projects.locations.endpoints.generateContent call to the model.
              The generated text is in the text element.
            * "status": a STRING value that contains the API response status for the corresponding row. This value is empty if the operation was successful.

    Raises:
        ValueError: If the prompt does not reference at least one Series.
    """

    prompt_context, series_list = _separate_context_and_series(prompt)
    # A plain `assert` here would be stripped under `python -O` and would surface
    # as an unhelpful AssertionError for what is really a user-input error, so
    # validate explicitly: the prompt must reference at least one Series to
    # supply per-row context.
    if not series_list:
        raise ValueError("The prompt must contain at least one Series.")

    operator = ai_ops.AIGenerate(
        prompt_context=tuple(prompt_context),
        # The connection is resolved against the first Series' session when the
        # caller does not supply one explicitly.
        connection_id=_resolve_connection_id(series_list[0], connection_id),
        endpoint=endpoint,
        request_type=request_type,
        # Empty or None mappings are omitted entirely rather than serialized as "{}".
        model_params=json.dumps(model_params) if model_params else None,
    )

    return series_list[0]._apply_nary_op(operator, series_list[1:])
| 111 | + |
38 | 112 | @log_adapter.method_logger(custom_base_name="bigquery_ai") |
39 | 113 | def generate_bool( |
40 | 114 | prompt: PROMPT_TYPE, |
|
0 commit comments