Skip to content

Auto-generated code for main #82

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 2 commits into from
Oct 11, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
43 changes: 41 additions & 2 deletions elasticsearch_serverless/_async/client/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -2271,6 +2271,7 @@ async def msearch(
human: t.Optional[bool] = None,
ignore_throttled: t.Optional[bool] = None,
ignore_unavailable: t.Optional[bool] = None,
include_named_queries_score: t.Optional[bool] = None,
max_concurrent_searches: t.Optional[int] = None,
max_concurrent_shard_requests: t.Optional[int] = None,
pre_filter_shard_size: t.Optional[int] = None,
Expand Down Expand Up @@ -2304,6 +2305,13 @@ async def msearch(
when frozen.
:param ignore_unavailable: If true, missing or closed indices are not included
in the response.
:param include_named_queries_score: Indicates whether hit.matched_queries should
be rendered as a map that includes the name of the matched query associated
with its score (true) or as an array containing the name of the matched queries
(false). This functionality reruns each named query on every hit in a search
response. Typically, this adds a small overhead to a request. However, using
computationally expensive named queries on a large number of hits may add
significant overhead.
:param max_concurrent_searches: Maximum number of concurrent searches the multi
search API can execute.
:param max_concurrent_shard_requests: Maximum number of concurrent shard requests
Expand Down Expand Up @@ -2353,6 +2361,8 @@ async def msearch(
__query["ignore_throttled"] = ignore_throttled
if ignore_unavailable is not None:
__query["ignore_unavailable"] = ignore_unavailable
if include_named_queries_score is not None:
__query["include_named_queries_score"] = include_named_queries_score
if max_concurrent_searches is not None:
__query["max_concurrent_searches"] = max_concurrent_searches
if max_concurrent_shard_requests is not None:
Expand Down Expand Up @@ -2585,7 +2595,9 @@ async def mtermvectors(
path_parts=__path_parts,
)

@_rewrite_parameters()
@_rewrite_parameters(
body_fields=("index_filter",),
)
async def open_point_in_time(
self,
*,
Expand All @@ -2603,9 +2615,11 @@ async def open_point_in_time(
filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
human: t.Optional[bool] = None,
ignore_unavailable: t.Optional[bool] = None,
index_filter: t.Optional[t.Mapping[str, t.Any]] = None,
preference: t.Optional[str] = None,
pretty: t.Optional[bool] = None,
routing: t.Optional[str] = None,
body: t.Optional[t.Dict[str, t.Any]] = None,
) -> ObjectApiResponse[t.Any]:
"""
A search request by default executes against the most recent visible data of
Expand All @@ -2627,17 +2641,20 @@ async def open_point_in_time(
as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
:param ignore_unavailable: If `false`, the request returns an error if it targets
a missing or closed index.
:param index_filter: Allows filtering indices if the provided query rewrites
to `match_none` on every shard.
:param preference: Specifies the node or shard the operation should be performed
on. Random by default.
:param routing: Custom value used to route operations to a specific shard.
"""
if index in SKIP_IN_PATH:
raise ValueError("Empty value passed for parameter 'index'")
if keep_alive is None:
if keep_alive is None and body is None:
raise ValueError("Empty value passed for parameter 'keep_alive'")
__path_parts: t.Dict[str, str] = {"index": _quote(index)}
__path = f'/{__path_parts["index"]}/_pit'
__query: t.Dict[str, t.Any] = {}
__body: t.Dict[str, t.Any] = body if body is not None else {}
if keep_alive is not None:
__query["keep_alive"] = keep_alive
if error_trace is not None:
Expand All @@ -2656,12 +2673,20 @@ async def open_point_in_time(
__query["pretty"] = pretty
if routing is not None:
__query["routing"] = routing
if not __body:
if index_filter is not None:
__body["index_filter"] = index_filter
if not __body:
__body = None # type: ignore[assignment]
__headers = {"accept": "application/json"}
if __body is not None:
__headers["content-type"] = "application/json"
return await self.perform_request( # type: ignore[return-value]
"POST",
__path,
params=__query,
headers=__headers,
body=__body,
endpoint_id="open_point_in_time",
path_parts=__path_parts,
)
Expand Down Expand Up @@ -3221,6 +3246,7 @@ async def search(
human: t.Optional[bool] = None,
ignore_throttled: t.Optional[bool] = None,
ignore_unavailable: t.Optional[bool] = None,
include_named_queries_score: t.Optional[bool] = None,
indices_boost: t.Optional[t.Sequence[t.Mapping[str, float]]] = None,
knn: t.Optional[
t.Union[t.Mapping[str, t.Any], t.Sequence[t.Mapping[str, t.Any]]]
Expand Down Expand Up @@ -3348,6 +3374,13 @@ async def search(
be ignored when frozen.
:param ignore_unavailable: If `false`, the request returns an error if it targets
a missing or closed index.
:param include_named_queries_score: Indicates whether hit.matched_queries should
be rendered as a map that includes the name of the matched query associated
with its score (true) or as an array containing the name of the matched queries
(false). This functionality reruns each named query on every hit in a search
response. Typically, this adds a small overhead to a request. However, using
computationally expensive named queries on a large number of hits may add
significant overhead.
:param indices_boost: Boosts the _score of documents from specified indices.
:param knn: Defines the approximate kNN search to run.
:param lenient: If `true`, format-based query failures (such as providing text
Expand Down Expand Up @@ -3529,6 +3562,8 @@ async def search(
__query["ignore_throttled"] = ignore_throttled
if ignore_unavailable is not None:
__query["ignore_unavailable"] = ignore_unavailable
if include_named_queries_score is not None:
__query["include_named_queries_score"] = include_named_queries_score
if lenient is not None:
__query["lenient"] = lenient
if max_concurrent_shard_requests is not None:
Expand Down Expand Up @@ -4389,6 +4424,7 @@ async def update_by_query(
pipeline: t.Optional[str] = None,
preference: t.Optional[str] = None,
pretty: t.Optional[bool] = None,
q: t.Optional[str] = None,
query: t.Optional[t.Mapping[str, t.Any]] = None,
refresh: t.Optional[bool] = None,
request_cache: t.Optional[bool] = None,
Expand Down Expand Up @@ -4455,6 +4491,7 @@ async def update_by_query(
parameter.
:param preference: Specifies the node or shard the operation should be performed
on. Random by default.
:param q: Query in the Lucene query string syntax.
:param query: Specifies the documents to update using the Query DSL.
:param refresh: If `true`, Elasticsearch refreshes affected shards to make the
operation visible to search.
Expand Down Expand Up @@ -4539,6 +4576,8 @@ async def update_by_query(
__query["preference"] = preference
if pretty is not None:
__query["pretty"] = pretty
if q is not None:
__query["q"] = q
if refresh is not None:
__query["refresh"] = refresh
if request_cache is not None:
Expand Down
2 changes: 1 addition & 1 deletion elasticsearch_serverless/_async/client/cat.py
Original file line number Diff line number Diff line change
Expand Up @@ -223,7 +223,7 @@ async def count(
) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]:
"""
Get a document count. Provides quick access to a document count for a data stream,
an index, or an entire cluster.n/ The document count only includes live documents,
an index, or an entire cluster. The document count only includes live documents,
not deleted documents which have not yet been removed by the merge process. CAT
APIs are only intended for human consumption using the command line or Kibana
console. They are not intended for use by applications. For application consumption,
Expand Down
2 changes: 1 addition & 1 deletion elasticsearch_serverless/_async/client/cluster.py
Original file line number Diff line number Diff line change
Expand Up @@ -284,7 +284,7 @@ async def put_component_template(
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-component-template.html>`_

:param name: Name of the component template to create. Elasticsearch includes
the following built-in component templates: `logs-mappings`; 'logs-settings`;
the following built-in component templates: `logs-mappings`; `logs-settings`;
`metrics-mappings`; `metrics-settings`; `synthetics-mapping`; `synthetics-settings`.
Elastic Agent uses these templates to configure backing indices for its data
streams. If you use Elastic Agent and want to overwrite one of these templates,
Expand Down
9 changes: 8 additions & 1 deletion elasticsearch_serverless/_async/client/esql.py
Original file line number Diff line number Diff line change
Expand Up @@ -47,7 +47,14 @@ async def query(
error_trace: t.Optional[bool] = None,
filter: t.Optional[t.Mapping[str, t.Any]] = None,
filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
format: t.Optional[str] = None,
format: t.Optional[
t.Union[
str,
t.Literal[
"arrow", "cbor", "csv", "json", "smile", "tsv", "txt", "yaml"
],
]
] = None,
human: t.Optional[bool] = None,
locale: t.Optional[str] = None,
params: t.Optional[
Expand Down
Loading
Loading