@@ -20,7 +20,7 @@ If you have your own CA bundle to use you can configure via the `ca_certs` param
 
 [source,python]
 ------------------------------------
-es = Elasticsearch(
+client = Elasticsearch(
     "https://...",
     ca_certs="/path/to/certs.pem"
 )
@@ -32,7 +32,7 @@ In Python 3.9 and earlier only the leaf certificate will be verified but in Pyth
 
 [source,python]
 ------------------------------------
-es = Elasticsearch(
+client = Elasticsearch(
     "https://...",
     ssl_assert_fingerprint=(
         "315f5bdb76d078c43b8ac0064e4a0164612b1fce77c869345bfc94c75894edd3"
@@ -44,7 +44,7 @@ To disable certificate verification use the `verify_certs=False` parameter. This
 
 [source,python]
 ------------------------------------
-es = Elasticsearch(
+client = Elasticsearch(
     "https://...",
     verify_certs=False
 )
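With verification disabled the client warns about the insecure connection on use. A sketch of silencing that warning, assuming the companion `ssl_show_warn` parameter the client exposes for this purpose:

[source,python]
------------------------------------
client = Elasticsearch(
    "https://...",
    verify_certs=False,
    # Assumed companion parameter; suppresses the insecure-TLS warning.
    ssl_show_warn=False
)
------------------------------------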
@@ -59,7 +59,7 @@ Configuring the minimum TLS version to connect to is done via the `ssl_version`
 ------------------------------------
 import ssl
 
-es = Elasticsearch(
+client = Elasticsearch(
     ...,
     ssl_version=ssl.TLSVersion.TLSv1_2
 )
@@ -72,7 +72,7 @@ Elasticsearch can be configured to authenticate clients via TLS client certifica
 
 [source,python]
 ------------------------------------
-es = Elasticsearch(
+client = Elasticsearch(
     ...,
     client_cert="/path/to/cert.pem",
     client_key="/path/to/key.pem",
@@ -93,7 +93,7 @@ import ssl
 ctx = ssl.create_default_context()
 ctx.load_verify_locations(...)
 
-es = Elasticsearch(
+client = Elasticsearch(
     ...,
     ssl_context=ctx
 )
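The `load_verify_locations(...)` context line elides its argument; the diff leaves it as-is. For reference, a compact standard-library equivalent that loads a CA bundle at context creation, with the path being a placeholder assumption:

[source,python]
------------------------------------
import ssl

# '/path/to/certs.pem' is a hypothetical CA bundle path.
ctx = ssl.create_default_context(cafile="/path/to/certs.pem")
------------------------------------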
@@ -110,7 +110,7 @@ the `Accept-Encoding: gzip` HTTP header. By default compression is disabled.
 
 [source,python]
 ------------------------------------
-es = Elasticsearch(
+client = Elasticsearch(
     ...,
     http_compress=True # Enable compression!
 )
@@ -130,13 +130,13 @@ Setting `request_timeout` to `None` will disable timeouts.
 
 [source,python]
 ------------------------------------
-es = Elasticsearch(
+client = Elasticsearch(
     ...,
     request_timeout=10 # 10 second timeout
 )
 
 # Search request will timeout in 5 seconds
-es.options(request_timeout=5).search(...)
+client.options(request_timeout=5).search(...)
 ------------------------------------
 
 [discrete]
@@ -148,7 +148,7 @@ In the example below there are three different configurable timeouts for the `cl
 
 [source,python]
 ------------------------------------
-es.options(
+client.options(
     # Amount of time to wait for an HTTP response to start.
     request_timeout=30
 ).cluster.health(
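The hunk's context window cuts the call off after `).cluster.health(`. A sketch of how the full call might read, assuming the cluster-health API's server-side `timeout` and `master_timeout` parameters (these two server-side waits plus `request_timeout` account for the three timeouts the surrounding prose mentions):

[source,python]
------------------------------------
client.options(
    # Amount of time to wait for an HTTP response to start.
    request_timeout=30
).cluster.health(
    # Assumed server-side waits: info from all nodes,
    # then a response from the master node.
    timeout=30,
    master_timeout=10,
)
------------------------------------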
@@ -170,13 +170,13 @@ The maximum number of retries per request can be configured via the `max_retries
 
 [source,python]
 ------------------------------------
-es = Elasticsearch(
+client = Elasticsearch(
     ...,
     max_retries=5
 )
 
 # For this API request we disable retries with 'max_retries=0'
-es.options(max_retries=0).index(
+client.options(max_retries=0).index(
     index="blogs",
     document={
         "title": "..."
@@ -191,11 +191,11 @@ Connection errors are automatically retried if retries are enabled. Retrying req
 
 [source,python]
 ------------------------------------
-es = Elasticsearch(
+client = Elasticsearch(
     ...,
     retry_on_timeout=True
 )
-es.options(retry_on_timeout=False).info()
+client.options(retry_on_timeout=False).info()
 ------------------------------------
 
 [discrete]
@@ -205,13 +205,13 @@ By default if retries are enabled `retry_on_status` is set to `(429, 502, 503, 5
 
 [source,python]
 ------------------------------------
-es = Elasticsearch(
+client = Elasticsearch(
     ...,
     retry_on_status=()
 )
 
 # Retry this API on '500 Internal Error' statuses
-es.options(retry_on_status=[500]).index(
+client.options(retry_on_status=[500]).index(
     index="blogs",
     document={
         "title": "..."
@@ -228,14 +228,14 @@ A good example where this is useful is setting up or cleaning up resources in a
 
 [source,python]
 ------------------------------------
-es = Elasticsearch(...)
+client = Elasticsearch(...)
 
 # API request is robust against the index not existing:
-resp = es.options(ignore_status=404).indices.delete(index="delete-this")
+resp = client.options(ignore_status=404).indices.delete(index="delete-this")
 resp.meta.status # Can be either '2XX' or '404'
 
 # API request is robust against the index already existing:
-resp = es.options(ignore_status=[400]).indices.create(
+resp = client.options(ignore_status=[400]).indices.create(
     index="create-this",
     mapping={
         "properties": {"field": {"type": "integer"}}
@@ -322,7 +322,7 @@ You can specify a node selector pattern via the `node_selector_class` parameter.
 
 [source,python]
 ------------------------------------
-es = Elasticsearch(
+client = Elasticsearch(
     ...,
     node_selector_class="round_robin"
 )
@@ -337,7 +337,7 @@ from elastic_transport import NodeSelector
 class CustomSelector(NodeSelector):
     def select(nodes): ...
 
-es = Elasticsearch(
+client = Elasticsearch(
     ...,
     node_selector_class=CustomSelector
 )
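The `def select(nodes): ...` context line is only a stub. A self-contained sketch of a working selector, assuming `NodeSelector.select` is called with the list of candidate nodes and returns the node to use:

[source,python]
------------------------------------
from elasticsearch import Elasticsearch
from elastic_transport import NodeSelector

class CustomSelector(NodeSelector):
    # Hypothetical strategy: always pick the first candidate node.
    def select(self, nodes):
        return nodes[0]

client = Elasticsearch(
    ...,
    node_selector_class=CustomSelector
)
------------------------------------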
@@ -374,7 +374,7 @@ class JsonSetSerializer(JsonSerializer):
             return list(data)
         return super().default(data)
 
-es = Elasticsearch(
+client = Elasticsearch(
     ...,
     # Serializers are a mapping of 'mimetype' to Serializer class.
     serializers={"application/json": JsonSetSerializer()}
@@ -397,7 +397,7 @@ For all of the built-in HTTP node implementations like `urllib3`, `requests`, an
 ------------------------------------
 from elasticsearch import Elasticsearch
 
-es = Elasticsearch(
+client = Elasticsearch(
     ...,
     node_class="requests"
 )
@@ -413,7 +413,7 @@ from elastic_transport import Urllib3HttpNode
 class CustomHttpNode(Urllib3HttpNode):
     ...
 
-es = Elasticsearch(
+client = Elasticsearch(
     ...,
     node_class=CustomHttpNode
 )
@@ -426,7 +426,7 @@ Each node contains its own pool of HTTP connections to allow for concurrent requ
 
 [source,python]
 ------------------------------------
-es = Elasticsearch(
+client = Elasticsearch(
     ...,
     connections_per_node=5
 )