%% WARNING: DO NOT EDIT, AUTO-GENERATED CODE!
%% See https://github.com/aws-beam/aws-codegen for more details.
%% @doc AWS Batch enables you to run batch computing workloads on the AWS
%% Cloud.
%%
%% Batch computing is a common way for developers, scientists, and engineers
%% to access large amounts of compute resources, and AWS Batch removes the
%% undifferentiated heavy lifting of configuring and managing the required
%% infrastructure. AWS Batch will be familiar to users of traditional batch
%% computing software. This service can efficiently provision resources in
%% response to jobs submitted in order to eliminate capacity constraints,
%% reduce compute costs, and deliver results quickly.
%%
%% As a fully managed service, AWS Batch enables developers, scientists, and
%% engineers to run batch computing workloads of any scale. AWS Batch
%% automatically provisions compute resources and optimizes the workload
%% distribution based on the quantity and scale of the workloads. With AWS
%% Batch, there is no need to install or manage batch computing software,
%% which allows you to focus on analyzing results and solving problems. AWS
%% Batch reduces operational complexities, saves time, and reduces costs,
%% which makes it easy for developers, scientists, and engineers to run their
%% batch jobs in the AWS Cloud.
-module(aws_batch).
-export([cancel_job/2,
cancel_job/3,
create_compute_environment/2,
create_compute_environment/3,
create_job_queue/2,
create_job_queue/3,
delete_compute_environment/2,
delete_compute_environment/3,
delete_job_queue/2,
delete_job_queue/3,
deregister_job_definition/2,
deregister_job_definition/3,
describe_compute_environments/2,
describe_compute_environments/3,
describe_job_definitions/2,
describe_job_definitions/3,
describe_job_queues/2,
describe_job_queues/3,
describe_jobs/2,
describe_jobs/3,
list_jobs/2,
list_jobs/3,
list_tags_for_resource/2,
list_tags_for_resource/3,
register_job_definition/2,
register_job_definition/3,
submit_job/2,
submit_job/3,
tag_resource/3,
tag_resource/4,
terminate_job/2,
terminate_job/3,
untag_resource/3,
untag_resource/4,
update_compute_environment/2,
update_compute_environment/3,
update_job_queue/2,
update_job_queue/3]).
-include_lib("hackney/include/hackney_lib.hrl").
%%====================================================================
%% API
%%====================================================================
%% @doc Cancels a job in an AWS Batch job queue.
%%
%% Jobs that are in the `SUBMITTED', `PENDING', or `RUNNABLE' state are
%% cancelled. Jobs that have progressed to `STARTING' or `RUNNING' are not
%% cancelled (but the API operation still succeeds, even if no job is
%% cancelled); these jobs must be terminated with the `TerminateJob'
%% operation.
%% cancel_job/2: convenience wrapper — cancel_job/3 with default (empty) options.
cancel_job(Client, Input) ->
    cancel_job(Client, Input, []).
%% cancel_job/3: POSTs the Input map as the JSON body of the CancelJob call.
cancel_job(Client, Input0, Options) ->
    Method = post,
    Path = ["/v1/canceljob"],
    %% undefined -> request/8 accepts the generic 200/202/204 success statuses.
    SuccessStatusCode = undefined,
    Headers = [],
    Input1 = Input0,
    %% This operation has no query parameters; everything goes in the body.
    Query_ = [],
    Input = Input1,
    request(Client, Method, Path, Query_, Headers, Input, Options, SuccessStatusCode).
%% @doc Creates an AWS Batch compute environment.
%%
%% You can create `MANAGED' or `UNMANAGED' compute environments.
%%
%% In a managed compute environment, AWS Batch manages the capacity and
%% instance types of the compute resources within the environment. This is
%% based on the compute resource specification that you define or the launch
%% template that you specify when you create the compute environment. You can
%% choose to use Amazon EC2 On-Demand Instances or Spot Instances in your
%% managed compute environment. You can optionally set a maximum price so
%% that Spot Instances only launch when the Spot Instance price is below a
%% specified percentage of the On-Demand price.
%%
%% Multi-node parallel jobs are not supported on Spot Instances.
%%
%% In an unmanaged compute environment, you can manage your own compute
%% resources. This provides more compute resource configuration options, such
%% as using a custom AMI, but you must ensure that your AMI meets the Amazon
%% ECS container instance AMI specification. For more information, see
%% Container Instance AMIs in the Amazon Elastic Container Service Developer
%% Guide. After you have created your unmanaged compute environment, you can
%% use the `DescribeComputeEnvironments' operation to find the Amazon ECS
%% cluster that is associated with it. Then, manually launch your container
%% instances into that Amazon ECS cluster. For more information, see
%% Launching an Amazon ECS Container Instance in the Amazon Elastic Container
%% Service Developer Guide.
%%
%% AWS Batch does not upgrade the AMIs in a compute environment after it is
%% created (for example, when a newer version of the Amazon ECS-optimized AMI
%% is available). You are responsible for the management of the guest
%% operating system (including updates and security patches) and any
%% additional application software or utilities that you install on the
%% compute resources. To use a new AMI for your AWS Batch jobs:
%%
%% Create a new compute environment with the new AMI.
%%
%% Add the compute environment to an existing job queue.
%%
%% Remove the old compute environment from your job queue.
%%
%% Delete the old compute environment.
%% create_compute_environment/2: wrapper with default (empty) options.
create_compute_environment(Client, Input) ->
    create_compute_environment(Client, Input, []).
%% create_compute_environment/3: POSTs the Input map as JSON to
%% CreateComputeEnvironment.
create_compute_environment(Client, Input0, Options) ->
    Method = post,
    Path = ["/v1/createcomputeenvironment"],
    %% undefined -> accept the generic 200/202/204 success statuses.
    SuccessStatusCode = undefined,
    Headers = [],
    Input1 = Input0,
    %% No query parameters for this operation.
    Query_ = [],
    Input = Input1,
    request(Client, Method, Path, Query_, Headers, Input, Options, SuccessStatusCode).
%% @doc Creates an AWS Batch job queue.
%%
%% When you create a job queue, you associate one or more compute
%% environments to the queue and assign an order of preference for the
%% compute environments.
%%
%% You also set a priority to the job queue that determines the order in
%% which the AWS Batch scheduler places jobs onto its associated compute
%% environments. For example, if a compute environment is associated with
%% more than one job queue, the job queue with a higher priority is given
%% preference for scheduling jobs to that compute environment.
%% create_job_queue/2: wrapper with default (empty) options.
create_job_queue(Client, Input) ->
    create_job_queue(Client, Input, []).
%% create_job_queue/3: POSTs the Input map as JSON to CreateJobQueue.
create_job_queue(Client, Input0, Options) ->
    Method = post,
    Path = ["/v1/createjobqueue"],
    %% undefined -> accept the generic 200/202/204 success statuses.
    SuccessStatusCode = undefined,
    Headers = [],
    Input1 = Input0,
    %% No query parameters for this operation.
    Query_ = [],
    Input = Input1,
    request(Client, Method, Path, Query_, Headers, Input, Options, SuccessStatusCode).
%% @doc Deletes an AWS Batch compute environment.
%%
%% Before you can delete a compute environment, you must set its state to
%% `DISABLED' with the `UpdateComputeEnvironment' API operation and
%% disassociate it from any job queues with the `UpdateJobQueue' API
%% operation.
%% delete_compute_environment/2: wrapper with default (empty) options.
delete_compute_environment(Client, Input) ->
    delete_compute_environment(Client, Input, []).
%% delete_compute_environment/3: POSTs the Input map as JSON to
%% DeleteComputeEnvironment.
delete_compute_environment(Client, Input0, Options) ->
    Method = post,
    Path = ["/v1/deletecomputeenvironment"],
    %% undefined -> accept the generic 200/202/204 success statuses.
    SuccessStatusCode = undefined,
    Headers = [],
    Input1 = Input0,
    %% No query parameters for this operation.
    Query_ = [],
    Input = Input1,
    request(Client, Method, Path, Query_, Headers, Input, Options, SuccessStatusCode).
%% @doc Deletes the specified job queue.
%%
%% You must first disable submissions for a queue with the `UpdateJobQueue'
%% operation. All jobs in the queue are terminated when you delete a job
%% queue.
%%
%% It is not necessary to disassociate compute environments from a queue
%% before submitting a `DeleteJobQueue' request.
%% delete_job_queue/2: wrapper with default (empty) options.
delete_job_queue(Client, Input) ->
    delete_job_queue(Client, Input, []).
%% delete_job_queue/3: POSTs the Input map as JSON to DeleteJobQueue.
delete_job_queue(Client, Input0, Options) ->
    Method = post,
    Path = ["/v1/deletejobqueue"],
    %% undefined -> accept the generic 200/202/204 success statuses.
    SuccessStatusCode = undefined,
    Headers = [],
    Input1 = Input0,
    %% No query parameters for this operation.
    Query_ = [],
    Input = Input1,
    request(Client, Method, Path, Query_, Headers, Input, Options, SuccessStatusCode).
%% @doc Deregisters an AWS Batch job definition.
%%
%% Job definitions will be permanently deleted after 180 days.
%% deregister_job_definition/2: wrapper with default (empty) options.
deregister_job_definition(Client, Input) ->
    deregister_job_definition(Client, Input, []).
%% deregister_job_definition/3: POSTs the Input map as JSON to
%% DeregisterJobDefinition.
deregister_job_definition(Client, Input0, Options) ->
    Method = post,
    Path = ["/v1/deregisterjobdefinition"],
    %% undefined -> accept the generic 200/202/204 success statuses.
    SuccessStatusCode = undefined,
    Headers = [],
    Input1 = Input0,
    %% No query parameters for this operation.
    Query_ = [],
    Input = Input1,
    request(Client, Method, Path, Query_, Headers, Input, Options, SuccessStatusCode).
%% @doc Describes one or more of your compute environments.
%%
%% If you are using an unmanaged compute environment, you can use the
%% `DescribeComputeEnvironment' operation to determine the `ecsClusterArn'
%% that you should launch your Amazon ECS container instances into.
%% describe_compute_environments/2: wrapper with default (empty) options.
describe_compute_environments(Client, Input) ->
    describe_compute_environments(Client, Input, []).
%% describe_compute_environments/3: POSTs the Input map as JSON to
%% DescribeComputeEnvironments.
describe_compute_environments(Client, Input0, Options) ->
    Method = post,
    Path = ["/v1/describecomputeenvironments"],
    %% undefined -> accept the generic 200/202/204 success statuses.
    SuccessStatusCode = undefined,
    Headers = [],
    Input1 = Input0,
    %% No query parameters for this operation.
    Query_ = [],
    Input = Input1,
    request(Client, Method, Path, Query_, Headers, Input, Options, SuccessStatusCode).
%% @doc Describes a list of job definitions.
%%
%% You can specify a `status' (such as `ACTIVE') to only return job
%% definitions that match that status.
%% describe_job_definitions/2: wrapper with default (empty) options.
describe_job_definitions(Client, Input) ->
    describe_job_definitions(Client, Input, []).
%% describe_job_definitions/3: POSTs the Input map as JSON to
%% DescribeJobDefinitions.
describe_job_definitions(Client, Input0, Options) ->
    Method = post,
    Path = ["/v1/describejobdefinitions"],
    %% undefined -> accept the generic 200/202/204 success statuses.
    SuccessStatusCode = undefined,
    Headers = [],
    Input1 = Input0,
    %% No query parameters for this operation.
    Query_ = [],
    Input = Input1,
    request(Client, Method, Path, Query_, Headers, Input, Options, SuccessStatusCode).
%% @doc Describes one or more of your job queues.
%% describe_job_queues/2: wrapper with default (empty) options.
describe_job_queues(Client, Input) ->
    describe_job_queues(Client, Input, []).
%% describe_job_queues/3: POSTs the Input map as JSON to DescribeJobQueues.
describe_job_queues(Client, Input0, Options) ->
    Method = post,
    Path = ["/v1/describejobqueues"],
    %% undefined -> accept the generic 200/202/204 success statuses.
    SuccessStatusCode = undefined,
    Headers = [],
    Input1 = Input0,
    %% No query parameters for this operation.
    Query_ = [],
    Input = Input1,
    request(Client, Method, Path, Query_, Headers, Input, Options, SuccessStatusCode).
%% @doc Describes a list of AWS Batch jobs.
%% describe_jobs/2: wrapper with default (empty) options.
describe_jobs(Client, Input) ->
    describe_jobs(Client, Input, []).
%% describe_jobs/3: POSTs the Input map as JSON to DescribeJobs.
describe_jobs(Client, Input0, Options) ->
    Method = post,
    Path = ["/v1/describejobs"],
    %% undefined -> accept the generic 200/202/204 success statuses.
    SuccessStatusCode = undefined,
    Headers = [],
    Input1 = Input0,
    %% No query parameters for this operation.
    Query_ = [],
    Input = Input1,
    request(Client, Method, Path, Query_, Headers, Input, Options, SuccessStatusCode).
%% @doc Returns a list of AWS Batch jobs.
%%
%% You must specify only one of the following:
%%
%% <ul> <li> a job queue ID to return a list of jobs in that job queue
%%
%% </li> <li> a multi-node parallel job ID to return a list of that job's
%% nodes
%%
%% </li> <li> an array job ID to return a list of that job's children
%%
%% </li> </ul> You can filter the results by job status with the `jobStatus'
%% parameter. If you do not specify a status, only `RUNNING' jobs are
%% returned.
%% list_jobs/2: wrapper with default (empty) options.
list_jobs(Client, Input) ->
    list_jobs(Client, Input, []).
%% list_jobs/3: POSTs the Input map as JSON to ListJobs.
list_jobs(Client, Input0, Options) ->
    Method = post,
    Path = ["/v1/listjobs"],
    %% undefined -> accept the generic 200/202/204 success statuses.
    SuccessStatusCode = undefined,
    Headers = [],
    Input1 = Input0,
    %% No query parameters for this operation.
    Query_ = [],
    Input = Input1,
    request(Client, Method, Path, Query_, Headers, Input, Options, SuccessStatusCode).
%% @doc List the tags for an AWS Batch resource.
%%
%% AWS Batch resources that support tags are compute environments, jobs, job
%% definitions, and job queues. ARNs for child jobs of array and multi-node
%% parallel (MNP) jobs are not supported.
%% list_tags_for_resource/2: wrapper with default (empty) options.
list_tags_for_resource(Client, ResourceArn)
  when is_map(Client) ->
    list_tags_for_resource(Client, ResourceArn, []).
%% list_tags_for_resource/3: GET /v1/tags/{ResourceArn}. The ARN is
%% URI-encoded into the path; there is no request body (Input = undefined).
list_tags_for_resource(Client, ResourceArn, Options)
  when is_map(Client), is_list(Options) ->
    Path = ["/v1/tags/", aws_util:encode_uri(ResourceArn), ""],
    %% undefined -> accept the generic 200/202/204 success statuses.
    SuccessStatusCode = undefined,
    Headers = [],
    Query_ = [],
    request(Client, get, Path, Query_, Headers, undefined, Options, SuccessStatusCode).
%% @doc Registers an AWS Batch job definition.
%% register_job_definition/2: wrapper with default (empty) options.
register_job_definition(Client, Input) ->
    register_job_definition(Client, Input, []).
%% register_job_definition/3: POSTs the Input map as JSON to
%% RegisterJobDefinition.
register_job_definition(Client, Input0, Options) ->
    Method = post,
    Path = ["/v1/registerjobdefinition"],
    %% undefined -> accept the generic 200/202/204 success statuses.
    SuccessStatusCode = undefined,
    Headers = [],
    Input1 = Input0,
    %% No query parameters for this operation.
    Query_ = [],
    Input = Input1,
    request(Client, Method, Path, Query_, Headers, Input, Options, SuccessStatusCode).
%% @doc Submits an AWS Batch job from a job definition.
%%
%% Parameters specified during `SubmitJob' override parameters defined in the
%% job definition.
%% submit_job/2: wrapper with default (empty) options.
submit_job(Client, Input) ->
    submit_job(Client, Input, []).
%% submit_job/3: POSTs the Input map as JSON to SubmitJob.
submit_job(Client, Input0, Options) ->
    Method = post,
    Path = ["/v1/submitjob"],
    %% undefined -> accept the generic 200/202/204 success statuses.
    SuccessStatusCode = undefined,
    Headers = [],
    Input1 = Input0,
    %% No query parameters for this operation.
    Query_ = [],
    Input = Input1,
    request(Client, Method, Path, Query_, Headers, Input, Options, SuccessStatusCode).
%% @doc Associates the specified tags to a resource with the specified
%% `resourceArn'.
%%
%% If existing tags on a resource are not specified in the request
%% parameters, they are not changed. When a resource is deleted, the tags
%% associated with that resource are deleted as well. AWS Batch resources
%% that support tags are compute environments, jobs, job definitions, and job
%% queues. ARNs for child jobs of array and multi-node parallel (MNP) jobs
%% are not supported.
%% tag_resource/3: wrapper with default (empty) options.
tag_resource(Client, ResourceArn, Input) ->
    tag_resource(Client, ResourceArn, Input, []).
%% tag_resource/4: POST /v1/tags/{ResourceArn} with the tag map as the
%% JSON body. The ARN is URI-encoded into the path.
tag_resource(Client, ResourceArn, Input0, Options) ->
    Method = post,
    Path = ["/v1/tags/", aws_util:encode_uri(ResourceArn), ""],
    %% undefined -> accept the generic 200/202/204 success statuses.
    SuccessStatusCode = undefined,
    Headers = [],
    Input1 = Input0,
    %% No query parameters for this operation.
    Query_ = [],
    Input = Input1,
    request(Client, Method, Path, Query_, Headers, Input, Options, SuccessStatusCode).
%% @doc Terminates a job in a job queue.
%%
%% Jobs that are in the `STARTING' or `RUNNING' state are terminated, which
%% causes them to transition to `FAILED'. Jobs that have not progressed to
%% the `STARTING' state are cancelled.
%% terminate_job/2: wrapper with default (empty) options.
terminate_job(Client, Input) ->
    terminate_job(Client, Input, []).
%% terminate_job/3: POSTs the Input map as JSON to TerminateJob.
terminate_job(Client, Input0, Options) ->
    Method = post,
    Path = ["/v1/terminatejob"],
    %% undefined -> accept the generic 200/202/204 success statuses.
    SuccessStatusCode = undefined,
    Headers = [],
    Input1 = Input0,
    %% No query parameters for this operation.
    Query_ = [],
    Input = Input1,
    request(Client, Method, Path, Query_, Headers, Input, Options, SuccessStatusCode).
%% @doc Deletes specified tags from an AWS Batch resource.
%% untag_resource/3: wrapper with default (empty) options.
untag_resource(Client, ResourceArn, Input) ->
    untag_resource(Client, ResourceArn, Input, []).
%% untag_resource/4: DELETE /v1/tags/{ResourceArn}. The `tagKeys' entry is
%% lifted out of the Input map into the query string; the remainder (if
%% any) stays in the body.
untag_resource(Client, ResourceArn, Input0, Options) ->
    Method = delete,
    Path = ["/v1/tags/", aws_util:encode_uri(ResourceArn), ""],
    %% undefined -> accept the generic 200/202/204 success statuses.
    SuccessStatusCode = undefined,
    Headers = [],
    Input1 = Input0,
    %% {InputKey, QueryParamName} pairs moved from body to query string.
    QueryMapping = [
                     {<<"tagKeys">>, <<"tagKeys">>}
                   ],
    %% NOTE(review): build_headers/2 is the codegen helper that splits the
    %% mapped keys out of Input1 — despite its name it returns {Query, Input}.
    {Query_, Input} = aws_request:build_headers(QueryMapping, Input1),
    request(Client, Method, Path, Query_, Headers, Input, Options, SuccessStatusCode).
%% @doc Updates an AWS Batch compute environment.
%% update_compute_environment/2: wrapper with default (empty) options.
update_compute_environment(Client, Input) ->
    update_compute_environment(Client, Input, []).
%% update_compute_environment/3: POSTs the Input map as JSON to
%% UpdateComputeEnvironment.
update_compute_environment(Client, Input0, Options) ->
    Method = post,
    Path = ["/v1/updatecomputeenvironment"],
    %% undefined -> accept the generic 200/202/204 success statuses.
    SuccessStatusCode = undefined,
    Headers = [],
    Input1 = Input0,
    %% No query parameters for this operation.
    Query_ = [],
    Input = Input1,
    request(Client, Method, Path, Query_, Headers, Input, Options, SuccessStatusCode).
%% @doc Updates a job queue.
%% update_job_queue/2: wrapper with default (empty) options.
update_job_queue(Client, Input) ->
    update_job_queue(Client, Input, []).
%% update_job_queue/3: POSTs the Input map as JSON to UpdateJobQueue.
update_job_queue(Client, Input0, Options) ->
    Method = post,
    Path = ["/v1/updatejobqueue"],
    %% undefined -> accept the generic 200/202/204 success statuses.
    SuccessStatusCode = undefined,
    Headers = [],
    Input1 = Input0,
    %% No query parameters for this operation.
    Query_ = [],
    Input = Input1,
    request(Client, Method, Path, Query_, Headers, Input, Options, SuccessStatusCode).
%%====================================================================
%% Internal functions
%%====================================================================
%% Build, sign, and execute one AWS Batch HTTP request, then normalise the
%% hackney result via handle_response/2.
%%
%% Client carries region/endpoint/credentials; Method is a hackney method
%% atom; Path is an iolist of path segments; Query is a proplist of query
%% parameters; Input is the request payload map (or undefined for bodyless
%% calls); Options is passed straight through to hackney, with
%% `should_send_body_as_binary' additionally consulted here.
-spec request(aws_client:aws_client(), atom(), iolist(), list(),
              list(), map() | undefined, list(), pos_integer() | undefined) ->
    {ok, Result, {integer(), list(), hackney:client()}} |
    {error, Error, {integer(), list(), hackney:client()}} |
    {error, term()} when
    Result :: map(),
    Error :: map().
request(Client, Method, Path, Query, Headers0, Input, Options, SuccessStatusCode) ->
    Client1 = Client#{service => <<"batch">>},
    Host = build_host(<<"batch">>, Client1),
    URL0 = build_url(Host, Path, Client1),
    URL = aws_request:add_query(URL0, Query),
    AdditionalHeaders = [ {<<"Host">>, Host}
                        , {<<"Content-Type">>, <<"application/x-amz-json-1.1">>}
                        ],
    Headers1 = aws_request:add_headers(AdditionalHeaders, Headers0),
    %% BUG FIX: the generated code matched only `true' and `undefined', so an
    %% explicit {should_send_body_as_binary, false} option crashed with a
    %% case_clause. Defaulting the lookup to false and matching both booleans
    %% preserves the old behavior for true/absent and fixes explicit false.
    Payload =
        case proplists:get_value(should_send_body_as_binary, Options, false) of
            true ->
                %% Caller supplies a pre-encoded binary body under <<"Body">>.
                maps:get(<<"Body">>, Input, <<"">>);
            false ->
                encode_payload(Input)
        end,
    MethodBin = aws_request:method_to_binary(Method),
    SignedHeaders = aws_request:sign_request(Client1, MethodBin, URL, Headers1, Payload),
    Response = hackney:request(Method, URL, SignedHeaders, Payload, Options),
    handle_response(Response, SuccessStatusCode).
%% Turn a raw hackney response into {ok, Map, Meta} | {error, Map, Meta} |
%% {error, Reason}, where Meta is {StatusCode, Headers, HackneyClient}.
%%
%% 200/202/204 (or the operation-specific SuccessStatusCode) count as
%% success; any other status has its body decoded as a JSON error document.
handle_response({ok, StatusCode, ResponseHeaders, Client}, SuccessStatusCode)
  when StatusCode =:= 200;
       StatusCode =:= 202;
       StatusCode =:= 204;
       StatusCode =:= SuccessStatusCode ->
    case hackney:body(Client) of
        %% BUG FIX: the empty-body clause was guarded to 200/SuccessStatusCode
        %% only, so an empty 202/204 response (SuccessStatusCode is undefined
        %% throughout this module) fell through to jsx:decode(<<>>), which
        %% crashes. Any empty body on an accepted status is an empty result.
        {ok, <<>>} ->
            {ok, #{}, {StatusCode, ResponseHeaders, Client}};
        {ok, Body} ->
            Result = jsx:decode(Body),
            {ok, Result, {StatusCode, ResponseHeaders, Client}}
    end;
handle_response({ok, StatusCode, ResponseHeaders, Client}, _) ->
    %% Non-success HTTP status: AWS returns a JSON error payload in the body.
    {ok, Body} = hackney:body(Client),
    Error = jsx:decode(Body),
    {error, Error, {StatusCode, ResponseHeaders, Client}};
handle_response({error, Reason}, _) ->
    %% Transport-level failure (connect/timeout/etc.) reported by hackney.
    {error, Reason}.
%% Compute the endpoint host for the request.
%% "local" region with an explicit endpoint -> use that endpoint verbatim;
%% "local" region alone -> localhost (for local stacks/emulators);
%% otherwise the conventional <prefix>.<region>.<endpoint> AWS hostname.
build_host(_EndpointPrefix, #{region := <<"local">>, endpoint := Endpoint}) ->
    Endpoint;
build_host(_EndpointPrefix, #{region := <<"local">>}) ->
    <<"localhost">>;
build_host(EndpointPrefix, #{region := Region, endpoint := Endpoint}) ->
    aws_util:binary_join([EndpointPrefix, Region, Endpoint], <<".">>).
%% Assemble the full request URL: <proto>://<host>:<port><path>.
%% NOTE(review): Port is joined as-is by binary_join, so it is presumably
%% already stored as a binary in the client map — confirm against aws_client.
build_url(Host, Path0, Client) ->
    Proto = maps:get(proto, Client),
    Path = erlang:iolist_to_binary(Path0),
    Port = maps:get(port, Client),
    aws_util:binary_join([Proto, <<"://">>, Host, <<":">>, Port, Path], <<"">>).
%% Serialise the request payload for transmission: `undefined' means the
%% operation has no body and yields the empty binary; a map is encoded to
%% JSON via jsx.
-spec encode_payload(undefined | map()) -> binary().
encode_payload(Payload) ->
    case Payload of
        undefined -> <<>>;
        Body      -> jsx:encode(Body)
    end.