@@ -89,13 +89,18 @@ def update_labels(yaml, instascale, instance_types):
         metadata.pop("labels")
 
 
-def update_priority(item, dispatch_priority):
+def update_priority(yaml, item, workers, dispatch_priority):
     if dispatch_priority is not None:
         head = item.get("generictemplate").get("spec").get("headGroupSpec")
         worker = item.get("generictemplate").get("spec").get("workerGroupSpecs")[0]
         head["template"]["spec"]["priorityClassName"] = dispatch_priority
         worker["template"]["spec"]["priorityClassName"] = dispatch_priority
+        update_scheduling_spec(yaml, workers)
 
+def update_scheduling_spec(yaml, workers):
+    spec = yaml.get("spec")
+    spec["schedulingSpec"] = {"minAvailable": ""}
+    spec["schedulingSpec"]["minAvailable"] = workers + 1
 
 def update_custompodresources(
     item, min_cpu, max_cpu, min_memory, max_memory, gpu, workers
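The relocated `update_scheduling_spec` first initializes `spec["schedulingSpec"]` (the empty-string placeholder is immediately overwritten) and then sets `minAvailable` to `workers + 1`, counting the head node alongside the workers. A minimal sketch of the effect, assuming the helpers above are in scope; `user_yaml` and `item` are stripped-down stand-ins holding only the fields the helpers touch, not a real AppWrapper manifest:

```python
# Hypothetical, minimal structures: only the fields update_priority reads.
user_yaml = {"spec": {}}
item = {
    "generictemplate": {
        "spec": {
            "headGroupSpec": {"template": {"spec": {}}},
            "workerGroupSpecs": [{"template": {"spec": {}}}],
        }
    }
}

update_priority(user_yaml, item, workers=2, dispatch_priority="high-priority")

# Both pod templates now carry the priority class...
head = item["generictemplate"]["spec"]["headGroupSpec"]
assert head["template"]["spec"]["priorityClassName"] == "high-priority"
# ...and the AppWrapper gains a schedulingSpec: head + 2 workers = 3.
assert user_yaml["spec"]["schedulingSpec"]["minAvailable"] == 3
```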
@@ -183,11 +188,6 @@ def update_resources(spec, min_cpu, max_cpu, min_memory, max_memory, gpu):
         limits["nvidia.com/gpu"] = gpu
 
 
-def update_scheduling_spec(yaml, workers):
-    spec = yaml.get("spec")
-    spec["schedulingSpec"]["minAvailable"] = workers + 1
-
-
 def update_nodes(
     item,
     appwrapper_name,
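For context, the removed version indexed into `spec["schedulingSpec"]` without creating it first, so it would raise a `KeyError` whenever the parsed template's spec lacked a `schedulingSpec` block (an inference about the base template, but consistent with the new version creating the key before assigning):

```python
# Sketch of the failure mode the relocated helper avoids (hypothetical spec).
spec = {}  # parsed AppWrapper spec with no schedulingSpec block

try:
    spec["schedulingSpec"]["minAvailable"] = 3  # removed version: assumes the key exists
except KeyError:
    pass  # raised when the template ships without schedulingSpec

spec["schedulingSpec"] = {"minAvailable": 3}  # new version: create, then set
```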
@@ -368,8 +368,7 @@ def generate_appwrapper(
     route_item = resources["resources"].get("GenericItems")[1]
     update_names(user_yaml, item, appwrapper_name, cluster_name, namespace)
     update_labels(user_yaml, instascale, instance_types)
-    update_priority(item, dispatch_priority)
-    update_scheduling_spec(user_yaml, workers)
+    update_priority(user_yaml, item, workers, dispatch_priority)
     update_custompodresources(
         item, min_cpu, max_cpu, min_memory, max_memory, gpu, workers
     )
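Because the `schedulingSpec` update now runs inside `update_priority`'s `dispatch_priority is not None` guard, `generate_appwrapper` only attaches a `schedulingSpec` when a dispatch priority is requested, whereas the old call sequence set it unconditionally. A sketch of the guarded path, reusing the stand-in structures from above:

```python
user_yaml = {"spec": {}}
item = {
    "generictemplate": {
        "spec": {
            "headGroupSpec": {"template": {"spec": {}}},
            "workerGroupSpecs": [{"template": {"spec": {}}}],
        }
    }
}

# No dispatch priority: the guard skips both the priority class
# assignments and the schedulingSpec update.
update_priority(user_yaml, item, workers=2, dispatch_priority=None)
assert "schedulingSpec" not in user_yaml["spec"]
```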