
Commit af9b8c5

[Security] fix security problem for prune_by_memory_estimation (#61382)

* OS Command Injection prune_by_memory_estimation fix
* Fix StyleCode

1 parent f99d4f2 · commit af9b8c5

File tree

  • python/paddle/distributed/auto_tuner

1 file changed: +34 −9 lines

python/paddle/distributed/auto_tuner/prune.py

Lines changed: 34 additions & 9 deletions
@@ -510,17 +510,42 @@ def prune_by_memory_estimation(tuner_cfg, cur_cfg, history_cfgs=[]):
             "max_mem_usage should be set when using memory estimation tool"
         )
 
-    memory_estimation_cmd = f"python {memory_estimation_tool} --dp_degree {cur_cfg['dp_degree']} --mp_degree {cur_cfg['mp_degree']} \
-        --pp_degree {cur_cfg['pp_degree']} --vpp_degree {cur_cfg['vpp_degree']} \
-        --sharding_degree {cur_cfg['sharding_degree']} --sharding_stage {cur_cfg['sharding_stage']} \
-        --use_recompute {cur_cfg['use_recompute']} --micro_batch_size {cur_cfg['micro_batch_size']} \
-        --recompute_granularity {cur_cfg['recompute_granularity']} \
-        --hidden_size {model_cfg['hidden_size']} --num_attention_heads {model_cfg['num_attention_heads']} \
-        --num_layers {model_cfg['num_layers']} --max_sequence_length {model_cfg['max_sequence_length']} \
-        --vocab_size {model_cfg['vocab_size']} --intermediate_size {model_cfg['intermediate_size']} "
+    memory_estimation_cmd = [
+        "python",
+        memory_estimation_tool,
+        "--dp_degree",
+        str(cur_cfg['dp_degree']),
+        "--mp_degree",
+        str(cur_cfg['mp_degree']),
+        "--pp_degree",
+        str(cur_cfg['pp_degree']),
+        "--vpp_degree",
+        str(cur_cfg['vpp_degree']),
+        "--sharding_degree",
+        str(cur_cfg['sharding_degree']),
+        "--sharding_stage",
+        str(cur_cfg['sharding_stage']),
+        "--use_recompute",
+        str(cur_cfg['use_recompute']),
+        "--micro_batch_size",
+        str(cur_cfg['micro_batch_size']),
+        "--recompute_granularity",
+        str(cur_cfg['recompute_granularity']),
+        "--hidden_size",
+        str(model_cfg['hidden_size']),
+        "--num_attention_heads",
+        str(model_cfg['num_attention_heads']),
+        "--num_layers",
+        str(model_cfg['num_layers']),
+        "--max_sequence_length",
+        str(model_cfg['max_sequence_length']),
+        "--vocab_size",
+        str(model_cfg['vocab_size']),
+        "--intermediate_size",
+        str(model_cfg['intermediate_size']),
+    ]
     result = subprocess.run(
         memory_estimation_cmd,
-        shell=True,
         capture_output=True,
         text=True,
     )
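
For context, the risk being closed here is classic OS command injection: with shell=True, every configuration value is interpolated into one shell string, so a value such as "1; rm -rf ..." would be parsed by the shell as an extra command. Passing an argument list without shell=True hands each value to the child process as a literal argument. The sketch below is illustrative only; the estimate.py script and the config value are hypothetical, not code from this repository.

import subprocess

# Hypothetical, attacker-influenced config value (illustration only).
cur_cfg = {"dp_degree": "1; echo INJECTED"}

# Old pattern (vulnerable): one shell string, so the text after ';' would run
# as a second shell command.
# subprocess.run(f"python estimate.py --dp_degree {cur_cfg['dp_degree']}", shell=True)

# New pattern (safe): arguments are passed as a list and executed without a
# shell, so the payload stays an ordinary string argument and is never
# shell-parsed.
result = subprocess.run(
    ["python", "estimate.py", "--dp_degree", str(cur_cfg["dp_degree"])],
    capture_output=True,
    text=True,
)
print(result.returncode)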
