From 304d1128a5e04e6437f6af0867b8dfb1acf6dde8 Mon Sep 17 00:00:00 2001
From: Nadia Yakimakha <32335935+nadiaya@users.noreply.github.com>
Date: Fri, 3 Apr 2020 13:05:27 -0700
Subject: [PATCH] doc: fix documentation to provide working example.

---
 doc/using_tf.rst | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/doc/using_tf.rst b/doc/using_tf.rst
index c65e616bc1..782834eda1 100644
--- a/doc/using_tf.rst
+++ b/doc/using_tf.rst
@@ -267,19 +267,19 @@ Training with ``MPI`` is configured by specifying following fields in ``distribu
 command executed by SageMaker to launch distributed horovod training.
 
-In the below example we create an estimator to launch Horovod distributed training with 2 processes on one host:
+In the below example we create an estimator to launch Horovod distributed training with 4 processes on one host:
 
 .. code:: python
 
     from sagemaker.tensorflow import TensorFlow
 
     tf_estimator = TensorFlow(entry_point='tf-train.py', role='SageMakerRole',
-                              train_instance_count=1, train_instance_type='ml.p2.xlarge',
-                              framework_version='1.12', py_version='py3',
+                              train_instance_count=1, train_instance_type='ml.p3.8xlarge',
+                              framework_version='2.1.0', py_version='py3',
                               distributions={
                                   'mpi': {
                                       'enabled': True,
-                                      'processes_per_host': 2,
+                                      'processes_per_host': 4,
                                       'custom_mpi_options': '--NCCL_DEBUG INFO'
                                   }
                               })
 
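For context, the estimator configured in the patched example above would typically be launched with a ``fit()`` call. The sketch below is not part of the patch; the S3 input location and channel name are placeholders, not values taken from the SageMaker documentation.

.. code:: python

    # Hypothetical usage sketch: start the Horovod-enabled training job
    # defined by tf_estimator in the example above. The bucket/prefix and
    # the 'training' channel name are illustrative placeholders.
    tf_estimator.fit({'training': 's3://my-bucket/my-training-data'})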