diff --git a/torch_xla/distributed/xla_backend.py b/torch_xla/distributed/xla_backend.py
index 7222a7bf3dc..6001fd2b171 100644
--- a/torch_xla/distributed/xla_backend.py
+++ b/torch_xla/distributed/xla_backend.py
@@ -46,7 +46,7 @@ def __init__(self, prefix_store, rank, size, timeout):
   def getBackendName(self):
     return 'xla'
 
-  # pytorch's process group is unable to retrive the group size from python level. It should
+  # pytorch's process group is unable to retrieve the group size from python level. It should
   # already been support in C++ level: https://github.com/pytorch/pytorch/blob/7b1988f9222f3dec5cc2012afce84218199748ae/torch/csrc/distributed/c10d/ProcessGroup.cpp#L148-L152
   # For now we manually set the group name property as a temporary solution.
   def _set_group_name(self, name: str) -> None:
@@ -391,7 +391,7 @@ def new_xla_process_group(ranks=None,
     else:
       pg._mesh = [ranks]
   else:
-    logging.warn(
+    logging.warning(
         f'Can\'t infer process group mesh from given ranks "{str(ranks)}". '
         'The process group will use the entire world as its collective comm group.'
     )
diff --git a/torchax/torchax/tensor.py b/torchax/torchax/tensor.py
index 66e2b55994b..0d7328b44af 100644
--- a/torchax/torchax/tensor.py
+++ b/torchax/torchax/tensor.py
@@ -162,7 +162,7 @@ def jax_device(self):
 
   @property
   def data(self):
-    logger.warn("In-place to .data modifications still results a copy on TPU")
+    logger.warning("In-place to .data modifications still results a copy on TPU")
     return self
 
   @data.setter