Skip to content

Commit 2276b22

Browse files
committed
Fix unnecessary param and bug
1 parent 7502076 commit 2276b22

File tree

1 file changed

+8
-9
lines changed

1 file changed

+8
-9
lines changed

references/optical_flow/train.py

Lines changed: 8 additions & 9 deletions
Original file line number | Diff line number | Diff line change
@@ -73,7 +73,7 @@ def _evaluate(model, args, val_dataset, *, padder_mode, num_flow_updates=None, b
7373
if args.distributed:
7474
sampler = torch.utils.data.distributed.DistributedSampler(val_dataset, shuffle=False, drop_last=True)
7575
else:
76-
sampler = torch.utils.data.SequentialSampler(val_dataset, drop_last=True)
76+
sampler = torch.utils.data.SequentialSampler(val_dataset)
7777

7878
val_loader = torch.utils.data.DataLoader(
7979
val_dataset,
@@ -122,14 +122,13 @@ def inner_loop(blob):
122122

123123
if args.distributed:
124124
num_processed_samples = utils.reduce_across_processes(num_processed_samples)
125-
print(
126-
f"Batch-processed {num_processed_samples} / {len(val_dataset)} samples. "
127-
"Going to process the remaining samples individually, if any."
128-
)
129-
130-
if not args.distributed or args.rank == 0: # we only need to process the rest on a single worker
131-
for i in range(num_processed_samples, len(val_dataset)):
132-
inner_loop(val_dataset[i])
125+
print(
126+
f"Batch-processed {num_processed_samples} / {len(val_dataset)} samples. "
127+
"Going to process the remaining samples individually, if any."
128+
)
129+
if args.rank == 0: # we only need to process the rest on a single worker
130+
for i in range(num_processed_samples, len(val_dataset)):
131+
inner_loop(val_dataset[i])
133132

134133
logger.synchronize_between_processes()
135134
print(header, logger)

0 commit comments

Comments
 (0)