
Commit 250bac8

LXYTSOS authored and soumith committed
utils.py in references can't work with pytorch-cpu (#1023)
* Fixed utils.py: it can't work with pytorch-cpu because of this line of code: `memory=torch.cuda.max_memory_allocated()`
1 parent 0fb41c2 commit 250bac8
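
The change is the same in all three reference scripts: query CUDA memory only when a CUDA backend is actually available. As a minimal standalone sketch of the pattern (the `max_mem_mb` helper is hypothetical, not part of the commit):

import torch

def max_mem_mb():
    # Hypothetical helper showing the guard introduced by this commit:
    # on a pytorch-cpu install, torch.cuda.max_memory_allocated() fails,
    # so callers must check torch.cuda.is_available() first.
    MB = 1024.0 * 1024.0
    if torch.cuda.is_available():
        return torch.cuda.max_memory_allocated() / MB
    return None

print(max_mem_mb())  # None on a CPU-only build; peak CUDA memory in MB otherwise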

3 files changed: +90 −42 lines changed


references/classification/utils.py

Lines changed: 30 additions & 14 deletions

@@ -115,15 +115,25 @@ def log_every(self, iterable, print_freq, header=None):
         iter_time = SmoothedValue(fmt='{avg:.4f}')
         data_time = SmoothedValue(fmt='{avg:.4f}')
         space_fmt = ':' + str(len(str(len(iterable)))) + 'd'
-        log_msg = self.delimiter.join([
-            header,
-            '[{0' + space_fmt + '}/{1}]',
-            'eta: {eta}',
-            '{meters}',
-            'time: {time}',
-            'data: {data}',
-            'max mem: {memory:.0f}'
-        ])
+        if torch.cuda.is_available():
+            log_msg = self.delimiter.join([
+                header,
+                '[{0' + space_fmt + '}/{1}]',
+                'eta: {eta}',
+                '{meters}',
+                'time: {time}',
+                'data: {data}',
+                'max mem: {memory:.0f}'
+            ])
+        else:
+            log_msg = self.delimiter.join([
+                header,
+                '[{0' + space_fmt + '}/{1}]',
+                'eta: {eta}',
+                '{meters}',
+                'time: {time}',
+                'data: {data}'
+            ])
         MB = 1024.0 * 1024.0
         for obj in iterable:
             data_time.update(time.time() - end)
@@ -132,11 +142,17 @@ def log_every(self, iterable, print_freq, header=None):
             if i % print_freq == 0:
                 eta_seconds = iter_time.global_avg * (len(iterable) - i)
                 eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
-                print(log_msg.format(
-                    i, len(iterable), eta=eta_string,
-                    meters=str(self),
-                    time=str(iter_time), data=str(data_time),
-                    memory=torch.cuda.max_memory_allocated() / MB))
+                if torch.cuda.is_available():
+                    print(log_msg.format(
+                        i, len(iterable), eta=eta_string,
+                        meters=str(self),
+                        time=str(iter_time), data=str(data_time),
+                        memory=torch.cuda.max_memory_allocated() / MB))
+                else:
+                    print(log_msg.format(
+                        i, len(iterable), eta=eta_string,
+                        meters=str(self),
+                        time=str(iter_time), data=str(data_time)))
             i += 1
             end = time.time()
         total_time = time.time() - start_time
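
Note why the format string itself must branch, and not just the print call: `str.format` raises a KeyError when the template references a field, such as `{memory}`, that the call does not supply. A small sketch with made-up values (`delimiter` and `header` are stand-ins for the values used inside `log_every`):

delimiter = '  '
header = 'Epoch: [0]'
space_fmt = ':' + str(len(str(1000))) + 'd'   # ':4d' for a 1000-item iterable
log_msg = delimiter.join([
    header,
    '[{0' + space_fmt + '}/{1}]',
    'eta: {eta}',
    'max mem: {memory:.0f}',
])
print(log_msg.format(42, 1000, eta='0:01:23', memory=512.0))
# Epoch: [0]  [  42/1000]  eta: 0:01:23  max mem: 512
log_msg.format(42, 1000, eta='0:01:23')  # raises KeyError: 'memory'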

references/detection/utils.py

Lines changed: 30 additions & 14 deletions

@@ -188,15 +188,25 @@ def log_every(self, iterable, print_freq, header=None):
         iter_time = SmoothedValue(fmt='{avg:.4f}')
         data_time = SmoothedValue(fmt='{avg:.4f}')
         space_fmt = ':' + str(len(str(len(iterable)))) + 'd'
-        log_msg = self.delimiter.join([
-            header,
-            '[{0' + space_fmt + '}/{1}]',
-            'eta: {eta}',
-            '{meters}',
-            'time: {time}',
-            'data: {data}',
-            'max mem: {memory:.0f}'
-        ])
+        if torch.cuda.is_available():
+            log_msg = self.delimiter.join([
+                header,
+                '[{0' + space_fmt + '}/{1}]',
+                'eta: {eta}',
+                '{meters}',
+                'time: {time}',
+                'data: {data}',
+                'max mem: {memory:.0f}'
+            ])
+        else:
+            log_msg = self.delimiter.join([
+                header,
+                '[{0' + space_fmt + '}/{1}]',
+                'eta: {eta}',
+                '{meters}',
+                'time: {time}',
+                'data: {data}'
+            ])
         MB = 1024.0 * 1024.0
         for obj in iterable:
             data_time.update(time.time() - end)
@@ -205,11 +215,17 @@ def log_every(self, iterable, print_freq, header=None):
             if i % print_freq == 0 or i == len(iterable) - 1:
                 eta_seconds = iter_time.global_avg * (len(iterable) - i)
                 eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
-                print(log_msg.format(
-                    i, len(iterable), eta=eta_string,
-                    meters=str(self),
-                    time=str(iter_time), data=str(data_time),
-                    memory=torch.cuda.max_memory_allocated() / MB))
+                if torch.cuda.is_available():
+                    print(log_msg.format(
+                        i, len(iterable), eta=eta_string,
+                        meters=str(self),
+                        time=str(iter_time), data=str(data_time),
+                        memory=torch.cuda.max_memory_allocated() / MB))
+                else:
+                    print(log_msg.format(
+                        i, len(iterable), eta=eta_string,
+                        meters=str(self),
+                        time=str(iter_time), data=str(data_time)))
             i += 1
             end = time.time()
         total_time = time.time() - start_time
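
One detail specific to the detection copy: its print condition, `if i % print_freq == 0 or i == len(iterable) - 1:`, also fires on the final iteration, so the closing log line appears even when the iterable's length is not a multiple of `print_freq`. A tiny sketch with hypothetical values:

print_freq = 20
length = 50  # hypothetical iterable length, not a multiple of print_freq
printed = [i for i in range(length) if i % print_freq == 0 or i == length - 1]
print(printed)  # [0, 20, 40, 49] -- the final iteration is always logged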

references/segmentation/utils.py

Lines changed: 30 additions & 14 deletions

@@ -161,15 +161,25 @@ def log_every(self, iterable, print_freq, header=None):
         iter_time = SmoothedValue(fmt='{avg:.4f}')
         data_time = SmoothedValue(fmt='{avg:.4f}')
         space_fmt = ':' + str(len(str(len(iterable)))) + 'd'
-        log_msg = self.delimiter.join([
-            header,
-            '[{0' + space_fmt + '}/{1}]',
-            'eta: {eta}',
-            '{meters}',
-            'time: {time}',
-            'data: {data}',
-            'max mem: {memory:.0f}'
-        ])
+        if torch.cuda.is_available():
+            log_msg = self.delimiter.join([
+                header,
+                '[{0' + space_fmt + '}/{1}]',
+                'eta: {eta}',
+                '{meters}',
+                'time: {time}',
+                'data: {data}',
+                'max mem: {memory:.0f}'
+            ])
+        else:
+            log_msg = self.delimiter.join([
+                header,
+                '[{0' + space_fmt + '}/{1}]',
+                'eta: {eta}',
+                '{meters}',
+                'time: {time}',
+                'data: {data}'
+            ])
         MB = 1024.0 * 1024.0
         for obj in iterable:
             data_time.update(time.time() - end)
@@ -178,11 +188,17 @@ def log_every(self, iterable, print_freq, header=None):
             if i % print_freq == 0:
                 eta_seconds = iter_time.global_avg * (len(iterable) - i)
                 eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
-                print(log_msg.format(
-                    i, len(iterable), eta=eta_string,
-                    meters=str(self),
-                    time=str(iter_time), data=str(data_time),
-                    memory=torch.cuda.max_memory_allocated() / MB))
+                if torch.cuda.is_available():
+                    print(log_msg.format(
+                        i, len(iterable), eta=eta_string,
+                        meters=str(self),
+                        time=str(iter_time), data=str(data_time),
+                        memory=torch.cuda.max_memory_allocated() / MB))
+                else:
+                    print(log_msg.format(
+                        i, len(iterable), eta=eta_string,
+                        meters=str(self),
+                        time=str(iter_time), data=str(data_time)))
             i += 1
             end = time.time()
         total_time = time.time() - start_time
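
For reference, the `max mem` field reports peak, not current, usage: `torch.cuda.max_memory_allocated()` returns the maximum memory, in bytes, held by tensors on the current device since the start of the process (or the last reset). A minimal sketch, assuming a CUDA build and device (the tensor size is arbitrary):

import torch

if torch.cuda.is_available():
    x = torch.randn(1024, 1024, device='cuda')  # ~4 MB of float32 tensor data
    MB = 1024.0 * 1024.0
    # Peak bytes held by tensors on the current device, converted to MB;
    # this is the quantity behind the 'max mem' field in the logs above.
    print('max mem: {:.0f}'.format(torch.cuda.max_memory_allocated() / MB))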
