@@ -2,7 +2,6 @@
 # SPDX-License-Identifier: Apache-2.0

 from pathlib import Path
-from typing import Optional, Tuple

 import pytest
 import torch
@@ -13,6 +12,7 @@
     ChronosPipeline,
     MeanScaleUniformBins,
 )
+from test.util import validate_tensor


 def test_base_chronos_pipeline_loads_from_huggingface():
@@ -166,30 +166,21 @@ def test_tokenizer_random_data(use_eos_token: bool):
     assert samples.shape == (2, 10, 4)


-def validate_tensor(
-    a: torch.Tensor, shape: Tuple[int, ...], dtype: Optional[torch.dtype] = None
-) -> None:
-    assert isinstance(a, torch.Tensor)
-    assert a.shape == shape
-
-    if dtype is not None:
-        assert a.dtype == dtype
-
-
 @pytest.mark.parametrize("model_dtype", [torch.float32, torch.bfloat16])
-@pytest.mark.parametrize("input_dtype", [torch.float32, torch.bfloat16])
+@pytest.mark.parametrize("input_dtype", [torch.float32, torch.bfloat16, torch.int64])
 def test_pipeline_predict(model_dtype: torch.dtype, input_dtype: torch.dtype):
     pipeline = ChronosPipeline.from_pretrained(
         Path(__file__).parent / "dummy-chronos-model",
         device_map="cpu",
         torch_dtype=model_dtype,
     )
-    context = 10 * torch.rand(size=(4, 16), dtype=input_dtype) + 10
+    context = 10 * torch.rand(size=(4, 16)) + 10
+    context = context.to(dtype=input_dtype)

     # input: tensor of shape (batch_size, context_length)

     samples = pipeline.predict(context, num_samples=12, prediction_length=3)
-    validate_tensor(samples, shape=(4, 12, 3), dtype=input_dtype)
+    validate_tensor(samples, shape=(4, 12, 3), dtype=torch.float32)

     with pytest.raises(ValueError):
         samples = pipeline.predict(
@@ -199,12 +190,12 @@ def test_pipeline_predict(model_dtype: torch.dtype, input_dtype: torch.dtype):
     samples = pipeline.predict(
         context, num_samples=7, prediction_length=65, limit_prediction_length=False
     )
-    validate_tensor(samples, shape=(4, 7, 65), dtype=input_dtype)
+    validate_tensor(samples, shape=(4, 7, 65), dtype=torch.float32)

     # input: batch_size-long list of tensors of shape (context_length,)

     samples = pipeline.predict(list(context), num_samples=12, prediction_length=3)
-    validate_tensor(samples, shape=(4, 12, 3), dtype=input_dtype)
+    validate_tensor(samples, shape=(4, 12, 3), dtype=torch.float32)

     with pytest.raises(ValueError):
         samples = pipeline.predict(
@@ -220,12 +211,12 @@ def test_pipeline_predict(model_dtype: torch.dtype, input_dtype: torch.dtype):
         prediction_length=65,
         limit_prediction_length=False,
     )
-    validate_tensor(samples, shape=(4, 7, 65), dtype=input_dtype)
+    validate_tensor(samples, shape=(4, 7, 65), dtype=torch.float32)

     # input: tensor of shape (context_length,)

     samples = pipeline.predict(context[0, ...], num_samples=12, prediction_length=3)
-    validate_tensor(samples, shape=(1, 12, 3), dtype=input_dtype)
+    validate_tensor(samples, shape=(1, 12, 3), dtype=torch.float32)

     with pytest.raises(ValueError):
         samples = pipeline.predict(
@@ -240,16 +231,18 @@ def test_pipeline_predict(model_dtype: torch.dtype, input_dtype: torch.dtype):
         num_samples=7,
         prediction_length=65,
     )
-    validate_tensor(samples, shape=(1, 7, 65), dtype=input_dtype)
+    validate_tensor(samples, shape=(1, 7, 65), dtype=torch.float32)


 @pytest.mark.parametrize("model_dtype", [torch.float32, torch.bfloat16])
+@pytest.mark.parametrize("input_dtype", [torch.float32, torch.bfloat16, torch.int64])
 @pytest.mark.parametrize("prediction_length", [3, 65])
 @pytest.mark.parametrize(
     "quantile_levels", [[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9], [0.1, 0.5, 0.9]]
 )
 def test_pipeline_predict_quantiles(
     model_dtype: torch.dtype,
+    input_dtype: torch.dtype,
     prediction_length: int,
     quantile_levels: list[int],
 ):
@@ -259,6 +252,7 @@ def test_pipeline_predict_quantiles(
         torch_dtype=model_dtype,
     )
     context = 10 * torch.rand(size=(4, 16)) + 10
+    context = context.to(dtype=input_dtype)

     num_expected_quantiles = len(quantile_levels)
     # input: tensor of shape (batch_size, context_length)
@@ -269,8 +263,10 @@ def test_pipeline_predict_quantiles(
         prediction_length=prediction_length,
         quantile_levels=quantile_levels,
     )
-    validate_tensor(quantiles, (4, prediction_length, num_expected_quantiles))
-    validate_tensor(mean, (4, prediction_length))
+    validate_tensor(
+        quantiles, (4, prediction_length, num_expected_quantiles), dtype=torch.float32
+    )
+    validate_tensor(mean, (4, prediction_length), dtype=torch.float32)

     # input: batch_size-long list of tensors of shape (context_length,)

@@ -280,8 +276,10 @@ def test_pipeline_predict_quantiles(
         prediction_length=prediction_length,
         quantile_levels=quantile_levels,
     )
-    validate_tensor(quantiles, (4, prediction_length, num_expected_quantiles))
-    validate_tensor(mean, (4, prediction_length))
+    validate_tensor(
+        quantiles, (4, prediction_length, num_expected_quantiles), dtype=torch.float32
+    )
+    validate_tensor(mean, (4, prediction_length), dtype=torch.float32)

     # input: tensor of shape (context_length,)

@@ -291,20 +289,23 @@ def test_pipeline_predict_quantiles(
         prediction_length=prediction_length,
         quantile_levels=quantile_levels,
     )
-    validate_tensor(quantiles, (1, prediction_length, num_expected_quantiles))
-    validate_tensor(mean, (1, prediction_length))
+    validate_tensor(
+        quantiles, (1, prediction_length, num_expected_quantiles), dtype=torch.float32
+    )
+    validate_tensor(mean, (1, prediction_length), dtype=torch.float32)


 @pytest.mark.parametrize("model_dtype", [torch.float32, torch.bfloat16])
-@pytest.mark.parametrize("input_dtype", [torch.float32, torch.bfloat16])
+@pytest.mark.parametrize("input_dtype", [torch.float32, torch.bfloat16, torch.int64])
 def test_pipeline_embed(model_dtype: torch.dtype, input_dtype: torch.dtype):
     pipeline = ChronosPipeline.from_pretrained(
         Path(__file__).parent / "dummy-chronos-model",
         device_map="cpu",
         torch_dtype=model_dtype,
     )
     d_model = pipeline.model.model.config.d_model
-    context = 10 * torch.rand(size=(4, 16), dtype=input_dtype) + 10
+    context = 10 * torch.rand(size=(4, 16)) + 10
+    context = context.to(dtype=input_dtype)
     expected_embed_length = 16 + (1 if pipeline.model.config.use_eos_token else 0)

     # input: tensor of shape (batch_size, context_length)
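Note on the moved helper: this diff deletes validate_tensor from the test file
and imports it via `from test.util import validate_tensor`, but the target
module itself is not part of the diff. A minimal sketch of what test/util.py
presumably contains, assuming the helper moved over unchanged from the lines
deleted above (the module path is taken from the new import):

    # test/util.py -- assumed location, per the new import in this diff
    from typing import Optional, Tuple

    import torch


    def validate_tensor(
        a: torch.Tensor, shape: Tuple[int, ...], dtype: Optional[torch.dtype] = None
    ) -> None:
        # Always check that the value is a tensor of the expected shape;
        # check dtype only when the caller specifies one.
        assert isinstance(a, torch.Tensor)
        assert a.shape == shape

        if dtype is not None:
            assert a.dtype == dtype

The optional dtype argument is what the updated assertions rely on: the tests
now feed bfloat16 and int64 contexts as well as float32, but expect predict
and predict_quantiles to return float32 tensors regardless of the input dtype,
which is why every expected dtype in those assertions changed from input_dtype
to torch.float32.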