 
 import aesara
 import numpy as np
+import pytest
 
 from pymc.distributions import Normal
 from pymc.model import Model
@@ -45,7 +46,7 @@ def test_float64(self):
 
         for sampler in self.samplers:
             with model:
-                sample(10, sampler())
+                sample(draws=10, tune=10, chains=1, step=sampler())
 
     @aesara.config.change_flags({"floatX": "float32", "warn_float64": "warn"})
     def test_float32(self):
@@ -58,8 +59,9 @@ def test_float32(self):
 
         for sampler in self.samplers:
             with model:
-                sample(10, sampler())
+                sample(draws=10, tune=10, chains=1, step=sampler())
 
+    @pytest.mark.xfail(reason="MLDA not refactored for V4 yet")
     @aesara.config.change_flags({"floatX": "float64", "warn_float64": "ignore"})
     def test_float64_MLDA(self):
         data = np.random.randn(5)
@@ -76,8 +78,9 @@ def test_float64_MLDA(self):
         assert obs.dtype == "float64"
 
         with model:
-            sample(10, MLDA(coarse_models=[coarse_model]))
+            sample(draws=10, tune=10, chains=1, step=MLDA(coarse_models=[coarse_model]))
 
+    @pytest.mark.xfail(reason="MLDA not refactored for V4 yet")
     @aesara.config.change_flags({"floatX": "float32", "warn_float64": "warn"})
     def test_float32_MLDA(self):
         data = np.random.randn(5).astype("float32")
@@ -94,4 +97,4 @@ def test_float32_MLDA(self):
         assert obs.dtype == "float32"
 
         with model:
-            sample(10, MLDA(coarse_models=[coarse_model]))
+            sample(draws=10, tune=10, chains=1, step=MLDA(coarse_models=[coarse_model]))
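For reference, a minimal standalone sketch (not part of the diff) of the keyword-style sample call these tests now use. Only the sample(draws=..., tune=..., chains=..., step=...) shape and the Normal/Model imports come from the diff above; the Metropolis step, the pymc.sampling and pymc.step_methods import paths, and the toy model are illustrative assumptions.

import numpy as np

from pymc.distributions import Normal
from pymc.model import Model
from pymc.sampling import sample          # assumed import path
from pymc.step_methods import Metropolis  # illustrative step method, not taken from the diff

# Build a tiny model and draw a handful of samples with an explicit step method,
# mirroring the keyword-argument call style used in the updated tests.
with Model() as model:
    x = Normal("x", 0.0, 1.0)
    obs = Normal("obs", mu=x, sigma=1.0, observed=np.random.randn(5))

with model:
    idata = sample(draws=10, tune=10, chains=1, step=Metropolis())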