1 change: 1 addition & 0 deletions thinc/tests/layers/test_linear.py
@@ -87,6 +87,7 @@ def test_predict_small(W_b_input):


 @given(arrays_OI_O_BI(max_batch=20, max_out=30, max_in=30))
+@settings(deadline=None)
 def test_predict_extensive(W_b_input):
     W, b, input_ = W_b_input
     nr_out, nr_in = W.shape
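The only change in this file is the extra @settings(deadline=None) decorator, which disables hypothesis's per-example time limit so a slow run of the extensive prediction test is not flagged as flaky. A minimal standalone sketch of the same pattern (the test name and array strategy below are illustrative, not part of this PR):

    import numpy
    from hypothesis import given, settings
    from hypothesis import strategies as st
    from hypothesis.extra.numpy import arrays

    @given(arrays("float64", (4, 3), elements=st.floats(-1, 1)))
    @settings(deadline=None)  # no per-example deadline, so slow runs don't raise DeadlineExceeded
    def test_matmul_shape(W):
        # Multiply a generated (4, 3) matrix by a fixed length-3 vector.
        x = numpy.ones((3,), dtype="float64")
        assert (W @ x).shape == (4,)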
17 changes: 10 additions & 7 deletions thinc/tests/model/test_model.py
@@ -4,9 +4,10 @@
 import time
 from thinc.api import Adam, CupyOps, Dropout, Linear, Model, Relu
 from thinc.api import Shim, Softmax, chain, change_attr_values
-from thinc.api import concatenate, set_dropout_rate
-from thinc.api import use_ops, with_debug, wrap_model_recursive
-from thinc.compat import has_cupy_gpu
+from thinc.api import concatenate, set_dropout_rate, set_gpu_allocator
+from thinc.api import with_debug, wrap_model_recursive, use_ops
+from thinc.util import gpu_is_available
+from thinc.compat import has_tensorflow
 import numpy
 
 from ..util import make_tempdir
@@ -404,15 +405,17 @@ def get_model_id(id_list, index):
     assert len(list_of_ids) == len(list(set(list_of_ids)))
 
 
+@pytest.mark.skipif(not gpu_is_available(), reason="needs GPU")
 def test_model_gpu():
     pytest.importorskip("ml_datasets")
     import ml_datasets
 
-    ops = "cpu"
-    if has_cupy_gpu:
-        ops = "cupy"
+    if has_tensorflow:
+        # Ensure that CuPy has enough memory as TF just
+        # loves to bogart all of the GPU's memory on init.
+        set_gpu_allocator("tensorflow")
 
-    with use_ops(ops):
+    with use_ops("cupy"):
         n_hidden = 32
         dropout = 0.2
         (train_X, train_Y), (dev_X, dev_Y) = ml_datasets.mnist()
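Taken together, the reworked test always runs on the CuPy ops, is skipped outright when no GPU is available, and, when TensorFlow is installed, routes CuPy allocations through TensorFlow's memory pool so that TF's startup reservation does not starve the test. A minimal sketch of the same guard-and-allocator pattern (the test body below is illustrative, not part of this PR):

    import pytest
    from thinc.api import Linear, set_gpu_allocator, use_ops
    from thinc.compat import has_tensorflow
    from thinc.util import gpu_is_available

    @pytest.mark.skipif(not gpu_is_available(), reason="needs GPU")
    def test_linear_on_gpu():
        if has_tensorflow:
            # Let CuPy allocate through TensorFlow's pool so TF's init
            # doesn't grab the whole GPU before the test allocates anything.
            set_gpu_allocator("tensorflow")
        with use_ops("cupy"):
            model = Linear(nO=2, nI=2)
            model.initialize()
            # Run a forward pass on a small zero-filled batch.
            model.predict(model.ops.alloc2f(4, 2))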