Skip to content
This repository was archived by the owner on Nov 17, 2023. It is now read-only.

Commit c01d5f7

Browse files
committed
Disables failing tests due to cuDNN
1 parent 29578b5 commit c01d5f7

File tree

2 files changed

+2
-0
lines changed

2 files changed

+2
-0
lines changed

tests/python/gpu/test_gluon_gpu.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -227,6 +227,7 @@ def test_rnn_layer_begin_state_type():
227227
modeling_layer(fake_data)
228228

229229

230+
@unittest.skip("test fails due to cuDNN arch missmatch. temporarily disabled till it gets fixed. See https://github.com/apache/incubator-mxnet/issues/14502")
230231
def test_gluon_ctc_consistency():
231232
loss = mx.gluon.loss.CTCLoss()
232233
data = mx.nd.arange(0, 4, repeat=40, ctx=mx.gpu(0)).reshape((2,20,4)).flip(axis=0)

tests/python/unittest/test_gluon_rnn.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -541,6 +541,7 @@ def test_rnn_layers_fp32():
541541

542542
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
543543
@unittest.skipIf(mx.context.num_gpus() == 0, "RNN FP16 only implemented for GPU for now")
544+
@unittest.skip("test fails due to cuDNN arch missmatch. temporarily disabled till it gets fixed. See https://github.com/apache/incubator-mxnet/issues/14502")
544545
def test_rnn_layers_fp16():
545546
run_rnn_layers('float16', 'float32', mx.gpu())
546547

0 commit comments

Comments
 (0)