make test_optimized_block forked again

Branch: pull/554/head
Author: Denis Mazur, 4 months ago
Parent: a945711f58
Commit: e9c81b2beb

@@ -184,7 +184,6 @@ class UnoptimizedWrappedLlamaBlock(LlamaDecoderLayer):
 
 @pytest.mark.parametrize("device", ["cpu", "cuda:0"])
-# @pytest.mark.forked
 def test_optimized_block(device):
     if device == "cuda:0" and not torch.cuda.is_available():
         pytest.skip("CUDA tests can be run only in CUDA-enabled setups")
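
For context: `@pytest.mark.forked` comes from the pytest-forked plugin, which runs the marked test in a forked child process so that global state (such as an initialized CUDA context) cannot leak between tests. Per the commit title, the marker is being restored; the hunk above only shows the stale commented-out line being dropped, so the exact placement of the re-added marker is an assumption. A minimal sketch of what the re-forked test presumably looks like, with the test body elided since it is not shown in this hunk:

import pytest
import torch

@pytest.mark.parametrize("device", ["cpu", "cuda:0"])
@pytest.mark.forked  # pytest-forked: fork a child process per test to isolate CUDA/global state
def test_optimized_block(device):
    # Skip the GPU variant on machines without CUDA instead of failing.
    if device == "cuda:0" and not torch.cuda.is_available():
        pytest.skip("CUDA tests can be run only in CUDA-enabled setups")
    ...  # body elided; not part of the visible diff

Note that pytest-forked relies on os.fork(), so forked tests run only on Unix-like systems.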
