
Commit bfd862a

Bump version to 0.6.1 (#326)

* Bump version to 0.6.1.
* Re-disable Llama 2 tests.

1 parent 89b5b65 · commit bfd862a

2 files changed: +4 −4


setup.cfg (+1 −1)
@@ -1,5 +1,5 @@
 [metadata]
-version = 0.6.0
+version = 0.6.1
 description = Integrating LLMs into structured NLP pipelines
 author = Explosion
 author_email = [email protected]
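
Since setuptools reads the version from this [metadata] table, the bump can be verified at runtime with the standard library; a minimal sketch, assuming the distribution is installed under its PyPI name spacy-llm:

from importlib.metadata import version  # stdlib since Python 3.8

# Prints "0.6.1" once this release is installed.
print(version("spacy-llm"))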

spacy_llm/tests/models/test_llama2.py (+3 −3)
@@ -37,7 +37,7 @@
 """
 
 
-# @pytest.mark.skip(reason="CI runner needs more GPU memory")
+@pytest.mark.skip(reason="CI runner needs more GPU memory")
 @pytest.mark.gpu
 @pytest.mark.skipif(not has_torch_cuda_gpu, reason="needs GPU & CUDA")
 def test_init():
@@ -52,7 +52,7 @@ def test_init():
     )
 
 
-# @pytest.mark.skip(reason="CI runner needs more GPU memory")
+@pytest.mark.skip(reason="CI runner needs more GPU memory")
 @pytest.mark.gpu
 @pytest.mark.skipif(not has_torch_cuda_gpu, reason="needs GPU & CUDA")
 def test_init_from_config():
@@ -62,7 +62,7 @@ def test_init_from_config():
     torch.cuda.empty_cache()
 
 
-# @pytest.mark.skip(reason="CI runner needs more GPU memory")
+@pytest.mark.skip(reason="CI runner needs more GPU memory")
 @pytest.mark.gpu
 @pytest.mark.skipif(not has_torch_cuda_gpu, reason="needs GPU & CUDA")
 def test_invalid_model():
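
The lines toggled above follow pytest's standard skip/skipif marker pattern for gating GPU-bound tests. A minimal sketch of that pattern, assuming has_torch_cuda_gpu is derived from torch.cuda.is_available() (the real helper in spacy_llm's test suite may differ) and that the gpu marker is registered in the project's pytest configuration:

import pytest

try:
    import torch

    # Assumption: mirrors how the test suite detects a usable CUDA device.
    has_torch_cuda_gpu = torch.cuda.is_available()
except ImportError:
    has_torch_cuda_gpu = False


@pytest.mark.skip(reason="CI runner needs more GPU memory")  # unconditional skip, restored by this commit
@pytest.mark.gpu  # custom marker; assumed registered in the project's pytest config
@pytest.mark.skipif(not has_torch_cuda_gpu, reason="needs GPU & CUDA")
def test_init():
    # Placeholder body: the real test loads a Llama 2 model onto the GPU.
    ...

Because @pytest.mark.skip is unconditional, restoring it means the skipif guard is never consulted; commenting it back out would return the tests to conditional, GPU-gated execution.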
