diff --git a/README.md b/README.md
index cba0a968..381235b9 100644
--- a/README.md
+++ b/README.md
@@ -119,8 +119,7 @@ factory = "llm"
 labels = ["COMPLIMENT", "INSULT"]
 
 [components.llm.model]
-@llm_models = "spacy.OpenAI.v1"
-name = "gpt-4"
+@llm_models = "spacy.GPT-4.v2"
 ```
 
 Now run:
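
For context, the config snippet above is typically loaded as shown below; a minimal sketch, assuming the snippet is saved as `config.cfg` and `OPENAI_API_KEY` is set in the environment:

```python
from spacy_llm.util import assemble

# Build the pipeline from the textcat config shown above.
nlp = assemble("config.cfg")
doc = nlp("You look gorgeous!")
print(doc.cats)  # e.g. {"COMPLIMENT": 1.0, "INSULT": 0.0}
```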
diff --git a/pyproject.toml b/pyproject.toml
index 6d1b0284..d138c29a 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -27,9 +27,7 @@ filterwarnings = [
     "ignore:^.*The `construct` method is deprecated.*",
     "ignore:^.*Skipping device Apple Paravirtual device that does not support Metal 2.0.*",
     "ignore:^.*Pydantic V1 style `@validator` validators are deprecated.*",
-    "ignore:^.*was deprecated in langchain-community.*",
-    "ignore:^.*was deprecated in LangChain 0.0.1.*",
-    "ignore:^.*the load_module() method is deprecated and slated for removal in Python 3.12.*"
+    "ignore:^.*was deprecated in langchain-community.*"
 ]
 markers = [
     "external: interacts with a (potentially cost-incurring) third-party API",
diff --git a/requirements-dev.txt b/requirements-dev.txt
index 9061904f..63862a4a 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -13,8 +13,7 @@ langchain>=0.1,<0.2; python_version>="3.9"
 openai>=0.27,<=0.28.1; python_version>="3.9"
 
 # Necessary for running all local models on GPU.
-# TODO: transformers > 4.38 causes bug in model handling due to unknown factors. To be investigated.
-transformers[sentencepiece]>=4.0.0,<=4.38
+transformers[sentencepiece]>=4.0.0
 torch
 einops>=0.4
 
diff --git a/spacy_llm/models/hf/__init__.py b/spacy_llm/models/hf/__init__.py
index f495632a..b3afbb71 100644
--- a/spacy_llm/models/hf/__init__.py
+++ b/spacy_llm/models/hf/__init__.py
@@ -4,14 +4,12 @@
 from .llama2 import llama2_hf
 from .mistral import mistral_hf
 from .openllama import openllama_hf
-from .registry import huggingface_v1
 from .stablelm import stablelm_hf
 
 __all__ = [
     "HuggingFace",
     "dolly_hf",
     "falcon_hf",
-    "huggingface_v1",
     "llama2_hf",
     "mistral_hf",
     "openllama_hf",
diff --git a/spacy_llm/models/hf/mistral.py b/spacy_llm/models/hf/mistral.py
index 9e7b06c5..c80d636e 100644
--- a/spacy_llm/models/hf/mistral.py
+++ b/spacy_llm/models/hf/mistral.py
@@ -99,7 +99,8 @@ def mistral_hf(
     name (Literal): Name of the Mistral model. Has to be one of Mistral.get_model_names().
     config_init (Optional[Dict[str, Any]]): HF config for initializing the model.
     config_run (Optional[Dict[str, Any]]): HF config for running the model.
-    RETURNS (Mistral): Mistral instance that can execute a set of prompts and return the raw responses.
+    RETURNS (Callable[[Iterable[str]], Iterable[str]]): Mistral instance that can execute a set of prompts and return
+        the raw responses.
     """
     return Mistral(
         name=name, config_init=config_init, config_run=config_run, context_length=8000
diff --git a/spacy_llm/models/hf/registry.py b/spacy_llm/models/hf/registry.py
deleted file mode 100644
index 1210341c..00000000
--- a/spacy_llm/models/hf/registry.py
+++ /dev/null
@@ -1,51 +0,0 @@
-from typing import Any, Dict, Optional
-
-from confection import SimpleFrozenDict
-
-from ...registry import registry
-from .base import HuggingFace
-from .dolly import Dolly
-from .falcon import Falcon
-from .llama2 import Llama2
-from .mistral import Mistral
-from .openllama import OpenLLaMA
-from .stablelm import StableLM
-
-
-@registry.llm_models("spacy.HF.v1")
-@registry.llm_models("spacy.HuggingFace.v1")
-def huggingface_v1(
-    name: str,
-    config_init: Optional[Dict[str, Any]] = SimpleFrozenDict(),
-    config_run: Optional[Dict[str, Any]] = SimpleFrozenDict(),
-) -> HuggingFace:
-    """Returns HuggingFace model instance.
-    name (str): Name of model to use.
-    config_init (Optional[Dict[str, Any]]): HF config for initializing the model.
-    config_run (Optional[Dict[str, Any]]): HF config for running the model.
-    RETURNS (Callable[[Iterable[str]], Iterable[str]]): Model instance that can execute a set of prompts and return
-        the raw responses.
-    """
-    model_context_lengths = {
-        Dolly: 2048,
-        Falcon: 2048,
-        Llama2: 4096,
-        Mistral: 8000,
-        OpenLLaMA: 2048,
-        StableLM: 4096,
-    }
-
-    for model_cls, context_length in model_context_lengths.items():
-        model_names = getattr(model_cls, "MODEL_NAMES")
-        if model_names and name in model_names.__args__:
-            return model_cls(
-                name=name,
-                config_init=config_init,
-                config_run=config_run,
-                context_length=context_length,
-            )
-
-    raise ValueError(
-        f"Name {name} could not be associated with any of the supported models. Please check "
-        f"https://spacy.io/api/large-language-models#models-hf to ensure the specified model name is correct."
-    )
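
With the catch-all `spacy.HF.v1`/`spacy.HuggingFace.v1` entry removed, configs point at the model-specific registry entries instead. A minimal sketch using the Dolly entry, with names taken from the test configs further down in this diff (assumes `spacy-llm` is installed so the `llm` factory is registered, and that the model weights can be downloaded and run locally):

```python
import spacy

nlp = spacy.blank("en")
nlp.add_pipe(
    "llm",
    config={
        "task": {"@llm_tasks": "spacy.NoOp.v1"},
        # Model-specific registry entry instead of "spacy.HuggingFace.v1".
        "model": {"@llm_models": "spacy.Dolly.v1", "name": "dolly-v2-3b"},
    },
)
doc = nlp("This is a test.")
```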
diff --git a/spacy_llm/models/langchain/model.py b/spacy_llm/models/langchain/model.py
index c940da6a..cded9fd7 100644
--- a/spacy_llm/models/langchain/model.py
+++ b/spacy_llm/models/langchain/model.py
@@ -99,7 +99,6 @@ def query_langchain(
         prompts (Iterable[Iterable[Any]]): Prompts to execute.
         RETURNS (Iterable[Iterable[Any]]): LLM responses.
         """
-        assert callable(model)
         return [
             [model.invoke(pr) for pr in prompts_for_doc] for prompts_for_doc in prompts
         ]
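
The nested comprehension above mirrors the structure of `prompts`: prompts are grouped per doc, and the responses keep that grouping. A minimal illustration with a hypothetical stand-in for the LangChain model object (only `.invoke()` matters here):

```python
class EchoModel:
    """Hypothetical stand-in for a LangChain model; only .invoke() is used."""

    def invoke(self, prompt: str) -> str:
        return f"response to: {prompt}"


model = EchoModel()
prompts = [["prompt for doc 1"], ["prompt A for doc 2", "prompt B for doc 2"]]
responses = [
    [model.invoke(pr) for pr in prompts_for_doc] for prompts_for_doc in prompts
]
# responses keeps the per-doc grouping:
# [["response to: prompt for doc 1"],
#  ["response to: prompt A for doc 2", "response to: prompt B for doc 2"]]
```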
diff --git a/spacy_llm/models/rest/anthropic/registry.py b/spacy_llm/models/rest/anthropic/registry.py
index 9719af18..dc44eb7e 100644
--- a/spacy_llm/models/rest/anthropic/registry.py
+++ b/spacy_llm/models/rest/anthropic/registry.py
@@ -7,43 +7,6 @@
 from .model import Anthropic, Endpoints
 
 
-@registry.llm_models("spacy.Anthropic.v1")
-def anthropic_v1(
-    name: str,
-    config: Dict[Any, Any] = SimpleFrozenDict(),
-    strict: bool = Anthropic.DEFAULT_STRICT,
-    max_tries: int = Anthropic.DEFAULT_MAX_TRIES,
-    interval: float = Anthropic.DEFAULT_INTERVAL,
-    max_request_time: float = Anthropic.DEFAULT_MAX_REQUEST_TIME,
-    context_length: Optional[int] = None,
-) -> Anthropic:
-    """Returns Anthropic model instance using REST to prompt API.
-    config (Dict[Any, Any]): LLM config arguments passed on to the initialization of the model instance.
-    name (str): Name of model to use.
-    strict (bool): If True, ValueError is raised if the LLM API returns a malformed response (i. e. any kind of JSON
-        or other response object that does not conform to the expectation of how a well-formed response object from
-        this API should look like). If False, the API error responses are returned by __call__(), but no error will
-        be raised.
-    max_tries (int): Max. number of tries for API request.
-    interval (float): Time interval (in seconds) for API retries in seconds. We implement a base 2 exponential backoff
-        at each retry.
-    max_request_time (float): Max. time (in seconds) to wait for request to terminate before raising an exception.
-    context_length (Optional[int]): Context length for this model. Only necessary for sharding and if no context length
-        natively provided by spacy-llm.
-    RETURNS (Anthropic): Instance of Anthropic model.
-    """
-    return Anthropic(
-        name=name,
-        endpoint=Endpoints.COMPLETIONS.value,
-        config=config,
-        strict=strict,
-        max_tries=max_tries,
-        interval=interval,
-        max_request_time=max_request_time,
-        context_length=context_length,
-    )
-
-
 @registry.llm_models("spacy.Claude-2.v2")
 def anthropic_claude_2_v2(
     config: Dict[Any, Any] = SimpleFrozenDict(),
diff --git a/spacy_llm/models/rest/cohere/registry.py b/spacy_llm/models/rest/cohere/registry.py
index 8deb979d..79c711e1 100644
--- a/spacy_llm/models/rest/cohere/registry.py
+++ b/spacy_llm/models/rest/cohere/registry.py
@@ -7,43 +7,6 @@
 from .model import Cohere, Endpoints
 
 
-@registry.llm_models("spacy.Cohere.v1")
-def cohere_v1(
-    name: str,
-    config: Dict[Any, Any] = SimpleFrozenDict(),
-    strict: bool = Cohere.DEFAULT_STRICT,
-    max_tries: int = Cohere.DEFAULT_MAX_TRIES,
-    interval: float = Cohere.DEFAULT_INTERVAL,
-    max_request_time: float = Cohere.DEFAULT_MAX_REQUEST_TIME,
-    context_length: Optional[int] = None,
-) -> Cohere:
-    """Returns Cohere model instance using REST to prompt API.
-    config (Dict[Any, Any]): LLM config arguments passed on to the initialization of the model instance.
-    name (str): Name of model to use.
-    strict (bool): If True, ValueError is raised if the LLM API returns a malformed response (i. e. any kind of JSON
-        or other response object that does not conform to the expectation of how a well-formed response object from
-        this API should look like). If False, the API error responses are returned by __call__(), but no error will
-        be raised.
-    max_tries (int): Max. number of tries for API request.
-    interval (float): Time interval (in seconds) for API retries in seconds. We implement a base 2 exponential backoff
-        at each retry.
-    max_request_time (float): Max. time (in seconds) to wait for request to terminate before raising an exception.
-    context_length (Optional[int]): Context length for this model. Only necessary for sharding and if no context length
-        natively provided by spacy-llm.
-    RETURNS (Cohere): Instance of Cohere model.
-    """
-    return Cohere(
-        name=name,
-        endpoint=Endpoints.COMPLETION.value,
-        config=config,
-        strict=strict,
-        max_tries=max_tries,
-        interval=interval,
-        max_request_time=max_request_time,
-        context_length=context_length,
-    )
-
-
 @registry.llm_models("spacy.Command.v2")
 def cohere_command_v2(
     config: Dict[Any, Any] = SimpleFrozenDict(),
@@ -93,7 +56,7 @@ def cohere_command(
     max_request_time: float = Cohere.DEFAULT_MAX_REQUEST_TIME,
 ) -> Callable[[Iterable[Iterable[str]]], Iterable[Iterable[str]]]:
     """Returns Cohere instance for 'command' model using REST to prompt API.
-    name (Literal["command", "command-light", "command-light-nightly", "command-nightly"]): Name of model to use.
+    name (Literal["command", "command-light", "command-light-nightly", "command-nightly"]): Model  to use.
     config (Dict[Any, Any]): LLM config arguments passed on to the initialization of the model instance.
     strict (bool): If True, ValueError is raised if the LLM API returns a malformed response (i. e. any kind of JSON
         or other response object that does not conform to the expectation of how a well-formed response object from
diff --git a/spacy_llm/models/rest/openai/registry.py b/spacy_llm/models/rest/openai/registry.py
index 767c9d39..3c3793ff 100644
--- a/spacy_llm/models/rest/openai/registry.py
+++ b/spacy_llm/models/rest/openai/registry.py
@@ -8,47 +8,6 @@
 
 _DEFAULT_TEMPERATURE = 0.0
 
-
-@registry.llm_models("spacy.OpenAI.v1")
-def openai_v1(
-    name: str,
-    config: Dict[Any, Any] = SimpleFrozenDict(temperature=_DEFAULT_TEMPERATURE),
-    strict: bool = OpenAI.DEFAULT_STRICT,
-    max_tries: int = OpenAI.DEFAULT_MAX_TRIES,
-    interval: float = OpenAI.DEFAULT_INTERVAL,
-    max_request_time: float = OpenAI.DEFAULT_MAX_REQUEST_TIME,
-    endpoint: Optional[str] = None,
-    context_length: Optional[int] = None,
-) -> OpenAI:
-    """Returns OpenAI model instance using REST to prompt API.
-
-    config (Dict[Any, Any]): LLM config passed on to the model's initialization.
-    name (str): Model name to use. Can be any model name supported by the OpenAI API.
-    strict (bool): If True, ValueError is raised if the LLM API returns a malformed response (i. e. any kind of JSON
-        or other response object that does not conform to the expectation of how a well-formed response object from
-        this API should look like). If False, the API error responses are returned by __call__(), but no error will
-        be raised.
-    max_tries (int): Max. number of tries for API request.
-    interval (float): Time interval (in seconds) for API retries in seconds. We implement a base 2 exponential backoff
-        at each retry.
-    max_request_time (float): Max. time (in seconds) to wait for request to terminate before raising an exception.
-    endpoint (Optional[str]): Endpoint to set. Defaults to standard endpoint.
-    context_length (Optional[int]): Context length for this model. Only necessary for sharding and if no context length
-        natively provided by spacy-llm.
-    RETURNS (OpenAI): OpenAI model instance.
-    """
-    return OpenAI(
-        name=name,
-        endpoint=endpoint or Endpoints.CHAT.value,
-        config=config,
-        strict=strict,
-        max_tries=max_tries,
-        interval=interval,
-        max_request_time=max_request_time,
-        context_length=context_length,
-    )
-
-
 """
 Parameter explanations:
     strict (bool): If True, ValueError is raised if the LLM API returns a malformed response (i. e. any kind of JSON
diff --git a/spacy_llm/models/rest/palm/registry.py b/spacy_llm/models/rest/palm/registry.py
index 506e6d4b..d7bae629 100644
--- a/spacy_llm/models/rest/palm/registry.py
+++ b/spacy_llm/models/rest/palm/registry.py
@@ -7,48 +7,6 @@
 from .model import Endpoints, PaLM
 
 
-@registry.llm_models("spacy.Google.v1")
-def google_v1(
-    name: str,
-    config: Dict[Any, Any] = SimpleFrozenDict(temperature=0),
-    strict: bool = PaLM.DEFAULT_STRICT,
-    max_tries: int = PaLM.DEFAULT_MAX_TRIES,
-    interval: float = PaLM.DEFAULT_INTERVAL,
-    max_request_time: float = PaLM.DEFAULT_MAX_REQUEST_TIME,
-    context_length: Optional[int] = None,
-    endpoint: Optional[str] = None,
-) -> Callable[[Iterable[Iterable[str]]], Iterable[Iterable[str]]]:
-    """Returns Google model instance using REST to prompt API.
-    name (str): Name of model to use.
-    config (Dict[Any, Any]): LLM config arguments passed on to the initialization of the model instance.
-    strict (bool): If True, ValueError is raised if the LLM API returns a malformed response (i. e. any kind of JSON
-        or other response object that does not conform to the expectation of how a well-formed response object from
-        this API should look like). If False, the API error responses are returned by __call__(), but no error will
-        be raised.
-    max_tries (int): Max. number of tries for API request.
-    interval (float): Time interval (in seconds) for API retries in seconds. We implement a base 2 exponential backoff
-        at each retry.
-    max_request_time (float): Max. time (in seconds) to wait for request to terminate before raising an exception.
-    context_length (Optional[int]): Context length for this model. Only necessary for sharding and if no context length
-        natively provided by spacy-llm.
-    endpoint (Optional[str]): Endpoint to use. Defaults to standard endpoint.
-    RETURNS (PaLM): PaLM model instance.
-    """
-    default_endpoint = (
-        Endpoints.TEXT.value if name in {"text-bison-001"} else Endpoints.MSG.value
-    )
-    return PaLM(
-        name=name,
-        endpoint=endpoint or default_endpoint,
-        config=config,
-        strict=strict,
-        max_tries=max_tries,
-        interval=interval,
-        max_request_time=max_request_time,
-        context_length=None,
-    )
-
-
 @registry.llm_models("spacy.PaLM.v2")
 def palm_bison_v2(
     config: Dict[Any, Any] = SimpleFrozenDict(temperature=0),
@@ -60,7 +18,7 @@ def palm_bison_v2(
     context_length: Optional[int] = None,
 ) -> Callable[[Iterable[Iterable[str]]], Iterable[Iterable[str]]]:
     """Returns Google instance for PaLM Bison model using REST to prompt API.
-    name (Literal["chat-bison-001", "text-bison-001"]): Name of model to use.
+    name (Literal["chat-bison-001", "text-bison-001"]): Model  to use.
     config (Dict[Any, Any]): LLM config arguments passed on to the initialization of the model instance.
     strict (bool): If True, ValueError is raised if the LLM API returns a malformed response (i. e. any kind of JSON
         or other response object that does not conform to the expectation of how a well-formed response object from
@@ -99,7 +57,7 @@ def palm_bison(
     endpoint: Optional[str] = None,
 ) -> PaLM:
     """Returns Google instance for PaLM Bison model using REST to prompt API.
-    name (Literal["chat-bison-001", "text-bison-001"]): Name of model to use.
+    name (Literal["chat-bison-001", "text-bison-001"]): Model  to use.
     config (Dict[Any, Any]): LLM config arguments passed on to the initialization of the model instance.
     strict (bool): If True, ValueError is raised if the LLM API returns a malformed response (i. e. any kind of JSON
         or other response object that does not conform to the expectation of how a well-formed response object from
diff --git a/spacy_llm/pipeline/llm.py b/spacy_llm/pipeline/llm.py
index 90ff20b5..f3edff55 100644
--- a/spacy_llm/pipeline/llm.py
+++ b/spacy_llm/pipeline/llm.py
@@ -24,7 +24,7 @@
 logger.addHandler(logging.NullHandler())
 
 DEFAULT_MODEL_CONFIG = {
-    "@llm_models": "spacy.GPT-3-5.v3",
+    "@llm_models": "spacy.GPT-3-5.v2",
     "strict": True,
 }
 DEFAULT_CACHE_CONFIG = {
@@ -238,7 +238,6 @@ def _process_docs(self, docs: List[Doc]) -> List[Doc]:
                 else self._task.generate_prompts(noncached_doc_batch),
                 n_iters + 1,
             )
-
             responses_iters = tee(
                 self._model(
                     # Ensure that model receives Iterable[Iterable[Any]]. If task doesn't shard, its prompt is wrapped
@@ -252,7 +251,7 @@ def _process_docs(self, docs: List[Doc]) -> List[Doc]:
             )
 
             for prompt_data, response, doc in zip(
-                prompts_iters[1], list(responses_iters[0]), noncached_doc_batch
+                prompts_iters[1], responses_iters[0], noncached_doc_batch
             ):
                 logger.debug(
                     "Generated prompt for doc: %s\n%s",
@@ -267,7 +266,7 @@ def _process_docs(self, docs: List[Doc]) -> List[Doc]:
                         elem[1] if support_sharding else noncached_doc_batch[i]
                         for i, elem in enumerate(prompts_iters[2])
                     ),
-                    list(responses_iters[1]),
+                    responses_iters[1],
                 )
             )
 
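
For reference, a minimal sketch of the `itertools.tee` pattern used in this hunk: both branches iterate the same underlying lazy sequence independently, so the model output can be walked once for logging and once for assembling results without querying the model twice (the generator below is a stand-in for the real model call):

```python
from itertools import tee

model_output = (f"response {i}" for i in range(3))  # stand-in generator
branch_a, branch_b = tee(model_output, 2)

assert list(branch_a) == ["response 0", "response 1", "response 2"]
assert list(branch_b) == ["response 0", "response 1", "response 2"]
```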
diff --git a/spacy_llm/tests/models/test_cohere.py b/spacy_llm/tests/models/test_cohere.py
index f3bb9936..dfcb432a 100644
--- a/spacy_llm/tests/models/test_cohere.py
+++ b/spacy_llm/tests/models/test_cohere.py
@@ -84,7 +84,7 @@ def test_cohere_api_response_when_error():
 def test_cohere_error_unsupported_model():
     """Ensure graceful handling of error when model is not supported"""
     incorrect_model = "x-gpt-3.5-turbo"
-    with pytest.raises(ValueError, match="Request to Cohere API failed"):
+    with pytest.raises(ValueError, match="model not found"):
         Cohere(
             name=incorrect_model,
             config={},
diff --git a/spacy_llm/tests/models/test_dolly.py b/spacy_llm/tests/models/test_dolly.py
index a7da3e7e..6a6dc32f 100644
--- a/spacy_llm/tests/models/test_dolly.py
+++ b/spacy_llm/tests/models/test_dolly.py
@@ -1,5 +1,4 @@
 import copy
-import warnings
 
 import pytest
 import spacy
@@ -10,7 +9,7 @@
 
 _PIPE_CFG = {
     "model": {
-        "@llm_models": "spacy.HuggingFace.v1",
+        "@llm_models": "spacy.Dolly.v1",
         "name": "dolly-v2-3b",
     },
     "task": {"@llm_tasks": "spacy.NoOp.v1"},
@@ -33,7 +32,7 @@
 @llm_tasks = "spacy.NoOp.v1"
 
 [components.llm.model]
-@llm_models = "spacy.HuggingFace.v1"
+@llm_models = "spacy.Dolly.v1"
 name = "dolly-v2-3b"
 """
 
@@ -43,9 +42,7 @@
 def test_init():
     """Test initialization and simple run."""
     nlp = spacy.blank("en")
-    with warnings.catch_warnings():
-        warnings.filterwarnings("ignore", category=DeprecationWarning)
-        nlp.add_pipe("llm", config=_PIPE_CFG)
+    nlp.add_pipe("llm", config=_PIPE_CFG)
     doc = nlp("This is a test.")
     nlp.get_pipe("llm")._model.get_model_names()
     torch.cuda.empty_cache()
@@ -56,7 +53,6 @@ def test_init():
 
 @pytest.mark.gpu
 @pytest.mark.skipif(not has_torch_cuda_gpu, reason="needs GPU & CUDA")
-@pytest.mark.filterwarnings("ignore:the load_module() method is deprecated")
 def test_init_from_config():
     orig_config = Config().from_str(_NLP_CONFIG)
     nlp = spacy.util.load_model_from_config(orig_config, auto_fill=True)
@@ -70,6 +66,6 @@ def test_invalid_model():
     orig_config = Config().from_str(_NLP_CONFIG)
     config = copy.deepcopy(orig_config)
     config["components"]["llm"]["model"]["name"] = "dolly-the-sheep"
-    with pytest.raises(ValueError, match="could not be associated"):
+    with pytest.raises(ValueError, match="unexpected value; permitted"):
         spacy.util.load_model_from_config(config, auto_fill=True)
     torch.cuda.empty_cache()
diff --git a/spacy_llm/tests/models/test_falcon.py b/spacy_llm/tests/models/test_falcon.py
index e0c115c6..0d3f8554 100644
--- a/spacy_llm/tests/models/test_falcon.py
+++ b/spacy_llm/tests/models/test_falcon.py
@@ -9,7 +9,7 @@
 
 _PIPE_CFG = {
     "model": {
-        "@llm_models": "spacy.HuggingFace.v1",
+        "@llm_models": "spacy.Falcon.v1",
         "name": "falcon-rw-1b",
     },
     "task": {"@llm_tasks": "spacy.NoOp.v1"},
@@ -32,14 +32,13 @@
 @llm_tasks = "spacy.NoOp.v1"
 
 [components.llm.model]
-@llm_models = "spacy.HuggingFace.v1"
+@llm_models = "spacy.Falcon.v1"
 name = "falcon-rw-1b"
 """
 
 
 @pytest.mark.gpu
 @pytest.mark.skipif(not has_torch_cuda_gpu, reason="needs GPU & CUDA")
-@pytest.mark.filterwarnings("ignore:the load_module() method is deprecated")
 def test_init():
     """Test initialization and simple run."""
     nlp = spacy.blank("en")
@@ -54,7 +53,6 @@ def test_init():
 
 @pytest.mark.gpu
 @pytest.mark.skipif(not has_torch_cuda_gpu, reason="needs GPU & CUDA")
-@pytest.mark.filterwarnings("ignore:the load_module() method is deprecated")
 def test_init_from_config():
     orig_config = Config().from_str(_NLP_CONFIG)
     nlp = spacy.util.load_model_from_config(orig_config, auto_fill=True)
@@ -68,6 +66,6 @@ def test_invalid_model():
     orig_config = Config().from_str(_NLP_CONFIG)
     config = copy.deepcopy(orig_config)
     config["components"]["llm"]["model"]["name"] = "x"
-    with pytest.raises(ValueError, match="could not be associated"):
+    with pytest.raises(ValueError, match="unexpected value; permitted"):
         spacy.util.load_model_from_config(config, auto_fill=True)
     torch.cuda.empty_cache()
diff --git a/spacy_llm/tests/models/test_hf.py b/spacy_llm/tests/models/test_hf.py
index fa756dc5..3058035c 100644
--- a/spacy_llm/tests/models/test_hf.py
+++ b/spacy_llm/tests/models/test_hf.py
@@ -18,14 +18,14 @@
 
 @pytest.mark.gpu
 @pytest.mark.skipif(not has_torch_cuda_gpu, reason="needs GPU & CUDA")
-@pytest.mark.parametrize("model", ("dolly-v2-3b", "Llama-2-7b-hf"))
+@pytest.mark.parametrize(
+    "model", (("spacy.Dolly.v1", "dolly-v2-3b"), ("spacy.Llama2.v1", "Llama-2-7b-hf"))
+)
 def test_device_config_conflict(model: Tuple[str, str]):
     """Test device configuration."""
     nlp = spacy.blank("en")
-    cfg = {
-        **_PIPE_CFG,
-        **{"model": {"@llm_models": "spacy.HuggingFace.v1", "name": model}},
-    }
+    model, name = model
+    cfg = {**_PIPE_CFG, **{"model": {"@llm_models": model, "name": name}}}
 
     # Set device only.
     cfg["model"]["config_init"] = {"device": "cpu"}  # type: ignore[index]
@@ -58,7 +58,7 @@ def test_torch_dtype():
     nlp = spacy.blank("en")
     cfg = {
         **_PIPE_CFG,
-        **{"model": {"@llm_models": "spacy.HuggingFace.v1", "name": "dolly-v2-3b"}},
+        **{"model": {"@llm_models": "spacy.Dolly.v1", "name": "dolly-v2-3b"}},
     }
 
     # Should be converted to torch.float16.
diff --git a/spacy_llm/tests/models/test_llama2.py b/spacy_llm/tests/models/test_llama2.py
index bafbdd14..6896269b 100644
--- a/spacy_llm/tests/models/test_llama2.py
+++ b/spacy_llm/tests/models/test_llama2.py
@@ -9,7 +9,7 @@
 
 _PIPE_CFG = {
     "model": {
-        "@llm_models": "spacy.HuggingFace.v1",
+        "@llm_models": "spacy.Llama2.v1",
         "name": "Llama-2-7b-hf",
     },
     "task": {"@llm_tasks": "spacy.NoOp.v1"},
@@ -32,7 +32,7 @@
 @llm_tasks = "spacy.NoOp.v1"
 
 [components.llm.model]
-@llm_models = "spacy.HuggingFace.v1"
+@llm_models = "spacy.Llama2.v1"
 name = "Llama-2-7b-hf"
 """
 
diff --git a/spacy_llm/tests/models/test_mistral.py b/spacy_llm/tests/models/test_mistral.py
index 42c14fbf..548d4d29 100644
--- a/spacy_llm/tests/models/test_mistral.py
+++ b/spacy_llm/tests/models/test_mistral.py
@@ -9,7 +9,7 @@
 
 _PIPE_CFG = {
     "model": {
-        "@llm_models": "spacy.HuggingFace.v1",
+        "@llm_models": "spacy.Mistral.v1",
         "name": "Mistral-7B-v0.1",
     },
     "task": {"@llm_tasks": "spacy.NoOp.v1"},
@@ -31,7 +31,7 @@
 @llm_tasks = "spacy.NoOp.v1"
 
 [components.llm.model]
-@llm_models = "spacy.HuggingFace.v1"
+@llm_models = "spacy.Mistral.v1"
 name = "Mistral-7B-v0.1"
 """
 
@@ -63,6 +63,6 @@ def test_invalid_model():
     orig_config = Config().from_str(_NLP_CONFIG)
     config = copy.deepcopy(orig_config)
     config["components"]["llm"]["model"]["name"] = "x"
-    with pytest.raises(ValueError, match="could not be associated"):
+    with pytest.raises(ValueError, match="unexpected value; permitted"):
         spacy.util.load_model_from_config(config, auto_fill=True)
     torch.cuda.empty_cache()
diff --git a/spacy_llm/tests/models/test_openllama.py b/spacy_llm/tests/models/test_openllama.py
index 0a949ff0..f42d94dc 100644
--- a/spacy_llm/tests/models/test_openllama.py
+++ b/spacy_llm/tests/models/test_openllama.py
@@ -9,7 +9,7 @@
 
 _PIPE_CFG = {
     "model": {
-        "@llm_models": "spacy.HuggingFace.v1",
+        "@llm_models": "spacy.OpenLLaMA.v1",
         "name": "open_llama_3b",
     },
     "task": {"@llm_tasks": "spacy.NoOp.v1"},
@@ -32,7 +32,7 @@
 @llm_tasks = "spacy.NoOp.v1"
 
 [components.llm.model]
-@llm_models = spacy.HuggingFace.v1
+@llm_models = spacy.OpenLLaMA.v1
 name = open_llama_3b
 """
 
@@ -80,6 +80,6 @@ def test_invalid_model():
     orig_config = Config().from_str(_NLP_CONFIG)
     config = copy.deepcopy(orig_config)
     config["components"]["llm"]["model"]["name"] = "anything-else"
-    with pytest.raises(ValueError, match="could not be associated"):
+    with pytest.raises(ValueError, match="unexpected value; permitted"):
         spacy.util.load_model_from_config(config, auto_fill=True)
     torch.cuda.empty_cache()
diff --git a/spacy_llm/tests/models/test_palm.py b/spacy_llm/tests/models/test_palm.py
index dc88e9d7..f4df8a51 100644
--- a/spacy_llm/tests/models/test_palm.py
+++ b/spacy_llm/tests/models/test_palm.py
@@ -3,7 +3,6 @@
 
 from spacy_llm.models.rest.palm import palm_bison
 
-from ...models.rest.palm.registry import google_v1
 from ..compat import has_palm_key
 
 
@@ -12,7 +11,7 @@
 @pytest.mark.parametrize("name", ("text-bison-001", "chat-bison-001"))
 def test_palm_api_response_is_correct(name: str):
     """Check if we're getting the response from the correct structure"""
-    model = google_v1(name=name)
+    model = palm_bison(name=name)
     prompt = "The number of stars in the universe is"
     num_prompts = 3  # arbitrary number to check multiple inputs
     responses = list(model([prompt] * num_prompts))
@@ -31,7 +30,7 @@ def test_palm_api_response_n_generations():
     the very first output.
     """
     candidate_count = 3
-    model = google_v1(config={"candidate_count": candidate_count})
+    model = palm_bison(config={"candidate_count": candidate_count})
 
     prompt = "The number of stars in the universe is"
     num_prompts = 3
@@ -58,4 +57,4 @@ def test_palm_error_unsupported_model():
     """Ensure graceful handling of error when model is not supported"""
     incorrect_model = "x-gpt-3.5-turbo"
     with pytest.raises(ValueError, match="Model 'x-gpt-3.5-turbo' is not supported"):
-        google_v1(name=incorrect_model)
+        palm_bison(name=incorrect_model)
diff --git a/spacy_llm/tests/models/test_rest.py b/spacy_llm/tests/models/test_rest.py
index a135615e..305732c6 100644
--- a/spacy_llm/tests/models/test_rest.py
+++ b/spacy_llm/tests/models/test_rest.py
@@ -12,7 +12,7 @@
 
 PIPE_CFG = {
     "model": {
-        "@llm_models": "spacy.OpenAI.v1",
+        "@llm_models": "spacy.GPT-3-5.v2",
     },
     "task": {"@llm_tasks": "spacy.TextCat.v1", "labels": "POSITIVE,NEGATIVE"},
 }
@@ -53,12 +53,12 @@ def test_initialization():
 def test_model_error_handling():
     """Test error handling for wrong model."""
     nlp = spacy.blank("en")
-    with pytest.raises(ValueError, match="is not available"):
+    with pytest.raises(ValueError, match="Could not find function 'spacy.gpt-3.5x.v1'"):
         nlp.add_pipe(
             "llm",
             config={
                 "task": {"@llm_tasks": "spacy.NoOp.v1"},
-                "model": {"@llm_models": "spacy.OpenAI.v1", "name": "GPT-3.5-x"},
+                "model": {"@llm_models": "spacy.gpt-3.5x.v1"},
             },
         )
 
@@ -80,11 +80,11 @@ def test_doc_length_error_handling():
     with pytest.raises(
         ValueError,
         match=re.escape(
-            "Request to OpenAI API failed: This model's maximum context length is 16385 tokens. However, your messages "
-            "resulted in 40018 tokens. Please reduce the length of the messages."
+            "Request to OpenAI API failed: This model's maximum context length is 4097 tokens. However, your messages "
+            "resulted in 5018 tokens. Please reduce the length of the messages."
         ),
     ):
-        nlp("this is a test " * 10000)
+        nlp("n" * 10000)
 
 
 @pytest.mark.skipif(has_openai_key is False, reason="OpenAI API key not available")
diff --git a/spacy_llm/tests/models/test_stablelm.py b/spacy_llm/tests/models/test_stablelm.py
index 4dbc1747..e9edab4b 100644
--- a/spacy_llm/tests/models/test_stablelm.py
+++ b/spacy_llm/tests/models/test_stablelm.py
@@ -9,7 +9,7 @@
 
 _PIPE_CFG = {
     "model": {
-        "@llm_models": "spacy.HuggingFace.v1",
+        "@llm_models": "spacy.StableLM.v1",
         "name": "stablelm-base-alpha-3b",
     },
     "task": {"@llm_tasks": "spacy.NoOp.v1"},
@@ -31,7 +31,7 @@
 @llm_tasks = "spacy.NoOp.v1"
 
 [components.llm.model]
-@llm_models = "spacy.HuggingFace.v1"
+@llm_models = "spacy.StableLM.v1"
 name = "stablelm-base-alpha-3b"
 """
 
@@ -81,5 +81,5 @@ def test_invalid_model():
     orig_config = Config().from_str(_NLP_CONFIG)
     config = copy.deepcopy(orig_config)
     config["components"]["llm"]["model"]["name"] = "anything-else"
-    with pytest.raises(ValueError, match="could not be associated"):
+    with pytest.raises(ValueError, match="unexpected value; permitted:"):
         spacy.util.load_model_from_config(config, auto_fill=True)
diff --git a/spacy_llm/tests/pipeline/test_llm.py b/spacy_llm/tests/pipeline/test_llm.py
index 82bc838e..ac5c1547 100644
--- a/spacy_llm/tests/pipeline/test_llm.py
+++ b/spacy_llm/tests/pipeline/test_llm.py
@@ -405,7 +405,7 @@ def test_llm_task_factories_ner():
     labels = PER,ORG,LOC
 
     [components.llm.model]
-    @llm_models = "spacy.GPT-3-5.v3"
+    @llm_models = "spacy.GPT-3-5.v1"
     """
     config = Config().from_str(cfg_string)
     nlp = assemble_from_config(config)
diff --git a/spacy_llm/tests/sharding/test_sharding.py b/spacy_llm/tests/sharding/test_sharding.py
index c29e71b1..6bc818da 100644
--- a/spacy_llm/tests/sharding/test_sharding.py
+++ b/spacy_llm/tests/sharding/test_sharding.py
@@ -60,11 +60,7 @@ def test_sharding_count(config):
         "fear is fear itself.",
     ]
     assert all(
-        # GPT-3.5 count of words can be off, hence we're allowing for some tolerance.
-        [
-            response - 1 <= len(pr.split()) <= response + 1
-            for response, pr in zip(responses, prompts)
-        ]
+        [response == len(pr.split()) for response, pr in zip(responses, prompts)]
     )
     assert sum(responses) == doc.user_data["count"]
 
@@ -172,9 +168,6 @@ def test_sharding_sentiment(config):
 @pytest.mark.skipif(has_openai_key is False, reason="OpenAI API key not available")
 def test_sharding_spancat(config):
     context_length = 265
-    config["components"]["llm"]["model"]["@llm_models"] = "spacy.OpenAI.v1"
-    # Spancat (not sharding) aspect of test case doesn't work with gpt-3.5.
-    config["components"]["llm"]["model"]["name"] = "gpt-4"
     config["components"]["llm"]["model"]["context_length"] = context_length
     config["components"]["llm"]["task"] = {
         "@llm_tasks": "spacy.SpanCat.v3",
diff --git a/spacy_llm/tests/tasks/test_entity_linker.py b/spacy_llm/tests/tasks/test_entity_linker.py
index 93aaf7cb..45f18e7e 100644
--- a/spacy_llm/tests/tasks/test_entity_linker.py
+++ b/spacy_llm/tests/tasks/test_entity_linker.py
@@ -135,8 +135,7 @@ def zeroshot_cfg_string():
     @llm_tasks = "spacy.EntityLinker.v1"
 
     [components.llm.model]
-    @llm_models = "spacy.OpenAI.v1"
-    name = "gpt-3.5-turbo"
+    @llm_models = "spacy.GPT-3-5.v1"
     config = {"temperature": 0}
 
     [initialize]
@@ -180,8 +179,7 @@ def fewshot_cfg_string():
     path = {str((Path(__file__).parent / "examples" / "entity_linker.yml"))}
 
     [components.llm.model]
-    @llm_models = "spacy.OpenAI.v1"
-    name = "gpt-3.5-turbo"
+    @llm_models = "spacy.GPT-3-5.v1"
     config = {{"temperature": 0}}
 
     [initialize]
@@ -226,8 +224,7 @@ def ext_template_cfg_string():
     path = {str((Path(__file__).parent / "templates" / "entity_linker.jinja2"))}
 
     [components.llm.model]
-    @llm_models = "spacy.OpenAI.v1"
-    name = "gpt-3.5-turbo"
+    @llm_models = "spacy.GPT-3-5.v1"
     config = {{"temperature": 0}}
 
     [initialize]
@@ -402,10 +399,8 @@ def test_el_io(cfg_string, request, tmp_path):
     doc = nlp2(doc)
     if cfg_string != "ext_template_cfg_string":
         assert len(doc.ents) == 2
-        # Should be Q100, but mileage may vary depending on model
-        assert doc.ents[0].kb_id_ in ("Q100", "Q131371")
-        # Should be Q131371, but mileage may vary depending on model
-        assert doc.ents[1].kb_id_ in ("Q131371", "Q100")
+        assert doc.ents[0].kb_id_ == "Q100"
+        assert doc.ents[1].kb_id_ == "Q131371"
 
 
 def test_jinja_template_rendering_without_examples(tmp_path):
@@ -779,10 +774,7 @@ def test_init_with_code():
         top_n=5,
     )
     nlp = spacy.blank("en")
-    # Test case doesn't work with gpt-3.5-turbo.
-    llm_ner = nlp.add_pipe(
-        "llm_ner", config={"model": {"@llm_models": "spacy.OpenAI.v1", "name": "gpt-4"}}
-    )
+    llm_ner = nlp.add_pipe("llm_ner")
     for label in ("PERSON", "ORGANISATION", "LOCATION", "SPORTS TEAM"):
         llm_ner.add_label(label)
 
diff --git a/spacy_llm/tests/tasks/test_lemma.py b/spacy_llm/tests/tasks/test_lemma.py
index aa6020cb..d82cd087 100644
--- a/spacy_llm/tests/tasks/test_lemma.py
+++ b/spacy_llm/tests/tasks/test_lemma.py
@@ -56,8 +56,7 @@ def zeroshot_cfg_string():
     @llm_tasks = "spacy.Lemma.v1"
 
     [components.llm.model]
-    @llm_models = "spacy.OpenAI.v1"
-    name = "gpt-3.5-turbo"
+    @llm_models = "spacy.GPT-3-5.v2"
     """
 
 
@@ -82,8 +81,7 @@ def fewshot_cfg_string():
     path = {str((Path(__file__).parent / "examples" / "lemma.yml"))}
 
     [components.llm.model]
-    @llm_models = "spacy.OpenAI.v1"
-    name = "gpt-3.5-turbo"
+    @llm_models = "spacy.GPT-3-5.v2"
     """
 
 
@@ -109,8 +107,7 @@ def ext_template_cfg_string():
     path = {str((Path(__file__).parent / "templates" / "lemma.jinja2"))}
 
     [components.llm.model]
-    @llm_models = "spacy.OpenAI.v1"
-    name = "gpt-3.5-turbo"
+    @llm_models = "spacy.GPT-3-5.v2"
     """
 
 
diff --git a/spacy_llm/tests/tasks/test_ner.py b/spacy_llm/tests/tasks/test_ner.py
index 7cacd92b..5fe4b178 100644
--- a/spacy_llm/tests/tasks/test_ner.py
+++ b/spacy_llm/tests/tasks/test_ner.py
@@ -101,8 +101,7 @@ def fewshot_cfg_string_v3_lds():
     @misc = "spacy.LowercaseNormalizer.v1"
 
     [components.llm.model]
-    @llm_models = "spacy.OpenAI.v1"
-    name = "gpt-3.5-turbo"
+    @llm_models = "spacy.GPT-3-5.v2"
     """
 
 
@@ -132,7 +131,7 @@ def fewshot_cfg_string_v3():
     @misc = "spacy.LowercaseNormalizer.v1"
 
     [components.llm.model]
-    @llm_models = "spacy.GPT-3-5.v3"
+    @llm_models = "spacy.GPT-3-5.v2"
     """
 
 
@@ -167,7 +166,7 @@ def ext_template_cfg_string():
     @misc = "spacy.LowercaseNormalizer.v1"
 
     [components.llm.model]
-    @llm_models = "spacy.GPT-3-5.v3"
+    @llm_models = "spacy.GPT-3-5.v2"
     """
 
 
@@ -265,10 +264,7 @@ def test_llm_ner_predict(text, gold_ents):
     Note that this test may fail randomly, as the LLM's output is unguaranteed to be consistent/predictable
     """
     nlp = spacy.blank("en")
-    # Test case doesn't work with gpt-3.5-turbo.
-    llm = nlp.add_pipe(
-        "llm_ner", config={"model": {"@llm_models": "spacy.OpenAI.v1", "name": "gpt-4"}}
-    )
+    llm = nlp.add_pipe("llm_ner")
     for ent_str, ent_label in gold_ents:
         llm.add_label(ent_label)
     doc = nlp(text)
@@ -989,7 +985,7 @@ def test_add_label():
                 "@llm_tasks": "spacy.NER.v3",
             },
             "model": {
-                "@llm_models": "spacy.GPT-3-5.v3",
+                "@llm_models": "spacy.GPT-3-5.v1",
             },
         },
     )
@@ -1020,9 +1016,7 @@ def test_clear_label():
                 "@llm_tasks": "spacy.NER.v3",
             },
             "model": {
-                "@llm_models": "spacy.OpenAI.v1",
-                # Test case doesn't work with gpt-3.5-turbo.
-                "name": "gpt-4",
+                "@llm_models": "spacy.GPT-3-5.v1",
             },
         },
     )
diff --git a/spacy_llm/tests/tasks/test_raw.py b/spacy_llm/tests/tasks/test_raw.py
index df6f5b90..9973135a 100644
--- a/spacy_llm/tests/tasks/test_raw.py
+++ b/spacy_llm/tests/tasks/test_raw.py
@@ -53,8 +53,7 @@ def zeroshot_cfg_string():
     @llm_tasks = "spacy.Raw.v1"
 
     [components.llm.model]
-    @llm_models = "spacy.OpenAI.v1"
-    name = "gpt-3.5-turbo"
+    @llm_models = "spacy.GPT-3-5.v3"
     """
 
 
diff --git a/spacy_llm/tests/tasks/test_rel.py b/spacy_llm/tests/tasks/test_rel.py
index 517cbbba..258824d4 100644
--- a/spacy_llm/tests/tasks/test_rel.py
+++ b/spacy_llm/tests/tasks/test_rel.py
@@ -40,8 +40,7 @@ def zeroshot_cfg_string():
     labels = "LivesIn,Visits"
 
     [components.llm.model]
-    @llm_models = "spacy.OpenAI.v1"
-    name = "gpt-3.5-turbo"
+    @llm_models = "spacy.GPT-3-5.v2"
 
     [initialize]
     vectors = "en_core_web_md"
@@ -73,8 +72,7 @@ def fewshot_cfg_string():
     path = {str(EXAMPLES_DIR / "rel.jsonl")}
 
     [components.llm.model]
-    @llm_models = "spacy.OpenAI.v1"
-    name = "gpt-3.5-turbo"
+    @llm_models = "spacy.GPT-3-5.v2"
 
     [initialize]
     vectors = "en_core_web_md"
diff --git a/spacy_llm/tests/tasks/test_sentiment.py b/spacy_llm/tests/tasks/test_sentiment.py
index 3c269096..aac85966 100644
--- a/spacy_llm/tests/tasks/test_sentiment.py
+++ b/spacy_llm/tests/tasks/test_sentiment.py
@@ -33,8 +33,7 @@ def zeroshot_cfg_string():
     @llm_tasks = "spacy.Sentiment.v1"
 
     [components.llm.model]
-    @llm_models = "spacy.OpenAI.v1"
-    name = "gpt-3.5-turbo"
+    @llm_models = "spacy.GPT-3-5.v2"
     """
 
 
@@ -59,7 +58,7 @@ def fewshot_cfg_string():
     path = {str((Path(__file__).parent / "examples" / "sentiment.yml"))}
 
     [components.llm.model]
-    @llm_models = "spacy.GPT-3-5.v3"
+    @llm_models = "spacy.GPT-3-5.v2"
     """
 
 
@@ -85,7 +84,7 @@ def ext_template_cfg_string():
     path = {str((Path(__file__).parent / "templates" / "sentiment.jinja2"))}
 
     [components.llm.model]
-    @llm_models = "spacy.GPT-3-5.v3"
+    @llm_models = "spacy.GPT-3-5.v2"
     """
 
 
@@ -132,7 +131,7 @@ def test_sentiment_predict(cfg_string, request):
     orig_config = Config().from_str(cfg)
     nlp = spacy.util.load_model_from_config(orig_config, auto_fill=True)
     if cfg_string != "ext_template_cfg_string":
-        assert nlp("This is horrible.")._.sentiment <= 0.1
+        assert nlp("This is horrible.")._.sentiment == 0.0
         assert 0 < nlp("This is meh.")._.sentiment <= 0.5
         assert nlp("This is perfect.")._.sentiment == 1.0
 
diff --git a/spacy_llm/tests/tasks/test_spancat.py b/spacy_llm/tests/tasks/test_spancat.py
index ced48c11..b064c9ef 100644
--- a/spacy_llm/tests/tasks/test_spancat.py
+++ b/spacy_llm/tests/tasks/test_spancat.py
@@ -83,8 +83,7 @@ def fewshot_cfg_string():
     @misc = "spacy.LowercaseNormalizer.v1"
 
     [components.llm.model]
-    @llm_models = "spacy.OpenAI.v1"
-    name = "gpt-3.5-turbo"
+    @llm_models = "spacy.GPT-3-5.v2"
     """
 
 
@@ -119,8 +118,7 @@ def ext_template_cfg_string():
     @misc = "spacy.LowercaseNormalizer.v1"
 
     [components.llm.model]
-    @llm_models = "spacy.OpenAI.v1"
-    name = "gpt-3.5-turbo"
+    @llm_models = "spacy.GPT-3-5.v2"
     """
 
 
diff --git a/spacy_llm/tests/tasks/test_summarization.py b/spacy_llm/tests/tasks/test_summarization.py
index 5d154895..35e24118 100644
--- a/spacy_llm/tests/tasks/test_summarization.py
+++ b/spacy_llm/tests/tasks/test_summarization.py
@@ -36,8 +36,7 @@ def zeroshot_cfg_string():
     max_n_words = 20
 
     [components.llm.model]
-    @llm_models = "spacy.OpenAI.v1"
-    name = "gpt-3.5-turbo"
+    @llm_models = "spacy.GPT-3-5.v2"
     """
 
 
@@ -63,8 +62,7 @@ def fewshot_cfg_string():
     path = {str((Path(__file__).parent / "examples" / "summarization.yml"))}
 
     [components.llm.model]
-    @llm_models = "spacy.OpenAI.v1"
-    name = "gpt-3.5-turbo"
+    @llm_models = "spacy.GPT-3-5.v2"
     """
 
 
@@ -91,8 +89,7 @@ def ext_template_cfg_string():
     path = {str((Path(__file__).parent / "templates" / "summarization.jinja2"))}
 
     [components.llm.model]
-    @llm_models = "spacy.OpenAI.v1"
-    name = "gpt-3.5-turbo"
+    @llm_models = "spacy.GPT-3-5.v2"
     """
 
 
diff --git a/spacy_llm/tests/tasks/test_textcat.py b/spacy_llm/tests/tasks/test_textcat.py
index 656b5af9..6e7468dd 100644
--- a/spacy_llm/tests/tasks/test_textcat.py
+++ b/spacy_llm/tests/tasks/test_textcat.py
@@ -44,8 +44,7 @@ def zeroshot_cfg_string():
     @misc = "spacy.LowercaseNormalizer.v1"
 
     [components.llm.model]
-    @llm_models = "spacy.OpenAI.v1"
-    name = "gpt-3.5-turbo"
+    @llm_models = "spacy.GPT-3-5.v2"
     """
 
 
@@ -75,8 +74,7 @@ def fewshot_cfg_string():
     @misc = "spacy.LowercaseNormalizer.v1"
 
     [components.llm.model]
-    @llm_models = "spacy.OpenAI.v1"
-    name = "gpt-3.5-turbo"
+    @llm_models = "spacy.GPT-3-5.v2"
     """
 
 
@@ -108,8 +106,7 @@ def ext_template_cfg_string():
     @misc = "spacy.LowercaseNormalizer.v1"
 
     [components.llm.model]
-    @llm_models = "spacy.OpenAI.v1"
-    name = "gpt-3.5-turbo"
+    @llm_models = "spacy.GPT-3-5.v2"
     """
 
 
@@ -138,8 +135,7 @@ def zeroshot_cfg_string_v3_lds():
     @misc = "spacy.LowercaseNormalizer.v1"
 
     [components.llm.model]
-    @llm_models = "spacy.OpenAI.v1"
-    name = "gpt-3.5-turbo"
+    @llm_models = "spacy.GPT-3-5.v2"
     """
 
 
@@ -837,7 +833,7 @@ def test_add_label():
                 "@llm_tasks": "spacy.TextCat.v3",
             },
             "model": {
-                "@llm_models": "spacy.GPT-3-5.v3",
+                "@llm_models": "spacy.GPT-3-5.v1",
             },
         },
     )
diff --git a/spacy_llm/tests/tasks/test_translation.py b/spacy_llm/tests/tasks/test_translation.py
index c722f039..31ed6799 100644
--- a/spacy_llm/tests/tasks/test_translation.py
+++ b/spacy_llm/tests/tasks/test_translation.py
@@ -32,8 +32,7 @@ def zeroshot_cfg_string():
     target_lang = "Spanish"
 
     [components.llm.model]
-    @llm_models = "spacy.OpenAI.v1"
-    name = "gpt-3.5-turbo"
+    @llm_models = "spacy.GPT-3-5.v3"
     """
 
 
@@ -59,8 +58,7 @@ def fewshot_cfg_string():
     path = {str((Path(__file__).parent / "examples" / "translation.yml"))}
 
     [components.llm.model]
-    @llm_models = "spacy.OpenAI.v1"
-    name = "gpt-3.5-turbo"
+    @llm_models = "spacy.GPT-3-5.v3"
     """
 
 
@@ -87,8 +85,7 @@ def ext_template_cfg_string():
     path = {str((Path(__file__).parent / "templates" / "translation.jinja2"))}
 
     [components.llm.model]
-    @llm_models = "spacy.OpenAI.v1"
-    name = "gpt-3.5-turbo"
+    @llm_models = "spacy.GPT-3-5.v3"
     """
 
 
diff --git a/spacy_llm/tests/test_combinations.py b/spacy_llm/tests/test_combinations.py
index 16692e41..b94641ef 100644
--- a/spacy_llm/tests/test_combinations.py
+++ b/spacy_llm/tests/test_combinations.py
@@ -12,8 +12,8 @@
 @pytest.mark.skipif(has_langchain is False, reason="LangChain is not installed")
 @pytest.mark.parametrize(
     "model",
-    ["langchain.OpenAIChat.v1", "spacy.OpenAI.v1"],
-    ids=["langchain", "rest-openai"],
+    ["langchain.OpenAIChat.v1", "spacy.GPT-3-5.v3", "spacy.GPT-4.v3"],
+    ids=["langchain", "rest-openai", "rest-openai"],
 )
 @pytest.mark.parametrize(
     "task",
@@ -34,7 +34,8 @@ def test_combinations(model: str, task: str, n_process: int):
         },
         "task": {"@llm_tasks": task},
     }
-    config["model"]["name"] = "gpt-3.5-turbo"
+    if model.startswith("langchain"):
+        config["model"]["name"] = "gpt-3.5-turbo"
     # Configure task-specific settings.
     if task.startswith("spacy.NER"):
         config["task"]["labels"] = "PER,ORG,LOC"
diff --git a/usage_examples/el_openai/fewshot.cfg b/usage_examples/el_openai/fewshot.cfg
index 2904ab28..de9cb1e7 100644
--- a/usage_examples/el_openai/fewshot.cfg
+++ b/usage_examples/el_openai/fewshot.cfg
@@ -24,8 +24,7 @@ factory = "llm"
 path = ${paths.examples}
 
 [components.llm-el.model]
-@llm_models = "spacy.OpenAI.v1"
-name = "gpt-3.5-turbo"
+@llm_models = "spacy.GPT-3-5.v1"
 config = {"temperature": 0}
 
 [initialize]
diff --git a/usage_examples/el_openai/zeroshot.cfg b/usage_examples/el_openai/zeroshot.cfg
index 62be1cd3..4c9a0187 100644
--- a/usage_examples/el_openai/zeroshot.cfg
+++ b/usage_examples/el_openai/zeroshot.cfg
@@ -18,8 +18,7 @@ factory = "llm"
 @llm_tasks = "spacy.EntityLinker.v1"
 
 [components.llm-el.model]
-@llm_models = "spacy.OpenAI.v1"
-name = "gpt-3.5-turbo"
+@llm_models = "spacy.GPT-3-5.v1"
 config = {"temperature": 0}
 
 [initialize]
diff --git a/usage_examples/multitask_openai/fewshot.cfg b/usage_examples/multitask_openai/fewshot.cfg
index a0b6f79f..b01691bc 100644
--- a/usage_examples/multitask_openai/fewshot.cfg
+++ b/usage_examples/multitask_openai/fewshot.cfg
@@ -19,7 +19,7 @@ labels = SIZE,TYPE,TOPPING,PRODUCT
 path = ${paths.examples}
 
 [components.llm_ner.model]
-@llm_models = "spacy.OpenAI.v1"
+@llm_models = "spacy.GPT-3-5.v2"
 name = "gpt-3.5-turbo"
 config = {"temperature": 0.0}
 
diff --git a/usage_examples/multitask_openai/zeroshot.cfg b/usage_examples/multitask_openai/zeroshot.cfg
index 047fa81b..9e793c04 100644
--- a/usage_examples/multitask_openai/zeroshot.cfg
+++ b/usage_examples/multitask_openai/zeroshot.cfg
@@ -12,7 +12,7 @@ factory = "llm"
 labels = SIZE,TYPE,TOPPING,PRODUCT
 
 [components.llm_ner.model]
-@llm_models = "spacy.OpenAI.v1"
+@llm_models = "spacy.GPT-3-5.v2"
 name = "gpt-3.5-turbo"
 config = {"temperature": 0.0}
 
diff --git a/usage_examples/ner_dolly/fewshot.cfg b/usage_examples/ner_dolly/fewshot.cfg
index 28d90ce8..cb50585b 100644
--- a/usage_examples/ner_dolly/fewshot.cfg
+++ b/usage_examples/ner_dolly/fewshot.cfg
@@ -12,7 +12,7 @@ batch_size = 128
 factory = "llm"
 
 [components.llm.model]
-@llm_models = "spacy.HuggingFace.v1"
+@llm_models = "spacy.Dolly.v1"
 name = "dolly-v2-3b"
 
 [components.llm.task]
diff --git a/usage_examples/ner_dolly/fewshot_v2.cfg b/usage_examples/ner_dolly/fewshot_v2.cfg
index d0bac099..46590e6e 100644
--- a/usage_examples/ner_dolly/fewshot_v2.cfg
+++ b/usage_examples/ner_dolly/fewshot_v2.cfg
@@ -12,7 +12,7 @@ batch_size = 128
 factory = "llm"
 
 [components.llm.model]
-@llm_models = "spacy.HuggingFace.v1"
+@llm_models = "spacy.Dolly.v1"
 name = "dolly-v2-3b"
 
 [components.llm.task]
diff --git a/usage_examples/ner_dolly/zeroshot.cfg b/usage_examples/ner_dolly/zeroshot.cfg
index 6a36298e..4dad8993 100644
--- a/usage_examples/ner_dolly/zeroshot.cfg
+++ b/usage_examples/ner_dolly/zeroshot.cfg
@@ -9,7 +9,7 @@ batch_size = 128
 factory = "llm"
 
 [components.llm.model]
-@llm_models = "spacy.HuggingFace.v1"
+@llm_models = "spacy.Dolly.v1"
 name = "dolly-v2-3b"
 
 [components.llm.task]
diff --git a/usage_examples/ner_dolly/zeroshot_v2.cfg b/usage_examples/ner_dolly/zeroshot_v2.cfg
index 4e401aa0..abf825af 100644
--- a/usage_examples/ner_dolly/zeroshot_v2.cfg
+++ b/usage_examples/ner_dolly/zeroshot_v2.cfg
@@ -9,7 +9,7 @@ batch_size = 128
 factory = "llm"
 
 [components.llm.model]
-@llm_models = "spacy.HuggingFace.v1"
+@llm_models = "spacy.Dolly.v1"
 name = "dolly-v2-3b"
 
 [components.llm.task]
diff --git a/usage_examples/ner_v3_openai/fewshot.cfg b/usage_examples/ner_v3_openai/fewshot.cfg
index 3585ffed..6d024875 100644
--- a/usage_examples/ner_v3_openai/fewshot.cfg
+++ b/usage_examples/ner_v3_openai/fewshot.cfg
@@ -28,5 +28,4 @@ EQUIPMENT = "Any kind of cooking equipment. e.g. oven, cooking pot, grill"
 path = "${paths.examples}"
 
 [components.llm.model]
-@llm_models = "spacy.OpenAI.v1"
-name = "gpt-3.5-turbo"
+@llm_models = "spacy.GPT-3-5.v1"
diff --git a/usage_examples/rel_openai/fewshot.cfg b/usage_examples/rel_openai/fewshot.cfg
index 6f944808..f65fe26f 100644
--- a/usage_examples/rel_openai/fewshot.cfg
+++ b/usage_examples/rel_openai/fewshot.cfg
@@ -22,8 +22,7 @@ labels = LivesIn,Visits
 path = ${paths.examples}
 
 [components.llm_rel.model]
-@llm_models = "spacy.OpenAI.v1"
-name = "gpt-3.5-turbo"
+@llm_models = "spacy.GPT-3-5.v2"
 
 [initialize]
 vectors = "en_core_web_md"
diff --git a/usage_examples/rel_openai/zeroshot.cfg b/usage_examples/rel_openai/zeroshot.cfg
index 13341f37..3a38afc1 100644
--- a/usage_examples/rel_openai/zeroshot.cfg
+++ b/usage_examples/rel_openai/zeroshot.cfg
@@ -18,8 +18,7 @@ factory = "llm"
 labels = LivesIn,Visits
 
 [components.llm_rel.model]
-@llm_models = "spacy.OpenAI.v1"
-name = "gpt-3.5-turbo"
+@llm_models = "spacy.GPT-3-5.v2"
 
 [initialize]
 vectors = "en_core_web_md"