Skip to content

Commit c62ddb3

Browse files
committed
typo fix
1 parent 746eb9d commit c62ddb3

File tree

10 files changed

+8
-8
lines changed

10 files changed

+8
-8
lines changed

generate.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,7 @@
1515
from tqdm import tqdm
1616

1717
from internlm.accelerator import get_accelerator
18-
from internlm.apis.inference import SequenceGenerator
18+
from internlm.inference.inference import SequenceGenerator
1919
from internlm.core.context import global_context as gpc
2020
from internlm.data import build_generation_loader_with_data_type
2121
from internlm.initialize import initialize_launcher

internlm/core/scheduler/base_scheduler.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -8,8 +8,8 @@
88

99
import torch
1010

11-
from internlm.apis import InferenceParams
1211
from internlm.core.engine import Engine
12+
from internlm.inference import InferenceParams
1313

1414

1515
class BaseScheduler(ABC):

internlm/data/streaming/dataset.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -8,10 +8,10 @@
88
from datasets.distributed import split_dataset_by_node
99
from PIL import Image
1010
from torch.utils.data import Dataset
11+
from transformers import AutoTokenizer
1112

1213
from internlm.core.context import ParallelMode
1314
from internlm.core.context import global_context as gpc
14-
from transformers import AutoTokenizer
1515

1616

1717
class StreamingDataset(Dataset):
File renamed without changes.

internlm/apis/inference.py → internlm/inference/inference.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -5,10 +5,10 @@
55
import torch.nn.functional as F
66
from torch import nn
77

8-
from internlm.apis import InferenceParams, process_parallel_output
98
from internlm.core.context import ParallelMode # noqa: E402
109
from internlm.core.context import global_context as gpc # noqa: E402
1110
from internlm.core.trainer import Trainer
11+
from internlm.inference import InferenceParams, process_parallel_output
1212

1313
__all__ = ["SequenceGenerator"]
1414

File renamed without changes.

internlm/initialize/initialize_model.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -93,7 +93,7 @@ def set_fp32_attr_for_model(model: Union[nn.Module, nn.ModuleList]):
9393

9494

9595
def set_parallel_attr_for_param_groups(model: Union[nn.Module, nn.ModuleList]):
96-
def _check_module(name, module):
96+
def _check_module(name, module): # pylint: disable=W0613
9797
# layer_norm
9898
if isinstance(module, (RMSNorm, nn.LayerNorm)):
9999
for param in module.parameters():

tests/test_infer/test_generate.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@
44
import torch
55
from sentencepiece import SentencePieceProcessor
66

7-
from internlm.apis.inference import SequenceGenerator, batch_tokenize
7+
from internlm.inference.inference import SequenceGenerator, batch_tokenize
88
from internlm.initialize import initialize_launcher # noqa: E402
99
from internlm.initialize.initialize_model import (
1010
initialize_model_and_parallel_communicator,

tests/test_infer/test_trainer_generate.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@
33
import pytest
44
from sentencepiece import SentencePieceProcessor
55

6-
from internlm.apis.inference import SequenceGenerator, batch_tokenize
6+
from internlm.inference.inference import SequenceGenerator, batch_tokenize
77
from internlm.checkpoint import CheckpointManager # noqa: E402
88
from internlm.core.context import global_context as gpc # noqa: E402
99
from internlm.core.trainer import Trainer, TrainState # noqa: E402

tools/load_internlm2_model.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@
77

88
import torch
99

10-
from internlm.apis.inference import SequenceGenerator
10+
from internlm.inference.inference import SequenceGenerator
1111
from internlm.core.context import ParallelMode
1212
from internlm.core.context import global_context as gpc
1313
from internlm.initialize import initialize_launcher

0 commit comments

Comments (0)