diff --git a/.github/workflows/run-yesno-recipe.yml b/.github/workflows/run-yesno-recipe.yml
index edd3d39ceb..876b95e71c 100644
--- a/.github/workflows/run-yesno-recipe.yml
+++ b/.github/workflows/run-yesno-recipe.yml
@@ -34,7 +34,7 @@ jobs:
os: [ubuntu-18.04]
python-version: [3.8]
torch: ["1.8.1"]
- k2-version: ["1.8.dev20210917"]
+ k2-version: ["1.9.dev20210919"]
fail-fast: false
steps:
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 6da27170cc..150b5258a8 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -32,7 +32,7 @@ jobs:
os: [ubuntu-18.04, macos-10.15]
python-version: [3.6, 3.7, 3.8, 3.9]
torch: ["1.8.1"]
- k2-version: ["1.8.dev20210917"]
+ k2-version: ["1.9.dev20210919"]
fail-fast: false
diff --git a/docs/source/installation/images/k2-v-1.7.svg b/docs/source/installation/images/k2-v-1.7.svg
deleted file mode 100644
index 8a74d0b55e..0000000000
--- a/docs/source/installation/images/k2-v-1.7.svg
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/docs/source/installation/images/k2-v1.9-blueviolet.svg b/docs/source/installation/images/k2-v1.9-blueviolet.svg
new file mode 100644
index 0000000000..5a207b3705
--- /dev/null
+++ b/docs/source/installation/images/k2-v1.9-blueviolet.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/docs/source/installation/index.rst b/docs/source/installation/index.rst
index 588ec13ec4..f960033e8f 100644
--- a/docs/source/installation/index.rst
+++ b/docs/source/installation/index.rst
@@ -21,7 +21,7 @@ Installation
.. |torch_versions| image:: ./images/torch-1.6.0_1.7.0_1.7.1_1.8.0_1.8.1_1.9.0-green.svg
:alt: Supported PyTorch versions
-.. |k2_versions| image:: ./images/k2-v-1.7.svg
+.. |k2_versions| image:: ./images/k2-v1.9-blueviolet.svg
:alt: Supported k2 versions
``icefall`` depends on `k2 <https://github.com/k2-fsa/k2>`_ and
@@ -40,7 +40,7 @@ to install ``k2``.
.. CAUTION::
- You need to install ``k2`` with a version at least **v1.7**.
+ You need to install ``k2`` with a version at least **v1.9**.
.. HINT::
diff --git a/egs/librispeech/ASR/conformer_ctc/conformer.py b/egs/librispeech/ASR/conformer_ctc/conformer.py
index efe3570cba..b19b94db1d 100644
--- a/egs/librispeech/ASR/conformer_ctc/conformer.py
+++ b/egs/librispeech/ASR/conformer_ctc/conformer.py
@@ -98,7 +98,7 @@ def run_encoder(
"""
Args:
x:
- The model input. Its shape is [N, T, C].
+ The model input. Its shape is (N, T, C).
supervisions:
Supervision in lhotse format.
See https://github.com/lhotse-speech/lhotse/blob/master/lhotse/dataset/speech_recognition.py#L32 # noqa
diff --git a/egs/librispeech/ASR/conformer_ctc/decode.py b/egs/librispeech/ASR/conformer_ctc/decode.py
index c9d31ff6c4..b5b41c82ec 100755
--- a/egs/librispeech/ASR/conformer_ctc/decode.py
+++ b/egs/librispeech/ASR/conformer_ctc/decode.py
@@ -213,12 +213,12 @@ def decode_one_batch(
feature = batch["inputs"]
assert feature.ndim == 3
feature = feature.to(device)
- # at entry, feature is [N, T, C]
+ # at entry, feature is (N, T, C)
supervisions = batch["supervisions"]
nnet_output, memory, memory_key_padding_mask = model(feature, supervisions)
- # nnet_output is [N, T, C]
+ # nnet_output is (N, T, C)
supervision_segments = torch.stack(
(
@@ -244,14 +244,19 @@ def decode_one_batch(
# Note: You can also pass rescored lattices to it.
# We choose the HLG decoded lattice for speed reasons
# as HLG decoding is faster and the oracle WER
- # is slightly worse than that of rescored lattices.
- return nbest_oracle(
+ # is only slightly worse than that of rescored lattices.
+ best_path = nbest_oracle(
lattice=lattice,
num_paths=params.num_paths,
ref_texts=supervisions["text"],
word_table=word_table,
- scale=params.lattice_score_scale,
+ lattice_score_scale=params.lattice_score_scale,
+        oov="<UNK>",
)
+ hyps = get_texts(best_path)
+ hyps = [[word_table[i] for i in ids] for ids in hyps]
+ key = f"oracle_{params.num_paths}_lattice_score_scale_{params.lattice_score_scale}" # noqa
+ return {key: hyps}
if params.method in ["1best", "nbest"]:
if params.method == "1best":
@@ -264,7 +269,7 @@ def decode_one_batch(
lattice=lattice,
num_paths=params.num_paths,
use_double_scores=params.use_double_scores,
- scale=params.lattice_score_scale,
+ lattice_score_scale=params.lattice_score_scale,
)
key = f"no_rescore-scale-{params.lattice_score_scale}-{params.num_paths}" # noqa
@@ -288,17 +293,23 @@ def decode_one_batch(
G=G,
num_paths=params.num_paths,
lm_scale_list=lm_scale_list,
- scale=params.lattice_score_scale,
+ lattice_score_scale=params.lattice_score_scale,
)
elif params.method == "whole-lattice-rescoring":
best_path_dict = rescore_with_whole_lattice(
- lattice=lattice, G_with_epsilon_loops=G, lm_scale_list=lm_scale_list
+ lattice=lattice,
+ G_with_epsilon_loops=G,
+ lm_scale_list=lm_scale_list,
)
elif params.method == "attention-decoder":
# lattice uses a 3-gram Lm. We rescore it with a 4-gram LM.
rescored_lattice = rescore_with_whole_lattice(
- lattice=lattice, G_with_epsilon_loops=G, lm_scale_list=None
+ lattice=lattice,
+ G_with_epsilon_loops=G,
+ lm_scale_list=None,
)
+ # TODO: pass `lattice` instead of `rescored_lattice` to
+ # `rescore_with_attention_decoder`
best_path_dict = rescore_with_attention_decoder(
lattice=rescored_lattice,
@@ -308,16 +319,20 @@ def decode_one_batch(
memory_key_padding_mask=memory_key_padding_mask,
sos_id=sos_id,
eos_id=eos_id,
- scale=params.lattice_score_scale,
+ lattice_score_scale=params.lattice_score_scale,
)
else:
assert False, f"Unsupported decoding method: {params.method}"
ans = dict()
- for lm_scale_str, best_path in best_path_dict.items():
- hyps = get_texts(best_path)
- hyps = [[word_table[i] for i in ids] for ids in hyps]
- ans[lm_scale_str] = hyps
+ if best_path_dict is not None:
+ for lm_scale_str, best_path in best_path_dict.items():
+ hyps = get_texts(best_path)
+ hyps = [[word_table[i] for i in ids] for ids in hyps]
+ ans[lm_scale_str] = hyps
+    else:
+        # best_path_dict is None when the rescored lattice was too large;
+        # return empty hypotheses so that downstream scoring still works.
+        for lm_scale in lm_scale_list:
+            key = f"lm_scale_{lm_scale}"
+            ans[key] = [[] for _ in range(lattice.shape[0])]
return ans
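For orientation, the mapping returned above has one entry per rescoring configuration, and each value holds one word-level hypothesis per utterance in the batch. A small sketch with made-up hypotheses (not part of the patch):

    # Illustrative only: the shape of the mapping decode_one_batch() returns
    # for a batch of two utterances rescored with two LM scales.
    ans = {
        "lm_scale_0.7": [["HELLO", "WORLD"], ["GOOD", "MORNING"]],
        "lm_scale_1.0": [["HELLO", "WORLD"], ["GOOD", "MORNING"]],
    }
    for key, hyps in ans.items():
        assert len(hyps) == 2  # one hypothesis (a list of words) per utterance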
diff --git a/egs/librispeech/ASR/conformer_ctc/pretrained.py b/egs/librispeech/ASR/conformer_ctc/pretrained.py
index 913088777b..c924b87bbc 100755
--- a/egs/librispeech/ASR/conformer_ctc/pretrained.py
+++ b/egs/librispeech/ASR/conformer_ctc/pretrained.py
@@ -336,7 +336,7 @@ def main():
memory_key_padding_mask=memory_key_padding_mask,
sos_id=params.sos_id,
eos_id=params.eos_id,
- scale=params.lattice_score_scale,
+ lattice_score_scale=params.lattice_score_scale,
ngram_lm_scale=params.ngram_lm_scale,
attention_scale=params.attention_decoder_scale,
)
diff --git a/egs/librispeech/ASR/conformer_ctc/subsampling.py b/egs/librispeech/ASR/conformer_ctc/subsampling.py
index 720ed6c228..542fb0364e 100644
--- a/egs/librispeech/ASR/conformer_ctc/subsampling.py
+++ b/egs/librispeech/ASR/conformer_ctc/subsampling.py
@@ -22,8 +22,8 @@
class Conv2dSubsampling(nn.Module):
"""Convolutional 2D subsampling (to 1/4 length).
- Convert an input of shape [N, T, idim] to an output
- with shape [N, T', odim], where
+ Convert an input of shape (N, T, idim) to an output
+ with shape (N, T', odim), where
T' = ((T-1)//2 - 1)//2, which approximates T' == T//4
It is based on
@@ -34,10 +34,10 @@ def __init__(self, idim: int, odim: int) -> None:
"""
Args:
idim:
- Input dim. The input shape is [N, T, idim].
+ Input dim. The input shape is (N, T, idim).
Caution: It requires: T >=7, idim >=7
odim:
- Output dim. The output shape is [N, ((T-1)//2 - 1)//2, odim]
+ Output dim. The output shape is (N, ((T-1)//2 - 1)//2, odim)
"""
assert idim >= 7
super().__init__()
@@ -58,18 +58,18 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:
Args:
x:
- Its shape is [N, T, idim].
+ Its shape is (N, T, idim).
Returns:
- Return a tensor of shape [N, ((T-1)//2 - 1)//2, odim]
+ Return a tensor of shape (N, ((T-1)//2 - 1)//2, odim)
"""
- # On entry, x is [N, T, idim]
- x = x.unsqueeze(1) # [N, T, idim] -> [N, 1, T, idim] i.e., [N, C, H, W]
+ # On entry, x is (N, T, idim)
+ x = x.unsqueeze(1) # (N, T, idim) -> (N, 1, T, idim) i.e., (N, C, H, W)
x = self.conv(x)
- # Now x is of shape [N, odim, ((T-1)//2 - 1)//2, ((idim-1)//2 - 1)//2]
+ # Now x is of shape (N, odim, ((T-1)//2 - 1)//2, ((idim-1)//2 - 1)//2)
b, c, t, f = x.size()
x = self.out(x.transpose(1, 2).contiguous().view(b, t, c * f))
- # Now x is of shape [N, ((T-1)//2 - 1))//2, odim]
+        # Now x is of shape (N, ((T-1)//2 - 1)//2, odim)
return x
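A quick check of the length formula in the comments above (a standalone sketch, not part of the patch): the output length T' = ((T-1)//2 - 1)//2 stays close to T//4, which is why the class is described as subsampling to roughly 1/4 length.

    # Illustrative only: compare T' = ((T-1)//2 - 1)//2 with T//4.
    for T in (7, 100, 1000):
        T_prime = ((T - 1) // 2 - 1) // 2
        print(T, T_prime, T // 4)  # 7 -> 1, 100 -> 24, 1000 -> 249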
@@ -80,8 +80,8 @@ class VggSubsampling(nn.Module):
This paper is not 100% explicit so I am guessing to some extent,
and trying to compare with other VGG implementations.
- Convert an input of shape [N, T, idim] to an output
- with shape [N, T', odim], where
+ Convert an input of shape (N, T, idim) to an output
+ with shape (N, T', odim), where
T' = ((T-1)//2 - 1)//2, which approximates T' = T//4
"""
@@ -93,10 +93,10 @@ def __init__(self, idim: int, odim: int) -> None:
Args:
idim:
- Input dim. The input shape is [N, T, idim].
+ Input dim. The input shape is (N, T, idim).
Caution: It requires: T >=7, idim >=7
odim:
- Output dim. The output shape is [N, ((T-1)//2 - 1)//2, odim]
+ Output dim. The output shape is (N, ((T-1)//2 - 1)//2, odim)
"""
super().__init__()
@@ -149,10 +149,10 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:
Args:
x:
- Its shape is [N, T, idim].
+ Its shape is (N, T, idim).
Returns:
- Return a tensor of shape [N, ((T-1)//2 - 1)//2, odim]
+ Return a tensor of shape (N, ((T-1)//2 - 1)//2, odim)
"""
x = x.unsqueeze(1)
x = self.layers(x)
diff --git a/egs/librispeech/ASR/conformer_ctc/train.py b/egs/librispeech/ASR/conformer_ctc/train.py
index 298b741124..80b2d924a7 100755
--- a/egs/librispeech/ASR/conformer_ctc/train.py
+++ b/egs/librispeech/ASR/conformer_ctc/train.py
@@ -310,14 +310,14 @@ def compute_loss(
"""
device = graph_compiler.device
feature = batch["inputs"]
- # at entry, feature is [N, T, C]
+ # at entry, feature is (N, T, C)
assert feature.ndim == 3
feature = feature.to(device)
supervisions = batch["supervisions"]
with torch.set_grad_enabled(is_training):
nnet_output, encoder_memory, memory_mask = model(feature, supervisions)
- # nnet_output is [N, T, C]
+ # nnet_output is (N, T, C)
# NOTE: We need `encode_supervisions` to sort sequences with
# different duration in decreasing order, required by
diff --git a/egs/librispeech/ASR/conformer_ctc/transformer.py b/egs/librispeech/ASR/conformer_ctc/transformer.py
index 88b10b23d8..68a4ff65cb 100644
--- a/egs/librispeech/ASR/conformer_ctc/transformer.py
+++ b/egs/librispeech/ASR/conformer_ctc/transformer.py
@@ -83,8 +83,8 @@ def __init__(
if subsampling_factor != 4:
raise NotImplementedError("Support only 'subsampling_factor=4'.")
- # self.encoder_embed converts the input of shape [N, T, num_classes]
- # to the shape [N, T//subsampling_factor, d_model].
+ # self.encoder_embed converts the input of shape (N, T, num_classes)
+ # to the shape (N, T//subsampling_factor, d_model).
# That is, it does two things simultaneously:
# (1) subsampling: T -> T//subsampling_factor
# (2) embedding: num_classes -> d_model
@@ -162,7 +162,7 @@ def forward(
"""
Args:
x:
- The input tensor. Its shape is [N, T, C].
+ The input tensor. Its shape is (N, T, C).
supervision:
Supervision in lhotse format.
See https://github.com/lhotse-speech/lhotse/blob/master/lhotse/dataset/speech_recognition.py#L32 # noqa
@@ -171,17 +171,17 @@ def forward(
Returns:
Return a tuple containing 3 tensors:
- - CTC output for ctc decoding. Its shape is [N, T, C]
- - Encoder output with shape [T, N, C]. It can be used as key and
+ - CTC output for ctc decoding. Its shape is (N, T, C)
+ - Encoder output with shape (T, N, C). It can be used as key and
value for the decoder.
- Encoder output padding mask. It can be used as
- memory_key_padding_mask for the decoder. Its shape is [N, T].
+ memory_key_padding_mask for the decoder. Its shape is (N, T).
It is None if `supervision` is None.
"""
if self.use_feat_batchnorm:
- x = x.permute(0, 2, 1) # [N, T, C] -> [N, C, T]
+ x = x.permute(0, 2, 1) # (N, T, C) -> (N, C, T)
x = self.feat_batchnorm(x)
- x = x.permute(0, 2, 1) # [N, C, T] -> [N, T, C]
+ x = x.permute(0, 2, 1) # (N, C, T) -> (N, T, C)
encoder_memory, memory_key_padding_mask = self.run_encoder(
x, supervision
)
@@ -195,7 +195,7 @@ def run_encoder(
Args:
x:
- The model input. Its shape is [N, T, C].
+ The model input. Its shape is (N, T, C).
supervisions:
Supervision in lhotse format.
See https://github.com/lhotse-speech/lhotse/blob/master/lhotse/dataset/speech_recognition.py#L32 # noqa
@@ -206,8 +206,8 @@ def run_encoder(
padding mask for the decoder.
Returns:
Return a tuple with two tensors:
- - The encoder output, with shape [T, N, C]
- - encoder padding mask, with shape [N, T].
+ - The encoder output, with shape (T, N, C)
+ - encoder padding mask, with shape (N, T).
The mask is None if `supervisions` is None.
It is used as memory key padding mask in the decoder.
"""
@@ -225,11 +225,11 @@ def ctc_output(self, x: torch.Tensor) -> torch.Tensor:
Args:
x:
The output tensor from the transformer encoder.
- Its shape is [T, N, C]
+ Its shape is (T, N, C)
Returns:
Return a tensor that can be used for CTC decoding.
- Its shape is [N, T, C]
+ Its shape is (N, T, C)
"""
x = self.encoder_output_layer(x)
x = x.permute(1, 0, 2) # (T, N, C) ->(N, T, C)
@@ -247,7 +247,7 @@ def decoder_forward(
"""
Args:
memory:
- It's the output of the encoder with shape [T, N, C]
+ It's the output of the encoder with shape (T, N, C)
memory_key_padding_mask:
The padding mask from the encoder.
token_ids:
@@ -312,7 +312,7 @@ def decoder_nll(
"""
Args:
memory:
- It's the output of the encoder with shape [T, N, C]
+ It's the output of the encoder with shape (T, N, C)
memory_key_padding_mask:
The padding mask from the encoder.
token_ids:
@@ -654,13 +654,13 @@ def __init__(self, d_model: int, dropout: float = 0.1) -> None:
def extend_pe(self, x: torch.Tensor) -> None:
"""Extend the time t in the positional encoding if required.
- The shape of `self.pe` is [1, T1, d_model]. The shape of the input x
- is [N, T, d_model]. If T > T1, then we change the shape of self.pe
- to [N, T, d_model]. Otherwise, nothing is done.
+ The shape of `self.pe` is (1, T1, d_model). The shape of the input x
+ is (N, T, d_model). If T > T1, then we change the shape of self.pe
+ to (N, T, d_model). Otherwise, nothing is done.
Args:
x:
- It is a tensor of shape [N, T, C].
+ It is a tensor of shape (N, T, C).
Returns:
Return None.
"""
@@ -678,7 +678,7 @@ def extend_pe(self, x: torch.Tensor) -> None:
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0)
- # Now pe is of shape [1, T, d_model], where T is x.size(1)
+ # Now pe is of shape (1, T, d_model), where T is x.size(1)
self.pe = pe.to(device=x.device, dtype=x.dtype)
def forward(self, x: torch.Tensor) -> torch.Tensor:
@@ -687,10 +687,10 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:
Args:
x:
- Its shape is [N, T, C]
+ Its shape is (N, T, C)
Returns:
- Return a tensor of shape [N, T, C]
+ Return a tensor of shape (N, T, C)
"""
self.extend_pe(x)
x = x * self.xscale + self.pe[:, : x.size(1), :]
diff --git a/egs/librispeech/ASR/tdnn_lstm_ctc/decode.py b/egs/librispeech/ASR/tdnn_lstm_ctc/decode.py
index 7e5ec8c0dd..1e91b1008b 100755
--- a/egs/librispeech/ASR/tdnn_lstm_ctc/decode.py
+++ b/egs/librispeech/ASR/tdnn_lstm_ctc/decode.py
@@ -190,12 +190,12 @@ def decode_one_batch(
feature = batch["inputs"]
assert feature.ndim == 3
feature = feature.to(device)
- # at entry, feature is [N, T, C]
+ # at entry, feature is (N, T, C)
- feature = feature.permute(0, 2, 1) # now feature is [N, C, T]
+ feature = feature.permute(0, 2, 1) # now feature is (N, C, T)
nnet_output = model(feature)
- # nnet_output is [N, T, C]
+ # nnet_output is (N, T, C)
supervisions = batch["supervisions"]
@@ -229,6 +229,7 @@ def decode_one_batch(
lattice=lattice,
num_paths=params.num_paths,
use_double_scores=params.use_double_scores,
+ lattice_score_scale=params.lattice_score_scale,
)
key = f"no_rescore-{params.num_paths}"
hyps = get_texts(best_path)
@@ -247,10 +248,13 @@ def decode_one_batch(
G=G,
num_paths=params.num_paths,
lm_scale_list=lm_scale_list,
+ lattice_score_scale=params.lattice_score_scale,
)
else:
best_path_dict = rescore_with_whole_lattice(
- lattice=lattice, G_with_epsilon_loops=G, lm_scale_list=lm_scale_list
+ lattice=lattice,
+ G_with_epsilon_loops=G,
+ lm_scale_list=lm_scale_list,
)
ans = dict()
diff --git a/egs/librispeech/ASR/tdnn_lstm_ctc/pretrained.py b/egs/librispeech/ASR/tdnn_lstm_ctc/pretrained.py
index 4f82a989c7..0a543d8598 100755
--- a/egs/librispeech/ASR/tdnn_lstm_ctc/pretrained.py
+++ b/egs/librispeech/ASR/tdnn_lstm_ctc/pretrained.py
@@ -218,11 +218,11 @@ def main():
features = pad_sequence(
features, batch_first=True, padding_value=math.log(1e-10)
)
- features = features.permute(0, 2, 1) # now features is [N, C, T]
+ features = features.permute(0, 2, 1) # now features is (N, C, T)
with torch.no_grad():
nnet_output = model(features)
- # nnet_output is [N, T, C]
+ # nnet_output is (N, T, C)
batch_size = nnet_output.shape[0]
supervision_segments = torch.tensor(
diff --git a/egs/librispeech/ASR/tdnn_lstm_ctc/train.py b/egs/librispeech/ASR/tdnn_lstm_ctc/train.py
index 4d45d197b1..695ee51300 100755
--- a/egs/librispeech/ASR/tdnn_lstm_ctc/train.py
+++ b/egs/librispeech/ASR/tdnn_lstm_ctc/train.py
@@ -290,14 +290,14 @@ def compute_loss(
"""
device = graph_compiler.device
feature = batch["inputs"]
- # at entry, feature is [N, T, C]
- feature = feature.permute(0, 2, 1) # now feature is [N, C, T]
+ # at entry, feature is (N, T, C)
+ feature = feature.permute(0, 2, 1) # now feature is (N, C, T)
assert feature.ndim == 3
feature = feature.to(device)
with torch.set_grad_enabled(is_training):
nnet_output = model(feature)
- # nnet_output is [N, T, C]
+ # nnet_output is (N, T, C)
# NOTE: We need `encode_supervisions` to sort sequences with
# different duration in decreasing order, required by
diff --git a/egs/yesno/ASR/tdnn/decode.py b/egs/yesno/ASR/tdnn/decode.py
index 54fdbb3cc3..325acf316f 100755
--- a/egs/yesno/ASR/tdnn/decode.py
+++ b/egs/yesno/ASR/tdnn/decode.py
@@ -111,10 +111,10 @@ def decode_one_batch(
feature = batch["inputs"]
assert feature.ndim == 3
feature = feature.to(device)
- # at entry, feature is [N, T, C]
+ # at entry, feature is (N, T, C)
nnet_output = model(feature)
- # nnet_output is [N, T, C]
+ # nnet_output is (N, T, C)
batch_size = nnet_output.shape[0]
supervision_segments = torch.tensor(
diff --git a/egs/yesno/ASR/tdnn/train.py b/egs/yesno/ASR/tdnn/train.py
index 39c5ef3efb..0f5506d380 100755
--- a/egs/yesno/ASR/tdnn/train.py
+++ b/egs/yesno/ASR/tdnn/train.py
@@ -268,13 +268,13 @@ def compute_loss(
"""
device = graph_compiler.device
feature = batch["inputs"]
- # at entry, feature is [N, T, C]
+ # at entry, feature is (N, T, C)
assert feature.ndim == 3
feature = feature.to(device)
with torch.set_grad_enabled(is_training):
nnet_output = model(feature)
- # nnet_output is [N, T, C]
+ # nnet_output is (N, T, C)
# NOTE: We need `encode_supervisions` to sort sequences with
# different duration in decreasing order, required by
diff --git a/icefall/decode.py b/icefall/decode.py
index 29b76d973a..e678e4622f 100644
--- a/icefall/decode.py
+++ b/icefall/decode.py
@@ -15,42 +15,12 @@
# limitations under the License.
import logging
-from typing import Dict, List, Optional, Tuple, Union
+from typing import Dict, List, Optional, Union
import k2
-import kaldialign
import torch
-import torch.nn as nn
-
-def _get_random_paths(
- lattice: k2.Fsa,
- num_paths: int,
- use_double_scores: bool = True,
- scale: float = 1.0,
-):
- """
- Args:
- lattice:
- The decoding lattice, returned by :func:`get_lattice`.
- num_paths:
- It specifies the size `n` in n-best. Note: Paths are selected randomly
- and those containing identical word sequences are remove dand only one
- of them is kept.
- use_double_scores:
- True to use double precision floating point in the computation.
- False to use single precision.
- scale:
- It's the scale applied to the lattice.scores. A smaller value
- yields more unique paths.
- Returns:
- Return a k2.RaggedInt with 3 axes [seq][path][arc_pos]
- """
- saved_scores = lattice.scores.clone()
- lattice.scores *= scale
- path = k2.random_paths(lattice, num_paths=num_paths, use_double_scores=True)
- lattice.scores = saved_scores
- return path
+from icefall.utils import get_texts
def _intersect_device(
@@ -65,7 +35,7 @@ def _intersect_device(
CUDA OOM error.
The arguments and return value of this function are the same as
- k2.intersect_device.
+ :func:`k2.intersect_device`.
"""
num_fsas = b_fsas.shape[0]
if num_fsas <= batch_size:
@@ -106,10 +76,9 @@ def get_lattice(
) -> k2.Fsa:
"""Get the decoding lattice from a decoding graph and neural
network output.
-
Args:
nnet_output:
- It is the output of a neural model of shape `[N, T, C]`.
+ It is the output of a neural model of shape `(N, T, C)`.
HLG:
An Fsa, the decoding graph. See also `compile_HLG.py`.
supervision_segments:
@@ -139,10 +108,12 @@ def get_lattice(
subsampling_factor:
The subsampling factor of the model.
Returns:
- A lattice containing the decoding result.
+ An FsaVec containing the decoding result. It has axes [utt][state][arc].
"""
dense_fsa_vec = k2.DenseFsaVec(
- nnet_output, supervision_segments, allow_truncate=subsampling_factor - 1
+ nnet_output,
+ supervision_segments,
+ allow_truncate=subsampling_factor - 1,
)
lattice = k2.intersect_dense_pruned(
@@ -157,8 +128,304 @@ def get_lattice(
return lattice
+class Nbest(object):
+ """
+ An Nbest object contains two fields:
+
+ (1) fsa. It is an FsaVec containing a vector of **linear** FSAs.
+ Its axes are [path][state][arc]
+ (2) shape. Its type is :class:`k2.RaggedShape`.
+ Its axes are [utt][path]
+
+ The field `shape` has two axes [utt][path]. `shape.dim0` contains
+ the number of utterances, which is also the number of rows in the
+ supervision_segments. `shape.tot_size(1)` contains the number
+ of paths, which is also the number of FSAs in `fsa`.
+
+ Caution:
+ Don't be confused by the name `Nbest`. The best in the name `Nbest`
+ has nothing to do with `best scores`. The important part is
+ `N` in `Nbest`, not `best`.
+ """
+
+ def __init__(self, fsa: k2.Fsa, shape: k2.RaggedShape) -> None:
+ """
+ Args:
+ fsa:
+ An FsaVec with axes [path][state][arc]. It is expected to contain
+ a list of **linear** FSAs.
+ shape:
+ A ragged shape with two axes [utt][path].
+ """
+ assert len(fsa.shape) == 3, f"fsa.shape: {fsa.shape}"
+ assert shape.num_axes == 2, f"num_axes: {shape.num_axes}"
+
+ if fsa.shape[0] != shape.tot_size(1):
+ raise ValueError(
+ f"{fsa.shape[0]} vs {shape.tot_size(1)}\n"
+ "Number of FSAs in `fsa` does not match the given shape"
+ )
+
+ self.fsa = fsa
+ self.shape = shape
+
+ def __str__(self):
+ s = "Nbest("
+ s += f"Number of utterances:{self.shape.dim0}, "
+ s += f"Number of Paths:{self.fsa.shape[0]})"
+ return s
+
+ @staticmethod
+ def from_lattice(
+ lattice: k2.Fsa,
+ num_paths: int,
+ use_double_scores: bool = True,
+ lattice_score_scale: float = 0.5,
+ ) -> "Nbest":
+ """Construct an Nbest object by **sampling** `num_paths` from a lattice.
+
+ Each sampled path is a linear FSA.
+
+ We assume `lattice.labels` contains token IDs and `lattice.aux_labels`
+ contains word IDs.
+
+ Args:
+ lattice:
+ An FsaVec with axes [utt][state][arc].
+ num_paths:
+ Number of paths to **sample** from the lattice
+ using :func:`k2.random_paths`.
+ use_double_scores:
+ True to use double precision in :func:`k2.random_paths`.
+ False to use single precision.
+          lattice_score_scale:
+            Scale `lattice.scores` before passing it to
+            :func:`k2.random_paths`. A smaller value leads to more unique
+            paths at the risk of not sampling the path with the best score.
+ Returns:
+ Return an Nbest instance.
+ """
+ saved_scores = lattice.scores.clone()
+ lattice.scores *= lattice_score_scale
+ # path is a ragged tensor with dtype torch.int32.
+ # It has three axes [utt][path][arc_pos]
+ path = k2.random_paths(
+ lattice, num_paths=num_paths, use_double_scores=use_double_scores
+ )
+ lattice.scores = saved_scores
+
+ # word_seq is a k2.RaggedTensor sharing the same shape as `path`
+ # but it contains word IDs. Note that it also contains 0s and -1s.
+ # The last entry in each sublist is -1.
+        # Its axes are [utt][path][word_id]
+ if isinstance(lattice.aux_labels, torch.Tensor):
+ word_seq = k2.ragged.index(lattice.aux_labels, path)
+ else:
+ word_seq = lattice.aux_labels.index(path)
+ word_seq = word_seq.remove_axis(word_seq.num_axes - 2)
+
+        # Each utterance has `num_paths` paths, but some of them transduce
+        # to the same word sequence, so we need to remove repeated word
+        # sequences within an utterance. After removing repeats, each
+        # utterance contains a different number of paths.
+ #
+ # `new2old` is a 1-D torch.Tensor mapping from the output path index
+ # to the input path index.
+ _, _, new2old = word_seq.unique(
+ need_num_repeats=False, need_new2old_indexes=True
+ )
+
+ # kept_path is a ragged tensor with dtype torch.int32.
+ # It has axes [utt][path][arc_pos]
+ kept_path, _ = path.index(new2old, axis=1, need_value_indexes=False)
+
+ # utt_to_path_shape has axes [utt][path]
+ utt_to_path_shape = kept_path.shape.get_layer(0)
+
+ # Remove the utterance axis.
+ # Now kept_path has only two axes [path][arc_pos]
+ kept_path = kept_path.remove_axis(0)
+
+ # labels is a ragged tensor with 2 axes [path][token_id]
+ # Note that it contains -1s.
+ labels = k2.ragged.index(lattice.labels.contiguous(), kept_path)
+
+ # Remove -1 from labels as we will use it to construct a linear FSA
+ labels = labels.remove_values_eq(-1)
+
+ if isinstance(lattice.aux_labels, k2.RaggedTensor):
+ # lattice.aux_labels is a ragged tensor with dtype torch.int32.
+ # It has 2 axes [arc][word], so aux_labels is also a ragged tensor
+ # with 2 axes [arc][word]
+ aux_labels, _ = lattice.aux_labels.index(
+ indexes=kept_path.values, axis=0, need_value_indexes=False
+ )
+ else:
+ assert isinstance(lattice.aux_labels, torch.Tensor)
+ aux_labels = k2.index_select(lattice.aux_labels, kept_path.values)
+ # aux_labels is a 1-D torch.Tensor. It also contains -1 and 0.
+
+ fsa = k2.linear_fsa(labels)
+ fsa.aux_labels = aux_labels
+ # Caution: fsa.scores are all 0s.
+ # `fsa` has only one extra attribute: aux_labels.
+ return Nbest(fsa=fsa, shape=utt_to_path_shape)
+
+ def intersect(self, lattice: k2.Fsa, use_double_scores=True) -> "Nbest":
+ """Intersect this Nbest object with a lattice, get 1-best
+ path from the resulting FsaVec, and return a new Nbest object.
+
+ The purpose of this function is to attach scores to an Nbest.
+
+ Args:
+ lattice:
+ An FsaVec with axes [utt][state][arc]. If it has `aux_labels`, then
+ we assume its `labels` are token IDs and `aux_labels` are word IDs.
+ If it has only `labels`, we assume its `labels` are word IDs.
+ use_double_scores:
+ True to use double precision when computing shortest path.
+ False to use single precision.
+ Returns:
+ Return a new Nbest. This new Nbest shares the same shape with `self`,
+ while its `fsa` is the 1-best path from intersecting `self.fsa` and
+ `lattice`. Also, its `fsa` has non-zero scores and inherits attributes
+          from `lattice`.
+ """
+ # Note: We view each linear FSA as a word sequence
+ # and we use the passed lattice to give each word sequence a score.
+ #
+        # We are not viewing each linear FSA as a token sequence.
+ #
+ # So we use k2.invert() here.
+
+ # We use a word fsa to intersect with k2.invert(lattice)
+ word_fsa = k2.invert(self.fsa)
+
+ if hasattr(lattice, "aux_labels"):
+            # delete token IDs as they are not needed
+ del word_fsa.aux_labels
+
+ word_fsa.scores.zero_()
+ word_fsa_with_epsilon_loops = k2.remove_epsilon_and_add_self_loops(
+ word_fsa
+ )
+
+ path_to_utt_map = self.shape.row_ids(1)
+
+ if hasattr(lattice, "aux_labels"):
+ # lattice has token IDs as labels and word IDs as aux_labels.
+ # inv_lattice has word IDs as labels and token IDs as aux_labels
+ inv_lattice = k2.invert(lattice)
+ inv_lattice = k2.arc_sort(inv_lattice)
+ else:
+ inv_lattice = k2.arc_sort(lattice)
+
+ if inv_lattice.shape[0] == 1:
+ path_lattice = _intersect_device(
+ inv_lattice,
+ word_fsa_with_epsilon_loops,
+ b_to_a_map=torch.zeros_like(path_to_utt_map),
+ sorted_match_a=True,
+ )
+ else:
+ path_lattice = _intersect_device(
+ inv_lattice,
+ word_fsa_with_epsilon_loops,
+ b_to_a_map=path_to_utt_map,
+ sorted_match_a=True,
+ )
+
+ # path_lattice has word IDs as labels and token IDs as aux_labels
+ path_lattice = k2.top_sort(k2.connect(path_lattice))
+
+ one_best = k2.shortest_path(
+ path_lattice, use_double_scores=use_double_scores
+ )
+
+ one_best = k2.invert(one_best)
+ # Now one_best has token IDs as labels and word IDs as aux_labels
+
+ return Nbest(fsa=one_best, shape=self.shape)
+
+ def compute_am_scores(self) -> k2.RaggedTensor:
+ """Compute AM scores of each linear FSA (i.e., each path within
+ an utterance).
+
+ Hint:
+ `self.fsa.scores` contains two parts: acoustic scores (AM scores)
+ and n-gram language model scores (LM scores).
+
+ Caution:
+ We require that ``self.fsa`` has an attribute ``lm_scores``.
+
+ Returns:
+ Return a ragged tensor with 2 axes [utt][path_scores].
+ Its dtype is torch.float64.
+ """
+ saved_scores = self.fsa.scores
+
+ # The `scores` of every arc consists of `am_scores` and `lm_scores`
+ self.fsa.scores = self.fsa.scores - self.fsa.lm_scores
+
+ am_scores = self.fsa.get_tot_scores(
+ use_double_scores=True, log_semiring=False
+ )
+ self.fsa.scores = saved_scores
+
+ return k2.RaggedTensor(self.shape, am_scores)
+
+ def compute_lm_scores(self) -> k2.RaggedTensor:
+ """Compute LM scores of each linear FSA (i.e., each path within
+ an utterance).
+
+ Hint:
+ `self.fsa.scores` contains two parts: acoustic scores (AM scores)
+ and n-gram language model scores (LM scores).
+
+ Caution:
+ We require that ``self.fsa`` has an attribute ``lm_scores``.
+
+ Returns:
+ Return a ragged tensor with 2 axes [utt][path_scores].
+ Its dtype is torch.float64.
+ """
+ saved_scores = self.fsa.scores
+
+ # The `scores` of every arc consists of `am_scores` and `lm_scores`
+ self.fsa.scores = self.fsa.lm_scores
+
+ lm_scores = self.fsa.get_tot_scores(
+ use_double_scores=True, log_semiring=False
+ )
+ self.fsa.scores = saved_scores
+
+ return k2.RaggedTensor(self.shape, lm_scores)
+
+ def tot_scores(self) -> k2.RaggedTensor:
+ """Get total scores of FSAs in this Nbest.
+
+ Note:
+ Since FSAs in Nbest are just linear FSAs, log-semiring
+ and tropical semiring produce the same total scores.
+
+ Returns:
+ Return a ragged tensor with two axes [utt][path_scores].
+ Its dtype is torch.float64.
+ """
+ scores = self.fsa.get_tot_scores(
+ use_double_scores=True, log_semiring=False
+ )
+ return k2.RaggedTensor(self.shape, scores)
+
+ def build_levenshtein_graphs(self) -> k2.Fsa:
+ """Return an FsaVec with axes [utt][state][arc]."""
+ word_ids = get_texts(self.fsa, return_ragged=True)
+ return k2.levenshtein_graph(word_ids)
+
+
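A minimal usage sketch of the new `Nbest` API (assuming `lattice` is the FsaVec returned by :func:`get_lattice`; this mirrors what :func:`nbest_decoding` below does and is not part of the patch):

    import k2

    nbest = Nbest.from_lattice(
        lattice=lattice,
        num_paths=100,  # hypothetical value
        use_double_scores=True,
        lattice_score_scale=0.5,
    )
    # nbest.fsa.scores are all 0s here; intersecting with the lattice
    # attaches AM + LM scores to every sampled path.
    nbest = nbest.intersect(lattice)
    # For each utterance, pick the path with the largest total score.
    max_indexes = nbest.tot_scores().argmax()
    best_path = k2.index_fsa(nbest.fsa, max_indexes)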
def one_best_decoding(
- lattice: k2.Fsa, use_double_scores: bool = True
+ lattice: k2.Fsa,
+ use_double_scores: bool = True,
) -> k2.Fsa:
"""Get the best path from a lattice.
@@ -179,199 +446,143 @@ def nbest_decoding(
lattice: k2.Fsa,
num_paths: int,
use_double_scores: bool = True,
- scale: float = 1.0,
+ lattice_score_scale: float = 1.0,
) -> k2.Fsa:
"""It implements something like CTC prefix beam search using n-best lists.
- The basic idea is to first extra n-best paths from the given lattice,
- build a word seqs from these paths, and compute the total scores
- of these sequences in the log-semiring. The one with the max score
+    The basic idea is to first extract `num_paths` paths from the given lattice,
+    build word sequences from these paths, and compute the total scores
+    of these word sequences in the tropical semiring. The one with the max score
is used as the decoding output.
Caution:
Don't be confused by `best` in the name `n-best`. Paths are selected
- randomly, not by ranking their scores.
+ **randomly**, not by ranking their scores.
+
+ Hint:
+ This decoding method is for demonstration only and it does
+ not produce a lower WER than :func:`one_best_decoding`.
Args:
lattice:
- The decoding lattice, returned by :func:`get_lattice`.
+ The decoding lattice, e.g., can be the return value of
+ :func:`get_lattice`. It has 3 axes [utt][state][arc].
num_paths:
It specifies the size `n` in n-best. Note: Paths are selected randomly
- and those containing identical word sequences are remove dand only one
+ and those containing identical word sequences are removed and only one
of them is kept.
use_double_scores:
True to use double precision floating point in the computation.
False to use single precision.
- scale:
- It's the scale applied to the lattice.scores. A smaller value
- yields more unique paths.
+ lattice_score_scale:
+ It's the scale applied to the `lattice.scores`. A smaller value
+ leads to more unique paths at the risk of missing the correct path.
Returns:
- An FsaVec containing linear FSAs.
+      An FsaVec containing **linear** FSAs. Its axes are [utt][state][arc].
"""
- path = _get_random_paths(
+ nbest = Nbest.from_lattice(
lattice=lattice,
num_paths=num_paths,
use_double_scores=use_double_scores,
- scale=scale,
- )
-
- # word_seq is a k2.RaggedTensor sharing the same shape as `path`
- # but it contains word IDs. Note that it also contains 0s and -1s.
- # The last entry in each sublist is -1.
- if isinstance(lattice.aux_labels, torch.Tensor):
- word_seq = k2.ragged.index(lattice.aux_labels, path)
- else:
- word_seq = lattice.aux_labels.index(path)
- word_seq = word_seq.remove_axis(word_seq.num_axes - 2)
-
- # Remove 0 (epsilon) and -1 from word_seq
- word_seq = word_seq.remove_values_leq(0)
-
- # Remove sequences with identical word sequences.
- #
- # k2.ragged.unique_sequences will reorder paths within a seq.
- # `new2old` is a 1-D torch.Tensor mapping from the output path index
- # to the input path index.
- # new2old.numel() == unique_word_seqs.tot_size(1)
- unique_word_seq, _, new2old = word_seq.unique(
- need_num_repeats=False, need_new2old_indexes=True
+ lattice_score_scale=lattice_score_scale,
)
- # Note: unique_word_seq still has the same axes as word_seq
-
- seq_to_path_shape = unique_word_seq.shape.get_layer(0)
-
- # path_to_seq_map is a 1-D torch.Tensor.
- # path_to_seq_map[i] is the seq to which the i-th path belongs
- path_to_seq_map = seq_to_path_shape.row_ids(1)
+ # nbest.fsa.scores contains 0s
- # Remove the seq axis.
- # Now unique_word_seq has only two axes [path][word]
- unique_word_seq = unique_word_seq.remove_axis(0)
+ nbest = nbest.intersect(lattice)
+ # now nbest.fsa.scores gets assigned
- # word_fsa is an FsaVec with axes [path][state][arc]
- word_fsa = k2.linear_fsa(unique_word_seq)
+    # max_indexes contains, for each utterance, the index of the path
+    # with the maximum total score.
+ max_indexes = nbest.tot_scores().argmax()
- # add epsilon self loops since we will use
- # k2.intersect_device, which treats epsilon as a normal symbol
- word_fsa_with_epsilon_loops = k2.add_epsilon_self_loops(word_fsa)
-
- # lattice has token IDs as labels and word IDs as aux_labels.
- # inv_lattice has word IDs as labels and token IDs as aux_labels
- inv_lattice = k2.invert(lattice)
- inv_lattice = k2.arc_sort(inv_lattice)
-
- path_lattice = _intersect_device(
- inv_lattice,
- word_fsa_with_epsilon_loops,
- b_to_a_map=path_to_seq_map,
- sorted_match_a=True,
- )
- # path_lat has word IDs as labels and token IDs as aux_labels
-
- path_lattice = k2.top_sort(k2.connect(path_lattice))
-
- tot_scores = path_lattice.get_tot_scores(
- use_double_scores=use_double_scores, log_semiring=False
- )
-
- ragged_tot_scores = k2.RaggedTensor(seq_to_path_shape, tot_scores)
-
- argmax_indexes = ragged_tot_scores.argmax()
-
- # Since we invoked `k2.ragged.unique_sequences`, which reorders
- # the index from `path`, we use `new2old` here to convert argmax_indexes
- # to the indexes into `path`.
- #
- # Use k2.index here since argmax_indexes' dtype is torch.int32
- best_path_indexes = k2.index_select(new2old, argmax_indexes)
-
- path_2axes = path.remove_axis(0)
-
- # best_path is a k2.RaggedTensor with 2 axes [path][arc_pos]
- best_path, _ = path_2axes.index(
- indexes=best_path_indexes, axis=0, need_value_indexes=False
- )
-
- # labels is a k2.RaggedTensor with 2 axes [path][token_id]
- # Note that it contains -1s.
- labels = k2.ragged.index(lattice.labels.contiguous(), best_path)
+ best_path = k2.index_fsa(nbest.fsa, max_indexes)
+ return best_path
- labels = labels.remove_values_eq(-1)
- # lattice.aux_labels is a k2.RaggedTensor with 2 axes, so
- # aux_labels is also a k2.RaggedTensor with 2 axes
- aux_labels, _ = lattice.aux_labels.index(
- indexes=best_path.values, axis=0, need_value_indexes=False
- )
+def nbest_oracle(
+ lattice: k2.Fsa,
+ num_paths: int,
+ ref_texts: List[str],
+ word_table: k2.SymbolTable,
+ use_double_scores: bool = True,
+ lattice_score_scale: float = 0.5,
+    oov: str = "<UNK>",
+) -> k2.Fsa:
+ """Select the best hypothesis given a lattice and a reference transcript.
- best_path_fsa = k2.linear_fsa(labels)
- best_path_fsa.aux_labels = aux_labels
- return best_path_fsa
+    The basic idea is to extract `num_paths` paths from the given lattice,
+    remove duplicate word sequences, and select the one that has the minimum
+    edit distance to the corresponding reference transcript as the decoding
+    output.
+ The decoding result returned from this function is the best result that
+ we can obtain using n-best decoding with all kinds of rescoring techniques.
-def compute_am_and_lm_scores(
- lattice: k2.Fsa,
- word_fsa_with_epsilon_loops: k2.Fsa,
- path_to_seq_map: torch.Tensor,
-) -> Tuple[torch.Tensor, torch.Tensor]:
- """Compute AM scores of n-best lists (represented as word_fsas).
+ This function is useful to tune the value of `lattice_score_scale`.
Args:
lattice:
- An FsaVec, e.g., the return value of :func:`get_lattice`
- It must have the attribute `lm_scores`.
- word_fsa_with_epsilon_loops:
- An FsaVec representing an n-best list. Note that it has been processed
- by `k2.add_epsilon_self_loops`.
- path_to_seq_map:
- A 1-D torch.Tensor with dtype torch.int32. path_to_seq_map[i] indicates
- which sequence the i-th Fsa in word_fsa_with_epsilon_loops belongs to.
- path_to_seq_map.numel() == word_fsas_with_epsilon_loops.arcs.dim0().
- Returns:
- Return a tuple containing two 1-D torch.Tensors: (am_scores, lm_scores).
- Each tensor's `numel()' equals to `word_fsas_with_epsilon_loops.shape[0]`
+ An FsaVec with axes [utt][state][arc].
+ Note: We assume its `aux_labels` contains word IDs.
+ num_paths:
+ The size of `n` in n-best.
+ ref_texts:
+      A list of reference transcripts. Each entry contains space-separated
+      words.
+ word_table:
+ It is the word symbol table.
+ use_double_scores:
+ True to use double precision for computation. False to use
+ single precision.
+ lattice_score_scale:
+ It's the scale applied to the lattice.scores. A smaller value
+ yields more unique paths.
+ oov:
+      The out-of-vocabulary word.
+    Return:
+      Return an FsaVec containing linear FSAs, one per utterance. Each FSA
+      is the path in the lattice that has the smallest edit distance to the
+      corresponding reference transcript.
"""
- assert len(lattice.shape) == 3
- assert hasattr(lattice, "lm_scores")
-
- # k2.compose() currently does not support b_to_a_map. To void
- # replicating `lats`, we use k2.intersect_device here.
- #
- # lattice has token IDs as `labels` and word IDs as aux_labels, so we
- # need to invert it here.
- inv_lattice = k2.invert(lattice)
-
- # Now the `labels` of inv_lattice are word IDs (a 1-D torch.Tensor)
- # and its `aux_labels` are token IDs ( a k2.RaggedInt with 2 axes)
-
- # Remove its `aux_labels` since it is not needed in the
- # following computation
- del inv_lattice.aux_labels
- inv_lattice = k2.arc_sort(inv_lattice)
+ device = lattice.device
- path_lattice = _intersect_device(
- inv_lattice,
- word_fsa_with_epsilon_loops,
- b_to_a_map=path_to_seq_map,
- sorted_match_a=True,
+ nbest = Nbest.from_lattice(
+ lattice=lattice,
+ num_paths=num_paths,
+ use_double_scores=use_double_scores,
+ lattice_score_scale=lattice_score_scale,
)
- path_lattice = k2.top_sort(k2.connect(path_lattice))
+ hyps = nbest.build_levenshtein_graphs()
+
+ oov_id = word_table[oov]
+ word_ids_list = []
+ for text in ref_texts:
+ word_ids = []
+ for word in text.split():
+ if word in word_table:
+ word_ids.append(word_table[word])
+ else:
+ word_ids.append(oov_id)
+ word_ids_list.append(word_ids)
- # The `scores` of every arc consists of `am_scores` and `lm_scores`
- path_lattice.scores = path_lattice.scores - path_lattice.lm_scores
+ refs = k2.levenshtein_graph(word_ids_list, device=device)
- am_scores = path_lattice.get_tot_scores(
- use_double_scores=True, log_semiring=False
+ levenshtein_alignment = k2.levenshtein_alignment(
+ refs=refs,
+ hyps=hyps,
+ hyp_to_ref_map=nbest.shape.row_ids(1),
+ sorted_match_ref=True,
)
- path_lattice.scores = path_lattice.lm_scores
-
- lm_scores = path_lattice.get_tot_scores(
- use_double_scores=True, log_semiring=False
+ tot_scores = levenshtein_alignment.get_tot_scores(
+ use_double_scores=False, log_semiring=False
)
+ ragged_tot_scores = k2.RaggedTensor(nbest.shape, tot_scores)
- return am_scores.to(torch.float32), lm_scores.to(torch.float32)
+ max_indexes = ragged_tot_scores.argmax()
+
+ best_path = k2.index_fsa(nbest.fsa, max_indexes)
+ return best_path
def rescore_with_n_best_list(
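As the docstring notes, :func:`nbest_oracle` is mainly useful for tuning `lattice_score_scale`. A hedged sketch of such a sweep (the grid of scales and the variables `lattice`, `supervisions`, and `word_table` are assumed to be available, as in `conformer_ctc/decode.py`; not part of the patch):

    from icefall.utils import get_texts

    for scale in (0.1, 0.3, 0.5, 1.0):  # hypothetical grid
        best_path = nbest_oracle(
            lattice=lattice,
            num_paths=100,
            ref_texts=supervisions["text"],
            word_table=word_table,
            lattice_score_scale=scale,
            oov="<UNK>",
        )
        hyps = get_texts(best_path)
        hyps = [[word_table[i] for i in ids] for ids in hyps]
        # Score `hyps` against the references and keep the scale with the
        # lowest oracle WER.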
@@ -379,34 +590,32 @@ def rescore_with_n_best_list(
G: k2.Fsa,
num_paths: int,
lm_scale_list: List[float],
- scale: float = 1.0,
+ lattice_score_scale: float = 1.0,
+ use_double_scores: bool = True,
) -> Dict[str, k2.Fsa]:
- """Decode using n-best list with LM rescoring.
-
- `lattice` is a decoding lattice with 3 axes. This function first
- extracts `num_paths` paths from `lattice` for each sequence using
- `k2.random_paths`. The `am_scores` of these paths are computed.
- For each path, its `lm_scores` is computed using `G` (which is an LM).
- The final `tot_scores` is the sum of `am_scores` and `lm_scores`.
- The path with the largest `tot_scores` within a sequence is used
- as the decoding output.
+ """Rescore an n-best list with an n-gram LM.
+ The path with the maximum score is used as the decoding output.
Args:
lattice:
- An FsaVec. It can be the return value of :func:`get_lattice`.
+ An FsaVec with axes [utt][state][arc]. It must have the following
+ attributes: ``aux_labels`` and ``lm_scores``. Its labels are
+      token IDs and its ``aux_labels`` are word IDs.
G:
- An FsaVec representing the language model (LM). Note that it
- is an FsaVec, but it contains only one Fsa.
+ An FsaVec containing only a single FSA. It is an n-gram LM.
num_paths:
- It is the size `n` in `n-best` list.
+ Size of nbest list.
lm_scale_list:
- A list containing lm_scale values.
- scale:
- It's the scale applied to the lattice.scores. A smaller value
- yields more unique paths.
+      A list of floats representing LM score scales.
+    lattice_score_scale:
+      Scale to be applied to ``lattice.scores`` when sampling paths
+ using ``k2.random_paths``.
+ use_double_scores:
+ True to use double precision during computation. False to use
+ single precision.
Returns:
A dict of FsaVec, whose key is an lm_scale and the value is the
- best decoding path for each sequence in the lattice.
+ best decoding path for each utterance in the lattice.
"""
device = lattice.device
@@ -418,119 +627,32 @@ def rescore_with_n_best_list(
assert G.device == device
assert hasattr(G, "aux_labels") is False
- path = _get_random_paths(
+ nbest = Nbest.from_lattice(
lattice=lattice,
num_paths=num_paths,
- use_double_scores=True,
- scale=scale,
- )
-
- # word_seq is a k2.RaggedTensor sharing the same shape as `path`
- # but it contains word IDs. Note that it also contains 0s and -1s.
- # The last entry in each sublist is -1.
- if isinstance(lattice.aux_labels, torch.Tensor):
- word_seq = k2.ragged.index(lattice.aux_labels, path)
- else:
- word_seq = lattice.aux_labels.index(path)
- word_seq = word_seq.remove_axis(word_seq.num_axes - 2)
-
- # Remove epsilons and -1 from word_seq
- word_seq = word_seq.remove_values_leq(0)
-
- # Remove paths that has identical word sequences.
- #
- # unique_word_seq is still a k2.RaggedTensor with 3 axes [seq][path][word]
- # except that there are no repeated paths with the same word_seq
- # within a sequence.
- #
- # num_repeats is also a k2.RaggedTensor with 2 axes containing the
- # multiplicities of each path.
- # num_repeats.numel() == unique_word_seqs.tot_size(1)
- #
- # Since k2.ragged.unique_sequences will reorder paths within a seq,
- # `new2old` is a 1-D torch.Tensor mapping from the output path index
- # to the input path index.
- # new2old.numel() == unique_word_seqs.tot_size(1)
- unique_word_seq, num_repeats, new2old = word_seq.unique(
- need_num_repeats=True, need_new2old_indexes=True
+ use_double_scores=use_double_scores,
+ lattice_score_scale=lattice_score_scale,
)
+ # nbest.fsa.scores are all 0s at this point
- seq_to_path_shape = unique_word_seq.shape.get_layer(0)
-
- # path_to_seq_map is a 1-D torch.Tensor.
- # path_to_seq_map[i] is the seq to which the i-th path
- # belongs.
- path_to_seq_map = seq_to_path_shape.row_ids(1)
+ nbest = nbest.intersect(lattice)
+ # Now nbest.fsa has its scores set
+ assert hasattr(nbest.fsa, "lm_scores")
- # Remove the seq axis.
- # Now unique_word_seq has only two axes [path][word]
- unique_word_seq = unique_word_seq.remove_axis(0)
+ am_scores = nbest.compute_am_scores()
- # word_fsa is an FsaVec with axes [path][state][arc]
- word_fsa = k2.linear_fsa(unique_word_seq)
-
- word_fsa_with_epsilon_loops = k2.add_epsilon_self_loops(word_fsa)
-
- am_scores, _ = compute_am_and_lm_scores(
- lattice, word_fsa_with_epsilon_loops, path_to_seq_map
- )
-
- # Now compute lm_scores
- b_to_a_map = torch.zeros_like(path_to_seq_map)
- lm_path_lattice = _intersect_device(
- G,
- word_fsa_with_epsilon_loops,
- b_to_a_map=b_to_a_map,
- sorted_match_a=True,
- )
- lm_path_lattice = k2.top_sort(k2.connect(lm_path_lattice))
- lm_scores = lm_path_lattice.get_tot_scores(
- use_double_scores=True, log_semiring=False
- )
-
- path_2axes = path.remove_axis(0)
+ nbest = nbest.intersect(G)
+ # Now nbest contains only lm scores
+ lm_scores = nbest.tot_scores()
ans = dict()
for lm_scale in lm_scale_list:
- tot_scores = am_scores / lm_scale + lm_scores
-
- # Remember that we used `k2.RaggedTensor.unique` to remove repeated
- # paths to avoid redundant computation in `k2.intersect_device`.
- # Now we use `num_repeats` to correct the scores for each path.
- #
- # NOTE(fangjun): It is commented out as it leads to a worse WER
- # tot_scores = tot_scores * num_repeats.values()
-
- ragged_tot_scores = k2.RaggedTensor(seq_to_path_shape, tot_scores)
- argmax_indexes = ragged_tot_scores.argmax()
-
- # Use k2.index here since argmax_indexes' dtype is torch.int32
- best_path_indexes = k2.index_select(new2old, argmax_indexes)
-
- # best_path is a k2.RaggedInt with 2 axes [path][arc_pos]
- best_path, _ = path_2axes.index(
- indexes=best_path_indexes, axis=0, need_value_indexes=False
- )
-
- # labels is a k2.RaggedTensor with 2 axes [path][phone_id]
- # Note that it contains -1s.
- labels = k2.ragged.index(lattice.labels.contiguous(), best_path)
-
- labels = labels.remove_values_eq(-1)
-
- # lattice.aux_labels is a k2.RaggedTensor tensor with 2 axes, so
- # aux_labels is also a k2.RaggedTensor with 2 axes
-
- aux_labels, _ = lattice.aux_labels.index(
- indexes=best_path.values, axis=0, need_value_indexes=False
- )
-
- best_path_fsa = k2.linear_fsa(labels)
- best_path_fsa.aux_labels = aux_labels
-
+ tot_scores = am_scores.values / lm_scale + lm_scores.values
+ tot_scores = k2.RaggedTensor(nbest.shape, tot_scores)
+ max_indexes = tot_scores.argmax()
+ best_path = k2.index_fsa(nbest.fsa, max_indexes)
key = f"lm_scale_{lm_scale}"
- ans[key] = best_path_fsa
-
+ ans[key] = best_path
return ans
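One possibly non-obvious detail in the loop above: dividing `am_scores` by `lm_scale` (instead of multiplying `lm_scores` by it) does not change which path wins, since `am / s + lm == (am + s * lm) / s` and a positive global factor leaves the argmax untouched. A small numeric sketch with made-up scores (not part of the patch):

    import torch

    am = torch.tensor([-10.0, -12.0, -11.0])  # hypothetical AM scores of 3 paths
    lm = torch.tensor([-5.0, -3.0, -4.5])     # hypothetical LM scores
    s = 0.7                                   # lm_scale

    a = am / s + lm  # formulation used above
    b = am + s * lm  # the more familiar formulation
    assert a.argmax() == b.argmax()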
@@ -538,25 +660,40 @@ def rescore_with_whole_lattice(
lattice: k2.Fsa,
G_with_epsilon_loops: k2.Fsa,
lm_scale_list: Optional[List[float]] = None,
+ use_double_scores: bool = True,
) -> Union[k2.Fsa, Dict[str, k2.Fsa]]:
- """Use whole lattice to rescore.
+ """Intersect the lattice with an n-gram LM and use shortest path
+ to decode.
+
+ The input lattice is obtained by intersecting `HLG` with
+ a DenseFsaVec, where the `G` in `HLG` is in general a 3-gram LM.
+ The input `G_with_epsilon_loops` is usually a 4-gram LM. You can consider
+ this function as a second pass decoding. In the first pass decoding, we
+ use a small G, while we use a larger G in the second pass decoding.
Args:
lattice:
- An FsaVec It can be the return value of :func:`get_lattice`.
+      An FsaVec with axes [utt][state][arc]. Its `aux_labels` are word IDs.
+ It must have an attribute `lm_scores`.
G_with_epsilon_loops:
- An FsaVec representing the language model (LM). Note that it
- is an FsaVec, but it contains only one Fsa.
+ An FsaVec containing only a single FSA. It contains epsilon self-loops.
+ It is an acceptor and its labels are word IDs.
lm_scale_list:
- A list containing lm_scale values or None.
+      Optional. If None, return the intersection of `lattice` and
+ `G_with_epsilon_loops`.
+ If not None, it contains a list of values to scale LM scores.
+ For each scale, there is a corresponding decoding result contained in
+ the resulting dict.
+ use_double_scores:
+ True to use double precision in the computation.
+ False to use single precision.
Returns:
- If lm_scale_list is not None, return a dict of FsaVec, whose key
- is a lm_scale and the value represents the best decoding path for
- each sequence in the lattice.
- If lm_scale_list is not None, return a lattice that is rescored
- with the given LM.
+ If `lm_scale_list` is None, return a new lattice which is the intersection
+ result of `lattice` and `G_with_epsilon_loops`.
+ Otherwise, return a dict whose key is an entry in `lm_scale_list` and the
+ value is the decoding result (i.e., an FsaVec containing linear FSAs).
"""
- assert len(lattice.shape) == 3
+ # Nbest is not used in this function
assert hasattr(lattice, "lm_scores")
assert G_with_epsilon_loops.shape == (1, None, None)
@@ -564,19 +701,22 @@ def rescore_with_whole_lattice(
lattice.scores = lattice.scores - lattice.lm_scores
# We will use lm_scores from G, so remove lats.lm_scores here
del lattice.lm_scores
- assert hasattr(lattice, "lm_scores") is False
assert hasattr(G_with_epsilon_loops, "lm_scores")
# Now, lattice.scores contains only am_scores
# inv_lattice has word IDs as labels.
- # Its aux_labels are token IDs, which is a ragged tensor k2.RaggedInt
+ # Its `aux_labels` is token IDs
inv_lattice = k2.invert(lattice)
num_seqs = lattice.shape[0]
b_to_a_map = torch.zeros(num_seqs, device=device, dtype=torch.int32)
- while True:
+
+ max_loop_count = 10
+ loop_count = 0
+ while loop_count <= max_loop_count:
+ loop_count += 1
try:
rescoring_lattice = k2.intersect_device(
G_with_epsilon_loops,
@@ -592,12 +732,15 @@ def rescore_with_whole_lattice(
f"num_arcs before pruning: {inv_lattice.arcs.num_elements()}"
)
- # NOTE(fangjun): The choice of the threshold 1e-7 is arbitrary here
- # to avoid OOM. We may need to fine tune it.
- inv_lattice = k2.prune_on_arc_post(inv_lattice, 1e-7, True)
+ # NOTE(fangjun): The choice of the threshold 1e-9 is arbitrary here
+ # to avoid OOM. You may need to fine tune it.
+ inv_lattice = k2.prune_on_arc_post(inv_lattice, 1e-9, True)
logging.info(
f"num_arcs after pruning: {inv_lattice.arcs.num_elements()}"
)
+ if loop_count > max_loop_count:
+ logging.info("Return None as the resulting lattice is too large")
+ return None
# lat has token IDs as labels
# and word IDs as aux_labels.
@@ -607,117 +750,37 @@ def rescore_with_whole_lattice(
return lat
ans = dict()
- #
- # The following implements
- # scores = (scores - lm_scores)/lm_scale + lm_scores
- # = scores/lm_scale + lm_scores*(1 - 1/lm_scale)
- #
saved_am_scores = lat.scores - lat.lm_scores
for lm_scale in lm_scale_list:
am_scores = saved_am_scores / lm_scale
lat.scores = am_scores + lat.lm_scores
- best_path = k2.shortest_path(lat, use_double_scores=True)
+ best_path = k2.shortest_path(lat, use_double_scores=use_double_scores)
key = f"lm_scale_{lm_scale}"
ans[key] = best_path
return ans
-def nbest_oracle(
- lattice: k2.Fsa,
- num_paths: int,
- ref_texts: List[str],
- word_table: k2.SymbolTable,
- scale: float = 1.0,
-) -> Dict[str, List[List[int]]]:
- """Select the best hypothesis given a lattice and a reference transcript.
-
- The basic idea is to extract n paths from the given lattice, unique them,
- and select the one that has the minimum edit distance with the corresponding
- reference transcript as the decoding output.
-
- The decoding result returned from this function is the best result that
- we can obtain using n-best decoding with all kinds of rescoring techniques.
-
- Args:
- lattice:
- An FsaVec. It can be the return value of :func:`get_lattice`.
- Note: We assume its aux_labels contain word IDs.
- num_paths:
- The size of `n` in n-best.
- ref_texts:
- A list of reference transcript. Each entry contains space(s)
- separated words
- word_table:
- It is the word symbol table.
- scale:
- It's the scale applied to the lattice.scores. A smaller value
- yields more unique paths.
- Return:
- Return a dict. Its key contains the information about the parameters
- when calling this function, while its value contains the decoding output.
- `len(ans_dict) == len(ref_texts)`
- """
- path = _get_random_paths(
- lattice=lattice,
- num_paths=num_paths,
- use_double_scores=True,
- scale=scale,
- )
-
- if isinstance(lattice.aux_labels, torch.Tensor):
- word_seq = k2.ragged.index(lattice.aux_labels, path)
- else:
- word_seq = lattice.aux_labels.index(path)
- word_seq = word_seq.remove_axis(word_seq.num_axes - 2)
-
- word_seq = word_seq.remove_values_leq(0)
- unique_word_seq, _, _ = word_seq.unique(
- need_num_repeats=False, need_new2old_indexes=False
- )
- unique_word_ids = unique_word_seq.tolist()
- assert len(unique_word_ids) == len(ref_texts)
- # unique_word_ids[i] contains all hypotheses of the i-th utterance
-
- results = []
- for hyps, ref in zip(unique_word_ids, ref_texts):
- # Note hyps is a list-of-list ints
- # Each sublist contains a hypothesis
- ref_words = ref.strip().split()
- # CAUTION: We don't convert ref_words to ref_words_ids
- # since there may exist OOV words in ref_words
- best_hyp_words = None
- min_error = float("inf")
- for hyp_words in hyps:
- hyp_words = [word_table[i] for i in hyp_words]
- this_error = kaldialign.edit_distance(ref_words, hyp_words)["total"]
- if this_error < min_error:
- min_error = this_error
- best_hyp_words = hyp_words
- results.append(best_hyp_words)
-
- return {f"nbest_{num_paths}_scale_{scale}_oracle": results}
-
-
def rescore_with_attention_decoder(
lattice: k2.Fsa,
num_paths: int,
- model: nn.Module,
+ model: torch.nn.Module,
memory: torch.Tensor,
memory_key_padding_mask: Optional[torch.Tensor],
sos_id: int,
eos_id: int,
- scale: float = 1.0,
+ lattice_score_scale: float = 1.0,
ngram_lm_scale: Optional[float] = None,
attention_scale: Optional[float] = None,
+ use_double_scores: bool = True,
) -> Dict[str, k2.Fsa]:
- """This function extracts n paths from the given lattice and uses
- an attention decoder to rescore them. The path with the highest
- score is used as the decoding output.
+ """This function extracts `num_paths` paths from the given lattice and uses
+ an attention decoder to rescore them. The path with the highest score is
+ the decoding output.
Args:
lattice:
- An FsaVec. It can be the return value of :func:`get_lattice`.
+ An FsaVec with axes [utt][state][arc].
num_paths:
Number of paths to extract from the given lattice for rescoring.
model:
@@ -726,16 +789,16 @@ def rescore_with_attention_decoder(
memory:
The encoder memory of the given model. It is the output of
the last torch.nn.TransformerEncoder layer in the given model.
- Its shape is `[T, N, C]`.
+ Its shape is `(T, N, C)`.
memory_key_padding_mask:
- The padding mask for memory with shape [N, T].
+ The padding mask for memory with shape `(N, T)`.
sos_id:
The token ID for SOS.
eos_id:
The token ID for EOS.
- scale:
- It's the scale applied to the lattice.scores. A smaller value
- yields more unique paths.
+ lattice_score_scale:
+ It's the scale applied to `lattice.scores`. A smaller value
+ leads to more unique paths at the risk of missing the correct path.
ngram_lm_scale:
Optional. It specifies the scale for n-gram LM scores.
attention_scale:
@@ -743,105 +806,47 @@ def rescore_with_attention_decoder(
Returns:
A dict of FsaVec, whose key contains a string
ngram_lm_scale_attention_scale and the value is the
- best decoding path for each sequence in the lattice.
+ best decoding path for each utterance in the lattice.
"""
- # First, extract `num_paths` paths for each sequence.
- # path is a k2.RaggedInt with axes [seq][path][arc_pos]
- path = _get_random_paths(
+ nbest = Nbest.from_lattice(
lattice=lattice,
num_paths=num_paths,
- use_double_scores=True,
- scale=scale,
- )
-
- # word_seq is a k2.RaggedTensor sharing the same shape as `path`
- # but it contains word IDs. Note that it also contains 0s and -1s.
- # The last entry in each sublist is -1.
- if isinstance(lattice.aux_labels, torch.Tensor):
- word_seq = k2.ragged.index(lattice.aux_labels, path)
- else:
- word_seq = lattice.aux_labels.index(path)
- word_seq = word_seq.remove_axis(word_seq.num_axes - 2)
-
- # Remove epsilons and -1 from word_seq
- word_seq = word_seq.remove_values_leq(0)
-
- # Remove paths that has identical word sequences.
- #
- # unique_word_seq is still a k2.RaggedTensor with 3 axes [seq][path][word]
- # except that there are no repeated paths with the same word_seq
- # within a sequence.
- #
- # num_repeats is also a k2.RaggedTensor with 2 axes containing the
- # multiplicities of each path.
- # num_repeats.numel() == unique_word_seqs.tot_size(1)
- #
- # Since k2.ragged.unique_sequences will reorder paths within a seq,
- # `new2old` is a 1-D torch.Tensor mapping from the output path index
- # to the input path index.
- # new2old.numel() == unique_word_seq.tot_size(1)
- unique_word_seq, num_repeats, new2old = word_seq.unique(
- need_num_repeats=True, need_new2old_indexes=True
- )
-
- seq_to_path_shape = unique_word_seq.shape.get_layer(0)
-
- # path_to_seq_map is a 1-D torch.Tensor.
- # path_to_seq_map[i] is the seq to which the i-th path
- # belongs.
- path_to_seq_map = seq_to_path_shape.row_ids(1)
-
- # Remove the seq axis.
- # Now unique_word_seq has only two axes [path][word]
- unique_word_seq = unique_word_seq.remove_axis(0)
-
- # word_fsa is an FsaVec with axes [path][state][arc]
- word_fsa = k2.linear_fsa(unique_word_seq)
-
- word_fsa_with_epsilon_loops = k2.add_epsilon_self_loops(word_fsa)
-
- am_scores, ngram_lm_scores = compute_am_and_lm_scores(
- lattice, word_fsa_with_epsilon_loops, path_to_seq_map
+ use_double_scores=use_double_scores,
+ lattice_score_scale=lattice_score_scale,
)
- # Now we use the attention decoder to compute another
- # score: attention_scores.
- #
- # To do that, we have to get the input and output for the attention
- # decoder.
-
- # CAUTION: The "tokens" attribute is set in the file
- # local/compile_hlg.py
- if isinstance(lattice.tokens, torch.Tensor):
- token_seq = k2.ragged.index(lattice.tokens, path)
- else:
- token_seq = lattice.tokens.index(path)
- token_seq = token_seq.remove_axis(token_seq.num_axes - 2)
-
- # Remove epsilons and -1 from token_seq
- token_seq = token_seq.remove_values_leq(0)
-
- # Remove the seq axis.
- token_seq = token_seq.remove_axis(0)
+ # nbest.fsa.scores are all 0s at this point
- token_seq, _ = token_seq.index(
- indexes=new2old, axis=0, need_value_indexes=False
- )
+ nbest = nbest.intersect(lattice)
+ # Now nbest.fsa has its scores set.
+ # Also, nbest.fsa inherits the attributes from `lattice`.
+ assert hasattr(nbest.fsa, "lm_scores")
- # Now word in unique_word_seq has its corresponding token IDs.
- token_ids = token_seq.tolist()
+ am_scores = nbest.compute_am_scores()
+ ngram_lm_scores = nbest.compute_lm_scores()
- num_word_seqs = new2old.numel()
+ # The `tokens` attribute is set inside `compile_hlg.py`
+ assert hasattr(nbest.fsa, "tokens")
+ assert isinstance(nbest.fsa.tokens, torch.Tensor)
- path_to_seq_map_long = path_to_seq_map.to(torch.long)
- expanded_memory = memory.index_select(1, path_to_seq_map_long)
+ path_to_utt_map = nbest.shape.row_ids(1).to(torch.long)
+ # The shape of memory is (T, N, C), so we use axis=1 here.
+ expanded_memory = memory.index_select(1, path_to_utt_map)
if memory_key_padding_mask is not None:
+ # The shape of memory_key_padding_mask is (N, T), so we
+ # use axis=0 here.
expanded_memory_key_padding_mask = memory_key_padding_mask.index_select(
- 0, path_to_seq_map_long
+ 0, path_to_utt_map
)
else:
expanded_memory_key_padding_mask = None
+ # Remove the axis corresponding to states.
+ tokens_shape = nbest.fsa.arcs.shape().remove_axis(1)
+ tokens = k2.RaggedTensor(tokens_shape, nbest.fsa.tokens)
+ tokens = tokens.remove_values_leq(0)
+ token_ids = tokens.tolist()
+
nll = model.decoder_nll(
memory=expanded_memory,
memory_key_padding_mask=expanded_memory_key_padding_mask,
@@ -850,62 +855,36 @@ def rescore_with_attention_decoder(
eos_id=eos_id,
)
assert nll.ndim == 2
- assert nll.shape[0] == num_word_seqs
+ assert nll.shape[0] == len(token_ids)
attention_scores = -nll.sum(dim=1)
- assert attention_scores.ndim == 1
- assert attention_scores.numel() == num_word_seqs
if ngram_lm_scale is None:
- ngram_lm_scale_list = [0.1, 0.3, 0.5, 0.6, 0.7, 0.9, 1.0]
+ ngram_lm_scale_list = [0.01, 0.05, 0.08]
+ ngram_lm_scale_list += [0.1, 0.3, 0.5, 0.6, 0.7, 0.9, 1.0]
ngram_lm_scale_list += [1.1, 1.2, 1.3, 1.5, 1.7, 1.9, 2.0]
else:
ngram_lm_scale_list = [ngram_lm_scale]
if attention_scale is None:
- attention_scale_list = [0.1, 0.3, 0.5, 0.6, 0.7, 0.9, 1.0]
+ attention_scale_list = [0.01, 0.05, 0.08]
+ attention_scale_list += [0.1, 0.3, 0.5, 0.6, 0.7, 0.9, 1.0]
attention_scale_list += [1.1, 1.2, 1.3, 1.5, 1.7, 1.9, 2.0]
else:
attention_scale_list = [attention_scale]
- path_2axes = path.remove_axis(0)
-
ans = dict()
for n_scale in ngram_lm_scale_list:
for a_scale in attention_scale_list:
tot_scores = (
- am_scores
- + n_scale * ngram_lm_scores
+ am_scores.values
+ + n_scale * ngram_lm_scores.values
+ a_scale * attention_scores
)
- ragged_tot_scores = k2.RaggedTensor(seq_to_path_shape, tot_scores)
- argmax_indexes = ragged_tot_scores.argmax()
-
- best_path_indexes = k2.index_select(new2old, argmax_indexes)
-
- # best_path is a k2.RaggedInt with 2 axes [path][arc_pos]
- best_path, _ = path_2axes.index(
- indexes=best_path_indexes, axis=0, need_value_indexes=False
- )
-
- # labels is a k2.RaggedTensor with 2 axes [path][token_id]
- # Note that it contains -1s.
- labels = k2.ragged.index(lattice.labels.contiguous(), best_path)
-
- labels = labels.remove_values_eq(-1)
-
- if isinstance(lattice.aux_labels, torch.Tensor):
- aux_labels = k2.index_select(
- lattice.aux_labels, best_path.values
- )
- else:
- aux_labels, _ = lattice.aux_labels.index(
- indexes=best_path.values, axis=0, need_value_indexes=False
- )
-
- best_path_fsa = k2.linear_fsa(labels)
- best_path_fsa.aux_labels = aux_labels
+ ragged_tot_scores = k2.RaggedTensor(nbest.shape, tot_scores)
+ max_indexes = ragged_tot_scores.argmax()
+ best_path = k2.index_fsa(nbest.fsa, max_indexes)
key = f"ngram_lm_scale_{n_scale}_attention_scale_{a_scale}"
- ans[key] = best_path_fsa
+ ans[key] = best_path
return ans
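As a usage note for the refactored API above, here is a minimal sketch of how the returned dict is typically consumed; `lattice`, `model`, `memory`, `memory_key_padding_mask`, `sos_id`, and `eos_id` are assumed to come from the surrounding decoding script and are not part of this patch:

    # Hypothetical usage sketch (not part of the patch); num_paths and
    # lattice_score_scale are example values only.
    from icefall.decode import rescore_with_attention_decoder
    from icefall.utils import get_texts

    ans = rescore_with_attention_decoder(
        lattice=lattice,
        num_paths=100,
        model=model,
        memory=memory,
        memory_key_padding_mask=memory_key_padding_mask,
        sos_id=sos_id,
        eos_id=eos_id,
        lattice_score_scale=0.5,
    )
    for key, best_path in ans.items():
        # best_path is an FsaVec with one path per utterance; its aux_labels
        # hold the word IDs of the rescored hypothesis.
        hyps = get_texts(best_path)
        print(key, hyps[0])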
diff --git a/icefall/graph_compiler.py b/icefall/graph_compiler.py
index 23ac247e8a..b4c87d9640 100644
--- a/icefall/graph_compiler.py
+++ b/icefall/graph_compiler.py
@@ -106,7 +106,7 @@ def convert_transcript_to_fsa(self, texts: List[str]) -> k2.Fsa:
word_ids_list = []
for text in texts:
word_ids = []
- for word in text.split(" "):
+ for word in text.split():
if word in self.word_table:
word_ids.append(self.word_table[word])
else:
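A quick illustration of why `str.split()` is preferable to `split(" ")` here: the latter yields empty strings for consecutive, leading, or trailing spaces, and each empty string would then be treated as an OOV word by the lookup above:

    text = "  HELLO   WORLD "
    print(text.split(" "))  # ['', '', 'HELLO', '', '', 'WORLD', '']
    print(text.split())     # ['HELLO', 'WORLD']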
diff --git a/icefall/utils.py b/icefall/utils.py
index cc658ae323..2324201c32 100644
--- a/icefall/utils.py
+++ b/icefall/utils.py
@@ -186,7 +186,9 @@ def encode_supervisions(
return supervision_segments, texts
-def get_texts(best_paths: k2.Fsa) -> List[List[int]]:
+def get_texts(
+ best_paths: k2.Fsa, return_ragged: bool = False
+) -> Union[List[List[int]], k2.RaggedTensor]:
"""Extract the texts (as word IDs) from the best-path FSAs.
Args:
best_paths:
@@ -194,6 +196,9 @@ def get_texts(best_paths: k2.Fsa) -> List[List[int]]:
containing multiple FSAs, which is expected to be the result
of k2.shortest_path (otherwise the returned values won't
be meaningful).
+ return_ragged:
+ True to return a ragged tensor with two axes [utt][word_id].
+ False to return a list of lists of word IDs.
Returns:
Returns a list of lists of int, containing the label sequences we
decoded.
@@ -216,7 +221,10 @@ def get_texts(best_paths: k2.Fsa) -> List[List[int]]:
aux_labels = aux_labels.remove_values_leq(0)
assert aux_labels.num_axes == 2
- return aux_labels.tolist()
+ if return_ragged:
+ return aux_labels
+ else:
+ return aux_labels.tolist()
def store_transcripts(
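A small sketch of the new `return_ragged` option; `best_path` is assumed to be an FsaVec produced by `k2.shortest_path` or by one of the rescoring functions in `icefall/decode.py`:

    from icefall.utils import get_texts

    hyps = get_texts(best_path)                        # List[List[int]] of word IDs
    ragged = get_texts(best_path, return_ragged=True)  # k2.RaggedTensor, axes [utt][word_id]
    assert ragged.tolist() == hyps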
diff --git a/test/test_decode.py b/test/test_decode.py
new file mode 100644
index 0000000000..7ef1277819
--- /dev/null
+++ b/test/test_decode.py
@@ -0,0 +1,62 @@
+#!/usr/bin/env python3
+# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang)
+#
+# See ../../LICENSE for clarification regarding multiple authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+You can run this file in one of two ways:
+
+ (1) cd icefall; pytest test/test_decode.py
+ (2) cd icefall; ./test/test_decode.py
+"""
+
+import k2
+from icefall.decode import Nbest
+
+
+def test_nbest_from_lattice():
+ s = """
+ 0 1 1 10 0.1
+ 0 1 5 10 0.11
+ 0 1 2 20 0.2
+ 1 2 3 30 0.3
+ 1 2 4 40 0.4
+ 2 3 -1 -1 0.5
+ 3
+ """
+ lattice = k2.Fsa.from_str(s, acceptor=False)
+ lattice = k2.Fsa.from_fsas([lattice, lattice])
+
+ nbest = Nbest.from_lattice(
+ lattice=lattice,
+ num_paths=10,
+ use_double_scores=True,
+ lattice_score_scale=0.5,
+ )
+ # each lattice has only 4 distinct paths that have different word sequences:
+ # 10->30
+ # 10->40
+ # 20->30
+ # 20->40
+ #
+ # So there should be only 4 paths for each lattice in the Nbest object
+ assert nbest.fsa.shape[0] == 4 * 2
+ assert nbest.shape.row_splits(1).tolist() == [0, 4, 8]
+
+ nbest2 = nbest.intersect(lattice)
+ tot_scores = nbest2.tot_scores()
+ argmax = tot_scores.argmax()
+ best_path = k2.index_fsa(nbest2.fsa, argmax)
+ print(best_path[0])
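For readers unfamiliar with ragged argmax, a tiny standalone sketch (with made-up scores) of the mechanism the test relies on: for a ragged tensor with axes [utt][path], `argmax()` returns one index into the flattened values per utterance, and `k2.index_fsa` then gathers the corresponding path:

    import k2

    # Scores for 2 utterances: 3 candidate paths for the first, 2 for the second.
    scores = k2.RaggedTensor([[0.1, 2.0, 0.3], [1.5, 0.2]])
    argmax = scores.argmax()
    # argmax indexes into scores.values, i.e. [1, 3]:
    # path 1 of utterance 0 and path 0 of utterance 1.
    print(argmax.tolist())  # [1, 3]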