
Commit e8df014 ("type ignores")
Parent commit: 019697e

File tree: 7 files changed (+10, -10 lines)

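The commit narrows mypy suppressions to specific error codes: a bare "# type: ignore" comment silences every error on its line, whereas "# type: ignore[code]" silences only the named code and still lets other errors through. A minimal standalone illustration of the difference (not taken from the gluonts sources):

    from typing import List

    def head(xs: List[int]) -> int:
        return xs[0]

    head("abc")                            # mypy: incompatible argument  [arg-type]
    head("abc")  # type: ignore[arg-type]  # only the arg-type error is suppressed
    head("abc")  # type: ignore            # every error on this line is suppressed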

src/gluonts/itertools.py (+1, -1)

@@ -298,7 +298,7 @@ def split_into(xs: Sequence, n: int) -> Sequence:
     # e.g. 10 by 3 -> 4, 3, 3
     relative_splits[:remainder] += 1

-    return split(xs, np.cumsum(relative_splits))
+    return split(xs, np.cumsum(relative_splits))  # type: ignore[arg-type]


 @dataclass
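
For reference, arg-type is mypy's code for an argument whose type does not match the callee's parameter type. In the hunk above, np.cumsum returns an ndarray, which presumably does not satisfy the annotation of split's second parameter; the signature in this sketch is an assumption, not the actual gluonts one:

    from typing import List, Sequence

    import numpy as np

    def split(xs: Sequence, indices: List[int]) -> List[Sequence]:
        # `indices` are cumulative cut points including the end,
        # e.g. [4, 7, 10] on 10 items -> chunks of 4, 3, 3
        return [xs[i:j] for i, j in zip([0, *indices[:-1]], indices)]

    relative_splits = np.array([4, 3, 3])
    # np.cumsum(...) is an ndarray rather than List[int], hence the arg-type report:
    parts = split(list(range(10)), np.cumsum(relative_splits))  # type: ignore[arg-type]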

src/gluonts/model/forecast.py (+2, -2)

@@ -178,14 +178,14 @@ class Quantile:
     @classmethod
     def from_float(cls, quantile: float) -> "Quantile":
         assert isinstance(quantile, float)
-        return cls(value=quantile, name=str(quantile))
+        return cls(value=quantile, name=str(quantile))  # type: ignore[call-arg]

     @classmethod
     def from_str(cls, quantile: str) -> "Quantile":
         assert isinstance(quantile, str)

         try:
-            return cls(value=float(quantile), name=quantile)
+            return cls(value=float(quantile), name=quantile)  # type: ignore[call-arg]
         except ValueError:
             m = re.match(r"^p(\d+)$", quantile)
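
call-arg is the code mypy uses when a call's arguments do not match the signature it can see. The value= and name= keywords clearly work at runtime, so the likely situation is that Quantile's constructor is generated dynamically (for example by a pydantic-style base class) and is invisible to mypy; the same code is silenced in the learning-rate-scheduler change below. A hypothetical sketch of that situation, not the actual Quantile class:

    class DynamicInit:
        """Injects a keyword-based __init__ into every subclass at runtime."""

        def __init_subclass__(cls, **kwargs):
            super().__init_subclass__(**kwargs)

            def __init__(self, **fields):
                for key, value in fields.items():
                    setattr(self, key, value)

            cls.__init__ = __init__  # mypy may flag this assignment as well

    class Quantile(DynamicInit):
        value: float
        name: str

    # Works at runtime, but mypy only sees object.__init__ and reports
    # 'Unexpected keyword argument "value" for "Quantile"'  [call-arg]
    q = Quantile(value=0.5, name="0.5")  # type: ignore[call-arg]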

src/gluonts/mx/trainer/learning_rate_scheduler.py (+2, -2)

@@ -247,8 +247,8 @@ def __init__(
             0 <= min_lr <= base_lr
         ), "The value of `min_lr` should be >= 0 and <= base_lr"

-        self.lr_scheduler = MetricAttentiveScheduler(
-            patience=Patience(
+        self.lr_scheduler = MetricAttentiveScheduler(  # type: ignore[call-arg]
+            patience=Patience(  # type: ignore[call-arg]
                 patience=patience, objective=Objective.from_str(objective)
             ),
             learning_rate=base_lr,

src/gluonts/torch/model/patch_tst/module.py (+1, -1)

@@ -229,7 +229,7 @@ def forward(
         # shift time features by `prediction_length` so that they are
         # aligned with the target input.
         time_feat = take_last(
-            torch.cat((past_time_feat, future_time_feat), dim=1),
+            torch.cat((past_time_feat, future_time_feat), dim=1),  # type: ignore[arg-type]
             dim=1,
             num=self.context_length,
         )

src/gluonts/torch/model/tft/layers.py (+1, -1)

@@ -27,7 +27,7 @@ def forward(self, features: torch.Tensor) -> List[torch.Tensor]:  # type: ignore
         concat_features = super().forward(features=features)

         if self._num_features > 1:
-            return torch.chunk(concat_features, self._num_features, dim=-1)
+            return torch.chunk(concat_features, self._num_features, dim=-1)  # type: ignore[return-value]
         else:
             return [concat_features]

src/gluonts/torch/modules/feature.py (+1, -1)

@@ -38,7 +38,7 @@ def forward(self, features: torch.Tensor) -> torch.Tensor:
                 features, self._num_features, dim=-1
             )
         else:
-            cat_feature_slices = [features]
+            cat_feature_slices = [features]  # type: ignore[assignment]

         return torch.cat(
             [
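
The return-value ignore in tft/layers.py and the assignment ignore here appear to share one root cause: torch.chunk returns a tuple of tensors (and is annotated that way in the type stubs), while the surrounding annotations and the first binding of cat_feature_slices are phrased in terms of lists. A minimal sketch of both reports, independent of the gluonts modules:

    from typing import List

    import torch

    def halves(x: torch.Tensor) -> List[torch.Tensor]:
        # torch.chunk yields a tuple of tensors; the declared List return
        # type therefore triggers a return-value report:
        return torch.chunk(x, 2, dim=-1)  # type: ignore[return-value]

    x = torch.zeros(2, 6)
    if x.shape[-1] > 3:
        slices = torch.chunk(x, 3, dim=-1)  # inferred as a tuple of tensors
    else:
        # a list is incompatible with the inferred tuple type:
        slices = [x]  # type: ignore[assignment]

    left, right = halves(x)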

src/gluonts/zebras/_time_frame.py (+2, -2)

@@ -453,11 +453,11 @@ def split(

         # If past_length is not provided, it will equal to `index`, since
         # `len(tf.split(5).past) == 5`
-        past_length: int = maybe.unwrap_or(past_length, index)
+        past_length: int = maybe.unwrap_or(past_length, index)  # type: ignore[annotation-unchecked]

         # Same logic applies to future_length, except that we deduct from the
         # right. (We can't use past_length, since it can be unequal to index).
-        future_length: int = maybe.unwrap_or(future_length, len(self) - index)
+        future_length: int = maybe.unwrap_or(future_length, len(self) - index)  # type: ignore[annotation-unchecked]

         if self.index is None:
             new_index = None
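
annotation-unchecked is the code attached to mypy's note that variable annotations inside a function without an annotated signature are not actually checked (such bodies are skipped unless --check-untyped-defs is enabled). The ignores added above target that note on the two annotated assignments, which suggests mypy treats this split as untyped. A small standalone example of what triggers the note:

    def describe(values):  # no parameter or return annotations
        # mypy note: "By default the bodies of untyped functions are not
        # checked, consider using --check-untyped-defs"  [annotation-unchecked]
        total: int = sum(values)  # type: ignore[annotation-unchecked]
        return total

    print(describe([1, 2, 3]))  # prints 6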
