test/test_torchlike.py (67 additions, 0 deletions)
@@ -0,0 +1,67 @@
+# Test ability to type-check user-defined classes which have a "torch-like" interface
+# The required interface is defined as the protocol TensorLike in tensor_details.py
+
+from __future__ import annotations
+import pytest
+import torch
+from torch import rand
+
+from torchtyping import TensorType, TensorTypeMixin
+from typeguard import typechecked
+
+
+# New class that supports the tensor-like interface
+class MyTensor:
+    def __init__(self, tensor: torch.Tensor = torch.zeros(2, 3)):
+        self.tensor = tensor
+        self.dtype = self.tensor.dtype
+        self.layout = "something special"
+        self.names = self.tensor.names
+        self.shape = self.tensor.shape
+
+    def is_floating_point(self) -> bool:
+        return self.dtype == torch.float32
+
+    # Add tensors and take the mean over the last dimension
+    # Output drops the last dimension
+    def __add__(self, o: torch.Tensor) -> MyTensor:
+        res = self.tensor + o
+        res_reduced = torch.mean(res, -1)
+        res_myt = MyTensor(res_reduced)
+        return res_myt
+
+
+# Create a type corresponding to the new class
+class MyTensorType(MyTensor, TensorTypeMixin):
+    base_cls = MyTensor
+
+
+# make flake8 happy
+x = y = None
+
+
+def test_my_tensor1():
+    @typechecked
+    def func(x: MyTensorType["x", "y"], y: TensorType["x", "y"]) -> MyTensorType["x"]:
+        return x + y
+
+    @typechecked
+    def bad_func_spec(
+        x: MyTensorType["x", "y"], y: TensorType["x", "y"]
+    ) -> MyTensorType["x", "y"]:
+        return x + y
+
+    my_t: MyTensor = MyTensor()
+    func(my_t, rand((2, 3)))
+
+    # Incorrect input dimensions for x
+    with pytest.raises(TypeError):
+        func(MyTensor(rand(1)), rand((2, 3)))
+
+    # Incorrect input dimensions for y
+    with pytest.raises(TypeError):
+        func(my_t, rand(1))
+
+    # Incorrect spec for return dimensions
+    with pytest.raises(TypeError):
+        bad_func_spec(my_t, rand((2, 3)))
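For reference, the shape arithmetic the passing call relies on, written out in plain torch (a sketch mirroring the test above; the variable names are illustrative):

    import torch

    t = torch.zeros(2, 3)         # MyTensor() default: "x" = 2, "y" = 3
    res = t + torch.rand(2, 3)    # the elementwise add inside __add__, still (2, 3)
    out = torch.mean(res, -1)     # the reduction drops the last dimension -> (2,)
    assert out.shape == (2,)      # consistent with the MyTensorType["x"] return type

bad_func_spec fails for the same reason: its declared return type MyTensorType["x", "y"] still has two dimensions, but the value it returns has only one.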
torchtyping/__init__.py (1 addition, 1 deletion)
@@ -7,7 +7,7 @@
     TensorDetail,
 )
 
-from .tensor_type import TensorType
+from .tensor_type import TensorType, TensorTypeMixin
 from .typechecker import patch_typeguard
 
 __version__ = "0.1.4"
torchtyping/tensor_details.py (50 additions, 13 deletions)
@@ -4,24 +4,61 @@
 import collections
 import torch
 
-from typing import Optional, Union
+from typing import Optional, Union, runtime_checkable, Protocol, Tuple, Any
 
 
 ellipsis = type(...)
 
 
+# Define a Protocol (PEP 544) class to represent "tensor-like" objects
+# These are objects which support the interface given below
+@runtime_checkable
+class TensorLike(Protocol):
+    @property
+    def dtype(self) -> torch.dtype:
+        pass
+
+    # leave the layout definition open because tensor-like classes are likely
+    # to extend it with new storage types
+    @property
+    def layout(self) -> Any:
+        pass
+
+    @property
+    def names(self) -> Tuple[Optional[str], ...]:
+        pass
+
+    @property
+    def shape(self) -> Tuple[int, ...]:
+        pass
+
+    def is_floating_point(self) -> bool:
+        pass
+
+
+class MyTensor:
+    def __init__(self):
+        self.dtype = torch.float32
+        self.layout = "very special"
+        self.names = (None, None)
+        self.shape = (1, 1)
+
+    def is_floating_point(self):
+        return self.dtype == torch.float32
+
+
 class TensorDetail(metaclass=abc.ABCMeta):
     @abc.abstractmethod
     def __repr__(self) -> str:
         raise NotImplementedError
 
     @abc.abstractmethod
-    def check(self, tensor: torch.Tensor) -> bool:
+    def check(self, tensor: TensorLike) -> bool:
         raise NotImplementedError
 
     @classmethod
     @abc.abstractmethod
-    def tensor_repr(cls, tensor: torch.Tensor) -> str:
+    def tensor_repr(cls, tensor: TensorLike) -> str:
         raise NotImplementedError

@@ -69,7 +106,7 @@ def __repr__(self) -> str:
             out += ", is_named"
         return out
 
-    def check(self, tensor: torch.Tensor) -> bool:
+    def check(self, tensor: TensorLike) -> bool:
         self_names = [self_dim.name for self_dim in self.dims]
         self_shape = [self_dim.size for self_dim in self.dims]
 
@@ -103,7 +140,7 @@ def check(self, tensor: TensorLike) -> bool:
         return True
 
     @classmethod
-    def tensor_repr(cls, tensor: torch.Tensor) -> str:
+    def tensor_repr(cls, tensor: TensorLike) -> str:
         dims = []
         check_names = any(name is not None for name in tensor.names)
         for name, size in zip(tensor.names, tensor.shape):
@@ -133,11 +170,11 @@ def __init__(self, *, dtype, **kwargs) -> None:
     def __repr__(self) -> str:
         return repr(self.dtype)
 
-    def check(self, tensor: torch.Tensor) -> bool:
+    def check(self, tensor: TensorLike) -> bool:
         return self.dtype == tensor.dtype
 
     @classmethod
-    def tensor_repr(cls, tensor: torch.Tensor) -> str:
+    def tensor_repr(cls, tensor: TensorLike) -> str:
         return repr(cls(dtype=tensor.dtype))
 
 
@@ -149,23 +186,23 @@ def __init__(self, *, layout, **kwargs) -> None:
     def __repr__(self) -> str:
         return repr(self.layout)
 
-    def check(self, tensor: torch.Tensor) -> bool:
+    def check(self, tensor: TensorLike) -> bool:
         return self.layout == tensor.layout
 
     @classmethod
-    def tensor_repr(cls, tensor: torch.Tensor) -> str:
+    def tensor_repr(cls, tensor: TensorLike) -> str:
         return repr(cls(layout=tensor.layout))
 
 
 class _FloatDetail(TensorDetail):
     def __repr__(self) -> str:
         return "is_float"
 
-    def check(self, tensor: torch.Tensor) -> bool:
+    def check(self, tensor: TensorLike) -> bool:
         return tensor.is_floating_point()
 
     @classmethod
-    def tensor_repr(cls, tensor: torch.Tensor) -> str:
+    def tensor_repr(cls, tensor: TensorLike) -> str:
         return "is_float" if tensor.is_floating_point() else ""
 
 
@@ -177,11 +214,11 @@ class _NamedTensorDetail(TensorDetail):
     def __repr__(self) -> str:
         raise RuntimeError
 
-    def check(self, tensor: torch.Tensor) -> bool:
+    def check(self, tensor: TensorLike) -> bool:
         raise RuntimeError
 
     @classmethod
-    def tensor_repr(cls, tensor: torch.Tensor) -> str:
+    def tensor_repr(cls, tensor: TensorLike) -> str:
         raise RuntimeError
 
 
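A note on the runtime semantics: because TensorLike is decorated with @runtime_checkable, isinstance only verifies that the named attributes and methods exist on the value; it does not validate their types or call the methods. A minimal, self-contained sketch of that behaviour (DuckTensor is a hypothetical class invented for illustration, and the protocol here is a simplified variant of the one in this diff):

    from typing import Any, Protocol, Tuple, runtime_checkable

    @runtime_checkable
    class TensorLike(Protocol):
        dtype: Any
        layout: Any
        names: Tuple[Any, ...]
        shape: Tuple[int, ...]

        def is_floating_point(self) -> bool:
            ...

    class DuckTensor:
        def __init__(self):
            self.dtype = "not a torch.dtype"  # wrong type, but still passes
            self.layout = None
            self.names = (None,)
            self.shape = (1,)

        def is_floating_point(self):
            return True

    assert isinstance(DuckTensor(), TensorLike)  # all members present
    assert not isinstance(object(), TensorLike)  # members missing

The per-value validation therefore still rests on the TensorDetail.check implementations; the protocol is only the structural gate.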
torchtyping/tensor_type.py (2 additions, 1 deletion)
@@ -11,6 +11,7 @@
     LayoutDetail,
     ShapeDetail,
     TensorDetail,
+    TensorLike,
 )
 from .utils import frozendict
 
@@ -25,7 +26,7 @@
     from typing_extensions import Annotated
 
 # Not Type[Annotated...] as we want to use this in instance checks.
-_AnnotatedType = type(Annotated[torch.Tensor, ...])
+_AnnotatedType = type(Annotated[TensorLike, ...])
 
 
 # For use when we have a plain TensorType, without any [].
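The swap inside _AnnotatedType is safe because every Annotated[...] alias shares the same runtime class, whatever its first argument; the value only pins down that class so annotations can be recognised with isinstance. A quick sketch (plain builtins, assuming Python 3.9+ for typing.Annotated):

    from typing import Annotated, get_args

    _AnnotatedType = type(Annotated[int, "meta"])

    # Any Annotated alias is an instance of the same runtime class:
    assert isinstance(Annotated[float, "other"], _AnnotatedType)

    # get_args() later recovers the base class and the metadata:
    base, *metadata = get_args(Annotated[int, "meta"])
    assert base is int and metadata == ["meta"]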
torchtyping/typechecker.py (8 additions, 11 deletions)
@@ -1,19 +1,18 @@
 import inspect
 import sys
-import torch
 import typeguard
 
-from .tensor_details import _Dim, _no_name, ShapeDetail
+from .tensor_details import _Dim, _no_name, ShapeDetail, TensorLike
 from .tensor_type import _AnnotatedType
 
 from typing import Any, Dict, List, Tuple
 
 # get_args is available from Python 3.8
 # get_type_hints with the include_extras parameter is available from 3.9 (PEP 593)
 if sys.version_info >= (3, 9):
-    from typing import get_type_hints, get_args, Type
+    from typing import get_type_hints, get_args
 else:
-    from typing_extensions import get_type_hints, get_args, Type
+    from typing_extensions import get_type_hints, get_args
 
 
 # TYPEGUARD PATCHER
@@ -60,7 +59,7 @@ def _to_string(name, detail_reprs: List[str]) -> str:
 
 
 def _check_tensor(
-    argname: str, value: Any, origin: Type[torch.Tensor], metadata: Dict[str, Any]
+    argname: str, value: Any, origin: TensorLike, metadata: Dict[str, Any]
 ):
     details = metadata["details"]
     if not isinstance(value, origin) or any(
@@ -69,7 +68,7 @@ def _check_tensor(
         expected_string = _to_string(
             metadata["cls_name"], [repr(detail) for detail in details]
         )
-        if isinstance(value, torch.Tensor):
+        if isinstance(value, TensorLike):
             given_string = _to_string(
                 metadata["cls_name"], [detail.tensor_repr(value) for detail in details]
             )
@@ -253,7 +252,7 @@ def _check_memo(memo):
                 dims.append(_Dim(name=dim.name, size=size))
             detail = detail.update(dims=tuple(dims))
         _check_tensor(
-            argname, value, torch.Tensor, {"cls_name": cls_name, "details": [detail]}
+            argname, value, TensorLike, {"cls_name": cls_name, "details": [detail]}
         )
 
 
@@ -274,7 +273,7 @@ class _CallMemo(typeguard._CallMemo):
         "name_to_size",
         "name_to_shape",
     )
-    value_info: List[Tuple[str, torch.Tensor, str, Dict[str, Any]]]
+    value_info: List[Tuple[str, TensorLike, str, Dict[str, Any]]]
     name_to_size: Dict[str, int]
     name_to_shape: Dict[str, Tuple[int]]
 
@@ -298,11 +297,9 @@ def check_type(*args, **kwargs):
         and hasattr(memo, "value_info")
         and isinstance(expected_type, _AnnotatedType)
     )
-    # Now check if it's annotating a tensor
+    # Grab the base class
     if is_torchtyping_annotation:
         base_cls, *all_metadata = get_args(expected_type)
-        if not issubclass(base_cls, torch.Tensor):
-            is_torchtyping_annotation = False
     # Now check if the annotation's metadata is our metadata
     if is_torchtyping_annotation:
         for metadata in all_metadata:
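Taken together, the patcher now gates on the protocol instead of on torch.Tensor, so any structurally conforming object flows through the same detail checks. A simplified sketch of the resulting control flow (not the verbatim implementation; check_tensor_sketch is an illustrative name, and TensorLike is the protocol this diff adds to tensor_details.py):

    from torchtyping.tensor_details import TensorLike

    def check_tensor_sketch(argname, value, details):
        # Structural gate first: torch.Tensor or not, any object satisfying
        # TensorLike is accepted; then every detail check must pass.
        if not isinstance(value, TensorLike) or any(
            not detail.check(value) for detail in details
        ):
            raise TypeError(f"{argname} did not match its torchtyping annotation")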