diff --git a/pymlg/torch/se2.py b/pymlg/torch/se2.py
index f10061f..ae90041 100644
--- a/pymlg/torch/se2.py
+++ b/pymlg/torch/se2.py
@@ -12,18 +12,21 @@ class SE2(MatrixLieGroupTorch):
     matrix_size = 3

     @staticmethod
-    def random(N=1):
+    def random(N=1, device='cpu'):
         phi = torch.rand(N, 1, 1) * 2 * torch.pi
         r = torch.randn(N, 2, 1)
         C = SO2.Exp(phi)
-        return SE2.from_components(C, r)
+        return SE2.from_components(C, r).to(device)

     @staticmethod
     def from_components(C, r):
         """
         Construct an SE(2) matrix from a rotation matrix and translation vector.
         """
-        T = torch.zeros(C.shape[0], 3, 3, dtype=C.dtype)
+
+        # first, confirm that both components are allocated on the same device
+        assert C.device == r.device, "Components must be on the same device for SE2.from_components"
+        T = torch.zeros(C.shape[0], 3, 3, dtype=C.dtype, device=C.device)
         T[:, 0:2, 0:2] = C
         T[:, 0:2, 2] = r.view(-1, 2)
@@ -45,7 +48,7 @@ def wedge(xi):
         phi = xi[:, 0]
         xi_r = xi[:, 1:]
         Xi_phi = SO2.wedge(phi)
-        Xi = torch.zeros(xi.shape[0], 3, 3, dtype=xi.dtype)
+        Xi = torch.zeros(xi.shape[0], 3, 3, dtype=xi.dtype, device=xi.device)
         Xi[:, 0:2, 0:2] = Xi_phi
         Xi[:, 0:2, 2] = xi_r.view(-1, 2)
         return Xi
@@ -72,7 +75,7 @@ def log(T):
         Xi_phi = SO2.log(T[:, 0:2, 0:2])
         r = T[:, 0:2, 2].unsqueeze(2)
         xi_r = SE2.V_matrix_inv(SO2.vee(Xi_phi)) @ r
-        Xi = torch.zeros(T.shape[0], 3, 3, dtype=T.dtype)
+        Xi = torch.zeros(T.shape[0], 3, 3, dtype=T.dtype, device=T.device)
         Xi[:, 0:2, 0:2] = Xi_phi
         Xi[:, 0:2, 2] = xi_r.squeeze(2)
         return Xi
@@ -80,9 +83,9 @@ def log(T):

     @staticmethod
     def odot(b):
-        X = torch.zeros(b.shape[0], 3, 3, dtype=b.dtype)
+        X = torch.zeros(b.shape[0], 3, 3, dtype=b.dtype, device=b.device)
         X[:, 0:2, 0] = SO2.odot(b[:, :2]).squeeze(2)
-        X[:, 0:2, 1:3] = batch_eye(b.shape[0], 2, 2) * b[:, 2].unsqueeze(2)
+        X[:, 0:2, 1:3] = batch_eye(b.shape[0], 2, 2, device=b.device) * b[:, 2].unsqueeze(2)
         return X
@@ -103,7 +106,7 @@ def left_jacobian(xi):
         large_angle_mask = small_angle_mask.logical_not()
         large_angle_inds = large_angle_mask.nonzero(as_tuple=True)[0]

-        J = torch.zeros(xi.shape[0], 3, 3, dtype=xi.dtype)
+        J = torch.zeros(xi.shape[0], 3, 3, dtype=xi.dtype, device=xi.device)

         if small_angle_inds.numel():
             A = (1 - 1.0 / 6.0 * phi_sq[small_angle_inds]).view(-1)
@@ -144,7 +147,7 @@ def adjoint(T):
         r = T[:, 0:2, 2]
         # build Om matrix manually (will this break the DAG?)
-        Om = torch.Tensor([[0, -1], [1, 0]]).repeat(T.shape[0], 1, 1)
+        Om = torch.tensor([[0, -1], [1, 0]], dtype=T.dtype, device=T.device).repeat(T.shape[0], 1, 1)
         A = torch.zeros(T.shape[0], 3, 3, dtype=T.dtype)
         A[:, 0, 0] = 1
@@ -155,7 +158,7 @@ def adjoint(T):

     @staticmethod
     def adjoint_algebra(Xi):
-        A = torch.zeros(Xi.shape[0], 3, 3, dtype=Xi.dtype)
+        A = torch.zeros(Xi.shape[0], 3, 3, dtype=Xi.dtype, device=Xi.device)
         A[:, 1, 0] = Xi[:, 1, 2]
         A[:, 2, 0] = -Xi[:, 0, 2]
         A[:, 1:, 1:] = Xi[:, 0:2, 0:2]
@@ -171,7 +174,7 @@ def V_matrix(phi):
         large_angle_mask = small_angle_mask.logical_not()
         large_angle_inds = large_angle_mask.nonzero(as_tuple=True)[0]

-        V = batch_eye(phi.shape[0], 2, 2, dtype=phi.dtype)
+        V = batch_eye(phi.shape[0], 2, 2, device=phi.device, dtype=phi.dtype)

         if small_angle_inds.numel():
             V[small_angle_inds] += .5 * SO2.wedge(phi[small_angle_inds])
@@ -193,7 +196,7 @@ def V_matrix_inv(phi):
         large_angle_mask = small_angle_mask.logical_not()
         large_angle_inds = large_angle_mask.nonzero(as_tuple=True)[0]

-        V_inv = batch_eye(phi.shape[0], 2, 2, dtype=phi.dtype)
+        V_inv = batch_eye(phi.shape[0], 2, 2, device=phi.device, dtype=phi.dtype)

         if small_angle_inds.numel():
             V_inv[small_angle_inds] -= .5 * SO2.wedge(phi[small_angle_inds])
diff --git a/pymlg/torch/se23.py b/pymlg/torch/se23.py
index acd7310..efbb384 100644
--- a/pymlg/torch/se23.py
+++ b/pymlg/torch/se23.py
@@ -18,7 +18,7 @@ class SE23(MatrixLieGroupTorch):
     matrix_size = 5

     @staticmethod
-    def random(N=1):
+    def random(N=1, device='cpu'):
         """
         Generates a random batch of SE_2(3) matricies.
@@ -39,7 +39,7 @@ def random(N=1):

         C = SO3.Exp(phi)

-        return SE23.from_components(C, v, r)
+        return SE23.from_components(C, v, r).to(device)

     @staticmethod
     def from_components(C: torch.Tensor, v: torch.Tensor, r: torch.Tensor):
@@ -65,8 +65,11 @@ def from_components(C: torch.Tensor, v: torch.Tensor, r: torch.Tensor):
         # firstly, check that batch dimension for all 3 components matches
         if not (C.shape[0] == v.shape[0] == r.shape[0]):
             raise ValueError("Batch dimension for SE_2(3) components don't match.")
+
+        # check that all components are on the same device
+        assert C.device == v.device == r.device, "Components must be on the same device for SE23.from_components."
-        X = batch_eye(C.shape[0], 5, 5, dtype=C.dtype)
+        X = batch_eye(C.shape[0], 5, 5, device=C.device, dtype=C.dtype)

         X[:, 0:3, 0:3] = C
         X[:, 0:3, 3] = v.squeeze(2)
@@ -112,7 +115,7 @@ def wedge(xi: torch.Tensor):
         Xi = torch.cat(
             (SO3.cross(xi_phi), xi_v, xi_r), dim=2
         )  # this yields a (N, 3, 5) matrix that must now be blocked with a (2, 5) batched matrix
-        block = torch.zeros(xi_phi.shape[0], 2, 5)
+        block = torch.zeros(xi_phi.shape[0], 2, 5, device=xi.device, dtype=xi.dtype)
         return torch.cat((Xi, block), dim=1)

     @staticmethod
@@ -152,13 +155,13 @@ def log(X):
         Xi = torch.cat(
             (SO3.cross(phi), v, r), dim=2
         )  # this yields a (N, 3, 5) matrix that must now be blocked with a (2, 5) batched matrix
-        block = torch.zeros(X.shape[0], 2, 5)
+        block = torch.zeros(X.shape[0], 2, 5, device=X.device, dtype=X.dtype)
         return torch.cat((Xi, block), dim=1)

     @staticmethod
     def adjoint(X):
         C, v, r = SE23.to_components(X)
-        O = torch.zeros(v.shape[0], 3, 3)
+        O = torch.zeros(v.shape[0], 3, 3, device=X.device, dtype=X.dtype)

         # creating block matrix
         b1 = torch.cat((C, O, O), dim=2)
@@ -168,7 +171,7 @@

     @staticmethod
     def adjoint_algebra(Xi):
-        A = torch.zeros(Xi.shape[0], 9, 9)
+        A = torch.zeros(Xi.shape[0], 9, 9, dtype=Xi.dtype, device=Xi.device)
         A[:, 0:3, 0:3] = Xi[:, 0:3, 0:3]
         A[:, 3:6, 0:3] = SO3.wedge(Xi[:, 0:3, 3])
         A[:, 3:6, 3:6] = Xi[:, 0:3, 0:3]
@@ -177,14 +180,14 @@
         return A

     @staticmethod
-    def identity(N=1):
-        return batch_eye(N, 5, 5)
+    def identity(device, N=1, dtype=torch.float64):
+        return batch_eye(N, 5, 5, device=device, dtype=dtype)

     @staticmethod
     def odot(xi : torch.Tensor):
-        X = torch.zeros(xi.shape[0], 5, 9)
+        X = torch.zeros(xi.shape[0], 5, 9, dtype=xi.dtype, device=xi.device)
         X[:, 0:4, 0:6] = SE3.odot(xi[:, 0:4])
-        X[:, 0:3, 6:9] = xi[:, 4] * batch_eye(xi.shape[0], 3, 3)
+        X[:, 0:3, 6:9] = xi[:, 4] * batch_eye(xi.shape[0], 3, 3, device=xi.device, dtype=xi.dtype)
         return X

     @staticmethod
@@ -193,7 +196,7 @@ def left_jacobian(xi):
         xi_v = xi[:, 3:6]
         xi_r = xi[:, 6:9]

-        J_left = batch_eye(xi.shape[0], 9, 9, dtype=xi.dtype)
+        J_left = batch_eye(xi.shape[0], 9, 9, dtype=xi.dtype, device=xi.device)

         small_angle_mask = is_close(
             torch.linalg.norm(xi_phi, dim=1), 0.0, SE23._small_angle_tol
@@ -242,7 +245,7 @@ def left_jacobian_inv(xi):
         xi_v = xi[:, 3:6]
         xi_r = xi[:, 6:9]

-        J_left = batch_eye(xi.shape[0], 9, 9, dtype=xi.dtype)
+        J_left = batch_eye(xi.shape[0], 9, 9, dtype=xi.dtype, device=xi.device)

         small_angle_mask = is_close(
             torch.linalg.norm(xi_phi, dim=1), 0.0, SE23._small_angle_tol
diff --git a/pymlg/torch/se3.py b/pymlg/torch/se3.py
index c706629..bffb1d9 100644
--- a/pymlg/torch/se3.py
+++ b/pymlg/torch/se3.py
@@ -50,7 +50,7 @@ def _left_jacobian_Q_matrix(xi_phi, xi_rho):
         return Q.squeeze_()

     @staticmethod
-    def random(N=1):
+    def random(N=1, device='cpu'):
         """
         Generates a random batch of SE_(3) matricies.
@@ -70,7 +70,7 @@ def random(N=1):

         C = SO3.Exp(phi)

-        return SE3.from_components(C, r)
+        return SE3.from_components(C, r).to(device)

     @staticmethod
     def from_components(C: torch.Tensor, r: torch.Tensor):
@@ -94,8 +94,11 @@ def from_components(C: torch.Tensor, r: torch.Tensor):
         # firstly, check that batch dimension for all 3 components matches
         if not (C.shape[0] == r.shape[0]):
             raise ValueError("Batch dimension for SE(3) components don't match.")
+
+        # then, check that both components are on the same device
+        assert C.device == r.device, "Components must be on the same device for SE3.from_components."
-        X = batch_eye(C.shape[0], 4, 4, dtype = C.dtype)
+        X = batch_eye(C.shape[0], 4, 4, device=C.device, dtype = C.dtype)

         X[:, 0:3, 0:3] = C
         X[:, 0:3, 3] = r.squeeze(2)
@@ -140,7 +143,7 @@ def wedge(xi: torch.Tensor):
         )  # this yields a (N, 3, 4) matrix that must now be blocked with a (1, 4) batched matrix

         # generating a (N, 1, 4) batched matrix to append
-        b1 = torch.tensor([0, 0, 0, 0], dtype = xi.dtype).reshape(1, 1, 4)
+        b1 = torch.tensor([0, 0, 0, 0], dtype = xi.dtype, device=xi.device).reshape(1, 1, 4)
         block = b1.repeat(Xi.shape[0], 1, 1)
         return torch.cat((Xi, block), dim=1)
@@ -180,22 +183,22 @@ def log(X : torch.Tensor):
         )  # this yields a (N, 3, 4) matrix that must now be blocked with a (1, 4) batched matrix

         # generating a (N, 1, 4) batched matrix to append
-        b1 = torch.tensor([0, 0, 0, 0]).reshape(1, 1, 4)
+        b1 = torch.tensor([0, 0, 0, 0], device=X.device).reshape(1, 1, 4)
         block = b1.repeat(Xi.shape[0], 1, 1)
         return torch.cat((Xi, block), dim=1)

     @staticmethod
     def odot(b : torch.Tensor):
-        X = torch.zeros(b.shape[0], 4, 6, dtype=b.dtype)
+        X = torch.zeros(b.shape[0], 4, 6, dtype=b.dtype, device=b.device)
         X[:, 0:3, 0:3] = SO3.odot(b[0:3])
-        X[:, 0:3, 3:6] = b[:, 3] * batch_eye(b.shape[0], 3, 3, dtype=b.dtype)
+        X[:, 0:3, 3:6] = b[:, 3] * batch_eye(b.shape[0], 3, 3, dtype=b.dtype, device=b.device)
         return X

     @staticmethod
     def adjoint(X):
         C, r = SE3.to_components(X)
-        O = torch.zeros(r.shape[0], 3, 3)
+        O = torch.zeros(r.shape[0], 3, 3, device=X.device)

         # creating block matrix
         b1 = torch.cat((C, O), dim=2)
@@ -204,22 +207,22 @@ def adjoint(X):

     @staticmethod
     def adjoint_algebra(Xi):
-        A = torch.zeros(Xi.shape[0], 6, 6, dtype=Xi.dtype)
+        A = torch.zeros(Xi.shape[0], 6, 6, dtype=Xi.dtype, device=Xi.device)
         A[:, 0:3, 0:3] = Xi[:, 0:3, 0:3]
         A[:, 3:6, 0:3] = SO3.wedge(Xi[:, 0:3, 3])
         A[:, 3:6, 3:6] = Xi[:, 0:3, 0:3]
         return A

     @staticmethod
-    def identity(N=1, dtype=torch.float32):
-        return batch_eye(N, 4, 4, dtype=dtype)
+    def identity(device, N=1, dtype=torch.float64):
+        return batch_eye(N, 4, 4, device=device, dtype=dtype)

     @staticmethod
     def left_jacobian(xi):
         xi_phi = xi[:, 0:3]
         xi_r = xi[:, 3:6]

-        J_left = batch_eye(xi.shape[0], 6, 6, dtype=xi.dtype)
+        J_left = batch_eye(xi.shape[0], 6, 6, device=xi.device, dtype=xi.dtype)

         small_angle_mask = is_close(
             torch.linalg.norm(xi_phi, dim=1), 0.0, SE3._small_angle_tol
@@ -256,7 +259,7 @@ def left_jacobian_inv(xi):
         xi_phi = xi[:, 0:3]
         xi_r = xi[:, 3:6]

-        J_left = batch_eye(xi.shape[0], 6, 6, dtype=xi.dtype)
+        J_left = batch_eye(xi.shape[0], 6, 6, device=xi.device, dtype=xi.dtype)

         small_angle_mask = is_close(
             torch.linalg.norm(xi_phi, dim=1), 0.0, SE3._small_angle_tol
diff --git a/pymlg/torch/so2.py b/pymlg/torch/so2.py
index 1b8fb0d..3927723 100644
--- a/pymlg/torch/so2.py
+++ b/pymlg/torch/so2.py
@@ -11,7 +11,7 @@ class SO2(MatrixLieGroupTorch):
     matrix_size = 2

     @staticmethod
-    def random(N=1):
+    def random(N=1, device='cpu'):
         """
         Generates a random batch of SO_(2) matricies.
@@ -21,7 +21,7 @@ def random(N=1):
             batch size, by default 1
         """
         phi = torch.rand(N, 1) * 2 * torch.pi
-        return SO2.Exp(phi)
+        return SO2.Exp(phi).to(device)

     @staticmethod
     def wedge(phi):
diff --git a/pymlg/torch/so3.py b/pymlg/torch/so3.py
index b033d0c..58b0703 100644
--- a/pymlg/torch/so3.py
+++ b/pymlg/torch/so3.py
@@ -8,6 +8,10 @@ def bouter(vec1, vec2):
     """batch outer product"""
+
+    # before doing anything, ensure vec1 and vec2 are on the same device
+    if vec1.device != vec2.device:
+        raise ValueError("vec1 and vec2 must be on the same device for batch outer product")
     a = torch.einsum("bik, bjk -> bij", vec1, vec2)
     return a

@@ -40,9 +44,13 @@ class SO3(MatrixLieGroupTorch):
     matrix_size = 3

     @staticmethod
-    def random(N=1):
+    def random(N=1, device='cpu'):
         v = torch.rand((N, SO3.dof))
-        return SO3.Exp(v)
+        return SO3.Exp(v).to(device)
+
+    @staticmethod
+    def identity(device, N=1, dtype=torch.float64):
+        return batch_eye(N, 3, 3, device=device, dtype=dtype)

     @staticmethod
     def wedge(phi):
@@ -108,13 +116,13 @@ def Exp(phi: torch.Tensor):
         mask = angle[:, 0, 0] < 1e-7
         mask = mask.to(phi.device)
         dim_batch = phi.shape[0]
-        Id = torch.eye(3, device=phi.device).expand(dim_batch, 3, 3)
+        Id = torch.eye(3, device=phi.device, dtype=phi.dtype).expand(dim_batch, 3, 3)

         axis = phi[~mask] / angle[~mask]
         c = angle[~mask].cos()
         s = angle[~mask].sin()

-        Rot = phi.new_empty(dim_batch, 3, 3)
+        Rot = phi.new_empty(dim_batch, 3, 3, device=phi.device, dtype=phi.dtype)
         Rot[mask] = Id[mask] + SO3.wedge(phi[mask])
         Rot[~mask] = c * Id[~mask] + (1 - c) * bouter(axis, axis) + s * SO3.wedge(axis)
@@ -214,7 +222,7 @@ def log(C):
         if np.isclose(angle.__float__(), np.pi, atol=1e-9):
             # if so, return a manually generated rotation vector that protects
             # against formulaic failure around pi
-            phi_constructed = torch.Tensor([C[:, 0, 2], C[:, 1, 2], 1 + C[:, 2, 2]])
+            phi_constructed = torch.stack((C[:, 0, 2], C[:, 1, 2], 1 + C[:, 2, 2]))
             rho = 1 / torch.sqrt(2 * (1 + C[0, 2, 2]))
             phi_constructed = rho * phi_constructed
             return SO3.wedge((torch.pi * phi_constructed).reshape(1, 3, 1))
@@ -277,16 +285,20 @@ def left_jacobian(xi):

         small_angle_mask = is_close(xi_norm, 0.0)
         small_angle_inds = small_angle_mask.nonzero(as_tuple=True)[0]
+
+        # move small_angle_inds to relevant device
+        small_angle_inds = small_angle_inds.to(xi.device)
+
         large_angle_mask = small_angle_mask.logical_not()
         large_angle_inds = large_angle_mask.nonzero(as_tuple=True)[0]

-        J_left = torch.empty(xi.shape[0], 3, 3, dtype=xi.dtype)
+        J_left = torch.empty(xi.shape[0], 3, 3, dtype=xi.dtype, device=xi.device)

         cross_xi = SO3.wedge(xi)

         if small_angle_inds.numel():
             J_left[small_angle_inds] = (
-                torch.eye(3, 3).expand(small_angle_inds.shape[0], 3, 3)
+                torch.eye(3, 3, device=xi.device, dtype=xi.dtype).expand(small_angle_inds.shape[0], 3, 3)
                 + SO3.A_lj(xi_norm[small_angle_inds], small=True)
                 .reshape(-1, 1)
                 .unsqueeze(2)
@@ -298,7 +310,7 @@ def left_jacobian(xi):
             )
         if large_angle_inds.numel():
             J_left[large_angle_inds] = (
-                torch.eye(3, 3).expand(large_angle_inds.shape[0], 3, 3)
+                torch.eye(3, 3, device=xi.device, dtype=xi.dtype).expand(large_angle_inds.shape[0], 3, 3)
                 + SO3.A_lj(xi_norm[large_angle_inds], small=False)
                 .reshape(-1, 1)
                 .unsqueeze(2)
@@ -323,16 +335,20 @@ def left_jacobian_inv(xi):

         small_angle_mask = is_close(xi_norm, 0.0, tol=SO3._small_angle_tol)
         small_angle_inds = small_angle_mask.nonzero(as_tuple=True)[0]
+
+        # move small_angle_inds to relevant device
+        small_angle_inds = small_angle_inds.to(xi.device)
+
         large_angle_mask = small_angle_mask.logical_not()
         large_angle_inds = large_angle_mask.nonzero(as_tuple=True)[0]

-        J_left = torch.empty(xi.shape[0], 3, 3, dtype=xi.dtype)
+        J_left = torch.empty(xi.shape[0], 3, 3, dtype=xi.dtype, device=xi.device)

         cross_xi = SO3.wedge(xi)

         if small_angle_inds.numel():
             J_left[small_angle_inds] = (
-                batch_eye(small_angle_inds.shape[0], 3, 3)
+                batch_eye(small_angle_inds.shape[0], 3, 3, device=xi.device, dtype=xi.dtype)
                 - 0.5 * cross_xi[small_angle_inds]
                 + SO3.A_inv_lj(xi_norm[small_angle_inds], small=True)
                 .reshape(-1, 1)
@@ -341,7 +357,7 @@ def left_jacobian_inv(xi):
             )
         if large_angle_inds.numel():
             J_left[large_angle_inds] = (
-                batch_eye(large_angle_inds.shape[0], 3, 3)
+                batch_eye(large_angle_inds.shape[0], 3, 3, device=xi.device, dtype=xi.dtype)
                 - 0.5 * cross_xi[large_angle_inds]
                 + SO3.A_inv_lj(xi_norm[large_angle_inds], small=False)
                 .reshape(-1, 1)
@@ -369,9 +385,9 @@ def to_quat(C, order='wxyz'):
             C = C.unsqueeze(dim=0)

         qw = 0.5 * torch.sqrt(1. + C[:, 0, 0] + C[:, 1, 1] + C[:, 2, 2])
-        qx = qw.new_empty(qw.shape)
-        qy = qw.new_empty(qw.shape)
-        qz = qw.new_empty(qw.shape)
+        qx = qw.new_empty(qw.shape, device=C.device, dtype=C.dtype)
+        qy = qw.new_empty(qw.shape, device=C.device, dtype=C.dtype)
+        qz = qw.new_empty(qw.shape, device=C.device, dtype=C.dtype)

         near_zero_mask = (qw).abs_().lt(1e-6)
@@ -469,7 +485,7 @@ def from_quat(quat, order="wxyz"):
             )

         # Form the matrix
-        mat = quat.new_empty(quat.shape[0], 3, 3)
+        mat = quat.new_empty(quat.shape[0], 3, 3, device=quat.device, dtype=quat.dtype)

         qw2 = qw * qw
         qx2 = qx * qx
diff --git a/pymlg/torch/utils.py b/pymlg/torch/utils.py
index 208ad34..5139f2e 100644
--- a/pymlg/torch/utils.py
+++ b/pymlg/torch/utils.py
@@ -14,11 +14,11 @@ def batch_vector(N, v : torch.Tensor):

     return v.repeat(N, 1, 1)

-def batch_eye(N, n, m, dtype = torch.float32):
+def batch_eye(N, n, m, device, dtype = torch.float32):
    """
    Generate a batched set of identity matricies by using torch.repeat()
    """
-    b = torch.eye(n, m, dtype=dtype)
+    b = torch.eye(n, m, dtype=dtype, device=device).unsqueeze(0)

    return b.repeat(N, 1, 1)
\ No newline at end of file
diff --git a/tests/standard_tests_torch.py b/tests/standard_tests_torch.py
index a132007..64dd8a8 100644
--- a/tests/standard_tests_torch.py
+++ b/tests/standard_tests_torch.py
@@ -7,232 +7,258 @@

 # set pytorch to double precision for testing
 torch.set_default_dtype(torch.float64)

+# TODO: np.allclose requires a CPU tensor, so to avoid moving it to the CPU every time, implement something that checks whether the target device is CPU or GPU and uses the appropriate allclose method
+
 class StandardTestsTorch:
-    def test_wedge_vee(self, G: MatrixLieGroupTorch):
+    def test_wedge_vee(self, G: MatrixLieGroupTorch, device):
         x = torch.rand(randrange(1, 10), G.dof, 1)
+        x = x.to(device)
         x_test = G.vee(G.wedge(x))
         if G.dof > 1:
             assert x_test.shape == (x.shape[0], G.dof, 1)
-        assert np.allclose(x, x_test, 1e-15)

-    def test_exp(self, G: MatrixLieGroupTorch):
+        # use torch.allclose for floating-point tensors so the comparison stays on-device;
+        # otherwise fall back to np.allclose
+        if x.dtype == torch.float32 or x.dtype == torch.float64:
+            assert torch.allclose(x, x_test, 1e-15)
+        else:
+            assert np.allclose(x, x_test, 1e-15)
+
+    def test_exp(self, G: MatrixLieGroupTorch, device):
         x = torch.rand(randrange(1, 10), G.dof, 1)
+        x = x.to(device)
         Xi = G.wedge(x)
         X = G.exp(Xi)
+        if Xi.dtype == torch.float32 or Xi.dtype == torch.float64:
+            Xi = Xi.cpu().numpy()
+            X = X.cpu().numpy()
         Xi = np.array(Xi).copy()
         X_test = expm(Xi)

         assert np.allclose(X, X_test)
-    def test_log(self, G: MatrixLieGroupTorch):
-        X = G.random()
+    def test_log(self, G: MatrixLieGroupTorch, device):
+        X = G.random(device=device)
         Xi = G.log(X)
+        if X.dtype == torch.float32 or X.dtype == torch.float64:
+            X = X.cpu().numpy()
+            Xi = Xi.cpu().numpy()
         X = np.array(X).copy()
         # note. logm is not currently batched as per (https://github.com/scipy/scipy/issues/12838#issuecomment-1539746877), but the G.random() call for all classes is single-batch. So by definition, should be safe to squeeze the batch dimension and test with unbatched logm().
         Xi_test = logm(X.squeeze(0))

         assert np.allclose(Xi, Xi_test)

-    def test_log_zero(self, G: MatrixLieGroupTorch):
+    def test_log_zero(self, G: MatrixLieGroupTorch, device):
         x = torch.zeros(1, G.dof, 1)
+        x = x.to(device)
         X = G.Exp(x)
         Xi = G.log(X)
+        if X.dtype == torch.float32 or X.dtype == torch.float64:
+            X = X.cpu().numpy()
+            Xi = Xi.cpu().numpy()
         X = np.array(X).copy()
         Xi_test = logm(X.squeeze(0))

         assert np.allclose(Xi, Xi_test)

-    def test_capital_log_zero(self, G: MatrixLieGroupTorch):
+    def test_capital_log_zero(self, G: MatrixLieGroupTorch, device):
         x = torch.zeros(randrange(1, 10), G.dof, 1)
+        x = x.to(device)
         X = G.Exp(x)
         x_test = G.Log(X)
-        assert np.allclose(x, x_test)
+        assert torch.allclose(x, x_test)

-    def test_capital_log_small_value(self, G: MatrixLieGroupTorch):
+    def test_capital_log_small_value(self, G: MatrixLieGroupTorch, device):
         x = torch.zeros(randrange(1, 10), G.dof, 1)
+        x = x.to(device)
         x[0] = 1e-8
         X = G.Exp(x)
         x_test = G.Log(X)
-        assert not np.isnan(x_test).any()
-        assert np.allclose(x, x_test)
+        assert not torch.isnan(x_test).any()
+        assert torch.allclose(x, x_test)

-    def test_exp_log_inverse(self, G: MatrixLieGroupTorch):
-        X = G.random()
+    def test_exp_log_inverse(self, G: MatrixLieGroupTorch, device):
+        X = G.random(device=device)
         Xi = G.log(X)
-        assert np.allclose(X, G.exp(G.log(X)))
-        assert np.allclose(Xi, G.log(G.exp(Xi)))
+        assert torch.allclose(X, G.exp(G.log(X)))
+        assert torch.allclose(Xi, G.log(G.exp(Xi)))

-    def test_capital_exp_log_inverse(self, G: MatrixLieGroupTorch):
-        T = G.random()
+    def test_capital_exp_log_inverse(self, G: MatrixLieGroupTorch, device):
+        T = G.random(device=device)
         x = G.Log(T)
-        assert np.allclose(T, G.Exp(x))
+        assert torch.allclose(T, G.Exp(x))
         if G.dof > 1:
             assert x.shape == (1, G.dof, 1)

-    def test_odot_wedge(self, G: MatrixLieGroupTorch):
-        X = G.random()
+    def test_odot_wedge(self, G: MatrixLieGroupTorch, device):
+        X = G.random(device=device)
         a = G.Log(X)
-        b = torch.normal(0, 1, (X.shape[0], X.shape[1], 1))
+        b = torch.normal(0, 1, (X.shape[0], X.shape[1], 1), device=device)
         test1 = G.wedge(a) @ b
         test2 = G.odot(b) @ a
-        assert np.allclose(test1, test2)
+        assert torch.allclose(test1, test2)

-    def test_left_jacobian_inverse(self, G: MatrixLieGroupTorch):
-        X = G.random()
+    def test_left_jacobian_inverse(self, G: MatrixLieGroupTorch, device):
+        X = G.random(device=device)
         xi = G.Log(X)
         J_left = G.left_jacobian(xi)
         J_left_inv = G.left_jacobian_inv(xi)
-        assert np.allclose(J_left_inv, torch.linalg.inv(J_left))
+        assert torch.allclose(J_left_inv, torch.linalg.inv(J_left))

-    def test_left_jacobian_inverse_zero(self, G: MatrixLieGroupTorch):
+    def test_left_jacobian_inverse_zero(self, G: MatrixLieGroupTorch, device):
         xi = torch.zeros(randrange(1, 10), G.dof, 1)
+        xi = xi.to(device)
         J_left = G.left_jacobian(xi)
         J_left_inv = G.left_jacobian_inv(xi)
-        assert not np.isnan(J_left_inv).any()
-        assert np.allclose(J_left_inv, np.linalg.inv(J_left))
+        assert not torch.isnan(J_left_inv).any()
+        assert torch.allclose(J_left_inv, torch.linalg.inv(J_left))

-    def test_left_jacobian_inverse_small_value(self, G: MatrixLieGroupTorch):
+    def test_left_jacobian_inverse_small_value(self, G: MatrixLieGroupTorch, device):
         xi = torch.zeros(randrange(1, 10), G.dof, 1)
+        xi = xi.to(device)
         xi[0] = 1e-8
         J_left = G.left_jacobian(xi)
         J_left_inv = G.left_jacobian_inv(xi)
-        assert not np.isnan(J_left_inv).any()
-        assert np.allclose(J_left_inv, np.linalg.inv(J_left))
+        assert not torch.isnan(J_left_inv).any()
+        assert torch.allclose(J_left_inv, torch.linalg.inv(J_left))

-    def test_right_jacobian_inverse(self, G: MatrixLieGroupTorch):
-        X = G.random()
+    def test_right_jacobian_inverse(self, G: MatrixLieGroupTorch, device):
+        X = G.random(device=device)
         xi = G.Log(X)
         J_right = G.right_jacobian(xi)
         J_right_inv = G.right_jacobian_inv(xi)
-        assert np.allclose(J_right_inv, np.linalg.inv(J_right))
+        assert torch.allclose(J_right_inv, torch.linalg.inv(J_right))

-    def test_left_jacobian(self, G: MatrixLieGroupTorch):
-        x_bar = G.Log(G.random())
+    def test_left_jacobian(self, G: MatrixLieGroupTorch, device):
+        x_bar = G.Log(G.random(device=device))
         J_left = G.left_jacobian(x_bar)
         J_fd = self._numerical_left_jacobian(G, x_bar)
-        assert np.allclose(J_fd, J_left, atol=1e-2)
+        assert torch.allclose(J_fd, J_left, atol=1e-2)

-    def test_left_jacobian_zero(self, G: MatrixLieGroupTorch):
+    def test_left_jacobian_zero(self, G: MatrixLieGroupTorch, device):
         x_bar = torch.zeros(randrange(1, 10), G.dof, 1)
+        x_bar = x_bar.to(device)
         J_left = G.left_jacobian(x_bar)
         J_fd = self._numerical_left_jacobian(G, x_bar)
-        assert np.allclose(J_fd, J_left, atol=1e-5)
+        assert torch.allclose(J_fd, J_left, atol=1e-5)

-    def test_left_jacobian_small_value(self, G: MatrixLieGroupTorch):
+    def test_left_jacobian_small_value(self, G: MatrixLieGroupTorch, device):
         x_bar = torch.zeros(randrange(1, 10), G.dof, 1)
+        x_bar = x_bar.to(device)
         x_bar[0] = 1e-8
         J_left = G.left_jacobian(x_bar)
         J_fd = self._numerical_left_jacobian(G, x_bar)
-        assert np.allclose(J_fd, J_left, atol=1e-5)
+        assert torch.allclose(J_fd, J_left, atol=1e-5)

     def _numerical_left_jacobian(self, G: MatrixLieGroupTorch, x_bar: torch.Tensor):
+        device = x_bar.device
         exp_inv = G.inverse(G.Exp(x_bar))
-        J_fd = torch.zeros(x_bar.shape[0], G.dof, G.dof) #np.zeros((G.dof, G.dof))
+        J_fd = torch.zeros(x_bar.shape[0], G.dof, G.dof, device=device) #np.zeros((G.dof, G.dof))
         h = 1e-7
         for i in range(G.dof):
-            dx = torch.zeros(x_bar.shape[0], G.dof, 1)
+            dx = torch.zeros(x_bar.shape[0], G.dof, 1, device=device)
             #np.zeros((G.dof, 1))
             dx[:, i, :] = h
             J_fd[:, :, i] = (G.Log(G.Exp(x_bar + dx) @ exp_inv) / h).squeeze(2)

         return J_fd

-    def test_adjoint_identity(self, G: MatrixLieGroupTorch):
-        X = G.random()
-        xi = G.Log(G.random())
+    def test_adjoint_identity(self, G: MatrixLieGroupTorch, device):
+        X = G.random(device=device)
+        xi = G.Log(G.random(device=device))
         side1 = G.wedge(G.adjoint(X) @ xi)
         side2 = X @ (G.wedge(xi) @ G.inverse(X)) #np.dot(X, np.dot(G.wedge(xi), G.inverse(X)))
-        assert np.allclose(side1, side2)
+        assert torch.allclose(side1, side2)

-    def test_adjoint_algebra_identity(self, G: MatrixLieGroupTorch):
-        Xi1 = G.log(G.random())
-        Xi2 = G.log(G.random())
+    def test_adjoint_algebra_identity(self, G: MatrixLieGroupTorch, device):
+        Xi1 = G.log(G.random(device=device))
+        Xi2 = G.log(G.random(device=device))
         xi1 = G.vee(Xi1)
         xi2 = G.vee(Xi2)
-        assert np.allclose(G.adjoint_algebra(Xi1) @ xi2, -G.adjoint_algebra(Xi2) @ xi1)
-
-    def test_inverse(self, G: MatrixLieGroupTorch):
-        X = G.random()
-        assert np.allclose(G.inverse(G.inverse(X)), X)
-        assert np.allclose(G.inverse(X), np.linalg.inv(X))
-        assert np.allclose(G.inverse(G.identity()), G.identity())
-        assert np.allclose(G.inverse(X) @ X, G.identity())
-
-    def do_tests(self, G: MatrixLieGroupTorch):
-        self.test_wedge_vee(G)
-        self.test_exp(G)
-        self.test_log(G)
-        self.test_exp_log_inverse(G)
-        self.test_capital_exp_log_inverse(G)
-        self.test_odot_wedge(G)
-        self.test_left_jacobian(G)
-        self.test_left_jacobian_small_value(G)
-        self.test_left_jacobian_inverse(G)
-        self.test_right_jacobian_inverse(G)
-        self.test_left_jacobian(G)
-        self.test_adjoint_identity(G)
-        self.test_adjoint_algebra_identity(G)
-        self.test_inverse(G)
+        assert torch.allclose(G.adjoint_algebra(Xi1) @ xi2, -G.adjoint_algebra(Xi2) @ xi1)
+
+    def test_inverse(self, G: MatrixLieGroupTorch, device):
+        X = G.random(device=device)
+        assert torch.allclose(G.inverse(G.inverse(X)), X)
+        assert torch.allclose(G.inverse(X), torch.linalg.inv(X))
+        assert torch.allclose(G.inverse(G.identity(device=device)), G.identity(device=device))
+        assert torch.allclose(G.inverse(X) @ X, G.identity(device=device))
+
+    def do_tests(self, G: MatrixLieGroupTorch, test_device):
+        self.test_wedge_vee(G, device=test_device)
+        self.test_exp(G, device=test_device)
+        self.test_log(G, device=test_device)
+        self.test_exp_log_inverse(G, device=test_device)
+        self.test_capital_exp_log_inverse(G, device=test_device)
+        self.test_odot_wedge(G, device=test_device)
+        self.test_left_jacobian(G, device=test_device)
+        self.test_left_jacobian_small_value(G, device=test_device)
+        self.test_left_jacobian_inverse(G, device=test_device)
+        self.test_right_jacobian_inverse(G, device=test_device)
+        self.test_left_jacobian(G, device=test_device)
+        self.test_adjoint_identity(G, device=test_device)
+        self.test_adjoint_algebra_identity(G, device=test_device)
+        self.test_inverse(G, device=test_device)

 class CrossValidation:
     def test_wedge(self, G1: MatrixLieGroupTorch, G2: MatrixLieGroupTorch):
         x = torch.arange(0, G1.dof).unsqueeze(0) * .1
-        assert np.allclose(G1.wedge(x), G2.wedge(x))
+        assert torch.allclose(G1.wedge(x), G2.wedge(x))

     def test_exp(self, G1: MatrixLieGroupTorch, G2: MatrixLieGroupTorch):
         x = torch.linspace(.1, 1, G1.dof).view(1, G1.dof, 1)
         Xi = G1.wedge(x)
-        assert np.allclose(G1.exp(Xi), G2.exp(Xi))
+        assert torch.allclose(G1.exp(Xi), G2.exp(Xi))

     def test_log(self, G1: MatrixLieGroupTorch, G2: MatrixLieGroupTorch):
         X = G1.Exp(torch.linspace(.1, 1, G1.dof).view(1, G1.dof, 1))
-        assert np.allclose(G1.log(X), G2.log(X))
+        assert torch.allclose(G1.log(X), G2.log(X))

     def test_capital_exp(self, G1: MatrixLieGroupTorch, G2: MatrixLieGroupTorch):
         x = torch.linspace(.1, 1, G1.dof).view(1, G1.dof, 1)
-        assert np.allclose(G1.Exp(x), G2.Exp(x))
+        assert torch.allclose(G1.Exp(x), G2.Exp(x))

     def test_capital_log(self, G1: MatrixLieGroupTorch, G2: MatrixLieGroupTorch):
         X = G1.Exp(torch.linspace(.1, 1, G1.dof).view(1, G1.dof, 1))
-        assert np.allclose(G1.Log(X), G2.Log(X))
+        assert torch.allclose(G1.Log(X), G2.Log(X))

     def test_odot(self, G1: MatrixLieGroupTorch, G2: MatrixLieGroupTorch):
         b = torch.rand(1, G1.matrix_size, 1)
-        assert np.allclose(G1.odot(b), G2.odot(b))
+        assert torch.allclose(G1.odot(b), G2.odot(b))

     def test_left_jacobian(self, G1: MatrixLieGroupTorch, G2: MatrixLieGroupTorch):
         x = torch.linspace(.1, 1, G1.dof).view(1, G1.dof, 1)
-        assert np.allclose(G1.left_jacobian(x), G2.left_jacobian(x), atol=1e-6)
+        assert torch.allclose(G1.left_jacobian(x), G2.left_jacobian(x), atol=1e-6)
     def test_right_jacobian(self, G1: MatrixLieGroupTorch, G2: MatrixLieGroupTorch):
         x = torch.linspace(.1, 1, G1.dof).view(1, G1.dof, 1)
-        assert np.allclose(G1.right_jacobian(x), G2.right_jacobian(x), atol=1e-6)
+        assert torch.allclose(G1.right_jacobian(x), G2.right_jacobian(x), atol=1e-6)

     def test_left_jacobian_inv(self, G1: MatrixLieGroupTorch, G2: MatrixLieGroupTorch):
         x = torch.linspace(.1, 1, G1.dof).view(1, G1.dof, 1)
-        assert np.allclose(G1.left_jacobian_inv(x), G2.left_jacobian_inv(x), atol=1e-6)
+        assert torch.allclose(G1.left_jacobian_inv(x), G2.left_jacobian_inv(x), atol=1e-6)

     def test_right_jacobian_inv(self, G1: MatrixLieGroupTorch, G2: MatrixLieGroupTorch):
         x = torch.linspace(.1, 1, G1.dof).view(1, G1.dof, 1)
-        assert np.allclose(G1.right_jacobian_inv(x), G2.right_jacobian_inv(x), atol=1e-6)
+        assert torch.allclose(G1.right_jacobian_inv(x), G2.right_jacobian_inv(x), atol=1e-6)

     def test_adjoint(self, G1: MatrixLieGroupTorch, G2: MatrixLieGroupTorch):
         X = G1.Exp(torch.linspace(.1, 1, G1.dof).view(1, G1.dof, 1))
-        assert np.allclose(G1.adjoint(X), G2.adjoint(X))
+        assert torch.allclose(G1.adjoint(X), G2.adjoint(X))

     def test_adjoint_algebra(self, G1: MatrixLieGroupTorch, G2: MatrixLieGroupTorch):
         X = G1.Exp(torch.linspace(.1, 1, G1.dof).view(1, G1.dof, 1))
         Xi = G1.log(X)
-        assert np.allclose(G1.adjoint_algebra(Xi), G2.adjoint_algebra(Xi))
+        assert torch.allclose(G1.adjoint_algebra(Xi), G2.adjoint_algebra(Xi))

     def test_inverse(self, G1: MatrixLieGroupTorch, G2: MatrixLieGroupTorch):
         X = G1.Exp(torch.linspace(.1, 1, G1.dof).view(1, G1.dof, 1))
-        assert np.allclose(G1.inverse(X), G2.inverse(X))
+        assert torch.allclose(G1.inverse(X), G2.inverse(X))
diff --git a/tests/torch/test_standard_torch.py b/tests/torch/test_standard_torch.py
index 05e1d5a..1121d6a 100644
--- a/tests/torch/test_standard_torch.py
+++ b/tests/torch/test_standard_torch.py
@@ -9,6 +9,7 @@
 from standard_tests_torch import StandardTestsTorch

 @pytest.mark.parametrize("G", [SO3, SE3, SE23])
+@pytest.mark.parametrize("device", ['cpu', 'cuda'])
 class TestStandardTorch(StandardTestsTorch):
     pass

@@ -19,8 +20,8 @@ class TestStandardTorch(StandardTestsTorch):

     # For debugging purposes
     test = TestStandardTorch()
-    test.do_tests(SO3)
-    test.do_tests(SE3)
-    test.do_tests(SE23)
-    test.do_tests(SO2)
-    test.do_tests(SE2)
+    test.do_tests(SO3, test_device='cuda')
+    test.do_tests(SE3, test_device='cuda')
+    test.do_tests(SE23, test_device='cuda')
+    test.do_tests(SO2, test_device='cuda')
+    test.do_tests(SE2, test_device='cuda')
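For reference, a minimal usage sketch of the device-aware API this patch introduces. It is not part of the diff; the `pymlg.torch` import path, the batch size, and the CUDA fallback are assumptions.

# hypothetical usage sketch of the device-aware API (assumed import path)
import torch
from pymlg.torch import SE3

torch.set_default_dtype(torch.float64)  # mirrors the test setup above
device = "cuda" if torch.cuda.is_available() else "cpu"

X = SE3.random(N=4, device=device)   # random SE(3) batch on the target device
xi = SE3.Log(X)                      # Lie-algebra coordinates inherit X's device
assert xi.device == X.device

# torch.allclose works on both CPU and CUDA tensors, which is what the TODO in
# standard_tests_torch.py is getting at.
assert torch.allclose(SE3.Exp(xi), X)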