
Commit 18b5381 (1 parent: f658c99)

fix: add autocast for float32 only ops

4 files changed (+20 -2 lines)
@@ -0,0 +1,7 @@
+# @package model.parameters.groups.model1
+
+_target_: moai.parameters.selectors.model.ModelParameterSelector
+modules: null # optional
+monads: null # optional
+parameters: null # optional
+force_grad: true
@@ -0,0 +1,7 @@
+# @package model.parameters.groups.model2
+
+_target_: moai.parameters.selectors.model.ModelParameterSelector
+modules: null # optional
+monads: null # optional
+parameters: null # optional
+force_grad: true
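The two new config files above are identical Hydra config groups for model1 and model2. As a minimal sketch of how Hydra turns either file into an object: hydra.utils.instantiate and the _target_ key are standard Hydra/OmegaConf machinery, while the keyword arguments are assumed to match ModelParameterSelector's signature only because instantiate passes the remaining YAML keys as kwargs.

# Hedged sketch (not part of the commit): how Hydra materializes this config.
from hydra.utils import instantiate
from omegaconf import OmegaConf

cfg = OmegaConf.create(
    {
        "_target_": "moai.parameters.selectors.model.ModelParameterSelector",
        "modules": None,     # optional (per the comments in the YAML above)
        "monads": None,      # optional
        "parameters": None,  # optional
        "force_grad": True,
    }
)
# instantiate() imports the _target_ class and calls it with the other keys
# as keyword arguments, i.e. ModelParameterSelector(modules=None, ..., force_grad=True).
selector = instantiate(cfg)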

moai/monads/render/nvdiffrast/render.py (+3)
@@ -22,6 +22,7 @@ def __init__(
         self.resolution = [height, width]
         self.decomposed = decomposed
 
+    @torch.amp.custom_fwd(device_type="cuda", cast_inputs=torch.float32)
     def forward(
         self,
         ndc_vertices: torch.Tensor,
@@ -90,6 +91,7 @@ def __init__(
     ):
         super().__init__()
 
+    @torch.amp.custom_fwd(device_type="cuda", cast_inputs=torch.float32)
     def forward(
         self,
         attributes: torch.Tensor,
@@ -114,6 +116,7 @@ def __init__(
         super().__init__()
         self.pos_grad_boost = position_gradient_scale
 
+    @torch.amp.custom_fwd(device_type="cuda", cast_inputs=torch.float32)
     def forward(
         self,
         attributes: torch.Tensor,

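All three decorators added above use the same mechanism: inside an active CUDA autocast region, torch.amp.custom_fwd(cast_inputs=torch.float32) upcasts floating-point CUDA tensor arguments to float32 and runs the decorated body with autocast disabled, which is what float32-only kernels like nvdiffrast's need. A standalone toy example (illustrative only, not moai code; custom_fwd is documented for torch.autograd.Function.forward, and this commit applies the same pattern to module forwards):

import torch

class Float32Only(torch.nn.Module):
    # Under an enclosing CUDA autocast region, cast_inputs=torch.float32
    # upcasts floating-point CUDA tensor args to float32 and disables
    # autocast for the body, so the matmul below stays in float32.
    @torch.amp.custom_fwd(device_type="cuda", cast_inputs=torch.float32)
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return x @ x.transpose(-1, -2)

if torch.cuda.is_available():
    op = Float32Only().cuda()
    x = torch.randn(4, 8, device="cuda")
    with torch.amp.autocast(device_type="cuda", dtype=torch.float16):
        y = op(x)  # without the decorator, this matmul would run in float16
    print(y.dtype)  # torch.float32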
moai/supervision/objectives/mesh/laplacian.py (+3 -2)
@@ -89,6 +89,7 @@ def __init__(self, method: str = "uniform"):
         super().__init__()
         self.method = method
 
+    @torch.amp.custom_fwd(device_type="cuda", cast_inputs=torch.float32)
     def forward(
         self,
         vertices: torch.Tensor,  # [B, V, 3]
@@ -109,9 +110,9 @@ def forward(
         # We don't want to backprop through the computation of the Laplacian;
         # just treat it as a magic constant matrix that is used to transform
         # verts into normals
-        with torch.no_grad():
+        with torch.no_grad(), torch.amp.autocast(device_type="cuda", enabled=False):
             if self.method == "uniform":
-                L = laplacian(verts_packed, faces_packed)
+                L = laplacian(verts_packed.float(), faces_packed)
             elif self.method in ["cot", "cotcurv"]:
                 L, inv_areas = cot_laplacian(verts_packed, faces_packed)
                 if self.method == "cot":
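Note that the laplacian.py hunk pairs two changes: disabling autocast around the Laplacian construction, and explicitly upcasting verts_packed. Both are needed because autocast(enabled=False) only affects ops executed inside the block; a tensor already produced in float16 by an enclosing autocast region stays float16 until cast back. A minimal sketch of that pattern (illustrative only; the helper name is invented):

import torch

def sum_of_squares_fp32(x: torch.Tensor) -> torch.Tensor:
    # enabled=False stops *new* ops from running in reduced precision, but
    # x may already be float16 if it was produced inside an enclosing
    # autocast region, so it must be upcast explicitly.
    with torch.amp.autocast(device_type="cuda", enabled=False):
        x32 = x.float()            # mirrors verts_packed.float() in the diff
        return (x32 * x32).sum()   # reduction accumulates in float32

if torch.cuda.is_available():
    with torch.amp.autocast(device_type="cuda", dtype=torch.float16):
        h = torch.randn(256, 256, device="cuda") @ torch.randn(256, 256, device="cuda")
        s = sum_of_squares_fp32(h)  # h arrives as float16; result is float32
    print(h.dtype, s.dtype)  # torch.float16 torch.float32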
