10 changes: 7 additions & 3 deletions gempy_engine/core/backend_tensor.py
@@ -21,7 +21,7 @@
class BackendTensor:
engine_backend: AvailableBackends

pykeops_enabled: bool
pykeops_enabled: bool = False
use_pykeops: bool = False
use_gpu: bool = True
dtype: str = DEFAULT_TENSOR_DTYPE
@@ -46,7 +46,7 @@ def get_backend_string(cls) -> str:
return "CPU"

@classmethod
def change_backend_gempy(cls, engine_backend: AvailableBackends, use_gpu: bool = True, dtype: Optional[str] = None):
def change_backend_gempy(cls, engine_backend: AvailableBackends, use_gpu: bool = False, dtype: Optional[str] = None):
cls._change_backend(engine_backend, use_pykeops=PYKEOPS, use_gpu=use_gpu, dtype=dtype)

@classmethod
@@ -110,7 +110,10 @@ def _change_backend(cls, engine_backend: AvailableBackends, use_pykeops: bool =
# Check if CUDA is available
if not pytorch_copy.cuda.is_available():
raise RuntimeError("GPU requested but CUDA is not available in PyTorch")
if False:
if False: # * (Miguel) this slows down the code a lot
# Check if CUDA device is available
if not pytorch_copy.cuda.device_count():
raise RuntimeError("GPU requested but no CUDA device is available in PyTorch")
# Set default device to CUDA
cls.device = pytorch_copy.device("cuda")
pytorch_copy.set_default_device("cuda")
@@ -293,6 +296,7 @@ def _fill_diagonal(tensor, value):
cls.tfnp.tile = lambda tensor, repeats: tensor.repeat(repeats)
cls.tfnp.ravel = lambda tensor: tensor.flatten()
cls.tfnp.packbits = _packbits
cls.tfnp.ascontiguousarray = lambda tensor: tensor.contiguous()
cls.tfnp.fill_diagonal = _fill_diagonal
cls.tfnp.isclose = lambda a, b, rtol=1e-05, atol=1e-08, equal_nan=False: isclose(
a,
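The new `ascontiguousarray` entry follows this file's existing shim pattern: NumPy-style names are grafted onto the torch namespace so backend-agnostic code never branches on the engine. A minimal sketch of that pattern, assuming PyTorch is installed (the `_Shim` holder below is illustrative, not the engine's class):

```python
# Minimal sketch of the tfnp shim pattern; `_Shim` stands in for BackendTensor.tfnp.
import torch

class _Shim:
    pass

tfnp = _Shim()
tfnp.ravel = lambda tensor: tensor.flatten()
tfnp.ascontiguousarray = lambda tensor: tensor.contiguous()

t = torch.arange(12.0).reshape(3, 4).T    # transposing yields a non-contiguous view
assert not t.is_contiguous()
c = tfnp.ascontiguousarray(t)             # same values, compact row-major storage
assert c.is_contiguous() and torch.equal(c, t)
```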
53 changes: 21 additions & 32 deletions gempy_engine/modules/dual_contouring/fancy_triangulation.py
@@ -231,36 +231,25 @@ def check_voxels_exist_next_to_edge(coord_col, edge_vector, _left_right_array_ac
raise ValueError("n must be smaller than 12")

# flip triangle order if normal is negative
if False:
indices = BackendTensor.tfnp.stack([x[normal >= 0], y[normal >= 0], z[normal >= 0]]).T
flipped_indices = BackendTensor.tfnp.stack(
[
x[normal < 0],
y[normal < 0],
z[normal < 0]]).T[:, [0, 2, 1]
]
indices = BackendTensor.tfnp.stack([indices, flipped_indices])
else:
# flip triangle order if normal is negative
# Create masks for positive and negative normals
positive_mask = normal >= 0
negative_mask = normal < 0

# Extract indices for positive normals (keep original order)
x_pos = x[positive_mask]
y_pos = y[positive_mask]
z_pos = z[positive_mask]

# Extract indices for negative normals (flip order: x, z, y instead of x, y, z)
x_neg = x[negative_mask]
y_neg = y[negative_mask]
z_neg = z[negative_mask]

# Combine all indices
all_x = BackendTensor.tfnp.concatenate([x_pos, x_neg], axis=0)
all_y = BackendTensor.tfnp.concatenate([y_pos, z_neg], axis=0) # Note: z_neg for flipped triangles
all_z = BackendTensor.tfnp.concatenate([z_pos, y_neg], axis=0) # Note: y_neg for flipped triangles

# Stack into final indices array
indices = BackendTensor.tfnp.stack([all_x, all_y, all_z], axis=1)
# Create masks for positive and negative normals
positive_mask = normal >= 0
negative_mask = normal < 0

# Extract indices for positive normals (keep original order)
x_pos = x[positive_mask]
y_pos = y[positive_mask]
z_pos = z[positive_mask]

# Extract indices for negative normals (flip order: x, z, y instead of x, y, z)
x_neg = x[negative_mask]
y_neg = y[negative_mask]
z_neg = z[negative_mask]

# Combine all indices
all_x = BackendTensor.tfnp.concatenate([x_pos, x_neg], axis=0)
all_y = BackendTensor.tfnp.concatenate([y_pos, z_neg], axis=0) # Note: z_neg for flipped triangles
all_z = BackendTensor.tfnp.concatenate([z_pos, y_neg], axis=0) # Note: y_neg for flipped triangles

# Stack into final indices array
indices = BackendTensor.tfnp.stack([all_x, all_y, all_z], axis=1)
return indices
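With the dead `if False:` branch removed, the winding flip reads straight through. A toy NumPy sketch of the same mask trick (made-up indices, not the engine's voxel data): swapping the y/z columns for negative-normal triangles reverses their winding, at the cost of reordering the output (positive-normal faces first):

```python
# Toy illustration of the mask-based winding flip.
import numpy as np

x = np.array([0, 3, 6, 9])
y = np.array([1, 4, 7, 10])
z = np.array([2, 5, 8, 11])
normal = np.array([1.0, -1.0, 1.0, -1.0])

pos, neg = normal >= 0, normal < 0
all_x = np.concatenate([x[pos], x[neg]])
all_y = np.concatenate([y[pos], z[neg]])   # z fills y's slot for flipped faces
all_z = np.concatenate([z[pos], y[neg]])   # y fills z's slot for flipped faces
indices = np.stack([all_x, all_y, all_z], axis=1)
# [[0 1 2], [6 7 8], [3 5 4], [9 11 10]] -- flipped rows are appended last
```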
16 changes: 10 additions & 6 deletions gempy_engine/modules/evaluator/symbolic_evaluator.py
@@ -21,11 +21,15 @@ def symbolic_evaluator(solver_input: SolverInput, weights: np.ndarray, options:
eval_kernel = yield_evaluation_kernel(solver_input, options.kernel_options)
if BackendTensor.engine_backend == gempy_engine.config.AvailableBackends.numpy:
from pykeops.numpy import LazyTensor
lazy_weights = LazyTensor(np.asfortranarray(weights), axis=1)
# Reshape weights to a column vector, (n_weights, 1), bound to the 'i' axis so it broadcasts against eval_kernel
lazy_weights = LazyTensor(np.asfortranarray(weights.reshape(-1, 1)), axis=0) # axis=0 means this is the 'i' dimension
scalar_field: np.ndarray = (eval_kernel * lazy_weights).sum(axis=0, backend=backend_string).reshape(-1)
else:
from pykeops.torch import LazyTensor
lazy_weights = LazyTensor(weights.view((-1, 1)), axis=1)
scalar_field: np.ndarray = (eval_kernel.T * lazy_weights).sum(axis=1, backend=backend_string).reshape(-1)
lazy_weights = LazyTensor(weights.view((-1, 1)), axis=0) # axis=0 for 'i' dimension
# Use element-wise multiplication and sum over the correct axis
scalar_field: np.ndarray = (eval_kernel * lazy_weights).sum(axis=0, backend=backend_string).reshape(-1)

gx_field: Optional[np.ndarray] = None
gy_field: Optional[np.ndarray] = None
gz_field: Optional[np.ndarray] = None
@@ -34,12 +38,12 @@ def symbolic_evaluator(solver_input: SolverInput, weights: np.ndarray, options:
eval_gx_kernel = yield_evaluation_grad_kernel(solver_input, options.kernel_options, axis=0)
eval_gy_kernel = yield_evaluation_grad_kernel(solver_input, options.kernel_options, axis=1)

gx_field = (eval_gx_kernel.T * lazy_weights).sum(axis=1, backend=backend_string).reshape(-1)
gy_field = (eval_gy_kernel.T * lazy_weights).sum(axis=1, backend=backend_string).reshape(-1)
gx_field = (eval_gx_kernel * lazy_weights).sum(axis=0, backend=backend_string).reshape(-1)
gy_field = (eval_gy_kernel * lazy_weights).sum(axis=0, backend=backend_string).reshape(-1)

if options.number_dimensions == 3:
eval_gz_kernel = yield_evaluation_grad_kernel(solver_input, options.kernel_options, axis=2)
gz_field = (eval_gz_kernel.T * lazy_weights).sum(axis=1, backend=backend_string).reshape(-1)
gz_field = (eval_gz_kernel * lazy_weights).sum(axis=0, backend=backend_string).reshape(-1)
elif options.number_dimensions == 2:
gz_field = None
else:
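The fix rests on pykeops' axis convention: a `LazyTensor` built with `axis=0` is a `Vi` (row-indexed) variable, and `.sum(axis=0)` reduces over that `i` axis, leaving one value per `j` point, so no kernel transpose is needed. A hedged sketch of the convention (pykeops must be installed; shapes and names here are illustrative, not the engine's):

```python
# Weighted kernel evaluation with i-indexed weights, as the fix assumes.
import numpy as np
from pykeops.numpy import LazyTensor

rng = np.random.default_rng(0)
x = rng.normal(size=(16, 3))      # 'i' points (e.g. data locations)
g = rng.normal(size=(1000, 3))    # 'j' points (e.g. grid to evaluate)
w = rng.normal(size=(16, 1))      # one weight per 'i' point

x_i = LazyTensor(x, axis=0)       # Vi variable: indexed by i
g_j = LazyTensor(g, axis=1)       # Vj variable: indexed by j
K_ij = (-((x_i - g_j) ** 2).sum(-1)).exp()   # symbolic (16, 1000) kernel

w_i = LazyTensor(w, axis=0)       # weights live on the 'i' axis too
field = (K_ij * w_i).sum(axis=0, backend="CPU").reshape(-1)  # sum over i -> one value per j
assert field.shape == (1000,)
```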
3 changes: 3 additions & 0 deletions gempy_engine/modules/kernel_constructor/_structs.py
@@ -22,6 +22,9 @@ def _upgrade_kernel_input_to_keops_tensor_pytorch(struct_data_instance):

for key, val in struct_data_instance.__dict__.items():
if key == "n_faults_i": continue
if (val.is_contiguous() is False):
raise ValueError("Input tensors are not contiguous")

struct_data_instance.__dict__[key] = LazyTensor(val.type(BackendTensor.dtype_obj))


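The guard is cheap insurance: pykeops reads a tensor through its raw data pointer, so a non-contiguous view (e.g. the result of a transpose) would be traversed in the wrong memory order; failing fast beats silently wrong numbers. That rationale is inferred from the change, not stated in the PR. A quick demonstration of the condition it catches:

```python
# Demonstrates what the is_contiguous guard rejects; assumes PyTorch only.
import torch

val = torch.arange(6.0).reshape(2, 3).T   # a view: shape (3, 2), storage unchanged
print(val.is_contiguous())                # False -> the guard would raise here
print(val.contiguous().is_contiguous())   # True  -> safe to wrap in a LazyTensor
```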
23 changes: 13 additions & 10 deletions gempy_engine/modules/kernel_constructor/_vectors_preparation.py
@@ -48,8 +48,8 @@ def cov_vectors_preparation(interp_input: SolverInput, kernel_options: KernelOpt
return KernelInput(
ori_sp_matrices=orientations_sp_matrices,
cartesian_selector=cartesian_selector,
nugget_scalar= interp_input.sp_internal.nugget_effect_ref_rest,
nugget_grad= interp_input.ori_internal.nugget_effect_grad,
nugget_scalar=interp_input.sp_internal.nugget_effect_ref_rest,
nugget_grad=interp_input.ori_internal.nugget_effect_grad,
# Drift
ori_drift=dips_ug,
ref_drift=dips_ref_ui,
@@ -61,11 +61,11 @@
)


def evaluation_vectors_preparations(interp_input: SolverInput, kernel_options: KernelOptions,
axis: Optional[int] = None, slice_array = None) -> KernelInput:
def evaluation_vectors_preparations(interp_input: SolverInput, kernel_options: KernelOptions,
axis: Optional[int] = None, slice_array=None) -> KernelInput:
sp_: SurfacePointsInternals = interp_input.sp_internal
ori_: OrientationsInternals = interp_input.ori_internal

# if slice_array is None, just use the whole array
if slice_array is not None:
grid: np.ndarray = interp_input.xyz_to_interpolate[slice_array]
@@ -129,10 +129,10 @@ def evaluation_vectors_preparations(interp_input: SolverInput, kernel_options: K
def _assembly_dips_points_tensors(matrices_size: MatricesSizes, ori_, sp_) -> OrientationSurfacePointsCoords:
dips_ref_coord = assembly_dips_points_tensor(ori_.dip_positions_tiled, sp_.ref_surface_points, matrices_size)
dips_rest_coord = assembly_dips_points_tensor(ori_.dip_positions_tiled, sp_.rest_surface_points, matrices_size)

orientations_sp_matrices = OrientationSurfacePointsCoords(dips_ref_coord, dips_ref_coord, dips_rest_coord,
dips_rest_coord) # When we build the core covariance these are repeated, since the distances are computed against themselves

return orientations_sp_matrices


@@ -196,7 +196,6 @@ def _assembly_drift_grid_tensors(grid: np.ndarray, options: KernelOptions, matri
# region UG
dips_ug_d1, dips_ug_d2a, dips_ug_d2b, second_degree_selector = assembly_dips_ug_coords(ori_, options, matrices_size)


grid_1 = BackendTensor.t.zeros_like(grid)
grid_1[:, axis] = 1

@@ -223,7 +222,10 @@

def _assembly_fault_grid_tensors(fault_values_on_grid, options: KernelOptions, faults_val: FaultsData, ori_size: int) -> FaultDrift:
fault_vector_ref, fault_vector_rest = _assembly_fault_internals(faults_val, options, ori_size)
fault_drift = FaultDrift(fault_vector_ref, fault_values_on_grid.T)
fault_drift = FaultDrift(
x_degree_1=fault_vector_ref,
y_degree_1=BackendTensor.t.ascontiguousarray(fault_values_on_grid.T)
)
return fault_drift


Expand All @@ -244,6 +246,7 @@ def _assembler(matrix_val, ori_size_: int, uni_drift_size: int): # TODO: This f

ref_matrix_val = faults_val.fault_values_ref
rest_matrix_val = faults_val.fault_values_rest
fault_vector_ref = _assembler(ref_matrix_val.T, ori_size, options.n_uni_eq)
ref_matrix_contig = BackendTensor.t.ascontiguousarray(ref_matrix_val.T)
fault_vector_ref = _assembler(ref_matrix_contig, ori_size, options.n_uni_eq)
fault_vector_rest = _assembler(rest_matrix_val.T, ori_size, options.n_uni_eq)
return fault_vector_ref, fault_vector_rest