Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Drop torch_tensor_to_array #303

Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
30 changes: 7 additions & 23 deletions examples/6_Autograd/autograd.f90
Original file line number Diff line number Diff line change
@@ -1,14 +1,12 @@
program example

use, intrinsic :: iso_c_binding, only : c_int64_t

! Import precision info from iso
use, intrinsic :: iso_fortran_env, only : sp => real32

! Import our library for interfacing with PyTorch's Autograd module
use ftorch, only: assignment(=), operator(+), operator(-), operator(*), &
operator(/), operator(**), torch_kCPU, torch_kFloat32, torch_tensor, torch_tensor_delete, &
torch_tensor_empty, torch_tensor_from_array, torch_tensor_to_array
torch_tensor_empty, torch_tensor_from_array

! Import our tools module for testing utils
use ftorch_test_utils, only : assert_allclose
Expand All @@ -21,10 +19,9 @@ program example
! Set up Fortran data structures
integer, parameter :: ndims = 2
integer, parameter :: n=2, m=1
integer(c_int64_t), dimension(ndims), parameter :: tensor_shape = [n, m]
real(wp), dimension(n,m), target :: in_data1
real(wp), dimension(n,m), target :: in_data2
real(wp), dimension(:,:), pointer :: out_data
real(wp), dimension(n,m), target :: out_data
real(wp), dimension(n,m) :: expected
integer :: tensor_layout(ndims) = [1, 2]

Expand All @@ -34,48 +31,35 @@ program example
! Set up Torch data structures
type(torch_tensor) :: a, b, Q

! Initialise input arrays as in Python example
! Initialise Torch Tensors from input arrays as in Python example
in_data1(:,1) = [2.0_wp, 3.0_wp]
in_data2(:,1) = [6.0_wp, 4.0_wp]

! Construct a Torch Tensor from a Fortran array
! TODO: Implement requires_grad=.true.
call torch_tensor_from_array(a, in_data1, tensor_layout, torch_kCPU)
call torch_tensor_from_array(b, in_data2, tensor_layout, torch_kCPU)

! Initialise Torch Tensor from array used for output
call torch_tensor_from_array(Q, out_data, tensor_layout, torch_kCPU)

! Check arithmetic operations work for torch_tensors
call torch_tensor_empty(Q, ndims, tensor_shape, torch_kFloat32, torch_kCPU)
write (*,*) "a = ", in_data1(:,1)
write (*,*) "b = ", in_data2(:,1)
Q = 3 * (a**3 - b * b / 3)

! Extract a Fortran array from a Torch tensor
call torch_tensor_to_array(Q, out_data, shape(in_data1))
write (*,*) "Q = 3 * (a ** 3 - b * b / 3) =", out_data(:,1)

! Check output tensor matches expected value
expected(:,1) = [-12.0_wp, 65.0_wp]
test_pass = assert_allclose(out_data, expected, test_name="torch_tensor_to_array", rtol=1e-5)
test_pass = assert_allclose(out_data, expected, test_name="autograd_Q")
if (.not. test_pass) then
call clean_up()
print *, "Error :: out_data does not match expected value"
stop 999
end if

! Back-propagation
! TODO: Requires API extension

call clean_up()
write (*,*) "Autograd example ran successfully"

contains

! Subroutine for freeing memory and nullifying pointers used in the example
subroutine clean_up()
nullify(out_data)
call torch_tensor_delete(a)
call torch_tensor_delete(b)
call torch_tensor_delete(Q)
end subroutine clean_up

end program example
4 changes: 2 additions & 2 deletions pages/examples.md
Original file line number Diff line number Diff line change
Expand Up @@ -183,5 +183,5 @@ different input multiple times in the same workflow.
[This worked example](https://github.com/Cambridge-ICCS/FTorch/tree/main/examples/6_Autograd)
is currently under development. Eventually, it will demonstrate how to perform
automatic differentiation in FTorch by leveraging PyTorch's Autograd module.
Currently, it just demonstrates how to use `torch_tensor_to_array` and compute
mathematical expressions involving Torch tensors.
Currently, it just demonstrates how to compute mathematical expressions
involving Torch tensors.
2 changes: 1 addition & 1 deletion pages/updates.md
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@ title: Recent API Changes
## February 2025

If you use a version of FTorch from before commit
[f7fbebf](f7fbebfdad2a4801f57742a2bb12bc21e70881ff)
[c85185e](c85185e6c261606c212dd11fee734663d610b695)
(February 2025) you will notice that the main `CMakeLists.txt` file has moved
from `src/` to the root level of the FTorch repository. This move was mainly to
simplify the development experience, such that the examples could be built as
Expand Down
44 changes: 5 additions & 39 deletions src/ctorch.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -246,44 +246,6 @@ torch_tensor_t torch_from_blob(void *data, int ndim, const int64_t *shape,
// --- Functions for interrogating tensors
// =====================================================================================

void *torch_to_blob(const torch_tensor_t tensor, const torch_data_t dtype) {
auto t = reinterpret_cast<torch::Tensor *const>(tensor);
void *raw_ptr;
switch (dtype) {
case torch_kUInt8:
std::cerr << "[WARNING]: uint8 not supported" << std::endl;
exit(EXIT_FAILURE);
case torch_kInt8:
raw_ptr = (void *)t->data_ptr<int8_t>();
break;
case torch_kInt16:
raw_ptr = (void *)t->data_ptr<int16_t>();
break;
case torch_kInt32:
raw_ptr = (void *)t->data_ptr<int32_t>();
break;
case torch_kInt64:
raw_ptr = (void *)t->data_ptr<int64_t>();
break;
case torch_kFloat16:
std::cerr << "[WARNING]: float16 not supported" << std::endl;
// NOTE: std::float16_t is available but only with C++23
exit(EXIT_FAILURE);
case torch_kFloat32:
raw_ptr = (void *)t->data_ptr<float>();
// NOTE: std::float32_t is available but only with C++23
break;
case torch_kFloat64:
raw_ptr = (void *)t->data_ptr<double>();
// NOTE: std::float64_t is available but only with C++23
break;
default:
std::cerr << "[WARNING]: unknown data type" << std::endl;
exit(EXIT_FAILURE);
}
return raw_ptr;
}

void torch_tensor_print(const torch_tensor_t tensor) {
auto t = reinterpret_cast<torch::Tensor *>(tensor);
std::cout << *t << std::endl;
Expand Down Expand Up @@ -338,7 +300,11 @@ void torch_tensor_assign(torch_tensor_t output, const torch_tensor_t input) {
auto out = reinterpret_cast<torch::Tensor *>(output);
auto in = reinterpret_cast<torch::Tensor *const>(input);
torch::AutoGradMode enable_grad(in->requires_grad());
*out = *in;
// NOTE: The following line ensures that the output tensor continues to point to a
// Fortran array if it was set up to do so using torch_tensor_from_array. If
// it's removed then the Fortran array keeps its original value and is no
// longer pointed to.
std::move(*out) = *in;
}

void torch_tensor_add(torch_tensor_t output, const torch_tensor_t tensor1,
Expand Down
9 changes: 0 additions & 9 deletions src/ctorch.h
Original file line number Diff line number Diff line change
Expand Up @@ -111,15 +111,6 @@ EXPORT_C torch_tensor_t torch_from_blob(void *data, int ndim, const int64_t *sha
// --- Functions for interrogating tensors
// =============================================================================

/**
* Function to extract a C-array from a Torch Tensor's data.
*
* @param the Torch Tensor
* @param data type of the elements of the Tensor
* @return pointer to the Tensor in memory
*/
EXPORT_C void *torch_to_blob(const torch_tensor_t tensor, const torch_data_t dtype);

/**
* Function to print out a Torch Tensor
* @param Torch Tensor to print
Expand Down
Loading