
Commit 14adcf1

Add a Fortran equivalent of the optimisers example WIP.
1 parent dcdcf6b commit 14adcf1

3 files changed: +164 -0 lines changed

examples/n_Optimisers/CMakeLists.txt

+39
@@ -0,0 +1,39 @@
cmake_minimum_required(VERSION 3.15...3.31)
# policy CMP0076 - target_sources source files are relative to file where
# target_sources is run
cmake_policy(SET CMP0076 NEW)

set(PROJECT_NAME OptimisersExample)

project(${PROJECT_NAME} LANGUAGES Fortran)

# Build in Debug mode if not specified
if(NOT CMAKE_BUILD_TYPE)
  set(CMAKE_BUILD_TYPE
      Debug
      CACHE STRING "" FORCE)
endif()

find_package(FTorch)
message(STATUS "Building with Fortran PyTorch coupling")

# Fortran example
add_executable(optimisers optimisers.f90)
target_link_libraries(optimisers PRIVATE FTorch::ftorch)

# Integration testing
if(CMAKE_BUILD_TESTS)
  include(CTest)

  # 1. Check the Python Optimisers script runs successfully
  add_test(NAME pyoptim COMMAND ${Python_EXECUTABLE}
                                ${PROJECT_SOURCE_DIR}/optimisers.py)

  # 2. Check the Fortran Optimisers script runs successfully
  add_test(
    NAME foptim
    COMMAND optimisers
    WORKING_DIRECTORY ${PROJECT_BINARY_DIR})
  set_tests_properties(foptim PROPERTIES PASS_REGULAR_EXPRESSION
                                         "Optimisers example ran successfully")
endif()

examples/n_Optimisers/README.md

+19
@@ -54,3 +54,22 @@ Epoch: 0
tensor([1.0000, 1.5000, 2.0000, 2.5000], requires_grad=True)
...
```

To run the Fortran version of the demo, we first need to compile it, for example with:
```
mkdir build
cd build
cmake .. -DCMAKE_PREFIX_PATH=<path/to/your/installation/of/library/> -DCMAKE_BUILD_TYPE=Release
cmake --build .
```

(Note that the Fortran compiler can be chosen explicitly with the `-DCMAKE_Fortran_COMPILER` flag,
and should match the compiler that was used to locally build FTorch.)

To run the compiled code, simply use
```
./optimisers
```

Currently, the example constructs Torch Tensors and iterates over a training loop,
computing a loss with each iteration.
It does not yet implement an optimiser or a step to update the scaling tensor.
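For reference, a minimal PyTorch sketch of the complete loop that the Fortran version works towards is shown below; the `optimizer.zero_grad()`, `loss.backward()` and `optimizer.step()` calls correspond to the remaining TODOs in `optimisers.f90`. The choice of optimiser and learning rate here is an illustrative assumption, not necessarily the configuration used in `optimisers.py`.
```python
# Illustrative PyTorch training loop mirroring the Fortran example (sketch only).
import torch

input_vec = torch.ones(4)
target_vec = torch.tensor([1.0, 2.0, 3.0, 4.0])
scaling_tensor = torch.ones(4, requires_grad=True)

# Hypothetical optimiser and learning rate, for illustration
optimizer = torch.optim.SGD([scaling_tensor], lr=0.1)

for epoch in range(16):
    optimizer.zero_grad()                       # clear gradients from the previous iteration
    output = input_vec * scaling_tensor         # forward pass: elementwise scaling
    loss = ((output - target_vec) ** 2).mean()  # mean square error
    loss.backward()                             # backpropagate gradients via autograd
    optimizer.step()                            # update scaling_tensor in place
    print(f"Epoch: {epoch}, loss: {loss.item():.4f}")

print(scaling_tensor)
```
Once equivalent optimiser bindings exist in FTorch, the Fortran TODOs can be filled in against this loop step by step.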

examples/n_Optimisers/optimisers.f90

+106
@@ -0,0 +1,106 @@
program example

  ! Import precision info from iso
  use, intrinsic :: iso_fortran_env, only : sp => real32

  ! Import c_int64_t
  use, intrinsic :: iso_c_binding, only: c_int64_t

  ! Import our library for interfacing with PyTorch's Autograd module
  use ftorch, only: assignment(=), operator(-), operator(*), operator(/), operator(**), &
                    torch_kCPU, torch_kFloat32, &
                    torch_tensor, torch_tensor_from_array, &
                    torch_tensor_ones, torch_tensor_empty, &
                    torch_tensor_print, torch_delete

  implicit none

  ! Set working precision for reals
  integer, parameter :: wp = sp

  ! Set up Fortran data structures
  integer, parameter :: ndims = 1
  integer, parameter :: n = 4
  real(wp), dimension(n), target :: input_data, output_data, target_data
  integer :: tensor_layout(ndims) = [1]

  ! Set up Torch data structures
  integer(c_int64_t), dimension(1), parameter :: tensor_shape = [4]
  type(torch_tensor) :: input_vec, output_vec, target_vec, scaling_tensor, loss, torch_4p0

  ! Set up training parameters
  integer :: i
  integer, parameter :: n_train = 15
  integer, parameter :: n_print = 1

  ! Initialise Torch Tensors from input/target arrays as in the Python example
  input_data = [1.0_wp, 1.0_wp, 1.0_wp, 1.0_wp]
  target_data = [1.0_wp, 2.0_wp, 3.0_wp, 4.0_wp]
  call torch_tensor_from_array(input_vec, input_data, tensor_layout, torch_kCPU)
  call torch_tensor_from_array(target_vec, target_data, tensor_layout, torch_kCPU)

  ! Initialise the scaling tensor as ones, as in the Python example
  call torch_tensor_ones(scaling_tensor, ndims, tensor_shape, &
                         torch_kFloat32, torch_kCPU, requires_grad=.true.)

  ! Initialise a scaling factor of 4.0 for use in tensor operations
  call torch_tensor_from_array(torch_4p0, [4.0_wp], tensor_layout, torch_kCPU, requires_grad=.true.)

  ! Initialise an optimiser and apply it to scaling_tensor
  ! TODO

  ! Conduct training loop
  do i = 1, n_train+1
    ! Zero any previously stored gradients ready for a new iteration
    ! TODO: implement equivalent to optimizer.zero_grad()

    ! Forward pass: multiply the input of ones by the scaling tensor (elementwise)
    call torch_tensor_from_array(output_vec, output_data, tensor_layout, torch_kCPU)
    output_vec = input_vec * scaling_tensor

    ! Create an empty loss tensor and populate it with the mean square error (MSE) between output and target
    ! Then perform a backward step on the loss to propagate gradients using autograd
    !
    ! We could use the following lines to do this by explicitly specifying a
    ! gradient of ones to start the process:
    call torch_tensor_empty(loss, ndims, tensor_shape, &
                            torch_kFloat32, torch_kCPU)
    loss = ((output_vec - target_vec) ** 2) / torch_4p0
    ! TODO: add in backpropagation functionality for loss.backward(gradient=torch.ones(4))
    !
    ! However, we can avoid explicitly passing an initial gradient and instead do this
    ! implicitly by aggregating the loss vector into a scalar value:
    ! TODO: Requires addition of `.mean()` to the FTorch tensor API
    ! loss = ((output_vec - target_vec) ** 2).mean()
    ! loss.backward()

    ! Step the optimiser to update the values in `scaling_tensor`
    ! TODO: Add step functionality to optimisers for optimizer.step()

    if (modulo(i, n_print) == 0) then
      write(*,*) "================================================"
      write(*,*) "Epoch: ", i
      write(*,*)
      write(*,*) "Output:", output_data
      write(*,*)
      write(*,*) "loss:"
      call torch_tensor_print(loss)
      write(*,*)
      write(*,*) "tensor gradient: TODO: scaling_tensor.grad"
      write(*,*)
      write(*,*) "scaling_tensor:"
      call torch_tensor_print(scaling_tensor)
      write(*,*)
    end if

    ! Clean up created tensors
    call torch_delete(output_vec)
    call torch_delete(loss)

  end do

  write(*,*) "Training complete."

  write(*,*) "Optimisers example ran successfully"

end program example
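A note on the two backward strategies described in the comments above: because the elementwise loss is already divided by 4, summing it (which is what backpropagating an explicit gradient of ones does) is identical to taking the mean, so both routes produce the same gradients for `scaling_tensor`. A small PyTorch sketch, illustrative only and not part of this commit, demonstrating the equivalence:

```python
# Sketch showing that the two backward strategies in the Fortran comments agree.
import torch

target = torch.tensor([1.0, 2.0, 3.0, 4.0])

# Strategy 1: elementwise loss divided by n, backward with an explicit gradient of ones
a = torch.ones(4, requires_grad=True)
loss_vec = ((torch.ones(4) * a - target) ** 2) / 4.0
loss_vec.backward(gradient=torch.ones(4))

# Strategy 2: aggregate the loss to a scalar with .mean(), then a plain backward()
b = torch.ones(4, requires_grad=True)
loss_scalar = ((torch.ones(4) * b - target) ** 2).mean()
loss_scalar.backward()

print(torch.allclose(a.grad, b.grad))  # True: both give d(mean loss)/d(scaling)
```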
