@@ -13,55 +13,57 @@ subsample = 2^3;
 # create the x training array, according to our desired grid size
 xtrain = vars["a"][1:1000, 1:subsample:end] |> device;
 # create the x test array
-xtest = vars["a"][end-99:end, 1:subsample:end] |> device;
+xtest = vars["a"][(end - 99):end, 1:subsample:end] |> device;

 # Create the y training array
 ytrain = vars["u"][1:1000, 1:subsample:end] |> device;
 # Create the y test array
-ytest = vars["u"][end-99:end, 1:subsample:end] |> device;
+ytest = vars["u"][(end - 99):end, 1:subsample:end] |> device;

 # The data is missing grid data, so we create it
 # `collect` converts data type `range` into an array
-grid = collect(range(0, 1, length=length(xtrain[1,:]))) |> device
+grid = collect(range(0, 1, length = length(xtrain[1, :]))) |> device

 # Merge the created grid with the data
 # Output has the dims: batch x grid points x 2 (a(x), x)
 # First, reshape the data to a 3D tensor,
 # Then, create a 3D tensor from the synthetic grid data
 # and concatenate them along the newly created 3rd dim
-xtrain = cat(reshape(xtrain,(1000,1024,1)),
-             reshape(repeat(grid,1000),(1000,1024,1));
-             dims=3) |> device
-ytrain = cat(reshape(ytrain,(1000,1024,1)),
-             reshape(repeat(grid,1000),(1000,1024,1));
-             dims=3) |> device
+xtrain = cat(reshape(xtrain, (1000, 1024, 1)),
+             reshape(repeat(grid, 1000), (1000, 1024, 1));
+             dims = 3) |> device
+ytrain = cat(reshape(ytrain, (1000, 1024, 1)),
+             reshape(repeat(grid, 1000), (1000, 1024, 1));
+             dims = 3) |> device
 # Same treatment with the test data
-xtest = cat(reshape(xtest,(100,1024,1)),
-            reshape(repeat(grid,100),(100,1024,1));
-            dims=3) |> device
-ytest = cat(reshape(ytest,(100,1024,1)),
-            reshape(repeat(grid,100),(100,1024,1));
-            dims=3) |> device
+xtest = cat(reshape(xtest, (100, 1024, 1)),
+            reshape(repeat(grid, 100), (100, 1024, 1));
+            dims = 3) |> device
+ytest = cat(reshape(ytest, (100, 1024, 1)),
+            reshape(repeat(grid, 100), (100, 1024, 1));
+            dims = 3) |> device

 # Our net wants the input in the form (2,grid,batch), though,
 # So we permute
-xtrain, xtest = permutedims(xtrain,(3,2,1)), permutedims(xtest,(3,2,1)) |> device
-ytrain, ytest = permutedims(ytrain,(3,2,1)), permutedims(ytest,(3,2,1)) |> device
+xtrain, xtest = permutedims(xtrain, (3, 2, 1)), permutedims(xtest, (3, 2, 1)) |> device
+ytrain, ytest = permutedims(ytrain, (3, 2, 1)), permutedims(ytest, (3, 2, 1)) |> device

 # Pass the data to the Flux DataLoader and give it a batch of 20
-train_loader = Flux.Data.DataLoader((xtrain, ytrain), batchsize=20, shuffle=true) |> device
-test_loader = Flux.Data.DataLoader((xtest, ytest), batchsize=20, shuffle=false) |> device
+train_loader = Flux.Data.DataLoader((xtrain, ytrain), batchsize = 20, shuffle = true) |>
+               device
+test_loader = Flux.Data.DataLoader((xtest, ytest), batchsize = 20, shuffle = false) |>
+              device

 # Set up the Fourier Layer
 # 128 in- and outputs, batch size 20 as given above, grid size 1024
 # 16 modes to keep, σ activation on the gpu
-layer = FourierLayer(128,128,1024,16,gelu,bias_fourier=false) |> device
+layer = FourierLayer(128, 128, 1024, 16, gelu, bias_fourier = false) |> device

 # The whole architecture
 # linear transform into the latent space, 4 Fourier Layers,
 # then transform it back
-model = Chain(Dense(2,128;bias=false), layer, layer, layer, layer,
-              Dense(128,2; bias=false)) |> device
+model = Chain(Dense(2, 128; bias = false), layer, layer, layer, layer,
+              Dense(128, 2; bias = false)) |> device

 # We use the ADAM optimizer for training
 learning_rate = 0.001
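
Editor's note on the permutation lines in the hunk above: in Julia, `|>` binds tighter than the tuple comma, so in `xtrain, xtest = permutedims(xtrain, (3, 2, 1)), permutedims(xtest, (3, 2, 1)) |> device` only the second array is actually piped through `device`; piping each call separately avoids that surprise. A minimal shape check, assuming the arrays from the hunk above are in scope (the net expects the layout (2, grid, batch)):

    # After permutedims the layout should be (channels, grid points, batch)
    @assert size(xtrain) == (2, 1024, 1000)
    @assert size(xtest) == (2, 1024, 100)
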
@@ -71,21 +73,21 @@ opt = ADAM(learning_rate)
 parameters = params(model)

 # The loss function
-loss(x,y) = Flux.Losses.mse(model(x),y)
+loss(x, y) = Flux.Losses.mse(model(x), y)

 # Define a callback function that gives some output during training
-evalcb() = @show(loss(xtest,ytest))
+evalcb() = @show(loss(xtest, ytest))
 # Print the callback only every 5 seconds,
 throttled_cb = throttle(evalcb, 5)

 # Do the training loop
 Flux.@epochs 500 train!(loss, parameters, train_loader, opt, cb = throttled_cb)

 # Accuracy metrics
-val_loader = Flux.Data.DataLoader((xtest, ytest), batchsize=1, shuffle=false) |> device
+val_loader = Flux.Data.DataLoader((xtest, ytest), batchsize = 1, shuffle = false) |> device
 loss = 0.0 |> device

-for (x,y) in val_loader
+for (x, y) in val_loader
     ŷ = model(x)
-    loss += Flux.Losses.mse(ŷ,y)
+    loss += Flux.Losses.mse(ŷ, y)
 end
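
Editor's note: the loop above accumulates per-sample MSE (the validation loader uses batchsize = 1), so dividing by the number of batches yields the mean test error; `length` on a Flux DataLoader returns that batch count. Also, `loss = 0.0` rebinds a name that was just defined as a function, which Julia rejects at the top level ("invalid redefinition of constant"); a fresh accumulator name such as `val_loss` (a suggested rename, not part of the commit) sidesteps the clash. A minimal sketch, assuming the loop has run:

    # Average the accumulated MSE over the number of validation batches
    mean_loss = loss / length(val_loader)
    println("Mean validation MSE: ", mean_loss)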