model.py
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
# `data` must be a 1-D np.ndarray of shape (n,) holding the signal profile.
# The synthetic noisy Gaussian below is only a placeholder so the script runs
# end to end; replace it with your own profile.
rng = np.random.default_rng(42)
n = 500
data = np.exp(-0.5 * ((np.arange(n) - n / 2) / 50) ** 2) + 0.05 * rng.standard_normal(n)
# Prepare the data
x = np.arange(len(data)).reshape(-1, 1)
y = data.reshape(-1, 1)
# Split the data into training, validation, and testing sets
x_train, x_temp, y_train, y_temp = train_test_split(x, y, test_size=0.3, random_state=42)
x_val, x_test, y_val, y_test = train_test_split(x_temp, y_temp, test_size=0.5, random_state=42)
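# With test_size=0.3 and then 0.5, this yields a 70% train / 15% validation / 15% test split.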
# Normalize the data
scaler_x = MinMaxScaler()
scaler_y = MinMaxScaler()
x_train_normalized = scaler_x.fit_transform(x_train)
x_val_normalized = scaler_x.transform(x_val)
x_test_normalized = scaler_x.transform(x_test)
y_train_normalized = scaler_y.fit_transform(y_train)
y_val_normalized = scaler_y.transform(y_val)
y_test_normalized = scaler_y.transform(y_test)
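# Both scalers are fit on the training split only and merely applied to the
# validation/test splits, so their statistics never leak into training.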
# Define the neural network model
model = tf.keras.Sequential([
    tf.keras.Input(shape=(1,)),
    tf.keras.layers.Dense(64, activation='relu'),
    tf.keras.layers.Dense(64, activation='relu'),
    tf.keras.layers.Dense(1)
])
# Compile the model
model.compile(optimizer='adam', loss='mean_squared_error')
# Define early stopping
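# Training halts once val_loss has not improved for 10 consecutive epochs,
# and restore_best_weights=True rolls the model back to the best epoch seen.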
early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=10, restore_best_weights=True)
# Train the model with early stopping
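# epochs=500 is only an upper bound; early stopping typically ends training sooner.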
history = model.fit(
    x_train_normalized, y_train_normalized,
    epochs=500,
    validation_data=(x_val_normalized, y_val_normalized),
    callbacks=[early_stopping],
    verbose=0,
)
# Evaluate the model on the test set
test_loss = model.evaluate(x_test_normalized, y_test_normalized, verbose=0)
print(f'Test loss (MSE on normalized targets): {test_loss:.6f}')
# Predict the profile using the trained model
y_pred_train_normalized = model.predict(x_train_normalized).flatten()
y_pred_val_normalized = model.predict(x_val_normalized).flatten()
y_pred_test_normalized = model.predict(x_test_normalized).flatten()
# Inverse transform the predictions
y_pred_train = scaler_y.inverse_transform(y_pred_train_normalized.reshape(-1, 1)).flatten()
y_pred_val = scaler_y.inverse_transform(y_pred_val_normalized.reshape(-1, 1)).flatten()
y_pred_test = scaler_y.inverse_transform(y_pred_test_normalized.reshape(-1, 1)).flatten()
# Combine the training, validation, and testing predictions for full-profile plotting
y_pred = np.zeros_like(y.flatten())
y_pred[x_train.flatten()] = y_pred_train
y_pred[x_val.flatten()] = y_pred_val
y_pred[x_test.flatten()] = y_pred_test
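# This indexed assignment works because x_train/x_val/x_test still hold the
# original integer positions, so flattening them yields valid indices into y_pred.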
# Plot the original profile against the model prediction
plt.figure(figsize=(10, 6))
plt.plot(x, y, label='Original Profile')
plt.plot(x, y_pred, label='Model Prediction', linestyle='--')
plt.xlabel('Position')
plt.ylabel('Intensity')
plt.title('Model Fit to Signal Profile')
plt.legend()
plt.show()
# Plot training and validation loss
plt.figure(figsize=(6, 3))
plt.plot(history.history['loss'], label='Training Loss')
plt.plot(history.history['val_loss'], label='Validation Loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.title('Training and Validation Loss')
plt.legend()
plt.show()