From 704860abcefc19968b2de94932e57f69b728e030 Mon Sep 17 00:00:00 2001
From: RoberLopez
Date: Thu, 26 Dec 2024 17:56:18 +0100
Subject: [PATCH] clean

Rename the misspelled QuasiNewtonMehtodData struct to QuasiNewtonMethodData,
port several commented-out test fixtures to GoogleTest TEST cases, and delete
the empty inputs_selection and neurons_selection test files.

---
 opennn/quasi_newton_method.cpp        | 12 +++----
 opennn/quasi_newton_method.h          | 14 ++++----
 tests/conjugate_gradient_test.cpp     |  5 ++-
 tests/cross_entropy_error_test.cpp    | 11 ++++---
 tests/inputs_selection_test.cpp       | 24 --------------
 tests/model_selection_test.cpp        | 10 +++---
 tests/neurons_selection_test.cpp      | 22 -------------
 tests/probabilistic_layer_3d_test.cpp | 20 +++++-------
 tests/quasi_newton_method_test.cpp    | 21 ++++++++----
 tests/recurrent_layer_test.cpp        | 42 +++++++-----------------
 tests/response_optimization_test.cpp  | 47 +++++++++++++++++----------
 tests/tensors_test.cpp                | 39 ++++++----------------
 tests/tests.vcxproj                   |  2 --
 13 files changed, 99 insertions(+), 170 deletions(-)
 delete mode 100644 tests/inputs_selection_test.cpp
 delete mode 100644 tests/neurons_selection_test.cpp

diff --git a/opennn/quasi_newton_method.cpp b/opennn/quasi_newton_method.cpp
index 5d151d2bd..a64472de8 100644
--- a/opennn/quasi_newton_method.cpp
+++ b/opennn/quasi_newton_method.cpp
@@ -177,7 +177,7 @@ void QuasiNewtonMethod::set_maximum_time(const type& new_maximum_time)
 }
 
 
-void QuasiNewtonMethod::calculate_inverse_hessian_approximation(QuasiNewtonMehtodData& optimization_data) const
+void QuasiNewtonMethod::calculate_inverse_hessian_approximation(QuasiNewtonMethodData& optimization_data) const
 {
     switch(inverse_hessian_approximation_method)
     {
@@ -195,7 +195,7 @@ void QuasiNewtonMethod::calculate_inverse_hessian_approximation(QuasiNewtonMehto
 }
 
 
-void QuasiNewtonMethod::calculate_DFP_inverse_hessian(QuasiNewtonMehtodData& optimization_data) const
+void QuasiNewtonMethod::calculate_DFP_inverse_hessian(QuasiNewtonMethodData& optimization_data) const
 {
     const Tensor<type, 1>& parameters_difference = optimization_data.parameters_difference;
     const Tensor<type, 1>& gradient_difference = optimization_data.gradient_difference;
@@ -234,7 +234,7 @@ void QuasiNewtonMethod::calculate_DFP_inverse_hessian(QuasiNewtonMehtodData& opt
 }
 
 
-void QuasiNewtonMethod::calculate_BFGS_inverse_hessian(QuasiNewtonMehtodData& optimization_data) const
+void QuasiNewtonMethod::calculate_BFGS_inverse_hessian(QuasiNewtonMethodData& optimization_data) const
 {
     const Tensor<type, 1>& parameters_difference = optimization_data.parameters_difference;
     const Tensor<type, 1>& gradient_difference = optimization_data.gradient_difference;
@@ -283,7 +283,7 @@ void QuasiNewtonMethod::update_parameters(
     const Batch& batch,
     ForwardPropagation& forward_propagation,
     BackPropagation& back_propagation,
-    QuasiNewtonMehtodData& optimization_data) const
+    QuasiNewtonMethodData& optimization_data) const
 {
     Tensor<type, 1>& parameters = back_propagation.parameters;
     const Tensor<type, 1>& gradient = back_propagation.gradient;
@@ -461,7 +461,7 @@ TrainingResults QuasiNewtonMethod::perform_training()
     time(&beginning_time);
     type elapsed_time;
 
-    QuasiNewtonMehtodData optimization_data(this);
+    QuasiNewtonMethodData optimization_data(this);
 
     // Main loop
 
@@ -652,7 +652,7 @@ void QuasiNewtonMethod::from_XML(const XMLDocument& document)
 }
 
 
-void QuasiNewtonMehtodData::set(QuasiNewtonMethod* new_quasi_newton_method)
+void QuasiNewtonMethodData::set(QuasiNewtonMethod* new_quasi_newton_method)
 {
     quasi_newton_method = new_quasi_newton_method;
 
diff --git a/opennn/quasi_newton_method.h b/opennn/quasi_newton_method.h
index d929733ab..af88319f0 100644
--- a/opennn/quasi_newton_method.h
+++ b/opennn/quasi_newton_method.h
@@ -15,7 +15,7 @@
 namespace opennn
 {
 
-struct QuasiNewtonMehtodData;
+struct QuasiNewtonMethodData;
 
 class QuasiNewtonMethod : public OptimizationAlgorithm
 {
@@ -67,13 +67,13 @@ class QuasiNewtonMethod : public OptimizationAlgorithm
 
     // Training
 
-    void calculate_DFP_inverse_hessian(QuasiNewtonMehtodData&) const;
+    void calculate_DFP_inverse_hessian(QuasiNewtonMethodData&) const;
 
-    void calculate_BFGS_inverse_hessian(QuasiNewtonMehtodData&) const;
+    void calculate_BFGS_inverse_hessian(QuasiNewtonMethodData&) const;
 
-    void calculate_inverse_hessian_approximation(QuasiNewtonMehtodData&) const;
+    void calculate_inverse_hessian_approximation(QuasiNewtonMethodData&) const;
 
-    void update_parameters(const Batch& , ForwardPropagation& , BackPropagation& , QuasiNewtonMehtodData&) const;
+    void update_parameters(const Batch& , ForwardPropagation& , BackPropagation& , QuasiNewtonMethodData&) const;
 
     TrainingResults perform_training() override;
 
@@ -109,10 +109,10 @@
 };
 
 
-struct QuasiNewtonMehtodData : public OptimizationAlgorithmData
+struct QuasiNewtonMethodData : public OptimizationAlgorithmData
 {
 
-    QuasiNewtonMehtodData(QuasiNewtonMethod* new_quasi_newton_method = nullptr)
+    QuasiNewtonMethodData(QuasiNewtonMethod* new_quasi_newton_method = nullptr)
     {
         set(new_quasi_newton_method);
     }
diff --git a/tests/conjugate_gradient_test.cpp b/tests/conjugate_gradient_test.cpp
index 5b332b478..4156c7704 100644
--- a/tests/conjugate_gradient_test.cpp
+++ b/tests/conjugate_gradient_test.cpp
@@ -5,7 +5,7 @@
 
 TEST(ConjugateGradientTest, DefaultConstructor)
 {
-    ConjugateGradient conjugate_gradient_1;
+    ConjugateGradient conjugate_gradient;
 
     // EXPECT_EQ(!conjugate_gradient_1.has_loss_index());
     // ConjugateGradient conjugate_gradient_2(&mean_squared_error);
@@ -15,8 +15,7 @@
 
 TEST(ConjugateGradientTest, GeneralConstructor)
 {
-    ConjugateGradient conjugate_gradient_1;
-    // EXPECT_EQ(!conjugate_gradient_1.has_loss_index());
+    ConjugateGradient conjugate_gradient;
 
     // ConjugateGradient conjugate_gradient_2(&mean_squared_error);
     // EXPECT_EQ(conjugate_gradient_2.has_loss_index());
diff --git a/tests/cross_entropy_error_test.cpp b/tests/cross_entropy_error_test.cpp
index d9c5ccd11..1642a3dcc 100644
--- a/tests/cross_entropy_error_test.cpp
+++ b/tests/cross_entropy_error_test.cpp
@@ -8,18 +8,18 @@
 
 TEST(CrossEntropyErrorTest, DefaultConstructor)
 {
-/*
+
     CrossEntropyError cross_entropy_error;
 
     EXPECT_TRUE(!cross_entropy_error.has_data_set());
     EXPECT_TRUE(!cross_entropy_error.has_neural_network());
-*/
+
 }
 
 
 TEST(CrossEntropyErrorTest, BackPropagateEmpty)
 {
-/*
+
     DataSet data_set;
 
     Batch batch;
@@ -32,19 +32,20 @@ TEST(CrossEntropyErrorTest, BackPropagateEmpty)
 
     ForwardPropagation forward_propagation;
 
     BackPropagation back_propagation;
 
     cross_entropy_error.back_propagate(batch, forward_propagation, back_propagation);
-*/
+
 }
 
 
 TEST(CrossEntropyErrorTest, BackPropagate)
 {
-/*
+
     const Index samples_number = get_random_index(1, 10);
     const Index inputs_number = get_random_index(1, 10);
     const Index targets_number = get_random_index(1, 10);
     const Index neurons_number = get_random_index(1, 10);
 
     DataSet data_set(samples_number, { inputs_number }, { targets_number });
+/*
     data_set.set_data_classification();
 
     data_set.set(DataSet::SampleUse::Training);
diff --git a/tests/inputs_selection_test.cpp b/tests/inputs_selection_test.cpp
deleted file mode 100644
index 3d26baa03..000000000
--- a/tests/inputs_selection_test.cpp
+++ /dev/null
@@ -1,24 +0,0 @@
-#include "pch.h"
-
-#include "../opennn/inputs_selection.h"
-#include "../opennn/growing_inputs.h"
"../opennn/growing_inputs.h" - - -TEST(InputsSelectionTest, DefaultConstructor) -{ -// InputsSelection inputs_selection; - -// EXPECT_EQ(!gi2.has_training_strategy()); - - -} - - -TEST(InputsSelectionTest, GeneralConstructor) -{ -// GrowingInputs gi1(&training_strategy); - -// EXPECT_EQ(gi1.has_training_strategy()); - - -} diff --git a/tests/model_selection_test.cpp b/tests/model_selection_test.cpp index e536d12f5..c2820f4df 100644 --- a/tests/model_selection_test.cpp +++ b/tests/model_selection_test.cpp @@ -22,14 +22,14 @@ TEST(ModelSelectionTest, GeneralConstructor) TEST(ModelSelectionTest, NeuronsSelection) { -/* - data_set.generate_sum_data(20, 2); - neural_network.set(NeuralNetwork::ModelType::Approximation, { 1 }, { 2 }, { 1 }); +// data_set.generate_sum_data(20, 2); - training_strategy.set_display(false); + NeuralNetwork neural_network(NeuralNetwork::ModelType::Approximation, { 1 }, { 2 }, { 1 }); +/* + //training_strategy.set_display(false); - model_selection.set_display(false); + //model_selection.set_display(false); GrowingNeurons* incremental_neurons = model_selection.get_growing_neurons(); diff --git a/tests/neurons_selection_test.cpp b/tests/neurons_selection_test.cpp deleted file mode 100644 index 3eed971f3..000000000 --- a/tests/neurons_selection_test.cpp +++ /dev/null @@ -1,22 +0,0 @@ -#include "pch.h" - -#include "../opennn/growing_neurons.h" - -/* -void NeuronsSelectionTest::test_constructor() -{ - // Test - - GrowingNeurons growing_neurons_1(&training_strategy); - - EXPECT_EQ(growing_neurons_1.has_training_strategy()); - - // Test - - GrowingNeurons growing_neurons_2; - - EXPECT_EQ(!growing_neurons_2.has_training_strategy()); -} - -} -*/ diff --git a/tests/probabilistic_layer_3d_test.cpp b/tests/probabilistic_layer_3d_test.cpp index 4c93ae529..532bdce9d 100644 --- a/tests/probabilistic_layer_3d_test.cpp +++ b/tests/probabilistic_layer_3d_test.cpp @@ -171,15 +171,13 @@ TEST(ProbabilisticLayer3DTest, Activations) */ } -/* -void ProbabilisticLayer3DTest::test_calculate_activations() +TEST(ProbabilisticLayer3DTest, SoftmaxDerivatives) { +/* + Tensor& activations; + Tensor& activation_derivatives; -} - -bool ProbabilisticLayer3DTest::check_softmax_derivatives(Tensor& activations, Tensor& activation_derivatives) const -{ for(Index i = 0; i < samples_number; i++) { for(Index j = 0; j < inputs_number; j++) @@ -202,13 +200,13 @@ bool ProbabilisticLayer3DTest::check_softmax_derivatives(Tensor& activa } } } - - return true; +*/ } -void ProbabilisticLayer3DTest::test_forward_propagate() +TEST(ProbabilisticLayer3DTest, ForwardPropagate) { +/* bool is_training = true; { @@ -295,7 +293,5 @@ void ProbabilisticLayer3DTest::test_forward_propagate() EXPECT_EQ(correct_outputs); } +*/ } - -} -*/ \ No newline at end of file diff --git a/tests/quasi_newton_method_test.cpp b/tests/quasi_newton_method_test.cpp index f16df6968..ab010c2f9 100644 --- a/tests/quasi_newton_method_test.cpp +++ b/tests/quasi_newton_method_test.cpp @@ -34,13 +34,16 @@ TEST(QuasiNewtonMethodTest, DFP) data_set.set_data_random(); NeuralNetwork neural_network(NeuralNetwork::ModelType::Approximation, { inputs_number }, {}, { outputs_number }); - neural_network.set_parameters_constant(type(1)); -/* - quasi_newton_method_data.set(&quasi_newton_method); - quasi_newton_method.calculate_DFP_inverse_hessian(quasi_newton_method_data); + MeanSquaredError mean_squared_error(&neural_network, &data_set); + + QuasiNewtonMethod quasi_newton_method(&mean_squared_error); + + QuasiNewtonMethodData 
+
+    quasi_newton_method.calculate_DFP_inverse_hessian(quasi_newton_method_data);
+/*
 
     EXPECT_EQ(are_equal(quasi_newton_method_data.inverse_hessian, inverse_hessian, type(1e-4)));
 */
 }
@@ -55,11 +58,17 @@ TEST(QuasiNewtonMethodTest, BGFS)
 
     NeuralNetwork neural_network(NeuralNetwork::ModelType::Approximation, { inputs_number }, {}, { outputs_number });
     neural_network.set_parameters_constant(type(1));
-/*
+    MeanSquaredError mean_squared_error(&neural_network);
+
     mean_squared_error.set_regularization_method(LossIndex::RegularizationMethod::L2);
 
-    quasi_newton_method.calculate_BFGS_inverse_hessian(quasi_newton_method_data);
+    QuasiNewtonMethod quasi_newton_method(&mean_squared_error);
+
+    QuasiNewtonMethodData quasi_newton_method_data(&quasi_newton_method);
+
+    quasi_newton_method.calculate_BFGS_inverse_hessian(quasi_newton_method_data);
+/*
 
     EXPECT_EQ(are_equal(BFGS_inverse_hessian, inverse_hessian, type(1e-4)));
 */
 }
diff --git a/tests/recurrent_layer_test.cpp b/tests/recurrent_layer_test.cpp
index 9ba798c02..ef6554cd9 100644
--- a/tests/recurrent_layer_test.cpp
+++ b/tests/recurrent_layer_test.cpp
@@ -1,44 +1,24 @@
 #include "pch.h"
+
 #include "../opennn/recurrent_layer.h"
 
 
 TEST(RecurrentLayerTest, DefaultConstructor)
 {
     RecurrentLayer recurrent_layer;
 
-//    EXPECT_EQ(quasi_newton_method.has_loss_index(), false);
+    EXPECT_EQ(recurrent_layer.get_inputs_number(), 0);
 }
 
 
 TEST(RecurrentLayerTest, GeneralConstructor)
 {
+    const Index inputs_number = get_random_index(1, 10);
+    const Index neurons_number = get_random_index(1, 10);
+    const Index time_steps = get_random_index(1, 10);
 
-    RecurrentLayer recurrent_layer;
-
-    Index inputs_number;
-    Index neurons_number;
-    Index time_steps;
-/*
-    // Test
-
-    inputs_number = 1;
-    neurons_number = 1;
-    time_steps = 1;
-
-    recurrent_layer.set(inputs_number, neurons_number, time_steps);
-
-    EXPECT_EQ(recurrent_layer.get_parameters_number() == 3);
-
-    // Test
-
-    inputs_number = 2;
-    neurons_number = 3;
-
-    recurrent_layer.set(inputs_number, neurons_number, time_steps);
-
-    EXPECT_EQ(recurrent_layer.get_parameters_number() == 18);
-
-    EXPECT_EQ(quasi_newton_method.has_loss_index(), true);
-*/
+    RecurrentLayer recurrent_layer({ inputs_number }, { neurons_number });
+
+//    EXPECT_EQ(recurrent_layer.get_parameters_number(), 3);
 }
@@ -59,8 +39,8 @@ TEST(RecurrentLayerTest, ForwardPropagate)
     Tensor<type, 1> new_biases;
 
     pair<type*, dimensions> input_pairs;
-
-/*
-    recurrent_layer.set(inputs_number, neurons_number, time_steps);
+
+    RecurrentLayer recurrent_layer({ inputs_number }, { neurons_number });
+
     recurrent_layer.set_activation_function(RecurrentLayer::ActivationFunction::HyperbolicTangent);
 
@@ -69,8 +49,8 @@ TEST(RecurrentLayerTest, ForwardPropagate)
     recurrent_layer.set_parameters_constant(type(1));
     inputs.setConstant(type(1));
 
-    recurrent_layer_forward_propagation.set(samples_number, &recurrent_layer);
-
+    RecurrentLayerForwardPropagation recurrent_layer_forward_propagation(samples_number, &recurrent_layer);
+/*
     Tensor<type*, 1> input_data(1);
     input_data(0) = inputs.data();
 
diff --git a/tests/response_optimization_test.cpp b/tests/response_optimization_test.cpp
index d4d0ec895..143f479de 100644
--- a/tests/response_optimization_test.cpp
+++ b/tests/response_optimization_test.cpp
@@ -1,41 +1,51 @@
 #include "pch.h"
 
-/*
-void ResponseOptimizationTest::test_constructor()
+#include "../opennn/response_optimization.h"
+
+TEST(ResponseOptimization, DefaultConstructor)
+{
+    ResponseOptimization response_optimization;
+
+//    EXPECT_EQ(response_optimization.has_, 0);
+}
+
+
+TEST(ResponseOptimization, GeneralConstructor)
 {
-    ResponseOptimization response_optimization_1(&neural_network);
+    NeuralNetwork neural_network;
 
-    EXPECT_EQ(response_optimization_1.get_inputs_conditions()(0) == ResponseOptimization::Condition::None);
-    EXPECT_EQ(response_optimization_1.get_outputs_conditions()(0) == ResponseOptimization::Condition::None);
+    // EXPECT_EQ(response_optimization.has_, 0);
 
-    ResponseOptimization response_optimization_2(&neural_network_2);
+    ResponseOptimization response_optimization(&neural_network);
 
-    EXPECT_EQ(response_optimization_2.get_inputs_conditions()(0) == ResponseOptimization::Condition::None);
-    EXPECT_EQ(response_optimization_2.get_inputs_conditions()(1) == ResponseOptimization::Condition::None);
-    EXPECT_EQ(response_optimization_2.get_outputs_conditions()(0) == ResponseOptimization::Condition::None);
-    EXPECT_EQ(response_optimization_2.get_outputs_conditions()(1) == ResponseOptimization::Condition::None);
+// EXPECT_EQ(response_optimization.get_inputs_conditions()(0), ResponseOptimization::Condition::None);
+// EXPECT_EQ(response_optimization.get_outputs_conditions()(0), ResponseOptimization::Condition::None);
 
-    ResponseOptimization response_optimization_3;
 }
 
 
-void ResponseOptimizationTest::test_calculate_inputs()
+TEST(ResponseOptimization, Inputs)
 {
-    ResponseOptimization response_optimization(&neural_network, &data_set);
+    NeuralNetwork neural_network;
+    DataSet data_set;
 
-    Tensor<type, 2> inputs = response_optimization.calculate_inputs();
+    ResponseOptimization response_optimization(&neural_network, &data_set);
 
-    EXPECT_EQ(inputs.dimension(0) == response_optimization.get_evaluations_number());
-    EXPECT_EQ(inputs.dimension(1) == neural_network.get_inputs_number());
+    Tensor<type, 2> inputs = response_optimization.calculate_inputs();
+/*
+    EXPECT_EQ(inputs.dimension(0), response_optimization.get_evaluations_number());
+    EXPECT_EQ(inputs.dimension(1), neural_network.get_inputs_number());
 
     EXPECT_EQ(inputs(0) <= response_optimization.get_inputs_maximums()(1));
     EXPECT_EQ(inputs(1) <= response_optimization.get_inputs_maximums()(1));
     EXPECT_EQ(inputs(0) >= response_optimization.get_inputs_minimums()(1));
     EXPECT_EQ(inputs(1) >= response_optimization.get_inputs_minimums()(1));
+*/
 }
 
 
-void ResponseOptimizationTest::test_perform_optimization()
+TEST(ResponseOptimization, Optimize)
 {
+/*
     ResponseOptimization response_optimization(&neural_network,&data_set);
 
     // Empty results
@@ -136,6 +146,7 @@ void ResponseOptimizationTest::test_perform_optimization()
     EXPECT_EQ(results->optimal_variables(2) <= 3.0);
     EXPECT_EQ(results->optimal_variables(3) >= type(-1));
     EXPECT_EQ(results->optimal_variables(3) <= 0.0);
+*/
 }
 
-*/
+
diff --git a/tests/tensors_test.cpp b/tests/tensors_test.cpp
index 22a74433d..922b05788 100644
--- a/tests/tensors_test.cpp
+++ b/tests/tensors_test.cpp
@@ -2,34 +2,34 @@
 
 #include "../opennn/tensors.h"
 
-/*
-
-void TensorsTest::test_fill_tensor_data()
+TEST(Tensors, Fill)
 {
+/*
     Tensor<type, 2> submatrix;
-    Tensor<Index, 1> rows_indices;
-    Tensor<Index, 1> columns_indices;
+    vector<Index> rows_indices;
+    vector<Index> columns_indices;
 
     // Test
 
-    matrix.resize(1, 1);
+    Tensor<type, 2> matrix(1, 1);
     matrix.setConstant(type(3.1416));
 
-    rows_indices.resize(1);
-    rows_indices.setZero();
+    rows_indices.resize(1, 0);
 
-    columns_indices.resize(1);
-    columns_indices.setZero();
+    columns_indices.resize(1, 0);
 
     submatrix.resize(1, 1);
 
     fill_tensor_data(matrix, rows_indices, columns_indices, submatrix.data());
 
     EXPECT_EQ(is_equal(submatrix, type(3.1416)));
+*/
 }
 
+/*
+
 void TensorsTest::test_calculate_rank()
 {
     Tensor<Index, 1> rank_greater;
@@ -43,23 +43,4 @@ void TensorsTest::test_calculate_rank()
     rank_greater = calculate_rank_greater(vector);
     rank_less = calculate_rank_less(vector);
 }
-
-}
-
-// OpenNN: Open Neural Networks Library.
-// Copyright (C) 2005-2024 Artificial Intelligence Techniques, SL.
-//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License, or any later version.
-//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
-
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
\ No newline at end of file
diff --git a/tests/tests.vcxproj b/tests/tests.vcxproj
index 764ca825f..62d6b9e4c 100644
--- a/tests/tests.vcxproj
+++ b/tests/tests.vcxproj
@@ -118,7 +118,6 @@
 
 
 
-
 
 
 
@@ -126,7 +125,6 @@
 
 
 
-
 
 
 