From ce4ef0d214b85843d7cbfa07be2d487a26ecfde0 Mon Sep 17 00:00:00 2001 From: RoberLopez Date: Thu, 26 Dec 2024 16:11:59 +0100 Subject: [PATCH 1/2] clean --- opennn/kmeans.cpp | 8 +- opennn/opennn.vcxproj | 4 +- opennn/probabilistic_layer.h | 8 - opennn/probabilistic_layer_3d.cpp | 18 - opennn/probabilistic_layer_3d.h | 8 +- opennn/testing_analysis.cpp | 30 +- tests/conjugate_gradient_test.cpp | 6 +- tests/learning_rate_algorithm_test.cpp | 12 +- tests/mean_squared_error_test.cpp | 3 +- tests/minkowski_error_test.cpp | 3 +- tests/normalized_squared_error_test.cpp | 3 +- tests/probabilistic_layer_3d_test.cpp | 5 +- tests/probabilistic_layer_test.cpp | 72 ++- tests/quasi_newton_method_test.cpp | 184 +++---- tests/recurrent_layer_test.cpp | 45 +- tests/response_optimization_test.cpp | 27 +- tests/sum_squared_error_test.cpp | 509 ------------------ ...or_utilities_test.cpp => tensors_test.cpp} | 0 tests/tests.vcxproj | 6 +- 19 files changed, 178 insertions(+), 773 deletions(-) delete mode 100644 tests/sum_squared_error_test.cpp rename tests/{tensor_utilities_test.cpp => tensors_test.cpp} (100%) diff --git a/opennn/kmeans.cpp b/opennn/kmeans.cpp index 52b5896cc..018af46eb 100755 --- a/opennn/kmeans.cpp +++ b/opennn/kmeans.cpp @@ -133,7 +133,7 @@ Tensor KMeans::elbow_method(const Tensor& data, Index max_clus const Index rows_number = data.dimension(0); Index original_clusters_number = clusters_number; - type sum_squared_error; + type mean_squared_error; for(Index cluster_index = 1; cluster_index <= max_clusters; cluster_index++) { @@ -141,17 +141,17 @@ Tensor KMeans::elbow_method(const Tensor& data, Index max_clus fit(data); - sum_squared_error = type(0); + mean_squared_error = type(0); for(Index row_index = 0; row_index < rows_number; row_index++) { data_point = data.chip(row_index, 0); cluster_center = cluster_centers.chip(rows_cluster_labels(row_index), 0); - sum_squared_error += type(pow(l2_distance(data_point, cluster_center), 2)); + mean_squared_error += type(pow(l2_distance(data_point, cluster_center), 2)); } - sum_squared_error_values(cluster_index-1) = sum_squared_error; + sum_squared_error_values(cluster_index-1) = mean_squared_error; } clusters_number = original_clusters_number; diff --git a/opennn/opennn.vcxproj b/opennn/opennn.vcxproj index ff52e1de6..790f70c90 100644 --- a/opennn/opennn.vcxproj +++ b/opennn/opennn.vcxproj @@ -243,10 +243,10 @@ NotUsing - - + + diff --git a/opennn/probabilistic_layer.h b/opennn/probabilistic_layer.h index 9332ae23f..a93ca83ec 100644 --- a/opennn/probabilistic_layer.h +++ b/opennn/probabilistic_layer.h @@ -45,11 +45,6 @@ struct ProbabilisticLayerBackPropagation : LayerBackPropagation void print() const override; - // Tensor targets; - - //Tensor deltas_row; - //Tensor activations_derivatives; - Tensor combination_derivatives; Tensor bias_derivatives; @@ -70,13 +65,10 @@ struct ProbabilisticLayerBackPropagationLM : LayerBackPropagationLM void print() const override; - //Tensor deltas_row; - Tensor combination_derivatives; Tensor squared_errors_Jacobian; - //Tensor targets; }; diff --git a/opennn/probabilistic_layer_3d.cpp b/opennn/probabilistic_layer_3d.cpp index 24a56625c..18a8a019d 100644 --- a/opennn/probabilistic_layer_3d.cpp +++ b/opennn/probabilistic_layer_3d.cpp @@ -46,12 +46,6 @@ dimensions ProbabilisticLayer3D::get_output_dimensions() const } -const type& ProbabilisticLayer3D::get_decision_threshold() const -{ - return decision_threshold; -} - - const ProbabilisticLayer3D::ActivationFunction& ProbabilisticLayer3D::get_activation_function() 
const { return activation_function; @@ -122,8 +116,6 @@ void ProbabilisticLayer3D::set(const Index& new_inputs_number, layer_type = Layer::Type::Probabilistic3D; activation_function = ActivationFunction::Softmax; - - decision_threshold = type(0.5); } @@ -185,12 +177,6 @@ void ProbabilisticLayer3D::set_parameters(const Tensor& new_parameters, } -void ProbabilisticLayer3D::set_decision_threshold(const type& new_decision_threshold) -{ - decision_threshold = new_decision_threshold; -} - - void ProbabilisticLayer3D::set_activation_function(const ActivationFunction& new_activation_function) { activation_function = new_activation_function; @@ -275,8 +261,6 @@ void ProbabilisticLayer3D::forward_propagate(const vector&, const Index& index = 0) override; - void set_decision_threshold(const type&); void set_activation_function(const ActivationFunction&); void set_activation_function(const string&); @@ -124,8 +122,6 @@ class ProbabilisticLayer3D : public Layer ActivationFunction activation_function = ActivationFunction::Softmax; - type decision_threshold; - Tensor empty; const Eigen::array, 2> double_contraction_indices = { IndexPair(0, 0), IndexPair(1, 1) }; diff --git a/opennn/testing_analysis.cpp b/opennn/testing_analysis.cpp index 659e9674e..5823c0b4a 100644 --- a/opennn/testing_analysis.cpp +++ b/opennn/testing_analysis.cpp @@ -492,10 +492,10 @@ Tensor TestingAnalysis::calculate_errors(const Tensor& targets Tensor errors(4); - Tensor sum_squared_error; - sum_squared_error.device(*thread_pool_device) = (outputs - targets).square().sum().sqrt(); + Tensor mean_squared_error; + mean_squared_error.device(*thread_pool_device) = (outputs - targets).square().sum().sqrt(); - errors.setValues({sum_squared_error(0), + errors.setValues({mean_squared_error(0), errors(0) / type(samples_number), sqrt(errors(1)), calculate_normalized_squared_error(targets, outputs)});; @@ -534,10 +534,10 @@ Tensor TestingAnalysis::calculate_binary_classification_errors(const Da // Results - Tensor sum_squared_error; - sum_squared_error.device(*thread_pool_device) = (outputs-targets).square().sum().sqrt(); + Tensor mean_squared_error; + mean_squared_error.device(*thread_pool_device) = (outputs-targets).square().sum().sqrt(); - errors(0) = sum_squared_error(0); + errors(0) = mean_squared_error(0); errors(1) = errors(0)/type(training_samples_number); errors(2) = sqrt(errors(1)); errors(3) = calculate_normalized_squared_error(targets, outputs); @@ -566,10 +566,10 @@ Tensor TestingAnalysis::calculate_multiple_classification_errors(const // Results - Tensor sum_squared_error; - sum_squared_error.device(*thread_pool_device) = (outputs-targets).square().sum().sqrt(); + Tensor mean_squared_error; + mean_squared_error.device(*thread_pool_device) = (outputs-targets).square().sum().sqrt(); - errors(0) = sum_squared_error(0); + errors(0) = mean_squared_error(0); errors(1) = errors(0)/type(training_samples_number); errors(2) = sqrt(errors(1)); errors(3) = calculate_normalized_squared_error(targets, outputs); @@ -585,8 +585,8 @@ type TestingAnalysis::calculate_normalized_squared_error(const Tensor& const Tensor targets_mean = mean(targets); - Tensor sum_squared_error; - sum_squared_error.device(*thread_pool_device) = (outputs - targets).square().sum(); + Tensor mean_squared_error; + mean_squared_error.device(*thread_pool_device) = (outputs - targets).square().sum(); type normalization_coefficient = type(0); @@ -599,7 +599,7 @@ type TestingAnalysis::calculate_normalized_squared_error(const Tensor& normalization_coefficient += norm(0); } - 
return sum_squared_error()/normalization_coefficient; + return mean_squared_error()/normalization_coefficient; } @@ -703,8 +703,8 @@ type TestingAnalysis::calculate_weighted_squared_error(const Tensor& ta f_3.device(*thread_pool_device) = targets.constant(type(0)); - Tensor sum_squared_error; - sum_squared_error.device(*thread_pool_device) = (if_sentence.select(f_1, else_sentence.select(f_2, f_3))).sum(); + Tensor mean_squared_error; + mean_squared_error.device(*thread_pool_device) = (if_sentence.select(f_1, else_sentence.select(f_2, f_3))).sum(); Index negatives = 0; @@ -716,7 +716,7 @@ type TestingAnalysis::calculate_weighted_squared_error(const Tensor& ta const type normalization_coefficient = type(negatives)*negatives_weight*type(0.5); - return sum_squared_error(0)/normalization_coefficient; + return mean_squared_error(0)/normalization_coefficient; } diff --git a/tests/conjugate_gradient_test.cpp b/tests/conjugate_gradient_test.cpp index 408099914..5b332b478 100644 --- a/tests/conjugate_gradient_test.cpp +++ b/tests/conjugate_gradient_test.cpp @@ -8,7 +8,7 @@ TEST(ConjugateGradientTest, DefaultConstructor) ConjugateGradient conjugate_gradient_1; // EXPECT_EQ(!conjugate_gradient_1.has_loss_index()); -// ConjugateGradient conjugate_gradient_2(&sum_squared_error); +// ConjugateGradient conjugate_gradient_2(&mean_squared_error); // EXPECT_EQ(conjugate_gradient_2.has_loss_index()); } @@ -18,7 +18,7 @@ TEST(ConjugateGradientTest, GeneralConstructor) ConjugateGradient conjugate_gradient_1; // EXPECT_EQ(!conjugate_gradient_1.has_loss_index()); - // ConjugateGradient conjugate_gradient_2(&sum_squared_error); + // ConjugateGradient conjugate_gradient_2(&mean_squared_error); // EXPECT_EQ(conjugate_gradient_2.has_loss_index()); } @@ -57,7 +57,7 @@ TEST(ConjugateGradientTest, FrParameter) ConjugateGradient conjugate_gradient; // EXPECT_EQ(!conjugate_gradient_1.has_loss_index()); - // ConjugateGradient conjugate_gradient_2(&sum_squared_error); + // ConjugateGradient conjugate_gradient_2(&mean_squared_error); // EXPECT_EQ(conjugate_gradient_2.has_loss_index()); } diff --git a/tests/learning_rate_algorithm_test.cpp b/tests/learning_rate_algorithm_test.cpp index 225533eec..79755aafd 100644 --- a/tests/learning_rate_algorithm_test.cpp +++ b/tests/learning_rate_algorithm_test.cpp @@ -44,7 +44,7 @@ TEST(LearningRateAlgorithmTest, BracketingTriplet) /* Tensor sample_indices(0, 1, samples_number); - LearningRateAlgorithm learning_rate_algorithm(&sum_squared_error); + LearningRateAlgorithm learning_rate_algorithm(&mean_squared_error); type loss = 0.0; Tensor training_direction; @@ -61,12 +61,12 @@ TEST(LearningRateAlgorithmTest, BracketingTriplet) void LearningRateAlgorithmTest::test_calculate_bracketing_triplet() { - sum_squared_error.set_regularization_method(LossIndex::RegularizationMethod::L2); + mean_squared_error.set_regularization_method(LossIndex::RegularizationMethod::L2); neural_network.set_parameters_random(); - //loss = sum_squared_error.calculate_training_loss(); - //training_direction = sum_squared_error.calculate_training_loss_gradient()*(-1.0); + //loss = mean_squared_error.calculate_training_loss(); + //training_direction = mean_squared_error.calculate_training_loss_gradient()*(-1.0); initial_learning_rate = 0.01; @@ -143,7 +143,7 @@ void LearningRateAlgorithmTest::test_calculate_golden_section_directional_point( neural_network.set(NeuralNetwork::ModelType::Approximation, {1, 1}); - LearningRateAlgorithm learning_rate_algorithm(&sum_squared_error); + LearningRateAlgorithm 
learning_rate_algorithm(&mean_squared_error); neural_network.set_parameters_constant(type(1)); @@ -181,7 +181,7 @@ void LearningRateAlgorithmTest::test_calculate_Brent_method_directional_point() // @todo loss_index.calculate_training_loss not available - Tensor gradient = sum_squared_error.calculate_numerical_gradient(); + Tensor gradient = mean_squared_error.calculate_numerical_gradient(); Tensor training_direction = gradient*(type(-1.0)); diff --git a/tests/mean_squared_error_test.cpp b/tests/mean_squared_error_test.cpp index be2640a6a..95178bdc1 100644 --- a/tests/mean_squared_error_test.cpp +++ b/tests/mean_squared_error_test.cpp @@ -83,7 +83,7 @@ TEST(MeanSquaredErrorTest, BackPropagateLm) const Index inputs_number = get_random_index(1, 1); const Index outputs_number = get_random_index(1, 1); const Index neurons_number = get_random_index(1, 1); - +/* // Data set DataSet data_set(samples_number, { inputs_number }, { outputs_number }); @@ -121,4 +121,5 @@ TEST(MeanSquaredErrorTest, BackPropagateLm) EXPECT_NEAR(back_propagation_lm.error(), back_propagation.error(), type(1.0e-3)); EXPECT_EQ(are_equal(back_propagation_lm.squared_errors_jacobian, numerical_jacobian), true); EXPECT_EQ(are_equal(back_propagation_lm.gradient, numerical_gradient), true); +*/ } diff --git a/tests/minkowski_error_test.cpp b/tests/minkowski_error_test.cpp index 463d810a8..e80e27a96 100644 --- a/tests/minkowski_error_test.cpp +++ b/tests/minkowski_error_test.cpp @@ -34,7 +34,7 @@ TEST(MinkowskiErrorTest, BackPropagate) const Index inputs_number = get_random_index(1, 10); const Index targets_number = get_random_index(1, 10); const Index neurons_number = get_random_index(1, 10); - +/* DataSet data_set(samples_number, { inputs_number }, { targets_number }); data_set.set_data_random(); data_set.set(DataSet::SampleUse::Training); @@ -63,4 +63,5 @@ TEST(MinkowskiErrorTest, BackPropagate) const Tensor numerical_gradient = minkowski_error.calculate_numerical_gradient(); EXPECT_EQ(are_equal(back_propagation.gradient, numerical_gradient, type(1.0e-3)), true); +*/ } diff --git a/tests/normalized_squared_error_test.cpp b/tests/normalized_squared_error_test.cpp index 1c29cda69..bec8165ad 100644 --- a/tests/normalized_squared_error_test.cpp +++ b/tests/normalized_squared_error_test.cpp @@ -32,7 +32,7 @@ TEST(NormalizedSquaredErrorTest, BackPropagateApproximation) const Index inputs_number = get_random_index(1, 10); const Index targets_number = get_random_index(1, 10); const Index neurons_number = get_random_index(1, 10); - +/* DataSet data_set(samples_number, { inputs_number }, { targets_number }); data_set.set_data_random(); data_set.set(DataSet::SampleUse::Training); @@ -61,6 +61,7 @@ TEST(NormalizedSquaredErrorTest, BackPropagateApproximation) const Tensor numerical_gradient = normalized_squared_error.calculate_numerical_gradient(); EXPECT_EQ(are_equal(back_propagation.gradient, numerical_gradient, type(1.0e-3)), true); +*/ } diff --git a/tests/probabilistic_layer_3d_test.cpp b/tests/probabilistic_layer_3d_test.cpp index 8980b3a9b..4abc236d8 100644 --- a/tests/probabilistic_layer_3d_test.cpp +++ b/tests/probabilistic_layer_3d_test.cpp @@ -4,14 +4,13 @@ TEST(ProbabilisticLayer3DTest, DefaultConstructor) { -/* + ProbabilisticLayer3D probabilistic_layer_3d; // EXPECT_EQ(probabilistic_layer_3d.get_inputs_number(), 0); // EXPECT_EQ(probabilistic_layer_3d.get_inputs_depth(), 0); // EXPECT_EQ(probabilistic_layer_3d.get_neurons_number(), 0); - EXPECT_EQ(probabilistic_layer_3d.get_parameters_number(), 0); -*/ +// 
EXPECT_EQ(probabilistic_layer_3d.get_parameters_number(), 0); } diff --git a/tests/probabilistic_layer_test.cpp b/tests/probabilistic_layer_test.cpp index 36c3de680..bafe3b447 100644 --- a/tests/probabilistic_layer_test.cpp +++ b/tests/probabilistic_layer_test.cpp @@ -30,18 +30,19 @@ TEST(ProbabilisticLayerTest, CalculateCombinations) EXPECT_EQ(probabilistic_layer.get_parameters_number(), 4); Tensor inputs(1, 1); - inputs.setConstant(type(3)); -/* + inputs.setConstant(type(1)); + Tensor combinations(1, 1); - probabilistic_layer.set(1, 1); + probabilistic_layer.set({ 1 }, { 1 }); + probabilistic_layer.set_parameters_constant(type(1)); probabilistic_layer.calculate_combinations(inputs, combinations); EXPECT_EQ(combinations.rank(), 2); EXPECT_EQ(combinations.dimension(0), 1); EXPECT_EQ(combinations.dimension(1), 1); - - EXPECT_EQ(abs(combinations(0, 0) - type(7)) < type(1e-5)); +/* + EXPECT_EQ(combinations(0, 0), type(1), NUMERIC_LIMITS_MIN); */ } @@ -55,11 +56,10 @@ TEST(ProbabilisticLayerTest, CalculateActivations) Tensor activations(1, 1); Tensor activation_derivatives(1, 1); -/* - probabilistic_layer.set_activation_function(ProbabilisticLayer::ActivationFunction::Logistic); - probabilistic_layer.calculate_activations(combinations, - activation_derivatives); + probabilistic_layer.set_activation_function(ProbabilisticLayer::ActivationFunction::Logistic); + /* + probabilistic_layer.calculate_activations(combinations, activation_derivatives); EXPECT_EQ(abs(activations(0, 0) - type(0.175)) < type(1e-2)); @@ -73,53 +73,47 @@ TEST(ProbabilisticLayerTest, CalculateActivations) } - -/* -void ProbabilisticLayerTest::test_calculate_activations() +TEST(ProbabilisticLayerTest, ForwardPropagate) { - // Test - - samples_number = 1; - inputs_number = 1; - neurons_number = 1; - -} + const Index inputs_number = 2; + const Index neurons_number = 2; + const Index samples_number = 5; - -void ProbabilisticLayerTest::test_forward_propagate() -{ - inputs_number = 2; - neurons_number = 2; - samples_number = 5; - - probabilistic_layer.set(inputs_number, neurons_number); + ProbabilisticLayer probabilistic_layer({ inputs_number }, { neurons_number }); probabilistic_layer.set_parameters_constant(type(1)); - inputs.resize(samples_number, inputs_number); + Tensor inputs(samples_number, inputs_number); inputs.setConstant(type(1)); probabilistic_layer.set_activation_function(ProbabilisticLayer::ActivationFunction::Softmax); //Forward propagate - probabilistic_layer_forward_propagation.set(samples_number, &probabilistic_layer); - - input_pairs = {inputs.data(), {{samples_number, inputs_number}}}; + ProbabilisticLayerForwardPropagation probabilistic_layer_forward_propagation(samples_number, &probabilistic_layer); +/* + input_pairs = { inputs.data(), {{samples_number, inputs_number}} }; input_pairs.first = inputs.data(); - input_pairs.second = {{samples_number, inputs_number}}; + input_pairs.second = { {samples_number, inputs_number} }; - probabilistic_layer.forward_propagate({input_pairs}, - &probabilistic_layer_forward_propagation, - is_training); + probabilistic_layer.forward_propagate({ input_pairs }, + &probabilistic_layer_forward_propagation, + is_training); outputs = probabilistic_layer_forward_propagation.outputs; - EXPECT_EQ(outputs.dimension(0) == samples_number); - EXPECT_EQ(outputs.dimension(1) == neurons_number ); - EXPECT_EQ(abs(outputs(0,0) - type(0.5)) < type(1e-3)); - EXPECT_EQ(abs(outputs(0,1) - type(0.5)) < type(1e-3)); + EXPECT_EQ(outputs.dimension(0), samples_number); + EXPECT_EQ(outputs.dimension(1), 
neurons_number); + EXPECT_EQ(abs(outputs(0, 0) - type(0.5)) < type(1e-3)); + EXPECT_EQ(abs(outputs(0, 1) - type(0.5)) < type(1e-3)); +*/ +} + +/* + +void ProbabilisticLayerTest::test_forward_propagate() +{ // Test 1 diff --git a/tests/quasi_newton_method_test.cpp b/tests/quasi_newton_method_test.cpp index c8e062a7e..c1495726f 100644 --- a/tests/quasi_newton_method_test.cpp +++ b/tests/quasi_newton_method_test.cpp @@ -6,135 +6,75 @@ // Artificial Intelligence Techniques SL // artelnics@artelnics.com -namespace opennn -{ +#include "pch.h" + +#include "../opennn/mean_squared_error.h" +#include "../opennn/quasi_newton_method.h" -QuasiNewtonMethodTest::QuasiNewtonMethodTest() : UnitTesting() +TEST(QuasiNewtonMethodTest, DefaultConstructor) { - sum_squared_error.set(&neural_network, &data_set); + QuasiNewtonMethod quasi_newton_method; - quasi_newton_method.set_loss_index(&sum_squared_error); + EXPECT_EQ(quasi_newton_method.has_loss_index(), false); } -void QuasiNewtonMethodTest::test_constructor() +TEST(QuasiNewtonMethodTest, GeneralConstructor) { - cout << "test_constructor\n"; - QuasiNewtonMethod quasi_newton_method_1; + MeanSquaredError mean_squared_error; - assert_true(!quasi_newton_method_1.has_loss_index(), LOG); + QuasiNewtonMethod quasi_newton_method(&mean_squared_error); - // Loss index constructor + EXPECT_EQ(quasi_newton_method.has_loss_index(), true); - QuasiNewtonMethod quasi_newton_method_2(&sum_squared_error); - assert_true(quasi_newton_method_2.has_loss_index(), LOG); } -void QuasiNewtonMethodTest::test_calculate_DFP_inverse_hessian_approximation() +TEST(QuasiNewtonMethodTest, DFP) { - cout << "test_calculate_DFP_inverse_hessian_approximation\n"; - - samples_number = 1; - inputs_number = 1; - outputs_number = 1; - neurons_number = 1; - - // Test + const Index samples_number = 1; + const Index inputs_number = 1; + const Index outputs_number = 1; + const Index neurons_number = 1; - data_set.set(samples_number, inputs_number, outputs_number); + DataSet data_set(samples_number, { inputs_number }, { outputs_number }); data_set.set_data_random(); - neural_network.set(NeuralNetwork::ModelType::Approximation, {inputs_number}, {}, {outputs_number}); - - // Test + NeuralNetwork neural_network(NeuralNetwork::ModelType::Approximation, { inputs_number }, {}, { outputs_number }); neural_network.set_parameters_constant(type(1)); - +/* quasi_newton_method_data.set(&quasi_newton_method); quasi_newton_method.calculate_DFP_inverse_hessian(quasi_newton_method_data); -/* + assert_true(are_equal(quasi_newton_method_data.inverse_hessian, inverse_hessian, type(1e-4)), LOG); */ } -void QuasiNewtonMethodTest::test_calculate_BFGS_inverse_hessian_approximation() +TEST(QuasiNewtonMethodTest, BFGS) { - cout << "test_calculate_BFGS_inverse_hessian_approximation\n"; + const Index samples_number = 1; + const Index inputs_number = 1; + const Index outputs_number = 1; + const Index neurons_number = 1; - samples_number = 1; - inputs_number = 1; - outputs_number = 1; - neurons_number = 1; - - neural_network.set(NeuralNetwork::ModelType::Approximation, {inputs_number}, {}, {outputs_number}); + NeuralNetwork neural_network(NeuralNetwork::ModelType::Approximation, { inputs_number }, {}, { outputs_number }); neural_network.set_parameters_constant(type(1)); - - sum_squared_error.set_regularization_method(LossIndex::RegularizationMethod::L2); +/* + mean_squared_error.set_regularization_method(LossIndex::RegularizationMethod::L2); quasi_newton_method.calculate_BFGS_inverse_hessian(quasi_newton_method_data); -/* - 
assert_true(are_equal(BFGS_inverse_hessian ,inverse_hessian, type(1e-4)), LOG); + + assert_true(are_equal(BFGS_inverse_hessian, inverse_hessian, type(1e-4)), LOG); */ } - -void QuasiNewtonMethodTest::test_calculate_inverse_hessian_approximation() +TEST(QuasiNewtonMethodTest, Train) { - cout << "test_calculate_inverse_hessian_approximation\n"; - - Tensor old_parameters; - Tensor old_gradient; - Tensor old_inverse_hessian; - - Tensor parameters; - Tensor gradient; - Tensor inverse_hessian; - - Tensor inverse_hessian_approximation; - - // Test - - samples_number = 1; - inputs_number = 1; - outputs_number = 1; - - data_set.set(samples_number, inputs_number, outputs_number); - data_set.set_data_random(); - - neural_network.set(NeuralNetwork::ModelType::Approximation, {inputs_number}, {}, {outputs_number}); - - quasi_newton_method.set_inverse_hessian_approximation_method(QuasiNewtonMethod::InverseHessianApproximationMethod::DFP); - - neural_network.set_parameters_constant(type(1)); - - quasi_newton_method.calculate_inverse_hessian_approximation(quasi_newton_method_data); /* - assert_true(inverse_hessian_approximation == inverse_hessian, LOG); -*/ - quasi_newton_method.set_inverse_hessian_approximation_method(QuasiNewtonMethod::InverseHessianApproximationMethod::DFP); - - neural_network.set_parameters_constant(type(1)); - - neural_network.set_parameters_constant(type(-0.5)); - - quasi_newton_method.calculate_inverse_hessian_approximation(quasi_newton_method_data); -/* - assert_true(inverse_hessian_approximation == inverse_hessian, LOG); -*/ - // Test - - quasi_newton_method.calculate_inverse_hessian_approximation(quasi_newton_method_data); -} - - -void QuasiNewtonMethodTest::test_perform_training() -{ - cout << "test_perform_training\n"; - type old_error = numeric_limits::max(); type error; @@ -145,10 +85,10 @@ void QuasiNewtonMethodTest::test_perform_training() inputs_number = 1; outputs_number = 1; - data_set.set(1,1,1); + data_set.set(1, 1, 1); data_set.set_data_constant(type(1)); - neural_network.set(NeuralNetwork::ModelType::Approximation, {inputs_number}, {}, {outputs_number}); + neural_network.set(NeuralNetwork::ModelType::Approximation, { inputs_number }, {}, { outputs_number }); neural_network.set_parameters_constant(type(1)); quasi_newton_method.set_maximum_epochs_number(1); @@ -159,10 +99,10 @@ void QuasiNewtonMethodTest::test_perform_training() // Test - data_set.set(1,1,1); + data_set.set(1, 1, 1); data_set.set_data_random(); - neural_network.set(NeuralNetwork::ModelType::Approximation, {inputs_number}, {}, {outputs_number}); + neural_network.set(NeuralNetwork::ModelType::Approximation, { inputs_number }, {}, { outputs_number }); neural_network.set_parameters_constant(-1); quasi_newton_method.set_maximum_epochs_number(1); @@ -213,30 +153,62 @@ void QuasiNewtonMethodTest::test_perform_training() training_results = quasi_newton_method.perform_training(); //assert_true(training_results.get_loss_decrease() <= minimum_loss_decrease, LOG); - +*/ } +/* + -void QuasiNewtonMethodTest::run_test_case() +void QuasiNewtonMethodTest::test_calculate_inverse_hessian_approximation() { - cout << "Running quasi-Newton method test case...\n"; + cout << "test_calculate_inverse_hessian_approximation\n"; + + Tensor old_parameters; + Tensor old_gradient; + Tensor old_inverse_hessian; + + Tensor parameters; + Tensor gradient; + Tensor inverse_hessian; + + Tensor inverse_hessian_approximation; - test_constructor(); + // Test - // Training + samples_number = 1; + inputs_number = 1; + outputs_number = 1; - 
test_calculate_DFP_inverse_hessian_approximation(); + data_set.set(samples_number, inputs_number, outputs_number); + data_set.set_data_random(); - test_calculate_BFGS_inverse_hessian_approximation(); + neural_network.set(NeuralNetwork::ModelType::Approximation, {inputs_number}, {}, {outputs_number}); - test_calculate_inverse_hessian_approximation(); + quasi_newton_method.set_inverse_hessian_approximation_method(QuasiNewtonMethod::InverseHessianApproximationMethod::DFP); - test_perform_training(); + neural_network.set_parameters_constant(type(1)); - cout << "End of quasi-Newton method test case.\n\n"; -} + quasi_newton_method.calculate_inverse_hessian_approximation(quasi_newton_method_data); + + assert_true(inverse_hessian_approximation == inverse_hessian, LOG); + + quasi_newton_method.set_inverse_hessian_approximation_method(QuasiNewtonMethod::InverseHessianApproximationMethod::DFP); + + neural_network.set_parameters_constant(type(1)); + + neural_network.set_parameters_constant(type(-0.5)); + + quasi_newton_method.calculate_inverse_hessian_approximation(quasi_newton_method_data); + assert_true(inverse_hessian_approximation == inverse_hessian, LOG); + + // Test + + quasi_newton_method.calculate_inverse_hessian_approximation(quasi_newton_method_data); } + +*/ + // OpenNN: Open Neural Networks Library. // Copyright (C) 2005-2024 Artificial Intelligence Techniques, SL. // diff --git a/tests/recurrent_layer_test.cpp b/tests/recurrent_layer_test.cpp index 772c76559..8c977245e 100644 --- a/tests/recurrent_layer_test.cpp +++ b/tests/recurrent_layer_test.cpp @@ -6,26 +6,26 @@ // Artificial Intelligence Techniques SL // artelnics@artelnics.com -#include +#include "pch.h" +#include "../opennn/recurrent_layer.h" -#include "../opennn/tensors.h" - -namespace opennn +TEST(RecurrentLayerTest, DefaultConstructor) { + RecurrentLayer recurrent_layer; -RecurrentLayerTest::RecurrentLayerTest() : UnitTesting() -{ +// EXPECT_EQ(quasi_newton_method.has_loss_index(), false); } -void RecurrentLayerTest::test_constructor() +TEST(RecurrentLayerTest, GeneralConstructor) { - cout << "test_constructor\n"; + + RecurrentLayer recurrent_layer; Index inputs_number; Index neurons_number; Index time_steps; - +/* // Test inputs_number = 1; @@ -44,8 +44,17 @@ void RecurrentLayerTest::test_constructor() recurrent_layer.set(inputs_number, neurons_number, time_steps); assert_true(recurrent_layer.get_parameters_number() == 18, LOG); + + EXPECT_EQ(quasi_newton_method.has_loss_index(), true); +*/ +} + + +TEST(RecurrentLayerTest, ForwardPropagate) +{ } +/* void RecurrentLayerTest::test_calculate_activations() { @@ -95,7 +104,7 @@ void RecurrentLayerTest::test_forward_propagate() input_data(0) = inputs.data(); input_pairs = {inputs.data(), {{samples_number, inputs_number}}}; -/* + recurrent_layer.forward_propagate({input_pairs}, &recurrent_layer_forward_propagation, is_training); outputs = recurrent_layer_forward_propagation.outputs; @@ -133,24 +142,10 @@ void RecurrentLayerTest::test_forward_propagate() recurrent_layer.forward_propagate({input_pairs}, &recurrent_layer_forward_propagation, is_training); outputs = recurrent_layer_forward_propagation.outputs; -*/ -} - - -void RecurrentLayerTest::run_test_case() -{ - cout << "Running recurrent layer test case...\n"; - - test_constructor(); - - test_calculate_activations(); - test_forward_propagate(); - - cout << "End of recurrent layer test case.\n\n"; } +*/ -} // OpenNN: Open Neural Networks Library. // Copyright (C) 2005-2024 Artificial Intelligence Techniques, SL. 
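The recurring change in this patch series is the migration from the bespoke UnitTesting harness (member-state fixtures, assert_true(condition, LOG) checks and run_test_case() dispatch) to standalone GoogleTest cases. A minimal sketch of the target pattern, built only from the recurrent_layer_test.cpp hunks above; the test name ParametersNumber and the concrete 1/1/1 sizes are illustrative and not part of the patch:

#include "pch.h"

#include "../opennn/recurrent_layer.h"

// Each old void TestClass::test_x() member becomes a standalone TEST case,
// and assert_true(a == b, LOG) becomes EXPECT_EQ(a, b), which reports both
// values on failure instead of a bare boolean condition.
TEST(RecurrentLayerTest, ParametersNumber)   // hypothetical test name
{
    RecurrentLayer recurrent_layer;

    recurrent_layer.set(1, 1, 1);   // inputs, neurons, time steps

    // 1 input weight + 1 recurrent weight + 1 bias, matching the
    // commented-out expectation in the constructor test above.
    EXPECT_EQ(recurrent_layer.get_parameters_number(), 3);
}

Note that a bare condition maps to EXPECT_TRUE(condition) or to the two-argument EXPECT_EQ(value, expected); the one-argument EXPECT_EQ(a == b) form that appears in some commented-out blocks of this patch will not compile if those blocks are re-enabled.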
diff --git a/tests/response_optimization_test.cpp b/tests/response_optimization_test.cpp index 08a150365..ac2a3e3c2 100644 --- a/tests/response_optimization_test.cpp +++ b/tests/response_optimization_test.cpp @@ -6,15 +6,9 @@ // Artificial Intelligence Techniques SL // artelnics@artelnics.com -namespace opennn -{ - -ResponseOptimizationTest::ResponseOptimizationTest() : UnitTesting() -{ - generate_neural_networks(); -} - +#include "pch.h" +/* void ResponseOptimizationTest::test_constructor() { cout << "test_constructor\n"; @@ -158,23 +152,8 @@ void ResponseOptimizationTest::test_perform_optimization() assert_true(results->optimal_variables(3) <= 0.0, LOG); } +*/ -void ResponseOptimizationTest::run_test_case() -{ - cout << "Running response optimization test case...\n"; - - test_constructor(); - - // Performance - - test_calculate_inputs(); - - test_perform_optimization(); - - cout << "End of response optimization test case.\n\n"; -} - -} // OpenNN: Open Neural Networks Library. // Copyright (C) 2005-2024 Artificial Intelligence Techniques, SL. diff --git a/tests/sum_squared_error_test.cpp b/tests/sum_squared_error_test.cpp deleted file mode 100644 index dd3d5dd72..000000000 --- a/tests/sum_squared_error_test.cpp +++ /dev/null @@ -1,509 +0,0 @@ -#include "pch.h" -/* - -#include "../opennn/back_propagation.h" -#include "../opennn/tensors.h" - -namespace opennn -{ - -SumSquaredErrorTest::SumSquaredErrorTest() : UnitTesting() -{ - sum_squared_error.set(&neural_network, &data_set); - - sum_squared_error.set_regularization_method(LossIndex::RegularizationMethod::NoRegularization); -} - - -void SumSquaredErrorTest::test_constructor() -{ - cout << "test_constructor\n"; - - // Default - - SumSquaredError sum_squared_error_1; - - assert_true(!sum_squared_error_1.has_neural_network(), LOG); - assert_true(!sum_squared_error_1.has_data_set(), LOG); - - // Neural network and data set - - SumSquaredError sum_squared_error_4(&neural_network, &data_set); - - assert_true(sum_squared_error_4.has_neural_network(), LOG); - assert_true(sum_squared_error_4.has_data_set(), LOG); -} - - -void SumSquaredErrorTest::test_back_propagate() -{ - cout << "test_back_propagate\n"; - - // Test approximation all zero - { - samples_number = 1; - inputs_number = 1; - outputs_number = 1; - neurons_number = 1; - bool is_training = true; - - // Data set - - data_set.set(samples_number, inputs_number, outputs_number); - data_set.set_data_constant(type(0)); - - data_set.set(DataSet::SampleUse::Training); - - training_samples_indices = data_set.get_sample_indices(DataSet::DataSet::SampleUse::Training); - - input_variables_indices = data_set.get_variable_indices(DataSet::VariableUse::Input); - target_variables_indices = data_set.get_variable_indices(DataSet::VariableUse::Target); - - batch.set(samples_number, &data_set); - batch.fill(training_samples_indices, input_variables_indices, target_variables_indices); - - // Neural network - - neural_network.set(NeuralNetwork::ModelType::Approximation, {inputs_number}, {neurons_number}, {outputs_number}); - neural_network.set_parameters_constant(type(0)); - - forward_propagation.set(samples_number, &neural_network); - neural_network.forward_propagate(batch.get_input_pairs(), forward_propagation, is_training); - - // Loss index - - back_propagation.set(samples_number, &sum_squared_error); - sum_squared_error.back_propagate(batch, forward_propagation, back_propagation); - - assert_true(back_propagation.errors.dimension(0) == samples_number, LOG); - 
assert_true(back_propagation.errors.dimension(1) == outputs_number, LOG); -/* - assert_true(abs(back_propagation.error) < NUMERIC_LIMITS_MIN, LOG); - assert_true(back_propagation.gradient.size() == inputs_number+inputs_number*neurons_number+outputs_number+outputs_number*neurons_number, LOG); - - assert_true(is_zero(back_propagation.gradient), LOG); - } - - // Test approximation all random - { - samples_number = type(1) + rand() % 5; - inputs_number = type(1) + rand()%5; - outputs_number = type(1) + rand()%5; - neurons_number = type(1) + rand()%5; - bool is_training = true; - - // Data set - - data_set.set(samples_number, inputs_number, outputs_number); - data_set.set_data_random(); - - data_set.set(DataSet::SampleUse::Training); - - training_samples_indices = data_set.get_sample_indices(DataSet::DataSet::SampleUse::Training); - input_variables_indices = data_set.get_variable_indices(DataSet::VariableUse::Input); - target_variables_indices = data_set.get_variable_indices(DataSet::VariableUse::Target); - - batch.set(samples_number, &data_set); - batch.fill(training_samples_indices, input_variables_indices, target_variables_indices); - - // Neural network - - neural_network.set(NeuralNetwork::ModelType::Approximation, {inputs_number}, {neurons_number}, {outputs_number}); - neural_network.set_parameters_random(); - - forward_propagation.set(samples_number, &neural_network); - neural_network.forward_propagate(batch.get_input_pairs(), forward_propagation, is_training); - - // Loss index - - back_propagation.set(samples_number, &sum_squared_error); - sum_squared_error.back_propagate(batch, forward_propagation, back_propagation); - - numerical_gradient = sum_squared_error.calculate_numerical_gradient(); - - assert_true(back_propagation.errors.dimension(0) == samples_number, LOG); - assert_true(back_propagation.errors.dimension(1) == outputs_number, LOG); - - assert_true(are_equal(back_propagation.gradient, numerical_gradient, type(1.0e-2)), LOG); - } - - // Test binary classification trivial - { - inputs_number = 1; - outputs_number = 1; - samples_number = 1; - bool is_training = true; - - // Data set - - data_set.set(samples_number, inputs_number, outputs_number); - data_set.set_data_constant(type(0)); - - training_samples_indices = data_set.get_sample_indices(DataSet::SampleUse::Training); - input_variables_indices = data_set.get_variable_indices(DataSet::VariableUse::Input); - target_variables_indices = data_set.get_variable_indices(DataSet::VariableUse::Target); - - batch.set(samples_number, &data_set); - batch.fill(training_samples_indices, input_variables_indices, target_variables_indices); - - // Neural network - - neural_network.set(NeuralNetwork::ModelType::Classification, {inputs_number}, {}, {outputs_number}); - neural_network.set_parameters_constant(type(0)); - - forward_propagation.set(samples_number, &neural_network); - neural_network.forward_propagate(batch.get_input_pairs(), forward_propagation, is_training); - - // Loss index - - back_propagation.set(samples_number, &sum_squared_error); - sum_squared_error.back_propagate(batch, forward_propagation, back_propagation); - - numerical_gradient = sum_squared_error.calculate_numerical_gradient(); - - assert_true(back_propagation.errors.dimension(0) == samples_number, LOG); - assert_true(back_propagation.errors.dimension(1) == outputs_number, LOG); - - assert_true(back_propagation.errors.dimension(0) == 1, LOG); - assert_true(back_propagation.errors.dimension(1) == 1, LOG); - assert_true(back_propagation.error() - type(0.25) < 
type(NUMERIC_LIMITS_MIN), LOG); - - assert_true(are_equal(back_propagation.gradient, numerical_gradient, type(1.0e-3)), LOG); - } - - // Test binary classification random samples, inputs, outputs, neurons - { - samples_number = 1 + rand()%10; - inputs_number = 1 + rand()%10; - outputs_number = 1; - neurons_number = 1 + rand()%10; - bool is_training = true; - - // Data set - - data_set.set(samples_number, inputs_number, outputs_number); - data_set.set_data_binary_random(); - data_set.set(DataSet::SampleUse::Training); - - training_samples_indices = data_set.get_sample_indices(DataSet::SampleUse::Training); - input_variables_indices = data_set.get_variable_indices(DataSet::VariableUse::Input); - target_variables_indices = data_set.get_variable_indices(DataSet::VariableUse::Target); - - batch.set(samples_number, &data_set); - batch.fill(training_samples_indices, input_variables_indices, target_variables_indices); - - // Neural network - - neural_network.set(NeuralNetwork::ModelType::Classification, {inputs_number}, {neurons_number}, {outputs_number}); - neural_network.set_parameters_random(); - - forward_propagation.set(samples_number, &neural_network); - neural_network.forward_propagate(batch.get_input_pairs(), forward_propagation, is_training); - - // Loss index - - back_propagation.set(samples_number, &sum_squared_error); - sum_squared_error.back_propagate(batch, forward_propagation, back_propagation); - - numerical_gradient = sum_squared_error.calculate_numerical_gradient(); - - assert_true(back_propagation.errors.dimension(0) == samples_number, LOG); - assert_true(back_propagation.errors.dimension(1) == outputs_number, LOG); - - assert_true(back_propagation.error() >= 0, LOG); - - assert_true(are_equal(back_propagation.gradient, numerical_gradient, type(1.0e-2)), LOG); - - } - - // Test forecasting trivial - - { - inputs_number = 1; - outputs_number = 1; - samples_number = 1; - bool is_training = true; - - // Data set - - data_set.set(samples_number, inputs_number, outputs_number); - data_set.set_data_constant(type(0)); - - training_samples_indices = data_set.get_sample_indices(DataSet::SampleUse::Training); - input_variables_indices = data_set.get_variable_indices(DataSet::VariableUse::Input); - target_variables_indices = data_set.get_variable_indices(DataSet::VariableUse::Target); - - batch.set(samples_number, &data_set); - batch.fill(training_samples_indices, input_variables_indices, target_variables_indices); - - // Neural network - /* - neural_network.set(NeuralNetwork::ModelType::Forecasting, {inputs_number, outputs_number}); - - neural_network.set_parameters_constant(type(0)); - - forward_propagation.set(samples_number, &neural_network); - neural_network.forward_propagate(batch.get_input_pairs(), forward_propagation, is_training); - - // Loss index - - back_propagation.set(samples_number, &sum_squared_error); - - //sum_squared_error.back_propagate(batch, forward_propagation, back_propagation); - - //assert_true(back_propagation.errors.dimension(0) == samples_number, LOG); - //assert_true(back_propagation.errors.dimension(1) == outputs_number, LOG); - - //assert_true(back_propagation.error() < type(1e-3), LOG); - - //assert_true(is_zero(back_propagation.gradient,type(1e-3)), LOG); - - } - - // Test forecasting random samples, inputs, outputs, neurons - { - samples_number = 1 + rand()%10; - inputs_number = 1 + rand()%10; - outputs_number = 1 + rand()%10; - neurons_number = 1 + rand()%10; - bool is_training = true; - - // Data set - - data_set.set(samples_number, inputs_number, 
outputs_number); - data_set.set_data_random(); - data_set.set(DataSet::SampleUse::Training); - - training_samples_indices = data_set.get_sample_indices(SampleUse::Training); - input_variables_indices = data_set.get_input_variables_indices(); - target_variables_indices = data_set.get_target_variables_indices(); - - batch.set(samples_number, &data_set); - batch.fill(training_samples_indices, input_variables_indices, target_variables_indices); - - // Neural network - - //neural_network.set(NeuralNetwork::ModelType::Forecasting, {inputs_number}, {neurons_number}, {outputs_number}); - - neural_network.set_parameters_random(); - - forward_propagation.set(samples_number, &neural_network); - //neural_network.forward_propagate(batch.get_input_pairs(), forward_propagation, is_training); - - // Loss index - - back_propagation.set(samples_number, &sum_squared_error); - sum_squared_error.back_propagate(batch, forward_propagation, back_propagation); - - numerical_gradient = sum_squared_error.calculate_numerical_gradient(); - - assert_true(back_propagation.errors.dimension(0) == samples_number, LOG); - assert_true(back_propagation.errors.dimension(1) == outputs_number, LOG); - - assert_true(back_propagation.error() >= type(0), LOG); - - assert_true(are_equal(back_propagation.gradient, numerical_gradient, type(1.0e-2)), LOG); - } - -} - - -void SumSquaredErrorTest::test_back_propagate_lm() -{ - cout << "test_back_propagate_lm\n"; - - // Test approximation random samples, inputs, outputs, neuronss - { - samples_number = 1 + rand()%10; - inputs_number = 1 + rand()%10; - outputs_number = 1 + rand()%10; - neurons_number = 1 + rand()%10; - bool is_training = true; - - // Data set - - data_set.set(samples_number, inputs_number, outputs_number); - data_set.set_data_random(); - data_set.set(DataSet::SampleUse::Training); - - training_samples_indices = data_set.get_sample_indices(DataSet::SampleUse::Training); - input_variables_indices = data_set.get_variable_indices(DataSet::VariableUse::Input); - target_variables_indices = data_set.get_variable_indices(DataSet::VariableUse::Target); - - batch.set(samples_number, &data_set); - batch.fill(training_samples_indices, input_variables_indices, target_variables_indices); - - // Neural network - - neural_network.set(NeuralNetwork::ModelType::Approximation, {inputs_number}, {neurons_number}, {outputs_number}); - neural_network.set_parameters_random(); - - forward_propagation.set(samples_number, &neural_network); - neural_network.forward_propagate(batch.get_input_pairs(), forward_propagation, is_training); - - // Loss index - - back_propagation.set(samples_number, &sum_squared_error); - sum_squared_error.back_propagate(batch, forward_propagation, back_propagation); - - back_propagation_lm.set(samples_number, &sum_squared_error); - sum_squared_error.back_propagate_lm(batch, forward_propagation, back_propagation_lm); - - numerical_gradient = sum_squared_error.calculate_numerical_gradient(); - numerical_jacobian = sum_squared_error.calculate_numerical_jacobian(); - - assert_true(back_propagation_lm.errors.dimension(0) == samples_number, LOG); - assert_true(back_propagation_lm.errors.dimension(1) == outputs_number, LOG); - - assert_true(back_propagation_lm.error() >= type(0), LOG); - assert_true(abs(back_propagation.error() - back_propagation_lm.error()) < type(1.0e-1), LOG); - - assert_true(are_equal(back_propagation_lm.gradient, numerical_gradient, type(1.0e-1)), LOG); - assert_true(are_equal(back_propagation_lm.squared_errors_jacobian, numerical_jacobian, type(1.0e-1)), 
LOG); - - } - - // Test binary classification random samples, inputs, outputs, neurons - { - samples_number = type(1) + rand()%10; - inputs_number = type(1) + rand()%10; - outputs_number = type(1) + rand()%10; - neurons_number = type(1) + rand()%10; - bool is_training = true; - - // Data set - - data_set.set(samples_number, inputs_number, outputs_number); - data_set.set_data_binary_random(); - data_set.set(DataSet::SampleUse::Training); - - training_samples_indices = data_set.get_sample_indices(DataSet::SampleUse::Training); - input_variables_indices = data_set.get_variable_indices(DataSet::VariableUse::Input); - target_variables_indices = data_set.get_variable_indices(DataSet::VariableUse::Target); - - batch.set(samples_number, &data_set); - batch.fill(training_samples_indices, input_variables_indices, target_variables_indices); - - // Neural network - - neural_network.set(NeuralNetwork::ModelType::Classification, {inputs_number}, {neurons_number}, {outputs_number}); - neural_network.set_parameters_random(); - - forward_propagation.set(samples_number, &neural_network); - neural_network.forward_propagate(batch.get_input_pairs(), forward_propagation, is_training); - - // Loss index - - back_propagation.set(samples_number, &sum_squared_error); - sum_squared_error.back_propagate(batch, forward_propagation, back_propagation); - - // visual studio not running - - back_propagation_lm.set(samples_number, &sum_squared_error); - - sum_squared_error.back_propagate_lm(batch, forward_propagation, back_propagation_lm); - - numerical_gradient = sum_squared_error.calculate_numerical_gradient(); - numerical_jacobian = sum_squared_error.calculate_numerical_jacobian(); - - assert_true(back_propagation_lm.errors.dimension(0) == samples_number, LOG); - assert_true(back_propagation_lm.errors.dimension(1) == outputs_number, LOG); - - assert_true(back_propagation_lm.error() >= type(0), LOG); - assert_true(abs(back_propagation.error() - back_propagation_lm.error()) < type(1.0e-2), LOG); - - assert_true(are_equal(back_propagation_lm.gradient, numerical_gradient, type(1.0e-2)), LOG); - assert_true(are_equal(back_propagation_lm.squared_errors_jacobian, numerical_jacobian, type(1.0e-2)), LOG); - - } - - // Test multiple classification random samples, inputs, outputs, neurons - { - samples_number = type(1) + rand()%10; - inputs_number = type(1) + rand()%10; - outputs_number = type(1) + rand()%10; - neurons_number = type(1) + rand()%10; - bool is_training = true; - - // Data set - - data_set.set(samples_number, inputs_number, outputs_number); - data_set.set_data_random(); - data_set.set(DataSet::SampleUse::Training); - - training_samples_indices = data_set.get_sample_indices(DataSet::SampleUse::Training); - input_variables_indices = data_set.get_variable_indices(DataSet::VariableUse::Input); - target_variables_indices = data_set.get_variable_indices(DataSet::VariableUse::Target); - - batch.set(samples_number, &data_set); - batch.fill(training_samples_indices, input_variables_indices, target_variables_indices); - - // Neural network - - neural_network.set(NeuralNetwork::ModelType::Classification, - {inputs_number}, {neurons_number}, {outputs_number}); - neural_network.set_parameters_random(); - - forward_propagation.set(samples_number, &neural_network); - neural_network.forward_propagate(batch.get_input_pairs(), forward_propagation, is_training); - - // Loss index - - back_propagation.set(samples_number, &sum_squared_error); - sum_squared_error.back_propagate(batch, forward_propagation, back_propagation); - - // visual 
studio not running - - back_propagation_lm.set(samples_number, &sum_squared_error); - - //sum_squared_error.back_propagate_lm(batch, forward_propagation, back_propagation_lm); - - numerical_gradient = sum_squared_error.calculate_numerical_gradient(); - numerical_jacobian = sum_squared_error.calculate_numerical_jacobian(); - - assert_true(back_propagation_lm.errors.dimension(0) == samples_number, LOG); - assert_true(back_propagation_lm.errors.dimension(1) == outputs_number, LOG); - - assert_true(back_propagation_lm.error() >= type(0), LOG); - assert_true(abs(back_propagation.error() - back_propagation_lm.error()) < type(1.0e-2), LOG); - - assert_true(are_equal(back_propagation_lm.gradient, numerical_gradient, type(1.0e-2)), LOG); - assert_true(are_equal(back_propagation_lm.squared_errors_jacobian, numerical_jacobian, type(1.0e-2)), LOG); - - } -} - - -void SumSquaredErrorTest::run_test_case() -{ - cout << "Running sum squared error test case...\n"; - - test_constructor(); - - // Back propagate - - test_back_propagate(); - - test_back_propagate_lm(); - - cout << "End of sum squared error test case.\n\n"; -} - -} -// OpenNN: Open Neural Networks Library. -// Copyright (C) 2005-2024 Artificial Intelligence Techniques, SL. -// -// This library is free software; you can redistribute it and/or -// modify it under the terms of the GNU Lesser General Public -// License as published by the Free Software Foundation; either -// version 2.1 of the License, or any later version. -// -// This library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -// Lesser General Public License for more details. - -// You should have received a copy of the GNU Lesser General Public -// License along with this library; if not, write to the Free Software -// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -*/ diff --git a/tests/tensor_utilities_test.cpp b/tests/tensors_test.cpp similarity index 100% rename from tests/tensor_utilities_test.cpp rename to tests/tensors_test.cpp diff --git a/tests/tests.vcxproj b/tests/tests.vcxproj index ca36fe9b6..764ca825f 100644 --- a/tests/tests.vcxproj +++ b/tests/tests.vcxproj @@ -133,12 +133,14 @@ + + + - - + Create From d264b603fac3b2842fb00c14978531996a72b630 Mon Sep 17 00:00:00 2001 From: RoberLopez Date: Thu, 26 Dec 2024 16:36:22 +0100 Subject: [PATCH 2/2] clean --- tests/normalized_squared_error_test.cpp | 2 - tests/quasi_newton_method_test.cpp | 20 +- tests/recurrent_layer_test.cpp | 65 ++-- tests/response_optimization_test.cpp | 74 ++-- tests/tensors_test.cpp | 27 +- tests/testing_analysis_test.cpp | 428 +++++++++--------------- tests/time_series_data_set_test.cpp | 121 ++----- tests/training_strategy_test.cpp | 49 +-- tests/transformer_test.cpp | 121 +++---- tests/unscaling_layer_test.cpp | 34 +- tests/weighted_squared_error_test.cpp | 68 ++-- 11 files changed, 334 insertions(+), 675 deletions(-) diff --git a/tests/normalized_squared_error_test.cpp b/tests/normalized_squared_error_test.cpp index bec8165ad..be06f65f0 100644 --- a/tests/normalized_squared_error_test.cpp +++ b/tests/normalized_squared_error_test.cpp @@ -417,8 +417,6 @@ void NormalizedSquaredErrorTest::test_back_propagate_lm() void NormalizedSquaredErrorTest::test_calculate_normalization_coefficient() { - cout << "test_calculate_normalization_coefficient\n"; - Index samples_number; Index inputs_number; Index outputs_number; diff --git 
a/tests/quasi_newton_method_test.cpp b/tests/quasi_newton_method_test.cpp index c1495726f..d6e8be725 100644 --- a/tests/quasi_newton_method_test.cpp +++ b/tests/quasi_newton_method_test.cpp @@ -49,7 +49,7 @@ TEST(QuasiNewtonMethodTest, DFP) quasi_newton_method.calculate_DFP_inverse_hessian(quasi_newton_method_data); - assert_true(are_equal(quasi_newton_method_data.inverse_hessian, inverse_hessian, type(1e-4)), LOG); + EXPECT_EQ(are_equal(quasi_newton_method_data.inverse_hessian, inverse_hessian, type(1e-4))); */ } @@ -68,7 +68,7 @@ TEST(QuasiNewtonMethodTest, BFGS) quasi_newton_method.calculate_BFGS_inverse_hessian(quasi_newton_method_data); - assert_true(are_equal(BFGS_inverse_hessian, inverse_hessian, type(1e-4)), LOG); + EXPECT_EQ(are_equal(BFGS_inverse_hessian, inverse_hessian, type(1e-4))); */ } @@ -95,7 +95,7 @@ TEST(QuasiNewtonMethodTest, Train) quasi_newton_method.set_display(false); training_results = quasi_newton_method.perform_training(); - assert_true(training_results.get_epochs_number() <= 1, LOG); + EXPECT_EQ(training_results.get_epochs_number() <= 1); // Test @@ -110,7 +110,7 @@ TEST(QuasiNewtonMethodTest, Train) training_results = quasi_newton_method.perform_training(); error = training_results.get_training_error(); - assert_true(error < old_error, LOG); + EXPECT_EQ(error < old_error); // Test @@ -122,7 +122,7 @@ TEST(QuasiNewtonMethodTest, Train) training_results = quasi_newton_method.perform_training(); error = training_results.get_training_error(); - assert_true(error <= old_error, LOG); + EXPECT_EQ(error <= old_error); // Loss goal @@ -137,7 +137,7 @@ TEST(QuasiNewtonMethodTest, Train) training_results = quasi_newton_method.perform_training(); - //assert_true(training_results.get_loss() <= training_loss_goal, LOG); + //EXPECT_EQ(training_results.get_loss() <= training_loss_goal); // Minimum loss decrease @@ -152,7 +152,7 @@ TEST(QuasiNewtonMethodTest, Train) training_results = quasi_newton_method.perform_training(); - //assert_true(training_results.get_loss_decrease() <= minimum_loss_decrease, LOG); + //EXPECT_EQ(training_results.get_loss_decrease() <= minimum_loss_decrease); */ } @@ -161,8 +161,6 @@ TEST(QuasiNewtonMethodTest, Train) void QuasiNewtonMethodTest::test_calculate_inverse_hessian_approximation() { - cout << "test_calculate_inverse_hessian_approximation\n"; - Tensor old_parameters; Tensor old_gradient; Tensor old_inverse_hessian; @@ -190,7 +188,7 @@ void QuasiNewtonMethodTest::test_calculate_inverse_hessian_approximation() quasi_newton_method.calculate_inverse_hessian_approximation(quasi_newton_method_data); - assert_true(inverse_hessian_approximation == inverse_hessian, LOG); + EXPECT_EQ(inverse_hessian_approximation == inverse_hessian); quasi_newton_method.set_inverse_hessian_approximation_method(QuasiNewtonMethod::InverseHessianApproximationMethod::DFP); @@ -200,7 +198,7 @@ void QuasiNewtonMethodTest::test_calculate_inverse_hessian_approximation() quasi_newton_method.calculate_inverse_hessian_approximation(quasi_newton_method_data); - assert_true(inverse_hessian_approximation == inverse_hessian, LOG); + EXPECT_EQ(inverse_hessian_approximation == inverse_hessian); // Test diff --git a/tests/recurrent_layer_test.cpp b/tests/recurrent_layer_test.cpp index 8c977245e..2fab15af1 100644 --- a/tests/recurrent_layer_test.cpp +++ b/tests/recurrent_layer_test.cpp @@ -34,7 +34,7 @@ TEST(RecurrentLayerTest, GeneralConstructor) recurrent_layer.set(inputs_number, neurons_number, time_steps); - assert_true(recurrent_layer.get_parameters_number() == 3, LOG); + 
EXPECT_EQ(recurrent_layer.get_parameters_number() == 3); // Test @@ -43,7 +43,7 @@ TEST(RecurrentLayerTest, GeneralConstructor) recurrent_layer.set(inputs_number, neurons_number, time_steps); - assert_true(recurrent_layer.get_parameters_number() == 18, LOG); + EXPECT_EQ(recurrent_layer.get_parameters_number() == 18); EXPECT_EQ(quasi_newton_method.has_loss_index(), true); */ @@ -52,32 +52,11 @@ TEST(RecurrentLayerTest, GeneralConstructor) TEST(RecurrentLayerTest, ForwardPropagate) { -} - -/* -void RecurrentLayerTest::test_calculate_activations() -{ - cout << "test_calculate_activations\n"; - - Tensor combinations; - Tensor activations; - Tensor activation_derivatives; - - Tensor numerical_activation_derivative; - Tensor maximum_difference; - -} - - -void RecurrentLayerTest::test_forward_propagate() -{ - cout << "test_forward_propagate\n"; - - neurons_number = 4; - samples_number = 2; - inputs_number = 3; - time_steps = 1; + const Index neurons_number = 4; + const Index samples_number = 2; + const Index inputs_number = 3; + const Index time_steps = 1; bool is_training = false; Tensor outputs; @@ -88,7 +67,7 @@ void RecurrentLayerTest::test_forward_propagate() Tensor new_biases; pair input_pairs; - + /* recurrent_layer.set(inputs_number, neurons_number, time_steps); recurrent_layer.set_activation_function(RecurrentLayer::ActivationFunction::HyperbolicTangent); @@ -103,12 +82,12 @@ void RecurrentLayerTest::test_forward_propagate() Tensor input_data(1); input_data(0) = inputs.data(); - input_pairs = {inputs.data(), {{samples_number, inputs_number}}}; + input_pairs = { inputs.data(), {{samples_number, inputs_number}} }; - recurrent_layer.forward_propagate({input_pairs}, &recurrent_layer_forward_propagation, is_training); + recurrent_layer.forward_propagate({ input_pairs }, &recurrent_layer_forward_propagation, is_training); outputs = recurrent_layer_forward_propagation.outputs; - + // Test samples_number = 3; @@ -118,15 +97,15 @@ void RecurrentLayerTest::test_forward_propagate() recurrent_layer.set(inputs_number, neurons_number, time_steps); - inputs.resize(samples_number,inputs_number); + inputs.resize(samples_number, inputs_number); inputs.setConstant(type(1)); recurrent_layer.set_activation_function("SoftPlus"); recurrent_layer.set_timesteps(3); - new_weights.resize(2,2); + new_weights.resize(2, 2); new_weights.setConstant(type(1)); - new_recurrent_weights.resize(2,2); + new_recurrent_weights.resize(2, 2); new_recurrent_weights.setConstant(type(1)); new_biases.resize(2); new_biases.setConstant(type(1)); @@ -137,13 +116,27 @@ void RecurrentLayerTest::test_forward_propagate() parameters = recurrent_layer.get_parameters(); - input_pairs = {inputs.data(), {{samples_number, inputs_number}}}; + input_pairs = { inputs.data(), {{samples_number, inputs_number}} }; - recurrent_layer.forward_propagate({input_pairs}, &recurrent_layer_forward_propagation, is_training); + recurrent_layer.forward_propagate({ input_pairs }, &recurrent_layer_forward_propagation, is_training); outputs = recurrent_layer_forward_propagation.outputs; +*/ +} + +/* + +void RecurrentLayerTest::test_calculate_activations() +{ + Tensor combinations; + Tensor activations; + Tensor activation_derivatives; + + Tensor numerical_activation_derivative; + Tensor maximum_difference; } + */ diff --git a/tests/response_optimization_test.cpp b/tests/response_optimization_test.cpp index ac2a3e3c2..02280bed7 100644 --- a/tests/response_optimization_test.cpp +++ b/tests/response_optimization_test.cpp @@ -11,18 +11,16 @@ /* void 
ResponseOptimizationTest::test_constructor() { - cout << "test_constructor\n"; - ResponseOptimization response_optimization_1(&neural_network); - assert_true(response_optimization_1.get_inputs_conditions()(0) == ResponseOptimization::Condition::None, LOG); - assert_true(response_optimization_1.get_outputs_conditions()(0) == ResponseOptimization::Condition::None, LOG); + EXPECT_EQ(response_optimization_1.get_inputs_conditions()(0) == ResponseOptimization::Condition::None); + EXPECT_EQ(response_optimization_1.get_outputs_conditions()(0) == ResponseOptimization::Condition::None); ResponseOptimization response_optimization_2(&neural_network_2); - assert_true(response_optimization_2.get_inputs_conditions()(0) == ResponseOptimization::Condition::None, LOG); - assert_true(response_optimization_2.get_inputs_conditions()(1) == ResponseOptimization::Condition::None, LOG); - assert_true(response_optimization_2.get_outputs_conditions()(0) == ResponseOptimization::Condition::None, LOG); - assert_true(response_optimization_2.get_outputs_conditions()(1) == ResponseOptimization::Condition::None, LOG); + EXPECT_EQ(response_optimization_2.get_inputs_conditions()(0) == ResponseOptimization::Condition::None); + EXPECT_EQ(response_optimization_2.get_inputs_conditions()(1) == ResponseOptimization::Condition::None); + EXPECT_EQ(response_optimization_2.get_outputs_conditions()(0) == ResponseOptimization::Condition::None); + EXPECT_EQ(response_optimization_2.get_outputs_conditions()(1) == ResponseOptimization::Condition::None); ResponseOptimization response_optimization_3; } @@ -30,26 +28,22 @@ void ResponseOptimizationTest::test_constructor() void ResponseOptimizationTest::test_calculate_inputs() { - cout << "test_calculate_inputs\n"; - ResponseOptimization response_optimization(&neural_network, &data_set); Tensor inputs = response_optimization.calculate_inputs(); - assert_true(inputs.dimension(0) == response_optimization.get_evaluations_number(), LOG); - assert_true(inputs.dimension(1) == neural_network.get_inputs_number(), LOG); + EXPECT_EQ(inputs.dimension(0) == response_optimization.get_evaluations_number()); + EXPECT_EQ(inputs.dimension(1) == neural_network.get_inputs_number()); - assert_true(inputs(0) <= response_optimization.get_inputs_maximums()(1), LOG); - assert_true(inputs(1) <= response_optimization.get_inputs_maximums()(1), LOG); - assert_true(inputs(0) >= response_optimization.get_inputs_minimums()(1), LOG); - assert_true(inputs(1) >= response_optimization.get_inputs_minimums()(1), LOG); + EXPECT_EQ(inputs(0) <= response_optimization.get_inputs_maximums()(1)); + EXPECT_EQ(inputs(1) <= response_optimization.get_inputs_maximums()(1)); + EXPECT_EQ(inputs(0) >= response_optimization.get_inputs_minimums()(1)); + EXPECT_EQ(inputs(1) >= response_optimization.get_inputs_minimums()(1)); } void ResponseOptimizationTest::test_perform_optimization() { - cout << "test_perform_optimization\n"; - ResponseOptimization response_optimization(&neural_network,&data_set); // Empty results @@ -59,7 +53,7 @@ void ResponseOptimizationTest::test_perform_optimization() response_optimization.set_output_condition(0,ResponseOptimization::Condition::GreaterEqualTo,conditions_values); ResponseOptimizationResults* results = response_optimization.perform_optimization(); - assert_true(results->optimal_variables.size() == 0, LOG); + EXPECT_EQ(results->optimal_variables.size() == 0); // Trivial case 1 @@ -70,9 +64,9 @@ void ResponseOptimizationTest::test_perform_optimization() 
response_optimization.set_output_condition(0,ResponseOptimization::Condition::GreaterEqualTo,conditions_values);

     results = response_optimization.perform_optimization();

-    assert_true(results->optimal_variables(0) = 1, LOG);
-    assert_true(results->optimal_variables(1) <= 1, LOG);
-    assert_true(results->optimal_variables(2) >= 1, LOG);
+    EXPECT_EQ(results->optimal_variables(0), 1);
+    EXPECT_LE(results->optimal_variables(1), 1);
+    EXPECT_GE(results->optimal_variables(2), 1);

     // Trivial case 2

@@ -87,9 +81,9 @@ void ResponseOptimizationTest::test_perform_optimization()
     response_optimization.set_output_condition(0,ResponseOptimization::Condition::Between,conditions_values);

     results = response_optimization.perform_optimization();

-    assert_true(results->optimal_variables(0) = 1, LOG);
-    assert_true(results->optimal_variables(1) <= 1, LOG);
-    assert_true(1 <= results->optimal_variables(2) <= 2.5, LOG);
+    EXPECT_EQ(results->optimal_variables(0), 1);
+    EXPECT_LE(results->optimal_variables(1), 1);
+    EXPECT_TRUE(1 <= results->optimal_variables(2) && results->optimal_variables(2) <= 2.5);

     // Multiple outputs case 1

@@ -101,10 +95,10 @@ void ResponseOptimizationTest::test_perform_optimization()
     response_optimization.set_input_condition(1,ResponseOptimization::Condition::LessEqualTo,conditions_values);

     results = response_optimization.perform_optimization();

-    assert_true(results->optimal_variables(0) = 1, LOG);
-    assert_true(results->optimal_variables(1) <= 1, LOG);
-    assert_true(1 <= results->optimal_variables(2) <= 3.0, LOG);
-    assert_true(-1 <= results->optimal_variables(3) <= type(1), LOG);
+    EXPECT_EQ(results->optimal_variables(0), 1);
+    EXPECT_LE(results->optimal_variables(1), 1);
+    EXPECT_TRUE(1 <= results->optimal_variables(2) && results->optimal_variables(2) <= 3.0);
+    EXPECT_TRUE(-1 <= results->optimal_variables(3) && results->optimal_variables(3) <= type(1));

     // Multiple outputs case 2

@@ -121,11 +115,11 @@ void ResponseOptimizationTest::test_perform_optimization()
     response_optimization.set_output_condition(1,ResponseOptimization::Condition::Between,conditions_values);

     results = response_optimization.perform_optimization();

-    assert_true(results->optimal_variables(0) = 1, LOG);
-    assert_true(results->optimal_variables(1) <= 1, LOG);
-    assert_true(1 <= results->optimal_variables(2) <= 2.0, LOG);
-    assert_true(type(-1) <= results->optimal_variables(3), LOG);
-    assert_true(results->optimal_variables(3) <= type(0), LOG);
+    EXPECT_EQ(results->optimal_variables(0), 1);
+    EXPECT_LE(results->optimal_variables(1), 1);
+    EXPECT_TRUE(1 <= results->optimal_variables(2) && results->optimal_variables(2) <= 2.0);
+    EXPECT_GE(results->optimal_variables(3), type(-1));
+    EXPECT_LE(results->optimal_variables(3), type(0));

     // Multiple outputs case 3

@@ -143,13 +137,13 @@ void ResponseOptimizationTest::test_perform_optimization()
     response_optimization.set_output_condition(1,ResponseOptimization::Condition::Between,conditions_values);

     results = response_optimization.perform_optimization();

-    assert_true(results->optimal_variables(0) >= 0.5, LOG);
+    EXPECT_GE(results->optimal_variables(0), 0.5);

-    assert_true(results->optimal_variables(1) >= 0.5, LOG);
-    assert_true(results->optimal_variables(2) >= 0.0, LOG);
-    assert_true(results->optimal_variables(2) <= 3.0, LOG);
-    assert_true(results->optimal_variables(3) >= type(-1), LOG);
-    assert_true(results->optimal_variables(3) <= 0.0, LOG);
+    EXPECT_GE(results->optimal_variables(1), 0.5);
+    EXPECT_GE(results->optimal_variables(2), 0.0);
+    EXPECT_LE(results->optimal_variables(2), 3.0);
+    EXPECT_GE(results->optimal_variables(3), type(-1));
+    EXPECT_LE(results->optimal_variables(3), 
0.0); } */ diff --git a/tests/tensors_test.cpp b/tests/tensors_test.cpp index 37efbc918..22a74433d 100644 --- a/tests/tensors_test.cpp +++ b/tests/tensors_test.cpp @@ -1,20 +1,11 @@ #include "pch.h" -/* #include "../opennn/tensors.h" -namespace opennn -{ - -TensorsTest::TensorsTest() : UnitTesting() -{ -} - +/* void TensorsTest::test_fill_tensor_data() { - cout << "test_fill_tensor_data\n"; - Tensor submatrix; Tensor rows_indices; @@ -35,14 +26,12 @@ void TensorsTest::test_fill_tensor_data() fill_tensor_data(matrix, rows_indices, columns_indices, submatrix.data()); - assert_true(is_equal(submatrix, type(3.1416)), LOG); + EXPECT_EQ(is_equal(submatrix, type(3.1416))); } void TensorsTest::test_calculate_rank() { - cout << "test_calculate_rank\n"; - Tensor rank_greater; Tensor rank_less; @@ -55,18 +44,6 @@ void TensorsTest::test_calculate_rank() rank_less = calculate_rank_less(vector); } - -void TensorsTest::run_test_case() -{ - cout << "Running tensors test case...\n"; - - test_fill_tensor_data(); - - test_calculate_rank(); - - cout << "End of tensors test case.\n\n"; -} - } // OpenNN: Open Neural Networks Library. diff --git a/tests/testing_analysis_test.cpp b/tests/testing_analysis_test.cpp index 31f128d82..6abf93dc3 100644 --- a/tests/testing_analysis_test.cpp +++ b/tests/testing_analysis_test.cpp @@ -1,34 +1,22 @@ #include "pch.h" -/* - -namespace opennn -{ -TestingAnalysisTest::TestingAnalysisTest() : UnitTesting() -{ - testing_analysis.set_neural_network(&neural_network); - testing_analysis.set_data_set(&data_set); -} +/* void TestingAnalysisTest::test_constructor() { - cout << "test_constructor\n"; - // Neural network and data set constructor TestingAnalysis testing_analysis(&neural_network,&data_set); - assert_true(testing_analysis.get_neural_network(), LOG); + EXPECT_EQ(testing_analysis.get_neural_network()); - assert_true(testing_analysis.get_data_set(), LOG); + EXPECT_EQ(testing_analysis.get_data_set()); } void TestingAnalysisTest::test_calculate_error_data() { - cout << "test_calculate_error_data\n"; - Tensor error_data; // Test @@ -46,17 +34,15 @@ void TestingAnalysisTest::test_calculate_error_data() error_data = testing_analysis.calculate_error_data(); - assert_true(error_data.size() == 3, LOG); - assert_true(error_data.dimension(0) == 1, LOG); - assert_true(error_data.dimension(1) == 3, LOG); - assert_true(static_cast(error_data(0,0,0)) == 0.0, LOG); + EXPECT_EQ(error_data.size() == 3); + EXPECT_EQ(error_data.dimension(0) == 1); + EXPECT_EQ(error_data.dimension(1) == 3); + EXPECT_EQ(static_cast(error_data(0,0,0)) == 0.0); } void TestingAnalysisTest::test_calculate_percentage_error_data() { - cout << "test_calculate_percentage_error_data\n"; - Tensor error_data; // Test @@ -74,16 +60,14 @@ void TestingAnalysisTest::test_calculate_percentage_error_data() error_data = testing_analysis.calculate_percentage_error_data(); - assert_true(error_data.size() == 1, LOG); - assert_true(error_data.dimension(1) == 1, LOG); - assert_true(static_cast(error_data(0,0)) == 0.0, LOG); + EXPECT_EQ(error_data.size() == 1); + EXPECT_EQ(error_data.dimension(1) == 1); + EXPECT_EQ(static_cast(error_data(0,0)) == 0.0); } void TestingAnalysisTest::test_calculate_absolute_errors_descriptives() { - cout << "test_calculate_absolute_errors_descriptives\n"; - Tensor error_data; // Test @@ -101,18 +85,16 @@ void TestingAnalysisTest::test_calculate_absolute_errors_descriptives() error_data = testing_analysis.calculate_absolute_errors_descriptives(); - assert_true(error_data.size() == 1, LOG); - 
assert_true(static_cast(error_data[0].minimum) == 0.0, LOG); - assert_true(static_cast(error_data[0].maximum) == 0.0, LOG); - assert_true(static_cast(error_data[0].mean) == 0.0, LOG); - assert_true(static_cast(error_data[0].standard_deviation) == 0.0, LOG); + EXPECT_EQ(error_data.size() == 1); + EXPECT_EQ(static_cast(error_data[0].minimum) == 0.0); + EXPECT_EQ(static_cast(error_data[0].maximum) == 0.0); + EXPECT_EQ(static_cast(error_data[0].mean) == 0.0); + EXPECT_EQ(static_cast(error_data[0].standard_deviation) == 0.0); } void TestingAnalysisTest::test_calculate_percentage_errors_descriptives() { - cout << "test_calculate_percentage_error_descriptives\n"; - Tensor error_data; // Test @@ -130,15 +112,13 @@ void TestingAnalysisTest::test_calculate_percentage_errors_descriptives() error_data = testing_analysis.calculate_percentage_errors_descriptives(); - assert_true(error_data.size() == 1, LOG); - assert_true(static_cast(error_data[0].standard_deviation) == 0.0, LOG); + EXPECT_EQ(error_data.size() == 1); + EXPECT_EQ(static_cast(error_data[0].standard_deviation) == 0.0); } void TestingAnalysisTest::test_calculate_error_data_descriptives() { - cout << "test_calculate_error_data_descriptives\n"; - Tensor, 1> error_data_statistics; // Test @@ -156,23 +136,21 @@ void TestingAnalysisTest::test_calculate_error_data_descriptives() error_data_statistics = testing_analysis.calculate_error_data_descriptives(); - assert_true(error_data_statistics.size() == 1, LOG); - assert_true(error_data_statistics[0].size() == 3, LOG); - assert_true(static_cast(error_data_statistics[0][0].minimum) == 0.0, LOG); - assert_true(static_cast(error_data_statistics[0][0].maximum) == 0.0, LOG); - assert_true(static_cast(error_data_statistics[0][0].mean) == 0.0, LOG); - assert_true(static_cast(error_data_statistics[0][0].standard_deviation) == 0.0, LOG); - assert_true(static_cast(error_data_statistics[0][2].minimum) == 0.0, LOG); - assert_true(static_cast(error_data_statistics[0][2].maximum) == 0.0, LOG); - assert_true(static_cast(error_data_statistics[0][2].mean) == 0.0, LOG); - assert_true(static_cast(error_data_statistics[0][2].standard_deviation) == 0.0, LOG); + EXPECT_EQ(error_data_statistics.size() == 1); + EXPECT_EQ(error_data_statistics[0].size() == 3); + EXPECT_EQ(static_cast(error_data_statistics[0][0].minimum) == 0.0); + EXPECT_EQ(static_cast(error_data_statistics[0][0].maximum) == 0.0); + EXPECT_EQ(static_cast(error_data_statistics[0][0].mean) == 0.0); + EXPECT_EQ(static_cast(error_data_statistics[0][0].standard_deviation) == 0.0); + EXPECT_EQ(static_cast(error_data_statistics[0][2].minimum) == 0.0); + EXPECT_EQ(static_cast(error_data_statistics[0][2].maximum) == 0.0); + EXPECT_EQ(static_cast(error_data_statistics[0][2].mean) == 0.0); + EXPECT_EQ(static_cast(error_data_statistics[0][2].standard_deviation) == 0.0); } void TestingAnalysisTest::test_calculate_error_data_histograms() { - cout << "test_calculate_error_data_histograms\n"; - Tensor error_data_histograms; // Test @@ -190,19 +168,15 @@ void TestingAnalysisTest::test_calculate_error_data_histograms() error_data_histograms = testing_analysis.calculate_error_data_histograms(); - assert_true(error_data_histograms.size() == 1, LOG); - assert_true(error_data_histograms[0].get_bins_number() == 10, LOG); + EXPECT_EQ(error_data_histograms.size() == 1); + EXPECT_EQ(error_data_histograms[0].get_bins_number() == 10); } void TestingAnalysisTest::test_calculate_maximal_errors() { - cout << "test_calculate_maximal_errors\n"; - Tensor, 1> maximal_errors; - // Test - 
samples_number = 4; inputs_number = 1; targets_number = 1; @@ -216,15 +190,13 @@ void TestingAnalysisTest::test_calculate_maximal_errors() maximal_errors = testing_analysis.calculate_maximal_errors(2); - assert_true(maximal_errors.rank() == 1, LOG); - assert_true(maximal_errors[0](0) == 0 , LOG); + EXPECT_EQ(maximal_errors.rank() == 1); + EXPECT_EQ(maximal_errors[0](0) == 0 ); } void TestingAnalysisTest::test_linear_regression() { - cout << "test_linear_regression\n"; - Index neurons_number; Tensor linear_correlation; @@ -245,17 +217,15 @@ void TestingAnalysisTest::test_linear_regression() linear_correlation = testing_analysis.linear_correlation(); - assert_true(linear_correlation.size() == 1, LOG); - assert_true(isnan(linear_correlation(0).a), LOG); - assert_true(isnan(linear_correlation(0).b), LOG); - assert_true(isnan(linear_correlation(0).r), LOG); + EXPECT_EQ(linear_correlation.size() == 1); + EXPECT_EQ(isnan(linear_correlation(0).a)); + EXPECT_EQ(isnan(linear_correlation(0).b)); + EXPECT_EQ(isnan(linear_correlation(0).r)); } void TestingAnalysisTest::test_save() { - cout << "test_save\n"; - string file_name = "../data/linear_correlation.dat"; testing_analysis.save(file_name); @@ -264,8 +234,6 @@ void TestingAnalysisTest::test_save() void TestingAnalysisTest::test_perform_linear_regression() { - cout << "test_perform_linear_regression\n"; - // DataSet samples_number = 1; @@ -288,16 +256,14 @@ void TestingAnalysisTest::test_perform_linear_regression() Tensor goodness_of_fit_analysis = testing_analysis.perform_goodness_of_fit_analysis(); - assert_true(goodness_of_fit_analysis.size() == 1 , LOG); - assert_true(goodness_of_fit_analysis[0].determination - type(1) < type(NUMERIC_LIMITS_MIN), LOG); + EXPECT_EQ(goodness_of_fit_analysis.size() == 1 ); + EXPECT_EQ(goodness_of_fit_analysis[0].determination - type(1) < type(NUMERIC_LIMITS_MIN)); } void TestingAnalysisTest::test_calculate_confusion() { - cout << "test_calculate_confusion\n"; - // Samples* i; Tensor actual; @@ -322,30 +288,28 @@ void TestingAnalysisTest::test_calculate_confusion() Tensor sum = confusion.sum(); - assert_true(sum(0) == 4 + 12, LOG); + EXPECT_EQ(sum(0) == 4 + 12); - assert_true(confusion.dimension(0) == 4, LOG); - assert_true(confusion.dimension(1) == 4, LOG); - assert_true(confusion(0,0) == 1, LOG); - assert_true(confusion(1,1) == 2, LOG); - assert_true(confusion(2,2) == 1, LOG); + EXPECT_EQ(confusion.dimension(0) == 4); + EXPECT_EQ(confusion.dimension(1) == 4); + EXPECT_EQ(confusion(0,0) == 1); + EXPECT_EQ(confusion(1,1) == 2); + EXPECT_EQ(confusion(2,2) == 1); - assert_true(confusion(0,3) == confusion(0,0) + confusion(0,1) + confusion(0,2), LOG); - assert_true(confusion(1,3) == confusion(1,0) + confusion(1,1) + confusion(1,2), LOG); - assert_true(confusion(2,3) == confusion(2,0) + confusion(2,1) + confusion(2,2), LOG); + EXPECT_EQ(confusion(0,3) == confusion(0,0) + confusion(0,1) + confusion(0,2)); + EXPECT_EQ(confusion(1,3) == confusion(1,0) + confusion(1,1) + confusion(1,2)); + EXPECT_EQ(confusion(2,3) == confusion(2,0) + confusion(2,1) + confusion(2,2)); - assert_true(confusion(3,0) == confusion(0,0) + confusion(1,0) + confusion(2,0), LOG); - assert_true(confusion(3,1) == confusion(0,1) + confusion(1,1) + confusion(2,1), LOG); - assert_true(confusion(3,2) == confusion(0,2) + confusion(1,2) + confusion(2,2), LOG); + EXPECT_EQ(confusion(3,0) == confusion(0,0) + confusion(1,0) + confusion(2,0)); + EXPECT_EQ(confusion(3,1) == confusion(0,1) + confusion(1,1) + confusion(2,1)); + EXPECT_EQ(confusion(3,2) == 
confusion(0,2) + confusion(1,2) + confusion(2,2)); - assert_true(confusion(3,3) == 4, LOG); + EXPECT_EQ(confusion(3,3) == 4); } void TestingAnalysisTest::test_calculate_binary_classification_test() { - cout << "test_calculate_binary_classification_test\n"; - // DataSet data_set.set(samples_number, inputs_number, targets_number); @@ -363,30 +327,28 @@ void TestingAnalysisTest::test_calculate_binary_classification_test() Tensor binary = testing_analysis.calculate_binary_classification_tests(); - assert_true(binary.size() == 15 , LOG); - - assert_true(binary[0] == 0 , LOG); - assert_true(binary[1] == 1 , LOG); - assert_true(binary[2] == 0 , LOG); - assert_true(binary[3] == 0 , LOG); - assert_true(binary[4] == 0 , LOG); - assert_true(binary[5] == 0 , LOG); - assert_true(binary[6] == 0 , LOG); - assert_true(binary[7] == 0 , LOG); - assert_true(binary[8] == 1 , LOG); - assert_true(binary[9] == 1 , LOG); - assert_true(binary[10] == 0 , LOG); - assert_true(binary[11] == 0 , LOG); - assert_true(binary[12] == 0 , LOG); - assert_true(binary[13] == -1 , LOG); - assert_true(binary[14] == -1 , LOG); + EXPECT_EQ(binary.size() == 15 ); + + EXPECT_EQ(binary[0] == 0 ); + EXPECT_EQ(binary[1] == 1 ); + EXPECT_EQ(binary[2] == 0 ); + EXPECT_EQ(binary[3] == 0 ); + EXPECT_EQ(binary[4] == 0 ); + EXPECT_EQ(binary[5] == 0 ); + EXPECT_EQ(binary[6] == 0 ); + EXPECT_EQ(binary[7] == 0 ); + EXPECT_EQ(binary[8] == 1 ); + EXPECT_EQ(binary[9] == 1 ); + EXPECT_EQ(binary[10] == 0 ); + EXPECT_EQ(binary[11] == 0 ); + EXPECT_EQ(binary[12] == 0 ); + EXPECT_EQ(binary[13] == -1 ); + EXPECT_EQ(binary[14] == -1 ); } void TestingAnalysisTest::test_calculate_roc_curve() { - cout << "test_calculate_roc_curve\n"; - Tensor targets; Tensor outputs; @@ -410,19 +372,19 @@ void TestingAnalysisTest::test_calculate_roc_curve() roc_curve = testing_analysis.calculate_roc_curve(targets, outputs); - assert_true(roc_curve.dimension(1) == 3, LOG); - assert_true(roc_curve.dimension(0) == 201, LOG); + EXPECT_EQ(roc_curve.dimension(1) == 3); + EXPECT_EQ(roc_curve.dimension(0) == 201); - assert_true(roc_curve(0, 0) < type(NUMERIC_LIMITS_MIN), LOG); - assert_true(roc_curve(0, 1) < type(NUMERIC_LIMITS_MIN), LOG); - assert_true(roc_curve(1, 0) < type(NUMERIC_LIMITS_MIN), LOG); - assert_true(roc_curve(1, 1) - type(1) < type(NUMERIC_LIMITS_MIN), LOG); - assert_true(roc_curve(2, 0) < type(NUMERIC_LIMITS_MIN), LOG); - assert_true(roc_curve(2, 1) - type(1) < type(NUMERIC_LIMITS_MIN), LOG); - assert_true(roc_curve(3, 0) < type(NUMERIC_LIMITS_MIN), LOG); - assert_true(roc_curve(3, 1) - type(1) < type(NUMERIC_LIMITS_MIN), LOG); - assert_true(roc_curve(4, 0) - type(1) < type(NUMERIC_LIMITS_MIN), LOG); - assert_true(roc_curve(4, 1) - type(1) < type(NUMERIC_LIMITS_MIN), LOG); + EXPECT_EQ(roc_curve(0, 0) < type(NUMERIC_LIMITS_MIN)); + EXPECT_EQ(roc_curve(0, 1) < type(NUMERIC_LIMITS_MIN)); + EXPECT_EQ(roc_curve(1, 0) < type(NUMERIC_LIMITS_MIN)); + EXPECT_EQ(roc_curve(1, 1) - type(1) < type(NUMERIC_LIMITS_MIN)); + EXPECT_EQ(roc_curve(2, 0) < type(NUMERIC_LIMITS_MIN)); + EXPECT_EQ(roc_curve(2, 1) - type(1) < type(NUMERIC_LIMITS_MIN)); + EXPECT_EQ(roc_curve(3, 0) < type(NUMERIC_LIMITS_MIN)); + EXPECT_EQ(roc_curve(3, 1) - type(1) < type(NUMERIC_LIMITS_MIN)); + EXPECT_EQ(roc_curve(4, 0) - type(1) < type(NUMERIC_LIMITS_MIN)); + EXPECT_EQ(roc_curve(4, 1) - type(1) < type(NUMERIC_LIMITS_MIN)); // Test @@ -442,26 +404,24 @@ void TestingAnalysisTest::test_calculate_roc_curve() roc_curve = testing_analysis.calculate_roc_curve(targets, outputs); - assert_true(roc_curve.dimension(1) 
== 3, LOG); - assert_true(roc_curve.dimension(0) == 201, LOG); - - assert_true(roc_curve(0, 0) < type(NUMERIC_LIMITS_MIN), LOG); - assert_true(roc_curve(0, 1) < type(NUMERIC_LIMITS_MIN), LOG); - assert_true(roc_curve(1, 0) < type(NUMERIC_LIMITS_MIN), LOG); - assert_true(roc_curve(1, 1) - type(0.5) < type(NUMERIC_LIMITS_MIN), LOG); - assert_true(roc_curve(2, 0) - type(0.5) < type(NUMERIC_LIMITS_MIN), LOG); - assert_true(roc_curve(2, 1) - type(0.5) < type(NUMERIC_LIMITS_MIN), LOG); - assert_true(roc_curve(3, 0) - type(1) < type(NUMERIC_LIMITS_MIN), LOG); - assert_true(roc_curve(3, 1) - type(0.5) < type(NUMERIC_LIMITS_MIN), LOG); - assert_true(roc_curve(4, 0) - type(1) < type(NUMERIC_LIMITS_MIN), LOG); - assert_true(roc_curve(4, 1) - type(1) < type(NUMERIC_LIMITS_MIN), LOG); + EXPECT_EQ(roc_curve.dimension(1) == 3); + EXPECT_EQ(roc_curve.dimension(0) == 201); + + EXPECT_EQ(roc_curve(0, 0) < type(NUMERIC_LIMITS_MIN)); + EXPECT_EQ(roc_curve(0, 1) < type(NUMERIC_LIMITS_MIN)); + EXPECT_EQ(roc_curve(1, 0) < type(NUMERIC_LIMITS_MIN)); + EXPECT_EQ(roc_curve(1, 1) - type(0.5) < type(NUMERIC_LIMITS_MIN)); + EXPECT_EQ(roc_curve(2, 0) - type(0.5) < type(NUMERIC_LIMITS_MIN)); + EXPECT_EQ(roc_curve(2, 1) - type(0.5) < type(NUMERIC_LIMITS_MIN)); + EXPECT_EQ(roc_curve(3, 0) - type(1) < type(NUMERIC_LIMITS_MIN)); + EXPECT_EQ(roc_curve(3, 1) - type(0.5) < type(NUMERIC_LIMITS_MIN)); + EXPECT_EQ(roc_curve(4, 0) - type(1) < type(NUMERIC_LIMITS_MIN)); + EXPECT_EQ(roc_curve(4, 1) - type(1) < type(NUMERIC_LIMITS_MIN)); } void TestingAnalysisTest::test_calculate_area_under_curve() { - cout << "test_calculate_area_under_curve\n"; - Tensor roc_curve; type area_under_curve; @@ -486,7 +446,7 @@ void TestingAnalysisTest::test_calculate_area_under_curve() area_under_curve = testing_analysis.calculate_area_under_curve(roc_curve); - assert_true(area_under_curve - type(1) < type(NUMERIC_LIMITS_MIN), LOG); + EXPECT_EQ(area_under_curve - type(1) < type(NUMERIC_LIMITS_MIN)); targets.resize(4,1); @@ -506,7 +466,7 @@ void TestingAnalysisTest::test_calculate_area_under_curve() area_under_curve = testing_analysis.calculate_area_under_curve(roc_curve); - assert_true(area_under_curve - type(0.5) < type(NUMERIC_LIMITS_MIN), LOG); + EXPECT_EQ(area_under_curve - type(0.5) < type(NUMERIC_LIMITS_MIN)); // Test @@ -528,7 +488,7 @@ void TestingAnalysisTest::test_calculate_area_under_curve() area_under_curve = testing_analysis.calculate_area_under_curve(roc_curve); - assert_true(area_under_curve - type(0.5) < type(NUMERIC_LIMITS_MIN), LOG); + EXPECT_EQ(area_under_curve - type(0.5) < type(NUMERIC_LIMITS_MIN)); // Test @@ -550,14 +510,12 @@ void TestingAnalysisTest::test_calculate_area_under_curve() area_under_curve = testing_analysis.calculate_area_under_curve(roc_curve); - assert_true(area_under_curve < type(NUMERIC_LIMITS_MIN), LOG); + EXPECT_EQ(area_under_curve < type(NUMERIC_LIMITS_MIN)); } void TestingAnalysisTest::test_calculate_optimal_threshold() { - cout << "test_calculate_optimal_threshold\n"; - type optimal_threshold; Tensor roc_curve; @@ -582,7 +540,7 @@ void TestingAnalysisTest::test_calculate_optimal_threshold() optimal_threshold = testing_analysis.calculate_optimal_threshold(roc_curve); - assert_true(optimal_threshold - type(1) < type(NUMERIC_LIMITS_MIN), LOG); + EXPECT_EQ(optimal_threshold - type(1) < type(NUMERIC_LIMITS_MIN)); // Test @@ -604,7 +562,7 @@ void TestingAnalysisTest::test_calculate_optimal_threshold() optimal_threshold = testing_analysis.calculate_optimal_threshold(roc_curve); - assert_true(optimal_threshold - 
type(1) < type(NUMERIC_LIMITS_MIN), LOG); + EXPECT_EQ(optimal_threshold - type(1) < type(NUMERIC_LIMITS_MIN)); // Test @@ -628,15 +586,12 @@ void TestingAnalysisTest::test_calculate_optimal_threshold() optimal_threshold = testing_analysis.calculate_optimal_threshold(roc_curve); - assert_true(optimal_threshold - type(0.62) < type(NUMERIC_LIMITS_MIN), LOG); + EXPECT_EQ(optimal_threshold - type(0.62) < type(NUMERIC_LIMITS_MIN)); } void TestingAnalysisTest::test_calculate_cumulative_gain() { - cout << "test_calculate_cumulative_chart\n"; - - // Test targets.resize(4,1); @@ -654,19 +609,17 @@ void TestingAnalysisTest::test_calculate_cumulative_gain() Tensor cumulative_gain = testing_analysis.calculate_cumulative_gain(targets, outputs); - assert_true(cumulative_gain.dimension(1) == 2, LOG); - assert_true(cumulative_gain.dimension(0) == 21, LOG); - assert_true(cumulative_gain(0, 0) - type(0) < type(NUMERIC_LIMITS_MIN), LOG); - assert_true(cumulative_gain(0, 1) - type(0) < type(NUMERIC_LIMITS_MIN), LOG); - assert_true(cumulative_gain(20, 0) - type(1) < type(NUMERIC_LIMITS_MIN), LOG); - assert_true(cumulative_gain(20, 1) - type(1) < type(NUMERIC_LIMITS_MIN), LOG); + EXPECT_EQ(cumulative_gain.dimension(1) == 2); + EXPECT_EQ(cumulative_gain.dimension(0) == 21); + EXPECT_EQ(cumulative_gain(0, 0) - type(0) < type(NUMERIC_LIMITS_MIN)); + EXPECT_EQ(cumulative_gain(0, 1) - type(0) < type(NUMERIC_LIMITS_MIN)); + EXPECT_EQ(cumulative_gain(20, 0) - type(1) < type(NUMERIC_LIMITS_MIN)); + EXPECT_EQ(cumulative_gain(20, 1) - type(1) < type(NUMERIC_LIMITS_MIN)); } void TestingAnalysisTest::test_calculate_lift_chart() { - cout << "test_calculate_lift_chart\n"; - Tensor cumulative_gain; Tensor lift_chart; @@ -691,15 +644,13 @@ void TestingAnalysisTest::test_calculate_lift_chart() lift_chart = testing_analysis.calculate_lift_chart(cumulative_gain); - assert_true(lift_chart.dimension(1) == cumulative_gain.dimension(1), LOG); - assert_true(lift_chart.dimension(0) == cumulative_gain.dimension(0), LOG); + EXPECT_EQ(lift_chart.dimension(1) == cumulative_gain.dimension(1)); + EXPECT_EQ(lift_chart.dimension(0) == cumulative_gain.dimension(0)); } void TestingAnalysisTest::test_calculate_calibration_plot() { - cout << "test_calculate_calibration_plot\n"; - Tensor calibration_plot; // Test @@ -732,15 +683,13 @@ void TestingAnalysisTest::test_calculate_calibration_plot() calibration_plot = testing_analysis.calculate_calibration_plot(targets, outputs); - assert_true(calibration_plot.dimension(1) == 2, LOG); - assert_true(calibration_plot.dimension(0) == 11, LOG); + EXPECT_EQ(calibration_plot.dimension(1) == 2); + EXPECT_EQ(calibration_plot.dimension(0) == 11); } void TestingAnalysisTest::test_calculate_true_positive_samples() { - cout << "test_calculate_true_positive_samples\n"; - Tensor true_positives_indices; // Test @@ -766,8 +715,8 @@ void TestingAnalysisTest::test_calculate_true_positive_samples() true_positives_indices = testing_analysis.calculate_true_positive_samples(targets, outputs, testing_indices, threshold); - assert_true(true_positives_indices.size() == 1, LOG); - assert_true(true_positives_indices[0] == 1, LOG); + EXPECT_EQ(true_positives_indices.size() == 1); + EXPECT_EQ(true_positives_indices[0] == 1); // Test @@ -789,7 +738,7 @@ void TestingAnalysisTest::test_calculate_true_positive_samples() const Tensor not_empty = true_positives_indices.any(); - assert_true(!not_empty(0), LOG); + EXPECT_EQ(!not_empty(0)); // Test @@ -809,18 +758,16 @@ void TestingAnalysisTest::test_calculate_true_positive_samples() 
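Many of the numeric checks in this file compare with a one-sided expression of the form `value - expected < type(NUMERIC_LIMITS_MIN)`, which is also satisfied when `value` undershoots `expected` by a large margin, so it can only ever fail in one direction. A sketch (plain GoogleTest, hypothetical numbers) of a two-sided tolerance check for results such as the area under the ROC curve:

```cpp
#include <gtest/gtest.h>
#include <cmath>

// Sketch only: area_under_curve is a hypothetical computed value.
TEST(ToleranceSketch, AreaUnderCurve)
{
    const double area_under_curve = 0.9999;
    const double expected = 1.0;

    EXPECT_NEAR(area_under_curve, expected, 1e-3);           // checks |a - b| <= tolerance
    EXPECT_LT(std::abs(area_under_curve - expected), 1e-3);  // the same bound spelled out
}
```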
true_positives_indices = testing_analysis.calculate_true_positive_samples(targets, outputs, testing_indices, threshold); - assert_true(true_positives_indices.size() == 4, LOG); - assert_true(true_positives_indices[0] == 0, LOG); - assert_true(true_positives_indices[1] == 1, LOG); - assert_true(true_positives_indices[2] == 2, LOG); - assert_true(true_positives_indices[3] == 3, LOG); + EXPECT_EQ(true_positives_indices.size() == 4); + EXPECT_EQ(true_positives_indices[0] == 0); + EXPECT_EQ(true_positives_indices[1] == 1); + EXPECT_EQ(true_positives_indices[2] == 2); + EXPECT_EQ(true_positives_indices[3] == 3); } void TestingAnalysisTest::test_calculate_false_positive_samples() { - cout << "test_calculate_false_positive_samples\n"; - Tensor false_positives_indices; // Test @@ -845,8 +792,8 @@ void TestingAnalysisTest::test_calculate_false_positive_samples() false_positives_indices = testing_analysis.calculate_false_positive_samples(targets, outputs,testing_indices, threshold); - assert_true(false_positives_indices.size() == 1, LOG); - assert_true(false_positives_indices[0] == 2, LOG); + EXPECT_EQ(false_positives_indices.size() == 1); + EXPECT_EQ(false_positives_indices[0] == 2); // Test @@ -866,11 +813,11 @@ void TestingAnalysisTest::test_calculate_false_positive_samples() false_positives_indices = testing_analysis.calculate_false_positive_samples(targets, outputs, testing_indices, threshold); - assert_true(false_positives_indices.size() == 4, LOG); - assert_true(false_positives_indices[0] == 0, LOG); - assert_true(false_positives_indices[1] == 1, LOG); - assert_true(false_positives_indices[2] == 2, LOG); - assert_true(false_positives_indices[3] == 3, LOG); + EXPECT_EQ(false_positives_indices.size() == 4); + EXPECT_EQ(false_positives_indices[0] == 0); + EXPECT_EQ(false_positives_indices[1] == 1); + EXPECT_EQ(false_positives_indices[2] == 2); + EXPECT_EQ(false_positives_indices[3] == 3); // Test @@ -892,20 +839,16 @@ void TestingAnalysisTest::test_calculate_false_positive_samples() const Tensor not_empty = false_positives_indices.any(); - assert_true(!not_empty(0), LOG); + EXPECT_EQ(!not_empty(0)); - assert_true(false_positives_indices.size() == 0, LOG); + EXPECT_EQ(false_positives_indices.size() == 0); } void TestingAnalysisTest::test_calculate_false_negative_samples() { - cout << "test_calculate_false_negative_samples\n"; - Tensor false_negatives_indices; - // Test - targets.resize(4, 1); targets(0, 0) = type(0); @@ -926,8 +869,8 @@ void TestingAnalysisTest::test_calculate_false_negative_samples() false_negatives_indices = testing_analysis.calculate_false_negative_samples(targets, outputs, testing_indices, threshold); - assert_true(false_negatives_indices.size() == 1, LOG); - assert_true(false_negatives_indices[0] == 3, LOG); + EXPECT_EQ(false_negatives_indices.size() == 1); + EXPECT_EQ(false_negatives_indices[0] == 3); // Test @@ -947,11 +890,11 @@ void TestingAnalysisTest::test_calculate_false_negative_samples() false_negatives_indices = testing_analysis.calculate_false_negative_samples(targets, outputs, testing_indices, threshold); - assert_true(false_negatives_indices.size() == 0, LOG); + EXPECT_EQ(false_negatives_indices.size() == 0); const Tensor not_empty = false_negatives_indices.any(); - assert_true(!not_empty(0), LOG); + EXPECT_EQ(!not_empty(0)); // Test @@ -971,18 +914,16 @@ void TestingAnalysisTest::test_calculate_false_negative_samples() false_negatives_indices = testing_analysis.calculate_false_negative_samples(targets, outputs, testing_indices, threshold); - 
assert_true(false_negatives_indices.size() == 4, LOG); - assert_true(false_negatives_indices[0] == 0, LOG); - assert_true(false_negatives_indices[1] == 1, LOG); - assert_true(false_negatives_indices[2] == 2, LOG); - assert_true(false_negatives_indices[3] == 3, LOG); + EXPECT_EQ(false_negatives_indices.size() == 4); + EXPECT_EQ(false_negatives_indices[0] == 0); + EXPECT_EQ(false_negatives_indices[1] == 1); + EXPECT_EQ(false_negatives_indices[2] == 2); + EXPECT_EQ(false_negatives_indices[3] == 3); } void TestingAnalysisTest::test_calculate_true_negative_samples() { - cout << "test_calculate_true_negative_samples\n"; - Tensor true_negatives_indices; // Test @@ -1007,11 +948,11 @@ void TestingAnalysisTest::test_calculate_true_negative_samples() true_negatives_indices = testing_analysis.calculate_true_negative_samples(targets, outputs, testing_indices, threshold); - assert_true(true_negatives_indices.size() == 4, LOG); - assert_true(true_negatives_indices[0] == 0, LOG); - assert_true(true_negatives_indices[1] == 1, LOG); - assert_true(true_negatives_indices[2] == 2, LOG); - assert_true(true_negatives_indices[3] == 3, LOG); + EXPECT_EQ(true_negatives_indices.size() == 4); + EXPECT_EQ(true_negatives_indices[0] == 0); + EXPECT_EQ(true_negatives_indices[1] == 1); + EXPECT_EQ(true_negatives_indices[2] == 2); + EXPECT_EQ(true_negatives_indices[3] == 3); // Test @@ -1033,7 +974,7 @@ void TestingAnalysisTest::test_calculate_true_negative_samples() const Tensor not_empty = true_negatives_indices.any(); - assert_true(!not_empty(0), LOG); + EXPECT_EQ(!not_empty(0)); // Test @@ -1053,15 +994,13 @@ void TestingAnalysisTest::test_calculate_true_negative_samples() true_negatives_indices = testing_analysis.calculate_true_negative_samples(targets, outputs, testing_indices, threshold); - assert_true(true_negatives_indices.size() == 1, LOG); - assert_true(true_negatives_indices[0] == 0, LOG); + EXPECT_EQ(true_negatives_indices.size() == 1); + EXPECT_EQ(true_negatives_indices[0] == 0); } void TestingAnalysisTest::test_calculate_multiple_classification_rates() { - cout << "test_calculate_multiple_classification_rates\n"; - Tensor testing_indices; Tensor, 2> multiple_classification_rates; @@ -1097,81 +1036,23 @@ void TestingAnalysisTest::test_calculate_multiple_classification_rates() multiple_classification_rates = testing_analysis.calculate_multiple_classification_rates(targets, outputs, testing_indices); - assert_true(multiple_classification_rates.size() == 9, LOG); + EXPECT_EQ(multiple_classification_rates.size() == 9); - assert_true(multiple_classification_rates(0,0)(0) == 0, LOG); - assert_true(multiple_classification_rates(0,1)(0) == 3, LOG); - assert_true(multiple_classification_rates(0,2)(0) == 6, LOG); - assert_true(multiple_classification_rates(1,0)(0) == 4, LOG); - assert_true(multiple_classification_rates(1,1)(0) == 1, LOG); - assert_true(multiple_classification_rates(1,2)(0) == 7, LOG); - assert_true(multiple_classification_rates(2,0)(0) == 8, LOG); - assert_true(multiple_classification_rates(2,1)(0) == 5, LOG); - assert_true(multiple_classification_rates(2,2)(0) == 2, LOG); + EXPECT_EQ(multiple_classification_rates(0,0)(0) == 0); + EXPECT_EQ(multiple_classification_rates(0,1)(0) == 3); + EXPECT_EQ(multiple_classification_rates(0,2)(0) == 6); + EXPECT_EQ(multiple_classification_rates(1,0)(0) == 4); + EXPECT_EQ(multiple_classification_rates(1,1)(0) == 1); + EXPECT_EQ(multiple_classification_rates(1,2)(0) == 7); + EXPECT_EQ(multiple_classification_rates(2,0)(0) == 8); + 
EXPECT_EQ(multiple_classification_rates(2,1)(0) == 5);
+    EXPECT_EQ(multiple_classification_rates(2,2)(0) == 2);
 }
-
-void TestingAnalysisTest::run_test_case()
-{
-    cout << "Running testing analysis test case...\n";
-
-    test_constructor();
-
-    // Error data
-
-    test_calculate_error_data();
-    test_calculate_percentage_error_data();
-    test_calculate_error_data_descriptives();
-    test_calculate_absolute_errors_descriptives();
-    test_calculate_percentage_errors_descriptives();
-    test_calculate_error_data_histograms();
-    test_calculate_maximal_errors();
-
-    // Linear regression analysis
-
-    test_linear_regression();
-    test_save();
-    test_perform_linear_regression();
-
-    // Binary classification test
-
-    test_calculate_binary_classification_test();
-
-    // Confusion matrix
-
-    test_calculate_confusion();
-
-    // ROC curve
-
-    test_calculate_roc_curve();
-    test_calculate_area_under_curve();
-    test_calculate_optimal_threshold();
-
-    // Lift chart
-
-    test_calculate_cumulative_gain();
-    test_calculate_lift_chart();
-
-    // Calibration plot
-
-    test_calculate_calibration_plot();
-
-    // Binary classification rates
-
-    test_calculate_true_positive_samples();
-    test_calculate_false_positive_samples();
-    test_calculate_false_negative_samples();
-    test_calculate_true_negative_samples();
-
-    // Multiple classification rates
-
-    test_calculate_multiple_classification_rates();
-
-    cout << "End of testing analysis test case.\n\n";
 }
-}
+*/

 // OpenNN: Open Neural Networks Library.
 // Copyright (C) 2005-2024 Artificial Intelligence Techniques, SL.
@@ -1189,4 +1070,3 @@ void TestingAnalysisTest::run_test_case()
 // You should have received a copy of the GNU Lesser General Public
 // License along with this library; if not, write to the Free Software
 // Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-*/
\ No newline at end of file
diff --git a/tests/time_series_data_set_test.cpp b/tests/time_series_data_set_test.cpp
index ded4570b9..300d7e09a 100644
--- a/tests/time_series_data_set_test.cpp
+++ b/tests/time_series_data_set_test.cpp
@@ -1,51 +1,38 @@
 #include "pch.h"

-/*
-
 #include "../opennn/time_series_data_set.h"

-namespace opennn
-{
-
-TimeSeriesDataSetTest::TimeSeriesDataSetTest() : UnitTesting()
-{
-    data_set.set_display(false);
-}
-
+/*
 void TimeSeriesDataSetTest::test_constructor()
 {
-    cout << "test_constructor\n";
-
     TimeSeriesDataSet data_set_1;

-    assert_true(data_set_1.get_variables_number() == 0, LOG);
-    assert_true(data_set_1.get_samples_number() == 0, LOG);
+    EXPECT_EQ(data_set_1.get_variables_number() == 0);
+    EXPECT_EQ(data_set_1.get_samples_number() == 0);

 /*
     // Samples and variables number constructor

     TimeSeriesDataSet data_set_2(1, 2);

-    assert_true(data_set_2.get_samples_number() == 1, LOG);
-    assert_true(data_set_2.get_variables_number() == 2, LOG);
+    EXPECT_EQ(data_set_2.get_samples_number() == 1);
+    EXPECT_EQ(data_set_2.get_variables_number() == 2);

     // Inputs, targets and samples numbers constructor

     TimeSeriesDataSet data_set_3(1, 1, 1);

-    assert_true(data_set_3.get_variables_number() == 2, LOG);
-    assert_true(data_set_3.get_samples_number() == 1, LOG);
-    assert_true(data_set_3.get_target_variables_number() == 1,LOG);
-    assert_true(data_set_3.get_input_variables_number() == 1,LOG);
+    EXPECT_EQ(data_set_3.get_variables_number() == 2);
+    EXPECT_EQ(data_set_3.get_samples_number() == 1);
+    EXPECT_EQ(data_set_3.get_target_variables_number() == 1);
+    EXPECT_EQ(data_set_3.get_input_variables_number() == 1);
 }

void TimeSeriesDataSetTest::test_calculate_autocorrelations() 
{ - cout << "test_calculate_autocorrelations\n"; - Tensor autocorrelations; Index samples_number; @@ -73,16 +60,14 @@ void TimeSeriesDataSetTest::test_calculate_autocorrelations() autocorrelations = data_set.calculate_autocorrelations(lags_number); - assert_true(autocorrelations.dimension(0) == 2, LOG); - assert_true(autocorrelations.dimension(1) == 1, LOG); + EXPECT_EQ(autocorrelations.dimension(0) == 2); + EXPECT_EQ(autocorrelations.dimension(1) == 1); } void TimeSeriesDataSetTest::test_calculate_cross_correlations() { - cout << "test_calculate_cross_correlations\n"; - Index lags_number; Tensor cross_correlations; @@ -108,15 +93,13 @@ void TimeSeriesDataSetTest::test_calculate_cross_correlations() cross_correlations = data_set.calculate_cross_correlations(lags_number); - assert_true(cross_correlations.dimension(0) == 3, LOG); + EXPECT_EQ(cross_correlations.dimension(0) == 3); } void TimeSeriesDataSetTest::test_transform_time_series() { - cout << "test_transform_time_series\n"; - data.resize(9, 2); data.setValues({{1,10}, @@ -139,27 +122,25 @@ void TimeSeriesDataSetTest::test_transform_time_series() data_set.transform_time_series(); - assert_true(data_set.get_raw_variables_number() == 6, LOG); - assert_true(data_set.get_variables_number() == 6, LOG); - assert_true(data_set.get_samples_number() == 7, LOG); + EXPECT_EQ(data_set.get_raw_variables_number() == 6); + EXPECT_EQ(data_set.get_variables_number() == 6); + EXPECT_EQ(data_set.get_samples_number() == 7); - assert_true(data_set.get_variables_number(DataSet::VariableUse::Input) == 4, LOG); - assert_true(data_set.get_variables_number(DataSet::VariableUse::Target) == 1, LOG); - assert_true(data_set.get_raw_variables_number(DataSet::VariableUse::Target) == 1, LOG); - assert_true(data_set.get_variables_number(DataSet::VariableUse::None) == 1, LOG); + EXPECT_EQ(data_set.get_variables_number(DataSet::VariableUse::Input) == 4); + EXPECT_EQ(data_set.get_variables_number(DataSet::VariableUse::Target) == 1); + EXPECT_EQ(data_set.get_raw_variables_number(DataSet::VariableUse::Target) == 1); + EXPECT_EQ(data_set.get_variables_number(DataSet::VariableUse::None) == 1); - assert_true(data_set.get_variable_name(0) == "x_lag_1", LOG); - assert_true(data_set.get_variable_name(1) == "y_lag_1", LOG); - assert_true(data_set.get_variable_name(2) == "x_lag_0", LOG); - assert_true(data_set.get_variable_name(3) == "y_lag_0", LOG); + EXPECT_EQ(data_set.get_variable_name(0) == "x_lag_1"); + EXPECT_EQ(data_set.get_variable_name(1) == "y_lag_1"); + EXPECT_EQ(data_set.get_variable_name(2) == "x_lag_0"); + EXPECT_EQ(data_set.get_variable_name(3) == "y_lag_0"); } void TimeSeriesDataSetTest::test_set_time_series_data() { - cout << "test_set_time_series_data\n"; - data.resize(4,2); data.setValues({{type(0),type(0)}, @@ -183,16 +164,14 @@ void TimeSeriesDataSetTest::test_set_time_series_data() data_set.set_time_series_data(data); - assert_true(data_set.get_time_series_data()(0) - type(15) < type(NUMERIC_LIMITS_MIN), LOG); - assert_true(data_set.get_time_series_data()(1) - type(12) < type(NUMERIC_LIMITS_MIN), LOG); - assert_true(data_set.get_time_series_data()(2) - type(9) < type(NUMERIC_LIMITS_MIN), LOG); + EXPECT_EQ(data_set.get_time_series_data()(0) - type(15) < type(NUMERIC_LIMITS_MIN)); + EXPECT_EQ(data_set.get_time_series_data()(1) - type(12) < type(NUMERIC_LIMITS_MIN)); + EXPECT_EQ(data_set.get_time_series_data()(2) - type(9) < type(NUMERIC_LIMITS_MIN)); } void TimeSeriesDataSetTest::test_save_time_series_data_binary() { - cout << 
"test_save_time_series_data_binary\n"; - const string data_path = "../data/test"; // Test @@ -214,17 +193,15 @@ void TimeSeriesDataSetTest::test_save_time_series_data_binary() data_set.save_time_series_data_binary(data_path); data_set.load_time_series_data_binary(data_path); - assert_true(data_set.get_time_series_data()(0) - type(0) < type(NUMERIC_LIMITS_MIN), LOG); - assert_true(data_set.get_time_series_data()(1) - type(1) < type(NUMERIC_LIMITS_MIN), LOG); - assert_true(data_set.get_time_series_data()(2) - type(2) < type(NUMERIC_LIMITS_MIN), LOG); + EXPECT_EQ(data_set.get_time_series_data()(0) - type(0) < type(NUMERIC_LIMITS_MIN)); + EXPECT_EQ(data_set.get_time_series_data()(1) - type(1) < type(NUMERIC_LIMITS_MIN)); + EXPECT_EQ(data_set.get_time_series_data()(2) - type(2) < type(NUMERIC_LIMITS_MIN)); } void TimeSeriesDataSetTest::test_set_steps_ahead_number() { - cout << "test_set_steps_ahead_nuber\n"; - data.resize(4,2); data.setValues({{type(0),type(0)}, {type(1),type(10)}, @@ -236,14 +213,12 @@ void TimeSeriesDataSetTest::test_set_steps_ahead_number() data_set.set_steps_ahead_number(2); data_set.transform_time_series(); - assert_true(data_set.get_lags_number() == 2, LOG); + EXPECT_EQ(data_set.get_lags_number() == 2); } void TimeSeriesDataSetTest::test_set_lags_number() { - cout << "test_set_lags_number\n"; - // Test data.resize(4,2); @@ -257,40 +232,9 @@ void TimeSeriesDataSetTest::test_set_lags_number() data_set.set_steps_ahead_number(2); data_set.transform_time_series(); - assert_true(data_set.get_steps_ahead() == 2, LOG); -} - - -void TimeSeriesDataSetTest::run_test_case() -{ - cout << "Running time series data set test case...\n"; - - test_constructor(); - - // Correlations - - test_calculate_autocorrelations(); - test_calculate_cross_correlations(); - - // Transform - - test_transform_time_series(); - - // Set series - - test_set_time_series_data(); - test_set_steps_ahead_number(); - test_set_lags_number(); - - // Saving - - test_save_time_series_data_binary(); - //test_has_time_raw_variables(); - - cout << "End of time series data set test case.\n\n"; -} - + EXPECT_EQ(data_set.get_steps_ahead() == 2); } +*/ // OpenNN: Open Neural Networks Library. // Copyright (C) 2005-2024 Artificial Intelligence Techniques, SL. 
@@ -308,4 +252,3 @@ void TimeSeriesDataSetTest::run_test_case() // You should have received a copy of the GNU Lesser General Public // License along with this library; if not, write to the Free Software // Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -*/ \ No newline at end of file diff --git a/tests/training_strategy_test.cpp b/tests/training_strategy_test.cpp index f6b2222a7..c1dc36700 100644 --- a/tests/training_strategy_test.cpp +++ b/tests/training_strategy_test.cpp @@ -1,32 +1,20 @@ #include "pch.h" /* -namespace opennn -{ - -TrainingStrategyTest::TrainingStrategyTest() : UnitTesting() -{ - training_strategy.set(&neural_network, &data_set); -} - void TrainingStrategyTest::test_constructor() { - cout << "test_constructor\n"; - // Test TrainingStrategy training_strategy_1(&neural_network, &data_set); - assert_true(training_strategy.get_neural_network(), LOG); - assert_true(training_strategy.get_data_set(), LOG); + EXPECT_EQ(training_strategy.get_neural_network()); + EXPECT_EQ(training_strategy.get_data_set()); } void TrainingStrategyTest::test_perform_training() { - cout << "test_perform_training\n"; - Index samples_number; Index inputs_number; Index targets_number; @@ -69,8 +57,6 @@ void TrainingStrategyTest::test_perform_training() void TrainingStrategyTest::test_to_XML() { - cout << "test_to_XML\n"; - string file_name = "../data/training_strategy.xml"; ofstream file(file_name); @@ -91,8 +77,6 @@ void TrainingStrategyTest::test_to_XML() void TrainingStrategyTest::test_from_XML() { - cout << "test_from_XML\n"; - training_strategy.set_optimization_method(TrainingStrategy::OptimizationMethod::GRADIENT_DESCENT); training_strategy.set_default(); @@ -110,8 +94,6 @@ void TrainingStrategyTest::test_from_XML() void TrainingStrategyTest::test_save() { - cout << "test_save\n"; - string file_name = "../data/training_strategy.xml"; training_strategy.set_optimization_method(TrainingStrategy::OptimizationMethod::GRADIENT_DESCENT); @@ -122,8 +104,6 @@ void TrainingStrategyTest::test_save() void TrainingStrategyTest::test_load() { - cout << "test_load\n"; - string file_name = "../data/training_strategy.xml"; // Test @@ -131,31 +111,9 @@ void TrainingStrategyTest::test_load() training_strategy.save(file_name); training_strategy.load(file_name); } +*/ -void TrainingStrategyTest::run_test_case() -{ - cout << "Running training strategy test case...\n"; - - test_constructor(); - - // Training - - test_perform_training(); - - // Serialization - - test_to_XML(); - test_from_XML(); - - test_save(); - test_load(); - - cout << "End of training strategy test case.\n\n"; -} - -} - // OpenNN: Open Neural Networks Library. // Copyright (C) 2005-2024 Artificial Intelligence Techniques, SL. 
// @@ -172,4 +130,3 @@ void TrainingStrategyTest::run_test_case() // You should have received a copy of the GNU Lesser General Public // License along with this library; if not, write to the Free Software // Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -*/ \ No newline at end of file diff --git a/tests/transformer_test.cpp b/tests/transformer_test.cpp index e71e94693..14b925f37 100644 --- a/tests/transformer_test.cpp +++ b/tests/transformer_test.cpp @@ -1,26 +1,17 @@ #include "pch.h" -/* //#include "../opennn/probabilistic_layer_3d.h" -namespace opennn -{ - -TransformerTest::TransformerTest() : UnitTesting() -{ -} - +/* void TransformerTest::test_constructor() { - cout << "test_constructor\n"; - Tensor, 1> layers; Transformer transformer_0; - assert_true(transformer_0.is_empty(), LOG); - assert_true(transformer_0.get_layers_number() == 0, LOG); + EXPECT_EQ(transformer_0.is_empty()); + EXPECT_EQ(transformer_0.get_layers_number() == 0); // Tensor constructor test @@ -39,14 +30,14 @@ void TransformerTest::test_constructor() Transformer transformer_1(architecture); - assert_true(transformer_1.get_layers_number() == 2 + 7 * layers_number + 10 * layers_number + 1, LOG); + EXPECT_EQ(transformer_1.get_layers_number() == 2 + 7 * layers_number + 10 * layers_number + 1); // List constructor test Transformer transformer_2({ input_length, context_length, input_dimensions, context_dimension, embedding_depth, perceptron_depth, heads_number, layers_number }); - assert_true(transformer_2.get_layers_number() == 2 + 7 * layers_number + 10 * layers_number + 1, LOG); + EXPECT_EQ(transformer_2.get_layers_number() == 2 + 7 * layers_number + 10 * layers_number + 1); // Test 3 @@ -62,7 +53,7 @@ void TransformerTest::test_constructor() Transformer transformer_3({ input_length, context_length, input_dimensions, context_dimension, embedding_depth, perceptron_depth, heads_number, layers_number }); - assert_true(transformer_3.get_layers_number() == 2 + 7 * layers_number + 10 * layers_number + 1, LOG); + EXPECT_EQ(transformer_3.get_layers_number() == 2 + 7 * layers_number + 10 * layers_number + 1); // Test 4 @@ -71,14 +62,12 @@ void TransformerTest::test_constructor() Transformer transformer_4({ input_length, context_length, input_dimensions, context_dimension, embedding_depth, perceptron_depth, heads_number, layers_number }); - assert_true(transformer_4.get_layers_number() == 2 + 7 * layers_number + 10 * layers_number + 1, LOG); + EXPECT_EQ(transformer_4.get_layers_number() == 2 + 7 * layers_number + 10 * layers_number + 1); } void TransformerTest::test_calculate_outputs() { - cout << "test_calculate_outputs\n"; - Tensor inputs; Tensor context; Tensor outputs; @@ -110,11 +99,11 @@ void TransformerTest::test_calculate_outputs() outputs = transformer.calculate_outputs(inputs, context); - assert_true(outputs.dimension(0) == batch_samples_number, LOG); - assert_true(outputs.dimension(1) == input_length, LOG); - assert_true(outputs.dimension(2) == input_dimensions, LOG); + EXPECT_EQ(outputs.dimension(0) == batch_samples_number); + EXPECT_EQ(outputs.dimension(1) == input_length); + EXPECT_EQ(outputs.dimension(2) == input_dimensions); - //assert_true(outputs.abs() < type(NUMERIC_LIMITS_MIN), LOG); + //EXPECT_EQ(outputs.abs() < type(NUMERIC_LIMITS_MIN)); // Test @@ -132,12 +121,12 @@ void TransformerTest::test_calculate_outputs() outputs = transformer.calculate_outputs(inputs); - assert_true(outputs.size() == batch_samples_number * outputs_number, LOG); - assert_true(abs(outputs(0, 0)) < 
type(NUMERIC_LIMITS_MIN), LOG); - assert_true(abs(outputs(0, 1)) < type(NUMERIC_LIMITS_MIN), LOG); - assert_true(abs(outputs(0, 2)) < type(NUMERIC_LIMITS_MIN), LOG); - assert_true(abs(outputs(0, 3)) < type(NUMERIC_LIMITS_MIN), LOG); - assert_true(abs(outputs(0, 4)) < type(NUMERIC_LIMITS_MIN), LOG); + EXPECT_EQ(outputs.size() == batch_samples_number * outputs_number); + EXPECT_EQ(abs(outputs(0, 0)) < type(NUMERIC_LIMITS_MIN)); + EXPECT_EQ(abs(outputs(0, 1)) < type(NUMERIC_LIMITS_MIN)); + EXPECT_EQ(abs(outputs(0, 2)) < type(NUMERIC_LIMITS_MIN)); + EXPECT_EQ(abs(outputs(0, 3)) < type(NUMERIC_LIMITS_MIN)); + EXPECT_EQ(abs(outputs(0, 4)) < type(NUMERIC_LIMITS_MIN)); // Test @@ -150,9 +139,9 @@ void TransformerTest::test_calculate_outputs() outputs = transformer.calculate_outputs(inputs); - assert_true(outputs.size() == 2, LOG); - assert_true(abs(outputs(0, 0) - type(3)) < type(NUMERIC_LIMITS_MIN), LOG); - assert_true(abs(outputs(0, 1) - type(3)) < type(NUMERIC_LIMITS_MIN), LOG); + EXPECT_EQ(outputs.size() == 2); + EXPECT_EQ(abs(outputs(0, 0) - type(3)) < type(NUMERIC_LIMITS_MIN)); + EXPECT_EQ(abs(outputs(0, 1) - type(3)) < type(NUMERIC_LIMITS_MIN)); // Test @@ -165,11 +154,11 @@ void TransformerTest::test_calculate_outputs() outputs = transformer.calculate_outputs(inputs); - assert_true(outputs.size() == 3, LOG); + EXPECT_EQ(outputs.size() == 3); - assert_true(abs(outputs(0, 0) - 3.2847) < type(1e-3), LOG); - assert_true(abs(outputs(0, 1) - 3.2847) < type(1e-3), LOG); - assert_true(abs(outputs(0, 2) - 3.2847) < type(1e-3), LOG); + EXPECT_EQ(abs(outputs(0, 0) - 3.2847) < type(1e-3)); + EXPECT_EQ(abs(outputs(0, 1) - 3.2847) < type(1e-3)); + EXPECT_EQ(abs(outputs(0, 2) - 3.2847) < type(1e-3)); // Test @@ -189,9 +178,9 @@ void TransformerTest::test_calculate_outputs() outputs = transformer.calculate_outputs(inputs); - assert_true(outputs.size() == outputs_number, LOG); - assert_true(abs(outputs(0, 0) - 0) < type(1e-3), LOG); - assert_true(abs(outputs(0, 1) - 0) < type(1e-3), LOG); + EXPECT_EQ(outputs.size() == outputs_number); + EXPECT_EQ(abs(outputs(0, 0) - 0) < type(1e-3)); + EXPECT_EQ(abs(outputs(0, 1) - 0) < type(1e-3)); // Test @@ -208,8 +197,8 @@ void TransformerTest::test_calculate_outputs() outputs = transformer.calculate_outputs(inputs); - assert_true(outputs.size() == 1, LOG); - assert_true(abs(outputs(0, 0) - 0) < type(1e-3), LOG); + EXPECT_EQ(outputs.size() == 1); + EXPECT_EQ(abs(outputs(0, 0) - 0) < type(1e-3)); // Test @@ -222,8 +211,8 @@ void TransformerTest::test_calculate_outputs() outputs = transformer.calculate_outputs(inputs); - assert_true(outputs.size() == 1, LOG); - assert_true(abs(outputs(0, 0) - type(0.5)) < type(1e-3), LOG); + EXPECT_EQ(outputs.size() == 1); + EXPECT_EQ(abs(outputs(0, 0) - type(0.5)) < type(1e-3)); // Test 7 @@ -244,19 +233,15 @@ void TransformerTest::test_calculate_outputs() outputs = transformer.calculate_outputs(inputs); - assert_true(outputs.dimension(1) == outputs_number, LOG); - assert_true(abs(outputs(0, 0)) < type(NUMERIC_LIMITS_MIN), LOG); - assert_true(abs(outputs(1, 0)) < type(NUMERIC_LIMITS_MIN), LOG); + EXPECT_EQ(outputs.dimension(1) == outputs_number); + EXPECT_EQ(abs(outputs(0, 0)) < type(NUMERIC_LIMITS_MIN)); + EXPECT_EQ(abs(outputs(1, 0)) < type(NUMERIC_LIMITS_MIN)); } - void TransformerTest::test_forward_propagate() { - - cout << "test_forward_propagate\n"; - { // Test @@ -319,12 +304,12 @@ void TransformerTest::test_forward_propagate() Tensor probabilistic_activations = probabilistic_layer_forward_propagation->outputs; - 
assert_true(probabilistic_activations.rank() == 3, LOG); - assert_true(probabilistic_activations.dimension(0) == batch_samples_number, LOG); - assert_true(probabilistic_activations.dimension(1) == input_length, LOG); - assert_true(probabilistic_activations.dimension(2) == input_dimensions + 1, LOG); + EXPECT_EQ(probabilistic_activations.rank() == 3); + EXPECT_EQ(probabilistic_activations.dimension(0) == batch_samples_number); + EXPECT_EQ(probabilistic_activations.dimension(1) == input_length); + EXPECT_EQ(probabilistic_activations.dimension(2) == input_dimensions + 1); - assert_true(check_activations_sums(probabilistic_activations), LOG); + EXPECT_EQ(check_activations_sums(probabilistic_activations)); } @@ -390,31 +375,16 @@ void TransformerTest::test_forward_propagate() Tensor probabilistic_activations = probabilistic_layer_forward_propagation->outputs; - assert_true(probabilistic_activations.rank() == 3, LOG); - assert_true(probabilistic_activations.dimension(0) == batch_samples_number, LOG); - assert_true(probabilistic_activations.dimension(1) == input_length, LOG); - assert_true(probabilistic_activations.dimension(2) == input_dimensions + 1, LOG); - - assert_true(check_activations_sums(probabilistic_activations), LOG); + EXPECT_EQ(probabilistic_activations.rank() == 3); + EXPECT_EQ(probabilistic_activations.dimension(0) == batch_samples_number); + EXPECT_EQ(probabilistic_activations.dimension(1) == input_length); + EXPECT_EQ(probabilistic_activations.dimension(2) == input_dimensions + 1); + EXPECT_EQ(check_activations_sums(probabilistic_activations)); } } - -void TransformerTest::run_test_case() -{ - cout << "Running transformer test case...\n"; - - test_constructor(); - - test_forward_propagate(); - - test_calculate_outputs(); - - cout << "End of transformer test case.\n\n"; -} - -} +*/ // OpenNN: Open Neural Networks Library. // Copyright (C) 2005-2024 Artificial Intelligence Techniques, SL. @@ -432,4 +402,3 @@ void TransformerTest::run_test_case() // You should have received a copy of the GNU Lesser General Public // License along with this library; if not, write to the Free Software // Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -*/ \ No newline at end of file diff --git a/tests/unscaling_layer_test.cpp b/tests/unscaling_layer_test.cpp index ac37b9021..0debc7da6 100644 --- a/tests/unscaling_layer_test.cpp +++ b/tests/unscaling_layer_test.cpp @@ -1,47 +1,22 @@ #include "pch.h" /* -#include - -namespace opennn -{ - - -UnscalingLayerTest::UnscalingLayerTest() : UnitTesting() -{ -} - void UnscalingLayerTest::test_constructor() { - cout << "test_constructor\n"; - - // Test - UnscalingLayer unscaling_layer_1; - assert_true(unscaling_layer_1.get_type() == Layer::Type::Unscaling, LOG); - assert_true(unscaling_layer_1.get_descriptives().size() == 0, LOG); + EXPECT_EQ(unscaling_layer_1.get_type() == Layer::Type::Unscaling); + EXPECT_EQ(unscaling_layer_1.get_descriptives().size() == 0); // Test UnscalingLayer unscaling_layer_2({3}); - assert_true(unscaling_layer_2.get_descriptives().size() == 3, LOG); - -} - - -void UnscalingLayerTest::run_test_case() -{ - cout << "Running unscaling layer test case...\n"; - - test_constructor(); - - cout << "End of unscaling layer test case.\n\n"; -} + EXPECT_EQ(unscaling_layer_2.get_descriptives().size() == 3); } +*/ // OpenNN: Open Neural Networks Library. // Copyright (C) 2005-2024 Artificial Intelligence Techniques, SL. 
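Returning to the transformer hunks above: the `check_activations_sums` helper they call is not shown in this patch, but the property it verifies is presumably that softmax activations sum to one along the last dimension for every (sample, position) pair. A self-contained sketch of such a check with plain Eigen tensors; the helper name and tolerance here are hypothetical, not OpenNN's implementation:

```cpp
#include <unsupported/Eigen/CXX11/Tensor>
#include <cmath>
#include <iostream>

// Hypothetical helper: true if every softmax row of a rank-3 activation tensor sums to 1.
bool activations_sum_to_one(const Eigen::Tensor<float, 3>& activations, float tolerance = 1e-4f)
{
    const Eigen::array<int, 1> last_dimension = {2};
    const Eigen::Tensor<float, 2> sums = activations.sum(last_dimension);

    for (int i = 0; i < sums.dimension(0); ++i)
        for (int j = 0; j < sums.dimension(1); ++j)
            if (std::abs(sums(i, j) - 1.0f) > tolerance)
                return false;

    return true;
}

int main()
{
    Eigen::Tensor<float, 3> activations(2, 3, 4);
    activations.setConstant(0.25f);  // a uniform softmax over 4 classes

    std::cout << std::boolalpha << activations_sum_to_one(activations) << "\n";  // true
}
```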
@@ -59,4 +34,3 @@ void UnscalingLayerTest::run_test_case() // You should have received a copy of the GNU Lesser General Public // License along with this library; if not, write to the Free Software // Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -*/ \ No newline at end of file diff --git a/tests/weighted_squared_error_test.cpp b/tests/weighted_squared_error_test.cpp index 810115a97..547a6c7a7 100644 --- a/tests/weighted_squared_error_test.cpp +++ b/tests/weighted_squared_error_test.cpp @@ -1,43 +1,31 @@ #include "pch.h" -/* - -#include "../opennn/tensors.h" -namespace opennn -{ +#include "../opennn/weighted_squared_error.h" -WeightedSquaredErrorTest::WeightedSquaredErrorTest() : UnitTesting() +TEST(WeightedSquaredErrorTest, DefaultConstructor) { - weighted_squared_error.set(&neural_network, &data_set); + WeightedSquaredError weighted_squared_error; - weighted_squared_error.set_regularization_method(LossIndex::RegularizationMethod::NoRegularization); + EXPECT_EQ(weighted_squared_error.has_neural_network(), false); + EXPECT_EQ(weighted_squared_error.has_data_set(), false); } -void WeightedSquaredErrorTest::test_constructor() +TEST(WeightedSquaredErrorTest, GeneralConstructor) { - cout << "test_constructor\n"; - - // Default + NeuralNetwork neural_network; + DataSet data_set; - WeightedSquaredError weighted_squared_error_1; + WeightedSquaredError weighted_squared_error(&neural_network, &data_set); - assert_true(!weighted_squared_error_1.has_neural_network(), LOG); - assert_true(!weighted_squared_error_1.has_data_set(), LOG); - - // Neural network and data set - - WeightedSquaredError weighted_squared_error_2(&neural_network, &data_set); - - assert_true(weighted_squared_error_2.has_neural_network(), LOG); - assert_true(weighted_squared_error_2.has_data_set(), LOG); + EXPECT_EQ(weighted_squared_error.has_neural_network(), true); + EXPECT_EQ(weighted_squared_error.has_data_set(), true); } +/* void WeightedSquaredErrorTest::test_back_propagate() { - cout << "test_back_propagate\n"; - weighted_squared_error.back_propagate(batch, forward_propagation, back_propagation); // Test binary classification trivial @@ -77,14 +65,14 @@ void WeightedSquaredErrorTest::test_back_propagate() numerical_gradient = weighted_squared_error.calculate_numerical_gradient(); - assert_true(back_propagation.errors.dimension(0) == samples_number, LOG); - assert_true(back_propagation.errors.dimension(1) == outputs_number, LOG); + EXPECT_EQ(back_propagation.errors.dimension(0) == samples_number); + EXPECT_EQ(back_propagation.errors.dimension(1) == outputs_number); - assert_true(back_propagation.errors.dimension(0) == 1, LOG); - assert_true(back_propagation.errors.dimension(1) == 1, LOG); - assert_true(back_propagation.error() - type(0.25) < type(NUMERIC_LIMITS_MIN), LOG); + EXPECT_EQ(back_propagation.errors.dimension(0) == 1); + EXPECT_EQ(back_propagation.errors.dimension(1) == 1); + EXPECT_EQ(back_propagation.error() - type(0.25) < type(NUMERIC_LIMITS_MIN)); - assert_true(are_equal(back_propagation.gradient, numerical_gradient, type(1.0e-3)), LOG); + EXPECT_EQ(are_equal(back_propagation.gradient, numerical_gradient, type(1.0e-3))); } // Test binary classification random samples, inputs, outputs, neurons @@ -126,26 +114,15 @@ void WeightedSquaredErrorTest::test_back_propagate() numerical_gradient = weighted_squared_error.calculate_numerical_gradient(); - assert_true(back_propagation.errors.dimension(0) == samples_number, LOG); - assert_true(back_propagation.errors.dimension(1) == outputs_number, 
LOG);
+    EXPECT_EQ(back_propagation.errors.dimension(0) == samples_number);
+    EXPECT_EQ(back_propagation.errors.dimension(1) == outputs_number);

-    assert_true(are_equal(back_propagation.gradient, numerical_gradient, type(1.0e-2)), LOG);
+    EXPECT_EQ(are_equal(back_propagation.gradient, numerical_gradient, type(1.0e-2)));
 }
 }

-void WeightedSquaredErrorTest::run_test_case()
-{
-    cout << "Running weighted squared error test case...\n";
-
-    test_constructor();
-
-    test_back_propagate();
-
-    cout << "End of weighted squared error test case.\n\n";
-}
-
 }
-
+*/

 // OpenNN: Open Neural Networks Library.
 // Copyright (C) 2005-2024 Artificial Intelligence Techniques, SL.
 //
@@ -162,4 +139,3 @@ void WeightedSquaredErrorTest::run_test_case()
 // You should have received a copy of the GNU Lesser General Public
 // License along with this library; if not, write to the Free Software
 // Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-*/
\ No newline at end of file
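As a closing reference for the back-propagation tests: the pattern they rely on, comparing an analytic gradient against `calculate_numerical_gradient` and accepting the result when `are_equal` holds within a tolerance, is the standard central-difference gradient check. A minimal, generic sketch of that check (not OpenNN's implementation) against a loss with a known gradient:

```cpp
#include <gtest/gtest.h>
#include <functional>
#include <vector>

// Central-difference approximation of the gradient of `loss` at `parameters`.
std::vector<double> numerical_gradient(
    const std::function<double(const std::vector<double>&)>& loss,
    std::vector<double> parameters,
    const double h = 1e-6)
{
    std::vector<double> gradient(parameters.size());

    for (std::size_t i = 0; i < parameters.size(); ++i)
    {
        const double original = parameters[i];

        parameters[i] = original + h;
        const double forward = loss(parameters);

        parameters[i] = original - h;
        const double backward = loss(parameters);

        parameters[i] = original;

        gradient[i] = (forward - backward) / (2 * h);
    }

    return gradient;
}

TEST(GradientCheckSketch, QuadraticLoss)
{
    // loss(p) = p0^2 + 3*p1, so the analytic gradient is (2*p0, 3).
    const auto loss = [](const std::vector<double>& p) { return p[0] * p[0] + 3 * p[1]; };

    const std::vector<double> analytic = {2.0, 3.0};  // gradient at p = (1, 5)
    const std::vector<double> numerical = numerical_gradient(loss, {1.0, 5.0});

    for (std::size_t i = 0; i < analytic.size(); ++i)
        EXPECT_NEAR(analytic[i], numerical[i], 1e-3);
}
```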