Merge branch 'dev' into patch-5
# Conflicts:
#	tests/response_optimization_test.cpp
Lenny Shleymovich committed Dec 26, 2024
2 parents f478117 + d264b60 commit e40db85
Showing 25 changed files with 496 additions and 1,432 deletions.
8 changes: 4 additions & 4 deletions opennn/kmeans.cpp
@@ -133,25 +133,25 @@ Tensor<type, 1> KMeans::elbow_method(const Tensor<type, 2>& data, Index max_clus
     const Index rows_number = data.dimension(0);
 
     Index original_clusters_number = clusters_number;
-    type sum_squared_error;
+    type mean_squared_error;
 
     for(Index cluster_index = 1; cluster_index <= max_clusters; cluster_index++)
     {
         clusters_number = cluster_index;
 
         fit(data);
 
-        sum_squared_error = type(0);
+        mean_squared_error = type(0);
 
         for(Index row_index = 0; row_index < rows_number; row_index++)
         {
             data_point = data.chip(row_index, 0);
             cluster_center = cluster_centers.chip(rows_cluster_labels(row_index), 0);
 
-            sum_squared_error += type(pow(l2_distance(data_point, cluster_center), 2));
+            mean_squared_error += type(pow(l2_distance(data_point, cluster_center), 2));
         }
 
-        sum_squared_error_values(cluster_index-1) = sum_squared_error;
+        sum_squared_error_values(cluster_index-1) = mean_squared_error;
     }
 
     clusters_number = original_clusters_number;
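
For readers skimming the hunk above: elbow_method refits k-means once per candidate cluster count and records the within-cluster sum of squared distances for each k. (Note that the renamed accumulator still holds a sum, not a mean; only the variable name changed.) The following is a minimal self-contained sketch of the same pattern in plain C++. It is illustrative only, not OpenNN's implementation; it seeds centers with the first k points purely for brevity and assumes data.size() >= k.

#include <cstddef>
#include <limits>
#include <vector>

using Point = std::vector<double>;

// Squared Euclidean distance between two points of equal dimension.
static double squared_distance(const Point& a, const Point& b)
{
    double sum = 0.0;
    for (std::size_t i = 0; i < a.size(); ++i)
        sum += (a[i] - b[i]) * (a[i] - b[i]);
    return sum;
}

// Naive Lloyd iterations; returns the within-cluster sum of squared
// distances (inertia). Centers are seeded with the first k points.
double kmeans_inertia(const std::vector<Point>& data, std::size_t k, int iterations = 20)
{
    std::vector<Point> centers(data.begin(), data.begin() + static_cast<std::ptrdiff_t>(k));
    std::vector<std::size_t> labels(data.size(), 0);

    for (int it = 0; it < iterations; ++it)
    {
        // Assignment step: attach each point to its nearest center.
        for (std::size_t i = 0; i < data.size(); ++i)
        {
            double best = std::numeric_limits<double>::max();
            for (std::size_t c = 0; c < k; ++c)
            {
                const double d = squared_distance(data[i], centers[c]);
                if (d < best) { best = d; labels[i] = c; }
            }
        }

        // Update step: move each center to the mean of its members.
        for (std::size_t c = 0; c < k; ++c)
        {
            Point mean(data[0].size(), 0.0);
            std::size_t count = 0;
            for (std::size_t i = 0; i < data.size(); ++i)
            {
                if (labels[i] != c) continue;
                for (std::size_t j = 0; j < mean.size(); ++j) mean[j] += data[i][j];
                ++count;
            }
            if (count > 0)
            {
                for (double& value : mean) value /= static_cast<double>(count);
                centers[c] = mean;
            }
        }
    }

    double inertia = 0.0; // the quantity the loop in the diff accumulates
    for (std::size_t i = 0; i < data.size(); ++i)
        inertia += squared_distance(data[i], centers[labels[i]]);
    return inertia;
}

// Elbow heuristic: record the inertia for k = 1..max_clusters; the
// preferred k is where the curve stops dropping sharply (the "elbow").
std::vector<double> elbow_curve(const std::vector<Point>& data, std::size_t max_clusters)
{
    std::vector<double> values;
    for (std::size_t k = 1; k <= max_clusters; ++k)
        values.push_back(kmeans_inertia(data, k));
    return values;
}
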
4 changes: 2 additions & 2 deletions opennn/opennn.vcxproj
@@ -243,10 +243,10 @@
     <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
   </ClCompile>
   <ClInclude Include="addition_layer_3d.h" />
-  <ClInclude Include="auto_association_data_set.h" />
   <ClCompile Include="addition_layer_3d.cpp" />
-  <ClCompile Include="auto_association_data_set.cpp" />
+  <ClInclude Include="auto_associative_data_set.h" />
   <ClInclude Include="auto_associative_neural_network.h" />
+  <ClCompile Include="auto_associative_data_set.cpp" />
   <ClCompile Include="auto_associative_neural_network.cpp" />
   <ClInclude Include="bounding_layer.h" />
   <ClCompile Include="bounding_layer.cpp" />
8 changes: 0 additions & 8 deletions opennn/probabilistic_layer.h
@@ -45,11 +45,6 @@ struct ProbabilisticLayerBackPropagation : LayerBackPropagation
 
     void print() const override;
 
-    // Tensor<type, 2> targets;
-
-    //Tensor<type, 1> deltas_row;
-    //Tensor<type, 2> activations_derivatives;
-
     Tensor<type, 2> combination_derivatives;
 
     Tensor<type, 1> bias_derivatives;
@@ -70,13 +65,10 @@ struct ProbabilisticLayerBackPropagationLM : LayerBackPropagationLM
 
     void print() const override;
 
-    //Tensor<type, 1> deltas_row;
-
     Tensor<type, 2> combination_derivatives;
 
     Tensor<type, 2> squared_errors_Jacobian;
 
-    //Tensor<type, 2> targets;
 };
18 changes: 0 additions & 18 deletions opennn/probabilistic_layer_3d.cpp
@@ -46,12 +46,6 @@ dimensions ProbabilisticLayer3D::get_output_dimensions() const
 }
 
 
-const type& ProbabilisticLayer3D::get_decision_threshold() const
-{
-    return decision_threshold;
-}
-
-
 const ProbabilisticLayer3D::ActivationFunction& ProbabilisticLayer3D::get_activation_function() const
 {
     return activation_function;
@@ -122,8 +116,6 @@ void ProbabilisticLayer3D::set(const Index& new_inputs_number,
     layer_type = Layer::Type::Probabilistic3D;
 
     activation_function = ActivationFunction::Softmax;
-
-    decision_threshold = type(0.5);
 }
@@ -185,12 +177,6 @@ void ProbabilisticLayer3D::set_parameters(const Tensor<type, 1>& new_parameters,
 }
 
 
-void ProbabilisticLayer3D::set_decision_threshold(const type& new_decision_threshold)
-{
-    decision_threshold = new_decision_threshold;
-}
-
-
 void ProbabilisticLayer3D::set_activation_function(const ActivationFunction& new_activation_function)
 {
     activation_function = new_activation_function;
@@ -275,8 +261,6 @@ void ProbabilisticLayer3D::forward_propagate(const vector<pair<type*, dimensions
     calculate_combinations(inputs, outputs);
 
     calculate_activations(outputs);
-    cout<<outputs.dimensions()<<endl;
-    cout<<outputs<<endl;
 }
@@ -391,7 +375,6 @@ void ProbabilisticLayer3D::from_XML(const XMLDocument& document)
     set_inputs_number(read_xml_index(probabilistic_layer_element, "InputsNumber"));
     set_inputs_depth(read_xml_index(probabilistic_layer_element, "InputsDepth"));
     set_output_dimensions({read_xml_index(probabilistic_layer_element, "NeuronsNumber")});
-    set_decision_threshold(read_xml_type(probabilistic_layer_element, "DecisionThreshold"));
     set_activation_function(read_xml_string(probabilistic_layer_element, "ActivationFunction"));
     set_parameters(to_type_vector(read_xml_string(probabilistic_layer_element, "Parameters"), " "));
@@ -406,7 +389,6 @@ void ProbabilisticLayer3D::to_XML(XMLPrinter& printer) const
     add_xml_element(printer, "InputsNumber", to_string(get_inputs_number_xxx()));
     add_xml_element(printer, "InputsDepth", to_string(get_inputs_depth()));
     add_xml_element(printer, "NeuronsNumber", to_string(get_neurons_number()));
-    add_xml_element(printer, "DecisionThreshold", to_string(get_decision_threshold()));
     add_xml_element(printer, "ActivationFunction", get_activation_function_string());
     add_xml_element(printer, "Parameters", tensor_to_string(get_parameters()));
8 changes: 2 additions & 6 deletions opennn/probabilistic_layer_3d.h
@@ -25,7 +25,7 @@ class ProbabilisticLayer3D : public Layer
 
 public:
 
-enum class ActivationFunction { Softmax, Competitive };
+    enum class ActivationFunction { Softmax, Competitive };
 
     ProbabilisticLayer3D(const Index& = 0,
                          const Index& = 0,
@@ -37,6 +37,7 @@
     Index get_neurons_number() const;
 
     // @todo
+
     dimensions get_input_dimensions() const override
     {
         throw runtime_error("XXX");
@@ -45,8 +46,6 @@
 
     dimensions get_output_dimensions() const override;
 
-    const type& get_decision_threshold() const;
-
     const ActivationFunction& get_activation_function() const;
     string get_activation_function_string() const;
     string get_activation_function_text() const;
@@ -60,7 +59,6 @@
     void set_output_dimensions(const dimensions&) override;
 
     void set_parameters(const Tensor<type, 1>&, const Index& index = 0) override;
-    void set_decision_threshold(const type&);
 
     void set_activation_function(const ActivationFunction&);
     void set_activation_function(const string&);
@@ -124,8 +122,6 @@
 
     ActivationFunction activation_function = ActivationFunction::Softmax;
 
-    type decision_threshold;
-
     Tensor<type, 3> empty;
 
     const Eigen::array<IndexPair<Index>, 2> double_contraction_indices = { IndexPair<Index>(0, 0), IndexPair<Index>(1, 1) };
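
A side note on the decision_threshold removal in this header and the matching .cpp hunks: the only activations the layer declares are Softmax and Competitive, where the predicted class is the arg-max of a probability vector, so a scalar cut-off has nothing to act on; a threshold is only meaningful for a single binary probability. A hedged illustration of that distinction in plain C++ (generic, not OpenNN code):

#include <algorithm>
#include <cstddef>
#include <vector>

// Binary case: a single probability is compared against a scalar threshold.
bool binary_decision(double probability, double decision_threshold = 0.5)
{
    return probability >= decision_threshold;
}

// Multi-class case (softmax/competitive): the predicted class is the
// arg-max of the probability vector, so no scalar threshold is involved.
std::size_t multiclass_decision(const std::vector<double>& probabilities)
{
    return static_cast<std::size_t>(
        std::max_element(probabilities.begin(), probabilities.end())
        - probabilities.begin());
}
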
30 changes: 15 additions & 15 deletions opennn/testing_analysis.cpp
@@ -492,10 +492,10 @@ Tensor<type, 1> TestingAnalysis::calculate_errors(const Tensor<type, 2>& targets
 
     Tensor<type, 1> errors(4);
 
-    Tensor<type, 0> sum_squared_error;
-    sum_squared_error.device(*thread_pool_device) = (outputs - targets).square().sum().sqrt();
+    Tensor<type, 0> mean_squared_error;
+    mean_squared_error.device(*thread_pool_device) = (outputs - targets).square().sum().sqrt();
 
-    errors.setValues({sum_squared_error(0),
+    errors.setValues({mean_squared_error(0),
                       errors(0) / type(samples_number),
                       sqrt(errors(1)),
                       calculate_normalized_squared_error(targets, outputs)});;
@@ -534,10 +534,10 @@ Tensor<type, 1> TestingAnalysis::calculate_binary_classification_errors(const Da
 
     // Results
 
-    Tensor<type, 0> sum_squared_error;
-    sum_squared_error.device(*thread_pool_device) = (outputs-targets).square().sum().sqrt();
+    Tensor<type, 0> mean_squared_error;
+    mean_squared_error.device(*thread_pool_device) = (outputs-targets).square().sum().sqrt();
 
-    errors(0) = sum_squared_error(0);
+    errors(0) = mean_squared_error(0);
     errors(1) = errors(0)/type(training_samples_number);
     errors(2) = sqrt(errors(1));
     errors(3) = calculate_normalized_squared_error(targets, outputs);
@@ -566,10 +566,10 @@ Tensor<type, 1> TestingAnalysis::calculate_multiple_classification_errors(const
 
     // Results
 
-    Tensor<type, 0> sum_squared_error;
-    sum_squared_error.device(*thread_pool_device) = (outputs-targets).square().sum().sqrt();
+    Tensor<type, 0> mean_squared_error;
+    mean_squared_error.device(*thread_pool_device) = (outputs-targets).square().sum().sqrt();
 
-    errors(0) = sum_squared_error(0);
+    errors(0) = mean_squared_error(0);
     errors(1) = errors(0)/type(training_samples_number);
     errors(2) = sqrt(errors(1));
     errors(3) = calculate_normalized_squared_error(targets, outputs);
@@ -585,8 +585,8 @@ type TestingAnalysis::calculate_normalized_squared_error(const Tensor<type, 2>&
 
     const Tensor<type, 1> targets_mean = mean(targets);
 
-    Tensor<type, 0> sum_squared_error;
-    sum_squared_error.device(*thread_pool_device) = (outputs - targets).square().sum();
+    Tensor<type, 0> mean_squared_error;
+    mean_squared_error.device(*thread_pool_device) = (outputs - targets).square().sum();
 
     type normalization_coefficient = type(0);
 
@@ -599,7 +599,7 @@ type TestingAnalysis::calculate_normalized_squared_error(const Tensor<type, 2>&
         normalization_coefficient += norm(0);
     }
 
-    return sum_squared_error()/normalization_coefficient;
+    return mean_squared_error()/normalization_coefficient;
 }
@@ -703,8 +703,8 @@ type TestingAnalysis::calculate_weighted_squared_error(const Tensor<type, 2>& ta
 
     f_3.device(*thread_pool_device) = targets.constant(type(0));
 
-    Tensor<type, 0> sum_squared_error;
-    sum_squared_error.device(*thread_pool_device) = (if_sentence.select(f_1, else_sentence.select(f_2, f_3))).sum();
+    Tensor<type, 0> mean_squared_error;
+    mean_squared_error.device(*thread_pool_device) = (if_sentence.select(f_1, else_sentence.select(f_2, f_3))).sum();
 
     Index negatives = 0;
 
@@ -716,7 +716,7 @@ type TestingAnalysis::calculate_weighted_squared_error(const Tensor<type, 2>& ta
 
     const type normalization_coefficient = type(negatives)*negatives_weight*type(0.5);
 
-    return sum_squared_error(0)/normalization_coefficient;
+    return mean_squared_error(0)/normalization_coefficient;
 }


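
As a reading aid for the renames above: each of these functions fills a four-entry error vector from the same residuals. A plain-C++ restatement of that flow follows; it is illustrative only and mirrors the tensor expressions in the hunks, where the renamed Tensor<type, 0> actually holds the square root of the sum of squared residuals (the L2 norm of the error) rather than a mean.

#include <array>
#include <cmath>
#include <cstddef>
#include <vector>

// Mirrors the tensor expressions in the hunks above; the first entry seeds
// the remaining ones, exactly as errors(0) does in the diff.
std::array<double, 3> error_summary(const std::vector<double>& targets,
                                    const std::vector<double>& outputs)
{
    double sum_of_squares = 0.0;
    for (std::size_t i = 0; i < targets.size(); ++i)
    {
        const double residual = outputs[i] - targets[i];
        sum_of_squares += residual * residual;
    }

    std::array<double, 3> errors{};
    errors[0] = std::sqrt(sum_of_squares);                       // (outputs - targets).square().sum().sqrt()
    errors[1] = errors[0] / static_cast<double>(targets.size()); // errors(0) / samples_number
    errors[2] = std::sqrt(errors[1]);                            // sqrt(errors(1))
    return errors; // the fourth entry in the diff is the normalized squared error
}
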
6 changes: 3 additions & 3 deletions tests/conjugate_gradient_test.cpp
@@ -8,7 +8,7 @@ TEST(ConjugateGradientTest, DefaultConstructor)
     ConjugateGradient conjugate_gradient_1;
     // EXPECT_EQ(!conjugate_gradient_1.has_loss_index());
 
-    // ConjugateGradient conjugate_gradient_2(&sum_squared_error);
+    // ConjugateGradient conjugate_gradient_2(&mean_squared_error);
     // EXPECT_EQ(conjugate_gradient_2.has_loss_index());
 }
 
@@ -18,7 +18,7 @@ TEST(ConjugateGradientTest, GeneralConstructor)
     ConjugateGradient conjugate_gradient_1;
     // EXPECT_EQ(!conjugate_gradient_1.has_loss_index());
 
-    // ConjugateGradient conjugate_gradient_2(&sum_squared_error);
+    // ConjugateGradient conjugate_gradient_2(&mean_squared_error);
     // EXPECT_EQ(conjugate_gradient_2.has_loss_index());
 
 }
@@ -57,7 +57,7 @@ TEST(ConjugateGradientTest, FrParameter)
     ConjugateGradient conjugate_gradient;
     // EXPECT_EQ(!conjugate_gradient_1.has_loss_index());
 
-    // ConjugateGradient conjugate_gradient_2(&sum_squared_error);
+    // ConjugateGradient conjugate_gradient_2(&mean_squared_error);
     // EXPECT_EQ(conjugate_gradient_2.has_loss_index());
 
 }
12 changes: 6 additions & 6 deletions tests/learning_rate_algorithm_test.cpp
@@ -44,7 +44,7 @@ TEST(LearningRateAlgorithmTest, BracketingTriplet)
 /*
     Tensor<Index, 3> sample_indices(0, 1, samples_number);
 
-    LearningRateAlgorithm learning_rate_algorithm(&sum_squared_error);
+    LearningRateAlgorithm learning_rate_algorithm(&mean_squared_error);
 
     type loss = 0.0;
     Tensor<type, 1> training_direction;
@@ -61,12 +61,12 @@
 void LearningRateAlgorithmTest::test_calculate_bracketing_triplet()
 {
-    sum_squared_error.set_regularization_method(LossIndex::RegularizationMethod::L2);
+    mean_squared_error.set_regularization_method(LossIndex::RegularizationMethod::L2);
 
     neural_network.set_parameters_random();
 
-    //loss = sum_squared_error.calculate_training_loss();
-    //training_direction = sum_squared_error.calculate_training_loss_gradient()*(-1.0);
+    //loss = mean_squared_error.calculate_training_loss();
+    //training_direction = mean_squared_error.calculate_training_loss_gradient()*(-1.0);
 
     initial_learning_rate = 0.01;
@@ -143,7 +143,7 @@ void LearningRateAlgorithmTest::test_calculate_golden_section_directional_point(
 
     neural_network.set(NeuralNetwork::ModelType::Approximation, {1, 1});
 
-    LearningRateAlgorithm learning_rate_algorithm(&sum_squared_error);
+    LearningRateAlgorithm learning_rate_algorithm(&mean_squared_error);
 
     neural_network.set_parameters_constant(type(1));
@@ -181,7 +181,7 @@ void LearningRateAlgorithmTest::test_calculate_Brent_method_directional_point()
 
     // @todo loss_index.calculate_training_loss not available
 
-    Tensor<type, 1> gradient = sum_squared_error.calculate_numerical_gradient();
+    Tensor<type, 1> gradient = mean_squared_error.calculate_numerical_gradient();
 
     Tensor<type, 1> training_direction = gradient*(type(-1.0));
3 changes: 2 additions & 1 deletion tests/mean_squared_error_test.cpp
@@ -83,7 +83,7 @@ TEST(MeanSquaredErrorTest, BackPropagateLm)
     const Index inputs_number = get_random_index(1, 1);
     const Index outputs_number = get_random_index(1, 1);
     const Index neurons_number = get_random_index(1, 1);
-
+/*
     // Data set
 
     DataSet data_set(samples_number, { inputs_number }, { outputs_number });
@@ -121,4 +121,5 @@
     EXPECT_NEAR(back_propagation_lm.error(), back_propagation.error(), type(1.0e-3));
     EXPECT_EQ(are_equal(back_propagation_lm.squared_errors_jacobian, numerical_jacobian), true);
     EXPECT_EQ(are_equal(back_propagation_lm.gradient, numerical_gradient), true);
+*/
 }
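
The LM back-propagation test fenced off above relies on gradient checking: the analytic (backpropagated) gradient is compared against a central-difference approximation within a small tolerance. A generic sketch of that check in plain C++ follows; it is not OpenNN's calculate_numerical_gradient, whose internals are not shown in this diff.

#include <cmath>
#include <cstddef>
#include <functional>
#include <vector>

// Central-difference approximation of the gradient of a scalar loss,
// used to validate an analytically backpropagated gradient.
std::vector<double> numerical_gradient(
    const std::function<double(const std::vector<double>&)>& loss,
    std::vector<double> parameters,
    double h = 1.0e-6)
{
    std::vector<double> gradient(parameters.size());
    for (std::size_t i = 0; i < parameters.size(); ++i)
    {
        const double original = parameters[i];
        parameters[i] = original + h;
        const double forward = loss(parameters);
        parameters[i] = original - h;
        const double backward = loss(parameters);
        parameters[i] = original;
        gradient[i] = (forward - backward) / (2.0 * h); // O(h^2) accurate
    }
    return gradient;
}

// Mirrors the EXPECT_EQ(are_equal(...), true) pattern in the fenced-off test.
bool are_close(const std::vector<double>& a, const std::vector<double>& b,
               double tolerance = 1.0e-3)
{
    if (a.size() != b.size()) return false;
    for (std::size_t i = 0; i < a.size(); ++i)
        if (std::abs(a[i] - b[i]) > tolerance) return false;
    return true;
}
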
3 changes: 2 additions & 1 deletion tests/minkowski_error_test.cpp
@@ -34,7 +34,7 @@ TEST(MinkowskiErrorTest, BackPropagate)
     const Index inputs_number = get_random_index(1, 10);
     const Index targets_number = get_random_index(1, 10);
     const Index neurons_number = get_random_index(1, 10);
-
+/*
     DataSet data_set(samples_number, { inputs_number }, { targets_number });
 
     data_set.set_data_random();
     data_set.set(DataSet::SampleUse::Training);
@@ -63,4 +63,5 @@
     const Tensor<type, 1> numerical_gradient = minkowski_error.calculate_numerical_gradient();
 
     EXPECT_EQ(are_equal(back_propagation.gradient, numerical_gradient, type(1.0e-3)), true);
+*/
 }
5 changes: 2 additions & 3 deletions tests/normalized_squared_error_test.cpp
@@ -32,7 +32,7 @@ TEST(NormalizedSquaredErrorTest, BackPropagateApproximation)
     const Index inputs_number = get_random_index(1, 10);
     const Index targets_number = get_random_index(1, 10);
     const Index neurons_number = get_random_index(1, 10);
-
+/*
     DataSet data_set(samples_number, { inputs_number }, { targets_number });
 
     data_set.set_data_random();
     data_set.set(DataSet::SampleUse::Training);
@@ -61,6 +61,7 @@
     const Tensor<type, 1> numerical_gradient = normalized_squared_error.calculate_numerical_gradient();
 
     EXPECT_EQ(are_equal(back_propagation.gradient, numerical_gradient, type(1.0e-3)), true);
+*/
 }
 
 
@@ -416,8 +417,6 @@ void NormalizedSquaredErrorTest::test_back_propagate_lm()
 void NormalizedSquaredErrorTest::test_calculate_normalization_coefficient()
 {
     cout << "test_calculate_normalization_coefficient\n";
 
-    Index samples_number;
-    Index inputs_number;
     Index outputs_number;
5 changes: 2 additions & 3 deletions tests/probabilistic_layer_3d_test.cpp
@@ -4,14 +4,13 @@
 
 TEST(ProbabilisticLayer3DTest, DefaultConstructor)
 {
-/*
+
     ProbabilisticLayer3D probabilistic_layer_3d;
 
     // EXPECT_EQ(probabilistic_layer_3d.get_inputs_number(), 0);
     // EXPECT_EQ(probabilistic_layer_3d.get_inputs_depth(), 0);
     // EXPECT_EQ(probabilistic_layer_3d.get_neurons_number(), 0);
-    EXPECT_EQ(probabilistic_layer_3d.get_parameters_number(), 0);
-*/
+    // EXPECT_EQ(probabilistic_layer_3d.get_parameters_number(), 0);
 }

(The remaining changed files in this commit were not loaded in this view.)