Mirror of https://hub.njuu.cf/TheAlgorithms/C-Plus-Plus.git, synced 2023-10-11 13:05:55 +08:00

[cpp fixes] tidied up code based on error reports by clang-tidy (#950)

* tidied up code based on error reports by clang-tidy
* added doc for activation function

parent 1f32b4e412
commit 8ab9a2ae93
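The recurring change across the files below is replacing pre-processor `#define` constants with typed `constexpr` declarations (`MAX_ITER`, `MIN_DISTANCE`, `EPSILON`, `MAX_ITERATIONS`). A minimal before/after sketch of that pattern, using the `MAX_ITER` value from the first file; the `main` harness is illustrative only, not part of the commit:

#include <iostream>

// #define MAX_ITER 500            // before: untyped, unscoped macro
constexpr int MAX_ITER = 500;      // after: typed, scoped, visible to tooling

int main() {
    std::cout << "MAX_ITER = " << MAX_ITER << '\n';
    return 0;
}

A `constexpr` constant participates in type checking and name lookup, which is why clang-tidy's modernize checks prefer it over `#define`.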
@@ -26,6 +26,7 @@
  * computed using stochastic gradient descent method.
  */
 
+#include <array>
 #include <cassert>
 #include <climits>
 #include <cmath>
@@ -35,7 +36,8 @@
 #include <numeric>
 #include <vector>
 
-#define MAX_ITER 500  // INT_MAX ///< Maximum number of iterations to learn
+/** Maximum number of iterations to learn */
+constexpr int MAX_ITER = 500;  // INT_MAX
 
 /** \namespace machine_learning
  * \brief Machine learning algorithms
@@ -50,8 +52,8 @@ class adaline {
      * \param[in] convergence accuracy (optional,
      * default=\f$1\times10^{-5}\f$)
      */
-    adaline(int num_features, const double eta = 0.01f,
+    explicit adaline(int num_features, const double eta = 0.01f,
             const double accuracy = 1e-5)
         : eta(eta), accuracy(accuracy) {
         if (eta <= 0) {
             std::cerr << "learning rate should be positive and nonzero"
@@ -64,7 +66,7 @@ class adaline {
                 1);  // additional weight is for the constant bias term
 
         // initialize with random weights in the range [-50, 49]
-        for (int i = 0; i < weights.size(); i++) weights[i] = 1.f;
+        for (double &weight : weights) weight = 1.f;
         // weights[i] = (static_cast<double>(std::rand() % 100) - 50);
     }
 
@@ -75,8 +77,9 @@ class adaline {
         out << "<";
         for (int i = 0; i < ada.weights.size(); i++) {
             out << ada.weights[i];
-            if (i < ada.weights.size() - 1)
+            if (i < ada.weights.size() - 1) {
                 out << ", ";
+            }
         }
         out << ">";
         return out;
@@ -90,28 +93,33 @@ class adaline {
      * model prediction output
      */
     int predict(const std::vector<double> &x, double *out = nullptr) {
-        if (!check_size_match(x))
+        if (!check_size_match(x)) {
             return 0;
+        }
 
         double y = weights.back();  // assign bias value
 
         // for (int i = 0; i < x.size(); i++) y += x[i] * weights[i];
         y = std::inner_product(x.begin(), x.end(), weights.begin(), y);
 
-        if (out != nullptr)  // if out variable is provided
+        if (out != nullptr) {  // if out variable is provided
             *out = y;
+        }
 
         return activation(y);  // quantizer: apply ADALINE threshold function
     }
 
     /**
      * Update the weights of the model using supervised learning for one
-     * feature vector \param[in] x feature vector \param[in] y known output
-     * value \returns correction factor
+     * feature vector
+     * \param[in] x feature vector
+     * \param[in] y known output value
+     * \returns correction factor
      */
     double fit(const std::vector<double> &x, const int &y) {
-        if (!check_size_match(x))
+        if (!check_size_match(x)) {
             return 0;
+        }
 
         /* output of the model with current weights */
         int p = predict(x);
@@ -129,21 +137,23 @@ class adaline {
 
     /**
      * Update the weights of the model using supervised learning for an
-     * array of vectors. \param[in] X array of feature vector \param[in] y
-     * known output value for each feature vector
+     * array of vectors.
+     * \param[in] X array of feature vector
+     * \param[in] y known output value for each feature vector
      */
-    template <int N>
-    void fit(std::vector<double> const (&X)[N], const int *y) {
+    template <size_t N>
+    void fit(std::array<std::vector<double>, N> const &X,
+             std::array<int, N> const &Y) {
         double avg_pred_error = 1.f;
 
-        int iter;
+        int iter = 0;
        for (iter = 0; (iter < MAX_ITER) && (avg_pred_error > accuracy);
             iter++) {
            avg_pred_error = 0.f;
 
            // perform fit for each sample
            for (int i = 0; i < N; i++) {
-                double err = fit(X[i], y[i]);
+                double err = fit(X[i], Y[i]);
                avg_pred_error += std::abs(err);
            }
            avg_pred_error /= N;
@@ -154,15 +164,25 @@ class adaline {
                       << "\tAvg error: " << avg_pred_error << std::endl;
         }
 
-        if (iter < MAX_ITER)
-
+        if (iter < MAX_ITER) {
             std::cout << "Converged after " << iter << " iterations."
                       << std::endl;
-        else
+        } else {
             std::cout << "Did not converge after " << iter << " iterations."
                       << std::endl;
+        }
     }
 
+    /** Defines activation function as Heaviside's step function.
+     * \f[
+     * f(x) = \begin{cases}
+     * -1 & \forall x \le 0\\
+     *  1 & \forall x > 0
+     * \end{cases}
+     * \f]
+     * @param x input value to apply activation on
+     * @return activation output
+     */
     int activation(double x) { return x > 0 ? 1 : -1; }
 
  private:
@@ -206,15 +226,19 @@ void test1(double eta = 0.01) {
 
     const int N = 10;  // number of sample points
 
-    std::vector<double> X[N] = {{0, 1},  {1, -2},   {2, 3},   {3, -1},
-                                {4, 1},  {6, -5},   {-7, -3}, {-8, 5},
-                                {-9, 2}, {-10, -15}};
-    int y[] = {1, -1, 1, -1, -1, -1, 1, 1, 1, -1};  // corresponding y-values
+    std::array<std::vector<double>, N> X = {
+        std::vector<double>({0, 1}),   std::vector<double>({1, -2}),
+        std::vector<double>({2, 3}),   std::vector<double>({3, -1}),
+        std::vector<double>({4, 1}),   std::vector<double>({6, -5}),
+        std::vector<double>({-7, -3}), std::vector<double>({-8, 5}),
+        std::vector<double>({-9, 2}),  std::vector<double>({-10, -15})};
+    std::array<int, N> y = {1, -1, 1, -1, -1,
+                            -1, 1, 1, 1,  -1};  // corresponding y-values
 
     std::cout << "------- Test 1 -------" << std::endl;
     std::cout << "Model before fit: " << ada << std::endl;
 
-    ada.fit(X, y);
+    ada.fit<N>(X, y);
     std::cout << "Model after fit: " << ada << std::endl;
 
     int predict = ada.predict({5, -3});
@@ -240,17 +264,17 @@ void test2(double eta = 0.01) {
 
     const int N = 50;  // number of sample points
 
-    std::vector<double> X[N];
-    int Y[N];  // corresponding y-values
+    std::array<std::vector<double>, N> X;
+    std::array<int, N> Y{};  // corresponding y-values
 
     // generate sample points in the interval
     // [-range2/100 , (range2-1)/100]
     int range = 500;          // sample points full-range
     int range2 = range >> 1;  // sample points half-range
     for (int i = 0; i < N; i++) {
-        double x0 = ((std::rand() % range) - range2) / 100.f;
-        double x1 = ((std::rand() % range) - range2) / 100.f;
-        X[i] = {x0, x1};
+        double x0 = (static_cast<double>(std::rand() % range) - range2) / 100.f;
+        double x1 = (static_cast<double>(std::rand() % range) - range2) / 100.f;
+        X[i] = std::vector<double>({x0, x1});
         Y[i] = (x0 + 3. * x1) > -1 ? 1 : -1;
     }
 
@@ -262,8 +286,8 @@ void test2(double eta = 0.01) {
 
     int N_test_cases = 5;
     for (int i = 0; i < N_test_cases; i++) {
-        double x0 = ((std::rand() % range) - range2) / 100.f;
-        double x1 = ((std::rand() % range) - range2) / 100.f;
+        double x0 = (static_cast<double>(std::rand() % range) - range2) / 100.f;
+        double x1 = (static_cast<double>(std::rand() % range) - range2) / 100.f;
 
         int predict = ada.predict({x0, x1});
 
@@ -291,18 +315,18 @@ void test3(double eta = 0.01) {
 
     const int N = 100;  // number of sample points
 
-    std::vector<double> X[N];
-    int Y[N];  // corresponding y-values
+    std::array<std::vector<double>, N> X;
+    std::array<int, N> Y{};  // corresponding y-values
 
     // generate sample points in the interval
     // [-range2/100 , (range2-1)/100]
     int range = 200;          // sample points full-range
     int range2 = range >> 1;  // sample points half-range
     for (int i = 0; i < N; i++) {
-        double x0 = ((std::rand() % range) - range2) / 100.f;
-        double x1 = ((std::rand() % range) - range2) / 100.f;
-        double x2 = ((std::rand() % range) - range2) / 100.f;
-        X[i] = {x0, x1, x2, x0 * x0, x1 * x1, x2 * x2};
+        double x0 = (static_cast<double>(std::rand() % range) - range2) / 100.f;
+        double x1 = (static_cast<double>(std::rand() % range) - range2) / 100.f;
+        double x2 = (static_cast<double>(std::rand() % range) - range2) / 100.f;
+        X[i] = std::vector<double>({x0, x1, x2, x0 * x0, x1 * x1, x2 * x2});
         Y[i] = ((x0 * x0) + (x1 * x1) + (x2 * x2)) <= 1.f ? 1 : -1;
     }
 
@@ -314,9 +338,9 @@ void test3(double eta = 0.01) {
 
     int N_test_cases = 5;
     for (int i = 0; i < N_test_cases; i++) {
-        double x0 = ((std::rand() % range) - range2) / 100.f;
-        double x1 = ((std::rand() % range) - range2) / 100.f;
-        double x2 = ((std::rand() % range) - range2) / 100.f;
+        double x0 = (static_cast<double>(std::rand() % range) - range2) / 100.f;
+        double x1 = (static_cast<double>(std::rand() % range) - range2) / 100.f;
+        double x2 = (static_cast<double>(std::rand() % range) - range2) / 100.f;
 
         int predict = ada.predict({x0, x1, x2, x0 * x0, x1 * x1, x2 * x2});
 
@@ -334,8 +358,9 @@ int main(int argc, char **argv) {
     std::srand(std::time(nullptr));  // initialize random number generator
 
     double eta = 0.1;  // default value of eta
-    if (argc == 2)  // read eta value from commandline argument if present
+    if (argc == 2) {  // read eta value from commandline argument if present
         eta = strtof(argv[1], nullptr);
+    }
 
     test1(eta);
 
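The newly documented `activation` function is ADALINE's quantizer: a Heaviside-style step mapping any non-positive net input to -1 and any positive input to +1, exactly as the new Doxygen block states. A self-contained sketch of just that function; the assertion harness is illustrative, not from the commit:

#include <cassert>

// Heaviside-style threshold, as documented in the hunk above:
// f(x) = -1 for x <= 0, +1 for x > 0
int activation(double x) { return x > 0 ? 1 : -1; }

int main() {
    assert(activation(2.5) == 1);
    assert(activation(0.0) == -1);  // the boundary x = 0 maps to -1
    assert(activation(-3.1) == -1);
    return 0;
}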
@@ -25,8 +25,11 @@
  */
 #define _USE_MATH_DEFINES  //< required for MS Visual C++
 #include <algorithm>
+#include <array>
+#include <cerrno>
 #include <cmath>
 #include <cstdlib>
+#include <cstring>
 #include <ctime>
 #include <fstream>
 #include <iostream>
@@ -68,7 +71,8 @@ int save_2d_data(const char *fname,
     fp.open(fname);
     if (!fp.is_open()) {
         // error with opening file to write
-        std::cerr << "Error opening file " << fname << "\n";
+        std::cerr << "Error opening file " << fname << ": "
+                  << std::strerror(errno) << "\n";
         return -1;
     }
 
@@ -76,12 +80,14 @@ int save_2d_data(const char *fname,
     for (int i = 0; i < num_points; i++) {
         // for each feature in the array
         for (int j = 0; j < num_features; j++) {
             fp << X[i][j];  // print the feature value
-            if (j < num_features - 1)  // if not the last feature
+            if (j < num_features - 1) {  // if not the last feature
                 fp << ",";  // suffix comma
+            }
+        }
+        if (i < num_points - 1) {  // if not the last row
+            fp << "\n";  // start a new line
         }
-        if (i < num_points - 1)  // if not the last row
-            fp << "\n";  // start a new line
     }
 
     fp.close();
@@ -99,12 +105,12 @@ int save_2d_data(const char *fname,
 void get_min_2d(const std::vector<std::valarray<double>> &X, double *val,
                 int *x_idx, int *y_idx) {
     val[0] = INFINITY;  // initial min value
-    int N = X.size();
+    size_t N = X.size();
 
     for (int i = 0; i < N; i++) {  // traverse each x-index
         auto result = std::min_element(std::begin(X[i]), std::end(X[i]));
         double d_min = *result;
-        int j = std::distance(std::begin(X[i]), result);
+        std::ptrdiff_t j = std::distance(std::begin(X[i]), result);
 
         if (d_min < val[0]) {  // if a lower value is found
             // save the value and its index
@@ -119,7 +125,8 @@ void get_min_2d(const std::vector<std::valarray<double>> &X, double *val,
  * \brief Machine learning algorithms
  */
 namespace machine_learning {
-#define MIN_DISTANCE 1e-4  ///< Minimum average distance of image nodes
+/** Minimum average distance of image nodes */
+constexpr double MIN_DISTANCE = 1e-4;
 
 /**
  * Create the distance matrix or
@@ -136,9 +143,8 @@ int save_u_matrix(const char *fname,
                   const std::vector<std::vector<std::valarray<double>>> &W) {
     std::ofstream fp(fname);
     if (!fp) {  // error with fopen
-        char msg[120];
-        std::snprintf(msg, sizeof(msg), "File error (%s): ", fname);
-        std::perror(msg);
+        std::cerr << "File error (" << fname << "): " << std::strerror(errno)
+                  << std::endl;
         return -1;
     }
 
@@ -153,7 +159,7 @@ int save_u_matrix(const char *fname,
             int to_x = std::min<int>(W.size(), i + R + 1);
             int from_y = std::max<int>(0, j - R);
             int to_y = std::min<int>(W[0].size(), j + R + 1);
-            int l, m;
+            int l = 0, m = 0;
 #ifdef _OPENMP
 #pragma omp parallel for reduction(+ : distance)
 #endif
@@ -172,8 +178,9 @@ int save_u_matrix(const char *fname,
                 fp << ',';  // suffix comma
             }
         }
-        if (i < W.size() - 1)  // if not the last row
+        if (i < W.size() - 1) {  // if not the last row
             fp << '\n';  // start a new line
+        }
     }
 
     fp.close();
@@ -194,10 +201,11 @@ double update_weights(const std::valarray<double> &X,
                       std::vector<std::vector<std::valarray<double>>> *W,
                       std::vector<std::valarray<double>> *D, double alpha,
                       int R) {
-    int x, y;
+    int x = 0, y = 0;
     int num_out_x = static_cast<int>(W->size());       // output nodes - in X
     int num_out_y = static_cast<int>(W[0][0].size());  // output nodes - in Y
-    int num_features = static_cast<int>(W[0][0][0].size());  // features = in Z
+    // int num_features = static_cast<int>(W[0][0][0].size());  // features =
+    // in Z
     double d_min = 0.f;
 
 #ifdef _OPENMP
@@ -217,7 +225,7 @@ double update_weights(const std::valarray<double> &X,
 
     // step 2: get closest node i.e., node with snallest Euclidian distance
     // to the current pattern
-    int d_min_x, d_min_y;
+    int d_min_x = 0, d_min_y = 0;
     get_min_2d(*D, &d_min, &d_min_x, &d_min_y);
 
     // step 3a: get the neighborhood range
@@ -261,10 +269,10 @@ double update_weights(const std::valarray<double> &X,
 void kohonen_som(const std::vector<std::valarray<double>> &X,
                  std::vector<std::vector<std::valarray<double>>> *W,
                  double alpha_min) {
-    int num_samples = X.size();      // number of rows
-    int num_features = X[0].size();  // number of columns
-    int num_out = W->size();         // output matrix size
-    int R = num_out >> 2, iter = 0;
+    size_t num_samples = X.size();  // number of rows
+    // size_t num_features = X[0].size();  // number of columns
+    size_t num_out = W->size();  // output matrix size
+    size_t R = num_out >> 2, iter = 0;
     double alpha = 1.f;
 
     std::vector<std::valarray<double>> D(num_out);
@@ -283,15 +291,17 @@ void kohonen_som(const std::vector<std::valarray<double>> &X,
         }
 
         // every 100th iteration, reduce the neighborhood range
-        if (iter % 300 == 0 && R > 1)
+        if (iter % 300 == 0 && R > 1) {
             R--;
+        }
 
         dmin /= num_samples;
 
         // termination condition variable -> % change in minimum distance
         dmin_ratio = (past_dmin - dmin) / past_dmin;
-        if (dmin_ratio < 0)
+        if (dmin_ratio < 0) {
             dmin_ratio = 1.f;
+        }
         past_dmin = dmin;
 
         std::cout << "iter: " << iter << "\t alpha: " << alpha << "\t R: " << R
@@ -320,14 +330,14 @@ using machine_learning::save_u_matrix;
 void test_2d_classes(std::vector<std::valarray<double>> *data) {
     const int N = data->size();
     const double R = 0.3;  // radius of cluster
-    int i;
+    int i = 0;
     const int num_classes = 4;
-    const double centres[][2] = {
+    std::array<std::array<double, 2>, num_classes> centres = {
         // centres of each class cluster
-        {.5, .5},   // centre of class 1
-        {.5, -.5},  // centre of class 2
-        {-.5, .5},  // centre of class 3
-        {-.5, -.5}  // centre of class 4
+        std::array<double, 2>({.5, .5}),   // centre of class 1
+        std::array<double, 2>({.5, -.5}),  // centre of class 2
+        std::array<double, 2>({-.5, .5}),  // centre of class 3
+        std::array<double, 2>({-.5, -.5})  // centre of class 4
     };
 
 #ifdef _OPENMP
@@ -357,15 +367,16 @@ void test_2d_classes(std::vector<std::valarray<double>> *data) {
  * * `w12.csv`: trained SOM map
  */
 void test1() {
-    int j, N = 300;
+    int j = 0, N = 300;
     int features = 2;
     int num_out = 30;
     std::vector<std::valarray<double>> X(N);
     std::vector<std::vector<std::valarray<double>>> W(num_out);
     for (int i = 0; i < std::max(num_out, N); i++) {
         // loop till max(N, num_out)
-        if (i < N)  // only add new arrays if i < N
+        if (i < N) {  // only add new arrays if i < N
             X[i] = std::valarray<double>(features);
+        }
         if (i < num_out) {  // only add new arrays if i < num_out
             W[i] = std::vector<std::valarray<double>>(num_out);
             for (int k = 0; k < num_out; k++) {
@@ -373,9 +384,10 @@ void test1() {
 #ifdef _OPENMP
 #pragma omp for
 #endif
-                for (j = 0; j < features; j++)
+                for (j = 0; j < features; j++) {
                     // preallocate with random initial weights
                     W[i][k][j] = _random(-10, 10);
+                }
             }
         }
     }
@@ -397,16 +409,16 @@ void test1() {
  * \param[out] data matrix to store data in
 */
 void test_3d_classes1(std::vector<std::valarray<double>> *data) {
-    const int N = data->size();
+    const size_t N = data->size();
     const double R = 0.3;  // radius of cluster
-    int i;
+    int i = 0;
     const int num_classes = 4;
-    const double centres[][3] = {
+    const std::array<std::array<double, 3>, num_classes> centres = {
         // centres of each class cluster
-        {.5, .5, .5},    // centre of class 1
-        {.5, -.5, -.5},  // centre of class 2
-        {-.5, .5, .5},   // centre of class 3
-        {-.5, -.5 - .5}  // centre of class 4
+        std::array<double, 3>({.5, .5, .5}),    // centre of class 1
+        std::array<double, 3>({.5, -.5, -.5}),  // centre of class 2
+        std::array<double, 3>({-.5, .5, .5}),   // centre of class 3
+        std::array<double, 3>({-.5, -.5 - .5})  // centre of class 4
     };
 
 #ifdef _OPENMP
@@ -437,15 +449,16 @@ void test_3d_classes1(std::vector<std::valarray<double>> *data) {
 * * `w22.csv`: trained SOM map
 */
 void test2() {
-    int j, N = 300;
+    int j = 0, N = 300;
     int features = 3;
     int num_out = 30;
     std::vector<std::valarray<double>> X(N);
     std::vector<std::vector<std::valarray<double>>> W(num_out);
     for (int i = 0; i < std::max(num_out, N); i++) {
         // loop till max(N, num_out)
-        if (i < N)  // only add new arrays if i < N
+        if (i < N) {  // only add new arrays if i < N
             X[i] = std::valarray<double>(features);
+        }
         if (i < num_out) {  // only add new arrays if i < num_out
             W[i] = std::vector<std::valarray<double>>(num_out);
             for (int k = 0; k < num_out; k++) {
@@ -453,9 +466,10 @@ void test2() {
 #ifdef _OPENMP
 #pragma omp for
 #endif
-                for (j = 0; j < features; j++)
+                for (j = 0; j < features; j++) {
                     // preallocate with random initial weights
                     W[i][k][j] = _random(-10, 10);
+                }
             }
         }
     }
@@ -477,20 +491,20 @@ void test2() {
 * \param[out] data matrix to store data in
 */
 void test_3d_classes2(std::vector<std::valarray<double>> *data) {
-    const int N = data->size();
+    const size_t N = data->size();
     const double R = 0.2;  // radius of cluster
-    int i;
+    int i = 0;
     const int num_classes = 8;
-    const double centres[][3] = {
+    const std::array<std::array<double, 3>, num_classes> centres = {
         // centres of each class cluster
-        {.5, .5, .5},    // centre of class 1
-        {.5, .5, -.5},   // centre of class 2
-        {.5, -.5, .5},   // centre of class 3
-        {.5, -.5, -.5},  // centre of class 4
-        {-.5, .5, .5},   // centre of class 5
-        {-.5, .5, -.5},  // centre of class 6
-        {-.5, -.5, .5},  // centre of class 7
-        {-.5, -.5, -.5}  // centre of class 8
+        std::array<double, 3>({.5, .5, .5}),    // centre of class 1
+        std::array<double, 3>({.5, .5, -.5}),   // centre of class 2
+        std::array<double, 3>({.5, -.5, .5}),   // centre of class 3
+        std::array<double, 3>({.5, -.5, -.5}),  // centre of class 4
+        std::array<double, 3>({-.5, .5, .5}),   // centre of class 5
+        std::array<double, 3>({-.5, .5, -.5}),  // centre of class 6
+        std::array<double, 3>({-.5, -.5, .5}),  // centre of class 7
+        std::array<double, 3>({-.5, -.5, -.5})  // centre of class 8
     };
 
 #ifdef _OPENMP
@@ -521,15 +535,16 @@ void test_3d_classes2(std::vector<std::valarray<double>> *data) {
 * * `w32.csv`: trained SOM map
 */
 void test3() {
-    int j, N = 500;
+    int j = 0, N = 500;
     int features = 3;
     int num_out = 30;
     std::vector<std::valarray<double>> X(N);
     std::vector<std::vector<std::valarray<double>>> W(num_out);
     for (int i = 0; i < std::max(num_out, N); i++) {
         // loop till max(N, num_out)
-        if (i < N)  // only add new arrays if i < N
+        if (i < N) {  // only add new arrays if i < N
             X[i] = std::valarray<double>(features);
+        }
         if (i < num_out) {  // only add new arrays if i < num_out
             W[i] = std::vector<std::valarray<double>>(num_out);
             for (int k = 0; k < num_out; k++) {
@@ -537,9 +552,10 @@ void test3() {
 #ifdef _OPENMP
 #pragma omp for
 #endif
-                for (j = 0; j < features; j++)
+                for (j = 0; j < features; j++) {
                     // preallocate with random initial weights
                     W[i][k][j] = _random(-10, 10);
+                }
             }
         }
     }
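The hunks in this file replace the hand-rolled `char msg[120]` plus `std::snprintf`/`std::perror` plumbing with direct `errno` reporting, which is why `<cerrno>` and `<cstring>` are newly included. A minimal sketch of that error-reporting pattern, assuming a hypothetical `save_data` wrapper and illustrative file name:

#include <cerrno>
#include <cstring>
#include <fstream>
#include <iostream>

int save_data(const char *fname) {
    std::ofstream fp(fname);
    if (!fp) {  // stream failed to open; errno describes the OS-level cause
        std::cerr << "File error (" << fname << "): " << std::strerror(errno)
                  << std::endl;
        return -1;
    }
    fp << "ok\n";  // normal write path
    return 0;
}

int main() { return save_data("u_matrix.csv") == 0 ? 0 : 1; }

This drops the fixed-size message buffer entirely and keeps the diagnostic on `std::cerr`, consistent with the rest of the file's error output.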
@@ -20,6 +20,7 @@
  */
 #define _USE_MATH_DEFINES  // required for MS Visual C++
 #include <algorithm>
+#include <array>
 #include <cmath>
 #include <cstdlib>
 #include <ctime>
@@ -71,12 +72,14 @@ int save_nd_data(const char *fname,
     for (int i = 0; i < num_points; i++) {
         // for each feature in the array
         for (int j = 0; j < num_features; j++) {
             fp << X[i][j];  // print the feature value
-            if (j < num_features - 1)  // if not the last feature
+            if (j < num_features - 1) {  // if not the last feature
                 fp << ",";  // suffix comma
+            }
+        }
+        if (i < num_points - 1) {  // if not the last row
+            fp << "\n";  // start a new line
         }
-        if (i < num_points - 1)  // if not the last row
-            fp << "\n";  // start a new line
     }
 
     fp.close();
@@ -100,9 +103,9 @@ namespace machine_learning {
 void update_weights(const std::valarray<double> &x,
                     std::vector<std::valarray<double>> *W,
                     std::valarray<double> *D, double alpha, int R) {
-    int j, k;
+    int j = 0, k = 0;
     int num_out = W->size();  // number of SOM output nodes
-    int num_features = x.size();  // number of data features
+    // int num_features = x.size();  // number of data features
 
 #ifdef _OPENMP
 #pragma omp for
@@ -117,7 +120,7 @@ void update_weights(const std::valarray<double> &x,
     // step 2: get closest node i.e., node with snallest Euclidian distance to
     // the current pattern
     auto result = std::min_element(std::begin(*D), std::end(*D));
-    double d_min = *result;
+    // double d_min = *result;
     int d_min_idx = std::distance(std::begin(*D), result);
 
     // step 3a: get the neighborhood range
@@ -129,9 +132,10 @@ void update_weights(const std::valarray<double> &x,
 #ifdef _OPENMP
 #pragma omp for
 #endif
-    for (j = from_node; j < to_node; j++)
+    for (j = from_node; j < to_node; j++) {
         // update weights of nodes in the neighborhood
         (*W)[j] += alpha * (x - (*W)[j]);
+    }
 }
 
 /**
@@ -145,16 +149,16 @@ void update_weights(const std::valarray<double> &x,
 void kohonen_som_tracer(const std::vector<std::valarray<double>> &X,
                         std::vector<std::valarray<double>> *W,
                         double alpha_min) {
     int num_samples = X.size();  // number of rows
-    int num_features = X[0].size();  // number of columns
+    // int num_features = X[0].size();  // number of columns
     int num_out = W->size();  // number of rows
     int R = num_out >> 2, iter = 0;
     double alpha = 1.f;
 
     std::valarray<double> D(num_out);
 
     // Loop alpha from 1 to slpha_min
-    for (; alpha > alpha_min; alpha -= 0.01, iter++) {
+    do {
         // Loop for each sample pattern in the data set
         for (int sample = 0; sample < num_samples; sample++) {
             // update weights for the current input pattern sample
@@ -162,9 +166,13 @@ void kohonen_som_tracer(const std::vector<std::valarray<double>> &X,
         }
 
         // every 10th iteration, reduce the neighborhood range
-        if (iter % 10 == 0 && R > 1)
+        if (iter % 10 == 0 && R > 1) {
             R--;
         }
 
+        alpha -= 0.01;
+        iter++;
+    } while (alpha > alpha_min);
 }
 
 }  // namespace machine_learning
@@ -190,7 +198,7 @@ void test_circle(std::vector<std::valarray<double>> *data) {
     const double R = 0.75, dr = 0.3;
     double a_t = 0., b_t = 2.f * M_PI;  // theta random between 0 and 2*pi
     double a_r = R - dr, b_r = R + dr;  // radius random between R-dr and R+dr
-    int i;
+    int i = 0;
 
 #ifdef _OPENMP
 #pragma omp for
@@ -223,24 +231,26 @@ void test_circle(std::vector<std::valarray<double>> *data) {
 * output](https://raw.githubusercontent.com/TheAlgorithms/C-Plus-Plus/docs/images/machine_learning/kohonen/test1.svg)
 */
 void test1() {
-    int j, N = 500;
+    int j = 0, N = 500;
     int features = 2;
     int num_out = 50;
     std::vector<std::valarray<double>> X(N);
     std::vector<std::valarray<double>> W(num_out);
     for (int i = 0; i < std::max(num_out, N); i++) {
         // loop till max(N, num_out)
-        if (i < N)  // only add new arrays if i < N
+        if (i < N) {  // only add new arrays if i < N
             X[i] = std::valarray<double>(features);
+        }
         if (i < num_out) {  // only add new arrays if i < num_out
             W[i] = std::valarray<double>(features);
 
 #ifdef _OPENMP
 #pragma omp for
 #endif
-            for (j = 0; j < features; j++)
+            for (j = 0; j < features; j++) {
                 // preallocate with random initial weights
                 W[i][j] = _random(-1, 1);
+            }
         }
     }
 
@@ -267,7 +277,7 @@ void test1() {
 void test_lamniscate(std::vector<std::valarray<double>> *data) {
     const int N = data->size();
     const double dr = 0.2;
-    int i;
+    int i = 0;
 
 #ifdef _OPENMP
 #pragma omp for
@@ -303,24 +313,26 @@ void test_lamniscate(std::vector<std::valarray<double>> *data) {
 * output](https://raw.githubusercontent.com/TheAlgorithms/C-Plus-Plus/docs/images/machine_learning/kohonen/test2.svg)
 */
 void test2() {
-    int j, N = 500;
+    int j = 0, N = 500;
     int features = 2;
     int num_out = 20;
     std::vector<std::valarray<double>> X(N);
     std::vector<std::valarray<double>> W(num_out);
     for (int i = 0; i < std::max(num_out, N); i++) {
         // loop till max(N, num_out)
-        if (i < N)  // only add new arrays if i < N
+        if (i < N) {  // only add new arrays if i < N
             X[i] = std::valarray<double>(features);
+        }
         if (i < num_out) {  // only add new arrays if i < num_out
             W[i] = std::valarray<double>(features);
 
 #ifdef _OPENMP
 #pragma omp for
 #endif
-            for (j = 0; j < features; j++)
+            for (j = 0; j < features; j++) {
                 // preallocate with random initial weights
                 W[i][j] = _random(-1, 1);
+            }
         }
     }
 
@@ -347,18 +359,18 @@ void test2() {
 void test_3d_classes(std::vector<std::valarray<double>> *data) {
     const int N = data->size();
     const double R = 0.1;  // radius of cluster
-    int i;
+    int i = 0;
     const int num_classes = 8;
-    const double centres[][3] = {
+    const std::array<const std::array<double, 3>, num_classes> centres = {
         // centres of each class cluster
-        {.5, .5, .5},    // centre of class 0
-        {.5, .5, -.5},   // centre of class 1
-        {.5, -.5, .5},   // centre of class 2
-        {.5, -.5, -.5},  // centre of class 3
-        {-.5, .5, .5},   // centre of class 4
-        {-.5, .5, -.5},  // centre of class 5
-        {-.5, -.5, .5},  // centre of class 6
-        {-.5, -.5, -.5}  // centre of class 7
+        std::array<double, 3>({.5, .5, .5}),    // centre of class 0
+        std::array<double, 3>({.5, .5, -.5}),   // centre of class 1
+        std::array<double, 3>({.5, -.5, .5}),   // centre of class 2
+        std::array<double, 3>({.5, -.5, -.5}),  // centre of class 3
+        std::array<double, 3>({-.5, .5, .5}),   // centre of class 4
+        std::array<double, 3>({-.5, .5, -.5}),  // centre of class 5
+        std::array<double, 3>({-.5, -.5, .5}),  // centre of class 6
+        std::array<double, 3>({-.5, -.5, -.5})  // centre of class 7
     };
 
 #ifdef _OPENMP
@@ -400,24 +412,26 @@ void test_3d_classes(std::vector<std::valarray<double>> *data) {
 * output](https://raw.githubusercontent.com/TheAlgorithms/C-Plus-Plus/docs/images/machine_learning/kohonen/test3.svg)
 */
 void test3() {
-    int j, N = 200;
+    int j = 0, N = 200;
     int features = 3;
     int num_out = 20;
     std::vector<std::valarray<double>> X(N);
     std::vector<std::valarray<double>> W(num_out);
     for (int i = 0; i < std::max(num_out, N); i++) {
         // loop till max(N, num_out)
-        if (i < N)  // only add new arrays if i < N
+        if (i < N) {  // only add new arrays if i < N
             X[i] = std::valarray<double>(features);
+        }
         if (i < num_out) {  // only add new arrays if i < num_out
             W[i] = std::valarray<double>(features);
 
 #ifdef _OPENMP
 #pragma omp for
 #endif
-            for (j = 0; j < features; j++)
+            for (j = 0; j < features; j++) {
                 // preallocate with random initial weights
                 W[i][j] = _random(-1, 1);
+            }
         }
     }
 
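The most substantive rewrite in this file turns `kohonen_som_tracer`'s counted loop, `for (; alpha > alpha_min; alpha -= 0.01, iter++)`, into an explicit `do`/`while` with separate update statements. A standalone sketch of the resulting control flow; the threshold value is illustrative:

#include <iostream>

int main() {
    double alpha = 1.0;             // initial learning rate, as in the file
    const double alpha_min = 0.97;  // illustrative stopping threshold
    int iter = 0;
    do {
        // ... one pass over all sample patterns would go here ...
        alpha -= 0.01;  // decay the learning rate
        iter++;         // count completed epochs
    } while (alpha > alpha_min);
    std::cout << "stopped after " << iter << " iterations\n";
    return 0;
}

One behavioral nuance: a `do`/`while` always runs the body once, while the original `for` tested `alpha > alpha_min` before the first pass; with `alpha` starting at 1.f the two agree for any `alpha_min` below 1.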
@@ -25,9 +25,10 @@ std::ostream &operator<<(std::ostream &out,
     const char separator = ' ';
 
     for (size_t row = 0; row < v.size(); row++) {
-        for (size_t col = 0; col < v[row].size(); col++)
+        for (size_t col = 0; col < v[row].size(); col++) {
             out << std::left << std::setw(width) << std::setfill(separator)
                 << v[row][col];
+        }
         out << std::endl;
     }
 
@@ -42,9 +43,10 @@ std::ostream &operator<<(std::ostream &out, std::vector<T> const &v) {
     const int width = 15;
     const char separator = ' ';
 
-    for (size_t row = 0; row < v.size(); row++)
+    for (size_t row = 0; row < v.size(); row++) {
         out << std::left << std::setw(width) << std::setfill(separator)
             << v[row];
+    }
 
     return out;
 }
@@ -57,9 +59,11 @@ template <typename T>
 inline bool is_square(std::vector<std::vector<T>> const &A) {
     // Assuming A is square matrix
     size_t N = A.size();
-    for (size_t i = 0; i < N; i++)
-        if (A[i].size() != N)
+    for (size_t i = 0; i < N; i++) {
+        if (A[i].size() != N) {
             return false;
+        }
+    }
     return true;
 }
 
@@ -90,8 +94,9 @@ std::vector<std::vector<T>> operator*(std::vector<std::vector<T>> const &A,
         std::vector<T> v(N_B);
         for (size_t col = 0; col < N_B; col++) {
             v[col] = static_cast<T>(0);
-            for (size_t j = 0; j < B.size(); j++)
+            for (size_t j = 0; j < B.size(); j++) {
                 v[col] += A[row][j] * B[j][col];
+            }
         }
         result[row] = v;
     }
@@ -154,8 +159,9 @@ std::vector<float> operator*(std::vector<T> const &A, float const scalar) {
 
     std::vector<float> result(N_A);
 
-    for (size_t row = 0; row < N_A; row++)
+    for (size_t row = 0; row < N_A; row++) {
         result[row] = A[row] * static_cast<float>(scalar);
+    }
 
     return result;
 }
@@ -226,8 +232,9 @@ std::vector<std::vector<float>> get_inverse(
     for (size_t row = 0; row < N; row++) {
         // preallocatae a resultant identity matrix
         inverse[row] = std::vector<float>(N);
-        for (size_t col = 0; col < N; col++)
+        for (size_t col = 0; col < N; col++) {
             inverse[row][col] = (row == col) ? 1.f : 0.f;
+        }
     }
 
     if (!is_square(A)) {
@@ -239,8 +246,9 @@ std::vector<std::vector<float>> get_inverse(
     std::vector<std::vector<float>> temp(N);
     for (size_t row = 0; row < N; row++) {
         std::vector<float> v(N);
-        for (size_t col = 0; col < N; col++)
+        for (size_t col = 0; col < N; col++) {
             v[col] = static_cast<float>(A[row][col]);
+        }
         temp[row] = v;
     }
 
@@ -267,13 +275,14 @@ std::vector<std::vector<float>> get_inverse(
         }
 
         // set diagonal to 1
-        float divisor = static_cast<float>(temp[row][row]);
+        auto divisor = static_cast<float>(temp[row][row]);
         temp[row] = temp[row] / divisor;
         inverse[row] = inverse[row] / divisor;
         // Row transformations
         for (size_t row2 = 0; row2 < N; row2++) {
-            if (row2 == row)
+            if (row2 == row) {
                 continue;
+            }
             float factor = temp[row2][row];
             temp[row2] = temp[row2] - factor * temp[row];
             inverse[row2] = inverse[row2] - factor * inverse[row];
@@ -313,9 +322,10 @@ std::vector<float> fit_OLS_regressor(std::vector<std::vector<T>> const &X,
                                      std::vector<T> const &Y) {
     // NxF
     std::vector<std::vector<T>> X2 = X;
-    for (size_t i = 0; i < X2.size(); i++)
+    for (size_t i = 0; i < X2.size(); i++) {
         // add Y-intercept -> Nx(F+1)
         X2[i].push_back(1);
+    }
     // (F+1)xN
     std::vector<std::vector<T>> Xt = get_transpose(X2);
     // (F+1)x(F+1)
@@ -347,8 +357,9 @@ std::vector<float> predict_OLS_regressor(std::vector<std::vector<T>> const &X,
     for (size_t rows = 0; rows < X.size(); rows++) {
         // -> start with constant term
         result[rows] = beta[X[0].size()];
-        for (size_t cols = 0; cols < X[0].size(); cols++)
+        for (size_t cols = 0; cols < X[0].size(); cols++) {
             result[rows] += beta[cols] * X[rows][cols];
+        }
     }
     // Nx1
     return result;
@@ -375,8 +386,9 @@ void ols_test() {
     // predicted regression outputs
     std::vector<float> out1 = predict_OLS_regressor(test_data1, beta1);
     // compare predicted results are within +-0.01 limit of expected
-    for (size_t rows = 0; rows < out1.size(); rows++)
+    for (size_t rows = 0; rows < out1.size(); rows++) {
         assert(std::abs(out1[rows] - expected1[rows]) < 0.01);
+    }
     std::cout << "passed\n";
 
     /* test function = x^3 + x^2 - 100 */
@@ -396,8 +408,9 @@ void ols_test() {
     // predicted regression outputs
     std::vector<float> out2 = predict_OLS_regressor(test_data2, beta2);
     // compare predicted results are within +-0.01 limit of expected
-    for (size_t rows = 0; rows < out2.size(); rows++)
+    for (size_t rows = 0; rows < out2.size(); rows++) {
         assert(std::abs(out2[rows] - expected2[rows]) < 0.01);
+    }
     std::cout << "passed\n";
 
     std::cout << std::endl;  // ensure test results are displayed on screen
@@ -410,7 +423,7 @@ void ols_test() {
 int main() {
     ols_test();
 
-    size_t N, F;
+    size_t N = 0, F = 0;
 
     std::cout << "Enter number of features: ";
     // number of features = columns
@@ -429,9 +442,10 @@ int main() {
     for (size_t rows = 0; rows < N; rows++) {
         std::vector<float> v(F);
         std::cout << "Sample# " << rows + 1 << ": ";
-        for (size_t cols = 0; cols < F; cols++)
+        for (size_t cols = 0; cols < F; cols++) {
             // get the F features
             std::cin >> v[cols];
+        }
         data[rows] = v;
         // get the corresponding output
         std::cin >> Y[rows];
@@ -440,7 +454,7 @@ int main() {
     std::vector<float> beta = fit_OLS_regressor(data, Y);
     std::cout << std::endl << std::endl << "beta:" << beta << std::endl;
 
-    size_t T;
+    size_t T = 0;
     std::cout << "Enter number of test samples: ";
     // number of test sample inputs
     std::cin >> T;
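Nearly every hunk in this file applies the same readability fix clang-tidy suggests: braces around single-statement `for` and `if` bodies. A small sketch of why that matters when a comment sits between a loop header and its body; the function and harness are illustrative only:

#include <cstddef>
#include <vector>

int sum(const std::vector<int> &v) {
    int total = 0;
    for (size_t i = 0; i < v.size(); i++) {
        // with braces, this comment cannot be mistaken for the loop body,
        // and a second statement added later stays inside the loop
        total += v[i];
    }
    return total;
}

int main() { return sum({1, 2, 3}) == 6 ? 0 : 1; }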
@@ -17,8 +17,8 @@
 #include <iostream>
 #include <limits>
 
-#define EPSILON 1e-10  ///< system accuracy limit
-#define MAX_ITERATIONS INT16_MAX  ///< Maximum number of iterations to check
+constexpr double EPSILON = 1e-10;  ///< system accuracy limit
+constexpr int16_t MAX_ITERATIONS = INT16_MAX;  ///< Maximum number of iterations
 
 /** define \f$f(x)\f$ to find root for.
  * Currently defined as:
@@ -44,8 +44,8 @@ static double eq_der(double i) {
 int main() {
     std::srand(std::time(nullptr));  // initialize randomizer
 
-    double z, c = std::rand() % 100, m, n;
-    int i;
+    double z = NAN, c = std::rand() % 100, m = NAN, n = NAN;
+    int i = 0;
 
     std::cout << "\nInitial approximation: " << c;
 
@@ -57,8 +57,9 @@ int main() {
         z = c - (m / n);
         c = z;
 
-        if (std::abs(m) < EPSILON)  // stoping criteria
+        if (std::abs(m) < EPSILON) {  // stoping criteria
             break;
+        }
     }
 
     std::cout << "\n\nRoot: " << z << "\t\tSteps: " << i << std::endl;
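The `main` above now initializes every local before use, with `NAN` as the sentinel for doubles the iteration loop is expected to overwrite. A minimal sketch of the idiom; variable names are illustrative, not from the file:

#include <cmath>
#include <iostream>

int main() {
    double root = NAN;  // stays NAN unless the solver actually writes it
    int steps = 0;
    // ... an iteration loop would assign root and increment steps here ...
    if (std::isnan(root)) {
        std::cout << "root was never computed\n";
    }
    return 0;
}

Reading an uninitialized `double` is undefined behavior; reading a deliberately set `NAN` is merely a detectable bug, which is the point of this clang-tidy check.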
@@ -54,8 +54,8 @@
 void problem(const double &x, std::valarray<double> *y,
              std::valarray<double> *dy) {
     const double omega = 1.F;  // some const for the problem
-    dy[0][0] = y[0][1];                   // x dot
-    dy[0][1] = -omega * omega * y[0][0];  // y dot
+    (*dy)[0] = (*y)[1];                   // x dot  // NOLINT
+    (*dy)[1] = -omega * omega * (*y)[0];  // y dot  // NOLINT
 }
 
 /**
@@ -83,10 +83,10 @@ void exact_solution(const double &x, std::valarray<double> *y) {
  * @param[in,out] y take \f$y_n\f$ and compute \f$y_{n+1}\f$
  * @param[in,out] dy compute \f$f\left(x_n,y_n\right)\f$
 */
-void forward_euler_step(const double dx, const double &x,
+void forward_euler_step(const double dx, const double x,
                         std::valarray<double> *y, std::valarray<double> *dy) {
     problem(x, y, dy);
-    y[0] += dy[0] * dx;
+    *y += *dy * dx;
 }
 
 /**
@@ -101,7 +101,7 @@ void forward_euler_step(const double dx, const double &x,
 */
 double forward_euler(double dx, double x0, double x_max,
                      std::valarray<double> *y, bool save_to_file = false) {
-    std::valarray<double> dy = y[0];
+    std::valarray<double> dy = *y;
 
     std::ofstream fp;
     if (save_to_file) {
@@ -122,9 +122,9 @@ double forward_euler(double dx, double x0, double x_max,
             // write to file
             fp << x << ",";
             for (int i = 0; i < L - 1; i++) {
-                fp << y[0][i] << ",";
+                fp << y[0][i] << ",";  // NOLINT
             }
-            fp << y[0][L - 1] << "\n";
+            fp << y[0][L - 1] << "\n";  // NOLINT
         }
 
         forward_euler_step(dx, x, y, &dy);  // perform integration
@@ -133,8 +133,9 @@ double forward_euler(double dx, double x0, double x_max,
     /* end of integration */
     std::clock_t t2 = std::clock();
 
-    if (fp.is_open())
+    if (fp.is_open()) {
         fp.close();
+    }
 
     return static_cast<double>(t2 - t1) / CLOCKS_PER_SEC;
 }
@@ -153,7 +154,7 @@ void save_exact_solution(const double &X0, const double &X_MAX,
                          const double &step_size,
                          const std::valarray<double> &Y0) {
     double x = X0;
-    std::valarray<double> y = Y0;
+    std::valarray<double> y(Y0);
 
     std::ofstream fp("exact.csv", std::ostream::out);
     if (!fp.is_open()) {
@@ -166,9 +167,9 @@ void save_exact_solution(const double &X0, const double &X_MAX,
     do {
         fp << x << ",";
         for (int i = 0; i < y.size() - 1; i++) {
-            fp << y[i] << ",";
+            fp << y[i] << ",";  // NOLINT
         }
-        fp << y[y.size() - 1] << "\n";
+        fp << y[y.size() - 1] << "\n";  // NOLINT
 
         exact_solution(x, &y);
 
@@ -186,10 +187,10 @@ void save_exact_solution(const double &X0, const double &X_MAX,
 * Main Function
 */
 int main(int argc, char *argv[]) {
     double X0 = 0.f;     /* initial value of x0 */
     double X_MAX = 10.F; /* upper limit of integration */
-    std::valarray<double> Y0 = {1.f, 0.f}; /* initial value Y = y(x = x_0) */
-    double step_size;
+    std::valarray<double> Y0{1.f, 0.f}; /* initial value Y = y(x = x_0) */
+    double step_size = NAN;
 
     if (argc == 1) {
         std::cout << "\nEnter the step size: ";
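The key fix in this last file is dereferencing the `std::valarray` pointers directly (`*y`, `(*dy)[0]`) instead of indexing the pointer itself (`y[0][0]`), which read as if `y` were an array of valarrays. A self-contained sketch of the corrected step, mirroring the harmonic-oscillator `problem()` above; the `main` harness is illustrative:

#include <valarray>

// One forward-Euler step for the oscillator y'' = -omega^2 * y,
// written as a first-order system in the state vector (*y)
void step(double dx, std::valarray<double> *y, std::valarray<double> *dy) {
    const double omega = 1.0;             // matches the file's problem()
    (*dy)[0] = (*y)[1];                   // x dot
    (*dy)[1] = -omega * omega * (*y)[0];  // y dot
    *y += *dy * dx;                       // update the whole state vector
}

int main() {
    std::valarray<double> y = {1.0, 0.0};  // initial condition, as in main()
    std::valarray<double> dy(2);
    step(0.01, &y, &dy);
    return 0;
}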