From 8ab9a2ae939b915f0da7d5daacb03eec2aed91c1 Mon Sep 17 00:00:00 2001
From: Krishna Vedala <7001608+kvedala@users.noreply.github.com>
Date: Mon, 13 Jul 2020 07:52:38 -0400
Subject: [PATCH] [cpp fixes] tidied up code based on error reports by
 clang-tidy (#950)

* tidied up code based on error reports by clang-tidy

* added doc for activation function
---
 machine_learning/adaline_learning.cpp         | 107 ++++++++------
 machine_learning/kohonen_som_topology.cpp     | 130 ++++++++++--------
 machine_learning/kohonen_som_trace.cpp        |  88 +++++++-----
 .../ordinary_least_squares_regressor.cpp      |  48 ++++---
 numerical_methods/newton_raphson_method.cpp   |  11 +-
 numerical_methods/ode_forward_euler.cpp       |  31 +++--
 6 files changed, 243 insertions(+), 172 deletions(-)

diff --git a/machine_learning/adaline_learning.cpp b/machine_learning/adaline_learning.cpp
index a8426ac4e..6da839a9d 100644
--- a/machine_learning/adaline_learning.cpp
+++ b/machine_learning/adaline_learning.cpp
@@ -26,6 +26,7 @@
  * computed using stochastic gradient descent method.
  */
 
+#include <array>
 #include <cassert>
 #include <climits>
 #include <cmath>
@@ -35,7 +36,8 @@
 #include <numeric>
 #include <vector>
 
-#define MAX_ITER 500  // INT_MAX ///< Maximum number of iterations to learn
+/** Maximum number of iterations to learn */
+constexpr int MAX_ITER = 500;  // INT_MAX
 
 /** \namespace machine_learning
  * \brief Machine learning algorithms
@@ -50,8 +52,8 @@ class adaline {
      * \param[in] convergence accuracy (optional,
      * default=\f$1\times10^{-5}\f$)
      */
-    adaline(int num_features, const double eta = 0.01f,
-            const double accuracy = 1e-5)
+    explicit adaline(int num_features, const double eta = 0.01f,
+                     const double accuracy = 1e-5)
         : eta(eta), accuracy(accuracy) {
         if (eta <= 0) {
             std::cerr << "learning rate should be positive and nonzero"
@@ -64,7 +66,7 @@ class adaline {
             1);  // additional weight is for the constant bias term
 
         // initialize with random weights in the range [-50, 49]
-        for (int i = 0; i < weights.size(); i++) weights[i] = 1.f;
+        for (double &weight : weights) weight = 1.f;
         // weights[i] = (static_cast<double>(std::rand() % 100) - 50);
     }
 
@@ -75,8 +77,9 @@ class adaline {
         out << "<";
         for (int i = 0; i < ada.weights.size(); i++) {
             out << ada.weights[i];
-            if (i < ada.weights.size() - 1)
+            if (i < ada.weights.size() - 1) {
                 out << ", ";
+            }
         }
         out << ">";
         return out;
@@ -90,28 +93,33 @@ class adaline {
      * model prediction output
      */
     int predict(const std::vector<double> &x, double *out = nullptr) {
-        if (!check_size_match(x))
+        if (!check_size_match(x)) {
             return 0;
+        }
 
         double y = weights.back();  // assign bias value
 
         // for (int i = 0; i < x.size(); i++) y += x[i] * weights[i];
         y = std::inner_product(x.begin(), x.end(), weights.begin(), y);
 
-        if (out != nullptr)  // if out variable is provided
+        if (out != nullptr) {  // if out variable is provided
             *out = y;
+        }
 
         return activation(y);  // quantizer: apply ADALINE threshold function
     }
 
     /**
      * Update the weights of the model using supervised learning for one
-     * feature vector \param[in] x feature vector \param[in] y known output
-     * value \returns correction factor
+     * feature vector
+     * \param[in] x feature vector
+     * \param[in] y known output value
+     * \returns correction factor
      */
     double fit(const std::vector<double> &x, const int &y) {
-        if (!check_size_match(x))
+        if (!check_size_match(x)) {
             return 0;
+        }
 
         /* output of the model with current weights */
         int p = predict(x);
@@ -129,21 +137,23 @@ class adaline {
 
     /**
      * Update the weights of the model using supervised learning for an
-     * array of vectors. 
\param[in] X array of feature vector \param[in] y - * known output value for each feature vector + * array of vectors. + * \param[in] X array of feature vector + * \param[in] y known output value for each feature vector */ - template - void fit(std::vector const (&X)[N], const int *y) { + template + void fit(std::array, N> const &X, + std::array const &Y) { double avg_pred_error = 1.f; - int iter; + int iter = 0; for (iter = 0; (iter < MAX_ITER) && (avg_pred_error > accuracy); iter++) { avg_pred_error = 0.f; // perform fit for each sample for (int i = 0; i < N; i++) { - double err = fit(X[i], y[i]); + double err = fit(X[i], Y[i]); avg_pred_error += std::abs(err); } avg_pred_error /= N; @@ -154,15 +164,25 @@ class adaline { << "\tAvg error: " << avg_pred_error << std::endl; } - if (iter < MAX_ITER) - + if (iter < MAX_ITER) { std::cout << "Converged after " << iter << " iterations." << std::endl; - else + } else { std::cout << "Did not converge after " << iter << " iterations." << std::endl; + } } + /** Defines activation function as Heaviside's step function. + * \f[ + * f(x) = \begin{cases} + * -1 & \forall x \le 0\\ + * 1 & \forall x > 0 + * \end{cases} + * \f] + * @param x input value to apply activation on + * @return activation output + */ int activation(double x) { return x > 0 ? 1 : -1; } private: @@ -206,15 +226,19 @@ void test1(double eta = 0.01) { const int N = 10; // number of sample points - std::vector X[N] = {{0, 1}, {1, -2}, {2, 3}, {3, -1}, - {4, 1}, {6, -5}, {-7, -3}, {-8, 5}, - {-9, 2}, {-10, -15}}; - int y[] = {1, -1, 1, -1, -1, -1, 1, 1, 1, -1}; // corresponding y-values + std::array, N> X = { + std::vector({0, 1}), std::vector({1, -2}), + std::vector({2, 3}), std::vector({3, -1}), + std::vector({4, 1}), std::vector({6, -5}), + std::vector({-7, -3}), std::vector({-8, 5}), + std::vector({-9, 2}), std::vector({-10, -15})}; + std::array y = {1, -1, 1, -1, -1, + -1, 1, 1, 1, -1}; // corresponding y-values std::cout << "------- Test 1 -------" << std::endl; std::cout << "Model before fit: " << ada << std::endl; - ada.fit(X, y); + ada.fit(X, y); std::cout << "Model after fit: " << ada << std::endl; int predict = ada.predict({5, -3}); @@ -240,17 +264,17 @@ void test2(double eta = 0.01) { const int N = 50; // number of sample points - std::vector X[N]; - int Y[N]; // corresponding y-values + std::array, N> X; + std::array Y{}; // corresponding y-values // generate sample points in the interval // [-range2/100 , (range2-1)/100] int range = 500; // sample points full-range int range2 = range >> 1; // sample points half-range for (int i = 0; i < N; i++) { - double x0 = ((std::rand() % range) - range2) / 100.f; - double x1 = ((std::rand() % range) - range2) / 100.f; - X[i] = {x0, x1}; + double x0 = (static_cast(std::rand() % range) - range2) / 100.f; + double x1 = (static_cast(std::rand() % range) - range2) / 100.f; + X[i] = std::vector({x0, x1}); Y[i] = (x0 + 3. * x1) > -1 ? 
1 : -1; } @@ -262,8 +286,8 @@ void test2(double eta = 0.01) { int N_test_cases = 5; for (int i = 0; i < N_test_cases; i++) { - double x0 = ((std::rand() % range) - range2) / 100.f; - double x1 = ((std::rand() % range) - range2) / 100.f; + double x0 = (static_cast(std::rand() % range) - range2) / 100.f; + double x1 = (static_cast(std::rand() % range) - range2) / 100.f; int predict = ada.predict({x0, x1}); @@ -291,18 +315,18 @@ void test3(double eta = 0.01) { const int N = 100; // number of sample points - std::vector X[N]; - int Y[N]; // corresponding y-values + std::array, N> X; + std::array Y{}; // corresponding y-values // generate sample points in the interval // [-range2/100 , (range2-1)/100] int range = 200; // sample points full-range int range2 = range >> 1; // sample points half-range for (int i = 0; i < N; i++) { - double x0 = ((std::rand() % range) - range2) / 100.f; - double x1 = ((std::rand() % range) - range2) / 100.f; - double x2 = ((std::rand() % range) - range2) / 100.f; - X[i] = {x0, x1, x2, x0 * x0, x1 * x1, x2 * x2}; + double x0 = (static_cast(std::rand() % range) - range2) / 100.f; + double x1 = (static_cast(std::rand() % range) - range2) / 100.f; + double x2 = (static_cast(std::rand() % range) - range2) / 100.f; + X[i] = std::vector({x0, x1, x2, x0 * x0, x1 * x1, x2 * x2}); Y[i] = ((x0 * x0) + (x1 * x1) + (x2 * x2)) <= 1.f ? 1 : -1; } @@ -314,9 +338,9 @@ void test3(double eta = 0.01) { int N_test_cases = 5; for (int i = 0; i < N_test_cases; i++) { - double x0 = ((std::rand() % range) - range2) / 100.f; - double x1 = ((std::rand() % range) - range2) / 100.f; - double x2 = ((std::rand() % range) - range2) / 100.f; + double x0 = (static_cast(std::rand() % range) - range2) / 100.f; + double x1 = (static_cast(std::rand() % range) - range2) / 100.f; + double x2 = (static_cast(std::rand() % range) - range2) / 100.f; int predict = ada.predict({x0, x1, x2, x0 * x0, x1 * x1, x2 * x2}); @@ -334,8 +358,9 @@ int main(int argc, char **argv) { std::srand(std::time(nullptr)); // initialize random number generator double eta = 0.1; // default value of eta - if (argc == 2) // read eta value from commandline argument if present + if (argc == 2) { // read eta value from commandline argument if present eta = strtof(argv[1], nullptr); + } test1(eta); diff --git a/machine_learning/kohonen_som_topology.cpp b/machine_learning/kohonen_som_topology.cpp index 016fe6d1e..cccc9faa3 100644 --- a/machine_learning/kohonen_som_topology.cpp +++ b/machine_learning/kohonen_som_topology.cpp @@ -25,8 +25,11 @@ */ #define _USE_MATH_DEFINES //< required for MS Visual C++ #include +#include +#include #include #include +#include #include #include #include @@ -68,7 +71,8 @@ int save_2d_data(const char *fname, fp.open(fname); if (!fp.is_open()) { // error with opening file to write - std::cerr << "Error opening file " << fname << "\n"; + std::cerr << "Error opening file " << fname << ": " + << std::strerror(errno) << "\n"; return -1; } @@ -76,12 +80,14 @@ int save_2d_data(const char *fname, for (int i = 0; i < num_points; i++) { // for each feature in the array for (int j = 0; j < num_features; j++) { - fp << X[i][j]; // print the feature value - if (j < num_features - 1) // if not the last feature - fp << ","; // suffix comma + fp << X[i][j]; // print the feature value + if (j < num_features - 1) { // if not the last feature + fp << ","; // suffix comma + } + } + if (i < num_points - 1) { // if not the last row + fp << "\n"; // start a new line } - if (i < num_points - 1) // if not the last row - fp << "\n"; // 
start a new line } fp.close(); @@ -99,12 +105,12 @@ int save_2d_data(const char *fname, void get_min_2d(const std::vector> &X, double *val, int *x_idx, int *y_idx) { val[0] = INFINITY; // initial min value - int N = X.size(); + size_t N = X.size(); for (int i = 0; i < N; i++) { // traverse each x-index auto result = std::min_element(std::begin(X[i]), std::end(X[i])); double d_min = *result; - int j = std::distance(std::begin(X[i]), result); + std::ptrdiff_t j = std::distance(std::begin(X[i]), result); if (d_min < val[0]) { // if a lower value is found // save the value and its index @@ -119,7 +125,8 @@ void get_min_2d(const std::vector> &X, double *val, * \brief Machine learning algorithms */ namespace machine_learning { -#define MIN_DISTANCE 1e-4 ///< Minimum average distance of image nodes +/** Minimum average distance of image nodes */ +constexpr double MIN_DISTANCE = 1e-4; /** * Create the distance matrix or @@ -136,9 +143,8 @@ int save_u_matrix(const char *fname, const std::vector>> &W) { std::ofstream fp(fname); if (!fp) { // error with fopen - char msg[120]; - std::snprintf(msg, sizeof(msg), "File error (%s): ", fname); - std::perror(msg); + std::cerr << "File error (" << fname << "): " << std::strerror(errno) + << std::endl; return -1; } @@ -153,7 +159,7 @@ int save_u_matrix(const char *fname, int to_x = std::min(W.size(), i + R + 1); int from_y = std::max(0, j - R); int to_y = std::min(W[0].size(), j + R + 1); - int l, m; + int l = 0, m = 0; #ifdef _OPENMP #pragma omp parallel for reduction(+ : distance) #endif @@ -172,8 +178,9 @@ int save_u_matrix(const char *fname, fp << ','; // suffix comma } } - if (i < W.size() - 1) // if not the last row - fp << '\n'; // start a new line + if (i < W.size() - 1) { // if not the last row + fp << '\n'; // start a new line + } } fp.close(); @@ -194,10 +201,11 @@ double update_weights(const std::valarray &X, std::vector>> *W, std::vector> *D, double alpha, int R) { - int x, y; + int x = 0, y = 0; int num_out_x = static_cast(W->size()); // output nodes - in X int num_out_y = static_cast(W[0][0].size()); // output nodes - in Y - int num_features = static_cast(W[0][0][0].size()); // features = in Z + // int num_features = static_cast(W[0][0][0].size()); // features = + // in Z double d_min = 0.f; #ifdef _OPENMP @@ -217,7 +225,7 @@ double update_weights(const std::valarray &X, // step 2: get closest node i.e., node with snallest Euclidian distance // to the current pattern - int d_min_x, d_min_y; + int d_min_x = 0, d_min_y = 0; get_min_2d(*D, &d_min, &d_min_x, &d_min_y); // step 3a: get the neighborhood range @@ -261,10 +269,10 @@ double update_weights(const std::valarray &X, void kohonen_som(const std::vector> &X, std::vector>> *W, double alpha_min) { - int num_samples = X.size(); // number of rows - int num_features = X[0].size(); // number of columns - int num_out = W->size(); // output matrix size - int R = num_out >> 2, iter = 0; + size_t num_samples = X.size(); // number of rows + // size_t num_features = X[0].size(); // number of columns + size_t num_out = W->size(); // output matrix size + size_t R = num_out >> 2, iter = 0; double alpha = 1.f; std::vector> D(num_out); @@ -283,15 +291,17 @@ void kohonen_som(const std::vector> &X, } // every 100th iteration, reduce the neighborhood range - if (iter % 300 == 0 && R > 1) + if (iter % 300 == 0 && R > 1) { R--; + } dmin /= num_samples; // termination condition variable -> % change in minimum distance dmin_ratio = (past_dmin - dmin) / past_dmin; - if (dmin_ratio < 0) + if (dmin_ratio < 0) { 
dmin_ratio = 1.f; + } past_dmin = dmin; std::cout << "iter: " << iter << "\t alpha: " << alpha << "\t R: " << R @@ -320,14 +330,14 @@ using machine_learning::save_u_matrix; void test_2d_classes(std::vector> *data) { const int N = data->size(); const double R = 0.3; // radius of cluster - int i; + int i = 0; const int num_classes = 4; - const double centres[][2] = { + std::array, num_classes> centres = { // centres of each class cluster - {.5, .5}, // centre of class 1 - {.5, -.5}, // centre of class 2 - {-.5, .5}, // centre of class 3 - {-.5, -.5} // centre of class 4 + std::array({.5, .5}), // centre of class 1 + std::array({.5, -.5}), // centre of class 2 + std::array({-.5, .5}), // centre of class 3 + std::array({-.5, -.5}) // centre of class 4 }; #ifdef _OPENMP @@ -357,15 +367,16 @@ void test_2d_classes(std::vector> *data) { * * `w12.csv`: trained SOM map */ void test1() { - int j, N = 300; + int j = 0, N = 300; int features = 2; int num_out = 30; std::vector> X(N); std::vector>> W(num_out); for (int i = 0; i < std::max(num_out, N); i++) { // loop till max(N, num_out) - if (i < N) // only add new arrays if i < N + if (i < N) { // only add new arrays if i < N X[i] = std::valarray(features); + } if (i < num_out) { // only add new arrays if i < num_out W[i] = std::vector>(num_out); for (int k = 0; k < num_out; k++) { @@ -373,9 +384,10 @@ void test1() { #ifdef _OPENMP #pragma omp for #endif - for (j = 0; j < features; j++) + for (j = 0; j < features; j++) { // preallocate with random initial weights W[i][k][j] = _random(-10, 10); + } } } } @@ -397,16 +409,16 @@ void test1() { * \param[out] data matrix to store data in */ void test_3d_classes1(std::vector> *data) { - const int N = data->size(); + const size_t N = data->size(); const double R = 0.3; // radius of cluster - int i; + int i = 0; const int num_classes = 4; - const double centres[][3] = { + const std::array, num_classes> centres = { // centres of each class cluster - {.5, .5, .5}, // centre of class 1 - {.5, -.5, -.5}, // centre of class 2 - {-.5, .5, .5}, // centre of class 3 - {-.5, -.5 - .5} // centre of class 4 + std::array({.5, .5, .5}), // centre of class 1 + std::array({.5, -.5, -.5}), // centre of class 2 + std::array({-.5, .5, .5}), // centre of class 3 + std::array({-.5, -.5 - .5}) // centre of class 4 }; #ifdef _OPENMP @@ -437,15 +449,16 @@ void test_3d_classes1(std::vector> *data) { * * `w22.csv`: trained SOM map */ void test2() { - int j, N = 300; + int j = 0, N = 300; int features = 3; int num_out = 30; std::vector> X(N); std::vector>> W(num_out); for (int i = 0; i < std::max(num_out, N); i++) { // loop till max(N, num_out) - if (i < N) // only add new arrays if i < N + if (i < N) { // only add new arrays if i < N X[i] = std::valarray(features); + } if (i < num_out) { // only add new arrays if i < num_out W[i] = std::vector>(num_out); for (int k = 0; k < num_out; k++) { @@ -453,9 +466,10 @@ void test2() { #ifdef _OPENMP #pragma omp for #endif - for (j = 0; j < features; j++) + for (j = 0; j < features; j++) { // preallocate with random initial weights W[i][k][j] = _random(-10, 10); + } } } } @@ -477,20 +491,20 @@ void test2() { * \param[out] data matrix to store data in */ void test_3d_classes2(std::vector> *data) { - const int N = data->size(); + const size_t N = data->size(); const double R = 0.2; // radius of cluster - int i; + int i = 0; const int num_classes = 8; - const double centres[][3] = { + const std::array, num_classes> centres = { // centres of each class cluster - {.5, .5, .5}, // centre of class 1 - 
{.5, .5, -.5}, // centre of class 2 - {.5, -.5, .5}, // centre of class 3 - {.5, -.5, -.5}, // centre of class 4 - {-.5, .5, .5}, // centre of class 5 - {-.5, .5, -.5}, // centre of class 6 - {-.5, -.5, .5}, // centre of class 7 - {-.5, -.5, -.5} // centre of class 8 + std::array({.5, .5, .5}), // centre of class 1 + std::array({.5, .5, -.5}), // centre of class 2 + std::array({.5, -.5, .5}), // centre of class 3 + std::array({.5, -.5, -.5}), // centre of class 4 + std::array({-.5, .5, .5}), // centre of class 5 + std::array({-.5, .5, -.5}), // centre of class 6 + std::array({-.5, -.5, .5}), // centre of class 7 + std::array({-.5, -.5, -.5}) // centre of class 8 }; #ifdef _OPENMP @@ -521,15 +535,16 @@ void test_3d_classes2(std::vector> *data) { * * `w32.csv`: trained SOM map */ void test3() { - int j, N = 500; + int j = 0, N = 500; int features = 3; int num_out = 30; std::vector> X(N); std::vector>> W(num_out); for (int i = 0; i < std::max(num_out, N); i++) { // loop till max(N, num_out) - if (i < N) // only add new arrays if i < N + if (i < N) { // only add new arrays if i < N X[i] = std::valarray(features); + } if (i < num_out) { // only add new arrays if i < num_out W[i] = std::vector>(num_out); for (int k = 0; k < num_out; k++) { @@ -537,9 +552,10 @@ void test3() { #ifdef _OPENMP #pragma omp for #endif - for (j = 0; j < features; j++) + for (j = 0; j < features; j++) { // preallocate with random initial weights W[i][k][j] = _random(-10, 10); + } } } } diff --git a/machine_learning/kohonen_som_trace.cpp b/machine_learning/kohonen_som_trace.cpp index 273a2a57c..63a0c02c6 100644 --- a/machine_learning/kohonen_som_trace.cpp +++ b/machine_learning/kohonen_som_trace.cpp @@ -20,6 +20,7 @@ */ #define _USE_MATH_DEFINES // required for MS Visual C++ #include +#include #include #include #include @@ -71,12 +72,14 @@ int save_nd_data(const char *fname, for (int i = 0; i < num_points; i++) { // for each feature in the array for (int j = 0; j < num_features; j++) { - fp << X[i][j]; // print the feature value - if (j < num_features - 1) // if not the last feature - fp << ","; // suffix comma + fp << X[i][j]; // print the feature value + if (j < num_features - 1) { // if not the last feature + fp << ","; // suffix comma + } + } + if (i < num_points - 1) { // if not the last row + fp << "\n"; // start a new line } - if (i < num_points - 1) // if not the last row - fp << "\n"; // start a new line } fp.close(); @@ -100,9 +103,9 @@ namespace machine_learning { void update_weights(const std::valarray &x, std::vector> *W, std::valarray *D, double alpha, int R) { - int j, k; - int num_out = W->size(); // number of SOM output nodes - int num_features = x.size(); // number of data features + int j = 0, k = 0; + int num_out = W->size(); // number of SOM output nodes + // int num_features = x.size(); // number of data features #ifdef _OPENMP #pragma omp for @@ -117,7 +120,7 @@ void update_weights(const std::valarray &x, // step 2: get closest node i.e., node with snallest Euclidian distance to // the current pattern auto result = std::min_element(std::begin(*D), std::end(*D)); - double d_min = *result; + // double d_min = *result; int d_min_idx = std::distance(std::begin(*D), result); // step 3a: get the neighborhood range @@ -129,9 +132,10 @@ void update_weights(const std::valarray &x, #ifdef _OPENMP #pragma omp for #endif - for (j = from_node; j < to_node; j++) + for (j = from_node; j < to_node; j++) { // update weights of nodes in the neighborhood (*W)[j] += alpha * (x - (*W)[j]); + } } /** @@ -145,16 +149,16 
@@ void update_weights(const std::valarray &x, void kohonen_som_tracer(const std::vector> &X, std::vector> *W, double alpha_min) { - int num_samples = X.size(); // number of rows - int num_features = X[0].size(); // number of columns - int num_out = W->size(); // number of rows + int num_samples = X.size(); // number of rows + // int num_features = X[0].size(); // number of columns + int num_out = W->size(); // number of rows int R = num_out >> 2, iter = 0; double alpha = 1.f; std::valarray D(num_out); // Loop alpha from 1 to slpha_min - for (; alpha > alpha_min; alpha -= 0.01, iter++) { + do { // Loop for each sample pattern in the data set for (int sample = 0; sample < num_samples; sample++) { // update weights for the current input pattern sample @@ -162,9 +166,13 @@ void kohonen_som_tracer(const std::vector> &X, } // every 10th iteration, reduce the neighborhood range - if (iter % 10 == 0 && R > 1) + if (iter % 10 == 0 && R > 1) { R--; - } + } + + alpha -= 0.01; + iter++; + } while (alpha > alpha_min); } } // namespace machine_learning @@ -190,7 +198,7 @@ void test_circle(std::vector> *data) { const double R = 0.75, dr = 0.3; double a_t = 0., b_t = 2.f * M_PI; // theta random between 0 and 2*pi double a_r = R - dr, b_r = R + dr; // radius random between R-dr and R+dr - int i; + int i = 0; #ifdef _OPENMP #pragma omp for @@ -223,24 +231,26 @@ void test_circle(std::vector> *data) { * output](https://raw.githubusercontent.com/TheAlgorithms/C-Plus-Plus/docs/images/machine_learning/kohonen/test1.svg) */ void test1() { - int j, N = 500; + int j = 0, N = 500; int features = 2; int num_out = 50; std::vector> X(N); std::vector> W(num_out); for (int i = 0; i < std::max(num_out, N); i++) { // loop till max(N, num_out) - if (i < N) // only add new arrays if i < N + if (i < N) { // only add new arrays if i < N X[i] = std::valarray(features); + } if (i < num_out) { // only add new arrays if i < num_out W[i] = std::valarray(features); #ifdef _OPENMP #pragma omp for #endif - for (j = 0; j < features; j++) + for (j = 0; j < features; j++) { // preallocate with random initial weights W[i][j] = _random(-1, 1); + } } } @@ -267,7 +277,7 @@ void test1() { void test_lamniscate(std::vector> *data) { const int N = data->size(); const double dr = 0.2; - int i; + int i = 0; #ifdef _OPENMP #pragma omp for @@ -303,24 +313,26 @@ void test_lamniscate(std::vector> *data) { * output](https://raw.githubusercontent.com/TheAlgorithms/C-Plus-Plus/docs/images/machine_learning/kohonen/test2.svg) */ void test2() { - int j, N = 500; + int j = 0, N = 500; int features = 2; int num_out = 20; std::vector> X(N); std::vector> W(num_out); for (int i = 0; i < std::max(num_out, N); i++) { // loop till max(N, num_out) - if (i < N) // only add new arrays if i < N + if (i < N) { // only add new arrays if i < N X[i] = std::valarray(features); + } if (i < num_out) { // only add new arrays if i < num_out W[i] = std::valarray(features); #ifdef _OPENMP #pragma omp for #endif - for (j = 0; j < features; j++) + for (j = 0; j < features; j++) { // preallocate with random initial weights W[i][j] = _random(-1, 1); + } } } @@ -347,18 +359,18 @@ void test2() { void test_3d_classes(std::vector> *data) { const int N = data->size(); const double R = 0.1; // radius of cluster - int i; + int i = 0; const int num_classes = 8; - const double centres[][3] = { + const std::array, num_classes> centres = { // centres of each class cluster - {.5, .5, .5}, // centre of class 0 - {.5, .5, -.5}, // centre of class 1 - {.5, -.5, .5}, // centre of class 2 - {.5, 
-.5, -.5}, // centre of class 3 - {-.5, .5, .5}, // centre of class 4 - {-.5, .5, -.5}, // centre of class 5 - {-.5, -.5, .5}, // centre of class 6 - {-.5, -.5, -.5} // centre of class 7 + std::array({.5, .5, .5}), // centre of class 0 + std::array({.5, .5, -.5}), // centre of class 1 + std::array({.5, -.5, .5}), // centre of class 2 + std::array({.5, -.5, -.5}), // centre of class 3 + std::array({-.5, .5, .5}), // centre of class 4 + std::array({-.5, .5, -.5}), // centre of class 5 + std::array({-.5, -.5, .5}), // centre of class 6 + std::array({-.5, -.5, -.5}) // centre of class 7 }; #ifdef _OPENMP @@ -400,24 +412,26 @@ void test_3d_classes(std::vector> *data) { * output](https://raw.githubusercontent.com/TheAlgorithms/C-Plus-Plus/docs/images/machine_learning/kohonen/test3.svg) */ void test3() { - int j, N = 200; + int j = 0, N = 200; int features = 3; int num_out = 20; std::vector> X(N); std::vector> W(num_out); for (int i = 0; i < std::max(num_out, N); i++) { // loop till max(N, num_out) - if (i < N) // only add new arrays if i < N + if (i < N) { // only add new arrays if i < N X[i] = std::valarray(features); + } if (i < num_out) { // only add new arrays if i < num_out W[i] = std::valarray(features); #ifdef _OPENMP #pragma omp for #endif - for (j = 0; j < features; j++) + for (j = 0; j < features; j++) { // preallocate with random initial weights W[i][j] = _random(-1, 1); + } } } diff --git a/machine_learning/ordinary_least_squares_regressor.cpp b/machine_learning/ordinary_least_squares_regressor.cpp index 896504e20..0c865761b 100644 --- a/machine_learning/ordinary_least_squares_regressor.cpp +++ b/machine_learning/ordinary_least_squares_regressor.cpp @@ -25,9 +25,10 @@ std::ostream &operator<<(std::ostream &out, const char separator = ' '; for (size_t row = 0; row < v.size(); row++) { - for (size_t col = 0; col < v[row].size(); col++) + for (size_t col = 0; col < v[row].size(); col++) { out << std::left << std::setw(width) << std::setfill(separator) << v[row][col]; + } out << std::endl; } @@ -42,9 +43,10 @@ std::ostream &operator<<(std::ostream &out, std::vector const &v) { const int width = 15; const char separator = ' '; - for (size_t row = 0; row < v.size(); row++) + for (size_t row = 0; row < v.size(); row++) { out << std::left << std::setw(width) << std::setfill(separator) << v[row]; + } return out; } @@ -57,9 +59,11 @@ template inline bool is_square(std::vector> const &A) { // Assuming A is square matrix size_t N = A.size(); - for (size_t i = 0; i < N; i++) - if (A[i].size() != N) + for (size_t i = 0; i < N; i++) { + if (A[i].size() != N) { return false; + } + } return true; } @@ -90,8 +94,9 @@ std::vector> operator*(std::vector> const &A, std::vector v(N_B); for (size_t col = 0; col < N_B; col++) { v[col] = static_cast(0); - for (size_t j = 0; j < B.size(); j++) + for (size_t j = 0; j < B.size(); j++) { v[col] += A[row][j] * B[j][col]; + } } result[row] = v; } @@ -154,8 +159,9 @@ std::vector operator*(std::vector const &A, float const scalar) { std::vector result(N_A); - for (size_t row = 0; row < N_A; row++) + for (size_t row = 0; row < N_A; row++) { result[row] = A[row] * static_cast(scalar); + } return result; } @@ -226,8 +232,9 @@ std::vector> get_inverse( for (size_t row = 0; row < N; row++) { // preallocatae a resultant identity matrix inverse[row] = std::vector(N); - for (size_t col = 0; col < N; col++) + for (size_t col = 0; col < N; col++) { inverse[row][col] = (row == col) ? 
1.f : 0.f; + } } if (!is_square(A)) { @@ -239,8 +246,9 @@ std::vector> get_inverse( std::vector> temp(N); for (size_t row = 0; row < N; row++) { std::vector v(N); - for (size_t col = 0; col < N; col++) + for (size_t col = 0; col < N; col++) { v[col] = static_cast(A[row][col]); + } temp[row] = v; } @@ -267,13 +275,14 @@ std::vector> get_inverse( } // set diagonal to 1 - float divisor = static_cast(temp[row][row]); + auto divisor = static_cast(temp[row][row]); temp[row] = temp[row] / divisor; inverse[row] = inverse[row] / divisor; // Row transformations for (size_t row2 = 0; row2 < N; row2++) { - if (row2 == row) + if (row2 == row) { continue; + } float factor = temp[row2][row]; temp[row2] = temp[row2] - factor * temp[row]; inverse[row2] = inverse[row2] - factor * inverse[row]; @@ -313,9 +322,10 @@ std::vector fit_OLS_regressor(std::vector> const &X, std::vector const &Y) { // NxF std::vector> X2 = X; - for (size_t i = 0; i < X2.size(); i++) + for (size_t i = 0; i < X2.size(); i++) { // add Y-intercept -> Nx(F+1) X2[i].push_back(1); + } // (F+1)xN std::vector> Xt = get_transpose(X2); // (F+1)x(F+1) @@ -347,8 +357,9 @@ std::vector predict_OLS_regressor(std::vector> const &X, for (size_t rows = 0; rows < X.size(); rows++) { // -> start with constant term result[rows] = beta[X[0].size()]; - for (size_t cols = 0; cols < X[0].size(); cols++) + for (size_t cols = 0; cols < X[0].size(); cols++) { result[rows] += beta[cols] * X[rows][cols]; + } } // Nx1 return result; @@ -375,8 +386,9 @@ void ols_test() { // predicted regression outputs std::vector out1 = predict_OLS_regressor(test_data1, beta1); // compare predicted results are within +-0.01 limit of expected - for (size_t rows = 0; rows < out1.size(); rows++) + for (size_t rows = 0; rows < out1.size(); rows++) { assert(std::abs(out1[rows] - expected1[rows]) < 0.01); + } std::cout << "passed\n"; /* test function = x^3 + x^2 - 100 */ @@ -396,8 +408,9 @@ void ols_test() { // predicted regression outputs std::vector out2 = predict_OLS_regressor(test_data2, beta2); // compare predicted results are within +-0.01 limit of expected - for (size_t rows = 0; rows < out2.size(); rows++) + for (size_t rows = 0; rows < out2.size(); rows++) { assert(std::abs(out2[rows] - expected2[rows]) < 0.01); + } std::cout << "passed\n"; std::cout << std::endl; // ensure test results are displayed on screen @@ -410,7 +423,7 @@ void ols_test() { int main() { ols_test(); - size_t N, F; + size_t N = 0, F = 0; std::cout << "Enter number of features: "; // number of features = columns @@ -429,9 +442,10 @@ int main() { for (size_t rows = 0; rows < N; rows++) { std::vector v(F); std::cout << "Sample# " << rows + 1 << ": "; - for (size_t cols = 0; cols < F; cols++) + for (size_t cols = 0; cols < F; cols++) { // get the F features std::cin >> v[cols]; + } data[rows] = v; // get the corresponding output std::cin >> Y[rows]; @@ -440,7 +454,7 @@ int main() { std::vector beta = fit_OLS_regressor(data, Y); std::cout << std::endl << std::endl << "beta:" << beta << std::endl; - size_t T; + size_t T = 0; std::cout << "Enter number of test samples: "; // number of test sample inputs std::cin >> T; diff --git a/numerical_methods/newton_raphson_method.cpp b/numerical_methods/newton_raphson_method.cpp index 7597f1b8a..17147e0be 100644 --- a/numerical_methods/newton_raphson_method.cpp +++ b/numerical_methods/newton_raphson_method.cpp @@ -17,8 +17,8 @@ #include #include -#define EPSILON 1e-10 ///< system accuracy limit -#define MAX_ITERATIONS INT16_MAX ///< Maximum number of iterations to check 
+constexpr double EPSILON = 1e-10;  ///< system accuracy limit
+constexpr int16_t MAX_ITERATIONS = INT16_MAX;  ///< Maximum number of iterations
 
 /** define \f$f(x)\f$ to find root for.
  * Currently defined as:
@@ -44,8 +44,8 @@ static double eq_der(double i) {
 int main() {
     std::srand(std::time(nullptr));  // initialize randomizer
 
-    double z, c = std::rand() % 100, m, n;
-    int i;
+    double z = NAN, c = std::rand() % 100, m = NAN, n = NAN;
+    int i = 0;
 
     std::cout << "\nInitial approximation: " << c;
 
@@ -57,8 +57,9 @@ int main() {
         z = c - (m / n);
         c = z;
 
-        if (std::abs(m) < EPSILON)  // stoping criteria
+        if (std::abs(m) < EPSILON) {  // stoping criteria
             break;
+        }
     }
 
     std::cout << "\n\nRoot: " << z << "\t\tSteps: " << i << std::endl;
diff --git a/numerical_methods/ode_forward_euler.cpp b/numerical_methods/ode_forward_euler.cpp
index a4455c57a..5e4dda31b 100644
--- a/numerical_methods/ode_forward_euler.cpp
+++ b/numerical_methods/ode_forward_euler.cpp
@@ -54,8 +54,8 @@ void problem(const double &x, std::valarray<double> *y,
              std::valarray<double> *dy) {
     const double omega = 1.F;              // some const for the problem
-    dy[0][0] = y[0][1];                    // x dot
-    dy[0][1] = -omega * omega * y[0][0];   // y dot
+    (*dy)[0] = (*y)[1];                    // x dot // NOLINT
+    (*dy)[1] = -omega * omega * (*y)[0];   // y dot // NOLINT
 }
 
 /**
@@ -83,10 +83,10 @@ void exact_solution(const double &x, std::valarray<double> *y) {
  * @param[in,out] y take \f$y_n\f$ and compute \f$y_{n+1}\f$
  * @param[in,out] dy compute \f$f\left(x_n,y_n\right)\f$
  */
-void forward_euler_step(const double dx, const double &x,
+void forward_euler_step(const double dx, const double x,
                         std::valarray<double> *y, std::valarray<double> *dy) {
     problem(x, y, dy);
-    y[0] += dy[0] * dx;
+    *y += *dy * dx;
 }
 
 /**
@@ -101,7 +101,7 @@ void forward_euler_step(const double dx, const double &x,
  */
 double forward_euler(double dx, double x0, double x_max,
                      std::valarray<double> *y, bool save_to_file = false) {
-    std::valarray<double> dy = y[0];
+    std::valarray<double> dy = *y;
 
     std::ofstream fp;
     if (save_to_file) {
@@ -122,9 +122,9 @@ double forward_euler(double dx, double x0, double x_max,
             // write to file
             fp << x << ",";
             for (int i = 0; i < L - 1; i++) {
-                fp << y[0][i] << ",";
+                fp << y[0][i] << ",";  // NOLINT
             }
-            fp << y[0][L - 1] << "\n";
+            fp << y[0][L - 1] << "\n";  // NOLINT
         }
 
         forward_euler_step(dx, x, y, &dy);  // perform integration
@@ -133,8 +133,9 @@ double forward_euler(double dx, double x0, double x_max,
     /* end of integration */
     std::clock_t t2 = std::clock();
 
-    if (fp.is_open())
+    if (fp.is_open()) {
         fp.close();
+    }
 
     return static_cast<double>(t2 - t1) / CLOCKS_PER_SEC;
 }
@@ -153,7 +154,7 @@ void save_exact_solution(const double &X0, const double &X_MAX,
                          const double &step_size,
                          const std::valarray<double> &Y0) {
     double x = X0;
-    std::valarray<double> y = Y0;
+    std::valarray<double> y(Y0);
 
     std::ofstream fp("exact.csv", std::ostream::out);
     if (!fp.is_open()) {
@@ -166,9 +167,9 @@ void save_exact_solution(const double &X0, const double &X_MAX,
     do {
         fp << x << ",";
         for (int i = 0; i < y.size() - 1; i++) {
-            fp << y[i] << ",";
+            fp << y[i] << ",";  // NOLINT
         }
-        fp << y[y.size() - 1] << "\n";
+        fp << y[y.size() - 1] << "\n";  // NOLINT
 
         exact_solution(x, &y);
 
@@ -186,10 +187,10 @@ void save_exact_solution(const double &X0, const double &X_MAX,
  * Main Function
  */
 int main(int argc, char *argv[]) {
-    double X0 = 0.f;                       /* initial value of x0 */
-    double X_MAX = 10.F;                   /* upper limit of integration */
-    std::valarray<double> Y0 = {1.f, 0.f}; /* initial value Y = y(x = x_0) */
-    double step_size;
+    double X0 = 0.f;     /* initial value of x0 */
+    double X_MAX = 10.F; /* upper limit of integration */
+    std::valarray<double> Y0{1.f, 0.f}; /* initial value Y = y(x = x_0) */
+    double step_size = NAN;
 
     if (argc == 1) {
         std::cout << "\nEnter the step size: ";
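
The patch continues beyond this excerpt with the same kinds of changes. The recurring clang-tidy fixes applied throughout are: macro constants replaced by typed constexpr constants, raw C arrays replaced by std::array, index loops replaced by range-based for loops, single-statement bodies braced, local variables initialized at the point of declaration, and single-argument constructors marked explicit. The short standalone sketch below illustrates only the first three of those patterns; it is not taken from the patch, and the names in it (MAX_ITER, samples) are purely illustrative.

    #include <array>
    #include <iostream>

    // Before tidying, this would typically be: #define MAX_ITER 500
    // A constexpr constant is typed and scoped, unlike a macro.
    constexpr int MAX_ITER = 500;

    int main() {
        // Before: double samples[4] = {...};  -- a raw array that decays to a pointer.
        // std::array carries its size and works directly with range-based for.
        std::array<double, 4> samples{0.5, -1.25, 3.0, 2.75};

        double sum = 0.0;
        for (double s : samples) {  // range-based for instead of an index loop
            sum += s;
        }

        if (MAX_ITER > 0) {  // braces even around a single-statement body
            std::cout << "mean = " << sum / samples.size() << '\n';
        }
        return 0;
    }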