Merge pull request #913 from kvedala/ols_regressor

[enhancement] OLS regressor should be considered an ML algorithm and not a numerical method
This commit is contained in:
Krishna Vedala 2020-06-27 21:01:14 -04:00 committed by GitHub
commit b79445ff82
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
2 changed files with 7 additions and 7 deletions

View File

@@ -98,6 +98,7 @@
* [Adaline Learning](https://github.com/TheAlgorithms/C-Plus-Plus/blob/master/machine_learning/adaline_learning.cpp)
* [Kohonen Som Topology](https://github.com/TheAlgorithms/C-Plus-Plus/blob/master/machine_learning/kohonen_som_topology.cpp)
* [Kohonen Som Trace](https://github.com/TheAlgorithms/C-Plus-Plus/blob/master/machine_learning/kohonen_som_trace.cpp)
* [Ordinary Least Squares Regressor](https://github.com/TheAlgorithms/C-Plus-Plus/blob/master/machine_learning/ordinary_least_squares_regressor.cpp)
## Math
* [Armstrong Number](https://github.com/TheAlgorithms/C-Plus-Plus/blob/master/math/armstrong_number.cpp)
@@ -145,7 +146,6 @@
* [Ode Forward Euler](https://github.com/TheAlgorithms/C-Plus-Plus/blob/master/numerical_methods/ode_forward_euler.cpp)
* [Ode Midpoint Euler](https://github.com/TheAlgorithms/C-Plus-Plus/blob/master/numerical_methods/ode_midpoint_euler.cpp)
* [Ode Semi Implicit Euler](https://github.com/TheAlgorithms/C-Plus-Plus/blob/master/numerical_methods/ode_semi_implicit_euler.cpp)
* [Ordinary Least Squares Regressor](https://github.com/TheAlgorithms/C-Plus-Plus/blob/master/numerical_methods/ordinary_least_squares_regressor.cpp)
* [Qr Decompose](https://github.com/TheAlgorithms/C-Plus-Plus/blob/master/numerical_methods/qr_decompose.h)
* [Qr Decomposition](https://github.com/TheAlgorithms/C-Plus-Plus/blob/master/numerical_methods/qr_decomposition.cpp)
* [Qr Eigen Values](https://github.com/TheAlgorithms/C-Plus-Plus/blob/master/numerical_methods/qr_eigen_values.cpp)

View File

@@ -355,7 +355,7 @@ std::vector<float> predict_OLS_regressor(std::vector<std::vector<T>> const &X,
}
/** Self test checks */
void test() {
void ols_test() {
int F = 3, N = 5;
/* test function = x^2 -5 */
@@ -368,12 +368,12 @@ void test() {
// perform regression modelling
std::vector<float> beta1 = fit_OLS_regressor(data1, Y1);
// create test data set with same features = x, x^2, x^3
std::vector<std::vector<float>> test1(
std::vector<std::vector<float>> test_data1(
{{-2, 4, -8}, {2, 4, 8}, {-10, 100, -1000}, {10, 100, 1000}});
// expected regression outputs
std::vector<float> expected1({-1, -1, 95, 95});
// predicted regression outputs
std::vector<float> out1 = predict_OLS_regressor(test1, beta1);
std::vector<float> out1 = predict_OLS_regressor(test_data1, beta1);
// compare predicted results are within +-0.01 limit of expected
for (size_t rows = 0; rows < out1.size(); rows++)
assert(std::abs(out1[rows] - expected1[rows]) < 0.01);
@@ -389,12 +389,12 @@ void test() {
// perform regression modelling
std::vector<float> beta2 = fit_OLS_regressor(data2, Y2);
// create test data set with same features = x, x^2, x^3
std::vector<std::vector<float>> test2(
std::vector<std::vector<float>> test_data2(
{{-2, 4, -8}, {2, 4, 8}, {-10, 100, -1000}, {10, 100, 1000}});
// expected regression outputs
std::vector<float> expected2({-104, -88, -1000, 1000});
// predicted regression outputs
std::vector<float> out2 = predict_OLS_regressor(test2, beta2);
std::vector<float> out2 = predict_OLS_regressor(test_data2, beta2);
// compare predicted results are within +-0.01 limit of expected
for (size_t rows = 0; rows < out2.size(); rows++)
assert(std::abs(out2[rows] - expected2[rows]) < 0.01);
@@ -408,7 +408,7 @@ void test() {
* main function
*/
int main() {
test();
ols_test();
size_t N, F;