Merge branch 'master' into fixgraph

Ayaan Khan 2020-07-17 20:54:47 +05:30 committed by GitHub
commit 257d0b1a71
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
18 changed files with 1127 additions and 267 deletions

.clang-tidy Normal file (+6)

@ -0,0 +1,6 @@
---
Checks: '-*,google-*,clang-analyzer-*,-clang-analyzer-security.insecureAPI.*,cppcoreguidelines-*,-cppcoreguidelines-avoid-magic-numbers,-cppcoreguidelines-pro-bounds-*,openmp-*,performance-*,portability-*,modernize-*,-modernize-use-trailing-*'
WarningsAsErrors: '*,-google-readability-*,-google-explicit-constructor,-modernize-*,modernize-avoid-c-arrays,-performance-move-const-arg,-performance-noexcept-move-constructor,-cppcoreguidelines-init-variables,-cppcoreguidelines-pro-*,-cppcoreguidelines-owning-memory,-clang-analyzer-cplusplus.Move'
HeaderFilterRegex: ''
AnalyzeTemporaryDtors: false
FormatStyle: '{ BasedOnStyle: Google, UseTab: Never, IndentWidth: 4, TabWidth: 4, AllowShortIfStatementsOnASingleLine: false, IndentCaseLabels: true, ColumnLimit: 80, AccessModifierOffset: -3, AlignConsecutiveMacros: true }'
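For illustration (not part of the commit): one of the checks this configuration enables, `cppcoreguidelines-init-variables`, accounts for many of the mechanical changes in the machine-learning files later in this diff. A minimal sketch of the pattern it flags and the fix that `--fix` applies:

```cpp
// Hypothetical example of a cppcoreguidelines-init-variables finding.
int sum_first(int n) {
    // clang-tidy flags "int i, total;" (uninitialized locals) and
    // rewrites the declarations to initialize at the point of declaration:
    int i = 0, total = 0;
    for (; i < n; i++) {
        total += i;
    }
    return total;
}
```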

.github/workflows/awesome_workflow.yml

@ -16,7 +16,9 @@ jobs:
- name: requirements
run: |
sudo apt -qq -y update
sudo apt -qq install clang-format
sudo apt -qq install clang-tidy-10
# checks pass with fewer errors when this version is used.
# The default installs v6.0, which did not work out well in my tests
- name: Setup Git Specs
run: |
git config --global user.name github-actions
@ -43,18 +45,6 @@ jobs:
fi
done
git commit -am "formatting filenames $GITHUB_SHA" || true
- name: Clang Formatter
run: |
for fname in $(find . -name '*.cpp' -o -name '*.h')
do
clang-format --verbose -i --style="$line1 $line2 $line3 $line4" "$fname"
done
git commit -am "formatting source-code for $GITHUB_SHA" || true
env:
line1: "{ BasedOnStyle: Google, UseTab: Never,"
line2: "IndentWidth: 4, TabWidth: 4, "
line3: "AllowShortIfStatementsOnASingleLine: false, IndentCaseLabels: false,"
line4: "ColumnLimit: 80, AccessModifierOffset: -3 }"
- name: Update DIRECTORY.md
shell: python
@ -100,24 +90,21 @@ jobs:
with open("DIRECTORY.md", "w") as out_file:
out_file.write(build_directory_md(".") + "\n")
- name: Update DIRECTORY.md
- name: Commit DIRECTORY.md
run: git commit -m "updating DIRECTORY.md" DIRECTORY.md || true
- name: Get file changes
run: |
cat DIRECTORY.md
git config --global user.name github-actions
git config --global user.email '${GITHUB_ACTOR}@users.noreply.github.com'
git remote set-url origin https://x-access-token:${{ secrets.GITHUB_TOKEN }}@github.com/$GITHUB_REPOSITORY
git add DIRECTORY.md
git commit -am "updating DIRECTORY.md" || true
git push --force origin HEAD:$GITHUB_REF || true
- name: Install CPPLINT
run: |
python -m pip install cpplint
git remote -v
git branch
git remote set-url origin https://x-access-token:${{ secrets.GITHUB_TOKEN }}@github.com/$GITHUB_REPOSITORY
git diff --diff-filter=dr --name-only origin/master > git_diff.txt
echo "Files changed-- `cat git_diff.txt`"
- name: cpplint_modified_files
- name: Configure for static lint checks
# compiling first gives clang-tidy access to all the header files and settings used to compile the programs.
# This checks macros, if any, on Linux but not on Windows; the portability checks should
# be able to catch any errors for other platforms.
run: cmake -B build -S . -DCMAKE_EXPORT_COMPILE_COMMANDS=ON
- name: Lint modified files
shell: python
run: |
import os
@ -135,9 +122,11 @@ jobs:
if not cpp_files:
sys.exit(0)
print("cpplint:")
for cpp_file in cpp_files:
subprocess.run(["cpplint", "--filter=-legal/copyright,-build/include", cpp_file], check=True, text=True)
subprocess.run(["clang-tidy-10", "--fix", "-p=build", "--extra-arg=-std=c++11", *cpp_files, "--"],
check=True, text=True, stderr=subprocess.STDOUT)
# for cpp_file in cpp_files:
# subprocess.run(["clang-tidy-10", "--fix", "-p=build", cpp_file, "--"],
# check=True, text=True, stderr=subprocess.STDOUT)
# print("g++:")
# compile_exts = tuple(".c .c++ .cc .cpp .cu .cxx".split())
@ -163,6 +152,11 @@ jobs:
bad_files = len(upper_files + space_files + nodir_files)
if bad_files:
sys.exit(bad_files)
- name: Commit and push changes
run: |
git diff DIRECTORY.md
git commit -am "clang-tidy fixes for $GITHUB_SHA" || true
git push --force origin HEAD:$GITHUB_REF || true
build:
name: Compile checks

.gitpod.dockerfile

@ -1,9 +1,9 @@
FROM gitpod/workspace-full
FROM gitpod/workspace-full-vnc
RUN sudo apt-get update \
&& sudo apt-get install -y \
doxygen \
graphviz \
ninja-build \
&& pip install cpplint \
&& sudo rm -rf /var/lib/apt/lists/*

.gitpod.yml

@ -7,6 +7,7 @@ github:
addComment: false
addCheck: false
master: true
branches: true
pullRequestsFromForks: true
vscode:

CMakeLists.txt

@ -17,6 +17,28 @@ if(MSVC)
endif(MSVC)
option(USE_OPENMP "flag to use OpenMP for multithreading" ON)
if(USE_OPENMP)
find_package(OpenMP)
if (OpenMP_CXX_FOUND)
message(STATUS "Building with OpenMP Multithreading.")
else()
message(STATUS "No OpenMP found, no multithreading.")
endif()
endif()
add_subdirectory(math)
add_subdirectory(others)
add_subdirectory(search)
add_subdirectory(ciphers)
add_subdirectory(strings)
add_subdirectory(sorting)
add_subdirectory(geometry)
add_subdirectory(graphics)
add_subdirectory(probability)
add_subdirectory(data_structures)
add_subdirectory(machine_learning)
add_subdirectory(numerical_methods)
add_subdirectory(graph)
cmake_policy(SET CMP0054 NEW)
cmake_policy(SET CMP0057 NEW)
@ -34,6 +56,7 @@ if(DOXYGEN_FOUND)
set(DOXYGEN_STRIP_CODE_COMMENTS NO)
set(DOXYGEN_EXT_LINKS_IN_WINDOW YES)
set(DOXYGEN_BUILTIN_STL_SUPPORT YES)
set(DOXYGEN_ENABLE_PREPROCESSING YES)
set(DOXYGEN_CLANG_ASSISTED_PARSING YES)
set(DOXYGEN_FILE_PATTERNS *.cpp *.h *.hpp *.md)
set(DOXYGEN_MATHJAX_EXTENSIONS TeX/AMSmath TeX/AMSsymbols)
@ -48,6 +71,12 @@ if(DOXYGEN_FOUND)
set(DOXYGEN_INTERACTIVE_SVG YES)
set(DOXYGEN_DOT_IMAGE_FORMAT "svg")
endif()
if(OPENMP_FOUND)
set(DOXYGEN_PREDEFINED "_OPENMP=1")
endif()
if(GLUT_FOUND)
set(DOXYGEN_PREDEFINED ${DOXYGEN_PREDEFINED} "GLUT_FOUND=1")
endif()
doxygen_add_docs(
doc
@ -56,27 +85,6 @@ if(DOXYGEN_FOUND)
)
endif()
if(USE_OPENMP)
find_package(OpenMP)
if (OpenMP_CXX_FOUND)
message(STATUS "Building with OpenMP Multithreading.")
else()
message(STATUS "No OpenMP found, no multithreading.")
endif()
endif()
add_subdirectory(math)
add_subdirectory(others)
add_subdirectory(search)
add_subdirectory(ciphers)
add_subdirectory(strings)
add_subdirectory(sorting)
add_subdirectory(geometry)
add_subdirectory(probability)
add_subdirectory(data_structures)
add_subdirectory(machine_learning)
add_subdirectory(numerical_methods)
add_subdirectory(graph)
set(CPACK_PROJECT_NAME ${PROJECT_NAME})
set(CPACK_PROJECT_VERSION ${PROJECT_VERSION})
include(CPack)
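Since the build now probes for OpenMP up front and the graphics/ML sources guard their pragmas accordingly, here is a minimal sketch of that guard pattern (illustrative only; the same `#ifdef _OPENMP` idiom appears in the files below):

```cpp
#include <iostream>
#ifdef _OPENMP
#include <omp.h>  // only available when the compiler enables OpenMP
#endif

int main() {
    double sum = 0.0;
#ifdef _OPENMP
#pragma omp parallel for reduction(+ : sum)  // parallel sum when OpenMP exists
#endif
    for (int i = 0; i < 1000; i++) {
        sum += 1.0 / (i + 1);  // partial harmonic sum; order-independent
    }
    std::cout << "sum = " << sum << '\n';  // same result with or without OpenMP
    return 0;
}
```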

CONTRIBUTING.md

@ -28,8 +28,85 @@ We are very happy that you consider implementing algorithms and data structures
- Strictly use snake_case (underscore_separated) in filenames.
- If you have added or modified code, please make sure the code compiles before submitting.
- Our automated testing runs [__cpplint__](https://github.com/cpplint/cpplint) on all pull requests so please be sure that your code passes before submitting.
- Please conform to the [doxygen](https://www.doxygen.nl/manual/docblocks.html) standard and document the code as much as possible. This not only helps readers but also generates the correct info on the website.
- **Be consistent in use of these guidelines.**
#### Documentation
- Make sure you put useful comments in your code. Do not comment things that are obvious.
- Please avoid creating new directories if at all possible. Try to fit your work into the existing directory structure. If you want to create a new directory, then please check if a similar category has been recently suggested or created by other pull requests.
- If you have modified/added documentation, please ensure that your language is concise and contains no grammar errors.
- Do not update README.md along with other changes. First create an issue, and then link to that issue in your pull request to suggest specific changes required to README.md.
- The repository follows [Doxygen](https://www.doxygen.nl/manual/docblocks.html) standards and auto-generates the [repo website](https://thealgorithms.github.io/C-Plus-Plus). Please ensure the code is documented in this structure. A sample implementation is given below.
#### Test
- Make sure to add examples and test cases in your main() function.
- If you find any algorithm or document without tests, please feel free to create a pull request or issue describing suggested changes.
- Please try to add one or more `test()` functions that invoke the algorithm implementation on random test data with expected output. Use the `assert()` function to confirm that the tests pass.
#### Typical structure of a program:
```cpp
/**
* @file
* @brief Add one line description here
* @details
* This is a multi line
* description containing links, references,
* math equations, etc
* @author [Name](https://github.com/handle)
* @see related_file.cpp, another_file.cpp
*/
#include <cassert>  // for assert(); add other required headers here
/**
* @namespace <check from other files in this repo>
*/
namespace name {
/**
* Class documentation
*/
class cls_name{
private:
int var1; ///< short info of this variable
char *msg; ///< short info
public:
// other members also documented as below
};
/**
* Function documentation
* @tparam T this is a one-line info about T
* @param param1 one-line info about param1
* @param param2 one-line info about param2
* @returns `true` if ...
* @returns `false` if ...
*/
template<class T>
bool func(int param1, T param2) {
// function statements here
if(/*something bad*/)
return false;
return true;
}
/** Test function */
void test() {
/* some statements */
assert(func(...) == ...); // this ensures that the algorithm works as expected
// can have multiple checks
}
/** Main function */
int main(int argc, char *argv[]) {
// code here
return 0;
}
```
#### New File Name guidelines
- Use lowercase words with `"_"` as separator
- For instance
@ -70,16 +147,6 @@ Common prefixes:
- docs: Documentation changes
- test: Correct existing tests or add new ones
#### Documentation
- Make sure you put useful comments in your code. Do not comment things that are obvious.
- Please avoid creating new directories if at all possible. Try to fit your work into the existing directory structure. If you want to create a new directory, then please check if a similar category has been recently suggested or created by other pull requests.
- If you have modified/added documentation, please ensure that your language is concise and contains no grammar errors.
- Do not update README.md along with other changes, first create an issue and then link to that issue in your pull request to suggest specific changes required to README.md
#### Test
- Make sure to add examples and test cases in your main() function.
- If you find any algorithm or document without tests, please feel free to create a pull request or issue describing suggested changes.
### Pull Requests
- Checkout our [pull request template](https://github.com/TheAlgorithms/C-Plus-Plus/blob/master/.github/pull_request_template.md)

DIRECTORY.md

@ -33,6 +33,7 @@
* [Queue Using Array2](https://github.com/TheAlgorithms/C-Plus-Plus/blob/master/data_structures/queue_using_array2.cpp)
* [Queue Using Linked List](https://github.com/TheAlgorithms/C-Plus-Plus/blob/master/data_structures/queue_using_linked_list.cpp)
* [Queue Using Linkedlist](https://github.com/TheAlgorithms/C-Plus-Plus/blob/master/data_structures/queue_using_linkedlist.cpp)
* [Skip List](https://github.com/TheAlgorithms/C-Plus-Plus/blob/master/data_structures/skip_list.cpp)
* [Stack](https://github.com/TheAlgorithms/C-Plus-Plus/blob/master/data_structures/stack.h)
* [Stack Using Array](https://github.com/TheAlgorithms/C-Plus-Plus/blob/master/data_structures/stack_using_array.cpp)
* [Stack Using Linked List](https://github.com/TheAlgorithms/C-Plus-Plus/blob/master/data_structures/stack_using_linked_list.cpp)
@ -84,6 +85,9 @@
* [Topological Sort](https://github.com/TheAlgorithms/C-Plus-Plus/blob/master/graph/topological_sort.cpp)
* [Topological Sort By Kahns Algo](https://github.com/TheAlgorithms/C-Plus-Plus/blob/master/graph/topological_sort_by_kahns_algo.cpp)
## Graphics
* [Spirograph](https://github.com/TheAlgorithms/C-Plus-Plus/blob/master/graphics/spirograph.cpp)
## Greedy Algorithms
* [Dijkstra](https://github.com/TheAlgorithms/C-Plus-Plus/blob/master/greedy_algorithms/dijkstra.cpp)
* [Huffman](https://github.com/TheAlgorithms/C-Plus-Plus/blob/master/greedy_algorithms/huffman.cpp)
@ -198,6 +202,7 @@
## Search
* [Binary Search](https://github.com/TheAlgorithms/C-Plus-Plus/blob/master/search/binary_search.cpp)
* [Exponential Search](https://github.com/TheAlgorithms/C-Plus-Plus/blob/master/search/exponential_search.cpp)
* [Fibonacci Search](https://github.com/TheAlgorithms/C-Plus-Plus/blob/master/search/fibonacci_search.cpp)
* [Hash Search](https://github.com/TheAlgorithms/C-Plus-Plus/blob/master/search/hash_search.cpp)
* [Interpolation Search](https://github.com/TheAlgorithms/C-Plus-Plus/blob/master/search/interpolation_search.cpp)
* [Interpolation Search2](https://github.com/TheAlgorithms/C-Plus-Plus/blob/master/search/interpolation_search2.cpp)

data_structures/skip_list.cpp Normal file

@ -0,0 +1,221 @@
/**
* @file skip_list.cpp
* @brief Data structure for fast searching and insertion in \f$O(\log n)\f$
* time
* @details
* A skip list is a data structure used for storing a sorted list of
* items with the help of a hierarchy of linked lists that connect increasingly
* sparse subsequences of the items.
*
* References: [GeeksforGeeks](https://www.geeksforgeeks.org/skip-list/) and
* [OpenGenus](https://iq.opengenus.org/skip-list) for pseudocode and code
* @author [enqidu](https://github.com/enqidu)
* @author [Krishna Vedala](https://github.com/kvedala)
*/
#include <array>
#include <cstring>
#include <ctime>
#include <iostream>
#include <memory>
#include <vector>
/** \namespace data_structure
* \brief Data-structure algorithms
*/
namespace data_structure {
constexpr int MAX_LEVEL = 2; ///< Maximum level of skip list
constexpr float PROBABILITY = 0.5; ///< Current probability for "coin toss"
/**
* Node structure [Key][Node*, Node*...]
*/
struct Node {
int key; ///< key integer
void* value; ///< pointer of value
std::vector<std::shared_ptr<Node>>
forward; ///< pointers to the next node at each level
/**
* Creates node with provided key, level and value
* @param key number that is used for comparison
* @param level maximum level at which the node will be added
*/
Node(int key, int level, void* value = nullptr) : key(key), value(value) {
// Initialization of forward vector
for (int i = 0; i < (level + 1); i++) {
forward.push_back(nullptr);
}
}
};
/**
* SkipList class implementation with basic methods
*/
class SkipList {
int level; ///< Maximum level of the skiplist
std::shared_ptr<Node> header; ///< Pointer to the header node
public:
/**
* Skip List constructor. Initializes header, start
* Node for searching in the list
*/
SkipList() {
level = 0;
// Header initialization
header = std::shared_ptr<Node>(new Node(-1, MAX_LEVEL));
}
/**
* Returns a random level for the skip list.
* Each successively higher level is half as likely.
* @return random level for skip list
*/
int randomLevel() {
int lvl = 0;
while (static_cast<float>(std::rand()) / RAND_MAX < PROBABILITY &&
lvl < MAX_LEVEL)
lvl++;
return lvl;
}
/**
* Inserts an element with the given key and value;
* its level is computed by the randomLevel() function.
* @param key number that is used for comparison
* @param value pointer to a value, which can be of any type
*/
void insertElement(int key, void* value) {
std::cout << "Inserting" << key << "...";
std::shared_ptr<Node> x = header;
std::array<std::shared_ptr<Node>, MAX_LEVEL + 1> update;
update.fill(nullptr);
for (int i = level; i >= 0; i--) {
while (x->forward[i] != nullptr && x->forward[i]->key < key)
x = x->forward[i];
update[i] = x;
}
x = x->forward[0];
bool doesnt_exist = (x == nullptr || x->key != key);
if (doesnt_exist) {
int rlevel = randomLevel();
if (rlevel > level) {
for (int i = level + 1; i < rlevel + 1; i++) update[i] = header;
// Update current level
level = rlevel;
}
std::shared_ptr<Node> n =
std::shared_ptr<Node>(new Node(key, rlevel, value));
for (int i = 0; i <= rlevel; i++) {
n->forward[i] = update[i]->forward[i];
update[i]->forward[i] = n;
}
std::cout << "Inserted" << std::endl;
} else {
std::cout << "Exists" << std::endl;
}
}
/**
* Deletes an element by key and prints whether it has been removed successfully
* @param key number that is used for comparison.
*/
void deleteElement(int key) {
std::shared_ptr<Node> x = header;
std::array<std::shared_ptr<Node>, MAX_LEVEL + 1> update;
update.fill(nullptr);
for (int i = level; i >= 0; i--) {
while (x->forward[i] != nullptr && x->forward[i]->key < key)
x = x->forward[i];
update[i] = x;
}
x = x->forward[0];
bool doesnt_exist = (x == nullptr || x->key != key);
if (!doesnt_exist) {
for (int i = 0; i <= level; i++) {
if (update[i]->forward[i] != x)
break;
update[i]->forward[i] = x->forward[i];
}
/* Remove empty levels*/
while (level > 0 && header->forward[level] == nullptr) level--;
std::cout << "Deleted" << std::endl;
} else {
std::cout << "Doesn't exist" << std::endl;
}
}
/**
* Searches for an element in the skip list structure
* @param key number that is used for comparison
* @return pointer to the value of the node
*/
void* searchElement(int key) {
std::shared_ptr<Node> x = header;
std::cout << "Searching for " << key << std::endl;
for (int i = level; i >= 0; i--) {
while (x->forward[i] && x->forward[i]->key < key) x = x->forward[i];
}
x = x->forward[0];
if (x && x->key == key) {
std::cout << "Found" << std::endl;
return x->value;
} else {
std::cout << "Not Found" << std::endl;
return nullptr;
}
}
/**
* Display skip list level
*/
void displayList() {
std::cout << "Displaying list:\n";
for (int i = 0; i <= level; i++) {
std::shared_ptr<Node> node = header->forward[i];
std::cout << "Level " << (i) << ": ";
while (node != nullptr) {
std::cout << node->key << " ";
node = node->forward[i];
}
std::cout << std::endl;
}
}
};
} // namespace data_structure
/**
* Main function:
* Creates and inserts 2^(number of levels) random
* elements into the skip list and then displays it
*/
int main() {
std::srand(std::time(nullptr));
data_structure::SkipList lst;
for (int j = 0; j < (1 << (data_structure::MAX_LEVEL + 1)); j++) {
int k = (std::rand() % (1 << (data_structure::MAX_LEVEL + 2)) + 1);
lst.insertElement(k, &j);
}
lst.displayList();
return 0;
}
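A hedged usage sketch of the class above, assuming it replaces the demo `main()` when compiled together with the `data_structure` namespace from this file (note that values are stored as untyped `void *` pointers):

```cpp
// Hypothetical driver, not part of the commit.
int main() {
    data_structure::SkipList list;
    int payload = 42;                     // stored by address as void *
    list.insertElement(3, &payload);      // prints "Inserting 3...Inserted"
    list.insertElement(7, &payload);
    void *found = list.searchElement(3);  // prints "Found"
    list.deleteElement(7);                // prints "Deleted"
    list.displayList();                   // prints the keys on each level
    return found != nullptr ? 0 : 1;
}
```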

graphics/CMakeLists.txt Normal file (+83)

@ -0,0 +1,83 @@
find_package(OpenGL)
if(OpenGL_FOUND)
find_package(GLUT)
if(NOT GLUT_FOUND)
message("FreeGLUT library will be downloaded and built.")
include(ExternalProject)
ExternalProject_Add (
FREEGLUT-PRJ
URL https://sourceforge.net/projects/freeglut/files/freeglut/3.2.1/freeglut-3.2.1.tar.gz
URL_MD5 cd5c670c1086358598a6d4a9d166949d
CMAKE_GENERATOR ${CMAKE_GENERATOR}
CMAKE_GENERATOR_TOOLSET ${CMAKE_GENERATOR_TOOLSET}
CMAKE_GENERATOR_PLATFORM ${CMAKE_GENERATOR_PLATFORM}
CMAKE_ARGS -DCMAKE_BUILD_TYPE=Release
-DFREEGLUT_BUILD_SHARED_LIBS=OFF
-DFREEGLUT_BUILD_STATIC_LIBS=ON
-DFREEGLUT_BUILD_DEMOS=OFF
PREFIX ${CMAKE_CURRENT_BINARY_DIR}/freeglut
# BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR}/freeglut-build
# BUILD_IN_SOURCE ON
# UPDATE_COMMAND ""
INSTALL_COMMAND ""
# CONFIGURE_COMMAND ""
# BUILD_COMMAND ""
)
ExternalProject_Get_Property(FREEGLUT-PRJ SOURCE_DIR)
ExternalProject_Get_Property(FREEGLUT-PRJ BINARY_DIR)
set(FREEGLUT_BIN_DIR ${BINARY_DIR})
set(FREEGLUT_SRC_DIR ${SOURCE_DIR})
# add_library(libfreeglut STATIC IMPORTED)
# set_target_properties(libfreeglut PROPERTIES IMPORTED_LOCATION ${FREEGLUT_BIN_DIR})
# set(FREEGLUT_BUILD_DEMOS OFF CACHE BOOL "")
# set(FREEGLUT_BUILD_SHARED_LIBS OFF CACHE BOOL "")
# set(FREEGLUT_BUILD_STATIC_LIBS ON CACHE BOOL "")
# add_subdirectory(${FREEGLUT_SRC_DIR} ${FREEGLUT_BIN_DIR} EXCLUDE_FROM_ALL)
# add_subdirectory(${BINARY_DIR})
# find_package(FreeGLUT)
endif(NOT GLUT_FOUND)
else(OpenGL_FOUND)
message(WARNING "OPENGL not found. Will not build graphical outputs.")
endif(OpenGL_FOUND)
# If necessary, use the RELATIVE flag, otherwise each source file may be listed
# with full pathname. RELATIVE may make it easier to extract an executable name
# automatically.
file( GLOB APP_SOURCES RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} *.cpp )
# file( GLOB APP_SOURCES ${CMAKE_SOURCE_DIR}/*.c )
# AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} APP_SOURCES)
foreach( testsourcefile ${APP_SOURCES} )
# I used a simple string replace to cut off .cpp.
string( REPLACE ".cpp" "" testname ${testsourcefile} )
add_executable( ${testname} ${testsourcefile} )
set_target_properties(${testname} PROPERTIES LINKER_LANGUAGE CXX)
if(OpenMP_CXX_FOUND)
target_link_libraries(${testname} PRIVATE OpenMP::OpenMP_CXX)
endif()
if(OpenGL_FOUND)
if(NOT GLUT_FOUND)
add_dependencies(${testname} FREEGLUT-PRJ)
target_compile_definitions(${testname} PRIVATE FREEGLUT_STATIC)
target_include_directories(${testname} PRIVATE ${FREEGLUT_SRC_DIR}/include)
target_link_directories(${testname} PRIVATE ${FREEGLUT_BIN_DIR}/lib)
target_link_libraries(${testname} PRIVATE OpenGL::GL)
target_link_libraries(${testname} INTERFACE FREEGLUT-PRJ)
# target_include_directories(${testname} PRIVATE ${FREEGLUT_INCLUDE_DIRS})
# target_link_libraries(${testname} INTERFACE freeglut_static)
else()
target_include_directories(${testname} PRIVATE ${GLUT_INCLUDE_DIRS})
target_link_libraries(${testname} PRIVATE OpenGL::GL ${GLUT_LIBRARIES})
endif()
target_compile_definitions(${testname} PRIVATE USE_GLUT)
endif(OpenGL_FOUND)
if(APPLE)
target_compile_options(${testname} PRIVATE -Wno-deprecated)
endif(APPLE)
install(TARGETS ${testname} DESTINATION "bin/graphics")
endforeach( testsourcefile ${APP_SOURCES} )

graphics/spirograph.cpp Normal file (+284)

@ -0,0 +1,284 @@
/**
* @file
* @author [Krishna Vedala](https://github.com/kvedala)
* @brief Implementation of
* [Spirograph](https://en.wikipedia.org/wiki/Spirograph)
*
* @details
* Implementation of the program is based on the geometry shown in the figure
* below:
*
* <a
* href="https://commons.wikimedia.org/wiki/File:Resonance_Cascade.svg"><img
* src="https://upload.wikimedia.org/wikipedia/commons/3/39/Resonance_Cascade.svg"
* alt="Spirograph geometry from Wikipedia" style="width: 250px"/></a>
*/
#ifdef USE_GLUT
#ifdef __APPLE__
#include <GLUT/glut.h> // include path on Macs is different
#else
#include <GL/glut.h>
#endif // __APPLE__
#endif
#define _USE_MATH_DEFINES /**< required for MSVC compiler */
#include <array>
#include <cmath>
#include <cstdlib>
#include <ctime>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <sstream>
#ifdef _OPENMP
#include <omp.h>
#endif
/**
* @namespace spirograph Functions related to spirograph.cpp
*/
namespace spirograph {
/** Generate a spirograph curve into the array `points` such that the i^th
* point in 2D is represented by `(points[i].first, points[i].second)`. The generating function is given by:
* \f{eqnarray*}{
* x &=& R\left[ (1-k) \cos (t) + l\cdot k\cdot\cos \left(\frac{1-k}{k}t\right)
* \right]\\
* y &=& R\left[ (1-k) \sin (t) - l\cdot k\cdot\sin \left(\frac{1-k}{k}t\right)
* \right] \f}
* where
* * \f$R\f$ is the scaling parameter that we will consider \f$=1\f$
* * \f$l=\frac{\rho}{r}\f$ is the relative distance of marker from the centre
* of inner circle and \f$0\le l\le1\f$
* * \f$\rho\f$ is physical distance of marker from centre of inner circle
* * \f$r\f$ is the radius of inner circle
* * \f$k=\frac{r}{R}\f$ is the ratio of radius of inner circle to outer circle
* and \f$0<k<1\f$
* * \f$R\f$ is the radius of outer circle
* * \f$t\f$ is the angle of rotation of the point i.e., represents the time
* parameter
*
* Since we are considering ratios, the actual values of \f$r\f$ and
* \f$R\f$ are immaterial.
*
* @tparam N number of points = size of array
* @param [out] points Array of 2D points represented as std::pair
* @param l the relative distance of marker from the centre of
* inner circle and \f$0\le l\le1\f$
* @param k the ratio of radius of inner circle to outer circle and \f$0<k<1\f$
* @param rot the number of rotations to perform (can be fractional value)
*/
template <std::size_t N>
void spirograph(std::array<std::pair<double, double>, N> *points, double l,
double k, double rot) {
double dt = rot * 2.f * M_PI / N;
double R = 1.f;
const double k1 = 1.f - k;
int32_t step = 0;
#ifdef _OPENMP
#pragma omp for
#endif
for (step = 0; step < N; step++) {
double t = dt * step;
double first = R * (k1 * std::cos(t) + l * k * std::cos(k1 * t / k));
double second = R * (k1 * std::sin(t) - l * k * std::sin(k1 * t / k));
points[0][step].first = first;
points[0][step].second = second;
}
}
/**
* @brief Test function to save resulting points to a CSV file.
*
*/
void test() {
const size_t N = 500;
double l = 0.3, k = 0.75, rot = 10.;
std::stringstream fname;
fname << std::setw(3) << "spirograph_" << l << "_" << k << "_" << rot
<< ".csv";
std::ofstream fp(fname.str());
if (!fp.is_open()) {
perror(fname.str().c_str());
exit(EXIT_FAILURE);
}
std::array<std::pair<double, double>, N> points;
spirograph(&points, l, k, rot);
for (size_t i = 0; i < N; i++) {
fp << points[i].first << "," << points[i].second;
if (i < N - 1) {
fp << '\n';
}
}
fp.close();
}
#ifdef USE_GLUT
static bool paused = false; /**< flag to set pause/unpause animation */
static const int animation_speed = 25; /**< animation delay in ms */
static const double step = 0.01; /**< animation step size */
static double l_ratio = step * 10; /**< the l-ratio defined in docs */
static double k_ratio = step; /**< the k-ratio defined in docs */
static const double num_rot = 20.; /**< number of rotations to simulate */
/** A wrapper for `glutBitmapString`, which is not available in all GLUT implementations.
*/
static inline void glutBitmapString(void *font, char *message) {
for (char *ch = message; *ch != '\0'; ch++) glutBitmapCharacter(font, *ch);
}
/**
* @brief Function to graph (x,y) points on the OpenGL graphics window.
*
* @tparam N number of points = size of array
* @param [in] points Array of 2D points represented as std::pair
* @param l the relative distance of marker from the centre of
* inner circle and \f$0\le l\le1\f$ to display info
* @param k the ratio of radius of inner circle to outer circle and \f$0<k<1\f$
* to display info
*/
template <size_t N>
void display_graph(const std::array<std::pair<double, double>, N> &points,
double l, double k) {
glClearColor(1.0f, 1.0f, 1.0f,
0.0f); // Set background color to white and opaque
glClear(GL_COLOR_BUFFER_BIT); // Clear the color buffer (background)
glBegin(GL_LINES); // draw line segments
glColor3f(0.f, 0.f, 1.f); // blue
glPointSize(2.f); // point size in pixels
for (size_t i = 1; i < N; i++) {
glVertex2f(points[i - 1].first, points[i - 1].second); // line from
glVertex2f(points[i].first, points[i].second); // line to
}
glEnd();
glColor3f(0.f, 0.f, 0.f);
std::stringstream buffer;
buffer << std::setw(3) << "l = " << l;
glRasterPos2f(-.85, .85);
glutBitmapString(GLUT_BITMAP_TIMES_ROMAN_24,
const_cast<char *>(buffer.str().c_str()));
buffer.str("");
buffer.clear();
buffer << std::setw(3) << "k = " << k;
glRasterPos2f(-.85, .70);
glutBitmapString(GLUT_BITMAP_TIMES_ROMAN_24,
const_cast<char *>(buffer.str().c_str()));
glutSwapBuffers();
}
/**
* @brief Test function with animation
*
*/
void test2() {
const size_t N = 5000; // number of samples
static bool direction1 = true; // increment if true, otherwise decrement
static bool direction2 = true; // increment if true, otherwise decrement
std::array<std::pair<double, double>, N> points;
spirograph(&points, l_ratio, k_ratio, num_rot);
display_graph(points, l_ratio, k_ratio);
if (paused)
// if paused, do not update l_ratio and k_ratio
return;
if (direction1) { // increment k_ratio
if (k_ratio >= (1.f - step)) // maximum limit
direction1 = false; // reverse direction of k_ratio
else
k_ratio += step;
} else { // decrement k_ratio
if (k_ratio <= step) { // minimum limit
direction1 = true; // reverse direction of k_ratio
if (direction2) { // increment l_ratio
if (l_ratio >= (1.f - step)) // max limit of l_ratio
direction2 = false; // reverse direction of l_ratio
else
l_ratio += step;
} else { // decrement l_ratio
if (l_ratio <= step) // minimum limit of l_ratio
direction2 = true; // reverse direction of l_ratio
else
l_ratio -= step;
}
} else { // no min limit of k_ratio
k_ratio -= step;
}
}
}
/**
* @brief GLUT timer callback function to add animation delay.
*/
void timer_cb(int t) {
glutTimerFunc(animation_speed, timer_cb, 0);
glutPostRedisplay();
}
/**
* @brief Keypress event call back function.
*
* @param key ID of the key pressed
* @param x mouse pointer position at event
* @param y mouse pointer position at event
*/
void keyboard_cb(unsigned char key, int x, int y) {
switch (key) {
case ' ': // spacebar toggles pause
paused = !paused; // toggle
break;
case GLUT_KEY_UP:
case '+': // up arrow key
k_ratio += step;
break;
case GLUT_KEY_DOWN:
case '_': // down arrow key
k_ratio -= step;
break;
case GLUT_KEY_RIGHT:
case '=': // right arrow key
l_ratio += step;
break;
case GLUT_KEY_LEFT:
case '-': // left arrow key
l_ratio -= step;
break;
case 0x1B: // escape key exits
exit(EXIT_SUCCESS);
default:
return;
}
}
#endif
} // namespace spirograph
/** Main function */
int main(int argc, char **argv) {
spirograph::test();
#ifdef USE_GLUT
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_RGB | GLUT_DOUBLE);
glutCreateWindow("Spirograph");
glutInitWindowSize(400, 400);
// glutIdleFunc(glutPostRedisplay);
glutTimerFunc(spirograph::animation_speed, spirograph::timer_cb, 0);
glutKeyboardFunc(spirograph::keyboard_cb);
glutDisplayFunc(spirograph::test2);
glutMainLoop();
#endif
return 0;
}
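As a quick sanity check of the generating equations documented above (my arithmetic, not from the commit): with \(R=1\), \(l=0.3\), \(k=0.75\), the values used in `test()`, the curve starts at

$$x(0) = R\left[(1-k) + lk\right] = 0.25 + 0.225 = 0.475, \qquad y(0) = R\left[(1-k)\cdot 0 - lk\cdot 0\right] = 0,$$

so the first CSV row written by `test()` should read `0.475,0` once the `.second` typo noted above is fixed.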

machine_learning/adaline_learning.cpp

@ -26,6 +26,7 @@
* computed using stochastic gradient descent method.
*/
#include <array>
#include <cassert>
#include <climits>
#include <cmath>
@ -35,7 +36,8 @@
#include <numeric>
#include <vector>
#define MAX_ITER 500 // INT_MAX ///< Maximum number of iterations to learn
/** Maximum number of iterations to learn */
constexpr int MAX_ITER = 500; // INT_MAX
/** \namespace machine_learning
* \brief Machine learning algorithms
@ -50,8 +52,8 @@ class adaline {
* \param[in] convergence accuracy (optional,
* default=\f$1\times10^{-5}\f$)
*/
adaline(int num_features, const double eta = 0.01f,
const double accuracy = 1e-5)
explicit adaline(int num_features, const double eta = 0.01f,
const double accuracy = 1e-5)
: eta(eta), accuracy(accuracy) {
if (eta <= 0) {
std::cerr << "learning rate should be positive and nonzero"
@ -64,7 +66,7 @@ class adaline {
1); // additional weight is for the constant bias term
// initialize with random weights in the range [-50, 49]
for (int i = 0; i < weights.size(); i++) weights[i] = 1.f;
for (double &weight : weights) weight = 1.f;
// weights[i] = (static_cast<double>(std::rand() % 100) - 50);
}
@ -75,8 +77,9 @@ class adaline {
out << "<";
for (int i = 0; i < ada.weights.size(); i++) {
out << ada.weights[i];
if (i < ada.weights.size() - 1)
if (i < ada.weights.size() - 1) {
out << ", ";
}
}
out << ">";
return out;
@ -90,28 +93,33 @@ class adaline {
* model prediction output
*/
int predict(const std::vector<double> &x, double *out = nullptr) {
if (!check_size_match(x))
if (!check_size_match(x)) {
return 0;
}
double y = weights.back(); // assign bias value
// for (int i = 0; i < x.size(); i++) y += x[i] * weights[i];
y = std::inner_product(x.begin(), x.end(), weights.begin(), y);
if (out != nullptr) // if out variable is provided
if (out != nullptr) { // if out variable is provided
*out = y;
}
return activation(y); // quantizer: apply ADALINE threshold function
}
/**
* Update the weights of the model using supervised learning for one
* feature vector \param[in] x feature vector \param[in] y known output
* value \returns correction factor
* feature vector
* \param[in] x feature vector
* \param[in] y known output value
* \returns correction factor
*/
double fit(const std::vector<double> &x, const int &y) {
if (!check_size_match(x))
if (!check_size_match(x)) {
return 0;
}
/* output of the model with current weights */
int p = predict(x);
@ -129,21 +137,23 @@ class adaline {
/**
* Update the weights of the model using supervised learning for an
* array of vectors. \param[in] X array of feature vector \param[in] y
* known output value for each feature vector
* array of vectors.
* \param[in] X array of feature vector
* \param[in] y known output value for each feature vector
*/
template <int N>
void fit(std::vector<double> const (&X)[N], const int *y) {
template <size_t N>
void fit(std::array<std::vector<double>, N> const &X,
std::array<int, N> const &Y) {
double avg_pred_error = 1.f;
int iter;
int iter = 0;
for (iter = 0; (iter < MAX_ITER) && (avg_pred_error > accuracy);
iter++) {
avg_pred_error = 0.f;
// perform fit for each sample
for (int i = 0; i < N; i++) {
double err = fit(X[i], y[i]);
double err = fit(X[i], Y[i]);
avg_pred_error += std::abs(err);
}
avg_pred_error /= N;
@ -154,15 +164,25 @@ class adaline {
<< "\tAvg error: " << avg_pred_error << std::endl;
}
if (iter < MAX_ITER)
if (iter < MAX_ITER) {
std::cout << "Converged after " << iter << " iterations."
<< std::endl;
else
} else {
std::cout << "Did not converge after " << iter << " iterations."
<< std::endl;
}
}
/** Defines activation function as Heaviside's step function.
* \f[
* f(x) = \begin{cases}
* -1 & \forall x \le 0\\
* 1 & \forall x > 0
* \end{cases}
* \f]
* @param x input value to apply activation on
* @return activation output
*/
int activation(double x) { return x > 0 ? 1 : -1; }
private:
@ -206,15 +226,19 @@ void test1(double eta = 0.01) {
const int N = 10; // number of sample points
std::vector<double> X[N] = {{0, 1}, {1, -2}, {2, 3}, {3, -1},
{4, 1}, {6, -5}, {-7, -3}, {-8, 5},
{-9, 2}, {-10, -15}};
int y[] = {1, -1, 1, -1, -1, -1, 1, 1, 1, -1}; // corresponding y-values
std::array<std::vector<double>, N> X = {
std::vector<double>({0, 1}), std::vector<double>({1, -2}),
std::vector<double>({2, 3}), std::vector<double>({3, -1}),
std::vector<double>({4, 1}), std::vector<double>({6, -5}),
std::vector<double>({-7, -3}), std::vector<double>({-8, 5}),
std::vector<double>({-9, 2}), std::vector<double>({-10, -15})};
std::array<int, N> y = {1, -1, 1, -1, -1,
-1, 1, 1, 1, -1}; // corresponding y-values
std::cout << "------- Test 1 -------" << std::endl;
std::cout << "Model before fit: " << ada << std::endl;
ada.fit(X, y);
ada.fit<N>(X, y);
std::cout << "Model after fit: " << ada << std::endl;
int predict = ada.predict({5, -3});
@ -240,17 +264,17 @@ void test2(double eta = 0.01) {
const int N = 50; // number of sample points
std::vector<double> X[N];
int Y[N]; // corresponding y-values
std::array<std::vector<double>, N> X;
std::array<int, N> Y{}; // corresponding y-values
// generate sample points in the interval
// [-range2/100 , (range2-1)/100]
int range = 500; // sample points full-range
int range2 = range >> 1; // sample points half-range
for (int i = 0; i < N; i++) {
double x0 = ((std::rand() % range) - range2) / 100.f;
double x1 = ((std::rand() % range) - range2) / 100.f;
X[i] = {x0, x1};
double x0 = (static_cast<double>(std::rand() % range) - range2) / 100.f;
double x1 = (static_cast<double>(std::rand() % range) - range2) / 100.f;
X[i] = std::vector<double>({x0, x1});
Y[i] = (x0 + 3. * x1) > -1 ? 1 : -1;
}
@ -262,8 +286,8 @@ void test2(double eta = 0.01) {
int N_test_cases = 5;
for (int i = 0; i < N_test_cases; i++) {
double x0 = ((std::rand() % range) - range2) / 100.f;
double x1 = ((std::rand() % range) - range2) / 100.f;
double x0 = (static_cast<double>(std::rand() % range) - range2) / 100.f;
double x1 = (static_cast<double>(std::rand() % range) - range2) / 100.f;
int predict = ada.predict({x0, x1});
@ -291,18 +315,18 @@ void test3(double eta = 0.01) {
const int N = 100; // number of sample points
std::vector<double> X[N];
int Y[N]; // corresponding y-values
std::array<std::vector<double>, N> X;
std::array<int, N> Y{}; // corresponding y-values
// generate sample points in the interval
// [-range2/100 , (range2-1)/100]
int range = 200; // sample points full-range
int range2 = range >> 1; // sample points half-range
for (int i = 0; i < N; i++) {
double x0 = ((std::rand() % range) - range2) / 100.f;
double x1 = ((std::rand() % range) - range2) / 100.f;
double x2 = ((std::rand() % range) - range2) / 100.f;
X[i] = {x0, x1, x2, x0 * x0, x1 * x1, x2 * x2};
double x0 = (static_cast<double>(std::rand() % range) - range2) / 100.f;
double x1 = (static_cast<double>(std::rand() % range) - range2) / 100.f;
double x2 = (static_cast<double>(std::rand() % range) - range2) / 100.f;
X[i] = std::vector<double>({x0, x1, x2, x0 * x0, x1 * x1, x2 * x2});
Y[i] = ((x0 * x0) + (x1 * x1) + (x2 * x2)) <= 1.f ? 1 : -1;
}
@ -314,9 +338,9 @@ void test3(double eta = 0.01) {
int N_test_cases = 5;
for (int i = 0; i < N_test_cases; i++) {
double x0 = ((std::rand() % range) - range2) / 100.f;
double x1 = ((std::rand() % range) - range2) / 100.f;
double x2 = ((std::rand() % range) - range2) / 100.f;
double x0 = (static_cast<double>(std::rand() % range) - range2) / 100.f;
double x1 = (static_cast<double>(std::rand() % range) - range2) / 100.f;
double x2 = (static_cast<double>(std::rand() % range) - range2) / 100.f;
int predict = ada.predict({x0, x1, x2, x0 * x0, x1 * x1, x2 * x2});
@ -334,8 +358,9 @@ int main(int argc, char **argv) {
std::srand(std::time(nullptr)); // initialize random number generator
double eta = 0.1; // default value of eta
if (argc == 2) // read eta value from commandline argument if present
if (argc == 2) { // read eta value from commandline argument if present
eta = strtof(argv[1], nullptr);
}
test1(eta);
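For context on the `fit()`/`predict()` pair above (standard ADALINE, stated as background rather than quoted from the file): the weights follow the delta rule on the linear output,

$$\Delta w_i = \eta\,(y - y_{\text{lin}})\,x_i, \qquad y_{\text{lin}} = \sum_i w_i x_i + b,$$

and `predict()` quantizes \(y_{\text{lin}}\) through the Heaviside-style `activation()` documented above to produce the ±1 class label.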

machine_learning/kohonen_som_topology.cpp

@ -25,8 +25,11 @@
*/
#define _USE_MATH_DEFINES //< required for MS Visual C++
#include <algorithm>
#include <array>
#include <cerrno>
#include <cmath>
#include <cstdlib>
#include <cstring>
#include <ctime>
#include <fstream>
#include <iostream>
@ -68,7 +71,8 @@ int save_2d_data(const char *fname,
fp.open(fname);
if (!fp.is_open()) {
// error with opening file to write
std::cerr << "Error opening file " << fname << "\n";
std::cerr << "Error opening file " << fname << ": "
<< std::strerror(errno) << "\n";
return -1;
}
@ -76,12 +80,14 @@ int save_2d_data(const char *fname,
for (int i = 0; i < num_points; i++) {
// for each feature in the array
for (int j = 0; j < num_features; j++) {
fp << X[i][j]; // print the feature value
if (j < num_features - 1) // if not the last feature
fp << ","; // suffix comma
fp << X[i][j]; // print the feature value
if (j < num_features - 1) { // if not the last feature
fp << ","; // suffix comma
}
}
if (i < num_points - 1) { // if not the last row
fp << "\n"; // start a new line
}
if (i < num_points - 1) // if not the last row
fp << "\n"; // start a new line
}
fp.close();
@ -99,12 +105,12 @@ int save_2d_data(const char *fname,
void get_min_2d(const std::vector<std::valarray<double>> &X, double *val,
int *x_idx, int *y_idx) {
val[0] = INFINITY; // initial min value
int N = X.size();
size_t N = X.size();
for (int i = 0; i < N; i++) { // traverse each x-index
auto result = std::min_element(std::begin(X[i]), std::end(X[i]));
double d_min = *result;
int j = std::distance(std::begin(X[i]), result);
std::ptrdiff_t j = std::distance(std::begin(X[i]), result);
if (d_min < val[0]) { // if a lower value is found
// save the value and its index
@ -119,7 +125,8 @@ void get_min_2d(const std::vector<std::valarray<double>> &X, double *val,
* \brief Machine learning algorithms
*/
namespace machine_learning {
#define MIN_DISTANCE 1e-4 ///< Minimum average distance of image nodes
/** Minimum average distance of image nodes */
constexpr double MIN_DISTANCE = 1e-4;
/**
* Create the distance matrix or
@ -136,9 +143,8 @@ int save_u_matrix(const char *fname,
const std::vector<std::vector<std::valarray<double>>> &W) {
std::ofstream fp(fname);
if (!fp) { // error with fopen
char msg[120];
std::snprintf(msg, sizeof(msg), "File error (%s): ", fname);
std::perror(msg);
std::cerr << "File error (" << fname << "): " << std::strerror(errno)
<< std::endl;
return -1;
}
@ -153,7 +159,7 @@ int save_u_matrix(const char *fname,
int to_x = std::min<int>(W.size(), i + R + 1);
int from_y = std::max<int>(0, j - R);
int to_y = std::min<int>(W[0].size(), j + R + 1);
int l, m;
int l = 0, m = 0;
#ifdef _OPENMP
#pragma omp parallel for reduction(+ : distance)
#endif
@ -172,8 +178,9 @@ int save_u_matrix(const char *fname,
fp << ','; // suffix comma
}
}
if (i < W.size() - 1) // if not the last row
fp << '\n'; // start a new line
if (i < W.size() - 1) { // if not the last row
fp << '\n'; // start a new line
}
}
fp.close();
@ -194,10 +201,11 @@ double update_weights(const std::valarray<double> &X,
std::vector<std::vector<std::valarray<double>>> *W,
std::vector<std::valarray<double>> *D, double alpha,
int R) {
int x, y;
int x = 0, y = 0;
int num_out_x = static_cast<int>(W->size()); // output nodes - in X
int num_out_y = static_cast<int>(W[0][0].size()); // output nodes - in Y
int num_features = static_cast<int>(W[0][0][0].size()); // features = in Z
// int num_features = static_cast<int>(W[0][0][0].size()); // features =
// in Z
double d_min = 0.f;
#ifdef _OPENMP
@ -217,7 +225,7 @@ double update_weights(const std::valarray<double> &X,
// step 2: get closest node i.e., node with smallest Euclidean distance
// to the current pattern
int d_min_x, d_min_y;
int d_min_x = 0, d_min_y = 0;
get_min_2d(*D, &d_min, &d_min_x, &d_min_y);
// step 3a: get the neighborhood range
@ -261,10 +269,10 @@ double update_weights(const std::valarray<double> &X,
void kohonen_som(const std::vector<std::valarray<double>> &X,
std::vector<std::vector<std::valarray<double>>> *W,
double alpha_min) {
int num_samples = X.size(); // number of rows
int num_features = X[0].size(); // number of columns
int num_out = W->size(); // output matrix size
int R = num_out >> 2, iter = 0;
size_t num_samples = X.size(); // number of rows
// size_t num_features = X[0].size(); // number of columns
size_t num_out = W->size(); // output matrix size
size_t R = num_out >> 2, iter = 0;
double alpha = 1.f;
std::vector<std::valarray<double>> D(num_out);
@ -283,15 +291,17 @@ void kohonen_som(const std::vector<std::valarray<double>> &X,
}
// every 100th iteration, reduce the neighborhood range
if (iter % 300 == 0 && R > 1)
if (iter % 300 == 0 && R > 1) {
R--;
}
dmin /= num_samples;
// termination condition variable -> % change in minimum distance
dmin_ratio = (past_dmin - dmin) / past_dmin;
if (dmin_ratio < 0)
if (dmin_ratio < 0) {
dmin_ratio = 1.f;
}
past_dmin = dmin;
std::cout << "iter: " << iter << "\t alpha: " << alpha << "\t R: " << R
@ -320,14 +330,14 @@ using machine_learning::save_u_matrix;
void test_2d_classes(std::vector<std::valarray<double>> *data) {
const int N = data->size();
const double R = 0.3; // radius of cluster
int i;
int i = 0;
const int num_classes = 4;
const double centres[][2] = {
std::array<std::array<double, 2>, num_classes> centres = {
// centres of each class cluster
{.5, .5}, // centre of class 1
{.5, -.5}, // centre of class 2
{-.5, .5}, // centre of class 3
{-.5, -.5} // centre of class 4
std::array<double, 2>({.5, .5}), // centre of class 1
std::array<double, 2>({.5, -.5}), // centre of class 2
std::array<double, 2>({-.5, .5}), // centre of class 3
std::array<double, 2>({-.5, -.5}) // centre of class 4
};
#ifdef _OPENMP
@ -357,15 +367,16 @@ void test_2d_classes(std::vector<std::valarray<double>> *data) {
* * `w12.csv`: trained SOM map
*/
void test1() {
int j, N = 300;
int j = 0, N = 300;
int features = 2;
int num_out = 30;
std::vector<std::valarray<double>> X(N);
std::vector<std::vector<std::valarray<double>>> W(num_out);
for (int i = 0; i < std::max(num_out, N); i++) {
// loop till max(N, num_out)
if (i < N) // only add new arrays if i < N
if (i < N) { // only add new arrays if i < N
X[i] = std::valarray<double>(features);
}
if (i < num_out) { // only add new arrays if i < num_out
W[i] = std::vector<std::valarray<double>>(num_out);
for (int k = 0; k < num_out; k++) {
@ -373,9 +384,10 @@ void test1() {
#ifdef _OPENMP
#pragma omp for
#endif
for (j = 0; j < features; j++)
for (j = 0; j < features; j++) {
// preallocate with random initial weights
W[i][k][j] = _random(-10, 10);
}
}
}
}
@ -397,16 +409,16 @@ void test1() {
* \param[out] data matrix to store data in
*/
void test_3d_classes1(std::vector<std::valarray<double>> *data) {
const int N = data->size();
const size_t N = data->size();
const double R = 0.3; // radius of cluster
int i;
int i = 0;
const int num_classes = 4;
const double centres[][3] = {
const std::array<std::array<double, 3>, num_classes> centres = {
// centres of each class cluster
{.5, .5, .5}, // centre of class 1
{.5, -.5, -.5}, // centre of class 2
{-.5, .5, .5}, // centre of class 3
{-.5, -.5 - .5} // centre of class 4
std::array<double, 3>({.5, .5, .5}), // centre of class 1
std::array<double, 3>({.5, -.5, -.5}), // centre of class 2
std::array<double, 3>({-.5, .5, .5}), // centre of class 3
std::array<double, 3>({-.5, -.5, -.5}) // centre of class 4
};
#ifdef _OPENMP
@ -437,15 +449,16 @@ void test_3d_classes1(std::vector<std::valarray<double>> *data) {
* * `w22.csv`: trained SOM map
*/
void test2() {
int j, N = 300;
int j = 0, N = 300;
int features = 3;
int num_out = 30;
std::vector<std::valarray<double>> X(N);
std::vector<std::vector<std::valarray<double>>> W(num_out);
for (int i = 0; i < std::max(num_out, N); i++) {
// loop till max(N, num_out)
if (i < N) // only add new arrays if i < N
if (i < N) { // only add new arrays if i < N
X[i] = std::valarray<double>(features);
}
if (i < num_out) { // only add new arrays if i < num_out
W[i] = std::vector<std::valarray<double>>(num_out);
for (int k = 0; k < num_out; k++) {
@ -453,9 +466,10 @@ void test2() {
#ifdef _OPENMP
#pragma omp for
#endif
for (j = 0; j < features; j++)
for (j = 0; j < features; j++) {
// preallocate with random initial weights
W[i][k][j] = _random(-10, 10);
}
}
}
}
@ -477,20 +491,20 @@ void test2() {
* \param[out] data matrix to store data in
*/
void test_3d_classes2(std::vector<std::valarray<double>> *data) {
const int N = data->size();
const size_t N = data->size();
const double R = 0.2; // radius of cluster
int i;
int i = 0;
const int num_classes = 8;
const double centres[][3] = {
const std::array<std::array<double, 3>, num_classes> centres = {
// centres of each class cluster
{.5, .5, .5}, // centre of class 1
{.5, .5, -.5}, // centre of class 2
{.5, -.5, .5}, // centre of class 3
{.5, -.5, -.5}, // centre of class 4
{-.5, .5, .5}, // centre of class 5
{-.5, .5, -.5}, // centre of class 6
{-.5, -.5, .5}, // centre of class 7
{-.5, -.5, -.5} // centre of class 8
std::array<double, 3>({.5, .5, .5}), // centre of class 1
std::array<double, 3>({.5, .5, -.5}), // centre of class 2
std::array<double, 3>({.5, -.5, .5}), // centre of class 3
std::array<double, 3>({.5, -.5, -.5}), // centre of class 4
std::array<double, 3>({-.5, .5, .5}), // centre of class 5
std::array<double, 3>({-.5, .5, -.5}), // centre of class 6
std::array<double, 3>({-.5, -.5, .5}), // centre of class 7
std::array<double, 3>({-.5, -.5, -.5}) // centre of class 8
};
#ifdef _OPENMP
@ -521,15 +535,16 @@ void test_3d_classes2(std::vector<std::valarray<double>> *data) {
* * `w32.csv`: trained SOM map
*/
void test3() {
int j, N = 500;
int j = 0, N = 500;
int features = 3;
int num_out = 30;
std::vector<std::valarray<double>> X(N);
std::vector<std::vector<std::valarray<double>>> W(num_out);
for (int i = 0; i < std::max(num_out, N); i++) {
// loop till max(N, num_out)
if (i < N) // only add new arrays if i < N
if (i < N) { // only add new arrays if i < N
X[i] = std::valarray<double>(features);
}
if (i < num_out) { // only add new arrays if i < num_out
W[i] = std::vector<std::valarray<double>>(num_out);
for (int k = 0; k < num_out; k++) {
@ -537,9 +552,10 @@ void test3() {
#ifdef _OPENMP
#pragma omp for
#endif
for (j = 0; j < features; j++)
for (j = 0; j < features; j++) {
// preallocate with random initial weights
W[i][k][j] = _random(-10, 10);
}
}
}
}

machine_learning/kohonen_som_trace.cpp

@ -20,6 +20,7 @@
*/
#define _USE_MATH_DEFINES // required for MS Visual C++
#include <algorithm>
#include <array>
#include <cmath>
#include <cstdlib>
#include <ctime>
@ -71,12 +72,14 @@ int save_nd_data(const char *fname,
for (int i = 0; i < num_points; i++) {
// for each feature in the array
for (int j = 0; j < num_features; j++) {
fp << X[i][j]; // print the feature value
if (j < num_features - 1) // if not the last feature
fp << ","; // suffix comma
fp << X[i][j]; // print the feature value
if (j < num_features - 1) { // if not the last feature
fp << ","; // suffix comma
}
}
if (i < num_points - 1) { // if not the last row
fp << "\n"; // start a new line
}
if (i < num_points - 1) // if not the last row
fp << "\n"; // start a new line
}
fp.close();
@ -100,9 +103,9 @@ namespace machine_learning {
void update_weights(const std::valarray<double> &x,
std::vector<std::valarray<double>> *W,
std::valarray<double> *D, double alpha, int R) {
int j, k;
int num_out = W->size(); // number of SOM output nodes
int num_features = x.size(); // number of data features
int j = 0, k = 0;
int num_out = W->size(); // number of SOM output nodes
// int num_features = x.size(); // number of data features
#ifdef _OPENMP
#pragma omp for
@ -117,7 +120,7 @@ void update_weights(const std::valarray<double> &x,
// step 2: get closest node i.e., node with smallest Euclidean distance to
// the current pattern
auto result = std::min_element(std::begin(*D), std::end(*D));
double d_min = *result;
// double d_min = *result;
int d_min_idx = std::distance(std::begin(*D), result);
// step 3a: get the neighborhood range
@ -129,9 +132,10 @@ void update_weights(const std::valarray<double> &x,
#ifdef _OPENMP
#pragma omp for
#endif
for (j = from_node; j < to_node; j++)
for (j = from_node; j < to_node; j++) {
// update weights of nodes in the neighborhood
(*W)[j] += alpha * (x - (*W)[j]);
}
}
/**
@ -145,16 +149,16 @@ void update_weights(const std::valarray<double> &x,
void kohonen_som_tracer(const std::vector<std::valarray<double>> &X,
std::vector<std::valarray<double>> *W,
double alpha_min) {
int num_samples = X.size(); // number of rows
int num_features = X[0].size(); // number of columns
int num_out = W->size(); // number of rows
int num_samples = X.size(); // number of rows
// int num_features = X[0].size(); // number of columns
int num_out = W->size(); // number of rows
int R = num_out >> 2, iter = 0;
double alpha = 1.f;
std::valarray<double> D(num_out);
// Loop alpha from 1 down to alpha_min
for (; alpha > alpha_min; alpha -= 0.01, iter++) {
do {
// Loop for each sample pattern in the data set
for (int sample = 0; sample < num_samples; sample++) {
// update weights for the current input pattern sample
@ -162,9 +166,13 @@ void kohonen_som_tracer(const std::vector<std::valarray<double>> &X,
}
// every 10th iteration, reduce the neighborhood range
if (iter % 10 == 0 && R > 1)
if (iter % 10 == 0 && R > 1) {
R--;
}
}
alpha -= 0.01;
iter++;
} while (alpha > alpha_min);
}
} // namespace machine_learning
@ -190,7 +198,7 @@ void test_circle(std::vector<std::valarray<double>> *data) {
const double R = 0.75, dr = 0.3;
double a_t = 0., b_t = 2.f * M_PI; // theta random between 0 and 2*pi
double a_r = R - dr, b_r = R + dr; // radius random between R-dr and R+dr
int i;
int i = 0;
#ifdef _OPENMP
#pragma omp for
@ -223,24 +231,26 @@ void test_circle(std::vector<std::valarray<double>> *data) {
* output](https://raw.githubusercontent.com/TheAlgorithms/C-Plus-Plus/docs/images/machine_learning/kohonen/test1.svg)
*/
void test1() {
int j, N = 500;
int j = 0, N = 500;
int features = 2;
int num_out = 50;
std::vector<std::valarray<double>> X(N);
std::vector<std::valarray<double>> W(num_out);
for (int i = 0; i < std::max(num_out, N); i++) {
// loop till max(N, num_out)
if (i < N) // only add new arrays if i < N
if (i < N) { // only add new arrays if i < N
X[i] = std::valarray<double>(features);
}
if (i < num_out) { // only add new arrays if i < num_out
W[i] = std::valarray<double>(features);
#ifdef _OPENMP
#pragma omp for
#endif
for (j = 0; j < features; j++)
for (j = 0; j < features; j++) {
// preallocate with random initial weights
W[i][j] = _random(-1, 1);
}
}
}
@ -267,7 +277,7 @@ void test1() {
void test_lamniscate(std::vector<std::valarray<double>> *data) {
const int N = data->size();
const double dr = 0.2;
int i;
int i = 0;
#ifdef _OPENMP
#pragma omp for
@ -303,24 +313,26 @@ void test_lamniscate(std::vector<std::valarray<double>> *data) {
* output](https://raw.githubusercontent.com/TheAlgorithms/C-Plus-Plus/docs/images/machine_learning/kohonen/test2.svg)
*/
void test2() {
int j, N = 500;
int j = 0, N = 500;
int features = 2;
int num_out = 20;
std::vector<std::valarray<double>> X(N);
std::vector<std::valarray<double>> W(num_out);
for (int i = 0; i < std::max(num_out, N); i++) {
// loop till max(N, num_out)
if (i < N) // only add new arrays if i < N
if (i < N) { // only add new arrays if i < N
X[i] = std::valarray<double>(features);
}
if (i < num_out) { // only add new arrays if i < num_out
W[i] = std::valarray<double>(features);
#ifdef _OPENMP
#pragma omp for
#endif
for (j = 0; j < features; j++)
for (j = 0; j < features; j++) {
// preallocate with random initial weights
W[i][j] = _random(-1, 1);
}
}
}
@ -347,18 +359,18 @@ void test2() {
void test_3d_classes(std::vector<std::valarray<double>> *data) {
const int N = data->size();
const double R = 0.1; // radius of cluster
int i;
int i = 0;
const int num_classes = 8;
const double centres[][3] = {
const std::array<const std::array<double, 3>, num_classes> centres = {
// centres of each class cluster
{.5, .5, .5}, // centre of class 0
{.5, .5, -.5}, // centre of class 1
{.5, -.5, .5}, // centre of class 2
{.5, -.5, -.5}, // centre of class 3
{-.5, .5, .5}, // centre of class 4
{-.5, .5, -.5}, // centre of class 5
{-.5, -.5, .5}, // centre of class 6
{-.5, -.5, -.5} // centre of class 7
std::array<double, 3>({.5, .5, .5}), // centre of class 0
std::array<double, 3>({.5, .5, -.5}), // centre of class 1
std::array<double, 3>({.5, -.5, .5}), // centre of class 2
std::array<double, 3>({.5, -.5, -.5}), // centre of class 3
std::array<double, 3>({-.5, .5, .5}), // centre of class 4
std::array<double, 3>({-.5, .5, -.5}), // centre of class 5
std::array<double, 3>({-.5, -.5, .5}), // centre of class 6
std::array<double, 3>({-.5, -.5, -.5}) // centre of class 7
};
#ifdef _OPENMP
@ -400,24 +412,26 @@ void test_3d_classes(std::vector<std::valarray<double>> *data) {
* output](https://raw.githubusercontent.com/TheAlgorithms/C-Plus-Plus/docs/images/machine_learning/kohonen/test3.svg)
*/
void test3() {
int j, N = 200;
int j = 0, N = 200;
int features = 3;
int num_out = 20;
std::vector<std::valarray<double>> X(N);
std::vector<std::valarray<double>> W(num_out);
for (int i = 0; i < std::max(num_out, N); i++) {
// loop till max(N, num_out)
if (i < N) // only add new arrays if i < N
if (i < N) { // only add new arrays if i < N
X[i] = std::valarray<double>(features);
}
if (i < num_out) { // only add new arrays if i < num_out
W[i] = std::valarray<double>(features);
#ifdef _OPENMP
#pragma omp for
#endif
for (j = 0; j < features; j++)
for (j = 0; j < features; j++) {
// preallocate with random initial weights
W[i][j] = _random(-1, 1);
}
}
}
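The weight update in `update_weights()` above, `(*W)[j] += alpha * (x - (*W)[j])`, is the standard Kohonen rule: for every node \(j\) within the neighborhood range \(R\) of the best-matching node,

$$W_j \leftarrow W_j + \alpha\,(x - W_j),$$

with \(\alpha\) annealed from 1 down to `alpha_min` in steps of 0.01 and \(R\) decremented every 10th iteration, as the `do`/`while` loop introduced in this diff shows.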

machine_learning/ordinary_least_squares_regressor.cpp

@ -25,9 +25,10 @@ std::ostream &operator<<(std::ostream &out,
const char separator = ' ';
for (size_t row = 0; row < v.size(); row++) {
for (size_t col = 0; col < v[row].size(); col++)
for (size_t col = 0; col < v[row].size(); col++) {
out << std::left << std::setw(width) << std::setfill(separator)
<< v[row][col];
}
out << std::endl;
}
@ -42,9 +43,10 @@ std::ostream &operator<<(std::ostream &out, std::vector<T> const &v) {
const int width = 15;
const char separator = ' ';
for (size_t row = 0; row < v.size(); row++)
for (size_t row = 0; row < v.size(); row++) {
out << std::left << std::setw(width) << std::setfill(separator)
<< v[row];
}
return out;
}
@ -57,9 +59,11 @@ template <typename T>
inline bool is_square(std::vector<std::vector<T>> const &A) {
// Assuming A is square matrix
size_t N = A.size();
for (size_t i = 0; i < N; i++)
if (A[i].size() != N)
for (size_t i = 0; i < N; i++) {
if (A[i].size() != N) {
return false;
}
}
return true;
}
@ -90,8 +94,9 @@ std::vector<std::vector<T>> operator*(std::vector<std::vector<T>> const &A,
std::vector<T> v(N_B);
for (size_t col = 0; col < N_B; col++) {
v[col] = static_cast<T>(0);
for (size_t j = 0; j < B.size(); j++)
for (size_t j = 0; j < B.size(); j++) {
v[col] += A[row][j] * B[j][col];
}
}
result[row] = v;
}
@ -154,8 +159,9 @@ std::vector<float> operator*(std::vector<T> const &A, float const scalar) {
std::vector<float> result(N_A);
for (size_t row = 0; row < N_A; row++)
for (size_t row = 0; row < N_A; row++) {
result[row] = A[row] * static_cast<float>(scalar);
}
return result;
}
@ -226,8 +232,9 @@ std::vector<std::vector<float>> get_inverse(
for (size_t row = 0; row < N; row++) {
// preallocate a resultant identity matrix
inverse[row] = std::vector<float>(N);
for (size_t col = 0; col < N; col++)
for (size_t col = 0; col < N; col++) {
inverse[row][col] = (row == col) ? 1.f : 0.f;
}
}
if (!is_square(A)) {
@ -239,8 +246,9 @@ std::vector<std::vector<float>> get_inverse(
std::vector<std::vector<float>> temp(N);
for (size_t row = 0; row < N; row++) {
std::vector<float> v(N);
for (size_t col = 0; col < N; col++)
for (size_t col = 0; col < N; col++) {
v[col] = static_cast<float>(A[row][col]);
}
temp[row] = v;
}
@ -267,13 +275,14 @@ std::vector<std::vector<float>> get_inverse(
}
// set diagonal to 1
float divisor = static_cast<float>(temp[row][row]);
auto divisor = static_cast<float>(temp[row][row]);
temp[row] = temp[row] / divisor;
inverse[row] = inverse[row] / divisor;
// Row transformations
for (size_t row2 = 0; row2 < N; row2++) {
if (row2 == row)
if (row2 == row) {
continue;
}
float factor = temp[row2][row];
temp[row2] = temp[row2] - factor * temp[row];
inverse[row2] = inverse[row2] - factor * inverse[row];
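The two steps above are the core of Gauss-Jordan elimination: first normalise the pivot row so the diagonal entry becomes 1, then subtract `factor` times the pivot row from every other row so the rest of the pivot column becomes 0. Because each row operation is applied to `temp` and `inverse` in lockstep (an augmented \f$[A \mid I]\f$ pair), `temp` is driven to the identity while `inverse` accumulates \f$A^{-1}\f$. As a tiny sanity check: for \f$A = \begin{pmatrix}2 & 0\\ 0 & 4\end{pmatrix}\f$ no cross-row elimination is needed, and dividing row 0 by 2 and row 1 by 4 leaves `temp` as the identity and `inverse` as \f$\begin{pmatrix}0.5 & 0\\ 0 & 0.25\end{pmatrix}\f$, which is exactly \f$A^{-1}\f$.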
@ -313,9 +322,10 @@ std::vector<float> fit_OLS_regressor(std::vector<std::vector<T>> const &X,
std::vector<T> const &Y) {
// NxF
std::vector<std::vector<T>> X2 = X;
for (size_t i = 0; i < X2.size(); i++)
for (size_t i = 0; i < X2.size(); i++) {
// add Y-intercept -> Nx(F+1)
X2[i].push_back(1);
}
// (F+1)xN
std::vector<std::vector<T>> Xt = get_transpose(X2);
// (F+1)x(F+1)
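The dimension trail in these comments follows the closed-form ordinary-least-squares solution via the normal equations, \f$\hat\beta = \left(X^T X\right)^{-1} X^T Y\f$, where \f$X\f$ is the \f$N\times(F+1)\f$ design matrix after the column of ones (the Y-intercept term) is appended: \f$X^T X\f$ is then \f$(F+1)\times(F+1)\f$ and is inverted with get_inverse() above, yielding one fitted coefficient per feature plus the intercept.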
@ -347,8 +357,9 @@ std::vector<float> predict_OLS_regressor(std::vector<std::vector<T>> const &X,
for (size_t rows = 0; rows < X.size(); rows++) {
// -> start with constant term
result[rows] = beta[X[0].size()];
for (size_t cols = 0; cols < X[0].size(); cols++)
for (size_t cols = 0; cols < X[0].size(); cols++) {
result[rows] += beta[cols] * X[rows][cols];
}
}
// Nx1
return result;
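A minimal usage sketch of the fit/predict pair, assuming this file's functions are in scope; fitting \f$y = 2x + 1\f$ from three samples should recover \f$\beta \approx \{2, 1\}\f$:

// assumes fit_OLS_regressor() and predict_OLS_regressor() from this file
std::vector<std::vector<float>> X = {{1.f}, {2.f}, {3.f}};
std::vector<float> Y = {3.f, 5.f, 7.f};                     // y = 2x + 1
std::vector<float> beta = fit_OLS_regressor(X, Y);          // ~ {2, 1}
std::vector<float> out = predict_OLS_regressor(X, beta);    // ~ {3, 5, 7}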
@ -375,8 +386,9 @@ void ols_test() {
// predicted regression outputs
std::vector<float> out1 = predict_OLS_regressor(test_data1, beta1);
// check that predicted results are within ±0.01 of the expected values
for (size_t rows = 0; rows < out1.size(); rows++)
for (size_t rows = 0; rows < out1.size(); rows++) {
assert(std::abs(out1[rows] - expected1[rows]) < 0.01);
}
std::cout << "passed\n";
/* test function = x^3 + x^2 - 100 */
@ -396,8 +408,9 @@ void ols_test() {
// predicted regression outputs
std::vector<float> out2 = predict_OLS_regressor(test_data2, beta2);
// check that predicted results are within ±0.01 of the expected values
for (size_t rows = 0; rows < out2.size(); rows++)
for (size_t rows = 0; rows < out2.size(); rows++) {
assert(std::abs(out2[rows] - expected2[rows]) < 0.01);
}
std::cout << "passed\n";
std::cout << std::endl; // ensure test results are displayed on screen
@ -410,7 +423,7 @@ void ols_test() {
int main() {
ols_test();
size_t N, F;
size_t N = 0, F = 0;
std::cout << "Enter number of features: ";
// number of features = columns
@ -429,9 +442,10 @@ int main() {
for (size_t rows = 0; rows < N; rows++) {
std::vector<float> v(F);
std::cout << "Sample# " << rows + 1 << ": ";
for (size_t cols = 0; cols < F; cols++)
for (size_t cols = 0; cols < F; cols++) {
// get the F features
std::cin >> v[cols];
}
data[rows] = v;
// get the corresponding output
std::cin >> Y[rows];
@ -440,7 +454,7 @@ int main() {
std::vector<float> beta = fit_OLS_regressor(data, Y);
std::cout << std::endl << std::endl << "beta:" << beta << std::endl;
size_t T;
size_t T = 0;
std::cout << "Enter number of test samples: ";
// number of test sample inputs
std::cin >> T;
View File
@ -10,25 +10,21 @@
* @see primes_up_to_billion.cpp prime_numbers.cpp
*/
#include <iostream>
/** Maximum number of primes */
#define MAX 10000000
/** array to store the primes */
bool isprime[MAX];
#include <iostream> // for io operations
/**
* This is the function that finds the primes and eliminates
* the multiples.
* @param N upper bound of the range to check (inclusive)
* @param [out] isprime a boolean array of `N + 1` entries; entry `i` is set to `true` when `i` is composite (i.e. not prime)
*/
void sieve(uint32_t N) {
isprime[0] = false;
isprime[1] = false;
for (uint32_t i = 2; i <= N; i++) {
if (isprime[i]) {
for (uint32_t j = (i << 1); j <= N; j += i) {
isprime[j] = false;
void sieve(uint32_t N, bool *isprime) {
// note: despite its name, this array flags composites:
// isprime[i] == true means `i` is NOT prime (print() below inverts the test)
isprime[0] = true;
isprime[1] = true;
for (uint32_t i = 2; i * i <= N; i++) {
if (!isprime[i]) {
for (uint32_t j = (i << 1); j <= N; j = j + i) {
isprime[j] = true;
}
}
}
@ -36,10 +32,12 @@ void sieve(uint32_t N) {
/**
* This function prints out the primes to STDOUT
* @param N upper bound of the range to print (inclusive)
* @param [in] isprime a boolean array of `N + 1` entries where `isprime[i] == true` marks `i` as composite
*/
void print(uint32_t N) {
for (uint32_t i = 1; i <= N; i++) {
if (isprime[i]) {
void print(uint32_t N, const bool *isprime) {
for (uint32_t i = 2; i <= N; i++) {
if (!isprime[i]) {
std::cout << i << ' ';
}
}
@ -47,19 +45,14 @@ void print(uint32_t N) {
}
/**
* Initialize the array
* Main function
*/
void init() {
for (uint32_t i = 1; i < MAX; i++) {
isprime[i] = true;
}
}
/** main function */
int main() {
uint32_t N = 100;
init();
sieve(N);
print(N);
bool *isprime = new bool[N + 1]();  // N + 1 entries (indices 0..N), value-initialised to false
sieve(N, isprime);
print(N, isprime);
delete[] isprime;
return 0;
}
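Raw new/delete is easy to get wrong here: the buffer must hold N + 1 entries (the loops index up to N) and must start zero-initialised, since sieve() only ever sets flags to true. A sketch of the same sieve on std::vector<bool>, which handles both concerns automatically; an alternative shape, not the committed code:

#include <cstdint>   // for uint32_t
#include <iostream>  // for io operations
#include <vector>    // for std::vector

int main() {
    const uint32_t N = 100;
    std::vector<bool> composite(N + 1, false);  // sized and zeroed in one go
    for (uint32_t i = 2; i * i <= N; i++) {
        if (!composite[i]) {
            for (uint32_t j = i * i; j <= N; j += i) {
                composite[j] = true;  // mark every multiple of the prime i
            }
        }
    }
    for (uint32_t i = 2; i <= N; i++) {
        if (!composite[i]) {
            std::cout << i << ' ';
        }
    }
    std::cout << '\n';
    return 0;
}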
View File
@ -17,8 +17,8 @@
#include <iostream>
#include <limits>
#define EPSILON 1e-10 ///< system accuracy limit
#define MAX_ITERATIONS INT16_MAX ///< Maximum number of iterations to check
constexpr double EPSILON = 1e-10; ///< system accuracy limit
constexpr int16_t MAX_ITERATIONS = INT16_MAX; ///< Maximum number of iterations
/** define \f$f(x)\f$ to find root for.
* Currently defined as:
@ -44,8 +44,8 @@ static double eq_der(double i) {
int main() {
std::srand(std::time(nullptr)); // initialize randomizer
double z, c = std::rand() % 100, m, n;
int i;
double z = NAN, c = std::rand() % 100, m = NAN, n = NAN;
int i = 0;
std::cout << "\nInitial approximation: " << c;
@ -57,8 +57,9 @@ int main() {
z = c - (m / n);
c = z;
if (std::abs(m) < EPSILON) // stopping criterion
if (std::abs(m) < EPSILON) { // stopping criterion
break;
}
}
std::cout << "\n\nRoot: " << z << "\t\tSteps: " << i << std::endl;
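Each pass of the loop above applies the Newton-Raphson update \f$x_{n+1} = x_n - f(x_n)/f'(x_n)\f$ until \f$|f(x_n)|\f$ drops below EPSILON. A minimal standalone sketch for \f$f(x) = x^2 - 2\f$ (root \f$\sqrt{2}\f$), independent of the eq()/eq_der() pair in this file:

#include <cmath>     // for std::abs
#include <iostream>  // for io operations

int main() {
    double x = 1.0;  // initial approximation
    for (int i = 0; i < 100 && std::abs(x * x - 2) > 1e-10; i++) {
        x -= (x * x - 2) / (2 * x);  // x_{n+1} = x_n - f(x_n) / f'(x_n)
    }
    std::cout << "root: " << x << '\n';  // ~1.41421356
    return 0;
}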
View File
@ -54,8 +54,8 @@
void problem(const double &x, std::valarray<double> *y,
std::valarray<double> *dy) {
const double omega = 1.F; // some const for the problem
dy[0][0] = y[0][1]; // x dot
dy[0][1] = -omega * omega * y[0][0]; // y dot
(*dy)[0] = (*y)[1]; // x dot // NOLINT
(*dy)[1] = -omega * omega * (*y)[0]; // y dot // NOLINT
}
/**
@ -83,10 +83,10 @@ void exact_solution(const double &x, std::valarray<double> *y) {
* @param[in,out] y take \f$y_n\f$ and compute \f$y_{n+1}\f$
* @param[in,out] dy compute \f$f\left(x_n,y_n\right)\f$
*/
void forward_euler_step(const double dx, const double &x,
void forward_euler_step(const double dx, const double x,
std::valarray<double> *y, std::valarray<double> *dy) {
problem(x, y, dy);
y[0] += dy[0] * dx;
*y += *dy * dx;
}
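The step above is the explicit (forward) Euler update \f$\vec{y}_{n+1} = \vec{y}_n + \vec{f}(x_n, \vec{y}_n)\,\Delta x\f$, applied component-wise by the valarray arithmetic. A scalar sketch of the same update for \f$y' = -y\f$, \f$y(0) = 1\f$, whose exact solution is \f$e^{-x}\f$:

#include <iostream>  // for io operations

int main() {
    double y = 1.0;
    const double dx = 0.01;
    for (int n = 0; n < 100; n++) {  // integrate from x = 0 to x = 1
        y += -y * dx;                // y_{n+1} = y_n + f(x_n, y_n) * dx
    }
    std::cout << y << '\n';  // ~0.3660, vs exact e^{-1} ~ 0.3679
    return 0;
}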
/**
@ -101,7 +101,7 @@ void forward_euler_step(const double dx, const double &x,
*/
double forward_euler(double dx, double x0, double x_max,
std::valarray<double> *y, bool save_to_file = false) {
std::valarray<double> dy = y[0];
std::valarray<double> dy = *y;
std::ofstream fp;
if (save_to_file) {
@ -122,9 +122,9 @@ double forward_euler(double dx, double x0, double x_max,
// write to file
fp << x << ",";
for (int i = 0; i < L - 1; i++) {
fp << y[0][i] << ",";
fp << y[0][i] << ","; // NOLINT
}
fp << y[0][L - 1] << "\n";
fp << y[0][L - 1] << "\n"; // NOLINT
}
forward_euler_step(dx, x, y, &dy); // perform integration
@ -133,8 +133,9 @@ double forward_euler(double dx, double x0, double x_max,
/* end of integration */
std::clock_t t2 = std::clock();
if (fp.is_open())
if (fp.is_open()) {
fp.close();
}
return static_cast<double>(t2 - t1) / CLOCKS_PER_SEC;
}
@ -153,7 +154,7 @@ void save_exact_solution(const double &X0, const double &X_MAX,
const double &step_size,
const std::valarray<double> &Y0) {
double x = X0;
std::valarray<double> y = Y0;
std::valarray<double> y(Y0);
std::ofstream fp("exact.csv", std::ostream::out);
if (!fp.is_open()) {
@ -166,9 +167,9 @@ void save_exact_solution(const double &X0, const double &X_MAX,
do {
fp << x << ",";
for (int i = 0; i < y.size() - 1; i++) {
fp << y[i] << ",";
fp << y[i] << ","; // NOLINT
}
fp << y[y.size() - 1] << "\n";
fp << y[y.size() - 1] << "\n"; // NOLINT
exact_solution(x, &y);
@ -186,10 +187,10 @@ void save_exact_solution(const double &X0, const double &X_MAX,
* Main Function
*/
int main(int argc, char *argv[]) {
double X0 = 0.f; /* initial value of x0 */
double X_MAX = 10.F; /* upper limit of integration */
std::valarray<double> Y0 = {1.f, 0.f}; /* initial value Y = y(x = x_0) */
double step_size;
double X0 = 0.f; /* initial value of x0 */
double X_MAX = 10.F; /* upper limit of integration */
std::valarray<double> Y0{1.f, 0.f}; /* initial value Y = y(x = x_0) */
double step_size = NAN;
if (argc == 1) {
std::cout << "\nEnter the step size: ";
127
search/fibonacci_search.cpp Normal file
View File
@ -0,0 +1,127 @@
/**
* @author sprintyaf
* @file fibonacci_search.cpp
* @brief [Fibonacci search
* algorithm](https://en.wikipedia.org/wiki/Fibonacci_search_technique)
*/
#include <iostream>
#include <vector> // for std::vector class
#include <cassert> // for assert
#include <cstdlib> // for random numbers
#include <algorithm> // for sorting
/**
* @brief finds the index of a given element in a sorted array using the Fibonacci search algorithm
*
* @param arr sorted array
* @param value value that we're looking for
* @returns the index of the element if the array contains the value, otherwise -1
*/
int fibonacci_search(const std::vector<int> &arr, int value){
// initialize last and current members of Fibonacci sequence
int last = 0, current = 1;
int length = arr.size(); // array size
// next member of Fibonacci sequence which is "last" + "current"
int next = last + current;
// "next" will store the smallest Fibonacci number greater or equal to "length"
while(next < length){
last = current;
current = next;
next = last + current;
}
// "offset" is the end of eliminated range from front
int offset = -1, index = 0;
// loop while there are elements left to consider;
// when "next" becomes 1, "last" equals 0, so the search is done,
// because arr[offset] has already been eliminated
while(next > 1){
// check if "last" is valid location
index = std::min(offset + last, length-1);
// if value is greater than the value at "index", eliminate the subarray from offset to index
if(arr[index] < value){
next = current;
current = last;
last = next - current;
offset = index;
// if value is less than the value at "index", eliminate the subarray from index onwards
} else if(arr[index] > value){
next = last;
current = current - last;
last = next - current;
// element is found
} else {
return index;
}
}
// comparing the last element
if(current && !arr.empty() && arr[offset+1] == value){
return offset+1;
}
// value was not found, return -1
return -1;
}
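A minimal usage sketch of the function above:

// assumes fibonacci_search() as defined above
std::vector<int> arr = {1, 3, 5, 7, 9, 11};
assert(fibonacci_search(arr, 7) == 3);   // present at index 3
assert(fibonacci_search(arr, 4) == -1);  // absent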
/**
* @brief random tests checking behaviour when the array doesn't contain the element
*/
bool no_occurence_tests(){
bool passed = true;
int rand_num = 0, rand_value = 0, index = 0, num_tests = 1000;
std::vector<int> arr;
while(num_tests--){
arr.clear();
for(int i = 0; i < 100; i++){
rand_num = std::rand() % 1000;
arr.push_back(rand_num);
}
rand_value = std::rand() % 1000;
// erase-remove idiom: std::remove alone only shifts elements, so pair it
// with erase to actually drop every occurrence of rand_value
arr.erase(std::remove(arr.begin(), arr.end(), rand_value), arr.end());
std::sort(arr.begin(), arr.end());
index = fibonacci_search(arr, rand_value);
passed = passed && (index == -1);
}
return passed;
}
/**
* @brief random tests which cover cases when we have one, multiple or zero occurrences of the value we're looking for
*/
bool random_tests(){
bool passed = true;
int rand_num = 0, rand_value = 0, index = 0, real_value = 0, num_tests = 10000;
std::vector<int> arr;
while(num_tests--){
arr.clear();
for(int i = 0; i < 100; i++){
rand_num = std::rand() % 1000;
arr.push_back(rand_num);
}
rand_value = std::rand() % 1000;
std::sort(arr.begin(), arr.end());
index = fibonacci_search(arr, rand_value);
if(index != -1){
real_value = arr[index];
passed = passed && (real_value == rand_value);
} else {
passed = passed && (std::find(arr.begin(), arr.end(), rand_value) == arr.end());
}
}
return passed;
}
/**
* Main Function
* testing the algorithm
*/
int main() {
assert(no_occurence_tests());
assert(random_tests());
return 0;
}