mirror of
https://hub.njuu.cf/TheAlgorithms/Python.git
synced 2023-10-11 13:06:12 +08:00
Tighten up psf/black and flake8 (#2024)
* Tighten up psf/black and flake8 * Fix some tests * Fix some E741 * Fix some E741 * updating DIRECTORY.md Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com>
This commit is contained in:
parent
21ed8968c0
commit
1f8a21d727
10
.travis.yml
10
.travis.yml
@ -4,13 +4,13 @@ language: python
|
||||
python: 3.8
|
||||
cache: pip
|
||||
before_install: pip install --upgrade pip setuptools six
|
||||
install: pip install -r requirements.txt
|
||||
install: pip install black flake8
|
||||
before_script:
|
||||
- black --check . || true
|
||||
- IGNORE=E123,E203,E265,E266,E302,E401,E402,E712,E731,E741,E743,F811,F841,W291,W293,W503
|
||||
- flake8 . --count --ignore=$IGNORE --max-complexity=25 --max-line-length=127 --show-source --statistics
|
||||
script:
|
||||
- black --check .
|
||||
- flake8 --ignore=E203,W503 --max-complexity=25 --max-line-length=120 --statistics --count .
|
||||
- scripts/validate_filenames.py # no uppercase, no spaces, in a directory
|
||||
- pip install -r requirements.txt # fast fail on black, flake8, validate_filenames
|
||||
script:
|
||||
- mypy --ignore-missing-imports .
|
||||
- pytest --doctest-modules --cov-report=term-missing:skip-covered --cov=. .
|
||||
after_success:
|
||||
|
@ -222,6 +222,7 @@
|
||||
* [Bellman Ford](https://github.com/TheAlgorithms/Python/blob/master/graphs/bellman_ford.py)
|
||||
* [Bfs](https://github.com/TheAlgorithms/Python/blob/master/graphs/bfs.py)
|
||||
* [Bfs Shortest Path](https://github.com/TheAlgorithms/Python/blob/master/graphs/bfs_shortest_path.py)
|
||||
* [Bidirectional A Star](https://github.com/TheAlgorithms/Python/blob/master/graphs/bidirectional_a_star.py)
|
||||
* [Breadth First Search](https://github.com/TheAlgorithms/Python/blob/master/graphs/breadth_first_search.py)
|
||||
* [Breadth First Search Shortest Path](https://github.com/TheAlgorithms/Python/blob/master/graphs/breadth_first_search_shortest_path.py)
|
||||
* [Check Bipartite Graph Bfs](https://github.com/TheAlgorithms/Python/blob/master/graphs/check_bipartite_graph_bfs.py)
|
||||
@ -242,6 +243,7 @@
|
||||
* [Graph List](https://github.com/TheAlgorithms/Python/blob/master/graphs/graph_list.py)
|
||||
* [Graph Matrix](https://github.com/TheAlgorithms/Python/blob/master/graphs/graph_matrix.py)
|
||||
* [Graphs Floyd Warshall](https://github.com/TheAlgorithms/Python/blob/master/graphs/graphs_floyd_warshall.py)
|
||||
* [Greedy Best First](https://github.com/TheAlgorithms/Python/blob/master/graphs/greedy_best_first.py)
|
||||
* [Kahns Algorithm Long](https://github.com/TheAlgorithms/Python/blob/master/graphs/kahns_algorithm_long.py)
|
||||
* [Kahns Algorithm Topo](https://github.com/TheAlgorithms/Python/blob/master/graphs/kahns_algorithm_topo.py)
|
||||
* [Minimum Spanning Tree Kruskal](https://github.com/TheAlgorithms/Python/blob/master/graphs/minimum_spanning_tree_kruskal.py)
|
||||
@ -409,6 +411,7 @@
|
||||
* [Fischer Yates Shuffle](https://github.com/TheAlgorithms/Python/blob/master/other/fischer_yates_shuffle.py)
|
||||
* [Frequency Finder](https://github.com/TheAlgorithms/Python/blob/master/other/frequency_finder.py)
|
||||
* [Game Of Life](https://github.com/TheAlgorithms/Python/blob/master/other/game_of_life.py)
|
||||
* [Gauss Easter](https://github.com/TheAlgorithms/Python/blob/master/other/gauss_easter.py)
|
||||
* [Greedy](https://github.com/TheAlgorithms/Python/blob/master/other/greedy.py)
|
||||
* [Integeration By Simpson Approx](https://github.com/TheAlgorithms/Python/blob/master/other/integeration_by_simpson_approx.py)
|
||||
* [Largest Subarray Sum](https://github.com/TheAlgorithms/Python/blob/master/other/largest_subarray_sum.py)
|
||||
|
@ -2,6 +2,7 @@
|
||||
|
||||
import math
|
||||
|
||||
|
||||
# for calculating u value
|
||||
def ucal(u, p):
|
||||
"""
|
||||
|
@ -18,7 +18,7 @@ def valid_coloring(
|
||||
|
||||
>>> neighbours = [0,1,0,1,0]
|
||||
>>> colored_vertices = [0, 2, 1, 2, 0]
|
||||
|
||||
|
||||
>>> color = 1
|
||||
>>> valid_coloring(neighbours, colored_vertices, color)
|
||||
True
|
||||
@ -37,11 +37,11 @@ def valid_coloring(
|
||||
def util_color(
|
||||
graph: List[List[int]], max_colors: int, colored_vertices: List[int], index: int
|
||||
) -> bool:
|
||||
"""
|
||||
"""
|
||||
Pseudo-Code
|
||||
|
||||
Base Case:
|
||||
1. Check if coloring is complete
|
||||
1. Check if coloring is complete
|
||||
1.1 If complete return True (meaning that we successfully colored graph)
|
||||
|
||||
Recursive Step:
|
||||
@ -60,7 +60,7 @@ def util_color(
|
||||
>>> max_colors = 3
|
||||
>>> colored_vertices = [0, 1, 0, 0, 0]
|
||||
>>> index = 3
|
||||
|
||||
|
||||
>>> util_color(graph, max_colors, colored_vertices, index)
|
||||
True
|
||||
|
||||
@ -87,11 +87,11 @@ def util_color(
|
||||
|
||||
|
||||
def color(graph: List[List[int]], max_colors: int) -> List[int]:
|
||||
"""
|
||||
"""
|
||||
Wrapper function to call subroutine called util_color
|
||||
which will either return True or False.
|
||||
If True is returned colored_vertices list is filled with correct colorings
|
||||
|
||||
|
||||
>>> graph = [[0, 1, 0, 0, 0],
|
||||
... [1, 0, 1, 0, 1],
|
||||
... [0, 1, 0, 1, 0],
|
||||
|
@ -1,9 +1,9 @@
|
||||
"""
|
||||
A Hamiltonian cycle (Hamiltonian circuit) is a graph cycle
|
||||
A Hamiltonian cycle (Hamiltonian circuit) is a graph cycle
|
||||
through a graph that visits each node exactly once.
|
||||
Determining whether such paths and cycles exist in graphs
|
||||
Determining whether such paths and cycles exist in graphs
|
||||
is the 'Hamiltonian path problem', which is NP-complete.
|
||||
|
||||
|
||||
Wikipedia: https://en.wikipedia.org/wiki/Hamiltonian_path
|
||||
"""
|
||||
from typing import List
|
||||
@ -18,7 +18,7 @@ def valid_connection(
|
||||
2. Next vertex should not be in path
|
||||
If both validations succeeds we return true saying that it is possible to connect this vertices
|
||||
either we return false
|
||||
|
||||
|
||||
Case 1:Use exact graph as in main function, with initialized values
|
||||
>>> graph = [[0, 1, 0, 1, 0],
|
||||
... [1, 0, 1, 1, 1],
|
||||
@ -56,11 +56,11 @@ def util_hamilton_cycle(graph: List[List[int]], path: List[int], curr_ind: int)
|
||||
Recursive Step:
|
||||
2. Iterate over each vertex
|
||||
Check if next vertex is valid for transiting from current vertex
|
||||
2.1 Remember next vertex as next transition
|
||||
2.1 Remember next vertex as next transition
|
||||
2.2 Do recursive call and check if going to this vertex solves problem
|
||||
2.3 if next vertex leads to solution return True
|
||||
2.4 else backtrack, delete remembered vertex
|
||||
|
||||
|
||||
Case 1: Use exact graph as in main function, with initialized values
|
||||
>>> graph = [[0, 1, 0, 1, 0],
|
||||
... [1, 0, 1, 1, 1],
|
||||
@ -111,12 +111,12 @@ def hamilton_cycle(graph: List[List[int]], start_index: int = 0) -> List[int]:
|
||||
Wrapper function to call subroutine called util_hamilton_cycle,
|
||||
which will either return array of vertices indicating hamiltonian cycle
|
||||
or an empty list indicating that hamiltonian cycle was not found.
|
||||
Case 1:
|
||||
Following graph consists of 5 edges.
|
||||
Case 1:
|
||||
Following graph consists of 5 edges.
|
||||
If we look closely, we can see that there are multiple Hamiltonian cycles.
|
||||
For example one result is when we iterate like:
|
||||
For example one result is when we iterate like:
|
||||
(0)->(1)->(2)->(4)->(3)->(0)
|
||||
|
||||
|
||||
(0)---(1)---(2)
|
||||
| / \ |
|
||||
| / \ |
|
||||
@ -130,10 +130,10 @@ def hamilton_cycle(graph: List[List[int]], start_index: int = 0) -> List[int]:
|
||||
... [0, 1, 1, 1, 0]]
|
||||
>>> hamilton_cycle(graph)
|
||||
[0, 1, 2, 4, 3, 0]
|
||||
|
||||
Case 2:
|
||||
|
||||
Case 2:
|
||||
Same Graph as it was in Case 1, changed starting index from default to 3
|
||||
|
||||
|
||||
(0)---(1)---(2)
|
||||
| / \ |
|
||||
| / \ |
|
||||
@ -147,11 +147,11 @@ def hamilton_cycle(graph: List[List[int]], start_index: int = 0) -> List[int]:
|
||||
... [0, 1, 1, 1, 0]]
|
||||
>>> hamilton_cycle(graph, 3)
|
||||
[3, 0, 1, 2, 4, 3]
|
||||
|
||||
|
||||
Case 3:
|
||||
Following Graph is exactly what it was before, but edge 3-4 is removed.
|
||||
Result is that there is no Hamiltonian Cycle anymore.
|
||||
|
||||
|
||||
(0)---(1)---(2)
|
||||
| / \ |
|
||||
| / \ |
|
||||
|
@ -1,10 +1,10 @@
|
||||
import math
|
||||
|
||||
""" Minimax helps to achieve maximum score in a game by checking all possible moves
|
||||
depth is current depth in game tree.
|
||||
depth is current depth in game tree.
|
||||
nodeIndex is index of current node in scores[].
|
||||
if move is of maximizer return true else false
|
||||
leaves of game tree is stored in scores[]
|
||||
leaves of game tree is stored in scores[]
|
||||
height is maximum height of Game tree
|
||||
"""
|
||||
|
||||
|
@ -1,9 +1,9 @@
|
||||
"""
|
||||
|
||||
The nqueens problem is of placing N queens on a N * N
|
||||
The nqueens problem is of placing N queens on a N * N
|
||||
chess board such that no queen can attack any other queens placed
|
||||
on that chess board.
|
||||
This means that one queen cannot have any other queen on its horizontal, vertical and
|
||||
This means that one queen cannot have any other queen on its horizontal, vertical and
|
||||
diagonal lines.
|
||||
|
||||
"""
|
||||
@ -12,7 +12,7 @@ solution = []
|
||||
|
||||
def isSafe(board, row, column):
|
||||
"""
|
||||
This function returns a boolean value True if it is safe to place a queen there considering
|
||||
This function returns a boolean value True if it is safe to place a queen there considering
|
||||
the current state of the board.
|
||||
|
||||
Parameters :
|
||||
@ -40,13 +40,13 @@ def isSafe(board, row, column):
|
||||
|
||||
def solve(board, row):
|
||||
"""
|
||||
It creates a state space tree and calls the safe function until it receives a
|
||||
False Boolean and terminates that branch and backtracks to the next
|
||||
It creates a state space tree and calls the safe function until it receives a
|
||||
False Boolean and terminates that branch and backtracks to the next
|
||||
possible solution branch.
|
||||
"""
|
||||
if row >= len(board):
|
||||
"""
|
||||
If the row number exceeds N we have board with a successful combination
|
||||
If the row number exceeds N we have board with a successful combination
|
||||
and that combination is appended to the solution list and the board is printed.
|
||||
|
||||
"""
|
||||
@ -56,9 +56,9 @@ def solve(board, row):
|
||||
return
|
||||
for i in range(len(board)):
|
||||
"""
|
||||
For every row it iterates through each column to check if it is feasible to place a
|
||||
For every row it iterates through each column to check if it is feasible to place a
|
||||
queen there.
|
||||
If all the combinations for that particular branch are successful the board is
|
||||
If all the combinations for that particular branch are successful the board is
|
||||
reinitialized for the next possible combination.
|
||||
"""
|
||||
if isSafe(board, row, i):
|
||||
|
@ -1,9 +1,12 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
|
||||
def decrypt_caesar_with_chi_squared(
|
||||
ciphertext: str,
|
||||
cipher_alphabet=None,
|
||||
frequencies_dict=None,
|
||||
case_sensetive: bool = False,
|
||||
) -> list:
|
||||
) -> tuple:
|
||||
"""
|
||||
Basic Usage
|
||||
===========
|
||||
@ -96,15 +99,19 @@ def decrypt_caesar_with_chi_squared(
|
||||
Further Reading
|
||||
================
|
||||
|
||||
* http://practicalcryptography.com/cryptanalysis/text-characterisation/chi-squared-statistic/
|
||||
* http://practicalcryptography.com/cryptanalysis/text-characterisation/chi-squared-
|
||||
statistic/
|
||||
* https://en.wikipedia.org/wiki/Letter_frequency
|
||||
* https://en.wikipedia.org/wiki/Chi-squared_test
|
||||
* https://en.m.wikipedia.org/wiki/Caesar_cipher
|
||||
|
||||
Doctests
|
||||
========
|
||||
>>> decrypt_caesar_with_chi_squared('dof pz aol jhlzhy jpwoly zv wvwbshy? pa pz avv lhzf av jyhjr!')
|
||||
(7, 3129.228005747531, 'why is the caesar cipher so popular? it is too easy to crack!')
|
||||
>>> decrypt_caesar_with_chi_squared(
|
||||
... 'dof pz aol jhlzhy jpwoly zv wvwbshy? pa pz avv lhzf av jyhjr!'
|
||||
... ) # doctest: +NORMALIZE_WHITESPACE
|
||||
(7, 3129.228005747531,
|
||||
'why is the caesar cipher so popular? it is too easy to crack!')
|
||||
|
||||
>>> decrypt_caesar_with_chi_squared('crybd cdbsxq')
|
||||
(10, 233.35343938980898, 'short string')
|
||||
@ -172,7 +179,7 @@ def decrypt_caesar_with_chi_squared(
|
||||
# Append the character if it isn't in the alphabet
|
||||
decrypted_with_shift += letter
|
||||
|
||||
chi_squared_statistic = 0
|
||||
chi_squared_statistic = 0.0
|
||||
|
||||
# Loop through each letter in the decoded message with the shift
|
||||
for letter in decrypted_with_shift:
|
||||
@ -181,7 +188,8 @@ def decrypt_caesar_with_chi_squared(
|
||||
# Get the amount of times the letter occurs in the message
|
||||
occurrences = decrypted_with_shift.count(letter)
|
||||
|
||||
# Get the excepcted amount of times the letter should appear based on letter frequencies
|
||||
# Get the excepcted amount of times the letter should appear based
|
||||
# on letter frequencies
|
||||
expected = frequencies[letter] * occurrences
|
||||
|
||||
# Complete the chi squared statistic formula
|
||||
@ -194,7 +202,8 @@ def decrypt_caesar_with_chi_squared(
|
||||
# Get the amount of times the letter occurs in the message
|
||||
occurrences = decrypted_with_shift.count(letter)
|
||||
|
||||
# Get the excepcted amount of times the letter should appear based on letter frequencies
|
||||
# Get the excepcted amount of times the letter should appear based
|
||||
# on letter frequencies
|
||||
expected = frequencies[letter] * occurrences
|
||||
|
||||
# Complete the chi squared statistic formula
|
||||
@ -209,7 +218,8 @@ def decrypt_caesar_with_chi_squared(
|
||||
decrypted_with_shift,
|
||||
]
|
||||
|
||||
# Get the most likely cipher by finding the cipher with the smallest chi squared statistic
|
||||
# Get the most likely cipher by finding the cipher with the smallest chi squared
|
||||
# statistic
|
||||
most_likely_cipher = min(
|
||||
chi_squared_statistic_values, key=chi_squared_statistic_values.get
|
||||
)
|
||||
|
@ -1,7 +1,9 @@
|
||||
import os
|
||||
import random
|
||||
import sys
|
||||
import rabin_miller as rabinMiller, cryptomath_module as cryptoMath
|
||||
|
||||
import cryptomath_module as cryptoMath
|
||||
import rabin_miller as rabinMiller
|
||||
|
||||
min_primitive_root = 3
|
||||
|
||||
|
@ -25,7 +25,7 @@ def mixed_keyword(key="college", pt="UNIVERSITY"):
|
||||
for i in key:
|
||||
if i not in temp:
|
||||
temp.append(i)
|
||||
l = len(temp)
|
||||
len_temp = len(temp)
|
||||
# print(temp)
|
||||
alpha = []
|
||||
modalpha = []
|
||||
@ -40,17 +40,17 @@ def mixed_keyword(key="college", pt="UNIVERSITY"):
|
||||
k = 0
|
||||
for i in range(r):
|
||||
t = []
|
||||
for j in range(l):
|
||||
for j in range(len_temp):
|
||||
t.append(temp[k])
|
||||
if not (k < 25):
|
||||
break
|
||||
k += 1
|
||||
modalpha.append(t)
|
||||
# print(modalpha)
|
||||
d = dict()
|
||||
d = {}
|
||||
j = 0
|
||||
k = 0
|
||||
for j in range(l):
|
||||
for j in range(len_temp):
|
||||
for i in modalpha:
|
||||
if not (len(i) - 1 >= j):
|
||||
break
|
||||
|
@ -1,4 +1,5 @@
|
||||
import sys, random
|
||||
import random
|
||||
import sys
|
||||
|
||||
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
|
||||
|
||||
|
@ -1,4 +1,7 @@
|
||||
import time, os, sys
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
|
||||
import transposition_cipher as transCipher
|
||||
|
||||
|
||||
|
@ -8,7 +8,7 @@ import math
|
||||
|
||||
def decimal_to_octal(num: int) -> str:
|
||||
"""Convert a Decimal Number to an Octal Number.
|
||||
|
||||
|
||||
>>> all(decimal_to_octal(i) == oct(i) for i in (0, 2, 8, 64, 65, 216, 255, 256, 512))
|
||||
True
|
||||
"""
|
||||
|
@ -89,8 +89,8 @@ def leftrotation(node):
|
||||
Bl Br UB Br C
|
||||
/
|
||||
UB
|
||||
|
||||
UB = unbalanced node
|
||||
|
||||
UB = unbalanced node
|
||||
"""
|
||||
print("left rotation node:", node.getdata())
|
||||
ret = node.getleft()
|
||||
@ -120,11 +120,11 @@ def rightrotation(node):
|
||||
|
||||
def rlrotation(node):
|
||||
r"""
|
||||
A A Br
|
||||
A A Br
|
||||
/ \ / \ / \
|
||||
B C RR Br C LR B A
|
||||
/ \ --> / \ --> / / \
|
||||
Bl Br B UB Bl UB C
|
||||
Bl Br B UB Bl UB C
|
||||
\ /
|
||||
UB Bl
|
||||
RR = rightrotation LR = leftrotation
|
||||
@ -276,13 +276,13 @@ class AVLtree:
|
||||
if __name__ == "__main__":
|
||||
t = AVLtree()
|
||||
t.traversale()
|
||||
l = list(range(10))
|
||||
random.shuffle(l)
|
||||
for i in l:
|
||||
lst = list(range(10))
|
||||
random.shuffle(lst)
|
||||
for i in lst:
|
||||
t.insert(i)
|
||||
t.traversale()
|
||||
|
||||
random.shuffle(l)
|
||||
for i in l:
|
||||
random.shuffle(lst)
|
||||
for i in lst:
|
||||
t.del_node(i)
|
||||
t.traversale()
|
||||
|
@ -1,4 +1,9 @@
|
||||
class Node: # This is the Class Node with a constructor that contains data variable to type data and left, right pointers.
|
||||
class Node:
|
||||
"""
|
||||
This is the Class Node with a constructor that contains data variable to type data
|
||||
and left, right pointers.
|
||||
"""
|
||||
|
||||
def __init__(self, data):
|
||||
self.data = data
|
||||
self.left = None
|
||||
|
@ -16,8 +16,8 @@ class SegmentTree:
|
||||
def right(self, idx):
|
||||
return idx * 2 + 1
|
||||
|
||||
def build(self, idx, l, r, A):
|
||||
if l == r:
|
||||
def build(self, idx, l, r, A): # noqa: E741
|
||||
if l == r: # noqa: E741
|
||||
self.st[idx] = A[l - 1]
|
||||
else:
|
||||
mid = (l + r) // 2
|
||||
@ -25,14 +25,16 @@ class SegmentTree:
|
||||
self.build(self.right(idx), mid + 1, r, A)
|
||||
self.st[idx] = max(self.st[self.left(idx)], self.st[self.right(idx)])
|
||||
|
||||
# update with O(lg N) (Normal segment tree without lazy update will take O(Nlg N) for each update)
|
||||
def update(
|
||||
self, idx, l, r, a, b, val
|
||||
): # update(1, 1, N, a, b, v) for update val v to [a,b]
|
||||
if self.flag[idx] == True:
|
||||
# update with O(lg N) (Normal segment tree without lazy update will take O(Nlg N)
|
||||
# for each update)
|
||||
def update(self, idx, l, r, a, b, val): # noqa: E741
|
||||
"""
|
||||
update(1, 1, N, a, b, v) for update val v to [a,b]
|
||||
"""
|
||||
if self.flag[idx] is True:
|
||||
self.st[idx] = self.lazy[idx]
|
||||
self.flag[idx] = False
|
||||
if l != r:
|
||||
if l != r: # noqa: E741
|
||||
self.lazy[self.left(idx)] = self.lazy[idx]
|
||||
self.lazy[self.right(idx)] = self.lazy[idx]
|
||||
self.flag[self.left(idx)] = True
|
||||
@ -40,9 +42,9 @@ class SegmentTree:
|
||||
|
||||
if r < a or l > b:
|
||||
return True
|
||||
if l >= a and r <= b:
|
||||
if l >= a and r <= b: # noqa: E741
|
||||
self.st[idx] = val
|
||||
if l != r:
|
||||
if l != r: # noqa: E741
|
||||
self.lazy[self.left(idx)] = val
|
||||
self.lazy[self.right(idx)] = val
|
||||
self.flag[self.left(idx)] = True
|
||||
@ -55,18 +57,21 @@ class SegmentTree:
|
||||
return True
|
||||
|
||||
# query with O(lg N)
|
||||
def query(self, idx, l, r, a, b): # query(1, 1, N, a, b) for query max of [a,b]
|
||||
if self.flag[idx] == True:
|
||||
def query(self, idx, l, r, a, b): # noqa: E741
|
||||
"""
|
||||
query(1, 1, N, a, b) for query max of [a,b]
|
||||
"""
|
||||
if self.flag[idx] is True:
|
||||
self.st[idx] = self.lazy[idx]
|
||||
self.flag[idx] = False
|
||||
if l != r:
|
||||
if l != r: # noqa: E741
|
||||
self.lazy[self.left(idx)] = self.lazy[idx]
|
||||
self.lazy[self.right(idx)] = self.lazy[idx]
|
||||
self.flag[self.left(idx)] = True
|
||||
self.flag[self.right(idx)] = True
|
||||
if r < a or l > b:
|
||||
return -math.inf
|
||||
if l >= a and r <= b:
|
||||
if l >= a and r <= b: # noqa: E741
|
||||
return self.st[idx]
|
||||
mid = (l + r) // 2
|
||||
q1 = self.query(self.left(idx), l, mid, a, b)
|
||||
|
@ -1,6 +1,7 @@
|
||||
"""
|
||||
A non-recursive Segment Tree implementation with range query and single element update,
|
||||
works virtually with any list of the same type of elements with a "commutative" combiner.
|
||||
works virtually with any list of the same type of elements with a "commutative"
|
||||
combiner.
|
||||
|
||||
Explanation:
|
||||
https://www.geeksforgeeks.org/iterative-segment-tree-range-minimum-query/
|
||||
@ -22,7 +23,8 @@ https://www.geeksforgeeks.org/segment-tree-efficient-implementation/
|
||||
>>> st.update(4, 1)
|
||||
>>> st.query(3, 4)
|
||||
0
|
||||
>>> st = SegmentTree([[1, 2, 3], [3, 2, 1], [1, 1, 1]], lambda a, b: [a[i] + b[i] for i in range(len(a))])
|
||||
>>> st = SegmentTree([[1, 2, 3], [3, 2, 1], [1, 1, 1]], lambda a, b: [a[i] + b[i] for i
|
||||
... in range(len(a))])
|
||||
>>> st.query(0, 1)
|
||||
[4, 4, 4]
|
||||
>>> st.query(1, 2)
|
||||
@ -47,7 +49,8 @@ class SegmentTree:
|
||||
|
||||
>>> SegmentTree(['a', 'b', 'c'], lambda a, b: '{}{}'.format(a, b)).query(0, 2)
|
||||
'abc'
|
||||
>>> SegmentTree([(1, 2), (2, 3), (3, 4)], lambda a, b: (a[0] + b[0], a[1] + b[1])).query(0, 2)
|
||||
>>> SegmentTree([(1, 2), (2, 3), (3, 4)],
|
||||
... lambda a, b: (a[0] + b[0], a[1] + b[1])).query(0, 2)
|
||||
(6, 9)
|
||||
"""
|
||||
self.N = len(arr)
|
||||
@ -78,7 +81,7 @@ class SegmentTree:
|
||||
p = p // 2
|
||||
self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])
|
||||
|
||||
def query(self, l: int, r: int) -> T:
|
||||
def query(self, l: int, r: int) -> T: # noqa: E741
|
||||
"""
|
||||
Get range query value in log(N) time
|
||||
:param l: left element index
|
||||
@ -95,9 +98,9 @@ class SegmentTree:
|
||||
>>> st.query(2, 3)
|
||||
7
|
||||
"""
|
||||
l, r = l + self.N, r + self.N
|
||||
l, r = l + self.N, r + self.N # noqa: E741
|
||||
res = None
|
||||
while l <= r:
|
||||
while l <= r: # noqa: E741
|
||||
if l % 2 == 1:
|
||||
res = self.st[l] if res is None else self.fn(res, self.st[l])
|
||||
if r % 2 == 0:
|
||||
|
@ -15,8 +15,8 @@ class SegmentTree:
|
||||
def right(self, idx):
|
||||
return idx * 2 + 1
|
||||
|
||||
def build(self, idx, l, r):
|
||||
if l == r:
|
||||
def build(self, idx, l, r): # noqa: E741
|
||||
if l == r: # noqa: E741
|
||||
self.st[idx] = A[l]
|
||||
else:
|
||||
mid = (l + r) // 2
|
||||
@ -27,12 +27,13 @@ class SegmentTree:
|
||||
def update(self, a, b, val):
|
||||
return self.update_recursive(1, 0, self.N - 1, a - 1, b - 1, val)
|
||||
|
||||
def update_recursive(
|
||||
self, idx, l, r, a, b, val
|
||||
): # update(1, 1, N, a, b, v) for update val v to [a,b]
|
||||
def update_recursive(self, idx, l, r, a, b, val): # noqa: E741
|
||||
"""
|
||||
update(1, 1, N, a, b, v) for update val v to [a,b]
|
||||
"""
|
||||
if r < a or l > b:
|
||||
return True
|
||||
if l == r:
|
||||
if l == r: # noqa: E741
|
||||
self.st[idx] = val
|
||||
return True
|
||||
mid = (l + r) // 2
|
||||
@ -44,12 +45,13 @@ class SegmentTree:
|
||||
def query(self, a, b):
|
||||
return self.query_recursive(1, 0, self.N - 1, a - 1, b - 1)
|
||||
|
||||
def query_recursive(
|
||||
self, idx, l, r, a, b
|
||||
): # query(1, 1, N, a, b) for query max of [a,b]
|
||||
def query_recursive(self, idx, l, r, a, b): # noqa: E741
|
||||
"""
|
||||
query(1, 1, N, a, b) for query max of [a,b]
|
||||
"""
|
||||
if r < a or l > b:
|
||||
return -math.inf
|
||||
if l >= a and r <= b:
|
||||
if l >= a and r <= b: # noqa: E741
|
||||
return self.st[idx]
|
||||
mid = (l + r) // 2
|
||||
q1 = self.query_recursive(self.left(idx), l, mid, a, b)
|
||||
|
@ -1,3 +1,5 @@
|
||||
# flake8: noqa
|
||||
|
||||
from random import random
|
||||
from typing import Tuple
|
||||
|
||||
@ -161,7 +163,8 @@ def main():
|
||||
"""After each command, program prints treap"""
|
||||
root = None
|
||||
print(
|
||||
"enter numbers to create a tree, + value to add value into treap, - value to erase all nodes with value. 'q' to quit. "
|
||||
"enter numbers to create a tree, + value to add value into treap, "
|
||||
"- value to erase all nodes with value. 'q' to quit. "
|
||||
)
|
||||
|
||||
args = input()
|
||||
|
@ -5,7 +5,7 @@ from hash_table import HashTable
|
||||
|
||||
class QuadraticProbing(HashTable):
|
||||
"""
|
||||
Basic Hash Table example with open addressing using Quadratic Probing
|
||||
Basic Hash Table example with open addressing using Quadratic Probing
|
||||
"""
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
|
@ -1,3 +1,5 @@
|
||||
# flake8: noqa
|
||||
|
||||
"""
|
||||
Binomial Heap
|
||||
Reference: Advanced Data Structures, Peter Brass
|
||||
|
@ -66,7 +66,7 @@ class MinHeap:
|
||||
# this is min-heapify method
|
||||
def sift_down(self, idx, array):
|
||||
while True:
|
||||
l = self.get_left_child_idx(idx)
|
||||
l = self.get_left_child_idx(idx) # noqa: E741
|
||||
r = self.get_right_child_idx(idx)
|
||||
|
||||
smallest = idx
|
||||
@ -132,7 +132,7 @@ class MinHeap:
|
||||
self.sift_up(self.idx_of_element[node])
|
||||
|
||||
|
||||
## USAGE
|
||||
# USAGE
|
||||
|
||||
r = Node("R", -1)
|
||||
b = Node("B", 6)
|
||||
|
@ -1,5 +1,5 @@
|
||||
"""
|
||||
Implementing Deque using DoublyLinkedList ...
|
||||
Implementing Deque using DoublyLinkedList ...
|
||||
Operations:
|
||||
1. insertion in the front -> O(1)
|
||||
2. insertion in the end -> O(1)
|
||||
@ -61,7 +61,7 @@ class _DoublyLinkedBase:
|
||||
|
||||
class LinkedDeque(_DoublyLinkedBase):
|
||||
def first(self):
|
||||
""" return first element
|
||||
""" return first element
|
||||
>>> d = LinkedDeque()
|
||||
>>> d.add_first('A').first()
|
||||
'A'
|
||||
@ -84,7 +84,7 @@ class LinkedDeque(_DoublyLinkedBase):
|
||||
raise Exception("List is empty")
|
||||
return self._trailer._prev._data
|
||||
|
||||
### DEque Insert Operations (At the front, At the end) ###
|
||||
# DEque Insert Operations (At the front, At the end)
|
||||
|
||||
def add_first(self, element):
|
||||
""" insertion in the front
|
||||
@ -100,7 +100,7 @@ class LinkedDeque(_DoublyLinkedBase):
|
||||
"""
|
||||
return self._insert(self._trailer._prev, element, self._trailer)
|
||||
|
||||
### DEqueu Remove Operations (At the front, At the end) ###
|
||||
# DEqueu Remove Operations (At the front, At the end)
|
||||
|
||||
def remove_first(self):
|
||||
""" removal from the front
|
||||
|
@ -43,7 +43,7 @@ class LinkedList:
|
||||
-20
|
||||
>>> link.middle_element()
|
||||
12
|
||||
>>>
|
||||
>>>
|
||||
"""
|
||||
slow_pointer = self.head
|
||||
fast_pointer = self.head
|
||||
|
@ -22,7 +22,7 @@ import operator as op
|
||||
|
||||
def Solve(Postfix):
|
||||
Stack = []
|
||||
Div = lambda x, y: int(x / y) # integer division operation
|
||||
Div = lambda x, y: int(x / y) # noqa: E731 integer division operation
|
||||
Opr = {
|
||||
"^": op.pow,
|
||||
"*": op.mul,
|
||||
@ -38,29 +38,27 @@ def Solve(Postfix):
|
||||
for x in Postfix:
|
||||
if x.isdigit(): # if x in digit
|
||||
Stack.append(x) # append x to stack
|
||||
print(
|
||||
x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(Stack), sep=" | "
|
||||
) # output in tabular format
|
||||
# output in tabular format
|
||||
print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(Stack), sep=" | ")
|
||||
else:
|
||||
B = Stack.pop() # pop stack
|
||||
print(
|
||||
"".rjust(8), ("pop(" + B + ")").ljust(12), ",".join(Stack), sep=" | "
|
||||
) # output in tabular format
|
||||
# output in tabular format
|
||||
print("".rjust(8), ("pop(" + B + ")").ljust(12), ",".join(Stack), sep=" | ")
|
||||
|
||||
A = Stack.pop() # pop stack
|
||||
print(
|
||||
"".rjust(8), ("pop(" + A + ")").ljust(12), ",".join(Stack), sep=" | "
|
||||
) # output in tabular format
|
||||
# output in tabular format
|
||||
print("".rjust(8), ("pop(" + A + ")").ljust(12), ",".join(Stack), sep=" | ")
|
||||
|
||||
Stack.append(
|
||||
str(Opr[x](int(A), int(B)))
|
||||
) # evaluate the 2 values popped from stack & push result to stack
|
||||
# output in tabular format
|
||||
print(
|
||||
x.rjust(8),
|
||||
("push(" + A + x + B + ")").ljust(12),
|
||||
",".join(Stack),
|
||||
sep=" | ",
|
||||
) # output in tabular format
|
||||
)
|
||||
|
||||
return int(Stack[0])
|
||||
|
||||
|
@ -1,7 +1,7 @@
|
||||
"""
|
||||
A Trie/Prefix Tree is a kind of search tree used to provide quick lookup
|
||||
of words/patterns in a set of words. A basic Trie however has O(n^2) space complexity
|
||||
making it impractical in practice. It however provides O(max(search_string, length of longest word))
|
||||
making it impractical in practice. It however provides O(max(search_string, length of longest word))
|
||||
lookup time making it an optimal approach when space is not an issue.
|
||||
"""
|
||||
|
||||
|
@ -54,9 +54,9 @@ class Burkes:
|
||||
current_error = greyscale + self.error_table[x][y] - 255
|
||||
"""
|
||||
Burkes error propagation (`*` is current pixel):
|
||||
|
||||
* 8/32 4/32
|
||||
2/32 4/32 8/32 4/32 2/32
|
||||
|
||||
* 8/32 4/32
|
||||
2/32 4/32 8/32 4/32 2/32
|
||||
"""
|
||||
self.error_table[y][x + 1] += int(8 / 32 * current_error)
|
||||
self.error_table[y][x + 2] += int(4 / 32 * current_error)
|
||||
|
@ -29,8 +29,8 @@ def canny(image, threshold_low=15, threshold_high=30, weak=128, strong=255):
|
||||
dst = np.zeros((image_row, image_col))
|
||||
|
||||
"""
|
||||
Non-maximum suppression. If the edge strength of the current pixel is the largest compared to the other pixels
|
||||
in the mask with the same direction, the value will be preserved. Otherwise, the value will be suppressed.
|
||||
Non-maximum suppression. If the edge strength of the current pixel is the largest compared to the other pixels
|
||||
in the mask with the same direction, the value will be preserved. Otherwise, the value will be suppressed.
|
||||
"""
|
||||
for row in range(1, image_row - 1):
|
||||
for col in range(1, image_col - 1):
|
||||
|
@ -6,26 +6,28 @@
|
||||
# Imports
|
||||
import numpy as np
|
||||
|
||||
|
||||
# Class implemented to calculus the index
|
||||
class IndexCalculation:
|
||||
"""
|
||||
# Class Summary
|
||||
This algorithm consists in calculating vegetation indices, these indices
|
||||
can be used for precision agriculture for example (or remote sensing). There are
|
||||
functions to define the data and to calculate the implemented indices.
|
||||
This algorithm consists in calculating vegetation indices, these
|
||||
indices can be used for precision agriculture for example (or remote
|
||||
sensing). There are functions to define the data and to calculate the
|
||||
implemented indices.
|
||||
|
||||
# Vegetation index
|
||||
https://en.wikipedia.org/wiki/Vegetation_Index
|
||||
A Vegetation Index (VI) is a spectral transformation of two or more bands designed
|
||||
to enhance the contribution of vegetation properties and allow reliable spatial and
|
||||
temporal inter-comparisons of terrestrial photosynthetic activity and canopy
|
||||
structural variations
|
||||
|
||||
A Vegetation Index (VI) is a spectral transformation of two or more bands
|
||||
designed to enhance the contribution of vegetation properties and allow
|
||||
reliable spatial and temporal inter-comparisons of terrestrial
|
||||
photosynthetic activity and canopy structural variations
|
||||
|
||||
# Information about channels (Wavelength range for each)
|
||||
* nir - near-infrared
|
||||
https://www.malvernpanalytical.com/br/products/technology/near-infrared-spectroscopy
|
||||
Wavelength Range 700 nm to 2500 nm
|
||||
* Red Edge
|
||||
* Red Edge
|
||||
https://en.wikipedia.org/wiki/Red_edge
|
||||
Wavelength Range 680 nm to 730 nm
|
||||
* red
|
||||
@ -38,7 +40,7 @@ class IndexCalculation:
|
||||
https://en.wikipedia.org/wiki/Color
|
||||
Wavelength Range 520 nm to 560 nm
|
||||
|
||||
|
||||
|
||||
# Implemented index list
|
||||
#"abbreviationOfIndexName" -- list of channels used
|
||||
|
||||
@ -84,17 +86,19 @@ class IndexCalculation:
|
||||
#"NDRE" -- redEdge, nir
|
||||
|
||||
#list of all index implemented
|
||||
#allIndex = ["ARVI2", "CCCI", "CVI", "GLI", "NDVI", "BNDVI", "redEdgeNDVI", "GNDVI",
|
||||
"GBNDVI", "GRNDVI", "RBNDVI", "PNDVI", "ATSAVI", "BWDRVI", "CIgreen",
|
||||
"CIrededge", "CI", "CTVI", "GDVI", "EVI", "GEMI", "GOSAVI", "GSAVI",
|
||||
"Hue", "IVI", "IPVI", "I", "RVI", "MRVI", "MSAVI", "NormG", "NormNIR",
|
||||
"NormR", "NGRDI", "RI", "S", "IF", "DVI", "TVI", "NDRE"]
|
||||
#allIndex = ["ARVI2", "CCCI", "CVI", "GLI", "NDVI", "BNDVI", "redEdgeNDVI",
|
||||
"GNDVI", "GBNDVI", "GRNDVI", "RBNDVI", "PNDVI", "ATSAVI",
|
||||
"BWDRVI", "CIgreen", "CIrededge", "CI", "CTVI", "GDVI", "EVI",
|
||||
"GEMI", "GOSAVI", "GSAVI", "Hue", "IVI", "IPVI", "I", "RVI",
|
||||
"MRVI", "MSAVI", "NormG", "NormNIR", "NormR", "NGRDI", "RI",
|
||||
"S", "IF", "DVI", "TVI", "NDRE"]
|
||||
|
||||
#list of index with not blue channel
|
||||
#notBlueIndex = ["ARVI2", "CCCI", "CVI", "NDVI", "redEdgeNDVI", "GNDVI", "GRNDVI",
|
||||
"ATSAVI", "CIgreen", "CIrededge", "CTVI", "GDVI", "GEMI", "GOSAVI",
|
||||
"GSAVI", "IVI", "IPVI", "RVI", "MRVI", "MSAVI", "NormG", "NormNIR",
|
||||
"NormR", "NGRDI", "RI", "DVI", "TVI", "NDRE"]
|
||||
#notBlueIndex = ["ARVI2", "CCCI", "CVI", "NDVI", "redEdgeNDVI", "GNDVI",
|
||||
"GRNDVI", "ATSAVI", "CIgreen", "CIrededge", "CTVI", "GDVI",
|
||||
"GEMI", "GOSAVI", "GSAVI", "IVI", "IPVI", "RVI", "MRVI",
|
||||
"MSAVI", "NormG", "NormNIR", "NormR", "NGRDI", "RI", "DVI",
|
||||
"TVI", "NDRE"]
|
||||
|
||||
#list of index just with RGB channels
|
||||
#RGBIndex = ["GLI", "CI", "Hue", "I", "NGRDI", "RI", "S", "IF"]
|
||||
@ -121,8 +125,8 @@ class IndexCalculation:
|
||||
self, index="", red=None, green=None, blue=None, redEdge=None, nir=None
|
||||
):
|
||||
"""
|
||||
performs the calculation of the index with the values instantiated in the class
|
||||
:str index: abbreviation of index name to perform
|
||||
performs the calculation of the index with the values instantiated in the class
|
||||
:str index: abbreviation of index name to perform
|
||||
"""
|
||||
self.setMatrices(red=red, green=green, blue=blue, redEdge=redEdge, nir=nir)
|
||||
funcs = {
|
||||
@ -213,8 +217,8 @@ class IndexCalculation:
|
||||
|
||||
def NDVI(self):
|
||||
"""
|
||||
Normalized Difference self.nir/self.red Normalized Difference Vegetation Index,
|
||||
Calibrated NDVI - CDVI
|
||||
Normalized Difference self.nir/self.red Normalized Difference Vegetation
|
||||
Index, Calibrated NDVI - CDVI
|
||||
https://www.indexdatabase.de/db/i-single.php?id=58
|
||||
:return: index
|
||||
"""
|
||||
@ -222,7 +226,7 @@ class IndexCalculation:
|
||||
|
||||
def BNDVI(self):
|
||||
"""
|
||||
Normalized Difference self.nir/self.blue self.blue-normalized difference
|
||||
Normalized Difference self.nir/self.blue self.blue-normalized difference
|
||||
vegetation index
|
||||
https://www.indexdatabase.de/db/i-single.php?id=135
|
||||
:return: index
|
||||
@ -410,7 +414,7 @@ class IndexCalculation:
|
||||
"""
|
||||
return (self.nir / ((self.nir + self.red) / 2)) * (self.NDVI() + 1)
|
||||
|
||||
def I(self):
|
||||
def I(self): # noqa: E741,E743
|
||||
"""
|
||||
Intensity
|
||||
https://www.indexdatabase.de/db/i-single.php?id=36
|
||||
@ -471,8 +475,9 @@ class IndexCalculation:
|
||||
|
||||
def NGRDI(self):
|
||||
"""
|
||||
Normalized Difference self.green/self.red Normalized self.green self.red
|
||||
difference index, Visible Atmospherically Resistant Indices self.green (VIself.green)
|
||||
Normalized Difference self.green/self.red Normalized self.green self.red
|
||||
difference index, Visible Atmospherically Resistant Indices self.green
|
||||
(VIself.green)
|
||||
https://www.indexdatabase.de/db/i-single.php?id=390
|
||||
:return: index
|
||||
"""
|
||||
@ -506,7 +511,7 @@ class IndexCalculation:
|
||||
|
||||
def DVI(self):
|
||||
"""
|
||||
Simple Ratio self.nir/self.red Difference Vegetation Index, Vegetation Index
|
||||
Simple Ratio self.nir/self.red Difference Vegetation Index, Vegetation Index
|
||||
Number (VIN)
|
||||
https://www.indexdatabase.de/db/i-single.php?id=12
|
||||
:return: index
|
||||
@ -535,7 +540,7 @@ nir = np.ones((1000,1000, 1),dtype="float64") * 52200
|
||||
|
||||
# Examples of how to use the class
|
||||
|
||||
# instantiating the class
|
||||
# instantiating the class
|
||||
cl = IndexCalculation()
|
||||
|
||||
# instantiating the class with the values
|
||||
@ -556,9 +561,12 @@ indexValue_form2 = cl.CCCI()
|
||||
indexValue_form3 = cl.calculation("CCCI", red=red, green=green, blue=blue,
|
||||
redEdge=redEdge, nir=nir).astype(np.float64)
|
||||
|
||||
print("Form 1: "+np.array2string(indexValue_form1, precision=20, separator=', ', floatmode='maxprec_equal'))
|
||||
print("Form 2: "+np.array2string(indexValue_form2, precision=20, separator=', ', floatmode='maxprec_equal'))
|
||||
print("Form 3: "+np.array2string(indexValue_form3, precision=20, separator=', ', floatmode='maxprec_equal'))
|
||||
print("Form 1: "+np.array2string(indexValue_form1, precision=20, separator=', ',
|
||||
floatmode='maxprec_equal'))
|
||||
print("Form 2: "+np.array2string(indexValue_form2, precision=20, separator=', ',
|
||||
floatmode='maxprec_equal'))
|
||||
print("Form 3: "+np.array2string(indexValue_form3, precision=20, separator=', ',
|
||||
floatmode='maxprec_equal'))
|
||||
|
||||
# A list of examples results for different type of data at NDVI
|
||||
# float16 -> 0.31567383 #NDVI (red = 50, nir = 100)
|
||||
|
@ -1,10 +1,10 @@
|
||||
"""
|
||||
Given a array of length n, max_subarray_sum() finds
|
||||
"""
|
||||
Given a array of length n, max_subarray_sum() finds
|
||||
the maximum of sum of contiguous sub-array using divide and conquer method.
|
||||
|
||||
Time complexity : O(n log n)
|
||||
|
||||
Ref : INTRODUCTION TO ALGORITHMS THIRD EDITION
|
||||
Ref : INTRODUCTION TO ALGORITHMS THIRD EDITION
|
||||
(section : 4, sub-section : 4.1, page : 70)
|
||||
|
||||
"""
|
||||
@ -13,10 +13,10 @@ Ref : INTRODUCTION TO ALGORITHMS THIRD EDITION
|
||||
def max_sum_from_start(array):
|
||||
""" This function finds the maximum contiguous sum of array from 0 index
|
||||
|
||||
Parameters :
|
||||
Parameters :
|
||||
array (list[int]) : given array
|
||||
|
||||
Returns :
|
||||
|
||||
Returns :
|
||||
max_sum (int) : maximum contiguous sum of array from 0 index
|
||||
|
||||
"""
|
||||
@ -32,10 +32,10 @@ def max_sum_from_start(array):
|
||||
def max_cross_array_sum(array, left, mid, right):
|
||||
""" This function finds the maximum contiguous sum of left and right arrays
|
||||
|
||||
Parameters :
|
||||
array, left, mid, right (list[int], int, int, int)
|
||||
|
||||
Returns :
|
||||
Parameters :
|
||||
array, left, mid, right (list[int], int, int, int)
|
||||
|
||||
Returns :
|
||||
(int) : maximum of sum of contiguous sum of left and right arrays
|
||||
|
||||
"""
|
||||
@ -48,11 +48,11 @@ def max_cross_array_sum(array, left, mid, right):
|
||||
def max_subarray_sum(array, left, right):
|
||||
""" Maximum contiguous sub-array sum, using divide and conquer method
|
||||
|
||||
Parameters :
|
||||
array, left, right (list[int], int, int) :
|
||||
Parameters :
|
||||
array, left, right (list[int], int, int) :
|
||||
given array, current left index and current right index
|
||||
|
||||
Returns :
|
||||
|
||||
Returns :
|
||||
int : maximum of sum of contiguous sub-array
|
||||
|
||||
"""
|
||||
|
@ -1,5 +1,5 @@
|
||||
def merge(a, b, m, e):
|
||||
l = a[b : m + 1]
|
||||
l = a[b : m + 1] # noqa: E741
|
||||
r = a[m + 1 : e + 1]
|
||||
k = b
|
||||
i = 0
|
||||
|
@ -26,9 +26,9 @@ def factorial(num):
|
||||
|
||||
# factorial of num
|
||||
# uncomment the following to see how recalculations are avoided
|
||||
##result=[-1]*10
|
||||
##result[0]=result[1]=1
|
||||
##print(factorial(5))
|
||||
# result=[-1]*10
|
||||
# result[0]=result[1]=1
|
||||
# print(factorial(5))
|
||||
# print(factorial(3))
|
||||
# print(factorial(7))
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
"""
|
||||
Author : Syed Faizan (3rd Year Student IIIT Pune)
|
||||
Author : Syed Faizan (3rd Year Student IIIT Pune)
|
||||
github : faizan2700
|
||||
You are given a bitmask m and you want to efficiently iterate through all of
|
||||
its submasks. The mask s is submask of m if only bits that were included in
|
||||
@ -33,7 +33,7 @@ def list_of_submasks(mask: int) -> List[int]:
|
||||
Traceback (most recent call last):
|
||||
...
|
||||
AssertionError: mask needs to be positive integer, your input 0
|
||||
|
||||
|
||||
"""
|
||||
|
||||
fmt = "mask needs to be positive integer, your input {}"
|
||||
|
@ -76,7 +76,7 @@ if __name__ == "__main__":
|
||||
expected_subseq = "GTAB"
|
||||
|
||||
ln, subseq = longest_common_subsequence(a, b)
|
||||
## print("len =", ln, ", sub-sequence =", subseq)
|
||||
print("len =", ln, ", sub-sequence =", subseq)
|
||||
import doctest
|
||||
|
||||
doctest.testmod()
|
||||
|
@ -1,11 +1,14 @@
|
||||
"""
|
||||
Author : Mehdi ALAOUI
|
||||
|
||||
This is a pure Python implementation of Dynamic Programming solution to the longest increasing subsequence of a given sequence.
|
||||
This is a pure Python implementation of Dynamic Programming solution to the longest
|
||||
increasing subsequence of a given sequence.
|
||||
|
||||
The problem is :
|
||||
Given an array, to find the longest and increasing sub-array in that given array and return it.
|
||||
Example: [10, 22, 9, 33, 21, 50, 41, 60, 80] as input will return [10, 22, 33, 41, 60, 80] as output
|
||||
Given an array, to find the longest and increasing sub-array in that given array and
|
||||
return it.
|
||||
Example: [10, 22, 9, 33, 21, 50, 41, 60, 80] as input will return
|
||||
[10, 22, 33, 41, 60, 80] as output
|
||||
"""
|
||||
from typing import List
|
||||
|
||||
@ -21,11 +24,13 @@ def longest_subsequence(array: List[int]) -> List[int]: # This function is recu
|
||||
[8]
|
||||
>>> longest_subsequence([1, 1, 1])
|
||||
[1, 1, 1]
|
||||
>>> longest_subsequence([])
|
||||
[]
|
||||
"""
|
||||
array_length = len(array)
|
||||
if (
|
||||
array_length <= 1
|
||||
): # If the array contains only one element, we return it (it's the stop condition of recursion)
|
||||
# If the array contains only one element, we return it (it's the stop condition of
|
||||
# recursion)
|
||||
if array_length <= 1:
|
||||
return array
|
||||
# Else
|
||||
pivot = array[0]
|
||||
|
@ -1,19 +1,19 @@
|
||||
#############################
|
||||
# Author: Aravind Kashyap
|
||||
# File: lis.py
|
||||
# comments: This programme outputs the Longest Strictly Increasing Subsequence in O(NLogN)
|
||||
# Where N is the Number of elements in the list
|
||||
# comments: This programme outputs the Longest Strictly Increasing Subsequence in
|
||||
# O(NLogN) Where N is the Number of elements in the list
|
||||
#############################
|
||||
from typing import List
|
||||
|
||||
|
||||
def CeilIndex(v, l, r, key):
|
||||
def CeilIndex(v, l, r, key): # noqa: E741
|
||||
while r - l > 1:
|
||||
m = (l + r) // 2
|
||||
if v[m] >= key:
|
||||
r = m
|
||||
else:
|
||||
l = m
|
||||
l = m # noqa: E741
|
||||
return r
|
||||
|
||||
|
||||
@ -23,7 +23,8 @@ def LongestIncreasingSubsequenceLength(v: List[int]) -> int:
|
||||
6
|
||||
>>> LongestIncreasingSubsequenceLength([])
|
||||
0
|
||||
>>> LongestIncreasingSubsequenceLength([0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3, 11, 7, 15])
|
||||
>>> LongestIncreasingSubsequenceLength([0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3,
|
||||
... 11, 7, 15])
|
||||
6
|
||||
>>> LongestIncreasingSubsequenceLength([5, 4, 3, 2, 1])
|
||||
1
|
||||
|
@ -44,12 +44,12 @@ def max_sub_array(nums: List[int]) -> int:
|
||||
|
||||
>>> max_sub_array([-2, 1, -3, 4, -1, 2, 1, -5, 4])
|
||||
6
|
||||
|
||||
|
||||
An empty (sub)array has sum 0.
|
||||
>>> max_sub_array([])
|
||||
0
|
||||
|
||||
If all elements are negative, the largest subarray would be the empty array,
|
||||
|
||||
If all elements are negative, the largest subarray would be the empty array,
|
||||
having the sum 0.
|
||||
>>> max_sub_array([-1, -2, -3])
|
||||
0
|
||||
|
@ -23,7 +23,7 @@ def findMin(arr):
|
||||
dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
|
||||
|
||||
for j in range(int(s / 2), -1, -1):
|
||||
if dp[n][j] == True:
|
||||
if dp[n][j] is True:
|
||||
diff = s - 2 * j
|
||||
break
|
||||
|
||||
|
@ -40,7 +40,7 @@ class Node:
|
||||
def print_binary_search_tree(root, key, i, j, parent, is_left):
|
||||
"""
|
||||
Recursive function to print a BST from a root table.
|
||||
|
||||
|
||||
>>> key = [3, 8, 9, 10, 17, 21]
|
||||
>>> root = [[0, 1, 1, 1, 1, 1], [0, 1, 1, 1, 1, 3], [0, 0, 2, 3, 3, 3], \
|
||||
[0, 0, 0, 3, 3, 3], [0, 0, 0, 0, 4, 5], [0, 0, 0, 0, 0, 5]]
|
||||
@ -73,7 +73,7 @@ def find_optimal_binary_search_tree(nodes):
|
||||
The dynamic programming algorithm below runs in O(n^2) time.
|
||||
Implemented from CLRS (Introduction to Algorithms) book.
|
||||
https://en.wikipedia.org/wiki/Introduction_to_Algorithms
|
||||
|
||||
|
||||
>>> find_optimal_binary_search_tree([Node(12, 8), Node(10, 34), Node(20, 50), \
|
||||
Node(42, 3), Node(25, 40), Node(37, 30)])
|
||||
Binary search tree nodes:
|
||||
@ -104,14 +104,15 @@ def find_optimal_binary_search_tree(nodes):
|
||||
# This 2D array stores the overall tree cost (which's as minimized as possible);
|
||||
# for a single key, cost is equal to frequency of the key.
|
||||
dp = [[freqs[i] if i == j else 0 for j in range(n)] for i in range(n)]
|
||||
# sum[i][j] stores the sum of key frequencies between i and j inclusive in nodes array
|
||||
# sum[i][j] stores the sum of key frequencies between i and j inclusive in nodes
|
||||
# array
|
||||
sum = [[freqs[i] if i == j else 0 for j in range(n)] for i in range(n)]
|
||||
# stores tree roots that will be used later for constructing binary search tree
|
||||
root = [[i if i == j else 0 for j in range(n)] for i in range(n)]
|
||||
|
||||
for l in range(2, n + 1): # l is an interval length
|
||||
for i in range(n - l + 1):
|
||||
j = i + l - 1
|
||||
for interval_length in range(2, n + 1):
|
||||
for i in range(n - interval_length + 1):
|
||||
j = i + interval_length - 1
|
||||
|
||||
dp[i][j] = sys.maxsize # set the value to "infinity"
|
||||
sum[i][j] = sum[i][j - 1] + freqs[j]
|
||||
|
@ -1,12 +1,15 @@
|
||||
# Python program to print all subset combinations of n element in given set of r element.
|
||||
# arr[] ---> Input Array
|
||||
# data[] ---> Temporary array to store current combination
|
||||
# start & end ---> Staring and Ending indexes in arr[]
|
||||
# index ---> Current index in data[]
|
||||
# r ---> Size of a combination to be printed
|
||||
|
||||
|
||||
def combination_util(arr, n, r, index, data, i):
|
||||
# Current combination is ready to be printed,
|
||||
# print it
|
||||
"""
|
||||
Current combination is ready to be printed, print it
|
||||
arr[] ---> Input Array
|
||||
data[] ---> Temporary array to store current combination
|
||||
start & end ---> Staring and Ending indexes in arr[]
|
||||
index ---> Current index in data[]
|
||||
r ---> Size of a combination to be printed
|
||||
"""
|
||||
if index == r:
|
||||
for j in range(r):
|
||||
print(data[j], end=" ")
|
||||
|
@ -15,8 +15,8 @@ def lamberts_ellipsoidal_distance(
|
||||
|
||||
Representing the earth as an ellipsoid allows us to approximate distances between points
|
||||
on the surface much better than a sphere. Ellipsoidal formulas treat the Earth as an
|
||||
oblate ellipsoid which means accounting for the flattening that happens at the North
|
||||
and South poles. Lambert's formulae provide accuracy on the order of 10 meteres over
|
||||
oblate ellipsoid which means accounting for the flattening that happens at the North
|
||||
and South poles. Lambert's formulae provide accuracy on the order of 10 meteres over
|
||||
thousands of kilometeres. Other methods can provide millimeter-level accuracy but this
|
||||
is a simpler method to calculate long range distances without increasing computational
|
||||
intensity.
|
||||
|
@ -1,5 +1,5 @@
|
||||
# Finding Articulation Points in Undirected Graph
|
||||
def computeAP(l):
|
||||
def computeAP(l): # noqa: E741
|
||||
n = len(l)
|
||||
outEdgeCount = 0
|
||||
low = [0] * n
|
||||
@ -36,12 +36,12 @@ def computeAP(l):
|
||||
isArt[i] = outEdgeCount > 1
|
||||
|
||||
for x in range(len(isArt)):
|
||||
if isArt[x] == True:
|
||||
if isArt[x] is True:
|
||||
print(x)
|
||||
|
||||
|
||||
# Adjacency list of graph
|
||||
l = {
|
||||
data = {
|
||||
0: [1, 2],
|
||||
1: [0, 2],
|
||||
2: [0, 1, 3, 5],
|
||||
@ -52,4 +52,4 @@ l = {
|
||||
7: [6, 8],
|
||||
8: [5, 7],
|
||||
}
|
||||
computeAP(l)
|
||||
computeAP(data)
|
||||
|
@ -1,3 +1,6 @@
|
||||
from collections import deque
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
# Accept No. of Nodes and edges
|
||||
n, m = map(int, input().split(" "))
|
||||
@ -72,7 +75,6 @@ def dfs(G, s):
|
||||
Q - Traversal Stack
|
||||
--------------------------------------------------------------------------------
|
||||
"""
|
||||
from collections import deque
|
||||
|
||||
|
||||
def bfs(G, s):
|
||||
@ -125,7 +127,6 @@ def dijk(G, s):
|
||||
Topological Sort
|
||||
--------------------------------------------------------------------------------
|
||||
"""
|
||||
from collections import deque
|
||||
|
||||
|
||||
def topo(G, ind=None, Q=None):
|
||||
@ -235,10 +236,10 @@ def prim(G, s):
|
||||
|
||||
def edglist():
|
||||
n, m = map(int, input().split(" "))
|
||||
l = []
|
||||
edges = []
|
||||
for i in range(m):
|
||||
l.append(map(int, input().split(" ")))
|
||||
return l, n
|
||||
edges.append(map(int, input().split(" ")))
|
||||
return edges, n
|
||||
|
||||
|
||||
"""
|
||||
|
@ -9,7 +9,7 @@ def printDist(dist, V):
|
||||
|
||||
def BellmanFord(graph: List[Dict[str, int]], V: int, E: int, src: int) -> int:
|
||||
"""
|
||||
Returns shortest paths from a vertex src to all
|
||||
Returns shortest paths from a vertex src to all
|
||||
other vertices.
|
||||
"""
|
||||
mdist = [float("inf") for i in range(V)]
|
||||
|
@ -1,6 +1,8 @@
|
||||
"""Breath First Search (BFS) can be used when finding the shortest path
|
||||
"""Breath First Search (BFS) can be used when finding the shortest path
|
||||
from a given source node to a target node in an unweighted graph.
|
||||
"""
|
||||
from typing import Dict
|
||||
|
||||
graph = {
|
||||
"A": ["B", "C", "E"],
|
||||
"B": ["A", "D", "E"],
|
||||
@ -11,8 +13,6 @@ graph = {
|
||||
"G": ["C"],
|
||||
}
|
||||
|
||||
from typing import Dict
|
||||
|
||||
|
||||
class Graph:
|
||||
def __init__(self, graph: Dict[str, str], source_vertex: str) -> None:
|
||||
@ -46,8 +46,9 @@ class Graph:
|
||||
def shortest_path(self, target_vertex: str) -> str:
|
||||
"""This shortest path function returns a string, describing the result:
|
||||
1.) No path is found. The string is a human readable message to indicate this.
|
||||
2.) The shortest path is found. The string is in the form `v1(->v2->v3->...->vn)`,
|
||||
where v1 is the source vertex and vn is the target vertex, if it exists separately.
|
||||
2.) The shortest path is found. The string is in the form
|
||||
`v1(->v2->v3->...->vn)`, where v1 is the source vertex and vn is the target
|
||||
vertex, if it exists separately.
|
||||
|
||||
>>> g = Graph(graph, "G")
|
||||
>>> g.breath_first_search()
|
||||
|
@ -1,21 +1,22 @@
|
||||
# Check whether Graph is Bipartite or Not using BFS
|
||||
|
||||
|
||||
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
|
||||
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
|
||||
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
|
||||
# or u belongs to V and v to U. We can also say that there is no edge that connects
|
||||
# vertices of same set.
|
||||
def checkBipartite(l):
|
||||
def checkBipartite(graph):
|
||||
queue = []
|
||||
visited = [False] * len(l)
|
||||
color = [-1] * len(l)
|
||||
visited = [False] * len(graph)
|
||||
color = [-1] * len(graph)
|
||||
|
||||
def bfs():
|
||||
while queue:
|
||||
u = queue.pop(0)
|
||||
visited[u] = True
|
||||
|
||||
for neighbour in l[u]:
|
||||
for neighbour in graph[u]:
|
||||
|
||||
if neighbour == u:
|
||||
return False
|
||||
@ -29,16 +30,16 @@ def checkBipartite(l):
|
||||
|
||||
return True
|
||||
|
||||
for i in range(len(l)):
|
||||
for i in range(len(graph)):
|
||||
if not visited[i]:
|
||||
queue.append(i)
|
||||
color[i] = 0
|
||||
if bfs() == False:
|
||||
if bfs() is False:
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
|
||||
# Adjacency List of graph
|
||||
l = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2]}
|
||||
print(checkBipartite(l))
|
||||
if __name__ == "__main__":
|
||||
# Adjacency List of graph
|
||||
print(checkBipartite({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2]}))
|
||||
|
@ -1,27 +1,28 @@
|
||||
# Check whether Graph is Bipartite or Not using DFS
|
||||
|
||||
|
||||
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
|
||||
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
|
||||
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
|
||||
# or u belongs to V and v to U. We can also say that there is no edge that connects
|
||||
# vertices of same set.
|
||||
def check_bipartite_dfs(l):
|
||||
visited = [False] * len(l)
|
||||
color = [-1] * len(l)
|
||||
def check_bipartite_dfs(graph):
|
||||
visited = [False] * len(graph)
|
||||
color = [-1] * len(graph)
|
||||
|
||||
def dfs(v, c):
|
||||
visited[v] = True
|
||||
color[v] = c
|
||||
for u in l[v]:
|
||||
for u in graph[v]:
|
||||
if not visited[u]:
|
||||
dfs(u, 1 - c)
|
||||
|
||||
for i in range(len(l)):
|
||||
for i in range(len(graph)):
|
||||
if not visited[i]:
|
||||
dfs(i, 0)
|
||||
|
||||
for i in range(len(l)):
|
||||
for j in l[i]:
|
||||
for i in range(len(graph)):
|
||||
for j in graph[i]:
|
||||
if color[i] == color[j]:
|
||||
return False
|
||||
|
||||
@ -29,5 +30,5 @@ def check_bipartite_dfs(l):
|
||||
|
||||
|
||||
# Adjacency list of graph
|
||||
l = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
|
||||
print(check_bipartite_dfs(l))
|
||||
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
|
||||
print(check_bipartite_dfs(graph))
|
||||
|
@ -1,6 +1,6 @@
|
||||
"""The DFS function simply calls itself recursively for every unvisited child of
|
||||
its argument. We can emulate that behaviour precisely using a stack of iterators.
|
||||
Instead of recursively calling with a node, we'll push an iterator to the node's
|
||||
"""The DFS function simply calls itself recursively for every unvisited child of
|
||||
its argument. We can emulate that behaviour precisely using a stack of iterators.
|
||||
Instead of recursively calling with a node, we'll push an iterator to the node's
|
||||
children onto the iterator stack. When the iterator at the top of the stack
|
||||
terminates, we'll pop it off the stack.
|
||||
|
||||
@ -21,7 +21,7 @@ def depth_first_search(graph: Dict, start: str) -> Set[int]:
|
||||
:param graph: directed graph in dictionary format
|
||||
:param vertex: starting vectex as a string
|
||||
:returns: the trace of the search
|
||||
>>> G = { "A": ["B", "C", "D"], "B": ["A", "D", "E"],
|
||||
>>> G = { "A": ["B", "C", "D"], "B": ["A", "D", "E"],
|
||||
... "C": ["A", "F"], "D": ["B", "D"], "E": ["B", "F"],
|
||||
... "F": ["C", "E", "G"], "G": ["F"] }
|
||||
>>> start = "A"
|
||||
|
@ -28,7 +28,7 @@ class Graph:
|
||||
|
||||
# call the recursive helper function
|
||||
for i in range(len(self.vertex)):
|
||||
if visited[i] == False:
|
||||
if visited[i] is False:
|
||||
self.DFSRec(i, visited)
|
||||
|
||||
def DFSRec(self, startVertex, visited):
|
||||
@ -39,7 +39,7 @@ class Graph:
|
||||
|
||||
# Recur for all the vertices that are adjacent to this node
|
||||
for i in self.vertex.keys():
|
||||
if visited[i] == False:
|
||||
if visited[i] is False:
|
||||
self.DFSRec(i, visited)
|
||||
|
||||
|
||||
|
@ -1,6 +1,6 @@
|
||||
"""pseudo-code"""
|
||||
|
||||
"""
|
||||
pseudo-code
|
||||
|
||||
DIJKSTRA(graph G, start vertex s, destination vertex d):
|
||||
|
||||
//all nodes initially unexplored
|
||||
@ -30,7 +30,6 @@ only the distance between previous vertex and current vertex but the entire
|
||||
distance between each vertex that makes up the path from start vertex to target
|
||||
vertex.
|
||||
"""
|
||||
|
||||
import heapq
|
||||
|
||||
|
||||
|
@ -37,7 +37,7 @@ class Dinic:
# Here we calculate the flow that reaches the sink
def max_flow(self, source, sink):
flow, self.q[0] = 0, source
for l in range(31): # l = 30 maybe faster for random data
for l in range(31): # noqa: E741 l = 30 maybe faster for random data
while True:
self.lvl, self.ptr = [0] * len(self.q), [0] * len(self.q)
qi, qe, self.lvl[source] = 0, 1, 1

@ -71,8 +71,8 @@ class DirectedGraph:
if len(stack) == 0:
return visited

# c is the count of nodes you want and if you leave it or pass -1 to the function the count
# will be random from 10 to 10000
# c is the count of nodes you want and if you leave it or pass -1 to the function
# the count will be random from 10 to 10000
def fill_graph_randomly(self, c=-1):
if c == -1:
c = (math.floor(rand.random() * 10000)) + 10

@ -168,14 +168,14 @@ class DirectedGraph:
and indirect_parents.count(__[1]) > 0
and not on_the_way_back
):
l = len(stack) - 1
while True and l >= 0:
if stack[l] == __[1]:
len_stack = len(stack) - 1
while True and len_stack >= 0:
if stack[len_stack] == __[1]:
anticipating_nodes.add(__[1])
break
else:
anticipating_nodes.add(stack[l])
l -= 1
anticipating_nodes.add(stack[len_stack])
len_stack -= 1
if visited.count(__[1]) < 1:
stack.append(__[1])
visited.append(__[1])

@ -221,15 +221,15 @@ class DirectedGraph:
and indirect_parents.count(__[1]) > 0
and not on_the_way_back
):
l = len(stack) - 1
while True and l >= 0:
if stack[l] == __[1]:
len_stack_minus_one = len(stack) - 1
while True and len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == __[1]:
anticipating_nodes.add(__[1])
break
else:
return True
anticipating_nodes.add(stack[l])
l -= 1
anticipating_nodes.add(stack[len_stack_minus_one])
len_stack_minus_one -= 1
if visited.count(__[1]) < 1:
stack.append(__[1])
visited.append(__[1])

@ -341,8 +341,8 @@ class Graph:
if len(stack) == 0:
return visited

# c is the count of nodes you want and if you leave it or pass -1 to the function the count
# will be random from 10 to 10000
# c is the count of nodes you want and if you leave it or pass -1 to the function
# the count will be random from 10 to 10000
def fill_graph_randomly(self, c=-1):
if c == -1:
c = (math.floor(rand.random() * 10000)) + 10

@ -397,14 +397,14 @@ class Graph:
and indirect_parents.count(__[1]) > 0
and not on_the_way_back
):
l = len(stack) - 1
while True and l >= 0:
if stack[l] == __[1]:
len_stack = len(stack) - 1
while True and len_stack >= 0:
if stack[len_stack] == __[1]:
anticipating_nodes.add(__[1])
break
else:
anticipating_nodes.add(stack[l])
l -= 1
anticipating_nodes.add(stack[len_stack])
len_stack -= 1
if visited.count(__[1]) < 1:
stack.append(__[1])
visited.append(__[1])

@ -450,15 +450,15 @@ class Graph:
and indirect_parents.count(__[1]) > 0
and not on_the_way_back
):
l = len(stack) - 1
while True and l >= 0:
if stack[l] == __[1]:
len_stack_minus_one = len(stack) - 1
while True and len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == __[1]:
anticipating_nodes.add(__[1])
break
else:
return True
anticipating_nodes.add(stack[l])
l -= 1
anticipating_nodes.add(stack[len_stack_minus_one])
len_stack_minus_one -= 1
if visited.count(__[1]) < 1:
stack.append(__[1])
visited.append(__[1])

@ -9,7 +9,7 @@
def dfs(u, graph, visited_edge, path=[]):
path = path + [u]
for v in graph[u]:
if visited_edge[u][v] == False:
if visited_edge[u][v] is False:
visited_edge[u][v], visited_edge[v][u] = True, True
path = dfs(v, graph, visited_edge, path)
return path
@ -1,7 +1,7 @@
# Finding Bridges in Undirected Graph
def computeBridges(l):
def computeBridges(graph):
id = 0
n = len(l) # No of vertices in graph
n = len(graph) # No of vertices in graph
low = [0] * n
visited = [False] * n

@ -9,7 +9,7 @@ def computeBridges(l):
visited[at] = True
low[at] = id
id += 1
for to in l[at]:
for to in graph[at]:
if to == parent:
pass
elif not visited[to]:

@ -28,7 +28,7 @@ def computeBridges(l):
print(bridges)

l = {
graph = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],

@ -39,4 +39,4 @@ l = {
7: [6, 8],
8: [5, 7],
}
computeBridges(l)
computeBridges(graph)

@ -19,7 +19,7 @@ edge_array = [
['ab-e1', 'ac-e3', 'bc-e4', 'bd-e2', 'bh-e12', 'cd-e2', 'df-e8', 'dh-e10'],
['ab-e1', 'ac-e3', 'ad-e5', 'bc-e4', 'bd-e2', 'cd-e2', 'ce-e4', 'de-e1', 'df-e8',
'dg-e5', 'ef-e3', 'eg-e2', 'fg-e6']
]
]
# fmt: on
@ -1,10 +1,10 @@
# Finding longest distance in Directed Acyclic Graph using KahnsAlgorithm
def longestDistance(l):
indegree = [0] * len(l)
def longestDistance(graph):
indegree = [0] * len(graph)
queue = []
longDist = [1] * len(l)
longDist = [1] * len(graph)

for key, values in l.items():
for key, values in graph.items():
for i in values:
indegree[i] += 1

@ -14,7 +14,7 @@ def longestDistance(l):

while queue:
vertex = queue.pop(0)
for x in l[vertex]:
for x in graph[vertex]:
indegree[x] -= 1

if longDist[vertex] + 1 > longDist[x]:

@ -27,5 +27,5 @@ def longestDistance(l):

# Adjacency list of Graph
l = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longestDistance(l)
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longestDistance(graph)
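The indegree/queue bookkeeping shown above can be sketched compactly; this is an illustrative version of longest path length in a DAG, not the repository file:

def longest_distance(graph):
    # Longest path length (counted in vertices), processed in topological order.
    indegree = {v: 0 for v in graph}
    for neighbours in graph.values():
        for v in neighbours:
            indegree[v] += 1
    queue = [v for v, d in indegree.items() if d == 0]
    long_dist = {v: 1 for v in graph}
    while queue:
        u = queue.pop(0)
        for v in graph[u]:
            long_dist[v] = max(long_dist[v], long_dist[u] + 1)
            indegree[v] -= 1
            if indegree[v] == 0:
                queue.append(v)
    return max(long_dist.values())

print(longest_distance({0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}))  # 5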
@ -1,11 +1,14 @@
# Kahn's Algorithm is used to find Topological ordering of Directed Acyclic Graph using BFS
def topologicalSort(l):
indegree = [0] * len(l)
def topologicalSort(graph):
"""
Kahn's Algorithm is used to find Topological ordering of Directed Acyclic Graph
using BFS
"""
indegree = [0] * len(graph)
queue = []
topo = []
cnt = 0

for key, values in l.items():
for key, values in graph.items():
for i in values:
indegree[i] += 1

@ -17,17 +20,17 @@ def topologicalSort(l):
vertex = queue.pop(0)
cnt += 1
topo.append(vertex)
for x in l[vertex]:
for x in graph[vertex]:
indegree[x] -= 1
if indegree[x] == 0:
queue.append(x)

if cnt != len(l):
if cnt != len(graph):
print("Cycle exists")
else:
print(topo)

# Adjacency List of Graph
l = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topologicalSort(l)
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topologicalSort(graph)
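For reference, a minimal sketch of Kahn's algorithm as described in that docstring (illustrative names, not the repository's exact code):

from collections import deque

def topological_sort(graph):
    # Repeatedly remove vertices with indegree zero; a leftover vertex means a cycle.
    indegree = {v: 0 for v in graph}
    for neighbours in graph.values():
        for v in neighbours:
            indegree[v] += 1
    queue = deque(v for v, d in indegree.items() if d == 0)
    order = []
    while queue:
        u = queue.popleft()
        order.append(u)
        for v in graph[u]:
            indegree[v] -= 1
            if indegree[v] == 0:
                queue.append(v)
    return order if len(order) == len(graph) else None   # None signals a cycle

print(topological_sort({0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}))  # [0, 1, 2, 3, 4, 5]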
@ -2,7 +2,7 @@ import sys
from collections import defaultdict

def PrimsAlgorithm(l):
def PrimsAlgorithm(l): # noqa: E741

nodePosition = []

@ -109,7 +109,7 @@ if __name__ == "__main__":
e = int(input("Enter number of edges: ").strip())
adjlist = defaultdict(list)
for x in range(e):
l = [int(x) for x in input().strip().split()]
l = [int(x) for x in input().strip().split()] # noqa: E741
adjlist[l[0]].append([l[1], l[2]])
adjlist[l[1]].append([l[0], l[2]])
print(PrimsAlgorithm(adjlist))

@ -79,24 +79,23 @@ def reset():
machine_time = 0

#######################################
if __name__ == "__main__":
# Initialization
reset()

# Initialization
reset()
# Pushing Data (Input)
import random

# Pushing Data (Input)
import random
message = random.sample(range(0xFFFFFFFF), 100)
for chunk in message:
push(chunk)

message = random.sample(range(0xFFFFFFFF), 100)
for chunk in message:
push(chunk)
# for controlling
inp = ""

# for controlling
inp = ""

# Pulling Data (Output)
while inp in ("e", "E"):
print("%s" % format(pull(), "#04x"))
print(buffer_space)
print(params_space)
inp = input("(e)exit? ").strip()
# Pulling Data (Output)
while inp in ("e", "E"):
print("%s" % format(pull(), "#04x"))
print(buffer_space)
print(params_space)
inp = input("(e)exit? ").strip()
@ -47,6 +47,7 @@
# Imports
import numpy as np

# Functions of binary conversion--------------------------------------
def text_to_bits(text, encoding="utf-8", errors="surrogatepass"):
"""

@ -27,10 +27,10 @@ import random
class Vector:
"""
This class represents a vector of arbitrary size.
You need to give the vector components.

You need to give the vector components.

Overview about the methods:

constructor(components : list) : init the vector
set(components : list) : changes the vector components.
__str__() : toString method

@ -124,7 +124,7 @@ class Vector:

def __mul__(self, other):
"""
mul implements the scalar multiplication
mul implements the scalar multiplication
and the dot-product
"""
if isinstance(other, float) or isinstance(other, int):

@ -167,7 +167,7 @@ def zeroVector(dimension):

def unitBasisVector(dimension, pos):
"""
returns a unit basis vector with a One
returns a unit basis vector with a One
at index 'pos' (indexing at 0)
"""
# precondition

@ -196,7 +196,7 @@ def randomVector(N, a, b):
"""
input: size (N) of the vector.
random range (a,b)
output: returns a random vector of size N, with
output: returns a random vector of size N, with
random integer components between 'a' and 'b'.
"""
random.seed(None)

@ -208,10 +208,10 @@ class Matrix:
"""
class: Matrix
This class represents a arbitrary matrix.

Overview about the methods:

__str__() : returns a string representation

__str__() : returns a string representation
operator * : implements the matrix vector multiplication
implements the matrix-scalar multiplication.
changeComponent(x,y,value) : changes the specified component.

@ -19,7 +19,7 @@ class Test(unittest.TestCase):
x = Vector([1, 2, 3])
self.assertEqual(x.component(0), 1)
self.assertEqual(x.component(2), 3)
y = Vector()
_ = Vector()

def test_str(self):
"""
@ -11,9 +11,11 @@ Python:
Inputs:
- X , a 2D numpy array of features.
- k , number of clusters to create.
- initial_centroids , initial centroid values generated by utility function(mentioned in usage).
- initial_centroids , initial centroid values generated by utility function(mentioned
in usage).
- maxiter , maximum number of iterations to process.
- heterogeneity , empty list that will be filled with hetrogeneity values if passed to kmeans func.
- heterogeneity , empty list that will be filled with hetrogeneity values if passed
to kmeans func.

Usage:
1. define 'k' value, 'X' features array and 'hetrogeneity' empty list

@ -22,7 +24,8 @@ Usage:
initial_centroids = get_initial_centroids(
X,
k,
seed=0 # seed value for initial centroid generation, None for randomness(default=None)
seed=0 # seed value for initial centroid generation,
# None for randomness(default=None)
)

3. find centroids and clusters using kmeans function.

@ -37,7 +40,8 @@ Usage:
)

4. Plot the loss function, hetrogeneity values for every iteration saved in hetrogeneity list.
4. Plot the loss function, hetrogeneity values for every iteration saved in
hetrogeneity list.
plot_heterogeneity(
heterogeneity,
k

@ -46,8 +50,9 @@ Usage:
5. Have fun..

"""
from sklearn.metrics import pairwise_distances
import numpy as np
from matplotlib import pyplot as plt
from sklearn.metrics import pairwise_distances

TAG = "K-MEANS-CLUST/ "

@ -118,9 +123,6 @@ def compute_heterogeneity(data, k, centroids, cluster_assignment):
return heterogeneity

from matplotlib import pyplot as plt

def plot_heterogeneity(heterogeneity, k):
plt.figure(figsize=(7, 4))
plt.plot(heterogeneity, linewidth=4)

@ -136,9 +138,11 @@ def kmeans(
):
"""This function runs k-means on given data and initial set of centroids.
maxiter: maximum number of iterations to run.(default=500)
record_heterogeneity: (optional) a list, to store the history of heterogeneity as function of iterations
record_heterogeneity: (optional) a list, to store the history of heterogeneity
as function of iterations
if None, do not store the history.
verbose: if True, print how many data points changed their cluster labels in each iteration"""
verbose: if True, print how many data points changed their cluster labels in
each iteration"""
centroids = initial_centroids[:]
prev_cluster_assignment = None

@ -149,7 +153,8 @@ def kmeans(
# 1. Make cluster assignments using nearest centroids
cluster_assignment = assign_clusters(data, centroids)

# 2. Compute a new centroid for each of the k clusters, averaging all data points assigned to that cluster.
# 2. Compute a new centroid for each of the k clusters, averaging all data
# points assigned to that cluster.
centroids = revise_centroids(data, k, cluster_assignment)

# Check for convergence: if none of the assignments changed, stop
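The assign/revise loop described in those comments can be sketched in a few lines of numpy; this is an illustration under assumed array shapes, not the module's own assign_clusters/revise_centroids API:

import numpy as np

def mini_kmeans(data, centroids, maxiter=500):
    for _ in range(maxiter):
        # 1. assign every point to its nearest centroid
        distances = np.linalg.norm(data[:, None, :] - centroids[None, :, :], axis=2)
        assignment = distances.argmin(axis=1)
        # 2. move each centroid to the mean of its assigned points
        new_centroids = np.array(
            [data[assignment == j].mean(axis=0) for j in range(len(centroids))]
        )
        if np.allclose(new_centroids, centroids):   # converged
            break
        centroids = new_centroids
    return centroids, assignment

X = np.array([[0.0, 0.0], [0.1, 0.2], [5.0, 5.0], [5.2, 4.9]])
print(mini_kmeans(X, X[[0, 2]]))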
@ -186,7 +186,8 @@ def predict_y_values(
>>> means = [5.011267842911003, 10.011267842911003, 15.011267842911002]
>>> variance = 0.9618530973487494
>>> probabilities = [0.3333333333333333, 0.3333333333333333, 0.3333333333333333]
>>> predict_y_values(x_items, means, variance, probabilities) # doctest: +NORMALIZE_WHITESPACE
>>> predict_y_values(x_items, means, variance,
... probabilities) # doctest: +NORMALIZE_WHITESPACE
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2]

@ -211,7 +212,7 @@ def predict_y_values(
# appending discriminant values of each item to 'results' list
results.append(temp)

return [l.index(max(l)) for l in results]
return [result.index(max(result)) for result in results]

# Calculating Accuracy
@ -1,5 +1,12 @@
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.linear_model import LinearRegression

# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split

# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures

# Importing the dataset
dataset = pd.read_csv(

@ -9,16 +16,9 @@ X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values

# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression

poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
@ -14,6 +14,7 @@ import numpy as np
and types of data
"""

# Mean Absolute Error
def mae(predict, actual):
"""

@ -9,7 +9,7 @@ def aliquot_sum(input_num: int) -> int:
@return: the aliquot sum of input_num, if input_num is positive.
Otherwise, raise a ValueError
Wikipedia Explanation: https://en.wikipedia.org/wiki/Aliquot_sum

>>> aliquot_sum(15)
9
>>> aliquot_sum(6)

@ -4,8 +4,8 @@ from typing import List
def allocation_num(number_of_bytes: int, partitions: int) -> List[str]:
"""
Divide a number of bytes into x partitions.

In a multi-threaded download, this algorithm could be used to provide

In a multi-threaded download, this algorithm could be used to provide
each worker thread with a block of non-overlapping bytes to download.
For example:
for i in allocation_list:
@ -1,16 +1,16 @@
def bailey_borwein_plouffe(digit_position: int, precision: int = 1000) -> str:
"""
Implement a popular pi-digit-extraction algorithm known as the
Implement a popular pi-digit-extraction algorithm known as the
Bailey-Borwein-Plouffe (BBP) formula to calculate the nth hex digit of pi.
Wikipedia page:
https://en.wikipedia.org/wiki/Bailey%E2%80%93Borwein%E2%80%93Plouffe_formula
@param digit_position: a positive integer representing the position of the digit to extract.
@param digit_position: a positive integer representing the position of the digit to extract.
The digit immediately after the decimal point is located at position 1.
@param precision: number of terms in the second summation to calculate.
A higher number reduces the chance of an error but increases the runtime.
@return: a hexadecimal digit representing the digit at the nth position
in pi's decimal expansion.

>>> "".join(bailey_borwein_plouffe(i) for i in range(1, 11))
'243f6a8885'
>>> bailey_borwein_plouffe(5, 10000)

@ -59,11 +59,11 @@ def _subsum(
# only care about first digit of fractional part; don't need decimal
"""
Private helper function to implement the summation
functionality.
functionality.
@param digit_pos_to_extract: digit position to extract
@param denominator_addend: added to denominator of fractions in the formula
@param precision: same as precision in main function
@return: floating-point number whose integer part is not important
@return: floating-point number whose integer part is not important
"""
sum = 0.0
for sum_index in range(digit_pos_to_extract + precision):
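For context, this is the BBP series itself; summing it in ordinary floats only approximates pi, whereas the file above extracts individual hex digits with modular arithmetic, so the sketch below is an illustration of the formula rather than the digit-extraction trick:

def bbp_pi(terms: int = 10) -> float:
    # pi = sum over k >= 0 of 16^-k * (4/(8k+1) - 2/(8k+4) - 1/(8k+5) - 1/(8k+6))
    return sum(
        (4 / (8 * k + 1) - 2 / (8 * k + 4) - 1 / (8 * k + 5) - 1 / (8 * k + 6)) / 16 ** k
        for k in range(terms)
    )

print(bbp_pi())   # 3.141592653589793 to double precision after ~10 terms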
@ -18,8 +18,9 @@ def collatz_sequence(n: int) -> List[int]:
Traceback (most recent call last):
...
Exception: Sequence only defined for natural numbers
>>> collatz_sequence(43)
[43, 130, 65, 196, 98, 49, 148, 74, 37, 112, 56, 28, 14, 7, 22, 11, 34, 17, 52, 26, 13, 40, 20, 10, 5, 16, 8, 4, 2, 1]
>>> collatz_sequence(43) # doctest: +NORMALIZE_WHITESPACE
[43, 130, 65, 196, 98, 49, 148, 74, 37, 112, 56, 28, 14, 7,
22, 11, 34, 17, 52, 26, 13, 40, 20, 10, 5, 16, 8, 4, 2, 1]
"""

if not isinstance(n, int) or n < 1:
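A minimal sketch of the sequence generator behind that doctest (illustrative names):

def collatz(n: int) -> list:
    # Halve even numbers and map odd n to 3n + 1 until reaching 1.
    sequence = [n]
    while n != 1:
        n = n // 2 if n % 2 == 0 else 3 * n + 1
        sequence.append(n)
    return sequence

print(collatz(43)[:5])   # [43, 130, 65, 196, 98]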
@ -6,7 +6,7 @@ def find_max(nums, left, right):
:param left: index of first element
:param right: index of last element
:return: max in nums

>>> nums = [1, 3, 5, 7, 9, 2, 4, 6, 8, 10]
>>> find_max(nums, 0, len(nums) - 1) == max(nums)
True
@ -6,8 +6,8 @@ from numpy import inf
def gamma(num: float) -> float:
"""
https://en.wikipedia.org/wiki/Gamma_function
In mathematics, the gamma function is one commonly
used extension of the factorial function to complex numbers.
In mathematics, the gamma function is one commonly
used extension of the factorial function to complex numbers.
The gamma function is defined for all complex numbers except the non-positive integers

@ -16,7 +16,7 @@ def gamma(num: float) -> float:
...
ValueError: math domain error

>>> gamma(0)
Traceback (most recent call last):

@ -27,12 +27,12 @@ def gamma(num: float) -> float:
>>> gamma(9)
40320.0

>>> from math import gamma as math_gamma
>>> from math import gamma as math_gamma
>>> all(gamma(i)/math_gamma(i) <= 1.000000001 and abs(gamma(i)/math_gamma(i)) > .99999999 for i in range(1, 50))
True

>>> from math import gamma as math_gamma
>>> from math import gamma as math_gamma
>>> gamma(-1)/math_gamma(-1) <= 1.000000001
Traceback (most recent call last):
...

@ -40,7 +40,7 @@ def gamma(num: float) -> float:

>>> from math import gamma as math_gamma
>>> gamma(3.3) - math_gamma(3.3) <= 0.00000001
>>> gamma(3.3) - math_gamma(3.3) <= 0.00000001
True
"""
@ -12,7 +12,7 @@ def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> int:
"""
>>> gaussian(1)
0.24197072451914337

>>> gaussian(24)
3.342714441794458e-126

@ -25,7 +25,7 @@ def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> int:
1.33830226e-04, 1.48671951e-06, 6.07588285e-09, 9.13472041e-12,
5.05227108e-15, 1.02797736e-18, 7.69459863e-23, 2.11881925e-27,
2.14638374e-32, 7.99882776e-38, 1.09660656e-43])

>>> gaussian(15)
5.530709549844416e-50
@ -13,12 +13,12 @@ def is_square_free(factors: List[int]) -> bool:
returns True if the factors are square free.
>>> is_square_free([1, 1, 2, 3, 4])
False

These are wrong but should return some value
it simply checks for repition in the numbers.
>>> is_square_free([1, 3, 4, 'sd', 0.0])
True

>>> is_square_free([1, 0.5, 2, 0.0])
True
>>> is_square_free([1, 2, 2, 5])
@ -1,15 +1,15 @@
def kthPermutation(k, n):
"""
Finds k'th lexicographic permutation (in increasing order) of
Finds k'th lexicographic permutation (in increasing order) of
0,1,2,...n-1 in O(n^2) time.

Examples:
First permutation is always 0,1,2,...n
>>> kthPermutation(0,5)
[0, 1, 2, 3, 4]

The order of permutation of 0,1,2,3 is [0,1,2,3], [0,1,3,2], [0,2,1,3],
[0,2,3,1], [0,3,1,2], [0,3,2,1], [1,0,2,3], [1,0,3,2], [1,2,0,3],
[0,2,3,1], [0,3,1,2], [0,3,2,1], [1,0,2,3], [1,0,3,2], [1,2,0,3],
[1,2,3,0], [1,3,0,2]
>>> kthPermutation(10,4)
[1, 3, 0, 2]
@ -1,12 +1,12 @@
"""
In mathematics, the Lucas–Lehmer test (LLT) is a primality test for Mersenne numbers.
https://en.wikipedia.org/wiki/Lucas%E2%80%93Lehmer_primality_test

A Mersenne number is a number that is one less than a power of two.
That is M_p = 2^p - 1
https://en.wikipedia.org/wiki/Mersenne_prime

The Lucas–Lehmer test is the primality test used by the

The Lucas–Lehmer test is the primality test used by the
Great Internet Mersenne Prime Search (GIMPS) to locate large primes.
"""

@ -17,10 +17,10 @@ def lucas_lehmer_test(p: int) -> bool:
"""
>>> lucas_lehmer_test(p=7)
True

>>> lucas_lehmer_test(p=11)
False

# M_11 = 2^11 - 1 = 2047 = 23 * 89
"""
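A small sketch of the test itself, assuming p is an odd prime (illustrative, not the file's exact code):

def lucas_lehmer(p: int) -> bool:
    # M_p = 2^p - 1 is prime iff s_(p-2) == 0, where s_0 = 4 and s_k = s_(k-1)^2 - 2 (mod M_p).
    if p == 2:
        return True
    mersenne = 2 ** p - 1
    s = 4
    for _ in range(p - 2):
        s = (s * s - 2) % mersenne
    return s == 0

print(lucas_lehmer(7), lucas_lehmer(11))   # True False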
@ -4,7 +4,7 @@ import timeit

"""
Matrix Exponentiation is a technique to solve linear recurrences in logarithmic time.
You read more about it here:
You read more about it here:
http://zobayer.blogspot.com/2010/11/matrix-exponentiation.html
https://www.hackerearth.com/practice/notes/matrix-exponentiation-1/
"""

@ -1,6 +1,6 @@
"""
Modular Exponential.
Modular exponentiation is a type of exponentiation performed over a modulus.
Modular exponentiation is a type of exponentiation performed over a modulus.
For more explanation, please check https://en.wikipedia.org/wiki/Modular_exponentiation
"""
@ -45,13 +45,13 @@ def area_under_curve_estimator(
) -> float:
"""
An implementation of the Monte Carlo method to find area under
a single variable non-negative real-valued continuous function,
say f(x), where x lies within a continuous bounded interval,
say [min_value, max_value], where min_value and max_value are
a single variable non-negative real-valued continuous function,
say f(x), where x lies within a continuous bounded interval,
say [min_value, max_value], where min_value and max_value are
finite numbers
1. Let x be a uniformly distributed random variable between min_value to
1. Let x be a uniformly distributed random variable between min_value to
max_value
2. Expected value of f(x) =
2. Expected value of f(x) =
(integrate f(x) from min_value to max_value)/(max_value - min_value)
3. Finding expected value of f(x):
a. Repeatedly draw x from uniform distribution
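A compact sketch of those steps (uniform sampling, averaging, then scaling by the interval length; illustrative only):

import random

def area_under_curve(f, min_value, max_value, iterations=100_000):
    # E[f(X)] * (max_value - min_value) approximates the integral for X ~ U(min_value, max_value).
    samples = (f(random.uniform(min_value, max_value)) for _ in range(iterations))
    return (max_value - min_value) * sum(samples) / iterations

print(area_under_curve(lambda x: x * x, 0.0, 3.0))   # close to 9.0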
@ -24,7 +24,7 @@ def newton_raphson(f, x0=0, maxiter=100, step=0.0001, maxerror=1e-6, logsteps=Fa
a = x0 # set the initial guess
steps = [a]
error = abs(f(a))
f1 = lambda x: calc_derivative(f, x, h=step) # Derivative of f(x)
f1 = lambda x: calc_derivative(f, x, h=step) # noqa: E731 Derivative of f(x)
for _ in range(maxiter):
if f1(a) == 0:
raise ValueError("No converging solution found")

@ -44,7 +44,7 @@ def newton_raphson(f, x0=0, maxiter=100, step=0.0001, maxerror=1e-6, logsteps=Fa
if __name__ == "__main__":
import matplotlib.pyplot as plt

f = lambda x: m.tanh(x) ** 2 - m.exp(3 * x)
f = lambda x: m.tanh(x) ** 2 - m.exp(3 * x) # noqa: E731
solution, error, steps = newton_raphson(
f, x0=10, maxiter=1000, step=1e-6, logsteps=True
)
@ -7,7 +7,7 @@ from typing import List

def prime_factors(n: int) -> List[int]:
"""
Returns prime factors of n as a list.

>>> prime_factors(0)
[]
>>> prime_factors(100)
@ -1,11 +1,13 @@
# flake8: noqa

"""
Sieve of Eratosthenes

Input : n =10
Output : 2 3 5 7
Output: 2 3 5 7

Input : n = 20
Output: 2 3 5 7 11 13 17 19
Output: 2 3 5 7 11 13 17 19

you can read in detail about this at
https://en.wikipedia.org/wiki/Sieve_of_Eratosthenes
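A minimal sieve sketch matching that input/output description (illustrative names):

def prime_sieve(n: int) -> list:
    # Mark every multiple of each prime as composite, starting from p * p.
    is_prime = [True] * (n + 1)
    is_prime[0:2] = [False, False]
    for p in range(2, int(n ** 0.5) + 1):
        if is_prime[p]:
            for multiple in range(p * p, n + 1, p):
                is_prime[multiple] = False
    return [i for i, flag in enumerate(is_prime) if flag]

print(prime_sieve(10))   # [2, 3, 5, 7]
print(prime_sieve(20))   # [2, 3, 5, 7, 11, 13, 17, 19]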
@ -14,8 +14,8 @@ def radians(degree: float) -> float:
4.782202150464463
>>> radians(109.82)
1.9167205845401725

>>> from math import radians as math_radians

>>> from math import radians as math_radians
>>> all(abs(radians(i)-math_radians(i)) <= 0.00000001 for i in range(-2, 361))
True
"""
@ -12,36 +12,36 @@ class FFT:

Reference:
https://en.wikipedia.org/wiki/Cooley%E2%80%93Tukey_FFT_algorithm#The_radix-2_DIT_case

For polynomials of degree m and n the algorithms has complexity

For polynomials of degree m and n the algorithms has complexity
O(n*logn + m*logm)

The main part of the algorithm is split in two parts:
1) __DFT: We compute the discrete fourier transform (DFT) of A and B using a
bottom-up dynamic approach -
1) __DFT: We compute the discrete fourier transform (DFT) of A and B using a
bottom-up dynamic approach -
2) __multiply: Once we obtain the DFT of A*B, we can similarly
invert it to obtain A*B

The class FFT takes two polynomials A and B with complex coefficients as arguments;
The class FFT takes two polynomials A and B with complex coefficients as arguments;
The two polynomials should be represented as a sequence of coefficients starting
from the free term. Thus, for instance x + 2*x^3 could be represented as
[0,1,0,2] or (0,1,0,2). The constructor adds some zeros at the end so that the
polynomials have the same length which is a power of 2 at least the length of
their product.

from the free term. Thus, for instance x + 2*x^3 could be represented as
[0,1,0,2] or (0,1,0,2). The constructor adds some zeros at the end so that the
polynomials have the same length which is a power of 2 at least the length of
their product.

Example:

Create two polynomials as sequences
>>> A = [0, 1, 0, 2] # x+2x^3
>>> B = (2, 3, 4, 0) # 2+3x+4x^2

Create an FFT object with them
>>> x = FFT(A, B)

Print product
>>> print(x.product) # 2x + 3x^2 + 8x^3 + 4x^4 + 6x^5
[(-0+0j), (2+0j), (3+0j), (8+0j), (6+0j), (8+0j)]

__str__ test
>>> print(x)
A = 0*x^0 + 1*x^1 + 2*x^0 + 3*x^2
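The same convolution-by-DFT idea can be sketched with numpy's FFT; this uses numpy.fft rather than the class's own radix-2 implementation, so it is only an illustration of the technique:

import numpy as np

def multiply_polynomials(a, b):
    # Pointwise multiplication in the frequency domain equals convolution of the coefficients.
    size = len(a) + len(b) - 1
    fa = np.fft.fft(a, size)
    fb = np.fft.fft(b, size)
    return np.real(np.fft.ifft(fa * fb)).round(10)

print(multiply_polynomials([0, 1, 0, 2], [2, 3, 4, 0]))   # ~[0. 2. 3. 8. 6. 8. 0.]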
@ -16,7 +16,7 @@ import math
def sieve(n):
"""
Returns a list with all prime numbers up to n.

>>> sieve(50)
[2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47]
>>> sieve(25)

@ -31,7 +31,7 @@ def sieve(n):
[]
"""

l = [True] * (n + 1)
l = [True] * (n + 1) # noqa: E741
prime = []
start = 2
end = int(math.sqrt(n))
@ -24,10 +24,10 @@ def square_root_iterative(
"""
Square root is aproximated using Newtons method.
https://en.wikipedia.org/wiki/Newton%27s_method

>>> all(abs(square_root_iterative(i)-math.sqrt(i)) <= .00000000000001 for i in range(0, 500))
True

>>> square_root_iterative(-1)
Traceback (most recent call last):
...
@ -11,7 +11,7 @@ class Matrix:

Example:
>>> a = Matrix(2, 3, 1)
>>> a
>>> a
Matrix consist of 2 rows and 3 columns
[1, 1, 1]
[1, 1, 1]

@ -186,10 +186,10 @@ class Matrix:

Example:
>>> a = Matrix(2, 3)
>>> for r in range(2):
>>> for r in range(2):
... for c in range(3):
... a[r,c] = r*c
...
...
>>> a.transpose()
Matrix consist of 3 rows and 2 columns
[0, 0]

@ -209,14 +209,14 @@ class Matrix:
Apply Sherman-Morrison formula in O(n^2).
To learn this formula, please look this: https://en.wikipedia.org/wiki/Sherman%E2%80%93Morrison_formula
This method returns (A + uv^T)^(-1) where A^(-1) is self. Returns None if it's impossible to calculate.
Warning: This method doesn't check if self is invertible.
Warning: This method doesn't check if self is invertible.
Make sure self is invertible before execute this method.

Example:
>>> ainv = Matrix(3, 3, 0)
>>> for i in range(3): ainv[i,i] = 1
...
>>> u = Matrix(3, 1, 0)
...
>>> u = Matrix(3, 1, 0)
>>> u[0,0], u[1,0], u[2,0] = 1, 2, -3
>>> v = Matrix(3, 1, 0)
>>> v[0,0], v[1,0], v[2,0] = 4, -2, 5
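For reference, the formula that method applies, sketched with numpy column vectors (illustrative and independent of the Matrix class above):

import numpy as np

def sherman_morrison(a_inv, u, v):
    # (A + u v^T)^-1 = A^-1 - (A^-1 u v^T A^-1) / (1 + v^T A^-1 u)
    denominator = 1.0 + v.T @ a_inv @ u
    if np.isclose(denominator, 0.0):
        return None                     # the rank-one update is not invertible
    return a_inv - (a_inv @ u @ v.T @ a_inv) / denominator

a_inv = np.eye(3)                       # A^-1 for A = identity
u = np.array([[1.0], [2.0], [-3.0]])
v = np.array([[4.0], [-2.0], [5.0]])
print(sherman_morrison(a_inv, u, v))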
@ -16,7 +16,7 @@ def BFS(graph, s, t, parent):
while queue:
u = queue.pop(0)
for ind in range(len(graph[u])):
if visited[ind] == False and graph[u][ind] > 0:
if visited[ind] is False and graph[u][ind] > 0:
queue.append(ind)
visited[ind] = True
parent[ind] = u

@ -19,7 +19,7 @@ def BFS(graph, s, t, parent):
while queue:
u = queue.pop(0)
for ind in range(len(graph[u])):
if visited[ind] == False and graph[u][ind] > 0:
if visited[ind] is False and graph[u][ind] > 0:
queue.append(ind)
visited[ind] = True
parent[ind] = u
@ -1,7 +1,9 @@
"""The following implementation assumes that the activities
# flake8: noqa

"""The following implementation assumes that the activities
are already sorted according to their finish time"""

"""Prints a maximum set of activities that can be done by a
"""Prints a maximum set of activities that can be done by a
single person, one at a time"""
# n --> Total number of activities
# start[]--> An array that contains start time of all activities

@ -10,8 +12,8 @@ single person, one at a time"""

def printMaxActivities(start, finish):
"""
>>> start = [1, 3, 0, 5, 8, 5]
>>> finish = [2, 4, 6, 7, 9, 9]
>>> start = [1, 3, 0, 5, 8, 5]
>>> finish = [2, 4, 6, 7, 9, 9]
>>> printMaxActivities(start, finish)
The following activities are selected:
0 1 3 4
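A minimal sketch of that greedy rule (pick the first activity, then every activity whose start is not before the last chosen finish); illustrative only:

def max_activities(start, finish):
    # Assumes activities are already sorted by finish time.
    selected = [0]
    last_finish = finish[0]
    for i in range(1, len(start)):
        if start[i] >= last_finish:
            selected.append(i)
            last_finish = finish[i]
    return selected

print(max_activities([1, 3, 0, 5, 8, 5], [2, 4, 6, 7, 9, 9]))   # [0, 1, 3, 4]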
@ -1,4 +1,7 @@
import collections, pprint, time, os
import collections
import os
import pprint
import time

start_time = time.time()
print("creating word list...")

@ -55,6 +55,7 @@ def isEnglish(message, wordPercentage=20, letterPercentage=85):
return wordsMatch and lettersMatch

import doctest
if __name__ == "__main__":
import doctest

doctest.testmod()
doctest.testmod()
@ -145,7 +145,7 @@ class BankersAlgorithm:
Process 5 is executing.
Updated available resource stack for processes: 8 5 9 7
The process is in a safe state.
<BLANKLINE>
<BLANKLINE>
"""
need_list = self.__need()
alloc_resources_table = self.__allocated_resources_table

@ -1,4 +1,4 @@
"""Conway's Game Of Life, Author Anurag Kumar(mailto:anuragkumarak95@gmail.com)
"""Conway's Game Of Life, Author Anurag Kumar(mailto:anuragkumarak95@gmail.com)

Requirements:
- numpy

@ -13,7 +13,7 @@ Usage:
- $python3 game_o_life <canvas_size:int>

Game-Of-Life Rules:

1.
Any live cell with fewer than two live neighbours
dies, as if caused by under-population.

@ -27,8 +27,10 @@ Game-Of-Life Rules:
Any dead cell with exactly three live neighbours be-
comes a live cell, as if by reproduction.
"""
import random
import sys

import numpy as np
import random, sys
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
@ -43,14 +43,14 @@ def simpson_integration(function, a: float, b: float, precision: int = 4) -> flo

Returns:
result : the value of the approximated integration of function in range a to b

Raises:
AssertionError: function is not callable
AssertionError: a is not float or integer
AssertionError: function should return float or integer
AssertionError: b is not float or integer
AssertionError: precision is not positive integer

>>> simpson_integration(lambda x : x*x,1,2,3)
2.333

@ -72,7 +72,7 @@ def simpson_integration(function, a: float, b: float, precision: int = 4) -> flo
Traceback (most recent call last):
...
AssertionError: the function(object) passed should be callable your input : wrong_input

>>> simpson_integration(lambda x : x*x,3.45,3.2,1)
-2.8

@ -85,7 +85,7 @@ def simpson_integration(function, a: float, b: float, precision: int = 4) -> flo
Traceback (most recent call last):
...
AssertionError: precision should be positive integer your input : -1

"""
assert callable(
function
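A short sketch of composite Simpson's rule behind those doctests (parameter names and the fixed subinterval count are illustrative, not the file's implementation):

def simpson_rule(function, a: float, b: float, precision: int = 4) -> float:
    # Composite Simpson's rule with n even subintervals: h/3 * (f0 + 4*f1 + 2*f2 + ... + fn)
    n = 1000
    h = (b - a) / n
    total = function(a) + function(b)
    for i in range(1, n):
        total += function(a + i * h) * (4 if i % 2 else 2)
    return round(total * h / 3, precision)

print(simpson_rule(lambda x: x * x, 1, 2, 3))   # 2.333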
@ -1,5 +1,6 @@
# Python program for generating diamond pattern in Python 3.7+

# Function to print upper half of diamond (pyramid)
def floyd(n):
"""

@ -44,9 +44,9 @@ def function(expansion, s0, s1, key, message):
right = message[4:]
temp = apply_table(right, expansion)
temp = XOR(temp, key)
l = apply_sbox(s0, temp[:4])
l = apply_sbox(s0, temp[:4]) # noqa: E741
r = apply_sbox(s1, temp[4:])
l = "0" * (2 - len(l)) + l
l = "0" * (2 - len(l)) + l # noqa: E741
r = "0" * (2 - len(r)) + r
temp = apply_table(l + r, p4_table)
temp = XOR(left, temp)
@ -48,7 +48,7 @@ def solution(n):
"""
try:
n = int(n)
except (TypeError, ValueError) as e:
except (TypeError, ValueError):
raise TypeError("Parameter n must be int or passive of cast to int.")
if n <= 0:
raise ValueError("Parameter n must be greater or equal to one.")

@ -50,7 +50,7 @@ def solution(n):
"""
try:
n = int(n)
except (TypeError, ValueError) as e:
except (TypeError, ValueError):
raise TypeError("Parameter n must be int or passive of cast to int.")
if n <= 0:
raise ValueError("Parameter n must be greater or equal to one.")

@ -37,7 +37,7 @@ def solution(n):
"""
try:
n = int(n)
except (TypeError, ValueError) as e:
except (TypeError, ValueError):
raise TypeError("Parameter n must be int or passive of cast to int.")
if n <= 0:
raise ValueError("Parameter n must be greater or equal to one.")
Some files were not shown because too many files have changed in this diff