Tighten up psf/black and flake8 (#2024)

* Tighten up psf/black and flake8

* Fix some tests

* Fix some E741

* Fix some E741

* updating DIRECTORY.md

Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com>
This commit is contained in:
Christian Clauss 2020-05-22 08:10:11 +02:00 committed by GitHub
parent 21ed8968c0
commit 1f8a21d727
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
124 changed files with 583 additions and 495 deletions

View File

@ -4,13 +4,13 @@ language: python
python: 3.8
cache: pip
before_install: pip install --upgrade pip setuptools six
install: pip install -r requirements.txt
install: pip install black flake8
before_script:
- black --check . || true
- IGNORE=E123,E203,E265,E266,E302,E401,E402,E712,E731,E741,E743,F811,F841,W291,W293,W503
- flake8 . --count --ignore=$IGNORE --max-complexity=25 --max-line-length=127 --show-source --statistics
script:
- black --check .
- flake8 --ignore=E203,W503 --max-complexity=25 --max-line-length=120 --statistics --count .
- scripts/validate_filenames.py # no uppercase, no spaces, in a directory
- pip install -r requirements.txt # fast fail on black, flake8, validate_filenames
script:
- mypy --ignore-missing-imports .
- pytest --doctest-modules --cov-report=term-missing:skip-covered --cov=. .
after_success:

View File

@ -222,6 +222,7 @@
* [Bellman Ford](https://github.com/TheAlgorithms/Python/blob/master/graphs/bellman_ford.py)
* [Bfs](https://github.com/TheAlgorithms/Python/blob/master/graphs/bfs.py)
* [Bfs Shortest Path](https://github.com/TheAlgorithms/Python/blob/master/graphs/bfs_shortest_path.py)
* [Bidirectional A Star](https://github.com/TheAlgorithms/Python/blob/master/graphs/bidirectional_a_star.py)
* [Breadth First Search](https://github.com/TheAlgorithms/Python/blob/master/graphs/breadth_first_search.py)
* [Breadth First Search Shortest Path](https://github.com/TheAlgorithms/Python/blob/master/graphs/breadth_first_search_shortest_path.py)
* [Check Bipartite Graph Bfs](https://github.com/TheAlgorithms/Python/blob/master/graphs/check_bipartite_graph_bfs.py)
@ -242,6 +243,7 @@
* [Graph List](https://github.com/TheAlgorithms/Python/blob/master/graphs/graph_list.py)
* [Graph Matrix](https://github.com/TheAlgorithms/Python/blob/master/graphs/graph_matrix.py)
* [Graphs Floyd Warshall](https://github.com/TheAlgorithms/Python/blob/master/graphs/graphs_floyd_warshall.py)
* [Greedy Best First](https://github.com/TheAlgorithms/Python/blob/master/graphs/greedy_best_first.py)
* [Kahns Algorithm Long](https://github.com/TheAlgorithms/Python/blob/master/graphs/kahns_algorithm_long.py)
* [Kahns Algorithm Topo](https://github.com/TheAlgorithms/Python/blob/master/graphs/kahns_algorithm_topo.py)
* [Minimum Spanning Tree Kruskal](https://github.com/TheAlgorithms/Python/blob/master/graphs/minimum_spanning_tree_kruskal.py)
@ -409,6 +411,7 @@
* [Fischer Yates Shuffle](https://github.com/TheAlgorithms/Python/blob/master/other/fischer_yates_shuffle.py)
* [Frequency Finder](https://github.com/TheAlgorithms/Python/blob/master/other/frequency_finder.py)
* [Game Of Life](https://github.com/TheAlgorithms/Python/blob/master/other/game_of_life.py)
* [Gauss Easter](https://github.com/TheAlgorithms/Python/blob/master/other/gauss_easter.py)
* [Greedy](https://github.com/TheAlgorithms/Python/blob/master/other/greedy.py)
* [Integeration By Simpson Approx](https://github.com/TheAlgorithms/Python/blob/master/other/integeration_by_simpson_approx.py)
* [Largest Subarray Sum](https://github.com/TheAlgorithms/Python/blob/master/other/largest_subarray_sum.py)

View File

@ -2,6 +2,7 @@
import math
# for calculating u value
def ucal(u, p):
"""

View File

@ -1,9 +1,12 @@
#!/usr/bin/env python3
def decrypt_caesar_with_chi_squared(
ciphertext: str,
cipher_alphabet=None,
frequencies_dict=None,
case_sensetive: bool = False,
) -> list:
) -> tuple:
"""
Basic Usage
===========
@ -96,15 +99,19 @@ def decrypt_caesar_with_chi_squared(
Further Reading
================
* http://practicalcryptography.com/cryptanalysis/text-characterisation/chi-squared-statistic/
* http://practicalcryptography.com/cryptanalysis/text-characterisation/chi-squared-
statistic/
* https://en.wikipedia.org/wiki/Letter_frequency
* https://en.wikipedia.org/wiki/Chi-squared_test
* https://en.m.wikipedia.org/wiki/Caesar_cipher
Doctests
========
>>> decrypt_caesar_with_chi_squared('dof pz aol jhlzhy jpwoly zv wvwbshy? pa pz avv lhzf av jyhjr!')
(7, 3129.228005747531, 'why is the caesar cipher so popular? it is too easy to crack!')
>>> decrypt_caesar_with_chi_squared(
... 'dof pz aol jhlzhy jpwoly zv wvwbshy? pa pz avv lhzf av jyhjr!'
... ) # doctest: +NORMALIZE_WHITESPACE
(7, 3129.228005747531,
'why is the caesar cipher so popular? it is too easy to crack!')
>>> decrypt_caesar_with_chi_squared('crybd cdbsxq')
(10, 233.35343938980898, 'short string')
@ -172,7 +179,7 @@ def decrypt_caesar_with_chi_squared(
# Append the character if it isn't in the alphabet
decrypted_with_shift += letter
chi_squared_statistic = 0
chi_squared_statistic = 0.0
# Loop through each letter in the decoded message with the shift
for letter in decrypted_with_shift:
@ -181,7 +188,8 @@ def decrypt_caesar_with_chi_squared(
# Get the amount of times the letter occurs in the message
occurrences = decrypted_with_shift.count(letter)
# Get the expected amount of times the letter should appear based on letter frequencies
# Get the expected amount of times the letter should appear based
# on letter frequencies
expected = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
@ -194,7 +202,8 @@ def decrypt_caesar_with_chi_squared(
# Get the amount of times the letter occurs in the message
occurrences = decrypted_with_shift.count(letter)
# Get the expected amount of times the letter should appear based on letter frequencies
# Get the expected amount of times the letter should appear based
# on letter frequencies
expected = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
@ -209,7 +218,8 @@ def decrypt_caesar_with_chi_squared(
decrypted_with_shift,
]
# Get the most likely cipher by finding the cipher with the smallest chi squared statistic
# Get the most likely cipher by finding the cipher with the smallest chi squared
# statistic
most_likely_cipher = min(
chi_squared_statistic_values, key=chi_squared_statistic_values.get
)

View File

@ -1,7 +1,9 @@
import os
import random
import sys
import rabin_miller as rabinMiller, cryptomath_module as cryptoMath
import cryptomath_module as cryptoMath
import rabin_miller as rabinMiller
min_primitive_root = 3

View File

@ -25,7 +25,7 @@ def mixed_keyword(key="college", pt="UNIVERSITY"):
for i in key:
if i not in temp:
temp.append(i)
l = len(temp)
len_temp = len(temp)
# print(temp)
alpha = []
modalpha = []
@ -40,17 +40,17 @@ def mixed_keyword(key="college", pt="UNIVERSITY"):
k = 0
for i in range(r):
t = []
for j in range(l):
for j in range(len_temp):
t.append(temp[k])
if not (k < 25):
break
k += 1
modalpha.append(t)
# print(modalpha)
d = dict()
d = {}
j = 0
k = 0
for j in range(l):
for j in range(len_temp):
for i in modalpha:
if not (len(i) - 1 >= j):
break

View File

@ -1,4 +1,5 @@
import sys, random
import random
import sys
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"

View File

@ -1,4 +1,7 @@
import time, os, sys
import os
import sys
import time
import transposition_cipher as transCipher

View File

@ -276,13 +276,13 @@ class AVLtree:
if __name__ == "__main__":
t = AVLtree()
t.traversale()
l = list(range(10))
random.shuffle(l)
for i in l:
lst = list(range(10))
random.shuffle(lst)
for i in lst:
t.insert(i)
t.traversale()
random.shuffle(l)
for i in l:
random.shuffle(lst)
for i in lst:
t.del_node(i)
t.traversale()

View File

@ -1,4 +1,9 @@
class Node: # This is the Class Node with a constructor that contains data variable to type data and left, right pointers.
class Node:
"""
This is the Class Node with a constructor that contains data variable to type data
and left, right pointers.
"""
def __init__(self, data):
self.data = data
self.left = None

View File

@ -16,8 +16,8 @@ class SegmentTree:
def right(self, idx):
return idx * 2 + 1
def build(self, idx, l, r, A):
if l == r:
def build(self, idx, l, r, A): # noqa: E741
if l == r: # noqa: E741
self.st[idx] = A[l - 1]
else:
mid = (l + r) // 2
@ -25,14 +25,16 @@ class SegmentTree:
self.build(self.right(idx), mid + 1, r, A)
self.st[idx] = max(self.st[self.left(idx)], self.st[self.right(idx)])
# update with O(lg N) (Normal segment tree without lazy update will take O(Nlg N) for each update)
def update(
self, idx, l, r, a, b, val
): # update(1, 1, N, a, b, v) for update val v to [a,b]
if self.flag[idx] == True:
# update with O(lg N) (Normal segment tree without lazy update will take O(Nlg N)
# for each update)
def update(self, idx, l, r, a, b, val): # noqa: E741
"""
update(1, 1, N, a, b, v) for update val v to [a,b]
"""
if self.flag[idx] is True:
self.st[idx] = self.lazy[idx]
self.flag[idx] = False
if l != r:
if l != r: # noqa: E741
self.lazy[self.left(idx)] = self.lazy[idx]
self.lazy[self.right(idx)] = self.lazy[idx]
self.flag[self.left(idx)] = True
@ -40,9 +42,9 @@ class SegmentTree:
if r < a or l > b:
return True
if l >= a and r <= b:
if l >= a and r <= b: # noqa: E741
self.st[idx] = val
if l != r:
if l != r: # noqa: E741
self.lazy[self.left(idx)] = val
self.lazy[self.right(idx)] = val
self.flag[self.left(idx)] = True
@ -55,18 +57,21 @@ class SegmentTree:
return True
# query with O(lg N)
def query(self, idx, l, r, a, b): # query(1, 1, N, a, b) for query max of [a,b]
if self.flag[idx] == True:
def query(self, idx, l, r, a, b): # noqa: E741
"""
query(1, 1, N, a, b) for query max of [a,b]
"""
if self.flag[idx] is True:
self.st[idx] = self.lazy[idx]
self.flag[idx] = False
if l != r:
if l != r: # noqa: E741
self.lazy[self.left(idx)] = self.lazy[idx]
self.lazy[self.right(idx)] = self.lazy[idx]
self.flag[self.left(idx)] = True
self.flag[self.right(idx)] = True
if r < a or l > b:
return -math.inf
if l >= a and r <= b:
if l >= a and r <= b: # noqa: E741
return self.st[idx]
mid = (l + r) // 2
q1 = self.query(self.left(idx), l, mid, a, b)

View File

@ -1,6 +1,7 @@
"""
A non-recursive Segment Tree implementation with range query and single element update,
works virtually with any list of the same type of elements with a "commutative" combiner.
works virtually with any list of the same type of elements with a "commutative"
combiner.
Explanation:
https://www.geeksforgeeks.org/iterative-segment-tree-range-minimum-query/
@ -22,7 +23,8 @@ https://www.geeksforgeeks.org/segment-tree-efficient-implementation/
>>> st.update(4, 1)
>>> st.query(3, 4)
0
>>> st = SegmentTree([[1, 2, 3], [3, 2, 1], [1, 1, 1]], lambda a, b: [a[i] + b[i] for i in range(len(a))])
>>> st = SegmentTree([[1, 2, 3], [3, 2, 1], [1, 1, 1]], lambda a, b: [a[i] + b[i] for i
... in range(len(a))])
>>> st.query(0, 1)
[4, 4, 4]
>>> st.query(1, 2)
@ -47,7 +49,8 @@ class SegmentTree:
>>> SegmentTree(['a', 'b', 'c'], lambda a, b: '{}{}'.format(a, b)).query(0, 2)
'abc'
>>> SegmentTree([(1, 2), (2, 3), (3, 4)], lambda a, b: (a[0] + b[0], a[1] + b[1])).query(0, 2)
>>> SegmentTree([(1, 2), (2, 3), (3, 4)],
... lambda a, b: (a[0] + b[0], a[1] + b[1])).query(0, 2)
(6, 9)
"""
self.N = len(arr)
@ -78,7 +81,7 @@ class SegmentTree:
p = p // 2
self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])
def query(self, l: int, r: int) -> T:
def query(self, l: int, r: int) -> T: # noqa: E741
"""
Get range query value in log(N) time
:param l: left element index
@ -95,9 +98,9 @@ class SegmentTree:
>>> st.query(2, 3)
7
"""
l, r = l + self.N, r + self.N
l, r = l + self.N, r + self.N # noqa: E741
res = None
while l <= r:
while l <= r: # noqa: E741
if l % 2 == 1:
res = self.st[l] if res is None else self.fn(res, self.st[l])
if r % 2 == 0:

View File

@ -15,8 +15,8 @@ class SegmentTree:
def right(self, idx):
return idx * 2 + 1
def build(self, idx, l, r):
if l == r:
def build(self, idx, l, r): # noqa: E741
if l == r: # noqa: E741
self.st[idx] = A[l]
else:
mid = (l + r) // 2
@ -27,12 +27,13 @@ class SegmentTree:
def update(self, a, b, val):
return self.update_recursive(1, 0, self.N - 1, a - 1, b - 1, val)
def update_recursive(
self, idx, l, r, a, b, val
): # update(1, 1, N, a, b, v) for update val v to [a,b]
def update_recursive(self, idx, l, r, a, b, val): # noqa: E741
"""
update(1, 1, N, a, b, v) for update val v to [a,b]
"""
if r < a or l > b:
return True
if l == r:
if l == r: # noqa: E741
self.st[idx] = val
return True
mid = (l + r) // 2
@ -44,12 +45,13 @@ class SegmentTree:
def query(self, a, b):
return self.query_recursive(1, 0, self.N - 1, a - 1, b - 1)
def query_recursive(
self, idx, l, r, a, b
): # query(1, 1, N, a, b) for query max of [a,b]
def query_recursive(self, idx, l, r, a, b): # noqa: E741
"""
query(1, 1, N, a, b) for query max of [a,b]
"""
if r < a or l > b:
return -math.inf
if l >= a and r <= b:
if l >= a and r <= b: # noqa: E741
return self.st[idx]
mid = (l + r) // 2
q1 = self.query_recursive(self.left(idx), l, mid, a, b)

View File

@ -1,3 +1,5 @@
# flake8: noqa
from random import random
from typing import Tuple
@ -161,7 +163,8 @@ def main():
"""After each command, program prints treap"""
root = None
print(
"enter numbers to create a tree, + value to add value into treap, - value to erase all nodes with value. 'q' to quit. "
"enter numbers to create a tree, + value to add value into treap, "
"- value to erase all nodes with value. 'q' to quit. "
)
args = input()

View File

@ -1,3 +1,5 @@
# flake8: noqa
"""
Binomial Heap
Reference: Advanced Data Structures, Peter Brass

View File

@ -66,7 +66,7 @@ class MinHeap:
# this is min-heapify method
def sift_down(self, idx, array):
while True:
l = self.get_left_child_idx(idx)
l = self.get_left_child_idx(idx) # noqa: E741
r = self.get_right_child_idx(idx)
smallest = idx
@ -132,7 +132,7 @@ class MinHeap:
self.sift_up(self.idx_of_element[node])
## USAGE
# USAGE
r = Node("R", -1)
b = Node("B", 6)

View File

@ -84,7 +84,7 @@ class LinkedDeque(_DoublyLinkedBase):
raise Exception("List is empty")
return self._trailer._prev._data
### DEque Insert Operations (At the front, At the end) ###
# DEque Insert Operations (At the front, At the end)
def add_first(self, element):
""" insertion in the front
@ -100,7 +100,7 @@ class LinkedDeque(_DoublyLinkedBase):
"""
return self._insert(self._trailer._prev, element, self._trailer)
### DEque Remove Operations (At the front, At the end) ###
# DEque Remove Operations (At the front, At the end)
def remove_first(self):
""" removal from the front

View File

@ -22,7 +22,7 @@ import operator as op
def Solve(Postfix):
Stack = []
Div = lambda x, y: int(x / y) # integer division operation
Div = lambda x, y: int(x / y) # noqa: E731 integer division operation
Opr = {
"^": op.pow,
"*": op.mul,
@ -38,29 +38,27 @@ def Solve(Postfix):
for x in Postfix:
if x.isdigit(): # if x in digit
Stack.append(x) # append x to stack
print(
x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(Stack), sep=" | "
) # output in tabular format
# output in tabular format
print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(Stack), sep=" | ")
else:
B = Stack.pop() # pop stack
print(
"".rjust(8), ("pop(" + B + ")").ljust(12), ",".join(Stack), sep=" | "
) # output in tabular format
# output in tabular format
print("".rjust(8), ("pop(" + B + ")").ljust(12), ",".join(Stack), sep=" | ")
A = Stack.pop() # pop stack
print(
"".rjust(8), ("pop(" + A + ")").ljust(12), ",".join(Stack), sep=" | "
) # output in tabular format
# output in tabular format
print("".rjust(8), ("pop(" + A + ")").ljust(12), ",".join(Stack), sep=" | ")
Stack.append(
str(Opr[x](int(A), int(B)))
) # evaluate the 2 values popped from stack & push result to stack
# output in tabular format
print(
x.rjust(8),
("push(" + A + x + B + ")").ljust(12),
",".join(Stack),
sep=" | ",
) # output in tabular format
)
return int(Stack[0])

View File

@ -6,20 +6,22 @@
# Imports
import numpy as np
# Class implemented to calculate the index
class IndexCalculation:
"""
# Class Summary
This algorithm consists in calculating vegetation indices, these indices
can be used for precision agriculture for example (or remote sensing). There are
functions to define the data and to calculate the implemented indices.
This algorithm consists in calculating vegetation indices, these
indices can be used for precision agriculture for example (or remote
sensing). There are functions to define the data and to calculate the
implemented indices.
# Vegetation index
https://en.wikipedia.org/wiki/Vegetation_Index
A Vegetation Index (VI) is a spectral transformation of two or more bands designed
to enhance the contribution of vegetation properties and allow reliable spatial and
temporal inter-comparisons of terrestrial photosynthetic activity and canopy
structural variations
A Vegetation Index (VI) is a spectral transformation of two or more bands
designed to enhance the contribution of vegetation properties and allow
reliable spatial and temporal inter-comparisons of terrestrial
photosynthetic activity and canopy structural variations
# Information about channels (Wavelength range for each)
* nir - near-infrared
@ -84,17 +86,19 @@ class IndexCalculation:
#"NDRE" -- redEdge, nir
#list of all index implemented
#allIndex = ["ARVI2", "CCCI", "CVI", "GLI", "NDVI", "BNDVI", "redEdgeNDVI", "GNDVI",
"GBNDVI", "GRNDVI", "RBNDVI", "PNDVI", "ATSAVI", "BWDRVI", "CIgreen",
"CIrededge", "CI", "CTVI", "GDVI", "EVI", "GEMI", "GOSAVI", "GSAVI",
"Hue", "IVI", "IPVI", "I", "RVI", "MRVI", "MSAVI", "NormG", "NormNIR",
"NormR", "NGRDI", "RI", "S", "IF", "DVI", "TVI", "NDRE"]
#allIndex = ["ARVI2", "CCCI", "CVI", "GLI", "NDVI", "BNDVI", "redEdgeNDVI",
"GNDVI", "GBNDVI", "GRNDVI", "RBNDVI", "PNDVI", "ATSAVI",
"BWDRVI", "CIgreen", "CIrededge", "CI", "CTVI", "GDVI", "EVI",
"GEMI", "GOSAVI", "GSAVI", "Hue", "IVI", "IPVI", "I", "RVI",
"MRVI", "MSAVI", "NormG", "NormNIR", "NormR", "NGRDI", "RI",
"S", "IF", "DVI", "TVI", "NDRE"]
#list of index with not blue channel
#notBlueIndex = ["ARVI2", "CCCI", "CVI", "NDVI", "redEdgeNDVI", "GNDVI", "GRNDVI",
"ATSAVI", "CIgreen", "CIrededge", "CTVI", "GDVI", "GEMI", "GOSAVI",
"GSAVI", "IVI", "IPVI", "RVI", "MRVI", "MSAVI", "NormG", "NormNIR",
"NormR", "NGRDI", "RI", "DVI", "TVI", "NDRE"]
#notBlueIndex = ["ARVI2", "CCCI", "CVI", "NDVI", "redEdgeNDVI", "GNDVI",
"GRNDVI", "ATSAVI", "CIgreen", "CIrededge", "CTVI", "GDVI",
"GEMI", "GOSAVI", "GSAVI", "IVI", "IPVI", "RVI", "MRVI",
"MSAVI", "NormG", "NormNIR", "NormR", "NGRDI", "RI", "DVI",
"TVI", "NDRE"]
#list of index just with RGB channels
#RGBIndex = ["GLI", "CI", "Hue", "I", "NGRDI", "RI", "S", "IF"]
@ -213,8 +217,8 @@ class IndexCalculation:
def NDVI(self):
"""
Normalized Difference self.nir/self.red Normalized Difference Vegetation Index,
Calibrated NDVI - CDVI
Normalized Difference self.nir/self.red Normalized Difference Vegetation
Index, Calibrated NDVI - CDVI
https://www.indexdatabase.de/db/i-single.php?id=58
:return: index
"""
@ -410,7 +414,7 @@ class IndexCalculation:
"""
return (self.nir / ((self.nir + self.red) / 2)) * (self.NDVI() + 1)
def I(self):
def I(self): # noqa: E741,E743
"""
Intensity
https://www.indexdatabase.de/db/i-single.php?id=36
@ -472,7 +476,8 @@ class IndexCalculation:
def NGRDI(self):
"""
Normalized Difference self.green/self.red Normalized self.green self.red
difference index, Visible Atmospherically Resistant Indices self.green (VIself.green)
difference index, Visible Atmospherically Resistant Indices self.green
(VIself.green)
https://www.indexdatabase.de/db/i-single.php?id=390
:return: index
"""
@ -556,9 +561,12 @@ indexValue_form2 = cl.CCCI()
indexValue_form3 = cl.calculation("CCCI", red=red, green=green, blue=blue,
redEdge=redEdge, nir=nir).astype(np.float64)
print("Form 1: "+np.array2string(indexValue_form1, precision=20, separator=', ', floatmode='maxprec_equal'))
print("Form 2: "+np.array2string(indexValue_form2, precision=20, separator=', ', floatmode='maxprec_equal'))
print("Form 3: "+np.array2string(indexValue_form3, precision=20, separator=', ', floatmode='maxprec_equal'))
print("Form 1: "+np.array2string(indexValue_form1, precision=20, separator=', ',
floatmode='maxprec_equal'))
print("Form 2: "+np.array2string(indexValue_form2, precision=20, separator=', ',
floatmode='maxprec_equal'))
print("Form 3: "+np.array2string(indexValue_form3, precision=20, separator=', ',
floatmode='maxprec_equal'))
# A list of examples results for different type of data at NDVI
# float16 -> 0.31567383 #NDVI (red = 50, nir = 100)

View File

@ -1,5 +1,5 @@
def merge(a, b, m, e):
l = a[b : m + 1]
l = a[b : m + 1] # noqa: E741
r = a[m + 1 : e + 1]
k = b
i = 0

View File

@ -26,9 +26,9 @@ def factorial(num):
# factorial of num
# uncomment the following to see how recalculations are avoided
##result=[-1]*10
##result[0]=result[1]=1
##print(factorial(5))
# result=[-1]*10
# result[0]=result[1]=1
# print(factorial(5))
# print(factorial(3))
# print(factorial(7))

View File

@ -76,7 +76,7 @@ if __name__ == "__main__":
expected_subseq = "GTAB"
ln, subseq = longest_common_subsequence(a, b)
## print("len =", ln, ", sub-sequence =", subseq)
print("len =", ln, ", sub-sequence =", subseq)
import doctest
doctest.testmod()

View File

@ -1,11 +1,14 @@
"""
Author : Mehdi ALAOUI
This is a pure Python implementation of Dynamic Programming solution to the longest increasing subsequence of a given sequence.
This is a pure Python implementation of Dynamic Programming solution to the longest
increasing subsequence of a given sequence.
The problem is :
Given an array, to find the longest and increasing sub-array in that given array and return it.
Example: [10, 22, 9, 33, 21, 50, 41, 60, 80] as input will return [10, 22, 33, 41, 60, 80] as output
Given an array, to find the longest and increasing sub-array in that given array and
return it.
Example: [10, 22, 9, 33, 21, 50, 41, 60, 80] as input will return
[10, 22, 33, 41, 60, 80] as output
"""
from typing import List
@ -21,11 +24,13 @@ def longest_subsequence(array: List[int]) -> List[int]: # This function is recu
[8]
>>> longest_subsequence([1, 1, 1])
[1, 1, 1]
>>> longest_subsequence([])
[]
"""
array_length = len(array)
if (
array_length <= 1
): # If the array contains only one element, we return it (it's the stop condition of recursion)
# If the array contains only one element, we return it (it's the stop condition of
# recursion)
if array_length <= 1:
return array
# Else
pivot = array[0]

View File

@ -1,19 +1,19 @@
#############################
# Author: Aravind Kashyap
# File: lis.py
# comments: This programme outputs the Longest Strictly Increasing Subsequence in O(NLogN)
# Where N is the Number of elements in the list
# comments: This programme outputs the Longest Strictly Increasing Subsequence in
# O(NLogN) Where N is the Number of elements in the list
#############################
from typing import List
def CeilIndex(v, l, r, key):
def CeilIndex(v, l, r, key): # noqa: E741
while r - l > 1:
m = (l + r) // 2
if v[m] >= key:
r = m
else:
l = m
l = m # noqa: E741
return r
@ -23,7 +23,8 @@ def LongestIncreasingSubsequenceLength(v: List[int]) -> int:
6
>>> LongestIncreasingSubsequenceLength([])
0
>>> LongestIncreasingSubsequenceLength([0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3, 11, 7, 15])
>>> LongestIncreasingSubsequenceLength([0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3,
... 11, 7, 15])
6
>>> LongestIncreasingSubsequenceLength([5, 4, 3, 2, 1])
1

View File

@ -23,7 +23,7 @@ def findMin(arr):
dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
for j in range(int(s / 2), -1, -1):
if dp[n][j] == True:
if dp[n][j] is True:
diff = s - 2 * j
break

View File

@ -104,14 +104,15 @@ def find_optimal_binary_search_tree(nodes):
# This 2D array stores the overall tree cost (which's as minimized as possible);
# for a single key, cost is equal to frequency of the key.
dp = [[freqs[i] if i == j else 0 for j in range(n)] for i in range(n)]
# sum[i][j] stores the sum of key frequencies between i and j inclusive in nodes array
# sum[i][j] stores the sum of key frequencies between i and j inclusive in nodes
# array
sum = [[freqs[i] if i == j else 0 for j in range(n)] for i in range(n)]
# stores tree roots that will be used later for constructing binary search tree
root = [[i if i == j else 0 for j in range(n)] for i in range(n)]
for l in range(2, n + 1): # l is an interval length
for i in range(n - l + 1):
j = i + l - 1
for interval_length in range(2, n + 1):
for i in range(n - interval_length + 1):
j = i + interval_length - 1
dp[i][j] = sys.maxsize # set the value to "infinity"
sum[i][j] = sum[i][j - 1] + freqs[j]

View File

@ -1,12 +1,15 @@
# Python program to print all subset combinations of n element in given set of r element.
# arr[] ---> Input Array
# data[] ---> Temporary array to store current combination
# start & end ---> Starting and Ending indexes in arr[]
# index ---> Current index in data[]
# r ---> Size of a combination to be printed
def combination_util(arr, n, r, index, data, i):
# Current combination is ready to be printed,
# print it
"""
Current combination is ready to be printed, print it
arr[] ---> Input Array
data[] ---> Temporary array to store current combination
start & end ---> Starting and Ending indexes in arr[]
index ---> Current index in data[]
r ---> Size of a combination to be printed
"""
if index == r:
for j in range(r):
print(data[j], end=" ")

View File

@ -1,5 +1,5 @@
# Finding Articulation Points in Undirected Graph
def computeAP(l):
def computeAP(l): # noqa: E741
n = len(l)
outEdgeCount = 0
low = [0] * n
@ -36,12 +36,12 @@ def computeAP(l):
isArt[i] = outEdgeCount > 1
for x in range(len(isArt)):
if isArt[x] == True:
if isArt[x] is True:
print(x)
# Adjacency list of graph
l = {
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
@ -52,4 +52,4 @@ l = {
7: [6, 8],
8: [5, 7],
}
computeAP(l)
computeAP(data)

View File

@ -1,3 +1,6 @@
from collections import deque
if __name__ == "__main__":
# Accept No. of Nodes and edges
n, m = map(int, input().split(" "))
@ -72,7 +75,6 @@ def dfs(G, s):
Q - Traversal Stack
--------------------------------------------------------------------------------
"""
from collections import deque
def bfs(G, s):
@ -125,7 +127,6 @@ def dijk(G, s):
Topological Sort
--------------------------------------------------------------------------------
"""
from collections import deque
def topo(G, ind=None, Q=None):
@ -235,10 +236,10 @@ def prim(G, s):
def edglist():
n, m = map(int, input().split(" "))
l = []
edges = []
for i in range(m):
l.append(map(int, input().split(" ")))
return l, n
edges.append(map(int, input().split(" ")))
return edges, n
"""

View File

@ -1,6 +1,8 @@
"""Breath First Search (BFS) can be used when finding the shortest path
from a given source node to a target node in an unweighted graph.
"""
from typing import Dict
graph = {
"A": ["B", "C", "E"],
"B": ["A", "D", "E"],
@ -11,8 +13,6 @@ graph = {
"G": ["C"],
}
from typing import Dict
class Graph:
def __init__(self, graph: Dict[str, str], source_vertex: str) -> None:
@ -46,8 +46,9 @@ class Graph:
def shortest_path(self, target_vertex: str) -> str:
"""This shortest path function returns a string, describing the result:
1.) No path is found. The string is a human readable message to indicate this.
2.) The shortest path is found. The string is in the form `v1(->v2->v3->...->vn)`,
where v1 is the source vertex and vn is the target vertex, if it exists separately.
2.) The shortest path is found. The string is in the form
`v1(->v2->v3->...->vn)`, where v1 is the source vertex and vn is the target
vertex, if it exists separately.
>>> g = Graph(graph, "G")
>>> g.breath_first_search()

View File

@ -1,21 +1,22 @@
# Check whether Graph is Bipartite or Not using BFS
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def checkBipartite(l):
def checkBipartite(graph):
queue = []
visited = [False] * len(l)
color = [-1] * len(l)
visited = [False] * len(graph)
color = [-1] * len(graph)
def bfs():
while queue:
u = queue.pop(0)
visited[u] = True
for neighbour in l[u]:
for neighbour in graph[u]:
if neighbour == u:
return False
@ -29,16 +30,16 @@ def checkBipartite(l):
return True
for i in range(len(l)):
for i in range(len(graph)):
if not visited[i]:
queue.append(i)
color[i] = 0
if bfs() == False:
if bfs() is False:
return False
return True
if __name__ == "__main__":
# Adjacency List of graph
l = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2]}
print(checkBipartite(l))
print(checkBipartite({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2]}))

View File

@ -1,27 +1,28 @@
# Check whether Graph is Bipartite or Not using DFS
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(l):
visited = [False] * len(l)
color = [-1] * len(l)
def check_bipartite_dfs(graph):
visited = [False] * len(graph)
color = [-1] * len(graph)
def dfs(v, c):
visited[v] = True
color[v] = c
for u in l[v]:
for u in graph[v]:
if not visited[u]:
dfs(u, 1 - c)
for i in range(len(l)):
for i in range(len(graph)):
if not visited[i]:
dfs(i, 0)
for i in range(len(l)):
for j in l[i]:
for i in range(len(graph)):
for j in graph[i]:
if color[i] == color[j]:
return False
@ -29,5 +30,5 @@ def check_bipartite_dfs(l):
# Adjacency list of graph
l = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(l))
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))

View File

@ -28,7 +28,7 @@ class Graph:
# call the recursive helper function
for i in range(len(self.vertex)):
if visited[i] == False:
if visited[i] is False:
self.DFSRec(i, visited)
def DFSRec(self, startVertex, visited):
@ -39,7 +39,7 @@ class Graph:
# Recur for all the vertices that are adjacent to this node
for i in self.vertex.keys():
if visited[i] == False:
if visited[i] is False:
self.DFSRec(i, visited)

View File

@ -1,6 +1,6 @@
"""pseudo-code"""
"""
pseudo-code
DIJKSTRA(graph G, start vertex s, destination vertex d):
//all nodes initially unexplored
@ -30,7 +30,6 @@ only the distance between previous vertex and current vertex but the entire
distance between each vertex that makes up the path from start vertex to target
vertex.
"""
import heapq

View File

@ -37,7 +37,7 @@ class Dinic:
# Here we calculate the flow that reaches the sink
def max_flow(self, source, sink):
flow, self.q[0] = 0, source
for l in range(31): # l = 30 maybe faster for random data
for l in range(31): # noqa: E741 l = 30 maybe faster for random data
while True:
self.lvl, self.ptr = [0] * len(self.q), [0] * len(self.q)
qi, qe, self.lvl[source] = 0, 1, 1

View File

@ -71,8 +71,8 @@ class DirectedGraph:
if len(stack) == 0:
return visited
# c is the count of nodes you want and if you leave it or pass -1 to the function the count
# will be random from 10 to 10000
# c is the count of nodes you want and if you leave it or pass -1 to the function
# the count will be random from 10 to 10000
def fill_graph_randomly(self, c=-1):
if c == -1:
c = (math.floor(rand.random() * 10000)) + 10
@ -168,14 +168,14 @@ class DirectedGraph:
and indirect_parents.count(__[1]) > 0
and not on_the_way_back
):
l = len(stack) - 1
while True and l >= 0:
if stack[l] == __[1]:
len_stack = len(stack) - 1
while True and len_stack >= 0:
if stack[len_stack] == __[1]:
anticipating_nodes.add(__[1])
break
else:
anticipating_nodes.add(stack[l])
l -= 1
anticipating_nodes.add(stack[len_stack])
len_stack -= 1
if visited.count(__[1]) < 1:
stack.append(__[1])
visited.append(__[1])
@ -221,15 +221,15 @@ class DirectedGraph:
and indirect_parents.count(__[1]) > 0
and not on_the_way_back
):
l = len(stack) - 1
while True and l >= 0:
if stack[l] == __[1]:
len_stack_minus_one = len(stack) - 1
while True and len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == __[1]:
anticipating_nodes.add(__[1])
break
else:
return True
anticipating_nodes.add(stack[l])
l -= 1
anticipating_nodes.add(stack[len_stack_minus_one])
len_stack_minus_one -= 1
if visited.count(__[1]) < 1:
stack.append(__[1])
visited.append(__[1])
@ -341,8 +341,8 @@ class Graph:
if len(stack) == 0:
return visited
# c is the count of nodes you want and if you leave it or pass -1 to the function the count
# will be random from 10 to 10000
# c is the count of nodes you want and if you leave it or pass -1 to the function
# the count will be random from 10 to 10000
def fill_graph_randomly(self, c=-1):
if c == -1:
c = (math.floor(rand.random() * 10000)) + 10
@ -397,14 +397,14 @@ class Graph:
and indirect_parents.count(__[1]) > 0
and not on_the_way_back
):
l = len(stack) - 1
while True and l >= 0:
if stack[l] == __[1]:
len_stack = len(stack) - 1
while True and len_stack >= 0:
if stack[len_stack] == __[1]:
anticipating_nodes.add(__[1])
break
else:
anticipating_nodes.add(stack[l])
l -= 1
anticipating_nodes.add(stack[len_stack])
len_stack -= 1
if visited.count(__[1]) < 1:
stack.append(__[1])
visited.append(__[1])
@ -450,15 +450,15 @@ class Graph:
and indirect_parents.count(__[1]) > 0
and not on_the_way_back
):
l = len(stack) - 1
while True and l >= 0:
if stack[l] == __[1]:
len_stack_minus_one = len(stack) - 1
while True and len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == __[1]:
anticipating_nodes.add(__[1])
break
else:
return True
anticipating_nodes.add(stack[l])
l -= 1
anticipating_nodes.add(stack[len_stack_minus_one])
len_stack_minus_one -= 1
if visited.count(__[1]) < 1:
stack.append(__[1])
visited.append(__[1])

View File

@ -9,7 +9,7 @@
def dfs(u, graph, visited_edge, path=[]):
path = path + [u]
for v in graph[u]:
if visited_edge[u][v] == False:
if visited_edge[u][v] is False:
visited_edge[u][v], visited_edge[v][u] = True, True
path = dfs(v, graph, visited_edge, path)
return path

View File

@ -1,7 +1,7 @@
# Finding Bridges in Undirected Graph
def computeBridges(l):
def computeBridges(graph):
id = 0
n = len(l) # No of vertices in graph
n = len(graph) # No of vertices in graph
low = [0] * n
visited = [False] * n
@ -9,7 +9,7 @@ def computeBridges(l):
visited[at] = True
low[at] = id
id += 1
for to in l[at]:
for to in graph[at]:
if to == parent:
pass
elif not visited[to]:
@ -28,7 +28,7 @@ def computeBridges(l):
print(bridges)
l = {
graph = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
@ -39,4 +39,4 @@ l = {
7: [6, 8],
8: [5, 7],
}
computeBridges(l)
computeBridges(graph)

View File

@ -1,10 +1,10 @@
# Finding longest distance in Directed Acyclic Graph using KahnsAlgorithm
def longestDistance(l):
indegree = [0] * len(l)
def longestDistance(graph):
indegree = [0] * len(graph)
queue = []
longDist = [1] * len(l)
longDist = [1] * len(graph)
for key, values in l.items():
for key, values in graph.items():
for i in values:
indegree[i] += 1
@ -14,7 +14,7 @@ def longestDistance(l):
while queue:
vertex = queue.pop(0)
for x in l[vertex]:
for x in graph[vertex]:
indegree[x] -= 1
if longDist[vertex] + 1 > longDist[x]:
@ -27,5 +27,5 @@ def longestDistance(l):
# Adjacency list of Graph
l = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longestDistance(l)
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longestDistance(graph)

View File

@ -1,11 +1,14 @@
# Kahn's Algorithm is used to find Topological ordering of Directed Acyclic Graph using BFS
def topologicalSort(l):
indegree = [0] * len(l)
def topologicalSort(graph):
"""
Kahn's Algorithm is used to find Topological ordering of Directed Acyclic Graph
using BFS
"""
indegree = [0] * len(graph)
queue = []
topo = []
cnt = 0
for key, values in l.items():
for key, values in graph.items():
for i in values:
indegree[i] += 1
@ -17,17 +20,17 @@ def topologicalSort(l):
vertex = queue.pop(0)
cnt += 1
topo.append(vertex)
for x in l[vertex]:
for x in graph[vertex]:
indegree[x] -= 1
if indegree[x] == 0:
queue.append(x)
if cnt != len(l):
if cnt != len(graph):
print("Cycle exists")
else:
print(topo)
# Adjacency List of Graph
l = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topologicalSort(l)
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topologicalSort(graph)

View File

@ -2,7 +2,7 @@ import sys
from collections import defaultdict
def PrimsAlgorithm(l):
def PrimsAlgorithm(l): # noqa: E741
nodePosition = []
@ -109,7 +109,7 @@ if __name__ == "__main__":
e = int(input("Enter number of edges: ").strip())
adjlist = defaultdict(list)
for x in range(e):
l = [int(x) for x in input().strip().split()]
l = [int(x) for x in input().strip().split()] # noqa: E741
adjlist[l[0]].append([l[1], l[2]])
adjlist[l[1]].append([l[0], l[2]])
print(PrimsAlgorithm(adjlist))

View File

@ -79,8 +79,7 @@ def reset():
machine_time = 0
#######################################
if __name__ == "__main__":
# Initialization
reset()

View File

@ -47,6 +47,7 @@
# Imports
import numpy as np
# Functions of binary conversion--------------------------------------
def text_to_bits(text, encoding="utf-8", errors="surrogatepass"):
"""

View File

@ -19,7 +19,7 @@ class Test(unittest.TestCase):
x = Vector([1, 2, 3])
self.assertEqual(x.component(0), 1)
self.assertEqual(x.component(2), 3)
y = Vector()
_ = Vector()
def test_str(self):
"""

View File

@ -11,9 +11,11 @@ Python:
Inputs:
- X , a 2D numpy array of features.
- k , number of clusters to create.
- initial_centroids , initial centroid values generated by utility function(mentioned in usage).
- initial_centroids , initial centroid values generated by utility function(mentioned
in usage).
- maxiter , maximum number of iterations to process.
- heterogeneity , empty list that will be filled with hetrogeneity values if passed to kmeans func.
- heterogeneity , empty list that will be filled with hetrogeneity values if passed
to kmeans func.
Usage:
1. define 'k' value, 'X' features array and 'hetrogeneity' empty list
@ -22,7 +24,8 @@ Usage:
initial_centroids = get_initial_centroids(
X,
k,
seed=0 # seed value for initial centroid generation, None for randomness(default=None)
seed=0 # seed value for initial centroid generation,
# None for randomness(default=None)
)
3. find centroids and clusters using kmeans function.
@ -37,7 +40,8 @@ Usage:
)
4. Plot the loss function, hetrogeneity values for every iteration saved in hetrogeneity list.
4. Plot the loss function, hetrogeneity values for every iteration saved in
hetrogeneity list.
plot_heterogeneity(
heterogeneity,
k
@ -46,8 +50,9 @@ Usage:
5. Have fun..
"""
from sklearn.metrics import pairwise_distances
import numpy as np
from matplotlib import pyplot as plt
from sklearn.metrics import pairwise_distances
TAG = "K-MEANS-CLUST/ "
@ -118,9 +123,6 @@ def compute_heterogeneity(data, k, centroids, cluster_assignment):
return heterogeneity
from matplotlib import pyplot as plt
def plot_heterogeneity(heterogeneity, k):
plt.figure(figsize=(7, 4))
plt.plot(heterogeneity, linewidth=4)
@ -136,9 +138,11 @@ def kmeans(
):
"""This function runs k-means on given data and initial set of centroids.
maxiter: maximum number of iterations to run.(default=500)
record_heterogeneity: (optional) a list, to store the history of heterogeneity as function of iterations
record_heterogeneity: (optional) a list, to store the history of heterogeneity
as function of iterations
if None, do not store the history.
verbose: if True, print how many data points changed their cluster labels in each iteration"""
verbose: if True, print how many data points changed their cluster labels in
each iteration"""
centroids = initial_centroids[:]
prev_cluster_assignment = None
@ -149,7 +153,8 @@ def kmeans(
# 1. Make cluster assignments using nearest centroids
cluster_assignment = assign_clusters(data, centroids)
# 2. Compute a new centroid for each of the k clusters, averaging all data points assigned to that cluster.
# 2. Compute a new centroid for each of the k clusters, averaging all data
# points assigned to that cluster.
centroids = revise_centroids(data, k, cluster_assignment)
# Check for convergence: if none of the assignments changed, stop

View File

@ -186,7 +186,8 @@ def predict_y_values(
>>> means = [5.011267842911003, 10.011267842911003, 15.011267842911002]
>>> variance = 0.9618530973487494
>>> probabilities = [0.3333333333333333, 0.3333333333333333, 0.3333333333333333]
>>> predict_y_values(x_items, means, variance, probabilities) # doctest: +NORMALIZE_WHITESPACE
>>> predict_y_values(x_items, means, variance,
... probabilities) # doctest: +NORMALIZE_WHITESPACE
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2]
@ -211,7 +212,7 @@ def predict_y_values(
# appending discriminant values of each item to 'results' list
results.append(temp)
return [l.index(max(l)) for l in results]
return [result.index(max(result)) for result in results]
# Calculating Accuracy

View File

@ -1,5 +1,12 @@
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
dataset = pd.read_csv(
@ -9,16 +16,9 @@ X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()

View File

@ -14,6 +14,7 @@ import numpy as np
and types of data
"""
# Mean Absolute Error
def mae(predict, actual):
"""

View File

@ -18,8 +18,9 @@ def collatz_sequence(n: int) -> List[int]:
Traceback (most recent call last):
...
Exception: Sequence only defined for natural numbers
>>> collatz_sequence(43)
[43, 130, 65, 196, 98, 49, 148, 74, 37, 112, 56, 28, 14, 7, 22, 11, 34, 17, 52, 26, 13, 40, 20, 10, 5, 16, 8, 4, 2, 1]
>>> collatz_sequence(43) # doctest: +NORMALIZE_WHITESPACE
[43, 130, 65, 196, 98, 49, 148, 74, 37, 112, 56, 28, 14, 7,
22, 11, 34, 17, 52, 26, 13, 40, 20, 10, 5, 16, 8, 4, 2, 1]
"""
if not isinstance(n, int) or n < 1:

View File

@ -24,7 +24,7 @@ def newton_raphson(f, x0=0, maxiter=100, step=0.0001, maxerror=1e-6, logsteps=Fa
a = x0 # set the initial guess
steps = [a]
error = abs(f(a))
f1 = lambda x: calc_derivative(f, x, h=step) # Derivative of f(x)
f1 = lambda x: calc_derivative(f, x, h=step) # noqa: E731 Derivative of f(x)
for _ in range(maxiter):
if f1(a) == 0:
raise ValueError("No converging solution found")
@ -44,7 +44,7 @@ def newton_raphson(f, x0=0, maxiter=100, step=0.0001, maxerror=1e-6, logsteps=Fa
if __name__ == "__main__":
import matplotlib.pyplot as plt
f = lambda x: m.tanh(x) ** 2 - m.exp(3 * x)
f = lambda x: m.tanh(x) ** 2 - m.exp(3 * x) # noqa: E731
solution, error, steps = newton_raphson(
f, x0=10, maxiter=1000, step=1e-6, logsteps=True
)

View File

@ -1,3 +1,5 @@
# flake8: noqa
"""
Sieve of Eratosthenes

View File

@ -31,7 +31,7 @@ def sieve(n):
[]
"""
l = [True] * (n + 1)
l = [True] * (n + 1) # noqa: E741
prime = []
start = 2
end = int(math.sqrt(n))

View File

@ -16,7 +16,7 @@ def BFS(graph, s, t, parent):
while queue:
u = queue.pop(0)
for ind in range(len(graph[u])):
if visited[ind] == False and graph[u][ind] > 0:
if visited[ind] is False and graph[u][ind] > 0:
queue.append(ind)
visited[ind] = True
parent[ind] = u

View File

@ -19,7 +19,7 @@ def BFS(graph, s, t, parent):
while queue:
u = queue.pop(0)
for ind in range(len(graph[u])):
if visited[ind] == False and graph[u][ind] > 0:
if visited[ind] is False and graph[u][ind] > 0:
queue.append(ind)
visited[ind] = True
parent[ind] = u

View File

@ -1,3 +1,5 @@
# flake8: noqa
"""The following implementation assumes that the activities
are already sorted according to their finish time"""

View File

@ -1,4 +1,7 @@
import collections, pprint, time, os
import collections
import os
import pprint
import time
start_time = time.time()
print("creating word list...")

View File

@ -55,6 +55,7 @@ def isEnglish(message, wordPercentage=20, letterPercentage=85):
return wordsMatch and lettersMatch
if __name__ == "__main__":
import doctest
doctest.testmod()

View File

@ -27,8 +27,10 @@ Game-Of-Life Rules:
Any dead cell with exactly three live neighbours be-
comes a live cell, as if by reproduction.
"""
import random
import sys
import numpy as np
import random, sys
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap

View File

@ -1,5 +1,6 @@
# Python program for generating diamond pattern in Python 3.7+
# Function to print upper half of diamond (pyramid)
def floyd(n):
"""

View File

@ -44,9 +44,9 @@ def function(expansion, s0, s1, key, message):
right = message[4:]
temp = apply_table(right, expansion)
temp = XOR(temp, key)
l = apply_sbox(s0, temp[:4])
l = apply_sbox(s0, temp[:4]) # noqa: E741
r = apply_sbox(s1, temp[4:])
l = "0" * (2 - len(l)) + l
l = "0" * (2 - len(l)) + l # noqa: E741
r = "0" * (2 - len(r)) + r
temp = apply_table(l + r, p4_table)
temp = XOR(left, temp)

View File

@ -48,7 +48,7 @@ def solution(n):
"""
try:
n = int(n)
except (TypeError, ValueError) as e:
except (TypeError, ValueError):
raise TypeError("Parameter n must be int or passive of cast to int.")
if n <= 0:
raise ValueError("Parameter n must be greater or equal to one.")

View File

@ -50,7 +50,7 @@ def solution(n):
"""
try:
n = int(n)
except (TypeError, ValueError) as e:
except (TypeError, ValueError):
raise TypeError("Parameter n must be int or passive of cast to int.")
if n <= 0:
raise ValueError("Parameter n must be greater or equal to one.")

View File

@ -37,7 +37,7 @@ def solution(n):
"""
try:
n = int(n)
except (TypeError, ValueError) as e:
except (TypeError, ValueError):
raise TypeError("Parameter n must be int or passive of cast to int.")
if n <= 0:
raise ValueError("Parameter n must be greater or equal to one.")

View File

@ -41,7 +41,7 @@ def solution(n):
"""
try:
n = int(n)
except (TypeError, ValueError) as e:
except (TypeError, ValueError):
raise TypeError("Parameter n must be int or passive of cast to int.")
if n <= 0:
raise ValueError("Parameter n must be greater or equal to one.")

View File

@ -50,7 +50,7 @@ def solution(n):
"""
try:
n = int(n)
except (TypeError, ValueError) as e:
except (TypeError, ValueError):
raise TypeError("Parameter n must be int or passive of cast to int.")
if n <= 0:
raise ValueError("Parameter n must be greater or equal to one.")

View File

@ -34,7 +34,7 @@ def solution():
70600674
"""
with open(os.path.dirname(__file__) + "/grid.txt") as f:
l = []
l = [] # noqa: E741
for i in range(20):
l.append([int(x) for x in f.readline().split()])

View File

@ -40,7 +40,7 @@ def solution(n):
"""Returns the sum of all semidivisible numbers not exceeding n."""
semidivisible = []
for x in range(n):
l = [i for i in input().split()]
l = [i for i in input().split()] # noqa: E741
c2 = 1
while 1:
if len(fib(l[0], l[1], c2)) < int(l[2]):

View File

@ -1,5 +1,7 @@
# https://en.wikipedia.org/wiki/Simulated_annealing
import math, random
import math
import random
from hill_climbing import SearchProblem

View File

@ -12,6 +12,7 @@ import sys
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10
# This is the linear search that will occur after the search space has become smaller.
def lin_search(left, right, A, target):
for i in range(left, right + 1):

View File

@ -1,9 +1,10 @@
# Python program for Bitonic Sort. Note that this program
# works only when size of input is a power of 2.
# The parameter dir indicates the sorting direction, ASCENDING
# or DESCENDING; if (a[i] > a[j]) agrees with the direction,
# then a[i] and a[j] are interchanged.*/
# then a[i] and a[j] are interchanged.
def compAndSwap(a, i, j, dire):
if (dire == 1 and a[i] > a[j]) or (dire == 0 and a[i] < a[j]):
a[i], a[j] = a[j], a[i]

View File

@ -9,8 +9,9 @@ def palindromic_string(input_string):
1. first this convert input_string("xyx") into new_string("x|y|x") where odd
positions are actual input characters.
2. for each character in new_string it find corresponding length and store the length
and l,r to store previously calculated info.(please look the explanation for details)
2. for each character in new_string it find corresponding length and store the
length and l,r to store previously calculated info.(please look the explanation
for details)
3. return corresponding output_string by removing all "|"
"""
@ -26,7 +27,8 @@ def palindromic_string(input_string):
# append last character
new_input_string += input_string[-1]
# we will store the starting and ending of previous furthest ending palindromic substring
# we will store the starting and ending of previous furthest ending palindromic
# substring
l, r = 0, 0
# length[i] shows the length of palindromic substring with center i
@ -47,7 +49,7 @@ def palindromic_string(input_string):
# does this string is ending after the previously explored end (that is r) ?
# if yes the update the new r to the last index of this
if i + k - 1 > r:
l = i - k + 1
l = i - k + 1 # noqa: E741
r = i + k - 1
# update max_length and start position
@ -72,32 +74,34 @@ if __name__ == "__main__":
"""
...a0...a1...a2.....a3......a4...a5...a6....
consider the string for which we are calculating the longest palindromic substring is shown above where ...
are some characters in between and right now we are calculating the length of palindromic substring with
center at a5 with following conditions :
i) we have stored the length of palindromic substring which has center at a3 (starts at l ends at r) and it
is the furthest ending till now, and it has ending after a6
consider the string for which we are calculating the longest palindromic substring is
shown above where ... are some characters in between and right now we are calculating
the length of palindromic substring with center at a5 with following conditions :
i) we have stored the length of palindromic substring which has center at a3 (starts at
l ends at r) and it is the furthest ending till now, and it has ending after a6
ii) a2 and a4 are equally distant from a3 so char(a2) == char(a4)
iii) a0 and a6 are equally distant from a3 so char(a0) == char(a6)
iv) a1 is corresponding equal character of a5 in palindrome with center a3 (remember that in below derivation of a4==a6)
iv) a1 is corresponding equal character of a5 in palindrome with center a3 (remember
that in below derivation of a4==a6)
now for a5 we will calculate the length of palindromic substring with center as a5 but can we use previously
calculated information in some way?
Yes, look the above string we know that a5 is inside the palindrome with center a3 and previously we have
have calculated that
now for a5 we will calculate the length of palindromic substring with center as a5 but
can we use previously calculated information in some way?
Yes, look the above string we know that a5 is inside the palindrome with center a3 and
previously we have have calculated that
a0==a2 (palindrome of center a1)
a2==a4 (palindrome of center a3)
a0==a6 (palindrome of center a3)
so a4==a6
so we can say that palindrome at center a5 is at least as long as palindrome at center a1
but this only holds if a0 and a6 are inside the limits of palindrome centered at a3 so finally ..
so we can say that palindrome at center a5 is at least as long as palindrome at center
a1 but this only holds if a0 and a6 are inside the limits of palindrome centered at a3
so finally ..
len_of_palindrome__at(a5) = min(len_of_palindrome_at(a1), r-a5)
where a3 lies from l to r and we have to keep updating that
and if the a5 lies outside of l,r boundary we calculate length of palindrome with bruteforce and update
l,r.
and if the a5 lies outside of l,r boundary we calculate length of palindrome with
bruteforce and update l,r.
it gives the linear time complexity just like z-function
"""

View File

@ -11,10 +11,7 @@ def reverse_words(input_str: str) -> str:
>>> reverse_words(sentence)
'Python love I'
"""
input_str = input_str.split(" ")
new_str = list()
return " ".join(reversed(input_str))
return " ".join(reversed(input_str.split(" ")))
if __name__ == "__main__":

View File

@ -1,3 +1,5 @@
# flake8: noqa
"""
This is pure Python implementation of tree traversal algorithms
"""