[mypy] Fix web_programming directory (#4297)

* Update world_covid19_stats.py

* Delete monkeytype_config.py

* updating DIRECTORY.md

* Apply pyannotate suggestions to emails_from_url.py

* mypy web_programming/emails_from_url.py

* super().__init__()

* mypy --ignore-missing-imports web_programming/emails_from_url.py

* Update emails_from_url.py

* self.urls: list[str] = []

* mypy: Fix web_programming directory

Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com>
Co-authored-by: Dhruv Manilawala <dhruvmanila@gmail.com>
This commit is contained in:
Christian Clauss 2021-03-31 05:18:07 +02:00 committed by GitHub
parent c22c7d503b
commit 895bca3654
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
3 changed files with 12 additions and 11 deletions

View File

@ -23,7 +23,7 @@ jobs:
python -m pip install mypy pytest-cov -r requirements.txt python -m pip install mypy pytest-cov -r requirements.txt
# FIXME: #4052 fix mypy errors in the exclude directories and remove them below # FIXME: #4052 fix mypy errors in the exclude directories and remove them below
- run: mypy --ignore-missing-imports - run: mypy --ignore-missing-imports
--exclude '(arithmetic_analysis|ciphers|conversions|data_structures|digital_image_processing|dynamic_programming|graphs|hashes|linear_algebra|maths|matrix|other|project_euler|scripts|searches|strings|web_programming*)/$' . --exclude '(arithmetic_analysis|ciphers|conversions|data_structures|digital_image_processing|dynamic_programming|graphs|hashes|linear_algebra|maths|matrix|other|project_euler|scripts|searches|strings*)/$' .
- name: Run tests - name: Run tests
run: pytest --doctest-modules --ignore=project_euler/ --ignore=scripts/ --cov-report=term-missing:skip-covered --cov=. . run: pytest --doctest-modules --ignore=project_euler/ --ignore=scripts/ --cov-report=term-missing:skip-covered --cov=. .
- if: ${{ success() }} - if: ${{ success() }}

View File

@ -9,7 +9,7 @@ import requests
URL_BASE = "https://www.amdoren.com/api/currency.php" URL_BASE = "https://www.amdoren.com/api/currency.php"
TESTING = os.getenv("CI", False) TESTING = os.getenv("CI", False)
API_KEY = os.getenv("AMDOREN_API_KEY") API_KEY = os.getenv("AMDOREN_API_KEY", "")
if not API_KEY and not TESTING: if not API_KEY and not TESTING:
raise KeyError("Please put your API key in an environment variable.") raise KeyError("Please put your API key in an environment variable.")

View File

@ -8,18 +8,19 @@ __status__ = "Alpha"
import re import re
from html.parser import HTMLParser from html.parser import HTMLParser
from typing import Optional
from urllib import parse from urllib import parse
import requests import requests
class Parser(HTMLParser): class Parser(HTMLParser):
def __init__(self, domain: str): def __init__(self, domain: str) -> None:
HTMLParser.__init__(self) super().__init__()
self.data = [] self.urls: list[str] = []
self.domain = domain self.domain = domain
def handle_starttag(self, tag: str, attrs: str) -> None: def handle_starttag(self, tag: str, attrs: list[tuple[str, Optional[str]]]) -> None:
""" """
This function parses html to take urls from tags This function parses html to take urls from tags
""" """
@ -29,10 +30,10 @@ class Parser(HTMLParser):
for name, value in attrs: for name, value in attrs:
# If href is defined, and not empty nor # print it. # If href is defined, and not empty nor # print it.
if name == "href" and value != "#" and value != "": if name == "href" and value != "#" and value != "":
# If not already in data. # If not already in urls.
if value not in self.data: if value not in self.urls:
url = parse.urljoin(self.domain, value) url = parse.urljoin(self.domain, value)
self.data.append(url) self.urls.append(url)
# Get main domain name (example.com) # Get main domain name (example.com)
@ -59,7 +60,7 @@ def get_sub_domain_name(url: str) -> str:
return parse.urlparse(url).netloc return parse.urlparse(url).netloc
def emails_from_url(url: str = "https://github.com") -> list: def emails_from_url(url: str = "https://github.com") -> list[str]:
""" """
This function takes a url and returns all valid urls This function takes a url and returns all valid urls
""" """
@ -78,7 +79,7 @@ def emails_from_url(url: str = "https://github.com") -> list:
# Get links and loop through # Get links and loop through
valid_emails = set() valid_emails = set()
for link in parser.data: for link in parser.urls:
# open URL. # open URL.
# read = requests.get(link) # read = requests.get(link)
try: try: