diff --git a/CHANGELOG.org b/CHANGELOG.org
index b4e7bb9..58f7dd5 100644
--- a/CHANGELOG.org
+++ b/CHANGELOG.org
@@ -23,3 +23,5 @@
 ** 0.5.2 <2022-07-21>
    Added pyproject.toml.
    Added img/ folder.
+** 0.6.0 <2022-08-11>
+   Updated the CLI interface.
diff --git a/pyproject.toml b/pyproject.toml
index 367db68..4d83227 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -21,8 +21,9 @@ dependencies = [
     "requests",
     "bs4",
     "notify-py",
+    "rich",
 ]
 
 [project.urls]
 "Homepage" = "https://gitlab.com/KKlochko/anitube-simple-notification"
-"Bug Tracker" = "https://gitlab.com/KKlochko/anitube-simple-notification/issues"
\ No newline at end of file
+"Bug Tracker" = "https://gitlab.com/KKlochko/anitube-simple-notification/issues"
diff --git a/src/main.py b/src/main.py
index 0689013..0335922 100644
--- a/src/main.py
+++ b/src/main.py
@@ -20,6 +20,8 @@
 from db import DataBase
 from scraper import Scraper
 from notify import Notification
+from rich.console import Console
+from rich.progress import track
 import time
 
 def get_urls(file_path):
@@ -53,28 +55,33 @@ def main():
     urls = get_urls(URL_FILE)
     #print(f"{urls}")
 
+    # Initialise the rich console for coloured output
+    console = Console()
+
     # Checks for new urls in file and add as current state
     # If one of page has updated then notifing.
     # Repeating the checking with the waiting period.
     while True:
-        print(f"[DOING] Checking for animes [0/{len(urls)}]")
+        console.print(f"[yellow][DOING][/] Checking anime [0/{len(urls)}]")
         count = 0
         for url in urls:
             data = scr.get_anime(url, POSTERS)
             if data == None:
-                print(f"[ERROR] A conections trouble is occured.")
+                console.print(f"[red][ERROR][/] A connection error occurred.")
                 continue
             url, title, status, poster_path = data
-            print(f"[DOING] Checking for \"{title}\" [{count}/{len(urls)}]")
+            console.print(f"[yellow][DOING][/] Checking \"{title}\" [{count}/{len(urls)}]")
             r = db.add_anime_if(url, title, status, poster_path)
             if r == -1:
                 n = Notification(title, MESSAGE, poster_path)
                 n.send()
-                print(f"[NOTIFICATION] \"{title}\"")
+                console.print(f"[blue bold][NOTIFICATION][/] \"{title}\"")
             count+=1
-            print(f"[DONE] Checking for \"{title}\" [{count}/{len(urls)}]")
-        print(f"[WAITING] The next check is after {WAITING_PERIOD} seconds")
-        time.sleep(WAITING_PERIOD)
+            console.print(f"[green][DONE][/] Checked \"{title}\" [{count}/{len(urls)}]")
+        console.print(f"[yellow][WAITING][/] The next check is in {WAITING_PERIOD} seconds")
+        # Sleep one second per tick so the progress bar advances
+        for n in track(range(WAITING_PERIOD), description="Waiting..."):
+            time.sleep(1)
 
 if __name__ == "__main__":
     main()
diff --git a/src/scraper.py b/src/scraper.py
index 4a55fce..ea9a264 100644
--- a/src/scraper.py
+++ b/src/scraper.py
@@ -24,6 +24,8 @@ This module has all for simplify work with scraping.
 
 import requests
 from bs4 import BeautifulSoup
+from rich.console import Console
+from rich.progress import track
 import os
 
 class Scraper:
@@ -63,9 +65,10 @@ class Scraper:
         poster_url = "https://anitube.in.ua" + soup.find('span', class_="story_post").find('img').get('src')
         poster_path = f"{self.POSTER_PATH}/{poster_url.split('/')[-1]}"
         if GETPOSTER and not self.file_exist(poster_path):
-            print(f"[DONWLOADING] The poster for {title}")
-            img = requests.get(poster_url)
-            with open(poster_path,'wb') as file:
-                file.write(img.content)
-            print(f"[DONWLOADED] The poster for {title}")
+            console = Console()
+            with console.status("[yellow]Downloading...[/]"):
+                img = requests.get(poster_url)
+                with open(poster_path,'wb') as file:
+                    file.write(img.content)
+            console.print(f"[green][DOWNLOADED][/] The poster for \"{title}\"")
         return [url, title, status, poster_path]
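For reviewers unfamiliar with rich, below is a minimal standalone sketch of the two idioms this patch introduces: track() turns the fixed sleep into a per-second progress bar, and console.status() shows a spinner around a blocking download. Note that WAITING_PERIOD and the request URL here are placeholder values for the demo, not the project's real configuration.

    import time

    import requests
    from rich.console import Console
    from rich.progress import track

    WAITING_PERIOD = 5  # placeholder value; the app reads its own setting

    console = Console()

    # track() wraps an iterable and renders a progress bar that advances
    # once per iteration -- here, once per second of the waiting period.
    console.print(f"[yellow][WAITING][/] The next check is in {WAITING_PERIOD} seconds")
    for _ in track(range(WAITING_PERIOD), description="Waiting..."):
        time.sleep(1)

    # console.status() displays a spinner while the blocking call runs
    # inside the context manager, then clears it on exit.
    with console.status("[yellow]Downloading...[/]"):
        img = requests.get("https://example.com/poster.jpg")  # placeholder URL
    console.print("[green][DOWNLOADED][/] Example download finished")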