(Updated) 0.6.0 - the cli interface.

main
KKlochko 3 years ago
parent 7394f88d29
commit 5c16001824

@@ -23,3 +23,5 @@
 ** 0.5.2 <2022-07-21>
 Added pyproject.toml.
 Added img/ folder.
+** 0.6.0 <2022-08-11>
+Updated the cli interface

@@ -21,6 +21,7 @@ dependencies = [
     "requests",
     "bs4",
     "notify-py",
+    "rich",
 ]
 
 [project.urls]
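
The only new dependency is rich, which renders the colour markup used in the console output below. A minimal sketch of that markup style, not taken from the repository:

from rich.console import Console

console = Console()
# Rich parses [yellow]...[/] style tags into terminal colours/styles.
console.print("[yellow][DOING][/] Checking for animes [0/3]")
console.print("[red][ERROR][/] Something went wrong.")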

@@ -20,6 +20,8 @@
 from db import DataBase
 from scraper import Scraper
 from notify import Notification
+from rich.console import Console
+from rich.progress import track
 import time
 
 def get_urls(file_path):
@@ -53,28 +55,33 @@ def main():
     urls = get_urls(URL_FILE)
     #print(f"{urls}")
 
+    # Console initialising
+    console = Console()
+
     # Checks for new urls in file and add as current state
     # If one of page has updated then notifing.
     # Repeating the checking with the waiting period.
     while True:
-        print(f"[DOING] Checking for animes [0/{len(urls)}]")
+        console.print(f"[yellow][DOING][/] Checking for animes [0/{len(urls)}]")
         count = 0
         for url in urls:
             data = scr.get_anime(url, POSTERS)
             if data == None:
-                print(f"[ERROR] A conections trouble is occured.")
+                console.print(f"[red][ERROR][/] A conections trouble is occured.")
                 continue
             url, title, status, poster_path = data
-            print(f"[DOING] Checking for \"{title}\" [{count}/{len(urls)}]")
+            console.print(f"[yellow][DOING][/] Checking for \"{title}\" [{count}/{len(urls)}]")
             r = db.add_anime_if(url, title, status, poster_path)
             if r == -1:
                 n = Notification(title, MESSAGE, poster_path)
                 n.send()
-                print(f"[NOTIFICATION] \"{title}\"")
+                console.print(f"[blue bold][NOTIFICATION][/] \"{title}\"")
             count+=1
-            print(f"[DONE] Checking for \"{title}\" [{count}/{len(urls)}]")
-        print(f"[WAITING] The next check is after {WAITING_PERIOD} seconds")
-        time.sleep(WAITING_PERIOD)
+            console.print(f"[green][DONE][/] Checking for \"{title}\" [{count}/{len(urls)}]")
+        console.print(f"[yellow][WAITING][/] The next check is after {WAITING_PERIOD} seconds")
+        # Sleep while waiting
+        for n in track(range(WAITING_PERIOD), description="Waiting..."):
+            time.sleep(1)
 
 if __name__ == "__main__":
     main()
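
The plain time.sleep(WAITING_PERIOD) is replaced by a rich progress bar that ticks once per second, so the wait is visible. A standalone sketch of the same pattern, assuming WAITING_PERIOD is an integer number of seconds (the value here is only a placeholder):

import time
from rich.progress import track

WAITING_PERIOD = 10  # placeholder value for the sketch

# track() wraps an iterable and draws a progress bar as it is consumed,
# so sleeping one second per iteration shows the remaining wait.
for _ in track(range(WAITING_PERIOD), description="Waiting..."):
    time.sleep(1)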

@@ -24,6 +24,8 @@ This module has all for simplify work with scraping.
 import requests
 from bs4 import BeautifulSoup
+from rich.console import Console
+from rich.progress import track
 import os
 
 
 class Scraper:
@@ -63,9 +65,10 @@ class Scraper:
         poster_url = "https://anitube.in.ua" + soup.find('span', class_="story_post").find('img').get('src')
         poster_path = f"{self.POSTER_PATH}/{poster_url.split('/')[-1]}"
         if GETPOSTER and not self.file_exist(poster_path):
-            print(f"[DONWLOADING] The poster for {title}")
-            img = requests.get(poster_url)
-            with open(poster_path,'wb') as file:
-                file.write(img.content)
-            print(f"[DONWLOADED] The poster for {title}")
+            console = Console()
+            with console.status("[yellow]Downloading...[/]"):
+                img = requests.get(poster_url)
+                with open(poster_path,'wb') as file:
+                    file.write(img.content)
+            console.print(f"[green][DONWLOADED][/] The poster for \"{title}\"")
         return [url, title, status, poster_path]
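
console.status() shows a spinner for as long as the with-block runs, replacing the old [DONWLOADING] print. A minimal sketch of the same pattern with a placeholder URL and file name (not from the repository):

import requests
from rich.console import Console

console = Console()
poster_url = "https://example.com/poster.jpg"  # placeholder
poster_path = "poster.jpg"                     # placeholder

# The spinner stays on screen until the with-block finishes.
with console.status("[yellow]Downloading...[/]"):
    img = requests.get(poster_url)
    with open(poster_path, 'wb') as file:
        file.write(img.content)
console.print(f"[green][DOWNLOADED][/] Saved \"{poster_path}\"")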
