 Overpost.py      |  5 +++--
 Sunstroke.bat    |  2 ++
 main.py          | 52 +++++++++++++++++++++++++++++++++++++++++++++-------
 requirements.txt | Bin 122 -> 122 bytes
 4 files changed, 50 insertions(+), 9 deletions(-)
diff --git a/Overpost.py b/Overpost.py
index 6404b37..cbea1ea 100644
--- a/Overpost.py
+++ b/Overpost.py
@@ -69,10 +69,11 @@ def get_links(rss_url):
     return [ parse_entry(entry) for entry in feed.entries ]
 
 def get_newspaper(prefix="", index=0):
-    links = get_links(RSS_URL)
+    all_links = get_links(RSS_URL)
     try:
-        daily = links[index][1]
+        daily = all_links[index][1]
     except IndexError:
+        print("Empty feed.")
         return {}
     return { k: v for k, v in daily.items() if k.startswith(prefix)}
 
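Note: the rename to all_links keeps the full feed list visually distinct from the per-day links, and the empty-feed path now announces itself before returning {}. A minimal sketch of what a caller sees (the "Il_Fatto" prefix is illustrative):

    from Overpost import get_newspaper

    papers = get_newspaper("Il_Fatto", 0)
    # On an empty feed, get_newspaper prints "Empty feed." and returns {},
    # so the loop below is simply skipped.
    for name, links in papers.items():
        print(name, len(links))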
diff --git a/Sunstroke.bat b/Sunstroke.bat
new file mode 100644
index 0000000..61d0f16
--- /dev/null
+++ b/Sunstroke.bat
@@ -0,0 +1,2 @@
+@echo off
+.\venv\Scripts\python.exe .\main.py
diff --git a/main.py b/main.py
index d7fb9c9..c77f965 100644
--- a/main.py
+++ b/main.py
@@ -1,25 +1,63 @@
 from Overpost import get_newspaper
 from MyPyload import Pyload
+from urllib.error import URLError
 from os import getenv
+from datetime import datetime
 
 NEWSPAPER_PREFIX = getenv("NEWSPAPER_PREFIX") or ""
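+# Preferred mirror hosts; get_sorted_links cycles through them, one per paper.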
+HOST_PREFERENCE = [ 'katfile.com', 'rapidgator.net', 'www.easybytez.com' ]
 
-def scroll_dict(dictionary):
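+# Round-robin generator over array, capped at buffer yields as a safety stop.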
+def scroll_list(array, buffer=1000):
+    array_len = len(array)
     i = 0
-    for key, values in dictionary.items():
-        if i >= len(values):
+    yields = 0
+    while yields < buffer:
+        yields += 1
+        if i >= array_len:
             i = 0
-        yield key, values[i]
+        yield array[i]
         i += 1
 
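+# Host part of a URL: "https://katfile.com/abc" -> "katfile.com".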
+def get_host(link):
+    return link.split("/")[2]
+
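+# Return the first link hosted on host, or None when no link matches.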
+def filter_links(links, host):
+    for link in links:
+        if get_host(link) == host:
+            return link
+
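+# Pick one link per newspaper, each matched against the next host in rotation.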
+def get_sorted_links(dictionary):
+    hosts = scroll_list(HOST_PREFERENCE)
+    return [ filter_links(links, next(hosts)) for _, links in dictionary.items() ]
+
 def download_link(connection, name, link):
     return connection.addPackage(name=name, links=[link])
 
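+# Queue all links in Pyload; if the daemon is unreachable, list them for manual download.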
+def handle_links(name, links):
+    try:
+        con = Pyload()
+        return [ download_link(con, name, link) for link in links ]
+    except URLError:
+        print("Connection to Pyload refused.")
+
+    print("Links to add manually:\n")
+    for x in links:
+        print(x)
+    print()
+    return []
+
 def main():
     newspapers = get_newspaper(NEWSPAPER_PREFIX, 0) # 0 -> today
-    con = Pyload()
-    pids = [ download_link(con, NEWSPAPER_PREFIX, link) for _, link in scroll_dict(newspapers) ]
-    print(pids)
+    name = NEWSPAPER_PREFIX + datetime.today().strftime("%Y-%m-%d")
+    links = get_sorted_links(newspapers)
+    pids = handle_links(name, links)
+    print(pids)
 
 if __name__ == "__main__":
     exit(main())
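Note: a small worked example of the new selection pipeline, using made-up data (the URLs are placeholders; only the host part matters). scroll_list hands out hosts in rotation, so the first paper is matched against katfile.com, the second against rapidgator.net, and so on:

    fake_papers = {
        "paper_a": ["https://katfile.com/a", "https://rapidgator.net/a"],
        "paper_b": ["https://katfile.com/b", "https://rapidgator.net/b"],
    }
    print(get_sorted_links(fake_papers))
    # -> ['https://katfile.com/a', 'https://rapidgator.net/b']

One caveat: a paper with no link on its assigned host makes filter_links return None, which then flows into download_link unchecked.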
diff --git a/requirements.txt b/requirements.txt
index ad62927..361922c 100644
--- a/requirements.txt
+++ b/requirements.txt
Binary files differ