summary refs log tree commit diff stats
path: root/main.py
diff options
context:
space:
mode:
authorAndronaco Marco <marco.andronaco@olivetti.com>2023-07-12 15:47:56 +0200
committerAndronaco Marco <marco.andronaco@olivetti.com>2023-07-12 15:47:56 +0200
commiteceadcf2f0e4150b131b14e7c3c9553f1169b87e (patch)
tree8b4f20909640d91c0727226422065861d0f80d52 /main.py
parentdbe85ea6d39ca7b2037ecf8c439b7d5d8f97e939 (diff)
downloadsunstroke-eceadcf2f0e4150b131b14e7c3c9553f1169b87e.tar.gz
sort based on host preference, add startup script
Diffstat (limited to 'main.py')
-rw-r--r--main.py43
1 file changed, 36 insertions, 7 deletions
diff --git a/main.py b/main.py
index d7fb9c9..c77f965 100644
--- a/main.py
+++ b/main.py
@@ -1,25 +1,54 @@
 from Overpost import get_newspaper
 from MyPyload import Pyload
+from urllib.error import URLError
 from os import getenv
+from datetime import datetime
 
 NEWSPAPER_PREFIX = getenv("NEWSPAPER_PREFIX") or ""
+HOST_PREFERENCE = [ 'katfile.com', 'rapidgator.net', 'www.easybytez.com' ]
 
-def scroll_dict(dictionary):
+def scroll_list(array, buffer=1000):
+    array_len = len(array)
     i = 0
-    for key, values in dictionary.items():
-        if i >= len(values):
+    while i < buffer:
+        if i >= array_len:
             i = 0
-        yield key, values[i]
+        yield array[i]
         i += 1
 
def get_host(link):
    """Return the host part of a URL: the third '/'-separated field
    (e.g. 'https://katfile.com/x' -> 'katfile.com')."""
    return link.split("/", 3)[2]
+
def filter_links(links, host):
    """Return the first link in *links* hosted on *host*, or None if absent."""
    return next((candidate for candidate in links if get_host(candidate) == host), None)
def get_sorted_links(dictionary):
    """Pick one download link per newspaper, cycling through HOST_PREFERENCE.

    Newspaper *i* is matched against host *i* of the preference list
    (wrapping around).  Entries whose link list has no link on that host
    would previously yield None — those are now dropped so that no None
    is ever handed to Pyload's addPackage.
    """
    hosts = scroll_list(HOST_PREFERENCE)
    picked = (filter_links(links, next(hosts)) for links in dictionary.values())
    return [link for link in picked if link is not None]
+
 def download_link(connection, name, link):
     return connection.addPackage(name=name, links=[link])
 
def handle_links(name, links):
    """Queue *links* on Pyload under package *name*; return the package ids.

    If the Pyload daemon is unreachable (URLError), print the links so the
    user can add them by hand and return an empty list instead.
    """
    try:
        connection = Pyload()
        return [download_link(connection, name, candidate) for candidate in links]
    except URLError:
        print("Connessione a Pyload rifiutata.")

    # Fallback: let the user copy the links manually.
    print("Link da aggiungere manualmente:\n")
    for candidate in links:
        print(candidate)
    print()
    return []
+
 def main():
     newspapers = get_newspaper(NEWSPAPER_PREFIX, 0) # 0 -> today
-    con = Pyload()
-    pids = [ download_link(con, NEWSPAPER_PREFIX, link) for _, link in scroll_dict(newspapers) ]
-    print(pids)
+    name = NEWSPAPER_PREFIX + datetime.today().strftime("%Y-%m-%d")
+    links = get_sorted_links(newspapers)
+    pids = handle_links(name, links)
 
 if __name__ == "__main__":
     exit(main())