summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--ranger/config/rc.conf4
-rw-r--r--ranger/container/settings.py1
-rw-r--r--ranger/ext/papermanager.py52
3 files changed, 36 insertions, 21 deletions
diff --git a/ranger/config/rc.conf b/ranger/config/rc.conf
index 107995da..34ee2fad 100644
--- a/ranger/config/rc.conf
+++ b/ranger/config/rc.conf
@@ -174,6 +174,10 @@ set show_selection_in_titlebar true
 # increases CPU load.
 set idle_delay 2000
 
+# In the paper manager mode, should ranger try to find the ".paperinfo" file in
+# directories above the current directory?
+set papermanager_deep_search true
+
 # ===================================================================
 # == Local Options
 # ===================================================================
diff --git a/ranger/container/settings.py b/ranger/container/settings.py
index f75c274f..ad42b13f 100644
--- a/ranger/container/settings.py
+++ b/ranger/container/settings.py
@@ -31,6 +31,7 @@ ALLOWED_SETTINGS = {
     'mouse_enabled': bool,
     'open_all_images': bool,
     'padding_right': bool,
+    'papermanager_deep_search': bool,
     'preview_directories': bool,
     'preview_files': bool,
     'preview_images': bool,
diff --git a/ranger/ext/papermanager.py b/ranger/ext/papermanager.py
index 69332b21..ff6ffb73 100644
--- a/ranger/ext/papermanager.py
+++ b/ranger/ext/papermanager.py
@@ -14,7 +14,8 @@ The columns are:
 5. URL
 """
 
-PAGERINFO_FILE_NAME = ".pagerinfo"
+PAPERINFO_FILE_NAME = ".paperinfo"
+DEEP_SEARCH_DEFAULT = True
 
 import csv
 from os.path import join, dirname, exists, basename
@@ -25,6 +26,7 @@ class PaperManager(object):
     def __init__(self):
         self.metadata_cache = dict()
         self.metafile_cache = dict()
+        self.deep_search = DEEP_SEARCH_DEFAULT
 
     def reset(self):
         self.metadata_cache.clear()
@@ -36,33 +38,41 @@ class PaperManager(object):
         except KeyError:
             result = OpenStruct(filename=filename, title=None, year=None,
                     authors=None, url=None)
-            metafile = join(dirname(filename), PAGERINFO_FILE_NAME)
 
-            # get entries of the metadata file
-            if metafile in self.metafile_cache:
-                entries = self.metafile_cache[metafile]
-            else:
-                if exists(metafile):
-                    reader = csv.reader(open(metafile, "r"),
-                            skipinitialspace=True)
-
-                    entries = list(entry for entry in reader if len(entry) == 5)
-                    self.metafile_cache[metafile] = entries
-                else:
-                    # No metadata file
-                    entries = []
-
-            # Find the relevant entry in the metadata file
             valid = (filename, basename(filename))
-            for entry in entries:
-                if entry[0] in valid:
-                    self._fill_ostruct_with_data(result, entry)
-                    break
+            for metafile in self._get_metafile_names(filename):
+                for entry in self._get_metafile_content(metafile):
+                    if entry[0] in valid:
+                        self._fill_ostruct_with_data(result, entry)
+                        self.metadata_cache[filename] = result
+                        return result
 
             # Cache the value
             self.metadata_cache[filename] = result
             return result
 
+    def _get_metafile_content(self, metafile):
+        if metafile in self.metafile_cache:
+            return self.metafile_cache[metafile]
+        else:
+            if exists(metafile):
+                reader = csv.reader(open(metafile, "r"),
+                        skipinitialspace=True)
+
+                entries = list(entry for entry in reader if len(entry) == 5)
+                self.metafile_cache[metafile] = entries
+                return entries
+            else:
+                return []
+
+    def _get_metafile_names(self, path):
+        base = dirname(path)
+        yield join(base, PAPERINFO_FILE_NAME)
+        if self.deep_search:
+            dirs = base.split("/")[1:]
+            for i in reversed(range(len(dirs))):
+                yield join("/" + "/".join(dirs[0:i]), PAPERINFO_FILE_NAME)
+
     def _fill_ostruct_with_data(self, ostruct, dataset):
         filename, year, title, authors, url = dataset
         if year:    ostruct.year = year