#!/usr/bin/perl
use strict;
use warnings;

use Time::Piece;
use HTTP::Tiny;
use JSON::MaybeXS;

# For wrapping comment blocks.
use Unicode::LineBreak;
my $lb = Unicode::LineBreak->new(ColMax => 76); # Default is 76.

# Printing UTF-8 to STDOUT. The PerlIO layer name is canonically
# spelled with a leading ':'.
binmode(STDOUT, ":encoding(UTF-8)");

# The usage message now names the required <url> argument; the script
# cannot do anything without one.
die "usage: draco [-dhv] <url>\n" unless scalar @ARGV;

my $DEBUG;
my $VERSION = "v0.2.2";

# Dispatch table to be parsed before url.
my %dispatch = (
    '-v' => sub { print "Draco $VERSION\n"; exit; },
    '-d' => sub { $DEBUG = 1; print STDERR "draco: debug on.\n"; },
    '-h' => sub {
        print qq{Draco $VERSION

Options:
    -d
        Turn on debug messages. Debug messages will be printed to
        STDERR.
    -h
        Print this help.
    -v
        Print version.

Environment Variables:
    FETCH_ALL
        Fetch all comments. This will make multiple HTTP calls to
        reddit. This doesn't fetch *all* the comments.
};
        exit;
    },
);

if (exists $dispatch{$ARGV[0]}) {
    # shift @ARGV to get $url in next shift.
    $dispatch{shift @ARGV}->();
}

# This is the start time.
my $start_time = time;
my $last_log = $start_time;

# $url contains the reddit post. Raise the limit to 500 comments which
# is the maximum reddit allows.
my $url = shift @ARGV;

# Guard against `draco -d` (option but no url); without this we would
# try to fetch the nonsense url ".json?limit=500&sort=top".
die "usage: draco [-dhv] <url>\n" unless defined $url;

my $json_url = "${url}.json?limit=500&sort=top";

my $http = HTTP::Tiny->new( verify_SSL => 1 );

# Fetch the post.
print_time() if $DEBUG;
print STDERR "fetching `$json_url'.\n" if $DEBUG;
my $response = get_response($json_url);

# Decode json.
print STDERR "decoding json response.\n" if $DEBUG;
my $json_data = decode_json($response->{content});

# $post contains post data.
my $post = $json_data->[0]->{data}->{children}->[0]->{data};

# $comments contains comment data. We are interested in: replies,
# author, body, created_utc & permalink.
my $comments = $json_data->[1]->{data}->{children};

# Start the Org document.
print "#+", "STARTUP:content\n";

# Print the date.
my $current_date = Time::Piece->new->strftime('%+');
print "#+", "DATE: $current_date\n";
print "\n";

# Print the post title & its link.
print "* ", "[[$post->{url}][$post->{title}]]\n";

# Add various details to :PROPERTIES:.
print ":PROPERTIES:\n";
# Include the created date & archive date in properties.
# created_utc is a unix epoch ('%s'); '%+' prints a date(1)-style stamp.
print ":CREATED_UTC: ",
    Time::Piece->strptime($post->{created_utc}, '%s')
    ->strftime('%+'), "\n";
print ":ARCHIVE_DATE: $current_date\n";
foreach my $detail (qw( subreddit created_utc author permalink
                        upvote_ratio ups downs score )) {
    # Skip properties reddit didn't send (undef, 0 or empty string).
    print ":${detail}: =$post->{$detail}=\n" if scalar $post->{$detail};
}
print ":END:\n";

# Add selftext if present.
print "\n#+BEGIN_SRC markdown\n",
    # Break the text at 76 column & add 2 space before every new line.
    " ", $lb->break($post->{selftext}) =~ s/\n/\n\ \ /gr, "\n",
    "#+END_SRC\n"
    if scalar $post->{selftext};

# @http_calls records every url fetched (for the stats below);
# %counter tracks how many times each helper sub was called.
my (@http_calls, %counter);
$counter{print_comment_chain_call} = 0;
$counter{iterate_over_comments_call} = 0;

print_time() if $DEBUG;
print STDERR "iterating over top-level comments.\n" if $DEBUG;
# We are going to put a dot after each HTTP call.
print STDERR "each dot is a HTTP call.\n" if $DEBUG;

# Iterate over top-level comments. The second argument is level
# (depth), it should be 0 for top-level comments.
iterate_over_comments($comments, 0);

# Separate the dots from the rest by a line break.
print STDERR "\n" if $DEBUG;

print_time() if $DEBUG;

# Print important stats.
print STDERR "\n" if $DEBUG;
print STDERR "total http calls: ", scalar @http_calls, "\n" if $DEBUG;
print STDERR "total print_comment_chain calls: ",
    $counter{print_comment_chain_call}, "\n" if $DEBUG;
print STDERR "total iterate_over_comments calls: ",
    $counter{iterate_over_comments_call}, "\n" if $DEBUG;

# Print (to STDERR) the seconds elapsed since program start & since
# the previous call to this sub; updates $last_log as a side effect.
sub print_time {
    print STDERR " ";
    print STDERR "time since [start, last log]: [", time - $start_time,
        ", ", time - $last_log, "] seconds\n";
    $last_log = time;
}

# GET a url, recording it in @http_calls; prints a progress dot when
# debugging & dies unless the response was successful.
sub get_response {
    my $url = shift @_;
    print STDERR "."
if $DEBUG;
    my $response = $http->get($url);
    push @http_calls, $url;
    die "Unexpected response - $response->{status}: $response->{reason} : $url"
        unless $response->{success};
    return $response;
}

# Takes a comment id & returns the json url for that comment thread.
sub get_comment_thread_from_id {
    my $comment_id = shift @_;

    # Reddit wants exactly one '/' between the post url & the comment
    # id, so append one only when the post url doesn't already end
    # with a '/'.
    my $json_url = $url;
    # Fix: the original `substr $url, -1 eq "/"` parsed as
    # substr($url, (-1 eq "/")), i.e. substr($url, 0) — the whole
    # (always-truthy) string — so the '/' was never appended.
    $json_url .= "/" unless substr($url, -1) eq "/";
    $json_url .= "${comment_id}.json?limit=500&sort=top";
    return $json_url;
}

# This was being used multiple times so I moved it to a subroutine.
# It'll take $comment_id & return $comments.
sub get_all_comments_from_id {
    my $comment_id = shift @_;
    my $json_url = get_comment_thread_from_id($comment_id);

    # Fetch the comment thread.
    my $response = get_response($json_url);

    # Decode json.
    my $json_data = decode_json($response->{content});

    # $comments contains comment data.
    my $comments = $json_data->[1]->{data}->{children};
    return $comments;
}

# First argument requires $comments & second is the level (depth).
sub iterate_over_comments {
    my $comments = shift @_;
    my $level = shift @_;

    $counter{iterate_over_comments_call}++;

    foreach my $co
                               ━━━━━━━━━
                                 OCTANS

                                Andinus
                               ━━━━━━━━━


Table of Contents
─────────────────

1. Demo
2. Documentation
.. 1. Implementation
.. 2. Options


Octans is a program to solve Algot's Wordplay (Wordsearch) puzzles.

• Website: <https://andinus.nand.sh/octans>
• Source: <https://git.tilde.institute/andinus/octans>
• GitHub: <https://github.com/andinus/octans>


1 Demo
══════

  This was recorded with `asciinema(1)'.

  [https://asciinema.org/a/384464.png]

  ⁃ Octans 2020-01-14: <https://asciinema.org/a/384464>
  ⁃ alt-link (download): <https://andinus.nand.sh/static/octans>


[https://asciinema.org/a/384464.png] <https://asciinema.org/a/384464>


2 Documentation
═══════════════

2.1 Implementation
──────────────────

  Initially it went over the list of words & checked whether each word
  existed in the grid. This was very slow.

  Currently it walks the grid & checks if the current string exists in
  the dictionary. This is faster for these reasons:

  • The dictionary is sorted, so we can perform a binary range search
    on the dictionary to return the list of all words that start with a
    specific string.
  • Starting positions are limited.

  If the dictionary wasn't sorted then this probably would've been
  slower than the previous implementation.


2.2 Options
───────────