author    | Brian Chu <brianmchu42@gmail.com> | 2021-12-19 22:57:37 -0800
committer | Brian Chu <brianmchu42@gmail.com> | 2021-12-19 22:57:37 -0800
commit    | d4f482804b9777768fa4f9fedde0cd549578502b (patch)
tree      | 211098ceaad6046234e0f55fe86dc539f791d4a4
parent    | 04d85bf194d50f8da584a8c657d7490649befb7c (diff)
download  | AdventOfCode2021-d4f482804b9777768fa4f9fedde0cd549578502b.tar.gz
catch up on solutions up to day 20
-rw-r--r-- | day12.py |  35
-rw-r--r-- | day13.py |  50
-rw-r--r-- | day14.py |  30
-rw-r--r-- | day15.py |  76
-rw-r--r-- | day16.py |  97
-rw-r--r-- | day17.py |  39
-rw-r--r-- | day18.py |  71
-rw-r--r-- | day19.py | 137
-rw-r--r-- | day20.py |  19
9 files changed, 554 insertions, 0 deletions
diff --git a/day12.py b/day12.py
new file mode 100644
index 0000000..55732b5
--- /dev/null
+++ b/day12.py
@@ -0,0 +1,35 @@
+#!/usr/bin/env python
+
+from collections import defaultdict
+
+graph = defaultdict(list)
+with open("day12.txt") as data:
+    for line in data:
+        vert1, vert2 = line.strip().split('-')
+        if vert2 != "start":
+            graph[vert1].append(vert2)
+        if vert1 != "start":
+            graph[vert2].append(vert1)
+    del graph["end"]
+
+# part 1
+def walk(path=['start']):
+    count = 0
+    for point in graph[path[-1]]:
+        if point.isupper() or not point in path:
+            count += 1 if point == 'end' else walk(path + [point])
+    return count
+
+print(walk())
+
+# part 2
+def walk2(path=['start']):
+    count = 0
+    for point in graph[path[-1]]:
+        count += 1 if point == 'end' else (walk2,
+                                           walk)[point.islower()
+                                                 and point in path](
+                                                     path + [point])
+    return count
+
+print(walk2())
diff --git a/day13.py b/day13.py
new file mode 100644
index 0000000..a1f4058
--- /dev/null
+++ b/day13.py
@@ -0,0 +1,50 @@
+#!/usr/bin/env python
+
+import numpy as np
+import re
+
+split_pattern = re.compile('(x|y)=([\d]*)')
+xMax = yMax = 0
+with open('day13.txt') as data:
+    coords = []
+    for line in data:
+        if line == '\n':
+            break
+        else:
+            x, y = map(int, line.strip().split(','))
+            xMax = max(xMax, x)
+            yMax = max(yMax, y)
+            coords += [(x, y)]
+    splits = []
+    for line in data:
+        splits += [split_pattern.search(line.strip()).group(1, 2)]
+
+thermal = np.zeros((xMax+1, yMax+1)).astype(bool)
+for x, y in coords:
+    thermal[x, y] = True
+
+# part 1
+def fold_map(split):
+    # folds always fold the map in half
+    axis, val = split[0], int(split[1])
+    if axis == 'x':
+        folded = np.flip(thermal[val+1:], 0)
+        length = folded.shape[0]
+        thermal[val-length:val] = thermal[val-length:val] | folded
+        return thermal[:val]
+    else:
+        folded = np.flip(thermal[:, val+1:], 1)
+        length = folded.shape[1]
+        thermal[:, val-length:val] = thermal[:, val-length:val] | folded
+        return thermal[:, :val]
+# part 1
+print(np.count_nonzero(fold_map(splits[0])))
+
+# part 2
+for split in splits:
+    thermal = fold_map(split)
+thermal_str = thermal.astype(str)
+thermal_str[thermal] = "*"
+thermal_str[~thermal] = " "
+for line in thermal_str.T:
+    print("".join(line))
diff --git a/day14.py b/day14.py
new file mode 100644
index 0000000..f04d327
--- /dev/null
+++ b/day14.py
@@ -0,0 +1,30 @@
+#!/usr/bin/env python
+from collections import defaultdict
+
+with open('day14.txt') as data:
+    template = next(data).strip()
+    next(data)
+    rules = {}
+    for line in data:
+        k, v = line.strip().split(' -> ')
+        rules[k] = v
+
+def polymerize(steps=10):
+    pairs = defaultdict(int)
+    for i in range(len(template) - 1):
+        pairs[template[i] + template[i+1]] += 1
+
+    for i in range(steps):
+        update = defaultdict(int)
+        for k, v in pairs.items():
+            update[k[0] + rules[k]] += v
+            update[rules[k] + k[1]] += v
+        pairs = update
+
+    counts = defaultdict(int)
+    for k, v in pairs.items():
+        counts[k[0]] += v
+    return max(counts.values()) - min(counts.values()) - 1
+
+print(polymerize())
+print(polymerize(40))
diff --git a/day15.py b/day15.py
new file mode 100644
index 0000000..90aaed3
--- /dev/null
+++ b/day15.py
@@ -0,0 +1,76 @@
+#!/usr/bin/env python
+
+import numpy as np
+import math
+from heapq import *
+from collections import defaultdict
+
+with open('day15.txt') as data:
+    costs = []
+    for line in data:
+        costs.append(list(line.strip()))
+    costs = np.array(costs).astype(int)
+
+def a_star(costs) -> int:
+    def h(point):
+        m, n = point
+        # approximate with straight line distance between points
+        return math.sqrt((costs.shape[0]-m)**2 + (costs.shape[1]-n)**2)
+
+    def neighbors(point):
+        m, n = point
+        neighboring = {(0, 1), (1, 0), (0, -1), (-1, 0)}
+        return {(m+dx, n+dy) for dx, dy in neighboring
+                if 0<=m+dx<costs.shape[0] and 0<=n+dy<costs.shape[1]}
+
+    def path_sum(parents, current):
+        complete = [current]
+        while current in parents:
+            current = parents[current]
+            if current != (0, 0):
+                complete.insert(0, current)
+        return sum(map(lambda x: costs[x], complete))
+
+    discovered = [(0,0)]
+    parents = {}
+
+    gScore = defaultdict(lambda : math.inf)
+    gScore[(0,0)] = 0
+
+    fScore = defaultdict(lambda : math.inf)
+    fScore[(0,0)] = h((0,0))
+
+    while discovered:
+        if discovered[0] == (goal := (costs.shape[0]-1, costs.shape[1]-1)):
+            return path_sum(parents, goal)
+
+        current = heappop(discovered)
+        for neighbor in neighbors(current):
+            tentative = gScore[current] + costs[neighbor]
+            if tentative < gScore[neighbor]:
+                parents[neighbor] = current
+                gScore[neighbor] = tentative
+                fScore[neighbor] = tentative + h(neighbor)
+                if neighbor not in discovered:
+                    heappush(discovered, neighbor)
+    else:
+        return None
+# part 1
+print(a_star(costs))
+
+# part 2
+new_costs = np.zeros((costs.shape[0] * 5, costs.shape[1] * 5)).astype(int)
+
+def increment(arr):
+    arr += 1
+    arr[arr > 9] = 1
+
+xDim, yDim = costs.shape
+for i in range(5):
+    for j in range(5):
+        copied = np.copy(costs)
+        for _ in range(i+j):
+            increment(copied)
+        new_costs[xDim*i:xDim*(i+1), yDim*j:yDim*(j+1)] = copied
+
+print(a_star(new_costs))
diff --git a/day16.py b/day16.py
new file mode 100644
index 0000000..5adedfc
--- /dev/null
+++ b/day16.py
@@ -0,0 +1,97 @@
+#!/usr/bin/env python
+import math
+
+hex2bin = {digit : f'{i:>04b}' for i, digit in enumerate("0123456789ABCDEF")}
+with open('day16.txt') as data:
+    bits = "".join(hex2bin[char] for char in data.read().strip())
+
+pos = 0
+def read_bits(num_bits):
+    global pos
+    num = bits[pos:pos+num_bits]
+    pos += num_bits
+    return num
+
+def read_int(num_bits):
+# reads arbitrary number of bits from bitstring to a number
+    bit_str = read_bits(num_bits)
+    return int(bit_str, 2)
+
+def read_literal_int():
+# reads the value of a literal: type id = 4
+    total = 0
+    while True:
+        digit = read_bits(5)
+        total <<= 4
+        total += int(digit[1:], 2)
+        if digit[0] == '0':
+            break
+    return total
+
+def parse_packet():
+    version = read_int(3)
+    type_id = read_int(3)
+    if type_id == 4:
+        literal = read_literal_int()
+        return {
+            'version': version,
+            'type_id': type_id,
+            'literal': literal
+        }
+    else:
+        length_id = read_int(1)
+        if length_id == 0:
+            bit_length = read_int(15)
+            stop_pos = pos + bit_length
+            subpackets = []
+            while pos < stop_pos:
+                p = parse_packet()
+                subpackets.append(p)
+            return {
+                'version': version,
+                'type_id': type_id,
+                'packets': subpackets
+            }
+        else:
+            sub_length = read_int(11)
+            subpackets = []
+            for i in range(sub_length):
+                subpackets.append(parse_packet())
+            return {
+                'version': version,
+                'type_id': type_id,
+                'packets': subpackets
+            }
+
+def get_version_sum(packet):
+    version_sum = packet['version']
+    if 'literal' not in packet:
+        for subpacket in packet['packets']:
+            version_sum += get_version_sum(subpacket)
+    return version_sum
+
+def evaluate_packet(packet):
+    match packet['type_id']:
+        case 0:
+            return sum(evaluate_packet(sub) for sub in packet['packets'])
+        case 1:
+            return math.prod(evaluate_packet(sub) for sub in packet['packets'])
+        case 2:
+            return min(evaluate_packet(sub) for sub in packet['packets'])
+        case 3:
+            return max(evaluate_packet(sub) for sub in packet['packets'])
+        case 4:
+            return packet['literal']
+        case 5:
+            return int(evaluate_packet(packet['packets'][0]) > evaluate_packet(packet['packets'][1]))
+        case 6:
+            return int(evaluate_packet(packet['packets'][0]) < evaluate_packet(packet['packets'][1]))
+        case 7:
+            return int(evaluate_packet(packet['packets'][0]) == evaluate_packet(packet['packets'][1]))
+
+parsed = parse_packet()
+# part 1
+print(get_version_sum(parsed))
+
+# part 2
+print(evaluate_packet(parsed))
diff --git a/day17.py b/day17.py
new file mode 100644
index 0000000..53881ea
--- /dev/null
+++ b/day17.py
@@ -0,0 +1,39 @@
+#!/usr/bin/env python
+
+# target area: 236 <= x <= 262, -78 <= y <= -58
+# i was originally going to do a brute force solution
+# since the problem implies that the displacement on the x axis is the Vx'th triangular number
+# which gives a lower bound on the number of steps to calculate the displacement on the y axis
+# but there's actually a really brilliant solution located at
+# https://github.com/prendradjaja/advent-of-code-2021/blob/main/17--trick-shot/a.py
+# which I am paraphrasing here
+
+yvel = 77
+print(sum(n for n in range(1, yvel+1)))
+
+
+# part 2
+xMin, xMax, yMin, yMax = 236, 262, -78, -58
+
+def is_hit(vel):
+    for pos in trajectory(vel):
+        if xMin <= pos[0] <= xMax and yMin <= pos[1] <= yMax:
+            return True
+    return False
+
+def trajectory(vel):
+    pos = (0,0)
+    while pos[0] <= xMax and pos[1] >= yMin:
+        yield pos
+        pos = (pos[0] + vel[0], pos[1] + vel[1])
+        vel = (
+            max(0, vel[0] - 1),
+            vel[1] - 1
+        )
+
+result = 0
+for xvel in range(0, xMax+1):
+    for yvel in range(yMin, -yMin):
+        if is_hit((xvel, yvel)):
+            result += 1
+print(result)
diff --git a/day18.py b/day18.py
new file mode 100644
index 0000000..7711309
--- /dev/null
+++ b/day18.py
@@ -0,0 +1,71 @@
+#!/usr/bin/env python
+
+def unpack(line, n=1):
+    '''flattens the number while also keeping track of level'''
+    if line == []:
+        return line
+    if isinstance(line[0], list):
+        return unpack(line[0], n+1) + unpack(line[1:], n)
+    return [[line[0], n]] + unpack(line[1:], n)
+
+with open('day18.txt') as data:
+    nums = []
+    for line in data:
+        num = unpack(eval(line))
+        nums.append(num)
+
+def explode(line):
+    for i in range(len(line)-1):
+        num1, level1 = line[i]
+        num2, level2 = line[i+1]
+        if level1 == level2 == 5:
+            line[i] = [0, 4]
+            if i != 0:
+                line[i-1][0] += num1
+            if i+2 != len(line):
+                line[i+2][0] += num2
+            line.pop(i+1)
+            return True
+    return False
+
+def split(line):
+    for i in range(len(line)):
+        num, level = line[i]
+        if num >= 10:
+            line[i:i+1] = [num//2, level+1], [num//2 + num%2, level+1]
+            return True
+    return False
+
+def reduce_num(line):
+    while True:
+        if explode(line):
+            continue
+        if not split(line):
+            break
+
+    return line
+
+def magnitude(num):
+    while len(num) > 1:
+        for i, ((num1, depth1), (num2, depth2)) in enumerate(zip(num, num[1:])):
+            if depth1 != depth2: continue
+            val = num1 * 3 + num2 * 2
+            num = num[:i] + [[val, depth1-1]] + num[i+2:]
+            break
+    return num[0][0]
+
+# part 1
+curr = nums[0]
+for line in nums[1:]:
+    summation = list(map(lambda x: [x[0], x[1] + 1], curr + line))
+    curr = reduce_num(summation)
+print(magnitude(curr))
+
+# part 2
+from itertools import permutations
+permute_nums = permutations(nums, 2)
+maximum = 0
+for perm in permute_nums:
+    summation = reduce_num(list(map(lambda x: [x[0], x[1]+1], perm[0] + perm[1])))
+    maximum = max(maximum, magnitude(summation))
+print(maximum)
diff --git a/day19.py b/day19.py
new file mode 100644
index 0000000..7e70d68
--- /dev/null
+++ b/day19.py
@@ -0,0 +1,137 @@
+from collections import Counter, defaultdict
+import numpy as np
+
+def fit_homography(P1, P2):
+    # p is a size (N, 3) set of 3d points
+    # X is a size (N, 4) set of 3d points
+
+    N, _ = P1.shape
+    M = np.zeros((N*3, 12))
+    b = np.zeros((N*3, 1))
+    for i, (p, y) in enumerate(zip(P1, P2)):
+        b[i*3:i*3+3, 0] = y
+
+        M[i*3, :3] = p
+        M[i*3 + 1, 3:6] = p
+        M[i*3 + 2, 6:9] = p
+
+        M[i*3: i*3 + 3, 9:] = np.eye(3)
+
+    x = np.linalg.inv(M.T @ M) @ M.T @ b
+
+    R = x[:9].reshape(3,3).round().astype(int)
+    t = x[9:].round().astype(int)
+    return R, t
+
+class SensorData:
+    def __init__(self, points: np.ndarray):
+        self.points = points
+        self.point_to_dists = defaultdict(set)
+
+        self.sensors = np.zeros((1,3))
+
+        # Get dists for points
+        self._find_dists()
+
+    def _find_dists(self):
+        self.point_to_dists = defaultdict(set)
+        for p1 in self.points:
+            for p2 in self.points:
+                if tuple(p1) == tuple(p2): continue
+                d = ((p1 - p2) ** 2).sum().item()
+                self.point_to_dists[tuple(p1)].add(d)
+                self.point_to_dists[tuple(p2)].add(d)
+
+    def add_points(self, other):
+        matches = self.match_points(other)
+
+        if matches is None or matches.shape[0] < 12:
+            return False
+
+        P1, P2 = self.points[matches[:,0], :], other.points[matches[:,1], :]
+
+        R, t = fit_homography(P2, P1)
+
+        transformed = other.points @ R.T + t.T
+
+        self.points = np.concatenate([self.points, transformed])
+        self.points = np.unique(self.points, axis=0)
+        # self._find_dists()
+        for new_point, old_point in zip(transformed, other.points):
+            self.point_to_dists[tuple(new_point)].update(other.point_to_dists[tuple(old_point)])
+
+        # Also map the sensor location to store list of sensors
+        self.sensors = np.concatenate([
+            self.sensors,
+            other.sensors @ R.T + t.T
+        ])
+
+        return True
+
+    def match_points(self, other):
+        paired = []
+        for j, p2 in enumerate(other.points):
+            for i, p1 in enumerate(self.points):
+                dists1 = self.point_to_dists[tuple(p1)]
+                dists2 = other.point_to_dists[tuple(p2)]
+                common = dists1.intersection(dists2)
+
+                if len(common) < 11: continue
+
+                paired.append((i, j))
+
+            if len(paired) == 12:
+                break
+
+        if not paired: return None
+
+        return np.stack(paired)
+
+    def max_dist(self):
+        best = 0
+        for s1 in self.sensors:
+            for s2 in self.sensors:
+                d = np.abs(s1 - s2).sum().item()
+
+                best = max(d, best)
+        return int(best)
+
+def parse_input(contents):
+    sensors = contents.split('\n\n')
+
+    output = []
+    for sensor in sensors:
+        output.append(
+            np.stack(
+                [list(map(int, line.split(',')))
+                 for line in sensor.splitlines()[1:]]
+            )
+        )
+
+    return output
+
+def part_1(sensors):
+    sensors = [SensorData(np.copy(n)) for n in sensors]
+
+    data = sensors[0]
+    to_pair = set(range(1, len(sensors)))
+    while to_pair:
+        again = set()
+        for i in to_pair:
+            if data.add_points(sensors[i]): continue
+            again.add(i)
+
+        to_pair = again
+
+    return data
+
+if __name__ == "__main__":
+    print('--- Part 1 ---')
+    with open('day19.txt') as f:
+        sensors = parse_input(f.read())
+
+    data = part_1(sensors)
+    print(data.points.shape[0])
+
+    print('--- Part 2 ---')
+    print(data.max_dist())
diff --git a/day20.py b/day20.py
new file mode 100644
index 0000000..74ca7fd
--- /dev/null
+++ b/day20.py
@@ -0,0 +1,19 @@
+#!/usr/bin/env python
+import numpy as np
+from scipy.ndimage import convolve
+
+with open('day20.txt') as data:
+    algo, image = data.read().split('\n\n')
+    algo = np.array(list(algo))
+    algo = (algo == '#').astype(int)
+    image = np.stack(list(map(list, image.strip().split('\n'))))
+    image = (image == '#').astype(int)
+    image = np.pad(image, (100, 100))
+
+weights = 2 ** np.arange(9).reshape(3,3)
+# part 1
+for i in range(50):
+    image = algo[convolve(image, weights)]
+    if i == 1:
+        print(image.sum())
+print(image.sum())
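
A quick sanity check on the day17 shortcut referenced in that file's comments (a sketch, not part of the commit): a probe launched with vertical velocity v peaks at the v-th triangular number and falls back through y = 0 with velocity -(v+1), so the largest v that still lands inside the target band is -yMin - 1 = 77 for this input.

# hypothetical check, not in the commit: reproduces what day17.py prints for part 1
yMin = -78                 # lower edge of the target area from the day17 comment
v = -yMin - 1              # 77: any faster and the step after y = 0 overshoots yMin
print(v * (v + 1) // 2)    # 3003, the v-th triangular number (maximum height)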