chore: create new project structure and aoc.py runner script

2025-08-04 16:23:06 +02:00
parent f76375d835
commit e2964c6c36
91 changed files with 177 additions and 113 deletions


@@ -0,0 +1,24 @@
#!/usr/bin/env python3
import itertools


def main(inp):
    # Part 1: sum of all frequency changes
    changes = [int(n) for n in inp]
    print(sum(changes))

    # Part 2: first frequency reached twice while cycling through the changes
    freq = 0
    seen = {0}
    for num in itertools.cycle(changes):
        freq += num
        if freq in seen:
            print(freq)
            break
        seen.add(freq)


if __name__ == '__main__':
    import sys
    infile = sys.argv[1] if len(sys.argv) > 1 else "example.txt"
    with open(infile) as inp:
        main(inp.readlines())
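
A quick sanity check against the example from the puzzle statement (illustrative only, assuming it is run in the same module as the script above): with the changes +1, -2, +3, +1 the total is 3 and the first frequency reached twice is 2.

example = ["+1", "-2", "+3", "+1"]
main(example)
# prints:
# 3    (Part 1: resulting frequency)
# 2    (Part 2: first frequency reached twice)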


@@ -0,0 +1,30 @@
from collections import Counter


def part2(ids):
    # find the two ids that differ in exactly one position
    for id_a in ids:
        for id_b in ids:
            common_letters = [a for a, b in zip(id_a, id_b) if a == b]
            if len(common_letters) == len(id_a) - 1:
                res = "".join(common_letters)
                print("Part 2: ", res)
                return


def main(lines):
    # Part 1: checksum = (ids with a letter appearing exactly twice)
    #                  * (ids with a letter appearing exactly three times)
    total_twice, total_thrice = 0, 0
    for line in lines:
        c = Counter(line)
        total_twice += any(v == 2 for v in c.values())
        total_thrice += any(v == 3 for v in c.values())
    print("Part 1: ", total_twice * total_thrice)
    part2(lines)


if __name__ == "__main__":
    import sys
    infile = sys.argv[1] if len(sys.argv) > 1 else "example.txt"
    with open(infile) as inp:
        main([l.rstrip() for l in inp.readlines()])
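
For reference, the part-1 example in the puzzle statement has four IDs with a doubled letter and three with a tripled letter, so the checksum is 4 * 3 = 12. The part-2 example can be checked against part2 directly (illustrative snippet, assuming the same module as the code above):

# "fghij" and "fguij" differ in exactly one position, so the shared letters are "fgij"
part2(["abcde", "fghij", "klmno", "pqrst", "fguij", "axcye", "wvxyz"])
# prints: Part 2:  fgij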


@@ -0,0 +1,55 @@
#!/usr/bin/env python3
import re
from dataclasses import dataclass
from collections import defaultdict


@dataclass
class Rectangle:
    x: int
    y: int
    width: int
    height: int

    @property
    def x2(self):
        return self.x + self.width

    @property
    def y2(self):
        return self.y + self.height


def parse_line(l):
    # claims look like "#1 @ 1,3: 4x4" -> id, x, y, width, height
    parsed = re.findall(r"\d+", l)
    id_, x, y, width, height = map(int, parsed)
    return id_, Rectangle(x, y, width, height)


def main(inp):
    # map every square inch to the set of claim ids covering it
    regions = defaultdict(set)
    for id_, region in map(parse_line, inp):
        for x in range(region.x, region.x2):
            for y in range(region.y, region.y2):
                regions[x, y].add(id_)

    # Part 1: square inches claimed by more than one id
    total = sum(len(x) > 1 for x in regions.values())
    print("Part 1: ", total)

    # Part 2: the one claim that never overlaps another
    all_ids = set()
    overlapping_ids = set()
    for region in regions.values():
        all_ids.update(region)
        if len(region) > 1:
            overlapping_ids.update(region)
    difference = all_ids - overlapping_ids
    print(f"Part 2: {difference.pop()}")


if __name__ == '__main__':
    import sys
    infile = sys.argv[1] if len(sys.argv) > 1 else "example.txt"
    with open(infile) as inp:
        main([l.rstrip() for l in inp.readlines()])
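
The three example claims from the puzzle make a quick check (illustrative, run alongside the script above): claims #1 and #2 overlap on a 2x2 square, and claim #3 touches nothing.

claims = ["#1 @ 1,3: 4x4", "#2 @ 3,1: 4x4", "#3 @ 5,5: 2x2"]
main(claims)
# prints: Part 1:  4
#         Part 2: 3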


@@ -0,0 +1,41 @@
#!/usr/bin/env python3
import re
from collections import defaultdict


def main(inp):
    # total minutes asleep per guard id
    guards = defaultdict(int)
    # minute -> guard id -> number of nights the guard was asleep during that minute
    minutes = defaultdict(lambda: defaultdict(int))

    # records sort chronologically because the timestamp leads every line
    for l in sorted(inp):
        minute = re.search(r":(\d+)", l).group(1)
        if "#" in l:
            current_id = re.search(r"#(\d+)", l).group(1)
        elif "asleep" in l:
            start = int(minute)
        elif "wakes" in l:
            end = int(minute)
            guards[current_id] += end - start
            for m in range(start, end):
                minutes[m][current_id] += 1

    # Part 1: find the guard with the most minutes asleep,
    # then the minute that guard spends asleep the most
    guard_id = max(guards.items(), key=lambda x: x[1])[0]
    minute = max([(k, v[guard_id]) for k, v in minutes.items() if guard_id in v], key=lambda x: x[1])[0]
    print("Part 1: ", int(guard_id) * minute)

    # Part 2: of all guards, which is most frequently asleep on the same minute?
    maxs = {m: max(minutes[m].items(), key=lambda x: x[1]) for m in minutes.keys()}
    minute, rest = max(maxs.items(), key=lambda x: x[1][1])
    id_, _ = rest
    print("Part 2: ", int(id_) * minute)


if __name__ == '__main__':
    import sys
    infile = sys.argv[1] if len(sys.argv) > 1 else "example.txt"
    with open(infile) as inp:
        main(inp.readlines())
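
The full example from the puzzle statement exercises both parts (illustrative only, assuming the same module as the script above): Guard #10 sleeps the most (50 minutes) and is asleep most often at minute 24, giving 10 * 24 = 240; Guard #99 is asleep at minute 45 on three separate nights, giving 99 * 45 = 4455.

records = [
    "[1518-11-01 00:00] Guard #10 begins shift",
    "[1518-11-01 00:05] falls asleep",
    "[1518-11-01 00:25] wakes up",
    "[1518-11-01 00:30] falls asleep",
    "[1518-11-01 00:55] wakes up",
    "[1518-11-01 23:58] Guard #99 begins shift",
    "[1518-11-02 00:40] falls asleep",
    "[1518-11-02 00:50] wakes up",
    "[1518-11-03 00:05] Guard #10 begins shift",
    "[1518-11-03 00:24] falls asleep",
    "[1518-11-03 00:29] wakes up",
    "[1518-11-04 00:02] Guard #99 begins shift",
    "[1518-11-04 00:36] falls asleep",
    "[1518-11-04 00:46] wakes up",
    "[1518-11-05 00:03] Guard #99 begins shift",
    "[1518-11-05 00:45] falls asleep",
    "[1518-11-05 00:55] wakes up",
]
main(records)
# prints: Part 1:  240
#         Part 2:  4455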


@@ -0,0 +1,25 @@
def reduce_polymer(p):
    # stack-based reduction: a unit cancels the unit below it if they are the
    # same letter with opposite case; the leading "" sentinel never matches
    res = [""]
    for x in p:
        if res[-1].swapcase() == x:
            res.pop()
        else:
            res.append(x)
    return "".join(res)


def main(inp):
    print("Part 1: ", len(reduce_polymer(inp)))

    # Part 2: remove each unit type entirely, reduce, and keep the shortest result
    letters = set(x.lower() for x in inp)
    min_ = 2 << 32
    for l in letters:
        p = inp.replace(l.lower(), "").replace(l.upper(), "")
        min_ = min(len(reduce_polymer(p)), min_)
    print("Part 2: ", min_)


if __name__ == "__main__":
    import fileinput
    inp = next(l.rstrip() for l in fileinput.input())
    main(inp)
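
Because reduce_polymer is a single stack-based scan, the puzzle's example polymer makes a compact check (illustrative, run in the same file as the code above): "dabAcCaCBAcCcaDA" reduces to "dabCBAcaDA" (10 units), and with the best unit type removed only "daDA" (4 units) remains.

print(reduce_polymer("dabAcCaCBAcCcaDA"))  # dabCBAcaDA
main("dabAcCaCBAcCcaDA")
# prints: Part 1:  10
#         Part 2:  4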


@@ -0,0 +1,59 @@
from collections import defaultdict, namedtuple

Point = namedtuple('Point', ['x', 'y'])


def dist(a, b):
    "manhattan distance"
    return abs(a.x - b.x) + abs(a.y - b.y)


def iter_grid(bounds):
    min_, max_ = bounds
    for x in range(min_.x, max_.x + 1):
        for y in range(min_.y, max_.y + 1):
            yield Point(x, y)


def is_edge(p, bounds):
    start, end = bounds
    return p.x == start.x or p.x == end.x or p.y == start.y or p.y == end.y


def main(inp):
    grid = [Point(*map(int, l.split(", "))) for l in inp]
    # bounding box of all coordinates; any region touching its edge is infinite
    bounds = (Point(min(p.x for p in grid), min(p.y for p in grid)),
              Point(max(p.x for p in grid), max(p.y for p in grid)))

    areas = defaultdict(int)
    infinite_regions = set()
    for p in iter_grid(bounds):
        # find dist to every coordinate of the grid
        distances = sorted((dist(p2, p), i) for i, p2 in enumerate(grid))
        # equally far from two or more coordinates, don't count
        if distances[0][0] == distances[1][0]:
            continue
        _, index = distances[0]
        areas[index] += 1
        if is_edge(p, bounds):
            infinite_regions.add(index)

    # remove all infinite regions by index
    for i in infinite_regions:
        del areas[i]
    print("Part 1: ", max(areas.values()))

    # Part 2: points whose total distance to all coordinates is below the
    # puzzle's threshold of 10,000
    count = 0
    for cur in iter_grid(bounds):
        s = sum(dist(cur, p) for p in grid)
        if s < 10_000:
            count += 1
    print("Part 2: ", count)


if __name__ == "__main__":
    import fileinput
    inp = list(l.rstrip() for l in fileinput.input())
    main(inp)
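
The six example coordinates from the puzzle give a largest finite area of 17 (illustrative snippet, same file as the code above). Note that the 10,000 cutoff in Part 2 is the real-input threshold; the puzzle's example uses 32 (answer 16), so the Part 2 number printed here is not meaningful.

coords = ["1, 1", "1, 6", "8, 3", "3, 4", "5, 5", "8, 9"]
main(coords)
# prints: Part 1:  17
# (Part 2 here just counts every point in the bounding box because of the 10,000 cutoff)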


@@ -0,0 +1,88 @@
#!/usr/bin/env python3
import re
from bisect import insort
from collections import defaultdict


def topological_sort(graph, reverse_deps):
    # find starting nodes: those with no incoming edges (indegree 0)
    queue = sorted([task for task in graph.keys() if reverse_deps[task] == set()])
    order = []
    seen = set()
    while queue != []:
        current = queue.pop(0)
        if current not in seen:
            seen.add(current)
            order.append(current)
            # add dependent tasks whose prerequisites are all visited,
            # keeping the queue in alphabetical order
            for d in graph[current]:
                if all(x in order for x in reverse_deps[d]):
                    insort(queue, d)
    return order


def main(inp):
    dependencies = defaultdict(set)
    reverse_deps = defaultdict(set)
    for l in inp:
        first, second = re.findall(r"[sS]tep (\w)", l)
        dependencies[first].add(second)
        reverse_deps[second].add(first)

    order = topological_sort(dependencies, reverse_deps)
    print("Part 1: ", "".join(order))

    # Part 2: simulate 5 workers; each task takes 60 seconds plus its letter value
    done = []
    doing = dict()  # worker index -> (task, start step)
    workers = 5
    step = 0
    number_of_tasks = len(order)
    while len(done) != number_of_tasks:
        assert len(doing) <= workers
        for i in range(workers):
            # check if the worker has a pending task
            if i in doing:
                task = doing[i]
                if is_task_done(task, step):
                    #print(f"{step}: worker #{i}, task {task} done")
                    del doing[i]
                    done.append(task[0])
                else:
                    continue
            next_task = get_task(dependencies, reverse_deps, done, doing)
            if next_task is not None:
                #print(f"{step}: worker #{i}, starting task {next_task}")
                doing[i] = (next_task, step)
            #print(f"{step}: {doing} {done}")
            if len(done) == number_of_tasks:
                break
        step += 1
        # progress trace: current second and the task each busy worker is on
        in_progress = "\t".join(x[0] for x in doing.values())
        print(f"{step}\t{in_progress}")
    print("Part 2: ", step)


def get_task(graph, reverse_deps, done, doing):
    # tasks whose prerequisites are all finished, in alphabetical order
    queue = sorted([task for task in graph.keys() if all(x in done for x in reverse_deps[task])])
    doing_tasks = [x[0] for x in doing.values()]
    for t in queue:
        if t not in done and t not in doing_tasks:
            return t
    return None


def is_task_done(task, step):
    letter, start_t = task
    # durations: A takes 61 seconds, B takes 62, ...
    duration = ord(letter) - ord("A") + 61
    return step - start_t >= duration


if __name__ == '__main__':
    import sys
    infile = sys.argv[1] if len(sys.argv) > 1 else "example.txt"
    with open(infile) as inp:
        main([l.rstrip() for l in inp.readlines()])
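
The dependency list from the puzzle example is a quick check for Part 1 (illustrative, assuming the same module as the script above): the alphabetical topological order is CABDFE. Part 2 as written uses the real-input parameters (5 workers, 60-second base duration), so its result for this example will differ from the puzzle's 15-second illustration, which assumes 2 workers and no base duration.

steps = [
    "Step C must be finished before step A can begin.",
    "Step C must be finished before step F can begin.",
    "Step A must be finished before step B can begin.",
    "Step A must be finished before step D can begin.",
    "Step A must be finished before step E can begin.",
    "Step B must be finished before step E can begin.",
    "Step D must be finished before step E can begin.",
    "Step F must be finished before step E can begin.",
]
main(steps)
# prints: Part 1:  CABDFE
# (followed by the per-second worker trace and the Part 2 step count)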