Day 5: Print Queue

Megathread guidelines

  • Keep top-level comments to solutions only; if you want to say something other than a solution, put it in a new post (replies to comments can be whatever).
  • You can post code in code blocks by using three backticks, the code, and then three backticks, or use something such as https://topaz.github.io/paste/ if you prefer sending it through a URL.

FAQ

  • janAkali@lemmy.one

    Nim

    Solution: sort the numbers using the custom ordering rules and check whether sorted == original. Part 2 is trivial.
    Runtime for both parts: 1.05 ms

    import std/[strutils, sequtils, tables, algorithm] # stdlib modules used below; AOCSolution comes from the linked repo

    proc parseRules(input: string): Table[int, seq[int]] =
      for line in input.splitLines():
        let pair = line.split('|')
        let (a, b) = (pair[0].parseInt, pair[1].parseInt)
        discard result.hasKeyOrPut(a, newSeq[int]())
        result[a].add b
    
    proc solve(input: string): AOCSolution[int, int] =
      let chunks = input.split("\n\n")
      let later = parseRules(chunks[0])
      for line in chunks[1].splitLines():
        let numbers = line.split(',').map(parseInt)
        let sorted = numbers.sorted(cmp =
          proc(a,b: int): int =
            if a in later and b in later[a]: -1
            elif b in later and a in later[b]: 1
            else: 0
        )
        if numbers == sorted:
          result.part1 += numbers[numbers.len div 2]
        else:
          result.part2 += sorted[sorted.len div 2]
    

    Codeberg repo

  • hades@lemm.ee

    C#

    using QuickGraph;
    using QuickGraph.Algorithms.TopologicalSort;
    public class Day05 : Solver
    {
      private List<int[]> updates;
      private List<int[]> updates_ordered;
    
      public void Presolve(string input) {
        var blocks = input.Trim().Split("\n\n");
        List<(int, int)> rules = new();
        foreach (var line in blocks[0].Split("\n")) {
          var pair = line.Split('|');
          rules.Add((int.Parse(pair[0]), int.Parse(pair[1])));
        }
        updates = new();
        updates_ordered = new();
        foreach (var line in input.Trim().Split("\n\n")[1].Split("\n")) {
          var update = line.Split(',').Select(int.Parse).ToArray();
          updates.Add(update);
    
          var graph = new AdjacencyGraph<int, Edge<int>>();
          graph.AddVertexRange(update);
          graph.AddEdgeRange(rules
            .Where(rule => update.Contains(rule.Item1) && update.Contains(rule.Item2))
            .Select(rule => new Edge<int>(rule.Item1, rule.Item2)));
          List<int> ordered_update = [];
          new TopologicalSortAlgorithm<int, Edge<int>>(graph).Compute(ordered_update);
          updates_ordered.Add(ordered_update.ToArray());
        }
      }
    
      public string SolveFirst() => updates.Zip(updates_ordered)
        .Where(unordered_ordered => unordered_ordered.First.SequenceEqual(unordered_ordered.Second))
        .Select(unordered_ordered => unordered_ordered.First)
        .Select(update => update[update.Length / 2])
        .Sum().ToString();
    
      public string SolveSecond() => updates.Zip(updates_ordered)
        .Where(unordered_ordered => !unordered_ordered.First.SequenceEqual(unordered_ordered.Second))
        .Select(unordered_ordered => unordered_ordered.Second)
        .Select(update => update[update.Length / 2])
        .Sum().ToString();
    }
    
      • hades@lemm.ee

        You’ll need to sort them anyway :)

        (my first version of the first part only checked the order, without sorting).
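
        A minimal sketch of that check-only idea in Python (illustrative helper, not from this solution; it assumes the rules have been parsed into a set of (before, after) pairs):

        def is_ordered(update, rules):
            # rules: set of (before, after) tuples from the "a|b" lines (assumed shape)
            for i, earlier in enumerate(update):
                for later in update[i + 1:]:
                    if (later, earlier) in rules:  # some rule wants 'later' first
                        return False
            return True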

  • Andy@programming.dev

    Factor

    : get-input ( -- rules updates )
      "vocab:aoc-2024/05/input.txt" utf8 file-lines
      { "" } split1
      "|" "," [ '[ [ _ split ] map ] ] bi@ bi* ;
    
    : relevant-rules ( rules update -- rules' )
      '[ [ _ in? ] all? ] filter ;
    
    : compliant? ( rules update -- ? )
      [ relevant-rules ] keep-under
      [ [ index* ] with map first2 < ] with all? ;
    
    : middle-number ( update -- n )
      dup length 2 /i nth-of string>number ;
    
    : part1 ( -- n )
      get-input
      [ compliant? ] with
      [ middle-number ] filter-map sum ;
    
    : compare-pages ( rules page1 page2 -- <=> )
      [ 2array relevant-rules ] keep-under
      [ drop +eq+ ] [ first index zero? +gt+ +lt+ ? ] if-empty ;
    
    : correct-update ( rules update -- update' )
      [ swapd compare-pages ] with sort-with ;
    
    : part2 ( -- n )
      get-input dupd
      [ compliant? ] with reject
      [ correct-update middle-number ] with map-sum ;
    

    on GitHub

  • proved_unglue@programming.dev

    Kotlin

    Took me a while to figure out how to sort according to the rules. 🤯

    fun part1(input: String): Int {
        val (rules, listOfNumbers) = parse(input)
        return listOfNumbers
            .filter { numbers -> numbers == sort(numbers, rules) }
            .sumOf { numbers -> numbers[numbers.size / 2] }
    }
    
    fun part2(input: String): Int {
        val (rules, listOfNumbers) = parse(input)
        return listOfNumbers
            .filterNot { numbers -> numbers == sort(numbers, rules) }
            .map { numbers -> sort(numbers, rules) }
            .sumOf { numbers -> numbers[numbers.size / 2] }
    }
    
    private fun sort(numbers: List<Int>, rules: List<Pair<Int, Int>>): List<Int> {
        return numbers.sortedWith { a, b -> if (rules.contains(a to b)) -1 else 1 }
    }
    
    private fun parse(input: String): Pair<List<Pair<Int, Int>>, List<List<Int>>> {
        val (rulesSection, numbersSection) = input.split("\n\n")
        val rules = rulesSection.lines()
            .mapNotNull { line -> """(\d{2})\|(\d{2})""".toRegex().matchEntire(line) }
            .map { match -> match.groups[1]?.value?.toInt()!! to match.groups[2]?.value?.toInt()!! }
        val numbers = numbersSection.lines().map { line -> line.split(',').map { it.toInt() } }
        return rules to numbers
    }
    
      • proved_unglue@programming.dev

        I guess adding type aliases and removing the regex from the parser makes it a bit more readable.

        typealias Rule = Pair<Int, Int>
        typealias PageNumbers = List<Int>
        
        fun part1(input: String): Int {
            val (rules, listOfNumbers) = parse(input)
            return listOfNumbers
                .filter { numbers -> numbers == sort(numbers, rules) }
                .sumOf { numbers -> numbers[numbers.size / 2] }
        }
        
        fun part2(input: String): Int {
            val (rules, listOfNumbers) = parse(input)
            return listOfNumbers
                .filterNot { numbers -> numbers == sort(numbers, rules) }
                .map { numbers -> sort(numbers, rules) }
                .sumOf { numbers -> numbers[numbers.size / 2] }
        }
        
        private fun sort(numbers: PageNumbers, rules: List<Rule>): PageNumbers {
            return numbers.sortedWith { a, b -> if (rules.contains(a to b)) -1 else 1 }
        }
        
        private fun parse(input: String): Pair<List<Rule>, List<PageNumbers>> {
            val (rulesSection, numbersSection) = input.split("\n\n")
            val rules = rulesSection.lines()
                .mapNotNull { line ->
                    val parts = line.split('|').map { it.toInt() }
                    if (parts.size >= 2) parts[0] to parts[1] else null
                }
            val numbers = numbersSection.lines()
                .map { line -> line.split(',').map { it.toInt() } }
            return rules to numbers
        }
        
  • mykl@lemmy.world

    Dart

    A bit easier than I first thought it was going to be.

    I had a look at the Uiua discussion, and this one looks to be beyond my pay grade, so this will be it for today.

    import 'package:collection/collection.dart';
    import 'package:more/more.dart';
    
    (int, int) solve(List<String> lines) {
      var parts = lines.splitAfter((e) => e == '');
      var pred = SetMultimap.fromEntries(parts.first.skipLast(1).map((e) {
        var ps = e.split('|').map(int.parse);
        return MapEntry(ps.last, ps.first);
      }));
      ordering(a, b) => pred[a].contains(b) ? 1 : 0;
    
      var pageSets = parts.last.map((e) => e.split(',').map(int.parse).toList());
      var partn = pageSets.partition((ps) => ps.isSorted(ordering));
      return (
        partn.truthy.map((e) => e[e.length ~/ 2]).sum,
        partn.falsey.map((e) => (e..sort(ordering))[e.length ~/ 2]).sum
      );
    }
    
    part1(List<String> lines) => solve(lines).$1;
    part2(List<String> lines) => solve(lines).$2;
    
  • mykl@lemmy.world

    Uiua

    Well it’s still today here, and this is how I spent my evening. It’s not pretty or maybe even good, but it works on the test data…

    spoiler

    Uses Kahn’s algorithm with simplifying assumptions based on the helpful nature of the data.

    Try it here

    Data  ()⊸≠@\n "47|53\n97|13\n97|61\n97|47\n75|29\n61|13\n75|53\n29|13\n97|29\n53|29\n61|53\n97|53\n61|29\n47|13\n75|47\n97|75\n47|61\n75|61\n47|29\n75|13\n53|13\n\n75,47,61,53,29\n97,61,53,29,13\n75,29,13\n75,97,47,61,53\n61,13,29\n97,13,75,29,47"
    Rs    ≡◇(⊜⋕⊸≠@|)▽⊸≡◇(⧻⊚⌕@|)Data
    Ps    ≡⍚(⊜⋕⊸≠@,)▽⊸≡◇(¬⧻⊚⌕@|)Data
    
    NoPred   ⊢▽:((=0/+⌕)⊙¤)◴♭⟜≡⊣                # Find entry without predecessors.
    GetLead  (:((¬/+=))⊙¤)NoPred             # Remove that leading entry.
    Rules    ⇌⊂⊃(⇌⊢°□⊢|≡°□↘1)[□⍢(GetLead|≠1)] Rs # Repeatedly find rule without predecessors (Kaaaaaahn!).
    
    Sorted    ⊏⍏⊗,Rules
    IsSorted  /×>0≡/-◫2⊗°□: Rules
    MidVal    :(⌊÷ 2)
    
    ⇌⊕□⊸≡IsSorted Ps        # Group by whether the pages are in sort order.
    ≡◇(/+≡◇(MidVal Sorted)) # Find midpoints and sum.
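
    For anyone who doesn't read Uiua, here is a rough Python sketch of the Kahn's-algorithm idea described above (illustrative names only; it assumes the rules are a set of (before, after) pairs and, like the solution, relies on the data being well-behaved):

    from collections import defaultdict

    def kahn_order(pages, rules):
        # keep only rule edges between pages of this update (assumed input shape)
        present = set(pages)
        preds = defaultdict(set)
        for a, b in rules:
            if a in present and b in present:
                preds[b].add(a)
        order, remaining = [], set(present)
        while remaining:
            # repeatedly take a page with no remaining predecessors
            ready = next(p for p in remaining if not (preds[p] & remaining))
            order.append(ready)
            remaining.remove(ready)
        return order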
    
    
      • mykl@lemmy.world

        Ah, but the terseness of the code allows the beauty of the underlying algorithm to shine through :-)

    • mykl@lemmy.world

      Oh my. I just watched yernab’s video, and this becomes so much easier:

      # Order is totally specified, so sort by number of predecessors,
      # check to see which were already sorted, then group and sum each group.
      Data  (□⊜□⊸≠@\n)(¬⦷"\n\n")"47|53\n97|13\n97|61\n97|47\n75|29\n61|13\n75|53\n29|13\n97|29\n53|29\n61|53\n97|53\n61|29\n47|13\n75|47\n97|75\n47|61\n75|61\n47|29\n75|13\n53|13\n\n75,47,61,53,29\n97,61,53,29,13\n75,29,13\n75,97,47,61,53\n61,13,29\n97,13,75,29,47"
      Rs    ≡◇(⊜⋕⊸≠@|)°□⊢Data
      Ps    ≡⍚(⊜⋕⊸≠@,)°□⊣Data
      (/+≡◇(⊡⌊÷2⧻.))¬≡≍⟜:≡⍚(⊏⍏/+⊞(Rs)..).Ps
      
  • lwhjp@lemmy.sdf.org

    Haskell

    Part two was actually much easier than I thought it would be!

    import Control.Arrow
    import Data.Bool
    import Data.List
    import Data.List.Split
    import Data.Maybe
    
    readInput :: String -> ([(Int, Int)], [[Int]])
    readInput = (readRules *** readUpdates . tail) . break null . lines
      where
        readRules = map $ (read *** read . tail) . break (== '|')
        readUpdates = map $ map read . splitOn ","
    
    mid = (!!) <*> ((`div` 2) . length)
    
    isSortedBy rules = (`all` rules) . match
      where
        match ps (x, y) = fromMaybe True $ (<) <$> elemIndex x ps <*> elemIndex y ps
    
    pageOrder rules = curry $ bool GT LT . (`elem` rules)
    
    main = do
      (rules, updates) <- readInput <$> readFile "input05"
      let (part1, part2) = partition (isSortedBy rules) updates
      mapM_ (print . sum . map mid) [part1, sortBy (pageOrder rules) <$> part2]
    
  • Gobbel2000@programming.dev

    Rust

    While part 1 was pretty quick, part 2 took me a while to figure something out. I figured that the relation would probably be a total ordering, and obtained the actual order using topological sorting. But it turns out the relation has cycles, so the topological sort must be limited to the elements that actually occur in the lists.
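
    A minimal Python sketch of that restriction (illustrative names, not the code below): drop every rule whose endpoints aren't both in the current update before building the graph, so the induced subgraph is acyclic even though the full rule set isn't.

    def restrict_rules(rules, update):
        # rules: iterable of (before, after) pairs (assumed shape)
        present = set(update)
        return [(a, b) for a, b in rules if a in present and b in present]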

    Solution
    use std::collections::{HashSet, HashMap, VecDeque};
    
    fn parse_lists(input: &str) -> Vec<Vec<u32>> {
        input.lines()
            .map(|l| l.split(',').map(|e| e.parse().unwrap()).collect())
            .collect()
    }
    
    fn parse_relation(input: String) -> (HashSet<(u32, u32)>, Vec<Vec<u32>>) {
        let (ordering, lists) = input.split_once("\n\n").unwrap();
        let relation = ordering.lines()
            .map(|l| {
                let (a, b) = l.split_once('|').unwrap();
                (a.parse().unwrap(), b.parse().unwrap())
            })
            .collect();
        (relation, parse_lists(lists))
    }
    
    fn parse_graph(input: String) -> (Vec<Vec<u32>>, Vec<Vec<u32>>) {
        let (ordering, lists) = input.split_once("\n\n").unwrap();
        let mut graph = Vec::new();
        for l in ordering.lines() {
            let (a, b) = l.split_once('|').unwrap();
            let v: u32 = a.parse().unwrap();
            let w: u32 = b.parse().unwrap();
            let new_len = v.max(w) as usize + 1;
            if new_len > graph.len() {
                graph.resize(new_len, Vec::new())
            }
            graph[v as usize].push(w);
        }
        (graph, parse_lists(lists))
    }
    
    
    fn part1(input: String) {
        let (relation, lists) = parse_relation(input); 
        let mut sum = 0;
        for l in lists {
            let mut valid = true;
            for i in 0..l.len() {
                for j in 0..i {
                    if relation.contains(&(l[i], l[j])) {
                        valid = false;
                        break
                    }
                }
                if !valid { break }
            }
            if valid {
                sum += l[l.len() / 2];
            }
        }
        println!("{sum}");
    }
    
    
    // Topological order of graph, but limited to nodes in the set `subgraph`.
    // Otherwise the graph is not acyclic.
    fn topological_sort(graph: &[Vec<u32>], subgraph: &HashSet<u32>) -> Vec<u32> {
        let mut order = VecDeque::with_capacity(subgraph.len());
        let mut marked = vec![false; graph.len()];
        for &v in subgraph {
            if !marked[v as usize] {
                dfs(graph, subgraph, v as usize, &mut marked, &mut order)
            }
        }
        order.into()
    }
    
    fn dfs(graph: &[Vec<u32>], subgraph: &HashSet<u32>, v: usize, marked: &mut [bool], order: &mut VecDeque<u32>) {
        marked[v] = true;
        for &w in graph[v].iter().filter(|v| subgraph.contains(v)) {
            if !marked[w as usize] {
                dfs(graph, subgraph, w as usize, marked, order);
            }
        }
        order.push_front(v as u32);
    }
    
    fn rank(order: &[u32]) -> HashMap<u32, u32> {
        order.iter().enumerate().map(|(i, x)| (*x, i as u32)).collect()
    }
    
    // Part 1 with topological sorting, which is slower
    fn _part1(input: String) {
        let (graph, lists) = parse_graph(input);
        let mut sum = 0;
        for l in lists {
            let subgraph = HashSet::from_iter(l.iter().copied());
            let rank = rank(&topological_sort(&graph, &subgraph));
            if l.is_sorted_by_key(|x| rank[x]) {
                sum += l[l.len() / 2];
            }
        }
        println!("{sum}");
    }
    
    fn part2(input: String) {
        let (graph, lists) = parse_graph(input);
        let mut sum = 0;
        for mut l in lists {
            let subgraph = HashSet::from_iter(l.iter().copied());
            let rank = rank(&topological_sort(&graph, &subgraph));
            if !l.is_sorted_by_key(|x| rank[x]) {
                l.sort_unstable_by_key(|x| rank[x]);            
                sum += l[l.len() / 2];
            }
        }
        println!("{sum}");
    }
    
    util::aoc_main!();
    

    also on github

  • ystael@beehaw.org

    J

    This is a problem where J’s biases lead one to a very different solution from most of the others. The natural representation of a directed graph in J is an adjacency matrix, and sorting is specified in terms of a permutation to apply rather than in terms of a comparator: x /: y (respectively x \: y) determines the permutation that would put y in ascending (descending) order, then applies that permutation to x.
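
    A rough Python rendering of that permutation style, for readers who don't know J (illustrative only): grade up computes the permutation that sorts y, and x /: y applies that permutation to x.

    def grade_up(y):
        # indices that would put y in ascending order (J's /: y)
        return sorted(range(len(y)), key=lambda i: y[i])

    x = ['a', 'b', 'c', 'd']
    y = [30, 10, 40, 20]
    perm = grade_up(y)           # [1, 3, 0, 2]
    print([x[i] for i in perm])  # ['b', 'd', 'a', 'c'], i.e. x /: y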

    data_file_name =: '5.data'
    lines =: cutopen fread data_file_name
    NB. manuals start with the first line where the index of a comma is < 5
    start_of_manuals =: 1 i.~ 5 > ',' i.~"1 > lines
    NB. ". can't parse the | so replace it with a space
    edges =: ". (' ' & (2}))"1 > start_of_manuals {. lines
    NB. don't unbox and parse yet because they aren't all the same length
    manuals =: start_of_manuals }. lines
    max_page =: >./ , edges
    NB. adjacency matrix of the page partial ordering; e.i. makes identity matrix
    adjacency =: 1 (< edges)} e. i. >: max_page
    NB. ordered line is true if line is ordered according to the adjacency matrix
    ordered =: monad define
       pages =. ". > y
       NB. index pairs 0 <: i < j < n; box and raze to avoid array fill
       page_pairs =. ; (< @: (,~"0 i.)"0) i. # pages
       */ adjacency {~ <"1 pages {~ page_pairs
    )
    midpoint =: ({~ (<. @: -: @: #)) @: ". @: >
    result1 =: +/ (ordered"0 * midpoint"0) manuals
    
    NB. toposort line yields the pages of line topologically sorted by adjacency
    NB. this is *not* a general topological sort but works for our restricted case:
    NB. we know that each individual manual will be totally ordered
    toposort =: monad define
       pages =. ". > y
       NB. for each page, count the pages which come after it, then sort descending
       pages \: +/"1 adjacency {~ <"1 pages ,"0/ pages
    )
    NB. midpoint2 doesn't parse, but does remove trailing zeroes
    midpoint2 =: ({~ (<. @: -: @: #)) @: ({.~ (i. & 0))
    result2 =: +/ (1 - ordered"0 manuals) * midpoint2"1 toposort"0 manuals
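
    The successor-counting trick in toposort above, sketched in Python for comparison (illustrative names; it assumes, as the comment notes, that the rules totally order the pages of a single manual):

    def sort_by_successor_count(pages, rules):
        # rules: iterable of (before, after) pairs (assumed shape);
        # the page that must come first has the most successors among these pages
        present = set(pages)
        def successors(p):
            return sum(1 for a, b in rules if a == p and b in present)
        return sorted(pages, key=successors, reverse=True)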
    
  • Sparrow_1029@programming.dev

    Rust

    Real thinker. Messed around with a couple solutions before this one. The gist is to take all the pairwise comparisons given and record them for easy access in a ranking matrix.

    For the sample input, this grid would look like this (I left out all the non-present integers, but it would be a 98 x 98 grid where all the empty spaces are filled with Ordering::Equal):

       13 29 47 53 61 75 97
    13  =  >  >  >  >  >  >
    29  <  =  >  >  >  >  >
    47  <  <  =  <  <  >  >
    53  <  <  >  =  >  >  >
    61  <  <  >  <  =  >  >
    75  <  <  <  <  <  =  >
    97  <  <  <  <  <  <  =
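
    A minimal Python sketch of the same lookup idea, using a dict keyed by page pairs instead of a grid (illustrative names, not the code below):

    from functools import cmp_to_key

    def build_key(pairs):
        # pairs: the (before, after) rules; unknown pairs compare as equal,
        # mirroring Ordering::Equal in the grid above
        order = {}
        for a, b in pairs:
            order[(a, b)] = -1
            order[(b, a)] = 1
        return cmp_to_key(lambda x, y: order.get((x, y), 0))

    # usage sketch: sorted(update, key=build_key(pairs))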
    

    I discovered this can’t be used for a total order on the actual puzzle input because there were cycles in the pairs given (see how Rust changed its sort implementations as of 1.81). I used usize for convenience (I did it with u8 for all the pair values originally, but kept having to cast over and over as usize). Didn’t notice a performance difference, but I’m sure it uses a bit more memory.

    Also I liked the simple_grid crate a little better than the grid one. Will have to refactor that out at some point.

    solution
    use std::{cmp::Ordering, fs::read_to_string};
    
    use simple_grid::Grid;
    
    type Idx = (usize, usize);
    type Matrix = Grid<Ordering>;
    type Page = Vec<usize>;
    
    fn parse_input(input: &str) -> (Vec<Idx>, Vec<Page>) {
        let split: Vec<&str> = input.split("\n\n").collect();
        let (pair_str, page_str) = (split[0], split[1]);
        let pairs = parse_pairs(pair_str);
        let pages = parse_pages(page_str);
        (pairs, pages)
    }
    
    fn parse_pairs(input: &str) -> Vec<Idx> {
        input
            .lines()
            .map(|l| {
                let (a, b) = l.split_once('|').unwrap();
                (a.parse().unwrap(), b.parse().unwrap())
            })
            .collect()
    }
    
    fn parse_pages(input: &str) -> Vec<Page> {
        input
            .lines()
            .map(|l| -> Page {
                l.split(",")
                    .map(|d| d.parse::<usize>().expect("invalid digit"))
                    .collect()
            })
            .collect()
    }
    
    fn create_matrix(pairs: &[Idx]) -> Matrix {
        let max = *pairs
            .iter()
            .flat_map(|(a, b)| [a, b])
            .max()
            .expect("iterator is non-empty")
            + 1;
        let mut matrix = Grid::new(max, max, vec![Ordering::Equal; max * max]);
        for (a, b) in pairs {
            matrix.replace_cell((*a, *b), Ordering::Less);
            matrix.replace_cell((*b, *a), Ordering::Greater);
        }
        matrix
    }
    
    fn valid_pages(pages: &[Page], matrix: &Matrix) -> usize {
        pages
            .iter()
            .filter_map(|p| {
                if check_order(p, matrix) {
                    Some(p[p.len() / 2])
                } else {
                    None
                }
            })
            .sum()
    }
    
    fn fix_invalid_pages(pages: &mut [Page], matrix: &Matrix) -> usize {
        pages
            .iter_mut()
            .filter(|p| !check_order(p, matrix))
            .map(|v| {
                v.sort_by(|a, b| *matrix.get((*a, *b)).unwrap());
                v[v.len() / 2]
            })
            .sum()
    }
    
    fn check_order(page: &[usize], matrix: &Matrix) -> bool {
        page.is_sorted_by(|a, b| *matrix.get((*a, *b)).unwrap() == Ordering::Less)
    }
    
    pub fn solve() {
        let input = read_to_string("inputs/day05.txt").expect("read file");
        let (pairs, mut pages) = parse_input(&input);
        let matrix = create_matrix(&pairs);
        println!("Part 1: {}", valid_pages(&pages, &matrix));
        println!("Part 2: {}", fix_invalid_pages(&mut pages, &matrix));
    }
    

    On github

    *Edit: I did try switching to just using std::collections::HashMap, but it was 0.1 ms slower on average than using the simple_grid::Grid. Vec[idx] access is faster, maybe?

  • Zarlin@lemmy.world

    Nim

    import ../aoc, strutils, sequtils, tables
    
    type
      Rules = ref Table[int, seq[int]]
    
    #check if an update sequence is valid
    proc valid(update:seq[int], rules:Rules):bool =
      for pi, p in update:
        for r in rules.getOrDefault(p):
          let ri = update.find(r)
          if ri != -1 and ri < pi:
            return false
      return true
    
    proc backtrack(p:int, index:int, update:seq[int], rules: Rules, sorted: var seq[int]):bool =
      if index == 0:
        sorted[index] = p
        return true
      
      for r in rules.getOrDefault(p):
        if r in update and r.backtrack(index-1, update, rules, sorted):
          sorted[index] = p
          return true
      
      return false
    
    #fix an invalid sequence
    proc fix(update:seq[int], rules: Rules):seq[int] =
      echo "fixing", update
      var sorted = newSeqWith(update.len, 0);
      for p in update:
        if p.backtrack(update.len-1, update, rules, sorted):
          return sorted
      return @[]
    
    proc solve*(input:string): array[2,int] =
      let parts = input.split("\r\n\r\n");
      
      let rulePairs = parts[0].splitLines.mapIt(it.strip.split('|').map(parseInt))
      let updates = parts[1].splitLines.mapIt(it.split(',').map(parseInt))
      
      # fill rules table
      var rules = new Rules
      for rp in rulePairs:
        if rules.hasKey(rp[0]):
          rules[rp[0]].add rp[1];
        else:
          rules[rp[0]] = @[rp[1]]
          
      # fill reverse rules table
      var backRules = new Rules
      for rp in rulePairs:
        if backRules.hasKey(rp[1]):
          backRules[rp[1]].add rp[0];
        else:
          backRules[rp[1]] = @[rp[0]]
      
      for u in updates:
        if u.valid(rules):
          result[0] += u[u.len div 2]
        else:
          let uf = u.fix(backRules)
          result[1] += uf[uf.len div 2]
    

    I thought of doing a sort at first, but dismissed it for some reason, so I came up with this slow and bulky recursive backtracking thing which traverses the rules as a graph until it reaches a depth equal to the length of the given sequence. Not my finest work, but it does solve the puzzle :)

  • VegOwOtenks@lemmy.world

    Haskell

    It’s more complicated than it needs to be; I could’ve done the first part just like the second.
    Also, it takes one second (!) to run .-.

    import Data.Maybe as Maybe
    import Data.List as List
    import Control.Arrow hiding (first, second)
    
    parseRule :: String -> (Int, Int)
    parseRule s = (read . take 2 &&& read . drop 3) s
    
    replace t r c = if t == c then r else c
    
    parse :: String -> ([(Int, Int)], [[Int]])
    parse s = (map parseRule rules, map (map read . words) updates)
            where
                    rules = takeWhile (/= "") . lines $ s
                    updates = init . map (map (replace ',' ' ')) . drop 1 . dropWhile (/= "") . lines $ s
    
    validRule (pairLeft, pairRight) (ruleLeft, ruleRight)
            | pairLeft == ruleRight && pairRight == ruleLeft = False
            | otherwise = True
    
    validatePair rs p = all (validRule p) rs
    
    validateUpdate rs u = all (validatePair rs) pairs
            where 
                    pairs = List.concatMap (\ t -> map (head t, ) (tail t)) . filter (length >>> (> 1)) . tails $ u
    
    middleElement :: [a] -> a
    middleElement us = (us !!) $ (length us `div` 2)
    
    part1 (rs, us) = sum . map (middleElement) . filter (validateUpdate rs) $ us
    
    insertOrderly rs i is = insertOrderly' frontRules i is
            where
                    frontRules = filter (((== i) . fst)) rs
    
    insertOrderly' _  i [] = [i]
    insertOrderly' rs i (i':is)
            | any (snd >>> (== i')) rs = i : i' : is
            | otherwise = i' : insertOrderly' rs i is
    
    part2 (rs, us) = sum . map middleElement . Maybe.mapMaybe ((orderUpdate &&& id) >>> \ p -> if (fst p /= snd p) then Just $ fst p else Nothing) $ us
            where
                    orderUpdate = foldr (insertOrderly rs) []
    
    main = getContents >>= print . (part1 &&& part2) . parse
    
  • Ananace@lemmy.ananace.dev

    Well, this one ended up with a surprisingly easy part 2 with how I wrote it.
    Not the most computationally optimal code, but since they’re still cheap enough to run in milliseconds I’m not overly bothered.

    C#
    class OrderComparer : IComparer<int>
    {
      Dictionary<int, List<int>> ordering;
      public OrderComparer(Dictionary<int, List<int>> ordering) {
        this.ordering = ordering;
      }
    
      public int Compare(int x, int y)
      {
        if (ordering.ContainsKey(x) && ordering[x].Contains(y))
          return -1;
        return 1;
      }
    }
    
    Dictionary<int, List<int>> ordering = new Dictionary<int, List<int>>();
    int[][] updates = new int[0][];
    
    public void Input(IEnumerable<string> lines)
    {
      foreach (var pair in lines.TakeWhile(l => l.Contains('|')).Select(l => l.Split('|').Select(w => int.Parse(w))))
      {
        if (!ordering.ContainsKey(pair.First()))
          ordering[pair.First()] = new List<int>();
        ordering[pair.First()].Add(pair.Last());
      }
      updates = lines.SkipWhile(s => s.Contains('|') || string.IsNullOrWhiteSpace(s)).Select(l => l.Split(',').Select(w => int.Parse(w)).ToArray()).ToArray();
    }
    
    public void Part1()
    {
      int correct = 0;
      var comparer = new OrderComparer(ordering);
      foreach (var update in updates)
      {
        var ordered = update.Order(comparer);
        if (update.SequenceEqual(ordered))
          correct += ordered.Skip(ordered.Count() / 2).First();
      }
    
      Console.WriteLine($"Sum: {correct}");
    }
    public void Part2()
    {
      int incorrect = 0;
      var comparer = new OrderComparer(ordering);
      foreach (var update in updates)
      {
        var ordered = update.Order(comparer);
        if (!update.SequenceEqual(ordered))
          incorrect += ordered.Skip(ordered.Count() / 2).First();
      }
    
      Console.WriteLine($"Sum: {incorrect}");
    }
    
  • iAvicenna@lemmy.world

    Python

    sort using a compare function

    from math import floor
    from pathlib import Path
    from functools import cmp_to_key
    cwd = Path(__file__).parent
    
    def parse_protocol(path):
    
      with path.open("r") as fp:
        data = fp.read().splitlines()
    
      rules = data[:data.index('')]
      page_to_rule = {r.split('|')[0]:[] for r in rules}
      [page_to_rule[r.split('|')[0]].append(r.split('|')[1]) for r in rules]
    
      updates = list(map(lambda x: x.split(','), data[data.index('')+1:]))
    
      return page_to_rule, updates
    
    def sort_pages(pages, page_to_rule):
    
      compare_pages = lambda page1, page2:\
        0 if page1 not in page_to_rule or page2 not in page_to_rule[page1] else -1
    
      return sorted(pages, key = cmp_to_key(compare_pages))
    
    def solve_problem(file_name, fix):
    
      page_to_rule, updates = parse_protocol(Path(cwd, file_name))
    
      to_print = [temp_p[int(floor(len(pages)/2))] for pages in updates
                  if (not fix and (temp_p:=pages) == sort_pages(pages, page_to_rule))
                  or (fix and (temp_p:=sort_pages(pages, page_to_rule)) != pages)]
    
      return sum(map(int,to_print))