package main

import (
	"fmt"
	"os"
	"regexp"
	"strings"

	"git.annabunch.es/annabunches/adventofcode/2020/lib/util"
)

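// Node represents one grammar rule. children holds the rule's alternatives;
// each inner slice is the ordered sequence of sub-rules for that alternative.
// A terminal rule has no children and stores its literal text in name.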
type Node struct {
	name     string
	children [][]*Node
}

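// Parser states: the input lists rule definitions first, then switches to
// plain data lines after the first blank line.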
const (
	STATE_RULES = iota
	STATE_DATA
)

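// NewRule returns a rule node with the given name and no children.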
func NewRule(name string) *Node {
	return &Node{
		name:     name,
		children: make([][]*Node, 0),
	}
}

// expandRe is currently unused in this file.
var expandRe = regexp.MustCompile("[0-9]+")

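// parseRule recursively builds the Node tree for the named rule. Given the
// rule syntax this file parses ("N: A B | C D", with quoted terminals), an
// entry like `0: 1 2 | 2 1` becomes a node with two child sets, while a
// terminal like `3: "a"` becomes a leaf whose name is the bare character.
// (The example rule numbers here are illustrative.)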
func parseRule(name string, rules map[string]string) *Node {
	ruleString := rules[name]
	rule := NewRule(name)

	// split the rule body into its alternatives
	for _, text := range strings.Split(ruleString, " | ") {
		// quoted strings are our terminals
		if strings.HasPrefix(text, "\"") {
			rule.name = strings.Trim(text, "\"") // change the rule to its "real" name
			continue
		}
		// everything else is a sequence of rule indexes, which we turn into child nodes
		childSet := make([]*Node, 0)
		for _, childName := range strings.Split(text, " ") {
			childSet = append(childSet, parseRule(childName, rules))
		}
		rule.children = append(rule.children, childSet)
	}

	return rule
}

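// parseInput separates the rule definitions from the data lines, builds the
// rule tree rooted at rule "0", and returns the set of every string that rule
// generates along with the data lines.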
func parseInput(input []string) (map[string]bool, []string) {
	stateRe := regexp.MustCompile("^([0-9]+): (.*)$")
	state := STATE_RULES
	data := make([]string, 0)

	var root *Node
	rules := make(map[string]string) // unparsed rules

	for _, line := range input {
		switch state {
		case STATE_RULES:
			if line == "" {
				state = STATE_DATA
				continue
			}

			reData := stateRe.FindAllStringSubmatch(line, 16)

			// store the rule body, keyed by rule number
			rules[reData[0][1]] = reData[0][2]
		case STATE_DATA:
			data = append(data, line)
		}
	}

	// now parse the rules we stored
	root = parseRule("0", rules)

	// now we expand the grammar - generate all possible strings in the language
	rawLanguage := expand(root)
	language := make(map[string]bool)
	for _, term := range rawLanguage {
		language[term] = true
	}

	return language, data
}

// expand returns every string that can be generated starting from node.
func expand(node *Node) []string {
	items := make([]string, 0)

	if len(node.children) == 0 {
		items = append(items, node.name)
		return items
	}

	for _, childSet := range node.children {
		newItems := make([][]string, 0)
		for _, child := range childSet {
			newItems = append(newItems, expand(child))
		}
		items = append(items, combine(newItems, "")...)
	}

	return items
}

// combine takes a list of string lists and concatenates them in order,
// returning a list of all possible combinations. Example:
// [["foo"] ["bar" "baz"] ["fnord"]] => ["foobarfnord" "foobazfnord"]
func combine(terms [][]string, acc string) []string {
	remaining := len(terms)
	results := make([]string, 0)

	if remaining == 0 {
		return results
	}

	for _, part := range terms[0] {
		newAcc := acc + part
		if remaining == 1 {
			results = append(results, newAcc)
			continue
		}

		results = append(results, combine(terms[1:], newAcc)...)
	}

	return results
}

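// exampleCombine is an illustrative sketch and is not called by the solver;
// the input values here are made up for the example and do not come from the
// puzzle input.
func exampleCombine() {
	parts := [][]string{{"foo"}, {"bar", "baz"}, {"fnord"}}
	// Prints [foobarfnord foobazfnord]: one string per path through the lists.
	fmt.Println(combine(parts, ""))
}
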
// main counts how many data lines are in the language generated by rule 0
// and prints that count.
func main() {
	// step := os.Args[1]
	values := util.InputParserStrings(os.Args[2])
	language, data := parseInput(values)
	count := 0
	for _, item := range data {
		if language[item] {
			count++
		}
	}
	fmt.Println(count)
}