:load matcher.scala

// some regular expressions
val DIGIT = RANGE("0123456789".toList)
val NONZERODIGIT = RANGE("123456789".toList)

val NUMBER = ALT(SEQ(NONZERODIGIT, STAR(DIGIT)), "0")
val LPAREN = CHAR('(')
val RPAREN = CHAR(')')
val WHITESPACE = PLUS(RANGE(" \n".toList))
val OPS = RANGE("+-*".toList)
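
// NUMBER accepts digit strings without a leading zero, e.g. "0", "7"
// and "42", but not "007"; the bare string "0" is presumably lifted to
// a regular expression by an implicit conversion in matcher.scala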

// for classifying the strings that have been recognised
abstract class Token
case object T_WHITESPACE extends Token
case object T_NUM extends Token
case class T_OP(s: String) extends Token
case object T_LPAREN extends Token
case object T_RPAREN extends Token
case class NT(s: String) extends Token   // nonterminal symbols used by the parser

// tokenizes a string and filters out all whitespace tokens
def tokenizer(rs: List[Rule[Token]], s: String) : List[Token] =
  tokenize(rs, s.toList).filterNot(_ match {
    case T_WHITESPACE => true
    case _ => false
  })

// lexing rules for arithmetic expressions
val lexing_rules: List[Rule[Token]] =
  List((NUMBER, (s) => T_NUM),
       (WHITESPACE, (s) => T_WHITESPACE),
       (LPAREN, (s) => T_LPAREN),
       (RPAREN, (s) => T_RPAREN),
       (OPS, (s) => T_OP(s.mkString)))
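
// a quick sanity check (assuming tokenize in matcher.scala performs
// longest-match lexing); the whitespace tokens are filtered out:
tokenizer(lexing_rules, "2 + 3")   // => List(T_NUM, T_OP("+"), T_NUM)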

type Grammar = List[(String, List[Token])]

// grammar for arithmetic expressions
val grammar =
  List("F" -> List(T_NUM),
       "E" -> List(T_NUM),
       "E" -> List(NT("E"), T_OP("+"), NT("E")),
       "E" -> List(NT("E"), T_OP("-"), NT("E")),
       "E" -> List(NT("E"), T_OP("*"), NT("E")),
       "E" -> List(T_LPAREN, NT("E"), T_RPAREN))

// chop splits a list at the first occurrence of prefix, returning the
// elements before it and the elements after it
def chop[A](ts1: List[A], prefix: List[A], ts2: List[A]) : Option[(List[A], List[A])] =
  ts1 match {
    case Nil => None
    case t::ts =>
      if (ts1.startsWith(prefix)) Some((ts2.reverse, ts1.drop(prefix.length)))
      else chop(ts, prefix, t::ts2)
  }

// examples
chop(List(1,2,3,4,5,6,7,8,9), List(4,5), Nil)
chop(List(1,2,3,4,5,6,7,8,9), List(3,5), Nil)
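// the first call splits the list around List(4,5):
//   => Some((List(1, 2, 3), List(6, 7, 8, 9)))
// the second returns None, since List(3,5) occurs nowhere as a sublist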

// replace substitutes the first occurrence of out inside ts by in
def replace[A](ts: List[A], out: List[A], in: List[A]) =
  chop(ts, out, Nil) match {
    case None => None
    case Some((before, after)) => Some(before ::: in ::: after)
  }
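
// for example, a single reduction step on a token list:
replace(List(T_NUM, T_OP("+"), T_NUM), List(T_NUM), List(NT("E")))
// => Some(List(NT("E"), T_OP("+"), T_NUM))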

// naive bottom-up parsing: try every reduction rhs ~> NT(lhs) at every
// position and succeed if some reduction sequence ends in NT("E")
def parse(g: Grammar, ts: List[Token]) : Boolean = {
  println(ts)
  if (ts == List(NT("E"))) true
  else {
    val tss = for ((lhs, rhs) <- g) yield replace(ts, rhs, List(NT(lhs)))
    tss.flatten.exists(parse(g, _))
  }
}
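
// an example run for "2 + 3": the token list is reduced step by step
// until only the start symbol E remains
//
//   List(T_NUM, T_OP(+), T_NUM)
//   ~> List(NT(E), T_OP(+), T_NUM)   by E -> T_NUM
//   ~> List(NT(E), T_OP(+), NT(E))   by E -> T_NUM
//   ~> List(NT(E))                   by E -> E + E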

def parser(g: Grammar, rs: List[Rule[Token]], s: String) = {
  println("\n")
  parse(g, tokenizer(rs, s))
}


parser(grammar, lexing_rules, "2 + 3 * 4 + 1")
parser(grammar, lexing_rules, "(2 + 3) * (4 + 1)")
parser(grammar, lexing_rules, "(2 + 3) * 4 (4 + 1)")
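
// the first two strings are in the language and should yield true; the
// third contains the juxtaposition "4 (4 + 1)", which no grammar rule
// covers, so the search eventually fails with false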
|