:load matcher.scala

// some regular expressions
val DIGIT = RANGE("0123456789".toList)
val NONZERODIGIT = RANGE("123456789".toList)

val NUMBER = ALT(SEQ(NONZERODIGIT, STAR(DIGIT)), "0")
val LPAREN = CHAR('(')
val RPAREN = CHAR(')')
val WHITESPACE = PLUS(RANGE(" \n".toList))
val OPS = RANGE("+-*".toList)
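
// note: NUMBER matches either the single digit "0" or a non-empty
// digit sequence that does not start with 0; the bare string "0" is
// presumably turned into a regular expression by an implicit
// conversion provided in matcher.scala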

// for classifying the strings that have been recognised
abstract class Token
case object T_WHITESPACE extends Token
case object T_NUM extends Token
case class T_OP(s: String) extends Token
case object T_LPAREN extends Token
case object T_RPAREN extends Token
case class T_NT(s: String, rhs: List[Token]) extends Token
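
// the tokenizer below calls tokenize (presumably provided by
// matcher.scala, like Rule) and then throws away all whitespace
// tokens, since they carry no meaning for parsing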
def tokenizer(rs: List[Rule[Token]], s: String) : List[Token] =
  tokenize(rs, s.toList).filterNot(_ match {
    case T_WHITESPACE => true
    case _ => false
  })

// lexing rules for arithmetic expressions
val lexing_rules: List[Rule[Token]] =
  List((NUMBER, (s) => T_NUM),
       (WHITESPACE, (s) => T_WHITESPACE),
       (LPAREN, (s) => T_LPAREN),
       (RPAREN, (s) => T_RPAREN),
       (OPS, (s) => T_OP(s.mkString)))
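
// a small check (hypothetical REPL output, assuming tokenize resolves
// overlapping matches with the usual maximal-munch rule):
//
//   tokenizer(lexing_rules, "2 + 3")
//   // => List(T_NUM, T_OP("+"), T_NUM)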

type Grammar = List[(String, List[Token])]

// grammar for arithmetic expressions
val grammar =
  List("E" -> List(T_NUM),
       "E" -> List(T_NT("E", Nil), T_OP("+"), T_NT("E", Nil)),
       "E" -> List(T_NT("E", Nil), T_OP("-"), T_NT("E", Nil)),
       "E" -> List(T_NT("E", Nil), T_OP("*"), T_NT("E", Nil)),
       "E" -> List(T_LPAREN, T_NT("E", Nil), T_RPAREN))
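
// startsWith(ts1, ts2) tests whether ts2 is a prefix of ts1;
// non-terminals are compared by name only, so the sub-trees recorded
// inside T_NT nodes do not affect the comparison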
def startsWith[A](ts1: List[A], ts2: List[A]) : Boolean = (ts1, ts2) match {
  case (_, Nil) => true
  case (T_NT(e, _)::ts1, T_NT(f, _)::ts2) => (e == f) && startsWith(ts1, ts2)
  case (t1::ts1, t2::ts2) => (t1 == t2) && startsWith(ts1, ts2)
  case _ => false
}
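
// chop removes the first occurrence of prefix from ts1 and returns the
// parts before and after it; the accumulator ts2 collects the tokens
// already inspected and should initially be Nil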
def chop[A](ts1: List[A], prefix: List[A], ts2: List[A]) : Option[(List[A], List[A])] =
  ts1 match {
    case Nil => None
    case t::ts =>
      if (startsWith(ts1, prefix)) Some((ts2.reverse, ts1.drop(prefix.length)))
      else chop(ts, prefix, t::ts2)
  }

// examples
chop(List(1,2,3,4,5,6,7,8,9), List(4,5), Nil)   // => Some((List(1,2,3), List(6,7,8,9)))
chop(List(1,2,3,4,5,6,7,8,9), List(3,5), Nil)   // => None

// replaces the first occurrence of out in ts by in
def replace[A](ts: List[A], out: List[A], in: List[A]) =
  chop(ts, out, Nil) match {
    case None => None
    case Some((before, after)) => Some(before ::: in ::: after)
  }
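
// parse1 is a naive bottom-up parser: it repeatedly picks a grammar
// rule whose right-hand side occurs in the token list, reduces that
// occurrence to the corresponding non-terminal (recording the consumed
// tokens as a rudimentary parse tree), and tries all such reductions
// until the whole input has been reduced to a single E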
def parse1(g: Grammar, ts: List[Token]) : Boolean = ts match {
  case List(T_NT("E", tree)) => { println(tree); true }
  case _ => {
    val tss = for ((lhs, rhs) <- g) yield replace(ts, rhs, List(T_NT(lhs, rhs)))
    tss.flatten.exists(parse1(g, _))
  }
}

println() ; parse1(grammar, tokenizer(lexing_rules, "2 + 3 * 4 + 1"))
println() ; parse1(grammar, tokenizer(lexing_rules, "(2 + 3) * (4 + 1)"))
println() ; parse1(grammar, tokenizer(lexing_rules, "(2 + 3) * 4 (4 + 1)"))   // not in the language, so parse1 returns false