// regular expressions including NOT
abstract class Rexp

case object NULL extends Rexp
case object EMPTY extends Rexp
case class CHAR(c: Char) extends Rexp
case class ALT(r1: Rexp, r2: Rexp) extends Rexp
case class SEQ(r1: Rexp, r2: Rexp) extends Rexp
case class STAR(r: Rexp) extends Rexp
case class NOT(r: Rexp) extends Rexp

// some convenience for typing in regular expressions
import scala.language.implicitConversions

def charlist2rexp(s: List[Char]): Rexp = s match {
  case Nil => EMPTY
  case c::Nil => CHAR(c)
  case c::s => SEQ(CHAR(c), charlist2rexp(s))
}
implicit def string2rexp(s: String): Rexp = charlist2rexp(s.toList)
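
// e.g. the string "ab" is now converted automatically to
// SEQ(CHAR('a'), CHAR('b')) wherever a Rexp is expected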

// nullable function: tests whether the regular
// expression can recognise the empty string
def nullable(r: Rexp): Boolean = r match {
  case NULL => false
  case EMPTY => true
  case CHAR(_) => false
  case ALT(r1, r2) => nullable(r1) || nullable(r2)
  case SEQ(r1, r2) => nullable(r1) && nullable(r2)
  case STAR(_) => true
  case NOT(r) => !(nullable(r))
}
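
// a few sanity checks (added for illustration, not in the original):
// nullable(STAR(NULL))      == true    a starred expression always matches ""
// nullable(SEQ(EMPTY, "a")) == false   the "a" still has to be matched
// nullable(NOT(NULL))       == true    the complement of the empty language contains ""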

// tests whether a regular expression
// cannot recognise anything anymore
def no_more(r: Rexp): Boolean = r match {
  case NULL => true
  case EMPTY => false
  case CHAR(_) => false
  case ALT(r1, r2) => no_more(r1) && no_more(r2)
  case SEQ(r1, r2) => if (nullable(r1)) (no_more(r1) && no_more(r2)) else no_more(r1)
  case STAR(_) => false
  case NOT(r) => !(no_more(r))
}
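
// e.g. no_more(der('b', CHAR('a'))) == true: after reading a 'b' the
// expression CHAR('a') has become NULL and can never match anything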

// derivative of a regular expression w.r.t. a character
def der(c: Char, r: Rexp): Rexp = r match {
  case NULL => NULL
  case EMPTY => NULL
  case CHAR(d) => if (c == d) EMPTY else NULL
  case ALT(r1, r2) => ALT(der(c, r1), der(c, r2))
  case SEQ(r1, r2) =>
    if (nullable(r1)) ALT(SEQ(der(c, r1), r2), der(c, r2))
    else SEQ(der(c, r1), r2)
  case STAR(r) => SEQ(der(c, r), STAR(r))
  case NOT(r) => NOT(der(c, r))
}
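
// For illustration (an addition, not part of the original file): derivatives
// already give a complete matcher. A regular expression matches a string iff
// deriving it by every character of the string yields a nullable expression.
// The name matches below is ours.
def matches(r: Rexp, s: List[Char]): Boolean = s match {
  case Nil => nullable(r)
  case c::cs => matches(der(c, r), cs)
}

// e.g. matches(STAR("ab"), "ababab".toList) == true
//      matches(NOT("ab"), "ab".toList)      == false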

// regular expression for specifying
// ranges of characters
def RANGE(s: List[Char]): Rexp = s match {
  case Nil => NULL
  case c::Nil => CHAR(c)
  case c::s => ALT(CHAR(c), RANGE(s))
}

// one or more
def PLUS(r: Rexp) = SEQ(r, STAR(r))

// some regular expressions
val DIGIT = RANGE("0123456789".toList)
val NONZERODIGIT = RANGE("123456789".toList)

val NUMBER = ALT(SEQ(NONZERODIGIT, STAR(DIGIT)), "0")
val LPAREN = CHAR('(')
val RPAREN = CHAR(')')
val WHITESPACE = PLUS(RANGE(" \n".toList))
val OPS = RANGE("+-*".toList)
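
// using the illustrative matches helper from above:
// matches(NUMBER, "120".toList) == true
// matches(NUMBER, "012".toList) == false   (numbers carry no leading zero)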

// for classifying the strings that have been recognised
abstract class Token

case object T_WHITESPACE extends Token
case object T_NUM extends Token
case class T_OP(s: String) extends Token
case object T_LPAREN extends Token
case object T_RPAREN extends Token
case class T_NT(s: String) extends Token

// a lexing rule pairs a regular expression with the token it produces
type Rule = (Rexp, List[Char] => Token)

def error(s: String) = throw new IllegalArgumentException("Cannot tokenize: " + s)

// munch consumes the longest prefix of the input s that the rule (r, action)
// can match; t accumulates the characters read so far
def munch(r: Rexp, action: List[Char] => Token, s: List[Char], t: List[Char]): Option[(List[Char], Token)] =
  s match {
    case Nil if (nullable(r)) => Some((Nil, action(t)))
    case Nil => None
    case c::s if (no_more(der(c, r)) && nullable(r)) => Some((c::s, action(t)))
    case c::s if (no_more(der(c, r))) => None
    case c::s => munch(der(c, r), action, s, t ::: List(c))
  }
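
// e.g. munch(NUMBER, (s) => T_NUM, "12+3".toList, Nil)
// evaluates to Some((List('+', '3'), T_NUM)): the longest numeric prefix
// "12" is consumed and the unconsumed rest of the input is handed back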

// runs all rules against the input and picks the one that matched the
// longest prefix, i.e. the one leaving the shortest rest of the input
def one_token(rs: List[Rule], s: List[Char]): (List[Char], Token) = {
  val somes = rs.map { r => munch(r._1, r._2, s, Nil) }.flatten
  if (somes == Nil) error(s.mkString) else somes.sortBy(_._1.length).head
}

def tokenize(rs: List[Rule], s: List[Char]): List[Token] = s match {
  case Nil => Nil
  case _ => one_token(rs, s) match {
    case (rest, token) => token :: tokenize(rs, rest)
  }
}

def tokenizer(rs: List[Rule], s: String): List[Token] =
  tokenize(rs, s.toList).filterNot(_ match {
    case T_WHITESPACE => true
    case _ => false
  })

// lexing rules for arithmetic expressions
val lexing_rules: List[Rule] =
  List((NUMBER, (s) => T_NUM),
       (WHITESPACE, (s) => T_WHITESPACE),
       (LPAREN, (s) => T_LPAREN),
       (RPAREN, (s) => T_RPAREN),
       (OPS, (s) => T_OP(s.mkString)))

// examples
println(tokenizer(lexing_rules, "2 + 3 * 4 + 1"))
println(tokenizer(lexing_rules, "(2 + 3) * (4 + 1)"))
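
// the first call prints (whitespace tokens have been filtered out):
// List(T_NUM, T_OP(+), T_NUM, T_OP(*), T_NUM, T_OP(+), T_NUM)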

type Grammar = List[(String, List[Token])]

// grammar for arithmetic expressions
val grammar =
  List("E" -> List(T_NUM),
       "E" -> List(T_NT("E"), T_OP("+"), T_NT("E")),
       "E" -> List(T_NT("E"), T_OP("-"), T_NT("E")),
       "E" -> List(T_NT("E"), T_OP("*"), T_NT("E")),
       "E" -> List(T_LPAREN, T_NT("E"), T_RPAREN))
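
// note that the grammar is ambiguous and left-recursive; this is harmless
// here because parse1 below searches reductions bottom-up and only decides
// whether a token sequence is accepted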

// searches ts1 for the first occurrence of prefix and, if found, returns
// the part before it together with the part after it (ts2 accumulates the
// part already scanned, in reverse)
def chop[A](ts1: List[A], prefix: List[A], ts2: List[A]): Option[(List[A], List[A])] =
  ts1 match {
    case Nil => None
    case t::ts =>
      if (ts1.startsWith(prefix)) Some((ts2.reverse, ts1.drop(prefix.length)))
      else chop(ts, prefix, t::ts2)
  }

// examples
chop(List(1,2,3,4,5,6,7,8,9), List(4,5), Nil)
chop(List(1,2,3,4,5,6,7,8,9), List(3,5), Nil)
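
// the first call evaluates to Some((List(1, 2, 3), List(6, 7, 8, 9))),
// the second to None since the sublist 3,5 occurs nowhere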

// replaces the first occurrence of out in ts by in
def replace[A](ts: List[A], out: List[A], in: List[A]) =
  chop(ts, out, Nil) match {
    case None => None
    case Some((before, after)) => Some(before ::: in ::: after)
  }

// a naive bottom-up parser: repeatedly replace the right-hand side of a
// grammar rule by its left-hand side; the token sequence is accepted if
// some sequence of such reductions leads to the start symbol E
def parse1(g: Grammar, ts: List[Token]): Boolean = {
  //println(ts)
  if (ts == List(T_NT("E"))) true
  else {
    val tss = for ((lhs, rhs) <- g) yield replace(ts, rhs, List(T_NT(lhs)))
    tss.flatten.exists(parse1(g, _))
  }
}

println(parse1(grammar, tokenizer(lexing_rules, "2 + 3 * 4 + 1")))
println(parse1(grammar, tokenizer(lexing_rules, "(2 + 3) * (4 + 1)")))
println(parse1(grammar, tokenizer(lexing_rules, "(2 + 3) * 4 (4 + 1)")))
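
// the first two strings should print true; the third should print false
// because no operator connects "4" with "(4 + 1)"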