--- a/parser.scala Wed Nov 14 00:02:38 2012 +0000
+++ b/parser.scala Wed Nov 14 08:38:19 2012 +0000
@@ -1,72 +1,4 @@
-
-// regular expressions including NOT
-abstract class Rexp
-
-case object NULL extends Rexp
-case object EMPTY extends Rexp
-case class CHAR(c: Char) extends Rexp
-case class ALT(r1: Rexp, r2: Rexp) extends Rexp
-case class SEQ(r1: Rexp, r2: Rexp) extends Rexp
-case class STAR(r: Rexp) extends Rexp
-case class NOT(r: Rexp) extends Rexp
-
-
-// some convenience for typing in regular expressions
-def charlist2rexp(s : List[Char]) : Rexp = s match {
- case Nil => EMPTY
- case c::Nil => CHAR(c)
- case c::s => SEQ(CHAR(c), charlist2rexp(s))
-}
-implicit def string2rexp(s : String) : Rexp = charlist2rexp(s.toList)
-
-
-// nullable function: tests whether the regular
-// expression can recognise the empty string
-def nullable (r: Rexp) : Boolean = r match {
- case NULL => false
- case EMPTY => true
- case CHAR(_) => false
- case ALT(r1, r2) => nullable(r1) || nullable(r2)
- case SEQ(r1, r2) => nullable(r1) && nullable(r2)
- case STAR(_) => true
- case NOT(r) => !(nullable(r))
-}
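// A quick sanity check (a sketch, using the definitions above, which this
// patch moves into matcher.scala):
//
//   nullable(EMPTY)                       // true
//   nullable(SEQ(CHAR('a'), STAR("b")))   // false: the 'a' must still be read
//   nullable(STAR(CHAR('a')))             // true: star matches zero copies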
-
-// tests whether a regular expression
-// cannot recognise more
-def no_more (r: Rexp) : Boolean = r match {
- case NULL => true
- case EMPTY => false
- case CHAR(_) => false
- case ALT(r1, r2) => no_more(r1) && no_more(r2)
- case SEQ(r1, r2) => if (nullable(r1)) (no_more(r1) && no_more(r2)) else no_more(r1)
- case STAR(_) => false
- case NOT(r) => !(no_more(r))
-}
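// no_more holds exactly when matching is already doomed, e.g. (sketch):
//
//   no_more(CHAR('a'))            // false: an 'a' can still be consumed
//   no_more(der('b', CHAR('a')))  // true: the derivative is NULL
//   no_more(NOT(NULL))            // false: NOT(NULL) matches every string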
-
-
-// derivative of a regular expression w.r.t. a character
-def der (c: Char, r: Rexp) : Rexp = r match {
- case NULL => NULL
- case EMPTY => NULL
- case CHAR(d) => if (c == d) EMPTY else NULL
- case ALT(r1, r2) => ALT(der(c, r1), der(c, r2))
- case SEQ(r1, r2) =>
- if (nullable(r1)) ALT(SEQ(der(c, r1), r2), der(c, r2))
- else SEQ(der(c, r1), r2)
- case STAR(r) => SEQ(der(c, r), STAR(r))
- case NOT(r) => NOT(der (c, r))
-}
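// A worked example (sketch): matching "ab" by repeated derivatives; the
// string is accepted iff the final derivative is nullable.
//
//   val r = string2rexp("ab")          // SEQ(CHAR('a'), CHAR('b'))
//   der('a', r)                        // SEQ(EMPTY, CHAR('b'))
//   nullable(der('b', der('a', r)))    // true: "ab" is accepted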
-
-// regular expression for specifying
-// ranges of characters
-def RANGE(s : List[Char]) : Rexp = s match {
- case Nil => NULL
- case c::Nil => CHAR(c)
- case c::s => ALT(CHAR(c), RANGE(s))
-}
-
-// one or more
-def PLUS(r: Rexp) = SEQ(r, STAR(r))
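// RANGE and PLUS are plain abbreviations, e.g. (sketch):
//
//   RANGE("+-*".toList)    // ALT(CHAR('+'), ALT(CHAR('-'), CHAR('*')))
//   PLUS(CHAR('a'))        // SEQ(CHAR('a'), STAR(CHAR('a'))), i.e. "a+"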
+:load matcher.scala
// some regular expressions
val DIGIT = RANGE("0123456789".toList)
@@ -79,8 +11,8 @@
val OPS = RANGE("+-*".toList)
// for classifying the strings that have been recognised
+
abstract class Token
-
case object T_WHITESPACE extends Token
case object T_NUM extends Token
case class T_OP(s: String) extends Token
@@ -88,41 +20,15 @@
case object T_RPAREN extends Token
case class NT(s: String) extends Token
-type Rule = (Rexp, List[Char] => Token)
-def error (s: String) = throw new IllegalArgumentException ("Cannot tokenize: " + s)
-
-def munch(r: Rexp, action: List[Char] => Token, s: List[Char], t: List[Char]) : Option[(List[Char], Token)] =
- s match {
- case Nil if (nullable(r)) => Some(Nil, action(t))
- case Nil => None
- case c::s if (no_more(der (c, r)) && nullable(r)) => Some(c::s, action(t))
- case c::s if (no_more(der (c, r))) => None
- case c::s => munch(der (c, r), action, s, t ::: List(c))
- }
-
-def one_token (rs: List[Rule], s: List[Char]) : (List[Char], Token) = {
- val somes = rs.map { (r) => munch(r._1, r._2, s, Nil) } .flatten
- if (somes == Nil) error(s.mkString) else (somes sortBy (_._1.length) head)
-}
-
-def tokenize (rs: List[Rule], s: List[Char]) : List[Token] = s match {
- case Nil => Nil
- case _ => one_token(rs, s) match {
- case (rest, token) => token :: tokenize(rs, rest)
- }
-}
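// Together these implement maximal munch: munch consumes characters while some
// extension of the input can still match, one_token picks the rule with the
// shortest rest (i.e. the longest munch), and tokenize repeats until the input
// is exhausted. A sketch, assuming NUMBER matches digit sequences:
//
//   one_token(lexing_rules, "23+1".toList)
//              // == ("+1".toList, T_NUM): "23" is munched as one number token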
-
-def tokenizer(rs: List[Rule], s: String) : List[Token] =
+def tokenizer(rs: List[Rule[Token]], s: String) : List[Token] =
tokenize(rs, s.toList).filterNot(_ match {
case T_WHITESPACE => true
case _ => false
})
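// Usage sketch (with the lexing rules given below): whitespace is recognised
// during tokenization but filtered out afterwards, so
//
//   tokenizer(lexing_rules, "2 + 3")   // == List(T_NUM, T_OP("+"), T_NUM)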
-
-
// lexing rules for arithmetic expressions
-val lexing_rules: List[Rule]=
+val lexing_rules: List[Rule[Token]] =
List((NUMBER, (s) => T_NUM),
(WHITESPACE, (s) => T_WHITESPACE),
(LPAREN, (s) => T_LPAREN),
@@ -130,11 +36,6 @@
(OPS, (s) => T_OP(s.mkString)))
-// examples
-println(tokenizer(lexing_rules, "2 + 3 * 4 + 1"))
-println(tokenizer(lexing_rules, "(2 + 3) * (4 + 1)"))
-
-
type Grammar = List[(String, List[Token])]
// grammar for arithmetic expressions
@@ -174,7 +75,7 @@
}
}
-def parser(g: Grammar, rs: List[Rule], s: String) = {
+def parser(g: Grammar, rs: List[Rule[Token]], s: String) = {
println("\n")
parse(g, tokenizer(rs, s))
}
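// Usage sketch: assuming the arithmetic grammar elided above is bound to a
// value, here hypothetically called grammar:
//
//   parser(grammar, lexing_rules, "2 + 3 * 4")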