--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/parser5.scala Mon Nov 19 14:18:42 2012 +0000
@@ -0,0 +1,113 @@
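+// NB: the regular-expression constructors (RANGE, CHAR, ALT, SEQ, STAR,
+// PLUS), the Rule type and the Tokenizer class are assumed to be in scope;
+// they come from the accompanying lexer of the earlier files.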
+val DIGIT = RANGE("0123456789")
+val NONZERODIGIT = RANGE("123456789")
+
+val NUMBER = ALT(SEQ(NONZERODIGIT, STAR(DIGIT)), "0")
+val LPAREN = CHAR('(')
+val RPAREN = CHAR(')')
+val WHITESPACE = PLUS(RANGE(" \n"))
+val OPS = RANGE("+-*")
+
+// for classifying the strings that have been recognised
+abstract class Token
+case object T_WHITESPACE extends Token
+case class T_NUM(s: String) extends Token
+case class T_OP(s: String) extends Token
+case object T_LPAREN extends Token
+case object T_RPAREN extends Token
+
+val lexing_rules: List[Rule[Token]] =
+ List((NUMBER, (s) => T_NUM(s.mkString)),
+ (WHITESPACE, (s) => T_WHITESPACE),
+ (LPAREN, (s) => T_LPAREN),
+ (RPAREN, (s) => T_RPAREN),
+ (OPS, (s) => T_OP(s.mkString)))
+
+val Tk = Tokenizer(lexing_rules, List(T_WHITESPACE))
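+// sanity check (sketch): the second argument presumably lists the token
+// classes to be dropped, so Tk.fromString("1 + 2") should produce
+// List(T_NUM("1"), T_OP("+"), T_NUM("2")) with no T_WHITESPACE tokens,
+// which is why the grammar below never has to mention whitespace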
+
+
+// parser combinators with input type I and return type T,
+// plus a context check (CHECK below) that stops left-recursive
+// rules from looping
+
+case class SubList[T](s: List[T], l: Int, h: Int) {
+ def low = l
+ def high = h
+ def length = h - l
+  def sublist(l: Int = this.l, h: Int = this.h) = s.slice(l, h)
+  def set(low: Int = this.l, high: Int = this.h) = SubList(s, low, high)
+}
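+
+// a SubList is a window [low, high) into the underlying list; parsers
+// consume input by moving `low` forward instead of rebuilding lists,
+// e.g. (sketch):
+//   SubList(List(1, 2, 3), 0, 3).set(low = 1).sublist()   // List(2, 3)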
+
+type Ctxt[T] = List[(String, SubList[T])]
+
+abstract class Parser[I, T] {
+
+ def parse(ts: SubList[I], ctxt: Ctxt[I]): Set[(T, SubList[I])]
+
+ def parse_all(s: List[I]) : Set[T] =
+ for ((head, tail) <- parse(SubList(s, 0, s.length), Nil); if (tail.sublist() == Nil)) yield head
+
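+  // combinator operators: || is alternative, ==> applies a semantic
+  // action to the result, ~ is sequencing, and ~> / <~ are sequencing
+  // that keep only the right / left result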
+ def || (right : => Parser[I, T]) : Parser[I, T] = new AltParser(this, right)
+ def ==>[S] (f: => T => S) : Parser [I, S] = new FunParser(this, f)
+ def ~[S] (right : => Parser[I, S]) : Parser[I, (T, S)] = new SeqParser(this, right)
+ def ~>[S] (right : => Parser[I, S]) : Parser[I, S] = this ~ right ==> (_._2)
+ def <~[S] (right : => Parser[I, S]) : Parser[I, T] = this ~ right ==> (_._1)
+}
+
+class SeqParser[I, T, S](p: => Parser[I, T], q: => Parser[I, S]) extends Parser[I, (T, S)] {
+ def parse(sb: SubList[I], ctxt: Ctxt[I]) =
+ for ((head1, tail1) <- p.parse(sb, ctxt);
+ (head2, tail2) <- q.parse(tail1, ctxt)) yield ((head1, head2), tail2)
+}
+
+class AltParser[I, T](p: => Parser[I, T], q: => Parser[I, T]) extends Parser[I, T] {
+ def parse(sb: SubList[I], ctxt: Ctxt[I]) = p.parse(sb, ctxt) ++ q.parse(sb, ctxt)
+}
+
+class FunParser[I, T, S](p: => Parser[I, T], f: T => S) extends Parser[I, S] {
+ def parse(sb: SubList[I], ctxt: Ctxt[I]) =
+ for ((head, tail) <- p.parse(sb, ctxt)) yield (f(head), tail)
+}
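+
+// SeqParser combines every parse of p with every parse of q on the
+// remaining input; AltParser returns the union of both result sets, so
+// ambiguous parses are kept rather than resolved; FunParser maps the
+// semantic action f over each result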
+
+case object NumParser extends Parser[Token, Int] {
+ def parse(sb: SubList[Token], ctxt: Ctxt[Token]) = {
+ if (0 < sb.length) sb.sublist(sb.low, sb.low + 1) match {
+ case T_NUM(i)::Nil => Set((i.toInt, sb.set(low = sb.low + 1)))
+ case _ => Set()
+ }
+ else Set()
+ }
+}
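+
+// NumParser accepts a single T_NUM token and returns its integer value,
+// e.g. (sketch) on SubList(List(T_NUM("42")), 0, 1) it yields the set
+// {(42, <empty remainder>)}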
+
+case class TokParser(t: Token) extends Parser[Token, Token] {
+ def parse(sb: SubList[Token], ctxt: Ctxt[Token]) = {
+ if (0 < sb.length && sb.sublist(sb.low, sb.low + 1) == List(t)) Set((t, sb.set(low = sb.low + 1)))
+ else Set()
+ }
+}
+
+implicit def token2tparser(t: Token): TokParser = TokParser(t)
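+// the implicit conversion lets plain tokens be used as parsers, so the
+// grammar below can write T_LPAREN ~ E ~ T_RPAREN directly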
+
+class IgnLst[I, T](p: => Parser[I, T]) extends Parser[I, T] {
+ def parse(sb: SubList[I], ctxt: Ctxt[I]) = {
+ if (sb.length == 0) Set()
+ else for ((head, tail) <- p.parse(sb.set(high = sb.high - 1), ctxt))
+ yield (head, tail.set(high = tail.high + 1))
+ }
+}
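+
+// IgnLst runs p on the input minus its last element and then puts that
+// element back onto the remainder; this forces a re-entered non-terminal
+// to work on a strictly shorter input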
+
+class CHECK[I, T](nt: String, p: => Parser[I, T]) extends Parser[I, T] {
+ def parse(sb: SubList[I], ctxt: Ctxt[I]) = {
+    val should_trim = ctxt.contains((nt, sb))
+ if (should_trim && sb.length == 0) Set()
+ else if (should_trim) new IgnLst(p).parse(sb, (nt, sb)::ctxt)
+ else p.parse(sb, (nt, sb)::ctxt)
+ }
+}
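+
+// CHECK stops left-recursive rules from looping: if the non-terminal nt
+// has already been tried on exactly this sub-input, the parse fails
+// outright on empty input and is otherwise re-run via IgnLst on a
+// strictly shorter input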
+
+lazy val E: Parser[Token, Int] =
+  new CHECK("E", (E ~ T_OP("+") ~ E) ==> { case ((x, y), z) => x + z } ||
+                 (E ~ T_OP("*") ~ E) ==> { case ((x, y), z) => x * z } ||
+                 (T_LPAREN ~ E ~ T_RPAREN) ==> { case ((x, y), z) => y } ||
+                 NumParser)
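+
+// E is left-recursive; without the CHECK wrapper the parser would never
+// terminate. The grammar gives + and * no precedence, so "1 + 2 * 3" is
+// ambiguous and parse_all may return more than one value.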
+
+println(E.parse_all(Tk.fromString("1 + 2 * 3")))
+println(E.parse_all(Tk.fromString("(1 + 2) * 3")))
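+
+// further tests one could run (sketch):
+// println(E.parse_all(Tk.fromString("(1)")))
+// println(E.parse_all(Tk.fromString("1 + 2 + 3")))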