parser1.scala
changeset 71:7717f20f0504
parent    61:a80f0cf17f91
--- a/parser1.scala	Wed Nov 21 09:04:11 2012 +0000
+++ b/parser1.scala	Fri Nov 23 14:08:31 2012 +0000
@@ -1,31 +1,27 @@
-:load matcher.scala
+// A naive bottom-up parser with backtracking
+//
+// Needs:
+//   :load matcher.scala
 
 // some regular expressions
-val DIGIT = RANGE("0123456789".toList)
-val NONZERODIGIT = RANGE("123456789".toList)
+val DIGIT = RANGE("0123456789")
+val NONZERODIGIT = RANGE("123456789")
 
 val NUMBER = ALT(SEQ(NONZERODIGIT, STAR(DIGIT)), "0")
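+// NUMBER matches "0" or a nonzero digit followed by arbitrary digits,
+// e.g. "42" and "0", but not "042"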
 val LPAREN = CHAR('(')
 val RPAREN = CHAR(')')
-val WHITESPACE = PLUS(RANGE(" \n".toList))
-val OPS = RANGE("+-*".toList)
+val WHITESPACE = PLUS(RANGE(" \n"))
+val OPS = RANGE("+-*")
 
 // for classifying the strings that have been recognised
+
 abstract class Token
 case object T_WHITESPACE extends Token
 case object T_NUM extends Token
 case class T_OP(s: String) extends Token
 case object T_LPAREN extends Token
 case object T_RPAREN extends Token
-case class T_NT(s: String, rhs: List[Token]) extends Token
-
-def tokenizer(rs: List[Rule[Token]], s: String) : List[Token] = 
-  tokenize(rs, s.toList).filterNot(_ match {
-    case T_WHITESPACE => true
-    case _ => false
-  })
-
-
+case class NT(s: String) extends Token  // nonterminals; sentential forms are plain token lists
 
 // lexing rules for arithmetic expressions
 val lexing_rules: List[Rule[Token]] =
@@ -35,33 +31,30 @@
        (RPAREN, (s) => T_RPAREN),
        (OPS, (s) => T_OP(s.mkString)))
 
+// the tokenizer
+val Tok = Tokenizer(lexing_rules, List(T_WHITESPACE))
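+
+// a sketch of its use, assuming Tokenizer filters out the tokens in its
+// second argument, as the old tokenizer did with T_WHITESPACE:
+//
+//   Tok.fromString("2 + 3") ~> List(T_NUM, T_OP("+"), T_NUM)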
 
 type Grammar = List[(String, List[Token])]
 
 // grammar for arithmetic expressions
 val grammar = 
-  List ("E" -> List(T_NUM),
-        "E" -> List(T_NT("E", Nil), T_OP("+"), T_NT("E", Nil)),
-        "E" -> List(T_NT("E", Nil), T_OP("-"), T_NT("E", Nil)),
-        "E" -> List(T_NT("E", Nil), T_OP("*"), T_NT("E", Nil)),    
-        "E" -> List(T_LPAREN, T_NT("E", Nil), T_RPAREN))
+  List ("F" -> List(T_NUM),
+        "E" -> List(T_NUM),
+        "E" -> List(NT("E"), T_OP("+"), NT("E")),
+        "E" -> List(NT("E"), T_OP("-"), NT("E")),
+        "E" -> List(NT("E"), T_OP("*"), NT("E")),    
+        "E" -> List(T_LPAREN, NT("E"), T_RPAREN))
 
-def startsWith[A](ts1: List[A], ts2: List[A]) : Boolean = (ts1, ts2) match {
-  case (_, Nil) => true
-  case (T_NT(e, _)::ts1,T_NT(f, _)::ts2) => (e == f) && startsWith(ts1, ts2)
-  case (t1::ts1, t2::ts2) => (t1 == t2) && startsWith(ts1, ts2)
-  case _ => false
-}
 
 def chop[A](ts1: List[A], prefix: List[A], ts2: List[A]) : Option[(List[A], List[A])] = 
   ts1 match {
     case Nil => None
     case t::ts => 
-      if (startsWith(ts1, prefix)) Some(ts2.reverse, ts1.drop(prefix.length))
+      if (ts1.startsWith(prefix)) Some((ts2.reverse, ts1.drop(prefix.length)))
       else chop(ts, prefix, t::ts2)
   }
 
-// examples
+// examples for chop: the first call yields Some((List(1,2,3), List(6,7,8,9))),
+// the second None (3,5 occurs nowhere as a sublist)
 chop(List(1,2,3,4,5,6,7,8,9), List(4,5), Nil)  
 chop(List(1,2,3,4,5,6,7,8,9), List(3,5), Nil)  
 
@@ -71,18 +64,25 @@
     case Some((before, after)) => Some(before ::: in ::: after)
   }  
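+
+// replace (whose head lies just above this hunk) is assumed to substitute
+// the first occurrence of its middle argument, found via chop; for example
+//
+//   replace(List(1,2,3,4,5), List(3,4), List(9)) ~> Some(List(1,2,9,5))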
 
-def parse1(g: Grammar, ts: List[Token]) : Boolean = ts match {
-  case List(T_NT("E", tree)) => { println(tree); true }
-  case _ => {
-    val tss = for ((lhs, rhs) <- g) yield replace(ts, rhs, List(T_NT(lhs, rhs)))
-    tss.flatten.exists(parse1(g, _))
+def parse(g: Grammar, ts: List[Token]) : Boolean = {
+  println(ts)
+  if (ts == List(NT("E"))) true
+  else {
+    val tss = for ((lhs, rhs) <- g) yield replace(ts, rhs, List(NT(lhs)))
+    tss.flatten.exists(parse(g, _))
   }
 }
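+
+// one possible reduction sequence for "2 + 3" (parse prints every
+// intermediate token list, including the dead ends it backtracks from):
+//
+//   List(T_NUM, T_OP(+), T_NUM)
+//   List(NT(E), T_OP(+), T_NUM)
+//   List(NT(E), T_OP(+), NT(E))
+//   List(NT(E))                    ~> true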
  
+def parser(g: Grammar, s: String) = {
+  println("\n")
+  parse(g, Tok.fromString(s))
+}
 
-println() ; parse1(grammar, tokenizer(lexing_rules, "2 + 3 * 4 + 1"))
-println() ; parse1(grammar, tokenizer(lexing_rules, "(2 + 3) * (4 + 1)"))
-println() ; parse1(grammar, tokenizer(lexing_rules, "(2 + 3) * 4 (4 + 1)"))
+
+parser(grammar, "2 + 3 *    4 +       1")
+parser(grammar, "(2 + 3) * (4 + 1)")
+parser(grammar, "(2 + 3) * 4 (4 + 1)")