--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/scala/Term_grammar.scala Sat Jun 15 09:11:11 2013 -0400
@@ -0,0 +1,80 @@
+//:load matcher.scala
+//:load parser3.scala
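+// (matcher.scala supplies the regular-expression combinators used below,
+// parser3.scala the Tokenizer and parser combinators; load both first)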
+
+// some regular expressions
+val LETTER = RANGE("abcdefghijklmnopqrstuvwxyz")
+val ID = PLUS(LETTER)
+
+val DIGIT = RANGE("0123456789")
+val NONZERODIGIT = RANGE("123456789")
+val NUMBER = ALT(SEQ(NONZERODIGIT, STAR(DIGIT)), "0")
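+// i.e. a number is either a single 0 or a non-zero digit followed by
+// any digits, which rules out leading zeros such as "042"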
+
+val LPAREN = CHAR('(')
+val RPAREN = CHAR(')')
+
+val WHITESPACE = PLUS(RANGE(" \n"))
+val OPS = RANGE("+-*")
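+// note that OPS also matches '-', although the grammar below only
+// interprets + and *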
+
+// for classifying the strings that have been lexed
+abstract class Token
+
+case object T_WHITESPACE extends Token
+case class T_NUM(s: String) extends Token
+case class T_OP(s: String) extends Token
+case object T_LPAREN extends Token
+case object T_RPAREN extends Token
+
+
+// lexing rules for arithmetic expressions
+val lexing_rules: List[Rule[Token]] =
+ List((NUMBER, (s) => T_NUM(s.mkString)),
+ (WHITESPACE, (s) => T_WHITESPACE),
+ (LPAREN, (s) => T_LPAREN),
+ (RPAREN, (s) => T_RPAREN),
+ (OPS, (s) => T_OP(s.mkString)))
+
+val Tk = Tokenizer(lexing_rules, List(T_WHITESPACE))
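+// the second argument presumably lists tokens to filter out of the
+// result, so Tk.fromString("1 + 2") should yield
+// List(T_NUM("1"), T_OP("+"), T_NUM("2")) without any T_WHITESPACE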
+
+
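+// a parser that accepts a single, given token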
+case class TokParser(tok: Token) extends Parser[List[Token], Token] {
+ def parse(ts: List[Token]) = ts match {
+    case t::ts if t == tok => Set((t, ts))
+    case _ => Set()
+ }
+}
+implicit def token2tparser(t: Token): TokParser = TokParser(t)
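+// lets a bare token stand for TokParser(token), as in the uses of
+// T_LPAREN, T_RPAREN and T_OP(...) in the grammar below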
+
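+// a parser that accepts a number token and returns its integer value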
+case object NumParser extends Parser[List[Token], Int] {
+ def parse(ts: List[Token]) = ts match {
+ case T_NUM(s)::ts => Set((s.toInt, ts))
+    case _ => Set()
+ }
+}
+
+lazy val E: Parser[List[Token], Int] = (T ~ T_OP("+") ~ E) ==> { case ((x, _), z) => x + z } || T
+lazy val T: Parser[List[Token], Int] = (F ~ T_OP("*") ~ T) ==> { case ((x, _), z) => x * z } || F
+lazy val F: Parser[List[Token], Int] = (T_LPAREN ~> E <~ T_RPAREN) || NumParser
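+// the usual stratified grammar: E handles +, T handles *, F handles
+// parentheses and numbers; since * is introduced below +, it binds
+// more tightly, and both operators associate to the right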
+
+println(E.parse_all(Tk.fromString("1 + 2 + 3")))
+println(E.parse_all(Tk.fromString("1 + 2 * 3")))
+println(E.parse_all(Tk.fromString("(1 + 2) * 3")))
+println(E.parse_all(Tk.fromString("(14 + 2) * (3 + 2)")))
+