--- a/compile.scala Wed Nov 28 08:28:26 2012 +0000
+++ b/compile.scala Mon Dec 03 15:35:27 2012 +0000
@@ -1,7 +1,7 @@
-// A parser and evaluator for teh while language
+// A parser and compiler for the WHILE language
//
-//:load matcher.scala
-//:load parser3.scala
+import matcher._
+import parser._
// some regular expressions
-val SYM = RANGE("ABCDEFGHIJKLMNOPQRSTUVXYZabcdefghijklmnopqrstuvwxyz_")
+val SYM = RANGE("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz_")
@@ -32,7 +32,7 @@
case class T_NUM(s: String) extends Token
case class T_KWD(s: String) extends Token
-val lexing_rules: List[Rule[Token]] =
+val lexing_rules: List[(Rexp, List[Char] => Token)] =
List((KEYWORD, (s) => T_KWD(s.mkString)),
(ID, (s) => T_ID(s.mkString)),
(OP, (s) => T_OP(s.mkString)),
@@ -260,7 +260,8 @@
//examples
-//compile_to("loops.while", "loops.j")
+compile_to("loops.while", "loops.j")
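+// (compile_to parses the .while source and writes Jasmin assembly to loops.j)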
//compile_to("fib.while", "fib.j")
@@ -315,7 +315,8 @@
-// javabyte code assmbler
+// Java bytecode assembler
//
// java -jar jvm/jasmin-2.4/jasmin.jar loops.j
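+// (jasmin then assembles the .j file into runnable JVM .class files)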
@@ -324,4 +324,3 @@
-
--- a/matcher.scala Wed Nov 28 08:28:26 2012 +0000
+++ b/matcher.scala Mon Dec 03 15:35:27 2012 +0000
@@ -1,5 +1,8 @@
+package object matcher {
-// regular expressions including NOT
+// regular expressions
+// including constructors for NOT and ALLC
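+// (NOT(r) recognises the complement of r; ALLC any single character)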
abstract class Rexp
case object NULL extends Rexp
@@ -53,6 +55,55 @@
case NOT(r) => NOT(der (c, r))
}
+// main class for the tokenizer
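+// rules pair a regular expression with an action that builds a token
+// from the matched characters; tokens in excl (e.g. for whitespace)
+// are filtered out after tokenization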
+case class Tokenizer[T](rules: List[(Rexp, List[Char] => T)], excl: List[T] = Nil) {
+
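+// munch consumes the longest possible prefix of s matched by r, taking
+// derivatives (der) character by character; t accumulates the characters
+// consumed so far and no_more tests whether r can still match anything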
+def munch(r: Rexp, action: List[Char] => T, s: List[Char], t: List[Char]) : Option[(List[Char], T)] =
+ s match {
+ case Nil if (nullable(r)) => Some((Nil, action(t)))
+ case Nil => None
+ case c::s if (no_more(der (c, r)) && nullable(r)) => Some((c::s, action(t)))
+ case c::s if (no_more(der (c, r))) => None
+ case c::s => munch(der (c, r), action, s, t ::: List(c))
+ }
+
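+// one_token tries all rules and keeps the match that leaves the shortest
+// rest, i.e. the longest munch; Right signals untokenizable input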
+def one_token(s: List[Char]) : Either[(List[Char], T), String] = {
+ val somes = rules.map { (r) => munch(r._1, r._2, s, Nil) }.flatten
+ if (somes == Nil) Right(s.mkString)
+ else Left(somes sortBy (_._1.length) head)
+}
+
+def tokenize(cs: List[Char]) : List[T] = cs match {
+ case Nil => Nil
+ case _ => one_token(cs) match {
+ case Left((rest, token)) => token :: tokenize(rest)
+ case Right(s) => { println("Cannot tokenize: \"" + s + "\""); Nil }
+ }
+}
+
+def fromString(s: String) : List[T] =
+ tokenize(s.toList).filterNot(excl.contains(_))
+
+def fromFile(name: String) : List[T] =
+ fromString(io.Source.fromFile(name).mkString)
+
+}
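+// usage sketch (string tokens for brevity; RANGE as used by the
+// lexers importing this package):
+//   val rules: List[(Rexp, List[Char] => String)] =
+//     List((RANGE("0123456789"), (s) => s.mkString),
+//          (RANGE(" "), (s) => " "))
+//   Tokenizer(rules, excl = List(" ")).fromString("1 2")  // List("1", "2")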
+
+
// regular expression for specifying
// ranges of characters
def Range(s : List[Char]) : Rexp = s match {
@@ -90,39 +127,4 @@
}
implicit def string2rexp(s : String) : Rexp = charlist2rexp(s.toList)
-
-type Rule[T] = (Rexp, List[Char] => T)
-
-case class Tokenizer[T](rules: List[Rule[T]], excl: List[T] = Nil) {
-
- def munch(r: Rexp, action: List[Char] => T, s: List[Char], t: List[Char]) : Option[(List[Char], T)] =
- s match {
- case Nil if (nullable(r)) => Some(Nil, action(t))
- case Nil => None
- case c::s if (no_more(der (c, r)) && nullable(r)) => Some(c::s, action(t))
- case c::s if (no_more(der (c, r))) => None
- case c::s => munch(der (c, r), action, s, t ::: List(c))
- }
-
- def one_token(s: List[Char]) : Either[(List[Char], T), String] = {
- val somes = rules.map { (r) => munch(r._1, r._2, s, Nil) }.flatten
- if (somes == Nil) Right(s.mkString)
- else Left(somes sortBy (_._1.length) head)
- }
-
- def tokenize(cs: List[Char]) : List[T] = cs match {
- case Nil => Nil
- case _ => one_token(cs) match {
- case Left((rest, token)) => token :: tokenize(rest)
- case Right(s) => { println("Cannot tokenize: \"" + s + "\""); Nil }
- }
- }
-
- def fromString(s: String) : List[T] =
- tokenize(s.toList).filterNot(excl.contains(_))
-
- def fromFile(name: String) : List[T] =
- fromString(io.Source.fromFile(name).mkString)
-
}
-
--- a/parser3.scala Wed Nov 28 08:28:26 2012 +0000
+++ b/parser3.scala Mon Dec 03 15:35:27 2012 +0000
@@ -1,5 +1,11 @@
+package object parser {
-// parser combinators with input type I and return type T
+// parser combinators
+// with input type I and return type T
+//
+// needs to be compiled with scalac parser3.scala
abstract class Parser[I <% Seq[_], T] {
def parse(ts: I): Set[(T, I)]
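+  // parse returns every possible parse of ts, each result paired with
+  // the part of the input that the parser did not consume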
@@ -34,4 +38,4 @@
for ((head, tail) <- p.parse(sb)) yield (f(head), tail)
}
-
+}
--- a/re1.scala Wed Nov 28 08:28:26 2012 +0000
+++ b/re1.scala Mon Dec 03 15:35:27 2012 +0000
@@ -49,6 +49,14 @@
// main matcher function
def matcher(r: Rexp, s: String) : Boolean = nullable(ders(s.toList, r))
+//example
+//val r = STAR(ALT(SEQ(CHAR('a'), CHAR('b')), CHAR('b')))
+//der('b', r)
+//der('b', r)
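+//matcher(r, "abbb")  // true: "abbb" decomposes as ab, b, b
+//matcher(r, "aab")   // false: the first 'a' is not followed by a 'b'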
//one or zero
def OPT(r: Rexp) = ALT(r, EMPTY)
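+//example: matcher(OPT(CHAR('a')), "")  // true, zero occurrences
+//         matcher(OPT(CHAR('a')), "a") // true, one occurrence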
--- a/while.scala Wed Nov 28 08:28:26 2012 +0000
+++ b/while.scala Mon Dec 03 15:35:27 2012 +0000
@@ -1,7 +1,8 @@
-// A parser and evaluator for teh while language
+// A parser and evaluator for the WHILE language
//
-//:load matcher.scala
-//:load parser3.scala
+import matcher._
+import parser._
+
// some regular expressions
-val SYM = RANGE("ABCDEFGHIJKLMNOPQRSTUVXYZabcdefghijklmnopqrstuvwxyz_")
+val SYM = RANGE("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz_")
@@ -30,7 +31,7 @@
case class T_NUM(s: String) extends Token
case class T_KWD(s: String) extends Token
-val lexing_rules: List[Rule[Token]] =
+val lexing_rules: List[(Rexp, List[Char] => Token)] =
List((KEYWORD, (s) => T_KWD(s.mkString)),
(ID, (s) => T_ID(s.mkString)),
(OP, (s) => T_OP(s.mkString)),
--- a/while1.scala Wed Nov 28 08:28:26 2012 +0000
+++ b/while1.scala Mon Dec 03 15:35:27 2012 +0000
@@ -1,7 +1,8 @@
-// A parser and evaluator for teh while language
+// A parser and evaluator for the WHILE language
//
-//:load matcher.scala
-//:load parser3.scala
+import matcher._
+import parser._
+
// some regular expressions
-val SYM = RANGE("ABCDEFGHIJKLMNOPQRSTUVXYZabcdefghijklmnopqrstuvwxyz_")
+val SYM = RANGE("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz_")
@@ -32,7 +33,7 @@
case class T_NUM(s: String) extends Token
case class T_KWD(s: String) extends Token
-val lexing_rules: List[Rule[Token]] =
+val lexing_rules: List[(Rexp, List[Char] => Token)] =
List((KEYWORD, (s) => T_KWD(s.mkString)),
(ID, (s) => T_ID(s.mkString)),
(OP, (s) => T_OP(s.mkString)),