# HG changeset patch
# User Christian Urban
# Date 1446932223 0
# Node ID 04127a5aad2393af9f3453160d9648abd8cfb172
# Parent 5a83336a969073547a5ea08a285d5cb9d6715194
updated

diff -r 5a83336a9690 -r 04127a5aad23 progs/comb1.scala
--- a/progs/comb1.scala	Fri Nov 06 08:52:16 2015 +0000
+++ b/progs/comb1.scala	Sat Nov 07 21:37:03 2015 +0000
@@ -44,11 +44,13 @@
   }
 }
 
-case object NumParser extends Parser[String, String] {
+case object NumParser extends Parser[String, Int] {
   val reg = "[0-9]+".r
   def parse(sb: String) = reg.findPrefixOf(sb) match {
     case None => Set()
-    case Some(s) => Set(sb.splitAt(s.length))
+    case Some(s) => Set(sb.splitAt(s.length) match {
+      case (x, y) => (x.toInt, y)
+    })
   }
 }
 
@@ -88,16 +90,16 @@
 P.parse_all("()")
 
 // arithmetic expressions
-lazy val E: Parser[String, String] = 
-  (F ~ "*" ~ T) ==> { case ((x, y), z) => x + y + z } || F
-lazy val F: Parser[String, String] = 
-  ((T ~ "+" ~ T) ==> { case ((x, y), z) => x + y + z } || 
-   (T ~ "-" ~ T) ==> { case ((x, y), z) => x + y + z } || T)
-lazy val T: Parser[String, String] = 
-  ("(" ~ E ~ ")") ==> { case ((x, y), z) => x + y + z } || NumParser
+lazy val E: Parser[String, Int] = 
+  (F ~ "*" ~ F) ==> { case ((x, y), z) => x * z } || F
+lazy val F: Parser[String, Int] = 
+  ((T ~ "+" ~ T) ==> { case ((x, y), z) => x + z } || 
+   (T ~ "-" ~ T) ==> { case ((x, y), z) => x - z } || T)
+lazy val T: Parser[String, Int] = 
+  ("(" ~ E ~ ")") ==> { case ((x, y), z) => y } || NumParser
 
-println(E.parse_all("1*2+3"))
-println(E.parse_all("1+2*3"))
+println(E.parse("1*2+3"))
+println(E.parse("1 + 2 * 3"))
 println(E.parse_all("(1+2)+3"))
 println(E.parse_all("1+2+3"))  // this is not parsed, because of
                                // how the grammar is set up
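The comb1.scala hunks above change the arithmetic grammar from a recogniser that glues strings back together into a parser that evaluates while it parses: NumParser now returns an Int, and the semantic actions behind ==> multiply, add and subtract the subresults. A rough sketch of the resulting behaviour, assuming the Parser class and the ~, || and ==> combinators defined earlier in comb1.scala:

    NumParser.parse("123abc")   // now Set((123, "abc")) rather than Set(("123", "abc"))
    E.parse_all("1*2+3")        // Set(5): the right factor greedily parses "2+3"
    E.parse_all("(1+2)+3")      // Set(6)

Note that "1*2+3" evaluates to 5 rather than 7, because E tries F ~ "*" ~ F first, so multiplication effectively binds more loosely than addition in this grammar.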
diff -r 5a83336a9690 -r 04127a5aad23 progs/token2.scala
--- a/progs/token2.scala	Fri Nov 06 08:52:16 2015 +0000
+++ b/progs/token2.scala	Sat Nov 07 21:37:03 2015 +0000
@@ -1,25 +1,35 @@
-import scala.language.implicitConversions
+import scala.language.implicitConversions
 import scala.language.reflectiveCalls
-import scala.util._
-import scala.annotation.tailrec
+import scala.annotation.tailrec
 
-sealed abstract class Rexp
-
+abstract class Rexp
 case object NULL extends Rexp
 case object EMPTY extends Rexp
 case class CHAR(c: Char) extends Rexp
-case class ALT(r1: Rexp, r2: Rexp) extends Rexp
-case class SEQ(r1: Rexp, r2: Rexp) extends Rexp
-case class STAR(r: Rexp) extends Rexp
+case class ALT(r1: Rexp, r2: Rexp) extends Rexp
+case class SEQ(r1: Rexp, r2: Rexp) extends Rexp
+case class STAR(r: Rexp) extends Rexp
+case class RECD(x: String, r: Rexp) extends Rexp
+case class CRANGE(cs: String) extends Rexp
+case class PLUS(r: Rexp) extends Rexp
 
-def charlist2rexp(s : List[Char]) : Rexp = s match {
+abstract class Val
+case object Empty extends Val
+case class Chr(c: Char) extends Val
+case class Seq(v1: Val, v2: Val) extends Val
+case class Left(v: Val) extends Val
+case class Right(v: Val) extends Val
+case class Stars(vs: List[Val]) extends Val
+case class Rec(x: String, v: Val) extends Val
+
+// some convenience for typing in regular expressions
+def charlist2rexp(s : List[Char]): Rexp = s match {
   case Nil => EMPTY
   case c::Nil => CHAR(c)
   case c::s => SEQ(CHAR(c), charlist2rexp(s))
 }
 
 implicit def string2rexp(s : String) : Rexp = charlist2rexp(s.toList)
-
 implicit def RexpOps(r: Rexp) = new {
   def | (s: Rexp) = ALT(r, s)
   def % = STAR(r)
@@ -32,58 +42,11 @@
   def % = STAR(s)
   def ~ (r: Rexp) = SEQ(s, r)
   def ~ (r: String) = SEQ(s, r)
+  def $ (r: Rexp) = RECD(s, r)
 }
 
-def Range(s : List[Char]) : Rexp = s match {
-  case Nil => NULL
-  case c::Nil => CHAR(c)
-  case c::s => ALT(CHAR(c), Range(s))
-}
-def RANGE(s: String) = Range(s.toList)
-
-def PLUS(r: Rexp) = SEQ(r, STAR(r))
-
-val SYM = RANGE("ABCDEFGHIJKLMNOPQRSTUVXYZabcdefghijklmnopqrstuvwxyz_")
-val DIGIT = RANGE("0123456789")
-val ID = SYM ~ (SYM | DIGIT).%
-val NUM = PLUS(DIGIT)
-val KEYWORD : Rexp = "skip" | "while" | "do" | "if" | "then" | "else" | "read" | "write"
-val SEMI: Rexp = ";"
-val OP: Rexp = ":=" | "=" | "-" | "+" | "*" | "!=" | "<" | ">"
-val WHITESPACE = PLUS(RANGE(" \n"))
-val RPAREN: Rexp = ")"
-val LPAREN: Rexp = "("
-val BEGIN: Rexp = "{"
-val END: Rexp = "}"
-
-abstract class Token
-case object T_WHITESPACE extends Token
-case object T_SEMI extends Token
-case object T_LPAREN extends Token
-case object T_RPAREN extends Token
-case object T_BEGIN extends Token
-case object T_END extends Token
-case class T_ID(s: String) extends Token
-case class T_OP(s: String) extends Token
-case class T_NUM(s: String) extends Token
-case class T_KWD(s: String) extends Token
-case class T_ERR(s: String) extends Token // special error token
-
-type TokenFun = String => Token
-type LexRules = List[(Rexp, TokenFun)]
-val lexing_rules: LexRules = 
-  List((KEYWORD, (s) => T_KWD(s)),
-       (ID, (s) => T_ID(s)),
-       (OP, (s) => T_OP(s)),
-       (NUM, (s) => T_NUM(s)),
-       (SEMI, (s) => T_SEMI),
-       (LPAREN, (s) => T_LPAREN),
-       (RPAREN, (s) => T_RPAREN),
-       (BEGIN, (s) => T_BEGIN),
-       (END, (s) => T_END),
-       (WHITESPACE, (s) => T_WHITESPACE))
-
-
+// nullable function: tests whether the regular 
+// expression can recognise the empty string
 def nullable (r: Rexp) : Boolean = r match {
   case NULL => false
   case EMPTY => true
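The remaining hunk below swaps the old zeroable/munch maximal-munch lexer for lexing with derivatives and values in the style of Sulzmann and Lu. The underlying matcher idea is unchanged: a regular expression r matches a string s if the derivative of r with respect to all characters of s is nullable. A minimal sketch using only the nullable and der functions from this file (the test regex is an illustrative assumption):

    def matches(r: Rexp, s: String) : Boolean =
      nullable(s.toList.foldLeft(r)((r1, c) => der(c, r1)))

    matches(SEQ(CHAR('a'), STAR(CHAR('b'))), "abbb")   // true
    matches(SEQ(CHAR('a'), STAR(CHAR('b'))), "ba")     // false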
@@ -91,77 +54,239 @@
   case ALT(r1, r2) => nullable(r1) || nullable(r2)
   case SEQ(r1, r2) => nullable(r1) && nullable(r2)
   case STAR(_) => true
+  case RECD(_, r) => nullable(r)
+  case CRANGE(_) => false
+  case PLUS(r) => nullable(r)
 }
 
-def zeroable (r: Rexp) : Boolean = r match {
-  case NULL => true
-  case EMPTY => false
-  case CHAR(_) => false
-  case ALT(r1, r2) => zeroable(r1) && zeroable(r2)
-  case SEQ(r1, r2) => zeroable(r1) || zeroable(r2)
-  case STAR(_) => false
-}
-
+// derivative of a regular expression w.r.t. a character
 def der (c: Char, r: Rexp) : Rexp = r match {
   case NULL => NULL
-  case EMPTY => NULL
+  case EMPTY => NULL
   case CHAR(d) => if (c == d) EMPTY else NULL
   case ALT(r1, r2) => ALT(der(c, r1), der(c, r2))
   case SEQ(r1, r2) => 
     if (nullable(r1)) ALT(SEQ(der(c, r1), r2), der(c, r2))
     else SEQ(der(c, r1), r2)
   case STAR(r) => SEQ(der(c, r), STAR(r))
+  case RECD(_, r1) => der(c, r1)
+  case CRANGE(cs) => if (cs.contains(c)) EMPTY else NULL
+  case PLUS(r) => SEQ(der(c, r), STAR(r))
+}
+
+// derivative w.r.t. a string (iterates der)
+def ders (s: List[Char], r: Rexp) : Rexp = s match {
+  case Nil => r
+  case c::s => ders(s, der(c, r))
+}
+
+// extracts a string from value
+def flatten(v: Val) : String = v match {
+  case Empty => ""
+  case Chr(c) => c.toString
+  case Left(v) => flatten(v)
+  case Right(v) => flatten(v)
+  case Seq(v1, v2) => flatten(v1) + flatten(v2)
+  case Stars(vs) => vs.map(flatten).mkString
+  case Rec(_, v) => flatten(v)
+}
+
+// extracts an environment from a value
+def env(v: Val) : List[(String, String)] = v match {
+  case Empty => Nil
+  case Chr(c) => Nil
+  case Left(v) => env(v)
+  case Right(v) => env(v)
+  case Seq(v1, v2) => env(v1) ::: env(v2)
+  case Stars(vs) => vs.flatMap(env)
+  case Rec(x, v) => (x, flatten(v))::env(v)
+}
+
+// injection part
+def mkeps(r: Rexp) : Val = r match {
+  case EMPTY => Empty
+  case ALT(r1, r2) => 
+    if (nullable(r1)) Left(mkeps(r1)) else Right(mkeps(r2))
+  case SEQ(r1, r2) => Seq(mkeps(r1), mkeps(r2))
+  case STAR(r) => Stars(Nil)
+  case RECD(x, r) => Rec(x, mkeps(r))
+  case PLUS(r) => Stars(List(mkeps(r)))
 }
 
+def inj(r: Rexp, c: Char, v: Val) : Val = (r, v) match {
+  case (STAR(r), Seq(v1, Stars(vs))) => Stars(inj(r, c, v1)::vs)
+  case (SEQ(r1, r2), Seq(v1, v2)) => Seq(inj(r1, c, v1), v2)
+  case (SEQ(r1, r2), Left(Seq(v1, v2))) => Seq(inj(r1, c, v1), v2)
+  case (SEQ(r1, r2), Right(v2)) => Seq(mkeps(r1), inj(r2, c, v2))
+  case (ALT(r1, r2), Left(v1)) => Left(inj(r1, c, v1))
+  case (ALT(r1, r2), Right(v2)) => Right(inj(r2, c, v2))
+  case (CHAR(_), Empty) => Chr(c)
+  case (CRANGE(_), Empty) => Chr(c)
+  case (RECD(x, r1), _) => Rec(x, inj(r1, c, v))
+  case (PLUS(r), Seq(v1, Stars(vs))) => Stars(inj(r, c, v1)::vs)
+}
 
-// calculates derivatives until all of them are zeroable
-@tailrec
-def munch(s: List[Char], 
-          pos: Int, 
-          rs: LexRules, 
-          last: Option[(Int, TokenFun)]): Option[(Int, TokenFun)] = rs match {
-  case Nil => last
-  case rs if (s.length <= pos) => last
-  case rs => {
-    val ders = rs.map({case (r, tf) => (der(s(pos), r), tf)})
-    val rs_nzero = ders.filterNot({case (r, _) => zeroable(r)})
-    val rs_nulls = ders.filter({case (r, _) => nullable(r)})
-    val new_last = if (rs_nulls != Nil) Some((pos, rs_nulls.head._2)) else last
-    munch(s, 1 + pos, rs_nzero, new_last)
+// main lexing function (produces a value)
+def lex(r: Rexp, s: List[Char]) : Val = s match {
+  case Nil => if (nullable(r)) mkeps(r) else throw new Exception("Not matched")
+  case c::cs => inj(r, c, lex(der(c, r), cs))
+}
+
+def lexing(r: Rexp, s: String) : Val = lex(r, s.toList)
+
+lexing(("ab" | "ab") ~ ("b" | EMPTY), "ab")
+
+// some "rectification" functions for simplification
+def F_ID(v: Val): Val = v
+def F_RIGHT(f: Val => Val) = (v:Val) => Right(f(v))
+def F_LEFT(f: Val => Val) = (v:Val) => Left(f(v))
+def F_ALT(f1: Val => Val, f2: Val => Val) = (v:Val) => v match {
+  case Right(v) => Right(f2(v))
+  case Left(v) => Left(f1(v))
+}
+def F_SEQ(f1: Val => Val, f2: Val => Val) = (v:Val) => v match {
+  case Seq(v1, v2) => Seq(f1(v1), f2(v2))
+}
+def F_SEQ_Empty1(f1: Val => Val, f2: Val => Val) = 
+  (v:Val) => Seq(f1(Empty), f2(v))
+def F_SEQ_Empty2(f1: Val => Val, f2: Val => Val) = 
+  (v:Val) => Seq(f1(v), f2(Empty))
+def F_RECD(f: Val => Val) = (v:Val) => v match {
+  case Rec(x, v) => Rec(x, f(v))
+}
+def F_ERROR(v: Val): Val = throw new Exception("error")
+
+// simplification of regular expressions returning also a
+// rectification function; no simplification under STAR
+def simp(r: Rexp): (Rexp, Val => Val) = r match {
+  case ALT(r1, r2) => {
+    val (r1s, f1s) = simp(r1)
+    val (r2s, f2s) = simp(r2)
+    (r1s, r2s) match {
+      case (NULL, _) => (r2s, F_RIGHT(f2s))
+      case (_, NULL) => (r1s, F_LEFT(f1s))
+      case _ => if (r1s == r2s) (r1s, F_LEFT(f1s))
+                else (ALT (r1s, r2s), F_ALT(f1s, f2s))
+    }
+  }
+  case SEQ(r1, r2) => {
+    val (r1s, f1s) = simp(r1)
+    val (r2s, f2s) = simp(r2)
+    (r1s, r2s) match {
+      case (NULL, _) => (NULL, F_ERROR)
+      case (_, NULL) => (NULL, F_ERROR)
+      case (EMPTY, _) => (r2s, F_SEQ_Empty1(f1s, f2s))
+      case (_, EMPTY) => (r1s, F_SEQ_Empty2(f1s, f2s))
+      case _ => (SEQ(r1s,r2s), F_SEQ(f1s, f2s))
+    }
+  }
+  case RECD(x, r1) => {
+    val (r1s, f1s) = simp(r1)
+    (RECD(x, r1s), F_RECD(f1s))
+  }
+  case r => (r, F_ID)
+}
+
+def lex_simp(r: Rexp, s: List[Char]) : Val = s match {
+  case Nil => if (nullable(r)) mkeps(r) else throw new Exception("Not matched")
+  case c::cs => {
+    val (r_simp, f_simp) = simp(der(c, r))
+    inj(r, c, f_simp(lex_simp(r_simp, cs)))
   }
 }
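Everything up to lex_simp above builds a lexing value: mkeps constructs the value for a nullable regular expression, inj reverses one derivative step, and simp keeps the intermediate regular expressions small while the rectification functions repair the values afterwards. A hypothetical run, assuming the definitions in this hunk, with $ attaching record labels:

    val v = lexing(("x" $ "ab") ~ ("y" $ "b".%), "abbb")
    // flatten(v) == "abbb"
    // env(v)     == List(("x", "ab"), ("y", "bb"))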
 
-// iterates the munching function and returns a Token list
-def tokenize(s: String, rs: LexRules) : List[Token] = munch(s.toList, 0, rs, None) match {
-  case None if (s == "") => Nil
-  case None => List(T_ERR("Lexing error: $s"))
-  case Some((n, tf)) => {
-    val (head, tail) = s.splitAt(n + 1)
-    tf(head)::tokenize(tail, rs)
-  }
+def lexing_simp(r: Rexp, s: String) : Val = lex_simp(r, s.toList)
+
+lexing_simp(("a" | "ab") ~ ("b" | ""), "ab")
+
+// Lexing Rules for a Small While Language
+
+val SYM = CRANGE("abcdefghijklmnopqrstuvwxyz")
+val DIGIT = CRANGE("0123456789")
+val ID = SYM ~ (SYM | DIGIT).%
+val NUM = PLUS(DIGIT)
+val KEYWORD : Rexp = "skip" | "while" | "do" | "if" | "then" | "else" | "read" | "write" | "true" | "false"
+val SEMI: Rexp = ";"
+val OP: Rexp = ":=" | "==" | "-" | "+" | "*" | "!=" | "<" | ">" | "<=" | ">=" | "%" | "/"
+val WHITESPACE = PLUS(" " | "\n" | "\t")
+val RPAREN: Rexp = ")"
+val LPAREN: Rexp = "("
+val BEGIN: Rexp = "{"
+val END: Rexp = "}"
+val STRING: Rexp = "\"" ~ SYM.% ~ "\""
+
+
+val WHILE_REGS = (("k" $ KEYWORD) | 
+                  ("i" $ ID) | 
+                  ("o" $ OP) | 
+                  ("n" $ NUM) | 
+                  ("s" $ SEMI) | 
+                  ("str" $ STRING) |
+                  ("p" $ (LPAREN | RPAREN)) | 
+                  ("b" $ (BEGIN | END)) | 
+                  ("w" $ WHITESPACE)).%
+
+// filters out all white spaces
+def tokenise(r: Rexp, s: String) = 
+  env(lexing_simp(r, s)).filterNot { (s) => s._1 == "w"}.mkString("\n")
+
+
+// Testing
+//============
+
+def time[T](code: => T) = {
+  val start = System.nanoTime()
+  val result = code
+  val end = System.nanoTime()
+  println((end - start)/1.0e9)
+  result
 }
 
-val test_prog = """
-start := XXX;
-x := start;
-y := start;
-z := start;
-while 0 < x do {
- while 0 < y do {
-  while 0 < z do {
-   z := z - 1
-  };
-  z := start;
-  y := y - 1
- };
- y := start;
- x := x - 1
+val r1 = ("a" | "ab") ~ ("bcd" | "c")
+println(lexing(r1, "abcd"))
+
+val r2 = ("" | "a") ~ ("ab" | "b")
+println(lexing(r2, "ab"))
+
+
+// Two Simple While Tests
+//========================
+println("prog0 test")
+
+val prog0 = """read n"""
+println(env(lexing_simp(WHILE_REGS, prog0)))
+
+println("prog1 test")
+
+val prog1 = """read n; write (n)"""
+println(env(lexing_simp(WHILE_REGS, prog1)))
+
+
+// Big Test
+//==========
+
+val prog2 = """
+write "fib";
+read n;
+minus1 := 0;
+minus2 := 1;
+while n > 0 do {
+  temp := minus2;
+  minus2 := minus1 + minus2;
+  minus1 := temp;
+  n := n - 1
 };
-write x;
-write y;
-write z
-"""
-println(tokenize(test_prog, lexing_rules).mkString("\n"))
+write "result";
+write minus2
+"""
+
+println("Tokens")
+println(tokenise(WHILE_REGS, prog2))
+
+for (i <- 1 to 120 by 10) {
+  print(i.toString + ": ")
+  time(lexing_simp(WHILE_REGS, prog2 * i))
+}
+
+
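For reference, tokenise filters out the "w" (whitespace) records and prints one (label, lexeme) pair per line; on a small input such as "read n" its output would plausibly be

    (k,read)
    (i,n)

The final loop measures lexing_simp on growing inputs: prog2 * i repeats the program i times (Scala's string repetition), and time prints the elapsed seconds, giving a rough check that simplification keeps the lexer's runtime growth tame.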