# HG changeset patch
# User Christian Urban
# Date 1352882730 0
# Node ID 5988e44ea048eaeba44f7e8e9b9f4e2ba0d85109
# Parent a80f0cf17f91714ce5246db76319cc0754ae2092
added

diff -r a80f0cf17f91 -r 5988e44ea048 matcher.scala
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/matcher.scala Wed Nov 14 08:45:30 2012 +0000
@@ -0,0 +1,100 @@
+
+// regular expressions including NOT
+abstract class Rexp
+
+case object NULL extends Rexp
+case object EMPTY extends Rexp
+case class CHAR(c: Char) extends Rexp
+case class ALT(r1: Rexp, r2: Rexp) extends Rexp
+case class SEQ(r1: Rexp, r2: Rexp) extends Rexp
+case class STAR(r: Rexp) extends Rexp
+case class NOT(r: Rexp) extends Rexp
+
+
+// some convenience for typing in regular expressions
+def charlist2rexp(s : List[Char]) : Rexp = s match {
+  case Nil => EMPTY
+  case c::Nil => CHAR(c)
+  case c::s => SEQ(CHAR(c), charlist2rexp(s))
+}
+implicit def string2rexp(s : String) : Rexp = charlist2rexp(s.toList)
+
+
+// nullable function: tests whether the regular
+// expression can recognise the empty string
+def nullable (r: Rexp) : Boolean = r match {
+  case NULL => false
+  case EMPTY => true
+  case CHAR(_) => false
+  case ALT(r1, r2) => nullable(r1) || nullable(r2)
+  case SEQ(r1, r2) => nullable(r1) && nullable(r2)
+  case STAR(_) => true
+  case NOT(r) => !(nullable(r))
+}
+
+// tests whether a regular expression
+// cannot recognise more
+def no_more (r: Rexp) : Boolean = r match {
+  case NULL => true
+  case EMPTY => false
+  case CHAR(_) => false
+  case ALT(r1, r2) => no_more(r1) && no_more(r2)
+  case SEQ(r1, r2) => if (nullable(r1)) (no_more(r1) && no_more(r2)) else no_more(r1)
+  case STAR(_) => false
+  case NOT(r) => !(no_more(r))
+}
+
+
+// derivative of a regular expression w.r.t. a character
+def der (c: Char, r: Rexp) : Rexp = r match {
+  case NULL => NULL
+  case EMPTY => NULL
+  case CHAR(d) => if (c == d) EMPTY else NULL
+  case ALT(r1, r2) => ALT(der(c, r1), der(c, r2))
+  case SEQ(r1, r2) =>
+    if (nullable(r1)) ALT(SEQ(der(c, r1), r2), der(c, r2))
+    else SEQ(der(c, r1), r2)
+  case STAR(r) => SEQ(der(c, r), STAR(r))
+  case NOT(r) => NOT(der (c, r))
+}
+
+// regular expression for specifying
+// ranges of characters
+def RANGE(s : List[Char]) : Rexp = s match {
+  case Nil => NULL
+  case c::Nil => CHAR(c)
+  case c::s => ALT(CHAR(c), RANGE(s))
+}
+
+// one or more
+def PLUS(r: Rexp) = SEQ(r, STAR(r))
+
+
+type Rule[T] = (Rexp, List[Char] => T)
+
+def error (s: String) = throw new IllegalArgumentException ("Cannot tokenize: " + s)
+
+def munch[T](r: Rexp, action: List[Char] => T, s: List[Char], t: List[Char]) : Option[(List[Char], T)] =
+  s match {
+    case Nil if (nullable(r)) => Some(Nil, action(t))
+    case Nil => None
+    case c::s if (no_more(der (c, r)) && nullable(r)) => Some(c::s, action(t))
+    case c::s if (no_more(der (c, r))) => None
+    case c::s => munch(der (c, r), action, s, t ::: List(c))
+  }
+
+def one_token[T](rs: List[Rule[T]], s: List[Char]) : (List[Char], T) = {
+  val somes = rs.map { (r) => munch(r._1, r._2, s, Nil) } .flatten
+  if (somes == Nil) error(s.mkString) else (somes sortBy (_._1.length) head)
+}
+
+def tokenize[T](rs: List[Rule[T]], s: List[Char]) : List[T] = s match {
+  case Nil => Nil
+  case _ => one_token(rs, s) match {
+    case (rest, token) => token :: tokenize(rs, rest)
+  }
+}
+
+
+
+
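A minimal REPL sketch of how the matcher above can be exercised, assuming matcher.scala has been loaded with :load matcher.scala; AB and toy_rules are names invented here for illustration and use only definitions from that file.

// match "ab" against SEQ(CHAR('a'), CHAR('b')) by taking derivatives
val AB = SEQ(CHAR('a'), CHAR('b'))
nullable(der('b', der('a', AB)))      // true: "ab" is accepted
nullable(der('a', der('a', AB)))      // false: "aa" is rejected

// a two-rule tokenizer over the letters a and b, keeping the matched strings
val toy_rules: List[Rule[String]] =
  List((PLUS(CHAR('a')), (s: List[Char]) => s.mkString),
       (PLUS(CHAR('b')), (s: List[Char]) => s.mkString))
tokenize(toy_rules, "aaabbaa".toList)  // List("aaa", "bb", "aa")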
RANGE("0123456789".toList) +val NONZERODIGIT = RANGE("123456789".toList) + +val NUMBER = ALT(SEQ(NONZERODIGIT, STAR(DIGIT)), "0") +val LPAREN = CHAR('(') +val RPAREN = CHAR(')') +val WHITESPACE = PLUS(RANGE(" \n".toList)) +val OPS = RANGE("+-*".toList) + +// for classifying the strings that have been recognised +abstract class Token +case object T_WHITESPACE extends Token +case object T_NUM extends Token +case class T_OP(s: String) extends Token +case object T_LPAREN extends Token +case object T_RPAREN extends Token +case class T_NT(s: String, rhs: List[Token]) extends Token + +def tokenizer(rs: List[Rule[Token]], s: String) : List[Token] = + tokenize(rs, s.toList).filterNot(_ match { + case T_WHITESPACE => true + case _ => false + }) + + + +// lexing rules for arithmetic expressions +val lexing_rules: List[Rule[Token]]= + List((NUMBER, (s) => T_NUM), + (WHITESPACE, (s) => T_WHITESPACE), + (LPAREN, (s) => T_LPAREN), + (RPAREN, (s) => T_RPAREN), + (OPS, (s) => T_OP(s.mkString))) + + +type Grammar = List[(String, List[Token])] + +// grammar for arithmetic expressions +val grammar = + List ("E" -> List(T_NUM), + "E" -> List(T_NT("E", Nil), T_OP("+"), T_NT("E", Nil)), + "E" -> List(T_NT("E", Nil), T_OP("-"), T_NT("E", Nil)), + "E" -> List(T_NT("E", Nil), T_OP("*"), T_NT("E", Nil)), + "E" -> List(T_LPAREN, T_NT("E", Nil), T_RPAREN)) + +def startsWith[A](ts1: List[A], ts2: List[A]) : Boolean = (ts1, ts2) match { + case (_, Nil) => true + case (T_NT(e, _)::ts1,T_NT(f, _)::ts2) => (e == f) && startsWith(ts1, ts2) + case (t1::ts1, t2::ts2) => (t1 == t2) && startsWith(ts1, ts2) + case _ => false +} + +def chop[A](ts1: List[A], prefix: List[A], ts2: List[A]) : Option[(List[A], List[A])] = + ts1 match { + case Nil => None + case t::ts => + if (startsWith(ts1, prefix)) Some(ts2.reverse, ts1.drop(prefix.length)) + else chop(ts, prefix, t::ts2) + } + +// examples +chop(List(1,2,3,4,5,6,7,8,9), List(4,5), Nil) +chop(List(1,2,3,4,5,6,7,8,9), List(3,5), Nil) + +def replace[A](ts: List[A], out: List[A], in: List [A]) = + chop(ts, out, Nil) match { + case None => None + case Some((before, after)) => Some(before ::: in ::: after) + } + +def parse1(g: Grammar, ts: List[Token]) : Boolean = ts match { + case List(T_NT("E", tree)) => { println(tree); true } + case _ => { + val tss = for ((lhs, rhs) <- g) yield replace(ts, rhs, List(T_NT(lhs, rhs))) + tss.flatten.exists(parse1(g, _)) + } +} + + +println() ; parse1(grammar, tokenizer(lexing_rules, "2 + 3 * 4 + 1")) +println() ; parse1(grammar, tokenizer(lexing_rules, "(2 + 3) * (4 + 1)")) +println() ; parse1(grammar, tokenizer(lexing_rules, "(2 + 3) * 4 (4 + 1)")) + + + diff -r a80f0cf17f91 -r 5988e44ea048 parser2.scala --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/parser2.scala Wed Nov 14 08:45:30 2012 +0000 @@ -0,0 +1,141 @@ +:load matcher.scala + +// some regular expressions +val LETTER = RANGE("abcdefghijklmnopqrstuvwxyz".toList) +val ID = PLUS(LETTER) + +val DIGIT = RANGE("0123456789".toList) +val NONZERODIGIT = RANGE("123456789".toList) +val NUMBER = ALT(SEQ(NONZERODIGIT, STAR(DIGIT)), "0") + +val LPAREN = CHAR('(') +val RPAREN = CHAR(')') + +val WHITESPACE = PLUS(RANGE(" \n".toList)) +val OPS = RANGE("+-*".toList) + +// for classifying the strings that have been recognised +abstract class Token + +case object T_WHITESPACE extends Token +case class T_NUM(s: String) extends Token +case class T_ID(s: String) extends Token +case class T_OP(s: String) extends Token +case object T_LPAREN extends Token +case object T_RPAREN extends Token +case object T_IF 
diff -r a80f0cf17f91 -r 5988e44ea048 parser2.scala
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/parser2.scala Wed Nov 14 08:45:30 2012 +0000
@@ -0,0 +1,141 @@
+:load matcher.scala
+
+// some regular expressions
+val LETTER = RANGE("abcdefghijklmnopqrstuvwxyz".toList)
+val ID = PLUS(LETTER)
+
+val DIGIT = RANGE("0123456789".toList)
+val NONZERODIGIT = RANGE("123456789".toList)
+val NUMBER = ALT(SEQ(NONZERODIGIT, STAR(DIGIT)), "0")
+
+val LPAREN = CHAR('(')
+val RPAREN = CHAR(')')
+
+val WHITESPACE = PLUS(RANGE(" \n".toList))
+val OPS = RANGE("+-*".toList)
+
+// for classifying the strings that have been recognised
+abstract class Token
+
+case object T_WHITESPACE extends Token
+case class T_NUM(s: String) extends Token
+case class T_ID(s: String) extends Token
+case class T_OP(s: String) extends Token
+case object T_LPAREN extends Token
+case object T_RPAREN extends Token
+case object T_IF extends Token
+case object T_THEN extends Token
+case object T_ELSE extends Token
+
+def tokenizer(rs: List[Rule[Token]], s: String) : List[Token] =
+  tokenize(rs, s.toList).filterNot(_ match {
+    case T_WHITESPACE => true
+    case _ => false
+  })
+
+
+// lexing rules for arithmetic expressions
+val lexing_rules: List[Rule[Token]] =
+  List(("if", (s) => T_IF),
+       ("then", (s) => T_THEN),
+       ("else", (s) => T_ELSE),
+       (NUMBER, (s) => T_NUM(s.mkString)),
+       (ID, (s) => T_ID(s.mkString)),
+       (WHITESPACE, (s) => T_WHITESPACE),
+       (LPAREN, (s) => T_LPAREN),
+       (RPAREN, (s) => T_RPAREN),
+       (OPS, (s) => T_OP(s.mkString)))
+
+
+// parse trees
+abstract class ParseTree
+case class Leaf(t: Token) extends ParseTree
+case class Branch(pts: List[ParseTree]) extends ParseTree
+
+def combine(pt1: ParseTree, pt2: ParseTree) = pt1 match {
+  case Leaf(t) => Branch(List(Leaf(t), pt2))
+  case Branch(pts) => Branch(pts ++ List(pt2))
+}
+
+// parser combinators
+abstract class Parser {
+  def parse(ts: List[Token]): Set[(ParseTree, List[Token])]
+
+  def parse_all(ts: List[Token]) : Set[ParseTree] =
+    for ((head, tail) <- parse(ts); if (tail == Nil)) yield head
+
+  def || (right : => Parser) : Parser = new AltParser(this, right)
+  def ~ (right : => Parser) : Parser = new SeqParser(this, right)
+}
+
+class AltParser(p: => Parser, q: => Parser) extends Parser {
+  def parse (ts: List[Token]) = p.parse(ts) ++ q.parse(ts)
+}
+
+class SeqParser(p: => Parser, q: => Parser) extends Parser {
+  def parse(ts: List[Token]) =
+    for ((head1, tail1) <- p.parse(ts);
+         (head2, tail2) <- q.parse(tail1)) yield (combine(head1, head2), tail2)
+}
+
+class ListParser(ps: => List[Parser]) extends Parser {
+  def parse(ts: List[Token]) = ps match {
+    case Nil => Set()
+    case p::Nil => p.parse(ts)
+    case p::ps =>
+      for ((head1, tail1) <- p.parse(ts);
+           (head2, tail2) <- new ListParser(ps).parse(tail1)) yield (Branch(List(head1, head2)), tail2)
+  }
+}
+
+case class TokParser(tok: Token) extends Parser {
+  def parse(ts: List[Token]) = ts match {
+    case t::ts if (t == tok) => Set((Leaf(t), ts))
+    case _ => Set ()
+  }
+}
+
+implicit def token2tparser(t: Token) = TokParser(t)
+
+case object IdParser extends Parser {
+  def parse(ts: List[Token]) = ts match {
+    case T_ID(s)::ts => Set((Leaf(T_ID(s)), ts))
+    case _ => Set ()
+  }
+}
+
+case object NumParser extends Parser {
+  def parse(ts: List[Token]) = ts match {
+    case T_NUM(s)::ts => Set((Leaf(T_NUM(s)), ts))
+    case _ => Set ()
+  }
+}
+
+lazy val E: Parser = (T ~ T_OP("+") ~ E) || T  // start symbol
+lazy val T: Parser = (F ~ T_OP("*") ~ T) || F
+lazy val F: Parser = (T_LPAREN ~ E ~ T_RPAREN) || NumParser
+
+println(E.parse_all(tokenizer(lexing_rules, "1 + 2 + 3")))
+
+def eval(t: ParseTree) : Int = t match {
+  case Leaf(T_NUM(n)) => n.toInt
+  case Branch(List(t1, Leaf(T_OP("+")), t2)) => eval(t1) + eval(t2)
+  case Branch(List(t1, Leaf(T_OP("*")), t2)) => eval(t1) * eval(t2)
+  case Branch(List(Leaf(T_LPAREN), t, Leaf(T_RPAREN))) => eval(t)
+}
+
+(E.parse_all(tokenizer(lexing_rules, "1 + 2 + 3"))).map(eval(_))
+(E.parse_all(tokenizer(lexing_rules, "1 + 2 * 3"))).map(eval(_))
+(E.parse_all(tokenizer(lexing_rules, "(1 + 2) * 3"))).map(eval(_))
+
+lazy val EXPR: Parser =
+  new ListParser(List(T_IF, EXPR, T_THEN, EXPR)) ||
+  new ListParser(List(T_IF, EXPR, T_THEN, EXPR, T_ELSE, EXPR)) ||
+  IdParser
+
+println(EXPR.parse_all(tokenizer(lexing_rules, "if a then b else c")))
+println(EXPR.parse_all(tokenizer(lexing_rules, "if a then if x then y else c")))
+
+
+
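A minimal sketch of why eval can match on three-element lists: combine flattens a sequence of parsers into a single Branch, so a binary operator node has exactly three children. Assuming parser2.scala has been loaded, only its own definitions are used here.

E.parse_all(tokenizer(lexing_rules, "1 + 2"))
// Set(Branch(List(Leaf(T_NUM(1)), Leaf(T_OP(+)), Leaf(T_NUM(2)))))

eval(Branch(List(Leaf(T_NUM("1")), Leaf(T_OP("+")), Leaf(T_NUM("2")))))  // 3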
diff -r a80f0cf17f91 -r 5988e44ea048 parser2a.scala
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/parser2a.scala Wed Nov 14 08:45:30 2012 +0000
@@ -0,0 +1,106 @@
+:load matcher.scala
+
+// some regular expressions
+val LETTER = RANGE("abcdefghijklmnopqrstuvwxyz".toList)
+val ID = PLUS(LETTER)
+
+val DIGIT = RANGE("0123456789".toList)
+val NONZERODIGIT = RANGE("123456789".toList)
+val NUMBER = ALT(SEQ(NONZERODIGIT, STAR(DIGIT)), "0")
+
+val LPAREN = CHAR('(')
+val RPAREN = CHAR(')')
+
+val WHITESPACE = PLUS(RANGE(" \n".toList))
+val OPS = RANGE("+-*".toList)
+
+// for classifying the strings that have been recognised
+abstract class Token
+
+case object T_WHITESPACE extends Token
+case class T_NUM(s: String) extends Token
+case class T_ID(s: String) extends Token
+case class T_OP(s: String) extends Token
+case object T_LPAREN extends Token
+case object T_RPAREN extends Token
+case object T_IF extends Token
+case object T_THEN extends Token
+case object T_ELSE extends Token
+
+def tokenizer(rs: List[Rule[Token]], s: String) : List[Token] =
+  tokenize(rs, s.toList).filterNot(_ match {
+    case T_WHITESPACE => true
+    case _ => false
+  })
+
+
+// lexing rules for arithmetic expressions
+val lexing_rules: List[Rule[Token]] =
+  List(("if", (s) => T_IF),
+       ("then", (s) => T_THEN),
+       ("else", (s) => T_ELSE),
+       (NUMBER, (s) => T_NUM(s.mkString)),
+       (ID, (s) => T_ID(s.mkString)),
+       (WHITESPACE, (s) => T_WHITESPACE),
+       (LPAREN, (s) => T_LPAREN),
+       (RPAREN, (s) => T_RPAREN),
+       (OPS, (s) => T_OP(s.mkString)))
+
+
+// parser combinators with return type T
+abstract class Parser[T] {
+  def parse(ts: List[Token]): Set[(T, List[Token])]
+
+  def parse_all(ts: List[Token]) : Set[T] =
+    for ((head, tail) <- parse(ts); if (tail == Nil)) yield head
+
+  def || (right : => Parser[T]) : Parser[T] = new AltParser(this, right)
+  def ==>[S] (f: => T => S) : Parser [S] = new FunParser(this, f)
+  def ~[S] (right : => Parser[S]) : Parser[(T, S)] = new SeqParser(this, right)
+  def ~>[S] (right : => Parser[S]) : Parser[S] = this ~ right ==> (x => x._2)
+  def <~[S] (right : => Parser[S]) : Parser[T] = this ~ right ==> (x => x._1)
+
+}
+
+class SeqParser[T, S](p: => Parser[T], q: => Parser[S]) extends Parser[(T, S)] {
+  def parse(sb: List[Token]) =
+    for ((head1, tail1) <- p.parse(sb);
+         (head2, tail2) <- q.parse(tail1)) yield ((head1, head2), tail2)
+}

+class AltParser[T](p: => Parser[T], q: => Parser[T]) extends Parser[T] {
+  def parse (sb: List[Token]) = p.parse(sb) ++ q.parse(sb)
+}
+
+class FunParser[T, S](p: => Parser[T], f: T => S) extends Parser[S] {
+  def parse (sb: List[Token]) =
+    for ((head, tail) <- p.parse(sb)) yield (f(head), tail)
+}
+
+
+case class TokParser(tok: Token) extends Parser[Token] {
+  def parse(ts: List[Token]) = ts match {
+    case t::ts if (t == tok) => Set((t, ts))
+    case _ => Set ()
+  }
+}
+
+implicit def token2tparser(t: Token) = TokParser(t)
+
+case object NumParser extends Parser[Int] {
+  def parse(ts: List[Token]) = ts match {
+    case T_NUM(s)::ts => Set((s.toInt, ts))
+    case _ => Set ()
+  }
+}
+
+lazy val E: Parser[Int] = (T ~ T_OP("+") ~ E) ==> { case ((x, y), z) => x + z } || T  // start symbol
+lazy val T: Parser[Int] = (F ~ T_OP("*") ~ T) ==> { case ((x, y), z) => x * z } || F
+lazy val F: Parser[Int] = (T_LPAREN ~> E <~ T_RPAREN) || NumParser
+
+println(E.parse_all(tokenizer(lexing_rules, "1 + 2 + 3")))
+println(E.parse_all(tokenizer(lexing_rules, "1 + 2 * 3")))
+println(E.parse_all(tokenizer(lexing_rules, "(1 + 2) * 3")))
+
+println(E.parse_all(tokenizer(lexing_rules, "(1 - 2) * 3")))
+println(E.parse_all(tokenizer(lexing_rules, "(1 + 2) * - 3")))
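The last two calls return the empty set: the lexer accepts '-' through OPS, but the grammar E/T/F has no production for T_OP("-") and nothing for unary minus. A minimal sketch of a grammar extension that handles binary subtraction, assuming parser2a.scala has been loaded; the names E2, T2 and F2 are invented here.

lazy val E2: Parser[Int] =
  (T2 ~ T_OP("+") ~ E2) ==> { case ((x, y), z) => x + z } ||
  (T2 ~ T_OP("-") ~ E2) ==> { case ((x, y), z) => x - z } ||
  T2
lazy val T2: Parser[Int] = (F2 ~ T_OP("*") ~ T2) ==> { case ((x, y), z) => x * z } || F2
lazy val F2: Parser[Int] = (T_LPAREN ~> E2 <~ T_RPAREN) || NumParser

println(E2.parse_all(tokenizer(lexing_rules, "(1 - 2) * 3")))   // Set(-3)
println(E2.parse_all(tokenizer(lexing_rules, "(1 + 2) * - 3"))) // still Set(): unary minus is not covered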