// parser.scala
// author: Christian Urban <urbanc@in.tum.de>


// regular expressions including NOT
abstract class Rexp

case object NULL extends Rexp
case object EMPTY extends Rexp
case class CHAR(c: Char) extends Rexp
case class ALT(r1: Rexp, r2: Rexp) extends Rexp
case class SEQ(r1: Rexp, r2: Rexp) extends Rexp
case class STAR(r: Rexp) extends Rexp
case class NOT(r: Rexp) extends Rexp


// some convenience for typing in regular expressions
def charlist2rexp(s : List[Char]) : Rexp = s match {
  case Nil => EMPTY
  case c::Nil => CHAR(c)
  case c::s => SEQ(CHAR(c), charlist2rexp(s))
}
implicit def string2rexp(s : String) : Rexp = charlist2rexp(s.toList)
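
// for example (added checks): the implicit conversion turns "ab"
// into SEQ(CHAR('a'), CHAR('b')) and the empty string into EMPTY
assert(string2rexp("ab") == SEQ(CHAR('a'), CHAR('b')))
assert(string2rexp("") == EMPTY)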


// nullable function: tests whether the regular 
// expression can recognise the empty string
def nullable (r: Rexp) : Boolean = r match {
  case NULL => false
  case EMPTY => true
  case CHAR(_) => false
  case ALT(r1, r2) => nullable(r1) || nullable(r2)
  case SEQ(r1, r2) => nullable(r1) && nullable(r2)
  case STAR(_) => true
  case NOT(r) => !(nullable(r))
}
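
// a few added sanity checks: STAR always matches the empty string,
// a single character never does, and NOT flips the answer
assert(nullable(STAR(CHAR('a'))) == true)
assert(nullable(CHAR('a')) == false)
assert(nullable(NOT(CHAR('a'))) == true)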

// tests whether a regular expression
// cannot recognise any string anymore
// (used by the lexer to stop munching)
def no_more (r: Rexp) : Boolean = r match {
  case NULL => true
  case EMPTY => false
  case CHAR(_) => false
  case ALT(r1, r2) => no_more(r1) && no_more(r2)
  case SEQ(r1, r2) => if (nullable(r1)) (no_more(r1) && no_more(r2)) else no_more(r1)
  case STAR(_) => false
  case NOT(r) => !(no_more(r))
}
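
// for example (added checks): NULL is stuck, STAR can always go on
assert(no_more(NULL) == true)
assert(no_more(STAR(CHAR('a'))) == false)
assert(no_more(SEQ(NULL, CHAR('a'))) == true)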


// derivative of a regular expression w.r.t. a character
def der (c: Char, r: Rexp) : Rexp = r match {
  case NULL => NULL
  case EMPTY => NULL
  case CHAR(d) => if (c == d) EMPTY else NULL
  case ALT(r1, r2) => ALT(der(c, r1), der(c, r2))
  case SEQ(r1, r2) => 
    if (nullable(r1)) ALT(SEQ(der(c, r1), r2), der(c, r2))
    else SEQ(der(c, r1), r2)
  case STAR(r) => SEQ(der(c, r), STAR(r))
  case NOT(r) => NOT(der (c, r))
}
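
// derivative w.r.t. a string and a matcher built on top of it
// (a small added sketch for illustration; the file itself only
// uses derivatives inside munch further below)
def ders(s: List[Char], r: Rexp) : Rexp = s match {
  case Nil => r
  case c::cs => ders(cs, der(c, r))
}

def matches(r: Rexp, s: String) : Boolean = nullable(ders(s.toList, r))

assert(matches(STAR("ab"), "abab") == true)
assert(matches(STAR("ab"), "aba") == false)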

// regular expression for specifying 
// ranges of characters
def RANGE(s : List[Char]) : Rexp = s match {
  case Nil => NULL
  case c::Nil => CHAR(c)
  case c::s => ALT(CHAR(c), RANGE(s))
}

// one or more
def PLUS(r: Rexp) = SEQ(r, STAR(r))
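
// for example (added checks, using the matches sketch above):
// PLUS requires at least one repetition
assert(matches(PLUS(CHAR('a')), "") == false)
assert(matches(PLUS(CHAR('a')), "aaa") == true)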

// some regular expressions
val DIGIT = RANGE("0123456789".toList)
val NONZERODIGIT = RANGE("123456789".toList)

val NUMBER = ALT(SEQ(NONZERODIGIT, STAR(DIGIT)), "0")
val LPAREN = CHAR('(')
val RPAREN = CHAR(')')
val WHITESPACE = PLUS(RANGE(" \n".toList))
val OPS = RANGE("+-*".toList)

// for classifying the strings that have been recognised
abstract class Token

case object T_WHITESPACE extends Token
case object T_NUM extends Token
case class T_OP(s: String) extends Token
case object T_LPAREN extends Token
case object T_RPAREN extends Token
case class NT(s: String) extends Token   // nonterminals, used by the parser below

type Rule = (Rexp, List[Char] => Token)

def error (s: String) = throw new IllegalArgumentException ("Cannot tokenize: " + s)

// munch implements the maximal munch rule: r is the regular
// expression that still has to match, action builds a token from
// the matched characters, s is the remaining input and t are the
// characters matched so far
def munch(r: Rexp, action: List[Char] => Token, s: List[Char], t: List[Char]) : Option[(List[Char], Token)] = 
  s match {
    case Nil if (nullable(r)) => Some((Nil, action(t)))
    case Nil => None
    case c::s if (no_more(der(c, r)) && nullable(r)) => Some((c::s, action(t)))
    case c::s if (no_more(der(c, r))) => None
    case c::s => munch(der(c, r), action, s, t ::: List(c))
  }
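
// for example (added check): lexing a number stops at the first
// character the regular expression can no longer absorb
assert(munch(NUMBER, (s) => T_NUM, "42+1".toList, Nil) == Some((List('+', '1'), T_NUM)))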

// tries all rules and returns the match that consumed the longest
// prefix of the input, i.e. the one with the shortest rest
def one_token (rs: List[Rule], s: List[Char]) : (List[Char], Token) = {
  val somes = rs.flatMap { case (r, action) => munch(r, action, s, Nil) }
  if (somes == Nil) error(s.mkString) else somes.minBy(_._1.length)
}

def tokenize (rs: List[Rule], s: List[Char]) : List[Token] = s match {
  case Nil => Nil
  case _ => one_token(rs, s) match {
    case (rest, token) => token :: tokenize(rs, rest) 
  }
}

def tokenizer(rs: List[Rule], s: String) : List[Token] = 
  tokenize(rs, s.toList).filterNot(_ == T_WHITESPACE)



// lexing rules for arithmetic expressions
val lexing_rules: List[Rule] = 
  List((NUMBER, (s) => T_NUM),
       (WHITESPACE, (s) => T_WHITESPACE),
       (LPAREN, (s) => T_LPAREN),
       (RPAREN, (s) => T_RPAREN),
       (OPS, (s) => T_OP(s.mkString)))


// examples
println(tokenizer(lexing_rules, "2 + 3 * 4 + 1"))
println(tokenizer(lexing_rules, "(2 + 3) * (4 + 1)"))


type Grammar = List[(String, List[Token])]

// grammar for arithmetic expressions
val grammar = 
  List ("F" -> List(T_NUM),
        "E" -> List(T_NUM),
        "E" -> List(NT("E"), T_OP("+"), NT("E")),
        "E" -> List(NT("E"), T_OP("-"), NT("E")),
        "E" -> List(NT("E"), T_OP("*"), NT("E")),    
        "E" -> List(T_LPAREN, NT("E"), T_RPAREN))


// splits ts1 into the part before the first occurrence of prefix
// and the part after it (ts2 accumulates the skipped elements in
// reverse order); returns None if prefix does not occur
def chop[A](ts1: List[A], prefix: List[A], ts2: List[A]) : Option[(List[A], List[A])] = 
  ts1 match {
    case Nil => None
    case t::ts => 
      if (ts1.startsWith(prefix)) Some(ts2.reverse, ts1.drop(prefix.length))
      else chop(ts, prefix, t::ts2)
  }

// examples
chop(List(1,2,3,4,5,6,7,8,9), List(4,5), Nil)   // Some((List(1,2,3), List(6,7,8,9)))
chop(List(1,2,3,4,5,6,7,8,9), List(3,5), Nil)   // None

// replaces the first occurrence of out in ts by in
def replace[A](ts: List[A], out: List[A], in: List[A]) = 
  chop(ts, out, Nil) match {
    case None => None
    case Some((before, after)) => Some(before ::: in ::: after)
  }  
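
// for example (added check)
assert(replace(List(1,2,3,4,5,6), List(4,5), List(0)) == Some(List(1,2,3,0,6)))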

// parses bottom-up: repeatedly replaces a right-hand side of a
// grammar rule by its left-hand side nonterminal; succeeds if the
// token list can be reduced to the start symbol NT("E")
def parse(g: Grammar, ts: List[Token]) : Boolean = {
  println(ts)
  if (ts == List(NT("E"))) true
  else {
    val tss = for ((lhs, rhs) <- g) yield replace(ts, rhs, List(NT(lhs)))
    tss.flatten.exists(parse(g, _))
  }
}
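
// for example (added illustration), the tokens of "2 + 3" can be
// reduced as follows:
//   List(T_NUM, T_OP(+), T_NUM)
//   List(NT(E), T_OP(+), T_NUM)    by E -> T_NUM
//   List(NT(E), T_OP(+), NT(E))    by E -> T_NUM
//   List(NT(E))                    by E -> E T_OP(+) E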
 
def parser(g: Grammar, rs: List[Rule], s: String) = {
  println("\n")
  parse(g, tokenizer(rs, s))
}
  


parser(grammar, lexing_rules, "2 + 3 *    4 +       1")   // true
parser(grammar, lexing_rules, "(2 + 3) * (4 + 1)")        // true
parser(grammar, lexing_rules, "(2 + 3) * 4 (4 + 1)")      // false: cannot be reduced to NT("E")