--- a/regexp3.scala Wed Oct 10 14:08:49 2012 +0100
+++ b/regexp3.scala Thu Oct 11 10:58:18 2012 +0100
@@ -1,4 +1,5 @@
-// regular expressions
+
+// regular expressions including NOT
abstract class Rexp
case object NULL extends Rexp
@@ -26,22 +27,21 @@
case EMPTY => true
case CHAR(_) => false
case ALT(r1, r2) => nullable(r1) || nullable(r2)
- case SEQ(r1, r2) => if (nullable(r1)) (zeroable(r1) && zeroable(r2)) else zeroable(r1)
+ case SEQ(r1, r2) => nullable(r1) && nullable(r2)
case STAR(_) => true
case NOT(r) => !(nullable(r))
}
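+
+// a few illustrative cases, just reading off the clauses above:
+//   nullable(EMPTY)            is true
+//   nullable(CHAR('a'))        is false
+//   nullable(STAR(CHAR('a')))  is true
+//   nullable(NOT(CHAR('a')))   is true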
// tests whether a regular expression
-// recognises nothing
-def zeroable (r: Rexp) : Boolean = r match {
+// cannot recognise anything any more
+def no_more (r: Rexp) : Boolean = r match {
case NULL => true
case EMPTY => false
case CHAR(_) => false
- case ALT(r1, r2) => zeroable(r1) && zeroable(r2)
- case SEQ(r1, r2) => if (nullable(r1)) (zeroable(r1) && zeroable(r2)) else zeroable(r1)
- //case SEQ(r1, r2) => zeroable(r1) || zeroable(r2)
+ case ALT(r1, r2) => no_more(r1) && no_more(r2)
+ case SEQ(r1, r2) => if (nullable(r1)) (no_more(r1) && no_more(r2)) else no_more(r1)
case STAR(_) => false
- case NOT(r) => !(zeroable(r))
+ case NOT(r) => !(no_more(r))
}
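+
+// rough intuition (informal sketch): munch below calls no_more(der(c, r)) to
+// detect that, after reading c, the regular expression can never match again;
+// for instance no_more(NULL) is true, while no_more(EMPTY) and
+// no_more(STAR(CHAR('a'))) are false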
@@ -58,16 +58,6 @@
case NOT(r) => NOT(der (c, r))
}
-// derivative w.r.t. a string (iterates der)
-def ders (s: List[Char], r: Rexp) : Rexp = s match {
- case Nil => r
- case c::s => ders(s, der(c, r))
-}
-
-// main matcher function
-def matcher(r: Rexp, s: String) : Boolean = nullable(ders(s.toList, r))
-
-
// regular expression for specifying
// ranges of characters
def RANGE(s : List[Char]) : Rexp = s match {
@@ -76,71 +66,38 @@
case c::s => ALT(CHAR(c), RANGE(s))
}
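+
+// for example RANGE("ab".toList) unfolds to ALT(CHAR('a'), RANGE("b".toList)),
+// i.e. a choice between the characters in the list (illustration only)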
-//one or more
+// one or more
def PLUS(r: Rexp) = SEQ(r, STAR(r))
-
-//some regular expressions
+// some regular expressions
val LOWERCASE = RANGE("abcdefghijklmnopqrstuvwxyz".toList)
val UPPERCASE = RANGE("ABCDEFGHIJKLMNOPQRSTUVWXYZ".toList)
val LETTER = ALT(LOWERCASE, UPPERCASE)
-val DIGITS = RANGE("0123456789".toList)
-val NONZERODIGITS = RANGE("123456789".toList)
+val DIGIT = RANGE("0123456789".toList)
+val NONZERODIGIT = RANGE("123456789".toList)
-val IDENT = SEQ(LETTER, STAR(ALT(LETTER,DIGITS)))
-val NUMBER = ALT(SEQ(NONZERODIGITS, STAR(DIGITS)), "0")
+val IDENT = SEQ(LETTER, STAR(ALT(LETTER,DIGIT)))
+val NUMBER = ALT(SEQ(NONZERODIGIT, STAR(DIGIT)), "0")
val WHITESPACE = RANGE(" \n".toList)
val WHITESPACES = PLUS(WHITESPACE)
-val ALL = ALT(ALT(LETTER, DIGITS), WHITESPACE)
-
+val ALL = ALT(ALT(LETTER, DIGIT), WHITESPACE)
val COMMENT = SEQ(SEQ("/*", NOT(SEQ(SEQ(STAR(ALL), "*/"), STAR(ALL)))), "*/")
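+
+// roughly speaking, the NOT(..) part rules out comment bodies that already
+// contain a closing "*/", so COMMENT is intended to stop at the first "*/";
+// see the tokenize examples at the end, e.g. "/*ifff if */ hhjj /*34 */"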
+
+// for classifying the strings that have been recognised
abstract class Token
case object T_WHITESPACE extends Token
+case object T_COMMENT extends Token
case class T_IDENT(s: String) extends Token
case class T_OP(s: String) extends Token
case class T_NUM(n: Int) extends Token
case class T_KEYWORD(s: String) extends Token
-case object T_COMMENT extends Token
-
-
-// an example list of rules
-type Rule = (Rexp, List[Char] => Token)
-
-val rules: List[Rule]=
- List(("if", (s) => T_KEYWORD(s.mkString)),
- ("then", (s) => T_KEYWORD(s.mkString)),
- ("else", (s) => T_KEYWORD(s.mkString)),
- ("+", (s) => T_OP(s.mkString)),
- (IDENT, (s) => T_IDENT(s.mkString)),
- (NUMBER, (s) => T_NUM(s.mkString.toInt)),
- (WHITESPACES, (s) => T_WHITESPACE))
-def error (s: String) = throw new IllegalArgumentException ("Could not lex " + s)
-
-def munch(r: Rexp, action: List[Char] => Token, s: List[Char], t: List[Char]) : Option[(List[Char], Token)] =
- s match {
- case Nil if (nullable(r)) => Some(Nil, action(t))
- case Nil => None
- case c::s if (zeroable(der (c, r)) && nullable(r)) => Some(c::s, action(t))
- case c::s if (zeroable(der (c, r))) => None
- case c::s => munch(der (c, r), action, s, t ::: List(c))
- }
-
-def lex_one (rs: List[Rule], s: List[Char]) : (List[Char], Token) = {
- val somes = rs.map { (r) => munch(r._1, r._2, s, Nil) } .flatten
- if (somes == Nil) error(s.mkString) else (somes sortBy (_._1.length) head)
-}
-
-def lex_all (rs: List[Rule], s: List[Char]) : List[Token] = s match {
- case Nil => Nil
- case _ => lex_one(rs, s) match {
- case (rest, t) => t :: lex_all(rs, rest)
- }
-}
+// an example list of lexing rules
+type Rule = (Rexp, List[Char] => Token)
val rules: List[Rule]=
List(("if", (s) => T_KEYWORD(s.mkString)),
@@ -152,25 +109,34 @@
(WHITESPACES, (s) => T_WHITESPACE),
(COMMENT, (s) => T_COMMENT))
-println(lex_all(rules, "/*ifff if */ hhjj /*34 */".toList))
+def error (s: String) = throw new IllegalArgumentException ("Cannot tokenize: " + s)
-munch(COMMENT, (s) => T_COMMENT , "/*ifff if */ hhjj /*34 */".toList, Nil)
-val COMMENT2 = NOT(SEQ(SEQ(STAR(ALL), "*/"), STAR(ALL)))
-
-der('/', COMMENT)
-zeroable(der('/', COMMENT))
-zeroable(der('a', COMMENT2))
+def munch(r: Rexp, action: List[Char] => Token, s: List[Char], t: List[Char]) : Option[(List[Char], Token)] =
+ s match {
+ case Nil if (nullable(r)) => Some(Nil, action(t))
+ case Nil => None
+ case c::s if (no_more(der (c, r)) && nullable(r)) => Some(c::s, action(t))
+ case c::s if (no_more(der (c, r))) => None
+ case c::s => munch(der (c, r), action, s, t ::: List(c))
+ }
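+
+// usage sketch (values reasoned from the definitions above): munch consumes
+// the longest prefix of the input matched by r and returns the remaining
+// input together with the token built by action; for example
+//   munch(NUMBER, (s) => T_NUM(s.mkString.toInt), "42+x".toList, Nil)
+// should evaluate to Some((List('+', 'x'), T_NUM(42)))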
-matcher(COMMENT2, "ifff if 34")
-munch(COMMENT2, "ifff if 34".toList, Nil)
-starts_with(COMMENT2, 'i')
-lex_all(regs, "ifff if 34".toList)
-lex_all(regs, "ifff $ if 34".toList)
+def one_token (rs: List[Rule], s: List[Char]) : (List[Char], Token) = {
+ val somes = rs.map { (r) => munch(r._1, r._2, s, Nil) } .flatten
+ if (somes == Nil) error(s.mkString) else (somes sortBy (_._1.length) head)
+}
-println(lex_all(rules, "/* if true then */ then 42 else +".toList))
-println(lex_all(rules, "if true then then 42 else +".toList))
-println(lex_all(rules, "ifff if 34 34".toList))
-println(lex_all(rules, "ifff $ if 34".toList))
+def tokenize (rs: List[Rule], s: List[Char]) : List[Token] = s match {
+ case Nil => Nil
+ case _ => one_token(rs, s) match {
+ case (rest, token) => token :: tokenize(rs, rest)
+ }
+}
-
+// examples
+println(tokenize(rules, "if true then then 42 else +".toList))
+println(tokenize(rules, "if+true+then+then+42+else +".toList))
+println(tokenize(rules, "ifff if 34 34".toList))
+println(tokenize(rules, "/*ifff if */ hhjj /*34 */".toList))
+println(tokenize(rules, "/* if true then */ then 42 else +".toList))
+//println(tokenize(rules, "ifff $ if 34".toList)) // causes an error because of the symbol $