diff -r 47f86885d481 -r e85600529ca5 scala/html.scala
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/scala/html.scala Sat Jun 15 09:11:11 2013 -0400
@@ -0,0 +1,99 @@
+
+//:load matcher.scala
+
+// some regular expressions
+val SYM = RANGE("""ABCDEFGHIJKLMNOPQRSTUVXYZabcdefghijklmnopqrstuvwxyz.,!?-{[()]}':;%0123456789""")
+val WORD = PLUS(SYM)
+
+val BTAG = SEQS("<", WORD, ">")
+val ETAG = SEQS("</", WORD, ">")
+
+val WHITESPACE = PLUS(RANGE(" \n"))
+
+// for classifying the strings that have been recognised
+abstract class Token
+case object T_WHITESPACE extends Token
+case class T_WORD(s: String) extends Token
+case class T_ETAG(s: String) extends Token
+case class T_BTAG(s: String) extends Token
+case class T_NT(s: String, rhs: List[Token]) extends Token
+
+val lexing_rules: List[Rule[Token]] =
+  List((BTAG, (s) => T_BTAG(s.mkString)),
+       (ETAG, (s) => T_ETAG(s.mkString)),
+       (WORD, (s) => T_WORD(s.mkString)),
+       (WHITESPACE, (s) => T_WHITESPACE))
+
+// the tokenizer
+val T = Tokenizer(lexing_rules)
+
+// width for printing
+val WIDTH = 60
+
+
+def interpret(ts: List[Token], c: Int, ctr: List[String]) : Unit = ts match {
+  case Nil => println(Console.RESET)
+  case T_WHITESPACE::rest => print(Console.RESET + " "); interpret(rest, c + 1, ctr)
+  case T_WORD(s)::rest => {
+    val newstr = Console.RESET + ctr.reverse.mkString + s
+    if (c + s.length < WIDTH) {
+      print(newstr);
+      interpret(rest, c + s.length, ctr)
+    }
+    else {
+      print("\n" + newstr)
+      interpret(rest, s.length, ctr)
+    }
+  }
+  case T_BTAG("<p>")::rest => print("\n"); interpret(rest, 0, ctr)
+  case T_ETAG("</p>")::rest => print("\n"); interpret(rest, 0, ctr)
+  case T_BTAG("<b>")::rest => interpret(rest, c, Console.BOLD :: ctr)
+  case T_BTAG("<u>")::rest => interpret(rest, c, Console.UNDERLINED :: ctr)
+  case T_BTAG("