# HG changeset patch
# User Christian Urban
# Date 1572175868 0
# Node ID 67c60bf4f4f5fa98f2259ba40ab3588abc9da255
# Parent  e22b5faa7e6676e773e4108af5b049b2f0259543
updated

diff -r e22b5faa7e66 -r 67c60bf4f4f5 progs/lexer.scala
--- a/progs/lexer.scala	Sun Oct 27 11:16:09 2019 +0000
+++ b/progs/lexer.scala	Sun Oct 27 11:31:08 2019 +0000
@@ -160,10 +160,6 @@
       case _ => (SEQ(r1s,r2s), F_SEQ(f1s, f2s))
     }
   }
-  case RECD(x, r1) => {
-    val (r1s, f1s) = simp(r1)
-    (RECD(x, r1s), F_RECD(f1s))
-  }
   case r => (r, F_ID)
 }
 
diff -r e22b5faa7e66 -r 67c60bf4f4f5 progs/tokenise.scala
--- a/progs/tokenise.scala	Sun Oct 27 11:16:09 2019 +0000
+++ b/progs/tokenise.scala	Sun Oct 27 11:31:08 2019 +0000
@@ -172,10 +172,6 @@
       case _ => (SEQ(r1s,r2s), F_SEQ(f1s, f2s))
     }
   }
-  case RECD(x, r1) => {
-    val (r1s, f1s) = simp(r1)
-    (RECD(x, r1s), F_RECD(f1s))
-  }
   case r => (r, F_ID)
 }
 
@@ -257,7 +253,7 @@
   case ("str", s) => T_STR(s)
 }
 
-// filters out all un-interesting token
+// filters out all un-interesting tokens
 def tokenise(s: String) : List[Token] =
   lexing_simp(WHILE_REGS, s).collect(token)
 
@@ -277,4 +273,4 @@
 }
 
 
-}
\ No newline at end of file
+}
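
Both hunks delete the same RECD case from the simp function, which lexer.scala and tokenise.scala duplicate. Below is a self-contained sketch of how simp looks after this patch; the datatype and combinator names follow the two files, but the bodies here are a minimal approximation of the course code, not the files verbatim. The effect of the change is visible in the final catch-all case: a RECD regular expression is no longer simplified recursively, it is returned unchanged together with the identity rectification function F_ID.

// sketch of simp after this patch (approximation, names as in lexer.scala)

abstract class Rexp
case object ZERO extends Rexp
case object ONE extends Rexp
case class CHAR(c: Char) extends Rexp
case class ALT(r1: Rexp, r2: Rexp) extends Rexp
case class SEQ(r1: Rexp, r2: Rexp) extends Rexp
case class STAR(r: Rexp) extends Rexp
case class RECD(x: String, r: Rexp) extends Rexp   // records a token name

abstract class Val
case object Empty extends Val
case class Chr(c: Char) extends Val
case class Sequ(v1: Val, v2: Val) extends Val
case class Left(v: Val) extends Val
case class Right(v: Val) extends Val
case class Rec(x: String, v: Val) extends Val

// rectification combinators: repair lexing values after simplification
def F_ID(v: Val): Val = v
def F_LEFT(f: Val => Val) = (v: Val) => Left(f(v))
def F_RIGHT(f: Val => Val) = (v: Val) => Right(f(v))
def F_ALT(f1: Val => Val, f2: Val => Val) = (v: Val) => v match {
  case Left(v1)  => Left(f1(v1))
  case Right(v2) => Right(f2(v2))
}
def F_SEQ(f1: Val => Val, f2: Val => Val) = (v: Val) => v match {
  case Sequ(v1, v2) => Sequ(f1(v1), f2(v2))
}
def F_SEQ_Empty1(f1: Val => Val, f2: Val => Val) =
  (v: Val) => Sequ(f1(Empty), f2(v))
def F_SEQ_Empty2(f1: Val => Val, f2: Val => Val) =
  (v: Val) => Sequ(f1(v), f2(Empty))
def F_ERROR(v: Val): Val = throw new Exception("error")

def simp(r: Rexp): (Rexp, Val => Val) = r match {
  case ALT(r1, r2) => {
    val (r1s, f1s) = simp(r1)
    val (r2s, f2s) = simp(r2)
    (r1s, r2s) match {
      case (ZERO, _) => (r2s, F_RIGHT(f2s))
      case (_, ZERO) => (r1s, F_LEFT(f1s))
      case _ =>
        if (r1s == r2s) (r1s, F_LEFT(f1s))
        else (ALT(r1s, r2s), F_ALT(f1s, f2s))
    }
  }
  case SEQ(r1, r2) => {
    val (r1s, f1s) = simp(r1)
    val (r2s, f2s) = simp(r2)
    (r1s, r2s) match {
      case (ZERO, _) => (ZERO, F_ERROR)
      case (_, ZERO) => (ZERO, F_ERROR)
      case (ONE, _)  => (r2s, F_SEQ_Empty1(f1s, f2s))
      case (_, ONE)  => (r1s, F_SEQ_Empty2(f1s, f2s))
      case _         => (SEQ(r1s, r2s), F_SEQ(f1s, f2s))
    }
  }
  // after this patch there is no RECD case: a RECD (like any other
  // remaining regex) falls through and is returned unsimplified
  case r => (r, F_ID)
}

// quick check, e.g. in the REPL:
//   simp(ALT(ZERO, RECD("k", CHAR('a'))))._1 == RECD("k", CHAR('a'))

Under this sketch, simp(ALT(ZERO, RECD("k", CHAR('a')))) drops the ZERO alternative but returns the RECD node untouched, whereas before the patch simp would also have descended into the RECD body and rebuilt it with F_RECD.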