# HG changeset patch
# User Christian Urban
# Date 1572175868 0
# Node ID 551d018cbbacb80e533908322a33ff1eff107260
# Parent 2f5a4d76756d16f3910b71f12a6d782b655ea811
updated

diff -r 2f5a4d76756d -r 551d018cbbac progs/lexer.scala
--- a/progs/lexer.scala	Sun Oct 27 11:16:09 2019 +0000
+++ b/progs/lexer.scala	Sun Oct 27 11:31:08 2019 +0000
@@ -160,10 +160,6 @@
       case _ => (SEQ(r1s,r2s), F_SEQ(f1s, f2s))
     }
   }
-  case RECD(x, r1) => {
-    val (r1s, f1s) = simp(r1)
-    (RECD(x, r1s), F_RECD(f1s))
-  }
   case r => (r, F_ID)
 }
 
diff -r 2f5a4d76756d -r 551d018cbbac progs/tokenise.scala
--- a/progs/tokenise.scala	Sun Oct 27 11:16:09 2019 +0000
+++ b/progs/tokenise.scala	Sun Oct 27 11:31:08 2019 +0000
@@ -172,10 +172,6 @@
       case _ => (SEQ(r1s,r2s), F_SEQ(f1s, f2s))
     }
   }
-  case RECD(x, r1) => {
-    val (r1s, f1s) = simp(r1)
-    (RECD(x, r1s), F_RECD(f1s))
-  }
   case r => (r, F_ID)
 }
 
@@ -257,7 +253,7 @@
   case ("str", s) => T_STR(s)
 }
 
-// filters out all un-interesting token
+// filters out all un-interesting tokens
 def tokenise(s: String) : List[Token] = 
   lexing_simp(WHILE_REGS, s).collect(token)
 
@@ -277,4 +273,4 @@
 }
 
 
-}
\ No newline at end of file
+}
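
For context, below is a minimal, script-style Scala sketch of the simp function as it stands after this patch. The constructor and combinator names (ZERO, ONE, ALT, SEQ, RECD, F_SEQ, F_ID, ...) are the ones visible in the hunks above; the surrounding datatype and rectification-function definitions are reconstructed assumptions, not a verbatim copy of progs/lexer.scala. One plausible reading of the deletion: assuming the derivative function is defined as in the course files, with der(c, RECD(_, r1)) = der(c, r1), a RECD node can only occur in the initial regular expression and never inside a derivative, so the deleted case did no useful work when simp is applied to derivatives; after the patch, RECD simply falls through to the identity case, and F_RECD is no longer referenced by simp.

// minimal sketch of the simplification step of the Sulzmann & Lu
// lexer after this patch; datatypes reconstructed, not verbatim

abstract class Rexp
case object ZERO extends Rexp                      // matches nothing
case object ONE extends Rexp                       // matches the empty string
case class CHAR(c: Char) extends Rexp
case class ALT(r1: Rexp, r2: Rexp) extends Rexp
case class SEQ(r1: Rexp, r2: Rexp) extends Rexp
case class RECD(x: String, r: Rexp) extends Rexp   // record marker for tokens

abstract class Val
case object Empty extends Val
case class Chr(c: Char) extends Val
case class Sequ(v1: Val, v2: Val) extends Val
case class Left(v: Val) extends Val
case class Right(v: Val) extends Val
case class Rec(x: String, v: Val) extends Val

// rectification functions: undo, on the value level, the
// simplifications performed on the regular-expression level
def F_ID(v: Val): Val = v
def F_LEFT(f: Val => Val) = (v: Val) => Left(f(v))
def F_RIGHT(f: Val => Val) = (v: Val) => Right(f(v))
def F_ALT(f1: Val => Val, f2: Val => Val) = (v: Val) => v match {
  case Left(v) => Left(f1(v))
  case Right(v) => Right(f2(v))
}
def F_SEQ(f1: Val => Val, f2: Val => Val) = (v: Val) => v match {
  case Sequ(v1, v2) => Sequ(f1(v1), f2(v2))
}
def F_SEQ_Empty1(f1: Val => Val, f2: Val => Val) =
  (v: Val) => Sequ(f1(Empty), f2(v))
def F_SEQ_Empty2(f1: Val => Val, f2: Val => Val) =
  (v: Val) => Sequ(f1(v), f2(Empty))
def F_ERROR(v: Val): Val = throw new Exception("error")

def simp(r: Rexp): (Rexp, Val => Val) = r match {
  case ALT(r1, r2) => {
    val (r1s, f1s) = simp(r1)
    val (r2s, f2s) = simp(r2)
    (r1s, r2s) match {
      case (ZERO, _) => (r2s, F_RIGHT(f2s))
      case (_, ZERO) => (r1s, F_LEFT(f1s))
      case _ => if (r1s == r2s) (r1s, F_LEFT(f1s))
                else (ALT(r1s, r2s), F_ALT(f1s, f2s))
    }
  }
  case SEQ(r1, r2) => {
    val (r1s, f1s) = simp(r1)
    val (r2s, f2s) = simp(r2)
    (r1s, r2s) match {
      case (ZERO, _) => (ZERO, F_ERROR)
      case (_, ZERO) => (ZERO, F_ERROR)
      case (ONE, _) => (r2s, F_SEQ_Empty1(f1s, f2s))
      case (_, ONE) => (r1s, F_SEQ_Empty2(f1s, f2s))
      case _ => (SEQ(r1s, r2s), F_SEQ(f1s, f2s))
    }
  }
  // RECD (and everything else without a dedicated case) now falls
  // through here unchanged, paired with the identity rectification
  case r => (r, F_ID)
}

// example: the ZERO alternative is simplified away and the returned
// rectification function re-wraps a matching value in Right(...)
val (rs, f) = simp(ALT(ZERO, CHAR('a')))
// rs == CHAR('a'); f(Chr('a')) == Right(Chr('a'))

// a RECD at the top level is now returned unsimplified:
// simp(RECD("k", CHAR('a')))._1 == RECD("k", CHAR('a'))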