--- a/progs/lexer.scala Sun Oct 27 11:16:09 2019 +0000
+++ b/progs/lexer.scala Sun Oct 27 11:31:08 2019 +0000
@@ -160,10 +160,6 @@
case _ => (SEQ(r1s,r2s), F_SEQ(f1s, f2s))
}
}
- case RECD(x, r1) => {
- val (r1s, f1s) = simp(r1)
- (RECD(x, r1s), F_RECD(f1s))
- }
case r => (r, F_ID)
}
--- a/progs/tokenise.scala Sun Oct 27 11:16:09 2019 +0000
+++ b/progs/tokenise.scala Sun Oct 27 11:31:08 2019 +0000
@@ -172,10 +172,6 @@
case _ => (SEQ(r1s,r2s), F_SEQ(f1s, f2s))
}
}
- case RECD(x, r1) => {
- val (r1s, f1s) = simp(r1)
- (RECD(x, r1s), F_RECD(f1s))
- }
case r => (r, F_ID)
}
@@ -257,7 +253,7 @@
case ("str", s) => T_STR(s)
}
-// filters out all un-interesting token
+// filters out all un-interesting tokens
def tokenise(s: String) : List[Token] =
lexing_simp(WHILE_REGS, s).collect(token)
@@ -277,4 +273,4 @@
}
-}
\ No newline at end of file
+}
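
With the RECD case removed above, a record regex reaching simp now falls through to the final case r => (r, F_ID), so records are handed back unsimplified together with the identity rectification function. The following is a minimal, self-contained sketch of that behaviour for the Scala REPL, not the code from lexer.scala or tokenise.scala: the cut-down Rexp and Val definitions (ZERO, ONE, Sequ, Rec) and the trimmed SEQ case are assumptions made only so the snippet runs on its own.

// cut-down regular expressions and values (assumed names, for illustration only)
abstract class Rexp
case object ZERO extends Rexp
case object ONE extends Rexp
case class SEQ(r1: Rexp, r2: Rexp) extends Rexp
case class RECD(x: String, r: Rexp) extends Rexp

abstract class Val
case class Sequ(v1: Val, v2: Val) extends Val
case class Rec(x: String, v: Val) extends Val

// rectification functions threaded through simp
def F_ID(v: Val): Val = v
def F_SEQ(f1: Val => Val, f2: Val => Val) = (v: Val) => v match {
  case Sequ(v1, v2) => Sequ(f1(v1), f2(v2))
}

def simp(r: Rexp): (Rexp, Val => Val) = r match {
  case SEQ(r1, r2) => {
    val (r1s, f1s) = simp(r1)
    val (r2s, f2s) = simp(r2)
    (r1s, r2s) match {
      case (ZERO, _) => (ZERO, F_ID)              // 0 ~ r simplifies to 0
      case (_, ZERO) => (ZERO, F_ID)              // r ~ 0 simplifies to 0
      case _ => (SEQ(r1s, r2s), F_SEQ(f1s, f2s))
    }
  }
  // RECD(x, r1) now hits this catch-all: the record is returned
  // unsimplified, paired with the identity rectification function
  case r => (r, F_ID)
}

// nothing underneath the record wrapper is simplified any more:
println(simp(RECD("k", SEQ(ZERO, ONE)))._1)   // prints RECD(k,SEQ(ZERO,ONE))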