updated
author Christian Urban <christian.urban@kcl.ac.uk>
Thu, 19 Sep 2024 15:47:33 +0100
changeset 961 c0600f8b6427
parent 960 c7009356ddd8
child 962 5176cbb819c2
handouts/ho01.pdf
handouts/ho02.pdf
handouts/ho02.tex
progs/fun/fun_llvm.sc
progs/fun/fun_parser.sc
progs/fun/fun_tokens.sc
progs/matcher/re1.sc
progs/parser-combinators/comb1.sc
progs/while/compile.sc
slides/slides01.pdf
slides/slides01.tex
solutions/cw3/lexer.sc
solutions/cw3/parser.sc
solutions/cw4/lexer.sc
solutions/cw4/parser.sc
Binary file handouts/ho01.pdf has changed
Binary file handouts/ho02.pdf has changed
--- a/handouts/ho02.tex	Wed May 29 13:25:30 2024 +0100
+++ b/handouts/ho02.tex	Thu Sep 19 15:47:33 2024 +0100
@@ -8,7 +8,7 @@
 
 \begin{document}
 \fnote{\copyright{} Christian Urban, King's College London, 
-  2014, 2015, 2016, 2017, 2018, 2019, 2020, 2021, 2022, 2023}
+  2014, 2015, 2016, 2017, 2018, 2019, 2020, 2021, 2022, 2023, 2024}
 
 
 \section*{Handout 2 (Regular Expression Matching)}
@@ -389,10 +389,10 @@
 construct the regular expression for $s$ by calling $\textit{der}$ with
 $r_1$ and ``appending'' $r_2$. There is however one exception
 to this simple rule: if $r_1$ can match the empty string, then
-all of $c\!::\!s$ is matched by $r_2$. So in case $r_1$ is
+all of $c\!::\!s$ is matched by $r_2$. Therefore in case $r_1$ is
 nullable (that is can match the empty string) we have to allow
 the choice $\textit{der}\,c\,r_2$ for calculating the regular
-expression that can match $s$. Therefore we have to add the
+expression that can match $s$. This means we have to add the
 regular expression $\textit{der}\,c\,r_2$ in the result. The $*$-case
 is again simple: if $r^*$ matches a string of the form
 $c\!::\!s$, then the first part must be ``matched'' by a
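(For reference, the two cases just described correspond directly to the der clauses used in the accompanying programs, for example progs/matcher/re1.sc and solutions/cw3/lexer.sc; a self-contained sketch over the basic regular expressions:

  abstract class Rexp
  case object ZERO extends Rexp
  case object ONE extends Rexp
  case class CHAR(c: Char) extends Rexp
  case class ALT(r1: Rexp, r2: Rexp) extends Rexp
  case class SEQ(r1: Rexp, r2: Rexp) extends Rexp
  case class STAR(r: Rexp) extends Rexp

  // whether r can match the empty string
  def nullable(r: Rexp) : Boolean = r match {
    case ZERO => false
    case ONE => true
    case CHAR(_) => false
    case ALT(r1, r2) => nullable(r1) || nullable(r2)
    case SEQ(r1, r2) => nullable(r1) && nullable(r2)
    case STAR(_) => true
  }

  // derivative w.r.t. the character c; in the SEQ case the
  // alternative der(c, r2) is added exactly when r1 is nullable
  def der(c: Char, r: Rexp) : Rexp = r match {
    case ZERO => ZERO
    case ONE => ZERO
    case CHAR(d) => if (c == d) ONE else ZERO
    case ALT(r1, r2) => ALT(der(c, r1), der(c, r2))
    case SEQ(r1, r2) =>
      if (nullable(r1)) ALT(SEQ(der(c, r1), r2), der(c, r2))
      else SEQ(der(c, r1), r2)
    case STAR(r1) => SEQ(der(c, r1), STAR(r1))
  }
)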
@@ -788,7 +788,7 @@
 (23/Aug/2016) I found another place where this algorithm can
 be sped up (this idea is not integrated with what is coming next, but
 I present it nonetheless). The idea is to not define \texttt{ders}
-that it iterates the derivative character-by-character, but in bigger
+so that it iterates the derivative character-by-character, but in bigger
 chunks. The resulting code for \texttt{ders2} looks as follows:
 
 \lstinputlisting[numbers=none]{../progs/app52.scala} 
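(Since app52.scala is only included by reference above, a rough sketch of the contrast; the chunked ders2 below illustrates the idea rather than reproducing the exact code of app52.scala, and it assumes the simplification function simp from the handout:

  // standard version: one character at a time
  def ders(s: List[Char], r: Rexp) : Rexp = s match {
    case Nil => r
    case c::cs => ders(cs, simp(der(c, r)))
  }

  // chunked version: whole (short) strings are matched against
  // atomic regular expressions directly; only the general case
  // falls back on single-character derivatives
  def ders2(s: List[Char], r: Rexp) : Rexp = (s, r) match {
    case (Nil, r) => r
    case (s, ZERO) => ZERO
    case (s, ONE) => if (s == Nil) ONE else ZERO
    case (s, CHAR(c)) => if (s == List(c)) ONE
                         else if (s == Nil) CHAR(c) else ZERO
    case (s, ALT(r1, r2)) => ALT(ders2(s, r1), ders2(s, r2))
    case (c::cs, r) => ders2(cs, simp(der(c, r)))
  }
)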
--- a/progs/fun/fun_llvm.sc	Wed May 29 13:25:30 2024 +0100
+++ b/progs/fun/fun_llvm.sc	Thu Sep 19 15:47:33 2024 +0100
@@ -41,8 +41,12 @@
 //    ./a.out
 
 
-import $file.fun_tokens, fun_tokens._
-import $file.fun_parser, fun_parser._ 
+//> using toolkit 0.5.0
+// > using file fun_tokens.scala
+// > using file fun_parser.scala
+
+//import $file.fun_tokens, fun_tokens._
+//import $file.fun_parser, fun_parser._ 
 
 
 // for generating new labels
@@ -267,7 +271,6 @@
   }
 }
 
-print("%d\n", i)
 val prelude = """
 @.str = private constant [4 x i8] c"%d\0A\00"
 
@@ -360,6 +363,7 @@
 
 
 */
+/*
 factC(6, x => x)
 factC(6, x => {println(s"The final Result is $x") ; 0})
 factC(6, _ + 1)
@@ -373,4 +377,5 @@
 def fibC(n: Int, ret: Int => Int) : Int = {
   if (n == 0 || n == 1) ret(1)
   else fibC(n - 1, x => fibC(n - 2, y => ret(x + y)))
-}
\ No newline at end of file
+}
+*/
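(The //> using lines are scala-cli directives replacing the previous Ammonite $file imports; the two "// > using file" lines with the extra space are left inactive. A plausible invocation, stated as an assumption rather than tested instructions:

  // previously, with Ammonite (cf. the usage comment in fun_tokens.sc):
  //   amm fun_llvm.sc defs.fun
  // now, as a scala-cli script:
  //   scala-cli fun_llvm.sc
)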
--- a/progs/fun/fun_parser.sc	Wed May 29 13:25:30 2024 +0100
+++ b/progs/fun/fun_parser.sc	Thu Sep 19 15:47:33 2024 +0100
@@ -10,16 +10,21 @@
 // this will generate a parse-tree from a list
 // of tokens
 
+//> using toolkit 0.5.0
+// > using file fun_tokens.scala
+
+
 import scala.language.implicitConversions    
 import scala.language.reflectiveCalls
 
-import $file.fun_tokens, fun_tokens._ 
+
+//import $file.fun_tokens, fun_tokens._ 
 
 
 // Parser combinators
 //    type parameter I needs to be of Seq-type
 //
-type IsSeq[I] = I => Seq[_]
+type IsSeq[I] = I => Seq[?]
 
 /*
 abstract class Parser[I, T](using is: I => Seq[_])  {
@@ -32,7 +37,7 @@
 */
 
 
-abstract class Parser[I, T](using is: I => Seq[_]) {
+abstract class Parser[I, T](using is: I => Seq[?]) {
   def parse(ts: I): Set[(T, I)]
 
   def parse_single(ts: I) : T = 
@@ -76,7 +81,7 @@
   def map[S](f: => T => S) = new MapParser[I, T, S](p, f)
 }
 
-def ListParser[I, T, S](p: => Parser[I, T], q: => Parser[I, S])(using is: I => Seq[_]): Parser[I, List[T]] = {
+def ListParser[I, T, S](p: => Parser[I, T], q: => Parser[I, S])(using is: I => Seq[?]): Parser[I, List[T]] = {
   (p ~ q ~ ListParser(p, q)).map{ case (x:T) ~ (y:S) ~ (z:List[T]) => x :: z } ||
   (p.map[List[T]]{s => List(s)})
 }
@@ -184,7 +189,7 @@
   Prog.parse_single(tks)
 
 //@doc("Parses a file.")
-@main
+//@main
 def main(fname: String) : Unit = {
   val tks = tokenise(os.read(os.pwd / fname))
   println(parse_tks(tks))
--- a/progs/fun/fun_tokens.sc	Wed May 29 13:25:30 2024 +0100
+++ b/progs/fun/fun_tokens.sc	Thu Sep 19 15:47:33 2024 +0100
@@ -8,6 +8,7 @@
 //     amm fun_tokens.sc defs.fun
 //
 
+//> using toolkit 0.5.0
  
 
 import scala.language.implicitConversions    
@@ -41,7 +42,7 @@
   charlist2rexp(s.toList)
 
 extension (s: String) {
-  def $ (r: Rexp) = RECD(s, r)
+  infix def $ (r: Rexp) = RECD(s, r)
 }
 
 extension (r: Rexp) {
@@ -252,7 +253,7 @@
 // import os._
 
 //@doc("Tokenising a file.")
-@main
+//@main
 def main(fname: String) = {
   println(tokenise(os.read(os.pwd / fname)))
 }
--- a/progs/matcher/re1.sc	Wed May 29 13:25:30 2024 +0100
+++ b/progs/matcher/re1.sc	Thu Sep 19 15:47:33 2024 +0100
@@ -100,8 +100,6 @@
 
 
 // test: (a?{n}) (a{n})
-@arg(doc = "Test (a?{n}) (a{n})")
-@main
 def test1() = {
   println("Test (a?{n}) (a{n})")
 
@@ -111,8 +109,6 @@
 }
 
 // test: (a*)* b
-@arg(doc = "Test (a*)* b")
-@main
 def test2() = {
   println("Test (a*)* b")
 
@@ -165,7 +161,7 @@
 
 size(ders(("a" * 30).toList, BIG))      // 31010539
 
-@main
+
 def test3() = {
   println("Test (a + aa)*")
 
@@ -174,7 +170,7 @@
   }
 }
 
-
-@arg(doc = "All tests.")
 @main
 def all() = { test1(); test2() ; test3() } 
+
+//all()
--- a/progs/parser-combinators/comb1.sc	Wed May 29 13:25:30 2024 +0100
+++ b/progs/parser-combinators/comb1.sc	Thu Sep 19 15:47:33 2024 +0100
@@ -7,22 +7,30 @@
 
 
 //  Note, in the lectures I did not show the type bound
-//  using is: I => Seq[_], which means that the input
-//  type 'I' needs to be a sequence.
+//  I : IsSeq, which means that the input type 'I' needs 
+//  to be a sequence that can be tested for emptiness.
+
+trait IsSeq[I] {
+  extension (i: I) def isEmpty: Boolean
+}
 
-type IsSeq[I] = I => Seq[?]
+given IsSeq[String] = _.isEmpty
+given [I]: IsSeq[Seq[I]] = _.isEmpty
+
 
-abstract class Parser[I: IsSeq, T](using is: IsSeq[I])  {
+// parser class
+//==============
+
+abstract class Parser[I : IsSeq, T]  {
   def parse(in: I): Set[(T, I)]
 
   def parse_all(in: I) : Set[T] =
     for ((hd, tl) <- parse(in);
-        if is(tl).isEmpty) yield hd
+        if tl.isEmpty) yield hd
 }
 
-
 // parser combinators
-
+//====================
 
 // alternative parser
 class AltParser[I : IsSeq, T](p: => Parser[I, T],
@@ -169,14 +177,18 @@
 println(E.parse_all("1+2+3"))
 
 
-// with parser combinators (and other parsing algorithms)
-// no left-recursion is allowed, otherwise the will loop
+// with parser combinators (and many other parsing algorithms)
+// no left-recursion is allowed, otherwise they will loop;
+// newer versions of Scala (3.5) will actually give a warning
+// about this
 
+/*
 lazy val EL: Parser[String, Int] =
   ((EL ~ p"+" ~ EL).map{ case ((x, y), z) => x + z} ||
    (EL ~ p"*" ~ EL).map{ case ((x, y), z) => x * z} ||
    (p"(" ~ EL ~ p")").map{ case ((x, y), z) => y} ||
    NumParserInt)
+*/
 
 // this will run forever:
 //println(EL.parse_all("1+2+3"))
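(As a reminder of the standard remedy: split the grammar into layers and recurse only on the right, as the E/T/F parser above already does. A sketch with made-up names E2/T2/F2, reusing the p"..." string-parser interpolator and NumParserInt defined elsewhere in comb1.sc:

  lazy val E2: Parser[String, Int] =
    ((T2 ~ p"+" ~ E2).map{ case ((x, _), z) => x + z } || T2)
  lazy val T2: Parser[String, Int] =
    ((F2 ~ p"*" ~ T2).map{ case ((x, _), z) => x * z } || F2)
  lazy val F2: Parser[String, Int] =
    ((p"(" ~ E2 ~ p")").map{ case ((_, y), _) => y } || NumParserInt)
)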
--- a/progs/while/compile.sc	Wed May 29 13:25:30 2024 +0100
+++ b/progs/while/compile.sc	Thu Sep 19 15:47:33 2024 +0100
@@ -81,7 +81,7 @@
 
 extension (sc: StringContext) {
     def i(args: Any*): String = "   " ++ sc.s(args:_*) ++ "\n"
-    def l(args: Any*): String = sc.s(args:_*) ++ ":\n"
+    def l(args: Any*): String = sc.s(args:_*) ++ ":"
 }
 
 // this allows us to write things like
@@ -131,22 +131,22 @@
     val if_end = Fresh("If_end")
     val (instrs1, env1) = compile_block(bl1, env)
     val (instrs2, env2) = compile_block(bl2, env1)
-    (compile_bexp(b, env, if_else) ++
-     instrs1 ++
-     i"goto $if_end" ++
-     l"$if_else" ++
-     instrs2 ++
-     l"$if_end", env2)
+    (s"""|${compile_bexp(b, env, if_else)}
+         |${instrs1}
+         |${i"goto $if_end"}
+         |${l"$if_else"}
+         |${instrs2}
+         |${l"$if_end"}""".stripMargin, env2)
   }
   case While(b, bl) => {
     val loop_begin = Fresh("Loop_begin")
     val loop_end = Fresh("Loop_end")
     val (instrs1, env1) = compile_block(bl, env)
-    (l"$loop_begin" ++
-     compile_bexp(b, env, loop_end) ++
-     instrs1 ++
-     i"goto $loop_begin" ++
-     l"$loop_end", env1)
+    (s"""|${l"$loop_begin"}
+         |${compile_bexp(b, env, loop_end)}
+         |$instrs1
+         |${i"goto $loop_begin"}
+         |${l"$loop_end"}""".stripMargin, env1)
   }
   case Write(x) => 
     (i"iload ${env(x)} \t\t; $x" ++ 
@@ -166,7 +166,7 @@
 // main compilation function for blocks
 def compile(bl: Block, class_name: String) : String = {
   val instructions = compile_block(bl, Map.empty)._1
-  (beginning ++ instructions ++ ending).replaceAllLiterally("XXX", class_name)
+  (beginning ++ instructions ++ ending).replace("XXX", class_name)
 }
 
 
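(The change to the l interpolator means labels no longer carry their own newline; the line breaks now come from the stripMargin templates. A small illustration of what the two interpolators produce, with an assumed label name:

  val lbl = "Loop_begin_0"   // the kind of label Fresh generates (exact name assumed)
  i"goto $lbl"               // yields "   goto Loop_begin_0\n"
  l"$lbl"                    // yields "Loop_begin_0:" without a trailing newline
)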
Binary file slides/slides01.pdf has changed
--- a/slides/slides01.tex	Wed May 29 13:25:30 2024 +0100
+++ b/slides/slides01.tex	Thu Sep 19 15:47:33 2024 +0100
@@ -254,7 +254,7 @@
   \begin{center}
   \begin{tabular}{ll}
   Email:  & christian.urban at kcl.ac.uk\\
-  Office Hour: & Thurdays 15 -- 16\\
+  Office Hour: & Fridays 12 -- 14\\
   Location: & N7.07 (North Wing, Bush House)\\
   Slides \& Progs: & KEATS\\
   Pollev: & \texttt{\alert{https://pollev.com/cfltutoratki576}}\\  
@@ -513,7 +513,7 @@
   
 \textbf{Boeing 777's}: First flight in 1994. They want to achieve
 triple redundancy for potential hardware faults.
-\here{http://www.citemaster.net/get/db3a81c6-548e-11e5-9d2e-00163e009cc7/R8.pdf}\bigskip
+\here{https://ieeexplore.ieee.org/document/495891}\bigskip
   
 They compile 1 Ada program to\medskip
   
@@ -627,7 +627,6 @@
 \footnotesize\textcolor{gray}{Grace Hopper}\smallskip\\
 
 {\small\textcolor{gray}{(she made it to David Letterman's Tonight Show
-    \here{https://youtu.be/3N_ywhx6_K0?t=31} /
     \here{https://www.youtube.com/watch?v=oE2uls6iIEU})}}
 \end{flushright}
 \end{textblock}
@@ -643,13 +642,13 @@
 
 \begin{itemize}
 \item final exam in January (35\%)
-\item five CWs (65\%) 
+\item four CWs (65\% - first CW is optional) 
 \end{itemize}\bigskip\bigskip\pause
 
 
 \textbf{Weekly Homework (optional):}
 \begin{itemize}
-\item uploaded on KEATS, send answers via email, (try to!) respond individually
+\item uploaded on KEATS - solutions will be discussed during the SGTs
 \item \alert{\bf all} questions in the exam will be from the HWs!!
 \end{itemize}  
 
@@ -664,8 +663,8 @@
 
 \begin{center}
 \begin{tikzpicture}
-  \begin{axis}[symbolic x coords={2016,2017,2018,2019,2020,2021,2022,2023},
-    width  = \textwidth,
+  \begin{axis}[symbolic x coords={2016,2017,2018,2019,2020,2021,2022,2023,2024},
+    width  = 1.1\textwidth,
     height = 5cm,
     bar width=8mm,
     nodes near coords,
@@ -679,8 +678,9 @@
     ]
     
 \addplot[ybar,style={rred,fill=rred!75,mark=none},text=black] coordinates {
-(2023,183)
-(2022,112)
+(2024,136)
+(2023,169)
+(2022,111)
 (2021,98)
 (2020,59)
 (2019,38)
--- a/solutions/cw3/lexer.sc	Wed May 29 13:25:30 2024 +0100
+++ b/solutions/cw3/lexer.sc	Thu Sep 19 15:47:33 2024 +0100
@@ -1,21 +1,24 @@
 // Lexer from CW2
 //================
 
+//> using toolkit 0.4.0
+//> using file project.sc
+import project.*
 
 // Rexp
 abstract class Rexp
 case object ZERO extends Rexp
 case object ONE extends Rexp
 case class CHAR(c: Char) extends Rexp
-case class ALT(r1: Rexp, r2: Rexp) extends Rexp 
-case class SEQ(r1: Rexp, r2: Rexp) extends Rexp 
-case class STAR(r: Rexp) extends Rexp 
+case class ALT(r1: Rexp, r2: Rexp) extends Rexp
+case class SEQ(r1: Rexp, r2: Rexp) extends Rexp
+case class STAR(r: Rexp) extends Rexp
 case class RECD(x: String, r: Rexp) extends Rexp
 
 case class RANGE(s: Set[Char]) extends Rexp
 case class PLUS(r: Rexp) extends Rexp
 case class OPTIONAL(r: Rexp) extends Rexp
-case class NTIMES(r: Rexp, n: Int) extends Rexp 
+case class NTIMES(r: Rexp, n: Int) extends Rexp
 
 // Values
 abstract class Val
@@ -35,7 +38,7 @@
   case c::s => SEQ(CHAR(c), charlist2rexp(s))
 }
 
-implicit def string2rexp(s : String) : Rexp = 
+implicit def string2rexp(s : String) : Rexp =
   charlist2rexp(s.toList)
 
 extension (r: Rexp) {
@@ -50,7 +53,7 @@
   def % = STAR(s)
   def ~ (r: Rexp) = SEQ(s, r)
   def ~ (r: String) = SEQ(s, r)
-  def $ (r: Rexp) = RECD(s, r)
+  infix def $ (r: Rexp) = RECD(s, r)
 }
 
 // nullable
@@ -75,16 +78,16 @@
   case ONE => ZERO
   case CHAR(d) => if (c == d) ONE else ZERO
   case ALT(r1, r2) => ALT(der(c, r1), der(c, r2))
-  case SEQ(r1, r2) => 
+  case SEQ(r1, r2) =>
     if (nullable(r1)) ALT(SEQ(der(c, r1), r2), der(c, r2))
     else SEQ(der(c, r1), r2)
   case STAR(r) => SEQ(der(c, r), STAR(r))
 
   case RECD(_, r1) => der(c, r1)
-  case RANGE(s) => if (s.contains(c)) ONE else ZERO 
+  case RANGE(s) => if (s.contains(c)) ONE else ZERO
   case PLUS(r1) => SEQ(der(c, r1), STAR(r1))
   case OPTIONAL(r1) => der(c, r1)
-  case NTIMES(r, i) => 
+  case NTIMES(r, i) =>
     if (i == 0) ZERO else SEQ(der(c, r), NTIMES(r, i - 1))
 }
 
@@ -113,7 +116,7 @@
 // Mkeps
 def mkeps(r: Rexp) : Val = r match {
   case ONE => Empty
-  case ALT(r1, r2) => 
+  case ALT(r1, r2) =>
     if (nullable(r1)) Left(mkeps(r1)) else Right(mkeps(r2))
   case SEQ(r1, r2) => Sequ(mkeps(r1), mkeps(r2))
   case STAR(r) => Stars(Nil)
@@ -132,7 +135,7 @@
   case (SEQ(r1, r2), Right(v2)) => Sequ(mkeps(r1), inj(r2, c, v2))
   case (ALT(r1, r2), Left(v1)) => Left(inj(r1, c, v1))
   case (ALT(r1, r2), Right(v2)) => Right(inj(r2, c, v2))
-  case (CHAR(d), Empty) => Chr(c) 
+  case (CHAR(d), Empty) => Chr(c)
   case (RECD(x, r1), _) => Rec(x, inj(r1, c, v))
 
   case (RANGE(_), Empty) => Chr(c)
@@ -152,9 +155,9 @@
 def F_SEQ(f1: Val => Val, f2: Val => Val) = (v:Val) => v match {
   case Sequ(v1, v2) => Sequ(f1(v1), f2(v2))
 }
-def F_SEQ_Empty1(f1: Val => Val, f2: Val => Val) = 
+def F_SEQ_Empty1(f1: Val => Val, f2: Val => Val) =
   (v:Val) => Sequ(f1(Empty), f2(v))
-def F_SEQ_Empty2(f1: Val => Val, f2: Val => Val) = 
+def F_SEQ_Empty2(f1: Val => Val, f2: Val => Val) =
   (v:Val) => Sequ(f1(v), f2(Empty))
 def F_RECD(f: Val => Val) = (v:Val) => v match {
   case Rec(x, v) => Rec(x, f(v))
@@ -170,7 +173,7 @@
       case (ZERO, _) => (r2s, F_RIGHT(f2s))
       case (_, ZERO) => (r1s, F_LEFT(f1s))
       case _ => if (r1s == r2s) (r1s, F_LEFT(f1s))
-                else (ALT (r1s, r2s), F_ALT(f1s, f2s)) 
+                else (ALT (r1s, r2s), F_ALT(f1s, f2s))
     }
   }
   case SEQ(r1, r2) => {
@@ -189,8 +192,8 @@
 
 // Lex
 def lex_simp(r: Rexp, s: List[Char]) : Val = s match {
-  case Nil => if (nullable(r)) mkeps(r) else 
-    { throw new Exception("lexing error") } 
+  case Nil => if (nullable(r)) mkeps(r) else
+    { throw new Exception("lexing error") }
   case c::cs => {
     val (r_simp, f_simp) = simp(der(c, r))
     inj(r, c, f_simp(lex_simp(r_simp, cs)))
@@ -200,7 +203,7 @@
 def lexing_simp(r: Rexp, s: String) = env(lex_simp(r, s.toList))
 
 // Language specific code
-val KEYWORD : Rexp = "while" | "if" | "then" | "else" | "do" | "for" | "to" | "true" | "false" | "read" | "write" | "skip" 
+val KEYWORD : Rexp = "while" | "if" | "then" | "else" | "do" | "for" | "to" | "true" | "false" | "read" | "write" | "skip"
 val OP : Rexp = "+" | "-" | "*" | "%" | "/" | "==" | "!=" | ">" | "<" | ">=" | "<=" | ":=" | "&&" | "||"
 val LET: Rexp = RANGE(('A' to 'Z').toSet ++ ('a' to 'z').toSet)
 val SYM : Rexp = RANGE(Set('.', '_', '>', '<', '=', ';', ',', ':', ')', '('))
@@ -213,31 +216,51 @@
 val ID : Rexp = LET ~ (LET | "_" | DIGIT).%
 val NUM : Rexp = "0" | (DIGIT1 ~ DIGIT.%)
 val EOL : Rexp = "\n" | "\r\n"
-val COMMENT : Rexp = "//" ~ (LET | SYM | PARENS | " " | DIGIT).% ~ EOL 
+val COMMENT : Rexp = "//" ~ (LET | SYM | PARENS | " " | DIGIT).% ~ EOL
 
-val WHILE_REGS = (("k" $ KEYWORD) | 
-                  ("o" $ OP) | 
+val WHILE_REGS = (("k" $ KEYWORD) |
+                  ("o" $ OP) |
                   ("str" $ STRING) |
                   ("p" $ PARENS) |
-                  ("s" $ SEMI) | 
-                  ("w" $ WHITESPACE) | 
-                  ("i" $ ID) | 
+                  ("s" $ SEMI) |
+                  ("w" $ WHITESPACE) |
+                  ("i" $ ID) |
                   ("n" $ NUM) |
-		  ("c" $ COMMENT)).%
+                  ("c" $ COMMENT)).%
+
+
 
 
 
+def escapedChar(ch: Char): String = ch match {
+  case '\b' => "\\b"
+  case '\t' => "\\t"
+  case '\n' => "\\n"
+  case '\f' => "\\f"
+  case '\r' => "\\r"
+  case '"'  => "\\\""
+  case '\'' => "\\\'"
+  case '\\' => "\\\\"
+  case _    => if (ch.isControl) "\\0" + Integer.toOctalString(ch.toInt)
+               else              String.valueOf(ch)
+}
+
+def esc(s: String): String = "\"" + escapeImpl(s) + "\""
+def escapeImpl(s: String): String = s.flatMap(escapedChar)
+
+/*
 def esc(raw: String): String = {
   import scala.reflect.runtime.universe._
   Literal(Constant(raw)).toString
 }
+*/
 
 def escape(tks: List[(String, String)]) =
-  tks.map{ case (s1, s2) => (esc(s1), esc(s2))}
+  tks.map{ case (s1, s2) => (s1, s2)}
 
 
 // Tokens
-abstract class Token extends Serializable 
+abstract class Token extends Serializable
 case class T_KEYWORD(s: String) extends Token
 case class T_OP(s: String) extends Token
 case class T_STRING(s: String) extends Token
@@ -257,11 +280,10 @@
 }
 
 // Tokenise
-def tokenise(s: String) = //: List[Token] = 
-  escape(lexing_simp(WHILE_REGS, s)).filter{p => p._1 != "\"w\""}//.collect(token)
+def tokenise(s: String) = //: List[Token] =
+  escape(lexing_simp(WHILE_REGS, s)).collect(token)
 
 
 
 
 println(tokenise(os.read(os.pwd / "primes.while")))
-
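(A quick illustration of the new escaping functions above; the comments spell out the resulting strings:

  escapedChar('\n')   // the two-character string \n
  escapedChar('"')    // the two-character string \"
  esc("ab\ncd")       // the 8-character string "ab\ncd", quotes included,
                      // with the newline spelled out as backslash-n
)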
--- a/solutions/cw3/parser.sc	Wed May 29 13:25:30 2024 +0100
+++ b/solutions/cw3/parser.sc	Thu Sep 19 15:47:33 2024 +0100
@@ -1,42 +1,47 @@
 // CW3
 
-import $file.lexer
-import lexer._ 
-
+//> using file project.scala
+//> using file lexer.sc
+import lexer.*
 
 case class ~[+A, +B](x: A, y: B)
 
 // parser combinators
+//
+type IsSeq[I] = I => Seq[?]
 
-abstract class Parser[I, T](using is: I => Seq[_])  {
-  def parse(in: I): Set[(T, I)]  
+abstract class Parser[I : IsSeq, T](using is: I => Seq[?])  {
+  def parse(in: I): Set[(T, I)]
 
   def parse_all(in: I) : Set[T] =
-    for ((hd, tl) <- parse(in); 
+    for ((hd, tl) <- parse(in);
         if is(tl).isEmpty) yield hd
 }
 
+// parser combinators
+
 // alternative parser
-class AltParser[I, T](p: => Parser[I, T], 
-                      q: => Parser[I, T])(using I => Seq[_]) extends Parser[I, T] {
-  def parse(in: I) = p.parse(in) ++ q.parse(in)   
+class AltParser[I : IsSeq, T](p: => Parser[I, T],
+                              q: => Parser[I, T]) extends Parser[I, T] {
+  def parse(in: I) = p.parse(in) ++ q.parse(in)
 }
 
 // sequence parser
-class SeqParser[I, T, S](p: => Parser[I, T], 
-                         q: => Parser[I, S])(using I => Seq[_]) extends Parser[I, ~[T, S]] {
-  def parse(in: I) = 
-    for ((hd1, tl1) <- p.parse(in); 
+class SeqParser[I : IsSeq, T, S](p: => Parser[I, T],
+                                 q: => Parser[I, S]) extends Parser[I, ~[T, S]] {
+  def parse(in: I) =
+    for ((hd1, tl1) <- p.parse(in);
          (hd2, tl2) <- q.parse(tl1)) yield (new ~(hd1, hd2), tl2)
 }
 
 // map parser
-class MapParser[I, T, S](p: => Parser[I, T], 
-                         f: T => S)(using I => Seq[_]) extends Parser[I, S] {
+class MapParser[I : IsSeq, T, S](p: => Parser[I, T],
+                                 f: T => S) extends Parser[I, S] {
   def parse(in: I) = for ((hd, tl) <- p.parse(in)) yield (f(hd), tl)
 }
 
 
+
 /*
 // atomic parser for (particular) strings
 case class StrParser(s: String) extends Parser[String, String] {
@@ -46,15 +51,15 @@
   }
 }
 
-extension (sc: StringContext) 
+extension (sc: StringContext)
   def p(args: Any*) = StrParser(sc.s(args:_*))
 */
 
 // more convenient syntax for parser combinators
-extension [I, T](p: Parser[I, T])(using I => Seq[_]) {
-  def ||(q : => Parser[I, T]) = new AltParser[I, T](p, q)
-  def ~[S] (q : => Parser[I, S]) = new SeqParser[I, T, S](p, q)
-  def map[S](f: => T => S) = new MapParser[I, T, S](p, f)
+extension [I : IsSeq, T](p: Parser[I, T]) {
+  def ||(q : => Parser[I, T]) = AltParser[I, T](p, q)
+  def ~[S] (q : => Parser[I, S]) = SeqParser[I, T, S](p, q)
+  def map[S](f: => T => S) = MapParser[I, T, S](p, f)
 }
 
 // New parser that takes as input a list of tokens
@@ -63,7 +68,7 @@
       // an example of an atomic parser for characters
       if (!in.isEmpty && in.head == t) Set((t, in.tail)) else Set()
     }
-}   
+}
 
 case class TokenListParser(ts: List[Token]) extends Parser[List[Token], List[Token]] {
     def parse(tsb: List[Token]) = {
@@ -72,16 +77,14 @@
     }
 }
 
-// Implicit definitions to go from a token 
+// Implicit definitions to go from a token
 // or a list of tokens to a TokenListParser
-implicit def token2parser(t: Token) : Parser[List[Token], Token] = 
+implicit def token2parser(t: Token) : Parser[List[Token], Token] =
   TokenParser(t)
 
 extension (t: Token) {
-    def || (q : => Parser[List[Token], Token]) = 
-      new AltParser[List[Token], Token](t, q)
-    def ~[S](q : => Parser[List[Token], S]) = 
-      new SeqParser[List[Token], Token, S](t, q)  
+    def || (q : => Parser[List[Token], Token]) = AltParser[List[Token], Token](t, q)
+    def ~[S](q : => Parser[List[Token], S]) = SeqParser[List[Token], Token, S](t, q)
 }
 
 
@@ -133,26 +136,26 @@
 
 
 // WHILE Language Parsing
-lazy val AExp: Parser[List[Token], AExp] = 
+lazy val AExp: Parser[List[Token], AExp] =
   (Te ~ T_OP("+") ~ AExp).map{ case x ~ _ ~ z => Aop("+", x, z): AExp } ||
   (Te ~ T_OP("-") ~ AExp).map{ case x ~ _ ~ z => Aop("-", x, z): AExp } || Te
-lazy val Te: Parser[List[Token], AExp] = 
-  (Fa ~ T_OP("*") ~ Te).map{ case x ~ _ ~ z => Aop("*", x, z): AExp } || 
-  (Fa ~ T_OP("/") ~ Te).map{ case x ~ _ ~ z => Aop("/", x, z): AExp } || 
-  (Fa ~ T_OP("%") ~ Te).map{ case x ~ _ ~ z => Aop("%", x, z): AExp } || Fa  
-lazy val Fa: Parser[List[Token], AExp] = 
-   (T_PAREN("(") ~ AExp ~ T_PAREN(")")).map{ case _ ~ y ~ _ => y } || 
-   IdParser().map{Var(_)}  || 
+lazy val Te: Parser[List[Token], AExp] =
+  (Fa ~ T_OP("*") ~ Te).map{ case x ~ _ ~ z => Aop("*", x, z): AExp } ||
+  (Fa ~ T_OP("/") ~ Te).map{ case x ~ _ ~ z => Aop("/", x, z): AExp } ||
+  (Fa ~ T_OP("%") ~ Te).map{ case x ~ _ ~ z => Aop("%", x, z): AExp } || Fa
+lazy val Fa: Parser[List[Token], AExp] =
+   (T_PAREN("(") ~ AExp ~ T_PAREN(")")).map{ case _ ~ y ~ _ => y } ||
+   IdParser().map{Var(_)}  ||
    NumParser().map{Num(_)}
 
-lazy val BExp: Parser[List[Token], BExp] = 
-   (AExp ~ T_OP("==") ~ AExp).map{ case x ~ _ ~ z => Bop("==", x, z): BExp } || 
-   (AExp ~ T_OP("!=") ~ AExp).map{ case x ~ _ ~ z => Bop("!=", x, z): BExp } || 
-   (AExp ~ T_OP("<") ~ AExp).map{ case x ~ _ ~ z => Bop("<", x, z): BExp } || 
+lazy val BExp: Parser[List[Token], BExp] =
+   (AExp ~ T_OP("==") ~ AExp).map{ case x ~ _ ~ z => Bop("==", x, z): BExp } ||
+   (AExp ~ T_OP("!=") ~ AExp).map{ case x ~ _ ~ z => Bop("!=", x, z): BExp } ||
+   (AExp ~ T_OP("<") ~ AExp).map{ case x ~ _ ~ z => Bop("<", x, z): BExp } ||
    (AExp ~ T_OP(">") ~ AExp).map{ case x ~ _ ~ z => Bop(">", x, z): BExp } ||
    (T_PAREN("(") ~ BExp ~ T_PAREN(")") ~ T_OP("&&") ~ BExp).map{ case _ ~ y ~ _ ~ _ ~ v => And(y, v): BExp } ||
    (T_PAREN("(") ~ BExp ~ T_PAREN(")") ~ T_OP("||") ~ BExp).map{ case _ ~ y ~ _ ~ _ ~ v => Or(y, v): BExp } ||
-   (T_KEYWORD("true").map(_ => True: BExp )) || 
+   (T_KEYWORD("true").map(_ => True: BExp )) ||
    (T_KEYWORD("false").map(_ => False: BExp )) ||
    (T_PAREN("(") ~ BExp ~ T_PAREN(")")).map{ case _ ~ x ~ _ => x }
 
@@ -163,7 +166,7 @@
     (T_KEYWORD("while") ~ BExp ~ T_KEYWORD("do") ~ Block).map{ case _ ~ y ~ _ ~ w => While(y, w) : Stmt } ||
     (T_KEYWORD("read") ~ IdParser()).map{ case _ ~ id => Read(id): Stmt} ||
     (T_KEYWORD("write") ~ IdParser()).map{ case _ ~ id => WriteId(id): Stmt} ||
-    (T_KEYWORD("write") ~ StringParser()).map{ case _ ~ s => WriteString(s): Stmt} || 
+    (T_KEYWORD("write") ~ StringParser()).map{ case _ ~ s => WriteString(s): Stmt} ||
     (T_KEYWORD("write") ~ T_PAREN("(") ~ IdParser() ~ T_PAREN(")")).map{ case _ ~ _ ~ id ~ _ => WriteId(id): Stmt} ||
     (T_KEYWORD("write") ~  T_PAREN("(") ~ StringParser() ~ T_PAREN(")")).map{ case _ ~ _ ~ s ~ _ => WriteString(s): Stmt}
 
@@ -219,8 +222,8 @@
 def eval_stmt(s: Stmt, env: Env) : Env = s match {
   case Skip => env
   case Assign(x, a) => env + (x -> eval_aexp(a, env))
-  case If(b, bl1, bl2) => if (eval_bexp(b, env)) eval_bl(bl1, env) else eval_bl(bl2, env) 
-  case While(b, bl) => 
+  case If(b, bl1, bl2) => if (eval_bexp(b, env)) eval_bl(bl1, env) else eval_bl(bl2, env)
+  case While(b, bl) =>
     if (eval_bexp(b, env)) eval_stmt(While(b, bl), eval_bl(bl, env))
     else env
 
@@ -230,10 +233,10 @@
         env
        }
 
-  case Read(x) => { 
-      println("Enter an integer and press ENTER:") ; 
+  case Read(x) => {
+      println("Enter an integer and press ENTER:") ;
       val n = readInt() ; // Note: Does not work when using the REPL
-      eval_stmt(Assign(x, Num(n)), env) 
+      eval_stmt(Assign(x, Num(n)), env)
   }
 }
 
@@ -251,7 +254,7 @@
 
 
 
-@main
+//@main
 def run(file: String) = {
   val contents = os.read(os.pwd / file)
   println(s"Lex $file: ")
@@ -262,7 +265,7 @@
   println(eval(Stmts.parse_all(tokenise(contents)).head))
 }
 
-@main
+//@main
 def test(file: String) = {
   val contents = os.read(os.pwd / file)
   println(s"Lex $file: ")
@@ -279,3 +282,6 @@
 println("Time taken in seconds: ")
 println((end - start)/(1.0e9))
 */
+
+test("primes.while")
+run("primes.while")
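(Besides primes.while, a smaller hypothetical check, assuming tokenise from lexer.sc and the Stmts parser above:

  val ts = tokenise("x := 1 + 2; write x")
  println(Stmts.parse_all(ts))   // the parsed statement list, if lexing and parsing succeed
)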
--- a/solutions/cw4/lexer.sc	Wed May 29 13:25:30 2024 +0100
+++ b/solutions/cw4/lexer.sc	Thu Sep 19 15:47:33 2024 +0100
@@ -1,6 +1,8 @@
 // Lexer from CW2 with additions to the rules
 //============================================
 
+import scala.language.implicitConversions
+
 // Rexp
 abstract class Rexp
 case object ZERO extends Rexp
@@ -34,8 +36,9 @@
   case c::s => SEQ(CHAR(c), charlist2rexp(s))
 }
 
-implicit def string2rexp(s : String) : Rexp = 
-  charlist2rexp(s.toList)
+given Conversion[String, Rexp] = (s => charlist2rexp(s.toList))
+//implicit def string2rexp(s : String) : Rexp = 
+//  charlist2rexp(s.toList)
 
 extension (r: Rexp) {
   def | (s: Rexp) = ALT(r, s)
@@ -45,11 +48,11 @@
 
 extension (s: String) {
   def | (r: Rexp) = ALT(s, r)
-  def | (r: String) = ALT(s, r)
+  //def | (r: String) = ALT(s, r)
   def % = STAR(s)
   def ~ (r: Rexp) = SEQ(s, r)
-  def ~ (r: String) = SEQ(s, r)
-  def $ (r: Rexp) = RECD(s, r)
+  //def ~ (r: String) = SEQ(s, r)
+  infix def $ (r: Rexp) = RECD(s, r)
 }
 
 // nullable
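(With the given Conversion in scope, and implicitConversions imported above, plain strings still work wherever a Rexp is expected; for instance:

  val r1 : Rexp = "read"            // converted directly by the given Conversion
  val r2 : Rexp = "true" | "false"  // extension | on the left string; the right
                                    // string is converted at the argument position
)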
--- a/solutions/cw4/parser.sc	Wed May 29 13:25:30 2024 +0100
+++ b/solutions/cw4/parser.sc	Thu Sep 19 15:47:33 2024 +0100
@@ -1,13 +1,14 @@
 // CW3
 
-import $file.lexer
-import lexer._ 
+import scala.language.implicitConversions
+
+import $file.lexer, lexer._ 
 
 case class ~[+A, +B](x: A, y: B)
 
 // parser combinators
 
-abstract class Parser[I, T](using is: I => Seq[_])  {
+abstract class Parser[I, T](using is: I => Seq[?])  {
   def parse(in: I): Set[(T, I)]  
 
   def parse_all(in: I) : Set[T] =
@@ -17,13 +18,13 @@
 
 // alternative parser
 class AltParser[I, T](p: => Parser[I, T], 
-                      q: => Parser[I, T])(using I => Seq[_]) extends Parser[I, T] {
+                      q: => Parser[I, T])(using I => Seq[?]) extends Parser[I, T] {
   def parse(in: I) = p.parse(in) ++ q.parse(in)   
 }
 
 // sequence parser
 class SeqParser[I, T, S](p: => Parser[I, T], 
-                         q: => Parser[I, S])(using I => Seq[_]) extends Parser[I, ~[T, S]] {
+                         q: => Parser[I, S])(using I => Seq[?]) extends Parser[I, ~[T, S]] {
   def parse(in: I) = 
     for ((hd1, tl1) <- p.parse(in); 
          (hd2, tl2) <- q.parse(tl1)) yield (new ~(hd1, hd2), tl2)
@@ -31,14 +32,14 @@
 
 // map parser
 class MapParser[I, T, S](p: => Parser[I, T], 
-                         f: T => S)(using I => Seq[_]) extends Parser[I, S] {
+                         f: T => S)(using I => Seq[?]) extends Parser[I, S] {
   def parse(in: I) = for ((hd, tl) <- p.parse(in)) yield (f(hd), tl)
 }
 
 // more convenient syntax for parser combinators
-extension [I, T](p: Parser[I, T])(using I => Seq[_]) {
+extension [I, T](p: Parser[I, T])(using I => Seq[?]) {
   def ||(q : => Parser[I, T]) = new AltParser[I, T](p, q)
-  def ~[S] (q : => Parser[I, S]) = new SeqParser[I, T, S](p, q)
+  def ~[S] (q : => Parser[I, S])(using S => Seq[?]) = new SeqParser[I, T, S](p, q)
   def map[S](f: => T => S) = new MapParser[I, T, S](p, f)
 }
 
@@ -71,16 +72,18 @@
 
 // Implicit definitions to go from a token 
 // or a list of tokens to a TokenListParser
-implicit def token2parser(t: Token) : Parser[List[Token], Token] = 
-  TokenParser(t)
+given Conversion[Token, Parser[List[Token], Token]] = (t => TokenParser(t))
+//implicit def token2parser(t: Token) : Parser[List[Token], Token] = 
+//  TokenParser(t)
 
+/*
 extension (t: Token) {
     def || (q : => Parser[List[Token], Token]) = 
       new AltParser[List[Token], Token](t, q)
     def ~[S](q : => Parser[List[Token], S]) = 
       new SeqParser[List[Token], Token, S](t, q)  
 }
-
+*/
 
 
 // Abstract Syntax Trees
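(Correspondingly, a bare token now becomes an atomic parser only through the given Conversion; a small hypothetical check, assuming T_KEYWORD and TokenParser as in the lexer:

  val ifp : Parser[List[Token], Token] = T_KEYWORD("if")   // via the Conversion
  println(ifp.parse(List(T_KEYWORD("if"), T_KEYWORD("then"))))
  // Set((T_KEYWORD(if),List(T_KEYWORD(then))))
)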