\documentclass{article}
\usepackage{../style}
\usepackage{../graphics}
\usepackage{../langs}
\usepackage{../grammar}

\begin{document}

\section*{Homework 9}

\HEADER

\begin{enumerate}
\item Describe what is meant by \emph{eliminating tail
      recursion}. When can this optimisation be applied and
      why is it of benefit?
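
  For illustration, here is a small sketch in Scala (the name
  \texttt{fact} and the use of an accumulator argument are just one way of
  setting this up): the first version is not tail-recursive, the second one
  is.

\begin{lstlisting}[language=Scala,numbers=none]
import scala.annotation.tailrec

// plain recursion: the multiplication happens *after* the
// recursive call returns, so every call needs its own stack frame
def fact(n: Int): Int =
  if (n == 0) 1 else n * fact(n - 1)

// tail-recursive version: the recursive call is the last action;
// a compiler can replace it by a jump back to the beginning of
// the function, effectively turning the recursion into a loop
@tailrec
def factA(n: Int, acc: Int = 1): Int =
  if (n == 0) acc else factA(n - 1, n * acc)
\end{lstlisting}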

\item A programming language has arithmetic expressions. For one such
  expression the compiler of this language produces the
  following snippet of JVM code.

\begin{lstlisting}[language=JVMIS,numbers=none]
ldc 1 
ldc 2 
ldc 3 
imul 
ldc 4 
ldc 3 
isub 
iadd 
iadd
\end{lstlisting}

  Give the arithmetic expression that produced this code.  Make sure
  you give all necessary parentheses.
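
  As a reminder of how such stack code arises, below is a minimal sketch in
  Scala of a compiler for arithmetic expressions (the datatype \texttt{Aexp}
  and the function \texttt{compile\_aexp} are illustrative names, not part
  of the question): the code for the two operands is generated first,
  followed by the instruction for the operator, which is why JVM code like
  the above reads like postfix notation.

\begin{lstlisting}[language=Scala,numbers=none]
// a small AST for arithmetic expressions
abstract class Aexp
case class Num(n: Int) extends Aexp
case class Aop(op: String, a1: Aexp, a2: Aexp) extends Aexp

// compile an expression into a list of JVM instructions (as strings)
def compile_aexp(a: Aexp): List[String] = a match {
  case Num(n) => List(s"ldc $n")
  case Aop("+", a1, a2) =>
    compile_aexp(a1) ++ compile_aexp(a2) ++ List("iadd")
  case Aop("-", a1, a2) =>
    compile_aexp(a1) ++ compile_aexp(a2) ++ List("isub")
  case Aop("*", a1, a2) =>
    compile_aexp(a1) ++ compile_aexp(a2) ++ List("imul")
}

// for example, compile_aexp(Aop("*", Aop("+", Num(1), Num(2)), Num(3)))
// yields List("ldc 1", "ldc 2", "iadd", "ldc 3", "imul"),
// that is the code for (1 + 2) * 3
\end{lstlisting}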

\item Describe what the following JVM instructions do.

  
\begin{lstlisting}[language=JVMIS2,numbers=none]
ldc 3    
iload 3
istore 1
ifeq label
if_icmpge label
\end{lstlisting}

\item What does the following JVM function calculate?

\begin{lstlisting}[language=JVMIS2,numbers=none]    
.method public static bar(I)I
.limit locals 1
.limit stack 9
   iload 0
   ldc 0
   if_icmpne If_else_8
   ldc 0
   goto If_end_9
If_else_8:
   iload 0
   ldc 1
   if_icmpne If_else_10
   ldc 1
   goto If_end_11
If_else_10:
   iload 0
   ldc 1
   isub
   invokestatic bar(I)I
   iload 0
   ldc 2
   isub
   invokestatic bar(I)I
   iadd
If_end_11:
If_end_9:
   ireturn
.end method  
\end{lstlisting}

\item What does the following LLVM function calculate? Give the
  corresponding arithmetic expression.

\begin{lstlisting}[language=LLVM,numbers=none]  
define i32 @foo(i32 %a, i32 %b)  {
  %1 = mul i32 %a, %a
  %2 = mul i32 %a, 2
  %3 = mul i32 %2, %b
  %4 = add i32 %1, %3
  %5 = mul i32 %b, %b
  %6 = add i32 %5, %4
  ret i32 %6
}
\end{lstlisting}
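
  One way to read LLVM's SSA form is to transliterate each intermediate
  \texttt{\%1}, \texttt{\%2}, \ldots{} into a separate \texttt{val}. The
  Scala sketch below does exactly that (the names \texttt{t1} to
  \texttt{t6} are of course arbitrary); simplifying the resulting
  expression is then the actual exercise.

\begin{lstlisting}[language=Scala,numbers=none]
// a direct transliteration of the SSA code above: every temporary
// is assigned exactly once, so each %n becomes one val
def foo(a: Int, b: Int): Int = {
  val t1 = a * a
  val t2 = a * 2
  val t3 = t2 * b
  val t4 = t1 + t3
  val t5 = b * b
  val t6 = t5 + t4
  t6
}
\end{lstlisting}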

\item As an optimisation technique, a compiler might want to detect `dead
  code' and not generate any instructions for it. Why does this optimisation
  have the potential to speed up the run-time of a program? (Hint: On what
  CPUs are programs run nowadays?)
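
  To make the term concrete, here is a trivial (made-up) instance of dead
  code in Scala: the branch can never be taken, so a compiler does not need
  to emit any instructions for it.

\begin{lstlisting}[language=Scala,numbers=none]
val x = 42
// the condition is statically known to be false, so the whole
// branch is dead code
if (x < 0) println("this can never be reached")
\end{lstlisting}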

\item In an earlier question, we analysed the advantages of having a lexer
  phase before running the parser (having a lexer is definitely a good
  thing). But you might wonder whether a lexer can also be implemented by a
  parser and some simple grammar rules. Consider for example:

  \begin{plstx}[margin=1cm]
    : \meta{S\/} ::= (\meta{Kw\/}\mid \meta{Id\/}\mid \meta{Ws\/}) \cdot \meta{S\/} \;\mid\; \epsilon\\
    : \meta{Kw\/} ::= \texttt{if} \mid \texttt{then} \mid \ldots\\
    : \meta{Id\/} ::= (\texttt{a} \mid\ldots\mid \texttt{z}) \cdot \meta{Id\/} \;\mid\; \epsilon\\
    : \meta{Ws\/} ::= \ldots\\
  \end{plstx}

  What is wrong with implementing a lexer in this way?

\item What is the difference between a parse tree and an abstract
  syntax tree? Give some simple examples for each of them.


\item Give a description of how the Brzozowski matcher works. 
  The description should be coherent and logical.
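
  For reference, a minimal sketch of the derivative-based matcher in Scala
  is given below; it covers only the basic regular expressions, uses the
  customary names \texttt{nullable}, \texttt{der} and \texttt{matcher}, and
  omits any simplification of the derivatives.

\begin{lstlisting}[language=Scala,numbers=none]
abstract class Rexp
case object ZERO extends Rexp                    // matches nothing
case object ONE extends Rexp                     // matches the empty string
case class CHAR(c: Char) extends Rexp
case class ALT(r1: Rexp, r2: Rexp) extends Rexp
case class SEQ(r1: Rexp, r2: Rexp) extends Rexp
case class STAR(r: Rexp) extends Rexp

// can the regular expression match the empty string?
def nullable(r: Rexp): Boolean = r match {
  case ZERO => false
  case ONE => true
  case CHAR(_) => false
  case ALT(r1, r2) => nullable(r1) || nullable(r2)
  case SEQ(r1, r2) => nullable(r1) && nullable(r2)
  case STAR(_) => true
}

// the derivative of r with respect to the character c
def der(c: Char, r: Rexp): Rexp = r match {
  case ZERO => ZERO
  case ONE => ZERO
  case CHAR(d) => if (c == d) ONE else ZERO
  case ALT(r1, r2) => ALT(der(c, r1), der(c, r2))
  case SEQ(r1, r2) =>
    if (nullable(r1)) ALT(SEQ(der(c, r1), r2), der(c, r2))
    else SEQ(der(c, r1), r2)
  case STAR(r1) => SEQ(der(c, r1), STAR(r1))
}

// a string matches r iff the derivative w.r.t. all its characters
// (taken from left to right) is nullable
def matcher(r: Rexp, s: String): Boolean =
  nullable(s.foldLeft(r)((r, c) => der(c, r)))
\end{lstlisting}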

\item Give a description of how a compiler for the While-language can
  be implemented. You should assume you are producing code for the JVM.
  The description should be coherent and logical.
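
  As a starting point, the sketch below shows in Scala the kind of code
  template such a compiler typically uses for a while-loop; the function
  \texttt{compile\_while}, the label generator \texttt{Fresh} and the
  representation of JVM instructions as strings are illustrative choices,
  not part of any fixed implementation.

\begin{lstlisting}[language=Scala,numbers=none]
// generate fresh labels so that nested loops do not clash
var counter = -1
def Fresh(x: String): String = { counter += 1; s"${x}_$counter" }

// given the instructions for the loop test (parameterised by the
// label to jump to when the test is false) and the instructions for
// the loop body, produce the instructions for the whole while-loop
def compile_while(test: String => List[String],
                  body: List[String]): List[String] = {
  val loop_begin = Fresh("Loop_begin")
  val loop_end   = Fresh("Loop_end")
  List(s"$loop_begin:") ++
  test(loop_end) ++                 // falls through if the test is true
  body ++
  List(s"goto $loop_begin", s"$loop_end:")
}

// for example, "while (x < 10) do x := x + 1" with x stored in
// local variable 0 could be compiled as
val loop = compile_while(
  lbl => List("iload 0", "ldc 10", s"if_icmpge $lbl"),
  List("iload 0", "ldc 1", "iadd", "istore 0"))
\end{lstlisting}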

  
\item \POSTSCRIPT  

%  \item It is true (I confirmed it) that
%  
%  \begin{center} if $\varnothing$ does not occur in $r$
%  \;\;then\;\;$L(r) \not= \{\}$ 
%  \end{center}
%  
%  \noindent
%  holds, or equivalently
%  
%  \begin{center}
%  $L(r) = \{\}$ \;\;implies\;\; $\varnothing$ occurs in $r$.
%  \end{center}
%  
%  \noindent
%  You can prove either version by induction on $r$. The best way to
%  make more formal what is meant by `$\varnothing$ occurs in $r$', you can define
%  the following function:
%  
%  \begin{center}
%  \begin{tabular}{@ {}l@ {\hspace{2mm}}c@ {\hspace{2mm}}l@ {}}
%  $occurs(\varnothing)$      & $\dn$ & $true$\\
%  $occurs(\epsilon)$           & $\dn$ &  $f\!alse$\\
%  $occurs (c)$                    & $\dn$ &  $f\!alse$\\
%  $occurs (r_1 + r_2)$       & $\dn$ &  $occurs(r_1) \vee occurs(r_2)$\\ 
%  $occurs (r_1 \cdot r_2)$ & $\dn$ &  $occurs(r_1) \vee occurs(r_2)$\\
%  $occurs (r^*)$                & $\dn$ & $occurs(r)$ \\
%  \end{tabular}
%  \end{center}
%  
%  \noindent
%  Now you can prove 
%  
%  \begin{center}
%  $L(r) = \{\}$ \;\;implies\;\; $occurs(r)$.
%  \end{center}
%  
%  \noindent
%  The interesting cases are $r_1 + r_2$ and $r^*$.
%  The other direction is not true, that is if $occurs(r)$ then $L(r) = \{\}$. A counter example
%  is $\varnothing + a$: although $\varnothing$ occurs in this regular expression, the corresponding
%  language is not empty. The obvious extension to include the not-regular expression, $\sim r$,
%  also leads to an incorrect statement. Suppose we add the clause
%    
%  \begin{center}
%  \begin{tabular}{@ {}l@ {\hspace{2mm}}c@ {\hspace{2mm}}l@ {}}
%  $occurs(\sim r)$      & $\dn$ & $occurs(r)$
%  \end{tabular}
%  \end{center}  
%  
%  \noindent
%  to the definition above, then it will not be true that
%  
%  \begin{center}
%  $L(r) = \{\}$ \;\;implies\;\; $occurs(r)$.
%  \end{center}
%  
%  \noindent
%  Assume the alphabet contains just $a$ and $b$, find a counter example to this
%  property.
  
\end{enumerate}

\end{document}

%%% Local Variables: 
%%% mode: latex
%%% TeX-master: t
%%% End: