ha
authorChengsong
Thu, 24 Mar 2022 20:52:34 +0000
changeset 465 2e7c7111c0be
parent 464 e6248d2c20c2
child 466 31abe0e496bc
ha
PhdThesisRealOne/.DS_Store
PhdThesisRealOne/LaTeXTemplates_masters-doctoral-thesis_v2/.DS_Store
PhdThesisRealOne/LaTeXTemplates_masters-doctoral-thesis_v2/Appendices/AppendixA.aux
PhdThesisRealOne/LaTeXTemplates_masters-doctoral-thesis_v2/Chapters/Chapter1.aux
PhdThesisRealOne/LaTeXTemplates_masters-doctoral-thesis_v2/Chapters/Chapter1.tex
PhdThesisRealOne/LaTeXTemplates_masters-doctoral-thesis_v2/Chapters/Chapter2.aux
PhdThesisRealOne/LaTeXTemplates_masters-doctoral-thesis_v2/Chapters/Chapter3.aux
PhdThesisRealOne/LaTeXTemplates_masters-doctoral-thesis_v2/example.bib
PhdThesisRealOne/LaTeXTemplates_masters-doctoral-thesis_v2/main.aux
PhdThesisRealOne/LaTeXTemplates_masters-doctoral-thesis_v2/main.lof
PhdThesisRealOne/LaTeXTemplates_masters-doctoral-thesis_v2/main.log
PhdThesisRealOne/LaTeXTemplates_masters-doctoral-thesis_v2/main.lot
PhdThesisRealOne/LaTeXTemplates_masters-doctoral-thesis_v2/main.out
PhdThesisRealOne/LaTeXTemplates_masters-doctoral-thesis_v2/main.pdf
PhdThesisRealOne/LaTeXTemplates_masters-doctoral-thesis_v2/main.synctex.gz
PhdThesisRealOne/LaTeXTemplates_masters-doctoral-thesis_v2/main.tex
PhdThesisRealOne/LaTeXTemplates_masters-doctoral-thesis_v2/main.toc
thys2/BasicIdentities.thy
thys2/ClosedForms.thy
Binary file PhdThesisRealOne/.DS_Store has changed
Binary file PhdThesisRealOne/LaTeXTemplates_masters-doctoral-thesis_v2/.DS_Store has changed
--- a/PhdThesisRealOne/LaTeXTemplates_masters-doctoral-thesis_v2/Appendices/AppendixA.aux	Wed Mar 23 10:09:32 2022 +0000
+++ b/PhdThesisRealOne/LaTeXTemplates_masters-doctoral-thesis_v2/Appendices/AppendixA.aux	Thu Mar 24 20:52:34 2022 +0000
@@ -1,12 +1,12 @@
 \relax 
 \providecommand\hyper@newdestlabel[2]{}
-\@writefile{toc}{\contentsline {chapter}{\numberline {A}Frequently Asked Questions}{17}{appendix.53}}
+\@writefile{toc}{\contentsline {chapter}{\numberline {A}Frequently Asked Questions}{27}{appendix.62}}
 \@writefile{lof}{\addvspace {10\p@ }}
 \@writefile{lot}{\addvspace {10\p@ }}
-\newlabel{AppendixA}{{A}{17}{Frequently Asked Questions}{appendix.53}{}}
-\@writefile{toc}{\contentsline {section}{\numberline {A.1}How do I change the colors of links?}{17}{section.54}}
+\newlabel{AppendixA}{{A}{27}{Frequently Asked Questions}{appendix.62}{}}
+\@writefile{toc}{\contentsline {section}{\numberline {A.1}How do I change the colors of links?}{27}{section.63}}
 \@setckpt{Appendices/AppendixA}{
-\setcounter{page}{18}
+\setcounter{page}{28}
 \setcounter{equation}{0}
 \setcounter{enumi}{0}
 \setcounter{enumii}{0}
@@ -28,7 +28,7 @@
 \setcounter{ContinuedFloat}{0}
 \setcounter{Item}{0}
 \setcounter{Hfootnote}{1}
-\setcounter{bookmark@seq@number}{39}
+\setcounter{bookmark@seq@number}{42}
 \setcounter{tabx@nest}{0}
 \setcounter{listtotal}{0}
 \setcounter{listcount}{0}
--- a/PhdThesisRealOne/LaTeXTemplates_masters-doctoral-thesis_v2/Chapters/Chapter1.aux	Wed Mar 23 10:09:32 2022 +0000
+++ b/PhdThesisRealOne/LaTeXTemplates_masters-doctoral-thesis_v2/Chapters/Chapter1.aux	Thu Mar 24 20:52:34 2022 +0000
@@ -1,42 +1,47 @@
 \relax 
 \providecommand\hyper@newdestlabel[2]{}
-\@writefile{toc}{\contentsline {chapter}{\numberline {1}Introduction}{1}{chapter.14}}
+\@writefile{toc}{\contentsline {chapter}{\numberline {1}POSIX Lexing With Bit-codes}{1}{chapter.14}}
 \@writefile{lof}{\addvspace {10\p@ }}
 \@writefile{lot}{\addvspace {10\p@ }}
-\newlabel{Chapter1}{{1}{1}{Introduction}{chapter.14}{}}
-\@writefile{toc}{\contentsline {section}{\numberline {1.1}Why Brzozowski}{2}{section.15}}
-\@writefile{toc}{\contentsline {section}{\numberline {1.2}Backgound}{4}{section.16}}
-\@writefile{toc}{\contentsline {section}{\numberline {1.3}Existing Practical Approaches}{5}{section.17}}
-\@writefile{toc}{\contentsline {subsection}{\numberline {1.3.1}DFA Approach}{5}{subsection.18}}
-\@writefile{toc}{\contentsline {subsection}{\numberline {1.3.2}NFA Approach}{5}{subsection.19}}
-\@writefile{toc}{\contentsline {section}{\numberline {1.4}Our Approach}{5}{section.20}}
-\@writefile{toc}{\contentsline {subsection}{\numberline {1.4.1}Existing Work}{5}{subsection.21}}
-\@writefile{toc}{\contentsline {section}{\numberline {1.5}What this Template Includes}{5}{section.22}}
-\@writefile{toc}{\contentsline {subsection}{\numberline {1.5.1}Folders}{5}{subsection.23}}
-\@writefile{toc}{\contentsline {subsection}{\numberline {1.5.2}Files}{6}{subsection.24}}
-\@writefile{toc}{\contentsline {section}{\numberline {1.6}Filling in Your Information in the \texttt  {\bfseries  main.tex} File}{7}{section.25}}
-\newlabel{FillingFile}{{1.6}{7}{Filling in Your Information in the \file {main.tex} File}{section.25}{}}
-\@writefile{toc}{\contentsline {section}{\numberline {1.7}The \texttt  {main.tex} File Explained}{7}{section.26}}
-\@writefile{toc}{\contentsline {section}{\numberline {1.8}Thesis Features and Conventions}{8}{section.27}}
-\newlabel{ThesisConventions}{{1.8}{8}{Thesis Features and Conventions}{section.27}{}}
-\@writefile{toc}{\contentsline {subsection}{\numberline {1.8.1}Printing Format}{8}{subsection.28}}
-\@writefile{toc}{\contentsline {subsection}{\numberline {1.8.2}Using US Letter Paper}{9}{subsection.29}}
-\@writefile{toc}{\contentsline {subsection}{\numberline {1.8.3}References}{9}{subsection.30}}
-\@writefile{toc}{\contentsline {subsubsection}{A Note on bibtex}{9}{section*.32}}
-\@writefile{lot}{\contentsline {table}{\numberline {1.1}{\ignorespaces The effects of treatments X and Y on the four groups studied.\relax }}{10}{table.caption.34}}
+\newlabel{Chapter1}{{1}{1}{POSIX Lexing With Bit-codes}{chapter.14}{}}
+\@writefile{toc}{\contentsline {section}{\numberline {1.1}Introduction To Regexes}{1}{section.15}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {1.1.1}Back References in Regex--Non-Regular part}{4}{subsection.16}}
+\@writefile{toc}{\contentsline {section}{\numberline {1.2}Our Solution--Brzozowski Derivatives}{4}{section.17}}
+\@writefile{toc}{\contentsline {section}{\numberline {1.3}Preliminaries about Lexing Using Brzozowski derivatives}{4}{section.18}}
+\newlabel{graph:*}{{1.1}{6}{Preliminaries about Lexing Using Brzozowski derivatives}{equation.19}{}}
+\newlabel{graph:2}{{1.3}{8}{Values and the Lexing Algorithm by Sulzmann and Lu}{equation.22}{}}
+\@writefile{toc}{\contentsline {section}{\numberline {1.4}Backgound}{14}{section.25}}
+\@writefile{toc}{\contentsline {section}{\numberline {1.5}Engineering and Academic Approaches to Deal with Catastrophic Backtracking}{15}{section.26}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {1.5.1}DFA Approach}{15}{subsection.27}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {1.5.2}NFA Approach}{15}{subsection.28}}
+\@writefile{toc}{\contentsline {section}{\numberline {1.6}Our Approach}{15}{section.29}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {1.6.1}Existing Work}{15}{subsection.30}}
+\@writefile{toc}{\contentsline {section}{\numberline {1.7}What this Template Includes}{16}{section.31}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {1.7.1}Folders}{16}{subsection.32}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {1.7.2}Files}{16}{subsection.33}}
+\@writefile{toc}{\contentsline {section}{\numberline {1.8}Filling in Your Information in the \texttt  {\bfseries  main.tex} File}{17}{section.34}}
+\newlabel{FillingFile}{{1.8}{17}{Filling in Your Information in the \file {main.tex} File}{section.34}{}}
+\@writefile{toc}{\contentsline {section}{\numberline {1.9}The \texttt  {main.tex} File Explained}{17}{section.35}}
+\@writefile{toc}{\contentsline {section}{\numberline {1.10}Thesis Features and Conventions}{18}{section.36}}
+\newlabel{ThesisConventions}{{1.10}{18}{Thesis Features and Conventions}{section.36}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {1.10.1}Printing Format}{19}{subsection.37}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {1.10.2}Using US Letter Paper}{19}{subsection.38}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {1.10.3}References}{19}{subsection.39}}
+\@writefile{lot}{\contentsline {table}{\numberline {1.1}{\ignorespaces The effects of treatments X and Y on the four groups studied.\relax }}{20}{table.caption.43}}
 \providecommand*\caption@xref[2]{\@setref\relax\@undefined{#1}}
-\newlabel{tab:treatments}{{1.1}{10}{The effects of treatments X and Y on the four groups studied.\relax }{table.caption.34}{}}
-\@writefile{toc}{\contentsline {subsection}{\numberline {1.8.4}Tables}{10}{subsection.33}}
-\@writefile{toc}{\contentsline {subsection}{\numberline {1.8.5}Figures}{10}{subsection.35}}
-\@writefile{lof}{\contentsline {figure}{\numberline {1.1}{\ignorespaces An Electron}}{11}{figure.caption.36}}
-\newlabel{fig:Electron}{{1.1}{11}{An Electron}{figure.caption.36}{}}
-\@writefile{toc}{\contentsline {subsection}{\numberline {1.8.6}Typesetting mathematics}{11}{subsection.37}}
-\newlabel{eqn:Einstein}{{1.1}{12}{Typesetting mathematics}{equation.38}{}}
-\@writefile{toc}{\contentsline {section}{\numberline {1.9}Sectioning and Subsectioning}{12}{section.39}}
-\@writefile{toc}{\contentsline {section}{\numberline {1.10}In Closing}{12}{section.40}}
+\newlabel{tab:treatments}{{1.1}{20}{The effects of treatments X and Y on the four groups studied.\relax }{table.caption.43}{}}
+\@writefile{toc}{\contentsline {subsubsection}{A Note on bibtex}{20}{section*.41}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {1.10.4}Tables}{20}{subsection.42}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {1.10.5}Figures}{20}{subsection.44}}
+\@writefile{lof}{\contentsline {figure}{\numberline {1.1}{\ignorespaces An Electron}}{21}{figure.caption.45}}
+\newlabel{fig:Electron}{{1.1}{21}{An Electron}{figure.caption.45}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {1.10.6}Typesetting mathematics}{22}{subsection.46}}
+\newlabel{eqn:Einstein}{{1.4}{22}{Typesetting mathematics}{equation.47}{}}
+\@writefile{toc}{\contentsline {section}{\numberline {1.11}Sectioning and Subsectioning}{22}{section.48}}
+\@writefile{toc}{\contentsline {section}{\numberline {1.12}In Closing}{22}{section.49}}
 \@setckpt{Chapters/Chapter1}{
-\setcounter{page}{13}
-\setcounter{equation}{1}
+\setcounter{page}{23}
+\setcounter{equation}{4}
 \setcounter{enumi}{0}
 \setcounter{enumii}{0}
 \setcounter{enumiii}{0}
@@ -45,7 +50,7 @@
 \setcounter{mpfootnote}{0}
 \setcounter{part}{0}
 \setcounter{chapter}{1}
-\setcounter{section}{10}
+\setcounter{section}{12}
 \setcounter{subsection}{0}
 \setcounter{subsubsection}{0}
 \setcounter{paragraph}{0}
@@ -57,7 +62,7 @@
 \setcounter{ContinuedFloat}{0}
 \setcounter{Item}{0}
 \setcounter{Hfootnote}{1}
-\setcounter{bookmark@seq@number}{26}
+\setcounter{bookmark@seq@number}{29}
 \setcounter{tabx@nest}{0}
 \setcounter{listtotal}{0}
 \setcounter{listcount}{0}
--- a/PhdThesisRealOne/LaTeXTemplates_masters-doctoral-thesis_v2/Chapters/Chapter1.tex	Wed Mar 23 10:09:32 2022 +0000
+++ b/PhdThesisRealOne/LaTeXTemplates_masters-doctoral-thesis_v2/Chapters/Chapter1.tex	Thu Mar 24 20:52:34 2022 +0000
@@ -43,13 +43,20 @@
 \def\distinct{\mathit{distinct}}
 \def\blexersimp{\mathit{blexer}\_\mathit{simp}}
 %----------------------------------------------------------------------------------------
-
-
-
+%This part is about regular expressions, Brzozowski derivatives,
+%and a bit-coded lexing algorithm with proven correctness and time bounds.
+Regular expressions are widely used in modern-day programming tasks.
+Be it IDEs with syntax highlighting and auto-completion, 
+command line tools like $\mathit{grep}$ that facilitate easy 
+processing of text by search and replace, network intrusion
+detection systems that reject suspicious traffic, or compiler
+front-ends--there is always an important phase of the
+task that involves matching a regular 
+expression with a string.
+Given its usefulness and ubiquity, one would imagine that
+modern regular expression matching implementations
+are mature and fully-studied.
 
-Regular expression matching and lexing has been 
- widely-used and well-implemented
-in software industry. 
 If you go to a popular programming language's
 regex engine,
 you can supply it with regex and strings of your own,
@@ -151,6 +158,7 @@
 
 The opens up the possibility of
  a ReDoS (regular expression denial-of-service ) attack.
+ \section{Why Backtracking Algorithm for Regexes?}
 
 Theoretical results say that regular expression matching
 should be linear with respect to the input. You could construct
@@ -212,6 +220,148 @@
 outage had several causes, at the heart was a regular expression that
 was used to monitor network
 %traffic.\footnote{\url{https://blog.cloudflare.com/details-of-the-cloudflare-outage-on-july-2-2019/}}
+
+It turns out that regex libraries not only suffer from 
+exponential backtracking problems, 
+but also undesired (or even buggy) outputs.
+%TODO: comment from who
+xxx commented that most regex libraries are not
+correctly implementing the POSIX (maximum-munch)
+rule of regular expression matching.
+A concrete example would be 
+the regex
+\begin{verbatim}
+(((((a*a*)b*)b){20})*)c
+\end{verbatim}
+and the string
+\begin{verbatim}
+baabaabababaabaaaaaaaaababaa
+aababababaaaabaaabaaaaaabaab
+aabababaababaaaaaaaaababaaaa
+babababaaaaaaaaaaaaac
+\end{verbatim}
+
+This seemingly complex regex simply says "some $a$'s
+followed by some $b$'s then followed by 1 single $b$,
+and this iterates 20 times, finally followed by a $c$".
+And a POSIX match would involve the entire string, "eating up"
+all the $b$'s in it.
+%TODO: give a coloured example of how this matches POSIXly
+
+This regex would trigger catastrophic backtracking in 
+languages like Python and Java,
+whereas it gives a correct but uninformative (non-POSIX)
+match in languages like Go or .NET--The match with only 
+character $c$.
+
+Backtracking or depth-first search might give us
+exponential running time, and quite a few tools 
+avoid that by determinising the $\mathit{NFA}$ 
+into a $\mathit{DFA}$ and minimizes it.
+For example, $\mathit{LEX}$ and $\mathit{JFLEX}$ are tools
+in $C$ and $\mathit{JAVA}$ that generate $\mathit{DFA}$-based
+lexers.
+However, they do not scale well with bounded repetitions.
+Bounded repetitions, usually written in the form
+$r^{\{c\}}$ (where $c$ is a constant natural number),
+denotes a regular expression accepting strings
+that can be divided into $c$ substrings, and each 
+substring is in $r$. 
+%TODO: draw example NFA.
+For the regular expression $(a|b)^*a(a|b)^{\{2\}}$,
+an $\mathit{NFA}$ describing it would look like:
+\begin{center}
+\begin{tikzpicture}[shorten >=1pt,node distance=2cm,on grid,auto] 
+   \node[state,initial] (q_0)   {$q_0$}; 
+   \node[state, red] (q_1) [right=of q_0] {$q_1$}; 
+   \node[state, red] (q_2) [right=of q_1] {$q_2$}; 
+   \node[state,accepting](q_3) [right=of q_2] {$q_3$};
+    \path[->] 
+    (q_0) edge  node {a} (q_1)
+    	  edge [loop below] node {a,b} ()
+    (q_1) edge  node  {a,b} (q_2)
+          edge [loop above] node {0} ()
+    (q_2) edge  node  {a,b} (q_3);
+\end{tikzpicture}
+\end{center}
+The red states are "counter states" which count down 
+the number of characters needed in addition to the current
+string to make a successful match.
+For example, state $q_1$ indicates a match that has
+gone past the $(a|b)$ part of $(a|b)^*a(a|b)^{\{2\}}$,
+and just consumed the "delimiter" $a$ in the middle, and 
+need to match 2 more iterations of $a|b$ to complete.
+State $q_2$ on the other hand, can be viewed as a state
+after $q_1$ has consumed 1 character, and just waits
+for 1 more character to complete.
+Depending on the actual characters appearing in the 
+input string, the states $q_1$ and $q_2$ may or may
+not be active, independent from each other.
+A $\mathit{DFA}$ for such an $\mathit{NFA}$ would
+contain at least 4 non-equivalent states that cannot be merged, 
+because subset states indicating which of $q_1$ and $q_2$
+are active are at least four: $\phi$, $\{q_1\}$, 
+$\{q_2\}$, $\{q_1, q_2\}$.
+Generalizing this to regular expressions with larger
+bounded repetition numbers, we have that $r^*ar^{\{n\}}$
+in general requires at least $2^n$ states
+in a $\mathit{DFA}$. This is to represent all the different 
+configurations of "countdown" states.
+For those regexes, tools such as $\mathit{JFLEX}$ 
+would generate gigantic $\mathit{DFA}$'s or even
+give out memory errors.
+
+For this reason, regex libraries that support 
+bounded repetitions often choose to use the $\mathit{NFA}$ 
+approach.
+One can simulate the $\mathit{NFA}$ running in two ways:
+one by keeping track of all active states after consuming 
+a character, and updating that set of states iteratively.
+This is a breadth-first-search of the $\mathit{NFA}$
+for a path terminating
+at an accepting state.
+Languages like $\mathit{GO}$ and $\mathit{RUST}$ use this
+type of $\mathit{NFA}$ simulation, and guarantees a linear runtime
+in terms of input string length.
+The other way to use $\mathit{NFA}$ for matching is to take 
+a single state in a path each time, and backtrack if that path
+fails. This is a depth-first-search that could end up
+with exponential run time.
+The reason behind backtracking algorithms in languages like
+Java and Python is that they support back-references.
+\subsection{Back References in Regex--Non-Regular part}
+If we label sub-expressions by parenthesizing them and give 
+them a number by the order their opening parenthesis appear,
+$\underset{1}{(}\ldots\underset{2}{(}\ldots\underset{3}{(}\ldots\underset{4}{(}\ldots)\ldots)\ldots)\ldots)$
+We can use the following syntax to denote that we want a string just matched by a 
+sub-expression to appear at a certain location again exactly:
+$(.*)\backslash 1$
+would match the string like $\mathit{bobo}$, $\mathit{weewee}$ and etc.
+
+Back-reference is a construct in the "regex" standard
+that programmers found quite useful, but not exactly 
+regular any more.
+In fact, that allows the regex construct to express 
+languages that cannot be contained in context-free
+languages.
+For example, the back reference $(a^*)\backslash 1 \backslash 1$
+expresses the language $\{a^na^na^n\mid n \in N\}$,
+which cannot be expressed by context-free grammars.
+To express such a language one would need context-sensitive
+grammars, and matching/parsing for such grammars is NP-hard in 
+general.
+%TODO:cite reference for NP-hardness of CSG matching
+For such constructs, the most intuitive way of matching is
+using backtracking algorithms, and there are no known algorithms 
+that do not backtrack.
+%TODO:read a bit more about back reference algorithms
+
+
+
+
+\section{Our Solution--Brzozowski Derivatives}
+
+
  
  Is it possible to have a regex lexing algorithm with proven correctness and 
  time complexity, which allows easy extensions to
@@ -220,8 +370,30 @@
  
  We propose Brzozowski's derivatives as a solution to this problem.
  
- 
- \section{Why Brzozowski}
+The main contribution of this thesis is a proven correct lexing algorithm
+with formalized time bounds.
+To our best knowledge, there is no lexing libraries using Brzozowski derivatives
+that have a provable time guarantee, 
+and claims about running time are usually speculative and backed by thin empirical
+evidence.
+%TODO: give references
+For example, Sulzmann and Lu had proposed an algorithm  in which they
+claim a linear running time.
+But that was falsified by our experiments and the running time 
+is actually $\Omega(2^n)$ in the worst case.
+A similar claim about a theoretical runtime of $O(n^2)$ is made for the Verbatim
+%TODO: give references
+lexer, which calculates POSIX matches and is based on derivatives.
+They formalized the correctness of the lexer, but not the complexity.
+In the performance evaluation section, they simply analyzed the run time
+of matching $a$ with the string $\underbrace{a \ldots a}_{\text{n a's}}$
+and concluded that the algorithm is quadratic in terms of input length.
+When we tried out their extracted OCaml code with our example $(a+aa)^*$,
+the time it took to lex only 40 $a$'s was 5 minutes.
+We therefore believe our results of a proof of performance on general
+inputs rather than specific examples are a novel contribution.\\
+
+ \section{Preliminaries about Lexing Using Brzozowski derivatives}
  In the last fifteen or so years, Brzozowski's derivatives of regular
 expressions have sparked quite a bit of interest in the functional
 programming and theorem prover communities.  
@@ -327,7 +499,705 @@
 
 
  
- 
+  
+\begin{center}
+\begin{tabular}{lcl}
+		$\ZERO \backslash c$ & $\dn$ & $\ZERO$\\  
+		$\ONE \backslash c$  & $\dn$ & $\ZERO$\\
+		$d \backslash c$     & $\dn$ & 
+		$\mathit{if} \;c = d\;\mathit{then}\;\ONE\;\mathit{else}\;\ZERO$\\
+$(r_1 + r_2)\backslash c$     & $\dn$ & $r_1 \backslash c \,+\, r_2 \backslash c$\\
+$(r_1 \cdot r_2)\backslash c$ & $\dn$ & $\mathit{if} \, nullable(r_1)$\\
+	&   & $\mathit{then}\;(r_1\backslash c) \cdot r_2 \,+\, r_2\backslash c$\\
+	&   & $\mathit{else}\;(r_1\backslash c) \cdot r_2$\\
+	$(r^*)\backslash c$           & $\dn$ & $(r\backslash c) \cdot r^*$\\
+\end{tabular}
+\end{center}
+\noindent
+\noindent
+The derivative function, written $\backslash c$, 
+defines how a regular expression evolves into
+a new regular expression after the head character $c$
+is chopped off the strings it matches.
+The most involved cases are the sequence 
+and star case.
+The sequence case says that if the first regular expression
+contains an empty string then second component of the sequence
+might be chosen as the target regular expression to be chopped
+off its head character.
+The star case unwraps one iteration of the
+regular expression and attaches the star regular expression
+to its back again to make sure there are 0 or more iterations
+following this unfolded iteration.
+
+
+The main property of the derivative operation
+that enables us to reason about the correctness of
+an algorithm using derivatives is 
+
+\begin{center}
+$c\!::\!s \in L(r)$ holds
+if and only if $s \in L(r\backslash c)$.
+\end{center}
+
+\noindent
+We can generalise the derivative operation shown above for single characters
+to strings as follows:
+
+\begin{center}
+\begin{tabular}{lcl}
+$r \backslash (c\!::\!s) $ & $\dn$ & $(r \backslash c) \backslash s$ \\
+$r \backslash [\,] $ & $\dn$ & $r$
+\end{tabular}
+\end{center}
+
+\noindent
+and then define Brzozowski's  regular-expression matching algorithm as:
+
+\[
+match\;s\;r \;\dn\; nullable(r\backslash s)
+\]
+
+\noindent
+Assuming the string is given as a sequence of characters, say $c_0c_1\ldots c_n$, 
+this algorithm presented graphically is as follows:
+
+\begin{equation}\label{graph:*}
+\begin{tikzcd}
+r_0 \arrow[r, "\backslash c_0"]  & r_1 \arrow[r, "\backslash c_1"] & r_2 \arrow[r, dashed]  & r_n  \arrow[r,"\textit{nullable}?"] & \;\textrm{YES}/\textrm{NO}
+\end{tikzcd}
+\end{equation}
+
+\noindent
+where we start with  a regular expression  $r_0$, build successive
+derivatives until we exhaust the string and then use \textit{nullable}
+to test whether the result can match the empty string. It can  be
+relatively  easily shown that this matcher is correct  (that is given
+an $s = c_0...c_{n-1}$ and an $r_0$, it generates YES if and only if $s \in L(r_0)$).
+
+This is a beautiful and simple definition.
+
+If we implement the above algorithm naively, however,
+the algorithm can be excruciatingly slow. For example, when starting with the regular
+expression $(a + aa)^*$ and building 12 successive derivatives
+w.r.t.~the character $a$, one obtains a derivative regular expression
+with more than 8000 nodes (when viewed as a tree). Operations like
+$\backslash$ and $\nullable$ need to traverse such trees and
+consequently the bigger the size of the derivative the slower the
+algorithm. 
+
+Brzozowski was quick in finding that during this process a lot of useless
+$\ONE$s and $\ZERO$s are generated, making the process far from optimal.
+He also introduced some "similarity rules" such
+as $P+(Q+R) = (P+Q)+R$ to merge syntactically 
+different but language-equivalent sub-regexes to further decrease the size
+of the intermediate regexes. 
+
+More simplifications are possible, such as deleting duplicates
+and opening up nested alternatives to trigger even more simplifications.
+And suppose we apply simplification after each derivative step, and compose
+these two operations together as an atomic one: $a \backslash_{simp}\,c \dn
+\textit{simp}(a \backslash c)$. Then we can build
+a matcher without having  cumbersome regular expressions.
+
+
+If we want the size of derivatives in the algorithm to
+stay even lower, we would need more aggressive simplifications.
+Essentially we need to delete useless $\ZERO$s and $\ONE$s, as well as
+deleting duplicates whenever possible. For example, the parentheses in
+$(a+b) \cdot c + b\cdot c$ can be opened up to get $a\cdot c + b \cdot c + b
+\cdot c$, and then simplified to just $a \cdot c + b \cdot c$. Another
+example is simplifying $(a^*+a) + (a^*+ \ONE) + (a +\ONE)$ to just
+$a^*+a+\ONE$. Adding these more aggressive simplification rules help us
+to achieve a very tight size bound, namely,
+ the same size bound as that of the \emph{partial derivatives}. 
+
+Building derivatives and then simplifying them.
+So far so good. But what if we want to 
+do lexing instead of just a YES/NO answer?
+This requires us to go back again to the world 
+without simplification first for a moment.
+Sulzmann and Lu~\cite{Sulzmann2014} first came up with a nice and 
+elegant (arguably as beautiful as the original
+derivatives definition) solution for this.
+
+\subsection*{Values and the Lexing Algorithm by Sulzmann and Lu}
+
+
+They first defined the datatypes for storing the 
+lexing information called a \emph{value} or
+sometimes also \emph{lexical value}.  These values and regular
+expressions correspond to each other as illustrated in the following
+table:
+
+\begin{center}
+	\begin{tabular}{c@{\hspace{20mm}}c}
+		\begin{tabular}{@{}rrl@{}}
+			\multicolumn{3}{@{}l}{\textbf{Regular Expressions}}\medskip\\
+			$r$ & $::=$  & $\ZERO$\\
+			& $\mid$ & $\ONE$   \\
+			& $\mid$ & $c$          \\
+			& $\mid$ & $r_1 \cdot r_2$\\
+			& $\mid$ & $r_1 + r_2$   \\
+			\\
+			& $\mid$ & $r^*$         \\
+		\end{tabular}
+		&
+		\begin{tabular}{@{\hspace{0mm}}rrl@{}}
+			\multicolumn{3}{@{}l}{\textbf{Values}}\medskip\\
+			$v$ & $::=$  & \\
+			&        & $\Empty$   \\
+			& $\mid$ & $\Char(c)$          \\
+			& $\mid$ & $\Seq\,v_1\, v_2$\\
+			& $\mid$ & $\Left(v)$   \\
+			& $\mid$ & $\Right(v)$  \\
+			& $\mid$ & $\Stars\,[v_1,\ldots\,v_n]$ \\
+		\end{tabular}
+	\end{tabular}
+\end{center}
+
+\noindent
+One regular expression can have multiple lexical values. For example
+for the regular expression $(a+b)^*$, it has an infinite list of
+values corresponding to it: $\Stars\,[]$, $\Stars\,[\Left(Char(a))]$,
+$\Stars\,[\Right(Char(b))]$, $\Stars\,[\Left(Char(a)),\,\Right(Char(b))]$,
+$\ldots$, and vice versa.
+Even for the regular expression matching a certain string, there could 
+still be more than one value corresponding to it.
+Take the example where $r= (a^*\cdot a^*)^*$ and the string 
+$s=\underbrace{aa\ldots a}_\text{n \textit{a}s}$.
+The number of different ways of matching 
+without allowing any value under a star to be flattened
+to an empty string can be given by the following formula:
+\begin{center}
+	$C_n = (n+1)+n C_1+\ldots + 2 C_{n-1}$
+\end{center}
+and a closed form formula can be calculated to be
+\begin{equation}
+	C_n =\frac{(2+\sqrt{2})^n - (2-\sqrt{2})^n}{4\sqrt{2}}
+\end{equation}
+which is clearly in exponential order.
+A lexer aimed at getting all the possible values has an exponential
+worst case runtime. Therefore it is impractical to try to generate
+all possible matches in a run. In practice, we are usually 
+interested about POSIX values, which by intuition always
+match the leftmost regular expression when there is a choice
+and always match a sub part as much as possible before proceeding
+to the next token. For example, the above example has the POSIX value
+$ \Stars\,[\Seq(Stars\,[\underbrace{\Char(a),\ldots,\Char(a)}_\text{n iterations}], Stars\,[])]$.
+The output of an algorithm we want would be a POSIX matching
+encoded as a value.
+The contribution of Sulzmann and Lu is an extension of Brzozowski's
+algorithm by a second phase (the first phase being building successive
+derivatives---see \eqref{graph:*}). In this second phase, a POSIX value 
+is generated in case the regular expression matches  the string. 
+Pictorially, the Sulzmann and Lu algorithm is as follows:
+
+\begin{ceqn}
+\begin{equation}\label{graph:2}
+\begin{tikzcd}
+r_0 \arrow[r, "\backslash c_0"]  \arrow[d] & r_1 \arrow[r, "\backslash c_1"] \arrow[d] & r_2 \arrow[r, dashed] \arrow[d] & r_n \arrow[d, "mkeps" description] \\
+v_0           & v_1 \arrow[l,"inj_{r_0} c_0"]                & v_2 \arrow[l, "inj_{r_1} c_1"]              & v_n \arrow[l, dashed]         
+\end{tikzcd}
+\end{equation}
+\end{ceqn}
+
+
+\noindent
+For convenience, we shall employ the following notations: the regular
+expression we start with is $r_0$, and the given string $s$ is composed
+of characters $c_0 c_1 \ldots c_{n-1}$. In  the first phase from the
+left to right, we build the derivatives $r_1$, $r_2$, \ldots  according
+to the characters $c_0$, $c_1$  until we exhaust the string and obtain
+the derivative $r_n$. We test whether this derivative is
+$\textit{nullable}$ or not. If not, we know the string does not match
+$r$ and no value needs to be generated. If yes, we start building the
+values incrementally by \emph{injecting} back the characters into the
+earlier values $v_n, \ldots, v_0$. This is the second phase of the
+algorithm from the right to left. For the first value $v_n$, we call the
+function $\textit{mkeps}$, which builds a POSIX lexical value
+for how the empty string has been matched by the (nullable) regular
+expression $r_n$. This function is defined as
+
+	\begin{center}
+		\begin{tabular}{lcl}
+			$\mkeps(\ONE)$ 		& $\dn$ & $\Empty$ \\
+			$\mkeps(r_{1}+r_{2})$	& $\dn$ 
+			& \textit{if} $\nullable(r_{1})$\\ 
+			& & \textit{then} $\Left(\mkeps(r_{1}))$\\ 
+			& & \textit{else} $\Right(\mkeps(r_{2}))$\\
+			$\mkeps(r_1\cdot r_2)$ 	& $\dn$ & $\Seq\,(\mkeps\,r_1)\,(\mkeps\,r_2)$\\
+			$\mkeps(r^*)$	        & $\dn$ & $\Stars\,[]$
+		\end{tabular}
+	\end{center}
+
+
+\noindent 
+After the $\mkeps$-call, we inject back the characters one by one in order to build
+the lexical value $v_i$ for how the regex $r_i$ matches the string $s_i$
+($s_i = c_i \ldots c_{n-1}$ ) from the previous lexical value $v_{i+1}$.
+After injecting back $n$ characters, we get the lexical value for how $r_0$
+matches $s$. The POSIX value is maintained throughout the process.
+For this Sulzmann and Lu defined a function that reverses
+the ``chopping off'' of characters during the derivative phase. The
+corresponding function is called \emph{injection}, written
+$\textit{inj}$; it takes three arguments: the first one is a regular
+expression ${r_{i-1}}$, before the character is chopped off, the second
+is a character ${c_{i-1}}$, the character we want to inject and the
+third argument is the value ${v_i}$, into which one wants to inject the
+character (it corresponds to the regular expression after the character
+has been chopped off). The result of this function is a new value. The
+definition of $\textit{inj}$ is as follows: 
+
+\begin{center}
+\begin{tabular}{l@{\hspace{1mm}}c@{\hspace{1mm}}l}
+  $\textit{inj}\,(c)\,c\,\Empty$            & $\dn$ & $\Char\,c$\\
+  $\textit{inj}\,(r_1 + r_2)\,c\,\Left(v)$ & $\dn$ & $\Left(\textit{inj}\,r_1\,c\,v)$\\
+  $\textit{inj}\,(r_1 + r_2)\,c\,\Right(v)$ & $\dn$ & $\Right(\textit{inj}\,r_2\,c\,v)$\\
+  $\textit{inj}\,(r_1 \cdot r_2)\,c\,\Seq(v_1,v_2)$ & $\dn$  & $\Seq(\textit{inj}\,r_1\,c\,v_1,v_2)$\\
+  $\textit{inj}\,(r_1 \cdot r_2)\,c\,\Left(\Seq(v_1,v_2))$ & $\dn$  & $\Seq(\textit{inj}\,r_1\,c\,v_1,v_2)$\\
+  $\textit{inj}\,(r_1 \cdot r_2)\,c\,\Right(v)$ & $\dn$  & $\Seq(\textit{mkeps}(r_1),\textit{inj}\,r_2\,c\,v)$\\
+  $\textit{inj}\,(r^*)\,c\,\Seq(v,\Stars\,vs)$         & $\dn$  & $\Stars((\textit{inj}\,r\,c\,v)\,::\,vs)$\\
+\end{tabular}
+\end{center}
+
+\noindent This definition is by recursion on the ``shape'' of regular
+expressions and values. 
+The clauses basically do one thing---identifying the ``holes'' in the
+value into which the character needs to be injected back.
+For instance, in the last clause for injecting back to a value
+that would turn into a new star value that corresponds to a star,
+we know it must be a sequence value. And we know that the first 
+value of that sequence corresponds to the child regex of the star
+with the first character being chopped off--an iteration of the star
+that had just been unfolded. This value is followed by the already
+matched star iterations we collected before. So we inject the character 
+back to the first value and form a new value with this new iteration
+being added to the previous list of iterations, all under the $Stars$
+top level.
+
+We have mentioned before that derivatives without simplification 
+can get clumsy, and this is true for values as well--they reflect
+the regular expressions size by definition.
+
+One can introduce simplification on the regex and values, but have to
+be careful in not breaking the correctness as the injection 
+function heavily relies on the structure of the regexes and values
+being correct and match each other.
+It can be achieved by recording some extra rectification functions
+during the derivatives step, and applying these rectifications in 
+each run during the injection phase.
+And we can prove that the POSIX value of how
+regular expressions match strings will not be affected---although is much harder
+to establish. Some initial results in this regard have been
+obtained in \cite{AusafDyckhoffUrban2016}. 
+
+%Brzozowski, after giving the derivatives and simplification,
+%did not explore lexing with simplification or he may well be 
+%stuck on an efficient simplificaiton with a proof.
+%He went on to explore the use of derivatives together with 
+%automaton, and did not try lexing using derivatives.
+
+We want to get rid of complex and fragile rectification of values.
+Can we not create those intermediate values $v_1,\ldots v_n$,
+and get the lexing information that should be already there while
+doing derivatives in one pass, without a second phase of injection?
+In the meantime, can we make sure that simplifications
+are easily handled without breaking the correctness of the algorithm?
+
+Sulzmann and Lu solved this problem by
+introducing additional information to the 
+regular expressions called \emph{bitcodes}.
+
+\subsection*{Bit-coded Algorithm}
+Bits and bitcodes (lists of bits) are defined as:
+
+\begin{center}
+		$b ::=   1 \mid  0 \qquad
+bs ::= [] \mid b::bs    
+$
+\end{center}
+
+\noindent
+The $1$ and $0$ are not in bold in order to avoid 
+confusion with the regular expressions $\ZERO$ and $\ONE$. Bitcodes (or
+bit-lists) can be used to encode values (or potentially incomplete values) in a
+compact form. This can be straightforwardly seen in the following
+coding function from values to bitcodes: 
+
+\begin{center}
+\begin{tabular}{lcl}
+  $\textit{code}(\Empty)$ & $\dn$ & $[]$\\
+  $\textit{code}(\Char\,c)$ & $\dn$ & $[]$\\
+  $\textit{code}(\Left\,v)$ & $\dn$ & $0 :: code(v)$\\
+  $\textit{code}(\Right\,v)$ & $\dn$ & $1 :: code(v)$\\
+  $\textit{code}(\Seq\,v_1\,v_2)$ & $\dn$ & $code(v_1) \,@\, code(v_2)$\\
+  $\textit{code}(\Stars\,[])$ & $\dn$ & $[0]$\\
+  $\textit{code}(\Stars\,(v\!::\!vs))$ & $\dn$ & $1 :: code(v) \;@\;
+                                                 code(\Stars\,vs)$
+\end{tabular}    
+\end{center} 
+
+\noindent
+Here $\textit{code}$ encodes a value into a bitcodes by converting
+$\Left$ into $0$, $\Right$ into $1$, and marks the start of a non-empty
+star iteration by $1$. The border where a local star terminates
+is marked by $0$. This coding is lossy, as it throws away the information about
+characters, and also does not encode the ``boundary'' between two
+sequence values. Moreover, with only the bitcode we cannot even tell
+whether the $1$s and $0$s are for $\Left/\Right$ or $\Stars$. The
+reason for choosing this compact way of storing information is that the
+relatively small size of bits can be easily manipulated and ``moved
+around'' in a regular expression. In order to recover values, we will 
+need the corresponding regular expression as an extra information. This
+means the decoding function is defined as:
+
+
+%\begin{definition}[Bitdecoding of Values]\mbox{}
+\begin{center}
+\begin{tabular}{@{}l@{\hspace{1mm}}c@{\hspace{1mm}}l@{}}
+  $\textit{decode}'\,bs\,(\ONE)$ & $\dn$ & $(\Empty, bs)$\\
+  $\textit{decode}'\,bs\,(c)$ & $\dn$ & $(\Char\,c, bs)$\\
+  $\textit{decode}'\,(0\!::\!bs)\;(r_1 + r_2)$ & $\dn$ &
+     $\textit{let}\,(v, bs_1) = \textit{decode}'\,bs\,r_1\;\textit{in}\;
+       (\Left\,v, bs_1)$\\
+  $\textit{decode}'\,(1\!::\!bs)\;(r_1 + r_2)$ & $\dn$ &
+     $\textit{let}\,(v, bs_1) = \textit{decode}'\,bs\,r_2\;\textit{in}\;
+       (\Right\,v, bs_1)$\\                           
+  $\textit{decode}'\,bs\;(r_1\cdot r_2)$ & $\dn$ &
+        $\textit{let}\,(v_1, bs_1) = \textit{decode}'\,bs\,r_1\;\textit{in}$\\
+  & &   $\textit{let}\,(v_2, bs_2) = \textit{decode}'\,bs_1\,r_2$\\
+  & &   \hspace{35mm}$\textit{in}\;(\Seq\,v_1\,v_2, bs_2)$\\
+  $\textit{decode}'\,(0\!::\!bs)\,(r^*)$ & $\dn$ & $(\Stars\,[], bs)$\\
+  $\textit{decode}'\,(1\!::\!bs)\,(r^*)$ & $\dn$ & 
+         $\textit{let}\,(v, bs_1) = \textit{decode}'\,bs\,r\;\textit{in}$\\
+  & &   $\textit{let}\,(\Stars\,vs, bs_2) = \textit{decode}'\,bs_1\,r^*$\\
+  & &   \hspace{35mm}$\textit{in}\;(\Stars\,v\!::\!vs, bs_2)$\bigskip\\
+  
+  $\textit{decode}\,bs\,r$ & $\dn$ &
+     $\textit{let}\,(v, bs') = \textit{decode}'\,bs\,r\;\textit{in}$\\
+  & & $\textit{if}\;bs' = []\;\textit{then}\;\textit{Some}\,v\;
+       \textit{else}\;\textit{None}$                       
+\end{tabular}    
+\end{center}    
+%\end{definition}
+
+Sulzmann and Lu's integrated the bitcodes into regular expressions to
+create annotated regular expressions \cite{Sulzmann2014}.
+\emph{Annotated regular expressions} are defined by the following
+grammar:%\comment{ALTS should have  an $as$ in  the definitions, not  just $a_1$ and $a_2$}
+
+\begin{center}
+\begin{tabular}{lcl}
+  $\textit{a}$ & $::=$  & $\ZERO$\\
+                  & $\mid$ & $_{bs}\ONE$\\
+                  & $\mid$ & $_{bs}{\bf c}$\\
+                  & $\mid$ & $_{bs}\sum\,as$\\
+                  & $\mid$ & $_{bs}a_1\cdot a_2$\\
+                  & $\mid$ & $_{bs}a^*$
+\end{tabular}    
+\end{center}  
+%(in \textit{ALTS})
+
+\noindent
+where $bs$ stands for bitcodes, $a$  for $\mathbf{a}$nnotated regular
+expressions and $as$ for a list of annotated regular expressions.
+The alternative constructor ($\sum$) has been generalised to 
+accept a list of annotated regular expressions rather than just two.
+We will show that these bitcodes encode information about
+the (POSIX) value that should be generated by the Sulzmann and Lu
+algorithm.
+
+
+To do lexing using annotated regular expressions, we shall first
+transform the usual (un-annotated) regular expressions into annotated
+regular expressions. This operation is called \emph{internalisation} and
+defined as follows:
+
+%\begin{definition}
+\begin{center}
+\begin{tabular}{lcl}
+  $(\ZERO)^\uparrow$ & $\dn$ & $\ZERO$\\
+  $(\ONE)^\uparrow$ & $\dn$ & $_{[]}\ONE$\\
+  $(c)^\uparrow$ & $\dn$ & $_{[]}{\bf c}$\\
+  $(r_1 + r_2)^\uparrow$ & $\dn$ &
+  $_{[]}\sum[\textit{fuse}\,[0]\,r_1^\uparrow,\,
+  \textit{fuse}\,[1]\,r_2^\uparrow]$\\
+  $(r_1\cdot r_2)^\uparrow$ & $\dn$ &
+         $_{[]}r_1^\uparrow \cdot r_2^\uparrow$\\
+  $(r^*)^\uparrow$ & $\dn$ &
+         $_{[]}(r^\uparrow)^*$\\
+\end{tabular}    
+\end{center}    
+%\end{definition}
+
+\noindent
+We use up arrows here to indicate that the basic un-annotated regular
+expressions are ``lifted up'' into something slightly more complex. In the
+fourth clause, $\textit{fuse}$ is an auxiliary function that helps to
+attach bits to the front of an annotated regular expression. Its
+definition is as follows:
+
+\begin{center}
+\begin{tabular}{lcl}
+  $\textit{fuse}\;bs \; \ZERO$ & $\dn$ & $\ZERO$\\
+  $\textit{fuse}\;bs\; _{bs'}\ONE$ & $\dn$ &
+     $_{bs @ bs'}\ONE$\\
+  $\textit{fuse}\;bs\;_{bs'}{\bf c}$ & $\dn$ &
+     $_{bs@bs'}{\bf c}$\\
+  $\textit{fuse}\;bs\,_{bs'}\sum\textit{as}$ & $\dn$ &
+     $_{bs@bs'}\sum\textit{as}$\\
+  $\textit{fuse}\;bs\; _{bs'}a_1\cdot a_2$ & $\dn$ &
+     $_{bs@bs'}a_1 \cdot a_2$\\
+  $\textit{fuse}\;bs\,_{bs'}a^*$ & $\dn$ &
+     $_{bs @ bs'}a^*$
+\end{tabular}    
+\end{center}  
+
+\noindent
+After internalising the regular expression, we perform successive
+derivative operations on the annotated regular expressions. This
+derivative operation is the same as what we had previously for the
+basic regular expressions, except that we need to take care of
+the bitcodes:
+
+
+\iffalse
+ %\begin{definition}{bder}
+\begin{center}
+  \begin{tabular}{@{}lcl@{}}
+  $(\textit{ZERO})\,\backslash c$ & $\dn$ & $\textit{ZERO}$\\  
+  $(\textit{ONE}\;bs)\,\backslash c$ & $\dn$ & $\textit{ZERO}$\\  
+  $(\textit{CHAR}\;bs\,d)\,\backslash c$ & $\dn$ &
+        $\textit{if}\;c=d\; \;\textit{then}\;
+         \textit{ONE}\;bs\;\textit{else}\;\textit{ZERO}$\\  
+  $(\textit{ALTS}\;bs\,as)\,\backslash c$ & $\dn$ &
+  $\textit{ALTS}\;bs\,(as.map(\backslash c))$\\
+  $(\textit{SEQ}\;bs\,a_1\,a_2)\,\backslash c$ & $\dn$ &
+     $\textit{if}\;\textit{bnullable}\,a_1$\\
+					       & &$\textit{then}\;\textit{ALTS}\,bs\,List((\textit{SEQ}\,[]\,(a_1\,\backslash c)\,a_2),$\\
+					       & &$\phantom{\textit{then}\;\textit{ALTS}\,bs\,}(\textit{fuse}\,(\textit{bmkeps}\,a_1)\,(a_2\,\backslash c)))$\\
+  & &$\textit{else}\;\textit{SEQ}\,bs\,(a_1\,\backslash c)\,a_2$\\
+  $(\textit{STAR}\,bs\,a)\,\backslash c$ & $\dn$ &
+      $\textit{SEQ}\;bs\,(\textit{fuse}\, [\Z] (r\,\backslash c))\,
+       (\textit{STAR}\,[]\,r)$
+\end{tabular}    
+\end{center}    
+%\end{definition}
+
+\begin{center}
+  \begin{tabular}{@{}lcl@{}}
+  $(\textit{ZERO})\,\backslash c$ & $\dn$ & $\textit{ZERO}$\\  
+  $(_{bs}\textit{ONE})\,\backslash c$ & $\dn$ & $\textit{ZERO}$\\  
+  $(_{bs}\textit{CHAR}\;d)\,\backslash c$ & $\dn$ &
+        $\textit{if}\;c=d\; \;\textit{then}\;
+         _{bs}\textit{ONE}\;\textit{else}\;\textit{ZERO}$\\  
+  $(_{bs}\textit{ALTS}\;\textit{as})\,\backslash c$ & $\dn$ &
+  $_{bs}\textit{ALTS}\;(\textit{as}.\textit{map}(\backslash c))$\\
+  $(_{bs}\textit{SEQ}\;a_1\,a_2)\,\backslash c$ & $\dn$ &
+     $\textit{if}\;\textit{bnullable}\,a_1$\\
+					       & &$\textit{then}\;_{bs}\textit{ALTS}\,List((_{[]}\textit{SEQ}\,(a_1\,\backslash c)\,a_2),$\\
+					       & &$\phantom{\textit{then}\;_{bs}\textit{ALTS}\,}(\textit{fuse}\,(\textit{bmkeps}\,a_1)\,(a_2\,\backslash c)))$\\
+  & &$\textit{else}\;_{bs}\textit{SEQ}\,(a_1\,\backslash c)\,a_2$\\
+  $(_{bs}\textit{STAR}\,a)\,\backslash c$ & $\dn$ &
+      $_{bs}\textit{SEQ}\;(\textit{fuse}\, [0] \; r\,\backslash c )\,
+       (_{bs}\textit{STAR}\,[]\,r)$
+\end{tabular}    
+\end{center}    
+%\end{definition}
+\fi
+
+\begin{center}
+  \begin{tabular}{@{}lcl@{}}
+  $(\ZERO)\,\backslash c$ & $\dn$ & $\ZERO$\\  
+  $(_{bs}\ONE)\,\backslash c$ & $\dn$ & $\ZERO$\\  
+  $(_{bs}{\bf d})\,\backslash c$ & $\dn$ &
+        $\textit{if}\;c=d\; \;\textit{then}\;
+         _{bs}\ONE\;\textit{else}\;\ZERO$\\  
+  $(_{bs}\sum \;\textit{as})\,\backslash c$ & $\dn$ &
+  $_{bs}\sum\;(\textit{as.map}(\backslash c))$\\
+  $(_{bs}\;a_1\cdot a_2)\,\backslash c$ & $\dn$ &
+     $\textit{if}\;\textit{bnullable}\,a_1$\\
+					       & &$\textit{then}\;_{bs}\sum\,[(_{[]}\,(a_1\,\backslash c)\cdot\,a_2),$\\
+					       & &$\phantom{\textit{then},\;_{bs}\sum\,}(\textit{fuse}\,(\textit{bmkeps}\,a_1)\,(a_2\,\backslash c))]$\\
+  & &$\textit{else}\;_{bs}\,(a_1\,\backslash c)\cdot a_2$\\
+  $(_{bs}a^*)\,\backslash c$ & $\dn$ &
+      $_{bs}(\textit{fuse}\, [0] \; (a\,\backslash c))\cdot
+       (_{[]}a^*)$
+\end{tabular}    
+\end{center}    
+
+%\end{definition}
+\noindent
+For instance, when we take the derivative of $_{bs}a^*$ with respect to $c$,
+we need to unfold it into a sequence,
+and attach an additional bit $0$ to the front of $r \backslash c$
+to indicate that there is one more star iteration. Also the sequence clause
+is more subtle---when $a_1$ is $\textit{bnullable}$ (here
+\textit{bnullable} is exactly the same as $\textit{nullable}$, except
+that it is for annotated regular expressions, therefore we omit the
+definition). Assume that $\textit{bmkeps}$ correctly extracts the bitcode for how
+$a_1$ matches the string prior to character $c$ (more on this later),
+then the right branch of the alternative, which is $\textit{fuse} \; (\textit{bmkeps} \; a_1) \; (a_2
+\backslash c)$ will collapse the regular expression $a_1$ (as it has
+already been fully matched) and store the parsing information at the
+head of the regular expression $a_2 \backslash c$ by fusing to it. The
+bitsequence $\textit{bs}$, which was initially attached to the
+first element of the sequence $a_1 \cdot a_2$, has
+now been elevated to the top-level of $\sum$, as this information will be
+needed whichever way the sequence is matched---no matter whether $c$ belongs
+to $a_1$ or $ a_2$. After building these derivatives and maintaining all
+the lexing information, we complete the lexing by collecting the
+bitcodes using a generalised version of the $\textit{mkeps}$ function
+for annotated regular expressions, called $\textit{bmkeps}$:
+
+
+%\begin{definition}[\textit{bmkeps}]\mbox{}
+\begin{center}
+\begin{tabular}{lcl}
+  $\textit{bmkeps}\,(_{bs}\ONE)$ & $\dn$ & $bs$\\
+  $\textit{bmkeps}\,(_{bs}\sum a::\textit{as})$ & $\dn$ &
+     $\textit{if}\;\textit{bnullable}\,a$\\
+  & &$\textit{then}\;bs\,@\,\textit{bmkeps}\,a$\\
+  & &$\textit{else}\;\textit{bmkeps}\,(_{bs}\sum \textit{as})$\\
+  $\textit{bmkeps}\,(_{bs} a_1 \cdot a_2)$ & $\dn$ &
+     $bs \,@\,\textit{bmkeps}\,a_1\,@\, \textit{bmkeps}\,a_2$\\
+  $\textit{bmkeps}\,(_{bs}a^*)$ & $\dn$ &
+     $bs \,@\, [0]$
+\end{tabular}    
+\end{center}    
+%\end{definition}
+
+\noindent
+This function completes the value information by travelling along the
+path of the regular expression that corresponds to a POSIX value and
+collecting all the bitcodes, and using $0$ to indicate the end of star
+iterations. If we take the bitcodes produced by $\textit{bmkeps}$ and
+decode them, we get the value we expect. The corresponding lexing
+algorithm looks as follows:
+
+\begin{center}
+\begin{tabular}{lcl}
+  $\textit{blexer}\;r\,s$ & $\dn$ &
+      $\textit{let}\;a = (r^\uparrow)\backslash s\;\textit{in}$\\                
+  & & $\;\;\textit{if}\; \textit{bnullable}(a)$\\
+  & & $\;\;\textit{then}\;\textit{decode}\,(\textit{bmkeps}\,a)\,r$\\
+  & & $\;\;\textit{else}\;\textit{None}$
+\end{tabular}
+\end{center}
+
+\noindent
+In this definition $\_\backslash s$ is the  generalisation  of the derivative
+operation from characters to strings (just like the derivatives for un-annotated
+regular expressions).
+
+Remember that one of the important reasons we introduced bitcodes
+is that they can make simplification more structured and therefore
+help guarantee correctness.
+
+\subsection*{Our Simplification Rules}
+
+In this section we introduce aggressive (in terms of size) simplification rules
+on annotated regular expressions
+in order to keep derivatives small. Such simplifications are promising
+as we have
+generated test data that show
+that a good tight bound can be achieved. Obviously we could only
+partially cover  the search space as there are infinitely many regular
+expressions and strings. 
+
+One modification we introduced is to allow a list of annotated regular
+expressions in the $\sum$ constructor. This allows us to not just
+delete unnecessary $\ZERO$s and $\ONE$s from regular expressions, but
+also unnecessary ``copies'' of regular expressions (very similar to
+simplifying $r + r$ to just $r$, but in a more general setting). Another
+modification is that we use simplification rules inspired by Antimirov's
+work on partial derivatives. They maintain the idea that only the first
+``copy'' of a regular expression in an alternative contributes to the
+calculation of a POSIX value. All subsequent copies can be pruned away from
+the regular expression. A recursive definition of our  simplification function 
+that looks somewhat similar to our Scala code is given below:
+%\comment{Use $\ZERO$, $\ONE$ and so on. 
+%Is it $ALTS$ or $ALTS$?}\\
+
+\begin{center}
+  \begin{tabular}{@{}lcl@{}}
+   
+  $\textit{simp} \; (_{bs}a_1\cdot a_2)$ & $\dn$ & $ (\textit{simp} \; a_1, \textit{simp}  \; a_2) \; \textit{match} $ \\
+   &&$\quad\textit{case} \; (\ZERO, \_) \Rightarrow  \ZERO$ \\
+   &&$\quad\textit{case} \; (\_, \ZERO) \Rightarrow  \ZERO$ \\
+   &&$\quad\textit{case} \;  (\ONE, a_2') \Rightarrow  \textit{fuse} \; bs \;  a_2'$ \\
+   &&$\quad\textit{case} \; (a_1', \ONE) \Rightarrow  \textit{fuse} \; bs \;  a_1'$ \\
+   &&$\quad\textit{case} \; (a_1', a_2') \Rightarrow   _{bs}a_1' \cdot a_2'$ \\
+
+  $\textit{simp} \; (_{bs}\sum \textit{as})$ & $\dn$ & $\textit{distinct}( \textit{flatten} ( \textit{as.map(simp)})) \; \textit{match} $ \\
+  &&$\quad\textit{case} \; [] \Rightarrow  \ZERO$ \\
+   &&$\quad\textit{case} \; a :: [] \Rightarrow  \textit{fuse}\;bs\;a$ \\
+   &&$\quad\textit{case} \;  as' \Rightarrow _{bs}\sum \textit{as'}$\\ 
+
+   $\textit{simp} \; a$ & $\dn$ & $\textit{a} \qquad \textit{otherwise}$   
+\end{tabular}    
+\end{center}    
+
+\noindent
+The simplification function does pattern matching on the regular expression.
+When it detects that the regular expression is an alternative or
+sequence, it will try to simplify its children regular expressions
+recursively and then see if one of the children turns into $\ZERO$ or
+$\ONE$, which might trigger further simplification at the current level.
+The most involved part is the $\sum$ clause, where we use two
+auxiliary functions $\textit{flatten}$ and $\textit{distinct}$ to open up nested
+alternatives and reduce as many duplicates as possible. Function
+$\textit{distinct}$ keeps only the first occurring copy and removes
+all later duplicates. Function $\textit{flatten}$ opens up nested $\sum$s.
+Its recursive definition is given below:
+
+ \begin{center}
+  \begin{tabular}{@{}lcl@{}}
+  $\textit{flatten} \; (_{bs}\sum \textit{as}) :: \textit{as'}$ & $\dn$ & $(\textit{map} \;
+     (\textit{fuse}\;bs)\; \textit{as}) \; @ \; \textit{flatten} \; as' $ \\
+  $\textit{flatten} \; \ZERO :: as'$ & $\dn$ & $ \textit{flatten} \;  \textit{as'} $ \\
+    $\textit{flatten} \; a :: as'$ & $\dn$ & $a :: \textit{flatten} \; \textit{as'}$ \quad(otherwise) 
+\end{tabular}    
+\end{center}  
+
+\noindent
+Here $\textit{flatten}$ behaves like the traditional functional programming flatten
+function, except that it also removes $\ZERO$s. Or in terms of regular expressions, it
+removes parentheses, for example changing $a+(b+c)$ into $a+b+c$.
+
+Having defined the $\simp$ function,
+we can use the previous notation of  natural
+extension from derivative w.r.t.~character to derivative
+w.r.t.~string:%\comment{simp in  the [] case?}
+
+\begin{center}
+\begin{tabular}{lcl}
+$r \backslash_{simp} (c\!::\!s) $ & $\dn$ & $(r \backslash_{simp}\, c) \backslash_{simp}\, s$ \\
+$r \backslash_{simp} [\,] $ & $\dn$ & $r$
+\end{tabular}
+\end{center}
+
+\noindent
+to obtain an optimised version of the algorithm:
+
+ \begin{center}
+\begin{tabular}{lcl}
+  $\textit{blexer\_simp}\;r\,s$ & $\dn$ &
+      $\textit{let}\;a = (r^\uparrow)\backslash_{simp}\, s\;\textit{in}$\\                
+  & & $\;\;\textit{if}\; \textit{bnullable}(a)$\\
+  & & $\;\;\textit{then}\;\textit{decode}\,(\textit{bmkeps}\,a)\,r$\\
+  & & $\;\;\textit{else}\;\textit{None}$
+\end{tabular}
+\end{center}
+
+\noindent
+This algorithm keeps the regular expression size small, for example,
+with this simplification our previous $(a + aa)^*$ example's 8000 nodes
+will be reduced to just 6 and stays constant, no matter how long the
+input string is.
+
+
  
 Derivatives give a simple solution
 to the problem of matching a string $s$ with a regular
@@ -430,14 +1300,25 @@
 
 %----------------------------------------------------------------------------------------
 
-\section{Existing Practical Approaches}
+\section{Engineering and Academic Approaches to Deal with Catastrophic Backtracking}
 
 The reason behind is that regex libraries in popular language engines
  often want to support richer constructs
 than  the most basic regular expressions, and lexing rather than matching
 is needed for sub-match extraction.
 
-
+There is also static analysis work on regular expressions that
+have potentially exponential behaviour. Rathnayake and Thielecke 
+\parencite{Rathnayake2014StaticAF} proposed an algorithm
+that detects regular expressions triggering exponential
+behaviour on backtracking matchers.
+People have also developed static analysis methods for
+generating non-linear polynomial worst-time estimates
+for regexes, attack strings that exploit the worst-time 
+scenario, and ``attack automata'' that generate
+attack strings.
+For a comprehensive analysis, please refer to Weideman's thesis 
+\parencite{Weideman2017Static}.
 
 \subsection{DFA Approach}
 
--- a/PhdThesisRealOne/LaTeXTemplates_masters-doctoral-thesis_v2/Chapters/Chapter2.aux	Wed Mar 23 10:09:32 2022 +0000
+++ b/PhdThesisRealOne/LaTeXTemplates_masters-doctoral-thesis_v2/Chapters/Chapter2.aux	Thu Mar 24 20:52:34 2022 +0000
@@ -1,16 +1,16 @@
 \relax 
 \providecommand\hyper@newdestlabel[2]{}
-\@writefile{toc}{\contentsline {chapter}{\numberline {2}Chapter Title Here}{13}{chapter.41}}
+\@writefile{toc}{\contentsline {chapter}{\numberline {2}Chapter Title Here}{23}{chapter.50}}
 \@writefile{lof}{\addvspace {10\p@ }}
 \@writefile{lot}{\addvspace {10\p@ }}
-\newlabel{ChapterX}{{2}{13}{Chapter Title Here}{chapter.41}{}}
-\@writefile{toc}{\contentsline {section}{\numberline {2.1}Properties of $\delimiter "026E30F c$}{13}{section.42}}
-\@writefile{toc}{\contentsline {subsection}{\numberline {2.1.1}function $\delimiter "026E30F c$ is not 1-to-1}{13}{subsection.43}}
-\@writefile{toc}{\contentsline {subsection}{\numberline {2.1.2}Subsection 1}{13}{subsection.44}}
-\@writefile{toc}{\contentsline {subsection}{\numberline {2.1.3}Subsection 2}{13}{subsection.45}}
-\@writefile{toc}{\contentsline {section}{\numberline {2.2}Main Section 2}{14}{section.46}}
+\newlabel{ChapterX}{{2}{23}{Chapter Title Here}{chapter.50}{}}
+\@writefile{toc}{\contentsline {section}{\numberline {2.1}Properties of $\delimiter "026E30F c$}{23}{section.51}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {2.1.1}function $\delimiter "026E30F c$ is not 1-to-1}{23}{subsection.52}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {2.1.2}Subsection 1}{23}{subsection.53}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {2.1.3}Subsection 2}{23}{subsection.54}}
+\@writefile{toc}{\contentsline {section}{\numberline {2.2}Main Section 2}{24}{section.55}}
 \@setckpt{Chapters/Chapter2}{
-\setcounter{page}{15}
+\setcounter{page}{25}
 \setcounter{equation}{0}
 \setcounter{enumi}{0}
 \setcounter{enumii}{0}
@@ -32,7 +32,7 @@
 \setcounter{ContinuedFloat}{0}
 \setcounter{Item}{0}
 \setcounter{Hfootnote}{1}
-\setcounter{bookmark@seq@number}{32}
+\setcounter{bookmark@seq@number}{35}
 \setcounter{tabx@nest}{0}
 \setcounter{listtotal}{0}
 \setcounter{listcount}{0}
--- a/PhdThesisRealOne/LaTeXTemplates_masters-doctoral-thesis_v2/Chapters/Chapter3.aux	Wed Mar 23 10:09:32 2022 +0000
+++ b/PhdThesisRealOne/LaTeXTemplates_masters-doctoral-thesis_v2/Chapters/Chapter3.aux	Thu Mar 24 20:52:34 2022 +0000
@@ -1,15 +1,15 @@
 \relax 
 \providecommand\hyper@newdestlabel[2]{}
-\@writefile{toc}{\contentsline {chapter}{\numberline {3}Common Identities In Simplification-Related Functions}{15}{chapter.47}}
+\@writefile{toc}{\contentsline {chapter}{\numberline {3}Common Identities In Simplification-Related Functions}{25}{chapter.56}}
 \@writefile{lof}{\addvspace {10\p@ }}
 \@writefile{lot}{\addvspace {10\p@ }}
-\newlabel{ChapterX}{{3}{15}{Common Identities In Simplification-Related Functions}{chapter.47}{}}
-\@writefile{toc}{\contentsline {section}{\numberline {3.1}Idempotency of $\mathit  {simp}$}{15}{section.48}}
-\@writefile{toc}{\contentsline {subsection}{\numberline {3.1.1}Syntactic Equivalence Under $\mathit  {simp}$}{15}{subsection.50}}
-\@writefile{toc}{\contentsline {subsection}{\numberline {3.1.2}Subsection 2}{15}{subsection.51}}
-\@writefile{toc}{\contentsline {section}{\numberline {3.2}Main Section 2}{15}{section.52}}
+\newlabel{ChapterX}{{3}{25}{Common Identities In Simplification-Related Functions}{chapter.56}{}}
+\@writefile{toc}{\contentsline {section}{\numberline {3.1}Idempotency of $\mathit  {simp}$}{25}{section.57}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {3.1.1}Syntactic Equivalence Under $\mathit  {simp}$}{25}{subsection.59}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {3.1.2}Subsection 2}{25}{subsection.60}}
+\@writefile{toc}{\contentsline {section}{\numberline {3.2}Main Section 2}{25}{section.61}}
 \@setckpt{Chapters/Chapter3}{
-\setcounter{page}{16}
+\setcounter{page}{26}
 \setcounter{equation}{1}
 \setcounter{enumi}{0}
 \setcounter{enumii}{0}
@@ -31,7 +31,7 @@
 \setcounter{ContinuedFloat}{0}
 \setcounter{Item}{0}
 \setcounter{Hfootnote}{1}
-\setcounter{bookmark@seq@number}{37}
+\setcounter{bookmark@seq@number}{40}
 \setcounter{tabx@nest}{0}
 \setcounter{listtotal}{0}
 \setcounter{listcount}{0}
--- a/PhdThesisRealOne/LaTeXTemplates_masters-doctoral-thesis_v2/example.bib	Wed Mar 23 10:09:32 2022 +0000
+++ b/PhdThesisRealOne/LaTeXTemplates_masters-doctoral-thesis_v2/example.bib	Thu Mar 24 20:52:34 2022 +0000
@@ -6,7 +6,13 @@
 
 %% Saved with string encoding Unicode (UTF-8) 
 
-
+@article{Rathnayake2014StaticAF,
+  title={Static Analysis for Regular Expression Exponential Runtime via Substructural Logics},
+  author={Asiri Rathnayake and Hayo Thielecke},
+  journal={ArXiv},
+  year={2014},
+  volume={abs/1405.7058}
+}
 
 @inproceedings{RibeiroAgda2017,
 	abstract = {We describe the formalization of a regular expression (RE) parsing algorithm that produces a bit representation of its parse tree in the dependently typed language Agda. The algorithm computes bit-codes using Brzozowski derivatives and we prove that produced codes are equivalent to parse trees ensuring soundness and completeness w.r.t an inductive RE semantics. We include the certified algorithm in a tool developed by us, named verigrep, for regular expression based search in the style of the well known GNU grep. Practical experiments conducted with this tool are reported.},
--- a/PhdThesisRealOne/LaTeXTemplates_masters-doctoral-thesis_v2/main.aux	Wed Mar 23 10:09:32 2022 +0000
+++ b/PhdThesisRealOne/LaTeXTemplates_masters-doctoral-thesis_v2/main.aux	Thu Mar 24 20:52:34 2022 +0000
@@ -42,19 +42,31 @@
 \citation{Brzozowski1964}
 \abx@aux@cite{Brzozowski1964}
 \abx@aux@segm{0}{0}{Brzozowski1964}
-\abx@aux@page{1}{2}
+\abx@aux@page{1}{4}
 \citation{Sulzmann2014}
 \abx@aux@cite{Sulzmann2014}
 \abx@aux@segm{0}{0}{Sulzmann2014}
 \citation{AusafDyckhoffUrban2016}
 \abx@aux@cite{AusafDyckhoffUrban2016}
 \abx@aux@segm{0}{0}{AusafDyckhoffUrban2016}
+\citation{Sulzmann2014}
+\abx@aux@segm{0}{0}{Sulzmann2014}
+\citation{Sulzmann2014}
+\abx@aux@segm{0}{0}{Sulzmann2014}
+\citation{AusafDyckhoffUrban2016}
+\abx@aux@segm{0}{0}{AusafDyckhoffUrban2016}
 \citation{AusafDyckhoffUrban2016}
 \abx@aux@segm{0}{0}{AusafDyckhoffUrban2016}
 \citation{Sulzmann2014}
 \abx@aux@segm{0}{0}{Sulzmann2014}
 \citation{Sulzmann2014}
 \abx@aux@segm{0}{0}{Sulzmann2014}
+\citation{Rathnayake2014StaticAF}
+\abx@aux@cite{Rathnayake2014StaticAF}
+\abx@aux@segm{0}{0}{Rathnayake2014StaticAF}
+\citation{Weideman2017Static}
+\abx@aux@cite{Weideman2017Static}
+\abx@aux@segm{0}{0}{Weideman2017Static}
 \citation{Brzozowski1964}
 \abx@aux@segm{0}{0}{Brzozowski1964}
 \citation{Owens2008}
@@ -69,11 +81,11 @@
 \citation{RibeiroAgda2017}
 \abx@aux@cite{RibeiroAgda2017}
 \abx@aux@segm{0}{0}{RibeiroAgda2017}
-\abx@aux@page{2}{5}
-\abx@aux@page{3}{5}
-\abx@aux@page{4}{5}
-\abx@aux@page{5}{5}
-\abx@aux@page{6}{5}
+\abx@aux@page{2}{15}
+\abx@aux@page{3}{15}
+\abx@aux@page{4}{15}
+\abx@aux@page{5}{16}
+\abx@aux@page{6}{16}
 \citation{Reference1}
 \abx@aux@cite{Reference1}
 \abx@aux@segm{0}{0}{Reference1}
@@ -94,9 +106,9 @@
 \abx@aux@defaultrefcontext{0}{Krauss2011}{nyt/global//global/global}
 \abx@aux@defaultrefcontext{0}{Owens2008}{nyt/global//global/global}
 \abx@aux@defaultrefcontext{0}{RibeiroAgda2017}{nyt/global//global/global}
-\@writefile{toc}{\contentsline {chapter}{Bibliography}{19}{appendix*.55}}
-\abx@aux@page{7}{19}
-\abx@aux@page{8}{19}
-\abx@aux@page{9}{19}
-\abx@aux@page{10}{19}
-\abx@aux@page{11}{19}
+\@writefile{toc}{\contentsline {chapter}{Bibliography}{29}{appendix*.64}}
+\abx@aux@page{7}{29}
+\abx@aux@page{8}{29}
+\abx@aux@page{9}{29}
+\abx@aux@page{10}{29}
+\abx@aux@page{11}{29}
--- a/PhdThesisRealOne/LaTeXTemplates_masters-doctoral-thesis_v2/main.lof	Wed Mar 23 10:09:32 2022 +0000
+++ b/PhdThesisRealOne/LaTeXTemplates_masters-doctoral-thesis_v2/main.lof	Thu Mar 24 20:52:34 2022 +0000
@@ -1,7 +1,7 @@
 \boolfalse {citerequest}\boolfalse {citetracker}\boolfalse {pagetracker}\boolfalse {backtracker}\relax 
 \babel@toc {english}{}
 \addvspace {10\p@ }
-\contentsline {figure}{\numberline {1.1}{\ignorespaces An Electron}}{11}{figure.caption.36}
+\contentsline {figure}{\numberline {1.1}{\ignorespaces An Electron}}{21}{figure.caption.45}
 \addvspace {10\p@ }
 \addvspace {10\p@ }
 \addvspace {10\p@ }
--- a/PhdThesisRealOne/LaTeXTemplates_masters-doctoral-thesis_v2/main.log	Wed Mar 23 10:09:32 2022 +0000
+++ b/PhdThesisRealOne/LaTeXTemplates_masters-doctoral-thesis_v2/main.log	Thu Mar 24 20:52:34 2022 +0000
@@ -1,4 +1,4 @@
-This is pdfTeX, Version 3.14159265-2.6-1.40.19 (TeX Live 2018) (preloaded format=pdflatex 2019.2.7)  18 MAR 2022 21:25
+This is pdfTeX, Version 3.14159265-2.6-1.40.19 (TeX Live 2018) (preloaded format=pdflatex 2019.2.7)  23 MAR 2022 14:08
 entering extended mode
  restricted \write18 enabled.
  file:line:error style messages enabled.
@@ -1846,20 +1846,54 @@
 \pgfarrowlinewidth=\dimen314
 )))
 (/usr/local/texlive/2018/texmf-dist/tex/generic/pgf/frontendlayer/tikz/librarie
+s/tikzlibraryautomata.code.tex
+File: tikzlibraryautomata.code.tex 2008/07/14 v3.0.1a (rcs-revision 1.3)
+
+(/usr/local/texlive/2018/texmf-dist/tex/generic/pgf/frontendlayer/tikz/librarie
+s/tikzlibraryshapes.multipart.code.tex
+File: tikzlibraryshapes.multipart.code.tex 2008/01/09 v3.0.1a (rcs-revision 1.1
+)
+
+(/usr/local/texlive/2018/texmf-dist/tex/generic/pgf/libraries/shapes/pgflibrary
+shapes.multipart.code.tex
+File: pgflibraryshapes.multipart.code.tex 2010/01/07 v3.0.1a (rcs-revision 1.2)
+
+\pgfnodepartlowerbox=\box69
+\pgfnodeparttwobox=\box70
+\pgfnodepartthreebox=\box71
+\pgfnodepartfourbox=\box72
+\pgfnodeparttwentybox=\box73
+\pgfnodepartnineteenbox=\box74
+\pgfnodeparteighteenbox=\box75
+\pgfnodepartseventeenbox=\box76
+\pgfnodepartsixteenbox=\box77
+\pgfnodepartfifteenbox=\box78
+\pgfnodepartfourteenbox=\box79
+\pgfnodepartthirteenbox=\box80
+\pgfnodeparttwelvebox=\box81
+\pgfnodepartelevenbox=\box82
+\pgfnodeparttenbox=\box83
+\pgfnodepartninebox=\box84
+\pgfnodeparteightbox=\box85
+\pgfnodepartsevenbox=\box86
+\pgfnodepartsixbox=\box87
+\pgfnodepartfivebox=\box88
+)))
+(/usr/local/texlive/2018/texmf-dist/tex/generic/pgf/frontendlayer/tikz/librarie
 s/tikzlibrarypositioning.code.tex
 File: tikzlibrarypositioning.code.tex 2008/10/06 v3.0.1a (rcs-revision 1.7)
 )
-Package hyperref Info: Option `bookmarksopen' set `true' on input line 102.
-Package hyperref Info: Option `hypertexnames' set `false' on input line 102.
-Package hyperref Info: Option `colorlinks' set `true' on input line 102.
-Package hyperref Info: Option `unicode' set `true' on input line 102.
+Package hyperref Info: Option `bookmarksopen' set `true' on input line 103.
+Package hyperref Info: Option `hypertexnames' set `false' on input line 103.
+Package hyperref Info: Option `colorlinks' set `true' on input line 103.
+Package hyperref Info: Option `unicode' set `true' on input line 103.
 
 (/usr/local/texlive/2018/texmf-dist/tex/latex/hyperref/puenc.def
 File: puenc.def 2018/02/06 v6.86b Hyperref: PDF Unicode definition (HO)
 Now handling font encoding PU ...
 ... no UTF-8 mapping file for font encoding PU
 )
-Package hyperref Info: Option `breaklinks' set `true' on input line 102.
+Package hyperref Info: Option `breaklinks' set `true' on input line 103.
 Package biblatex Info: Trying to load language 'english'...
 Package biblatex Info: ... file 'english.lbx' found.
 
@@ -1877,23 +1911,23 @@
 ) (./Appendices/AppendixA.aux))
 \openout1 = `main.aux'.
 
-LaTeX Font Info:    Checking defaults for OML/cmm/m/it on input line 102.
-LaTeX Font Info:    ... okay on input line 102.
-LaTeX Font Info:    Checking defaults for T1/cmr/m/n on input line 102.
-LaTeX Font Info:    ... okay on input line 102.
-LaTeX Font Info:    Checking defaults for OT1/cmr/m/n on input line 102.
-LaTeX Font Info:    ... okay on input line 102.
-LaTeX Font Info:    Checking defaults for OMS/cmsy/m/n on input line 102.
-LaTeX Font Info:    ... okay on input line 102.
-LaTeX Font Info:    Checking defaults for OMX/cmex/m/n on input line 102.
-LaTeX Font Info:    ... okay on input line 102.
-LaTeX Font Info:    Checking defaults for U/cmr/m/n on input line 102.
-LaTeX Font Info:    ... okay on input line 102.
-LaTeX Font Info:    Checking defaults for PD1/pdf/m/n on input line 102.
-LaTeX Font Info:    ... okay on input line 102.
-LaTeX Font Info:    Checking defaults for PU/pdf/m/n on input line 102.
-LaTeX Font Info:    ... okay on input line 102.
-LaTeX Font Info:    Try loading font information for T1+ppl on input line 102.
+LaTeX Font Info:    Checking defaults for OML/cmm/m/it on input line 103.
+LaTeX Font Info:    ... okay on input line 103.
+LaTeX Font Info:    Checking defaults for T1/cmr/m/n on input line 103.
+LaTeX Font Info:    ... okay on input line 103.
+LaTeX Font Info:    Checking defaults for OT1/cmr/m/n on input line 103.
+LaTeX Font Info:    ... okay on input line 103.
+LaTeX Font Info:    Checking defaults for OMS/cmsy/m/n on input line 103.
+LaTeX Font Info:    ... okay on input line 103.
+LaTeX Font Info:    Checking defaults for OMX/cmex/m/n on input line 103.
+LaTeX Font Info:    ... okay on input line 103.
+LaTeX Font Info:    Checking defaults for U/cmr/m/n on input line 103.
+LaTeX Font Info:    ... okay on input line 103.
+LaTeX Font Info:    Checking defaults for PD1/pdf/m/n on input line 103.
+LaTeX Font Info:    ... okay on input line 103.
+LaTeX Font Info:    Checking defaults for PU/pdf/m/n on input line 103.
+LaTeX Font Info:    ... okay on input line 103.
+LaTeX Font Info:    Try loading font information for T1+ppl on input line 103.
 
 (/usr/local/texlive/2018/texmf-dist/tex/latex/psnfss/t1ppl.fd
 File: t1ppl.fd 2001/06/04 font definitions for T1/ppl.
@@ -1984,7 +2018,7 @@
 [Loading MPS to PDF converter (version 2006.09.02).]
 \scratchcounter=\count461
 \scratchdimen=\dimen315
-\scratchbox=\box69
+\scratchbox=\box89
 \nofMPsegments=\count462
 \nofMParguments=\count463
 \everyMPshowfont=\toks43
@@ -2050,16 +2084,16 @@
 * (1in=72.27pt=25.4mm, 1cm=28.453pt)
 
 Package scrlayer Info: Setting magic \footheight to \baselineskip while
-(scrlayer)             \begin{document} on input line 102.
-Package scrbase Info: activating english \abbrevname on input line 102.
-Package scrbase Info: activating english \byname on input line 102.
-Package scrbase Info: activating english \acknowledgementname on input line 102
+(scrlayer)             \begin{document} on input line 103.
+Package scrbase Info: activating english \abbrevname on input line 103.
+Package scrbase Info: activating english \byname on input line 103.
+Package scrbase Info: activating english \acknowledgementname on input line 103
 .
-Package scrbase Info: activating english \authorshipname on input line 102.
-Package scrbase Info: activating english \constantsname on input line 102.
-Package scrbase Info: activating english \symbolsname on input line 102.
-\AtBeginShipoutBox=\box70
-Package hyperref Info: Link coloring ON on input line 102.
+Package scrbase Info: activating english \authorshipname on input line 103.
+Package scrbase Info: activating english \constantsname on input line 103.
+Package scrbase Info: activating english \symbolsname on input line 103.
+\AtBeginShipoutBox=\box90
+Package hyperref Info: Link coloring ON on input line 103.
 (/usr/local/texlive/2018/texmf-dist/tex/latex/hyperref/nameref.sty
 Package: nameref 2016/05/21 v2.44 Cross-referencing by name of section
 
@@ -2068,9 +2102,9 @@
 )
 \c@section@level=\count467
 )
-LaTeX Info: Redefining \ref on input line 102.
-LaTeX Info: Redefining \pageref on input line 102.
-LaTeX Info: Redefining \nameref on input line 102.
+LaTeX Info: Redefining \ref on input line 103.
+LaTeX Info: Redefining \pageref on input line 103.
+LaTeX Info: Redefining \nameref on input line 103.
 
 (./main.out) (./main.out)
 \@outlinefile=\write6
@@ -2093,39 +2127,39 @@
 (biblatex)                - Can't use 'location' + 'address'.
 
 )
-Package biblatex Info: Reference section=0 on input line 102.
-Package biblatex Info: Reference segment=0 on input line 102.
+Package biblatex Info: Reference section=0 on input line 103.
+Package biblatex Info: Reference segment=0 on input line 103.
  ABD: EveryShipout initializing macros
 
 Package pgfplots Warning: running in backwards compatibility mode (unsuitable t
 ick labels; missing features). Consider writing \pgfplotsset{compat=1.16} into 
 your preamble.
- on input line 102.
+ on input line 103.
 
-LaTeX Font Info:    Try loading font information for OT1+ppl on input line 102.
+LaTeX Font Info:    Try loading font information for OT1+ppl on input line 103.
 
 (/usr/local/texlive/2018/texmf-dist/tex/latex/psnfss/ot1ppl.fd
 File: ot1ppl.fd 2001/06/04 font definitions for OT1/ppl.
 )
-LaTeX Font Info:    Try loading font information for OML+zplm on input line 102
+LaTeX Font Info:    Try loading font information for OML+zplm on input line 103
 .
 
 (/usr/local/texlive/2018/texmf-dist/tex/latex/psnfss/omlzplm.fd
 File: omlzplm.fd 2002/09/08 Fontinst v1.914 font definitions for OML/zplm.
 )
-LaTeX Font Info:    Try loading font information for OMS+zplm on input line 102
+LaTeX Font Info:    Try loading font information for OMS+zplm on input line 103
 .
 
 (/usr/local/texlive/2018/texmf-dist/tex/latex/psnfss/omszplm.fd
 File: omszplm.fd 2002/09/08 Fontinst v1.914 font definitions for OMS/zplm.
 )
-LaTeX Font Info:    Try loading font information for OMX+zplm on input line 102
+LaTeX Font Info:    Try loading font information for OMX+zplm on input line 103
 .
 
 (/usr/local/texlive/2018/texmf-dist/tex/latex/psnfss/omxzplm.fd
 File: omxzplm.fd 2002/09/08 Fontinst v1.914 font definitions for OMX/zplm.
 )
-LaTeX Font Info:    Try loading font information for OT1+zplm on input line 102
+LaTeX Font Info:    Try loading font information for OT1+zplm on input line 103
 .
 
 (/usr/local/texlive/2018/texmf-dist/tex/latex/psnfss/ot1zplm.fd
@@ -2133,7 +2167,7 @@
 )
 1: section
 LaTeX Font Info:    Font shape `T1/ppl/bx/n' in size <20.74> not available
-(Font)              Font shape `T1/ppl/b/n' tried instead on input line 120.
+(Font)              Font shape `T1/ppl/b/n' tried instead on input line 121.
 [1
 
 
@@ -2145,15 +2179,15 @@
 --------------------
  Declaration of Authorship
 --------------------
-LaTeX Font Info:    Try loading font information for OMS+ppl on input line 162.
+LaTeX Font Info:    Try loading font information for OMS+ppl on input line 163.
 
 (/usr/local/texlive/2018/texmf-dist/tex/latex/psnfss/omsppl.fd
 File: omsppl.fd 
 )
 LaTeX Font Info:    Font shape `OMS/ppl/m/n' in size <10.95> not available
-(Font)              Font shape `OMS/cmsy/m/n' tried instead on input line 162.
+(Font)              Font shape `OMS/cmsy/m/n' tried instead on input line 163.
 
-Underfull \hbox (badness 10000) in paragraph at lines 167--168
+Underfull \hbox (badness 10000) in paragraph at lines 168--169
 
  []
 
@@ -2167,7 +2201,7 @@
  Abstract
 --------------------
 LaTeX Font Info:    Font shape `T1/ppl/bx/n' in size <10.95> not available
-(Font)              Font shape `T1/ppl/b/n' tried instead on input line 193.
+(Font)              Font shape `T1/ppl/b/n' tried instead on input line 194.
 1: section
 [7] [8
 
@@ -2180,15 +2214,19 @@
 
 ]
 LaTeX Font Info:    Font shape `T1/ppl/bx/n' in size <24.88> not available
-(Font)              Font shape `T1/ppl/b/n' tried instead on input line 242.
+(Font)              Font shape `T1/ppl/b/n' tried instead on input line 243.
  (./main.toc
-LaTeX Font Info:    Try loading font information for T1+cmtt on input line 17.
+Overfull \hbox (9.30757pt too wide) in paragraph at lines 12--12
+ [][] [][][]\T1/ppl/m/n/10.95 Engineering and Aca-demic Ap-proaches to Deal wit
+h Catas-trophic Back- 
+ []
 
+LaTeX Font Info:    Try loading font information for T1+cmtt on input line 20.
 (/usr/local/texlive/2018/texmf-dist/tex/latex/base/t1cmtt.fd
 File: t1cmtt.fd 2014/09/29 v2.5h Standard LaTeX font definitions
 )
 LaTeX Font Info:    Font shape `T1/cmtt/bx/n' in size <10.95> not available
-(Font)              Font shape `T1/cmtt/m/n' tried instead on input line 17.
+(Font)              Font shape `T1/cmtt/m/n' tried instead on input line 20.
 
 1: section
 [11])
@@ -2241,16 +2279,17 @@
 LaTeX Font Info:    Font shape `T1/ppl/bx/n' in size <17.28> not available
 (Font)              Font shape `T1/ppl/b/n' tried instead on input line 3.
 
-Overfull \hbox (5.82434pt too wide) in paragraph at lines 50--62
-\T1/ppl/m/n/10.95 Regular ex-pres-sion match-ing and lex-ing has been widely-us
-ed and well-implemented
+Underfull \hbox (badness 10000) in paragraph at lines 48--70
+
  []
 
+LaTeX Font Info:    Font shape `T1/ppl/bx/n' in size <14.4> not available
+(Font)              Font shape `T1/ppl/b/n' tried instead on input line 71.
 PGFPlots: reading {re-js.data}
 PGFPlots: reading {re-python2.data}
 PGFPlots: reading {re-java.data}
 
-Overfull \hbox (6.56624pt too wide) in paragraph at lines 78--144
+Overfull \hbox (6.56624pt too wide) in paragraph at lines 109--175
  [] 
  []
 
@@ -2258,89 +2297,153 @@
 [1
 
 
-]
-LaTeX Font Info:    Font shape `T1/ppl/bx/n' in size <14.4> not available
-(Font)              Font shape `T1/ppl/b/n' tried instead on input line 224.
- [2]
+] [2] [3]
+LaTeX Font Info:    Font shape `T1/ppl/bx/n' in size <12> not available
+(Font)              Font shape `T1/ppl/b/n' tried instead on input line 355.
 
-LaTeX Warning: Citation 'Sulzmann2014' on page 3 undefined on input line 345.
+Overfull \hbox (2.81108pt too wide) in paragraph at lines 386--386
+[]\T1/ppl/b/n/14.4 Preliminaries about Lex-ing Us-ing Br-zo-zowski deriva-tives
+ 
+ []
+
+[4] [5] [6]
+
+LaTeX Warning: Citation 'Sulzmann2014' on page 7 undefined on input line 610.
 
 
-LaTeX Warning: Citation 'AusafDyckhoffUrban2016' on page 3 undefined on input l
-ine 355.
+Overfull \hbox (4.93819pt too wide) in paragraph at lines 605--613
+\T1/ppl/m/n/10.95 the world with-out sim-pli-fi-ca-tion first for a mo-ment. Su
+lz-mann and Lu []
+ []
+
+[7] [8]
+Overfull \hbox (0.91322pt too wide) in paragraph at lines 769--772
+[]\T1/ppl/m/n/10.95 We have men-tioned be-fore that deriva-tives with-out sim-p
+li-fi-ca-tion can get clumsy,
+ []
+
+
+LaTeX Warning: Citation 'AusafDyckhoffUrban2016' on page 9 undefined on input l
+ine 783.
 
-[3]
-Overfull \hbox (9.67143pt too wide) in paragraph at lines 365--372
+[9]
+
+LaTeX Warning: Citation 'Sulzmann2014' on page 10 undefined on input line 876.
+
+[10] [11] [12]
+
+LaTeX Warning: Citation 'Sulzmann2014' on page 13 undefined on input line 1205.
+
+
+
+LaTeX Warning: Citation 'AusafDyckhoffUrban2016' on page 13 undefined on input 
+line 1215.
+
+[13]
+Overfull \hbox (9.67143pt too wide) in paragraph at lines 1225--1232
  [] 
  []
 
 
-LaTeX Warning: Citation 'AusafDyckhoffUrban2016' on page 4 undefined on input l
-ine 383.
+LaTeX Warning: Citation 'AusafDyckhoffUrban2016' on page 14 undefined on input 
+line 1243.
 
 
-LaTeX Warning: Citation 'Sulzmann2014' on page 4 undefined on input line 390.
+LaTeX Warning: Citation 'Sulzmann2014' on page 14 undefined on input line 1250.
 
 
-Overfull \hbox (38.38536pt too wide) in paragraph at lines 389--402
+
+Overfull \hbox (38.38536pt too wide) in paragraph at lines 1249--1262
 []\T1/ppl/m/n/10.95 Sulzmann and Lu over-come this ``growth prob-lem'' in a sec
 -ond al-go-rithm []
  []
 
 
-LaTeX Warning: Citation 'Sulzmann2014' on page 4 undefined on input line 409.
+LaTeX Warning: Citation 'Sulzmann2014' on page 14 undefined on input line 1269.
+
+
+[14]
+Overfull \hbox (11.45154pt too wide) in paragraph at lines 1293--1293
+[]\T1/ppl/b/n/14.4 Engineering and Aca-demic Ap-proaches to Deal with Catas-
+ []
+
+
+LaTeX Warning: Citation 'Rathnayake2014StaticAF' on page 15 undefined on input 
+line 1302.
+
+
+LaTeX Warning: Citation 'Weideman2017Static' on page 15 undefined on input line
+ 1311.
+
 
-[4]
-LaTeX Font Info:    Font shape `T1/ppl/bx/n' in size <12> not available
-(Font)              Font shape `T1/ppl/b/n' tried instead on input line 442.
- [5] [6]
+Overfull \hbox (0.98518pt too wide) in paragraph at lines 1300--1312
+\T1/ppl/m/n/10.95 poen-tial be-havi-ous. Rath-nayake and Thi-elecke ([]) pro-po
+sed
+ []
+
+
+Package scrlayer-scrpage Warning: \headheight to low.
+(scrlayer-scrpage)                At least 27.2pt needed,
+(scrlayer-scrpage)                but only 18.85364pt found.
+(scrlayer-scrpage)                I'll enlarge \headheight, for further
+(scrlayer-scrpage)                processing, but you should do this yourself,
+(scrlayer-scrpage)                e.g., setting geometry's option
+(scrlayer-scrpage)                `head=27.2pt'.
+(scrlayer-scrpage)                I'll also decrease \topmargin on input line 1
+359.
+
+
+Overfull \vbox (8.34636pt too high) has occurred while \output is active []
+
+
+[15] [16]
 LaTeX Font Info:    Font shape `T1/cmtt/bx/n' in size <14.4> not available
-(Font)              Font shape `T1/cmtt/m/n' tried instead on input line 542.
+(Font)              Font shape `T1/cmtt/m/n' tried instead on input line 1413.
 
 
 LaTeX Font Warning: Font shape `T1/cmtt/bx/sl' undefined
-(Font)              using `T1/cmtt/bx/n' instead on input line 563.
+(Font)              using `T1/cmtt/bx/n' instead on input line 1428.
 
-[7]
-Overfull \hbox (8.37132pt too wide) in paragraph at lines 586--587
+[17]
+Overfull \hbox (8.37132pt too wide) in paragraph at lines 1457--1458
 \T1/ppl/m/n/10.95 sided print-ing is as sim-ple as un-com-ment-ing the \T1/cmtt
 /m/it/10.95 oneside \T1/ppl/m/n/10.95 op-tion of the \T1/cmtt/m/n/10.95 documen
 tclass
  []
 
-[8]
+[18]
 
-LaTeX Warning: Citation 'Reference1' on page 9 undefined on input line 600.
+LaTeX Warning: Citation 'Reference1' on page 19 undefined on input line 1471.
 
 
-LaTeX Warning: Citation 'Reference2' on page 9 undefined on input line 600.
+LaTeX Warning: Citation 'Reference2' on page 19 undefined on input line 1471.
 
 
-LaTeX Warning: Citation 'Reference1' on page 9 undefined on input line 600.
+LaTeX Warning: Citation 'Reference1' on page 19 undefined on input line 1471.
 
 
-LaTeX Warning: Citation 'Reference3' on page 9 undefined on input line 600.
+LaTeX Warning: Citation 'Reference3' on page 19 undefined on input line 1471.
 
-[9]
-<Figures/Electron.pdf, id=397, 398.3386pt x 284.52756pt>
+[19] [20]
+<Figures/Electron.pdf, id=468, 398.3386pt x 284.52756pt>
 File: Figures/Electron.pdf Graphic file (type pdf)
 <use Figures/Electron.pdf>
-Package pdftex.def Info: Figures/Electron.pdf  used on input line 668.
+Package pdftex.def Info: Figures/Electron.pdf  used on input line 1539.
 (pdftex.def)             Requested size: 398.33762pt x 284.52686pt.
- [10] [11 <./Figures/Electron.pdf>]
-Overfull \hbox (0.90797pt too wide) in paragraph at lines 714--715
+ [21 <./Figures/Electron.pdf>]
+Overfull \hbox (0.90797pt too wide) in paragraph at lines 1585--1586
 \T1/ppl/m/n/10.95 au-to-mat-i-cally builds a ta-ble of Con-tents by look-ing at
  all the [][]\T1/cmtt/m/n/10.95 \chapter{}[]\T1/ppl/m/n/10.95 , [][]\T1/cmtt/m/
 n/10.95 \section{}
  []
 
 
-Overfull \hbox (0.45015pt too wide) in paragraph at lines 716--717
+Overfull \hbox (0.45015pt too wide) in paragraph at lines 1587--1588
 []\T1/ppl/m/n/10.95 The Ta-ble of Con-tents should only list the sec-tions to t
 hree (3) lev-els. A [][]\T1/cmtt/m/n/10.95 chapter{}
  []
 
-) [12]
+) [22]
 \openout2 = `Chapters/Chapter2.aux'.
 
  (./Chapters/Chapter2.tex
@@ -2375,11 +2478,11 @@
  []
 
 1: section
-[13
+[23
 
 
 
-]) [14]
+]) [24]
 \openout2 = `Chapters/Chapter3.aux'.
 
  (./Chapters/Chapter3.tex
@@ -2402,14 +2505,14 @@
 
 )
 1: section
-[15
+[25
 
 
 
 ]
 \openout2 = `Appendices/AppendixA.aux'.
 
- (./Appendices/AppendixA.tex [16
+ (./Appendices/AppendixA.tex [26
 
 
 
@@ -2417,23 +2520,23 @@
 Appendix A.
 )
 1: section
-[17] [18
+[27] [28
 
 
 ]
-Overfull \hbox (2.23135pt too wide) in paragraph at lines 352--352
+Overfull \hbox (2.23135pt too wide) in paragraph at lines 353--353
 [][]\T1/ppl/m/n/10.95 Coquand, T. and V. Siles (2011). ^^P  A De-ci-sion Pro-ce
 -dure for Reg-u-lar Ex-pres-sion Equiv-
  []
 
 1: section
-[19]
-Package atveryend Info: Empty hook `BeforeClearDocument' on input line 356.
-Package atveryend Info: Empty hook `AfterLastShipout' on input line 356.
+[29]
+Package atveryend Info: Empty hook `BeforeClearDocument' on input line 357.
+Package atveryend Info: Empty hook `AfterLastShipout' on input line 357.
  (./main.aux (./Chapters/Chapter1.aux) (./Chapters/Chapter2.aux)
 (./Chapters/Chapter3.aux) (./Appendices/AppendixA.aux))
-Package atveryend Info: Executing hook `AtVeryEndDocument' on input line 356.
-Package atveryend Info: Executing hook `AtEndAfterFileList' on input line 356.
+Package atveryend Info: Executing hook `AtVeryEndDocument' on input line 357.
+Package atveryend Info: Executing hook `AtEndAfterFileList' on input line 357.
 
 
 Package rerunfilecheck Warning: File `main.out' has changed.
@@ -2441,8 +2544,8 @@
 (rerunfilecheck)                or use package `bookmark'.
 
 Package rerunfilecheck Info: Checksums for `main.out':
-(rerunfilecheck)             Before: A1566BE6F098E555B89E8456A1E7EC35;6165
-(rerunfilecheck)             After:  49DA0DF877C6E4F446C2ABFDA2A76F7F;6180.
+(rerunfilecheck)             Before: 7C4FAB9392C071FEE9D94ACC44DE54BD;7293
+(rerunfilecheck)             After:  2BB1A368827AF4B7D099B0B43EF92AAA;7417.
 
 LaTeX Font Warning: Some font shapes were not available, defaults substituted.
 
@@ -2462,13 +2565,13 @@
 
  ) 
 Here is how much of TeX's memory you used:
- 55923 strings out of 492649
- 1290707 string characters out of 6129623
- 1580170 words of memory out of 5000000
- 58948 multiletter control sequences out of 15000+600000
- 610785 words of font info for 178 fonts, out of 8000000 for 9000
+ 56764 strings out of 492649
+ 1316779 string characters out of 6129623
+ 1611358 words of memory out of 5000000
+ 59750 multiletter control sequences out of 15000+600000
+ 617554 words of font info for 196 fonts, out of 8000000 for 9000
  1141 hyphenation exceptions out of 8191
- 74i,24n,109p,10377b,2427s stack positions out of 5000i,500n,10000p,200000b,80000s
+ 74i,24n,109p,10377b,2424s stack positions out of 5000i,500n,10000p,200000b,80000s
 {/usr/local/texlive/2018/texmf-dist/fonts/enc/dvips/cm-super/cm-super-t1.enc}
 {/usr/local/texlive/2018/texmf-dist/fonts/enc/dvips/base/8r.enc}</usr/local/tex
 live/2018/texmf-dist/fonts/type1/public/amsfonts/cm/cmex10.pfb></usr/local/texl
@@ -2484,10 +2587,10 @@
 ype1/urw/palatino/uplr8a.pfb></usr/local/texlive/2018/texmf-dist/fonts/type1/ur
 w/palatino/uplr8a.pfb></usr/local/texlive/2018/texmf-dist/fonts/type1/urw/palat
 ino/uplri8a.pfb>
-Output written on main.pdf (43 pages, 308963 bytes).
+Output written on main.pdf (53 pages, 352872 bytes).
 PDF statistics:
- 583 PDF objects out of 1000 (max. 8388607)
- 510 compressed objects within 6 object streams
- 104 named destinations out of 1000 (max. 500000)
- 338 words of extra memory for PDF output out of 10000 (max. 10000000)
+ 651 PDF objects out of 1000 (max. 8388607)
+ 568 compressed objects within 6 object streams
+ 123 named destinations out of 1000 (max. 500000)
+ 362 words of extra memory for PDF output out of 10000 (max. 10000000)
 
--- a/PhdThesisRealOne/LaTeXTemplates_masters-doctoral-thesis_v2/main.lot	Wed Mar 23 10:09:32 2022 +0000
+++ b/PhdThesisRealOne/LaTeXTemplates_masters-doctoral-thesis_v2/main.lot	Thu Mar 24 20:52:34 2022 +0000
@@ -1,7 +1,7 @@
 \boolfalse {citerequest}\boolfalse {citetracker}\boolfalse {pagetracker}\boolfalse {backtracker}\relax 
 \babel@toc {english}{}
 \addvspace {10\p@ }
-\contentsline {table}{\numberline {1.1}{\ignorespaces The effects of treatments X and Y on the four groups studied.\relax }}{10}{table.caption.34}
+\contentsline {table}{\numberline {1.1}{\ignorespaces The effects of treatments X and Y on the four groups studied.\relax }}{20}{table.caption.43}
 \addvspace {10\p@ }
 \addvspace {10\p@ }
 \addvspace {10\p@ }
--- a/PhdThesisRealOne/LaTeXTemplates_masters-doctoral-thesis_v2/main.out	Wed Mar 23 10:09:32 2022 +0000
+++ b/PhdThesisRealOne/LaTeXTemplates_masters-doctoral-thesis_v2/main.out	Thu Mar 24 20:52:34 2022 +0000
@@ -1,40 +1,43 @@
 \BOOKMARK [0][-]{section*.1}{\376\377\000D\000e\000c\000l\000a\000r\000a\000t\000i\000o\000n\000\040\000o\000f\000\040\000A\000u\000t\000h\000o\000r\000s\000h\000i\000p}{}% 1
 \BOOKMARK [0][-]{section*.2}{\376\377\000A\000b\000s\000t\000r\000a\000c\000t}{}% 2
 \BOOKMARK [0][-]{section*.3}{\376\377\000A\000c\000k\000n\000o\000w\000l\000e\000d\000g\000e\000m\000e\000n\000t\000s}{}% 3
-\BOOKMARK [0][-]{chapter.14}{\376\377\000I\000n\000t\000r\000o\000d\000u\000c\000t\000i\000o\000n}{}% 4
-\BOOKMARK [1][-]{section.15}{\376\377\000W\000h\000y\000\040\000B\000r\000z\000o\000z\000o\000w\000s\000k\000i}{chapter.14}% 5
-\BOOKMARK [1][-]{section.16}{\376\377\000B\000a\000c\000k\000g\000o\000u\000n\000d}{chapter.14}% 6
-\BOOKMARK [1][-]{section.17}{\376\377\000E\000x\000i\000s\000t\000i\000n\000g\000\040\000P\000r\000a\000c\000t\000i\000c\000a\000l\000\040\000A\000p\000p\000r\000o\000a\000c\000h\000e\000s}{chapter.14}% 7
-\BOOKMARK [2][-]{subsection.18}{\376\377\000D\000F\000A\000\040\000A\000p\000p\000r\000o\000a\000c\000h}{section.17}% 8
-\BOOKMARK [2][-]{subsection.19}{\376\377\000N\000F\000A\000\040\000A\000p\000p\000r\000o\000a\000c\000h}{section.17}% 9
-\BOOKMARK [1][-]{section.20}{\376\377\000O\000u\000r\000\040\000A\000p\000p\000r\000o\000a\000c\000h}{chapter.14}% 10
-\BOOKMARK [2][-]{subsection.21}{\376\377\000E\000x\000i\000s\000t\000i\000n\000g\000\040\000W\000o\000r\000k}{section.20}% 11
-\BOOKMARK [1][-]{section.22}{\376\377\000W\000h\000a\000t\000\040\000t\000h\000i\000s\000\040\000T\000e\000m\000p\000l\000a\000t\000e\000\040\000I\000n\000c\000l\000u\000d\000e\000s}{chapter.14}% 12
-\BOOKMARK [2][-]{subsection.23}{\376\377\000F\000o\000l\000d\000e\000r\000s}{section.22}% 13
-\BOOKMARK [2][-]{subsection.24}{\376\377\000F\000i\000l\000e\000s}{section.22}% 14
-\BOOKMARK [1][-]{section.25}{\376\377\000F\000i\000l\000l\000i\000n\000g\000\040\000i\000n\000\040\000Y\000o\000u\000r\000\040\000I\000n\000f\000o\000r\000m\000a\000t\000i\000o\000n\000\040\000i\000n\000\040\000t\000h\000e\000\040\000m\000a\000i\000n\000.\000t\000e\000x\000\040\000F\000i\000l\000e}{chapter.14}% 15
-\BOOKMARK [1][-]{section.26}{\376\377\000T\000h\000e\000\040\000m\000a\000i\000n\000.\000t\000e\000x\000\040\000F\000i\000l\000e\000\040\000E\000x\000p\000l\000a\000i\000n\000e\000d}{chapter.14}% 16
-\BOOKMARK [1][-]{section.27}{\376\377\000T\000h\000e\000s\000i\000s\000\040\000F\000e\000a\000t\000u\000r\000e\000s\000\040\000a\000n\000d\000\040\000C\000o\000n\000v\000e\000n\000t\000i\000o\000n\000s}{chapter.14}% 17
-\BOOKMARK [2][-]{subsection.28}{\376\377\000P\000r\000i\000n\000t\000i\000n\000g\000\040\000F\000o\000r\000m\000a\000t}{section.27}% 18
-\BOOKMARK [2][-]{subsection.29}{\376\377\000U\000s\000i\000n\000g\000\040\000U\000S\000\040\000L\000e\000t\000t\000e\000r\000\040\000P\000a\000p\000e\000r}{section.27}% 19
-\BOOKMARK [2][-]{subsection.30}{\376\377\000R\000e\000f\000e\000r\000e\000n\000c\000e\000s}{section.27}% 20
-\BOOKMARK [3][-]{section*.32}{\376\377\000A\000\040\000N\000o\000t\000e\000\040\000o\000n\000\040\000b\000i\000b\000t\000e\000x}{subsection.30}% 21
-\BOOKMARK [2][-]{subsection.33}{\376\377\000T\000a\000b\000l\000e\000s}{section.27}% 22
-\BOOKMARK [2][-]{subsection.35}{\376\377\000F\000i\000g\000u\000r\000e\000s}{section.27}% 23
-\BOOKMARK [2][-]{subsection.37}{\376\377\000T\000y\000p\000e\000s\000e\000t\000t\000i\000n\000g\000\040\000m\000a\000t\000h\000e\000m\000a\000t\000i\000c\000s}{section.27}% 24
-\BOOKMARK [1][-]{section.39}{\376\377\000S\000e\000c\000t\000i\000o\000n\000i\000n\000g\000\040\000a\000n\000d\000\040\000S\000u\000b\000s\000e\000c\000t\000i\000o\000n\000i\000n\000g}{chapter.14}% 25
-\BOOKMARK [1][-]{section.40}{\376\377\000I\000n\000\040\000C\000l\000o\000s\000i\000n\000g}{chapter.14}% 26
-\BOOKMARK [0][-]{chapter.41}{\376\377\000C\000h\000a\000p\000t\000e\000r\000\040\000T\000i\000t\000l\000e\000\040\000H\000e\000r\000e}{}% 27
-\BOOKMARK [1][-]{section.42}{\376\377\000P\000r\000o\000p\000e\000r\000t\000i\000e\000s\000\040\000o\000f\000\040\000"\0000\0002\0006\000E\0003\0000\000F\000\040\000c}{chapter.41}% 28
-\BOOKMARK [2][-]{subsection.43}{\376\377\000f\000u\000n\000c\000t\000i\000o\000n\000\040\000"\0000\0002\0006\000E\0003\0000\000F\000\040\000c\000\040\000i\000s\000\040\000n\000o\000t\000\040\0001\000-\000t\000o\000-\0001}{section.42}% 29
-\BOOKMARK [2][-]{subsection.44}{\376\377\000S\000u\000b\000s\000e\000c\000t\000i\000o\000n\000\040\0001}{section.42}% 30
-\BOOKMARK [2][-]{subsection.45}{\376\377\000S\000u\000b\000s\000e\000c\000t\000i\000o\000n\000\040\0002}{section.42}% 31
-\BOOKMARK [1][-]{section.46}{\376\377\000M\000a\000i\000n\000\040\000S\000e\000c\000t\000i\000o\000n\000\040\0002}{chapter.41}% 32
-\BOOKMARK [0][-]{chapter.47}{\376\377\000C\000o\000m\000m\000o\000n\000\040\000I\000d\000e\000n\000t\000i\000t\000i\000e\000s\000\040\000I\000n\000\040\000S\000i\000m\000p\000l\000i\000f\000i\000c\000a\000t\000i\000o\000n\000-\000R\000e\000l\000a\000t\000e\000d\000\040\000F\000u\000n\000c\000t\000i\000o\000n\000s}{}% 33
-\BOOKMARK [1][-]{section.48}{\376\377\000I\000d\000e\000m\000p\000o\000t\000e\000n\000c\000y\000\040\000o\000f\000\040\000s\000i\000m\000p}{chapter.47}% 34
-\BOOKMARK [2][-]{subsection.50}{\376\377\000S\000y\000n\000t\000a\000c\000t\000i\000c\000\040\000E\000q\000u\000i\000v\000a\000l\000e\000n\000c\000e\000\040\000U\000n\000d\000e\000r\000\040\000s\000i\000m\000p}{section.48}% 35
-\BOOKMARK [2][-]{subsection.51}{\376\377\000S\000u\000b\000s\000e\000c\000t\000i\000o\000n\000\040\0002}{section.48}% 36
-\BOOKMARK [1][-]{section.52}{\376\377\000M\000a\000i\000n\000\040\000S\000e\000c\000t\000i\000o\000n\000\040\0002}{chapter.47}% 37
-\BOOKMARK [0][-]{appendix.53}{\376\377\000F\000r\000e\000q\000u\000e\000n\000t\000l\000y\000\040\000A\000s\000k\000e\000d\000\040\000Q\000u\000e\000s\000t\000i\000o\000n\000s}{}% 38
-\BOOKMARK [1][-]{section.54}{\376\377\000H\000o\000w\000\040\000d\000o\000\040\000I\000\040\000c\000h\000a\000n\000g\000e\000\040\000t\000h\000e\000\040\000c\000o\000l\000o\000r\000s\000\040\000o\000f\000\040\000l\000i\000n\000k\000s\000?}{appendix.53}% 39
-\BOOKMARK [0][-]{appendix*.55}{\376\377\000B\000i\000b\000l\000i\000o\000g\000r\000a\000p\000h\000y}{}% 40
+\BOOKMARK [0][-]{chapter.14}{\376\377\000P\000O\000S\000I\000X\000\040\000L\000e\000x\000i\000n\000g\000\040\000W\000i\000t\000h\000\040\000B\000i\000t\000-\000c\000o\000d\000e\000s}{}% 4
+\BOOKMARK [1][-]{section.15}{\376\377\000I\000n\000t\000r\000o\000d\000u\000c\000t\000i\000o\000n\000\040\000T\000o\000\040\000R\000e\000g\000e\000x\000e\000s}{chapter.14}% 5
+\BOOKMARK [2][-]{subsection.16}{\376\377\000B\000a\000c\000k\000\040\000R\000e\000f\000e\000r\000e\000n\000c\000e\000s\000\040\000i\000n\000\040\000R\000e\000g\000e\000x\040\023\000N\000o\000n\000-\000R\000e\000g\000u\000l\000a\000r\000\040\000p\000a\000r\000t}{section.15}% 6
+\BOOKMARK [1][-]{section.17}{\376\377\000O\000u\000r\000\040\000S\000o\000l\000u\000t\000i\000o\000n\040\023\000B\000r\000z\000o\000z\000o\000w\000s\000k\000i\000\040\000D\000e\000r\000i\000v\000a\000t\000i\000v\000e\000s}{chapter.14}% 7
+\BOOKMARK [1][-]{section.18}{\376\377\000P\000r\000e\000l\000i\000m\000i\000n\000a\000r\000i\000e\000s\000\040\000a\000b\000o\000u\000t\000\040\000L\000e\000x\000i\000n\000g\000\040\000U\000s\000i\000n\000g\000\040\000B\000r\000z\000o\000z\000o\000w\000s\000k\000i\000\040\000d\000e\000r\000i\000v\000a\000t\000i\000v\000e\000s}{chapter.14}% 8
+\BOOKMARK [1][-]{section.25}{\376\377\000B\000a\000c\000k\000g\000o\000u\000n\000d}{chapter.14}% 9
+\BOOKMARK [1][-]{section.26}{\376\377\000E\000n\000g\000i\000n\000e\000e\000r\000i\000n\000g\000\040\000a\000n\000d\000\040\000A\000c\000a\000d\000e\000m\000i\000c\000\040\000A\000p\000p\000r\000o\000a\000c\000h\000e\000s\000\040\000t\000o\000\040\000D\000e\000a\000l\000\040\000w\000i\000t\000h\000\040\000C\000a\000t\000a\000s\000t\000r\000o\000p\000h\000i\000c\000\040\000B\000a\000c\000k\000t\000r\000a\000c\000k\000i\000n\000g}{chapter.14}% 10
+\BOOKMARK [2][-]{subsection.27}{\376\377\000D\000F\000A\000\040\000A\000p\000p\000r\000o\000a\000c\000h}{section.26}% 11
+\BOOKMARK [2][-]{subsection.28}{\376\377\000N\000F\000A\000\040\000A\000p\000p\000r\000o\000a\000c\000h}{section.26}% 12
+\BOOKMARK [1][-]{section.29}{\376\377\000O\000u\000r\000\040\000A\000p\000p\000r\000o\000a\000c\000h}{chapter.14}% 13
+\BOOKMARK [2][-]{subsection.30}{\376\377\000E\000x\000i\000s\000t\000i\000n\000g\000\040\000W\000o\000r\000k}{section.29}% 14
+\BOOKMARK [1][-]{section.31}{\376\377\000W\000h\000a\000t\000\040\000t\000h\000i\000s\000\040\000T\000e\000m\000p\000l\000a\000t\000e\000\040\000I\000n\000c\000l\000u\000d\000e\000s}{chapter.14}% 15
+\BOOKMARK [2][-]{subsection.32}{\376\377\000F\000o\000l\000d\000e\000r\000s}{section.31}% 16
+\BOOKMARK [2][-]{subsection.33}{\376\377\000F\000i\000l\000e\000s}{section.31}% 17
+\BOOKMARK [1][-]{section.34}{\376\377\000F\000i\000l\000l\000i\000n\000g\000\040\000i\000n\000\040\000Y\000o\000u\000r\000\040\000I\000n\000f\000o\000r\000m\000a\000t\000i\000o\000n\000\040\000i\000n\000\040\000t\000h\000e\000\040\000m\000a\000i\000n\000.\000t\000e\000x\000\040\000F\000i\000l\000e}{chapter.14}% 18
+\BOOKMARK [1][-]{section.35}{\376\377\000T\000h\000e\000\040\000m\000a\000i\000n\000.\000t\000e\000x\000\040\000F\000i\000l\000e\000\040\000E\000x\000p\000l\000a\000i\000n\000e\000d}{chapter.14}% 19
+\BOOKMARK [1][-]{section.36}{\376\377\000T\000h\000e\000s\000i\000s\000\040\000F\000e\000a\000t\000u\000r\000e\000s\000\040\000a\000n\000d\000\040\000C\000o\000n\000v\000e\000n\000t\000i\000o\000n\000s}{chapter.14}% 20
+\BOOKMARK [2][-]{subsection.37}{\376\377\000P\000r\000i\000n\000t\000i\000n\000g\000\040\000F\000o\000r\000m\000a\000t}{section.36}% 21
+\BOOKMARK [2][-]{subsection.38}{\376\377\000U\000s\000i\000n\000g\000\040\000U\000S\000\040\000L\000e\000t\000t\000e\000r\000\040\000P\000a\000p\000e\000r}{section.36}% 22
+\BOOKMARK [2][-]{subsection.39}{\376\377\000R\000e\000f\000e\000r\000e\000n\000c\000e\000s}{section.36}% 23
+\BOOKMARK [3][-]{section*.41}{\376\377\000A\000\040\000N\000o\000t\000e\000\040\000o\000n\000\040\000b\000i\000b\000t\000e\000x}{subsection.39}% 24
+\BOOKMARK [2][-]{subsection.42}{\376\377\000T\000a\000b\000l\000e\000s}{section.36}% 25
+\BOOKMARK [2][-]{subsection.44}{\376\377\000F\000i\000g\000u\000r\000e\000s}{section.36}% 26
+\BOOKMARK [2][-]{subsection.46}{\376\377\000T\000y\000p\000e\000s\000e\000t\000t\000i\000n\000g\000\040\000m\000a\000t\000h\000e\000m\000a\000t\000i\000c\000s}{section.36}% 27
+\BOOKMARK [1][-]{section.48}{\376\377\000S\000e\000c\000t\000i\000o\000n\000i\000n\000g\000\040\000a\000n\000d\000\040\000S\000u\000b\000s\000e\000c\000t\000i\000o\000n\000i\000n\000g}{chapter.14}% 28
+\BOOKMARK [1][-]{section.49}{\376\377\000I\000n\000\040\000C\000l\000o\000s\000i\000n\000g}{chapter.14}% 29
+\BOOKMARK [0][-]{chapter.50}{\376\377\000C\000h\000a\000p\000t\000e\000r\000\040\000T\000i\000t\000l\000e\000\040\000H\000e\000r\000e}{}% 30
+\BOOKMARK [1][-]{section.51}{\376\377\000P\000r\000o\000p\000e\000r\000t\000i\000e\000s\000\040\000o\000f\000\040\000"\0000\0002\0006\000E\0003\0000\000F\000\040\000c}{chapter.50}% 31
+\BOOKMARK [2][-]{subsection.52}{\376\377\000f\000u\000n\000c\000t\000i\000o\000n\000\040\000"\0000\0002\0006\000E\0003\0000\000F\000\040\000c\000\040\000i\000s\000\040\000n\000o\000t\000\040\0001\000-\000t\000o\000-\0001}{section.51}% 32
+\BOOKMARK [2][-]{subsection.53}{\376\377\000S\000u\000b\000s\000e\000c\000t\000i\000o\000n\000\040\0001}{section.51}% 33
+\BOOKMARK [2][-]{subsection.54}{\376\377\000S\000u\000b\000s\000e\000c\000t\000i\000o\000n\000\040\0002}{section.51}% 34
+\BOOKMARK [1][-]{section.55}{\376\377\000M\000a\000i\000n\000\040\000S\000e\000c\000t\000i\000o\000n\000\040\0002}{chapter.50}% 35
+\BOOKMARK [0][-]{chapter.56}{\376\377\000C\000o\000m\000m\000o\000n\000\040\000I\000d\000e\000n\000t\000i\000t\000i\000e\000s\000\040\000I\000n\000\040\000S\000i\000m\000p\000l\000i\000f\000i\000c\000a\000t\000i\000o\000n\000-\000R\000e\000l\000a\000t\000e\000d\000\040\000F\000u\000n\000c\000t\000i\000o\000n\000s}{}% 36
+\BOOKMARK [1][-]{section.57}{\376\377\000I\000d\000e\000m\000p\000o\000t\000e\000n\000c\000y\000\040\000o\000f\000\040\000s\000i\000m\000p}{chapter.56}% 37
+\BOOKMARK [2][-]{subsection.59}{\376\377\000S\000y\000n\000t\000a\000c\000t\000i\000c\000\040\000E\000q\000u\000i\000v\000a\000l\000e\000n\000c\000e\000\040\000U\000n\000d\000e\000r\000\040\000s\000i\000m\000p}{section.57}% 38
+\BOOKMARK [2][-]{subsection.60}{\376\377\000S\000u\000b\000s\000e\000c\000t\000i\000o\000n\000\040\0002}{section.57}% 39
+\BOOKMARK [1][-]{section.61}{\376\377\000M\000a\000i\000n\000\040\000S\000e\000c\000t\000i\000o\000n\000\040\0002}{chapter.56}% 40
+\BOOKMARK [0][-]{appendix.62}{\376\377\000F\000r\000e\000q\000u\000e\000n\000t\000l\000y\000\040\000A\000s\000k\000e\000d\000\040\000Q\000u\000e\000s\000t\000i\000o\000n\000s}{}% 41
+\BOOKMARK [1][-]{section.63}{\376\377\000H\000o\000w\000\040\000d\000o\000\040\000I\000\040\000c\000h\000a\000n\000g\000e\000\040\000t\000h\000e\000\040\000c\000o\000l\000o\000r\000s\000\040\000o\000f\000\040\000l\000i\000n\000k\000s\000?}{appendix.62}% 42
+\BOOKMARK [0][-]{appendix*.64}{\376\377\000B\000i\000b\000l\000i\000o\000g\000r\000a\000p\000h\000y}{}% 43
Binary file PhdThesisRealOne/LaTeXTemplates_masters-doctoral-thesis_v2/main.pdf has changed
Binary file PhdThesisRealOne/LaTeXTemplates_masters-doctoral-thesis_v2/main.synctex.gz has changed
--- a/PhdThesisRealOne/LaTeXTemplates_masters-doctoral-thesis_v2/main.tex	Wed Mar 23 10:09:32 2022 +0000
+++ b/PhdThesisRealOne/LaTeXTemplates_masters-doctoral-thesis_v2/main.tex	Thu Mar 24 20:52:34 2022 +0000
@@ -59,7 +59,8 @@
 \usepackage{enumitem}
 \usepackage{nccmath}
 \usepackage{tikz-cd}
-\usetikzlibrary{positioning}
+\usepackage{tikz}
+\usetikzlibrary{automata, positioning}
 
 %----------------------------------------------------------------------------------------
 %	MARGIN SETTINGS
--- a/PhdThesisRealOne/LaTeXTemplates_masters-doctoral-thesis_v2/main.toc	Wed Mar 23 10:09:32 2022 +0000
+++ b/PhdThesisRealOne/LaTeXTemplates_masters-doctoral-thesis_v2/main.toc	Thu Mar 24 20:52:34 2022 +0000
@@ -3,40 +3,43 @@
 \contentsline {chapter}{Declaration of Authorship}{iii}{section*.1}
 \contentsline {chapter}{Abstract}{vii}{section*.2}
 \contentsline {chapter}{Acknowledgements}{ix}{section*.3}
-\contentsline {chapter}{\numberline {1}Introduction}{1}{chapter.14}
-\contentsline {section}{\numberline {1.1}Why Brzozowski}{2}{section.15}
-\contentsline {section}{\numberline {1.2}Backgound}{4}{section.16}
-\contentsline {section}{\numberline {1.3}Existing Practical Approaches}{5}{section.17}
-\contentsline {subsection}{\numberline {1.3.1}DFA Approach}{5}{subsection.18}
-\contentsline {subsection}{\numberline {1.3.2}NFA Approach}{5}{subsection.19}
-\contentsline {section}{\numberline {1.4}Our Approach}{5}{section.20}
-\contentsline {subsection}{\numberline {1.4.1}Existing Work}{5}{subsection.21}
-\contentsline {section}{\numberline {1.5}What this Template Includes}{5}{section.22}
-\contentsline {subsection}{\numberline {1.5.1}Folders}{5}{subsection.23}
-\contentsline {subsection}{\numberline {1.5.2}Files}{6}{subsection.24}
-\contentsline {section}{\numberline {1.6}Filling in Your Information in the \texttt {\bfseries main.tex} File}{7}{section.25}
-\contentsline {section}{\numberline {1.7}The \texttt {main.tex} File Explained}{7}{section.26}
-\contentsline {section}{\numberline {1.8}Thesis Features and Conventions}{8}{section.27}
-\contentsline {subsection}{\numberline {1.8.1}Printing Format}{8}{subsection.28}
-\contentsline {subsection}{\numberline {1.8.2}Using US Letter Paper}{9}{subsection.29}
-\contentsline {subsection}{\numberline {1.8.3}References}{9}{subsection.30}
-\contentsline {subsubsection}{A Note on bibtex}{9}{section*.32}
-\contentsline {subsection}{\numberline {1.8.4}Tables}{10}{subsection.33}
-\contentsline {subsection}{\numberline {1.8.5}Figures}{10}{subsection.35}
-\contentsline {subsection}{\numberline {1.8.6}Typesetting mathematics}{11}{subsection.37}
-\contentsline {section}{\numberline {1.9}Sectioning and Subsectioning}{12}{section.39}
-\contentsline {section}{\numberline {1.10}In Closing}{12}{section.40}
-\contentsline {chapter}{\numberline {2}Chapter Title Here}{13}{chapter.41}
-\contentsline {section}{\numberline {2.1}Properties of $\delimiter "026E30F c$}{13}{section.42}
-\contentsline {subsection}{\numberline {2.1.1}function $\delimiter "026E30F c$ is not 1-to-1}{13}{subsection.43}
-\contentsline {subsection}{\numberline {2.1.2}Subsection 1}{13}{subsection.44}
-\contentsline {subsection}{\numberline {2.1.3}Subsection 2}{13}{subsection.45}
-\contentsline {section}{\numberline {2.2}Main Section 2}{14}{section.46}
-\contentsline {chapter}{\numberline {3}Common Identities In Simplification-Related Functions}{15}{chapter.47}
-\contentsline {section}{\numberline {3.1}Idempotency of $\mathit {simp}$}{15}{section.48}
-\contentsline {subsection}{\numberline {3.1.1}Syntactic Equivalence Under $\mathit {simp}$}{15}{subsection.50}
-\contentsline {subsection}{\numberline {3.1.2}Subsection 2}{15}{subsection.51}
-\contentsline {section}{\numberline {3.2}Main Section 2}{15}{section.52}
-\contentsline {chapter}{\numberline {A}Frequently Asked Questions}{17}{appendix.53}
-\contentsline {section}{\numberline {A.1}How do I change the colors of links?}{17}{section.54}
-\contentsline {chapter}{Bibliography}{19}{appendix*.55}
+\contentsline {chapter}{\numberline {1}POSIX Lexing With Bit-codes}{1}{chapter.14}
+\contentsline {section}{\numberline {1.1}Introduction To Regexes}{1}{section.15}
+\contentsline {subsection}{\numberline {1.1.1}Back References in Regex--Non-Regular part}{4}{subsection.16}
+\contentsline {section}{\numberline {1.2}Our Solution--Brzozowski Derivatives}{4}{section.17}
+\contentsline {section}{\numberline {1.3}Preliminaries about Lexing Using Brzozowski derivatives}{4}{section.18}
+\contentsline {section}{\numberline {1.4}Backgound}{14}{section.25}
+\contentsline {section}{\numberline {1.5}Engineering and Academic Approaches to Deal with Catastrophic Backtracking}{15}{section.26}
+\contentsline {subsection}{\numberline {1.5.1}DFA Approach}{15}{subsection.27}
+\contentsline {subsection}{\numberline {1.5.2}NFA Approach}{15}{subsection.28}
+\contentsline {section}{\numberline {1.6}Our Approach}{15}{section.29}
+\contentsline {subsection}{\numberline {1.6.1}Existing Work}{15}{subsection.30}
+\contentsline {section}{\numberline {1.7}What this Template Includes}{16}{section.31}
+\contentsline {subsection}{\numberline {1.7.1}Folders}{16}{subsection.32}
+\contentsline {subsection}{\numberline {1.7.2}Files}{16}{subsection.33}
+\contentsline {section}{\numberline {1.8}Filling in Your Information in the \texttt {\bfseries main.tex} File}{17}{section.34}
+\contentsline {section}{\numberline {1.9}The \texttt {main.tex} File Explained}{17}{section.35}
+\contentsline {section}{\numberline {1.10}Thesis Features and Conventions}{18}{section.36}
+\contentsline {subsection}{\numberline {1.10.1}Printing Format}{19}{subsection.37}
+\contentsline {subsection}{\numberline {1.10.2}Using US Letter Paper}{19}{subsection.38}
+\contentsline {subsection}{\numberline {1.10.3}References}{19}{subsection.39}
+\contentsline {subsubsection}{A Note on bibtex}{20}{section*.41}
+\contentsline {subsection}{\numberline {1.10.4}Tables}{20}{subsection.42}
+\contentsline {subsection}{\numberline {1.10.5}Figures}{20}{subsection.44}
+\contentsline {subsection}{\numberline {1.10.6}Typesetting mathematics}{22}{subsection.46}
+\contentsline {section}{\numberline {1.11}Sectioning and Subsectioning}{22}{section.48}
+\contentsline {section}{\numberline {1.12}In Closing}{22}{section.49}
+\contentsline {chapter}{\numberline {2}Chapter Title Here}{23}{chapter.50}
+\contentsline {section}{\numberline {2.1}Properties of $\delimiter "026E30F c$}{23}{section.51}
+\contentsline {subsection}{\numberline {2.1.1}function $\delimiter "026E30F c$ is not 1-to-1}{23}{subsection.52}
+\contentsline {subsection}{\numberline {2.1.2}Subsection 1}{23}{subsection.53}
+\contentsline {subsection}{\numberline {2.1.3}Subsection 2}{23}{subsection.54}
+\contentsline {section}{\numberline {2.2}Main Section 2}{24}{section.55}
+\contentsline {chapter}{\numberline {3}Common Identities In Simplification-Related Functions}{25}{chapter.56}
+\contentsline {section}{\numberline {3.1}Idempotency of $\mathit {simp}$}{25}{section.57}
+\contentsline {subsection}{\numberline {3.1.1}Syntactic Equivalence Under $\mathit {simp}$}{25}{subsection.59}
+\contentsline {subsection}{\numberline {3.1.2}Subsection 2}{25}{subsection.60}
+\contentsline {section}{\numberline {3.2}Main Section 2}{25}{section.61}
+\contentsline {chapter}{\numberline {A}Frequently Asked Questions}{27}{appendix.62}
+\contentsline {section}{\numberline {A.1}How do I change the colors of links?}{27}{section.63}
+\contentsline {chapter}{Bibliography}{29}{appendix*.64}
--- a/thys2/BasicIdentities.thy	Wed Mar 23 10:09:32 2022 +0000
+++ b/thys2/BasicIdentities.thy	Thu Mar 24 20:52:34 2022 +0000
@@ -71,6 +71,20 @@
 | "rsimp_ALTs [r] =  r"
 | "rsimp_ALTs rs = RALTS rs"
 
+lemma rsimpalts_gte2elems:
+  shows "size rlist \<ge> 2 \<Longrightarrow> rsimp_ALTs rlist = RALTS rlist"
+  apply(induct rlist)
+   apply simp
+  apply(induct rlist)
+   apply simp
+  apply (metis Suc_le_length_iff rsimp_ALTs.simps(3))
+  by blast
+
+lemma rsimpalts_conscons:
+  shows "rsimp_ALTs (r1 # rsa @ r2 # rsb) = RALTS (r1 # rsa @ r2 # rsb)"
+  by (metis Nil_is_append_conv list.exhaust rsimp_ALTs.simps(3))
+
+
 fun rsimp_SEQ :: " rrexp \<Rightarrow> rrexp \<Rightarrow> rrexp"
   where
   "rsimp_SEQ  RZERO _ = RZERO"
@@ -264,7 +278,7 @@
 lemma rders_simp_append:
   "rders_simp c (s1 @ s2) = rders_simp (rders_simp c s1) s2"
   apply(induct s1 arbitrary: c s2)
-  apply(simp_all)
+   apply(simp_all)
   done
 
 lemma inside_simp_removal:
@@ -332,7 +346,7 @@
   by (meson map_idI rsimp_inner_idem2)
 
 corollary rsimp_inner_idem4:
-  shows "rsimp r = RALTS rs \<Longrightarrow> flts rs = rs"
+  shows "rsimp r = RALTS rs \<Longrightarrow> rflts rs = rs"
   sorry
 
 
--- a/thys2/ClosedForms.thy	Wed Mar 23 10:09:32 2022 +0000
+++ b/thys2/ClosedForms.thy	Thu Mar 24 20:52:34 2022 +0000
@@ -2,15 +2,17 @@
 "BasicIdentities"
 begin
 
-lemma add0_isomorphic:
-  shows "rsimp_ALTs (rdistinct (rflts [rsimp r, RZERO]) {}) = rsimp r"
-  sorry
 
-
+lemma idem_after_simp1:
+  shows "rsimp_ALTs (rdistinct (rflts [rsimp aa]) {}) = rsimp aa"
+  apply(case_tac "rsimp aa")
+  apply simp+
+  apply (metis no_alt_short_list_after_simp no_further_dB_after_simp)
+  by simp
 
 
 lemma distinct_removes_last:
-  shows "\<lbrakk>(a::rrexp) \<in> set as\<rbrakk>
+  shows "\<lbrakk>a \<in> set as\<rbrakk>
     \<Longrightarrow> rdistinct as rset = rdistinct (as @ [a]) rset"
 and "rdistinct (ab # as @ [ab]) rset1 = rdistinct (ab # as) rset1"
   apply(induct as arbitrary: rset ab rset1 a)
@@ -48,36 +50,61 @@
   by (metis append_Cons insert_iff insert_is_Un rdistinct.simps(2))
 
 
-lemma distinct_removes_last2:
-  shows "\<lbrakk>(a::rrexp) \<in> set as\<rbrakk>
-    \<Longrightarrow> rdistinct as rset = rdistinct (as @ [a]) rset"
-  using distinct_removes_last(1) by presburger
-
-lemma distinct_append_simp:
-  shows " rsimp (rsimp_ALTs rs1) = rsimp (rsimp_ALTs rs2) \<Longrightarrow>
-           rsimp (rsimp_ALTs (f a # rs1)) =
-           rsimp (rsimp_ALTs (f a # rs2))"
-  apply(case_tac rs1)
+lemma distinct_removes_middle:
+  shows  "\<lbrakk>a \<in> set as\<rbrakk>
+    \<Longrightarrow> rdistinct (as @ as2) rset = rdistinct (as @ [a] @ as2) rset"
+and "rdistinct (ab # as @ [ab] @ as3) rset1 = rdistinct (ab # as @ as3) rset1"
+   apply(induct as arbitrary: rset rset1 ab as2 as3 a)
+     apply simp
+    apply simp
+   apply(case_tac "a \<in> rset")
+    apply simp
+    apply metis
    apply simp
-   apply(case_tac rs2)
-    apply simp
+   apply (metis insertI1)
+  apply(case_tac "a = ab")
    apply simp
-   prefer 2
-   apply(case_tac list)
-    apply(case_tac rs2)
-     apply simp
-  using add0_isomorphic apply blast 
+   apply(case_tac "ab \<in> rset")
     apply simp
-  sorry
-
-(*  apply (smt (z3) append.right_neutral empty_iff list.distinct(1) list.inject no_alt_short_list_after_simp no_further_dB_after_simp rdistinct.elims rflts.elims rflts.simps(2) rsimp_ALTs.simps(1) rsimp_ALTs.simps(2)))*)
+    apply presburger
+   apply (meson insertI1)
+  apply(case_tac "a \<in> rset")
+  apply (metis (no_types, opaque_lifting) Un_insert_left append_Cons insert_iff rdistinct.simps(2) sup_bot_left)
+  apply(case_tac "ab \<in> rset")
+  apply simp
+   apply (meson insert_iff)
+  apply simp
+  by (metis insertI1)
 
 
+lemma distinct_removes_middle3:
+  shows  "\<lbrakk>a \<in> set as\<rbrakk>
+    \<Longrightarrow> rdistinct (as @ a #as2) rset = rdistinct (as @ as2) rset"
+  using distinct_removes_middle(1) by fastforce
+
+lemma distinct_removes_last2:
+  shows "\<lbrakk>a \<in> set as\<rbrakk>
+    \<Longrightarrow> rdistinct as rset = rdistinct (as @ [a]) rset"
+  by (simp add: distinct_removes_last(1))
+
+lemma distinct_removes_middle2:
+  shows "a \<in> set as \<Longrightarrow> rdistinct (as @ [a] @ rs) {} = rdistinct (as @ rs) {}"
+  by (metis distinct_removes_middle(1))
+
+lemma distinct_removes_list:
+  shows "\<lbrakk>a \<in> set as; \<forall>r \<in> set rs. r \<in> set as\<rbrakk> \<Longrightarrow> rdistinct (as @ rs) {} = rdistinct as {}"
+  apply(induct rs)
+   apply simp+
+  apply(subgoal_tac "rdistinct (as @ aa # rs) {} = rdistinct (as @ rs) {}")
+   prefer 2
+  apply (metis append_Cons append_Nil distinct_removes_middle(1))
+  by presburger
 
 
 
 lemma simp_rdistinct_f: shows 
-"f ` rset = frset \<Longrightarrow> rsimp (rsimp_ALTs (map f (rdistinct rs rset))) = rsimp (rsimp_ALTs (rdistinct (map f rs) frset))  "
+"f ` rset = frset \<Longrightarrow> rsimp (rsimp_ALTs (map f (rdistinct rs rset))) = 
+                      rsimp (rsimp_ALTs (rdistinct (map f rs) frset))  "
   apply(induct rs arbitrary: rset)
    apply simp
    apply(case_tac "a \<in> rset")
@@ -209,14 +236,259 @@
 
   oops
 
+lemma simp_more_distinct1:
+  shows "rsimp (rsimp_ALTs rs) = rsimp (rsimp_ALTs (rdistinct rs {}))"
+  apply(induct rs)
+   apply simp
+  apply simp
+  oops
+
+
+(*
+\<and>
+  rsimp (rsimp_ALTs (rsb @ (rdistinct rs (set rsb)))) = 
+  rsimp (rsimp_ALTs (rsb @ (rdistinct (rflts rs) (set rsb))))
+*)
+lemma simp_removes_duplicate2:
+  shows "a "
+
+  oops
+
+lemma flts_removes0:
+  shows "  rflts (rs @ [RZERO])  =
+           rflts rs"
+  apply(induct rs)
+   apply simp
+  by (metis append_Cons rflts.simps(2) rflts.simps(3) rflts_def_idiot)
+  
+lemma flts_keeps1:
+  shows " rflts (rs @ [RONE]) = 
+          rflts  rs @ [RONE] "
+  apply (induct rs)
+   apply simp
+  by (metis append.assoc append_Cons rflts.simps(2) rflts.simps(3) rflts_def_idiot)
+
+lemma flts_keeps_others:
+  shows "\<lbrakk>a \<noteq> RZERO; \<nexists>rs1. a = RALTS rs1\<rbrakk> \<Longrightarrow>rflts (rs @ [a]) = rflts rs @ [a]"
+  apply(induct rs)
+   apply simp
+  apply (simp add: rflts_def_idiot)
+  apply(case_tac a)
+       apply simp
+  using flts_keeps1 apply blast
+     apply (metis append.assoc append_Cons rflts.simps(2) rflts.simps(3) rflts_def_idiot)
+  apply (metis append.assoc append_Cons rflts.simps(2) rflts.simps(3) rflts_def_idiot)
+  apply blast
+  by (metis append.assoc append_Cons rflts.simps(2) rflts.simps(3) rflts_def_idiot)
+
+
+lemma rflts_def_idiot2:
+  shows "\<lbrakk>a \<noteq> RZERO; \<nexists>rs1. a = RALTS rs1; a \<in> set rs\<rbrakk> \<Longrightarrow> a \<in> set (rflts rs)"
+  apply(induct rs)
+   apply simp
+  by (metis append.assoc in_set_conv_decomp insert_iff list.simps(15) rflts.simps(2) rflts.simps(3) rflts_def_idiot)
+
+lemma rflts_spills_last:
+  shows "a = RALTS rs \<Longrightarrow> rflts (rs1 @ [a]) = rflts rs1 @ rs"
+  apply (induct rs1)
+  apply simp
+  by (metis append.assoc append_Cons rflts.simps(2) rflts.simps(3) rflts_def_idiot)
+
+
+lemma spilled_alts_contained:
+  shows "\<lbrakk>a = RALTS rs ; a \<in> set rs1\<rbrakk> \<Longrightarrow> \<forall>r \<in> set rs. r \<in> set (rflts rs1)"
+  apply(induct rs1)
+   apply simp 
+  apply(case_tac "a = aa")
+   apply simp
+  apply(subgoal_tac " a \<in> set rs1")
+  prefer 2
+   apply (meson set_ConsD)
+  apply(case_tac aa)
+  using rflts.simps(2) apply presburger
+      apply fastforce
+  apply fastforce
+  apply fastforce
+  apply fastforce
+  by fastforce
+
+lemma distinct_removes_duplicate_flts:
+  shows " a \<in> set rsa
+       \<Longrightarrow> rdistinct (rflts (map rsimp rsa @ [rsimp a])) {} =
+           rdistinct (rflts (map rsimp rsa)) {}"
+  apply(subgoal_tac "rsimp a \<in> set (map rsimp rsa)")
+  prefer 2
+  apply simp
+  apply(induct "rsimp a")
+       apply simp
+  using flts_removes0 apply presburger
+      apply(subgoal_tac " rdistinct (rflts (map rsimp rsa @ [rsimp a])) {} =  
+                          rdistinct (rflts (map rsimp rsa @ [RONE])) {}")
+      apply (simp only:)
+       apply(subst flts_keeps1)
+  apply (metis distinct_removes_last2 rflts_def_idiot2 rrexp.simps(20) rrexp.simps(6))
+      apply presburger
+        apply(subgoal_tac " rdistinct (rflts (map rsimp rsa @ [rsimp a]))    {} =  
+                            rdistinct ((rflts (map rsimp rsa)) @ [RCHAR x]) {}")
+      apply (simp only:)
+      prefer 2
+      apply (metis flts_keeps_others rrexp.distinct(21) rrexp.distinct(3))
+  apply (metis distinct_removes_last2 rflts_def_idiot2 rrexp.distinct(21) rrexp.distinct(3))
+
+    apply (metis distinct_removes_last2 flts_keeps_others rflts_def_idiot2 rrexp.distinct(25) rrexp.distinct(5))
+   prefer 2
+   apply (metis distinct_removes_last2 flts_keeps_others flts_removes0 rflts_def_idiot2 rrexp.distinct(29))
+  apply(subgoal_tac "rflts (map rsimp rsa @ [rsimp a]) = rflts (map rsimp rsa) @ x")
+  prefer 2
+  apply (simp add: rflts_spills_last)
+  apply(simp only:)
+  apply(subgoal_tac "\<forall> r \<in> set x. r \<in> set (rflts (map rsimp rsa))")
+  prefer 2
+  using spilled_alts_contained apply presburger
+  by (metis append_self_conv distinct_removes_list in_set_conv_decomp rev_exhaust)
+
+lemma flts_middle0:
+  shows "rflts (rsa @ RZERO # rsb) = rflts (rsa @ rsb)"
+  apply(induct rsa)
+  apply simp
+  by (metis append_Cons rflts.simps(2) rflts.simps(3) rflts_def_idiot)
+
+lemma flts_middle01:
+  shows "rflts (rsa @ [RZERO] @ rsb) = rflts (rsa @ rsb)"
+  by (simp add: flts_middle0)
+
+lemma flts_append1:
+  shows "\<lbrakk>a \<noteq> RZERO; \<nexists>rs1. a = RALTS rs1; a \<in> set rs\<rbrakk>  \<Longrightarrow>
+         rflts (rsa @ [a] @ rsb) = rflts rsa @ [a] @ (rflts rsb)"
+  apply(induct rsa arbitrary: rsb)
+   apply simp
+  using rflts_def_idiot apply presburger
+  apply(case_tac aa)  
+       apply simp+
+  done
+
+lemma flts_append:
+  shows "rflts (rs1 @ rs2) = rflts rs1 @ rflts rs2"
+  apply(induct rs1)
+   apply simp
+  apply(case_tac a)
+       apply simp+
+  done
+
+lemma simp_removes_duplicate1:
+  shows  " a \<in> set rsa \<Longrightarrow> rsimp (RALTS (rsa @ [a])) =  rsimp (RALTS (rsa))"
+and " rsimp (RALTS (a1 # rsa @ [a1])) = rsimp (RALTS (a1 # rsa))"
+  apply(induct rsa arbitrary: a1)
+     apply simp
+    apply simp
+  prefer 2
+  apply(case_tac "a = aa")
+     apply simp
+    apply simp
+  apply (metis Cons_eq_appendI Cons_eq_map_conv distinct_removes_duplicate_flts list.set_intros(2))
+  apply (metis append_Cons append_Nil distinct_removes_duplicate_flts list.set_intros(1) list.simps(8) list.simps(9))
+  by (metis (mono_tags, lifting) append_Cons distinct_removes_duplicate_flts list.set_intros(1) list.simps(8) list.simps(9) map_append rsimp.simps(2))
+  
+lemma simp_removes_duplicate2:
+  shows "a \<in> set rsa \<Longrightarrow> rsimp (RALTS (rsa @ [a] @ rsb)) = rsimp (RALTS (rsa @ rsb))"
+  apply(induct rsb arbitrary: rsa)
+   apply simp
+  using distinct_removes_duplicate_flts apply auto[1]
+  by (metis append.assoc head_one_more_simp rsimp.simps(2) simp_flatten simp_removes_duplicate1(1))
+
+lemma simp_removes_duplicate3:
+  shows "a \<in> set rsa \<Longrightarrow> rsimp (RALTS (rsa @ a # rsb)) = rsimp (RALTS (rsa @ rsb))"
+  using simp_removes_duplicate2 by auto
+
+lemma distinct_removes_middle4:
+  shows "a \<in> set rsa \<Longrightarrow> rdistinct (rsa @ [a] @ rsb) rset = rdistinct (rsa @ rsb) rset"
+  using distinct_removes_middle(1) by fastforce
+
+lemma distinct_removes_middle_list:
+  shows "\<forall>a \<in> set x. a \<in> set rsa \<Longrightarrow> rdistinct (rsa @ x @ rsb) rset = rdistinct (rsa @ rsb) rset"
+  apply(induct x)
+   apply simp
+  by (simp add: distinct_removes_middle3)
+
+
+lemma distinct_removes_duplicate_flts2:
+  shows " a \<in> set rsa
+       \<Longrightarrow> rdistinct (rflts (rsa @ [a] @ rsb)) {} =
+           rdistinct (rflts (rsa @ rsb)) {}"
+  apply(induct a arbitrary: rsb)
+  using flts_middle01 apply presburger
+      apply(subgoal_tac "rflts (rsa @ [RONE] @ rsb) = rflts rsa @ [RONE] @ rflts rsb")
+  prefer 2
+  using flts_append1 apply blast
+      apply simp
+      apply(subgoal_tac "RONE \<in> set (rflts rsa)")
+  prefer 2
+  using rflts_def_idiot2 apply blast
+      apply(subst distinct_removes_middle3)
+       apply simp
+  using flts_append apply presburger
+     apply simp
+  apply (metis distinct_removes_middle3 flts_append in_set_conv_decomp rflts.simps(5))
+  apply (metis distinct_removes_middle(1) flts_append flts_append1 rflts_def_idiot2 rrexp.distinct(25) rrexp.distinct(5))
+   apply(subgoal_tac "rflts (rsa @ [RALTS x] @ rsb) = rflts rsa @ x @ rflts rsb")
+    prefer 2
+  apply (simp add: flts_append)
+   apply (simp only:)
+
+   apply(subgoal_tac "\<forall>r1 \<in> set x. r1 \<in> set (rflts rsa)")
+    prefer 2
+  using spilled_alts_contained apply blast
+  apply(subst flts_append)
+  using distinct_removes_middle_list apply blast
+  using distinct_removes_middle2 flts_append rflts_def_idiot2 by fastforce
+
+
+lemma simp_removes_duplicate:
+  shows "a \<in> set rsa \<Longrightarrow> rsimp (rsimp_ALTs (rsa @ a # rs)) =  rsimp (rsimp_ALTs (rsa @ rs))"
+  apply(subgoal_tac "rsimp (rsimp_ALTs (rsa @ a # rs)) = rsimp (RALTS (rsa @ a # rs))")
+   prefer 2
+  apply (smt (verit, best) Cons_eq_append_conv append_is_Nil_conv empty_set equals0D list.distinct(1) rsimp_ALTs.elims)
+  apply(simp only:)
+  apply simp
+  apply(subgoal_tac "(rdistinct (rflts (map rsimp rsa @ rsimp a # map rsimp rs)) {}) = (rdistinct (rflts (map rsimp rsa @  map rsimp rs)) {})")
+   apply(simp only:)
+  prefer 2
+   apply(subgoal_tac "rsimp a \<in> set (map rsimp rsa)")
+    prefer 2
+  apply simp
+  using distinct_removes_duplicate_flts2 apply force
+  apply(case_tac rsa)
+   apply simp
+  apply(case_tac rs)
+   apply simp
+   apply(case_tac list)
+    apply simp
+  using idem_after_simp1 apply presburger
+   apply simp+
+  apply(subgoal_tac "rsimp_ALTs (aa # list @ aaa # lista) = RALTS (aa # list @ aaa # lista)")
+   apply simp
+  using rsimpalts_conscons by presburger
+  
+lemma simp_der_pierce_flts:
+  shows " rsimp (rsimp_ALTs (rdistinct (map (rder x) (rflts (map (rsimp \<circ> (\<lambda>r. rders_simp r xs)) rs))) {})) =
+           rsimp (rsimp_ALTs (rdistinct (rflts (map (rder x) (map (rsimp \<circ> (\<lambda>r. rders_simp r xs)) rs))) {}))"
+  oops
+
 
 
 lemma simp_more_distinct:
-  shows "rsimp  (rsimp_ALTs (rsa @ rs)) = rsimp (rsimp_ALTs (rsa @ (rdistinct rs (set rsa)))) \<and>
-  rsimp (rsimp_ALTs (rsb @ (rdistinct rs (set rsb)))) = 
-  rsimp (rsimp_ALTs (rsb @ (rdistinct (rflts rs) (set rsb))))"
-  apply(induct rs arbitrary: rsa rsb)
+  shows "rsimp  (rsimp_ALTs (rsa @ rs)) = rsimp (rsimp_ALTs (rsa @ (rdistinct rs (set rsa)))) "
+and  "a1 \<in> set rsb \<Longrightarrow> rsimp (rsimp_ALTs (rsb @ a1 # rs)) =  rsimp (rsimp_ALTs (rsb @ rs))"
+  apply(induct rs arbitrary: rsa rsb a1)
+     apply simp
+  apply simp
+  apply(case_tac " a \<in> set rsa")
    apply simp
+   prefer 2
+   apply simp
+   apply(drule_tac x = "rsa @ [a]" in meta_spec)
+   apply simp
+
 
   sorry
 
@@ -316,9 +588,9 @@
   done
 
 lemma distinct_removes_last3:
-  shows "\<lbrakk>(a::rrexp) \<in> set as\<rbrakk>
+  shows "\<lbrakk>a \<in> set as\<rbrakk>
     \<Longrightarrow> rdistinct as {} = rdistinct (as @ [a]) {}"
-  using distinct_removes_last2 by blast
+  by (simp add: distinct_removes_last2)
 
 lemma set_inclusion_with_flts1:
   shows " \<lbrakk>RONE \<in> set rs\<rbrakk> \<Longrightarrow> RONE  \<in> set (rflts rs)"
@@ -367,6 +639,8 @@
           \<Longrightarrow> rsimp_ALTs (rdistinct (rflts (map rsimp as @ [rsimp a])) {}) = 
 rsimp_ALTs (rdistinct (rflts (map rsimp as @ x5)) {})"
 
+  sorry
+
 
 lemma last_elem_dup1:
   shows " a \<in> set as \<Longrightarrow> rsimp (RALTS (as @ [a] )) = rsimp (RALTS (as ))"
@@ -461,6 +735,29 @@
            rsimp_ALTs (rdistinct (rflts rs) {})"
   by (metis map_idI rsimp.simps(2) rsimp_idem)
 
+
+lemma add0_isomorphic:
+  shows "rsimp_ALTs (rdistinct (rflts [rsimp r, RZERO]) {}) = rsimp r"
+  sorry
+
+
+lemma distinct_append_simp:
+  shows " rsimp (rsimp_ALTs rs1) = rsimp (rsimp_ALTs rs2) \<Longrightarrow>
+           rsimp (rsimp_ALTs (f a # rs1)) =
+           rsimp (rsimp_ALTs (f a # rs2))"
+  apply(case_tac rs1)
+   apply simp
+   apply(case_tac rs2)
+    apply simp
+   apply simp
+   prefer 2
+   apply(case_tac list)
+    apply(case_tac rs2)
+     apply simp
+  using add0_isomorphic apply blast 
+    apply simp
+  sorry
+
 lemma alts_closed_form: shows 
 "rsimp (rders_simp (RALTS rs) s) = 
 rsimp (RALTS (map (\<lambda>r. rders_simp r s) rs))"
@@ -501,12 +798,11 @@
     apply(subst repeated_altssimp)
      apply simp
   apply fastforce
-  apply (metis inside_simp_removal list.map_comp rder.simps(4) rsimp.simps(2) rsimp_idem)
-  
-(*  apply (metis head_one_more_simp list.inject list.map_comp list.simps(9) rders_simp_lambda rsimp.simps(2))
-*)
+   apply (metis inside_simp_removal list.map_comp rder.simps(4) rsimp.simps(2) rsimp_idem)
+  sledgehammer
+ (* by (metis inside_simp_removal rder_rsimp_ALTs_commute self_append_conv2 set_empty simp_more_distinct)
 
-  sorry
+  *)
 
 lemma alts_closed_form_variant: shows 
 "s \<noteq> [] \<Longrightarrow> rders_simp (RALTS rs) s =