diff -r c80720289645 -r 0efa7ffd96ff thys2/Journal/Paper.thy~ --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/thys2/Journal/Paper.thy~ Tue Jan 11 23:58:39 2022 +0000 @@ -0,0 +1,2313 @@ +(*<*) +theory Paper +imports + "../Lexer" + "../Simplifying" + "../Positions" + + "../SizeBound" + "HOL-Library.LaTeXsugar" +begin + +lemma Suc_0_fold: + "Suc 0 = 1" +by simp + + + +declare [[show_question_marks = false]] + +syntax (latex output) + "_Collect" :: "pttrn => bool => 'a set" ("(1{_ \<^latex>\\\mbox{\\boldmath$\\mid$}\ _})") + "_CollectIn" :: "pttrn => 'a set => bool => 'a set" ("(1{_ \ _ |e _})") + +syntax + "_Not_Ex" :: "idts \ bool \ bool" ("(3\_.a./ _)" [0, 10] 10) + "_Not_Ex1" :: "pttrn \ bool \ bool" ("(3\!_.a./ _)" [0, 10] 10) + + +abbreviation + "der_syn r c \ der c r" + +abbreviation + "ders_syn r s \ ders s r" + + abbreviation + "bder_syn r c \ bder c r" + +abbreviation + "bders_syn r s \ bders r s" + + +abbreviation + "nprec v1 v2 \ \(v1 :\val v2)" + + + + +notation (latex output) + If ("(\<^latex>\\\textrm{\if\<^latex>\}\ (_)/ \<^latex>\\\textrm{\then\<^latex>\}\ (_)/ \<^latex>\\\textrm{\else\<^latex>\}\ (_))" 10) and + Cons ("_\<^latex>\\\mbox{$\\,$}\::\<^latex>\\\mbox{$\\,$}\_" [75,73] 73) and + + ZERO ("\<^bold>0" 81) and + ONE ("\<^bold>1" 81) and + CH ("_" [1000] 80) and + ALT ("_ + _" [77,77] 78) and + SEQ ("_ \ _" [77,77] 78) and + STAR ("_\<^sup>\" [79] 78) and + + val.Void ("Empty" 78) and + val.Char ("Char _" [1000] 78) and + val.Left ("Left _" [79] 78) and + val.Right ("Right _" [1000] 78) and + val.Seq ("Seq _ _" [79,79] 78) and + val.Stars ("Stars _" [79] 78) and + + L ("L'(_')" [10] 78) and + LV ("LV _ _" [80,73] 78) and + der_syn ("_\\_" [79, 1000] 76) and + ders_syn ("_\\_" [79, 1000] 76) and + flat ("|_|" [75] 74) and + flats ("|_|" [72] 74) and + Sequ ("_ @ _" [78,77] 63) and + injval ("inj _ _ _" [79,77,79] 76) and + mkeps ("mkeps _" [79] 76) and + length ("len _" [73] 73) and + intlen ("len _" [73] 73) and + set ("_" [73] 73) and + + Prf ("_ : _" [75,75] 75) and + Posix ("'(_, _') \ _" [63,75,75] 75) and + + lexer ("lexer _ _" [78,78] 77) and + F_RIGHT ("F\<^bsub>Right\<^esub> _") and + F_LEFT ("F\<^bsub>Left\<^esub> _") and + F_ALT ("F\<^bsub>Alt\<^esub> _ _") and + F_SEQ1 ("F\<^bsub>Seq1\<^esub> _ _") and + F_SEQ2 ("F\<^bsub>Seq2\<^esub> _ _") and + F_SEQ ("F\<^bsub>Seq\<^esub> _ _") and + simp_SEQ ("simp\<^bsub>Seq\<^esub> _ _" [1000, 1000] 1) and + simp_ALT ("simp\<^bsub>Alt\<^esub> _ _" [1000, 1000] 1) and + slexer ("lexer\<^sup>+" 1000) and + + at ("_\<^latex>\\\mbox{$\\downharpoonleft$}\\<^bsub>_\<^esub>") and + lex_list ("_ \\<^bsub>lex\<^esub> _") and + PosOrd ("_ \\<^bsub>_\<^esub> _" [77,77,77] 77) and + PosOrd_ex ("_ \ _" [77,77] 77) and + PosOrd_ex_eq ("_ \<^latex>\\\mbox{$\\preccurlyeq$}\ _" [77,77] 77) and + pflat_len ("\_\\<^bsub>_\<^esub>") and + nprec ("_ \<^latex>\\\mbox{$\\not\\prec$}\ _" [77,77] 77) and + + bder_syn ("_\<^latex>\\\mbox{$\\bbslash$}\_" [79, 1000] 76) and + bders_syn ("_\<^latex>\\\mbox{$\\bbslash$}\_" [79, 1000] 76) and + intern ("_\<^latex>\\\mbox{$^\\uparrow$}\" [900] 80) and + erase ("_\<^latex>\\\mbox{$^\\downarrow$}\" [1000] 74) and + bnullable ("nullable\<^latex>\\\mbox{$_b$}\ _" [1000] 80) and + bmkeps ("mkeps\<^latex>\\\mbox{$_b$}\ _" [1000] 80) and + blexer ("lexer\<^latex>\\\mbox{$_b$}\ _ _" [77, 77] 80) and + code ("code _" [79] 74) and + + DUMMY ("\<^latex>\\\underline{\\hspace{2mm}}\") + + +definition + "match r s \ nullable (ders s r)" + + +lemma LV_STAR_ONE_empty: + shows "LV (STAR ONE) [] = {Stars []}" 
+by(auto simp add: LV_def elim: Prf.cases intro: Prf.intros)
+
+
+
+(*
+comments not implemented
+
+p9. The condition "not exists s3 s4..." appears often enough (in particular in
+the proof of Lemma 3) to warrant a definition.
+
+*)
+
+
+(*>*)
+
+section\Core of the proof\
+text \
+This paper builds on previous work by Ausaf and Urban in which
+bit-coded derivatives of regular expressions are used for lexing that
+is both fast and satisfies the POSIX specification.
+In that work, a bit-coded algorithm introduced by Sulzmann and Lu
+was formally verified in Isabelle by a very clever use of the
+functions \flex\ and \retrieve\, which carefully mimic the way a value
+is built up by the injection function.
+
+In this previous work, Ausaf and Urban established the following equality:
+\begin{lemma}
+@{thm [mode=IfThen] MAIN_decode}
+\end{lemma}
+
+This lemma establishes a link with the lexer without bit-codes.
+With it we obtain the correctness of the bit-coded algorithm:
+\begin{lemma}
+@{thm [mode=IfThen] blexer_correctness}
+\end{lemma}
+
+What is not certain, however, is whether we can add a simplification phase
+to the bit-coded algorithm without breaking the correctness of the lexing output.
+
+The reason we need to add a simplification phase
+after each derivative step of $\textit{blexer}$ is
+that the algorithm otherwise produces intermediate
+regular expressions that can grow exponentially.
+For example, the regular expression $(a+aa)^*$, after taking
+derivatives with respect to just 10 $a$s, has size 8192.
+
+%TODO: add figure for this?
+
+Therefore, we insert a simplification phase
+after each derivative step, as defined below:
+\begin{definition}
+@{thm blexer_simp_def}
+\end{definition}
+
+The simplification function is given as follows:
+
+\begin{center}
+  \begin{tabular}{lcl}
+  @{thm (lhs) bsimp.simps(1)[of "bs" "r\<^sub>1" "r\<^sub>2"]} & $\dn$ & @{thm (rhs) bsimp.simps(1)[of "bs" "r\<^sub>1" "r\<^sub>2"]}\\
+  @{thm (lhs) bsimp.simps(2)} & $\dn$ & @{thm (rhs) bsimp.simps(2)}\\
+  @{thm (lhs) bsimp.simps(3)} & $\dn$ & @{thm (rhs) bsimp.simps(3)}\\
+
+\end{tabular}
+\end{center}
+
+Its helper function for alternatives, $\textit{bsimp\_AALTs}$, is given by:
+\begin{center}
+  \begin{tabular}{lcl}
+  @{thm (lhs) bsimp_AALTs.simps(1)} & $\dn$ & @{thm (rhs) bsimp_AALTs.simps(1)}\\
+  @{thm (lhs) bsimp_AALTs.simps(2)[of "bs\<^sub>1" "r"]} & $\dn$ & @{thm (rhs) bsimp_AALTs.simps(2)[of "bs\<^sub>1" "r"]}\\
+  @{thm (lhs) bsimp_AALTs.simps(3)} & $\dn$ & @{thm (rhs) bsimp_AALTs.simps(3)}\\
+
+\end{tabular}
+\end{center}
+
+Adding simplification might sound trivial as long as only a YES/NO answer
+has to be produced, but once an actual lexing output is required (as in
+applications such as compiler front-ends or the extraction of malicious
+attack domains), it is not straightforward to still extract exactly the
+value prescribed by the POSIX standard.
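+To make the size blow-up mentioned above concrete, the following
+standalone Isabelle sketch can be evaluated. It is not part of this
+development: the theory name and the functions \rexp\, \der\, \ders\
+and \size_r\ are hypothetical and merely mirror the corresponding
+definitions in our theories.
+
+\begin{verbatim}
+theory SizeBlowupSketch
+  imports Main
+begin
+
+(* plain, unannotated regular expressions *)
+datatype rexp = ZERO | ONE | CH char
+  | ALT rexp rexp | SEQ rexp rexp | STAR rexp
+
+fun nullable :: "rexp ⇒ bool" where
+  "nullable ZERO = False"
+| "nullable ONE = True"
+| "nullable (CH _) = False"
+| "nullable (ALT r1 r2) = (nullable r1 ∨ nullable r2)"
+| "nullable (SEQ r1 r2) = (nullable r1 ∧ nullable r2)"
+| "nullable (STAR _) = True"
+
+fun der :: "char ⇒ rexp ⇒ rexp" where
+  "der _ ZERO = ZERO"
+| "der _ ONE = ZERO"
+| "der c (CH d) = (if c = d then ONE else ZERO)"
+| "der c (ALT r1 r2) = ALT (der c r1) (der c r2)"
+| "der c (SEQ r1 r2) =
+    (if nullable r1 then ALT (SEQ (der c r1) r2) (der c r2)
+     else SEQ (der c r1) r2)"
+| "der c (STAR r) = SEQ (der c r) (STAR r)"
+
+fun ders :: "string ⇒ rexp ⇒ rexp" where
+  "ders [] r = r"
+| "ders (c # s) r = ders s (der c r)"
+
+(* number of nodes in a regular expression *)
+fun size_r :: "rexp ⇒ nat" where
+  "size_r ZERO = 1"
+| "size_r ONE = 1"
+| "size_r (CH _) = 1"
+| "size_r (ALT r1 r2) = Suc (size_r r1 + size_r r2)"
+| "size_r (SEQ r1 r2) = Suc (size_r r1 + size_r r2)"
+| "size_r (STAR r) = Suc (size_r r)"
+
+(* (a + aa)* *)
+definition aaa :: rexp where
+  "aaa = STAR (ALT (CH (CHR ''a''))
+               (SEQ (CH (CHR ''a'')) (CH (CHR ''a''))))"
+
+(* evaluating the size of the derivative after 10 a's
+   exhibits the rapid growth without simplification *)
+value "size_r (ders ''aaaaaaaaaa'' aaa)"
+
+end
+\end{verbatim}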
+
+
+
+
+By simplification, we mean specifically the following rewrite rules:
+
+\begin{center}
+  \begin{tabular}{lcl}
+  @{thm[mode=Axiom] rrewrite.intros(1)[of "bs" "r\<^sub>2"]}\\
+  @{thm[mode=Axiom] rrewrite.intros(2)[of "bs" "r\<^sub>1"]}\\
+  @{thm[mode=Axiom] rrewrite.intros(3)[of "bs" "bs\<^sub>1" "r\<^sub>1"]}\\
+  @{thm[mode=Rule] rrewrite.intros(4)[of "r\<^sub>1" "r\<^sub>2" "bs" "r\<^sub>3"]}\\
+  @{thm[mode=Rule] rrewrite.intros(5)[of "r\<^sub>3" "r\<^sub>4" "bs" "r\<^sub>1"]}\\
+  @{thm[mode=Rule] rrewrite.intros(6)[of "r" "r'" "bs" "rs\<^sub>1" "rs\<^sub>2"]}\\
+  @{thm[mode=Axiom] rrewrite.intros(7)[of "bs" "rs\<^sub>a" "rs\<^sub>b"]}\\
+  @{thm[mode=Axiom] rrewrite.intros(8)[of "bs" "rs\<^sub>a" "bs\<^sub>1" "rs\<^sub>1" "rs\<^sub>b"]}\\
+  @{thm[mode=Axiom] rrewrite.intros(9)[of "bs" "bs\<^sub>1" "rs"]}\\
+  @{thm[mode=Axiom] rrewrite.intros(10)[of "bs"]}\\
+  @{thm[mode=Axiom] rrewrite.intros(11)[of "bs" "r\<^sub>1"]}\\
+  @{thm[mode=Rule] rrewrite.intros(12)[of "a\<^sub>1" "a\<^sub>2" "bs" "rs\<^sub>a" "rs\<^sub>b" "rs\<^sub>c"]}\\
+
+  \end{tabular}
+\end{center}
+
+These rewrite rules are implemented compactly by the simplification
+function given above, where the helper function $\textit{bsimp\_AALTs}$
+takes care of the rules concerning alternatives.
+
+The core idea of the proof is that if two regular expressions are
+``isomorphic'' up to a finite number of rewrite steps, then they remain
+``isomorphic'' when we take the same sequence of derivatives on both of
+them. This is expressed by the following lemma about the rewrite relation:
+\begin{lemma}
+@{thm [mode=IfThen] central}
+\end{lemma}
+
+This relation implies a property that leads to the correctness result:
+if two (nullable) regular expressions are ``rewritable'' in many steps
+from one another, then a call to the function $\textit{bmkeps}$ gives the
+same bit-sequence:
+\begin{lemma}
+@{thm [mode=IfThen] rewrites_bmkeps}
+\end{lemma}
+
+Given the same bit-sequence, the decode function produces the same value,
+which is the output of both lexers:
+\begin{definition}
+@{thm blexer_def}
+\end{definition}
+
+\begin{definition}
+@{thm blexer_simp_def}
+\end{definition}
+
+And that yields the correctness result:
+\begin{lemma}
+@{thm blexersimp_correctness}
+\end{lemma}
+
+The nice thing about this proof structure is that the correctness of the
+simplified lexer is reduced to a small number of properties of the
+rewrite relation.
+\
+
+
+section \ Additional Simp Rules?\
+
+
+text \
+A natural question is whether we can add more ``atomic''
+simplification/rewriting rules, so that the simplification becomes even
+more aggressive, making the intermediate results smaller and therefore
+more space-efficient. For example, one might want to open up
+alternatives that occur as children of a sequence:
+
+\begin{center}
+  \begin{tabular}{lcl}
+  @{thm[mode=Rule] aggressive.intros(1)[of "bs" "bs1" "rs" "r"]}\\
+  \end{tabular}
+\end{center}
+
+This rule allows us to simplify \mbox{@{term "(ALT (SEQ (ALT a b) c) (SEQ a c))"}}
+into \mbox{@{term "ALT (SEQ a c) (SEQ b c)"}},
+which cannot be done under the rules above, because there only
+alternatives that are children of another alternative can be spilled
+out. A small executable illustration of this distribution step is given
+below.
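+The following standalone Isabelle sketch shows the distribution step in
+isolation. The function \distrib\ is hypothetical: it performs only a
+single, top-level distribution on plain, unannotated regular
+expressions, whereas the actual rule above additionally has to fuse
+bit-sequences.
+
+\begin{verbatim}
+theory DistribSketch
+  imports Main
+begin
+
+datatype rexp = ZERO | ONE | CH char
+  | ALT rexp rexp | SEQ rexp rexp | STAR rexp
+
+(* one top-level step of opening up an alternative
+   that is the left child of a sequence *)
+fun distrib :: "rexp ⇒ rexp" where
+  "distrib (SEQ (ALT r1 r2) r3) = ALT (SEQ r1 r3) (SEQ r2 r3)"
+| "distrib r = r"
+
+(* (a + b)c  is rewritten to  ac + bc *)
+value "distrib (SEQ (ALT (CH (CHR ''a'')) (CH (CHR ''b'')))
+                (CH (CHR ''c'')))"
+
+end
+\end{verbatim}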
+\
+
+section \Introduction\
+
+
+text \
+
+
+Brzozowski \cite{Brzozowski1964} introduced the notion of the {\em
+derivative} @{term "der c r"} of a regular expression \r\ w.r.t.\
+a character~\c\, and showed that it gave a simple solution to the
+problem of matching a string @{term s} with a regular expression @{term
+r}: if the derivative of @{term r} w.r.t.\ (in succession) all the
+characters of the string matches the empty string, then @{term r}
+matches @{term s} (and {\em vice versa}). The derivative has the
+property (which may almost be regarded as its specification) that, for
+every string @{term s} and regular expression @{term r} and character
+@{term c}, one has @{term "cs \ L(r)"} if and only if \mbox{@{term "s \ L(der c r)"}}.
+The beauty of Brzozowski's derivatives is that
+they are neatly expressible in any functional language, and easily
+definable and reasoned about in theorem provers---the definitions just
+consist of inductive datatypes and simple recursive functions. A
+mechanised correctness proof of Brzozowski's matcher in, for example, HOL4
+has been mentioned by Owens and Slind~\cite{Owens2008}. Another one in
+Isabelle/HOL is part of the work by Krauss and Nipkow \cite{Krauss2011}.
+And another one in Coq is given by Coquand and Siles \cite{Coquand2012}.
+
+If a regular expression matches a string, then in general there is more
+than one way of how the string is matched. There are two commonly used
+disambiguation strategies to generate a unique answer: one is called
+GREEDY matching \cite{Frisch2004} and the other is POSIX
+matching~\cite{POSIX,Kuklewicz,OkuiSuzuki2010,Sulzmann2014,Vansummeren2006}.
+For example consider the string @{term xy} and the regular expression
+\mbox{@{term "STAR (ALT (ALT x y) xy)"}}. Either the string can be
+matched in two `iterations' by the single letter-regular expressions
+@{term x} and @{term y}, or directly in one iteration by @{term xy}. The
+first case corresponds to GREEDY matching, which first matches with the
+left-most symbol and only matches the next symbol in case of a mismatch
+(this is greedy in the sense of preferring instant gratification to
+delayed repletion). The second case is POSIX matching, which prefers the
+longest match.
+
+In the context of lexing, where an input string needs to be split up
+into a sequence of tokens, POSIX is the more natural disambiguation
+strategy for what programmers consider basic syntactic building blocks
+in their programs. These building blocks are often specified by some
+regular expressions, say \r\<^bsub>key\<^esub>\ and \r\<^bsub>id\<^esub>\ for recognising keywords and identifiers,
+respectively.
There are a few underlying (informal) rules behind
+tokenising a string in a POSIX \cite{POSIX} fashion:
+
+\begin{itemize}
+\item[$\bullet$] \emph{The Longest Match Rule} (or \emph{``{M}aximal {M}unch {R}ule''}):
+The longest initial substring matched by any regular expression is taken as
+next token.\smallskip
+
+\item[$\bullet$] \emph{Priority Rule:}
+For a particular longest initial substring, the first (leftmost) regular expression
+that can match determines the token.\smallskip
+
+\item[$\bullet$] \emph{Star Rule:} A subexpression repeated by ${}^\star$ shall
+not match an empty string unless this is the only match for the repetition.\smallskip
+
+\item[$\bullet$] \emph{Empty String Rule:} An empty string shall be considered to
+be longer than no match at all.
+\end{itemize}
+
+\noindent Consider for example a regular expression \r\<^bsub>key\<^esub>\ for recognising keywords such as \if\,
+\then\ and so on; and \r\<^bsub>id\<^esub>\
+recognising identifiers (say, a single character followed by
+characters or numbers). Then we can form the regular expression
+\(r\<^bsub>key\<^esub> + r\<^bsub>id\<^esub>)\<^sup>\\
+and use POSIX matching to tokenise strings, say \iffoo\ and
+\if\. For \iffoo\ we obtain by the Longest Match Rule
+a single identifier token, not a keyword followed by an
+identifier. For \if\ we obtain by the Priority Rule a keyword
+token, not an identifier token---even if \r\<^bsub>id\<^esub>\
+matches also. By the Star Rule we know \(r\<^bsub>key\<^esub> +
+r\<^bsub>id\<^esub>)\<^sup>\\ matches \iffoo\,
+respectively \if\, in exactly one `iteration' of the star. The
+Empty String Rule is for cases where, for example, the regular expression
+\(a\<^sup>\)\<^sup>\\ matches against the
+string \bc\. Then the longest initial matched substring is the
+empty string, which is matched by both the whole regular expression
+and the parenthesised subexpression.
+
+
+One limitation of Brzozowski's matcher is that it only generates a
+YES/NO answer for whether a string is being matched by a regular
+expression. Sulzmann and Lu~\cite{Sulzmann2014} extended this matcher
+to allow generation not just of a YES/NO answer but of an actual
+matching, called a [lexical] {\em value}. Assuming a regular
+expression matches a string, values encode the information of
+\emph{how} the string is matched by the regular expression---that is,
+which part of the string is matched by which part of the regular
+expression. For this consider again the string \xy\ and
+the regular expression \mbox{\(x + (y + xy))\<^sup>\\}
+(this time fully parenthesised). We can view this regular expression
+as a tree and if the string \xy\ is matched by two Star
+`iterations', then the \x\ is matched by the left-most
+alternative in this tree and the \y\ by the right-left alternative. This
+suggests recording this matching as
+
+\begin{center}
+@{term "Stars [Left(Char x), Right(Left(Char y))]"}
+\end{center}
+
+\noindent where @{const Stars}, \Left\, \Right\ and \Char\ are constructors for values. \Stars\ records how many
+iterations were used; \Left\, respectively \Right\, which
+alternative is used. This `tree view' leads naturally to the idea that
+regular expressions act as types and values as inhabiting those types
+(see, for example, \cite{HosoyaVouillonPierce2005}).
The value for +matching \xy\ in a single `iteration', i.e.~the POSIX value, +would look as follows + +\begin{center} +@{term "Stars [Seq (Char x) (Char y)]"} +\end{center} + +\noindent where @{const Stars} has only a single-element list for the +single iteration and @{const Seq} indicates that @{term xy} is matched +by a sequence regular expression. + +%, which we will in what follows +%write more formally as @{term "SEQ x y"}. + + +Sulzmann and Lu give a simple algorithm to calculate a value that +appears to be the value associated with POSIX matching. The challenge +then is to specify that value, in an algorithm-independent fashion, +and to show that Sulzmann and Lu's derivative-based algorithm does +indeed calculate a value that is correct according to the +specification. The answer given by Sulzmann and Lu +\cite{Sulzmann2014} is to define a relation (called an ``order +relation'') on the set of values of @{term r}, and to show that (once +a string to be matched is chosen) there is a maximum element and that +it is computed by their derivative-based algorithm. This proof idea is +inspired by work of Frisch and Cardelli \cite{Frisch2004} on a GREEDY +regular expression matching algorithm. However, we were not able to +establish transitivity and totality for the ``order relation'' by +Sulzmann and Lu. There are some inherent problems with their approach +(of which some of the proofs are not published in +\cite{Sulzmann2014}); perhaps more importantly, we give in this paper +a simple inductive (and algorithm-independent) definition of what we +call being a {\em POSIX value} for a regular expression @{term r} and +a string @{term s}; we show that the algorithm by Sulzmann and Lu +computes such a value and that such a value is unique. Our proofs are +both done by hand and checked in Isabelle/HOL. The experience of +doing our proofs has been that this mechanical checking was absolutely +essential: this subject area has hidden snares. This was also noted by +Kuklewicz \cite{Kuklewicz} who found that nearly all POSIX matching +implementations are ``buggy'' \cite[Page 203]{Sulzmann2014} and by +Grathwohl et al \cite[Page 36]{CrashCourse2014} who wrote: + +\begin{quote} +\it{}``The POSIX strategy is more complicated than the greedy because of +the dependence on information about the length of matched strings in the +various subexpressions.'' +\end{quote} + + + +\noindent {\bf Contributions:} We have implemented in Isabelle/HOL the +derivative-based regular expression matching algorithm of +Sulzmann and Lu \cite{Sulzmann2014}. We have proved the correctness of this +algorithm according to our specification of what a POSIX value is (inspired +by work of Vansummeren \cite{Vansummeren2006}). Sulzmann +and Lu sketch in \cite{Sulzmann2014} an informal correctness proof: but to +us it contains unfillable gaps.\footnote{An extended version of +\cite{Sulzmann2014} is available at the website of its first author; this +extended version already includes remarks in the appendix that their +informal proof contains gaps, and possible fixes are not fully worked out.} +Our specification of a POSIX value consists of a simple inductive definition +that given a string and a regular expression uniquely determines this value. +We also show that our definition is equivalent to an ordering +of values based on positions by Okui and Suzuki \cite{OkuiSuzuki2010}. + +%Derivatives as calculated by Brzozowski's method are usually more complex +%regular expressions than the initial one; various optimisations are +%possible. 
We prove the correctness when simplifications of @{term "ALT ZERO r"},
+%@{term "ALT r ZERO"}, @{term "SEQ ONE r"} and @{term "SEQ r ONE"} to
+%@{term r} are applied.
+
+We also extend our results to the bit-coded version of the algorithm by
+Sulzmann and Lu (see the first section of this document).
+
+\
+
+
+
+
+section \Preliminaries\
+
+text \\noindent Strings in Isabelle/HOL are lists of characters with
+the empty string being represented by the empty list, written @{term
+"[]"}, and list-cons being written as @{term "DUMMY # DUMMY"}. Often
+we use the usual bracket notation for lists also for strings; for
+example a string consisting of just a single character @{term c} is
+written @{term "[c]"}. We use the usual definitions for
+\emph{prefixes} and \emph{strict prefixes} of strings. By using the
+type @{type char} for characters we have a supply of finitely many
+characters roughly corresponding to the ASCII character set. Regular
+expressions are defined as usual as the elements of the following
+inductive datatype:
+
+  \begin{center}
+  \r :=\
+  @{const "ZERO"} $\mid$
+  @{const "ONE"} $\mid$
+  @{term "CH c"} $\mid$
+  @{term "ALT r\<^sub>1 r\<^sub>2"} $\mid$
+  @{term "SEQ r\<^sub>1 r\<^sub>2"} $\mid$
+  @{term "STAR r"}
+  \end{center}
+
+  \noindent where @{const ZERO} stands for the regular expression that does
+  not match any string, @{const ONE} for the regular expression that matches
+  only the empty string and @{term c} for matching a character literal. The
+  language of a regular expression is also defined as usual by the
+  recursive function @{term L} with the six clauses:
+
+  \begin{center}
+  \begin{tabular}{l@ {\hspace{4mm}}rcl}
+  \textit{(1)} & @{thm (lhs) L.simps(1)} & $\dn$ & @{thm (rhs) L.simps(1)}\\
+  \textit{(2)} & @{thm (lhs) L.simps(2)} & $\dn$ & @{thm (rhs) L.simps(2)}\\
+  \textit{(3)} & @{thm (lhs) L.simps(3)} & $\dn$ & @{thm (rhs) L.simps(3)}\\
+  \textit{(4)} & @{thm (lhs) L.simps(4)[of "r\<^sub>1" "r\<^sub>2"]} & $\dn$ &
+  @{thm (rhs) L.simps(4)[of "r\<^sub>1" "r\<^sub>2"]}\\
+  \textit{(5)} & @{thm (lhs) L.simps(5)[of "r\<^sub>1" "r\<^sub>2"]} & $\dn$ &
+  @{thm (rhs) L.simps(5)[of "r\<^sub>1" "r\<^sub>2"]}\\
+  \textit{(6)} & @{thm (lhs) L.simps(6)} & $\dn$ & @{thm (rhs) L.simps(6)}\\
+  \end{tabular}
+  \end{center}
+
+  \noindent In clause \textit{(4)} we use the operation @{term "DUMMY ;;
+  DUMMY"} for the concatenation of two languages (it is also list-append for
+  strings). We use the star-notation for regular expressions and for
+  languages (in the last clause above). The star for languages is defined
+  inductively by two clauses: \(i)\ the empty string being in
+  the star of a language and \(ii)\ if @{term "s\<^sub>1"} is in a
+  language and @{term "s\<^sub>2"} in the star of this language, then also @{term
+  "s\<^sub>1 @ s\<^sub>2"} is in the star of this language. It will also be convenient
+  to use the following notion of a \emph{semantic derivative} (or \emph{left
+  quotient}) of a language defined as
+  %
+  \begin{center}
+  @{thm Der_def}\;.
+ \end{center} + + \noindent + For semantic derivatives we have the following equations (for example + mechanically proved in \cite{Krauss2011}): + % + \begin{equation}\label{SemDer} + \begin{array}{lcl} + @{thm (lhs) Der_null} & \dn & @{thm (rhs) Der_null}\\ + @{thm (lhs) Der_empty} & \dn & @{thm (rhs) Der_empty}\\ + @{thm (lhs) Der_char} & \dn & @{thm (rhs) Der_char}\\ + @{thm (lhs) Der_union} & \dn & @{thm (rhs) Der_union}\\ + @{thm (lhs) Der_Sequ} & \dn & @{thm (rhs) Der_Sequ}\\ + @{thm (lhs) Der_star} & \dn & @{thm (rhs) Der_star} + \end{array} + \end{equation} + + + \noindent \emph{\Brz's derivatives} of regular expressions + \cite{Brzozowski1964} can be easily defined by two recursive functions: + the first is from regular expressions to booleans (implementing a test + when a regular expression can match the empty string), and the second + takes a regular expression and a character to a (derivative) regular + expression: + + \begin{center} + \begin{tabular}{lcl} + @{thm (lhs) nullable.simps(1)} & $\dn$ & @{thm (rhs) nullable.simps(1)}\\ + @{thm (lhs) nullable.simps(2)} & $\dn$ & @{thm (rhs) nullable.simps(2)}\\ + @{thm (lhs) nullable.simps(3)} & $\dn$ & @{thm (rhs) nullable.simps(3)}\\ + @{thm (lhs) nullable.simps(4)[of "r\<^sub>1" "r\<^sub>2"]} & $\dn$ & @{thm (rhs) nullable.simps(4)[of "r\<^sub>1" "r\<^sub>2"]}\\ + @{thm (lhs) nullable.simps(5)[of "r\<^sub>1" "r\<^sub>2"]} & $\dn$ & @{thm (rhs) nullable.simps(5)[of "r\<^sub>1" "r\<^sub>2"]}\\ + @{thm (lhs) nullable.simps(6)} & $\dn$ & @{thm (rhs) nullable.simps(6)}\medskip\\ + +% \end{tabular} +% \end{center} + +% \begin{center} +% \begin{tabular}{lcl} + + @{thm (lhs) der.simps(1)} & $\dn$ & @{thm (rhs) der.simps(1)}\\ + @{thm (lhs) der.simps(2)} & $\dn$ & @{thm (rhs) der.simps(2)}\\ + @{thm (lhs) der.simps(3)} & $\dn$ & @{thm (rhs) der.simps(3)}\\ + @{thm (lhs) der.simps(4)[of c "r\<^sub>1" "r\<^sub>2"]} & $\dn$ & @{thm (rhs) der.simps(4)[of c "r\<^sub>1" "r\<^sub>2"]}\\ + @{thm (lhs) der.simps(5)[of c "r\<^sub>1" "r\<^sub>2"]} & $\dn$ & @{thm (rhs) der.simps(5)[of c "r\<^sub>1" "r\<^sub>2"]}\\ + @{thm (lhs) der.simps(6)} & $\dn$ & @{thm (rhs) der.simps(6)} + \end{tabular} + \end{center} + + \noindent + We may extend this definition to give derivatives w.r.t.~strings: + + \begin{center} + \begin{tabular}{lcl} + @{thm (lhs) ders.simps(1)} & $\dn$ & @{thm (rhs) ders.simps(1)}\\ + @{thm (lhs) ders.simps(2)} & $\dn$ & @{thm (rhs) ders.simps(2)}\\ + \end{tabular} + \end{center} + + \noindent Given the equations in \eqref{SemDer}, it is a relatively easy + exercise in mechanical reasoning to establish that + + \begin{proposition}\label{derprop}\mbox{}\\ + \begin{tabular}{ll} + \textit{(1)} & @{thm (lhs) nullable_correctness} if and only if + @{thm (rhs) nullable_correctness}, and \\ + \textit{(2)} & @{thm[mode=IfThen] der_correctness}. + \end{tabular} + \end{proposition} + + \noindent With this in place it is also very routine to prove that the + regular expression matcher defined as + % + \begin{center} + @{thm match_def} + \end{center} + + \noindent gives a positive answer if and only if @{term "s \ L r"}. + Consequently, this regular expression matching algorithm satisfies the + usual specification for regular expression matching. While the matcher + above calculates a provably correct YES/NO answer for whether a regular + expression matches a string or not, the novel idea of Sulzmann and Lu + \cite{Sulzmann2014} is to append another phase to this algorithm in order + to calculate a [lexical] value. We will explain the details next. 
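+As a quick sanity check of the matcher just described, the following
+standalone Isabelle sketch can be evaluated. It is not part of this
+development: the theory name is hypothetical and the functions merely
+mirror the definitions above.
+
+\begin{verbatim}
+theory MatcherSketch
+  imports Main
+begin
+
+datatype rexp = ZERO | ONE | CH char
+  | ALT rexp rexp | SEQ rexp rexp | STAR rexp
+
+fun nullable :: "rexp ⇒ bool" where
+  "nullable ZERO = False"
+| "nullable ONE = True"
+| "nullable (CH _) = False"
+| "nullable (ALT r1 r2) = (nullable r1 ∨ nullable r2)"
+| "nullable (SEQ r1 r2) = (nullable r1 ∧ nullable r2)"
+| "nullable (STAR _) = True"
+
+fun der :: "char ⇒ rexp ⇒ rexp" where
+  "der _ ZERO = ZERO"
+| "der _ ONE = ZERO"
+| "der c (CH d) = (if c = d then ONE else ZERO)"
+| "der c (ALT r1 r2) = ALT (der c r1) (der c r2)"
+| "der c (SEQ r1 r2) =
+    (if nullable r1 then ALT (SEQ (der c r1) r2) (der c r2)
+     else SEQ (der c r1) r2)"
+| "der c (STAR r) = SEQ (der c r) (STAR r)"
+
+fun ders :: "string ⇒ rexp ⇒ rexp" where
+  "ders [] r = r"
+| "ders (c # s) r = ders s (der c r)"
+
+(* the matcher: take derivatives, then test for nullability *)
+definition matches :: "rexp ⇒ string ⇒ bool" where
+  "matches r s = nullable (ders s r)"
+
+(* ab* matches "abb": evaluates to True *)
+value "matches (SEQ (CH (CHR ''a'')) (STAR (CH (CHR ''b'')))) ''abb''"
+
+end
+\end{verbatim}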
+ +\ + +section \POSIX Regular Expression Matching\label{posixsec}\ + +text \ + + There have been many previous works that use values for encoding + \emph{how} a regular expression matches a string. + The clever idea by Sulzmann and Lu \cite{Sulzmann2014} is to + define a function on values that mirrors (but inverts) the + construction of the derivative on regular expressions. \emph{Values} + are defined as the inductive datatype + + \begin{center} + \v :=\ + @{const "Void"} $\mid$ + @{term "val.Char c"} $\mid$ + @{term "Left v"} $\mid$ + @{term "Right v"} $\mid$ + @{term "Seq v\<^sub>1 v\<^sub>2"} $\mid$ + @{term "Stars vs"} + \end{center} + + \noindent where we use @{term vs} to stand for a list of + values. (This is similar to the approach taken by Frisch and + Cardelli for GREEDY matching \cite{Frisch2004}, and Sulzmann and Lu + for POSIX matching \cite{Sulzmann2014}). The string underlying a + value can be calculated by the @{const flat} function, written + @{term "flat DUMMY"} and defined as: + + \begin{center} + \begin{tabular}[t]{lcl} + @{thm (lhs) flat.simps(1)} & $\dn$ & @{thm (rhs) flat.simps(1)}\\ + @{thm (lhs) flat.simps(2)} & $\dn$ & @{thm (rhs) flat.simps(2)}\\ + @{thm (lhs) flat.simps(3)} & $\dn$ & @{thm (rhs) flat.simps(3)}\\ + @{thm (lhs) flat.simps(4)} & $\dn$ & @{thm (rhs) flat.simps(4)} + \end{tabular}\hspace{14mm} + \begin{tabular}[t]{lcl} + @{thm (lhs) flat.simps(5)[of "v\<^sub>1" "v\<^sub>2"]} & $\dn$ & @{thm (rhs) flat.simps(5)[of "v\<^sub>1" "v\<^sub>2"]}\\ + @{thm (lhs) flat.simps(6)} & $\dn$ & @{thm (rhs) flat.simps(6)}\\ + @{thm (lhs) flat.simps(7)} & $\dn$ & @{thm (rhs) flat.simps(7)}\\ + \end{tabular} + \end{center} + + \noindent We will sometimes refer to the underlying string of a + value as \emph{flattened value}. We will also overload our notation and + use @{term "flats vs"} for flattening a list of values and concatenating + the resulting strings. + + Sulzmann and Lu define + inductively an \emph{inhabitation relation} that associates values to + regular expressions. We define this relation as + follows:\footnote{Note that the rule for @{term Stars} differs from + our earlier paper \cite{AusafDyckhoffUrban2016}. There we used the + original definition by Sulzmann and Lu which does not require that + the values @{term "v \ set vs"} flatten to a non-empty + string. The reason for introducing the more restricted version of + lexical values is convenience later on when reasoning about an + ordering relation for values.} + + \begin{center} + \begin{tabular}{c@ {\hspace{12mm}}c}\label{prfintros} + \\[-8mm] + @{thm[mode=Axiom] Prf.intros(4)} & + @{thm[mode=Axiom] Prf.intros(5)[of "c"]}\\[4mm] + @{thm[mode=Rule] Prf.intros(2)[of "v\<^sub>1" "r\<^sub>1" "r\<^sub>2"]} & + @{thm[mode=Rule] Prf.intros(3)[of "v\<^sub>2" "r\<^sub>1" "r\<^sub>2"]}\\[4mm] + @{thm[mode=Rule] Prf.intros(1)[of "v\<^sub>1" "r\<^sub>1" "v\<^sub>2" "r\<^sub>2"]} & + @{thm[mode=Rule] Prf.intros(6)[of "vs"]} + \end{tabular} + \end{center} + + \noindent where in the clause for @{const "Stars"} we use the + notation @{term "v \ set vs"} for indicating that \v\ is a + member in the list \vs\. We require in this rule that every + value in @{term vs} flattens to a non-empty string. The idea is that + @{term "Stars"}-values satisfy the informal Star Rule (see Introduction) + where the $^\star$ does not match the empty string unless this is + the only match for the repetition. 
Note also that no values are
+  associated with the regular expression @{term ZERO}, and that the
+  only value associated with the regular expression @{term ONE} is
+  @{term Void}. It is routine to establish how values ``inhabiting''
+  a regular expression correspond to the language of a regular
+  expression, namely
+
+  \begin{proposition}\label{inhabs}
+  @{thm L_flat_Prf}
+  \end{proposition}
+
+  \noindent
+  Given a regular expression \r\ and a string \s\, we define the
+  set of all \emph{Lexical Values} inhabited by \r\ with the underlying string
+  being \s\:\footnote{Okui and Suzuki refer to our lexical values
+  as \emph{canonical values} in \cite{OkuiSuzuki2010}. The notion of \emph{non-problematic
+  values} by Cardelli and Frisch \cite{Frisch2004} is related, but not identical
+  to our lexical values.}
+
+  \begin{center}
+  @{thm LV_def}
+  \end{center}
+
+  \noindent The main property of @{term "LV r s"} is that it is always finite.
+
+  \begin{proposition}
+  @{thm LV_finite}
+  \end{proposition}
+
+  \noindent This finiteness property does not hold in general if we
+  remove the side-condition about @{term "flat v \ []"} in the
+  @{term Stars}-rule above. For example, using Sulzmann and Lu's
+  less restrictive definition, @{term "LV (STAR ONE) []"} would contain
+  infinitely many values, but according to our more restricted
+  definition only a single value, namely @{thm LV_STAR_ONE_empty}.
+
+  If a regular expression \r\ matches a string \s\, then
+  generally the set @{term "LV r s"} is not just a singleton set. In
+  case of POSIX matching the problem is to calculate the unique lexical value
+  that satisfies the (informal) POSIX rules from the Introduction.
+  Graphically the POSIX value calculation algorithm by Sulzmann and Lu
+  can be illustrated by the picture in Figure~\ref{Sulz} where the
+  path from the left to the right involving @{term
+  derivatives}/@{const nullable} is the first phase of the algorithm
+  (calculating successive \Brz's derivatives) and @{const
+  mkeps}/\inj\, the path from right to left, the second
+  phase. This picture shows the steps required when a regular
+  expression, say \r\<^sub>1\, matches the string @{term
+  "[a,b,c]"}. We first build the three derivatives (according to
+  @{term a}, @{term b} and @{term c}). We then use @{const nullable}
+  to find out whether the resulting derivative regular expression
+  @{term "r\<^sub>4"} can match the empty string. If yes, we call the
+  function @{const mkeps} that produces a value @{term "v\<^sub>4"}
+  for how @{term "r\<^sub>4"} can match the empty string (taking into
+  account the POSIX constraints in case there are several ways).
This + function is defined by the clauses: + +\begin{figure}[t] +\begin{center} +\begin{tikzpicture}[scale=2,node distance=1.3cm, + every node/.style={minimum size=6mm}] +\node (r1) {@{term "r\<^sub>1"}}; +\node (r2) [right=of r1]{@{term "r\<^sub>2"}}; +\draw[->,line width=1mm](r1)--(r2) node[above,midway] {@{term "der a DUMMY"}}; +\node (r3) [right=of r2]{@{term "r\<^sub>3"}}; +\draw[->,line width=1mm](r2)--(r3) node[above,midway] {@{term "der b DUMMY"}}; +\node (r4) [right=of r3]{@{term "r\<^sub>4"}}; +\draw[->,line width=1mm](r3)--(r4) node[above,midway] {@{term "der c DUMMY"}}; +\draw (r4) node[anchor=west] {\;\raisebox{3mm}{@{term nullable}}}; +\node (v4) [below=of r4]{@{term "v\<^sub>4"}}; +\draw[->,line width=1mm](r4) -- (v4); +\node (v3) [left=of v4] {@{term "v\<^sub>3"}}; +\draw[->,line width=1mm](v4)--(v3) node[below,midway] {\inj r\<^sub>3 c\}; +\node (v2) [left=of v3]{@{term "v\<^sub>2"}}; +\draw[->,line width=1mm](v3)--(v2) node[below,midway] {\inj r\<^sub>2 b\}; +\node (v1) [left=of v2] {@{term "v\<^sub>1"}}; +\draw[->,line width=1mm](v2)--(v1) node[below,midway] {\inj r\<^sub>1 a\}; +\draw (r4) node[anchor=north west] {\;\raisebox{-8mm}{@{term "mkeps"}}}; +\end{tikzpicture} +\end{center} +\mbox{}\\[-13mm] + +\caption{The two phases of the algorithm by Sulzmann \& Lu \cite{Sulzmann2014}, +matching the string @{term "[a,b,c]"}. The first phase (the arrows from +left to right) is \Brz's matcher building successive derivatives. If the +last regular expression is @{term nullable}, then the functions of the +second phase are called (the top-down and right-to-left arrows): first +@{term mkeps} calculates a value @{term "v\<^sub>4"} witnessing +how the empty string has been recognised by @{term "r\<^sub>4"}. After +that the function @{term inj} ``injects back'' the characters of the string into +the values. +\label{Sulz}} +\end{figure} + + \begin{center} + \begin{tabular}{lcl} + @{thm (lhs) mkeps.simps(1)} & $\dn$ & @{thm (rhs) mkeps.simps(1)}\\ + @{thm (lhs) mkeps.simps(2)[of "r\<^sub>1" "r\<^sub>2"]} & $\dn$ & @{thm (rhs) mkeps.simps(2)[of "r\<^sub>1" "r\<^sub>2"]}\\ + @{thm (lhs) mkeps.simps(3)[of "r\<^sub>1" "r\<^sub>2"]} & $\dn$ & @{thm (rhs) mkeps.simps(3)[of "r\<^sub>1" "r\<^sub>2"]}\\ + @{thm (lhs) mkeps.simps(4)} & $\dn$ & @{thm (rhs) mkeps.simps(4)}\\ + \end{tabular} + \end{center} + + \noindent Note that this function needs only to be partially defined, + namely only for regular expressions that are nullable. In case @{const + nullable} fails, the string @{term "[a,b,c]"} cannot be matched by @{term + "r\<^sub>1"} and the null value @{term "None"} is returned. Note also how this function + makes some subtle choices leading to a POSIX value: for example if an + alternative regular expression, say @{term "ALT r\<^sub>1 r\<^sub>2"}, can + match the empty string and furthermore @{term "r\<^sub>1"} can match the + empty string, then we return a \Left\-value. The \Right\-value will only be returned if @{term "r\<^sub>1"} cannot match the empty + string. + + The most interesting idea from Sulzmann and Lu \cite{Sulzmann2014} is + the construction of a value for how @{term "r\<^sub>1"} can match the + string @{term "[a,b,c]"} from the value how the last derivative, @{term + "r\<^sub>4"} in Fig.~\ref{Sulz}, can match the empty string. Sulzmann and + Lu achieve this by stepwise ``injecting back'' the characters into the + values thus inverting the operation of building derivatives, but on the level + of values. 
The corresponding function, called @{term inj}, takes three + arguments, a regular expression, a character and a value. For example in + the first (or right-most) @{term inj}-step in Fig.~\ref{Sulz} the regular + expression @{term "r\<^sub>3"}, the character @{term c} from the last + derivative step and @{term "v\<^sub>4"}, which is the value corresponding + to the derivative regular expression @{term "r\<^sub>4"}. The result is + the new value @{term "v\<^sub>3"}. The final result of the algorithm is + the value @{term "v\<^sub>1"}. The @{term inj} function is defined by recursion on regular + expressions and by analysing the shape of values (corresponding to + the derivative regular expressions). + % + \begin{center} + \begin{tabular}{l@ {\hspace{5mm}}lcl} + \textit{(1)} & @{thm (lhs) injval.simps(1)} & $\dn$ & @{thm (rhs) injval.simps(1)}\\ + \textit{(2)} & @{thm (lhs) injval.simps(2)[of "r\<^sub>1" "r\<^sub>2" "c" "v\<^sub>1"]} & $\dn$ & + @{thm (rhs) injval.simps(2)[of "r\<^sub>1" "r\<^sub>2" "c" "v\<^sub>1"]}\\ + \textit{(3)} & @{thm (lhs) injval.simps(3)[of "r\<^sub>1" "r\<^sub>2" "c" "v\<^sub>2"]} & $\dn$ & + @{thm (rhs) injval.simps(3)[of "r\<^sub>1" "r\<^sub>2" "c" "v\<^sub>2"]}\\ + \textit{(4)} & @{thm (lhs) injval.simps(4)[of "r\<^sub>1" "r\<^sub>2" "c" "v\<^sub>1" "v\<^sub>2"]} & $\dn$ + & @{thm (rhs) injval.simps(4)[of "r\<^sub>1" "r\<^sub>2" "c" "v\<^sub>1" "v\<^sub>2"]}\\ + \textit{(5)} & @{thm (lhs) injval.simps(5)[of "r\<^sub>1" "r\<^sub>2" "c" "v\<^sub>1" "v\<^sub>2"]} & $\dn$ + & @{thm (rhs) injval.simps(5)[of "r\<^sub>1" "r\<^sub>2" "c" "v\<^sub>1" "v\<^sub>2"]}\\ + \textit{(6)} & @{thm (lhs) injval.simps(6)[of "r\<^sub>1" "r\<^sub>2" "c" "v\<^sub>2"]} & $\dn$ + & @{thm (rhs) injval.simps(6)[of "r\<^sub>1" "r\<^sub>2" "c" "v\<^sub>2"]}\\ + \textit{(7)} & @{thm (lhs) injval.simps(7)[of "r" "c" "v" "vs"]} & $\dn$ + & @{thm (rhs) injval.simps(7)[of "r" "c" "v" "vs"]}\\ + \end{tabular} + \end{center} + + \noindent To better understand what is going on in this definition it + might be instructive to look first at the three sequence cases (clauses + \textit{(4)} -- \textit{(6)}). In each case we need to construct an ``injected value'' for + @{term "SEQ r\<^sub>1 r\<^sub>2"}. This must be a value of the form @{term + "Seq DUMMY DUMMY"}\,. Recall the clause of the \derivative\-function + for sequence regular expressions: + + \begin{center} + @{thm (lhs) der.simps(5)[of c "r\<^sub>1" "r\<^sub>2"]} $\dn$ @{thm (rhs) der.simps(5)[of c "r\<^sub>1" "r\<^sub>2"]} + \end{center} + + \noindent Consider first the \else\-branch where the derivative is @{term + "SEQ (der c r\<^sub>1) r\<^sub>2"}. The corresponding value must therefore + be of the form @{term "Seq v\<^sub>1 v\<^sub>2"}, which matches the left-hand + side in clause~\textit{(4)} of @{term inj}. In the \if\-branch the derivative is an + alternative, namely @{term "ALT (SEQ (der c r\<^sub>1) r\<^sub>2) (der c + r\<^sub>2)"}. This means we either have to consider a \Left\- or + \Right\-value. In case of the \Left\-value we know further it + must be a value for a sequence regular expression. Therefore the pattern + we match in the clause \textit{(5)} is @{term "Left (Seq v\<^sub>1 v\<^sub>2)"}, + while in \textit{(6)} it is just @{term "Right v\<^sub>2"}. 
One more interesting
+  point is in the right-hand side of clause \textit{(6)}: since in this case the
+  regular expression \r\<^sub>1\ does not ``contribute'' to
+  matching the string (that is, it only matches the empty string), we need to
+  call @{const mkeps} in order to construct a value for how @{term "r\<^sub>1"}
+  can match this empty string. A similar argument applies for why we can
+  expect in the left-hand side of clause \textit{(7)} that the value is of the form
+  @{term "Seq v (Stars vs)"}---the derivative of a star is @{term "SEQ (der c r)
+  (STAR r)"}. Finally, the reason why we can ignore the second argument
+  in clause \textit{(1)} of @{term inj} is that it will only ever be called in cases
+  where @{term "c=d"}, but the usual linearity restrictions in patterns do
+  not allow us to build this constraint explicitly into our function
+  definition.\footnote{Sulzmann and Lu state this clause as @{thm (lhs)
+  injval.simps(1)[of "c" "c"]} $\dn$ @{thm (rhs) injval.simps(1)[of "c"]},
+  but our deviation is harmless.}
+
+  The idea of the @{term inj}-function to ``inject'' a character, say
+  @{term c}, into a value can be made precise by the first part of the
+  following lemma, which shows that the underlying string of an injected
+  value has a prepended character @{term c}; the second part shows that
+  the underlying string of an @{const mkeps}-value is always the empty
+  string (given the regular expression is nullable, since otherwise
+  \mkeps\ might not be defined).
+
+  \begin{lemma}\mbox{}\smallskip\\\label{Prf_injval_flat}
+  \begin{tabular}{ll}
+  (1) & @{thm[mode=IfThen] Prf_injval_flat}\\
+  (2) & @{thm[mode=IfThen] mkeps_flat}
+  \end{tabular}
+  \end{lemma}
+
+  \begin{proof}
+  Both properties are by routine inductions: the first one can, for example,
+  be proved by induction over the definition of @{term derivatives}; the second by
+  an induction on @{term r}. There are no interesting cases.\qed
+  \end{proof}
+
+  Having defined the @{const mkeps} and \inj\ functions, we can extend
+  \Brz's matcher so that a value is constructed (assuming the
+  regular expression matches the string). The clauses of the Sulzmann and Lu lexer are
+
+  \begin{center}
+  \begin{tabular}{lcl}
+  @{thm (lhs) lexer.simps(1)} & $\dn$ & @{thm (rhs) lexer.simps(1)}\\
+  @{thm (lhs) lexer.simps(2)} & $\dn$ & \case\ @{term "lexer (der c r) s"} \of\\\
+  & & \phantom{$|$} @{term "None"} \\\ @{term None}\\
+  & & $|$ @{term "Some v"} \\\ @{term "Some (injval r c v)"}
+  \end{tabular}
+  \end{center}
+
+  \noindent If the regular expression does not match the string, @{const None} is
+  returned. If the regular expression \emph{does}
+  match the string, then a @{const Some}-value is returned. One important
+  virtue of this algorithm is that it can be implemented with ease in any
+  functional programming language and also in Isabelle/HOL. In the remaining
+  part of this section we prove that this algorithm is correct.
+
+  The well-known idea of POSIX matching is informally defined by some
+  rules such as the Longest Match and Priority Rules (see
+  Introduction); as correctly argued in \cite{Sulzmann2014}, this
+  needs formal specification. Sulzmann and Lu define an ``ordering
+  relation'' between values and argue that there is a maximum value,
+  as given by the derivative-based algorithm. In contrast, we shall
+  introduce a simple inductive definition that specifies directly what
+  a \emph{POSIX value} is, incorporating the POSIX-specific choices
+  into the side-conditions of our rules.
Our definition is inspired by
+  the matching relation given by Vansummeren~\cite{Vansummeren2006}.
+  The relation we define is ternary and
+  written as \mbox{@{term "s \ r \ v"}}, relating
+  strings, regular expressions and values; the inductive rules are given in
+  Figure~\ref{POSIXrules}.
+  We can prove that given a string @{term s} and regular expression @{term
+  r}, the POSIX value @{term v} is uniquely determined by @{term "s \ r \ v"}.
+
+  %
+  \begin{figure}[t]
+  \begin{center}
+  \begin{tabular}{c}
+  @{thm[mode=Axiom] Posix.intros(1)}\P\@{term "ONE"} \qquad
+  @{thm[mode=Axiom] Posix.intros(2)}\P\@{term "c"}\medskip\\
+  @{thm[mode=Rule] Posix.intros(3)[of "s" "r\<^sub>1" "v" "r\<^sub>2"]}\P+L\\qquad
+  @{thm[mode=Rule] Posix.intros(4)[of "s" "r\<^sub>2" "v" "r\<^sub>1"]}\P+R\\medskip\\
+  $\mprset{flushleft}
+   \inferrule
+   {@{thm (prem 1) Posix.intros(5)[of "s\<^sub>1" "r\<^sub>1" "v\<^sub>1" "s\<^sub>2" "r\<^sub>2" "v\<^sub>2"]} \qquad
+    @{thm (prem 2) Posix.intros(5)[of "s\<^sub>1" "r\<^sub>1" "v\<^sub>1" "s\<^sub>2" "r\<^sub>2" "v\<^sub>2"]} \\\\
+    @{thm (prem 3) Posix.intros(5)[of "s\<^sub>1" "r\<^sub>1" "v\<^sub>1" "s\<^sub>2" "r\<^sub>2" "v\<^sub>2"]}}
+   {@{thm (concl) Posix.intros(5)[of "s\<^sub>1" "r\<^sub>1" "v\<^sub>1" "s\<^sub>2" "r\<^sub>2" "v\<^sub>2"]}}$\PS\\\
+  @{thm[mode=Axiom] Posix.intros(7)}\P[]\\medskip\\
+  $\mprset{flushleft}
+   \inferrule
+   {@{thm (prem 1) Posix.intros(6)[of "s\<^sub>1" "r" "v" "s\<^sub>2" "vs"]} \qquad
+    @{thm (prem 2) Posix.intros(6)[of "s\<^sub>1" "r" "v" "s\<^sub>2" "vs"]} \qquad
+    @{thm (prem 3) Posix.intros(6)[of "s\<^sub>1" "r" "v" "s\<^sub>2" "vs"]} \\\\
+    @{thm (prem 4) Posix.intros(6)[of "s\<^sub>1" "r" "v" "s\<^sub>2" "vs"]}}
+   {@{thm (concl) Posix.intros(6)[of "s\<^sub>1" "r" "v" "s\<^sub>2" "vs"]}}$\P\\
+  \end{tabular}
+  \end{center}
+  \caption{Our inductive definition of POSIX values.}\label{POSIXrules}
+  \end{figure}
+
+
+
+  \begin{theorem}\mbox{}\smallskip\\\label{posixdeterm}
+  \begin{tabular}{ll}
+  (1) & If @{thm (prem 1) Posix1(1)} then @{thm (concl)
+  Posix1(1)} and @{thm (concl) Posix1(2)}.\\
+  (2) & @{thm[mode=IfThen] Posix_determ(1)[of _ _ "v" "v'"]}
+  \end{tabular}
+  \end{theorem}
+
+  \begin{proof} Both by induction on the definition of @{term "s \ r \ v"}.
+  The second part follows by a case analysis of @{term "s \ r \ v'"} and
+  the first part.\qed
+  \end{proof}
+
+  \noindent
+  We claim that our @{term "s \ r \ v"} relation captures the idea behind the four
+  informal POSIX rules shown in the Introduction: Consider for example the
+  rules \P+L\ and \P+R\ where the POSIX value for a string
+  and an alternative regular expression, that is @{term "(s, ALT r\<^sub>1 r\<^sub>2)"},
+  is specified---it is always a \Left\-value, \emph{except} when the
+  string to be matched is not in the language of @{term "r\<^sub>1"}; only then it
+  is a \Right\-value (see the side-condition in \P+R\).
+  Interesting is also the rule for sequence regular expressions (\PS\). The first two premises state that @{term "v\<^sub>1"} and @{term "v\<^sub>2"}
+  are the POSIX values for @{term "(s\<^sub>1, r\<^sub>1)"} and @{term "(s\<^sub>2, r\<^sub>2)"}
+  respectively. Consider now the third premise and note that the POSIX value
+  of this rule should match the string \mbox{@{term "s\<^sub>1 @ s\<^sub>2"}}. According to the
+  Longest Match Rule, we want that @{term "s\<^sub>1"} is the longest initial
+  split of \mbox{@{term "s\<^sub>1 @ s\<^sub>2"}} such that @{term "s\<^sub>2"} is still recognised
+  by @{term "r\<^sub>2"}.
Let us assume, contrary to the third premise, that there + \emph{exist} an @{term "s\<^sub>3"} and @{term "s\<^sub>4"} such that @{term "s\<^sub>2"} + can be split up into a non-empty string @{term "s\<^sub>3"} and a possibly empty + string @{term "s\<^sub>4"}. Moreover the longer string @{term "s\<^sub>1 @ s\<^sub>3"} can be + matched by \r\<^sub>1\ and the shorter @{term "s\<^sub>4"} can still be + matched by @{term "r\<^sub>2"}. In this case @{term "s\<^sub>1"} would \emph{not} be the + longest initial split of \mbox{@{term "s\<^sub>1 @ s\<^sub>2"}} and therefore @{term "Seq v\<^sub>1 + v\<^sub>2"} cannot be a POSIX value for @{term "(s\<^sub>1 @ s\<^sub>2, SEQ r\<^sub>1 r\<^sub>2)"}. + The main point is that our side-condition ensures the Longest + Match Rule is satisfied. + + A similar condition is imposed on the POSIX value in the \P\\-rule. Also there we want that @{term "s\<^sub>1"} is the longest initial + split of @{term "s\<^sub>1 @ s\<^sub>2"} and furthermore the corresponding value + @{term v} cannot be flattened to the empty string. In effect, we require + that in each ``iteration'' of the star, some non-empty substring needs to + be ``chipped'' away; only in case of the empty string we accept @{term + "Stars []"} as the POSIX value. Indeed we can show that our POSIX values + are lexical values which exclude those \Stars\ that contain subvalues + that flatten to the empty string. + + \begin{lemma}\label{LVposix} + @{thm [mode=IfThen] Posix_LV} + \end{lemma} + + \begin{proof} + By routine induction on @{thm (prem 1) Posix_LV}.\qed + \end{proof} + + \noindent + Next is the lemma that shows the function @{term "mkeps"} calculates + the POSIX value for the empty string and a nullable regular expression. + + \begin{lemma}\label{lemmkeps} + @{thm[mode=IfThen] Posix_mkeps} + \end{lemma} + + \begin{proof} + By routine induction on @{term r}.\qed + \end{proof} + + \noindent + The central lemma for our POSIX relation is that the \inj\-function + preserves POSIX values. + + \begin{lemma}\label{Posix2} + @{thm[mode=IfThen] Posix_injval} + \end{lemma} + + \begin{proof} + By induction on \r\. We explain two cases. + + \begin{itemize} + \item[$\bullet$] Case @{term "r = ALT r\<^sub>1 r\<^sub>2"}. There are + two subcases, namely \(a)\ \mbox{@{term "v = Left v'"}} and @{term + "s \ der c r\<^sub>1 \ v'"}; and \(b)\ @{term "v = Right v'"}, @{term + "s \ L (der c r\<^sub>1)"} and @{term "s \ der c r\<^sub>2 \ v'"}. In \(a)\ we + know @{term "s \ der c r\<^sub>1 \ v'"}, from which we can infer @{term "(c # s) + \ r\<^sub>1 \ injval r\<^sub>1 c v'"} by induction hypothesis and hence @{term "(c # + s) \ ALT r\<^sub>1 r\<^sub>2 \ injval (ALT r\<^sub>1 r\<^sub>2) c (Left v')"} as needed. Similarly + in subcase \(b)\ where, however, in addition we have to use + Proposition~\ref{derprop}(2) in order to infer @{term "c # s \ L r\<^sub>1"} from @{term + "s \ L (der c r\<^sub>1)"}.\smallskip + + \item[$\bullet$] Case @{term "r = SEQ r\<^sub>1 r\<^sub>2"}. There are three subcases: + + \begin{quote} + \begin{description} + \item[\(a)\] @{term "v = Left (Seq v\<^sub>1 v\<^sub>2)"} and @{term "nullable r\<^sub>1"} + \item[\(b)\] @{term "v = Right v\<^sub>1"} and @{term "nullable r\<^sub>1"} + \item[\(c)\] @{term "v = Seq v\<^sub>1 v\<^sub>2"} and @{term "\ nullable r\<^sub>1"} + \end{description} + \end{quote} + + \noindent For \(a)\ we know @{term "s\<^sub>1 \ der c r\<^sub>1 \ v\<^sub>1"} and + @{term "s\<^sub>2 \ r\<^sub>2 \ v\<^sub>2"} as well as + % + \[@{term "\ (\s\<^sub>3 s\<^sub>4. 
s\<^sub>3 \ [] \ s\<^sub>3 @ s\<^sub>4 = s\<^sub>2 \ s\<^sub>1 @ s\<^sub>3 \ L (der c r\<^sub>1) \ s\<^sub>4 \ L r\<^sub>2)"}\] + + \noindent From the latter we can infer by Proposition~\ref{derprop}(2): + % + \[@{term "\ (\s\<^sub>3 s\<^sub>4. s\<^sub>3 \ [] \ s\<^sub>3 @ s\<^sub>4 = s\<^sub>2 \ (c # s\<^sub>1) @ s\<^sub>3 \ L r\<^sub>1 \ s\<^sub>4 \ L r\<^sub>2)"}\] + + \noindent We can use the induction hypothesis for \r\<^sub>1\ to obtain + @{term "(c # s\<^sub>1) \ r\<^sub>1 \ injval r\<^sub>1 c v\<^sub>1"}. Putting this all together allows us to infer + @{term "((c # s\<^sub>1) @ s\<^sub>2) \ SEQ r\<^sub>1 r\<^sub>2 \ Seq (injval r\<^sub>1 c v\<^sub>1) v\<^sub>2"}. The case \(c)\ + is similar. + + For \(b)\ we know @{term "s \ der c r\<^sub>2 \ v\<^sub>1"} and + @{term "s\<^sub>1 @ s\<^sub>2 \ L (SEQ (der c r\<^sub>1) r\<^sub>2)"}. From the former + we have @{term "(c # s) \ r\<^sub>2 \ (injval r\<^sub>2 c v\<^sub>1)"} by induction hypothesis + for @{term "r\<^sub>2"}. From the latter we can infer + % + \[@{term "\ (\s\<^sub>3 s\<^sub>4. s\<^sub>3 \ [] \ s\<^sub>3 @ s\<^sub>4 = c # s \ s\<^sub>3 \ L r\<^sub>1 \ s\<^sub>4 \ L r\<^sub>2)"}\] + + \noindent By Lemma~\ref{lemmkeps} we know @{term "[] \ r\<^sub>1 \ (mkeps r\<^sub>1)"} + holds. Putting this all together, we can conclude with @{term "(c # + s) \ SEQ r\<^sub>1 r\<^sub>2 \ Seq (mkeps r\<^sub>1) (injval r\<^sub>2 c v\<^sub>1)"}, as required. + + Finally suppose @{term "r = STAR r\<^sub>1"}. This case is very similar to the + sequence case, except that we need to also ensure that @{term "flat (injval r\<^sub>1 + c v\<^sub>1) \ []"}. This follows from @{term "(c # s\<^sub>1) + \ r\<^sub>1 \ injval r\<^sub>1 c v\<^sub>1"} (which in turn follows from @{term "s\<^sub>1 \ der c + r\<^sub>1 \ v\<^sub>1"} and the induction hypothesis).\qed + \end{itemize} + \end{proof} + + \noindent + With Lemma~\ref{Posix2} in place, it is completely routine to establish + that the Sulzmann and Lu lexer satisfies our specification (returning + the null value @{term "None"} iff the string is not in the language of the regular expression, + and returning a unique POSIX value iff the string \emph{is} in the language): + + \begin{theorem}\mbox{}\smallskip\\\label{lexercorrect} + \begin{tabular}{ll} + (1) & @{thm (lhs) lexer_correct_None} if and only if @{thm (rhs) lexer_correct_None}\\ + (2) & @{thm (lhs) lexer_correct_Some} if and only if @{thm (rhs) lexer_correct_Some}\\ + \end{tabular} + \end{theorem} + + \begin{proof} + By induction on @{term s} using Lemma~\ref{lemmkeps} and \ref{Posix2}.\qed + \end{proof} + + \noindent In \textit{(2)} we further know by Theorem~\ref{posixdeterm} that the + value returned by the lexer must be unique. A simple corollary + of our two theorems is: + + \begin{corollary}\mbox{}\smallskip\\\label{lexercorrectcor} + \begin{tabular}{ll} + (1) & @{thm (lhs) lexer_correctness(2)} if and only if @{thm (rhs) lexer_correctness(2)}\\ + (2) & @{thm (lhs) lexer_correctness(1)} if and only if @{thm (rhs) lexer_correctness(1)}\\ + \end{tabular} + \end{corollary} + + \noindent This concludes our correctness proof. Note that we have + not changed the algorithm of Sulzmann and Lu,\footnote{All + deviations we introduced are harmless.} but introduced our own + specification for what a correct result---a POSIX value---should be. + In the next section we show that our specification coincides with + another one given by Okui and Suzuki using a different technique. 
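+
+  For readers who want to experiment with the two phases of the
+  algorithm, the following standalone Isabelle sketch implements them
+  side by side. It is not part of this development: the theory name is
+  hypothetical and the constructors \Chr\ and \Sq\ are renamings
+  (to avoid clashes with names from Isabelle's @{text Main}) of the
+  value constructors used in this paper.
+
+\begin{verbatim}
+theory LexerSketch
+  imports Main
+begin
+
+datatype rexp = ZERO | ONE | CH char
+  | ALT rexp rexp | SEQ rexp rexp | STAR rexp
+
+datatype val = Void | Chr char | Left val | Right val
+  | Sq val val | Stars "val list"
+
+fun nullable :: "rexp ⇒ bool" where
+  "nullable ZERO = False"
+| "nullable ONE = True"
+| "nullable (CH _) = False"
+| "nullable (ALT r1 r2) = (nullable r1 ∨ nullable r2)"
+| "nullable (SEQ r1 r2) = (nullable r1 ∧ nullable r2)"
+| "nullable (STAR _) = True"
+
+fun der :: "char ⇒ rexp ⇒ rexp" where
+  "der _ ZERO = ZERO"
+| "der _ ONE = ZERO"
+| "der c (CH d) = (if c = d then ONE else ZERO)"
+| "der c (ALT r1 r2) = ALT (der c r1) (der c r2)"
+| "der c (SEQ r1 r2) =
+    (if nullable r1 then ALT (SEQ (der c r1) r2) (der c r2)
+     else SEQ (der c r1) r2)"
+| "der c (STAR r) = SEQ (der c r) (STAR r)"
+
+(* value for how a nullable r matches the empty string *)
+fun mkeps :: "rexp ⇒ val" where
+  "mkeps ONE = Void"
+| "mkeps (ALT r1 r2) =
+    (if nullable r1 then Left (mkeps r1) else Right (mkeps r2))"
+| "mkeps (SEQ r1 r2) = Sq (mkeps r1) (mkeps r2)"
+| "mkeps (STAR r) = Stars []"
+
+(* inject the character c back into the value *)
+fun injval :: "rexp ⇒ char ⇒ val ⇒ val" where
+  "injval (CH d) c Void = Chr d"
+| "injval (ALT r1 r2) c (Left v) = Left (injval r1 c v)"
+| "injval (ALT r1 r2) c (Right v) = Right (injval r2 c v)"
+| "injval (SEQ r1 r2) c (Sq v1 v2) = Sq (injval r1 c v1) v2"
+| "injval (SEQ r1 r2) c (Left (Sq v1 v2)) = Sq (injval r1 c v1) v2"
+| "injval (SEQ r1 r2) c (Right v2) = Sq (mkeps r1) (injval r2 c v2)"
+| "injval (STAR r) c (Sq v (Stars vs)) = Stars (injval r c v # vs)"
+
+fun lexer :: "rexp ⇒ string ⇒ val option" where
+  "lexer r [] = (if nullable r then Some (mkeps r) else None)"
+| "lexer r (c # s) =
+    (case lexer (der c r) s of
+       None ⇒ None
+     | Some v ⇒ Some (injval r c v))"
+
+(* (a + (b + ab))* lexed against "ab": the POSIX value uses a
+   single star iteration that matches the whole of "ab" *)
+value "lexer (STAR (ALT (CH (CHR ''a''))
+                    (ALT (CH (CHR ''b''))
+                         (SEQ (CH (CHR ''a'')) (CH (CHR ''b''))))))
+             ''ab''"
+
+end
+\end{verbatim}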
+
+\
+
+section \Ordering of Values according to Okui and Suzuki\
+
+text \
+
+  While in the previous section we have defined POSIX values directly
+  in terms of a ternary relation (see inference rules in Figure~\ref{POSIXrules}),
+  Sulzmann and Lu took a different approach in \cite{Sulzmann2014}:
+  they introduced an ordering for values and identified POSIX values
+  as the maximal elements. An extended version of \cite{Sulzmann2014}
+  is available at the website of its first author; this includes more
+  details of their proofs, but these are evidently not yet in final
+  form. Unfortunately, we were not able to verify claims that their
+  ordering has properties such as being transitive or having maximal
+  elements.
+
+  Okui and Suzuki \cite{OkuiSuzuki2010,OkuiSuzukiTech} described
+  another ordering of values, which they use to establish the
+  correctness of their automata-based algorithm for POSIX matching.
+  Their ordering resembles some aspects of the one given by Sulzmann
+  and Lu, but overall is quite different. To begin with, Okui and
+  Suzuki identify POSIX values as minimal, rather than maximal,
+  elements in their ordering. A more substantial difference is that
+  the ordering by Okui and Suzuki uses \emph{positions} in order to
+  identify and compare subvalues. Positions are lists of natural
+  numbers. This allows them to quite naturally formalise the Longest
+  Match and Priority rules of the informal POSIX standard. Consider
+  for example the value @{term v}
+
+  \begin{center}
+  @{term "v == Stars [Seq (Char x) (Char y), Char z]"}
+  \end{center}
+
+  \noindent
+  At position \[0,1]\ of this value is the
+  subvalue \Char y\ and at position \[1]\ the
+  subvalue @{term "Char z"}. At the `root' position, or empty list
+  @{term "[]"}, is the whole value @{term v}. Positions such as \[0,1,0]\ or \[2]\ are outside of \v\. If it exists, the subvalue of @{term v} at a position \p\, written @{term "at v p"}, can be recursively defined by
+
+  \begin{center}
+  \begin{tabular}{r@ {\hspace{0mm}}lcl}
+  @{term v} &  \\\<^bsub>[]\<^esub>\ & \\\& @{thm (rhs) at.simps(1)}\\
+  @{term "Left v"} & \\\<^bsub>0::ps\<^esub>\ & \\\& @{thm (rhs) at.simps(2)}\\
+  @{term "Right v"} & \\\<^bsub>1::ps\<^esub>\ & \\\ &
+  @{thm (rhs) at.simps(3)[simplified Suc_0_fold]}\\
+  @{term "Seq v\<^sub>1 v\<^sub>2"} & \\\<^bsub>0::ps\<^esub>\ & \\\ &
+  @{thm (rhs) at.simps(4)[where ?v1.0="v\<^sub>1" and ?v2.0="v\<^sub>2"]} \\
+  @{term "Seq v\<^sub>1 v\<^sub>2"} & \\\<^bsub>1::ps\<^esub>\
+  & \\\ &
+  @{thm (rhs) at.simps(5)[where ?v1.0="v\<^sub>1" and ?v2.0="v\<^sub>2", simplified Suc_0_fold]} \\
+  @{term "Stars vs"} & \\\<^bsub>n::ps\<^esub>\ & \\\& @{thm (rhs) at.simps(6)}\\
+  \end{tabular}
+  \end{center}
+
+  \noindent In the last clause we use Isabelle's notation @{term "vs ! n"} for the
+  \n\th element in a list. The set of positions inside a value \v\,
+  written @{term "Pos v"}, is given by
+
+  \begin{center}
+  \begin{tabular}{lcl}
+  @{thm (lhs) Pos.simps(1)} & \\\ & @{thm (rhs) Pos.simps(1)}\\
+  @{thm (lhs) Pos.simps(2)} & \\\ & @{thm (rhs) Pos.simps(2)}\\
+  @{thm (lhs) Pos.simps(3)} & \\\ & @{thm (rhs) Pos.simps(3)}\\
+  @{thm (lhs) Pos.simps(4)} & \\\ & @{thm (rhs) Pos.simps(4)}\\
+  @{thm (lhs) Pos.simps(5)[where ?v1.0="v\<^sub>1" and ?v2.0="v\<^sub>2"]}
+  & \\\
+  & @{thm (rhs) Pos.simps(5)[where ?v1.0="v\<^sub>1" and ?v2.0="v\<^sub>2"]}\\
+  @{thm (lhs) Pos_stars} & \\\ & @{thm (rhs) Pos_stars}\\
+  \end{tabular}
+  \end{center}
+
+  \noindent
+  whereby \len\ in the last clause stands for the length of a list.
Clearly + for every position inside a value there exists a subvalue at that position. + + + To help understanding the ordering of Okui and Suzuki, consider again + the earlier value + \v\ and compare it with the following \w\: + + \begin{center} + \begin{tabular}{l} + @{term "v == Stars [Seq (Char x) (Char y), Char z]"}\\ + @{term "w == Stars [Char x, Char y, Char z]"} + \end{tabular} + \end{center} + + \noindent Both values match the string \xyz\, that means if + we flatten these values at their respective root position, we obtain + \xyz\. However, at position \[0]\, \v\ matches + \xy\ whereas \w\ matches only the shorter \x\. So + according to the Longest Match Rule, we should prefer \v\, + rather than \w\ as POSIX value for string \xyz\ (and + corresponding regular expression). In order to + formalise this idea, Okui and Suzuki introduce a measure for + subvalues at position \p\, called the \emph{norm} of \v\ + at position \p\. We can define this measure in Isabelle as an + integer as follows + + \begin{center} + @{thm pflat_len_def} + \end{center} + + \noindent where we take the length of the flattened value at + position \p\, provided the position is inside \v\; if + not, then the norm is \-1\. The default for outside + positions is crucial for the POSIX requirement of preferring a + \Left\-value over a \Right\-value (if they can match the + same string---see the Priority Rule from the Introduction). For this + consider + + \begin{center} + @{term "v == Left (Char x)"} \qquad and \qquad @{term "w == Right (Char x)"} + \end{center} + + \noindent Both values match \x\. At position \[0]\ + the norm of @{term v} is \1\ (the subvalue matches \x\), + but the norm of \w\ is \-1\ (the position is outside + \w\ according to how we defined the `inside' positions of + \Left\- and \Right\-values). Of course at position + \[1]\, the norms @{term "pflat_len v [1]"} and @{term + "pflat_len w [1]"} are reversed, but the point is that subvalues + will be analysed according to lexicographically ordered + positions. According to this ordering, the position \[0]\ + takes precedence over \[1]\ and thus also \v\ will be + preferred over \w\. 
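+
+  These norm computations can be replayed with the following standalone
+  Isabelle sketch. The functions are hypothetical mirrors of \at\ and
+  \pflat_len\; to keep the sketch total and executable, \at\ returns
+  an option, with @{text None} playing the role of an `outside'
+  position (norm \-1\).
+
+\begin{verbatim}
+theory NormSketch
+  imports Main
+begin
+
+datatype val = Void | Chr char | Left val | Right val
+  | Sq val val | Stars "val list"
+
+fun flat :: "val ⇒ string" where
+  "flat Void = []"
+| "flat (Chr c) = [c]"
+| "flat (Left v) = flat v"
+| "flat (Right v) = flat v"
+| "flat (Sq v1 v2) = flat v1 @ flat v2"
+| "flat (Stars []) = []"
+| "flat (Stars (v # vs)) = flat v @ flat (Stars vs)"
+
+(* subvalue at position p, None if p is outside v *)
+fun at :: "val ⇒ nat list ⇒ val option" where
+  "at v [] = Some v"
+| "at (Left v) (0 # ps) = at v ps"
+| "at (Right v) (Suc 0 # ps) = at v ps"
+| "at (Sq v1 v2) (0 # ps) = at v1 ps"
+| "at (Sq v1 v2) (Suc 0 # ps) = at v2 ps"
+| "at (Stars vs) (n # ps) =
+    (if n < length vs then at (vs ! n) ps else None)"
+| "at _ _ = None"
+
+(* the norm: length of the flattened subvalue, or -1 outside *)
+definition pflat_len :: "val ⇒ nat list ⇒ int" where
+  "pflat_len v p =
+     (case at v p of Some v' ⇒ int (length (flat v')) | None ⇒ -1)"
+
+(* the values v and w from above, both flattening to "xyz" *)
+definition v :: val where
+  "v = Stars [Sq (Chr (CHR ''x'')) (Chr (CHR ''y'')), Chr (CHR ''z'')]"
+definition w :: val where
+  "w = Stars [Chr (CHR ''x''), Chr (CHR ''y''), Chr (CHR ''z'')]"
+
+value "pflat_len v [0]"   (* 2: v matches xy in its first iteration *)
+value "pflat_len w [0]"   (* 1: w matches only x there *)
+value "pflat_len (Left (Chr (CHR ''x''))) [0]"   (* 1 *)
+value "pflat_len (Right (Chr (CHR ''x''))) [0]"  (* -1: outside *)
+
+end
+\end{verbatim}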
+  The lexicographic ordering of positions, written
+  @{term "DUMMY \lex DUMMY"}, can be conveniently formalised
+  by three inference rules
+
+  \begin{center}
+  \begin{tabular}{ccc}
+  @{thm [mode=Axiom] lex_list.intros(1)}\hspace{1cm} &
+  @{thm [mode=Rule] lex_list.intros(3)[where ?p1.0="p\<^sub>1" and ?p2.0="p\<^sub>2" and
+                                            ?ps1.0="ps\<^sub>1" and ?ps2.0="ps\<^sub>2"]}\hspace{1cm} &
+  @{thm [mode=Rule] lex_list.intros(2)[where ?ps1.0="ps\<^sub>1" and ?ps2.0="ps\<^sub>2"]}
+  \end{tabular}
+  \end{center}
+
+  With the norm and lexicographic order in place,
+  we can state the key definition of Okui and Suzuki
+  \cite{OkuiSuzuki2010}: a value @{term "v\<^sub>1"} is \emph{smaller at position \p\} than
+  @{term "v\<^sub>2"}, written @{term "v\<^sub>1 \val p v\<^sub>2"},
+  if and only if $(i)$ the norm at position \p\ is
+  greater in @{term "v\<^sub>1"} (that is the string @{term "flat (at v\<^sub>1 p)"} is longer
+  than @{term "flat (at v\<^sub>2 p)"}) and $(ii)$ at all positions that
+  are inside @{term "v\<^sub>1"} or @{term "v\<^sub>2"} and that are
+  lexicographically smaller than \p\, the two values have the same
+  norm, namely
+
+ \begin{center}
+ \begin{tabular}{c}
+ @{thm (lhs) PosOrd_def[where ?v1.0="v\<^sub>1" and ?v2.0="v\<^sub>2"]}
+ \\\
+ $\begin{cases}
+ (i) & @{term "pflat_len v\<^sub>1 p > pflat_len v\<^sub>2 p"}   \quad\text{and}\smallskip \\
+ (ii) & @{term "(\q \ Pos v\<^sub>1 \ Pos v\<^sub>2. q \lex p --> pflat_len v\<^sub>1 q = pflat_len v\<^sub>2 q)"}
+ \end{cases}$
+ \end{tabular}
+ \end{center}
+
+ \noindent The position \p\ in this definition acts as the
+  \emph{first distinct position} of \v\<^sub>1\ and \v\<^sub>2\,
+  where both values match strings of different length
+  \cite{OkuiSuzuki2010}. Since at \p\ the values \v\<^sub>1\ and
+  \v\<^sub>2\ match different strings, the
+  ordering is irreflexive. Derived from the definition above
+  are the following two orderings:
+
+  \begin{center}
+  \begin{tabular}{l}
+  @{thm PosOrd_ex_def[where ?v1.0="v\<^sub>1" and ?v2.0="v\<^sub>2"]}\\
+  @{thm PosOrd_ex_eq_def[where ?v1.0="v\<^sub>1" and ?v2.0="v\<^sub>2"]}
+  \end{tabular}
+  \end{center}
+
+  While we encountered a number of obstacles, which we failed to
+  overcome, when establishing properties like transitivity for the
+  ordering of Sulzmann and Lu, it is relatively straightforward to
+  establish this property for the orderings
+  @{term "DUMMY :\val DUMMY"} and @{term "DUMMY :\val DUMMY"}
+  by Okui and Suzuki.
+
+  \begin{lemma}[Transitivity]\label{transitivity}
+  @{thm [mode=IfThen] PosOrd_trans[where ?v1.0="v\<^sub>1" and ?v2.0="v\<^sub>2" and ?v3.0="v\<^sub>3"]}
+  \end{lemma}
+
+  \begin{proof} From the assumption we obtain two positions \p\
+  and \q\, where the values \v\<^sub>1\ and \v\<^sub>2\ (respectively
+  \v\<^sub>2\ and \v\<^sub>3\) are `distinct'. Since \\\<^bsub>lex\<^esub>\
+  is trichotomous, we need to consider
+  three cases, namely @{term "p = q"}, @{term "p \lex q"} and
+  @{term "q \lex p"}. Let us look at the first case. Clearly
+  @{term "pflat_len v\<^sub>2 p < pflat_len v\<^sub>1 p"} and @{term
+  "pflat_len v\<^sub>3 p < pflat_len v\<^sub>2 p"} imply @{term
+  "pflat_len v\<^sub>3 p < pflat_len v\<^sub>1 p"}. It remains to show
+  for each @{term "p' \ Pos v\<^sub>1 \ Pos v\<^sub>3"}
+  with @{term "p' \lex p"} that @{term "pflat_len v\<^sub>1
+  p' = pflat_len v\<^sub>3 p'"} holds. Suppose @{term "p' \ Pos
+  v\<^sub>1"}, then we can infer from the first assumption that @{term
+  "pflat_len v\<^sub>1 p' = pflat_len v\<^sub>2 p'"}.
+  But this means
+  that @{term "p'"} must be in @{term "Pos v\<^sub>2"} too (the norm
+  cannot be \-1\ given @{term "p' \ Pos v\<^sub>1"}).
+  Hence we can use the second assumption and
+  infer @{term "pflat_len v\<^sub>2 p' = pflat_len v\<^sub>3 p'"},
+  which concludes this case with @{term "v\<^sub>1 :\val
+  v\<^sub>3"}. The reasoning in the other cases is similar.\qed
+  \end{proof}
+
+  \noindent
+  The proof for $\preccurlyeq$ is similar and omitted.
+  It is also straightforward to show that \\\ and
+  $\preccurlyeq$ are partial orders. Okui and Suzuki furthermore show that they
+  are linear orderings for lexical values \cite{OkuiSuzuki2010} of a given
+  regular expression and given string, but we have not formalised this in
+  Isabelle, as it is not essential for our results. What we are going to
+  show below is that for a given \r\ and \s\, the orderings have a unique
+  minimal element on the set @{term "LV r s"}, which is the POSIX value
+  we defined in the previous section. We start with two properties that
+  show how the length of a flattened value relates to the \\\-ordering.
+
+  \begin{proposition}\mbox{}\smallskip\\\label{ordlen}
+  \begin{tabular}{@ {}ll}
+  (1) &
+  @{thm [mode=IfThen] PosOrd_shorterE[where ?v1.0="v\<^sub>1" and ?v2.0="v\<^sub>2"]}\\
+  (2) &
+  @{thm [mode=IfThen] PosOrd_shorterI[where ?v1.0="v\<^sub>1" and ?v2.0="v\<^sub>2"]}
+  \end{tabular}
+  \end{proposition}
+
+  \noindent Both properties follow from the definition of the ordering. Note that
+  \textit{(2)} entails that if the underlying string of a value, say @{term "v\<^sub>2"},
+  is a strict prefix of the flattened string of another value, say @{term "v\<^sub>1"},
+  then @{term "v\<^sub>1"} must be smaller than @{term "v\<^sub>2"}. For our proofs it
+  will be useful to have the following properties---in each case the underlying strings
+  of the compared values are the same:
+
+  \begin{proposition}\mbox{}\smallskip\\\label{ordintros}
+  \begin{tabular}{ll}
+  \textit{(1)} &
+  @{thm [mode=IfThen] PosOrd_Left_Right[where ?v1.0="v\<^sub>1" and ?v2.0="v\<^sub>2"]}\\
+  \textit{(2)} & If
+  @{thm (prem 1) PosOrd_Left_eq[where ?v1.0="v\<^sub>1" and ?v2.0="v\<^sub>2"]} \;then\;
+  @{thm (lhs) PosOrd_Left_eq[where ?v1.0="v\<^sub>1" and ?v2.0="v\<^sub>2"]} \;iff\;
+  @{thm (rhs) PosOrd_Left_eq[where ?v1.0="v\<^sub>1" and ?v2.0="v\<^sub>2"]}\\
+  \textit{(3)} & If
+  @{thm (prem 1) PosOrd_Right_eq[where ?v1.0="v\<^sub>1" and ?v2.0="v\<^sub>2"]} \;then\;
+  @{thm (lhs) PosOrd_Right_eq[where ?v1.0="v\<^sub>1" and ?v2.0="v\<^sub>2"]} \;iff\;
+  @{thm (rhs) PosOrd_Right_eq[where ?v1.0="v\<^sub>1" and ?v2.0="v\<^sub>2"]}\\
+  \textit{(4)} & If
+  @{thm (prem 1) PosOrd_Seq_eq[where ?v2.0="v\<^sub>2" and ?w2.0="w\<^sub>2"]} \;then\;
+  @{thm (lhs) PosOrd_Seq_eq[where ?v2.0="v\<^sub>2" and ?w2.0="w\<^sub>2"]} \;iff\;
+  @{thm (rhs) PosOrd_Seq_eq[where ?v2.0="v\<^sub>2" and ?w2.0="w\<^sub>2"]}\\
+  \textit{(5)} & If
+  @{thm (prem 2) PosOrd_SeqI1[simplified, where ?v1.0="v\<^sub>1" and ?v2.0="v\<^sub>2" and
+                                                ?w1.0="w\<^sub>1" and ?w2.0="w\<^sub>2"]} \;and\;
+  @{thm (prem 1) PosOrd_SeqI1[where ?v1.0="v\<^sub>1" and ?v2.0="v\<^sub>2" and
+                                    ?w1.0="w\<^sub>1" and ?w2.0="w\<^sub>2"]} \;then\;
+  @{thm (concl) PosOrd_SeqI1[where ?v1.0="v\<^sub>1" and ?v2.0="v\<^sub>2" and
+                                   ?w1.0="w\<^sub>1" and ?w2.0="w\<^sub>2"]}\\
+  \textit{(6)} & If
+  @{thm (prem 1) PosOrd_Stars_append_eq[where ?vs1.0="vs\<^sub>1" and ?vs2.0="vs\<^sub>2"]} \;then\;
+  @{thm (lhs) PosOrd_Stars_append_eq[where ?vs1.0="vs\<^sub>1" and ?vs2.0="vs\<^sub>2"]} \;iff\;
+  @{thm (rhs) PosOrd_Stars_append_eq[where ?vs1.0="vs\<^sub>1" and ?vs2.0="vs\<^sub>2"]}\\
+
+  \textit{(7)} & If
+  @{thm (prem 2) PosOrd_StarsI[where ?v1.0="v\<^sub>1" and ?v2.0="v\<^sub>2" and
+                                     ?vs1.0="vs\<^sub>1" and ?vs2.0="vs\<^sub>2"]} \;and\;
+  @{thm (prem 1) PosOrd_StarsI[where ?v1.0="v\<^sub>1" and ?v2.0="v\<^sub>2" and
+                                     ?vs1.0="vs\<^sub>1" and ?vs2.0="vs\<^sub>2"]} \;then\;
+  @{thm (concl) PosOrd_StarsI[where ?v1.0="v\<^sub>1" and ?v2.0="v\<^sub>2" and
+                                    ?vs1.0="vs\<^sub>1" and ?vs2.0="vs\<^sub>2"]}\\
+  \end{tabular}
+  \end{proposition}
+
+  \noindent One might prefer that statements \textit{(4)} and \textit{(5)}
+  (respectively \textit{(6)} and \textit{(7)})
+  be combined into a single \textit{iff}-statement (like the ones for
+  \Left\ and \Right\). Unfortunately this cannot be done easily: such
+  a single statement would require an additional assumption about the
+  two values @{term "Seq v\<^sub>1 v\<^sub>2"} and @{term "Seq w\<^sub>1 w\<^sub>2"}
+  being inhabited by the same regular expression. The
+  complexity of the proofs involved seems not to justify such a
+  `cleaner' single statement. The statements given are just the properties that
+  allow us to establish our theorems without any difficulty. The proofs
+  for Proposition~\ref{ordintros} are routine.
+
+
+  Next we establish how Okui and Suzuki's orderings relate to our
+  definition of POSIX values. Given a \POSIX\ value \v\<^sub>1\
+  for \r\ and \s\, any other lexical value \v\<^sub>2\ in @{term "LV r s"}
+  is greater than or equal to \v\<^sub>1\:
+
+
+  \begin{theorem}\label{orderone}
+  @{thm [mode=IfThen] Posix_PosOrd[where ?v1.0="v\<^sub>1" and ?v2.0="v\<^sub>2"]}
+  \end{theorem}
+
+  \begin{proof} By induction on our POSIX rules. By
+  Theorem~\ref{posixdeterm} and the definition of @{const LV}, it is clear
+  that \v\<^sub>1\ and \v\<^sub>2\ have the same
+  underlying string @{term s}. The three base cases are
+  straightforward: for example for @{term "v\<^sub>1 = Void"}, we have
+  that @{term "v\<^sub>2 \ LV ONE []"} must also be of the form
+  \mbox{@{term "v\<^sub>2 = Void"}}. Therefore we have @{term
+  "v\<^sub>1 :\val v\<^sub>2"}. The inductive cases for
+  \r\ being of the form @{term "ALT r\<^sub>1 r\<^sub>2"} and
+  @{term "SEQ r\<^sub>1 r\<^sub>2"} are as follows:
+
+
+  \begin{itemize}
+
+  \item[$\bullet$] Case \P+L\ with @{term "s \ (ALT r\<^sub>1 r\<^sub>2)
+    \ (Left w\<^sub>1)"}: In this case the value
+  @{term "v\<^sub>2"} is either of the
+  form @{term "Left w\<^sub>2"} or @{term "Right w\<^sub>2"}. In the
+  latter case we can immediately conclude with \mbox{@{term "v\<^sub>1
+  :\val v\<^sub>2"}} since a \Left\-value with the
+  same underlying string \s\ is always smaller than a
+  \Right\-value by Proposition~\ref{ordintros}\textit{(1)}.
+  In the former case we have @{term "w\<^sub>2
+  \ LV r\<^sub>1 s"} and can use the induction hypothesis to infer
+  @{term "w\<^sub>1 :\val w\<^sub>2"}. Because @{term
+  "w\<^sub>1"} and @{term "w\<^sub>2"} have the same underlying string
+  \s\, we can conclude with @{term "Left w\<^sub>1
+  :\val Left w\<^sub>2"} using
+  Proposition~\ref{ordintros}\textit{(2)}.\smallskip
+
+  \item[$\bullet$] Case \P+R\ with @{term "s \ (ALT r\<^sub>1 r\<^sub>2)
+    \ (Right w\<^sub>1)"}: This case is similar to the previous
+  one, except that we additionally know @{term "s \ L
+  r\<^sub>1"}. This is needed when @{term "v\<^sub>2"} is of the form
+  \mbox{@{term "Left w\<^sub>2"}}.
+  Since \mbox{@{term "flat v\<^sub>2 = flat
+  w\<^sub>2"} \= s\} and @{term "\ w\<^sub>2 :
+  r\<^sub>1"}, we can derive a contradiction with \mbox{@{term "s \ L
+  r\<^sub>1"}} using
+  Proposition~\ref{inhabs}. So also in this case \mbox{@{term "v\<^sub>1
+  :\val v\<^sub>2"}}.\smallskip
+
+  \item[$\bullet$] Case \PS\ with @{term "(s\<^sub>1 @
+  s\<^sub>2) \ (SEQ r\<^sub>1 r\<^sub>2) \ (Seq
+  w\<^sub>1 w\<^sub>2)"}: We can assume @{term "v\<^sub>2 = Seq
+  (u\<^sub>1) (u\<^sub>2)"} with @{term "\ u\<^sub>1 :
+  r\<^sub>1"} and \mbox{@{term "\ u\<^sub>2 :
+  r\<^sub>2"}}. We have @{term "s\<^sub>1 @ s\<^sub>2 = (flat
+  u\<^sub>1) @ (flat u\<^sub>2)"}. By the side-condition of the
+  \PS\-rule we know that either @{term "s\<^sub>1 = flat
+  u\<^sub>1"} or that @{term "flat u\<^sub>1"} is a strict prefix of
+  @{term "s\<^sub>1"}. In the latter case we can infer @{term
+  "w\<^sub>1 :\val u\<^sub>1"} by
+  Proposition~\ref{ordlen}\textit{(2)} and from this @{term "v\<^sub>1
+  :\val v\<^sub>2"} by Proposition~\ref{ordintros}\textit{(5)}
+  (as noted above @{term "v\<^sub>1"} and @{term "v\<^sub>2"} must have the
+  same underlying string).
+  In the former case we know
+  @{term "u\<^sub>1 \ LV r\<^sub>1 s\<^sub>1"} and @{term
+  "u\<^sub>2 \ LV r\<^sub>2 s\<^sub>2"}. With this we can use the
+  induction hypotheses to infer @{term "w\<^sub>1 :\val
+  u\<^sub>1"} and @{term "w\<^sub>2 :\val u\<^sub>2"}. By
+  Proposition~\ref{ordintros}\textit{(4,5)} we can again infer
+  @{term "v\<^sub>1 :\val
+  v\<^sub>2"}.
+
+  \end{itemize}
+
+  \noindent The case for \P\\ is similar to the \PS\-case and omitted.\qed
+  \end{proof}
+
+  \noindent This theorem shows that our \POSIX\ value for a
+  regular expression \r\ and string @{term s} is in fact a
+  minimal element of the values in \LV r s\. By
+  Proposition~\ref{ordlen}\textit{(2)} we also know that any value in
+  \LV r s'\, with @{term "s'"} being a strict prefix, cannot be
+  smaller than \v\<^sub>1\. The next theorem shows the
+  opposite---namely any minimal element in @{term "LV r s"} must be a
+  \POSIX\ value. This can be established by induction on \r\, but the
+  proof can be drastically simplified by using the fact
+  from the previous section about the existence of a \POSIX\ value
+  whenever a string @{term "s \ L r"}.
+
+
+  \begin{theorem}
+  @{thm [mode=IfThen] PosOrd_Posix[where ?v1.0="v\<^sub>1"]}
+  \end{theorem}
+
+  \begin{proof}
+  If @{thm (prem 1) PosOrd_Posix[where ?v1.0="v\<^sub>1"]} then
+  @{term "s \ L r"} by Proposition~\ref{inhabs}. Hence by Theorem~\ref{lexercorrect}(2)
+  there exists a
+  \POSIX\ value @{term "v\<^sub>P"} with @{term "s \ r \ v\<^sub>P"}
+  and by Lemma~\ref{LVposix} we also have \mbox{@{term "v\<^sub>P \ LV r s"}}.
+  By Theorem~\ref{orderone} we therefore have
+  @{term "v\<^sub>P :\val v\<^sub>1"}. If @{term "v\<^sub>P = v\<^sub>1"} then
+  we are done. Otherwise we have @{term "v\<^sub>P :\val v\<^sub>1"}, which
+  however contradicts the second assumption about @{term "v\<^sub>1"} being the smallest
+  element in @{term "LV r s"}. So we are done in this case too.\qed
+  \end{proof}
+
+  \noindent
+  From this we can also show
+  that if @{term "LV r s"} is non-empty (or equivalently @{term "s \ L r"}) then
+  it has a unique minimal element:
+
+  \begin{corollary}
+  @{thm [mode=IfThen] Least_existence1}
+  \end{corollary}
+
+
+
+  \noindent To sum up, we have shown that the (unique) minimal elements
+  of the ordering by Okui and Suzuki are exactly the \POSIX\
+  values we defined inductively in Section~\ref{posixsec}.
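+  As a concrete illustration of the definitions in this section, the
+  following Haskell program (a sketch of ours, not part of the Isabelle
+  formalisation) implements positions, the norm and the test whether one
+  value is smaller than another at a given position:
+
+  \begin{verbatim}
+  -- values, as in the paper
+  data Val = Empty | Chr Char | Lft Val | Rgt Val
+           | Sq Val Val | Strs [Val]
+
+  flat :: Val -> String
+  flat Empty      = ""
+  flat (Chr c)    = [c]
+  flat (Lft v)    = flat v
+  flat (Rgt v)    = flat v
+  flat (Sq v1 v2) = flat v1 ++ flat v2
+  flat (Strs vs)  = concatMap flat vs
+
+  -- the positions inside a value
+  pos :: Val -> [[Int]]
+  pos Empty      = [[]]
+  pos (Chr _)    = [[]]
+  pos (Lft v)    = [] : [0 : p | p <- pos v]
+  pos (Rgt v)    = [] : [1 : p | p <- pos v]
+  pos (Sq v1 v2) = [] : [0 : p | p <- pos v1] ++ [1 : p | p <- pos v2]
+  pos (Strs vs)  = [] : [n : p | (n, v) <- zip [0 ..] vs, p <- pos v]
+
+  -- the subvalue at a position (a partial function)
+  at :: Val -> [Int] -> Val
+  at v         []       = v
+  at (Lft v)   (0 : ps) = at v ps
+  at (Rgt v)   (1 : ps) = at v ps
+  at (Sq v1 _) (0 : ps) = at v1 ps
+  at (Sq _ v2) (1 : ps) = at v2 ps
+  at (Strs vs) (n : ps) = at (vs !! n) ps
+
+  -- the norm; -1 for positions outside the value
+  norm :: Val -> [Int] -> Int
+  norm v p = if p `elem` pos v then length (flat (at v p)) else -1
+
+  -- v1 is smaller than v2 at the first distinct position p
+  -- (Haskell's Ord on lists is the lexicographic ordering)
+  smallerAt :: [Int] -> Val -> Val -> Bool
+  smallerAt p v1 v2 =
+       norm v1 p > norm v2 p
+    && and [norm v1 q == norm v2 q | q <- pos v1 ++ pos v2, q < p]
+  \end{verbatim}
+
+  \noindent
+  For the values \v\ and \w\ from the Longest Match example,
+  \smallerAt [0] v w\ evaluates to \True\, in agreement with the
+  informal discussion above.
+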
+  The results above provide
+  an independent confirmation that our ternary relation formalises the
+  informal POSIX rules.
+
+\
+
+section \Bitcoded Lexing\
+
+text \
+
+We next describe the incremental calculation of the value. To simplify the
+proof we first define the function @{const flex} which calculates the
+``iterated'' injection function.
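+For example, for a two-character string \[c\<^sub>1, c\<^sub>2]\ the
+function \flex\ unfolds to the composition of the two injection functions
+for the respective derivatives:
+
+\begin{center}
+\flex r id [c\<^sub>1, c\<^sub>2] v = inj r c\<^sub>1 (inj (r\c\<^sub>1) c\<^sub>2 v)\
+\end{center}
+
+\noindent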
+With this we can
+rewrite the lexer as
+
+\begin{center}
+@{thm lexer_flex}
+\end{center}
+
+\noindent
+For the bitcoded version of the lexer we first need a function @{const code}
+that encodes a value as a bitsequence:
+
+\begin{center}
+  \begin{tabular}{lcl}
+  @{thm (lhs) code.simps(1)} & $\dn$ & @{thm (rhs) code.simps(1)}\\
+  @{thm (lhs) code.simps(2)} & $\dn$ & @{thm (rhs) code.simps(2)}\\
+  @{thm (lhs) code.simps(3)} & $\dn$ & @{thm (rhs) code.simps(3)}\\
+  @{thm (lhs) code.simps(4)} & $\dn$ & @{thm (rhs) code.simps(4)}\\
+  @{thm (lhs) code.simps(5)[of "v\<^sub>1" "v\<^sub>2"]} & $\dn$ & @{thm (rhs) code.simps(5)[of "v\<^sub>1" "v\<^sub>2"]}\\
+  @{thm (lhs) code.simps(6)} & $\dn$ & @{thm (rhs) code.simps(6)}\\
+  @{thm (lhs) code.simps(7)} & $\dn$ & @{thm (rhs) code.simps(7)}
+\end{tabular}
+\end{center}
+
+\noindent
+Annotated regular expressions are given by the grammar:
+
+\begin{center}
+\begin{tabular}{lcl}
+  @{term areg} & $::=$ & @{term "AZERO"}\\
+               & $\mid$ & @{term "AONE bs"}\\
+               & $\mid$ & @{term "ACHAR bs c"}\\
+               & $\mid$ & @{term "AALT bs r1 r2"}\\
+               & $\mid$ & @{term "ASEQ bs r\<^sub>1 r\<^sub>2"}\\
+               & $\mid$ & @{term "ASTAR bs r"}
+\end{tabular}
+\end{center}
+
+\noindent
+The function @{const intern} translates a regular expression into an
+annotated regular expression, and @{const erase} goes the other way:
+
+\begin{center}
+  \begin{tabular}{lcl}
+  @{thm (lhs) intern.simps(1)} & $\dn$ & @{thm (rhs) intern.simps(1)}\\
+  @{thm (lhs) intern.simps(2)} & $\dn$ & @{thm (rhs) intern.simps(2)}\\
+  @{thm (lhs) intern.simps(3)} & $\dn$ & @{thm (rhs) intern.simps(3)}\\
+  @{thm (lhs) intern.simps(4)[of "r\<^sub>1" "r\<^sub>2"]} & $\dn$ & @{thm (rhs) intern.simps(4)[of "r\<^sub>1" "r\<^sub>2"]}\\
+  @{thm (lhs) intern.simps(5)[of "r\<^sub>1" "r\<^sub>2"]} & $\dn$ & @{thm (rhs) intern.simps(5)[of "r\<^sub>1" "r\<^sub>2"]}\\
+  @{thm (lhs) intern.simps(6)} & $\dn$ & @{thm (rhs) intern.simps(6)}\\
+\end{tabular}
+\end{center}
+
+\begin{center}
+  \begin{tabular}{lcl}
+  @{thm (lhs) erase.simps(1)} & $\dn$ & @{thm (rhs) erase.simps(1)}\\
+  @{thm (lhs) erase.simps(2)[of bs]} & $\dn$ & @{thm (rhs) erase.simps(2)[of bs]}\\
+  @{thm (lhs) erase.simps(3)[of bs]} & $\dn$ & @{thm (rhs) erase.simps(3)[of bs]}\\
+  @{thm (lhs) erase.simps(4)[of bs "r\<^sub>1" "r\<^sub>2"]} & $\dn$ & @{thm (rhs) erase.simps(4)[of bs "r\<^sub>1" "r\<^sub>2"]}\\
+  @{thm (lhs) erase.simps(5)[of bs "r\<^sub>1" "r\<^sub>2"]} & $\dn$ & @{thm (rhs) erase.simps(5)[of bs "r\<^sub>1" "r\<^sub>2"]}\\
+  @{thm (lhs) erase.simps(6)[of bs]} & $\dn$ & @{thm (rhs) erase.simps(6)[of bs]}\\
+\end{tabular}
+\end{center}
+
+\noindent
+Some simple facts about @{const erase} and @{const intern} are:
+
+\begin{lemma}\mbox{}\\
+@{thm erase_bder}\\
+@{thm erase_intern}
+\end{lemma}
+
+\noindent
+The functions @{const bnullable} and @{const bder} are the bitcoded
+counterparts of @{const nullable} and @{const der}:
+
+\begin{center}
+  \begin{tabular}{lcl}
+  @{thm (lhs) bnullable.simps(1)} & $\dn$ & @{thm (rhs) bnullable.simps(1)}\\
+  @{thm (lhs) bnullable.simps(2)} & $\dn$ & @{thm (rhs) bnullable.simps(2)}\\
+  @{thm (lhs) bnullable.simps(3)} & $\dn$ & @{thm (rhs) bnullable.simps(3)}\\
+  @{thm (lhs) bnullable.simps(4)[of bs "r\<^sub>1" "r\<^sub>2"]} & $\dn$ & @{thm (rhs) bnullable.simps(4)[of bs "r\<^sub>1" "r\<^sub>2"]}\\
+  @{thm (lhs) bnullable.simps(5)[of bs "r\<^sub>1" "r\<^sub>2"]} & $\dn$ & @{thm (rhs) bnullable.simps(5)[of bs "r\<^sub>1" "r\<^sub>2"]}\\
+  @{thm (lhs) bnullable.simps(6)} & $\dn$ & @{thm (rhs) bnullable.simps(6)}\medskip\\
+
+  @{thm (lhs) bder.simps(1)} & $\dn$ & @{thm (rhs) bder.simps(1)}\\
+  @{thm (lhs) bder.simps(2)} & $\dn$ & @{thm (rhs) bder.simps(2)}\\
+  @{thm (lhs) bder.simps(3)} & $\dn$ & @{thm (rhs) bder.simps(3)}\\
+  @{thm (lhs) bder.simps(4)[of bs "r\<^sub>1" "r\<^sub>2"]} & $\dn$ & @{thm (rhs) bder.simps(4)[of bs "r\<^sub>1" "r\<^sub>2"]}\\
+  @{thm (lhs) bder.simps(5)[of bs "r\<^sub>1" "r\<^sub>2"]} & $\dn$ & @{thm (rhs) bder.simps(5)[of bs "r\<^sub>1" "r\<^sub>2"]}\\
"r\<^sub>2"]}\\ + @{thm (lhs) bder.simps(6)} & $\dn$ & @{thm (rhs) bder.simps(6)} + \end{tabular} + \end{center} + + +\begin{center} + \begin{tabular}{lcl} + @{thm (lhs) bmkeps.simps(1)} & $\dn$ & @{thm (rhs) bmkeps.simps(1)}\\ + @{thm (lhs) bmkeps.simps(2)[of bs "r\<^sub>1" "r\<^sub>2"]} & $\dn$ & @{thm (rhs) bmkeps.simps(2)[of bs "r\<^sub>1" "r\<^sub>2"]}\\ + @{thm (lhs) bmkeps.simps(3)[of bs "r\<^sub>1" "r\<^sub>2"]} & $\dn$ & @{thm (rhs) bmkeps.simps(3)[of bs "r\<^sub>1" "r\<^sub>2"]}\\ + @{thm (lhs) bmkeps.simps(4)} & $\dn$ & @{thm (rhs) bmkeps.simps(4)}\medskip\\ +\end{tabular} +\end{center} + + +@{thm [mode=IfThen] bder_retrieve} + +By induction on \r\ + +\begin{theorem}[Main Lemma]\mbox{}\\ +@{thm [mode=IfThen] MAIN_decode} +\end{theorem} + +\noindent +Definition of the bitcoded lexer + +@{thm blexer_def} + + +\begin{theorem} +@{thm blexer_correctness} +\end{theorem} + +\ + +section \Optimisations\ + +text \ + + Derivatives as calculated by \Brz's method are usually more complex + regular expressions than the initial one; the result is that the + derivative-based matching and lexing algorithms are often abysmally slow. + However, various optimisations are possible, such as the simplifications + of @{term "ALT ZERO r"}, @{term "ALT r ZERO"}, @{term "SEQ ONE r"} and + @{term "SEQ r ONE"} to @{term r}. These simplifications can speed up the + algorithms considerably, as noted in \cite{Sulzmann2014}. One of the + advantages of having a simple specification and correctness proof is that + the latter can be refined to prove the correctness of such simplification + steps. While the simplification of regular expressions according to + rules like + + \begin{equation}\label{Simpl} + \begin{array}{lcllcllcllcl} + @{term "ALT ZERO r"} & \\\ & @{term r} \hspace{8mm}%\\ + @{term "ALT r ZERO"} & \\\ & @{term r} \hspace{8mm}%\\ + @{term "SEQ ONE r"} & \\\ & @{term r} \hspace{8mm}%\\ + @{term "SEQ r ONE"} & \\\ & @{term r} + \end{array} + \end{equation} + + \noindent is well understood, there is an obstacle with the POSIX value + calculation algorithm by Sulzmann and Lu: if we build a derivative regular + expression and then simplify it, we will calculate a POSIX value for this + simplified derivative regular expression, \emph{not} for the original (unsimplified) + derivative regular expression. Sulzmann and Lu \cite{Sulzmann2014} overcome this obstacle by + not just calculating a simplified regular expression, but also calculating + a \emph{rectification function} that ``repairs'' the incorrect value. 
+ + The rectification functions can be (slightly clumsily) implemented in + Isabelle/HOL as follows using some auxiliary functions: + + \begin{center} + \begin{tabular}{lcl} + @{thm (lhs) F_RIGHT.simps(1)} & $\dn$ & \Right (f v)\\\ + @{thm (lhs) F_LEFT.simps(1)} & $\dn$ & \Left (f v)\\\ + + @{thm (lhs) F_ALT.simps(1)} & $\dn$ & \Right (f\<^sub>2 v)\\\ + @{thm (lhs) F_ALT.simps(2)} & $\dn$ & \Left (f\<^sub>1 v)\\\ + + @{thm (lhs) F_SEQ1.simps(1)} & $\dn$ & \Seq (f\<^sub>1 ()) (f\<^sub>2 v)\\\ + @{thm (lhs) F_SEQ2.simps(1)} & $\dn$ & \Seq (f\<^sub>1 v) (f\<^sub>2 ())\\\ + @{thm (lhs) F_SEQ.simps(1)} & $\dn$ & \Seq (f\<^sub>1 v\<^sub>1) (f\<^sub>2 v\<^sub>2)\\medskip\\ + %\end{tabular} + % + %\begin{tabular}{lcl} + @{term "simp_ALT (ZERO, DUMMY) (r\<^sub>2, f\<^sub>2)"} & $\dn$ & @{term "(r\<^sub>2, F_RIGHT f\<^sub>2)"}\\ + @{term "simp_ALT (r\<^sub>1, f\<^sub>1) (ZERO, DUMMY)"} & $\dn$ & @{term "(r\<^sub>1, F_LEFT f\<^sub>1)"}\\ + @{term "simp_ALT (r\<^sub>1, f\<^sub>1) (r\<^sub>2, f\<^sub>2)"} & $\dn$ & @{term "(ALT r\<^sub>1 r\<^sub>2, F_ALT f\<^sub>1 f\<^sub>2)"}\\ + @{term "simp_SEQ (ONE, f\<^sub>1) (r\<^sub>2, f\<^sub>2)"} & $\dn$ & @{term "(r\<^sub>2, F_SEQ1 f\<^sub>1 f\<^sub>2)"}\\ + @{term "simp_SEQ (r\<^sub>1, f\<^sub>1) (ONE, f\<^sub>2)"} & $\dn$ & @{term "(r\<^sub>1, F_SEQ2 f\<^sub>1 f\<^sub>2)"}\\ + @{term "simp_SEQ (r\<^sub>1, f\<^sub>1) (r\<^sub>2, f\<^sub>2)"} & $\dn$ & @{term "(SEQ r\<^sub>1 r\<^sub>2, F_SEQ f\<^sub>1 f\<^sub>2)"}\\ + \end{tabular} + \end{center} + + \noindent + The functions \simp\<^bsub>Alt\<^esub>\ and \simp\<^bsub>Seq\<^esub>\ encode the simplification rules + in \eqref{Simpl} and compose the rectification functions (simplifications can occur + deep inside the regular expression). The main simplification function is then + + \begin{center} + \begin{tabular}{lcl} + @{term "simp (ALT r\<^sub>1 r\<^sub>2)"} & $\dn$ & @{term "simp_ALT (simp r\<^sub>1) (simp r\<^sub>2)"}\\ + @{term "simp (SEQ r\<^sub>1 r\<^sub>2)"} & $\dn$ & @{term "simp_SEQ (simp r\<^sub>1) (simp r\<^sub>2)"}\\ + @{term "simp r"} & $\dn$ & @{term "(r, id)"}\\ + \end{tabular} + \end{center} + + \noindent where @{term "id"} stands for the identity function. The + function @{const simp} returns a simplified regular expression and a corresponding + rectification function. Note that we do not simplify under stars: this + seems to slow down the algorithm, rather than speed it up. The optimised + lexer is then given by the clauses: + + \begin{center} + \begin{tabular}{lcl} + @{thm (lhs) slexer.simps(1)} & $\dn$ & @{thm (rhs) slexer.simps(1)}\\ + @{thm (lhs) slexer.simps(2)} & $\dn$ & + \let (r\<^sub>s, f\<^sub>r) = simp (r \$\backslash$\ c) in\\\ + & & \case\ @{term "slexer r\<^sub>s s"} \of\\\ + & & \phantom{$|$} @{term "None"} \\\ @{term None}\\ + & & $|$ @{term "Some v"} \\\ \Some (inj r c (f\<^sub>r v))\ + \end{tabular} + \end{center} + + \noindent + In the second clause we first calculate the derivative @{term "der c r"} + and then simplify the result. This gives us a simplified derivative + \r\<^sub>s\ and a rectification function \f\<^sub>r\. The lexer + is then recursively called with the simplified derivative, but before + we inject the character @{term c} into the value @{term v}, we need to rectify + @{term v} (that is construct @{term "f\<^sub>r v"}). 
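+
+  As a small worked example, consider again the derivative
+  @{term "ALT ZERO ONE"} from above: the function @{const simp} returns
+  the simplified regular expression @{term ONE} together with the
+  rectification function \F\<^bsub>Right\<^esub> id\. The optimised lexer
+  therefore obtains the value @{term Void} for @{term ONE}, which is
+  rectified to @{term "Right Void"} before the injection is applied.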
+  Before we can establish the correctness
+  of @{term "slexer"}, we need to show that simplification preserves the language
+  and, once the value is rectified, also preserves our POSIX relation
+  (recall @{const "simp"} generates a (regular expression, rectification function) pair):
+
+  \begin{lemma}\mbox{}\smallskip\\\label{slexeraux}
+  \begin{tabular}{ll}
+  (1) & @{thm L_fst_simp[symmetric]}\\
+  (2) & @{thm[mode=IfThen] Posix_simp}
+  \end{tabular}
+  \end{lemma}
+
+  \begin{proof} Both are by induction on \r\. There is no
+  interesting case for the first statement. For the second statement,
+  of interest are the @{term "r = ALT r\<^sub>1 r\<^sub>2"} and @{term "r = SEQ r\<^sub>1
+  r\<^sub>2"} cases. In each case we have to analyse four subcases according
+  to whether @{term "fst (simp r\<^sub>1)"} and @{term "fst (simp r\<^sub>2)"} equal @{const
+  ZERO} (respectively @{const ONE}). For example, for @{term "r = ALT
+  r\<^sub>1 r\<^sub>2"}, consider the subcase @{term "fst (simp r\<^sub>1) = ZERO"} and
+  @{term "fst (simp r\<^sub>2) \ ZERO"}. By assumption we know @{term "s \
+  fst (simp (ALT r\<^sub>1 r\<^sub>2)) \ v"}. From this we can infer @{term "s \ fst (simp r\<^sub>2) \ v"}
+  and by IH also (*) @{term "s \ r\<^sub>2 \ (snd (simp r\<^sub>2) v)"}. Given @{term "fst (simp r\<^sub>1) = ZERO"}
+  we know @{term "L (fst (simp r\<^sub>1)) = {}"}. By the first statement
+  @{term "L r\<^sub>1"} is then also the empty set, meaning (**) @{term "s \ L r\<^sub>1"}.
+  Taking (*) and (**) together gives by the \mbox{\P+R\}-rule
+  @{term "s \ ALT r\<^sub>1 r\<^sub>2 \ Right (snd (simp r\<^sub>2) v)"}. In turn this
+  gives @{term "s \ ALT r\<^sub>1 r\<^sub>2 \ snd (simp (ALT r\<^sub>1 r\<^sub>2)) v"} as we need to show.
+  The other cases are similar.\qed
+  \end{proof}
+
+  \noindent We can now prove relatively straightforwardly that the
+  optimised lexer produces the expected result:
+
+  \begin{theorem}
+  @{thm slexer_correctness}
+  \end{theorem}
+
+  \begin{proof} By induction on @{term s} generalising over @{term
+  r}. The case @{term "[]"} is trivial. For the cons-case suppose the
+  string is of the form @{term "c # s"}. By induction hypothesis we
+  know @{term "slexer r s = lexer r s"} holds for all @{term r} (in
+  particular for @{term "r"} being the derivative @{term "der c
+  r"}). Let @{term "r\<^sub>s"} be the simplified derivative regular expression, that is @{term
+  "fst (simp (der c r))"}, and @{term "f\<^sub>r"} be the rectification
+  function, that is @{term "snd (simp (der c r))"}. We distinguish two cases,
+  namely whether (*) @{term "s \ L (der c r)"} holds or not. In the first case we
+  have by Theorem~\ref{lexercorrect}(2) a value @{term "v"} such that @{term
+  "lexer (der c r) s = Some v"} and @{term "s \ der c r \ v"} hold.
+  By Lemma~\ref{slexeraux}(1) we can also infer from~(*) that @{term "s
+  \ L r\<^sub>s"} holds. Hence we know by Theorem~\ref{lexercorrect}(2) that
+  there exists a @{term "v'"} with @{term "lexer r\<^sub>s s = Some v'"} and
+  @{term "s \ r\<^sub>s \ v'"}. From the latter we know by
+  Lemma~\ref{slexeraux}(2) that @{term "s \ der c r \ (f\<^sub>r v')"} holds.
+  By the uniqueness of the POSIX relation (Theorem~\ref{posixdeterm}) we
+  can infer that @{term v} is equal to @{term "f\<^sub>r v'"}---that is the
+  rectification function applied to @{term "v'"}
+  produces the original @{term "v"}. Now the case follows by the
+  definitions of @{const lexer} and @{const slexer}.
+
+  In the second case where @{term "s \ L (der c r)"} we have that
+  @{term "lexer (der c r) s = None"} by Theorem~\ref{lexercorrect}(1). We
+  also know by Lemma~\ref{slexeraux}(1) that @{term "s \ L r\<^sub>s"}. Hence
+  @{term "lexer r\<^sub>s s = None"} by Theorem~\ref{lexercorrect}(1) and
+  by IH then also @{term "slexer r\<^sub>s s = None"}. With this we can
+  conclude in this case too.\qed
+
+  \end{proof}
+
+\
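+
+text \
+  For concreteness, the following self-contained Haskell program (again a
+  sketch of ours, not part of the formalisation) implements the optimised
+  lexer of this section, including the rectification functions:
+
+  \begin{verbatim}
+  data Rexp = Zero | One | Ch Char
+            | Alt Rexp Rexp | Seq Rexp Rexp | Star Rexp
+  data Val  = Void | Chr Char | Lft Val | Rgt Val
+            | Sq Val Val | Strs [Val] deriving Show
+
+  nullable :: Rexp -> Bool
+  nullable Zero        = False
+  nullable One         = True
+  nullable (Ch _)      = False
+  nullable (Alt r1 r2) = nullable r1 || nullable r2
+  nullable (Seq r1 r2) = nullable r1 && nullable r2
+  nullable (Star _)    = True
+
+  der :: Char -> Rexp -> Rexp
+  der _ Zero        = Zero
+  der _ One         = Zero
+  der c (Ch d)      = if c == d then One else Zero
+  der c (Alt r1 r2) = Alt (der c r1) (der c r2)
+  der c (Seq r1 r2) = if nullable r1
+                      then Alt (Seq (der c r1) r2) (der c r2)
+                      else Seq (der c r1) r2
+  der c (Star r)    = Seq (der c r) (Star r)
+
+  mkeps :: Rexp -> Val
+  mkeps One         = Void
+  mkeps (Alt r1 r2) = if nullable r1 then Lft (mkeps r1)
+                      else Rgt (mkeps r2)
+  mkeps (Seq r1 r2) = Sq (mkeps r1) (mkeps r2)
+  mkeps (Star _)    = Strs []
+
+  inj :: Rexp -> Char -> Val -> Val
+  inj (Ch d)      _ Void             = Chr d
+  inj (Alt r1 _)  c (Lft v)          = Lft (inj r1 c v)
+  inj (Alt _ r2)  c (Rgt v)          = Rgt (inj r2 c v)
+  inj (Seq r1 _)  c (Sq v1 v2)       = Sq (inj r1 c v1) v2
+  inj (Seq r1 _)  c (Lft (Sq v1 v2)) = Sq (inj r1 c v1) v2
+  inj (Seq r1 r2) c (Rgt v)          = Sq (mkeps r1) (inj r2 c v)
+  inj (Star r)    c (Sq v (Strs vs)) = Strs (inj r c v : vs)
+
+  -- simp returns a simplified regex plus a rectification function
+  simp :: Rexp -> (Rexp, Val -> Val)
+  simp (Alt r1 r2) = case (simp r1, simp r2) of
+    ((Zero, _), (r2', f2)) -> (r2', \v -> Rgt (f2 v))
+    ((r1', f1), (Zero, _)) -> (r1', \v -> Lft (f1 v))
+    ((r1', f1), (r2', f2)) -> (Alt r1' r2',
+      \v -> case v of Lft w -> Lft (f1 w); Rgt w -> Rgt (f2 w))
+  simp (Seq r1 r2) = case (simp r1, simp r2) of
+    ((One, f1), (r2', f2)) -> (r2', \v -> Sq (f1 Void) (f2 v))
+    ((r1', f1), (One, f2)) -> (r1', \v -> Sq (f1 v) (f2 Void))
+    ((r1', f1), (r2', f2)) -> (Seq r1' r2',
+      \v -> case v of Sq v1 v2 -> Sq (f1 v1) (f2 v2))
+  simp r = (r, id)
+
+  slexer :: Rexp -> String -> Maybe Val
+  slexer r []      = if nullable r then Just (mkeps r) else Nothing
+  slexer r (c : s) = let (rs, fr) = simp (der c r)
+                     in fmap (inj r c . fr) (slexer rs s)
+  \end{verbatim}
+
+  \noindent
+  For instance, \slexer (Alt Zero (Ch 'a')) "a"\ evaluates to
+  \Just (Rgt (Chr 'a'))\, matching the worked example from above.
+\
+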
+section \The Proof of the Retrieve Lemma\
+
+text \
+
+  It remains to establish the lemma connecting @{const bder} with
+  @{const retrieve}, which was used in the proof of the Main Lemma:
+
+  \begin{lemma}
+  @{thm [mode=IfThen] bder_retrieve}
+  \end{lemma}
+
+  \begin{proof}
+  By induction on the definition of @{term "erase r"}. The cases for rule 1) and 2) are
+  straightforward as @{term "der c ZERO"} and @{term "der c ONE"} are both equal to
+  @{term ZERO}. This means @{term "\ v : ZERO"} cannot hold. Similarly in the case of rule 3)
+  where @{term r} is of the form @{term "ACHAR d"} with @{term "c = d"}. Then by assumption
+  we know @{term "\ v : ONE"}, which implies @{term "v = Void"}. The equation follows by
+  simplification of left- and right-hand side. In case @{term "c \ d"} we have again
+  @{term "\ v : ZERO"}, which cannot hold.
+
+  For rule 4a) we have again @{term "\ v : ZERO"}. The property holds by IH for rule 4b).
+  The induction hypothesis is
+  \[
+  @{term "retrieve (bder c r) v = retrieve r (injval (erase r) c v)"}
+  \]
+  which is what left- and right-hand side simplify to. The slightly more interesting case
+  is for 4c). By assumption we have
+  @{term "\ v : ALT (der c (erase r\<^sub>1)) (der c (erase (AALTs bs (r\<^sub>2 # rs))))"}. This means we
+  have either (*) @{term "\ v1 : der c (erase r\<^sub>1)"} with @{term "v = Left v1"} or
+  (**) @{term "\ v2 : der c (erase (AALTs bs (r\<^sub>2 # rs)))"} with @{term "v = Right v2"}.
+  The former case is straightforward by simplification. The second case is \ldots TBD.
+
+  Rule 5) TBD.
+
+  Finally for rule 6) the reasoning is as follows: By assumption we have
+  @{term "\ v : SEQ (der c (erase r)) (STAR (erase r))"}. This means we also have
+  @{term "v = Seq v1 v2"}, @{term "\ v1 : der c (erase r)"} and @{term "v2 = Stars vs"}.
+  We want to prove
+  \begin{align}
+  & @{term "retrieve (ASEQ bs (fuse [Z] (bder c r)) (ASTAR [] r)) v"}\\
+  &= @{term "retrieve (ASTAR bs r) (injval (STAR (erase r)) c v)"}
+  \end{align}
+  The right-hand side @{term inj}-expression is equal to
+  @{term "Stars (injval (erase r) c v1 # vs)"}, which means the @{term retrieve}-expression
+  simplifies to
+  \[
+  @{term "bs @ [Z] @ retrieve r (injval (erase r) c v1) @ retrieve (ASTAR [] r) (Stars vs)"}
+  \]
+  The left-hand side of the equation above simplifies to
+  \[
+  @{term "bs @ retrieve (fuse [Z] (bder c r)) v1 @ retrieve (ASTAR [] r) (Stars vs)"}
+  \]
+  We can move out the @{term "fuse [Z]"} and then use the IH to show that left-hand side
+  and right-hand side are equal. This completes the proof.
+  \end{proof}
+
+  \bibliographystyle{plain}
+  \bibliography{root}
+
+\
+(*<*)
+end
+(*>*)