# HG changeset patch # User Christian Urban # Date 1488152782 0 # Node ID 16af5b8bd285ef73c6eda9ad1286077ffd8f333f # Parent 47179a172c54f4b19fe2b49ebe5c097fd064145c updated diff -r 47179a172c54 -r 16af5b8bd285 thys/Journal/Paper.thy --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/thys/Journal/Paper.thy Sun Feb 26 23:46:22 2017 +0000 @@ -0,0 +1,1260 @@ +(*<*) +theory Paper +imports + "../Lexer" + "../Simplifying" + "../Sulzmann" + "~~/src/HOL/Library/LaTeXsugar" +begin + +declare [[show_question_marks = false]] + +abbreviation + "der_syn r c \ der c r" + +abbreviation + "ders_syn r s \ ders s r" + +notation (latex output) + If ("(\<^raw:\textrm{>if\<^raw:}> (_)/ \<^raw:\textrm{>then\<^raw:}> (_)/ \<^raw:\textrm{>else\<^raw:}> (_))" 10) and + Cons ("_\<^raw:\mbox{$\,$}>::\<^raw:\mbox{$\,$}>_" [75,73] 73) and + + ZERO ("\<^bold>0" 78) and + ONE ("\<^bold>1" 78) and + CHAR ("_" [1000] 80) and + ALT ("_ + _" [77,77] 78) and + SEQ ("_ \ _" [77,77] 78) and + STAR ("_\<^sup>\" [1000] 78) and + + val.Void ("'(')" 1000) and + val.Char ("Char _" [1000] 78) and + val.Left ("Left _" [79] 78) and + val.Right ("Right _" [1000] 78) and + val.Seq ("Seq _ _" [79,79] 78) and + val.Stars ("Stars _" [79] 78) and + + L ("L'(_')" [10] 78) and + der_syn ("_\\_" [79, 1000] 76) and + ders_syn ("_\\_" [79, 1000] 76) and + flat ("|_|" [75] 74) and + Sequ ("_ @ _" [78,77] 63) and + injval ("inj _ _ _" [79,77,79] 76) and + mkeps ("mkeps _" [79] 76) and + length ("len _" [73] 73) and + + Prf ("_ : _" [75,75] 75) and + Posix ("'(_, _') \ _" [63,75,75] 75) and + + lexer ("lexer _ _" [78,78] 77) and + F_RIGHT ("F\<^bsub>Right\<^esub> _") and + F_LEFT ("F\<^bsub>Left\<^esub> _") and + F_ALT ("F\<^bsub>Alt\<^esub> _ _") and + F_SEQ1 ("F\<^bsub>Seq1\<^esub> _ _") and + F_SEQ2 ("F\<^bsub>Seq2\<^esub> _ _") and + F_SEQ ("F\<^bsub>Seq\<^esub> _ _") and + simp_SEQ ("simp\<^bsub>Seq\<^esub> _ _" [1000, 1000] 1) and + simp_ALT ("simp\<^bsub>Alt\<^esub> _ _" [1000, 1000] 1) and + slexer ("lexer\<^sup>+" 
1000) and + + ValOrd ("_ >\<^bsub>_\<^esub> _" [77,77,77] 77) and + ValOrdEq ("_ \\<^bsub>_\<^esub> _" [77,77,77] 77) + +definition + "match r s \ nullable (ders s r)" + +(* +comments not implemented + +p9. The condtion "not exists s3 s4..." appears often enough (in particular in +the proof of Lemma 3) to warrant a definition. + +*) + +(*>*) + +section {* Introduction *} + + +text {* + +Brzozowski \cite{Brzozowski1964} introduced the notion of the {\em +derivative} @{term "der c r"} of a regular expression @{text r} w.r.t.\ a +character~@{text c}, and showed that it gave a simple solution to the +problem of matching a string @{term s} with a regular expression @{term r}: +if the derivative of @{term r} w.r.t.\ (in succession) all the characters of +the string matches the empty string, then @{term r} matches @{term s} (and +{\em vice versa}). The derivative has the property (which may almost be +regarded as its specification) that, for every string @{term s} and regular +expression @{term r} and character @{term c}, one has @{term "cs \ L(r)"} if +and only if \mbox{@{term "s \ L(der c r)"}}. The beauty of Brzozowski's +derivatives is that they are neatly expressible in any functional language, +and easily definable and reasoned about in theorem provers---the definitions +just consist of inductive datatypes and simple recursive functions. A +mechanised correctness proof of Brzozowski's matcher in for example HOL4 +has been mentioned by Owens and Slind~\cite{Owens2008}. Another one in Isabelle/HOL is part +of the work by Krauss and Nipkow \cite{Krauss2011}. And another one in Coq is given +by Coquand and Siles \cite{Coquand2012}. + +If a regular expression matches a string, then in general there is more than +one way of how the string is matched. There are two commonly used +disambiguation strategies to generate a unique answer: one is called GREEDY +matching \cite{Frisch2004} and the other is POSIX +matching~\cite{Kuklewicz,Sulzmann2014,Vansummeren2006}. 
For example consider +the string @{term xy} and the regular expression \mbox{@{term "STAR (ALT +(ALT x y) xy)"}}. Either the string can be matched in two `iterations' by +the single letter-regular expressions @{term x} and @{term y}, or directly +in one iteration by @{term xy}. The first case corresponds to GREEDY +matching, which first matches with the left-most symbol and only matches the +next symbol in case of a mismatch (this is greedy in the sense of preferring +instant gratification to delayed repletion). The second case is POSIX +matching, which prefers the longest match. + +In the context of lexing, where an input string needs to be split up into a +sequence of tokens, POSIX is the more natural disambiguation strategy for +what programmers consider basic syntactic building blocks in their programs. +These building blocks are often specified by some regular expressions, say +@{text "r\<^bsub>key\<^esub>"} and @{text "r\<^bsub>id\<^esub>"} for recognising keywords and +identifiers, respectively. There are two underlying (informal) rules behind +tokenising a string in a POSIX fashion according to a collection of regular +expressions: + +\begin{itemize} +\item[$\bullet$] \emph{The Longest Match Rule} (or \emph{``maximal munch rule''}): +The longest initial substring matched by any regular expression is taken as +next token.\smallskip + +\item[$\bullet$] \emph{Priority Rule:} +For a particular longest initial substring, the first regular expression +that can match determines the token. +\end{itemize} + +\noindent Consider for example a regular expression @{text "r\<^bsub>key\<^esub>"} for recognising keywords +such as @{text "if"}, @{text "then"} and so on; and @{text "r\<^bsub>id\<^esub>"} +recognising identifiers (say, a single character followed by +characters or numbers). Then we can form the regular expression +@{text "(r\<^bsub>key\<^esub> + r\<^bsub>id\<^esub>)\<^sup>\"} and use POSIX matching to tokenise strings, +say @{text "iffoo"} and @{text "if"}. 
For @{text "iffoo"} we obtain +by the Longest Match Rule a single identifier token, not a keyword +followed by an identifier. For @{text "if"} we obtain by the Priority +Rule a keyword token, not an identifier token---even if @{text "r\<^bsub>id\<^esub>"} +matches also. + +One limitation of Brzozowski's matcher is that it only generates a +YES/NO answer for whether a string is being matched by a regular +expression. Sulzmann and Lu~\cite{Sulzmann2014} extended this matcher +to allow generation not just of a YES/NO answer but of an actual +matching, called a [lexical] {\em value}. They give a simple algorithm +to calculate a value that appears to be the value associated with +POSIX matching. The challenge then is to specify that value, in an +algorithm-independent fashion, and to show that Sulzmann and Lu's +derivative-based algorithm does indeed calculate a value that is +correct according to the specification. + +The answer given by Sulzmann and Lu \cite{Sulzmann2014} is to define a +relation (called an ``order relation'') on the set of values of @{term +r}, and to show that (once a string to be matched is chosen) there is +a maximum element and that it is computed by their derivative-based +algorithm. This proof idea is inspired by work of Frisch and Cardelli +\cite{Frisch2004} on a GREEDY regular expression matching +algorithm. However, we were not able to establish transitivity and +totality for the ``order relation'' by Sulzmann and Lu. In Section +\ref{argu} we identify some inherent problems with their approach (of +which some of the proofs are not published in \cite{Sulzmann2014}); +perhaps more importantly, we give a simple inductive (and +algorithm-independent) definition of what we call being a {\em POSIX +value} for a regular expression @{term r} and a string @{term s}; we +show that the algorithm computes such a value and that such a value is +unique. Our proofs are both done by hand and checked in Isabelle/HOL. 
The +experience of doing our proofs has been that this mechanical checking +was absolutely essential: this subject area has hidden snares. This +was also noted by Kuklewicz \cite{Kuklewicz} who found that nearly all +POSIX matching implementations are ``buggy'' \cite[Page +203]{Sulzmann2014} and by Grathwohl et al \cite[Page 36]{CrashCourse2014} +who wrote: + +\begin{quote} +\it{}``The POSIX strategy is more complicated than the greedy because of +the dependence on information about the length of matched strings in the +various subexpressions.'' +\end{quote} + +%\footnote{The relation @{text "\\<^bsub>r\<^esub>"} defined by Sulzmann and Lu \cite{Sulzmann2014} +%is a relation on the +%values for the regular expression @{term r}; but it only holds between +%@{term "v\<^sub>1"} and @{term "v\<^sub>2"} in cases where @{term "v\<^sub>1"} and @{term "v\<^sub>2"} have +%the same flattening (underlying string). So a counterexample to totality is +%given by taking two values @{term "v\<^sub>1"} and @{term "v\<^sub>2"} for @{term r} that +%have different flattenings (see Section~\ref{posixsec}). A different +%relation @{text "\\<^bsub>r,s\<^esub>"} on the set of values for @{term r} +%with flattening @{term s} is definable by the same approach, and is indeed +%total; but that is not what Proposition 1 of \cite{Sulzmann2014} does.} + + +\noindent {\bf Contributions:} We have implemented in Isabelle/HOL the +derivative-based regular expression matching algorithm of +Sulzmann and Lu \cite{Sulzmann2014}. We have proved the correctness of this +algorithm according to our specification of what a POSIX value is (inspired +by work of Vansummeren \cite{Vansummeren2006}). 
Sulzmann +and Lu sketch in \cite{Sulzmann2014} an informal correctness proof: but to +us it contains unfillable gaps.\footnote{An extended version of +\cite{Sulzmann2014} is available at the website of its first author; this +extended version already includes remarks in the appendix that their +informal proof contains gaps, and possible fixes are not fully worked out.} +Our specification of a POSIX value consists of a simple inductive definition +that given a string and a regular expression uniquely determines this value. +Derivatives as calculated by Brzozowski's method are usually more complex +regular expressions than the initial one; various optimisations are +possible. We prove the correctness when simplifications of @{term "ALT ZERO +r"}, @{term "ALT r ZERO"}, @{term "SEQ ONE r"} and @{term "SEQ r ONE"} to +@{term r} are applied. + +*} + +section {* Preliminaries *} + +text {* \noindent Strings in Isabelle/HOL are lists of characters with the +empty string being represented by the empty list, written @{term "[]"}, and +list-cons being written as @{term "DUMMY # DUMMY"}. Often we use the usual +bracket notation for lists also for strings; for example a string consisting +of just a single character @{term c} is written @{term "[c]"}. By using the +type @{type char} for characters we have a supply of finitely many +characters roughly corresponding to the ASCII character set. Regular +expressions are defined as usual as the elements of the following inductive +datatype: + + \begin{center} + @{text "r :="} + @{const "ZERO"} $\mid$ + @{const "ONE"} $\mid$ + @{term "CHAR c"} $\mid$ + @{term "ALT r\<^sub>1 r\<^sub>2"} $\mid$ + @{term "SEQ r\<^sub>1 r\<^sub>2"} $\mid$ + @{term "STAR r"} + \end{center} + + \noindent where @{const ZERO} stands for the regular expression that does + not match any string, @{const ONE} for the regular expression that matches + only the empty string and @{term c} for matching a character literal. 
The + language of a regular expression is also defined as usual by the + recursive function @{term L} with the six clauses: + + \begin{center} + \begin{tabular}{l@ {\hspace{3mm}}rcl} + (1) & @{thm (lhs) L.simps(1)} & $\dn$ & @{thm (rhs) L.simps(1)}\\ + (2) & @{thm (lhs) L.simps(2)} & $\dn$ & @{thm (rhs) L.simps(2)}\\ + (3) & @{thm (lhs) L.simps(3)} & $\dn$ & @{thm (rhs) L.simps(3)}\\ + \end{tabular} + \hspace{14mm} + \begin{tabular}{l@ {\hspace{3mm}}rcl} + (4) & @{thm (lhs) L.simps(4)[of "r\<^sub>1" "r\<^sub>2"]} & $\dn$ & @{thm (rhs) L.simps(4)[of "r\<^sub>1" "r\<^sub>2"]}\\ + (5) & @{thm (lhs) L.simps(5)[of "r\<^sub>1" "r\<^sub>2"]} & $\dn$ & @{thm (rhs) L.simps(5)[of "r\<^sub>1" "r\<^sub>2"]}\\ + (6) & @{thm (lhs) L.simps(6)} & $\dn$ & @{thm (rhs) L.simps(6)}\\ + \end{tabular} + \end{center} + + \noindent In clause (4) we use the operation @{term "DUMMY ;; + DUMMY"} for the concatenation of two languages (it is also list-append for + strings). We use the star-notation for regular expressions and for + languages (in the last clause above). The star for languages is defined + inductively by two clauses: @{text "(i)"} the empty string being in + the star of a language and @{text "(ii)"} if @{term "s\<^sub>1"} is in a + language and @{term "s\<^sub>2"} in the star of this language, then also @{term + "s\<^sub>1 @ s\<^sub>2"} is in the star of this language. It will also be convenient + to use the following notion of a \emph{semantic derivative} (or \emph{left + quotient}) of a language defined as + % + \begin{center} + @{thm Der_def}\;. 
+ \end{center} + + \noindent + For semantic derivatives we have the following equations (for example + mechanically proved in \cite{Krauss2011}): + % + \begin{equation}\label{SemDer} + \begin{array}{lcl} + @{thm (lhs) Der_null} & \dn & @{thm (rhs) Der_null}\\ + @{thm (lhs) Der_empty} & \dn & @{thm (rhs) Der_empty}\\ + @{thm (lhs) Der_char} & \dn & @{thm (rhs) Der_char}\\ + @{thm (lhs) Der_union} & \dn & @{thm (rhs) Der_union}\\ + @{thm (lhs) Der_Sequ} & \dn & @{thm (rhs) Der_Sequ}\\ + @{thm (lhs) Der_star} & \dn & @{thm (rhs) Der_star} + \end{array} + \end{equation} + + + \noindent \emph{\Brz's derivatives} of regular expressions + \cite{Brzozowski1964} can be easily defined by two recursive functions: + the first is from regular expressions to booleans (implementing a test + when a regular expression can match the empty string), and the second + takes a regular expression and a character to a (derivative) regular + expression: + + \begin{center} + \begin{tabular}{lcl} + @{thm (lhs) nullable.simps(1)} & $\dn$ & @{thm (rhs) nullable.simps(1)}\\ + @{thm (lhs) nullable.simps(2)} & $\dn$ & @{thm (rhs) nullable.simps(2)}\\ + @{thm (lhs) nullable.simps(3)} & $\dn$ & @{thm (rhs) nullable.simps(3)}\\ + @{thm (lhs) nullable.simps(4)[of "r\<^sub>1" "r\<^sub>2"]} & $\dn$ & @{thm (rhs) nullable.simps(4)[of "r\<^sub>1" "r\<^sub>2"]}\\ + @{thm (lhs) nullable.simps(5)[of "r\<^sub>1" "r\<^sub>2"]} & $\dn$ & @{thm (rhs) nullable.simps(5)[of "r\<^sub>1" "r\<^sub>2"]}\\ + @{thm (lhs) nullable.simps(6)} & $\dn$ & @{thm (rhs) nullable.simps(6)}\medskip\\ + + %\end{tabular} + %\end{center} + + %\begin{center} + %\begin{tabular}{lcl} + + @{thm (lhs) der.simps(1)} & $\dn$ & @{thm (rhs) der.simps(1)}\\ + @{thm (lhs) der.simps(2)} & $\dn$ & @{thm (rhs) der.simps(2)}\\ + @{thm (lhs) der.simps(3)} & $\dn$ & @{thm (rhs) der.simps(3)}\\ + @{thm (lhs) der.simps(4)[of c "r\<^sub>1" "r\<^sub>2"]} & $\dn$ & @{thm (rhs) der.simps(4)[of c "r\<^sub>1" "r\<^sub>2"]}\\ + @{thm (lhs) der.simps(5)[of c 
"r\<^sub>1" "r\<^sub>2"]} & $\dn$ & @{thm (rhs) der.simps(5)[of c "r\<^sub>1" "r\<^sub>2"]}\\ + @{thm (lhs) der.simps(6)} & $\dn$ & @{thm (rhs) der.simps(6)} + \end{tabular} + \end{center} + + \noindent + We may extend this definition to give derivatives w.r.t.~strings: + + \begin{center} + \begin{tabular}{lcl} + @{thm (lhs) ders.simps(1)} & $\dn$ & @{thm (rhs) ders.simps(1)}\\ + @{thm (lhs) ders.simps(2)} & $\dn$ & @{thm (rhs) ders.simps(2)}\\ + \end{tabular} + \end{center} + + \noindent Given the equations in \eqref{SemDer}, it is a relatively easy + exercise in mechanical reasoning to establish that + + \begin{proposition}\label{derprop}\mbox{}\\ + \begin{tabular}{ll} + @{text "(1)"} & @{thm (lhs) nullable_correctness} if and only if + @{thm (rhs) nullable_correctness}, and \\ + @{text "(2)"} & @{thm[mode=IfThen] der_correctness}. + \end{tabular} + \end{proposition} + + \noindent With this in place it is also very routine to prove that the + regular expression matcher defined as + % + \begin{center} + @{thm match_def} + \end{center} + + \noindent gives a positive answer if and only if @{term "s \ L r"}. + Consequently, this regular expression matching algorithm satisfies the + usual specification for regular expression matching. While the matcher + above calculates a provably correct YES/NO answer for whether a regular + expression matches a string or not, the novel idea of Sulzmann and Lu + \cite{Sulzmann2014} is to append another phase to this algorithm in order + to calculate a [lexical] value. We will explain the details next. + +*} + +section {* POSIX Regular Expression Matching\label{posixsec} *} + +text {* + + The clever idea by Sulzmann and Lu \cite{Sulzmann2014} is to define + values for encoding \emph{how} a regular expression matches a string + and then define a function on values that mirrors (but inverts) the + construction of the derivative on regular expressions. 
\emph{Values} + are defined as the inductive datatype + + \begin{center} + @{text "v :="} + @{const "Void"} $\mid$ + @{term "val.Char c"} $\mid$ + @{term "Left v"} $\mid$ + @{term "Right v"} $\mid$ + @{term "Seq v\<^sub>1 v\<^sub>2"} $\mid$ + @{term "Stars vs"} + \end{center} + + \noindent where we use @{term vs} to stand for a list of + values. (This is similar to the approach taken by Frisch and + Cardelli for GREEDY matching \cite{Frisch2004}, and Sulzmann and Lu + for POSIX matching \cite{Sulzmann2014}). The string underlying a + value can be calculated by the @{const flat} function, written + @{term "flat DUMMY"} and defined as: + + \begin{center} + \begin{tabular}[t]{lcl} + @{thm (lhs) flat.simps(1)} & $\dn$ & @{thm (rhs) flat.simps(1)}\\ + @{thm (lhs) flat.simps(2)} & $\dn$ & @{thm (rhs) flat.simps(2)}\\ + @{thm (lhs) flat.simps(3)} & $\dn$ & @{thm (rhs) flat.simps(3)}\\ + @{thm (lhs) flat.simps(4)} & $\dn$ & @{thm (rhs) flat.simps(4)} + \end{tabular}\hspace{14mm} + \begin{tabular}[t]{lcl} + @{thm (lhs) flat.simps(5)[of "v\<^sub>1" "v\<^sub>2"]} & $\dn$ & @{thm (rhs) flat.simps(5)[of "v\<^sub>1" "v\<^sub>2"]}\\ + @{thm (lhs) flat.simps(6)} & $\dn$ & @{thm (rhs) flat.simps(6)}\\ + @{thm (lhs) flat.simps(7)} & $\dn$ & @{thm (rhs) flat.simps(7)}\\ + \end{tabular} + \end{center} + + \noindent Sulzmann and Lu also define inductively an inhabitation relation + that associates values to regular expressions: + + \begin{center} + \begin{tabular}{c} + \\[-8mm] + @{thm[mode=Axiom] Prf.intros(4)} \qquad + @{thm[mode=Axiom] Prf.intros(5)[of "c"]}\\[4mm] + @{thm[mode=Rule] Prf.intros(2)[of "v\<^sub>1" "r\<^sub>1" "r\<^sub>2"]} \qquad + @{thm[mode=Rule] Prf.intros(3)[of "v\<^sub>2" "r\<^sub>1" "r\<^sub>2"]}\\[4mm] + @{thm[mode=Rule] Prf.intros(1)[of "v\<^sub>1" "r\<^sub>1" "v\<^sub>2" "r\<^sub>2"]}\\[4mm] + @{thm[mode=Axiom] Prf.intros(6)[of "r"]} \qquad + @{thm[mode=Rule] Prf.intros(7)[of "v" "r" "vs"]} + \end{tabular} + \end{center} + + \noindent Note that no values are 
associated with the regular expression + @{term ZERO}, and that the only value associated with the regular + expression @{term ONE} is @{term Void}, pronounced (if one must) as @{text + "Void"}. It is routine to establish how values ``inhabiting'' a regular + expression correspond to the language of a regular expression, namely + + \begin{proposition} + @{thm L_flat_Prf} + \end{proposition} + + In general there is more than one value associated with a regular + expression. In case of POSIX matching the problem is to calculate the + unique value that satisfies the (informal) POSIX rules from the + Introduction. Graphically the POSIX value calculation algorithm by + Sulzmann and Lu can be illustrated by the picture in Figure~\ref{Sulz} + where the path from the left to the right involving @{term derivatives}/@{const + nullable} is the first phase of the algorithm (calculating successive + \Brz's derivatives) and @{const mkeps}/@{text inj}, the path from right to + left, the second phase. This picture shows the steps required when a + regular expression, say @{text "r\<^sub>1"}, matches the string @{term + "[a,b,c]"}. We first build the three derivatives (according to @{term a}, + @{term b} and @{term c}). We then use @{const nullable} to find out + whether the resulting derivative regular expression @{term "r\<^sub>4"} + can match the empty string. If yes, we call the function @{const mkeps} + that produces a value @{term "v\<^sub>4"} for how @{term "r\<^sub>4"} can + match the empty string (taking into account the POSIX constraints in case + there are several ways). 
This function is defined by the clauses: + +\begin{figure}[t] +\begin{center} +\begin{tikzpicture}[scale=2,node distance=1.3cm, + every node/.style={minimum size=6mm}] +\node (r1) {@{term "r\<^sub>1"}}; +\node (r2) [right=of r1]{@{term "r\<^sub>2"}}; +\draw[->,line width=1mm](r1)--(r2) node[above,midway] {@{term "der a DUMMY"}}; +\node (r3) [right=of r2]{@{term "r\<^sub>3"}}; +\draw[->,line width=1mm](r2)--(r3) node[above,midway] {@{term "der b DUMMY"}}; +\node (r4) [right=of r3]{@{term "r\<^sub>4"}}; +\draw[->,line width=1mm](r3)--(r4) node[above,midway] {@{term "der c DUMMY"}}; +\draw (r4) node[anchor=west] {\;\raisebox{3mm}{@{term nullable}}}; +\node (v4) [below=of r4]{@{term "v\<^sub>4"}}; +\draw[->,line width=1mm](r4) -- (v4); +\node (v3) [left=of v4] {@{term "v\<^sub>3"}}; +\draw[->,line width=1mm](v4)--(v3) node[below,midway] {@{text "inj r\<^sub>3 c"}}; +\node (v2) [left=of v3]{@{term "v\<^sub>2"}}; +\draw[->,line width=1mm](v3)--(v2) node[below,midway] {@{text "inj r\<^sub>2 b"}}; +\node (v1) [left=of v2] {@{term "v\<^sub>1"}}; +\draw[->,line width=1mm](v2)--(v1) node[below,midway] {@{text "inj r\<^sub>1 a"}}; +\draw (r4) node[anchor=north west] {\;\raisebox{-8mm}{@{term "mkeps"}}}; +\end{tikzpicture} +\end{center} +\mbox{}\\[-13mm] + +\caption{The two phases of the algorithm by Sulzmann \& Lu \cite{Sulzmann2014}, +matching the string @{term "[a,b,c]"}. The first phase (the arrows from +left to right) is \Brz's matcher building successive derivatives. If the +last regular expression is @{term nullable}, then the functions of the +second phase are called (the top-down and right-to-left arrows): first +@{term mkeps} calculates a value @{term "v\<^sub>4"} witnessing +how the empty string has been recognised by @{term "r\<^sub>4"}. After +that the function @{term inj} ``injects back'' the characters of the string into +the values. 
+\label{Sulz}} +\end{figure} + + \begin{center} + \begin{tabular}{lcl} + @{thm (lhs) mkeps.simps(1)} & $\dn$ & @{thm (rhs) mkeps.simps(1)}\\ + @{thm (lhs) mkeps.simps(2)[of "r\<^sub>1" "r\<^sub>2"]} & $\dn$ & @{thm (rhs) mkeps.simps(2)[of "r\<^sub>1" "r\<^sub>2"]}\\ + @{thm (lhs) mkeps.simps(3)[of "r\<^sub>1" "r\<^sub>2"]} & $\dn$ & @{thm (rhs) mkeps.simps(3)[of "r\<^sub>1" "r\<^sub>2"]}\\ + @{thm (lhs) mkeps.simps(4)} & $\dn$ & @{thm (rhs) mkeps.simps(4)}\\ + \end{tabular} + \end{center} + + \noindent Note that this function needs only to be partially defined, + namely only for regular expressions that are nullable. In case @{const + nullable} fails, the string @{term "[a,b,c]"} cannot be matched by @{term + "r\<^sub>1"} and the null value @{term "None"} is returned. Note also how this function + makes some subtle choices leading to a POSIX value: for example if an + alternative regular expression, say @{term "ALT r\<^sub>1 r\<^sub>2"}, can + match the empty string and furthermore @{term "r\<^sub>1"} can match the + empty string, then we return a @{text Left}-value. The @{text + Right}-value will only be returned if @{term "r\<^sub>1"} cannot match the empty + string. + + The most interesting idea from Sulzmann and Lu \cite{Sulzmann2014} is + the construction of a value for how @{term "r\<^sub>1"} can match the + string @{term "[a,b,c]"} from the value how the last derivative, @{term + "r\<^sub>4"} in Fig.~\ref{Sulz}, can match the empty string. Sulzmann and + Lu achieve this by stepwise ``injecting back'' the characters into the + values thus inverting the operation of building derivatives, but on the level + of values. The corresponding function, called @{term inj}, takes three + arguments, a regular expression, a character and a value. 
For example in + the first (or right-most) @{term inj}-step in Fig.~\ref{Sulz} the regular + expression @{term "r\<^sub>3"}, the character @{term c} from the last + derivative step and @{term "v\<^sub>4"}, which is the value corresponding + to the derivative regular expression @{term "r\<^sub>4"}. The result is + the new value @{term "v\<^sub>3"}. The final result of the algorithm is + the value @{term "v\<^sub>1"}. The @{term inj} function is defined by recursion on regular + expressions and by analysing the shape of values (corresponding to + the derivative regular expressions). + % + \begin{center} + \begin{tabular}{l@ {\hspace{5mm}}lcl} + (1) & @{thm (lhs) injval.simps(1)} & $\dn$ & @{thm (rhs) injval.simps(1)}\\ + (2) & @{thm (lhs) injval.simps(2)[of "r\<^sub>1" "r\<^sub>2" "c" "v\<^sub>1"]} & $\dn$ & + @{thm (rhs) injval.simps(2)[of "r\<^sub>1" "r\<^sub>2" "c" "v\<^sub>1"]}\\ + (3) & @{thm (lhs) injval.simps(3)[of "r\<^sub>1" "r\<^sub>2" "c" "v\<^sub>2"]} & $\dn$ & + @{thm (rhs) injval.simps(3)[of "r\<^sub>1" "r\<^sub>2" "c" "v\<^sub>2"]}\\ + (4) & @{thm (lhs) injval.simps(4)[of "r\<^sub>1" "r\<^sub>2" "c" "v\<^sub>1" "v\<^sub>2"]} & $\dn$ + & @{thm (rhs) injval.simps(4)[of "r\<^sub>1" "r\<^sub>2" "c" "v\<^sub>1" "v\<^sub>2"]}\\ + (5) & @{thm (lhs) injval.simps(5)[of "r\<^sub>1" "r\<^sub>2" "c" "v\<^sub>1" "v\<^sub>2"]} & $\dn$ + & @{thm (rhs) injval.simps(5)[of "r\<^sub>1" "r\<^sub>2" "c" "v\<^sub>1" "v\<^sub>2"]}\\ + (6) & @{thm (lhs) injval.simps(6)[of "r\<^sub>1" "r\<^sub>2" "c" "v\<^sub>2"]} & $\dn$ + & @{thm (rhs) injval.simps(6)[of "r\<^sub>1" "r\<^sub>2" "c" "v\<^sub>2"]}\\ + (7) & @{thm (lhs) injval.simps(7)[of "r" "c" "v" "vs"]} & $\dn$ + & @{thm (rhs) injval.simps(7)[of "r" "c" "v" "vs"]}\\ + \end{tabular} + \end{center} + + \noindent To better understand what is going on in this definition it + might be instructive to look first at the three sequence cases (clauses + (4)--(6)). 
In each case we need to construct an ``injected value'' for + @{term "SEQ r\<^sub>1 r\<^sub>2"}. This must be a value of the form @{term + "Seq DUMMY DUMMY"}\,. Recall the clause of the @{text derivative}-function + for sequence regular expressions: + + \begin{center} + @{thm (lhs) der.simps(5)[of c "r\<^sub>1" "r\<^sub>2"]} $\dn$ @{thm (rhs) der.simps(5)[of c "r\<^sub>1" "r\<^sub>2"]} + \end{center} + + \noindent Consider first the @{text "else"}-branch where the derivative is @{term + "SEQ (der c r\<^sub>1) r\<^sub>2"}. The corresponding value must therefore + be of the form @{term "Seq v\<^sub>1 v\<^sub>2"}, which matches the left-hand + side in clause~(4) of @{term inj}. In the @{text "if"}-branch the derivative is an + alternative, namely @{term "ALT (SEQ (der c r\<^sub>1) r\<^sub>2) (der c + r\<^sub>2)"}. This means we either have to consider a @{text Left}- or + @{text Right}-value. In case of the @{text Left}-value we know further it + must be a value for a sequence regular expression. Therefore the pattern + we match in the clause (5) is @{term "Left (Seq v\<^sub>1 v\<^sub>2)"}, + while in (6) it is just @{term "Right v\<^sub>2"}. One more interesting + point is in the right-hand side of clause (6): since in this case the + regular expression @{text "r\<^sub>1"} does not ``contribute'' to + matching the string, that means it only matches the empty string, we need to + call @{const mkeps} in order to construct a value for how @{term "r\<^sub>1"} + can match this empty string. A similar argument applies for why we can + expect in the left-hand side of clause (7) that the value is of the form + @{term "Seq v (Stars vs)"}---the derivative of a star is @{term "SEQ (der c r) + (STAR r)"}. 
Finally, the reason for why we can ignore the second argument + in clause (1) of @{term inj} is that it will only ever be called in cases + where @{term "c=d"}, but the usual linearity restrictions in patterns do + not allow us to build this constraint explicitly into our function + definition.\footnote{Sulzmann and Lu state this clause as @{thm (lhs) + injval.simps(1)[of "c" "c"]} $\dn$ @{thm (rhs) injval.simps(1)[of "c"]}, + but our deviation is harmless.} + + The idea of the @{term inj}-function to ``inject'' a character, say + @{term c}, into a value can be made precise by the first part of the + following lemma, which shows that the underlying string of an injected + value has a prepended character @{term c}; the second part shows that the + underlying string of an @{const mkeps}-value is always the empty string + (given the regular expression is nullable since otherwise @{text mkeps} + might not be defined). + + \begin{lemma}\mbox{}\smallskip\\\label{Prf_injval_flat} + \begin{tabular}{ll} + (1) & @{thm[mode=IfThen] Prf_injval_flat}\\ + (2) & @{thm[mode=IfThen] mkeps_flat} + \end{tabular} + \end{lemma} + + \begin{proof} + Both properties are by routine inductions: the first one can, for example, + be proved by induction over the definition of @{term derivatives}; the second by + an induction on @{term r}. There are no interesting cases.\qed + \end{proof} + + Having defined the @{const mkeps} and @{text inj} function we can extend + \Brz's matcher so that a [lexical] value is constructed (assuming the + regular expression matches the string). 
The clauses of the Sulzmann and Lu lexer are + + \begin{center} + \begin{tabular}{lcl} + @{thm (lhs) lexer.simps(1)} & $\dn$ & @{thm (rhs) lexer.simps(1)}\\ + @{thm (lhs) lexer.simps(2)} & $\dn$ & @{text "case"} @{term "lexer (der c r) s"} @{text of}\\ + & & \phantom{$|$} @{term "None"} @{text "\"} @{term None}\\ + & & $|$ @{term "Some v"} @{text "\"} @{term "Some (injval r c v)"} + \end{tabular} + \end{center} + + \noindent If the regular expression does not match the string, @{const None} is + returned. If the regular expression \emph{does} + match the string, then @{const Some} value is returned. One important + virtue of this algorithm is that it can be implemented with ease in any + functional programming language and also in Isabelle/HOL. In the remaining + part of this section we prove that this algorithm is correct. + + The well-known idea of POSIX matching is informally defined by the longest + match and priority rule (see Introduction); as correctly argued in \cite{Sulzmann2014}, this + needs formal specification. Sulzmann and Lu define an ``ordering + relation'' between values and argue + that there is a maximum value, as given by the derivative-based algorithm. + In contrast, we shall introduce a simple inductive definition that + specifies directly what a \emph{POSIX value} is, incorporating the + POSIX-specific choices into the side-conditions of our rules. Our + definition is inspired by the matching relation given by Vansummeren + \cite{Vansummeren2006}. The relation we define is ternary and written as + \mbox{@{term "s \ r \ v"}}, relating strings, regular expressions and + values. 
+ % + \begin{center} + \begin{tabular}{c} + @{thm[mode=Axiom] Posix.intros(1)}@{text "P"}@{term "ONE"} \qquad + @{thm[mode=Axiom] Posix.intros(2)}@{text "P"}@{term "c"}\medskip\\ + @{thm[mode=Rule] Posix.intros(3)[of "s" "r\<^sub>1" "v" "r\<^sub>2"]}@{text "P+L"}\qquad + @{thm[mode=Rule] Posix.intros(4)[of "s" "r\<^sub>2" "v" "r\<^sub>1"]}@{text "P+R"}\medskip\\ + $\mprset{flushleft} + \inferrule + {@{thm (prem 1) Posix.intros(5)[of "s\<^sub>1" "r\<^sub>1" "v\<^sub>1" "s\<^sub>2" "r\<^sub>2" "v\<^sub>2"]} \qquad + @{thm (prem 2) Posix.intros(5)[of "s\<^sub>1" "r\<^sub>1" "v\<^sub>1" "s\<^sub>2" "r\<^sub>2" "v\<^sub>2"]} \\\\ + @{thm (prem 3) Posix.intros(5)[of "s\<^sub>1" "r\<^sub>1" "v\<^sub>1" "s\<^sub>2" "r\<^sub>2" "v\<^sub>2"]}} + {@{thm (concl) Posix.intros(5)[of "s\<^sub>1" "r\<^sub>1" "v\<^sub>1" "s\<^sub>2" "r\<^sub>2" "v\<^sub>2"]}}$@{text "PS"}\\ + @{thm[mode=Axiom] Posix.intros(7)}@{text "P[]"}\medskip\\ + $\mprset{flushleft} + \inferrule + {@{thm (prem 1) Posix.intros(6)[of "s\<^sub>1" "r" "v" "s\<^sub>2" "vs"]} \qquad + @{thm (prem 2) Posix.intros(6)[of "s\<^sub>1" "r" "v" "s\<^sub>2" "vs"]} \qquad + @{thm (prem 3) Posix.intros(6)[of "s\<^sub>1" "r" "v" "s\<^sub>2" "vs"]} \\\\ + @{thm (prem 4) Posix.intros(6)[of "s\<^sub>1" "r" "v" "s\<^sub>2" "vs"]}} + {@{thm (concl) Posix.intros(6)[of "s\<^sub>1" "r" "v" "s\<^sub>2" "vs"]}}$@{text "P\"} + \end{tabular} + \end{center} + + \noindent + We can prove that given a string @{term s} and regular expression @{term + r}, the POSIX value @{term v} is uniquely determined by @{term "s \ r \ v"}. + + \begin{theorem}\mbox{}\smallskip\\\label{posixdeterm} + \begin{tabular}{ll} + @{text "(1)"} & If @{thm (prem 1) Posix1(1)} then @{thm (concl) + Posix1(1)} and @{thm (concl) Posix1(2)}.\\ + @{text "(2)"} & @{thm[mode=IfThen] Posix_determ(1)[of _ _ "v" "v'"]} + \end{tabular} + \end{theorem} + + \begin{proof} Both by induction on the definition of @{term "s \ r \ v"}. 
+ The second part follows by a case analysis of @{term "s \ r \ v'"} and
+ the first part.\qed
+ \end{proof}
+
+ \noindent
+ We claim that our @{term "s \ r \ v"} relation captures the idea behind the two
+ informal POSIX rules shown in the Introduction: Consider for example the
+ rules @{text "P+L"} and @{text "P+R"} where the POSIX value for a string
+ and an alternative regular expression, that is @{term "(s, ALT r\<^sub>1 r\<^sub>2)"},
+ is specified---it is always a @{text "Left"}-value, \emph{except} when the
+ string to be matched is not in the language of @{term "r\<^sub>1"}; only then it
+ is a @{text Right}-value (see the side-condition in @{text "P+R"}).
+ Also interesting is the rule for sequence regular expressions (@{text
+ "PS"}). The first two premises state that @{term "v\<^sub>1"} and @{term "v\<^sub>2"}
+ are the POSIX values for @{term "(s\<^sub>1, r\<^sub>1)"} and @{term "(s\<^sub>2, r\<^sub>2)"}
+ respectively. Consider now the third premise and note that the POSIX value
+ of this rule should match the string \mbox{@{term "s\<^sub>1 @ s\<^sub>2"}}. According to the
+ longest match rule, we want that the @{term "s\<^sub>1"} is the longest initial
+ split of \mbox{@{term "s\<^sub>1 @ s\<^sub>2"}} such that @{term "s\<^sub>2"} is still recognised
+ by @{term "r\<^sub>2"}. Let us assume, contrary to the third premise, that there
+ \emph{exist} an @{term "s\<^sub>3"} and @{term "s\<^sub>4"} such that @{term "s\<^sub>2"}
+ can be split up into a non-empty string @{term "s\<^sub>3"} and a possibly empty
+ string @{term "s\<^sub>4"}. Moreover the longer string @{term "s\<^sub>1 @ s\<^sub>3"} can be
+ matched by @{text "r\<^sub>1"} and the shorter @{term "s\<^sub>4"} can still be
+ matched by @{term "r\<^sub>2"}. 
In this case @{term "s\<^sub>1"} would \emph{not} be the + longest initial split of \mbox{@{term "s\<^sub>1 @ s\<^sub>2"}} and therefore @{term "Seq v\<^sub>1 + v\<^sub>2"} cannot be a POSIX value for @{term "(s\<^sub>1 @ s\<^sub>2, SEQ r\<^sub>1 r\<^sub>2)"}. + The main point is that our side-condition ensures the longest + match rule is satisfied. + + A similar condition is imposed on the POSIX value in the @{text + "P\"}-rule. Also there we want that @{term "s\<^sub>1"} is the longest initial + split of @{term "s\<^sub>1 @ s\<^sub>2"} and furthermore the corresponding value + @{term v} cannot be flattened to the empty string. In effect, we require + that in each ``iteration'' of the star, some non-empty substring needs to + be ``chipped'' away; only in case of the empty string we accept @{term + "Stars []"} as the POSIX value. + + Next is the lemma that shows the function @{term "mkeps"} calculates + the POSIX value for the empty string and a nullable regular expression. + + \begin{lemma}\label{lemmkeps} + @{thm[mode=IfThen] Posix_mkeps} + \end{lemma} + + \begin{proof} + By routine induction on @{term r}.\qed + \end{proof} + + \noindent + The central lemma for our POSIX relation is that the @{text inj}-function + preserves POSIX values. + + \begin{lemma}\label{Posix2} + @{thm[mode=IfThen] Posix_injval} + \end{lemma} + + \begin{proof} + By induction on @{text r}. We explain two cases. + + \begin{itemize} + \item[$\bullet$] Case @{term "r = ALT r\<^sub>1 r\<^sub>2"}. There are + two subcases, namely @{text "(a)"} \mbox{@{term "v = Left v'"}} and @{term + "s \ der c r\<^sub>1 \ v'"}; and @{text "(b)"} @{term "v = Right v'"}, @{term + "s \ L (der c r\<^sub>1)"} and @{term "s \ der c r\<^sub>2 \ v'"}. 
In @{text "(a)"} we + know @{term "s \ der c r\<^sub>1 \ v'"}, from which we can infer @{term "(c # s) + \ r\<^sub>1 \ injval r\<^sub>1 c v'"} by induction hypothesis and hence @{term "(c # + s) \ ALT r\<^sub>1 r\<^sub>2 \ injval (ALT r\<^sub>1 r\<^sub>2) c (Left v')"} as needed. Similarly + in subcase @{text "(b)"} where, however, in addition we have to use + Prop.~\ref{derprop}(2) in order to infer @{term "c # s \ L r\<^sub>1"} from @{term + "s \ L (der c r\<^sub>1)"}. + + \item[$\bullet$] Case @{term "r = SEQ r\<^sub>1 r\<^sub>2"}. There are three subcases: + + \begin{quote} + \begin{description} + \item[@{text "(a)"}] @{term "v = Left (Seq v\<^sub>1 v\<^sub>2)"} and @{term "nullable r\<^sub>1"} + \item[@{text "(b)"}] @{term "v = Right v\<^sub>1"} and @{term "nullable r\<^sub>1"} + \item[@{text "(c)"}] @{term "v = Seq v\<^sub>1 v\<^sub>2"} and @{term "\ nullable r\<^sub>1"} + \end{description} + \end{quote} + + \noindent For @{text "(a)"} we know @{term "s\<^sub>1 \ der c r\<^sub>1 \ v\<^sub>1"} and + @{term "s\<^sub>2 \ r\<^sub>2 \ v\<^sub>2"} as well as + % + \[@{term "\ (\s\<^sub>3 s\<^sub>4. s\<^sub>3 \ [] \ s\<^sub>3 @ s\<^sub>4 = s\<^sub>2 \ s\<^sub>1 @ s\<^sub>3 \ L (der c r\<^sub>1) \ s\<^sub>4 \ L r\<^sub>2)"}\] + + \noindent From the latter we can infer by Prop.~\ref{derprop}(2): + % + \[@{term "\ (\s\<^sub>3 s\<^sub>4. s\<^sub>3 \ [] \ s\<^sub>3 @ s\<^sub>4 = s\<^sub>2 \ (c # s\<^sub>1) @ s\<^sub>3 \ L r\<^sub>1 \ s\<^sub>4 \ L r\<^sub>2)"}\] + + \noindent We can use the induction hypothesis for @{text "r\<^sub>1"} to obtain + @{term "(c # s\<^sub>1) \ r\<^sub>1 \ injval r\<^sub>1 c v\<^sub>1"}. Putting this all together allows us to infer + @{term "((c # s\<^sub>1) @ s\<^sub>2) \ SEQ r\<^sub>1 r\<^sub>2 \ Seq (injval r\<^sub>1 c v\<^sub>1) v\<^sub>2"}. The case @{text "(c)"} + is similar. + + For @{text "(b)"} we know @{term "s \ der c r\<^sub>2 \ v\<^sub>1"} and + @{term "s\<^sub>1 @ s\<^sub>2 \ L (SEQ (der c r\<^sub>1) r\<^sub>2)"}. 
From the former + we have @{term "(c # s) \ r\<^sub>2 \ (injval r\<^sub>2 c v\<^sub>1)"} by induction hypothesis + for @{term "r\<^sub>2"}. From the latter we can infer + % + \[@{term "\ (\s\<^sub>3 s\<^sub>4. s\<^sub>3 \ [] \ s\<^sub>3 @ s\<^sub>4 = c # s \ s\<^sub>3 \ L r\<^sub>1 \ s\<^sub>4 \ L r\<^sub>2)"}\] + + \noindent By Lem.~\ref{lemmkeps} we know @{term "[] \ r\<^sub>1 \ (mkeps r\<^sub>1)"} + holds. Putting this all together, we can conclude with @{term "(c # + s) \ SEQ r\<^sub>1 r\<^sub>2 \ Seq (mkeps r\<^sub>1) (injval r\<^sub>2 c v\<^sub>1)"}, as required. + + Finally suppose @{term "r = STAR r\<^sub>1"}. This case is very similar to the + sequence case, except that we need to also ensure that @{term "flat (injval r\<^sub>1 + c v\<^sub>1) \ []"}. This follows from @{term "(c # s\<^sub>1) + \ r\<^sub>1 \ injval r\<^sub>1 c v\<^sub>1"} (which in turn follows from @{term "s\<^sub>1 \ der c + r\<^sub>1 \ v\<^sub>1"} and the induction hypothesis).\qed + \end{itemize} + \end{proof} + + \noindent + With Lem.~\ref{Posix2} in place, it is completely routine to establish + that the Sulzmann and Lu lexer satisfies our specification (returning + the null value @{term "None"} iff the string is not in the language of the regular expression, + and returning a unique POSIX value iff the string \emph{is} in the language): + + \begin{theorem}\mbox{}\smallskip\\\label{lexercorrect} + \begin{tabular}{ll} + (1) & @{thm (lhs) lexer_correct_None} if and only if @{thm (rhs) lexer_correct_None}\\ + (2) & @{thm (lhs) lexer_correct_Some} if and only if @{thm (rhs) lexer_correct_Some}\\ + \end{tabular} + \end{theorem} + + \begin{proof} + By induction on @{term s} using Lem.~\ref{lemmkeps} and \ref{Posix2}.\qed + \end{proof} + + \noindent In (2) we further know by Thm.~\ref{posixdeterm} that the + value returned by the lexer must be unique. 
A simple corollary + of our two theorems is: + + \begin{corollary}\mbox{}\smallskip\\\label{lexercorrectcor} + \begin{tabular}{ll} + (1) & @{thm (lhs) lexer_correctness(2)} if and only if @{thm (rhs) lexer_correctness(2)}\\ + (2) & @{thm (lhs) lexer_correctness(1)} if and only if @{thm (rhs) lexer_correctness(1)}\\ + \end{tabular} + \end{corollary} + + \noindent + This concludes our + correctness proof. Note that we have not changed the algorithm of + Sulzmann and Lu,\footnote{All deviations we introduced are + harmless.} but introduced our own specification for what a correct + result---a POSIX value---should be. A strong point in favour of + Sulzmann and Lu's algorithm is that it can be extended in various + ways. + +*} + +section {* Extensions and Optimisations*} + +text {* + + If we are interested in tokenising a string, then we need to not just + split up the string into tokens, but also ``classify'' the tokens (for + example whether it is a keyword or an identifier). This can be done with + only minor modifications to the algorithm by introducing \emph{record + regular expressions} and \emph{record values} (for example + \cite{Sulzmann2014b}): + + \begin{center} + @{text "r :="} + @{text "..."} $\mid$ + @{text "(l : r)"} \qquad\qquad + @{text "v :="} + @{text "..."} $\mid$ + @{text "(l : v)"} + \end{center} + + \noindent where @{text l} is a label, say a string, @{text r} a regular + expression and @{text v} a value. All functions can be smoothly extended + to these regular expressions and values. For example \mbox{@{text "(l : + r)"}} is nullable iff @{term r} is, and so on. The purpose of the record + regular expression is to mark certain parts of a regular expression and + then record in the calculated value which parts of the string were matched + by this part. The label can then serve as classification for the tokens. 
+ For this recall the regular expression @{text "(r\<^bsub>key\<^esub> + r\<^bsub>id\<^esub>)\<^sup>\"} for + keywords and identifiers from the Introduction. With the record regular + expression we can form \mbox{@{text "((key : r\<^bsub>key\<^esub>) + (id : r\<^bsub>id\<^esub>))\<^sup>\"}} + and then traverse the calculated value and only collect the underlying + strings in record values. With this we obtain finite sequences of pairs of + labels and strings, for example + + \[@{text "(l\<^sub>1 : s\<^sub>1), ..., (l\<^sub>n : s\<^sub>n)"}\] + + \noindent from which tokens with classifications (keyword-token, + identifier-token and so on) can be extracted. + + Derivatives as calculated by \Brz's method are usually more complex + regular expressions than the initial one; the result is that the + derivative-based matching and lexing algorithms are often abysmally slow. + However, various optimisations are possible, such as the simplifications + of @{term "ALT ZERO r"}, @{term "ALT r ZERO"}, @{term "SEQ ONE r"} and + @{term "SEQ r ONE"} to @{term r}. These simplifications can speed up the + algorithms considerably, as noted in \cite{Sulzmann2014}. One of the + advantages of having a simple specification and correctness proof is that + the latter can be refined to prove the correctness of such simplification + steps. 
While the simplification of regular expressions according to + rules like + + \begin{equation}\label{Simpl} + \begin{array}{lcllcllcllcl} + @{term "ALT ZERO r"} & @{text "\"} & @{term r} \hspace{8mm}%\\ + @{term "ALT r ZERO"} & @{text "\"} & @{term r} \hspace{8mm}%\\ + @{term "SEQ ONE r"} & @{text "\"} & @{term r} \hspace{8mm}%\\ + @{term "SEQ r ONE"} & @{text "\"} & @{term r} + \end{array} + \end{equation} + + \noindent is well understood, there is an obstacle with the POSIX value + calculation algorithm by Sulzmann and Lu: if we build a derivative regular + expression and then simplify it, we will calculate a POSIX value for this + simplified derivative regular expression, \emph{not} for the original (unsimplified) + derivative regular expression. Sulzmann and Lu \cite{Sulzmann2014} overcome this obstacle by + not just calculating a simplified regular expression, but also calculating + a \emph{rectification function} that ``repairs'' the incorrect value. + + The rectification functions can be (slightly clumsily) implemented in + Isabelle/HOL as follows using some auxiliary functions: + + \begin{center} + \begin{tabular}{lcl} + @{thm (lhs) F_RIGHT.simps(1)} & $\dn$ & @{text "Right (f v)"}\\ + @{thm (lhs) F_LEFT.simps(1)} & $\dn$ & @{text "Left (f v)"}\\ + + @{thm (lhs) F_ALT.simps(1)} & $\dn$ & @{text "Right (f\<^sub>2 v)"}\\ + @{thm (lhs) F_ALT.simps(2)} & $\dn$ & @{text "Left (f\<^sub>1 v)"}\\ + + @{thm (lhs) F_SEQ1.simps(1)} & $\dn$ & @{text "Seq (f\<^sub>1 ()) (f\<^sub>2 v)"}\\ + @{thm (lhs) F_SEQ2.simps(1)} & $\dn$ & @{text "Seq (f\<^sub>1 v) (f\<^sub>2 ())"}\\ + @{thm (lhs) F_SEQ.simps(1)} & $\dn$ & @{text "Seq (f\<^sub>1 v\<^sub>1) (f\<^sub>2 v\<^sub>2)"}\medskip\\ + %\end{tabular} + % + %\begin{tabular}{lcl} + @{term "simp_ALT (ZERO, DUMMY) (r\<^sub>2, f\<^sub>2)"} & $\dn$ & @{term "(r\<^sub>2, F_RIGHT f\<^sub>2)"}\\ + @{term "simp_ALT (r\<^sub>1, f\<^sub>1) (ZERO, DUMMY)"} & $\dn$ & @{term "(r\<^sub>1, F_LEFT f\<^sub>1)"}\\ + @{term "simp_ALT (r\<^sub>1, 
f\<^sub>1) (r\<^sub>2, f\<^sub>2)"} & $\dn$ & @{term "(ALT r\<^sub>1 r\<^sub>2, F_ALT f\<^sub>1 f\<^sub>2)"}\\ + @{term "simp_SEQ (ONE, f\<^sub>1) (r\<^sub>2, f\<^sub>2)"} & $\dn$ & @{term "(r\<^sub>2, F_SEQ1 f\<^sub>1 f\<^sub>2)"}\\ + @{term "simp_SEQ (r\<^sub>1, f\<^sub>1) (ONE, f\<^sub>2)"} & $\dn$ & @{term "(r\<^sub>1, F_SEQ2 f\<^sub>1 f\<^sub>2)"}\\ + @{term "simp_SEQ (r\<^sub>1, f\<^sub>1) (r\<^sub>2, f\<^sub>2)"} & $\dn$ & @{term "(SEQ r\<^sub>1 r\<^sub>2, F_SEQ f\<^sub>1 f\<^sub>2)"}\\ + \end{tabular} + \end{center} + + \noindent + The functions @{text "simp\<^bsub>Alt\<^esub>"} and @{text "simp\<^bsub>Seq\<^esub>"} encode the simplification rules + in \eqref{Simpl} and compose the rectification functions (simplifications can occur + deep inside the regular expression). The main simplification function is then + + \begin{center} + \begin{tabular}{lcl} + @{term "simp (ALT r\<^sub>1 r\<^sub>2)"} & $\dn$ & @{term "simp_ALT (simp r\<^sub>1) (simp r\<^sub>2)"}\\ + @{term "simp (SEQ r\<^sub>1 r\<^sub>2)"} & $\dn$ & @{term "simp_SEQ (simp r\<^sub>1) (simp r\<^sub>2)"}\\ + @{term "simp r"} & $\dn$ & @{term "(r, id)"}\\ + \end{tabular} + \end{center} + + \noindent where @{term "id"} stands for the identity function. The + function @{const simp} returns a simplified regular expression and a corresponding + rectification function. Note that we do not simplify under stars: this + seems to slow down the algorithm, rather than speed it up. 
The optimised + lexer is then given by the clauses: + + \begin{center} + \begin{tabular}{lcl} + @{thm (lhs) slexer.simps(1)} & $\dn$ & @{thm (rhs) slexer.simps(1)}\\ + @{thm (lhs) slexer.simps(2)} & $\dn$ & + @{text "let (r\<^sub>s, f\<^sub>r) = simp (r "}$\backslash$@{text " c) in"}\\ + & & @{text "case"} @{term "slexer r\<^sub>s s"} @{text of}\\ + & & \phantom{$|$} @{term "None"} @{text "\"} @{term None}\\ + & & $|$ @{term "Some v"} @{text "\"} @{text "Some (inj r c (f\<^sub>r v))"} + \end{tabular} + \end{center} + + \noindent + In the second clause we first calculate the derivative @{term "der c r"} + and then simplify the result. This gives us a simplified derivative + @{text "r\<^sub>s"} and a rectification function @{text "f\<^sub>r"}. The lexer + is then recursively called with the simplified derivative, but before + we inject the character @{term c} into the value @{term v}, we need to rectify + @{term v} (that is construct @{term "f\<^sub>r v"}). Before we can establish the correctness + of @{term "slexer"}, we need to show that simplification preserves the language + and simplification preserves our POSIX relation once the value is rectified + (recall @{const "simp"} generates a (regular expression, rectification function) pair): + + \begin{lemma}\mbox{}\smallskip\\\label{slexeraux} + \begin{tabular}{ll} + (1) & @{thm L_fst_simp[symmetric]}\\ + (2) & @{thm[mode=IfThen] Posix_simp} + \end{tabular} + \end{lemma} + + \begin{proof} Both are by induction on @{text r}. There is no + interesting case for the first statement. For the second statement, + of interest are the @{term "r = ALT r\<^sub>1 r\<^sub>2"} and @{term "r = SEQ r\<^sub>1 + r\<^sub>2"} cases. In each case we have to analyse four subcases whether + @{term "fst (simp r\<^sub>1)"} and @{term "fst (simp r\<^sub>2)"} equals @{const + ZERO} (respectively @{const ONE}). 
For example for @{term "r = ALT + r\<^sub>1 r\<^sub>2"}, consider the subcase @{term "fst (simp r\<^sub>1) = ZERO"} and + @{term "fst (simp r\<^sub>2) \ ZERO"}. By assumption we know @{term "s \ + fst (simp (ALT r\<^sub>1 r\<^sub>2)) \ v"}. From this we can infer @{term "s \ fst (simp r\<^sub>2) \ v"} + and by IH also (*) @{term "s \ r\<^sub>2 \ (snd (simp r\<^sub>2) v)"}. Given @{term "fst (simp r\<^sub>1) = ZERO"} + we know @{term "L (fst (simp r\<^sub>1)) = {}"}. By the first statement + @{term "L r\<^sub>1"} is the empty set, meaning (**) @{term "s \ L r\<^sub>1"}. + Taking (*) and (**) together gives by the \mbox{@{text "P+R"}}-rule + @{term "s \ ALT r\<^sub>1 r\<^sub>2 \ Right (snd (simp r\<^sub>2) v)"}. In turn this + gives @{term "s \ ALT r\<^sub>1 r\<^sub>2 \ snd (simp (ALT r\<^sub>1 r\<^sub>2)) v"} as we need to show. + The other cases are similar.\qed + \end{proof} + + \noindent We can now prove relatively straightforwardly that the + optimised lexer produces the expected result: + + \begin{theorem} + @{thm slexer_correctness} + \end{theorem} + + \begin{proof} By induction on @{term s} generalising over @{term + r}. The case @{term "[]"} is trivial. For the cons-case suppose the + string is of the form @{term "c # s"}. By induction hypothesis we + know @{term "slexer r s = lexer r s"} holds for all @{term r} (in + particular for @{term "r"} being the derivative @{term "der c + r"}). Let @{term "r\<^sub>s"} be the simplified derivative regular expression, that is @{term + "fst (simp (der c r))"}, and @{term "f\<^sub>r"} be the rectification + function, that is @{term "snd (simp (der c r))"}. We distinguish the cases + whether (*) @{term "s \ L (der c r)"} or not. In the first case we + have by Thm.~\ref{lexercorrect}(2) a value @{term "v"} so that @{term + "lexer (der c r) s = Some v"} and @{term "s \ der c r \ v"} hold. + By Lem.~\ref{slexeraux}(1) we can also infer from~(*) that @{term "s + \ L r\<^sub>s"} holds. 
Hence we know by Thm.~\ref{lexercorrect}(2) that + there exists a @{term "v'"} with @{term "lexer r\<^sub>s s = Some v'"} and + @{term "s \ r\<^sub>s \ v'"}. From the latter we know by + Lem.~\ref{slexeraux}(2) that @{term "s \ der c r \ (f\<^sub>r v')"} holds. + By the uniqueness of the POSIX relation (Thm.~\ref{posixdeterm}) we + can infer that @{term v} is equal to @{term "f\<^sub>r v'"}---that is the + rectification function applied to @{term "v'"} + produces the original @{term "v"}. Now the case follows by the + definitions of @{const lexer} and @{const slexer}. + + In the second case where @{term "s \ L (der c r)"} we have that + @{term "lexer (der c r) s = None"} by Thm.~\ref{lexercorrect}(1). We + also know by Lem.~\ref{slexeraux}(1) that @{term "s \ L r\<^sub>s"}. Hence + @{term "lexer r\<^sub>s s = None"} by Thm.~\ref{lexercorrect}(1) and + by IH then also @{term "slexer r\<^sub>s s = None"}. With this we can + conclude in this case too.\qed + + \end{proof} +*} + +section {* The Correctness Argument by Sulzmann and Lu\label{argu} *} + +text {* +% \newcommand{\greedy}{\succcurlyeq_{gr}} + \newcommand{\posix}{>} + + An extended version of \cite{Sulzmann2014} is available at the website of + its first author; this includes some ``proofs'', claimed in + \cite{Sulzmann2014} to be ``rigorous''. Since these are evidently not in + final form, we make no comment thereon, preferring to give general reasons + for our belief that the approach of \cite{Sulzmann2014} is problematic. 
+ Their central definition is an ``ordering relation'' defined by the + rules (slightly adapted to fit our notation): + +\begin{center} +\begin{tabular}{@ {}c@ {\hspace{4mm}}c@ {}} +@{thm[mode=Rule] C2[of "v\<^sub>1" "r\<^sub>1" "v\<^sub>1\" "v\<^sub>2" "r\<^sub>2" "v\<^sub>2\"]}\,(C2) & +@{thm[mode=Rule] C1[of "v\<^sub>2" "r\<^sub>2" "v\<^sub>2\" "v\<^sub>1" "r\<^sub>1"]}\,(C1)\smallskip\\ + +@{thm[mode=Rule] A1[of "v\<^sub>1" "v\<^sub>2" "r\<^sub>1" "r\<^sub>2"]}\,(A1) & +@{thm[mode=Rule] A2[of "v\<^sub>2" "v\<^sub>1" "r\<^sub>1" "r\<^sub>2"]}\,(A2)\smallskip\\ + +@{thm[mode=Rule] A3[of "v\<^sub>1" "r\<^sub>2" "v\<^sub>2" "r\<^sub>1"]}\,(A3) & +@{thm[mode=Rule] A4[of "v\<^sub>1" "r\<^sub>1" "v\<^sub>2" "r\<^sub>2"]}\,(A4)\smallskip\\ + +@{thm[mode=Rule] K1[of "v" "vs" "r"]}\,(K1) & +@{thm[mode=Rule] K2[of "v" "vs" "r"]}\,(K2)\smallskip\\ + +@{thm[mode=Rule] K3[of "v\<^sub>1" "r" "v\<^sub>2" "vs\<^sub>1" "vs\<^sub>2"]}\,(K3) & +@{thm[mode=Rule] K4[of "vs\<^sub>1" "r" "vs\<^sub>2" "v"]}\,(K4) +\end{tabular} +\end{center} + + \noindent The idea behind the rules (A1) and (A2), for example, is that a + @{text Left}-value is bigger than a @{text Right}-value, if the underlying + string of the @{text Left}-value is longer or of equal length to the + underlying string of the @{text Right}-value. The order is reversed, + however, if the @{text Right}-value can match a longer string than a + @{text Left}-value. In this way the POSIX value is supposed to be the + biggest value for a given string and regular expression. + + Sulzmann and Lu explicitly refer to the paper \cite{Frisch2004} by Frisch + and Cardelli from where they have taken the idea for their correctness + proof. Frisch and Cardelli introduced a similar ordering for GREEDY + matching and they showed that their GREEDY matching algorithm always + produces a maximal element according to this ordering (from all possible + solutions). 
The only difference between their GREEDY ordering and the + ``ordering'' by Sulzmann and Lu is that GREEDY always prefers a @{text + Left}-value over a @{text Right}-value, no matter what the underlying + string is. This seems to be only a very minor difference, but it has + drastic consequences in terms of what properties both orderings enjoy. + What is interesting for our purposes is that the properties reflexivity, + totality and transitivity for this GREEDY ordering can be proved + relatively easily by induction. + + These properties of GREEDY, however, do not transfer to the POSIX + ``ordering'' by Sulzmann and Lu, which they define as @{text "v\<^sub>1 \\<^sub>r v\<^sub>2"}. + To start with, @{text "v\<^sub>1 \\<^sub>r v\<^sub>2"} is + not defined inductively, but as $($@{term "v\<^sub>1 = v\<^sub>2"}$)$ $\vee$ $($@{term "(v\<^sub>1 >r + v\<^sub>2) \ (flat v\<^sub>1 = flat (v\<^sub>2::val))"}$)$. This means that @{term "v\<^sub>1 + >(r::rexp) (v\<^sub>2::val)"} does not necessarily imply @{term "v\<^sub>1 \(r::rexp) + (v\<^sub>2::val)"}. Moreover, transitivity does not hold in the ``usual'' + formulation, for example: + + \begin{falsehood} + Suppose @{term "\ v\<^sub>1 : r"}, @{term "\ v\<^sub>2 : r"} and @{term "\ v\<^sub>3 : r"}. + If @{term "v\<^sub>1 >(r::rexp) (v\<^sub>2::val)"} and @{term "v\<^sub>2 >(r::rexp) (v\<^sub>3::val)"} + then @{term "v\<^sub>1 >(r::rexp) (v\<^sub>3::val)"}. 
+ \end{falsehood} + + \noindent If formulated in this way, then there are various counter + examples: For example let @{term r} be @{text "a + ((a + a)\(a + \))"} + then the @{term "v\<^sub>1"}, @{term "v\<^sub>2"} and @{term "v\<^sub>3"} below are values + of @{term r}: + + \begin{center} + \begin{tabular}{lcl} + @{term "v\<^sub>1"} & $=$ & @{term "Left(Char a)"}\\ + @{term "v\<^sub>2"} & $=$ & @{term "Right(Seq (Left(Char a)) (Right Void))"}\\ + @{term "v\<^sub>3"} & $=$ & @{term "Right(Seq (Right(Char a)) (Left(Char a)))"} + \end{tabular} + \end{center} + + \noindent Moreover @{term "v\<^sub>1 >(r::rexp) v\<^sub>2"} and @{term "v\<^sub>2 >(r::rexp) + v\<^sub>3"}, but \emph{not} @{term "v\<^sub>1 >(r::rexp) v\<^sub>3"}! The reason is that + although @{term "v\<^sub>3"} is a @{text "Right"}-value, it can match a longer + string, namely @{term "flat v\<^sub>3 = [a,a]"}, while @{term "flat v\<^sub>1"} (and + @{term "flat v\<^sub>2"}) matches only @{term "[a]"}. So transitivity in this + formulation does not hold---in this example actually @{term "v\<^sub>3 + >(r::rexp) v\<^sub>1"}! + + Sulzmann and Lu ``fix'' this problem by weakening the transitivity + property. They require in addition that the underlying strings are of the + same length. This excludes the counter example above and any + counter-example we were able to find (by hand and by machine). Thus the + transitivity lemma should be formulated as: + + \begin{conject} + Suppose @{term "\ v\<^sub>1 : r"}, @{term "\ v\<^sub>2 : r"} and @{term "\ v\<^sub>3 : r"}, + and also @{text "|v\<^sub>1| = |v\<^sub>2| = |v\<^sub>3|"}.\\ + If @{term "v\<^sub>1 >(r::rexp) (v\<^sub>2::val)"} and @{term "v\<^sub>2 >(r::rexp) (v\<^sub>3::val)"} + then @{term "v\<^sub>1 >(r::rexp) (v\<^sub>3::val)"}. + \end{conject} + + \noindent While we agree with Sulzmann and Lu that this property + probably(!) 
holds, proving it seems not so straightforward: although one + begins with the assumption that the values have the same flattening, this + cannot be maintained as one descends into the induction. This is a problem + that occurs in a number of places in the proofs by Sulzmann and Lu. + + Although they do not give an explicit proof of the transitivity property, + they give a closely related property about the existence of maximal + elements. They state that this can be verified by an induction on @{term r}. We + disagree with this as we shall show next in case of transitivity. The case + where the reasoning breaks down is the sequence case, say @{term "SEQ r\<^sub>1 r\<^sub>2"}. + The induction hypotheses in this case are + +\begin{center} +\begin{tabular}{@ {}c@ {\hspace{10mm}}c@ {}} +\begin{tabular}{@ {}l@ {\hspace{-7mm}}l@ {}} +IH @{term "r\<^sub>1"}:\\ +@{text "\ v\<^sub>1, v\<^sub>2, v\<^sub>3."} \\ + & @{term "\ v\<^sub>1 : r\<^sub>1"}\;@{text "\"} + @{term "\ v\<^sub>2 : r\<^sub>1"}\;@{text "\"} + @{term "\ v\<^sub>3 : r\<^sub>1"}\\ + & @{text "\"} @{text "|v\<^sub>1| = |v\<^sub>2| = |v\<^sub>3|"}\\ + & @{text "\"} @{term "v\<^sub>1 >(r\<^sub>1::rexp) v\<^sub>2 \ v\<^sub>2 >(r\<^sub>1::rexp) v\<^sub>3"}\medskip\\ + & $\Rightarrow$ @{term "v\<^sub>1 >(r\<^sub>1::rexp) v\<^sub>3"} +\end{tabular} & +\begin{tabular}{@ {}l@ {\hspace{-7mm}}l@ {}} +IH @{term "r\<^sub>2"}:\\ +@{text "\ v\<^sub>1, v\<^sub>2, v\<^sub>3."}\\ + & @{term "\ v\<^sub>1 : r\<^sub>2"}\;@{text "\"} + @{term "\ v\<^sub>2 : r\<^sub>2"}\;@{text "\"} + @{term "\ v\<^sub>3 : r\<^sub>2"}\\ + & @{text "\"} @{text "|v\<^sub>1| = |v\<^sub>2| = |v\<^sub>3|"}\\ + & @{text "\"} @{term "v\<^sub>1 >(r\<^sub>2::rexp) v\<^sub>2 \ v\<^sub>2 >(r\<^sub>2::rexp) v\<^sub>3"}\medskip\\ + & $\Rightarrow$ @{term "v\<^sub>1 >(r\<^sub>2::rexp) v\<^sub>3"} +\end{tabular} +\end{tabular} +\end{center} + + \noindent We can assume that + % + \begin{equation} + @{term "(Seq (v\<^sub>1\<^sub>l) (v\<^sub>1\<^sub>r)) >(SEQ 
r\<^sub>1 r\<^sub>2) (Seq (v\<^sub>2\<^sub>l) (v\<^sub>2\<^sub>r))"} + \qquad\textrm{and}\qquad + @{term "(Seq (v\<^sub>2\<^sub>l) (v\<^sub>2\<^sub>r)) >(SEQ r\<^sub>1 r\<^sub>2) (Seq (v\<^sub>3\<^sub>l) (v\<^sub>3\<^sub>r))"} + \label{assms} + \end{equation} + + \noindent hold, and furthermore that the values have equal length, namely: + % + \begin{equation} + @{term "flat (Seq (v\<^sub>1\<^sub>l) (v\<^sub>1\<^sub>r)) = flat (Seq (v\<^sub>2\<^sub>l) (v\<^sub>2\<^sub>r))"} + \qquad\textrm{and}\qquad + @{term "flat (Seq (v\<^sub>2\<^sub>l) (v\<^sub>2\<^sub>r)) = flat (Seq (v\<^sub>3\<^sub>l) (v\<^sub>3\<^sub>r))"} + \label{lens} + \end{equation} + + \noindent We need to show that @{term "(Seq (v\<^sub>1\<^sub>l) (v\<^sub>1\<^sub>r)) >(SEQ r\<^sub>1 r\<^sub>2) + (Seq (v\<^sub>3\<^sub>l) (v\<^sub>3\<^sub>r))"} holds. We can proceed by analysing how the + assumptions in \eqref{assms} have arisen. There are four cases. Let us + assume we are in the case where we know + + \[ + @{term "v\<^sub>1\<^sub>l >(r\<^sub>1::rexp) v\<^sub>2\<^sub>l"} + \qquad\textrm{and}\qquad + @{term "v\<^sub>2\<^sub>l >(r\<^sub>1::rexp) v\<^sub>3\<^sub>l"} + \] + + \noindent and also know the corresponding inhabitation judgements. This is + exactly a case where we would like to apply the induction hypothesis + IH~$r_1$. But we cannot! We still need to show that @{term "flat (v\<^sub>1\<^sub>l) = + flat(v\<^sub>2\<^sub>l)"} and @{term "flat(v\<^sub>2\<^sub>l) = flat(v\<^sub>3\<^sub>l)"}. We know from + \eqref{lens} that the lengths of the sequence values are equal, but from + this we cannot infer anything about the lengths of the component values. + Indeed in general they will be unequal, that is + + \[ + @{term "flat(v\<^sub>1\<^sub>l) \ flat(v\<^sub>2\<^sub>l)"} + \qquad\textrm{and}\qquad + @{term "flat(v\<^sub>1\<^sub>r) \ flat(v\<^sub>2\<^sub>r)"} + \] + + \noindent but still \eqref{lens} will hold. Now we are stuck, since the IH + does not apply. 
As said, this problem where the induction hypothesis does + not apply arises in several places in the proof of Sulzmann and Lu, not + just for proving transitivity. + +*} + +section {* Conclusion *} + +text {* + + We have implemented the POSIX value calculation algorithm introduced by + Sulzmann and Lu + \cite{Sulzmann2014}. Our implementation is nearly identical to the + original and all modifications we introduced are harmless (like our char-clause for + @{text inj}). We have proved this algorithm to be correct, but correct + according to our own specification of what POSIX values are. Our + specification (inspired from work by Vansummeren \cite{Vansummeren2006}) appears to be + much simpler than in \cite{Sulzmann2014} and our proofs are nearly always + straightforward. We have attempted to formalise the original proof + by Sulzmann and Lu \cite{Sulzmann2014}, but we believe it contains + unfillable gaps. In the online version of \cite{Sulzmann2014}, the authors + already acknowledge some small problems, but our experience suggests + that there are more serious problems. + + Having proved the correctness of the POSIX lexing algorithm in + \cite{Sulzmann2014}, which lessons have we learned? Well, this is a + perfect example for the importance of the \emph{right} definitions. We + have (on and off) explored mechanisations as soon as first versions + of \cite{Sulzmann2014} appeared, but have made little progress with + turning the relatively detailed proof sketch in \cite{Sulzmann2014} into a + formalisable proof. Having seen \cite{Vansummeren2006} and adapted the + POSIX definition given there for the algorithm by Sulzmann and Lu made all + the difference: the proofs, as said, are nearly straightforward. The + question remains whether the original proof idea of \cite{Sulzmann2014}, + potentially using our result as a stepping stone, can be made to work? + Alas, we really do not know despite considerable effort. 
+
+
+ Closely related to our work is an automata-based lexer formalised by
+ Nipkow \cite{Nipkow98}. This lexer also splits up strings into longest
+ initial substrings, but Nipkow's algorithm is not completely
+ computational. The algorithm by Sulzmann and Lu, in contrast, can be
+ implemented with ease in any functional language. A bespoke lexer for the
+ Imp-language is formalised in Coq as part of the Software Foundations book
+ by Pierce et al.~\cite{Pierce2015}. The disadvantage of such bespoke lexers is that they
+ do not generalise easily to more advanced features.
+ Our formalisation is available from the Archive of Formal Proofs \cite{aduAFP16}
+ under \url{http://www.isa-afp.org/entries/Posix-Lexing.shtml}.\medskip
+
+ \noindent
+ {\bf Acknowledgements:}
+ We are very grateful to Martin Sulzmann for his comments on our work and
+ moreover for patiently explaining to us the details in \cite{Sulzmann2014}. We
+ also received very helpful comments from James Cheney and anonymous referees. 
+ % \small + \bibliographystyle{plain} + \bibliography{root} + +*} + + +(*<*) +end +(*>*) \ No newline at end of file diff -r 47179a172c54 -r 16af5b8bd285 thys/Journal/document/root.bib --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/thys/Journal/document/root.bib Sun Feb 26 23:46:22 2017 +0000 @@ -0,0 +1,381 @@ +@article{aduAFP16, + author = {Fahad Ausaf and Roy Dyckhoff and Christian Urban}, + title = {{POSIX} {L}exing with {D}erivatives of {R}egular {E}xpressions}, + journal = {Archive of Formal Proofs}, + year = 2016, + note = {\url{http://www.isa-afp.org/entries/Posix-Lexing.shtml}, Formal proof development}, + ISSN = {2150-914x} +} + + +@TechReport{CrashCourse2014, + author = {N.~B.~B.~Grathwohl and F.~Henglein and U.~T.~Rasmussen}, + title = {{A} {C}rash-{C}ourse in {R}egular {E}xpression {P}arsing and {R}egular + {E}xpressions as {T}ypes}, + institution = {University of Copenhagen}, + year = {2014}, + annote = {draft report} +} + +@inproceedings{Sulzmann2014, + author = {M.~Sulzmann and K.~Lu}, + title = {{POSIX} {R}egular {E}xpression {P}arsing with {D}erivatives}, + booktitle = {Proc.~of the 12th International Conference on Functional and Logic Programming (FLOPS)}, + pages = {203--220}, + year = {2014}, + volume = {8475}, + series = {LNCS} +} + +@inproceedings{Sulzmann2014b, + author = {M.~Sulzmann and P.~van Steenhoven}, + title = {{A} {F}lexible and {E}fficient {ML} {L}exer {T}ool {B}ased on {E}xtended {R}egular + {E}xpression {S}ubmatching}, + booktitle = {Proc.~of the 23rd International Conference on Compiler Construction (CC)}, + pages = {174--191}, + year = {2014}, + volume = {8409}, + series = {LNCS} +} + +@book{Pierce2015, + author = {B.~C.~Pierce and C.~Casinghino and M.~Gaboardi and + M.~Greenberg and C.~Hri\c{t}cu and + V.~Sj\"{o}berg and B.~Yorgey}, + title = {{S}oftware {F}oundations}, + year = {2015}, + publisher = {Electronic textbook}, + note = {\url{http://www.cis.upenn.edu/~bcpierce/sf}} +} + +@Misc{Kuklewicz, + author = 
{C.~Kuklewicz}, + title = {{R}egex {P}osix}, + howpublished = "\url{https://wiki.haskell.org/Regex_Posix}" +} + +@article{Vansummeren2006, + author = {S.~Vansummeren}, + title = {{T}ype {I}nference for {U}nique {P}attern {M}atching}, + year = {2006}, + journal = {ACM Transactions on Programming Languages and Systems}, + volume = {28}, + number = {3}, + pages = {389--428} +} + +@InProceedings{Asperti12, + author = {A.~Asperti}, + title = {{A} {C}ompact {P}roof of {D}ecidability for {R}egular {E}xpression {E}quivalence}, + booktitle = {Proc.~of the 3rd International Conference on Interactive Theorem Proving (ITP)}, + pages = {283--298}, + year = {2012}, + volume = {7406}, + series = {LNCS} +} + +@inproceedings{Frisch2004, + author = {A.~Frisch and L.~Cardelli}, + title = {{G}reedy {R}egular {E}xpression {M}atching}, + booktitle = {Proc.~of the 31st International Conference on Automata, Languages and Programming (ICALP)}, + pages = {618--629}, + year = {2004}, + volume = {3142}, + series = {LNCS} +} + +@ARTICLE{Antimirov95, + author = {V.~Antimirov}, + title = {{P}artial {D}erivatives of {R}egular {E}xpressions and + {F}inite {A}utomata {C}onstructions}, + journal = {Theoretical Computer Science}, + year = {1995}, + volume = {155}, + pages = {291--319} +} + +@inproceedings{Nipkow98, + author={T.~Nipkow}, + title={{V}erified {L}exical {A}nalysis}, + booktitle={Proc.~of the 11th International Conference on Theorem Proving in Higher Order Logics (TPHOLs)}, + series={LNCS}, + volume=1479, + pages={1--15}, + year=1998 +} + +@article{Brzozowski1964, + author = {J.~A.~Brzozowski}, + title = {{D}erivatives of {R}egular {E}xpressions}, + journal = {Journal of the {ACM}}, + volume = {11}, + number = {4}, + pages = {481--494}, + year = {1964} +} + +@article{Leroy2009, + author = {X.~Leroy}, + title = {{F}ormal {V}erification of a {R}ealistic {C}ompiler}, + journal = {Communications of the ACM}, + year = 2009, + volume = 52, + number = 7, + pages = {107--115} +} + 
+@InProceedings{Paulson2015,
+  author = {L.~C.~Paulson},
+  title = {{A} {F}ormalisation of {F}inite {A}utomata {U}sing {H}ereditarily {F}inite {S}ets},
+  booktitle = {Proc.~of the 25th International Conference on Automated Deduction (CADE)},
+  pages = {231--245},
+  year = {2015},
+  volume = {9195},
+  series = {LNAI}
+}
+
+@Article{Wu2014,
+  author = {C.~Wu and X.~Zhang and C.~Urban},
+  title = {{A} {F}ormalisation of the {M}yhill-{N}erode {T}heorem based on {R}egular {E}xpressions},
+  journal = {Journal of Automated Reasoning},
+  year = {2014},
+  volume = {52},
+  number = {4},
+  pages = {451--480}
+}
+
+@InProceedings{Regehr2011,
+  author = {X.~Yang and Y.~Chen and E.~Eide and J.~Regehr},
+  title = {{F}inding and {U}nderstanding {B}ugs in {C} {C}ompilers},
+  pages = {283--294},
+  year = {2011},
+  booktitle = {Proc.~of the 32nd ACM SIGPLAN Conference on Programming Language Design and
+  Implementation (PLDI)}
+}
+
+@Article{Norrish2014,
+  author = {A.~Barthwal and M.~Norrish},
+  title = {{A} {M}echanisation of {S}ome {C}ontext-{F}ree {L}anguage {T}heory in {HOL4}},
+  journal = {Journal of Computer and System Sciences},
+  year = {2014},
+  volume = {80},
+  number = {2},
+  pages = {346--362}
+}
+
+@Article{Thompson1968,
+  author = {K.~Thompson},
+  title = {{P}rogramming {T}echniques: {R}egular {E}xpression {S}earch {A}lgorithm},
+  journal = {Communications of the ACM},
+  issue_date = {June 1968},
+  volume = {11},
+  number = {6},
+  year = {1968},
+  pages = {419--422}
+}
+
+@article{Owens2009,
+  author = {S.~Owens and J.~H.~Reppy and A.~Turon},
+  title = {{R}egular-{E}xpression {D}erivatives {R}e-{E}xamined},
+  journal = {Journal of Functional Programming},
+  volume = {19},
+  number = {2},
+  pages = {173--190},
+  year = {2009}
+}
+
+@inproceedings{Sulzmann2015,
+  author = {M.~Sulzmann and P.~Thiemann},
+  title = {{D}erivatives for {R}egular {S}huffle {E}xpressions},
+  booktitle = {Proc.~of the 9th International Conference on Language and Automata Theory
+  and Applications 
(LATA)},
+  pages = {275--286},
+  year = {2015},
+  volume = {8977},
+  series = {LNCS}
+}
+
+@inproceedings{Chen2012,
+  author = {H.~Chen and S.~Yu},
+  title = {{D}erivatives of {R}egular {E}xpressions and an {A}pplication},
+  booktitle = {Proc.~of the International Workshop on Theoretical
+  Computer Science (WTCS)},
+  pages = {343--356},
+  year = {2012},
+  volume = {7160},
+  series = {LNCS}
+}
+
+@article{Krauss2011,
+  author={A.~Krauss and T.~Nipkow},
+  title={{P}roof {P}earl: {R}egular {E}xpression {E}quivalence and {R}elation {A}lgebra},
+  journal={Journal of Automated Reasoning},
+  volume=49,
+  pages={95--106},
+  year=2012
+}
+
+@InProceedings{Traytel2015,
+  author = {D.~Traytel},
+  title = {{A} {C}oalgebraic {D}ecision {P}rocedure for {WS1S}},
+  booktitle = {Proc.~of the 24th Annual Conference on Computer Science Logic (CSL)},
+  pages = {487--503},
+  series = {LIPIcs},
+  year = {2015},
+  volume = {41}
+}
+
+@inproceedings{Traytel2013,
+  author={D.~Traytel and T.~Nipkow},
+  title={{A} {V}erified {D}ecision {P}rocedure for {MSO} on
+  {W}ords {B}ased on {D}erivatives of {R}egular {E}xpressions},
+  booktitle={Proc.~of the 18th ACM SIGPLAN International Conference on Functional Programming (ICFP)},
+  pages={3--12},
+  year=2013
+}
+
+@InProceedings{Coquand2012,
+  author = {T.~Coquand and V.~Siles},
+  title = {{A} {D}ecision {P}rocedure for {R}egular {E}xpression {E}quivalence in {T}ype {T}heory},
+  booktitle = {Proc.~of the 1st International Conference on Certified Programs and Proofs (CPP)},
+  pages = {119--134},
+  year = {2011},
+  volume = {7086},
+  series = {LNCS}
+}
+
+@InProceedings{Almeidaetal10,
+  author = {J.~B.~Almeida and N.~Moreira and D.~Pereira and S.~M.~de Sousa},
+  title = {{P}artial {D}erivative {A}utomata {F}ormalized in {C}oq},
+  booktitle = {Proc.~of the 15th International Conference on Implementation
+  and Application of Automata (CIAA)},
+  pages = {59--68},
+  year = {2010},
+  volume = {6482},
+  series = {LNCS}
+}
+
+@book{Michaelson,
+  title={An 
introduction to functional programming through lambda calculus}, + author={Michaelson, Greg}, + year={2011}, + publisher={Courier Corporation} +} + +@article{Owens2008, + author = {S.~Owens and K.~Slind}, + title = {{A}dapting {F}unctional {P}rograms to {H}igher {O}rder {L}ogic}, + journal = {Higher-Order and Symbolic Computation}, + volume = {21}, + number = {4}, + year = {2008}, + pages = {377--409} +} + + +@book{Velleman, + title={How to prove it: a structured approach}, + author={Velleman, Daniel J}, + year={2006}, + publisher={Cambridge University Press} +} + +@book{Jones, + title={Implementing functional languages:[a tutorial]}, + author={Jones, Simon L Peyton and Lester, David R}, + volume={124}, + year={1992}, + publisher={Prentice Hall Reading} +} + +@article{Cardelli, + title={Type systems}, + author={Cardelli, Luca}, + journal={ACM Computing Surveys}, + volume={28}, + number={1}, + pages={263--264}, + year={1996} +} + +@article{Morrisett, + author = {J. Gregory Morrisett and + Karl Crary and + Neal Glew and + David Walker}, + title = {Stack-based typed assembly language}, + journal = {J. Funct. 
Program.}, + volume = {13}, + number = {5}, + pages = {957--959}, + year = {2003}, + url = {http://dx.doi.org/10.1017/S0956796802004446}, + doi = {10.1017/S0956796802004446}, + timestamp = {Fri, 19 Mar 2004 13:48:27 +0100}, + biburl = {http://dblp.uni-trier.de/rec/bib/journals/jfp/MorrisettCGW03}, + bibsource = {dblp computer science bibliography, http://dblp.org} +} + +@book{Nipkow, + title={Concrete Semantics: With Isabelle/HOL}, + author={Nipkow, Tobias and Klein, Gerwin}, + year={2014}, + publisher={Springer} +} + +@article{Dube, + author = {Danny Dub{\'{e}} and + Marc Feeley}, + title = {Efficiently building a parse tree from a regular expression}, + journal = {Acta Inf.}, + volume = {37}, + number = {2}, + pages = {121--144}, + year = {2000}, + url = {http://link.springer.de/link/service/journals/00236/bibs/0037002/00370121.htm}, + timestamp = {Tue, 25 Nov 2003 14:51:21 +0100}, + biburl = {http://dblp.uni-trier.de/rec/bib/journals/acta/DubeF00}, + bibsource = {dblp computer science bibliography, http://dblp.org} +} + +@article{Morrisett2, + author = {J. Gregory Morrisett and + David Walker and + Karl Crary and + Neal Glew}, + title = {From system {F} to typed assembly language}, + journal = {{ACM} Trans. Program. Lang. 
Syst.}, + volume = {21}, + number = {3}, + pages = {527--568}, + year = {1999}, + url = {http://doi.acm.org/10.1145/319301.319345}, + doi = {10.1145/319301.319345}, + timestamp = {Wed, 26 Nov 2003 14:26:46 +0100}, + biburl = {http://dblp.uni-trier.de/rec/bib/journals/toplas/MorrisettWCG99}, + bibsource = {dblp computer science bibliography, http://dblp.org} +} + +@article{Owens2, + author = {Scott Owens and + Konrad Slind}, + title = {Adapting functional programs to higher order logic}, + journal = {Higher-Order and Symbolic Computation}, + volume = {21}, + number = {4}, + pages = {377--409}, + year = {2008}, + url = {http://dx.doi.org/10.1007/s10990-008-9038-0}, + doi = {10.1007/s10990-008-9038-0}, + timestamp = {Wed, 16 Dec 2009 13:51:02 +0100}, + biburl = {http://dblp.uni-trier.de/rec/bib/journals/lisp/OwensS08}, + bibsource = {dblp computer science bibliography, http://dblp.org} +} + +@misc{PCRE, + title = "{PCRE - Perl Compatible Regular Expressions}", + url = {http://www.pcre.org}, +} + + + diff -r 47179a172c54 -r 16af5b8bd285 thys/Journal/document/root.tex --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/thys/Journal/document/root.tex Sun Feb 26 23:46:22 2017 +0000 @@ -0,0 +1,79 @@ +\documentclass[runningheads]{llncs} +\usepackage{times} +\usepackage{isabelle} +\usepackage{isabellesym} +\usepackage{amsmath} +\usepackage{amssymb} +\usepackage{mathpartir} +\usepackage{tikz} +\usepackage{pgf} +\usetikzlibrary{positioning} +\usepackage{pdfsetup} +%%\usepackage{stmaryrd} +\usepackage{url} +\usepackage{color} + +\titlerunning{POSIX Lexing with Derivatives of Regular Expressions} + +\urlstyle{rm} +\isabellestyle{it} +\renewcommand{\isastyleminor}{\it}% +\renewcommand{\isastyle}{\normalsize\it}% + + +\def\dn{\,\stackrel{\mbox{\scriptsize def}}{=}\,} +\renewcommand{\isasymequiv}{$\dn$} +\renewcommand{\isasymemptyset}{$\varnothing$} +\renewcommand{\isacharunderscore}{\mbox{$\_\!\_$}} +\renewcommand{\isasymiota}{\makebox[0mm]{${}^{\prime}$}} + +\def\Brz{Brzozowski} 
+\def\der{\backslash} +\newtheorem{falsehood}{Falsehood} +\newtheorem{conject}{Conjecture} + +\begin{document} + +\title{POSIX {L}exing with {D}erivatives of {R}egular {E}xpressions (Proof Pearl)} +\author{Fahad Ausaf\inst{1} \and Roy Dyckhoff\inst{2} \and Christian Urban\inst{3}} +\institute{King's College London\\ + \email{fahad.ausaf@icloud.com} +\and University of St Andrews\\ + \email{roy.dyckhoff@st-andrews.ac.uk} +\and King's College London\\ + \email{christian.urban@kcl.ac.uk}} +\maketitle + +\begin{abstract} + +Brzozowski introduced the notion of derivatives for regular +expressions. They can be used for a very simple regular expression +matching algorithm. Sulzmann and Lu cleverly extended this algorithm +in order to deal with POSIX matching, which is the underlying +disambiguation strategy for regular expressions needed in lexers. +Sulzmann and Lu have made available on-line what they call a +``rigorous proof'' of the correctness of their algorithm w.r.t.\ their +specification; regrettably, it appears to us to have unfillable gaps. +In the first part of this paper we give our inductive definition of +what a POSIX value is and show $(i)$ that such a value is unique (for +given regular expression and string being matched) and $(ii)$ that +Sulzmann and Lu's algorithm always generates such a value (provided +that the regular expression matches the string). We also prove the +correctness of an optimised version of the POSIX matching +algorithm. Our definitions and proof are much simpler than those by +Sulzmann and Lu and can be easily formalised in Isabelle/HOL. 
In the +second part we analyse the correctness argument by Sulzmann and Lu and +explain why the gaps in this argument cannot be filled easily.\smallskip + +{\bf Keywords:} POSIX matching, Derivatives of Regular Expressions, +Isabelle/HOL +\end{abstract} + +\input{session} + +\end{document} + +%%% Local Variables: +%%% mode: latex +%%% TeX-master: t +%%% End: diff -r 47179a172c54 -r 16af5b8bd285 thys/Journal/llncs.cls --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/thys/Journal/llncs.cls Sun Feb 26 23:46:22 2017 +0000 @@ -0,0 +1,1208 @@ +% LLNCS DOCUMENT CLASS -- version 2.19 (31-Mar-2014) +% Springer Verlag LaTeX2e support for Lecture Notes in Computer Science +% +%% +%% \CharacterTable +%% {Upper-case \A\B\C\D\E\F\G\H\I\J\K\L\M\N\O\P\Q\R\S\T\U\V\W\X\Y\Z +%% Lower-case \a\b\c\d\e\f\g\h\i\j\k\l\m\n\o\p\q\r\s\t\u\v\w\x\y\z +%% Digits \0\1\2\3\4\5\6\7\8\9 +%% Exclamation \! Double quote \" Hash (number) \# +%% Dollar \$ Percent \% Ampersand \& +%% Acute accent \' Left paren \( Right paren \) +%% Asterisk \* Plus \+ Comma \, +%% Minus \- Point \. Solidus \/ +%% Colon \: Semicolon \; Less than \< +%% Equals \= Greater than \> Question mark \? 
+%% Commercial at \@ Left bracket \[ Backslash \\ +%% Right bracket \] Circumflex \^ Underscore \_ +%% Grave accent \` Left brace \{ Vertical bar \| +%% Right brace \} Tilde \~} +%% +\NeedsTeXFormat{LaTeX2e}[1995/12/01] +\ProvidesClass{llncs}[2014/03/31 v2.19 +^^J LaTeX document class for Lecture Notes in Computer Science] +% Options +\let\if@envcntreset\iffalse +\DeclareOption{envcountreset}{\let\if@envcntreset\iftrue} +\DeclareOption{citeauthoryear}{\let\citeauthoryear=Y} +\DeclareOption{oribibl}{\let\oribibl=Y} +\let\if@custvec\iftrue +\DeclareOption{orivec}{\let\if@custvec\iffalse} +\let\if@envcntsame\iffalse +\DeclareOption{envcountsame}{\let\if@envcntsame\iftrue} +\let\if@envcntsect\iffalse +\DeclareOption{envcountsect}{\let\if@envcntsect\iftrue} +\let\if@runhead\iffalse +\DeclareOption{runningheads}{\let\if@runhead\iftrue} + +\let\if@openright\iftrue +\let\if@openbib\iffalse +\DeclareOption{openbib}{\let\if@openbib\iftrue} + +% languages +\let\switcht@@therlang\relax +\def\ds@deutsch{\def\switcht@@therlang{\switcht@deutsch}} +\def\ds@francais{\def\switcht@@therlang{\switcht@francais}} + +\DeclareOption*{\PassOptionsToClass{\CurrentOption}{article}} + +\ProcessOptions + +\LoadClass[twoside]{article} +\RequirePackage{multicol} % needed for the list of participants, index +\RequirePackage{aliascnt} + +\setlength{\textwidth}{12.2cm} +\setlength{\textheight}{19.3cm} +\renewcommand\@pnumwidth{2em} +\renewcommand\@tocrmarg{3.5em} +% +\def\@dottedtocline#1#2#3#4#5{% + \ifnum #1>\c@tocdepth \else + \vskip \z@ \@plus.2\p@ + {\leftskip #2\relax \rightskip \@tocrmarg \advance\rightskip by 0pt plus 2cm + \parfillskip -\rightskip \pretolerance=10000 + \parindent #2\relax\@afterindenttrue + \interlinepenalty\@M + \leavevmode + \@tempdima #3\relax + \advance\leftskip \@tempdima \null\nobreak\hskip -\leftskip + {#4}\nobreak + \leaders\hbox{$\m@th + \mkern \@dotsep mu\hbox{.}\mkern \@dotsep + mu$}\hfill + \nobreak + \hb@xt@\@pnumwidth{\hfil\normalfont \normalcolor #5}% + 
\par}% + \fi} +% +\def\switcht@albion{% +\def\abstractname{Abstract.}% +\def\ackname{Acknowledgement.}% +\def\andname{and}% +\def\lastandname{\unskip, and}% +\def\appendixname{Appendix}% +\def\chaptername{Chapter}% +\def\claimname{Claim}% +\def\conjecturename{Conjecture}% +\def\contentsname{Table of Contents}% +\def\corollaryname{Corollary}% +\def\definitionname{Definition}% +\def\examplename{Example}% +\def\exercisename{Exercise}% +\def\figurename{Fig.}% +\def\keywordname{{\bf Keywords:}}% +\def\indexname{Index}% +\def\lemmaname{Lemma}% +\def\contriblistname{List of Contributors}% +\def\listfigurename{List of Figures}% +\def\listtablename{List of Tables}% +\def\mailname{{\it Correspondence to\/}:}% +\def\noteaddname{Note added in proof}% +\def\notename{Note}% +\def\partname{Part}% +\def\problemname{Problem}% +\def\proofname{Proof}% +\def\propertyname{Property}% +\def\propositionname{Proposition}% +\def\questionname{Question}% +\def\remarkname{Remark}% +\def\seename{see}% +\def\solutionname{Solution}% +\def\subclassname{{\it Subject Classifications\/}:}% +\def\tablename{Table}% +\def\theoremname{Theorem}} +\switcht@albion +% Names of theorem like environments are already defined +% but must be translated if another language is chosen +% +% French section +\def\switcht@francais{%\typeout{On parle francais.}% + \def\abstractname{R\'esum\'e.}% + \def\ackname{Remerciements.}% + \def\andname{et}% + \def\lastandname{ et}% + \def\appendixname{Appendice}% + \def\chaptername{Chapitre}% + \def\claimname{Pr\'etention}% + \def\conjecturename{Hypoth\`ese}% + \def\contentsname{Table des mati\`eres}% + \def\corollaryname{Corollaire}% + \def\definitionname{D\'efinition}% + \def\examplename{Exemple}% + \def\exercisename{Exercice}% + \def\figurename{Fig.}% + \def\keywordname{{\bf Mots-cl\'e:}}% + \def\indexname{Index}% + \def\lemmaname{Lemme}% + \def\contriblistname{Liste des contributeurs}% + \def\listfigurename{Liste des figures}% + \def\listtablename{Liste des tables}% + 
\def\mailname{{\it Correspondence to\/}:}% + \def\noteaddname{Note ajout\'ee \`a l'\'epreuve}% + \def\notename{Remarque}% + \def\partname{Partie}% + \def\problemname{Probl\`eme}% + \def\proofname{Preuve}% + \def\propertyname{Caract\'eristique}% +%\def\propositionname{Proposition}% + \def\questionname{Question}% + \def\remarkname{Remarque}% + \def\seename{voir}% + \def\solutionname{Solution}% + \def\subclassname{{\it Subject Classifications\/}:}% + \def\tablename{Tableau}% + \def\theoremname{Th\'eor\`eme}% +} +% +% German section +\def\switcht@deutsch{%\typeout{Man spricht deutsch.}% + \def\abstractname{Zusammenfassung.}% + \def\ackname{Danksagung.}% + \def\andname{und}% + \def\lastandname{ und}% + \def\appendixname{Anhang}% + \def\chaptername{Kapitel}% + \def\claimname{Behauptung}% + \def\conjecturename{Hypothese}% + \def\contentsname{Inhaltsverzeichnis}% + \def\corollaryname{Korollar}% +%\def\definitionname{Definition}% + \def\examplename{Beispiel}% + \def\exercisename{\"Ubung}% + \def\figurename{Abb.}% + \def\keywordname{{\bf Schl\"usselw\"orter:}}% + \def\indexname{Index}% +%\def\lemmaname{Lemma}% + \def\contriblistname{Mitarbeiter}% + \def\listfigurename{Abbildungsverzeichnis}% + \def\listtablename{Tabellenverzeichnis}% + \def\mailname{{\it Correspondence to\/}:}% + \def\noteaddname{Nachtrag}% + \def\notename{Anmerkung}% + \def\partname{Teil}% +%\def\problemname{Problem}% + \def\proofname{Beweis}% + \def\propertyname{Eigenschaft}% +%\def\propositionname{Proposition}% + \def\questionname{Frage}% + \def\remarkname{Anmerkung}% + \def\seename{siehe}% + \def\solutionname{L\"osung}% + \def\subclassname{{\it Subject Classifications\/}:}% + \def\tablename{Tabelle}% +%\def\theoremname{Theorem}% +} + +% Ragged bottom for the actual page +\def\thisbottomragged{\def\@textbottom{\vskip\z@ plus.0001fil +\global\let\@textbottom\relax}} + +\renewcommand\small{% + \@setfontsize\small\@ixpt{11}% + \abovedisplayskip 8.5\p@ \@plus3\p@ \@minus4\p@ + \abovedisplayshortskip \z@ 
\@plus2\p@ + \belowdisplayshortskip 4\p@ \@plus2\p@ \@minus2\p@ + \def\@listi{\leftmargin\leftmargini + \parsep 0\p@ \@plus1\p@ \@minus\p@ + \topsep 8\p@ \@plus2\p@ \@minus4\p@ + \itemsep0\p@}% + \belowdisplayskip \abovedisplayskip +} + +\frenchspacing +\widowpenalty=10000 +\clubpenalty=10000 + +\setlength\oddsidemargin {63\p@} +\setlength\evensidemargin {63\p@} +\setlength\marginparwidth {90\p@} + +\setlength\headsep {16\p@} + +\setlength\footnotesep{7.7\p@} +\setlength\textfloatsep{8mm\@plus 2\p@ \@minus 4\p@} +\setlength\intextsep {8mm\@plus 2\p@ \@minus 2\p@} + +\setcounter{secnumdepth}{2} + +\newcounter {chapter} +\renewcommand\thechapter {\@arabic\c@chapter} + +\newif\if@mainmatter \@mainmattertrue +\newcommand\frontmatter{\cleardoublepage + \@mainmatterfalse\pagenumbering{Roman}} +\newcommand\mainmatter{\cleardoublepage + \@mainmattertrue\pagenumbering{arabic}} +\newcommand\backmatter{\if@openright\cleardoublepage\else\clearpage\fi + \@mainmatterfalse} + +\renewcommand\part{\cleardoublepage + \thispagestyle{empty}% + \if@twocolumn + \onecolumn + \@tempswatrue + \else + \@tempswafalse + \fi + \null\vfil + \secdef\@part\@spart} + +\def\@part[#1]#2{% + \ifnum \c@secnumdepth >-2\relax + \refstepcounter{part}% + \addcontentsline{toc}{part}{\thepart\hspace{1em}#1}% + \else + \addcontentsline{toc}{part}{#1}% + \fi + \markboth{}{}% + {\centering + \interlinepenalty \@M + \normalfont + \ifnum \c@secnumdepth >-2\relax + \huge\bfseries \partname~\thepart + \par + \vskip 20\p@ + \fi + \Huge \bfseries #2\par}% + \@endpart} +\def\@spart#1{% + {\centering + \interlinepenalty \@M + \normalfont + \Huge \bfseries #1\par}% + \@endpart} +\def\@endpart{\vfil\newpage + \if@twoside + \null + \thispagestyle{empty}% + \newpage + \fi + \if@tempswa + \twocolumn + \fi} + +\newcommand\chapter{\clearpage + \thispagestyle{empty}% + \global\@topnum\z@ + \@afterindentfalse + \secdef\@chapter\@schapter} +\def\@chapter[#1]#2{\ifnum \c@secnumdepth >\m@ne + \if@mainmatter + 
\refstepcounter{chapter}% + \typeout{\@chapapp\space\thechapter.}% + \addcontentsline{toc}{chapter}% + {\protect\numberline{\thechapter}#1}% + \else + \addcontentsline{toc}{chapter}{#1}% + \fi + \else + \addcontentsline{toc}{chapter}{#1}% + \fi + \chaptermark{#1}% + \addtocontents{lof}{\protect\addvspace{10\p@}}% + \addtocontents{lot}{\protect\addvspace{10\p@}}% + \if@twocolumn + \@topnewpage[\@makechapterhead{#2}]% + \else + \@makechapterhead{#2}% + \@afterheading + \fi} +\def\@makechapterhead#1{% +% \vspace*{50\p@}% + {\centering + \ifnum \c@secnumdepth >\m@ne + \if@mainmatter + \large\bfseries \@chapapp{} \thechapter + \par\nobreak + \vskip 20\p@ + \fi + \fi + \interlinepenalty\@M + \Large \bfseries #1\par\nobreak + \vskip 40\p@ + }} +\def\@schapter#1{\if@twocolumn + \@topnewpage[\@makeschapterhead{#1}]% + \else + \@makeschapterhead{#1}% + \@afterheading + \fi} +\def\@makeschapterhead#1{% +% \vspace*{50\p@}% + {\centering + \normalfont + \interlinepenalty\@M + \Large \bfseries #1\par\nobreak + \vskip 40\p@ + }} + +\renewcommand\section{\@startsection{section}{1}{\z@}% + {-18\p@ \@plus -4\p@ \@minus -4\p@}% + {12\p@ \@plus 4\p@ \@minus 4\p@}% + {\normalfont\large\bfseries\boldmath + \rightskip=\z@ \@plus 8em\pretolerance=10000 }} +\renewcommand\subsection{\@startsection{subsection}{2}{\z@}% + {-18\p@ \@plus -4\p@ \@minus -4\p@}% + {8\p@ \@plus 4\p@ \@minus 4\p@}% + {\normalfont\normalsize\bfseries\boldmath + \rightskip=\z@ \@plus 8em\pretolerance=10000 }} +\renewcommand\subsubsection{\@startsection{subsubsection}{3}{\z@}% + {-18\p@ \@plus -4\p@ \@minus -4\p@}% + {-0.5em \@plus -0.22em \@minus -0.1em}% + {\normalfont\normalsize\bfseries\boldmath}} +\renewcommand\paragraph{\@startsection{paragraph}{4}{\z@}% + {-12\p@ \@plus -4\p@ \@minus -4\p@}% + {-0.5em \@plus -0.22em \@minus -0.1em}% + {\normalfont\normalsize\itshape}} +\renewcommand\subparagraph[1]{\typeout{LLNCS warning: You should not use + \string\subparagraph\space with this class}\vskip0.5cm +You should 
not use \verb|\subparagraph| with this class.\vskip0.5cm} + +\DeclareMathSymbol{\Gamma}{\mathalpha}{letters}{"00} +\DeclareMathSymbol{\Delta}{\mathalpha}{letters}{"01} +\DeclareMathSymbol{\Theta}{\mathalpha}{letters}{"02} +\DeclareMathSymbol{\Lambda}{\mathalpha}{letters}{"03} +\DeclareMathSymbol{\Xi}{\mathalpha}{letters}{"04} +\DeclareMathSymbol{\Pi}{\mathalpha}{letters}{"05} +\DeclareMathSymbol{\Sigma}{\mathalpha}{letters}{"06} +\DeclareMathSymbol{\Upsilon}{\mathalpha}{letters}{"07} +\DeclareMathSymbol{\Phi}{\mathalpha}{letters}{"08} +\DeclareMathSymbol{\Psi}{\mathalpha}{letters}{"09} +\DeclareMathSymbol{\Omega}{\mathalpha}{letters}{"0A} + +\let\footnotesize\small + +\if@custvec +\def\vec#1{\mathchoice{\mbox{\boldmath$\displaystyle#1$}} +{\mbox{\boldmath$\textstyle#1$}} +{\mbox{\boldmath$\scriptstyle#1$}} +{\mbox{\boldmath$\scriptscriptstyle#1$}}} +\fi + +\def\squareforqed{\hbox{\rlap{$\sqcap$}$\sqcup$}} +\def\qed{\ifmmode\squareforqed\else{\unskip\nobreak\hfil +\penalty50\hskip1em\null\nobreak\hfil\squareforqed +\parfillskip=0pt\finalhyphendemerits=0\endgraf}\fi} + +\def\getsto{\mathrel{\mathchoice {\vcenter{\offinterlineskip +\halign{\hfil +$\displaystyle##$\hfil\cr\gets\cr\to\cr}}} +{\vcenter{\offinterlineskip\halign{\hfil$\textstyle##$\hfil\cr\gets +\cr\to\cr}}} +{\vcenter{\offinterlineskip\halign{\hfil$\scriptstyle##$\hfil\cr\gets +\cr\to\cr}}} +{\vcenter{\offinterlineskip\halign{\hfil$\scriptscriptstyle##$\hfil\cr +\gets\cr\to\cr}}}}} +\def\lid{\mathrel{\mathchoice {\vcenter{\offinterlineskip\halign{\hfil +$\displaystyle##$\hfil\cr<\cr\noalign{\vskip1.2pt}=\cr}}} +{\vcenter{\offinterlineskip\halign{\hfil$\textstyle##$\hfil\cr<\cr +\noalign{\vskip1.2pt}=\cr}}} +{\vcenter{\offinterlineskip\halign{\hfil$\scriptstyle##$\hfil\cr<\cr +\noalign{\vskip1pt}=\cr}}} +{\vcenter{\offinterlineskip\halign{\hfil$\scriptscriptstyle##$\hfil\cr +<\cr +\noalign{\vskip0.9pt}=\cr}}}}} +\def\gid{\mathrel{\mathchoice {\vcenter{\offinterlineskip\halign{\hfil 
+$\displaystyle##$\hfil\cr>\cr\noalign{\vskip1.2pt}=\cr}}} +{\vcenter{\offinterlineskip\halign{\hfil$\textstyle##$\hfil\cr>\cr +\noalign{\vskip1.2pt}=\cr}}} +{\vcenter{\offinterlineskip\halign{\hfil$\scriptstyle##$\hfil\cr>\cr +\noalign{\vskip1pt}=\cr}}} +{\vcenter{\offinterlineskip\halign{\hfil$\scriptscriptstyle##$\hfil\cr +>\cr +\noalign{\vskip0.9pt}=\cr}}}}} +\def\grole{\mathrel{\mathchoice {\vcenter{\offinterlineskip +\halign{\hfil +$\displaystyle##$\hfil\cr>\cr\noalign{\vskip-1pt}<\cr}}} +{\vcenter{\offinterlineskip\halign{\hfil$\textstyle##$\hfil\cr +>\cr\noalign{\vskip-1pt}<\cr}}} +{\vcenter{\offinterlineskip\halign{\hfil$\scriptstyle##$\hfil\cr +>\cr\noalign{\vskip-0.8pt}<\cr}}} +{\vcenter{\offinterlineskip\halign{\hfil$\scriptscriptstyle##$\hfil\cr +>\cr\noalign{\vskip-0.3pt}<\cr}}}}} +\def\bbbr{{\rm I\!R}} %reelle Zahlen +\def\bbbm{{\rm I\!M}} +\def\bbbn{{\rm I\!N}} %natuerliche Zahlen +\def\bbbf{{\rm I\!F}} +\def\bbbh{{\rm I\!H}} +\def\bbbk{{\rm I\!K}} +\def\bbbp{{\rm I\!P}} +\def\bbbone{{\mathchoice {\rm 1\mskip-4mu l} {\rm 1\mskip-4mu l} +{\rm 1\mskip-4.5mu l} {\rm 1\mskip-5mu l}}} +\def\bbbc{{\mathchoice {\setbox0=\hbox{$\displaystyle\rm C$}\hbox{\hbox +to0pt{\kern0.4\wd0\vrule height0.9\ht0\hss}\box0}} +{\setbox0=\hbox{$\textstyle\rm C$}\hbox{\hbox +to0pt{\kern0.4\wd0\vrule height0.9\ht0\hss}\box0}} +{\setbox0=\hbox{$\scriptstyle\rm C$}\hbox{\hbox +to0pt{\kern0.4\wd0\vrule height0.9\ht0\hss}\box0}} +{\setbox0=\hbox{$\scriptscriptstyle\rm C$}\hbox{\hbox +to0pt{\kern0.4\wd0\vrule height0.9\ht0\hss}\box0}}}} +\def\bbbq{{\mathchoice {\setbox0=\hbox{$\displaystyle\rm +Q$}\hbox{\raise +0.15\ht0\hbox to0pt{\kern0.4\wd0\vrule height0.8\ht0\hss}\box0}} +{\setbox0=\hbox{$\textstyle\rm Q$}\hbox{\raise +0.15\ht0\hbox to0pt{\kern0.4\wd0\vrule height0.8\ht0\hss}\box0}} +{\setbox0=\hbox{$\scriptstyle\rm Q$}\hbox{\raise +0.15\ht0\hbox to0pt{\kern0.4\wd0\vrule height0.7\ht0\hss}\box0}} +{\setbox0=\hbox{$\scriptscriptstyle\rm Q$}\hbox{\raise +0.15\ht0\hbox 
to0pt{\kern0.4\wd0\vrule height0.7\ht0\hss}\box0}}}} +\def\bbbt{{\mathchoice {\setbox0=\hbox{$\displaystyle\rm +T$}\hbox{\hbox to0pt{\kern0.3\wd0\vrule height0.9\ht0\hss}\box0}} +{\setbox0=\hbox{$\textstyle\rm T$}\hbox{\hbox +to0pt{\kern0.3\wd0\vrule height0.9\ht0\hss}\box0}} +{\setbox0=\hbox{$\scriptstyle\rm T$}\hbox{\hbox +to0pt{\kern0.3\wd0\vrule height0.9\ht0\hss}\box0}} +{\setbox0=\hbox{$\scriptscriptstyle\rm T$}\hbox{\hbox +to0pt{\kern0.3\wd0\vrule height0.9\ht0\hss}\box0}}}} +\def\bbbs{{\mathchoice +{\setbox0=\hbox{$\displaystyle \rm S$}\hbox{\raise0.5\ht0\hbox +to0pt{\kern0.35\wd0\vrule height0.45\ht0\hss}\hbox +to0pt{\kern0.55\wd0\vrule height0.5\ht0\hss}\box0}} +{\setbox0=\hbox{$\textstyle \rm S$}\hbox{\raise0.5\ht0\hbox +to0pt{\kern0.35\wd0\vrule height0.45\ht0\hss}\hbox +to0pt{\kern0.55\wd0\vrule height0.5\ht0\hss}\box0}} +{\setbox0=\hbox{$\scriptstyle \rm S$}\hbox{\raise0.5\ht0\hbox +to0pt{\kern0.35\wd0\vrule height0.45\ht0\hss}\raise0.05\ht0\hbox +to0pt{\kern0.5\wd0\vrule height0.45\ht0\hss}\box0}} +{\setbox0=\hbox{$\scriptscriptstyle\rm S$}\hbox{\raise0.5\ht0\hbox +to0pt{\kern0.4\wd0\vrule height0.45\ht0\hss}\raise0.05\ht0\hbox +to0pt{\kern0.55\wd0\vrule height0.45\ht0\hss}\box0}}}} +\def\bbbz{{\mathchoice {\hbox{$\mathsf\textstyle Z\kern-0.4em Z$}} +{\hbox{$\mathsf\textstyle Z\kern-0.4em Z$}} +{\hbox{$\mathsf\scriptstyle Z\kern-0.3em Z$}} +{\hbox{$\mathsf\scriptscriptstyle Z\kern-0.2em Z$}}}} + +\let\ts\, + +\setlength\leftmargini {17\p@} +\setlength\leftmargin {\leftmargini} +\setlength\leftmarginii {\leftmargini} +\setlength\leftmarginiii {\leftmargini} +\setlength\leftmarginiv {\leftmargini} +\setlength \labelsep {.5em} +\setlength \labelwidth{\leftmargini} +\addtolength\labelwidth{-\labelsep} + +\def\@listI{\leftmargin\leftmargini + \parsep 0\p@ \@plus1\p@ \@minus\p@ + \topsep 8\p@ \@plus2\p@ \@minus4\p@ + \itemsep0\p@} +\let\@listi\@listI +\@listi +\def\@listii {\leftmargin\leftmarginii + \labelwidth\leftmarginii + \advance\labelwidth-\labelsep 
+ \topsep 0\p@ \@plus2\p@ \@minus\p@} +\def\@listiii{\leftmargin\leftmarginiii + \labelwidth\leftmarginiii + \advance\labelwidth-\labelsep + \topsep 0\p@ \@plus\p@\@minus\p@ + \parsep \z@ + \partopsep \p@ \@plus\z@ \@minus\p@} + +\renewcommand\labelitemi{\normalfont\bfseries --} +\renewcommand\labelitemii{$\m@th\bullet$} + +\setlength\arraycolsep{1.4\p@} +\setlength\tabcolsep{1.4\p@} + +\def\tableofcontents{\chapter*{\contentsname\@mkboth{{\contentsname}}% + {{\contentsname}}} + \def\authcount##1{\setcounter{auco}{##1}\setcounter{@auth}{1}} + \def\lastand{\ifnum\value{auco}=2\relax + \unskip{} \andname\ + \else + \unskip \lastandname\ + \fi}% + \def\and{\stepcounter{@auth}\relax + \ifnum\value{@auth}=\value{auco}% + \lastand + \else + \unskip, + \fi}% + \@starttoc{toc}\if@restonecol\twocolumn\fi} + +\def\l@part#1#2{\addpenalty{\@secpenalty}% + \addvspace{2em plus\p@}% % space above part line + \begingroup + \parindent \z@ + \rightskip \z@ plus 5em + \hrule\vskip5pt + \large % same size as for a contribution heading + \bfseries\boldmath % set line in boldface + \leavevmode % TeX command to enter horizontal mode. 
+ #1\par + \vskip5pt + \hrule + \vskip1pt + \nobreak % Never break after part entry + \endgroup} + +\def\@dotsep{2} + +\let\phantomsection=\relax + +\def\hyperhrefextend{\ifx\hyper@anchor\@undefined\else +{}\fi} + +\def\addnumcontentsmark#1#2#3{% +\addtocontents{#1}{\protect\contentsline{#2}{\protect\numberline + {\thechapter}#3}{\thepage}\hyperhrefextend}}% +\def\addcontentsmark#1#2#3{% +\addtocontents{#1}{\protect\contentsline{#2}{#3}{\thepage}\hyperhrefextend}}% +\def\addcontentsmarkwop#1#2#3{% +\addtocontents{#1}{\protect\contentsline{#2}{#3}{0}\hyperhrefextend}}% + +\def\@adcmk[#1]{\ifcase #1 \or +\def\@gtempa{\addnumcontentsmark}% + \or \def\@gtempa{\addcontentsmark}% + \or \def\@gtempa{\addcontentsmarkwop}% + \fi\@gtempa{toc}{chapter}% +} +\def\addtocmark{% +\phantomsection +\@ifnextchar[{\@adcmk}{\@adcmk[3]}% +} + +\def\l@chapter#1#2{\addpenalty{-\@highpenalty} + \vskip 1.0em plus 1pt \@tempdima 1.5em \begingroup + \parindent \z@ \rightskip \@tocrmarg + \advance\rightskip by 0pt plus 2cm + \parfillskip -\rightskip \pretolerance=10000 + \leavevmode \advance\leftskip\@tempdima \hskip -\leftskip + {\large\bfseries\boldmath#1}\ifx0#2\hfil\null + \else + \nobreak + \leaders\hbox{$\m@th \mkern \@dotsep mu.\mkern + \@dotsep mu$}\hfill + \nobreak\hbox to\@pnumwidth{\hss #2}% + \fi\par + \penalty\@highpenalty \endgroup} + +\def\l@title#1#2{\addpenalty{-\@highpenalty} + \addvspace{8pt plus 1pt} + \@tempdima \z@ + \begingroup + \parindent \z@ \rightskip \@tocrmarg + \advance\rightskip by 0pt plus 2cm + \parfillskip -\rightskip \pretolerance=10000 + \leavevmode \advance\leftskip\@tempdima \hskip -\leftskip + #1\nobreak + \leaders\hbox{$\m@th \mkern \@dotsep mu.\mkern + \@dotsep mu$}\hfill + \nobreak\hbox to\@pnumwidth{\hss #2}\par + \penalty\@highpenalty \endgroup} + +\def\l@author#1#2{\addpenalty{\@highpenalty} + \@tempdima=15\p@ %\z@ + \begingroup + \parindent \z@ \rightskip \@tocrmarg + \advance\rightskip by 0pt plus 2cm + \pretolerance=10000 + \leavevmode 
\advance\leftskip\@tempdima %\hskip -\leftskip + \textit{#1}\par + \penalty\@highpenalty \endgroup} + +\setcounter{tocdepth}{0} +\newdimen\tocchpnum +\newdimen\tocsecnum +\newdimen\tocsectotal +\newdimen\tocsubsecnum +\newdimen\tocsubsectotal +\newdimen\tocsubsubsecnum +\newdimen\tocsubsubsectotal +\newdimen\tocparanum +\newdimen\tocparatotal +\newdimen\tocsubparanum +\tocchpnum=\z@ % no chapter numbers +\tocsecnum=15\p@ % section 88. plus 2.222pt +\tocsubsecnum=23\p@ % subsection 88.8 plus 2.222pt +\tocsubsubsecnum=27\p@ % subsubsection 88.8.8 plus 1.444pt +\tocparanum=35\p@ % paragraph 88.8.8.8 plus 1.666pt +\tocsubparanum=43\p@ % subparagraph 88.8.8.8.8 plus 1.888pt +\def\calctocindent{% +\tocsectotal=\tocchpnum +\advance\tocsectotal by\tocsecnum +\tocsubsectotal=\tocsectotal +\advance\tocsubsectotal by\tocsubsecnum +\tocsubsubsectotal=\tocsubsectotal +\advance\tocsubsubsectotal by\tocsubsubsecnum +\tocparatotal=\tocsubsubsectotal +\advance\tocparatotal by\tocparanum} +\calctocindent + +\def\l@section{\@dottedtocline{1}{\tocchpnum}{\tocsecnum}} +\def\l@subsection{\@dottedtocline{2}{\tocsectotal}{\tocsubsecnum}} +\def\l@subsubsection{\@dottedtocline{3}{\tocsubsectotal}{\tocsubsubsecnum}} +\def\l@paragraph{\@dottedtocline{4}{\tocsubsubsectotal}{\tocparanum}} +\def\l@subparagraph{\@dottedtocline{5}{\tocparatotal}{\tocsubparanum}} + +\def\listoffigures{\@restonecolfalse\if@twocolumn\@restonecoltrue\onecolumn + \fi\section*{\listfigurename\@mkboth{{\listfigurename}}{{\listfigurename}}} + \@starttoc{lof}\if@restonecol\twocolumn\fi} +\def\l@figure{\@dottedtocline{1}{0em}{1.5em}} + +\def\listoftables{\@restonecolfalse\if@twocolumn\@restonecoltrue\onecolumn + \fi\section*{\listtablename\@mkboth{{\listtablename}}{{\listtablename}}} + \@starttoc{lot}\if@restonecol\twocolumn\fi} +\let\l@table\l@figure + +\renewcommand\listoffigures{% + \section*{\listfigurename + \@mkboth{\listfigurename}{\listfigurename}}% + \@starttoc{lof}% + } + +\renewcommand\listoftables{% + 
\section*{\listtablename + \@mkboth{\listtablename}{\listtablename}}% + \@starttoc{lot}% + } + +\ifx\oribibl\undefined +\ifx\citeauthoryear\undefined +\renewenvironment{thebibliography}[1] + {\section*{\refname} + \def\@biblabel##1{##1.} + \small + \list{\@biblabel{\@arabic\c@enumiv}}% + {\settowidth\labelwidth{\@biblabel{#1}}% + \leftmargin\labelwidth + \advance\leftmargin\labelsep + \if@openbib + \advance\leftmargin\bibindent + \itemindent -\bibindent + \listparindent \itemindent + \parsep \z@ + \fi + \usecounter{enumiv}% + \let\p@enumiv\@empty + \renewcommand\theenumiv{\@arabic\c@enumiv}}% + \if@openbib + \renewcommand\newblock{\par}% + \else + \renewcommand\newblock{\hskip .11em \@plus.33em \@minus.07em}% + \fi + \sloppy\clubpenalty4000\widowpenalty4000% + \sfcode`\.=\@m} + {\def\@noitemerr + {\@latex@warning{Empty `thebibliography' environment}}% + \endlist} +\def\@lbibitem[#1]#2{\item[{[#1]}\hfill]\if@filesw + {\let\protect\noexpand\immediate + \write\@auxout{\string\bibcite{#2}{#1}}}\fi\ignorespaces} +\newcount\@tempcntc +\def\@citex[#1]#2{\if@filesw\immediate\write\@auxout{\string\citation{#2}}\fi + \@tempcnta\z@\@tempcntb\m@ne\def\@citea{}\@cite{\@for\@citeb:=#2\do + {\@ifundefined + {b@\@citeb}{\@citeo\@tempcntb\m@ne\@citea\def\@citea{,}{\bfseries + ?}\@warning + {Citation `\@citeb' on page \thepage \space undefined}}% + {\setbox\z@\hbox{\global\@tempcntc0\csname b@\@citeb\endcsname\relax}% + \ifnum\@tempcntc=\z@ \@citeo\@tempcntb\m@ne + \@citea\def\@citea{,}\hbox{\csname b@\@citeb\endcsname}% + \else + \advance\@tempcntb\@ne + \ifnum\@tempcntb=\@tempcntc + \else\advance\@tempcntb\m@ne\@citeo + \@tempcnta\@tempcntc\@tempcntb\@tempcntc\fi\fi}}\@citeo}{#1}} +\def\@citeo{\ifnum\@tempcnta>\@tempcntb\else + \@citea\def\@citea{,\,\hskip\z@skip}% + \ifnum\@tempcnta=\@tempcntb\the\@tempcnta\else + {\advance\@tempcnta\@ne\ifnum\@tempcnta=\@tempcntb \else + \def\@citea{--}\fi + \advance\@tempcnta\m@ne\the\@tempcnta\@citea\the\@tempcntb}\fi\fi} +\else 
+\renewenvironment{thebibliography}[1] + {\section*{\refname} + \small + \list{}% + {\settowidth\labelwidth{}% + \leftmargin\parindent + \itemindent=-\parindent + \labelsep=\z@ + \if@openbib + \advance\leftmargin\bibindent + \itemindent -\bibindent + \listparindent \itemindent + \parsep \z@ + \fi + \usecounter{enumiv}% + \let\p@enumiv\@empty + \renewcommand\theenumiv{}}% + \if@openbib + \renewcommand\newblock{\par}% + \else + \renewcommand\newblock{\hskip .11em \@plus.33em \@minus.07em}% + \fi + \sloppy\clubpenalty4000\widowpenalty4000% + \sfcode`\.=\@m} + {\def\@noitemerr + {\@latex@warning{Empty `thebibliography' environment}}% + \endlist} + \def\@cite#1{#1}% + \def\@lbibitem[#1]#2{\item[]\if@filesw + {\def\protect##1{\string ##1\space}\immediate + \write\@auxout{\string\bibcite{#2}{#1}}}\fi\ignorespaces} + \fi +\else +\@cons\@openbib@code{\noexpand\small} +\fi + +\def\idxquad{\hskip 10\p@}% space that divides entry from number + +\def\@idxitem{\par\hangindent 10\p@} + +\def\subitem{\par\setbox0=\hbox{--\enspace}% second order + \noindent\hangindent\wd0\box0}% index entry + +\def\subsubitem{\par\setbox0=\hbox{--\,--\enspace}% third + \noindent\hangindent\wd0\box0}% order index entry + +\def\indexspace{\par \vskip 10\p@ plus5\p@ minus3\p@\relax} + +\renewenvironment{theindex} + {\@mkboth{\indexname}{\indexname}% + \thispagestyle{empty}\parindent\z@ + \parskip\z@ \@plus .3\p@\relax + \let\item\par + \def\,{\relax\ifmmode\mskip\thinmuskip + \else\hskip0.2em\ignorespaces\fi}% + \normalfont\small + \begin{multicols}{2}[\@makeschapterhead{\indexname}]% + } + {\end{multicols}} + +\renewcommand\footnoterule{% + \kern-3\p@ + \hrule\@width 2truecm + \kern2.6\p@} + \newdimen\fnindent + \fnindent1em +\long\def\@makefntext#1{% + \parindent \fnindent% + \leftskip \fnindent% + \noindent + \llap{\hb@xt@1em{\hss\@makefnmark\ }}\ignorespaces#1} + +\long\def\@makecaption#1#2{% + \small + \vskip\abovecaptionskip + \sbox\@tempboxa{{\bfseries #1.} #2}% + \ifdim \wd\@tempboxa >\hsize + 
{\bfseries #1.} #2\par + \else + \global \@minipagefalse + \hb@xt@\hsize{\hfil\box\@tempboxa\hfil}% + \fi + \vskip\belowcaptionskip} + +\def\fps@figure{htbp} +\def\fnum@figure{\figurename\thinspace\thefigure} +\def \@floatboxreset {% + \reset@font + \small + \@setnobreak + \@setminipage +} +\def\fps@table{htbp} +\def\fnum@table{\tablename~\thetable} +\renewenvironment{table} + {\setlength\abovecaptionskip{0\p@}% + \setlength\belowcaptionskip{10\p@}% + \@float{table}} + {\end@float} +\renewenvironment{table*} + {\setlength\abovecaptionskip{0\p@}% + \setlength\belowcaptionskip{10\p@}% + \@dblfloat{table}} + {\end@dblfloat} + +\long\def\@caption#1[#2]#3{\par\addcontentsline{\csname + ext@#1\endcsname}{#1}{\protect\numberline{\csname + the#1\endcsname}{\ignorespaces #2}}\begingroup + \@parboxrestore + \@makecaption{\csname fnum@#1\endcsname}{\ignorespaces #3}\par + \endgroup} + +% LaTeX does not provide a command to enter the authors institute +% addresses. The \institute command is defined here. 
+ +\newcounter{@inst} +\newcounter{@auth} +\newcounter{auco} +\newdimen\instindent +\newbox\authrun +\newtoks\authorrunning +\newtoks\tocauthor +\newbox\titrun +\newtoks\titlerunning +\newtoks\toctitle + +\def\clearheadinfo{\gdef\@author{No Author Given}% + \gdef\@title{No Title Given}% + \gdef\@subtitle{}% + \gdef\@institute{No Institute Given}% + \gdef\@thanks{}% + \global\titlerunning={}\global\authorrunning={}% + \global\toctitle={}\global\tocauthor={}} + +\def\institute#1{\gdef\@institute{#1}} + +\def\institutename{\par + \begingroup + \parskip=\z@ + \parindent=\z@ + \setcounter{@inst}{1}% + \def\and{\par\stepcounter{@inst}% + \noindent$^{\the@inst}$\enspace\ignorespaces}% + \setbox0=\vbox{\def\thanks##1{}\@institute}% + \ifnum\c@@inst=1\relax + \gdef\fnnstart{0}% + \else + \xdef\fnnstart{\c@@inst}% + \setcounter{@inst}{1}% + \noindent$^{\the@inst}$\enspace + \fi + \ignorespaces + \@institute\par + \endgroup} + +\def\@fnsymbol#1{\ensuremath{\ifcase#1\or\star\or{\star\star}\or + {\star\star\star}\or \dagger\or \ddagger\or + \mathchar "278\or \mathchar "27B\or \|\or **\or \dagger\dagger + \or \ddagger\ddagger \else\@ctrerr\fi}} + +\def\inst#1{\unskip$^{#1}$} +\def\fnmsep{\unskip$^,$} +\def\email#1{{\tt#1}} +\AtBeginDocument{\@ifundefined{url}{\def\url#1{#1}}{}% +\@ifpackageloaded{babel}{% +\@ifundefined{extrasenglish}{}{\addto\extrasenglish{\switcht@albion}}% +\@ifundefined{extrasfrenchb}{}{\addto\extrasfrenchb{\switcht@francais}}% +\@ifundefined{extrasgerman}{}{\addto\extrasgerman{\switcht@deutsch}}% +\@ifundefined{extrasngerman}{}{\addto\extrasngerman{\switcht@deutsch}}% +}{\switcht@@therlang}% +\providecommand{\keywords}[1]{\par\addvspace\baselineskip +\noindent\keywordname\enspace\ignorespaces#1}% +} +\def\homedir{\~{ }} + +\def\subtitle#1{\gdef\@subtitle{#1}} +\clearheadinfo +% +%%% to avoid hyperref warnings +\providecommand*{\toclevel@author}{999} +%%% to make title-entry parent of section-entries +\providecommand*{\toclevel@title}{0} +% 
+\renewcommand\maketitle{\newpage +\phantomsection + \refstepcounter{chapter}% + \stepcounter{section}% + \setcounter{section}{0}% + \setcounter{subsection}{0}% + \setcounter{figure}{0} + \setcounter{table}{0} + \setcounter{equation}{0} + \setcounter{footnote}{0}% + \begingroup + \parindent=\z@ + \renewcommand\thefootnote{\@fnsymbol\c@footnote}% + \if@twocolumn + \ifnum \col@number=\@ne + \@maketitle + \else + \twocolumn[\@maketitle]% + \fi + \else + \newpage + \global\@topnum\z@ % Prevents figures from going at top of page. + \@maketitle + \fi + \thispagestyle{empty}\@thanks +% + \def\\{\unskip\ \ignorespaces}\def\inst##1{\unskip{}}% + \def\thanks##1{\unskip{}}\def\fnmsep{\unskip}% + \instindent=\hsize + \advance\instindent by-\headlineindent + \if!\the\toctitle!\addcontentsline{toc}{title}{\@title}\else + \addcontentsline{toc}{title}{\the\toctitle}\fi + \if@runhead + \if!\the\titlerunning!\else + \edef\@title{\the\titlerunning}% + \fi + \global\setbox\titrun=\hbox{\small\rm\unboldmath\ignorespaces\@title}% + \ifdim\wd\titrun>\instindent + \typeout{Title too long for running head. Please supply}% + \typeout{a shorter form with \string\titlerunning\space prior to + \string\maketitle}% + \global\setbox\titrun=\hbox{\small\rm + Title Suppressed Due to Excessive Length}% + \fi + \xdef\@title{\copy\titrun}% + \fi +% + \if!\the\tocauthor!\relax + {\def\and{\noexpand\protect\noexpand\and}% + \protected@xdef\toc@uthor{\@author}}% + \else + \def\\{\noexpand\protect\noexpand\newline}% + \protected@xdef\scratch{\the\tocauthor}% + \protected@xdef\toc@uthor{\scratch}% + \fi + \addtocontents{toc}{\noexpand\protect\noexpand\authcount{\the\c@auco}}% + \addcontentsline{toc}{author}{\toc@uthor}% + \if@runhead + \if!\the\authorrunning! 
+ \value{@inst}=\value{@auth}% + \setcounter{@auth}{1}% + \else + \edef\@author{\the\authorrunning}% + \fi + \global\setbox\authrun=\hbox{\small\unboldmath\@author\unskip}% + \ifdim\wd\authrun>\instindent + \typeout{Names of authors too long for running head. Please supply}% + \typeout{a shorter form with \string\authorrunning\space prior to + \string\maketitle}% + \global\setbox\authrun=\hbox{\small\rm + Authors Suppressed Due to Excessive Length}% + \fi + \xdef\@author{\copy\authrun}% + \markboth{\@author}{\@title}% + \fi + \endgroup + \setcounter{footnote}{\fnnstart}% + \clearheadinfo} +% +\def\@maketitle{\newpage + \markboth{}{}% + \def\lastand{\ifnum\value{@inst}=2\relax + \unskip{} \andname\ + \else + \unskip \lastandname\ + \fi}% + \def\and{\stepcounter{@auth}\relax + \ifnum\value{@auth}=\value{@inst}% + \lastand + \else + \unskip, + \fi}% + \begin{center}% + \let\newline\\ + {\Large \bfseries\boldmath + \pretolerance=10000 + \@title \par}\vskip .8cm +\if!\@subtitle!\else {\large \bfseries\boldmath + \vskip -.65cm + \pretolerance=10000 + \@subtitle \par}\vskip .8cm\fi + \setbox0=\vbox{\setcounter{@auth}{1}\def\and{\stepcounter{@auth}}% + \def\thanks##1{}\@author}% + \global\value{@inst}=\value{@auth}% + \global\value{auco}=\value{@auth}% + \setcounter{@auth}{1}% +{\lineskip .5em +\noindent\ignorespaces +\@author\vskip.35cm} + {\small\institutename} + \end{center}% + } + +% definition of the "\spnewtheorem" command. +% +% Usage: +% +% \spnewtheorem{env_nam}{caption}[within]{cap_font}{body_font} +% or \spnewtheorem{env_nam}[numbered_like]{caption}{cap_font}{body_font} +% or \spnewtheorem*{env_nam}{caption}{cap_font}{body_font} +% +% New is "cap_font" and "body_font". It stands for +% fontdefinition of the caption and the text itself. +% +% "\spnewtheorem*" gives a theorem without number. +% +% A defined spnewthoerem environment is used as described +% by Lamport. 
+% +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +\def\@thmcountersep{} +\def\@thmcounterend{.} + +\def\spnewtheorem{\@ifstar{\@sthm}{\@Sthm}} + +% definition of \spnewtheorem with number + +\def\@spnthm#1#2{% + \@ifnextchar[{\@spxnthm{#1}{#2}}{\@spynthm{#1}{#2}}} +\def\@Sthm#1{\@ifnextchar[{\@spothm{#1}}{\@spnthm{#1}}} + +\def\@spxnthm#1#2[#3]#4#5{\expandafter\@ifdefinable\csname #1\endcsname + {\@definecounter{#1}\@addtoreset{#1}{#3}% + \expandafter\xdef\csname the#1\endcsname{\expandafter\noexpand + \csname the#3\endcsname \noexpand\@thmcountersep \@thmcounter{#1}}% + \expandafter\xdef\csname #1name\endcsname{#2}% + \global\@namedef{#1}{\@spthm{#1}{\csname #1name\endcsname}{#4}{#5}}% + \global\@namedef{end#1}{\@endtheorem}}} + +\def\@spynthm#1#2#3#4{\expandafter\@ifdefinable\csname #1\endcsname + {\@definecounter{#1}% + \expandafter\xdef\csname the#1\endcsname{\@thmcounter{#1}}% + \expandafter\xdef\csname #1name\endcsname{#2}% + \global\@namedef{#1}{\@spthm{#1}{\csname #1name\endcsname}{#3}{#4}}% + \global\@namedef{end#1}{\@endtheorem}}} + +\def\@spothm#1[#2]#3#4#5{% + \@ifundefined{c@#2}{\@latexerr{No theorem environment `#2' defined}\@eha}% + {\expandafter\@ifdefinable\csname #1\endcsname + {\newaliascnt{#1}{#2}% + \expandafter\xdef\csname #1name\endcsname{#3}% + \global\@namedef{#1}{\@spthm{#1}{\csname #1name\endcsname}{#4}{#5}}% + \global\@namedef{end#1}{\@endtheorem}}}} + +\def\@spthm#1#2#3#4{\topsep 7\p@ \@plus2\p@ \@minus4\p@ +\refstepcounter{#1}% +\@ifnextchar[{\@spythm{#1}{#2}{#3}{#4}}{\@spxthm{#1}{#2}{#3}{#4}}} + +\def\@spxthm#1#2#3#4{\@spbegintheorem{#2}{\csname the#1\endcsname}{#3}{#4}% + \ignorespaces} + +\def\@spythm#1#2#3#4[#5]{\@spopargbegintheorem{#2}{\csname + the#1\endcsname}{#5}{#3}{#4}\ignorespaces} + +\def\@spbegintheorem#1#2#3#4{\trivlist + \item[\hskip\labelsep{#3#1\ #2\@thmcounterend}]#4} + +\def\@spopargbegintheorem#1#2#3#4#5{\trivlist + \item[\hskip\labelsep{#4#1\ #2}]{#4(#3)\@thmcounterend\ }#5} + +% definition of 
\spnewtheorem* without number + +\def\@sthm#1#2{\@Ynthm{#1}{#2}} + +\def\@Ynthm#1#2#3#4{\expandafter\@ifdefinable\csname #1\endcsname + {\global\@namedef{#1}{\@Thm{\csname #1name\endcsname}{#3}{#4}}% + \expandafter\xdef\csname #1name\endcsname{#2}% + \global\@namedef{end#1}{\@endtheorem}}} + +\def\@Thm#1#2#3{\topsep 7\p@ \@plus2\p@ \@minus4\p@ +\@ifnextchar[{\@Ythm{#1}{#2}{#3}}{\@Xthm{#1}{#2}{#3}}} + +\def\@Xthm#1#2#3{\@Begintheorem{#1}{#2}{#3}\ignorespaces} + +\def\@Ythm#1#2#3[#4]{\@Opargbegintheorem{#1} + {#4}{#2}{#3}\ignorespaces} + +\def\@Begintheorem#1#2#3{#3\trivlist + \item[\hskip\labelsep{#2#1\@thmcounterend}]} + +\def\@Opargbegintheorem#1#2#3#4{#4\trivlist + \item[\hskip\labelsep{#3#1}]{#3(#2)\@thmcounterend\ }} + +\if@envcntsect + \def\@thmcountersep{.} + \spnewtheorem{theorem}{Theorem}[section]{\bfseries}{\itshape} +\else + \spnewtheorem{theorem}{Theorem}{\bfseries}{\itshape} + \if@envcntreset + \@addtoreset{theorem}{section} + \else + \@addtoreset{theorem}{chapter} + \fi +\fi + +%definition of divers theorem environments +\spnewtheorem*{claim}{Claim}{\itshape}{\rmfamily} +\spnewtheorem*{proof}{Proof}{\itshape}{\rmfamily} +\if@envcntsame % alle Umgebungen wie Theorem. 
+ \def\spn@wtheorem#1#2#3#4{\@spothm{#1}[theorem]{#2}{#3}{#4}} +\else % alle Umgebungen mit eigenem Zaehler + \if@envcntsect % mit section numeriert + \def\spn@wtheorem#1#2#3#4{\@spxnthm{#1}{#2}[section]{#3}{#4}} + \else % nicht mit section numeriert + \if@envcntreset + \def\spn@wtheorem#1#2#3#4{\@spynthm{#1}{#2}{#3}{#4} + \@addtoreset{#1}{section}} + \else + \def\spn@wtheorem#1#2#3#4{\@spynthm{#1}{#2}{#3}{#4} + \@addtoreset{#1}{chapter}}% + \fi + \fi +\fi +\spn@wtheorem{case}{Case}{\itshape}{\rmfamily} +\spn@wtheorem{conjecture}{Conjecture}{\itshape}{\rmfamily} +\spn@wtheorem{corollary}{Corollary}{\bfseries}{\itshape} +\spn@wtheorem{definition}{Definition}{\bfseries}{\itshape} +\spn@wtheorem{example}{Example}{\itshape}{\rmfamily} +\spn@wtheorem{exercise}{Exercise}{\itshape}{\rmfamily} +\spn@wtheorem{lemma}{Lemma}{\bfseries}{\itshape} +\spn@wtheorem{note}{Note}{\itshape}{\rmfamily} +\spn@wtheorem{problem}{Problem}{\itshape}{\rmfamily} +\spn@wtheorem{property}{Property}{\itshape}{\rmfamily} +\spn@wtheorem{proposition}{Proposition}{\bfseries}{\itshape} +\spn@wtheorem{question}{Question}{\itshape}{\rmfamily} +\spn@wtheorem{solution}{Solution}{\itshape}{\rmfamily} +\spn@wtheorem{remark}{Remark}{\itshape}{\rmfamily} + +\def\@takefromreset#1#2{% + \def\@tempa{#1}% + \let\@tempd\@elt + \def\@elt##1{% + \def\@tempb{##1}% + \ifx\@tempa\@tempb\else + \@addtoreset{##1}{#2}% + \fi}% + \expandafter\expandafter\let\expandafter\@tempc\csname cl@#2\endcsname + \expandafter\def\csname cl@#2\endcsname{}% + \@tempc + \let\@elt\@tempd} + +\def\theopargself{\def\@spopargbegintheorem##1##2##3##4##5{\trivlist + \item[\hskip\labelsep{##4##1\ ##2}]{##4##3\@thmcounterend\ }##5} + \def\@Opargbegintheorem##1##2##3##4{##4\trivlist + \item[\hskip\labelsep{##3##1}]{##3##2\@thmcounterend\ }} + } + +\renewenvironment{abstract}{% + \list{}{\advance\topsep by0.35cm\relax\small + \leftmargin=1cm + \labelwidth=\z@ + \listparindent=\z@ + \itemindent\listparindent + 
\rightmargin\leftmargin}\item[\hskip\labelsep + \bfseries\abstractname]} + {\endlist} + +\newdimen\headlineindent % dimension for space between +\headlineindent=1.166cm % number and text of headings. + +\def\ps@headings{\let\@mkboth\@gobbletwo + \let\@oddfoot\@empty\let\@evenfoot\@empty + \def\@evenhead{\normalfont\small\rlap{\thepage}\hspace{\headlineindent}% + \leftmark\hfil} + \def\@oddhead{\normalfont\small\hfil\rightmark\hspace{\headlineindent}% + \llap{\thepage}} + \def\chaptermark##1{}% + \def\sectionmark##1{}% + \def\subsectionmark##1{}} + +\def\ps@titlepage{\let\@mkboth\@gobbletwo + \let\@oddfoot\@empty\let\@evenfoot\@empty + \def\@evenhead{\normalfont\small\rlap{\thepage}\hspace{\headlineindent}% + \hfil} + \def\@oddhead{\normalfont\small\hfil\hspace{\headlineindent}% + \llap{\thepage}} + \def\chaptermark##1{}% + \def\sectionmark##1{}% + \def\subsectionmark##1{}} + +\if@runhead\ps@headings\else +\ps@empty\fi + +\setlength\arraycolsep{1.4\p@} +\setlength\tabcolsep{1.4\p@} + +\endinput +%end of file llncs.cls diff -r 47179a172c54 -r 16af5b8bd285 thys/README --- a/thys/README Sun Feb 26 00:12:18 2017 +0000 +++ b/thys/README Sun Feb 26 23:46:22 2017 +0000 @@ -10,6 +10,8 @@ isabelle build -c -v -d . Paper + isabelle build -c -v -d . Journal + Othe directories are: ===================== diff -r 47179a172c54 -r 16af5b8bd285 thys/ROOT --- a/thys/ROOT Sun Feb 26 00:12:18 2017 +0000 +++ b/thys/ROOT Sun Feb 26 23:46:22 2017 +0000 @@ -14,3 +14,12 @@ "root.bib" "root.tex" + +session Journal in "Journal" = Lex + + options [document = pdf, document_output = "..", document_variants="journal"] + theories + "~~/src/HOL/Library/LaTeXsugar" + "Paper" + document_files + "root.bib" + "root.tex"