--- a/exps/both.scala Tue Jul 23 21:21:49 2019 +0100
+++ b/exps/both.scala Mon Jul 29 09:37:20 2019 +0100
@@ -54,6 +54,27 @@
case class ASEQ(bs: Bits, r1: ARexp, r2: ARexp) extends ARexp
case class ASTAR(bs: Bits, r: ARexp) extends ARexp
+// "faster" equality
+def equ(a1: ARexp, a2: ARexp) : Boolean = (a1, a2) match {
+ case (AZERO, AZERO) => true
+ case (AONE(_), AONE(_)) => true
+ case (APRED(_, _, s1), APRED(_, _, s2)) => s1 == s2
+ case (AALTS(_, Nil), AALTS(_, Nil)) => true
+ case (AALTS(_, r1::rs1), AALTS(_, r2::rs2)) =>
+ equ(r1, r2) && equ(AALTS(Nil, rs1), AALTS(Nil, rs2))
+ case (ASEQ(_, r1, r2), ASEQ(_, r3, r4)) => equ(r1, r3) && equ(r2, r4)
+ case (ASTAR(_, r1), ASTAR(_, r2)) => equ(r1, r2)
+ case _ => false
+}
+
+def distinctEq(xs: List[ARexp], acc: List[ARexp] = Nil): List[ARexp] = xs match {
+ case Nil => Nil
+ case (x::xs) => {
+ if (acc.exists(equ(_, x))) distinctEq(xs, acc)
+ else x::distinctEq(xs, x::acc)
+ }
+}
+
// abbreviations
def AALT(bs: Bits, r1: ARexp, r2: ARexp) = AALTS(bs, List(r1, r2))
@@ -449,7 +470,7 @@
case (AONE(bs2), r2s) => fuse(bs1 ++ bs2, r2s)
case (r1s, r2s) => ASEQ(bs1, r1s, r2s)
}
- case AALTS(bs1, rs) => distinctBy(flats(rs.map(bsimp)), erase) match {
+ case AALTS(bs1, rs) => distinctEq(flats(rs.map(bsimp))) match {
case Nil => AZERO
case r :: Nil => fuse(bs1, r)
case rs => AALTS(bs1, rs)
@@ -479,7 +500,7 @@
// Quick example
-val r : Rexp = ZERO | "a"
+val r : Rexp = (ZERO | "a") | ZERO
lexing(r, "a")
@@ -775,11 +796,12 @@
println(btokenise_simp(WHILE_REGS, fib_prog))
println("equal? " + (tokenise_simp(WHILE_REGS, fib_prog) == btokenise_simp(WHILE_REGS, fib_prog)))
-for (i <- 1 to 20) {
+for (i <- 1 to 40) {
print("Old: " + time(tokenise_simp(WHILE_REGS, fib_prog * i)))
print(" Bit: " + time(btokenise_simp(WHILE_REGS, fib_prog * i)))
- println(" Bit full simp: " + time(btokenise_simp_full(WHILE_REGS, fib_prog * i)))
+ //println(" Bit full simp: " + time(btokenise_simp_full(WHILE_REGS, fib_prog * i)))
//println(" Bit2: " + time(btokenise2_simp(WHILE_REGS, fib_prog * i)))
+ println("")
}
--- a/thys/Journal/Paper.thy Tue Jul 23 21:21:49 2019 +0100
+++ b/thys/Journal/Paper.thy Mon Jul 29 09:37:20 2019 +0100
@@ -132,14 +132,14 @@
-section {* Introduction *}
+section \<open>Introduction\<close>
-text {*
+text \<open>
Brzozowski \cite{Brzozowski1964} introduced the notion of the {\em
-derivative} @{term "der c r"} of a regular expression @{text r} w.r.t.\
-a character~@{text c}, and showed that it gave a simple solution to the
+derivative} @{term "der c r"} of a regular expression \<open>r\<close> w.r.t.\
+a character~\<open>c\<close>, and showed that it gave a simple solution to the
problem of matching a string @{term s} with a regular expression @{term
r}: if the derivative of @{term r} w.r.t.\ (in succession) all the
characters of the string matches the empty string, then @{term r}
@@ -175,8 +175,7 @@
into a sequence of tokens, POSIX is the more natural disambiguation
strategy for what programmers consider basic syntactic building blocks
in their programs. These building blocks are often specified by some
-regular expressions, say @{text "r\<^bsub>key\<^esub>"} and @{text
-"r\<^bsub>id\<^esub>"} for recognising keywords and identifiers,
+regular expressions, say \<open>r\<^bsub>key\<^esub>\<close> and \<open>r\<^bsub>id\<^esub>\<close> for recognising keywords and identifiers,
respectively. There are a few underlying (informal) rules behind
tokenising a string in a POSIX \cite{POSIX} fashion:
@@ -196,23 +195,22 @@
be longer than no match at all.
\end{itemize}
-\noindent Consider for example a regular expression @{text
-"r\<^bsub>key\<^esub>"} for recognising keywords such as @{text "if"},
-@{text "then"} and so on; and @{text "r\<^bsub>id\<^esub>"}
+\noindent Consider for example a regular expression \<open>r\<^bsub>key\<^esub>\<close> for recognising keywords such as \<open>if\<close>,
+\<open>then\<close> and so on; and \<open>r\<^bsub>id\<^esub>\<close>
recognising identifiers (say, a single character followed by
characters or numbers). Then we can form the regular expression
-@{text "(r\<^bsub>key\<^esub> + r\<^bsub>id\<^esub>)\<^sup>\<star>"}
-and use POSIX matching to tokenise strings, say @{text "iffoo"} and
-@{text "if"}. For @{text "iffoo"} we obtain by the Longest Match Rule
+\<open>(r\<^bsub>key\<^esub> + r\<^bsub>id\<^esub>)\<^sup>\<star>\<close>
+and use POSIX matching to tokenise strings, say \<open>iffoo\<close> and
+\<open>if\<close>. For \<open>iffoo\<close> we obtain by the Longest Match Rule
a single identifier token, not a keyword followed by an
-identifier. For @{text "if"} we obtain by the Priority Rule a keyword
-token, not an identifier token---even if @{text "r\<^bsub>id\<^esub>"}
-matches also. By the Star Rule we know @{text "(r\<^bsub>key\<^esub> +
-r\<^bsub>id\<^esub>)\<^sup>\<star>"} matches @{text "iffoo"},
-respectively @{text "if"}, in exactly one `iteration' of the star. The
+identifier. For \<open>if\<close> we obtain by the Priority Rule a keyword
+token, not an identifier token---even if \<open>r\<^bsub>id\<^esub>\<close>
+matches also. By the Star Rule we know \<open>(r\<^bsub>key\<^esub> +
+r\<^bsub>id\<^esub>)\<^sup>\<star>\<close> matches \<open>iffoo\<close>,
+respectively \<open>if\<close>, in exactly one `iteration' of the star. The
Empty String Rule is for cases where, for example, the regular expression
-@{text "(a\<^sup>\<star>)\<^sup>\<star>"} matches against the
-string @{text "bc"}. Then the longest initial matched substring is the
+\<open>(a\<^sup>\<star>)\<^sup>\<star>\<close> matches against the
+string \<open>bc\<close>. Then the longest initial matched substring is the
empty string, which is matched by both the whole regular expression
and the parenthesised subexpression.
@@ -225,25 +223,24 @@
expression matches a string, values encode the information of
\emph{how} the string is matched by the regular expression---that is,
which part of the string is matched by which part of the regular
-expression. For this consider again the string @{text "xy"} and
-the regular expression \mbox{@{text "(x + (y + xy))\<^sup>\<star>"}}
+expression. For this consider again the string \<open>xy\<close> and
+the regular expression \mbox{\<open>(x + (y + xy))\<^sup>\<star>\<close>}
(this time fully parenthesised). We can view this regular expression
-as tree and if the string @{text xy} is matched by two Star
-`iterations', then the @{text x} is matched by the left-most
-alternative in this tree and the @{text y} by the right-left alternative. This
+as a tree and if the string \<open>xy\<close> is matched by two Star
+`iterations', then the \<open>x\<close> is matched by the left-most
+alternative in this tree and the \<open>y\<close> by the right-left alternative. This
suggests to record this matching as
\begin{center}
@{term "Stars [Left(Char x), Right(Left(Char y))]"}
\end{center}
-\noindent where @{const Stars}, @{text Left}, @{text Right} and @{text
-Char} are constructors for values. @{text Stars} records how many
-iterations were used; @{text Left}, respectively @{text Right}, which
+\noindent where @{const Stars}, \<open>Left\<close>, \<open>Right\<close> and \<open>Char\<close> are constructors for values. \<open>Stars\<close> records how many
+iterations were used; \<open>Left\<close>, respectively \<open>Right\<close>, which
alternative is used. This `tree view' leads naturally to the idea that
regular expressions act as types and values as inhabiting those types
(see, for example, \cite{HosoyaVouillonPierce2005}). The value for
-matching @{text "xy"} in a single `iteration', i.e.~the POSIX value,
+matching \<open>xy\<close> in a single `iteration', i.e.~the POSIX value,
would look as follows
\begin{center}
@@ -316,11 +313,11 @@
We extend our results to ??? Bitcoded version??
-*}
+\<close>
-section {* Preliminaries *}
+section \<open>Preliminaries\<close>
-text {* \noindent Strings in Isabelle/HOL are lists of characters with
+text \<open>\noindent Strings in Isabelle/HOL are lists of characters with
the empty string being represented by the empty list, written @{term
"[]"}, and list-cons being written as @{term "DUMMY # DUMMY"}. Often
we use the usual bracket notation for lists also for strings; for
@@ -333,7 +330,7 @@
inductive datatype:
\begin{center}
- @{text "r :="}
+ \<open>r :=\<close>
@{const "ZERO"} $\mid$
@{const "ONE"} $\mid$
@{term "CHAR c"} $\mid$
@@ -365,8 +362,8 @@
DUMMY"} for the concatenation of two languages (it is also list-append for
strings). We use the star-notation for regular expressions and for
languages (in the last clause above). The star for languages is defined
- inductively by two clauses: @{text "(i)"} the empty string being in
- the star of a language and @{text "(ii)"} if @{term "s\<^sub>1"} is in a
+ inductively by two clauses: \<open>(i)\<close> the empty string being in
+ the star of a language and \<open>(ii)\<close> if @{term "s\<^sub>1"} is in a
language and @{term "s\<^sub>2"} in the star of this language, then also @{term
"s\<^sub>1 @ s\<^sub>2"} is in the star of this language. It will also be convenient
to use the following notion of a \emph{semantic derivative} (or \emph{left
@@ -459,11 +456,11 @@
\cite{Sulzmann2014} is to append another phase to this algorithm in order
to calculate a [lexical] value. We will explain the details next.
-*}
+\<close>
-section {* POSIX Regular Expression Matching\label{posixsec} *}
+section \<open>POSIX Regular Expression Matching\label{posixsec}\<close>
-text {*
+text \<open>
There have been many previous works that use values for encoding
\emph{how} a regular expression matches a string.
@@ -473,7 +470,7 @@
are defined as the inductive datatype
\begin{center}
- @{text "v :="}
+ \<open>v :=\<close>
@{const "Void"} $\mid$
@{term "val.Char c"} $\mid$
@{term "Left v"} $\mid$
@@ -532,8 +529,8 @@
\end{center}
\noindent where in the clause for @{const "Stars"} we use the
- notation @{term "v \<in> set vs"} for indicating that @{text v} is a
- member in the list @{text vs}. We require in this rule that every
+ notation @{term "v \<in> set vs"} for indicating that \<open>v\<close> is a
+ member in the list \<open>vs\<close>. We require in this rule that every
value in @{term vs} flattens to a non-empty string. The idea is that
@{term "Stars"}-values satisfy the informal Star Rule (see Introduction)
where the $^\star$ does not match the empty string unless this is
@@ -549,9 +546,9 @@
\end{proposition}
\noindent
- Given a regular expression @{text r} and a string @{text s}, we define the
- set of all \emph{Lexical Values} inhabited by @{text r} with the underlying string
- being @{text s}:\footnote{Okui and Suzuki refer to our lexical values
+ Given a regular expression \<open>r\<close> and a string \<open>s\<close>, we define the
+ set of all \emph{Lexical Values} inhabited by \<open>r\<close> with the underlying string
+ being \<open>s\<close>:\footnote{Okui and Suzuki refer to our lexical values
as \emph{canonical values} in \cite{OkuiSuzuki2010}. The notion of \emph{non-problematic
values} by Cardelli and Frisch \cite{Frisch2004} is related, but not identical
to our lexical values.}
@@ -573,7 +570,7 @@
infinitely many values, but according to our more restricted
definition only a single value, namely @{thm LV_STAR_ONE_empty}.
- If a regular expression @{text r} matches a string @{text s}, then
+ If a regular expression \<open>r\<close> matches a string \<open>s\<close>, then
generally the set @{term "LV r s"} is not just a singleton set. In
case of POSIX matching the problem is to calculate the unique lexical value
that satisfies the (informal) POSIX rules from the Introduction.
@@ -582,9 +579,9 @@
path from the left to the right involving @{term
derivatives}/@{const nullable} is the first phase of the algorithm
(calculating successive \Brz's derivatives) and @{const
- mkeps}/@{text inj}, the path from right to left, the second
+ mkeps}/\<open>inj\<close>, the path from right to left, the second
phase. This picture shows the steps required when a regular
- expression, say @{text "r\<^sub>1"}, matches the string @{term
+ expression, say \<open>r\<^sub>1\<close>, matches the string @{term
"[a,b,c]"}. We first build the three derivatives (according to
@{term a}, @{term b} and @{term c}). We then use @{const nullable}
to find out whether the resulting derivative regular expression
@@ -609,11 +606,11 @@
\node (v4) [below=of r4]{@{term "v\<^sub>4"}};
\draw[->,line width=1mm](r4) -- (v4);
\node (v3) [left=of v4] {@{term "v\<^sub>3"}};
-\draw[->,line width=1mm](v4)--(v3) node[below,midway] {@{text "inj r\<^sub>3 c"}};
+\draw[->,line width=1mm](v4)--(v3) node[below,midway] {\<open>inj r\<^sub>3 c\<close>};
\node (v2) [left=of v3]{@{term "v\<^sub>2"}};
-\draw[->,line width=1mm](v3)--(v2) node[below,midway] {@{text "inj r\<^sub>2 b"}};
+\draw[->,line width=1mm](v3)--(v2) node[below,midway] {\<open>inj r\<^sub>2 b\<close>};
\node (v1) [left=of v2] {@{term "v\<^sub>1"}};
-\draw[->,line width=1mm](v2)--(v1) node[below,midway] {@{text "inj r\<^sub>1 a"}};
+\draw[->,line width=1mm](v2)--(v1) node[below,midway] {\<open>inj r\<^sub>1 a\<close>};
\draw (r4) node[anchor=north west] {\;\raisebox{-8mm}{@{term "mkeps"}}};
\end{tikzpicture}
\end{center}
@@ -647,8 +644,7 @@
makes some subtle choices leading to a POSIX value: for example if an
alternative regular expression, say @{term "ALT r\<^sub>1 r\<^sub>2"}, can
match the empty string and furthermore @{term "r\<^sub>1"} can match the
- empty string, then we return a @{text Left}-value. The @{text
- Right}-value will only be returned if @{term "r\<^sub>1"} cannot match the empty
+ empty string, then we return a \<open>Left\<close>-value. The \<open>Right\<close>-value will only be returned if @{term "r\<^sub>1"} cannot match the empty
string.
The most interesting idea from Sulzmann and Lu \cite{Sulzmann2014} is
@@ -690,25 +686,25 @@
might be instructive to look first at the three sequence cases (clauses
\textit{(4)} -- \textit{(6)}). In each case we need to construct an ``injected value'' for
@{term "SEQ r\<^sub>1 r\<^sub>2"}. This must be a value of the form @{term
- "Seq DUMMY DUMMY"}\,. Recall the clause of the @{text derivative}-function
+ "Seq DUMMY DUMMY"}\,. Recall the clause of the \<open>derivative\<close>-function
for sequence regular expressions:
\begin{center}
@{thm (lhs) der.simps(5)[of c "r\<^sub>1" "r\<^sub>2"]} $\dn$ @{thm (rhs) der.simps(5)[of c "r\<^sub>1" "r\<^sub>2"]}
\end{center}
- \noindent Consider first the @{text "else"}-branch where the derivative is @{term
+ \noindent Consider first the \<open>else\<close>-branch where the derivative is @{term
"SEQ (der c r\<^sub>1) r\<^sub>2"}. The corresponding value must therefore
be of the form @{term "Seq v\<^sub>1 v\<^sub>2"}, which matches the left-hand
- side in clause~\textit{(4)} of @{term inj}. In the @{text "if"}-branch the derivative is an
+ side in clause~\textit{(4)} of @{term inj}. In the \<open>if\<close>-branch the derivative is an
alternative, namely @{term "ALT (SEQ (der c r\<^sub>1) r\<^sub>2) (der c
- r\<^sub>2)"}. This means we either have to consider a @{text Left}- or
- @{text Right}-value. In case of the @{text Left}-value we know further it
+ r\<^sub>2)"}. This means we either have to consider a \<open>Left\<close>- or
+ \<open>Right\<close>-value. In case of the \<open>Left\<close>-value we know further it
must be a value for a sequence regular expression. Therefore the pattern
we match in the clause \textit{(5)} is @{term "Left (Seq v\<^sub>1 v\<^sub>2)"},
while in \textit{(6)} it is just @{term "Right v\<^sub>2"}. One more interesting
point is in the right-hand side of clause \textit{(6)}: since in this case the
- regular expression @{text "r\<^sub>1"} does not ``contribute'' to
+ regular expression \<open>r\<^sub>1\<close> does not ``contribute'' to
matching the string, that means it only matches the empty string, we need to
call @{const mkeps} in order to construct a value for how @{term "r\<^sub>1"}
can match this empty string. A similar argument applies for why we can
@@ -728,7 +724,7 @@
value has a prepended character @{term c}; the second part shows that
the underlying string of an @{const mkeps}-value is always the empty
string (given the regular expression is nullable since otherwise
- @{text mkeps} might not be defined).
+ \<open>mkeps\<close> might not be defined).
\begin{lemma}\mbox{}\smallskip\\\label{Prf_injval_flat}
\begin{tabular}{ll}
@@ -743,16 +739,16 @@
an induction on @{term r}. There are no interesting cases.\qed
\end{proof}
- Having defined the @{const mkeps} and @{text inj} function we can extend
+ Having defined the @{const mkeps} and \<open>inj\<close> function we can extend
\Brz's matcher so that a value is constructed (assuming the
regular expression matches the string). The clauses of the Sulzmann and Lu lexer are
\begin{center}
\begin{tabular}{lcl}
@{thm (lhs) lexer.simps(1)} & $\dn$ & @{thm (rhs) lexer.simps(1)}\\
- @{thm (lhs) lexer.simps(2)} & $\dn$ & @{text "case"} @{term "lexer (der c r) s"} @{text of}\\
- & & \phantom{$|$} @{term "None"} @{text "\<Rightarrow>"} @{term None}\\
- & & $|$ @{term "Some v"} @{text "\<Rightarrow>"} @{term "Some (injval r c v)"}
+ @{thm (lhs) lexer.simps(2)} & $\dn$ & \<open>case\<close> @{term "lexer (der c r) s"} \<open>of\<close>\\
+ & & \phantom{$|$} @{term "None"} \<open>\<Rightarrow>\<close> @{term None}\\
+ & & $|$ @{term "Some v"} \<open>\<Rightarrow>\<close> @{term "Some (injval r c v)"}
\end{tabular}
\end{center}
@@ -784,24 +780,24 @@
\begin{figure}[t]
\begin{center}
\begin{tabular}{c}
- @{thm[mode=Axiom] Posix.intros(1)}@{text "P"}@{term "ONE"} \qquad
- @{thm[mode=Axiom] Posix.intros(2)}@{text "P"}@{term "c"}\medskip\\
- @{thm[mode=Rule] Posix.intros(3)[of "s" "r\<^sub>1" "v" "r\<^sub>2"]}@{text "P+L"}\qquad
- @{thm[mode=Rule] Posix.intros(4)[of "s" "r\<^sub>2" "v" "r\<^sub>1"]}@{text "P+R"}\medskip\\
+ @{thm[mode=Axiom] Posix.intros(1)}\<open>P\<close>@{term "ONE"} \qquad
+ @{thm[mode=Axiom] Posix.intros(2)}\<open>P\<close>@{term "c"}\medskip\\
+ @{thm[mode=Rule] Posix.intros(3)[of "s" "r\<^sub>1" "v" "r\<^sub>2"]}\<open>P+L\<close>\qquad
+ @{thm[mode=Rule] Posix.intros(4)[of "s" "r\<^sub>2" "v" "r\<^sub>1"]}\<open>P+R\<close>\medskip\\
$\mprset{flushleft}
\inferrule
{@{thm (prem 1) Posix.intros(5)[of "s\<^sub>1" "r\<^sub>1" "v\<^sub>1" "s\<^sub>2" "r\<^sub>2" "v\<^sub>2"]} \qquad
@{thm (prem 2) Posix.intros(5)[of "s\<^sub>1" "r\<^sub>1" "v\<^sub>1" "s\<^sub>2" "r\<^sub>2" "v\<^sub>2"]} \\\\
@{thm (prem 3) Posix.intros(5)[of "s\<^sub>1" "r\<^sub>1" "v\<^sub>1" "s\<^sub>2" "r\<^sub>2" "v\<^sub>2"]}}
- {@{thm (concl) Posix.intros(5)[of "s\<^sub>1" "r\<^sub>1" "v\<^sub>1" "s\<^sub>2" "r\<^sub>2" "v\<^sub>2"]}}$@{text "PS"}\\
- @{thm[mode=Axiom] Posix.intros(7)}@{text "P[]"}\medskip\\
+ {@{thm (concl) Posix.intros(5)[of "s\<^sub>1" "r\<^sub>1" "v\<^sub>1" "s\<^sub>2" "r\<^sub>2" "v\<^sub>2"]}}$\<open>PS\<close>\\
+ @{thm[mode=Axiom] Posix.intros(7)}\<open>P[]\<close>\medskip\\
$\mprset{flushleft}
\inferrule
{@{thm (prem 1) Posix.intros(6)[of "s\<^sub>1" "r" "v" "s\<^sub>2" "vs"]} \qquad
@{thm (prem 2) Posix.intros(6)[of "s\<^sub>1" "r" "v" "s\<^sub>2" "vs"]} \qquad
@{thm (prem 3) Posix.intros(6)[of "s\<^sub>1" "r" "v" "s\<^sub>2" "vs"]} \\\\
@{thm (prem 4) Posix.intros(6)[of "s\<^sub>1" "r" "v" "s\<^sub>2" "vs"]}}
- {@{thm (concl) Posix.intros(6)[of "s\<^sub>1" "r" "v" "s\<^sub>2" "vs"]}}$@{text "P\<star>"}
+ {@{thm (concl) Posix.intros(6)[of "s\<^sub>1" "r" "v" "s\<^sub>2" "vs"]}}$\<open>P\<star>\<close>
\end{tabular}
\end{center}
\caption{Our inductive definition of POSIX values.}\label{POSIXrules}
@@ -825,13 +821,12 @@
\noindent
We claim that our @{term "s \<in> r \<rightarrow> v"} relation captures the idea behind the four
informal POSIX rules shown in the Introduction: Consider for example the
- rules @{text "P+L"} and @{text "P+R"} where the POSIX value for a string
+ rules \<open>P+L\<close> and \<open>P+R\<close> where the POSIX value for a string
and an alternative regular expression, that is @{term "(s, ALT r\<^sub>1 r\<^sub>2)"},
- is specified---it is always a @{text "Left"}-value, \emph{except} when the
+ is specified---it is always a \<open>Left\<close>-value, \emph{except} when the
string to be matched is not in the language of @{term "r\<^sub>1"}; only then it
- is a @{text Right}-value (see the side-condition in @{text "P+R"}).
- Interesting is also the rule for sequence regular expressions (@{text
- "PS"}). The first two premises state that @{term "v\<^sub>1"} and @{term "v\<^sub>2"}
+ is a \<open>Right\<close>-value (see the side-condition in \<open>P+R\<close>).
+ Interesting is also the rule for sequence regular expressions (\<open>PS\<close>). The first two premises state that @{term "v\<^sub>1"} and @{term "v\<^sub>2"}
are the POSIX values for @{term "(s\<^sub>1, r\<^sub>1)"} and @{term "(s\<^sub>2, r\<^sub>2)"}
respectively. Consider now the third premise and note that the POSIX value
of this rule should match the string \mbox{@{term "s\<^sub>1 @ s\<^sub>2"}}. According to the
@@ -841,21 +836,20 @@
\emph{exist} an @{term "s\<^sub>3"} and @{term "s\<^sub>4"} such that @{term "s\<^sub>2"}
can be split up into a non-empty string @{term "s\<^sub>3"} and a possibly empty
string @{term "s\<^sub>4"}. Moreover the longer string @{term "s\<^sub>1 @ s\<^sub>3"} can be
- matched by @{text "r\<^sub>1"} and the shorter @{term "s\<^sub>4"} can still be
+ matched by \<open>r\<^sub>1\<close> and the shorter @{term "s\<^sub>4"} can still be
matched by @{term "r\<^sub>2"}. In this case @{term "s\<^sub>1"} would \emph{not} be the
longest initial split of \mbox{@{term "s\<^sub>1 @ s\<^sub>2"}} and therefore @{term "Seq v\<^sub>1
v\<^sub>2"} cannot be a POSIX value for @{term "(s\<^sub>1 @ s\<^sub>2, SEQ r\<^sub>1 r\<^sub>2)"}.
The main point is that our side-condition ensures the Longest
Match Rule is satisfied.
- A similar condition is imposed on the POSIX value in the @{text
- "P\<star>"}-rule. Also there we want that @{term "s\<^sub>1"} is the longest initial
+ A similar condition is imposed on the POSIX value in the \<open>P\<star>\<close>-rule. Also there we want that @{term "s\<^sub>1"} is the longest initial
split of @{term "s\<^sub>1 @ s\<^sub>2"} and furthermore the corresponding value
@{term v} cannot be flattened to the empty string. In effect, we require
that in each ``iteration'' of the star, some non-empty substring needs to
be ``chipped'' away; only in case of the empty string we accept @{term
"Stars []"} as the POSIX value. Indeed we can show that our POSIX values
- are lexical values which exclude those @{text Stars} that contain subvalues
+ are lexical values which exclude those \<open>Stars\<close> that contain subvalues
that flatten to the empty string.
\begin{lemma}\label{LVposix}
@@ -879,7 +873,7 @@
\end{proof}
\noindent
- The central lemma for our POSIX relation is that the @{text inj}-function
+ The central lemma for our POSIX relation is that the \<open>inj\<close>-function
preserves POSIX values.
\begin{lemma}\label{Posix2}
@@ -887,17 +881,17 @@
\end{lemma}
\begin{proof}
- By induction on @{text r}. We explain two cases.
+ By induction on \<open>r\<close>. We explain two cases.
\begin{itemize}
\item[$\bullet$] Case @{term "r = ALT r\<^sub>1 r\<^sub>2"}. There are
- two subcases, namely @{text "(a)"} \mbox{@{term "v = Left v'"}} and @{term
- "s \<in> der c r\<^sub>1 \<rightarrow> v'"}; and @{text "(b)"} @{term "v = Right v'"}, @{term
- "s \<notin> L (der c r\<^sub>1)"} and @{term "s \<in> der c r\<^sub>2 \<rightarrow> v'"}. In @{text "(a)"} we
+ two subcases, namely \<open>(a)\<close> \mbox{@{term "v = Left v'"}} and @{term
+ "s \<in> der c r\<^sub>1 \<rightarrow> v'"}; and \<open>(b)\<close> @{term "v = Right v'"}, @{term
+ "s \<notin> L (der c r\<^sub>1)"} and @{term "s \<in> der c r\<^sub>2 \<rightarrow> v'"}. In \<open>(a)\<close> we
know @{term "s \<in> der c r\<^sub>1 \<rightarrow> v'"}, from which we can infer @{term "(c # s)
\<in> r\<^sub>1 \<rightarrow> injval r\<^sub>1 c v'"} by induction hypothesis and hence @{term "(c #
s) \<in> ALT r\<^sub>1 r\<^sub>2 \<rightarrow> injval (ALT r\<^sub>1 r\<^sub>2) c (Left v')"} as needed. Similarly
- in subcase @{text "(b)"} where, however, in addition we have to use
+ in subcase \<open>(b)\<close> where, however, in addition we have to use
Proposition~\ref{derprop}(2) in order to infer @{term "c # s \<notin> L r\<^sub>1"} from @{term
"s \<notin> L (der c r\<^sub>1)"}.\smallskip
@@ -905,13 +899,13 @@
\begin{quote}
\begin{description}
- \item[@{text "(a)"}] @{term "v = Left (Seq v\<^sub>1 v\<^sub>2)"} and @{term "nullable r\<^sub>1"}
- \item[@{text "(b)"}] @{term "v = Right v\<^sub>1"} and @{term "nullable r\<^sub>1"}
- \item[@{text "(c)"}] @{term "v = Seq v\<^sub>1 v\<^sub>2"} and @{term "\<not> nullable r\<^sub>1"}
+ \item[\<open>(a)\<close>] @{term "v = Left (Seq v\<^sub>1 v\<^sub>2)"} and @{term "nullable r\<^sub>1"}
+ \item[\<open>(b)\<close>] @{term "v = Right v\<^sub>1"} and @{term "nullable r\<^sub>1"}
+ \item[\<open>(c)\<close>] @{term "v = Seq v\<^sub>1 v\<^sub>2"} and @{term "\<not> nullable r\<^sub>1"}
\end{description}
\end{quote}
- \noindent For @{text "(a)"} we know @{term "s\<^sub>1 \<in> der c r\<^sub>1 \<rightarrow> v\<^sub>1"} and
+ \noindent For \<open>(a)\<close> we know @{term "s\<^sub>1 \<in> der c r\<^sub>1 \<rightarrow> v\<^sub>1"} and
@{term "s\<^sub>2 \<in> r\<^sub>2 \<rightarrow> v\<^sub>2"} as well as
%
\[@{term "\<not> (\<exists>s\<^sub>3 s\<^sub>4. s\<^sub>3 \<noteq> [] \<and> s\<^sub>3 @ s\<^sub>4 = s\<^sub>2 \<and> s\<^sub>1 @ s\<^sub>3 \<in> L (der c r\<^sub>1) \<and> s\<^sub>4 \<in> L r\<^sub>2)"}\]
@@ -920,12 +914,12 @@
%
\[@{term "\<not> (\<exists>s\<^sub>3 s\<^sub>4. s\<^sub>3 \<noteq> [] \<and> s\<^sub>3 @ s\<^sub>4 = s\<^sub>2 \<and> (c # s\<^sub>1) @ s\<^sub>3 \<in> L r\<^sub>1 \<and> s\<^sub>4 \<in> L r\<^sub>2)"}\]
- \noindent We can use the induction hypothesis for @{text "r\<^sub>1"} to obtain
+ \noindent We can use the induction hypothesis for \<open>r\<^sub>1\<close> to obtain
@{term "(c # s\<^sub>1) \<in> r\<^sub>1 \<rightarrow> injval r\<^sub>1 c v\<^sub>1"}. Putting this all together allows us to infer
- @{term "((c # s\<^sub>1) @ s\<^sub>2) \<in> SEQ r\<^sub>1 r\<^sub>2 \<rightarrow> Seq (injval r\<^sub>1 c v\<^sub>1) v\<^sub>2"}. The case @{text "(c)"}
+ @{term "((c # s\<^sub>1) @ s\<^sub>2) \<in> SEQ r\<^sub>1 r\<^sub>2 \<rightarrow> Seq (injval r\<^sub>1 c v\<^sub>1) v\<^sub>2"}. The case \<open>(c)\<close>
is similar.
- For @{text "(b)"} we know @{term "s \<in> der c r\<^sub>2 \<rightarrow> v\<^sub>1"} and
+ For \<open>(b)\<close> we know @{term "s \<in> der c r\<^sub>2 \<rightarrow> v\<^sub>1"} and
@{term "s\<^sub>1 @ s\<^sub>2 \<notin> L (SEQ (der c r\<^sub>1) r\<^sub>2)"}. From the former
we have @{term "(c # s) \<in> r\<^sub>2 \<rightarrow> (injval r\<^sub>2 c v\<^sub>1)"} by induction hypothesis
for @{term "r\<^sub>2"}. From the latter we can infer
@@ -979,11 +973,11 @@
In the next section we show that our specification coincides with
another one given by Okui and Suzuki using a different technique.
-*}
+\<close>
-section {* Ordering of Values according to Okui and Suzuki*}
+section \<open>Ordering of Values according to Okui and Suzuki\<close>
-text {*
+text \<open>
While in the previous section we have defined POSIX values directly
in terms of a ternary relation (see inference rules in Figure~\ref{POSIXrules}),
@@ -1014,54 +1008,51 @@
\end{center}
\noindent
- At position @{text "[0,1]"} of this value is the
- subvalue @{text "Char y"} and at position @{text "[1]"} the
+ At position \<open>[0,1]\<close> of this value is the
+ subvalue \<open>Char y\<close> and at position \<open>[1]\<close> the
subvalue @{term "Char z"}. At the `root' position, or empty list
- @{term "[]"}, is the whole value @{term v}. Positions such as @{text
- "[0,1,0]"} or @{text "[2]"} are outside of @{text
- v}. If it exists, the subvalue of @{term v} at a position @{text
- p}, written @{term "at v p"}, can be recursively defined by
+ @{term "[]"}, is the whole value @{term v}. Positions such as \<open>[0,1,0]\<close> or \<open>[2]\<close> are outside of \<open>v\<close>. If it exists, the subvalue of @{term v} at a position \<open>p\<close>, written @{term "at v p"}, can be recursively defined by
\begin{center}
\begin{tabular}{r@ {\hspace{0mm}}lcl}
- @{term v} & @{text "\<downharpoonleft>\<^bsub>[]\<^esub>"} & @{text "\<equiv>"}& @{thm (rhs) at.simps(1)}\\
- @{term "Left v"} & @{text "\<downharpoonleft>\<^bsub>0::ps\<^esub>"} & @{text "\<equiv>"}& @{thm (rhs) at.simps(2)}\\
- @{term "Right v"} & @{text "\<downharpoonleft>\<^bsub>1::ps\<^esub>"} & @{text "\<equiv>"} &
+ @{term v} & \<open>\<downharpoonleft>\<^bsub>[]\<^esub>\<close> & \<open>\<equiv>\<close>& @{thm (rhs) at.simps(1)}\\
+ @{term "Left v"} & \<open>\<downharpoonleft>\<^bsub>0::ps\<^esub>\<close> & \<open>\<equiv>\<close>& @{thm (rhs) at.simps(2)}\\
+ @{term "Right v"} & \<open>\<downharpoonleft>\<^bsub>1::ps\<^esub>\<close> & \<open>\<equiv>\<close> &
@{thm (rhs) at.simps(3)[simplified Suc_0_fold]}\\
- @{term "Seq v\<^sub>1 v\<^sub>2"} & @{text "\<downharpoonleft>\<^bsub>0::ps\<^esub>"} & @{text "\<equiv>"} &
+ @{term "Seq v\<^sub>1 v\<^sub>2"} & \<open>\<downharpoonleft>\<^bsub>0::ps\<^esub>\<close> & \<open>\<equiv>\<close> &
@{thm (rhs) at.simps(4)[where ?v1.0="v\<^sub>1" and ?v2.0="v\<^sub>2"]} \\
- @{term "Seq v\<^sub>1 v\<^sub>2"} & @{text "\<downharpoonleft>\<^bsub>1::ps\<^esub>"}
- & @{text "\<equiv>"} &
+ @{term "Seq v\<^sub>1 v\<^sub>2"} & \<open>\<downharpoonleft>\<^bsub>1::ps\<^esub>\<close>
+ & \<open>\<equiv>\<close> &
@{thm (rhs) at.simps(5)[where ?v1.0="v\<^sub>1" and ?v2.0="v\<^sub>2", simplified Suc_0_fold]} \\
- @{term "Stars vs"} & @{text "\<downharpoonleft>\<^bsub>n::ps\<^esub>"} & @{text "\<equiv>"}& @{thm (rhs) at.simps(6)}\\
+ @{term "Stars vs"} & \<open>\<downharpoonleft>\<^bsub>n::ps\<^esub>\<close> & \<open>\<equiv>\<close>& @{thm (rhs) at.simps(6)}\\
\end{tabular}
\end{center}
\noindent In the last clause we use Isabelle's notation @{term "vs ! n"} for the
- @{text n}th element in a list. The set of positions inside a value @{text v},
+ \<open>n\<close>th element in a list. The set of positions inside a value \<open>v\<close>,
written @{term "Pos v"}, is given by
\begin{center}
\begin{tabular}{lcl}
- @{thm (lhs) Pos.simps(1)} & @{text "\<equiv>"} & @{thm (rhs) Pos.simps(1)}\\
- @{thm (lhs) Pos.simps(2)} & @{text "\<equiv>"} & @{thm (rhs) Pos.simps(2)}\\
- @{thm (lhs) Pos.simps(3)} & @{text "\<equiv>"} & @{thm (rhs) Pos.simps(3)}\\
- @{thm (lhs) Pos.simps(4)} & @{text "\<equiv>"} & @{thm (rhs) Pos.simps(4)}\\
+ @{thm (lhs) Pos.simps(1)} & \<open>\<equiv>\<close> & @{thm (rhs) Pos.simps(1)}\\
+ @{thm (lhs) Pos.simps(2)} & \<open>\<equiv>\<close> & @{thm (rhs) Pos.simps(2)}\\
+ @{thm (lhs) Pos.simps(3)} & \<open>\<equiv>\<close> & @{thm (rhs) Pos.simps(3)}\\
+ @{thm (lhs) Pos.simps(4)} & \<open>\<equiv>\<close> & @{thm (rhs) Pos.simps(4)}\\
@{thm (lhs) Pos.simps(5)[where ?v1.0="v\<^sub>1" and ?v2.0="v\<^sub>2"]}
- & @{text "\<equiv>"}
+ & \<open>\<equiv>\<close>
& @{thm (rhs) Pos.simps(5)[where ?v1.0="v\<^sub>1" and ?v2.0="v\<^sub>2"]}\\
- @{thm (lhs) Pos_stars} & @{text "\<equiv>"} & @{thm (rhs) Pos_stars}\\
+ @{thm (lhs) Pos_stars} & \<open>\<equiv>\<close> & @{thm (rhs) Pos_stars}\\
\end{tabular}
\end{center}
\noindent
- whereby @{text len} in the last clause stands for the length of a list. Clearly
+ whereby \<open>len\<close> in the last clause stands for the length of a list. Clearly
for every position inside a value there exists a subvalue at that position.
To help understanding the ordering of Okui and Suzuki, consider again
the earlier value
- @{text v} and compare it with the following @{text w}:
+ \<open>v\<close> and compare it with the following \<open>w\<close>:
\begin{center}
\begin{tabular}{l}
@@ -1070,16 +1061,16 @@
\end{tabular}
\end{center}
- \noindent Both values match the string @{text "xyz"}, that means if
+ \noindent Both values match the string \<open>xyz\<close>, which means if
we flatten these values at their respective root position, we obtain
- @{text "xyz"}. However, at position @{text "[0]"}, @{text v} matches
- @{text xy} whereas @{text w} matches only the shorter @{text x}. So
- according to the Longest Match Rule, we should prefer @{text v},
- rather than @{text w} as POSIX value for string @{text xyz} (and
+ \<open>xyz\<close>. However, at position \<open>[0]\<close>, \<open>v\<close> matches
+ \<open>xy\<close> whereas \<open>w\<close> matches only the shorter \<open>x\<close>. So
+ according to the Longest Match Rule, we should prefer \<open>v\<close>,
+ rather than \<open>w\<close> as POSIX value for string \<open>xyz\<close> (and
corresponding regular expression). In order to
formalise this idea, Okui and Suzuki introduce a measure for
- subvalues at position @{text p}, called the \emph{norm} of @{text v}
- at position @{text p}. We can define this measure in Isabelle as an
+ subvalues at position \<open>p\<close>, called the \emph{norm} of \<open>v\<close>
+ at position \<open>p\<close>. We can define this measure in Isabelle as an
integer as follows
\begin{center}
@@ -1087,10 +1078,10 @@
\end{center}
\noindent where we take the length of the flattened value at
- position @{text p}, provided the position is inside @{text v}; if
- not, then the norm is @{text "-1"}. The default for outside
+ position \<open>p\<close>, provided the position is inside \<open>v\<close>; if
+ not, then the norm is \<open>-1\<close>. The default for outside
positions is crucial for the POSIX requirement of preferring a
- @{text Left}-value over a @{text Right}-value (if they can match the
+ \<open>Left\<close>-value over a \<open>Right\<close>-value (if they can match the
same string---see the Priority Rule from the Introduction). For this
consider
@@ -1098,17 +1089,17 @@
@{term "v == Left (Char x)"} \qquad and \qquad @{term "w == Right (Char x)"}
\end{center}
- \noindent Both values match @{text x}. At position @{text "[0]"}
- the norm of @{term v} is @{text 1} (the subvalue matches @{text x}),
- but the norm of @{text w} is @{text "-1"} (the position is outside
- @{text w} according to how we defined the `inside' positions of
- @{text Left}- and @{text Right}-values). Of course at position
- @{text "[1]"}, the norms @{term "pflat_len v [1]"} and @{term
+ \noindent Both values match \<open>x\<close>. At position \<open>[0]\<close>
+ the norm of @{term v} is \<open>1\<close> (the subvalue matches \<open>x\<close>),
+ but the norm of \<open>w\<close> is \<open>-1\<close> (the position is outside
+ \<open>w\<close> according to how we defined the `inside' positions of
+ \<open>Left\<close>- and \<open>Right\<close>-values). Of course at position
+ \<open>[1]\<close>, the norms @{term "pflat_len v [1]"} and @{term
"pflat_len w [1]"} are reversed, but the point is that subvalues
will be analysed according to lexicographically ordered
- positions. According to this ordering, the position @{text "[0]"}
- takes precedence over @{text "[1]"} and thus also @{text v} will be
- preferred over @{text w}. The lexicographic ordering of positions, written
+ positions. According to this ordering, the position \<open>[0]\<close>
+ takes precedence over \<open>[1]\<close> and thus also \<open>v\<close> will be
+ preferred over \<open>w\<close>. The lexicographic ordering of positions, written
@{term "DUMMY \<sqsubset>lex DUMMY"}, can be conveniently formalised
by three inference rules
@@ -1123,18 +1114,18 @@
With the norm and lexicographic order in place,
we can state the key definition of Okui and Suzuki
- \cite{OkuiSuzuki2010}: a value @{term "v\<^sub>1"} is \emph{smaller at position @{text p}} than
+ \cite{OkuiSuzuki2010}: a value @{term "v\<^sub>1"} is \emph{smaller at position \<open>p\<close>} than
@{term "v\<^sub>2"}, written @{term "v\<^sub>1 \<sqsubset>val p v\<^sub>2"},
- if and only if $(i)$ the norm at position @{text p} is
+ if and only if $(i)$ the norm at position \<open>p\<close> is
greater in @{term "v\<^sub>1"} (that is the string @{term "flat (at v\<^sub>1 p)"} is longer
than @{term "flat (at v\<^sub>2 p)"}) and $(ii)$ all subvalues at
positions that are inside @{term "v\<^sub>1"} or @{term "v\<^sub>2"} and that are
- lexicographically smaller than @{text p}, we have the same norm, namely
+ lexicographically smaller than \<open>p\<close>, have the same norm, namely
\begin{center}
\begin{tabular}{c}
@{thm (lhs) PosOrd_def[where ?v1.0="v\<^sub>1" and ?v2.0="v\<^sub>2"]}
- @{text "\<equiv>"}
+ \<open>\<equiv>\<close>
$\begin{cases}
(i) & @{term "pflat_len v\<^sub>1 p > pflat_len v\<^sub>2 p"} \quad\text{and}\smallskip \\
(ii) & @{term "(\<forall>q \<in> Pos v\<^sub>1 \<union> Pos v\<^sub>2. q \<sqsubset>lex p --> pflat_len v\<^sub>1 q = pflat_len v\<^sub>2 q)"}
@@ -1142,11 +1133,9 @@
\end{tabular}
\end{center}
- \noindent The position @{text p} in this definition acts as the
- \emph{first distinct position} of @{text "v\<^sub>1"} and @{text
- "v\<^sub>2"}, where both values match strings of different length
- \cite{OkuiSuzuki2010}. Since at @{text p} the values @{text
- "v\<^sub>1"} and @{text "v\<^sub>2"} match different strings, the
+ \noindent The position \<open>p\<close> in this definition acts as the
+ \emph{first distinct position} of \<open>v\<^sub>1\<close> and \<open>v\<^sub>2\<close>, where both values match strings of different length
+ \cite{OkuiSuzuki2010}. Since at \<open>p\<close> the values \<open>v\<^sub>1\<close> and \<open>v\<^sub>2\<close> match different strings, the
ordering is irreflexive. Derived from the definition above
are the following two orderings:
@@ -1168,11 +1157,8 @@
@{thm [mode=IfThen] PosOrd_trans[where ?v1.0="v\<^sub>1" and ?v2.0="v\<^sub>2" and ?v3.0="v\<^sub>3"]}
\end{lemma}
- \begin{proof} From the assumption we obtain two positions @{text p}
- and @{text q}, where the values @{text "v\<^sub>1"} and @{text
- "v\<^sub>2"} (respectively @{text "v\<^sub>2"} and @{text
- "v\<^sub>3"}) are `distinct'. Since @{text
- "\<prec>\<^bsub>lex\<^esub>"} is trichotomous, we need to consider
+ \begin{proof} From the assumption we obtain two positions \<open>p\<close>
+ and \<open>q\<close>, where the values \<open>v\<^sub>1\<close> and \<open>v\<^sub>2\<close> (respectively \<open>v\<^sub>2\<close> and \<open>v\<^sub>3\<close>) are `distinct'. Since \<open>\<prec>\<^bsub>lex\<^esub>\<close> is trichotomous, we need to consider
three cases, namely @{term "p = q"}, @{term "p \<sqsubset>lex q"} and
@{term "q \<sqsubset>lex p"}. Let us look at the first case. Clearly
@{term "pflat_len v\<^sub>2 p < pflat_len v\<^sub>1 p"} and @{term
@@ -1184,7 +1170,7 @@
v\<^sub>1"}, then we can infer from the first assumption that @{term
"pflat_len v\<^sub>1 p' = pflat_len v\<^sub>2 p'"}. But this means
that @{term "p'"} must be in @{term "Pos v\<^sub>2"} too (the norm
- cannot be @{text "-1"} given @{term "p' \<in> Pos v\<^sub>1"}).
+ cannot be \<open>-1\<close> given @{term "p' \<in> Pos v\<^sub>1"}).
Hence we can use the second assumption and
infer @{term "pflat_len v\<^sub>2 p' = pflat_len v\<^sub>3 p'"},
which concludes this case with @{term "v\<^sub>1 :\<sqsubset>val
@@ -1193,15 +1179,15 @@
\noindent
The proof for $\preccurlyeq$ is similar and omitted.
- It is also straightforward to show that @{text "\<prec>"} and
+ It is also straightforward to show that \<open>\<prec>\<close> and
$\preccurlyeq$ are partial orders. Okui and Suzuki furthermore show that they
are linear orderings for lexical values \cite{OkuiSuzuki2010} of a given
regular expression and given string, but we have not formalised this in Isabelle. It is
not essential for our results. What we are going to show below is
- that for a given @{text r} and @{text s}, the orderings have a unique
+ that for a given \<open>r\<close> and \<open>s\<close>, the orderings have a unique
minimal element on the set @{term "LV r s"}, which is the POSIX value
we defined in the previous section. We start with two properties that
- show how the length of a flattened value relates to the @{text "\<prec>"}-ordering.
+ show how the length of a flattened value relates to the \<open>\<prec>\<close>-ordering.
\begin{proposition}\mbox{}\smallskip\\\label{ordlen}
\begin{tabular}{@ {}ll}
@@ -1259,8 +1245,7 @@
\noindent One might prefer that statements \textit{(4)} and \textit{(5)}
(respectively \textit{(6)} and \textit{(7)})
- are combined into a single \textit{iff}-statement (like the ones for @{text
- Left} and @{text Right}). Unfortunately this cannot be done easily: such
+ are combined into a single \textit{iff}-statement (like the ones for \<open>Left\<close> and \<open>Right\<close>). Unfortunately this cannot be done easily: such
a single statement would require an additional assumption about the
two values @{term "Seq v\<^sub>1 v\<^sub>2"} and @{term "Seq w\<^sub>1 w\<^sub>2"}
being inhabited by the same regular expression. The
@@ -1271,10 +1256,8 @@
Next we establish how Okui and Suzuki's orderings relate to our
- definition of POSIX values. Given a @{text POSIX} value @{text "v\<^sub>1"}
- for @{text r} and @{text s}, then any other lexical value @{text
- "v\<^sub>2"} in @{term "LV r s"} is greater or equal than @{text
- "v\<^sub>1"}, namely:
+ definition of POSIX values. Given a \<open>POSIX\<close> value \<open>v\<^sub>1\<close>
+ for \<open>r\<close> and \<open>s\<close>, then any other lexical value \<open>v\<^sub>2\<close> in @{term "LV r s"} is greater than or equal to \<open>v\<^sub>1\<close>, namely:
\begin{theorem}\label{orderone}
@@ -1283,53 +1266,53 @@
\begin{proof} By induction on our POSIX rules. By
Theorem~\ref{posixdeterm} and the definition of @{const LV}, it is clear
- that @{text "v\<^sub>1"} and @{text "v\<^sub>2"} have the same
+ that \<open>v\<^sub>1\<close> and \<open>v\<^sub>2\<close> have the same
underlying string @{term s}. The three base cases are
straightforward: for example for @{term "v\<^sub>1 = Void"}, we have
that @{term "v\<^sub>2 \<in> LV ONE []"} must also be of the form
\mbox{@{term "v\<^sub>2 = Void"}}. Therefore we have @{term
"v\<^sub>1 :\<sqsubseteq>val v\<^sub>2"}. The inductive cases for
- @{text r} being of the form @{term "ALT r\<^sub>1 r\<^sub>2"} and
+ \<open>r\<close> being of the form @{term "ALT r\<^sub>1 r\<^sub>2"} and
@{term "SEQ r\<^sub>1 r\<^sub>2"} are as follows:
\begin{itemize}
- \item[$\bullet$] Case @{text "P+L"} with @{term "s \<in> (ALT r\<^sub>1 r\<^sub>2)
+ \item[$\bullet$] Case \<open>P+L\<close> with @{term "s \<in> (ALT r\<^sub>1 r\<^sub>2)
\<rightarrow> (Left w\<^sub>1)"}: In this case the value
@{term "v\<^sub>2"} is either of the
form @{term "Left w\<^sub>2"} or @{term "Right w\<^sub>2"}. In the
latter case we can immediately conclude with \mbox{@{term "v\<^sub>1
- :\<sqsubseteq>val v\<^sub>2"}} since a @{text Left}-value with the
- same underlying string @{text s} is always smaller than a
- @{text Right}-value by Proposition~\ref{ordintros}\textit{(1)}.
+ :\<sqsubseteq>val v\<^sub>2"}} since a \<open>Left\<close>-value with the
+ same underlying string \<open>s\<close> is always smaller than a
+ \<open>Right\<close>-value by Proposition~\ref{ordintros}\textit{(1)}.
In the former case we have @{term "w\<^sub>2
\<in> LV r\<^sub>1 s"} and can use the induction hypothesis to infer
@{term "w\<^sub>1 :\<sqsubseteq>val w\<^sub>2"}. Because @{term
"w\<^sub>1"} and @{term "w\<^sub>2"} have the same underlying string
- @{text s}, we can conclude with @{term "Left w\<^sub>1
+ \<open>s\<close>, we can conclude with @{term "Left w\<^sub>1
:\<sqsubseteq>val Left w\<^sub>2"} using
Proposition~\ref{ordintros}\textit{(2)}.\smallskip
- \item[$\bullet$] Case @{text "P+R"} with @{term "s \<in> (ALT r\<^sub>1 r\<^sub>2)
+ \item[$\bullet$] Case \<open>P+R\<close> with @{term "s \<in> (ALT r\<^sub>1 r\<^sub>2)
\<rightarrow> (Right w\<^sub>1)"}: This case is similar to the previous
case, except that we additionally know @{term "s \<notin> L
r\<^sub>1"}. This is needed when @{term "v\<^sub>2"} is of the form
\mbox{@{term "Left w\<^sub>2"}}. Since \mbox{@{term "flat v\<^sub>2 = flat
- w\<^sub>2"} @{text "= s"}} and @{term "\<Turnstile> w\<^sub>2 :
+ w\<^sub>2"} \<open>= s\<close>} and @{term "\<Turnstile> w\<^sub>2 :
r\<^sub>1"}, we can derive a contradiction for \mbox{@{term "s \<notin> L
r\<^sub>1"}} using
Proposition~\ref{inhabs}. So also in this case \mbox{@{term "v\<^sub>1
:\<sqsubseteq>val v\<^sub>2"}}.\smallskip
- \item[$\bullet$] Case @{text "PS"} with @{term "(s\<^sub>1 @
+ \item[$\bullet$] Case \<open>PS\<close> with @{term "(s\<^sub>1 @
s\<^sub>2) \<in> (SEQ r\<^sub>1 r\<^sub>2) \<rightarrow> (Seq
w\<^sub>1 w\<^sub>2)"}: We can assume @{term "v\<^sub>2 = Seq
(u\<^sub>1) (u\<^sub>2)"} with @{term "\<Turnstile> u\<^sub>1 :
r\<^sub>1"} and \mbox{@{term "\<Turnstile> u\<^sub>2 :
r\<^sub>2"}}. We have @{term "s\<^sub>1 @ s\<^sub>2 = (flat
u\<^sub>1) @ (flat u\<^sub>2)"}. By the side-condition of the
- @{text PS}-rule we know that either @{term "s\<^sub>1 = flat
+ \<open>PS\<close>-rule we know that either @{term "s\<^sub>1 = flat
u\<^sub>1"} or that @{term "flat u\<^sub>1"} is a strict prefix of
@{term "s\<^sub>1"}. In the latter case we can infer @{term
"w\<^sub>1 :\<sqsubset>val u\<^sub>1"} by
@@ -1348,19 +1331,18 @@
\end{itemize}
- \noindent The case for @{text "P\<star>"} is similar to the @{text PS}-case and omitted.\qed
+ \noindent The case for \<open>P\<star>\<close> is similar to the \<open>PS\<close>-case and omitted.\qed
\end{proof}
- \noindent This theorem shows that our @{text POSIX} value for a
- regular expression @{text r} and string @{term s} is in fact a
- minimal element of the values in @{text "LV r s"}. By
+ \noindent This theorem shows that our \<open>POSIX\<close> value for a
+ regular expression \<open>r\<close> and string @{term s} is in fact a
+ minimal element of the values in \<open>LV r s\<close>. By
Proposition~\ref{ordlen}\textit{(2)} we also know that any value in
- @{text "LV r s'"}, with @{term "s'"} being a strict prefix, cannot be
- smaller than @{text "v\<^sub>1"}. The next theorem shows the
+ \<open>LV r s'\<close>, with @{term "s'"} being a strict prefix, cannot be
+ smaller than \<open>v\<^sub>1\<close>. The next theorem shows the
opposite---namely any minimal element in @{term "LV r s"} must be a
- @{text POSIX} value. This can be established by induction on @{text
- r}, but the proof can be drastically simplified by using the fact
- from the previous section about the existence of a @{text POSIX} value
+ \<open>POSIX\<close> value. This can be established by induction on \<open>r\<close>, but the proof can be drastically simplified by using the fact
+ from the previous section about the existence of a \<open>POSIX\<close> value
whenever a string @{term "s \<in> L r"}.
@@ -1372,7 +1354,7 @@
If @{thm (prem 1) PosOrd_Posix[where ?v1.0="v\<^sub>1"]} then
@{term "s \<in> L r"} by Proposition~\ref{inhabs}. Hence by Theorem~\ref{lexercorrect}(2)
there exists a
- @{text POSIX} value @{term "v\<^sub>P"} with @{term "s \<in> r \<rightarrow> v\<^sub>P"}
+ \<open>POSIX\<close> value @{term "v\<^sub>P"} with @{term "s \<in> r \<rightarrow> v\<^sub>P"}
and by Lemma~\ref{LVposix} we also have \mbox{@{term "v\<^sub>P \<in> LV r s"}}.
By Theorem~\ref{orderone} we therefore have
@{term "v\<^sub>P :\<sqsubseteq>val v\<^sub>1"}. If @{term "v\<^sub>P = v\<^sub>1"} then
@@ -1393,17 +1375,17 @@
\noindent To sum up, we have shown that the (unique) minimal elements
- of the ordering by Okui and Suzuki are exactly the @{text POSIX}
+ of the ordering by Okui and Suzuki are exactly the \<open>POSIX\<close>
values we defined inductively in Section~\ref{posixsec}. This provides
- an independent confirmation that our ternary relation formalise the
+ an independent confirmation that our ternary relation formalises the
informal POSIX rules.
-*}
+\<close>
-section {* Bitcoded Lexing *}
+section \<open>Bitcoded Lexing\<close>
-text {*
+text \<open>
Incremental calculation of the value. To simplify the proof we first define the function
@{const flex} which calculates the ``iterated'' injection function. With this we can
@@ -1502,7 +1484,7 @@
@{thm [mode=IfThen] bder_retrieve}
-By induction on @{text r}
+By induction on \<open>r\<close>
\begin{theorem}[Main Lemma]\mbox{}\\
@{thm [mode=IfThen] MAIN_decode}
@@ -1518,11 +1500,11 @@
@{thm blexer_correctness}
\end{theorem}
-*}
+\<close>
-section {* Optimisations *}
+section \<open>Optimisations\<close>
-text {*
+text \<open>
Derivatives as calculated by \Brz's method are usually more complex
regular expressions than the initial one; the result is that the
@@ -1538,10 +1520,10 @@
\begin{equation}\label{Simpl}
\begin{array}{lcllcllcllcl}
- @{term "ALT ZERO r"} & @{text "\<Rightarrow>"} & @{term r} \hspace{8mm}%\\
- @{term "ALT r ZERO"} & @{text "\<Rightarrow>"} & @{term r} \hspace{8mm}%\\
- @{term "SEQ ONE r"} & @{text "\<Rightarrow>"} & @{term r} \hspace{8mm}%\\
- @{term "SEQ r ONE"} & @{text "\<Rightarrow>"} & @{term r}
+ @{term "ALT ZERO r"} & \<open>\<Rightarrow>\<close> & @{term r} \hspace{8mm}%\\
+ @{term "ALT r ZERO"} & \<open>\<Rightarrow>\<close> & @{term r} \hspace{8mm}%\\
+ @{term "SEQ ONE r"} & \<open>\<Rightarrow>\<close> & @{term r} \hspace{8mm}%\\
+ @{term "SEQ r ONE"} & \<open>\<Rightarrow>\<close> & @{term r}
\end{array}
\end{equation}
@@ -1558,15 +1540,15 @@
\begin{center}
\begin{tabular}{lcl}
- @{thm (lhs) F_RIGHT.simps(1)} & $\dn$ & @{text "Right (f v)"}\\
- @{thm (lhs) F_LEFT.simps(1)} & $\dn$ & @{text "Left (f v)"}\\
+ @{thm (lhs) F_RIGHT.simps(1)} & $\dn$ & \<open>Right (f v)\<close>\\
+ @{thm (lhs) F_LEFT.simps(1)} & $\dn$ & \<open>Left (f v)\<close>\\
- @{thm (lhs) F_ALT.simps(1)} & $\dn$ & @{text "Right (f\<^sub>2 v)"}\\
- @{thm (lhs) F_ALT.simps(2)} & $\dn$ & @{text "Left (f\<^sub>1 v)"}\\
+ @{thm (lhs) F_ALT.simps(1)} & $\dn$ & \<open>Right (f\<^sub>2 v)\<close>\\
+ @{thm (lhs) F_ALT.simps(2)} & $\dn$ & \<open>Left (f\<^sub>1 v)\<close>\\
- @{thm (lhs) F_SEQ1.simps(1)} & $\dn$ & @{text "Seq (f\<^sub>1 ()) (f\<^sub>2 v)"}\\
- @{thm (lhs) F_SEQ2.simps(1)} & $\dn$ & @{text "Seq (f\<^sub>1 v) (f\<^sub>2 ())"}\\
- @{thm (lhs) F_SEQ.simps(1)} & $\dn$ & @{text "Seq (f\<^sub>1 v\<^sub>1) (f\<^sub>2 v\<^sub>2)"}\medskip\\
+ @{thm (lhs) F_SEQ1.simps(1)} & $\dn$ & \<open>Seq (f\<^sub>1 ()) (f\<^sub>2 v)\<close>\\
+ @{thm (lhs) F_SEQ2.simps(1)} & $\dn$ & \<open>Seq (f\<^sub>1 v) (f\<^sub>2 ())\<close>\\
+ @{thm (lhs) F_SEQ.simps(1)} & $\dn$ & \<open>Seq (f\<^sub>1 v\<^sub>1) (f\<^sub>2 v\<^sub>2)\<close>\medskip\\
%\end{tabular}
%
%\begin{tabular}{lcl}
@@ -1580,7 +1562,7 @@
\end{center}
\noindent
- The functions @{text "simp\<^bsub>Alt\<^esub>"} and @{text "simp\<^bsub>Seq\<^esub>"} encode the simplification rules
+ The functions \<open>simp\<^bsub>Alt\<^esub>\<close> and \<open>simp\<^bsub>Seq\<^esub>\<close> encode the simplification rules
in \eqref{Simpl} and compose the rectification functions (simplifications can occur
deep inside the regular expression). The main simplification function is then
@@ -1602,17 +1584,17 @@
\begin{tabular}{lcl}
@{thm (lhs) slexer.simps(1)} & $\dn$ & @{thm (rhs) slexer.simps(1)}\\
@{thm (lhs) slexer.simps(2)} & $\dn$ &
- @{text "let (r\<^sub>s, f\<^sub>r) = simp (r "}$\backslash$@{text " c) in"}\\
- & & @{text "case"} @{term "slexer r\<^sub>s s"} @{text of}\\
- & & \phantom{$|$} @{term "None"} @{text "\<Rightarrow>"} @{term None}\\
- & & $|$ @{term "Some v"} @{text "\<Rightarrow>"} @{text "Some (inj r c (f\<^sub>r v))"}
+ \<open>let (r\<^sub>s, f\<^sub>r) = simp (r \<close>$\backslash$\<open> c) in\<close>\\
+ & & \<open>case\<close> @{term "slexer r\<^sub>s s"} \<open>of\<close>\\
+ & & \phantom{$|$} @{term "None"} \<open>\<Rightarrow>\<close> @{term None}\\
+ & & $|$ @{term "Some v"} \<open>\<Rightarrow>\<close> \<open>Some (inj r c (f\<^sub>r v))\<close>
\end{tabular}
\end{center}
\noindent
In the second clause we first calculate the derivative @{term "der c r"}
and then simplify the result. This gives us a simplified derivative
- @{text "r\<^sub>s"} and a rectification function @{text "f\<^sub>r"}. The lexer
+ \<open>r\<^sub>s\<close> and a rectification function \<open>f\<^sub>r\<close>. The lexer
is then recursively called with the simplified derivative, but before
we inject the character @{term c} into the value @{term v}, we need to rectify
@{term v} (that is construct @{term "f\<^sub>r v"}). Before we can establish the correctness
@@ -1627,7 +1609,7 @@
\end{tabular}
\end{lemma}
- \begin{proof} Both are by induction on @{text r}. There is no
+ \begin{proof} Both are by induction on \<open>r\<close>. There is no
interesting case for the first statement. For the second statement,
of interest are the @{term "r = ALT r\<^sub>1 r\<^sub>2"} and @{term "r = SEQ r\<^sub>1
r\<^sub>2"} cases. In each case we have to analyse four subcases whether
@@ -1639,7 +1621,7 @@
and by IH also (*) @{term "s \<in> r\<^sub>2 \<rightarrow> (snd (simp r\<^sub>2) v)"}. Given @{term "fst (simp r\<^sub>1) = ZERO"}
we know @{term "L (fst (simp r\<^sub>1)) = {}"}. By the first statement
@{term "L r\<^sub>1"} is the empty set, meaning (**) @{term "s \<notin> L r\<^sub>1"}.
- Taking (*) and (**) together gives by the \mbox{@{text "P+R"}}-rule
+ Taking (*) and (**) together gives by the \mbox{\<open>P+R\<close>}-rule
@{term "s \<in> ALT r\<^sub>1 r\<^sub>2 \<rightarrow> Right (snd (simp r\<^sub>2) v)"}. In turn this
gives @{term "s \<in> ALT r\<^sub>1 r\<^sub>2 \<rightarrow> snd (simp (ALT r\<^sub>1 r\<^sub>2)) v"} as we need to show.
The other cases are similar.\qed
@@ -1683,12 +1665,12 @@
\end{proof}
-*}
+\<close>
-section {* HERE *}
+section \<open>HERE\<close>
-text {*
+text \<open>
\begin{center}
\begin{tabular}{llcl}
1) & @{thm (lhs) erase.simps(1)} & $\dn$ & @{thm (rhs) erase.simps(1)}\\
@@ -1751,7 +1733,7 @@
We can move out the @{term "fuse [Z]"} and then use the IH to show that left-hand side
and right-hand side are equal. This completes the proof.
\end{proof}
-*}
+\<close>