(*<*)
(* Theory header for the paper document; imports the formalisation of the
   lexer, its simplification phase, position orderings and the size-bound
   results, plus LaTeXsugar for document output. *)
theory Paper
imports
   "../Lexer"
   "../Simplifying"
   "../Positions"
   "../SizeBound"
   "HOL-Library.LaTeXsugar"
begin
+ − 11
+ − 12
(* Trivial fold lemma: lets "Suc 0" be rewritten to "1", which tidies
   goals and printed output. *)
lemma Suc_0_fold:
  "Suc 0 = 1"
  by simp
+ − 15
+ − 16
+ − 17
+ − 18
(* Suppress the question marks on schematic variables in document output. *)
declare [[show_question_marks = false]]

(* LaTeX-only pretty syntax for set comprehensions. *)
syntax (latex output)
  "_Collect" :: "pttrn => bool => 'a set" ("(1{_ \<^latex>\<open>\\mbox{\\boldmath$\\mid$}\<close> _})")
  "_CollectIn" :: "pttrn => 'a set => bool => 'a set" ("(1{_ \<in> _ |e _})")

(* Negated existential quantifiers.
   NOTE(review): the ".a." inside the mixfix templates looks like an
   extraction artifact -- confirm against the repository version. *)
syntax
  "_Not_Ex" :: "idts \<Rightarrow> bool \<Rightarrow> bool" ("(3\<nexists>_.a./ _)" [0, 10] 10)
  "_Not_Ex1" :: "pttrn \<Rightarrow> bool \<Rightarrow> bool" ("(3\<nexists>!_.a./ _)" [0, 10] 10)
+ − 27
+ − 28
+ − 29
(* Argument-flipped synonyms for the derivative operations, so they can be
   printed infix (r\c, r\s, ...) by the notation declarations in this file. *)
abbreviation
  "der_syn r c \<equiv> der c r"

abbreviation
  "ders_syn r s \<equiv> ders s r"

abbreviation
  "bder_syn r c \<equiv> bder c r"

(* NOTE(review): unlike ders_syn, this one does not flip its arguments;
   presumably bders already takes the regex first -- confirm against
   the SizeBound theory. *)
abbreviation
  "bders_syn r s \<equiv> bders r s"

(* v1 is not strictly smaller than v2 in the position ordering. *)
abbreviation
  "nprec v1 v2 \<equiv> \<not>(v1 :\<sqsubset>val v2)"
+ − 44
+ − 45
+ − 46
+ − 47
+ − 48
(* LaTeX-only notation: maps the formal constants onto the symbols used
   in the paper (bold 0/1, infix + and \<cdot>, backslash for derivatives,
   subscript b for the bit-coded functions, etc.).  Affects document
   output only, not the logic. *)
notation (latex output)
  If  ("(\<^latex>\<open>\\textrm{\<close>if\<^latex>\<open>}\<close> (_)/ \<^latex>\<open>\\textrm{\<close>then\<^latex>\<open>}\<close> (_)/ \<^latex>\<open>\\textrm{\<close>else\<^latex>\<open>}\<close> (_))" 10) and
  Cons ("_\<^latex>\<open>\\mbox{$\\,$}\<close>::\<^latex>\<open>\\mbox{$\\,$}\<close>_" [75,73] 73) and

  ZERO ("\<^bold>0" 81) and
  ONE ("\<^bold>1" 81) and
  CH ("_" [1000] 80) and
  ALT ("_ + _" [77,77] 77) and
  SEQ ("_ \<cdot> _" [78,78] 78) and
  STAR ("_\<^sup>\<star>" [79] 78) and

  val.Void ("Empty" 78) and
  val.Char ("Char _" [1000] 78) and
  val.Left ("Left _" [79] 78) and
  val.Right ("Right _" [1000] 78) and
  val.Seq ("Seq _ _" [79,79] 78) and
  val.Stars ("Stars _" [79] 78) and

  L ("L'(_')" [10] 78) and
  LV ("LV _ _" [80,73] 78) and
  der_syn ("_\\_" [79, 1000] 76) and
  ders_syn ("_\\_" [79, 1000] 76) and
  flat ("|_|" [75] 74) and
  flats ("|_|" [72] 74) and
  Sequ ("_ @ _" [78,77] 63) and
  injval ("inj _ _ _" [79,77,79] 76) and
  mkeps ("mkeps _" [79] 76) and
  length ("len _" [73] 73) and
  intlen ("len _" [73] 73) and
  set ("_" [73] 73) and

  Prf ("_ : _" [75,75] 75) and
  Posix ("'(_, _') \<rightarrow> _" [63,75,75] 75) and

  lexer ("lexer _ _" [78,78] 77) and
  F_RIGHT ("F\<^bsub>Right\<^esub> _") and
  F_LEFT ("F\<^bsub>Left\<^esub> _") and
  F_ALT ("F\<^bsub>Alt\<^esub> _ _") and
  F_SEQ1 ("F\<^bsub>Seq1\<^esub> _ _") and
  F_SEQ2 ("F\<^bsub>Seq2\<^esub> _ _") and
  F_SEQ ("F\<^bsub>Seq\<^esub> _ _") and
  simp_SEQ ("simp\<^bsub>Seq\<^esub> _ _" [1000, 1000] 1) and
  simp_ALT ("simp\<^bsub>Alt\<^esub> _ _" [1000, 1000] 1) and
  slexer ("lexer\<^sup>+" 1000) and

  at ("_\<^latex>\<open>\\mbox{$\\downharpoonleft$}\<close>\<^bsub>_\<^esub>") and
  lex_list ("_ \<prec>\<^bsub>lex\<^esub> _") and
  PosOrd ("_ \<prec>\<^bsub>_\<^esub> _" [77,77,77] 77) and
  PosOrd_ex ("_ \<prec> _" [77,77] 77) and
  PosOrd_ex_eq ("_ \<^latex>\<open>\\mbox{$\\preccurlyeq$}\<close> _" [77,77] 77) and
  pflat_len ("\<parallel>_\<parallel>\<^bsub>_\<^esub>") and
  nprec ("_ \<^latex>\<open>\\mbox{$\\not\\prec$}\<close> _" [77,77] 77) and

  bder_syn ("_\<^latex>\<open>\\mbox{$\\bbslash$}\<close>_" [79, 1000] 76) and
  bders_syn ("_\<^latex>\<open>\\mbox{$\\bbslash$}\<close>_" [79, 1000] 76) and
  intern ("_\<^latex>\<open>\\mbox{$^\\uparrow$}\<close>" [900] 80) and
  erase ("_\<^latex>\<open>\\mbox{$^\\downarrow$}\<close>" [1000] 74) and
  bnullable ("nullable\<^latex>\<open>\\mbox{$_b$}\<close> _" [1000] 80) and
  bmkeps ("mkeps\<^latex>\<open>\\mbox{$_b$}\<close> _" [1000] 80) and
  blexer ("lexer\<^latex>\<open>\\mbox{$_b$}\<close> _ _" [77, 77] 80) and
  code ("code _" [79] 74) and

  DUMMY ("\<^latex>\<open>\\underline{\\hspace{2mm}}\<close>")
+ − 111
+ − 112
+ − 113
(* A string s matches regex r iff the derivative of r w.r.t. s is nullable. *)
definition
  "match r s \<equiv> nullable (ders s r)"
+ − 115
+ − 116
+ − 117
(* The only lexical value inhabiting (STAR ONE) for the empty string is
   the empty star iteration Stars []. *)
lemma LV_STAR_ONE_empty:
  shows "LV (STAR ONE) [] = {Stars []}"
  by (auto simp add: LV_def elim: Prf.cases intro: Prf.intros)
+ − 120
+ − 121
+ − 122
+ − 123
(*
+ − 124
comments not implemented
+ − 125
+ − 126
p9. The condition "not exists s3 s4..." appears often enough (in particular in
+ − 127
the proof of Lemma 3) to warrant a definition.
+ − 128
+ − 129
*)
+ − 130
+ − 131
+ − 132
(*>*)
+ − 133
371
+ − 134
section\<open>Core of the proof\<close>
367
+ − 135
text \<open>
371
+ − 136
This paper builds on previous work by Ausaf and Urban using
367
+ − 137
regular expressions' bit-coded derivatives to do lexing that
371
+ − 138
is both fast and satisfies the POSIX specification.
367
+ − 139
In their work, a bit-coded algorithm introduced by Sulzmann and Lu
+ − 140
was formally verified in Isabelle, by a very clever use of
+ − 141
flex function and retrieve to carefully mimic the way a value is
+ − 142
built up by the injection function.
+ − 143
+ − 144
In the previous work, Ausaf and Urban established the following equality:
+ − 145
\begin{lemma}
371
+ − 146
@{thm [mode=IfThen] MAIN_decode}
+ − 147
\end{lemma}
+ − 148
+ − 149
This lemma establishes a link with the lexer without bit-codes.
+ − 150
+ − 151
With it we get the correctness of bit-coded algorithm.
+ − 152
\begin{lemma}
+ − 153
@{thm [mode=IfThen] blexer_correctness}
367
+ − 154
\end{lemma}
+ − 155
371
+ − 156
However what is not certain is whether we can add simplification
+ − 157
to the bit-coded algorithm, without breaking the correct lexing output.
372
+ − 158
+ − 159
+ − 160
The reason that we do need to add a simplification phase
+ − 161
after each derivative step of $\textit{blexer}$ is
+ − 162
because it produces intermediate
+ − 163
regular expressions that can grow exponentially.
+ − 164
For example, the regular expression $(a+aa)^*$ after taking
+ − 165
derivative against just 10 $a$s will have size 8192.
+ − 166
+ − 167
%TODO: add figure for this?
+ − 168
376
+ − 169
+ − 170
Therefore, we insert a simplification phase
+ − 171
after each derivation step, as defined below:
372
+ − 172
\begin{lemma}
+ − 173
@{thm blexer_simp_def}
+ − 174
\end{lemma}
+ − 175
376
+ − 176
The simplification function is given as follows:
372
+ − 177
+ − 178
\begin{center}
+ − 179
\begin{tabular}{lcl}
+ − 180
@{thm (lhs) bsimp.simps(1)[of "bs" "r\<^sub>1" "r\<^sub>2"]} & $\dn$ & @{thm (rhs) bsimp.simps(1)[of "bs" "r\<^sub>1" "r\<^sub>2"]}\\
+ − 181
@{thm (lhs) bsimp.simps(2)} & $\dn$ & @{thm (rhs) bsimp.simps(2)}\\
+ − 182
@{thm (lhs) bsimp.simps(3)} & $\dn$ & @{thm (rhs) bsimp.simps(3)}\\
376
+ − 183
+ − 184
\end{tabular}
+ − 185
\end{center}
+ − 186
+ − 187
And the two helper functions are:
+ − 188
\begin{center}
+ − 189
\begin{tabular}{lcl}
+ − 190
@{thm (lhs) bsimp_AALTs.simps(2)[of "bs\<^sub>1" "r" ]} & $\dn$ & @{thm (rhs) bsimp.simps(1)[of "bs\<^sub>1" "r" ]}\\
+ − 191
@{thm (lhs) bsimp_AALTs.simps(2)} & $\dn$ & @{thm (rhs) bsimp.simps(2)}\\
+ − 192
@{thm (lhs) bsimp_AALTs.simps(3)} & $\dn$ & @{thm (rhs) bsimp.simps(3)}\\
+ − 193
372
+ − 194
\end{tabular}
+ − 195
\end{center}
+ − 196
+ − 197
371
+ − 198
This might sound trivial in the case of producing a YES/NO answer,
+ − 199
but once we require a lexing output to be produced (which is required
+ − 200
in applications like compiler front-end, malicious attack domain extraction,
+ − 201
etc.), it is not straightforward to still extract what is needed according
+ − 202
to the POSIX standard.
+ − 203
372
+ − 204
+ − 205
+ − 206
+ − 207
371
+ − 208
By simplification, we mean specifically the following rules:
+ − 209
+ − 210
\begin{center}
+ − 211
\begin{tabular}{lcl}
+ − 212
@{thm[mode=Axiom] rrewrite.intros(1)[of "bs" "r\<^sub>2"]}\\
+ − 213
@{thm[mode=Axiom] rrewrite.intros(2)[of "bs" "r\<^sub>1"]}\\
+ − 214
@{thm[mode=Axiom] rrewrite.intros(3)[of "bs" "bs\<^sub>1" "r\<^sub>1"]}\\
+ − 215
@{thm[mode=Rule] rrewrite.intros(4)[of "r\<^sub>1" "r\<^sub>2" "bs" "r\<^sub>3"]}\\
372
+ − 216
@{thm[mode=Rule] rrewrite.intros(5)[of "r\<^sub>3" "r\<^sub>4" "bs" "r\<^sub>1"]}\\
+ − 217
@{thm[mode=Rule] rrewrite.intros(6)[of "r" "r'" "bs" "rs\<^sub>1" "rs\<^sub>2"]}\\
+ − 218
@{thm[mode=Axiom] rrewrite.intros(7)[of "bs" "rs\<^sub>a" "rs\<^sub>b"]}\\
+ − 219
@{thm[mode=Axiom] rrewrite.intros(8)[of "bs" "rs\<^sub>a" "bs\<^sub>1" "rs\<^sub>1" "rs\<^sub>b"]}\\
371
+ − 220
\end{tabular}
+ − 221
\end{center}
+ − 222
+ − 223
+ − 224
And these can be made compact by the following simplification function:
+ − 225
+ − 226
where the function $\mathit{bsimp\_AALTs}$
+ − 227
+ − 228
The core idea of the proof is that two regular expressions,
+ − 229
if "isomorphic" up to a finite number of rewrite steps, will
+ − 230
remain "isomorphic" when we take the same sequence of
+ − 231
derivatives on both of them.
+ − 232
This can be expressed by the following rewrite relation lemma:
+ − 233
\begin{lemma}
+ − 234
@{thm [mode=IfThen] central}
+ − 235
\end{lemma}
+ − 236
+ − 237
This isomorphic relation implies a property that leads to the
+ − 238
correctness result:
+ − 239
if two (nullable) regular expressions are "rewritable" in many steps
+ − 240
from one another,
372
+ − 241
then a call to function $\textit{bmkeps}$ gives the same
371
+ − 242
bit-sequence :
+ − 243
\begin{lemma}
+ − 244
@{thm [mode=IfThen] rewrites_bmkeps}
+ − 245
\end{lemma}
+ − 246
+ − 247
Given the same bit-sequence, the decode function
+ − 248
will give out the same value, which is the output
+ − 249
of both lexers:
+ − 250
\begin{lemma}
+ − 251
@{thm blexer_def}
+ − 252
\end{lemma}
+ − 253
+ − 254
\begin{lemma}
+ − 255
@{thm blexer_simp_def}
+ − 256
\end{lemma}
+ − 257
+ − 258
And that yields the correctness result:
+ − 259
\begin{lemma}
+ − 260
@{thm blexersimp_correctness}
+ − 261
\end{lemma}
+ − 262
378
+ − 263
The nice thing about the above
376
+ − 264
\<close>
371
+ − 265
389
+ − 266
(* @{thm[mode=Axiom] rrewrite.intros(9)[of "bs" "bs\<^sub>1" "rs"]}\\
+ − 267
@{thm[mode=Axiom] rrewrite.intros(10)[of "bs" ]}\\
+ − 268
@{thm[mode=Axiom] rrewrite.intros(11)[of "bs" "r\<^sub>1"]}\\
+ − 269
@{thm[mode=Rule] rrewrite.intros(12)[of "a\<^sub>1" "a\<^sub>2" "bs" "rs\<^sub>a" "rs\<^sub>b" "rs\<^sub>c"]}\\
+ − 270
*)
378
+ − 271
section \<open> Additional Simp Rules?\<close>
+ − 272
+ − 273
+ − 274
text \<open>
+ − 275
One question someone would ask is:
+ − 276
can we add more "atomic" simplification/rewriting rules,
+ − 277
so the simplification is even more aggressive, making
+ − 278
the intermediate results smaller, and therefore more space-efficient?
+ − 279
For example, one might want to open up alternatives that are children
of a sequence:
+ − 281
+ − 282
\begin{center}
+ − 283
\begin{tabular}{lcl}
+ − 284
@{thm[mode=Rule] aggressive.intros(1)[of "bs" "bs1" "rs" "r"]}\\
+ − 285
\end{tabular}
+ − 286
\end{center}
+ − 287
384
+ − 288
This rule allows us to simplify \mbox{@{term "(ALT (SEQ (ALT a b) c) (SEQ a c))"}}
+ − 289
into \mbox{@{term "ALT (SEQ a c) (SEQ b c)"}},
+ − 290
which cannot be done under the rrewrite rule because only alternatives which are
+ − 291
children of another alternative can be spilled out.
389
+ − 292
+ − 293
Now with this rule we have some examples where we get
+ − 294
even smaller intermediate derivatives than not having this
+ − 295
rule:
+ − 296
$\textit{(a+aa)}^* \rightarrow \textit{(1+1a)(a+aa)}^* \rightarrow
+ − 297
\textit{(1+a)(a+aa)}^* $
380
+ − 298
\<close>
+ − 299
+ − 300
(*This rule allows us to simplify \mbox{@{term "ALT (SEQ (ALT a b) c) (SEQ a c)"}}
378
+ − 301
into \mbox{@{term "ALT (SEQ a c) (SEQ b c)"}},
+ − 302
which cannot be done under the \<leadsto> rule because only alternatives which are
380
+ − 303
children of another alternative can be spilled out.*)
371
+ − 304
section \<open>Introduction\<close>
+ − 305
+ − 306
+ − 307
text \<open>
+ − 308
367
+ − 309
+ − 310
Brzozowski \cite{Brzozowski1964} introduced the notion of the {\em
+ − 311
derivative} @{term "der c r"} of a regular expression \<open>r\<close> w.r.t.\
+ − 312
a character~\<open>c\<close>, and showed that it gave a simple solution to the
+ − 313
problem of matching a string @{term s} with a regular expression @{term
+ − 314
r}: if the derivative of @{term r} w.r.t.\ (in succession) all the
+ − 315
characters of the string matches the empty string, then @{term r}
+ − 316
matches @{term s} (and {\em vice versa}). The derivative has the
+ − 317
property (which may almost be regarded as its specification) that, for
+ − 318
every string @{term s} and regular expression @{term r} and character
+ − 319
@{term c}, one has @{term "cs \<in> L(r)"} if and only if \mbox{@{term "s \<in> L(der c r)"}}.
+ − 320
The beauty of Brzozowski's derivatives is that
+ − 321
they are neatly expressible in any functional language, and easily
+ − 322
definable and reasoned about in theorem provers---the definitions just
+ − 323
consist of inductive datatypes and simple recursive functions. A
+ − 324
mechanised correctness proof of Brzozowski's matcher in for example HOL4
+ − 325
has been mentioned by Owens and Slind~\cite{Owens2008}. Another one in
+ − 326
Isabelle/HOL is part of the work by Krauss and Nipkow \cite{Krauss2011}.
+ − 327
And another one in Coq is given by Coquand and Siles \cite{Coquand2012}.
+ − 328
+ − 329
If a regular expression matches a string, then in general there is more
+ − 330
than one way of how the string is matched. There are two commonly used
+ − 331
disambiguation strategies to generate a unique answer: one is called
+ − 332
GREEDY matching \cite{Frisch2004} and the other is POSIX
+ − 333
matching~\cite{POSIX,Kuklewicz,OkuiSuzuki2010,Sulzmann2014,Vansummeren2006}.
+ − 334
For example consider the string @{term xy} and the regular expression
+ − 335
\mbox{@{term "STAR (ALT (ALT x y) xy)"}}. Either the string can be
+ − 336
matched in two `iterations' by the single letter-regular expressions
+ − 337
@{term x} and @{term y}, or directly in one iteration by @{term xy}. The
+ − 338
first case corresponds to GREEDY matching, which first matches with the
+ − 339
left-most symbol and only matches the next symbol in case of a mismatch
+ − 340
(this is greedy in the sense of preferring instant gratification to
+ − 341
delayed repletion). The second case is POSIX matching, which prefers the
+ − 342
longest match.
+ − 343
+ − 344
In the context of lexing, where an input string needs to be split up
+ − 345
into a sequence of tokens, POSIX is the more natural disambiguation
+ − 346
strategy for what programmers consider basic syntactic building blocks
+ − 347
in their programs. These building blocks are often specified by some
+ − 348
regular expressions, say \<open>r\<^bsub>key\<^esub>\<close> and \<open>r\<^bsub>id\<^esub>\<close> for recognising keywords and identifiers,
+ − 349
respectively. There are a few underlying (informal) rules behind
+ − 350
tokenising a string in a POSIX \cite{POSIX} fashion:
+ − 351
+ − 352
\begin{itemize}
+ − 353
\item[$\bullet$] \emph{The Longest Match Rule} (or \emph{``{M}aximal {M}unch {R}ule''}):
+ − 354
The longest initial substring matched by any regular expression is taken as
+ − 355
next token.\smallskip
+ − 356
+ − 357
\item[$\bullet$] \emph{Priority Rule:}
+ − 358
For a particular longest initial substring, the first (leftmost) regular expression
+ − 359
that can match determines the token.\smallskip
+ − 360
+ − 361
\item[$\bullet$] \emph{Star Rule:} A subexpression repeated by ${}^\star$ shall
+ − 362
not match an empty string unless this is the only match for the repetition.\smallskip
+ − 363
+ − 364
\item[$\bullet$] \emph{Empty String Rule:} An empty string shall be considered to
+ − 365
be longer than no match at all.
+ − 366
\end{itemize}
+ − 367
+ − 368
\noindent Consider for example a regular expression \<open>r\<^bsub>key\<^esub>\<close> for recognising keywords such as \<open>if\<close>,
+ − 369
\<open>then\<close> and so on; and \<open>r\<^bsub>id\<^esub>\<close>
+ − 370
recognising identifiers (say, a single character followed by
+ − 371
characters or numbers). Then we can form the regular expression
+ − 372
\<open>(r\<^bsub>key\<^esub> + r\<^bsub>id\<^esub>)\<^sup>\<star>\<close>
+ − 373
and use POSIX matching to tokenise strings, say \<open>iffoo\<close> and
+ − 374
\<open>if\<close>. For \<open>iffoo\<close> we obtain by the Longest Match Rule
+ − 375
a single identifier token, not a keyword followed by an
+ − 376
identifier. For \<open>if\<close> we obtain by the Priority Rule a keyword
+ − 377
token, not an identifier token---even if \<open>r\<^bsub>id\<^esub>\<close>
+ − 378
matches also. By the Star Rule we know \<open>(r\<^bsub>key\<^esub> +
+ − 379
r\<^bsub>id\<^esub>)\<^sup>\<star>\<close> matches \<open>iffoo\<close>,
+ − 380
respectively \<open>if\<close>, in exactly one `iteration' of the star. The
+ − 381
Empty String Rule is for cases where, for example, the regular expression
+ − 382
\<open>(a\<^sup>\<star>)\<^sup>\<star>\<close> matches against the
+ − 383
string \<open>bc\<close>. Then the longest initial matched substring is the
+ − 384
empty string, which is matched by both the whole regular expression
+ − 385
and the parenthesised subexpression.
+ − 386
+ − 387
+ − 388
One limitation of Brzozowski's matcher is that it only generates a
+ − 389
YES/NO answer for whether a string is being matched by a regular
+ − 390
expression. Sulzmann and Lu~\cite{Sulzmann2014} extended this matcher
+ − 391
to allow generation not just of a YES/NO answer but of an actual
+ − 392
matching, called a [lexical] {\em value}. Assuming a regular
+ − 393
expression matches a string, values encode the information of
+ − 394
\emph{how} the string is matched by the regular expression---that is,
+ − 395
which part of the string is matched by which part of the regular
+ − 396
expression. For this consider again the string \<open>xy\<close> and
+ − 397
the regular expression \mbox{\<open>(x + (y + xy))\<^sup>\<star>\<close>}
+ − 398
(this time fully parenthesised). We can view this regular expression
+ − 399
as tree and if the string \<open>xy\<close> is matched by two Star
+ − 400
`iterations', then the \<open>x\<close> is matched by the left-most
+ − 401
alternative in this tree and the \<open>y\<close> by the right-left alternative. This
+ − 402
suggests to record this matching as
+ − 403
+ − 404
\begin{center}
+ − 405
@{term "Stars [Left(Char x), Right(Left(Char y))]"}
+ − 406
\end{center}
+ − 407
+ − 408
\noindent where @{const Stars}, \<open>Left\<close>, \<open>Right\<close> and \<open>Char\<close> are constructors for values. \<open>Stars\<close> records how many
+ − 409
iterations were used; \<open>Left\<close>, respectively \<open>Right\<close>, which
+ − 410
alternative is used. This `tree view' leads naturally to the idea that
+ − 411
regular expressions act as types and values as inhabiting those types
+ − 412
(see, for example, \cite{HosoyaVouillonPierce2005}). The value for
+ − 413
matching \<open>xy\<close> in a single `iteration', i.e.~the POSIX value,
+ − 414
would look as follows
+ − 415
+ − 416
\begin{center}
+ − 417
@{term "Stars [Seq (Char x) (Char y)]"}
+ − 418
\end{center}
+ − 419
+ − 420
\noindent where @{const Stars} has only a single-element list for the
+ − 421
single iteration and @{const Seq} indicates that @{term xy} is matched
+ − 422
by a sequence regular expression.
+ − 423
+ − 424
%, which we will in what follows
+ − 425
%write more formally as @{term "SEQ x y"}.
+ − 426
+ − 427
+ − 428
Sulzmann and Lu give a simple algorithm to calculate a value that
+ − 429
appears to be the value associated with POSIX matching. The challenge
+ − 430
then is to specify that value, in an algorithm-independent fashion,
+ − 431
and to show that Sulzmann and Lu's derivative-based algorithm does
+ − 432
indeed calculate a value that is correct according to the
+ − 433
specification. The answer given by Sulzmann and Lu
+ − 434
\cite{Sulzmann2014} is to define a relation (called an ``order
+ − 435
relation'') on the set of values of @{term r}, and to show that (once
+ − 436
a string to be matched is chosen) there is a maximum element and that
+ − 437
it is computed by their derivative-based algorithm. This proof idea is
+ − 438
inspired by work of Frisch and Cardelli \cite{Frisch2004} on a GREEDY
+ − 439
regular expression matching algorithm. However, we were not able to
+ − 440
establish transitivity and totality for the ``order relation'' by
+ − 441
Sulzmann and Lu. There are some inherent problems with their approach
+ − 442
(of which some of the proofs are not published in
+ − 443
\cite{Sulzmann2014}); perhaps more importantly, we give in this paper
+ − 444
a simple inductive (and algorithm-independent) definition of what we
+ − 445
call being a {\em POSIX value} for a regular expression @{term r} and
+ − 446
a string @{term s}; we show that the algorithm by Sulzmann and Lu
+ − 447
computes such a value and that such a value is unique. Our proofs are
+ − 448
both done by hand and checked in Isabelle/HOL. The experience of
+ − 449
doing our proofs has been that this mechanical checking was absolutely
+ − 450
essential: this subject area has hidden snares. This was also noted by
+ − 451
Kuklewicz \cite{Kuklewicz} who found that nearly all POSIX matching
+ − 452
implementations are ``buggy'' \cite[Page 203]{Sulzmann2014} and by
+ − 453
Grathwohl et al \cite[Page 36]{CrashCourse2014} who wrote:
+ − 454
+ − 455
\begin{quote}
+ − 456
\it{}``The POSIX strategy is more complicated than the greedy because of
+ − 457
the dependence on information about the length of matched strings in the
+ − 458
various subexpressions.''
+ − 459
\end{quote}
+ − 460
+ − 461
+ − 462
+ − 463
\noindent {\bf Contributions:} We have implemented in Isabelle/HOL the
+ − 464
derivative-based regular expression matching algorithm of
+ − 465
Sulzmann and Lu \cite{Sulzmann2014}. We have proved the correctness of this
+ − 466
algorithm according to our specification of what a POSIX value is (inspired
+ − 467
by work of Vansummeren \cite{Vansummeren2006}). Sulzmann
+ − 468
and Lu sketch in \cite{Sulzmann2014} an informal correctness proof: but to
+ − 469
us it contains unfillable gaps.\footnote{An extended version of
+ − 470
\cite{Sulzmann2014} is available at the website of its first author; this
+ − 471
extended version already includes remarks in the appendix that their
+ − 472
informal proof contains gaps, and possible fixes are not fully worked out.}
+ − 473
Our specification of a POSIX value consists of a simple inductive definition
+ − 474
that given a string and a regular expression uniquely determines this value.
+ − 475
We also show that our definition is equivalent to an ordering
+ − 476
of values based on positions by Okui and Suzuki \cite{OkuiSuzuki2010}.
+ − 477
+ − 478
%Derivatives as calculated by Brzozowski's method are usually more complex
+ − 479
%regular expressions than the initial one; various optimisations are
+ − 480
%possible. We prove the correctness when simplifications of @{term "ALT ZERO r"},
+ − 481
%@{term "ALT r ZERO"}, @{term "SEQ ONE r"} and @{term "SEQ r ONE"} to
+ − 482
%@{term r} are applied.
+ − 483
+ − 484
We extend our results to ??? Bitcoded version??
+ − 485
+ − 486
\<close>
+ − 487
371
+ − 488
+ − 489
+ − 490
367
+ − 491
section \<open>Preliminaries\<close>
+ − 492
+ − 493
text \<open>\noindent Strings in Isabelle/HOL are lists of characters with
+ − 494
the empty string being represented by the empty list, written @{term
+ − 495
"[]"}, and list-cons being written as @{term "DUMMY # DUMMY"}. Often
+ − 496
we use the usual bracket notation for lists also for strings; for
+ − 497
example a string consisting of just a single character @{term c} is
+ − 498
written @{term "[c]"}. We use the usual definitions for
+ − 499
\emph{prefixes} and \emph{strict prefixes} of strings. By using the
+ − 500
type @{type char} for characters we have a supply of finitely many
+ − 501
characters roughly corresponding to the ASCII character set. Regular
+ − 502
expressions are defined as usual as the elements of the following
+ − 503
inductive datatype:
+ − 504
+ − 505
\begin{center}
+ − 506
\<open>r :=\<close>
+ − 507
@{const "ZERO"} $\mid$
+ − 508
@{const "ONE"} $\mid$
+ − 509
@{term "CH c"} $\mid$
+ − 510
@{term "ALT r\<^sub>1 r\<^sub>2"} $\mid$
+ − 511
@{term "SEQ r\<^sub>1 r\<^sub>2"} $\mid$
+ − 512
@{term "STAR r"}
+ − 513
\end{center}
+ − 514
+ − 515
\noindent where @{const ZERO} stands for the regular expression that does
+ − 516
not match any string, @{const ONE} for the regular expression that matches
+ − 517
only the empty string and @{term c} for matching a character literal. The
+ − 518
language of a regular expression is also defined as usual by the
+ − 519
recursive function @{term L} with the six clauses:
+ − 520
+ − 521
\begin{center}
+ − 522
\begin{tabular}{l@ {\hspace{4mm}}rcl}
+ − 523
\textit{(1)} & @{thm (lhs) L.simps(1)} & $\dn$ & @{thm (rhs) L.simps(1)}\\
+ − 524
\textit{(2)} & @{thm (lhs) L.simps(2)} & $\dn$ & @{thm (rhs) L.simps(2)}\\
+ − 525
\textit{(3)} & @{thm (lhs) L.simps(3)} & $\dn$ & @{thm (rhs) L.simps(3)}\\
+ − 526
\textit{(4)} & @{thm (lhs) L.simps(4)[of "r\<^sub>1" "r\<^sub>2"]} & $\dn$ &
+ − 527
@{thm (rhs) L.simps(4)[of "r\<^sub>1" "r\<^sub>2"]}\\
+ − 528
\textit{(5)} & @{thm (lhs) L.simps(5)[of "r\<^sub>1" "r\<^sub>2"]} & $\dn$ &
+ − 529
@{thm (rhs) L.simps(5)[of "r\<^sub>1" "r\<^sub>2"]}\\
+ − 530
\textit{(6)} & @{thm (lhs) L.simps(6)} & $\dn$ & @{thm (rhs) L.simps(6)}\\
+ − 531
\end{tabular}
+ − 532
\end{center}
+ − 533
+ − 534
\noindent In clause \textit{(4)} we use the operation @{term "DUMMY ;;
+ − 535
DUMMY"} for the concatenation of two languages (it is also list-append for
+ − 536
strings). We use the star-notation for regular expressions and for
+ − 537
languages (in the last clause above). The star for languages is defined
+ − 538
inductively by two clauses: \<open>(i)\<close> the empty string being in
+ − 539
the star of a language and \<open>(ii)\<close> if @{term "s\<^sub>1"} is in a
+ − 540
language and @{term "s\<^sub>2"} in the star of this language, then also @{term
+ − 541
"s\<^sub>1 @ s\<^sub>2"} is in the star of this language. It will also be convenient
+ − 542
to use the following notion of a \emph{semantic derivative} (or \emph{left
+ − 543
quotient}) of a language defined as
+ − 544
%
+ − 545
\begin{center}
+ − 546
@{thm Der_def}\;.
+ − 547
\end{center}
+ − 548
+ − 549
\noindent
+ − 550
For semantic derivatives we have the following equations (for example
+ − 551
mechanically proved in \cite{Krauss2011}):
+ − 552
%
+ − 553
\begin{equation}\label{SemDer}
+ − 554
\begin{array}{lcl}
+ − 555
@{thm (lhs) Der_null} & \dn & @{thm (rhs) Der_null}\\
+ − 556
@{thm (lhs) Der_empty} & \dn & @{thm (rhs) Der_empty}\\
+ − 557
@{thm (lhs) Der_char} & \dn & @{thm (rhs) Der_char}\\
+ − 558
@{thm (lhs) Der_union} & \dn & @{thm (rhs) Der_union}\\
+ − 559
@{thm (lhs) Der_Sequ} & \dn & @{thm (rhs) Der_Sequ}\\
+ − 560
@{thm (lhs) Der_star} & \dn & @{thm (rhs) Der_star}
+ − 561
\end{array}
+ − 562
\end{equation}
+ − 563
+ − 564
+ − 565
\noindent \emph{\Brz's derivatives} of regular expressions
+ − 566
\cite{Brzozowski1964} can be easily defined by two recursive functions:
+ − 567
the first is from regular expressions to booleans (implementing a test
+ − 568
when a regular expression can match the empty string), and the second
+ − 569
takes a regular expression and a character to a (derivative) regular
+ − 570
expression:
+ − 571
+ − 572
\begin{center}
+ − 573
\begin{tabular}{lcl}
+ − 574
@{thm (lhs) nullable.simps(1)} & $\dn$ & @{thm (rhs) nullable.simps(1)}\\
+ − 575
@{thm (lhs) nullable.simps(2)} & $\dn$ & @{thm (rhs) nullable.simps(2)}\\
+ − 576
@{thm (lhs) nullable.simps(3)} & $\dn$ & @{thm (rhs) nullable.simps(3)}\\
+ − 577
@{thm (lhs) nullable.simps(4)[of "r\<^sub>1" "r\<^sub>2"]} & $\dn$ & @{thm (rhs) nullable.simps(4)[of "r\<^sub>1" "r\<^sub>2"]}\\
+ − 578
@{thm (lhs) nullable.simps(5)[of "r\<^sub>1" "r\<^sub>2"]} & $\dn$ & @{thm (rhs) nullable.simps(5)[of "r\<^sub>1" "r\<^sub>2"]}\\
+ − 579
@{thm (lhs) nullable.simps(6)} & $\dn$ & @{thm (rhs) nullable.simps(6)}\medskip\\
+ − 580
+ − 581
% \end{tabular}
+ − 582
% \end{center}
+ − 583
+ − 584
% \begin{center}
+ − 585
% \begin{tabular}{lcl}
+ − 586
+ − 587
@{thm (lhs) der.simps(1)} & $\dn$ & @{thm (rhs) der.simps(1)}\\
+ − 588
@{thm (lhs) der.simps(2)} & $\dn$ & @{thm (rhs) der.simps(2)}\\
+ − 589
@{thm (lhs) der.simps(3)} & $\dn$ & @{thm (rhs) der.simps(3)}\\
+ − 590
@{thm (lhs) der.simps(4)[of c "r\<^sub>1" "r\<^sub>2"]} & $\dn$ & @{thm (rhs) der.simps(4)[of c "r\<^sub>1" "r\<^sub>2"]}\\
+ − 591
@{thm (lhs) der.simps(5)[of c "r\<^sub>1" "r\<^sub>2"]} & $\dn$ & @{thm (rhs) der.simps(5)[of c "r\<^sub>1" "r\<^sub>2"]}\\
+ − 592
@{thm (lhs) der.simps(6)} & $\dn$ & @{thm (rhs) der.simps(6)}
+ − 593
\end{tabular}
+ − 594
\end{center}
+ − 595
+ − 596
\noindent
+ − 597
We may extend this definition to give derivatives w.r.t.~strings:
+ − 598
+ − 599
\begin{center}
+ − 600
\begin{tabular}{lcl}
+ − 601
@{thm (lhs) ders.simps(1)} & $\dn$ & @{thm (rhs) ders.simps(1)}\\
+ − 602
@{thm (lhs) ders.simps(2)} & $\dn$ & @{thm (rhs) ders.simps(2)}\\
+ − 603
\end{tabular}
+ − 604
\end{center}
+ − 605
+ − 606
\noindent Given the equations in \eqref{SemDer}, it is a relatively easy
+ − 607
exercise in mechanical reasoning to establish that
+ − 608
+ − 609
\begin{proposition}\label{derprop}\mbox{}\\
+ − 610
\begin{tabular}{ll}
+ − 611
\textit{(1)} & @{thm (lhs) nullable_correctness} if and only if
+ − 612
@{thm (rhs) nullable_correctness}, and \\
+ − 613
\textit{(2)} & @{thm[mode=IfThen] der_correctness}.
+ − 614
\end{tabular}
+ − 615
\end{proposition}
+ − 616
+ − 617
\noindent With this in place it is also very routine to prove that the
+ − 618
regular expression matcher defined as
+ − 619
%
+ − 620
\begin{center}
+ − 621
@{thm match_def}
+ − 622
\end{center}
+ − 623
+ − 624
\noindent gives a positive answer if and only if @{term "s \<in> L r"}.
+ − 625
Consequently, this regular expression matching algorithm satisfies the
+ − 626
usual specification for regular expression matching. While the matcher
+ − 627
above calculates a provably correct YES/NO answer for whether a regular
+ − 628
expression matches a string or not, the novel idea of Sulzmann and Lu
+ − 629
\cite{Sulzmann2014} is to append another phase to this algorithm in order
+ − 630
to calculate a [lexical] value. We will explain the details next.
+ − 631
+ − 632
\<close>
+ − 633
+ − 634
section \<open>POSIX Regular Expression Matching\label{posixsec}\<close>
+ − 635
+ − 636
text \<open>
+ − 637
+ − 638
There have been many previous works that use values for encoding
+ − 639
\emph{how} a regular expression matches a string.
+ − 640
The clever idea by Sulzmann and Lu \cite{Sulzmann2014} is to
+ − 641
define a function on values that mirrors (but inverts) the
+ − 642
construction of the derivative on regular expressions. \emph{Values}
+ − 643
are defined as the inductive datatype
+ − 644
+ − 645
\begin{center}
+ − 646
\<open>v :=\<close>
+ − 647
@{const "Void"} $\mid$
+ − 648
@{term "val.Char c"} $\mid$
+ − 649
@{term "Left v"} $\mid$
+ − 650
@{term "Right v"} $\mid$
+ − 651
@{term "Seq v\<^sub>1 v\<^sub>2"} $\mid$
+ − 652
@{term "Stars vs"}
+ − 653
\end{center}
+ − 654
+ − 655
\noindent where we use @{term vs} to stand for a list of
+ − 656
values. (This is similar to the approach taken by Frisch and
+ − 657
Cardelli for GREEDY matching \cite{Frisch2004}, and Sulzmann and Lu
+ − 658
for POSIX matching \cite{Sulzmann2014}). The string underlying a
+ − 659
value can be calculated by the @{const flat} function, written
+ − 660
@{term "flat DUMMY"} and defined as:
+ − 661
+ − 662
\begin{center}
+ − 663
\begin{tabular}[t]{lcl}
+ − 664
@{thm (lhs) flat.simps(1)} & $\dn$ & @{thm (rhs) flat.simps(1)}\\
+ − 665
@{thm (lhs) flat.simps(2)} & $\dn$ & @{thm (rhs) flat.simps(2)}\\
+ − 666
@{thm (lhs) flat.simps(3)} & $\dn$ & @{thm (rhs) flat.simps(3)}\\
+ − 667
@{thm (lhs) flat.simps(4)} & $\dn$ & @{thm (rhs) flat.simps(4)}
+ − 668
\end{tabular}\hspace{14mm}
+ − 669
\begin{tabular}[t]{lcl}
+ − 670
@{thm (lhs) flat.simps(5)[of "v\<^sub>1" "v\<^sub>2"]} & $\dn$ & @{thm (rhs) flat.simps(5)[of "v\<^sub>1" "v\<^sub>2"]}\\
+ − 671
@{thm (lhs) flat.simps(6)} & $\dn$ & @{thm (rhs) flat.simps(6)}\\
+ − 672
@{thm (lhs) flat.simps(7)} & $\dn$ & @{thm (rhs) flat.simps(7)}\\
+ − 673
\end{tabular}
+ − 674
\end{center}
+ − 675
+ − 676
\noindent We will sometimes refer to the underlying string of a
+ − 677
value as \emph{flattened value}. We will also overload our notation and
+ − 678
use @{term "flats vs"} for flattening a list of values and concatenating
+ − 679
the resulting strings.
+ − 680
+ − 681
Sulzmann and Lu define
+ − 682
inductively an \emph{inhabitation relation} that associates values to
+ − 683
regular expressions. We define this relation as
+ − 684
follows:\footnote{Note that the rule for @{term Stars} differs from
+ − 685
our earlier paper \cite{AusafDyckhoffUrban2016}. There we used the
+ − 686
original definition by Sulzmann and Lu which does not require that
+ − 687
the values @{term "v \<in> set vs"} flatten to a non-empty
+ − 688
string. The reason for introducing the more restricted version of
+ − 689
lexical values is convenience later on when reasoning about an
+ − 690
ordering relation for values.}
+ − 691
+ − 692
\begin{center}
+ − 693
\begin{tabular}{c@ {\hspace{12mm}}c}\label{prfintros}
+ − 694
\\[-8mm]
+ − 695
@{thm[mode=Axiom] Prf.intros(4)} &
+ − 696
@{thm[mode=Axiom] Prf.intros(5)[of "c"]}\\[4mm]
+ − 697
@{thm[mode=Rule] Prf.intros(2)[of "v\<^sub>1" "r\<^sub>1" "r\<^sub>2"]} &
+ − 698
@{thm[mode=Rule] Prf.intros(3)[of "v\<^sub>2" "r\<^sub>1" "r\<^sub>2"]}\\[4mm]
+ − 699
@{thm[mode=Rule] Prf.intros(1)[of "v\<^sub>1" "r\<^sub>1" "v\<^sub>2" "r\<^sub>2"]} &
+ − 700
@{thm[mode=Rule] Prf.intros(6)[of "vs"]}
+ − 701
\end{tabular}
+ − 702
\end{center}
+ − 703
+ − 704
\noindent where in the clause for @{const "Stars"} we use the
+ − 705
notation @{term "v \<in> set vs"} for indicating that \<open>v\<close> is a
+ − 706
member in the list \<open>vs\<close>. We require in this rule that every
+ − 707
value in @{term vs} flattens to a non-empty string. The idea is that
+ − 708
@{term "Stars"}-values satisfy the informal Star Rule (see Introduction)
+ − 709
where the $^\star$ does not match the empty string unless this is
+ − 710
the only match for the repetition. Note also that no values are
+ − 711
associated with the regular expression @{term ZERO}, and that the
+ − 712
only value associated with the regular expression @{term ONE} is
+ − 713
@{term Void}. It is routine to establish how values ``inhabiting''
+ − 714
a regular expression correspond to the language of a regular
+ − 715
expression, namely
+ − 716
+ − 717
\begin{proposition}\label{inhabs}
+ − 718
@{thm L_flat_Prf}
+ − 719
\end{proposition}
+ − 720
+ − 721
\noindent
+ − 722
Given a regular expression \<open>r\<close> and a string \<open>s\<close>, we define the
+ − 723
set of all \emph{Lexical Values} inhabited by \<open>r\<close> with the underlying string
+ − 724
being \<open>s\<close>:\footnote{Okui and Suzuki refer to our lexical values
+ − 725
as \emph{canonical values} in \cite{OkuiSuzuki2010}. The notion of \emph{non-problematic
+ − 726
values} by Frisch and Cardelli \cite{Frisch2004} is related, but not identical
+ − 727
to our lexical values.}
+ − 728
+ − 729
\begin{center}
+ − 730
@{thm LV_def}
+ − 731
\end{center}
+ − 732
+ − 733
\noindent The main property of @{term "LV r s"} is that it is always finite.
+ − 734
+ − 735
\begin{proposition}
+ − 736
@{thm LV_finite}
+ − 737
\end{proposition}
+ − 738
+ − 739
\noindent This finiteness property does not hold in general if we
+ − 740
remove the side-condition about @{term "flat v \<noteq> []"} in the
+ − 741
@{term Stars}-rule above. For example using Sulzmann and Lu's
+ − 742
less restrictive definition, @{term "LV (STAR ONE) []"} would contain
+ − 743
infinitely many values, but according to our more restricted
+ − 744
definition only a single value, namely @{thm LV_STAR_ONE_empty}.
+ − 745
+ − 746
If a regular expression \<open>r\<close> matches a string \<open>s\<close>, then
+ − 747
generally the set @{term "LV r s"} is not just a singleton set. In
+ − 748
case of POSIX matching the problem is to calculate the unique lexical value
+ − 749
that satisfies the (informal) POSIX rules from the Introduction.
+ − 750
Graphically the POSIX value calculation algorithm by Sulzmann and Lu
+ − 751
can be illustrated by the picture in Figure~\ref{Sulz} where the
+ − 752
path from the left to the right involving @{term
+ − 753
derivatives}/@{const nullable} is the first phase of the algorithm
+ − 754
(calculating successive \Brz's derivatives) and @{const
+ − 755
mkeps}/\<open>inj\<close>, the path from right to left, the second
+ − 756
phase. This picture shows the steps required when a regular
+ − 757
expression, say \<open>r\<^sub>1\<close>, matches the string @{term
+ − 758
"[a,b,c]"}. We first build the three derivatives (according to
+ − 759
@{term a}, @{term b} and @{term c}). We then use @{const nullable}
+ − 760
to find out whether the resulting derivative regular expression
+ − 761
@{term "r\<^sub>4"} can match the empty string. If yes, we call the
+ − 762
function @{const mkeps} that produces a value @{term "v\<^sub>4"}
+ − 763
for how @{term "r\<^sub>4"} can match the empty string (taking into
+ − 764
account the POSIX constraints in case there are several ways). This
+ − 765
function is defined by the clauses:
+ − 766
+ − 767
\begin{figure}[t]
+ − 768
\begin{center}
+ − 769
\begin{tikzpicture}[scale=2,node distance=1.3cm,
+ − 770
every node/.style={minimum size=6mm}]
+ − 771
\node (r1) {@{term "r\<^sub>1"}};
+ − 772
\node (r2) [right=of r1]{@{term "r\<^sub>2"}};
+ − 773
\draw[->,line width=1mm](r1)--(r2) node[above,midway] {@{term "der a DUMMY"}};
+ − 774
\node (r3) [right=of r2]{@{term "r\<^sub>3"}};
+ − 775
\draw[->,line width=1mm](r2)--(r3) node[above,midway] {@{term "der b DUMMY"}};
+ − 776
\node (r4) [right=of r3]{@{term "r\<^sub>4"}};
+ − 777
\draw[->,line width=1mm](r3)--(r4) node[above,midway] {@{term "der c DUMMY"}};
+ − 778
\draw (r4) node[anchor=west] {\;\raisebox{3mm}{@{term nullable}}};
+ − 779
\node (v4) [below=of r4]{@{term "v\<^sub>4"}};
+ − 780
\draw[->,line width=1mm](r4) -- (v4);
+ − 781
\node (v3) [left=of v4] {@{term "v\<^sub>3"}};
+ − 782
\draw[->,line width=1mm](v4)--(v3) node[below,midway] {\<open>inj r\<^sub>3 c\<close>};
+ − 783
\node (v2) [left=of v3]{@{term "v\<^sub>2"}};
+ − 784
\draw[->,line width=1mm](v3)--(v2) node[below,midway] {\<open>inj r\<^sub>2 b\<close>};
+ − 785
\node (v1) [left=of v2] {@{term "v\<^sub>1"}};
+ − 786
\draw[->,line width=1mm](v2)--(v1) node[below,midway] {\<open>inj r\<^sub>1 a\<close>};
+ − 787
\draw (r4) node[anchor=north west] {\;\raisebox{-8mm}{@{term "mkeps"}}};
+ − 788
\end{tikzpicture}
+ − 789
\end{center}
+ − 790
\mbox{}\\[-13mm]
+ − 791
+ − 792
\caption{The two phases of the algorithm by Sulzmann \& Lu \cite{Sulzmann2014},
+ − 793
matching the string @{term "[a,b,c]"}. The first phase (the arrows from
+ − 794
left to right) is \Brz's matcher building successive derivatives. If the
+ − 795
last regular expression is @{term nullable}, then the functions of the
+ − 796
second phase are called (the top-down and right-to-left arrows): first
+ − 797
@{term mkeps} calculates a value @{term "v\<^sub>4"} witnessing
+ − 798
how the empty string has been recognised by @{term "r\<^sub>4"}. After
+ − 799
that the function @{term inj} ``injects back'' the characters of the string into
+ − 800
the values.
+ − 801
\label{Sulz}}
+ − 802
\end{figure}
+ − 803
+ − 804
\begin{center}
+ − 805
\begin{tabular}{lcl}
+ − 806
@{thm (lhs) mkeps.simps(1)} & $\dn$ & @{thm (rhs) mkeps.simps(1)}\\
+ − 807
@{thm (lhs) mkeps.simps(2)[of "r\<^sub>1" "r\<^sub>2"]} & $\dn$ & @{thm (rhs) mkeps.simps(2)[of "r\<^sub>1" "r\<^sub>2"]}\\
+ − 808
@{thm (lhs) mkeps.simps(3)[of "r\<^sub>1" "r\<^sub>2"]} & $\dn$ & @{thm (rhs) mkeps.simps(3)[of "r\<^sub>1" "r\<^sub>2"]}\\
+ − 809
@{thm (lhs) mkeps.simps(4)} & $\dn$ & @{thm (rhs) mkeps.simps(4)}\\
+ − 810
\end{tabular}
+ − 811
\end{center}
+ − 812
+ − 813
\noindent Note that this function needs only to be partially defined,
+ − 814
namely only for regular expressions that are nullable. In case @{const
+ − 815
nullable} fails, the string @{term "[a,b,c]"} cannot be matched by @{term
+ − 816
"r\<^sub>1"} and the null value @{term "None"} is returned. Note also how this function
+ − 817
makes some subtle choices leading to a POSIX value: for example if an
+ − 818
alternative regular expression, say @{term "ALT r\<^sub>1 r\<^sub>2"}, can
+ − 819
match the empty string and furthermore @{term "r\<^sub>1"} can match the
+ − 820
empty string, then we return a \<open>Left\<close>-value. The \<open>Right\<close>-value will only be returned if @{term "r\<^sub>1"} cannot match the empty
+ − 821
string.
+ − 822
+ − 823
The most interesting idea from Sulzmann and Lu \cite{Sulzmann2014} is
+ − 824
the construction of a value for how @{term "r\<^sub>1"} can match the
+ − 825
string @{term "[a,b,c]"} from the value how the last derivative, @{term
+ − 826
"r\<^sub>4"} in Fig.~\ref{Sulz}, can match the empty string. Sulzmann and
+ − 827
Lu achieve this by stepwise ``injecting back'' the characters into the
+ − 828
values thus inverting the operation of building derivatives, but on the level
+ − 829
of values. The corresponding function, called @{term inj}, takes three
+ − 830
arguments, a regular expression, a character and a value. For example in
+ − 831
the first (or right-most) @{term inj}-step in Fig.~\ref{Sulz} the regular
+ − 832
expression @{term "r\<^sub>3"}, the character @{term c} from the last
+ − 833
derivative step and @{term "v\<^sub>4"}, which is the value corresponding
+ − 834
to the derivative regular expression @{term "r\<^sub>4"}. The result is
+ − 835
the new value @{term "v\<^sub>3"}. The final result of the algorithm is
+ − 836
the value @{term "v\<^sub>1"}. The @{term inj} function is defined by recursion on regular
+ − 837
expressions and by analysing the shape of values (corresponding to
+ − 838
the derivative regular expressions).
+ − 839
%
+ − 840
\begin{center}
+ − 841
\begin{tabular}{l@ {\hspace{5mm}}lcl}
+ − 842
\textit{(1)} & @{thm (lhs) injval.simps(1)} & $\dn$ & @{thm (rhs) injval.simps(1)}\\
+ − 843
\textit{(2)} & @{thm (lhs) injval.simps(2)[of "r\<^sub>1" "r\<^sub>2" "c" "v\<^sub>1"]} & $\dn$ &
+ − 844
@{thm (rhs) injval.simps(2)[of "r\<^sub>1" "r\<^sub>2" "c" "v\<^sub>1"]}\\
+ − 845
\textit{(3)} & @{thm (lhs) injval.simps(3)[of "r\<^sub>1" "r\<^sub>2" "c" "v\<^sub>2"]} & $\dn$ &
+ − 846
@{thm (rhs) injval.simps(3)[of "r\<^sub>1" "r\<^sub>2" "c" "v\<^sub>2"]}\\
+ − 847
\textit{(4)} & @{thm (lhs) injval.simps(4)[of "r\<^sub>1" "r\<^sub>2" "c" "v\<^sub>1" "v\<^sub>2"]} & $\dn$
+ − 848
& @{thm (rhs) injval.simps(4)[of "r\<^sub>1" "r\<^sub>2" "c" "v\<^sub>1" "v\<^sub>2"]}\\
+ − 849
\textit{(5)} & @{thm (lhs) injval.simps(5)[of "r\<^sub>1" "r\<^sub>2" "c" "v\<^sub>1" "v\<^sub>2"]} & $\dn$
+ − 850
& @{thm (rhs) injval.simps(5)[of "r\<^sub>1" "r\<^sub>2" "c" "v\<^sub>1" "v\<^sub>2"]}\\
+ − 851
\textit{(6)} & @{thm (lhs) injval.simps(6)[of "r\<^sub>1" "r\<^sub>2" "c" "v\<^sub>2"]} & $\dn$
+ − 852
& @{thm (rhs) injval.simps(6)[of "r\<^sub>1" "r\<^sub>2" "c" "v\<^sub>2"]}\\
+ − 853
\textit{(7)} & @{thm (lhs) injval.simps(7)[of "r" "c" "v" "vs"]} & $\dn$
+ − 854
& @{thm (rhs) injval.simps(7)[of "r" "c" "v" "vs"]}\\
+ − 855
\end{tabular}
+ − 856
\end{center}
+ − 857
+ − 858
\noindent To better understand what is going on in this definition it
+ − 859
might be instructive to look first at the three sequence cases (clauses
+ − 860
\textit{(4)} -- \textit{(6)}). In each case we need to construct an ``injected value'' for
+ − 861
@{term "SEQ r\<^sub>1 r\<^sub>2"}. This must be a value of the form @{term
+ − 862
"Seq DUMMY DUMMY"}\,. Recall the clause of the \<open>derivative\<close>-function
+ − 863
for sequence regular expressions:
+ − 864
+ − 865
\begin{center}
+ − 866
@{thm (lhs) der.simps(5)[of c "r\<^sub>1" "r\<^sub>2"]} $\dn$ @{thm (rhs) der.simps(5)[of c "r\<^sub>1" "r\<^sub>2"]}
+ − 867
\end{center}
+ − 868
+ − 869
\noindent Consider first the \<open>else\<close>-branch where the derivative is @{term
+ − 870
"SEQ (der c r\<^sub>1) r\<^sub>2"}. The corresponding value must therefore
+ − 871
be of the form @{term "Seq v\<^sub>1 v\<^sub>2"}, which matches the left-hand
+ − 872
side in clause~\textit{(4)} of @{term inj}. In the \<open>if\<close>-branch the derivative is an
+ − 873
alternative, namely @{term "ALT (SEQ (der c r\<^sub>1) r\<^sub>2) (der c
+ − 874
r\<^sub>2)"}. This means we either have to consider a \<open>Left\<close>- or
+ − 875
\<open>Right\<close>-value. In case of the \<open>Left\<close>-value we know further it
+ − 876
must be a value for a sequence regular expression. Therefore the pattern
+ − 877
we match in the clause \textit{(5)} is @{term "Left (Seq v\<^sub>1 v\<^sub>2)"},
+ − 878
while in \textit{(6)} it is just @{term "Right v\<^sub>2"}. One more interesting
+ − 879
point is in the right-hand side of clause \textit{(6)}: since in this case the
+ − 880
regular expression \<open>r\<^sub>1\<close> does not ``contribute'' to
+ − 881
matching the string---that is, it only matches the empty string---we need to
+ − 882
call @{const mkeps} in order to construct a value for how @{term "r\<^sub>1"}
+ − 883
can match this empty string. A similar argument applies for why we can
+ − 884
expect in the left-hand side of clause \textit{(7)} that the value is of the form
+ − 885
@{term "Seq v (Stars vs)"}---the derivative of a star is @{term "SEQ (der c r)
+ − 886
(STAR r)"}. Finally, the reason for why we can ignore the second argument
+ − 887
in clause \textit{(1)} of @{term inj} is that it will only ever be called in cases
+ − 888
where @{term "c=d"}, but the usual linearity restrictions in patterns do
+ − 889
not allow us to build this constraint explicitly into our function
+ − 890
definition.\footnote{Sulzmann and Lu state this clause as @{thm (lhs)
+ − 891
injval.simps(1)[of "c" "c"]} $\dn$ @{thm (rhs) injval.simps(1)[of "c"]},
+ − 892
but our deviation is harmless.}
+ − 893
+ − 894
The idea of the @{term inj}-function to ``inject'' a character, say
+ − 895
@{term c}, into a value can be made precise by the first part of the
+ − 896
following lemma, which shows that the underlying string of an injected
+ − 897
value has a prepended character @{term c}; the second part shows that
+ − 898
the underlying string of an @{const mkeps}-value is always the empty
+ − 899
string (given the regular expression is nullable since otherwise
+ − 900
\<open>mkeps\<close> might not be defined).
+ − 901
+ − 902
\begin{lemma}\mbox{}\smallskip\\\label{Prf_injval_flat}
+ − 903
\begin{tabular}{ll}
+ − 904
(1) & @{thm[mode=IfThen] Prf_injval_flat}\\
+ − 905
(2) & @{thm[mode=IfThen] mkeps_flat}
+ − 906
\end{tabular}
+ − 907
\end{lemma}
+ − 908
+ − 909
\begin{proof}
+ − 910
Both properties are by routine inductions: the first one can, for example,
+ − 911
be proved by induction over the definition of @{term derivatives}; the second by
+ − 912
an induction on @{term r}. There are no interesting cases.\qed
+ − 913
\end{proof}
+ − 914
+ − 915
Having defined the @{const mkeps} and \<open>inj\<close> function we can extend
+ − 916
\Brz's matcher so that a value is constructed (assuming the
+ − 917
regular expression matches the string). The clauses of the Sulzmann and Lu lexer are
+ − 918
+ − 919
\begin{center}
+ − 920
\begin{tabular}{lcl}
+ − 921
@{thm (lhs) lexer.simps(1)} & $\dn$ & @{thm (rhs) lexer.simps(1)}\\
+ − 922
@{thm (lhs) lexer.simps(2)} & $\dn$ & \<open>case\<close> @{term "lexer (der c r) s"} \<open>of\<close>\\
+ − 923
& & \phantom{$|$} @{term "None"} \<open>\<Rightarrow>\<close> @{term None}\\
+ − 924
& & $|$ @{term "Some v"} \<open>\<Rightarrow>\<close> @{term "Some (injval r c v)"}
+ − 925
\end{tabular}
+ − 926
\end{center}
+ − 927
+ − 928
\noindent If the regular expression does not match the string, @{const None} is
+ − 929
returned. If the regular expression \emph{does}
+ − 930
match the string, then @{const Some} value is returned. One important
+ − 931
virtue of this algorithm is that it can be implemented with ease in any
+ − 932
functional programming language and also in Isabelle/HOL. In the remaining
+ − 933
part of this section we prove that this algorithm is correct.
+ − 934
+ − 935
The well-known idea of POSIX matching is informally defined by some
+ − 936
rules such as the Longest Match and Priority Rules (see
+ − 937
Introduction); as correctly argued in \cite{Sulzmann2014}, this
+ − 938
needs formal specification. Sulzmann and Lu define an ``ordering
+ − 939
relation'' between values and argue that there is a maximum value,
+ − 940
as given by the derivative-based algorithm. In contrast, we shall
+ − 941
introduce a simple inductive definition that specifies directly what
+ − 942
a \emph{POSIX value} is, incorporating the POSIX-specific choices
+ − 943
into the side-conditions of our rules. Our definition is inspired by
+ − 944
the matching relation given by Vansummeren~\cite{Vansummeren2006}.
+ − 945
The relation we define is ternary and
+ − 946
written as \mbox{@{term "s \<in> r \<rightarrow> v"}}, relating
+ − 947
strings, regular expressions and values; the inductive rules are given in
+ − 948
Figure~\ref{POSIXrules}.
+ − 949
We can prove that given a string @{term s} and regular expression @{term
+ − 950
r}, the POSIX value @{term v} is uniquely determined by @{term "s \<in> r \<rightarrow> v"}.
+ − 951
+ − 952
%
+ − 953
\begin{figure}[t]
+ − 954
\begin{center}
+ − 955
\begin{tabular}{c}
+ − 956
@{thm[mode=Axiom] Posix.intros(1)}\<open>P\<close>@{term "ONE"} \qquad
+ − 957
@{thm[mode=Axiom] Posix.intros(2)}\<open>P\<close>@{term "c"}\medskip\\
+ − 958
@{thm[mode=Rule] Posix.intros(3)[of "s" "r\<^sub>1" "v" "r\<^sub>2"]}\<open>P+L\<close>\qquad
+ − 959
@{thm[mode=Rule] Posix.intros(4)[of "s" "r\<^sub>2" "v" "r\<^sub>1"]}\<open>P+R\<close>\medskip\\
+ − 960
$\mprset{flushleft}
+ − 961
\inferrule
+ − 962
{@{thm (prem 1) Posix.intros(5)[of "s\<^sub>1" "r\<^sub>1" "v\<^sub>1" "s\<^sub>2" "r\<^sub>2" "v\<^sub>2"]} \qquad
+ − 963
@{thm (prem 2) Posix.intros(5)[of "s\<^sub>1" "r\<^sub>1" "v\<^sub>1" "s\<^sub>2" "r\<^sub>2" "v\<^sub>2"]} \\\\
+ − 964
@{thm (prem 3) Posix.intros(5)[of "s\<^sub>1" "r\<^sub>1" "v\<^sub>1" "s\<^sub>2" "r\<^sub>2" "v\<^sub>2"]}}
+ − 965
{@{thm (concl) Posix.intros(5)[of "s\<^sub>1" "r\<^sub>1" "v\<^sub>1" "s\<^sub>2" "r\<^sub>2" "v\<^sub>2"]}}$\<open>PS\<close>\\
+ − 966
@{thm[mode=Axiom] Posix.intros(7)}\<open>P[]\<close>\medskip\\
+ − 967
$\mprset{flushleft}
+ − 968
\inferrule
+ − 969
{@{thm (prem 1) Posix.intros(6)[of "s\<^sub>1" "r" "v" "s\<^sub>2" "vs"]} \qquad
+ − 970
@{thm (prem 2) Posix.intros(6)[of "s\<^sub>1" "r" "v" "s\<^sub>2" "vs"]} \qquad
+ − 971
@{thm (prem 3) Posix.intros(6)[of "s\<^sub>1" "r" "v" "s\<^sub>2" "vs"]} \\\\
+ − 972
@{thm (prem 4) Posix.intros(6)[of "s\<^sub>1" "r" "v" "s\<^sub>2" "vs"]}}
+ − 973
{@{thm (concl) Posix.intros(6)[of "s\<^sub>1" "r" "v" "s\<^sub>2" "vs"]}}$\<open>P\<star>\<close>
+ − 974
\end{tabular}
+ − 975
\end{center}
+ − 976
\caption{Our inductive definition of POSIX values.}\label{POSIXrules}
+ − 977
\end{figure}
+ − 978
+ − 979
+ − 980
+ − 981
\begin{theorem}\mbox{}\smallskip\\\label{posixdeterm}
+ − 982
\begin{tabular}{ll}
+ − 983
(1) & If @{thm (prem 1) Posix1(1)} then @{thm (concl)
+ − 984
Posix1(1)} and @{thm (concl) Posix1(2)}.\\
+ − 985
(2) & @{thm[mode=IfThen] Posix_determ(1)[of _ _ "v" "v'"]}
+ − 986
\end{tabular}
+ − 987
\end{theorem}
+ − 988
+ − 989
\begin{proof} Both by induction on the definition of @{term "s \<in> r \<rightarrow> v"}.
+ − 990
The second part follows by a case analysis of @{term "s \<in> r \<rightarrow> v'"} and
+ − 991
the first part.\qed
+ − 992
\end{proof}
+ − 993
+ − 994
\noindent
+ − 995
We claim that our @{term "s \<in> r \<rightarrow> v"} relation captures the idea behind the four
+ − 996
informal POSIX rules shown in the Introduction: Consider for example the
+ − 997
rules \<open>P+L\<close> and \<open>P+R\<close> where the POSIX value for a string
+ − 998
and an alternative regular expression, that is @{term "(s, ALT r\<^sub>1 r\<^sub>2)"},
+ − 999
is specified---it is always a \<open>Left\<close>-value, \emph{except} when the
+ − 1000
string to be matched is not in the language of @{term "r\<^sub>1"}; only then it
+ − 1001
is a \<open>Right\<close>-value (see the side-condition in \<open>P+R\<close>).
+ − 1002
Interesting is also the rule for sequence regular expressions (\<open>PS\<close>). The first two premises state that @{term "v\<^sub>1"} and @{term "v\<^sub>2"}
+ − 1003
are the POSIX values for @{term "(s\<^sub>1, r\<^sub>1)"} and @{term "(s\<^sub>2, r\<^sub>2)"}
+ − 1004
respectively. Consider now the third premise and note that the POSIX value
+ − 1005
of this rule should match the string \mbox{@{term "s\<^sub>1 @ s\<^sub>2"}}. According to the
+ − 1006
Longest Match Rule, we want that the @{term "s\<^sub>1"} is the longest initial
+ − 1007
split of \mbox{@{term "s\<^sub>1 @ s\<^sub>2"}} such that @{term "s\<^sub>2"} is still recognised
+ − 1008
by @{term "r\<^sub>2"}. Let us assume, contrary to the third premise, that there
+ − 1009
\emph{exist} an @{term "s\<^sub>3"} and @{term "s\<^sub>4"} such that @{term "s\<^sub>2"}
+ − 1010
can be split up into a non-empty string @{term "s\<^sub>3"} and a possibly empty
+ − 1011
string @{term "s\<^sub>4"}. Moreover the longer string @{term "s\<^sub>1 @ s\<^sub>3"} can be
+ − 1012
matched by \<open>r\<^sub>1\<close> and the shorter @{term "s\<^sub>4"} can still be
+ − 1013
matched by @{term "r\<^sub>2"}. In this case @{term "s\<^sub>1"} would \emph{not} be the
+ − 1014
longest initial split of \mbox{@{term "s\<^sub>1 @ s\<^sub>2"}} and therefore @{term "Seq v\<^sub>1
+ − 1015
v\<^sub>2"} cannot be a POSIX value for @{term "(s\<^sub>1 @ s\<^sub>2, SEQ r\<^sub>1 r\<^sub>2)"}.
+ − 1016
The main point is that our side-condition ensures the Longest
+ − 1017
Match Rule is satisfied.
+ − 1018
+ − 1019
A similar condition is imposed on the POSIX value in the \<open>P\<star>\<close>-rule. Also there we want that @{term "s\<^sub>1"} is the longest initial
+ − 1020
split of @{term "s\<^sub>1 @ s\<^sub>2"} and furthermore the corresponding value
+ − 1021
@{term v} cannot be flattened to the empty string. In effect, we require
+ − 1022
that in each ``iteration'' of the star, some non-empty substring needs to
+ − 1023
be ``chipped'' away; only in case of the empty string we accept @{term
+ − 1024
"Stars []"} as the POSIX value. Indeed we can show that our POSIX values
+ − 1025
are lexical values which exclude those \<open>Stars\<close> that contain subvalues
+ − 1026
that flatten to the empty string.
+ − 1027
+ − 1028
\begin{lemma}\label{LVposix}
+ − 1029
@{thm [mode=IfThen] Posix_LV}
+ − 1030
\end{lemma}
+ − 1031
+ − 1032
\begin{proof}
+ − 1033
By routine induction on @{thm (prem 1) Posix_LV}.\qed
+ − 1034
\end{proof}
+ − 1035
+ − 1036
\noindent
+ − 1037
Next is the lemma that shows the function @{term "mkeps"} calculates
+ − 1038
the POSIX value for the empty string and a nullable regular expression.
+ − 1039
+ − 1040
\begin{lemma}\label{lemmkeps}
+ − 1041
@{thm[mode=IfThen] Posix_mkeps}
+ − 1042
\end{lemma}
+ − 1043
+ − 1044
\begin{proof}
+ − 1045
By routine induction on @{term r}.\qed
+ − 1046
\end{proof}
+ − 1047
+ − 1048
\noindent
+ − 1049
The central lemma for our POSIX relation is that the \<open>inj\<close>-function
+ − 1050
preserves POSIX values.
+ − 1051
+ − 1052
\begin{lemma}\label{Posix2}
+ − 1053
@{thm[mode=IfThen] Posix_injval}
+ − 1054
\end{lemma}
+ − 1055
+ − 1056
\begin{proof}
+ − 1057
By induction on \<open>r\<close>. We explain two cases.
+ − 1058
+ − 1059
\begin{itemize}
+ − 1060
\item[$\bullet$] Case @{term "r = ALT r\<^sub>1 r\<^sub>2"}. There are
+ − 1061
two subcases, namely \<open>(a)\<close> \mbox{@{term "v = Left v'"}} and @{term
+ − 1062
"s \<in> der c r\<^sub>1 \<rightarrow> v'"}; and \<open>(b)\<close> @{term "v = Right v'"}, @{term
+ − 1063
"s \<notin> L (der c r\<^sub>1)"} and @{term "s \<in> der c r\<^sub>2 \<rightarrow> v'"}. In \<open>(a)\<close> we
+ − 1064
know @{term "s \<in> der c r\<^sub>1 \<rightarrow> v'"}, from which we can infer @{term "(c # s)
+ − 1065
\<in> r\<^sub>1 \<rightarrow> injval r\<^sub>1 c v'"} by induction hypothesis and hence @{term "(c #
+ − 1066
s) \<in> ALT r\<^sub>1 r\<^sub>2 \<rightarrow> injval (ALT r\<^sub>1 r\<^sub>2) c (Left v')"} as needed. Similarly
+ − 1067
in subcase \<open>(b)\<close> where, however, in addition we have to use
+ − 1068
Proposition~\ref{derprop}(2) in order to infer @{term "c # s \<notin> L r\<^sub>1"} from @{term
+ − 1069
"s \<notin> L (der c r\<^sub>1)"}.\smallskip
+ − 1070
+ − 1071
\item[$\bullet$] Case @{term "r = SEQ r\<^sub>1 r\<^sub>2"}. There are three subcases:
+ − 1072
+ − 1073
\begin{quote}
+ − 1074
\begin{description}
+ − 1075
\item[\<open>(a)\<close>] @{term "v = Left (Seq v\<^sub>1 v\<^sub>2)"} and @{term "nullable r\<^sub>1"}
+ − 1076
\item[\<open>(b)\<close>] @{term "v = Right v\<^sub>1"} and @{term "nullable r\<^sub>1"}
+ − 1077
\item[\<open>(c)\<close>] @{term "v = Seq v\<^sub>1 v\<^sub>2"} and @{term "\<not> nullable r\<^sub>1"}
+ − 1078
\end{description}
+ − 1079
\end{quote}
+ − 1080
+ − 1081
\noindent For \<open>(a)\<close> we know @{term "s\<^sub>1 \<in> der c r\<^sub>1 \<rightarrow> v\<^sub>1"} and
+ − 1082
@{term "s\<^sub>2 \<in> r\<^sub>2 \<rightarrow> v\<^sub>2"} as well as
+ − 1083
%
+ − 1084
\[@{term "\<not> (\<exists>s\<^sub>3 s\<^sub>4. s\<^sub>3 \<noteq> [] \<and> s\<^sub>3 @ s\<^sub>4 = s\<^sub>2 \<and> s\<^sub>1 @ s\<^sub>3 \<in> L (der c r\<^sub>1) \<and> s\<^sub>4 \<in> L r\<^sub>2)"}\]
+ − 1085
+ − 1086
\noindent From the latter we can infer by Proposition~\ref{derprop}(2):
+ − 1087
%
+ − 1088
\[@{term "\<not> (\<exists>s\<^sub>3 s\<^sub>4. s\<^sub>3 \<noteq> [] \<and> s\<^sub>3 @ s\<^sub>4 = s\<^sub>2 \<and> (c # s\<^sub>1) @ s\<^sub>3 \<in> L r\<^sub>1 \<and> s\<^sub>4 \<in> L r\<^sub>2)"}\]
+ − 1089
+ − 1090
\noindent We can use the induction hypothesis for \<open>r\<^sub>1\<close> to obtain
+ − 1091
@{term "(c # s\<^sub>1) \<in> r\<^sub>1 \<rightarrow> injval r\<^sub>1 c v\<^sub>1"}. Putting this all together allows us to infer
+ − 1092
@{term "((c # s\<^sub>1) @ s\<^sub>2) \<in> SEQ r\<^sub>1 r\<^sub>2 \<rightarrow> Seq (injval r\<^sub>1 c v\<^sub>1) v\<^sub>2"}. The case \<open>(c)\<close>
+ − 1093
is similar.
+ − 1094
+ − 1095
For \<open>(b)\<close> we know @{term "s \<in> der c r\<^sub>2 \<rightarrow> v\<^sub>1"} and
+ − 1096
@{term "s\<^sub>1 @ s\<^sub>2 \<notin> L (SEQ (der c r\<^sub>1) r\<^sub>2)"}. From the former
+ − 1097
we have @{term "(c # s) \<in> r\<^sub>2 \<rightarrow> (injval r\<^sub>2 c v\<^sub>1)"} by induction hypothesis
+ − 1098
for @{term "r\<^sub>2"}. From the latter we can infer
+ − 1099
%
+ − 1100
\[@{term "\<not> (\<exists>s\<^sub>3 s\<^sub>4. s\<^sub>3 \<noteq> [] \<and> s\<^sub>3 @ s\<^sub>4 = c # s \<and> s\<^sub>3 \<in> L r\<^sub>1 \<and> s\<^sub>4 \<in> L r\<^sub>2)"}\]
+ − 1101
+ − 1102
\noindent By Lemma~\ref{lemmkeps} we know @{term "[] \<in> r\<^sub>1 \<rightarrow> (mkeps r\<^sub>1)"}
+ − 1103
holds. Putting this all together, we can conclude with @{term "(c #
+ − 1104
s) \<in> SEQ r\<^sub>1 r\<^sub>2 \<rightarrow> Seq (mkeps r\<^sub>1) (injval r\<^sub>2 c v\<^sub>1)"}, as required.
+ − 1105
+ − 1106
Finally suppose @{term "r = STAR r\<^sub>1"}. This case is very similar to the
+ − 1107
sequence case, except that we need to also ensure that @{term "flat (injval r\<^sub>1
+ − 1108
c v\<^sub>1) \<noteq> []"}. This follows from @{term "(c # s\<^sub>1)
+ − 1109
\<in> r\<^sub>1 \<rightarrow> injval r\<^sub>1 c v\<^sub>1"} (which in turn follows from @{term "s\<^sub>1 \<in> der c
+ − 1110
r\<^sub>1 \<rightarrow> v\<^sub>1"} and the induction hypothesis).\qed
+ − 1111
\end{itemize}
+ − 1112
\end{proof}
+ − 1113
+ − 1114
\noindent
+ − 1115
With Lemma~\ref{Posix2} in place, it is completely routine to establish
+ − 1116
that the Sulzmann and Lu lexer satisfies our specification (returning
+ − 1117
the null value @{term "None"} iff the string is not in the language of the regular expression,
+ − 1118
and returning a unique POSIX value iff the string \emph{is} in the language):
+ − 1119
+ − 1120
\begin{theorem}\mbox{}\smallskip\\\label{lexercorrect}
+ − 1121
\begin{tabular}{ll}
+ − 1122
(1) & @{thm (lhs) lexer_correct_None} if and only if @{thm (rhs) lexer_correct_None}\\
+ − 1123
(2) & @{thm (lhs) lexer_correct_Some} if and only if @{thm (rhs) lexer_correct_Some}\\
+ − 1124
\end{tabular}
+ − 1125
\end{theorem}
+ − 1126
+ − 1127
\begin{proof}
+ − 1128
By induction on @{term s} using Lemma~\ref{lemmkeps} and \ref{Posix2}.\qed
+ − 1129
\end{proof}
+ − 1130
+ − 1131
\noindent In \textit{(2)} we further know by Theorem~\ref{posixdeterm} that the
+ − 1132
value returned by the lexer must be unique. A simple corollary
+ − 1133
of our two theorems is:
+ − 1134
+ − 1135
\begin{corollary}\mbox{}\smallskip\\\label{lexercorrectcor}
+ − 1136
\begin{tabular}{ll}
+ − 1137
(1) & @{thm (lhs) lexer_correctness(2)} if and only if @{thm (rhs) lexer_correctness(2)}\\
+ − 1138
(2) & @{thm (lhs) lexer_correctness(1)} if and only if @{thm (rhs) lexer_correctness(1)}\\
+ − 1139
\end{tabular}
+ − 1140
\end{corollary}
+ − 1141
+ − 1142
\noindent This concludes our correctness proof. Note that we have
+ − 1143
not changed the algorithm of Sulzmann and Lu,\footnote{All
+ − 1144
deviations we introduced are harmless.} but introduced our own
+ − 1145
specification for what a correct result---a POSIX value---should be.
+ − 1146
In the next section we show that our specification coincides with
+ − 1147
another one given by Okui and Suzuki using a different technique.
+ − 1148
+ − 1149
\<close>
+ − 1150
+ − 1151
section \<open>Ordering of Values according to Okui and Suzuki\<close>
+ − 1152
+ − 1153
text \<open>
+ − 1154
+ − 1155
While in the previous section we have defined POSIX values directly
+ − 1156
in terms of a ternary relation (see inference rules in Figure~\ref{POSIXrules}),
+ − 1157
Sulzmann and Lu took a different approach in \cite{Sulzmann2014}:
+ − 1158
they introduced an ordering for values and identified POSIX values
+ − 1159
as the maximal elements. An extended version of \cite{Sulzmann2014}
+ − 1160
is available at the website of its first author; this includes more
+ − 1161
details of their proofs, but which are evidently not in final form
+ − 1162
yet. Unfortunately, we were not able to verify claims that their
+ − 1163
ordering has properties such as being transitive or having maximal
+ − 1164
elements.
+ − 1165
+ − 1166
Okui and Suzuki \cite{OkuiSuzuki2010,OkuiSuzukiTech} described
+ − 1167
another ordering of values, which they use to establish the
+ − 1168
correctness of their automata-based algorithm for POSIX matching.
+ − 1169
Their ordering resembles some aspects of the one given by Sulzmann
+ − 1170
and Lu, but overall is quite different. To begin with, Okui and
+ − 1171
Suzuki identify POSIX values as minimal, rather than maximal,
+ − 1172
elements in their ordering. A more substantial difference is that
+ − 1173
the ordering by Okui and Suzuki uses \emph{positions} in order to
+ − 1174
identify and compare subvalues. Positions are lists of natural
+ − 1175
numbers. This allows them to quite naturally formalise the Longest
+ − 1176
Match and Priority rules of the informal POSIX standard. Consider
+ − 1177
for example the value @{term v}
+ − 1178
+ − 1179
\begin{center}
+ − 1180
@{term "v == Stars [Seq (Char x) (Char y), Char z]"}
+ − 1181
\end{center}
+ − 1182
+ − 1183
\noindent
+ − 1184
At position \<open>[0,1]\<close> of this value is the
+ − 1185
subvalue \<open>Char y\<close> and at position \<open>[1]\<close> the
+ − 1186
subvalue @{term "Char z"}. At the `root' position, or empty list
+ − 1187
@{term "[]"}, is the whole value @{term v}. Positions such as \<open>[0,1,0]\<close> or \<open>[2]\<close> are outside of \<open>v\<close>. If it exists, the subvalue of @{term v} at a position \<open>p\<close>, written @{term "at v p"}, can be recursively defined by
+ − 1188
+ − 1189
\begin{center}
+ − 1190
\begin{tabular}{r@ {\hspace{0mm}}lcl}
+ − 1191
@{term v} & \<open>\<downharpoonleft>\<^bsub>[]\<^esub>\<close> & \<open>\<equiv>\<close>& @{thm (rhs) at.simps(1)}\\
+ − 1192
@{term "Left v"} & \<open>\<downharpoonleft>\<^bsub>0::ps\<^esub>\<close> & \<open>\<equiv>\<close>& @{thm (rhs) at.simps(2)}\\
+ − 1193
@{term "Right v"} & \<open>\<downharpoonleft>\<^bsub>1::ps\<^esub>\<close> & \<open>\<equiv>\<close> &
+ − 1194
@{thm (rhs) at.simps(3)[simplified Suc_0_fold]}\\
+ − 1195
@{term "Seq v\<^sub>1 v\<^sub>2"} & \<open>\<downharpoonleft>\<^bsub>0::ps\<^esub>\<close> & \<open>\<equiv>\<close> &
+ − 1196
@{thm (rhs) at.simps(4)[where ?v1.0="v\<^sub>1" and ?v2.0="v\<^sub>2"]} \\
+ − 1197
@{term "Seq v\<^sub>1 v\<^sub>2"} & \<open>\<downharpoonleft>\<^bsub>1::ps\<^esub>\<close>
+ − 1198
& \<open>\<equiv>\<close> &
+ − 1199
@{thm (rhs) at.simps(5)[where ?v1.0="v\<^sub>1" and ?v2.0="v\<^sub>2", simplified Suc_0_fold]} \\
+ − 1200
@{term "Stars vs"} & \<open>\<downharpoonleft>\<^bsub>n::ps\<^esub>\<close> & \<open>\<equiv>\<close>& @{thm (rhs) at.simps(6)}\\
+ − 1201
\end{tabular}
+ − 1202
\end{center}
+ − 1203
+ − 1204
\noindent In the last clause we use Isabelle's notation @{term "vs ! n"} for the
+ − 1205
\<open>n\<close>th element in a list. The set of positions inside a value \<open>v\<close>,
+ − 1206
written @{term "Pos v"}, is given by
+ − 1207
+ − 1208
\begin{center}
+ − 1209
\begin{tabular}{lcl}
+ − 1210
@{thm (lhs) Pos.simps(1)} & \<open>\<equiv>\<close> & @{thm (rhs) Pos.simps(1)}\\
+ − 1211
@{thm (lhs) Pos.simps(2)} & \<open>\<equiv>\<close> & @{thm (rhs) Pos.simps(2)}\\
+ − 1212
@{thm (lhs) Pos.simps(3)} & \<open>\<equiv>\<close> & @{thm (rhs) Pos.simps(3)}\\
+ − 1213
@{thm (lhs) Pos.simps(4)} & \<open>\<equiv>\<close> & @{thm (rhs) Pos.simps(4)}\\
+ − 1214
@{thm (lhs) Pos.simps(5)[where ?v1.0="v\<^sub>1" and ?v2.0="v\<^sub>2"]}
+ − 1215
& \<open>\<equiv>\<close>
+ − 1216
& @{thm (rhs) Pos.simps(5)[where ?v1.0="v\<^sub>1" and ?v2.0="v\<^sub>2"]}\\
+ − 1217
@{thm (lhs) Pos_stars} & \<open>\<equiv>\<close> & @{thm (rhs) Pos_stars}\\
+ − 1218
\end{tabular}
+ − 1219
\end{center}
+ − 1220
+ − 1221
\noindent
+ − 1222
whereby \<open>len\<close> in the last clause stands for the length of a list. Clearly
+ − 1223
for every position inside a value there exists a subvalue at that position.
+ − 1224
+ − 1225
+ − 1226
To aid understanding of the ordering of Okui and Suzuki, consider again
+ − 1227
the earlier value
+ − 1228
\<open>v\<close> and compare it with the following \<open>w\<close>:
+ − 1229
+ − 1230
\begin{center}
+ − 1231
\begin{tabular}{l}
+ − 1232
@{term "v == Stars [Seq (Char x) (Char y), Char z]"}\\
+ − 1233
@{term "w == Stars [Char x, Char y, Char z]"}
+ − 1234
\end{tabular}
+ − 1235
\end{center}
+ − 1236
+ − 1237
\noindent Both values match the string \<open>xyz\<close>, that means if
+ − 1238
we flatten these values at their respective root position, we obtain
+ − 1239
\<open>xyz\<close>. However, at position \<open>[0]\<close>, \<open>v\<close> matches
+ − 1240
\<open>xy\<close> whereas \<open>w\<close> matches only the shorter \<open>x\<close>. So
+ − 1241
according to the Longest Match Rule, we should prefer \<open>v\<close>,
+ − 1242
rather than \<open>w\<close> as POSIX value for string \<open>xyz\<close> (and
+ − 1243
corresponding regular expression). In order to
+ − 1244
formalise this idea, Okui and Suzuki introduce a measure for
+ − 1245
subvalues at position \<open>p\<close>, called the \emph{norm} of \<open>v\<close>
+ − 1246
at position \<open>p\<close>. We can define this measure in Isabelle as an
+ − 1247
integer as follows
+ − 1248
+ − 1249
\begin{center}
+ − 1250
@{thm pflat_len_def}
+ − 1251
\end{center}
+ − 1252
+ − 1253
\noindent where we take the length of the flattened value at
+ − 1254
position \<open>p\<close>, provided the position is inside \<open>v\<close>; if
+ − 1255
not, then the norm is \<open>-1\<close>. The default for outside
+ − 1256
positions is crucial for the POSIX requirement of preferring a
+ − 1257
\<open>Left\<close>-value over a \<open>Right\<close>-value (if they can match the
+ − 1258
same string---see the Priority Rule from the Introduction). For this
+ − 1259
consider
+ − 1260
+ − 1261
\begin{center}
+ − 1262
@{term "v == Left (Char x)"} \qquad and \qquad @{term "w == Right (Char x)"}
+ − 1263
\end{center}
+ − 1264
+ − 1265
\noindent Both values match \<open>x\<close>. At position \<open>[0]\<close>
+ − 1266
the norm of @{term v} is \<open>1\<close> (the subvalue matches \<open>x\<close>),
+ − 1267
but the norm of \<open>w\<close> is \<open>-1\<close> (the position is outside
+ − 1268
\<open>w\<close> according to how we defined the `inside' positions of
+ − 1269
\<open>Left\<close>- and \<open>Right\<close>-values). Of course at position
+ − 1270
\<open>[1]\<close>, the norms @{term "pflat_len v [1]"} and @{term
+ − 1271
"pflat_len w [1]"} are reversed, but the point is that subvalues
+ − 1272
will be analysed according to lexicographically ordered
+ − 1273
positions. According to this ordering, the position \<open>[0]\<close>
+ − 1274
takes precedence over \<open>[1]\<close> and thus also \<open>v\<close> will be
+ − 1275
preferred over \<open>w\<close>. The lexicographic ordering of positions, written
+ − 1276
@{term "DUMMY \<sqsubset>lex DUMMY"}, can be conveniently formalised
+ − 1277
by three inference rules
+ − 1278
+ − 1279
\begin{center}
+ − 1280
\begin{tabular}{ccc}
+ − 1281
@{thm [mode=Axiom] lex_list.intros(1)}\hspace{1cm} &
+ − 1282
@{thm [mode=Rule] lex_list.intros(3)[where ?p1.0="p\<^sub>1" and ?p2.0="p\<^sub>2" and
+ − 1283
?ps1.0="ps\<^sub>1" and ?ps2.0="ps\<^sub>2"]}\hspace{1cm} &
+ − 1284
@{thm [mode=Rule] lex_list.intros(2)[where ?ps1.0="ps\<^sub>1" and ?ps2.0="ps\<^sub>2"]}
+ − 1285
\end{tabular}
+ − 1286
\end{center}
+ − 1287
+ − 1288
With the norm and lexicographic order in place,
+ − 1289
we can state the key definition of Okui and Suzuki
+ − 1290
\cite{OkuiSuzuki2010}: a value @{term "v\<^sub>1"} is \emph{smaller at position \<open>p\<close>} than
+ − 1291
@{term "v\<^sub>2"}, written @{term "v\<^sub>1 \<sqsubset>val p v\<^sub>2"},
+ − 1292
if and only if $(i)$ the norm at position \<open>p\<close> is
+ − 1293
greater in @{term "v\<^sub>1"} (that is the string @{term "flat (at v\<^sub>1 p)"} is longer
+ − 1294
than @{term "flat (at v\<^sub>2 p)"}) and $(ii)$ for all subvalues at
+ − 1295
positions that are inside @{term "v\<^sub>1"} or @{term "v\<^sub>2"} and that are
+ − 1296
lexicographically smaller than \<open>p\<close>, the norms are equal, namely
+ − 1297
+ − 1298
\begin{center}
+ − 1299
\begin{tabular}{c}
+ − 1300
@{thm (lhs) PosOrd_def[where ?v1.0="v\<^sub>1" and ?v2.0="v\<^sub>2"]}
+ − 1301
\<open>\<equiv>\<close>
+ − 1302
$\begin{cases}
+ − 1303
(i) & @{term "pflat_len v\<^sub>1 p > pflat_len v\<^sub>2 p"} \quad\text{and}\smallskip \\
+ − 1304
(ii) & @{term "(\<forall>q \<in> Pos v\<^sub>1 \<union> Pos v\<^sub>2. q \<sqsubset>lex p --> pflat_len v\<^sub>1 q = pflat_len v\<^sub>2 q)"}
+ − 1305
\end{cases}$
+ − 1306
\end{tabular}
+ − 1307
\end{center}
+ − 1308
+ − 1309
\noindent The position \<open>p\<close> in this definition acts as the
+ − 1310
\emph{first distinct position} of \<open>v\<^sub>1\<close> and \<open>v\<^sub>2\<close>, where both values match strings of different length
+ − 1311
\cite{OkuiSuzuki2010}. Since at \<open>p\<close> the values \<open>v\<^sub>1\<close> and \<open>v\<^sub>2\<close> match different strings, the
+ − 1312
ordering is irreflexive. Derived from the definition above
+ − 1313
are the following two orderings:
+ − 1314
+ − 1315
\begin{center}
+ − 1316
\begin{tabular}{l}
+ − 1317
@{thm PosOrd_ex_def[where ?v1.0="v\<^sub>1" and ?v2.0="v\<^sub>2"]}\\
+ − 1318
@{thm PosOrd_ex_eq_def[where ?v1.0="v\<^sub>1" and ?v2.0="v\<^sub>2"]}
+ − 1319
\end{tabular}
+ − 1320
\end{center}
+ − 1321
+ − 1322
While we encountered a number of obstacles for establishing properties like
+ − 1323
transitivity for the ordering of Sulzmann and Lu (and which we failed
+ − 1324
to overcome), it is relatively straightforward to establish this
+ − 1325
property for the orderings
+ − 1326
@{term "DUMMY :\<sqsubset>val DUMMY"} and @{term "DUMMY :\<sqsubseteq>val DUMMY"}
+ − 1327
by Okui and Suzuki.
+ − 1328
+ − 1329
\begin{lemma}[Transitivity]\label{transitivity}
+ − 1330
@{thm [mode=IfThen] PosOrd_trans[where ?v1.0="v\<^sub>1" and ?v2.0="v\<^sub>2" and ?v3.0="v\<^sub>3"]}
+ − 1331
\end{lemma}
+ − 1332
+ − 1333
\begin{proof} From the assumption we obtain two positions \<open>p\<close>
+ − 1334
and \<open>q\<close>, where the values \<open>v\<^sub>1\<close> and \<open>v\<^sub>2\<close> (respectively \<open>v\<^sub>2\<close> and \<open>v\<^sub>3\<close>) are `distinct'. Since \<open>\<prec>\<^bsub>lex\<^esub>\<close> is trichotomous, we need to consider
+ − 1335
three cases, namely @{term "p = q"}, @{term "p \<sqsubset>lex q"} and
+ − 1336
@{term "q \<sqsubset>lex p"}. Let us look at the first case. Clearly
+ − 1337
@{term "pflat_len v\<^sub>2 p < pflat_len v\<^sub>1 p"} and @{term
+ − 1338
"pflat_len v\<^sub>3 p < pflat_len v\<^sub>2 p"} imply @{term
+ − 1339
"pflat_len v\<^sub>3 p < pflat_len v\<^sub>1 p"}. It remains to show
+ − 1340
that for a @{term "p' \<in> Pos v\<^sub>1 \<union> Pos v\<^sub>3"}
+ − 1341
with @{term "p' \<sqsubset>lex p"} that @{term "pflat_len v\<^sub>1
+ − 1342
p' = pflat_len v\<^sub>3 p'"} holds. Suppose @{term "p' \<in> Pos
+ − 1343
v\<^sub>1"}, then we can infer from the first assumption that @{term
+ − 1344
"pflat_len v\<^sub>1 p' = pflat_len v\<^sub>2 p'"}. But this means
+ − 1345
that @{term "p'"} must be in @{term "Pos v\<^sub>2"} too (the norm
+ − 1346
cannot be \<open>-1\<close> given @{term "p' \<in> Pos v\<^sub>1"}).
+ − 1347
Hence we can use the second assumption and
+ − 1348
infer @{term "pflat_len v\<^sub>2 p' = pflat_len v\<^sub>3 p'"},
+ − 1349
which concludes this case with @{term "v\<^sub>1 :\<sqsubset>val
+ − 1350
v\<^sub>3"}. The reasoning in the other cases is similar.\qed
+ − 1351
\end{proof}
+ − 1352
+ − 1353
\noindent
+ − 1354
The proof for $\preccurlyeq$ is similar and omitted.
+ − 1355
It is also straightforward to show that \<open>\<prec>\<close> and
+ − 1356
$\preccurlyeq$ are partial orders. Okui and Suzuki furthermore show that they
+ − 1357
are linear orderings for lexical values \cite{OkuiSuzuki2010} of a given
+ − 1358
regular expression and given string, but we have not formalised this in Isabelle. It is
+ − 1359
not essential for our results. What we are going to show below is
+ − 1360
that for a given \<open>r\<close> and \<open>s\<close>, the orderings have a unique
+ − 1361
minimal element on the set @{term "LV r s"}, which is the POSIX value
+ − 1362
we defined in the previous section. We start with two properties that
+ − 1363
show how the length of a flattened value relates to the \<open>\<prec>\<close>-ordering.
+ − 1364
+ − 1365
\begin{proposition}\mbox{}\smallskip\\\label{ordlen}
+ − 1366
\begin{tabular}{@ {}ll}
+ − 1367
(1) &
+ − 1368
@{thm [mode=IfThen] PosOrd_shorterE[where ?v1.0="v\<^sub>1" and ?v2.0="v\<^sub>2"]}\\
+ − 1369
(2) &
+ − 1370
@{thm [mode=IfThen] PosOrd_shorterI[where ?v1.0="v\<^sub>1" and ?v2.0="v\<^sub>2"]}
+ − 1371
\end{tabular}
+ − 1372
\end{proposition}
+ − 1373
+ − 1374
\noindent Both properties follow from the definition of the ordering. Note that
+ − 1375
\textit{(2)} entails that if the underlying string of a value, say @{term "v\<^sub>2"}, is a
+ − 1376
strict prefix of the flattened string of another value, say @{term "v\<^sub>1"}, then
+ − 1377
@{term "v\<^sub>1"} must be smaller than @{term "v\<^sub>2"}. For our proofs it
+ − 1378
will be useful to have the following properties---in each case the underlying strings
+ − 1379
of the compared values are the same:
+ − 1380
+ − 1381
\begin{proposition}\mbox{}\smallskip\\\label{ordintros}
+ − 1382
\begin{tabular}{ll}
+ − 1383
\textit{(1)} &
+ − 1384
@{thm [mode=IfThen] PosOrd_Left_Right[where ?v1.0="v\<^sub>1" and ?v2.0="v\<^sub>2"]}\\
+ − 1385
\textit{(2)} & If
+ − 1386
@{thm (prem 1) PosOrd_Left_eq[where ?v1.0="v\<^sub>1" and ?v2.0="v\<^sub>2"]} \;then\;
+ − 1387
@{thm (lhs) PosOrd_Left_eq[where ?v1.0="v\<^sub>1" and ?v2.0="v\<^sub>2"]} \;iff\;
+ − 1388
@{thm (rhs) PosOrd_Left_eq[where ?v1.0="v\<^sub>1" and ?v2.0="v\<^sub>2"]}\\
+ − 1389
\textit{(3)} & If
+ − 1390
@{thm (prem 1) PosOrd_Right_eq[where ?v1.0="v\<^sub>1" and ?v2.0="v\<^sub>2"]} \;then\;
+ − 1391
@{thm (lhs) PosOrd_Right_eq[where ?v1.0="v\<^sub>1" and ?v2.0="v\<^sub>2"]} \;iff\;
+ − 1392
@{thm (rhs) PosOrd_Right_eq[where ?v1.0="v\<^sub>1" and ?v2.0="v\<^sub>2"]}\\
+ − 1393
\textit{(4)} & If
+ − 1394
@{thm (prem 1) PosOrd_Seq_eq[where ?v2.0="v\<^sub>2" and ?w2.0="w\<^sub>2"]} \;then\;
+ − 1395
@{thm (lhs) PosOrd_Seq_eq[where ?v2.0="v\<^sub>2" and ?w2.0="w\<^sub>2"]} \;iff\;
+ − 1396
@{thm (rhs) PosOrd_Seq_eq[where ?v2.0="v\<^sub>2" and ?w2.0="w\<^sub>2"]}\\
+ − 1397
\textit{(5)} & If
+ − 1398
@{thm (prem 2) PosOrd_SeqI1[simplified, where ?v1.0="v\<^sub>1" and ?v2.0="v\<^sub>2" and
+ − 1399
?w1.0="w\<^sub>1" and ?w2.0="w\<^sub>2"]} \;and\;
+ − 1400
@{thm (prem 1) PosOrd_SeqI1[where ?v1.0="v\<^sub>1" and ?v2.0="v\<^sub>2" and
+ − 1401
?w1.0="w\<^sub>1" and ?w2.0="w\<^sub>2"]} \;then\;
+ − 1402
@{thm (concl) PosOrd_SeqI1[where ?v1.0="v\<^sub>1" and ?v2.0="v\<^sub>2" and
+ − 1403
?w1.0="w\<^sub>1" and ?w2.0="w\<^sub>2"]}\\
+ − 1404
\textit{(6)} & If
+ − 1405
@{thm (prem 1) PosOrd_Stars_append_eq[where ?vs1.0="vs\<^sub>1" and ?vs2.0="vs\<^sub>2"]} \;then\;
+ − 1406
@{thm (lhs) PosOrd_Stars_append_eq[where ?vs1.0="vs\<^sub>1" and ?vs2.0="vs\<^sub>2"]} \;iff\;
+ − 1407
@{thm (rhs) PosOrd_Stars_append_eq[where ?vs1.0="vs\<^sub>1" and ?vs2.0="vs\<^sub>2"]}\\
+ − 1408
+ − 1409
\textit{(7)} & If
+ − 1410
@{thm (prem 2) PosOrd_StarsI[where ?v1.0="v\<^sub>1" and ?v2.0="v\<^sub>2" and
+ − 1411
?vs1.0="vs\<^sub>1" and ?vs2.0="vs\<^sub>2"]} \;and\;
+ − 1412
@{thm (prem 1) PosOrd_StarsI[where ?v1.0="v\<^sub>1" and ?v2.0="v\<^sub>2" and
+ − 1413
?vs1.0="vs\<^sub>1" and ?vs2.0="vs\<^sub>2"]} \;then\;
+ − 1414
@{thm (concl) PosOrd_StarsI[where ?v1.0="v\<^sub>1" and ?v2.0="v\<^sub>2" and
+ − 1415
?vs1.0="vs\<^sub>1" and ?vs2.0="vs\<^sub>2"]}\\
+ − 1416
\end{tabular}
+ − 1417
\end{proposition}
+ − 1418
+ − 1419
\noindent One might prefer that statements \textit{(4)} and \textit{(5)}
+ − 1420
(respectively \textit{(6)} and \textit{(7)})
+ − 1421
are combined into a single \textit{iff}-statement (like the ones for \<open>Left\<close> and \<open>Right\<close>). Unfortunately this cannot be done easily: such
+ − 1422
a single statement would require an additional assumption about the
+ − 1423
two values @{term "Seq v\<^sub>1 v\<^sub>2"} and @{term "Seq w\<^sub>1 w\<^sub>2"}
+ − 1424
being inhabited by the same regular expression. The
+ − 1425
complexity of the proofs involved seems to not justify such a
+ − 1426
`cleaner' single statement. The statements given are just the properties that
+ − 1427
allow us to establish our theorems without any difficulty. The proofs
+ − 1428
for Proposition~\ref{ordintros} are routine.
+ − 1429
+ − 1430
+ − 1431
Next we establish how Okui and Suzuki's orderings relate to our
+ − 1432
definition of POSIX values. Given a \<open>POSIX\<close> value \<open>v\<^sub>1\<close>
+ − 1433
for \<open>r\<close> and \<open>s\<close>, then any other lexical value \<open>v\<^sub>2\<close> in @{term "LV r s"} is greater or equal than \<open>v\<^sub>1\<close>, namely:
+ − 1434
+ − 1435
+ − 1436
\begin{theorem}\label{orderone}
+ − 1437
@{thm [mode=IfThen] Posix_PosOrd[where ?v1.0="v\<^sub>1" and ?v2.0="v\<^sub>2"]}
+ − 1438
\end{theorem}
+ − 1439
+ − 1440
\begin{proof} By induction on our POSIX rules. By
+ − 1441
Theorem~\ref{posixdeterm} and the definition of @{const LV}, it is clear
+ − 1442
that \<open>v\<^sub>1\<close> and \<open>v\<^sub>2\<close> have the same
+ − 1443
underlying string @{term s}. The three base cases are
+ − 1444
straightforward: for example for @{term "v\<^sub>1 = Void"}, we have
+ − 1445
that @{term "v\<^sub>2 \<in> LV ONE []"} must also be of the form
+ − 1446
\mbox{@{term "v\<^sub>2 = Void"}}. Therefore we have @{term
+ − 1447
"v\<^sub>1 :\<sqsubseteq>val v\<^sub>2"}. The inductive cases for
+ − 1448
\<open>r\<close> being of the form @{term "ALT r\<^sub>1 r\<^sub>2"} and
+ − 1449
@{term "SEQ r\<^sub>1 r\<^sub>2"} are as follows:
+ − 1450
+ − 1451
+ − 1452
\begin{itemize}
+ − 1453
+ − 1454
\item[$\bullet$] Case \<open>P+L\<close> with @{term "s \<in> (ALT r\<^sub>1 r\<^sub>2)
+ − 1455
\<rightarrow> (Left w\<^sub>1)"}: In this case the value
+ − 1456
@{term "v\<^sub>2"} is either of the
+ − 1457
form @{term "Left w\<^sub>2"} or @{term "Right w\<^sub>2"}. In the
+ − 1458
latter case we can immediately conclude with \mbox{@{term "v\<^sub>1
+ − 1459
:\<sqsubseteq>val v\<^sub>2"}} since a \<open>Left\<close>-value with the
+ − 1460
same underlying string \<open>s\<close> is always smaller than a
+ − 1461
\<open>Right\<close>-value by Proposition~\ref{ordintros}\textit{(1)}.
+ − 1462
In the former case we have @{term "w\<^sub>2
+ − 1463
\<in> LV r\<^sub>1 s"} and can use the induction hypothesis to infer
+ − 1464
@{term "w\<^sub>1 :\<sqsubseteq>val w\<^sub>2"}. Because @{term
+ − 1465
"w\<^sub>1"} and @{term "w\<^sub>2"} have the same underlying string
+ − 1466
\<open>s\<close>, we can conclude with @{term "Left w\<^sub>1
+ − 1467
:\<sqsubseteq>val Left w\<^sub>2"} using
+ − 1468
Proposition~\ref{ordintros}\textit{(2)}.\smallskip
+ − 1469
+ − 1470
\item[$\bullet$] Case \<open>P+R\<close> with @{term "s \<in> (ALT r\<^sub>1 r\<^sub>2)
+ − 1471
\<rightarrow> (Right w\<^sub>1)"}: This case is similar to the previous
+ − 1472
case, except that we additionally know @{term "s \<notin> L
+ − 1473
r\<^sub>1"}. This is needed when @{term "v\<^sub>2"} is of the form
+ − 1474
\mbox{@{term "Left w\<^sub>2"}}. Since \mbox{@{term "flat v\<^sub>2 = flat
+ − 1475
w\<^sub>2"} \<open>= s\<close>} and @{term "\<Turnstile> w\<^sub>2 :
+ − 1476
r\<^sub>1"}, we can derive a contradiction for \mbox{@{term "s \<notin> L
+ − 1477
r\<^sub>1"}} using
+ − 1478
Proposition~\ref{inhabs}. So also in this case \mbox{@{term "v\<^sub>1
+ − 1479
:\<sqsubseteq>val v\<^sub>2"}}.\smallskip
+ − 1480
+ − 1481
\item[$\bullet$] Case \<open>PS\<close> with @{term "(s\<^sub>1 @
+ − 1482
s\<^sub>2) \<in> (SEQ r\<^sub>1 r\<^sub>2) \<rightarrow> (Seq
+ − 1483
w\<^sub>1 w\<^sub>2)"}: We can assume @{term "v\<^sub>2 = Seq
+ − 1484
(u\<^sub>1) (u\<^sub>2)"} with @{term "\<Turnstile> u\<^sub>1 :
+ − 1485
r\<^sub>1"} and \mbox{@{term "\<Turnstile> u\<^sub>2 :
+ − 1486
r\<^sub>2"}}. We have @{term "s\<^sub>1 @ s\<^sub>2 = (flat
+ − 1487
u\<^sub>1) @ (flat u\<^sub>2)"}. By the side-condition of the
+ − 1488
\<open>PS\<close>-rule we know that either @{term "s\<^sub>1 = flat
+ − 1489
u\<^sub>1"} or that @{term "flat u\<^sub>1"} is a strict prefix of
+ − 1490
@{term "s\<^sub>1"}. In the latter case we can infer @{term
+ − 1491
"w\<^sub>1 :\<sqsubset>val u\<^sub>1"} by
+ − 1492
Proposition~\ref{ordlen}\textit{(2)} and from this @{term "v\<^sub>1
+ − 1493
:\<sqsubseteq>val v\<^sub>2"} by Proposition~\ref{ordintros}\textit{(5)}
+ − 1494
(as noted above @{term "v\<^sub>1"} and @{term "v\<^sub>2"} must have the
+ − 1495
same underlying string).
+ − 1496
In the former case we know
+ − 1497
@{term "u\<^sub>1 \<in> LV r\<^sub>1 s\<^sub>1"} and @{term
+ − 1498
"u\<^sub>2 \<in> LV r\<^sub>2 s\<^sub>2"}. With this we can use the
+ − 1499
induction hypotheses to infer @{term "w\<^sub>1 :\<sqsubseteq>val
+ − 1500
u\<^sub>1"} and @{term "w\<^sub>2 :\<sqsubseteq>val u\<^sub>2"}. By
+ − 1501
Proposition~\ref{ordintros}\textit{(4,5)} we can again infer
+ − 1502
@{term "v\<^sub>1 :\<sqsubseteq>val
+ − 1503
v\<^sub>2"}.
+ − 1504
+ − 1505
\end{itemize}
+ − 1506
+ − 1507
\noindent The case for \<open>P\<star>\<close> is similar to the \<open>PS\<close>-case and omitted.\qed
+ − 1508
\end{proof}
+ − 1509
+ − 1510
\noindent This theorem shows that our \<open>POSIX\<close> value for a
+ − 1511
regular expression \<open>r\<close> and string @{term s} is in fact a
+ − 1512
minimal element of the values in \<open>LV r s\<close>. By
+ − 1513
Proposition~\ref{ordlen}\textit{(2)} we also know that any value in
+ − 1514
\<open>LV r s'\<close>, with @{term "s'"} being a strict prefix, cannot be
+ − 1515
smaller than \<open>v\<^sub>1\<close>. The next theorem shows the
+ − 1516
opposite---namely any minimal element in @{term "LV r s"} must be a
+ − 1517
\<open>POSIX\<close> value. This can be established by induction on \<open>r\<close>, but the proof can be drastically simplified by using the fact
+ − 1518
from the previous section about the existence of a \<open>POSIX\<close> value
+ − 1519
whenever a string @{term "s \<in> L r"}.
+ − 1520
+ − 1521
+ − 1522
\begin{theorem}
+ − 1523
@{thm [mode=IfThen] PosOrd_Posix[where ?v1.0="v\<^sub>1"]}
+ − 1524
\end{theorem}
+ − 1525
+ − 1526
\begin{proof}
+ − 1527
If @{thm (prem 1) PosOrd_Posix[where ?v1.0="v\<^sub>1"]} then
+ − 1528
@{term "s \<in> L r"} by Proposition~\ref{inhabs}. Hence by Theorem~\ref{lexercorrect}(2)
+ − 1529
there exists a
+ − 1530
\<open>POSIX\<close> value @{term "v\<^sub>P"} with @{term "s \<in> r \<rightarrow> v\<^sub>P"}
+ − 1531
and by Lemma~\ref{LVposix} we also have \mbox{@{term "v\<^sub>P \<in> LV r s"}}.
+ − 1532
By Theorem~\ref{orderone} we therefore have
+ − 1533
@{term "v\<^sub>P :\<sqsubseteq>val v\<^sub>1"}. If @{term "v\<^sub>P = v\<^sub>1"} then
+ − 1534
we are done. Otherwise we have @{term "v\<^sub>P :\<sqsubset>val v\<^sub>1"}, which
+ − 1535
however contradicts the second assumption about @{term "v\<^sub>1"} being the smallest
+ − 1536
element in @{term "LV r s"}. So we are done in this case too.\qed
+ − 1537
\end{proof}
+ − 1538
+ − 1539
\noindent
+ − 1540
From this we can also show
+ − 1541
that if @{term "LV r s"} is non-empty (or equivalently @{term "s \<in> L r"}) then
+ − 1542
it has a unique minimal element:
+ − 1543
+ − 1544
\begin{corollary}
+ − 1545
@{thm [mode=IfThen] Least_existence1}
+ − 1546
\end{corollary}
+ − 1547
+ − 1548
+ − 1549
+ − 1550
\noindent To sum up, we have shown that the (unique) minimal elements
+ − 1551
of the ordering by Okui and Suzuki are exactly the \<open>POSIX\<close>
+ − 1552
values we defined inductively in Section~\ref{posixsec}. This provides
+ − 1553
an independent confirmation that our ternary relation formalises the
+ − 1554
informal POSIX rules.
+ − 1555
+ − 1556
\<close>
+ − 1557
+ − 1558
section \<open>Bitcoded Lexing\<close>
+ − 1559
+ − 1560
+ − 1561
+ − 1562
+ − 1563
text \<open>
+ − 1564
+ − 1565
Incremental calculation of the value. To simplify the proof we first define the function
+ − 1566
@{const flex} which calculates the ``iterated'' injection function. With this we can
+ − 1567
rewrite the lexer as
+ − 1568
+ − 1569
\begin{center}
+ − 1570
@{thm lexer_flex}
+ − 1571
\end{center}
+ − 1572
+ − 1573
+ − 1574
\<close>
+ − 1575
+ − 1576
section \<open>Optimisations\<close>
+ − 1577
+ − 1578
text \<open>
+ − 1579
+ − 1580
Derivatives as calculated by \Brz's method are usually more complex
+ − 1581
regular expressions than the initial one; the result is that the
+ − 1582
derivative-based matching and lexing algorithms are often abysmally slow.
+ − 1583
However, various optimisations are possible, such as the simplifications
+ − 1584
of @{term "ALT ZERO r"}, @{term "ALT r ZERO"}, @{term "SEQ ONE r"} and
+ − 1585
@{term "SEQ r ONE"} to @{term r}. These simplifications can speed up the
+ − 1586
algorithms considerably, as noted in \cite{Sulzmann2014}. One of the
+ − 1587
advantages of having a simple specification and correctness proof is that
+ − 1588
the latter can be refined to prove the correctness of such simplification
+ − 1589
steps. While the simplification of regular expressions according to
+ − 1590
rules like
+ − 1591
+ − 1592
\begin{equation}\label{Simpl}
+ − 1593
\begin{array}{lcllcllcllcl}
+ − 1594
@{term "ALT ZERO r"} & \<open>\<Rightarrow>\<close> & @{term r} \hspace{8mm}%\\
+ − 1595
@{term "ALT r ZERO"} & \<open>\<Rightarrow>\<close> & @{term r} \hspace{8mm}%\\
+ − 1596
@{term "SEQ ONE r"} & \<open>\<Rightarrow>\<close> & @{term r} \hspace{8mm}%\\
+ − 1597
@{term "SEQ r ONE"} & \<open>\<Rightarrow>\<close> & @{term r}
+ − 1598
\end{array}
+ − 1599
\end{equation}
+ − 1600
+ − 1601
\noindent is well understood, there is an obstacle with the POSIX value
+ − 1602
calculation algorithm by Sulzmann and Lu: if we build a derivative regular
+ − 1603
expression and then simplify it, we will calculate a POSIX value for this
+ − 1604
simplified derivative regular expression, \emph{not} for the original (unsimplified)
+ − 1605
derivative regular expression. Sulzmann and Lu \cite{Sulzmann2014} overcome this obstacle by
+ − 1606
not just calculating a simplified regular expression, but also calculating
+ − 1607
a \emph{rectification function} that ``repairs'' the incorrect value.
+ − 1608
+ − 1609
The rectification functions can be (slightly clumsily) implemented in
+ − 1610
Isabelle/HOL as follows using some auxiliary functions:
+ − 1611
+ − 1612
\begin{center}
+ − 1613
\begin{tabular}{lcl}
+ − 1614
@{thm (lhs) F_RIGHT.simps(1)} & $\dn$ & \<open>Right (f v)\<close>\\
+ − 1615
@{thm (lhs) F_LEFT.simps(1)} & $\dn$ & \<open>Left (f v)\<close>\\
+ − 1616
+ − 1617
@{thm (lhs) F_ALT.simps(1)} & $\dn$ & \<open>Right (f\<^sub>2 v)\<close>\\
+ − 1618
@{thm (lhs) F_ALT.simps(2)} & $\dn$ & \<open>Left (f\<^sub>1 v)\<close>\\
+ − 1619
+ − 1620
@{thm (lhs) F_SEQ1.simps(1)} & $\dn$ & \<open>Seq (f\<^sub>1 ()) (f\<^sub>2 v)\<close>\\
+ − 1621
@{thm (lhs) F_SEQ2.simps(1)} & $\dn$ & \<open>Seq (f\<^sub>1 v) (f\<^sub>2 ())\<close>\\
+ − 1622
@{thm (lhs) F_SEQ.simps(1)} & $\dn$ & \<open>Seq (f\<^sub>1 v\<^sub>1) (f\<^sub>2 v\<^sub>2)\<close>\medskip\\
+ − 1623
%\end{tabular}
+ − 1624
%
+ − 1625
%\begin{tabular}{lcl}
+ − 1626
@{term "simp_ALT (ZERO, DUMMY) (r\<^sub>2, f\<^sub>2)"} & $\dn$ & @{term "(r\<^sub>2, F_RIGHT f\<^sub>2)"}\\
+ − 1627
@{term "simp_ALT (r\<^sub>1, f\<^sub>1) (ZERO, DUMMY)"} & $\dn$ & @{term "(r\<^sub>1, F_LEFT f\<^sub>1)"}\\
+ − 1628
@{term "simp_ALT (r\<^sub>1, f\<^sub>1) (r\<^sub>2, f\<^sub>2)"} & $\dn$ & @{term "(ALT r\<^sub>1 r\<^sub>2, F_ALT f\<^sub>1 f\<^sub>2)"}\\
+ − 1629
@{term "simp_SEQ (ONE, f\<^sub>1) (r\<^sub>2, f\<^sub>2)"} & $\dn$ & @{term "(r\<^sub>2, F_SEQ1 f\<^sub>1 f\<^sub>2)"}\\
+ − 1630
@{term "simp_SEQ (r\<^sub>1, f\<^sub>1) (ONE, f\<^sub>2)"} & $\dn$ & @{term "(r\<^sub>1, F_SEQ2 f\<^sub>1 f\<^sub>2)"}\\
+ − 1631
@{term "simp_SEQ (r\<^sub>1, f\<^sub>1) (r\<^sub>2, f\<^sub>2)"} & $\dn$ & @{term "(SEQ r\<^sub>1 r\<^sub>2, F_SEQ f\<^sub>1 f\<^sub>2)"}\\
+ − 1632
\end{tabular}
+ − 1633
\end{center}
+ − 1634
+ − 1635
\noindent
+ − 1636
The functions \<open>simp\<^bsub>Alt\<^esub>\<close> and \<open>simp\<^bsub>Seq\<^esub>\<close> encode the simplification rules
+ − 1637
in \eqref{Simpl} and compose the rectification functions (simplifications can occur
+ − 1638
deep inside the regular expression). The main simplification function is then
+ − 1639
+ − 1640
\begin{center}
+ − 1641
\begin{tabular}{lcl}
+ − 1642
@{term "simp (ALT r\<^sub>1 r\<^sub>2)"} & $\dn$ & @{term "simp_ALT (simp r\<^sub>1) (simp r\<^sub>2)"}\\
+ − 1643
@{term "simp (SEQ r\<^sub>1 r\<^sub>2)"} & $\dn$ & @{term "simp_SEQ (simp r\<^sub>1) (simp r\<^sub>2)"}\\
+ − 1644
@{term "simp r"} & $\dn$ & @{term "(r, id)"}\\
+ − 1645
\end{tabular}
+ − 1646
\end{center}
+ − 1647
+ − 1648
\noindent where @{term "id"} stands for the identity function. The
+ − 1649
function @{const simp} returns a simplified regular expression and a corresponding
+ − 1650
rectification function. Note that we do not simplify under stars: this
+ − 1651
seems to slow down the algorithm, rather than speed it up. The optimised
+ − 1652
lexer is then given by the clauses:
+ − 1653
+ − 1654
\begin{center}
+ − 1655
\begin{tabular}{lcl}
+ − 1656
@{thm (lhs) slexer.simps(1)} & $\dn$ & @{thm (rhs) slexer.simps(1)}\\
+ − 1657
@{thm (lhs) slexer.simps(2)} & $\dn$ &
+ − 1658
\<open>let (r\<^sub>s, f\<^sub>r) = simp (r \<close>$\backslash$\<open> c) in\<close>\\
+ − 1659
& & \<open>case\<close> @{term "slexer r\<^sub>s s"} \<open>of\<close>\\
+ − 1660
& & \phantom{$|$} @{term "None"} \<open>\<Rightarrow>\<close> @{term None}\\
+ − 1661
& & $|$ @{term "Some v"} \<open>\<Rightarrow>\<close> \<open>Some (inj r c (f\<^sub>r v))\<close>
+ − 1662
\end{tabular}
+ − 1663
\end{center}
+ − 1664
+ − 1665
\noindent
+ − 1666
In the second clause we first calculate the derivative @{term "der c r"}
+ − 1667
and then simplify the result.
+ − 1668
+ − 1669
text \<open>
+ − 1670
+ − 1671
Incremental calculation of the value. To simplify the proof we first define the function
+ − 1672
@{const flex} which calculates the ``iterated'' injection function. With this we can
+ − 1673
rewrite the lexer as
+ − 1674
+ − 1675
\begin{center}
+ − 1676
@{thm lexer_flex}
+ − 1677
\end{center}
+ − 1678
+ − 1679
\begin{center}
+ − 1680
\begin{tabular}{lcl}
+ − 1681
@{thm (lhs) code.simps(1)} & $\dn$ & @{thm (rhs) code.simps(1)}\\
+ − 1682
@{thm (lhs) code.simps(2)} & $\dn$ & @{thm (rhs) code.simps(2)}\\
+ − 1683
@{thm (lhs) code.simps(3)} & $\dn$ & @{thm (rhs) code.simps(3)}\\
+ − 1684
@{thm (lhs) code.simps(4)} & $\dn$ & @{thm (rhs) code.simps(4)}\\
+ − 1685
@{thm (lhs) code.simps(5)[of "v\<^sub>1" "v\<^sub>2"]} & $\dn$ & @{thm (rhs) code.simps(5)[of "v\<^sub>1" "v\<^sub>2"]}\\
+ − 1686
@{thm (lhs) code.simps(6)} & $\dn$ & @{thm (rhs) code.simps(6)}\\
+ − 1687
@{thm (lhs) code.simps(7)} & $\dn$ & @{thm (rhs) code.simps(7)}
+ − 1688
\end{tabular}
+ − 1689
\end{center}
+ − 1690
+ − 1691
\begin{center}
+ − 1692
\begin{tabular}{lcl}
+ − 1693
@{term areg} & $::=$ & @{term "AZERO"}\\
+ − 1694
& $\mid$ & @{term "AONE bs"}\\
+ − 1695
& $\mid$ & @{term "ACHAR bs c"}\\
+ − 1696
& $\mid$ & @{term "AALT bs r1 r2"}\\
+ − 1697
& $\mid$ & @{term "ASEQ bs r\<^sub>1 r\<^sub>2"}\\
+ − 1698
& $\mid$ & @{term "ASTAR bs r"}
+ − 1699
\end{tabular}
+ − 1700
\end{center}
+ − 1701
+ − 1702
+ − 1703
\begin{center}
+ − 1704
\begin{tabular}{lcl}
+ − 1705
@{thm (lhs) intern.simps(1)} & $\dn$ & @{thm (rhs) intern.simps(1)}\\
+ − 1706
@{thm (lhs) intern.simps(2)} & $\dn$ & @{thm (rhs) intern.simps(2)}\\
+ − 1707
@{thm (lhs) intern.simps(3)} & $\dn$ & @{thm (rhs) intern.simps(3)}\\
+ − 1708
@{thm (lhs) intern.simps(4)[of "r\<^sub>1" "r\<^sub>2"]} & $\dn$ & @{thm (rhs) intern.simps(4)[of "r\<^sub>1" "r\<^sub>2"]}\\
+ − 1709
@{thm (lhs) intern.simps(5)[of "r\<^sub>1" "r\<^sub>2"]} & $\dn$ & @{thm (rhs) intern.simps(5)[of "r\<^sub>1" "r\<^sub>2"]}\\
+ − 1710
@{thm (lhs) intern.simps(6)} & $\dn$ & @{thm (rhs) intern.simps(6)}\\
+ − 1711
\end{tabular}
+ − 1712
\end{center}
+ − 1713
+ − 1714
\begin{center}
+ − 1715
\begin{tabular}{lcl}
+ − 1716
@{thm (lhs) erase.simps(1)} & $\dn$ & @{thm (rhs) erase.simps(1)}\\
+ − 1717
@{thm (lhs) erase.simps(2)[of bs]} & $\dn$ & @{thm (rhs) erase.simps(2)[of bs]}\\
+ − 1718
@{thm (lhs) erase.simps(3)[of bs]} & $\dn$ & @{thm (rhs) erase.simps(3)[of bs]}\\
+ − 1719
@{thm (lhs) erase.simps(4)[of bs "r\<^sub>1" "r\<^sub>2"]} & $\dn$ & @{thm (rhs) erase.simps(4)[of bs "r\<^sub>1" "r\<^sub>2"]}\\
+ − 1720
@{thm (lhs) erase.simps(5)[of bs "r\<^sub>1" "r\<^sub>2"]} & $\dn$ & @{thm (rhs) erase.simps(5)[of bs "r\<^sub>1" "r\<^sub>2"]}\\
+ − 1721
@{thm (lhs) erase.simps(6)[of bs]} & $\dn$ & @{thm (rhs) erase.simps(6)[of bs]}\\
+ − 1722
\end{tabular}
+ − 1723
\end{center}
+ − 1724
+ − 1725
Some simple facts about erase
+ − 1726
+ − 1727
\begin{lemma}\mbox{}\\
+ − 1728
@{thm erase_bder}\\
+ − 1729
@{thm erase_intern}
+ − 1730
\end{lemma}
+ − 1731
+ − 1732
\begin{center}
+ − 1733
\begin{tabular}{lcl}
+ − 1734
@{thm (lhs) bnullable.simps(1)} & $\dn$ & @{thm (rhs) bnullable.simps(1)}\\
+ − 1735
@{thm (lhs) bnullable.simps(2)} & $\dn$ & @{thm (rhs) bnullable.simps(2)}\\
+ − 1736
@{thm (lhs) bnullable.simps(3)} & $\dn$ & @{thm (rhs) bnullable.simps(3)}\\
+ − 1737
@{thm (lhs) bnullable.simps(4)[of bs "r\<^sub>1" "r\<^sub>2"]} & $\dn$ & @{thm (rhs) bnullable.simps(4)[of bs "r\<^sub>1" "r\<^sub>2"]}\\
+ − 1738
@{thm (lhs) bnullable.simps(5)[of bs "r\<^sub>1" "r\<^sub>2"]} & $\dn$ & @{thm (rhs) bnullable.simps(5)[of bs "r\<^sub>1" "r\<^sub>2"]}\\
+ − 1739
@{thm (lhs) bnullable.simps(6)} & $\dn$ & @{thm (rhs) bnullable.simps(6)}\medskip\\
+ − 1740
+ − 1741
% \end{tabular}
+ − 1742
% \end{center}
+ − 1743
+ − 1744
% \begin{center}
+ − 1745
% \begin{tabular}{lcl}
+ − 1746
+ − 1747
@{thm (lhs) bder.simps(1)} & $\dn$ & @{thm (rhs) bder.simps(1)}\\
+ − 1748
@{thm (lhs) bder.simps(2)} & $\dn$ & @{thm (rhs) bder.simps(2)}\\
+ − 1749
@{thm (lhs) bder.simps(3)} & $\dn$ & @{thm (rhs) bder.simps(3)}\\
+ − 1750
@{thm (lhs) bder.simps(4)[of bs "r\<^sub>1" "r\<^sub>2"]} & $\dn$ & @{thm (rhs) bder.simps(4)[of bs "r\<^sub>1" "r\<^sub>2"]}\\
+ − 1751
@{thm (lhs) bder.simps(5)[of bs "r\<^sub>1" "r\<^sub>2"]} & $\dn$ & @{thm (rhs) bder.simps(5)[of bs "r\<^sub>1" "r\<^sub>2"]}\\
+ − 1752
@{thm (lhs) bder.simps(6)} & $\dn$ & @{thm (rhs) bder.simps(6)}
+ − 1753
\end{tabular}
+ − 1754
\end{center}
+ − 1755
+ − 1756
+ − 1757
\begin{center}
+ − 1758
\begin{tabular}{lcl}
+ − 1759
@{thm (lhs) bmkeps.simps(1)} & $\dn$ & @{thm (rhs) bmkeps.simps(1)}\\
+ − 1760
@{thm (lhs) bmkeps.simps(2)[of bs "r\<^sub>1" "r\<^sub>2"]} & $\dn$ & @{thm (rhs) bmkeps.simps(2)[of bs "r\<^sub>1" "r\<^sub>2"]}\\
+ − 1761
@{thm (lhs) bmkeps.simps(3)[of bs "r\<^sub>1" "r\<^sub>2"]} & $\dn$ & @{thm (rhs) bmkeps.simps(3)[of bs "r\<^sub>1" "r\<^sub>2"]}\\
+ − 1762
@{thm (lhs) bmkeps.simps(4)} & $\dn$ & @{thm (rhs) bmkeps.simps(4)}\medskip\\
+ − 1763
\end{tabular}
+ − 1764
\end{center}
+ − 1765
+ − 1766
+ − 1767
@{thm [mode=IfThen] bder_retrieve}
+ − 1768
+ − 1769
By induction on \<open>r\<close>
+ − 1770
+ − 1771
\begin{theorem}[Main Lemma]\mbox{}\\
+ − 1772
@{thm [mode=IfThen] MAIN_decode}
+ − 1773
\end{theorem}
+ − 1774
+ − 1775
\noindent
+ − 1776
Definition of the bitcoded lexer
+ − 1777
+ − 1778
@{thm blexer_def}
+ − 1779
+ − 1780
+ − 1781
\begin{theorem}
+ − 1782
@{thm blexer_correctness}
+ − 1783
\end{theorem}
+ − 1784
+ − 1785
\<close>
+ − 1786
+ − 1787
section \<open>Optimisations\<close>
+ − 1788
+ − 1789
text \<open>
+ − 1790

Derivatives as calculated by \Brz's method are usually more complex
regular expressions than the initial one; the result is that the
derivative-based matching and lexing algorithms are often abysmally slow.
However, various optimisations are possible, such as the simplifications
of @{term "ALT ZERO r"}, @{term "ALT r ZERO"}, @{term "SEQ ONE r"} and
@{term "SEQ r ONE"} to @{term r}. These simplifications can speed up the
algorithms considerably, as noted in \cite{Sulzmann2014}. One of the
advantages of having a simple specification and correctness proof is that
the latter can be refined to prove the correctness of such simplification
steps. While the simplification of regular expressions according to
rules like
+ − 1802
+ − 1803
\begin{equation}\label{Simpl}
+ − 1804
\begin{array}{lcllcllcllcl}
+ − 1805
@{term "ALT ZERO r"} & \<open>\<Rightarrow>\<close> & @{term r} \hspace{8mm}%\\
+ − 1806
@{term "ALT r ZERO"} & \<open>\<Rightarrow>\<close> & @{term r} \hspace{8mm}%\\
+ − 1807
@{term "SEQ ONE r"} & \<open>\<Rightarrow>\<close> & @{term r} \hspace{8mm}%\\
+ − 1808
@{term "SEQ r ONE"} & \<open>\<Rightarrow>\<close> & @{term r}
+ − 1809
\end{array}
+ − 1810
\end{equation}
+ − 1811
+ − 1812
\noindent is well understood, there is an obstacle with the POSIX value
+ − 1813
calculation algorithm by Sulzmann and Lu: if we build a derivative regular
+ − 1814
expression and then simplify it, we will calculate a POSIX value for this
+ − 1815
simplified derivative regular expression, \emph{not} for the original (unsimplified)
+ − 1816
derivative regular expression. Sulzmann and Lu \cite{Sulzmann2014} overcome this obstacle by
+ − 1817
not just calculating a simplified regular expression, but also calculating
+ − 1818
a \emph{rectification function} that ``repairs'' the incorrect value.
+ − 1819
+ − 1820
The rectification functions can be (slightly clumsily) implemented in
+ − 1821
Isabelle/HOL as follows using some auxiliary functions:
+ − 1822
+ − 1823
\begin{center}
+ − 1824
\begin{tabular}{lcl}
+ − 1825
@{thm (lhs) F_RIGHT.simps(1)} & $\dn$ & \<open>Right (f v)\<close>\\
+ − 1826
@{thm (lhs) F_LEFT.simps(1)} & $\dn$ & \<open>Left (f v)\<close>\\
+ − 1827
+ − 1828
@{thm (lhs) F_ALT.simps(1)} & $\dn$ & \<open>Right (f\<^sub>2 v)\<close>\\
+ − 1829
@{thm (lhs) F_ALT.simps(2)} & $\dn$ & \<open>Left (f\<^sub>1 v)\<close>\\
+ − 1830
+ − 1831
@{thm (lhs) F_SEQ1.simps(1)} & $\dn$ & \<open>Seq (f\<^sub>1 ()) (f\<^sub>2 v)\<close>\\
+ − 1832
@{thm (lhs) F_SEQ2.simps(1)} & $\dn$ & \<open>Seq (f\<^sub>1 v) (f\<^sub>2 ())\<close>\\
+ − 1833
@{thm (lhs) F_SEQ.simps(1)} & $\dn$ & \<open>Seq (f\<^sub>1 v\<^sub>1) (f\<^sub>2 v\<^sub>2)\<close>\medskip\\
+ − 1834
%\end{tabular}
+ − 1835
%
+ − 1836
%\begin{tabular}{lcl}
+ − 1837
@{term "simp_ALT (ZERO, DUMMY) (r\<^sub>2, f\<^sub>2)"} & $\dn$ & @{term "(r\<^sub>2, F_RIGHT f\<^sub>2)"}\\
+ − 1838
@{term "simp_ALT (r\<^sub>1, f\<^sub>1) (ZERO, DUMMY)"} & $\dn$ & @{term "(r\<^sub>1, F_LEFT f\<^sub>1)"}\\
+ − 1839
@{term "simp_ALT (r\<^sub>1, f\<^sub>1) (r\<^sub>2, f\<^sub>2)"} & $\dn$ & @{term "(ALT r\<^sub>1 r\<^sub>2, F_ALT f\<^sub>1 f\<^sub>2)"}\\
+ − 1840
@{term "simp_SEQ (ONE, f\<^sub>1) (r\<^sub>2, f\<^sub>2)"} & $\dn$ & @{term "(r\<^sub>2, F_SEQ1 f\<^sub>1 f\<^sub>2)"}\\
+ − 1841
@{term "simp_SEQ (r\<^sub>1, f\<^sub>1) (ONE, f\<^sub>2)"} & $\dn$ & @{term "(r\<^sub>1, F_SEQ2 f\<^sub>1 f\<^sub>2)"}\\
+ − 1842
@{term "simp_SEQ (r\<^sub>1, f\<^sub>1) (r\<^sub>2, f\<^sub>2)"} & $\dn$ & @{term "(SEQ r\<^sub>1 r\<^sub>2, F_SEQ f\<^sub>1 f\<^sub>2)"}\\
+ − 1843
\end{tabular}
+ − 1844
\end{center}
+ − 1845
+ − 1846
\noindent
+ − 1847
The functions \<open>simp\<^bsub>Alt\<^esub>\<close> and \<open>simp\<^bsub>Seq\<^esub>\<close> encode the simplification rules
+ − 1848
in \eqref{Simpl} and compose the rectification functions (simplifications can occur
+ − 1849
deep inside the regular expression). The main simplification function is then
+ − 1850
+ − 1851
\begin{center}
+ − 1852
\begin{tabular}{lcl}
+ − 1853
@{term "simp (ALT r\<^sub>1 r\<^sub>2)"} & $\dn$ & @{term "simp_ALT (simp r\<^sub>1) (simp r\<^sub>2)"}\\
+ − 1854
@{term "simp (SEQ r\<^sub>1 r\<^sub>2)"} & $\dn$ & @{term "simp_SEQ (simp r\<^sub>1) (simp r\<^sub>2)"}\\
+ − 1855
@{term "simp r"} & $\dn$ & @{term "(r, id)"}\\
+ − 1856
\end{tabular}
+ − 1857
\end{center}
+ − 1858
+ − 1859
\noindent where @{term "id"} stands for the identity function. The
+ − 1860
function @{const simp} returns a simplified regular expression and a corresponding
+ − 1861
rectification function. Note that we do not simplify under stars: this
+ − 1862
seems to slow down the algorithm, rather than speed it up. The optimised
+ − 1863
lexer is then given by the clauses:
+ − 1864
+ − 1865
\begin{center}
+ − 1866
\begin{tabular}{lcl}
+ − 1867
@{thm (lhs) slexer.simps(1)} & $\dn$ & @{thm (rhs) slexer.simps(1)}\\
+ − 1868
@{thm (lhs) slexer.simps(2)} & $\dn$ &
+ − 1869
\<open>let (r\<^sub>s, f\<^sub>r) = simp (r \<close>$\backslash$\<open> c) in\<close>\\
+ − 1870
& & \<open>case\<close> @{term "slexer r\<^sub>s s"} \<open>of\<close>\\
+ − 1871
& & \phantom{$|$} @{term "None"} \<open>\<Rightarrow>\<close> @{term None}\\
+ − 1872
& & $|$ @{term "Some v"} \<open>\<Rightarrow>\<close> \<open>Some (inj r c (f\<^sub>r v))\<close>
+ − 1873
\end{tabular}
+ − 1874
\end{center}
+ − 1875
+ − 1876
\noindent
+ − 1877
In the second clause we first calculate the derivative @{term "der c r"}
+ − 1878
and then simplify the result. This gives us a simplified derivative
+ − 1879
\<open>r\<^sub>s\<close> and a rectification function \<open>f\<^sub>r\<close>. The lexer
+ − 1880
is then recursively called with the simplified derivative, but before
+ − 1881
we inject the character @{term c} into the value @{term v}, we need to rectify
+ − 1882
@{term v} (that is construct @{term "f\<^sub>r v"}). Before we can establish the correctness
+ − 1883
of @{term "slexer"}, we need to show that simplification preserves the language
+ − 1884
and simplification preserves our POSIX relation once the value is rectified
+ − 1885
(recall @{const "simp"} generates a (regular expression, rectification function) pair):
+ − 1886
+ − 1887
\begin{lemma}\mbox{}\smallskip\\\label{slexeraux}
+ − 1888
\begin{tabular}{ll}
+ − 1889
(1) & @{thm L_fst_simp[symmetric]}\\
+ − 1890
(2) & @{thm[mode=IfThen] Posix_simp}
+ − 1891
\end{tabular}
+ − 1892
\end{lemma}
+ − 1893
+ − 1894
\begin{proof} Both are by induction on \<open>r\<close>. There is no
+ − 1895
interesting case for the first statement. For the second statement,
+ − 1896
of interest are the @{term "r = ALT r\<^sub>1 r\<^sub>2"} and @{term "r = SEQ r\<^sub>1
+ − 1897
r\<^sub>2"} cases. In each case we have to analyse four subcases whether
+ − 1898
@{term "fst (simp r\<^sub>1)"} and @{term "fst (simp r\<^sub>2)"} equals @{const
+ − 1899
ZERO} (respectively @{const ONE}). For example for @{term "r = ALT
+ − 1900
r\<^sub>1 r\<^sub>2"}, consider the subcase @{term "fst (simp r\<^sub>1) = ZERO"} and
+ − 1901
@{term "fst (simp r\<^sub>2) \<noteq> ZERO"}. By assumption we know @{term "s \<in>
+ − 1902
fst (simp (ALT r\<^sub>1 r\<^sub>2)) \<rightarrow> v"}. From this we can infer @{term "s \<in> fst (simp r\<^sub>2) \<rightarrow> v"}
+ − 1903
and by IH also (*) @{term "s \<in> r\<^sub>2 \<rightarrow> (snd (simp r\<^sub>2) v)"}. Given @{term "fst (simp r\<^sub>1) = ZERO"}
+ − 1904
we know @{term "L (fst (simp r\<^sub>1)) = {}"}. By the first statement
+ − 1905
@{term "L r\<^sub>1"} is the empty set, meaning (**) @{term "s \<notin> L r\<^sub>1"}.
+ − 1906
Taking (*) and (**) together gives by the \mbox{\<open>P+R\<close>}-rule
+ − 1907
@{term "s \<in> ALT r\<^sub>1 r\<^sub>2 \<rightarrow> Right (snd (simp r\<^sub>2) v)"}. In turn this
+ − 1908
gives @{term "s \<in> ALT r\<^sub>1 r\<^sub>2 \<rightarrow> snd (simp (ALT r\<^sub>1 r\<^sub>2)) v"} as we need to show.
+ − 1909
The other cases are similar.\qed
+ − 1910
\end{proof}
+ − 1911
+ − 1912
\noindent We can now prove relatively straightforwardly that the
+ − 1913
optimised lexer produces the expected result:
+ − 1914
+ − 1915
\begin{theorem}
+ − 1916
@{thm slexer_correctness}
+ − 1917
\end{theorem}
+ − 1918
+ − 1919
\begin{proof} By induction on @{term s} generalising over @{term
+ − 1920
r}. The case @{term "[]"} is trivial. For the cons-case suppose the
+ − 1921
string is of the form @{term "c # s"}. By induction hypothesis we
+ − 1922
know @{term "slexer r s = lexer r s"} holds for all @{term r} (in
+ − 1923
particular for @{term "r"} being the derivative @{term "der c
+ − 1924
r"}). Let @{term "r\<^sub>s"} be the simplified derivative regular expression, that is @{term
+ − 1925
"fst (simp (der c r))"}, and @{term "f\<^sub>r"} be the rectification
+ − 1926
function, that is @{term "snd (simp (der c r))"}. We distinguish the cases
+ − 1927
whether (*) @{term "s \<in> L (der c r)"} or not. In the first case we
+ − 1928
have by Theorem~\ref{lexercorrect}(2) a value @{term "v"} so that @{term
+ − 1929
"lexer (der c r) s = Some v"} and @{term "s \<in> der c r \<rightarrow> v"} hold.
+ − 1930
By Lemma~\ref{slexeraux}(1) we can also infer from~(*) that @{term "s
+ − 1931
\<in> L r\<^sub>s"} holds. Hence we know by Theorem~\ref{lexercorrect}(2) that
+ − 1932
there exists a @{term "v'"} with @{term "lexer r\<^sub>s s = Some v'"} and
+ − 1933
@{term "s \<in> r\<^sub>s \<rightarrow> v'"}. From the latter we know by
+ − 1934
Lemma~\ref{slexeraux}(2) that @{term "s \<in> der c r \<rightarrow> (f\<^sub>r v')"} holds.
+ − 1935
By the uniqueness of the POSIX relation (Theorem~\ref{posixdeterm}) we
+ − 1936
can infer that @{term v} is equal to @{term "f\<^sub>r v'"}---that is the
+ − 1937
rectification function applied to @{term "v'"}
+ − 1938
produces the original @{term "v"}. Now the case follows by the
+ − 1939
definitions of @{const lexer} and @{const slexer}.
+ − 1940
+ − 1941
In the second case where @{term "s \<notin> L (der c r)"} we have that
+ − 1942
@{term "lexer (der c r) s = None"} by Theorem~\ref{lexercorrect}(1). We
+ − 1943
also know by Lemma~\ref{slexeraux}(1) that @{term "s \<notin> L r\<^sub>s"}. Hence
+ − 1944
@{term "lexer r\<^sub>s s = None"} by Theorem~\ref{lexercorrect}(1) and
+ − 1945
by IH then also @{term "slexer r\<^sub>s s = None"}. With this we can
+ − 1946
conclude in this case too.\qed
+ − 1947
+ − 1948
\end{proof}
+ − 1949
+ − 1950
\<close>
+ − 2025
+ − 2026
section \<open>HERE\<close>
+ − 2027
+ − 2028
text \<open>
+ − 2029
+ − 2030
\begin{lemma}
+ − 2031
@{thm [mode=IfThen] bder_retrieve}
+ − 2032
\end{lemma}
+ − 2033
+ − 2034
\begin{proof}
+ − 2035
By induction on the definition of @{term "erase r"}. The cases for rule 1) and 2) are
+ − 2036
straightforward as @{term "der c ZERO"} and @{term "der c ONE"} are both equal to
+ − 2037
@{term ZERO}. This means @{term "\<Turnstile> v : ZERO"} cannot hold. Similarly in case of rule 3)
+ − 2038
where @{term r} is of the form @{term "ACHAR d"} with @{term "c = d"}. Then by assumption
+ − 2039
we know @{term "\<Turnstile> v : ONE"}, which implies @{term "v = Void"}. The equation follows by
+ − 2040
simplification of left- and right-hand side. In case @{term "c \<noteq> d"} we have again
+ − 2041
@{term "\<Turnstile> v : ZERO"}, which cannot hold.
+ − 2042
+ − 2043
For rule 4a) we have again @{term "\<Turnstile> v : ZERO"}. The property holds by IH for rule 4b).
+ − 2044
The induction hypothesis is
+ − 2045
\[
+ − 2046
@{term "retrieve (bder c r) v = retrieve r (injval (erase r) c v)"}
+ − 2047
\]
+ − 2048
which is what left- and right-hand side simplify to. The slightly more interesting case
+ − 2049
is for 4c). By assumption we have
+ − 2050
@{term "\<Turnstile> v : ALT (der c (erase r\<^sub>1)) (der c (erase (AALTs bs (r\<^sub>2 # rs))))"}. This means we
+ − 2051
have either (*) @{term "\<Turnstile> v1 : der c (erase r\<^sub>1)"} with @{term "v = Left v1"} or
+ − 2052
(**) @{term "\<Turnstile> v2 : der c (erase (AALTs bs (r\<^sub>2 # rs)))"} with @{term "v = Right v2"}.
+ − 2053
The former case is straightforward by simplification. The second case is \ldots TBD.
+ − 2054
+ − 2055
Rule 5) TBD.
+ − 2056
+ − 2057
Finally for rule 6) the reasoning is as follows: By assumption we have
+ − 2058
@{term "\<Turnstile> v : SEQ (der c (erase r)) (STAR (erase r))"}. This means we also have
+ − 2059
@{term "v = Seq v1 v2"}, @{term "\<Turnstile> v1 : der c (erase r)"} and @{term "v2 = Stars vs"}.
+ − 2060
We want to prove
+ − 2061
\begin{align}
+ − 2062
& @{term "retrieve (ASEQ bs (fuse [Z] (bder c r)) (ASTAR [] r)) v"}\\
+ − 2063
&= @{term "retrieve (ASTAR bs r) (injval (STAR (erase r)) c v)"}
+ − 2064
\end{align}
+ − 2065
The right-hand side @{term inj}-expression is equal to
+ − 2066
@{term "Stars (injval (erase r) c v1 # vs)"}, which means the @{term retrieve}-expression
+ − 2067
simplifies to
+ − 2068
\[
+ − 2069
@{term "bs @ [Z] @ retrieve r (injval (erase r) c v1) @ retrieve (ASTAR [] r) (Stars vs)"}
+ − 2070
\]
+ − 2071
The left-hand side (3) above simplifies to
+ − 2072
\[
+ − 2073
@{term "bs @ retrieve (fuse [Z] (bder c r)) v1 @ retrieve (ASTAR [] r) (Stars vs)"}
+ − 2074
\]
+ − 2075
We can move out the @{term "fuse [Z]"} and then use the IH to show that left-hand side
+ − 2076
and right-hand side are equal. This completes the proof.
+ − 2077
\end{proof}
+ − 2078
+ − 2079
+ − 2080
+ − 2081
\bibliographystyle{plain}
+ − 2082
\bibliography{root}
+ − 2083
+ − 2084
\<close>
+ − 2085
(*<*)
+ − 2086
end
+ − 2087
(*>*)
+ − 2088
378
+ − 2089
+ − 2090
367
+ − 2091
(*
+ − 2092
+ − 2093
\begin{center}
\begin{tabular}{c}
@{thm[mode=Rule] aggressive.intros(1)[of "bs" "bs1" "rs" "r"]}
\end{tabular}
\end{center}
+ − 2098
+ − 2099
+ − 2100
+ − 2101
\begin{center}
367
+ − 2102
\begin{tabular}{lcl}
+ − 2103
@{thm (lhs) code.simps(1)} & $\dn$ & @{thm (rhs) code.simps(1)}\\
+ − 2104
@{thm (lhs) code.simps(2)} & $\dn$ & @{thm (rhs) code.simps(2)}\\
+ − 2105
@{thm (lhs) code.simps(3)} & $\dn$ & @{thm (rhs) code.simps(3)}\\
+ − 2106
@{thm (lhs) code.simps(4)} & $\dn$ & @{thm (rhs) code.simps(4)}\\
+ − 2107
@{thm (lhs) code.simps(5)[of "v\<^sub>1" "v\<^sub>2"]} & $\dn$ & @{thm (rhs) code.simps(5)[of "v\<^sub>1" "v\<^sub>2"]}\\
+ − 2108
@{thm (lhs) code.simps(6)} & $\dn$ & @{thm (rhs) code.simps(6)}\\
+ − 2109
@{thm (lhs) code.simps(7)} & $\dn$ & @{thm (rhs) code.simps(7)}
+ − 2110
\end{tabular}
+ − 2111
\end{center}
+ − 2112
+ − 2113
\begin{center}
+ − 2114
\begin{tabular}{lcl}
+ − 2115
@{term areg} & $::=$ & @{term "AZERO"}\\
+ − 2116
& $\mid$ & @{term "AONE bs"}\\
+ − 2117
& $\mid$ & @{term "ACHAR bs c"}\\
+ − 2118
& $\mid$ & @{term "AALT bs r1 r2"}\\
+ − 2119
& $\mid$ & @{term "ASEQ bs r\<^sub>1 r\<^sub>2"}\\
+ − 2120
& $\mid$ & @{term "ASTAR bs r"}
+ − 2121
\end{tabular}
+ − 2122
\end{center}
+ − 2123
+ − 2124
+ − 2125
\begin{center}
+ − 2126
\begin{tabular}{lcl}
+ − 2127
@{thm (lhs) intern.simps(1)} & $\dn$ & @{thm (rhs) intern.simps(1)}\\
+ − 2128
@{thm (lhs) intern.simps(2)} & $\dn$ & @{thm (rhs) intern.simps(2)}\\
+ − 2129
@{thm (lhs) intern.simps(3)} & $\dn$ & @{thm (rhs) intern.simps(3)}\\
+ − 2130
@{thm (lhs) intern.simps(4)[of "r\<^sub>1" "r\<^sub>2"]} & $\dn$ & @{thm (rhs) intern.simps(4)[of "r\<^sub>1" "r\<^sub>2"]}\\
+ − 2131
@{thm (lhs) intern.simps(5)[of "r\<^sub>1" "r\<^sub>2"]} & $\dn$ & @{thm (rhs) intern.simps(5)[of "r\<^sub>1" "r\<^sub>2"]}\\
+ − 2132
@{thm (lhs) intern.simps(6)} & $\dn$ & @{thm (rhs) intern.simps(6)}\\
+ − 2133
\end{tabular}
+ − 2134
\end{center}
+ − 2135
+ − 2136
\begin{center}
+ − 2137
\begin{tabular}{lcl}
+ − 2138
@{thm (lhs) erase.simps(1)} & $\dn$ & @{thm (rhs) erase.simps(1)}\\
+ − 2139
@{thm (lhs) erase.simps(2)[of bs]} & $\dn$ & @{thm (rhs) erase.simps(2)[of bs]}\\
+ − 2140
@{thm (lhs) erase.simps(3)[of bs]} & $\dn$ & @{thm (rhs) erase.simps(3)[of bs]}\\
+ − 2141
@{thm (lhs) erase.simps(4)[of bs "r\<^sub>1" "r\<^sub>2"]} & $\dn$ & @{thm (rhs) erase.simps(4)[of bs "r\<^sub>1" "r\<^sub>2"]}\\
+ − 2142
@{thm (lhs) erase.simps(5)[of bs "r\<^sub>1" "r\<^sub>2"]} & $\dn$ & @{thm (rhs) erase.simps(5)[of bs "r\<^sub>1" "r\<^sub>2"]}\\
+ − 2143
@{thm (lhs) erase.simps(6)[of bs]} & $\dn$ & @{thm (rhs) erase.simps(6)[of bs]}\\
+ − 2144
\end{tabular}
+ − 2145
\end{center}
+ − 2146
+ − 2147
Some simple facts about erase
+ − 2148
+ − 2149
\begin{lemma}\mbox{}\\
+ − 2150
@{thm erase_bder}\\
+ − 2151
@{thm erase_intern}
+ − 2152
\end{lemma}
+ − 2153
+ − 2154
\begin{center}
+ − 2155
\begin{tabular}{lcl}
+ − 2156
@{thm (lhs) bnullable.simps(1)} & $\dn$ & @{thm (rhs) bnullable.simps(1)}\\
+ − 2157
@{thm (lhs) bnullable.simps(2)} & $\dn$ & @{thm (rhs) bnullable.simps(2)}\\
+ − 2158
@{thm (lhs) bnullable.simps(3)} & $\dn$ & @{thm (rhs) bnullable.simps(3)}\\
+ − 2159
@{thm (lhs) bnullable.simps(4)[of bs "r\<^sub>1" "r\<^sub>2"]} & $\dn$ & @{thm (rhs) bnullable.simps(4)[of bs "r\<^sub>1" "r\<^sub>2"]}\\
+ − 2160
@{thm (lhs) bnullable.simps(5)[of bs "r\<^sub>1" "r\<^sub>2"]} & $\dn$ & @{thm (rhs) bnullable.simps(5)[of bs "r\<^sub>1" "r\<^sub>2"]}\\
+ − 2161
@{thm (lhs) bnullable.simps(6)} & $\dn$ & @{thm (rhs) bnullable.simps(6)}\medskip\\
+ − 2162
+ − 2163
% \end{tabular}
+ − 2164
% \end{center}
+ − 2165
+ − 2166
% \begin{center}
+ − 2167
% \begin{tabular}{lcl}
+ − 2168
+ − 2169
@{thm (lhs) bder.simps(1)} & $\dn$ & @{thm (rhs) bder.simps(1)}\\
+ − 2170
@{thm (lhs) bder.simps(2)} & $\dn$ & @{thm (rhs) bder.simps(2)}\\
+ − 2171
@{thm (lhs) bder.simps(3)} & $\dn$ & @{thm (rhs) bder.simps(3)}\\
+ − 2172
@{thm (lhs) bder.simps(4)[of bs "r\<^sub>1" "r\<^sub>2"]} & $\dn$ & @{thm (rhs) bder.simps(4)[of bs "r\<^sub>1" "r\<^sub>2"]}\\
+ − 2173
@{thm (lhs) bder.simps(5)[of bs "r\<^sub>1" "r\<^sub>2"]} & $\dn$ & @{thm (rhs) bder.simps(5)[of bs "r\<^sub>1" "r\<^sub>2"]}\\
+ − 2174
@{thm (lhs) bder.simps(6)} & $\dn$ & @{thm (rhs) bder.simps(6)}
+ − 2175
\end{tabular}
+ − 2176
\end{center}
+ − 2177
+ − 2178
+ − 2179
\begin{center}
+ − 2180
\begin{tabular}{lcl}
+ − 2181
@{thm (lhs) bmkeps.simps(1)} & $\dn$ & @{thm (rhs) bmkeps.simps(1)}\\
+ − 2182
@{thm (lhs) bmkeps.simps(2)[of bs "r\<^sub>1" "r\<^sub>2"]} & $\dn$ & @{thm (rhs) bmkeps.simps(2)[of bs "r\<^sub>1" "r\<^sub>2"]}\\
+ − 2183
@{thm (lhs) bmkeps.simps(3)[of bs "r\<^sub>1" "r\<^sub>2"]} & $\dn$ & @{thm (rhs) bmkeps.simps(3)[of bs "r\<^sub>1" "r\<^sub>2"]}\\
+ − 2184
@{thm (lhs) bmkeps.simps(4)} & $\dn$ & @{thm (rhs) bmkeps.simps(4)}\medskip\\
+ − 2185
\end{tabular}
+ − 2186
\end{center}
+ − 2187
+ − 2188
+ − 2189
@{thm [mode=IfThen] bder_retrieve}
+ − 2190
+ − 2191
By induction on \<open>r\<close>
+ − 2192
+ − 2193
\begin{theorem}[Main Lemma]\mbox{}\\
+ − 2194
@{thm [mode=IfThen] MAIN_decode}
+ − 2195
\end{theorem}
+ − 2196
+ − 2197
\noindent
+ − 2198
Definition of the bitcoded lexer
+ − 2199
+ − 2200
@{thm blexer_def}
+ − 2201
+ − 2202
+ − 2203
\begin{theorem}
+ − 2204
@{thm blexer_correctness}
+ − 2205
\end{theorem}
+ − 2206
+ − 2207
+ − 2208
+ − 2209
+ − 2210
+ − 2211
\begin{center}
+ − 2212
\begin{tabular}{lcl}
+ − 2213
@{thm (lhs) code.simps(1)} & $\dn$ & @{thm (rhs) code.simps(1)}\\
+ − 2214
@{thm (lhs) code.simps(2)} & $\dn$ & @{thm (rhs) code.simps(2)}\\
+ − 2215
@{thm (lhs) code.simps(3)} & $\dn$ & @{thm (rhs) code.simps(3)}\\
+ − 2216
@{thm (lhs) code.simps(4)} & $\dn$ & @{thm (rhs) code.simps(4)}\\
+ − 2217
@{thm (lhs) code.simps(5)[of "v\<^sub>1" "v\<^sub>2"]} & $\dn$ & @{thm (rhs) code.simps(5)[of "v\<^sub>1" "v\<^sub>2"]}\\
+ − 2218
@{thm (lhs) code.simps(6)} & $\dn$ & @{thm (rhs) code.simps(6)}\\
+ − 2219
@{thm (lhs) code.simps(7)} & $\dn$ & @{thm (rhs) code.simps(7)}
+ − 2220
\end{tabular}
+ − 2221
\end{center}
+ − 2222
+ − 2223
\begin{center}
+ − 2224
\begin{tabular}{lcl}
+ − 2225
@{term areg} & $::=$ & @{term "AZERO"}\\
+ − 2226
& $\mid$ & @{term "AONE bs"}\\
+ − 2227
& $\mid$ & @{term "ACHAR bs c"}\\
+ − 2228
& $\mid$ & @{term "AALT bs r1 r2"}\\
+ − 2229
& $\mid$ & @{term "ASEQ bs r\<^sub>1 r\<^sub>2"}\\
+ − 2230
& $\mid$ & @{term "ASTAR bs r"}
+ − 2231
\end{tabular}
+ − 2232
\end{center}
+ − 2233
+ − 2234
+ − 2235
\begin{center}
+ − 2236
\begin{tabular}{lcl}
+ − 2237
@{thm (lhs) intern.simps(1)} & $\dn$ & @{thm (rhs) intern.simps(1)}\\
+ − 2238
@{thm (lhs) intern.simps(2)} & $\dn$ & @{thm (rhs) intern.simps(2)}\\
+ − 2239
@{thm (lhs) intern.simps(3)} & $\dn$ & @{thm (rhs) intern.simps(3)}\\
+ − 2240
@{thm (lhs) intern.simps(4)[of "r\<^sub>1" "r\<^sub>2"]} & $\dn$ & @{thm (rhs) intern.simps(4)[of "r\<^sub>1" "r\<^sub>2"]}\\
+ − 2241
@{thm (lhs) intern.simps(5)[of "r\<^sub>1" "r\<^sub>2"]} & $\dn$ & @{thm (rhs) intern.simps(5)[of "r\<^sub>1" "r\<^sub>2"]}\\
+ − 2242
@{thm (lhs) intern.simps(6)} & $\dn$ & @{thm (rhs) intern.simps(6)}\\
+ − 2243
\end{tabular}
+ − 2244
\end{center}
+ − 2245
+ − 2246
\begin{center}
+ − 2247
\begin{tabular}{lcl}
+ − 2248
@{thm (lhs) erase.simps(1)} & $\dn$ & @{thm (rhs) erase.simps(1)}\\
+ − 2249
@{thm (lhs) erase.simps(2)[of bs]} & $\dn$ & @{thm (rhs) erase.simps(2)[of bs]}\\
+ − 2250
@{thm (lhs) erase.simps(3)[of bs]} & $\dn$ & @{thm (rhs) erase.simps(3)[of bs]}\\
+ − 2251
@{thm (lhs) erase.simps(4)[of bs "r\<^sub>1" "r\<^sub>2"]} & $\dn$ & @{thm (rhs) erase.simps(4)[of bs "r\<^sub>1" "r\<^sub>2"]}\\
+ − 2252
@{thm (lhs) erase.simps(5)[of bs "r\<^sub>1" "r\<^sub>2"]} & $\dn$ & @{thm (rhs) erase.simps(5)[of bs "r\<^sub>1" "r\<^sub>2"]}\\
+ − 2253
@{thm (lhs) erase.simps(6)[of bs]} & $\dn$ & @{thm (rhs) erase.simps(6)[of bs]}\\
+ − 2254
\end{tabular}
+ − 2255
\end{center}
+ − 2256
+ − 2257
Some simple facts about erase
+ − 2258
+ − 2259
\begin{lemma}\mbox{}\\
+ − 2260
@{thm erase_bder}\\
+ − 2261
@{thm erase_intern}
+ − 2262
\end{lemma}
+ − 2263
+ − 2264
\begin{center}
+ − 2265
\begin{tabular}{lcl}
+ − 2266
@{thm (lhs) bnullable.simps(1)} & $\dn$ & @{thm (rhs) bnullable.simps(1)}\\
+ − 2267
@{thm (lhs) bnullable.simps(2)} & $\dn$ & @{thm (rhs) bnullable.simps(2)}\\
+ − 2268
@{thm (lhs) bnullable.simps(3)} & $\dn$ & @{thm (rhs) bnullable.simps(3)}\\
+ − 2269
@{thm (lhs) bnullable.simps(4)[of bs "r\<^sub>1" "r\<^sub>2"]} & $\dn$ & @{thm (rhs) bnullable.simps(4)[of bs "r\<^sub>1" "r\<^sub>2"]}\\
+ − 2270
@{thm (lhs) bnullable.simps(5)[of bs "r\<^sub>1" "r\<^sub>2"]} & $\dn$ & @{thm (rhs) bnullable.simps(5)[of bs "r\<^sub>1" "r\<^sub>2"]}\\
+ − 2271
@{thm (lhs) bnullable.simps(6)} & $\dn$ & @{thm (rhs) bnullable.simps(6)}\medskip\\
+ − 2272
+ − 2273
% \end{tabular}
+ − 2274
% \end{center}
+ − 2275
+ − 2276
% \begin{center}
+ − 2277
% \begin{tabular}{lcl}
+ − 2278
+ − 2279
@{thm (lhs) bder.simps(1)} & $\dn$ & @{thm (rhs) bder.simps(1)}\\
+ − 2280
@{thm (lhs) bder.simps(2)} & $\dn$ & @{thm (rhs) bder.simps(2)}\\
+ − 2281
@{thm (lhs) bder.simps(3)} & $\dn$ & @{thm (rhs) bder.simps(3)}\\
+ − 2282
@{thm (lhs) bder.simps(4)[of bs "r\<^sub>1" "r\<^sub>2"]} & $\dn$ & @{thm (rhs) bder.simps(4)[of bs "r\<^sub>1" "r\<^sub>2"]}\\
+ − 2283
@{thm (lhs) bder.simps(5)[of bs "r\<^sub>1" "r\<^sub>2"]} & $\dn$ & @{thm (rhs) bder.simps(5)[of bs "r\<^sub>1" "r\<^sub>2"]}\\
+ − 2284
@{thm (lhs) bder.simps(6)} & $\dn$ & @{thm (rhs) bder.simps(6)}
+ − 2285
\end{tabular}
+ − 2286
\end{center}
+ − 2287
+ − 2288
+ − 2289
\begin{center}
+ − 2290
\begin{tabular}{lcl}
+ − 2291
@{thm (lhs) bmkeps.simps(1)} & $\dn$ & @{thm (rhs) bmkeps.simps(1)}\\
+ − 2292
@{thm (lhs) bmkeps.simps(2)[of bs "r\<^sub>1" "r\<^sub>2"]} & $\dn$ & @{thm (rhs) bmkeps.simps(2)[of bs "r\<^sub>1" "r\<^sub>2"]}\\
+ − 2293
@{thm (lhs) bmkeps.simps(3)[of bs "r\<^sub>1" "r\<^sub>2"]} & $\dn$ & @{thm (rhs) bmkeps.simps(3)[of bs "r\<^sub>1" "r\<^sub>2"]}\\
+ − 2294
@{thm (lhs) bmkeps.simps(4)} & $\dn$ & @{thm (rhs) bmkeps.simps(4)}\medskip\\
+ − 2295
\end{tabular}
+ − 2296
\end{center}
+ − 2297
+ − 2298
+ − 2299
@{thm [mode=IfThen] bder_retrieve}
+ − 2300
+ − 2301
By induction on \<open>r\<close>
+ − 2302
+ − 2303
\begin{theorem}[Main Lemma]\mbox{}\\
+ − 2304
@{thm [mode=IfThen] MAIN_decode}
+ − 2305
\end{theorem}
+ − 2306
+ − 2307
\noindent
+ − 2308
Definition of the bitcoded lexer
+ − 2309
+ − 2310
@{thm blexer_def}
+ − 2311
+ − 2312
+ − 2313
\begin{theorem}
+ − 2314
@{thm blexer_correctness}
+ − 2315
\end{theorem}
+ − 2316
+ − 2317
\<close>
+ − 2318
\<close>*)