--- a/ChengsongPhdThesis/ChengsongPhDThesis.tex Thu Mar 24 20:52:34 2022 +0000
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,2132 +0,0 @@
-\documentclass[a4paper,UKenglish]{lipics}
-\usepackage{graphicx}
-\usepackage{data}
-
-%\usepackage{algorithm}
-\usepackage{amsmath}
-\usepackage[noend]{algpseudocode}
-\usepackage{enumitem}
-\usepackage{nccmath}
-\usepackage{tikz-cd}
-\usepackage{pgfplots}
-\usetikzlibrary{positioning}
-
-\definecolor{darkblue}{rgb}{0,0,0.6}
-\hypersetup{colorlinks=true,allcolors=darkblue}
-\newcommand{\comment}[1]%
-{{\color{red}$\Rightarrow$}\marginpar{\raggedright\small{\bf\color{red}#1}}}
-
-% \documentclass{article}
-%\usepackage[utf8]{inputenc}
-%\usepackage[english]{babel}
-%\usepackage{listings}
-% \usepackage{amsthm}
-%\usepackage{hyperref}
-% \usepackage[margin=0.5in]{geometry}
-%\usepackage{pmboxdraw}
-
-\title{POSIX Regular Expression Matching and Lexing}
-\author{Chengsong Tan}
-\affil{King's College London\\
-London, UK\\
-\texttt{chengsong.tan@kcl.ac.uk}}
-\authorrunning{Chengsong Tan}
-\Copyright{Chengsong Tan}
-
-\newcommand{\dn}{\stackrel{\mbox{\scriptsize def}}{=}}%
-\newcommand{\ZERO}{\mbox{\bf 0}}
-\newcommand{\ONE}{\mbox{\bf 1}}
-\def\lexer{\mathit{lexer}}
-\def\mkeps{\mathit{mkeps}}
-
-\def\DFA{\textit{DFA}}
-\def\bmkeps{\textit{bmkeps}}
-\def\retrieve{\textit{retrieve}}
-\def\blexer{\textit{blexer}}
-\def\flex{\textit{flex}}
-\def\inj{\mathit{inj}}
-\def\Empty{\mathit{Empty}}
-\def\Left{\mathit{Left}}
-\def\Right{\mathit{Right}}
-\def\Stars{\mathit{Stars}}
-\def\Char{\mathit{Char}}
-\def\Seq{\mathit{Seq}}
-\def\Der{\mathit{Der}}
-\def\nullable{\mathit{nullable}}
-\def\Z{\mathit{Z}}
-\def\S{\mathit{S}}
-\def\rup{r^\uparrow}
-
-\newcommand{\PDER}{\textit{PDER}}
-\newcommand{\flts}{\textit{flts}}
-\newcommand{\distinctBy}{\textit{distinctBy}}
-\newcommand{\map}{\textit{map}}
-\newcommand{\size}{\textit{size}}
-\def\awidth{\mathit{awidth}}
-\def\pder{\mathit{pder}}
-\def\maxterms{\mathit{maxterms}}
-\def\bsimp{\mathit{bsimp}}
-
-%\theoremstyle{theorem}
-%\newtheorem{theorem}{Theorem}
-%\theoremstyle{lemma}
-%\newtheorem{lemma}{Lemma}
-%\newcommand{\lemmaautorefname}{Lemma}
-%\theoremstyle{definition}
-%\newtheorem{definition}{Definition}
-\algnewcommand\algorithmicswitch{\textbf{switch}}
-\algnewcommand\algorithmiccase{\textbf{case}}
-\algnewcommand\algorithmicassert{\texttt{assert}}
-\algnewcommand\Assert[1]{\State \algorithmicassert(#1)}%
-% New "environments"
-\algdef{SE}[SWITCH]{Switch}{EndSwitch}[1]{\algorithmicswitch\ #1\ \algorithmicdo}{\algorithmicend\ \algorithmicswitch}%
-\algdef{SE}[CASE]{Case}{EndCase}[1]{\algorithmiccase\ #1}{\algorithmicend\ \algorithmiccase}%
-\algtext*{EndSwitch}%
-\algtext*{EndCase}%
-
-
-\begin{document}
-
-\maketitle
-
-\begin{abstract}
- Brzozowski introduced in 1964 a beautifully simple algorithm for
- regular expression matching based on the notion of derivatives of
- regular expressions. In 2014, Sulzmann and Lu extended this
- algorithm to not just give a YES/NO answer for whether or not a
- regular expression matches a string, but, in case it does, to also
- answer \emph{how} it matches the string. This is important for
- applications such as lexing (tokenising a string). The problem is to
- make the algorithm by Sulzmann and Lu fast on all inputs without
- breaking its correctness. We have already developed some
- simplification rules for this, but have not yet proved that they
- preserve the correctness of the algorithm. We also have not yet
- looked at extended regular expressions, such as bounded repetitions,
- negation and back-references.
-\end{abstract}
-
-\section{Introduction}
-\subsection{Basic Regex Introduction}
-
-Suppose (basic) regular expressions are given by the following grammar:
-\[ r ::= \ZERO \mid \ONE
- \mid c
- \mid r_1 \cdot r_2
- \mid r_1 + r_2
- \mid r^*
-\]
-
-Problem of matching:
-
-\begin{center}
-\begin{tabular}{lcr}
-$\textit{Match}(r, s)$ & $ = $ & $\textit{if}\; s \in L(r)\; \textit{output} \; \textit{YES}$\\
-& & $\textit{else} \; \textit{output} \; \textit{NO}$
-\end{tabular}
-\end{center}
-Regular expressions are omnipresent in modern
-software; examples include network intrusion analysis
-tools such as Snort and Bro.
-\subsubsection{The rules for network intrusion analysis tools}
-TODO: read rules libraries such as Snort and the explanation for some of the rules
-TODO: pcre/pcre2?
-TODO: any other libraries?
-
-
-There have been many widely used libraries, such as
-Henry Spencer's regexp(3), RE2, etc.
-They are fast and successful, but ugly corner cases
-that allow $\textit{ReDoS}$ (regular expression denial of
-service) attacks exist, and they are a non-negligible portion.
-\subsection{The practical problem}
-These corner cases either
-\begin{itemize}
-\item
-go unnoticed until they
-cause considerable grief in real life
-\item
-or force the regex library writers to impose
-restrictions on the input, limiting the
-choices a programmer has when using regexes.
-\end{itemize}
-
-Motivation:
-We want a library that supports as many constructs as possible,
-but still gives formal guarantees on correctness and running
-time.
-
-\subsection{Regexes that brought down CloudFlare}
-
-
-The outage was caused by matching some string $s$ against the regex
-
-\begin{verbatim}
-(?:(?:\"|'|\]|\}|\\|\d|
-(?:nan|infinity|true|false|null|undefined|symbol|math)
-|\`|\-|\+)+[)]*;?((?:\s|-|~|!|{}|\|\||\+)*.*(?:.*=.*)))
-\end{verbatim}
-
-
-%Could be from a network intrusion detection algorithm.
-%Checking whether there is some malicious code
-%in the network data blocks being routed.
-%If so, discard the data and identify the sender for future alert.
-\section{Existing approaches}
-\subsection{Shortcomings of different methods}
-
-
-\subsubsection{NFAs}
-\textbf{Problems with this approach:}
-\begin{itemize}
-\item
-Simulation can be slow, especially when many states are active.
-\item
-We want lexing results: there can be exponentially many different matching results.
-\end{itemize}
-
-
-One regular expression can have multiple lexical values. For example,
-the regular expression $(a+b)^*$ has an infinite list of
-values corresponding to it: $\Stars\,[]$, $\Stars\,[\Left(\Char(a))]$,
-$\Stars\,[\Right(\Char(b))]$, $\Stars\,[\Left(\Char(a)),\,\Right(\Char(b))]$,
-$\ldots$
-Even for a regular expression matching one particular string, there can
-still be more than one value corresponding to it.
-Take the example where $r= (a^*\cdot a^*)^*$ and the string
-$s=\underbrace{aa\ldots a}_\text{n \textit{a}s}$.
-The number of different ways of matching
-without allowing any value under a star to be flattened
-to an empty string can be given by the following formula:
-\begin{center}
- $C_n = (n+1)+n C_1+\ldots + 2 C_{n-1}$
-\end{center}
-and a closed-form formula can be calculated:
-\begin{equation}
-	C_n =\frac{(2+\sqrt{2})^n - (2-\sqrt{2})^n}{4\sqrt{2}}
-\end{equation}
-which clearly grows exponentially.
-A lexer aimed at producing all the possible values has an exponential
-worst-case runtime. Therefore it is impractical to try to generate
-all possible matches in a run. In practice, we are usually
-interested in POSIX values, which by intuition always
-match the leftmost regular expression when there is a choice
-and always match a sub-part as much as possible before proceeding
-to the next token. For example, the above example has the POSIX value
-$\Stars\,[\Seq(\Stars\,[\underbrace{\Char(a),\ldots,\Char(a)}_\text{n iterations}], \Stars\,[])]$.
-The output we want from an algorithm is a POSIX match
-encoded as a value.\\
-$\mathbf{TODO:}$
-\begin{itemize}
-\item
-Illustrate graphically how you can match $a^*a^{**}$ with $aaa$ in different ways.
-\item
-Give a backtracking algorithm, and explain briefly why this can be exponentially slow.
-(When there is a match, it is found straight away; when there is none, the algorithm fails to
-recognise immediately that a match cannot possibly be found, and tries out all remaining
-possibilities, etc.)
-\item
-From the above point, are there static analysis tools that single out those malicious
-patterns and flag them before a lexer is even run?
-Have a more thorough survey of the Birmingham paper.
-Identify the suitable scenarios for such static analysis algorithms.
-
-\end{itemize}
-
-\subsubsection{DFAs}
-The tool JFlex uses this approach.
-Advantages: very fast on most regexes.\\
-TODO: show it being fast on a lot of inputs\\
-Disadvantages:
-state explosion for bounded repetitions, due to the
-theoretical bottleneck of having to remember exactly what the
-suffix of the input string up to length $n$ is.
-The ``countdown states'' activation problem:
-$.*a.\{100\}$ requires more than $2^{100}$ DFA states.
-Example:
-Converting $((a|b)*b.\{10\})\{3\}$ to a $\DFA$
-gives the error:
-\begin{verbatim}
-147972 states before minimization, 79107 states in minimized DFA
-Old file "search.java" saved as "search.java~"
-Writing code to "search.java"
-
-Unexpected exception encountered. This indicates a bug in JFlex.
-Please consider filing an issue at http://github.com/jflex-de/jflex/issues/new
-
-
-character value expected
-java.lang.IllegalArgumentException: character value expected
- at jflex.generator.PackEmitter.emitUC(PackEmitter.java:105)
- at jflex.generator.CountEmitter.emit(CountEmitter.java:116)
- at jflex.generator.Emitter.emitDynamicInit(Emitter.java:530)
- at jflex.generator.Emitter.emit(Emitter.java:1369)
- at jflex.generator.LexGenerator.generate(LexGenerator.java:115)
- at jflex.Main.generate(Main.java:320)
- at jflex.Main.main(Main.java:336)
-\end{verbatim}
-
-\subsubsection{variants of DFAs}
-counting set automata
-\\
-TODO: microsoft 2020 oopsla CsA work, need to add bibli entry, and read, understand key novelty, learn to benchmark like it
-\\
-TODO: find weakness of such counting set automata?
-\\
-Other variants?
-
-\subsubsection{NFA and Regex: isomorphic structure}
-TODO: define mathematically an isomorphism?\\
-
-
-
-\subsubsection{variants of NFAs}
-How about acting on regular expressions themselves? Certain copies carry redundant information (they will always match the same strings) and can be pruned away.
-
-\subsection{Brzozowski's derivatives}
-
-\subsection{Sulzmann and Lu's algorithm}
-
-\subsection{Bit-coded algorithm}
-Add bitcodes!
-Built on top of derivatives, but with auxiliary bits.
-\subsection{Correctness Proof}
-
-The correctness of the bit-coded algorithm was not proven by Sulzmann
-and Lu; it was later proven by Ausaf and Urban.
-
-
-For this we have started by looking at the proof of
-\begin{equation}\label{lexer}
-\blexer \; (r^\uparrow) s = \lexer \;r \;s,
-\end{equation}
-
-%\noindent
-%might provide us insight into proving
-%\begin{center}
-%$\blexer \; r^\uparrow \;s = \blexers \; r^\uparrow \;s$
-%\end{center}
-
-\noindent
-which established that the bit-sequence algorithm produces the same
-result as the original algorithm, which does not use
-bit-sequences.
-The proof uses two ``tricks''. One is that it uses a \flex-function
-
-\begin{center}
-\begin{tabular}{lcl}
-$\textit{flex} \;r\; f\; (c\!::\!s) $ & $\dn$ & $\textit{flex} \; (r\backslash c) \;(\lambda v. f (\inj \; r \; c \; v)) \;s$ \\
-$\textit{flex} \;r\; f\; [\,] $ & $\dn$ & $f$
-\end{tabular}
-\end{center}
-
-\noindent
-
-The intuition behind the $\flex$ function is that
- it accumulates a series of $\inj$ function applications when doing derivatives
- in a $\mathit{LIFO}$ manner. The arguments of the $\inj$ functions are kept by
- remembering which character
- was chopped off and what the regular expression looks like before
- chopping off that character.
- The $\mathit{LIFO}$ order is achieved by always putting the newest $\inj$ application
- before the application of $f$, the previously accumulated function applications.\\
-Therefore, when the function $\flex$ acts on a string $s@[c]$ whose last
-character is $c$, its last injection can be revealed directly:
-\begin{equation}\label{flex}
-\flex \; r \; id \; (s@[c]) \; v = \flex \; r \; id \; s \; (\inj \; (r\backslash s) \; c\; v).
-\end{equation}
-That is, the last character can be taken off and the injection it causes
-applied directly to the argument value $v$.
-
-Ausaf and Urban proved that Sulzmann and Lu's lexer
-can be characterised by the $\flex$ function:
-\begin{center}
-$\lexer \;r\; s = \flex \;r\; \textit{id} \;s \;(\mkeps \; (r\backslash s))$.
-\end{center}
-
-\noindent
-This property says that Sulzmann and Lu's $\lexer$ does lexing by
-stacking up injection functions while doing derivatives,
-explicitly showing the order of characters being
-injected back in each step.
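-
-As an illustration, $\flex$ can be sketched in Scala as follows. This is
-a minimal sketch: it assumes Scala renderings of the datatypes
-\textit{Rexp} and \textit{Value} and of the functions \textit{der},
-\textit{ders}, $\inj$ and $\mkeps$ (all sketched in later sections); the
-name \textit{lexerViaFlex} is ours.
-\begin{verbatim}
-// flex accumulates the injections in a LIFO manner
-def flex(r: Rexp, f: Value => Value, s: List[Char]): Value => Value =
-  s match {
-    case Nil => f
-    // the newest injection is composed under the accumulated f
-    case c :: cs => flex(der(c, r), v => f(inj(r, c, v)), cs)
-  }
-
-// the characterisation: lexer r s = flex r id s (mkeps (r\s))
-def lexerViaFlex(r: Rexp, s: List[Char]): Value =
-  flex(r, v => v, s)(mkeps(ders(s, r)))
-\end{verbatim}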
-
-\noindent
-The other trick, which is the crux in the existing proof,
-is the use of the $\retrieve$-function:
-\begin{center}
-\begin{tabular}{@{}l@{\hspace{2mm}}c@{\hspace{2mm}}l@{}}
- $\textit{retrieve}\,(_{bs}\ONE)\,\Empty$ & $\dn$ & $bs$\\
- $\textit{retrieve}\,(_{bs}{\bf c})\,(\Char\,d)$ & $\dn$ & $bs$\\
- $\textit{retrieve}\,(_{bs}\sum a::as)\,(\Left\,v)$ & $\dn$ &
- $bs \,@\, \textit{retrieve}\,a\,v$\\
- $\textit{retrieve}\,(_{bs}\sum a::as)\,(\Right\,v)$ & $\dn$ &
- $\textit{bs} \,@\, \textit{retrieve}\,(_{[]}\sum as)\,v$\\
- $\textit{retrieve}\,(_{bs}a_1\cdot a_2)\,(\Seq\,v_1\,v_2)$ & $\dn$ &
- $bs \,@\,\textit{retrieve}\,a_1\,v_1\,@\, \textit{retrieve}\,a_2\,v_2$\\
- $\textit{retrieve}\,(_{bs}a^*)\,(\Stars\,[])$ & $\dn$ &
- $bs \,@\, [0]$\\
- $\textit{retrieve}\,(_{bs}a^*)\,(\Stars\,(v\!::\!vs))$ & $\dn$ &\\
- \multicolumn{3}{l}{
- \hspace{3cm}$bs \,@\, [1] \,@\, \textit{retrieve}\,a\,v\,@\,
- \textit{retrieve}\,(_{[]}a^*)\,(\Stars\,vs)$}\\
-\end{tabular}
-\end{center}
-
-\noindent
-Sulzmann and Lu proposed this function, but did not prove
-anything about it. Ausaf and Urban made use of the following
-fact about $\retrieve$ in their proof:
- \begin{equation}\label{retrieve_reversible}
- \retrieve\; (\rup \backslash c) \; v = \retrieve \; \rup \;(\inj \;r \;c \; v)
- \end{equation}
-This says that $\retrieve$ will always pick up
-partial information about a lexing value and transform it into suitable bitcodes.
-If the information is in the regular expression (stored as bitcodes), it will keep those
-bitcodes under the guidance of the value;
-if the information is in the value (because it has been injected back into the value),
-it will ``digest'' that part of the value and transform it into bitcodes.
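-
-The clauses above transliterate into Scala almost verbatim. The
-following is a sketch; the datatypes \textit{ARexp} and \textit{Value}
-are hypothetical Scala renderings of the annotated regular expressions
-and values used in this document, with $\Left$/$\Right$ rendered as
-\textit{Lft}/\textit{Rgt}.
-\begin{verbatim}
-type Bits = List[Int]
-
-// annotated regular expressions: each node carries a bit-sequence bs
-enum ARexp {
-  case AZERO
-  case AONE(bs: Bits)
-  case ACHAR(bs: Bits, c: Char)
-  case AALTS(bs: Bits, rs: List[ARexp])
-  case ASEQ(bs: Bits, a1: ARexp, a2: ARexp)
-  case ASTAR(bs: Bits, a: ARexp)
-}
-
-enum Value {
-  case Empty
-  case Chr(c: Char)
-  case Sequ(v1: Value, v2: Value)
-  case Lft(v: Value)
-  case Rgt(v: Value)
-  case Stars(vs: List[Value])
-}
-import ARexp.*, Value.*
-
-def retrieve(a: ARexp, v: Value): Bits = (a, v) match {
-  case (AONE(bs), Empty)      => bs
-  case (ACHAR(bs, c), Chr(d)) => bs
-  case (AALTS(bs, a1 :: as), Lft(v1)) => bs ++ retrieve(a1, v1)
-  case (AALTS(bs, a1 :: as), Rgt(v1)) => bs ++ retrieve(AALTS(Nil, as), v1)
-  case (ASEQ(bs, a1, a2), Sequ(v1, v2)) =>
-    bs ++ retrieve(a1, v1) ++ retrieve(a2, v2)
-  case (ASTAR(bs, a1), Stars(Nil)) => bs ++ List(0)
-  case (ASTAR(bs, a1), Stars(v1 :: vs)) =>
-    bs ++ List(1) ++ retrieve(a1, v1) ++ retrieve(ASTAR(Nil, a1), Stars(vs))
-}
-\end{verbatim}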
-
-\noindent
-
-Using this together with~\eqref{flex}, we can prove that the bitcoded version of
-the lexer is the same as Sulzmann and Lu's lexer:
-\begin{center}
-$\lexer \; r \; s = \flex \; r\; id\; s\; v = \textit{decode} \;( \textit{bmkeps}\; (\rup \backslash s) ) r = \blexer \; r \; s$
-\end{center}
-where $v = \mkeps\,(r\backslash s)$.
-\noindent
-\begin{proof}
-We express $\bmkeps$ using $\retrieve$, and the theorem to prove becomes:
-\begin{center}
-$ \flex \; r\; id\; s\; v = \textit{decode} \;( \textit{retrieve}\; (\rup \backslash s) \; v \;) r$
-\end{center}
-\noindent
-We prove the above by reverse induction on the string $s$ (meaning that the inductive step is on
-$s @ [c]$ rather than $c :: s$);
-$v$ takes arbitrary values.\\
-The base case goes through trivially.\\
-For the inductive step, assume that
-$ \flex \; r\; id\; s\; v = \textit{decode} \;( \textit{retrieve}\; (\rup \backslash s) \; v \;) r$
-holds for all values $v$. We need to show that
-$ \flex \; r\; id\; (s@[c])\; v = \textit{decode} \;( \textit{retrieve}\; (\rup \backslash (s@[c])) \; v \;) r$.\\
-Equation~\eqref{flex} allows us to do the following rewrite:
-\begin{center}
-$ \flex \; r\; id\; (s@[c])\; v = \flex \; r \; id\; s\; (\inj \; (r \backslash s) \; c\; v)= \textit{decode} \;( \textit{retrieve}\; (\rup \backslash s) \; (\inj \; (r\backslash s) \;c\;v)\;) r$
-\end{center}
-Equation~\eqref{retrieve_reversible} allows us to further rewrite the $\mathit{RHS}$ of the above to
-\begin{center}
-$\textit{decode} \; (\textit{retrieve}\; (\rup \backslash (s @ [c])) \; v\;) \;r$
-\end{center}
-which is exactly what we needed to show.
-
-
-\end{proof}
-
-
-
-\section{My Work}
-
-\subsection{an improved version of the bit-coded algorithm: with simplification}
-
-\subsection{a correctness proof for the bitcoded algorithm}
-
-\subsection{finiteness proof}
-\subsubsection{closed form}
-We can give the derivatives of regular expressions
-with respect to strings a closed form, up to simplification:
-\begin{itemize}
-\item
-closed form for sequences:
-\begin{verbatim}
-lemma seq_closed_form: shows
-"rsimp (rders_simp (RSEQ r1 r2) s) =
-rsimp ( RALTS ( (RSEQ (rders_simp r1 s) r2) #
- (map (rders r2) (vsuf s r1))
- )
- )"
-\end{verbatim}
-where the recursive function $\textit{vsuf}$ is defined as
-\begin{verbatim}
-fun vsuf :: "char list -> rrexp -> char list list" where
-"vsuf [] _ = []"
-|"vsuf (c#cs) r1 = (if (rnullable r1) then (vsuf cs (rder c r1)) @ [c # cs]
- else (vsuf cs (rder c r1))
- ) "
-
-\end{verbatim}
-\item
-closed form for stars:
-\begin{verbatim}
-lemma star_closed_form:
- shows "rders_simp (RSTAR r0) (c#s) =
-rsimp ( RALTS (
-(map (\lambda s1. RSEQ (rders_simp r0 s1) (RSTAR r0) )
-(star_updates s r0 [[c]]) ) ))"
-\end{verbatim}
-where the recursive function $\textit{star}\_\textit{updates}$ is defined as
-\begin{verbatim}
-fun star_update :: "char -> rrexp -> char list list -> char list list" where
-"star_update c r [] = []"
-|"star_update c r (s # Ss) = (if (rnullable (rders_simp r s))
- then (s@[c]) # [c] # (star_update c r Ss)
- else (s@[c]) # (star_update c r Ss) )"
-
-fun star_updates :: "char list -> rrexp -> char list list -> char list list"
- where
-"star_updates [] r Ss = Ss"
-| "star_updates (c # cs) r Ss = star_updates cs r (star_update c r Ss)"
-
-\end{verbatim}
-
-
-\end{itemize}
-These closed forms are a formalisation of the intuition
-that we can push the derivatives
-of compound regular expressions into their sub-expressions; the resulting
-expression is a linear combination of those sub-expressions' derivatives.
-\subsubsection{Estimation of closed forms' size}
-Thanks to $\distinctBy$ performing deduplication,
-the linear combination can be bounded by the set enumerating all
-regular expressions up to a certain size:
-\begin{verbatim}
-
-lemma star_closed_form_bounded_by_rdistinct_list_estimate:
-  shows "rsize (rsimp ( RALTS ( (map (\lambda s1. RSEQ (rders_simp r0 s1) (RSTAR r0) )
-                 (star_updates s r0 [[c]]) ) ))) <=
-    Suc (sum_list (map rsize (rdistinct (map (\lambda s1. RSEQ (rders_simp r0 s1) (RSTAR r0) )
-                 (star_updates s r0 [[c]]) ) {}) ) )"
-
- lemma distinct_list_rexp_up_to_certain_size_bouded_by_set_enumerating_up_to_that_size:
-   shows "\forall r \in set rs. (rsize r) <= N ==> sum_list (map rsize (rdistinct rs {})) <=
-          (card (rexp_enum N)) * N"
-
- lemma ind_hypo_on_ders_leads_to_stars_bounded:
-   shows "\forall s. rsize (rders_simp r0 s) <= N ==>
-    (sum_list (map rsize (rdistinct (map (\lambda s1. RSEQ (rders_simp r0 s1) (RSTAR r0) )
-                 (star_updates s r0 [[c]]) ) {}) ) ) <=
-    (card (rexp_enum (Suc (N + rsize (RSTAR r0))))) * (Suc (N + rsize (RSTAR r0)))"
-\end{verbatim}
-
-With the above three lemmas, we can argue that the inductive hypothesis,
-namely that $r_0$'s derivatives are bounded above, leads to $r_0^*$'s
-derivatives being bounded above.
-\begin{verbatim}
-
-lemma r0_bounded_star_bounded:
-  shows "\forall s. rsize (rders_simp r0 s) <= N ==>
-         \forall s. rsize (rders_simp (RSTAR r0) s) <=
-         (card (rexp_enum (Suc (N + rsize (RSTAR r0))))) * (Suc (N + rsize (RSTAR r0)))"
-\end{verbatim}
-
-
-A similar argument works for the sequence case.
-\subsection{stronger simplification needed}
-
-\subsubsection{Bounded List of Terms}
-We have seen that without simplification the size of the derivatives of $(a+aa)^*$
-grows exponentially and without bound (where we omit certain nested
-parentheses among the four terms in the last explicitly written-out regex):
-
-\def\ll{\stackrel{\_\backslash{} a}{\longrightarrow}}
-\begin{center}
-\begin{tabular}{rll}
-$(a + aa)^*$ & $\ll$ & $(\ONE + \ONE{}a) \cdot (a + aa)^*$\\
-& $\ll$ & $(\ZERO + \ZERO{}a + \ONE) \cdot (a + aa)^* \;+\; (\ONE + \ONE{}a) \cdot (a + aa)^*$\\
-& $\ll$ & $(\ZERO + \ZERO{}a + \ZERO) \cdot (a + aa)^* + (\ONE + \ONE{}a) \cdot (a + aa)^* \;+\; $\\
-& & $\qquad(\ZERO + \ZERO{}a + \ONE) \cdot (a + aa)^* + (\ONE + \ONE{}a) \cdot (a + aa)^*$\\
-& $\ll$ & \ldots \hspace{15mm}(regular expressions of sizes 98, 169, 283, 468, 767, \ldots)
-\end{tabular}
-\end{center}
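-
-This growth can be observed directly with a small, self-contained Scala
-sketch (our own illustration, not the formalised algorithm) that builds
-successive derivatives of $(a+aa)^*$ w.r.t.~the character $a$ and
-measures their tree sizes; the exact numbers depend on the
-representation, but the exponential trend is the same.
-\begin{verbatim}
-// basic regular expressions
-enum Rexp {
-  case ZERO, ONE
-  case CHAR(c: Char)
-  case ALT(r1: Rexp, r2: Rexp)
-  case SEQ(r1: Rexp, r2: Rexp)
-  case STAR(r: Rexp)
-}
-import Rexp.*
-
-def nullable(r: Rexp): Boolean = r match {
-  case ZERO        => false
-  case ONE         => true
-  case CHAR(_)     => false
-  case ALT(r1, r2) => nullable(r1) || nullable(r2)
-  case SEQ(r1, r2) => nullable(r1) && nullable(r2)
-  case STAR(_)     => true
-}
-
-def der(c: Char, r: Rexp): Rexp = r match {
-  case ZERO | ONE => ZERO
-  case CHAR(d)    => if (c == d) ONE else ZERO
-  case ALT(r1, r2) => ALT(der(c, r1), der(c, r2))
-  case SEQ(r1, r2) =>
-    if (nullable(r1)) ALT(SEQ(der(c, r1), r2), der(c, r2))
-    else SEQ(der(c, r1), r2)
-  case STAR(r1)   => SEQ(der(c, r1), STAR(r1))
-}
-
-// number of nodes when the regex is viewed as a tree
-def size(r: Rexp): Int = r match {
-  case ZERO | ONE | CHAR(_) => 1
-  case ALT(r1, r2) => 1 + size(r1) + size(r2)
-  case SEQ(r1, r2) => 1 + size(r1) + size(r2)
-  case STAR(r1)    => 1 + size(r1)
-}
-
-@main def growth(): Unit = {
-  val r0 = STAR(ALT(CHAR('a'), SEQ(CHAR('a'), CHAR('a'))))  // (a+aa)*
-  LazyList.iterate(r0)(der('a', _)).take(13).zipWithIndex.foreach {
-    case (r, n) => println(s"after $n derivatives: size ${size(r)}")
-  }
-}
-\end{verbatim}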
-
-But if we look closely at the regex
-\begin{center}
-\begin{tabular}{rll}
-& & $\qquad(\ZERO + \ZERO{}a + \ONE) \cdot (a + aa)^* + (\ONE + \ONE{}a) \cdot (a + aa)^*$\\
-\end{tabular}
-\end{center}
-we realize that:
-\begin{itemize}
-\item
-The regex is equivalent to an alternative taking a long, flattened list,
-where each element is a sequence, and the second child of that sequence
-is always $(a+aa)^*$. In other words, the regex is a ``linear combination''
-of terms of the form $(a+aa)\backslash s \cdot (a+aa)^*$ ($s$ is any string).
-\item
-The number of different terms of the shape $(a+aa) \backslash s \cdot (a+aa)^*$ is
-bounded because the first child $(a+aa) \backslash s$ can only be one of
-$(\ZERO + \ZERO{}a + \ZERO)$, $(\ZERO + \ZERO{}a + \ONE)$,
-$(\ONE + \ONE{}a)$ and $(\ZERO + \ZERO{}a)$.
-\item
-With simplification, the regex is reduced further still:
-each term $\bsimp((a+aa)\backslash s)$
-is reduced to one of
-$\ONE$, $\ONE + a$ and $\ZERO$.
-
-
-\end{itemize}
-Generalising this to any regular expression of the form
-$\sum_{s\in L(r)} \bsimp(r\backslash s ) \cdot r^*$,
-we have the closed form for the string derivative of a star regex:
-$\forall r \;s.\; \exists sl.\;\bsimp(r^* \backslash s) = \bsimp(\sum_{s'\in sl}(r\backslash s') \cdot r^* )$.
-
-The regex $\bsimp(\sum_{s' \in sl}(r\backslash s') \cdot r^*)$ is bounded by
-$\distinctBy(\flts(\sum_{s'\in sl}(\bsimp(r \backslash s')) \cdot r^*))$,
-which again is bounded by $\distinctBy(\sum_{s'\in sl}(\bsimp(r\backslash s')) \cdot r^*)$.
-This might give us a polynomial bound on the length of the list
-$\distinctBy\,[(\bsimp(r\backslash s')) \cdot r^* \mid s'\in sl ]$, if the terms in
-$\distinctBy\,[(\bsimp (r\backslash s')) \mid s' \in sl]$ have a polynomial bound.
-This is unfortunately not true under our current $\distinctBy$ function:
-if we let $r_n = \big( (aa)^* + (aaa)^* + \ldots + (\underbrace{a\ldots a}_{n\,a\text{'s}})^*\big)$,
-then we have that $\maxterms\; r_n = \sup\,(\textit{length}\,[\bsimp(r_n\backslash s') \mid s' \in sl]) =
-\textit{lcm}(1,\ldots, n)$. According to \href{http://oeis.org/A003418}{OEIS A003418}
-this grows exponentially quickly. So we have found a regex $r_n$ where
-$\maxterms\, (r_n ^* \backslash s) \geq 2^{n-1}$.
-
-
-\subsubsection{stronger version of \distinctBy}
-\href{https://www.researchgate.net/publication/340874991_Partial_derivatives_of_regular_expressions_and_finite_automaton_constructions}{Antimirov}
-has proven a linear bound on the number of terms in the ``partial derivatives'' of a regular expression:
-\begin{center}
-$\size (\PDER(r)) \leq \awidth (r)$.
-\end{center}
-
-The proof is by structural induction on the regular expression $r$.
-The hard cases are the sequence case $r_1\cdot r_2$ and the star case $r^*$.
-The central idea that allows the induction to go through is the inclusion:
-\begin{center}
-$\pder_{s@[c]} (a\cdot b) \subseteq (\pder_{s@[c]} a ) \cdot b \cup (\bigcup_{s' \in Suf(s@[c])} (\pder_{s'} \; b))$
-\end{center}
-
-This way,
-
-\begin{center}
-\begin{tabular}{lcl}
-$| \pder_{s@[c]} (a\cdot b) |$ & $ \leq$ & $ | (\pder_{s@[c]} a ) \cdot b \cup (\bigcup_{s' \in Suf(s@[c])} (\pder_{s'} \; b))|$\\
-& $\leq$ & $| (\pder_{s@[c]} a ) \cdot b| + | (\bigcup_{s' \in Suf(s@[c])} (\pder_{s'} \; b))|$\\
-& $\leq$ & $\awidth(a) + \awidth(b)$ \\
-& $=$ & $\awidth(a+b)$
-\end{tabular}
-\end{center}
-
-we have that the number of terms of a compound regular expression $a\cdot b$
-is bounded by the number of terms of its sub-expressions' derivatives.
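-
-For reference, Antimirov's partial-derivative function can be sketched
-in Scala as follows (a sketch reusing the \textit{Rexp} datatype and
-\textit{nullable} from the earlier size-measuring sketch). Note that the
-result is a \emph{set} of terms rather than a single regular
-expression, so duplicate terms collapse automatically.
-\begin{verbatim}
-// sequence every regex in the set rs with r2
-def seqAll(rs: Set[Rexp], r2: Rexp): Set[Rexp] = rs.map(SEQ(_, r2))
-
-def pder(c: Char, r: Rexp): Set[Rexp] = r match {
-  case ZERO | ONE  => Set()
-  case CHAR(d)     => if (c == d) Set(ONE) else Set()
-  case ALT(r1, r2) => pder(c, r1) ++ pder(c, r2)
-  case SEQ(r1, r2) =>
-    if (nullable(r1)) seqAll(pder(c, r1), r2) ++ pder(c, r2)
-    else seqAll(pder(c, r1), r2)
-  case STAR(r1)    => seqAll(pder(c, r1), STAR(r1))
-}
-
-// lift to strings, starting from a set of regexes
-def pders(s: List[Char], rs: Set[Rexp]): Set[Rexp] = s match {
-  case Nil     => rs
-  case c :: cs => pders(cs, rs.flatMap(pder(c, _)))
-}
-\end{verbatim}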
-
-This argument can be modified to bound the terms in
-our version of regexes with strong simplification:
-\begin{center}
-\begin{tabular}{lcl}
-$| \maxterms (\bsimp (a\cdot b) \backslash s)|$ & $=$ & $ |\maxterms(\bsimp( (a\backslash s \cdot b) + \sum_{s'\in sl}(b\backslash s') ))|$\\
-& $\leq$ & $| (\pder_{s@[c]} a ) \cdot b| + | (\bigcup_{s' \in Suf(s@[c])} (\pder_{s'} \; b))|$\\
-& $\leq$ & $\awidth(a) + \awidth(b)$ \\
-& $=$ & $\awidth(a+b)$
-\end{tabular}
-\end{center}
-
-
-
-
-\subsection{cubic bound}
-We bound the number of terms in a regex's derivatives by
-its alphabetic width.
-
-The entire expression's size can then be bounded by the
-number of subterms times each subterm's size.
-
-
-
-
-
-\section{Support for bounded repetitions and other constructs}
-Example:
-$.*a.\{100\}$ after translation to a $\DFA$ and minimization will
-always take more than $2^{100}$ states.
-
-\section{Towards a library with fast running time in practice}
-
-Registers and cache-related optimizations?
-JVM-related optimizations?
-
-\section{Past Report materials}
-
-Deciding whether a string is in the language of a regex
-can be done intuitively by constructing an NFA~\cite{Thompson_1968}
-and simulating its execution.
-
-This should be simple enough that modern programmers
-have no problems with it at all, right?
-Not really:
-
-Take $(a^*)^*\,b$ and ask whether
-strings of the form $aa..a$ match this regular
-expression. Obviously this is not the case---the expected $b$ in the last
-position is missing. One would expect that modern regular expression
-matching engines can find this out very quickly. Alas, if one tries
-this example in JavaScript, Python or Java 8 with strings like 28
-$a$'s, one discovers that this decision takes around 30 seconds and
-takes considerably longer when adding a few more $a$'s, as the graphs
-below show:
-
-\begin{center}
-\begin{tabular}{@{}c@{\hspace{0mm}}c@{\hspace{0mm}}c@{}}
-\begin{tikzpicture}
-\begin{axis}[
- xlabel={$n$},
- x label style={at={(1.05,-0.05)}},
- ylabel={time in secs},
- enlargelimits=false,
- xtick={0,5,...,30},
- xmax=33,
- ymax=35,
- ytick={0,5,...,30},
- scaled ticks=false,
- axis lines=left,
- width=5cm,
- height=4cm,
- legend entries={JavaScript},
- legend pos=north west,
- legend cell align=left]
-\addplot[red,mark=*, mark options={fill=white}] table {re-js.data};
-\end{axis}
-\end{tikzpicture}
- &
-\begin{tikzpicture}
-\begin{axis}[
- xlabel={$n$},
- x label style={at={(1.05,-0.05)}},
- %ylabel={time in secs},
- enlargelimits=false,
- xtick={0,5,...,30},
- xmax=33,
- ymax=35,
- ytick={0,5,...,30},
- scaled ticks=false,
- axis lines=left,
- width=5cm,
- height=4cm,
- legend entries={Python},
- legend pos=north west,
- legend cell align=left]
-\addplot[blue,mark=*, mark options={fill=white}] table {re-python2.data};
-\end{axis}
-\end{tikzpicture}
- &
-\begin{tikzpicture}
-\begin{axis}[
- xlabel={$n$},
- x label style={at={(1.05,-0.05)}},
- %ylabel={time in secs},
- enlargelimits=false,
- xtick={0,5,...,30},
- xmax=33,
- ymax=35,
- ytick={0,5,...,30},
- scaled ticks=false,
- axis lines=left,
- width=5cm,
- height=4cm,
- legend entries={Java 8},
- legend pos=north west,
- legend cell align=left]
-\addplot[cyan,mark=*, mark options={fill=white}] table {re-java.data};
-\end{axis}
-\end{tikzpicture}\\
-\multicolumn{3}{c}{Graphs: Runtime for matching $(a^*)^*\,b$ with strings
- of the form $\underbrace{aa..a}_{n}$.}
-\end{tabular}
-\end{center}
-
-Why?
-Because these engines use $\textit{NFA}$s that can backtrack.
-%TODO: what does it mean to do DFS BFS on NFA's
-
-
-Then how about determinization?
-\begin{itemize}
-\item
-Turning NFAs into DFAs can cause the size of the automaton
-to blow up exponentially.
-\item
-We want to extract submatch information.
-For example, when
-$r_1 \cdot r_2$ matches $s$,
-we want to know the split $s = s_1@s_2$ where $s_i$
-corresponds to $r_i$; $s_i$ might be, say, the
-attacker's IP address.
-\item
-Variants such as counting automata exist.
-But they are usually made fast only on a certain class
-of regexes, such as bounded repetitions:
-\begin{verbatim}
-.*a.{100}
-\end{verbatim}
-On a lot of inputs this works very well,
-with good practical performance on average
-(about 10MiB per second).
-But they cannot be fast on all inputs of regexes and strings,
-and can be imprecise (incorrect) when it comes to more complex regexes.
-
-\end{itemize}
-%TODO: real world example?
-
-
-
-\subsection{derivatives}
-Q:
-Is there an efficient lexing algorithm with provable guarantees on
-correctness and running time?
-Brzozowski Derivatives\cite{Brzozowski1964}!
-
-\begin{center}
- \begin{tabular}{lcl}
- $\nullable(\ZERO)$ & $\dn$ & $\mathit{false}$ \\
- $\nullable(\ONE)$ & $\dn$ & $\mathit{true}$ \\
- $\nullable(c)$ & $\dn$ & $\mathit{false}$ \\
- $\nullable(r_1 + r_2)$ & $\dn$ & $\nullable(r_1) \vee \nullable(r_2)$ \\
- $\nullable(r_1\cdot r_2)$ & $\dn$ & $\nullable(r_1) \wedge \nullable(r_2)$ \\
- $\nullable(r^*)$ & $\dn$ & $\mathit{true}$ \\
- \end{tabular}
- \end{center}
-
-
-
-This function simply tests whether the empty string is in $L(r)$.
-He then defined
-the following operation on regular expressions, written
-$r\backslash c$ (the derivative of $r$ w.r.t.~the character $c$):
-
-\begin{center}
-\begin{tabular}{lcl}
- $\ZERO \backslash c$ & $\dn$ & $\ZERO$\\
- $\ONE \backslash c$ & $\dn$ & $\ZERO$\\
- $d \backslash c$ & $\dn$ &
- $\mathit{if} \;c = d\;\mathit{then}\;\ONE\;\mathit{else}\;\ZERO$\\
-$(r_1 + r_2)\backslash c$ & $\dn$ & $r_1 \backslash c \,+\, r_2 \backslash c$\\
-$(r_1 \cdot r_2)\backslash c$ & $\dn$ & $\mathit{if} \, nullable(r_1)$\\
- & & $\mathit{then}\;(r_1\backslash c) \cdot r_2 \,+\, r_2\backslash c$\\
- & & $\mathit{else}\;(r_1\backslash c) \cdot r_2$\\
- $(r^*)\backslash c$ & $\dn$ & $(r\backslash c) \cdot r^*$\\
-\end{tabular}
-\end{center}
-
-
-
-
-\begin{ceqn}
-\begin{equation}\label{graph:01}
-\begin{tikzcd}
-r_0 \arrow[r, "\backslash c_0"] \arrow[d] & r_1 \arrow[r, "\backslash c_1"] \arrow[d] & r_2 \arrow[r, dashed] \arrow[d] & r_n \arrow[d, "mkeps" description] \\
-v_0 & v_1 \arrow[l,"inj_{r_0} c_0"] & v_2 \arrow[l, "inj_{r_1} c_1"] & v_n \arrow[l, dashed]
-\end{tikzcd}
-\end{equation}
-\end{ceqn}
-Nicely functional, with easily provable correctness, but it suffers
-from a large stack size on long strings, and from an
-inability to perform even moderate simplification.
-
-
-
-Sulzmann and Lu's bit-coded algorithm:
-\begin{figure}
-\centering
-\includegraphics[scale=0.3]{bitcoded_sulzmann.png}
-\end{figure}
-
-This one-phase algorithm is free from the burden of large stack usage:
-
-\begin{center}
-\begin{tikzpicture}[scale=2,node distance=1.9cm,
- every node/.style={minimum size=7mm}]
-\node (r0) {$r_0$};
-\node (r1) [right=of r0]{$r_1$};
-\draw[->,line width=0.2mm](r0)--(r1) node[above,midway] {$\backslash\,c_0$};
-\node (r2) [right=of r1]{$r_2$};
-\draw[->, line width = 0.2mm](r1)--(r2) node[above,midway] {$\backslash\,c_1$};
-\node (rn) [right=of r2]{$r_n$};
-\draw[dashed,->,line width=0.2mm](r2)--(rn) node[above,midway] {} ;
-\draw (rn) node[anchor=west] {\;\raisebox{3mm}{$\nullable$}};
-\node (bs) [below=of rn]{$bs$};
-\draw[->,line width=0.2mm](rn) -- (bs);
-\node (v0) [left=of bs] {$v_0$};
-\draw[->,line width=0.2mm](bs)--(v0) node[below,midway] {$\textit{decode}$};
-\draw (rn) node[anchor=north west] {\;\raisebox{-8mm}{$\textit{collect bits}$}};
-\draw[->, line width=0.2mm](v0)--(r0) node[below, midway] {};
-\end{tikzpicture}
-\end{center}
-
-
-
-This is functional code, and easily provable (proof by Urban and Ausaf).
-
-But it suffers from exponential blow-ups even with the simplification steps:
-\begin{figure}
-\centering
-\includegraphics[scale= 0.3]{pics/nub_filter_simp.png}
-\end{figure}
-Claim: Sulzmann and Lu claimed it is linear w.r.t.\ the input.
-
-An example that blows it up:
-$(a+aa)^*$
-\section{Contributions}
-\subsection{Our contribution 1}
-An improved version of the above algorithm that solves most blow-up
-cases, including the above example.
-
-A formalised closed form for string derivatives:
-\[ (\sum rs) \backslash_s s = \textit{simp}(\sum_{r \in rs}(r \backslash_s s)) \]
-\[ (r_1\cdot r_2) \backslash_s s = \textit{simp}((r_1 \backslash_s s) \cdot r_2 + \sum_{s' \in \textit{Suffix}(s)} r_2 \backslash_s s' )\]
-\[r_0^* \backslash_s s = \textit{simp}(\sum_{s' \in \textit{substr}(s)} (r_0 \backslash_s s') \cdot r_0^*) \]
-
-
-
-Also with a size guarantee that makes sure the size of the derivatives
-does not grow unboundedly.
-
-
-\begin{theorem}
-Given a regular expression $r$, we have
-\begin{center}
-$\exists N_r. \;\forall s. \; |r \backslash_s s| < N_r$
-\end{center}
-\end{theorem}
-
-The proof uses the terms of partial derivatives to establish the bound:
-\begin{center}
-\begin{tabular}{lcl}
-$| \maxterms (\bsimp (a\cdot b) \backslash s)|$ & $=$ & $ |\maxterms(\bsimp( (a\backslash s \cdot b) + \sum_{s'\in sl}(b\backslash s') ))|$\\
-& $\leq$ & $| (\pder_{s@[c]} a ) \cdot b| + | (\bigcup_{s' \in Suf(s@[c])} (\pder_{s'} \; b))|$\\
-& $\leq$ & $\awidth(a) + \awidth(b)$ \\
-& $=$ & $\awidth(a+b)$
-\end{tabular}
-\end{center}
-
-
-\subsection{Our Contribution 2}
-More aggressive simplification that prunes away sub-parts
-of a regex based on what terms have appeared before.
-This gives us a bound that is truly linear in the input length.
-
-
-
-\section{To be completed}
-
-Benchmarking our algorithm against JFlex,
-counting set automata, Silex and other mainstream regex engines (and incorporating their ideas, such
-as zippers and other data structures that reduce memory use).
-
-Extending the approach to back-references.
-
-
-
-
-
-
-\noindent These are clearly abysmal and possibly surprising results. One
-would expect these systems to do much better than that---after all,
-given a DFA and a string, deciding whether the string is matched by this
-DFA should be linear in the size of the regular expression and
-the string.
-
-Admittedly, the regular expression $(a^*)^*\,b$ is carefully chosen to
-exhibit this super-linear behaviour. But unfortunately, such regular
-expressions are not just a few outliers. They are actually
-frequent enough to have a separate name created for
-them---\emph{evil regular expressions}. In empirical work, Davis et al
-report that they have found thousands of such evil regular expressions
-in the JavaScript and Python ecosystems \cite{Davis18}. A static analysis
-approach that is both sound and complete exists~\cite{17Bir}, but its running
-time on certain examples from the RegExLib and Snort regular expression
-libraries is unacceptable. Therefore the problem of efficiency still remains.
-
-This superlinear blowup in matching algorithms sometimes causes
-considerable grief in real life: for example on 20 July 2016 one evil
-regular expression brought the webpage
-\href{http://stackexchange.com}{Stack Exchange} to its
-knees.\footnote{\url{https://stackstatus.net/post/147710624694/outage-postmortem-july-20-2016}}
-In this instance, a regular expression intended to just trim white
-spaces from the beginning and the end of a line actually consumed
-massive amounts of CPU-resources---causing web servers to grind to a
-halt. This happened when a post with 20,000 white spaces was submitted,
-but importantly the white spaces were neither at the beginning nor at
-the end. As a result, the regular expression matching engine needed to
-backtrack over many choices. In this example, the time needed to process
-the string was $O(n^2)$ with respect to the string length. This
-quadratic overhead was enough for the homepage of Stack Exchange to
-respond so slowly that the load balancer assumed there must be some
-attack and therefore stopped the servers from responding to any
-requests. This made the whole site become unavailable. Another very
-recent example is a global outage of all Cloudflare servers on 2 July
-2019. A poorly written regular expression exhibited exponential
-behaviour and exhausted CPUs that serve HTTP traffic. Although the
-outage had several causes, at the heart was a regular expression that
-was used to monitor network
-traffic.\footnote{\url{https://blog.cloudflare.com/details-of-the-cloudflare-outage-on-july-2-2019/}}
-
-The underlying problem is that many ``real life'' regular expression
-matching engines do not use DFAs for matching. This is because they
-support regular expressions that are not covered by the classical
-automata theory, and in this more general setting there are quite a few
-research questions still unanswered and fast algorithms still need to be
-developed (for example how to treat efficiently bounded repetitions, negation and
-back-references).
-%question: dfa can have exponential states. isn't this the actual reason why they do not use dfas?
-%how do they avoid dfas exponential states if they use them for fast matching?
-
-There is also another under-researched problem to do with regular
-expressions and lexing, i.e.~the process of breaking up strings into
-sequences of tokens according to some regular expressions. In this
-setting one is not just interested in whether or not a regular
-expression matches a string, but also in \emph{how}. Consider for
-example a regular expression $r_{key}$ for recognising keywords such as
-\textit{if}, \textit{then} and so on; and a regular expression $r_{id}$
-for recognising identifiers (say, a single character followed by
-characters or numbers). One can then form the compound regular
-expression $(r_{key} + r_{id})^*$ and use it to tokenise strings. But
-then how should the string \textit{iffoo} be tokenised? It could be
-tokenised as a keyword followed by an identifier, or the entire string
-as a single identifier. Similarly, how should the string \textit{if} be
-tokenised? Both regular expressions, $r_{key}$ and $r_{id}$, would
-``fire''---so is it an identifier or a keyword? While in applications
-there is a well-known strategy to decide these questions, called POSIX
-matching, only relatively recently precise definitions of what POSIX
-matching actually means have been formalised
-\cite{AusafDyckhoffUrban2016,OkuiSuzuki2010,Vansummeren2006}. Such a
-definition has also been given by Sulzmann and Lu \cite{Sulzmann2014},
-but the corresponding correctness proof turned out to be faulty
-\cite{AusafDyckhoffUrban2016}. Roughly, POSIX matching means matching
-the longest initial substring. In the case of a tie, the initial
-sub-match is chosen according to some priorities attached to the regular
-expressions (e.g.~keywords have a higher priority than identifiers).
-This sounds rather simple, but according to Grathwohl et al \cite[Page
-36]{CrashCourse2014} this is not the case. They wrote:
-
-\begin{quote}
-\it{}``The POSIX strategy is more complicated than the greedy because of
-the dependence on information about the length of matched strings in the
-various subexpressions.''
-\end{quote}
-
-\noindent
-This is also supported by evidence collected by Kuklewicz
-\cite{Kuklewicz} who noticed that a number of POSIX regular expression
-matchers calculate incorrect results.
-
-Our focus in this project is on an algorithm introduced by Sulzmann and
-Lu in 2014 for regular expression matching according to the POSIX
-strategy \cite{Sulzmann2014}. Their algorithm is based on an older
-algorithm by Brzozowski from 1964 where he introduced the notion of
-derivatives of regular expressions~\cite{Brzozowski1964}. We shall
-briefly explain this algorithm next.
-
-\section{The Algorithm by Brzozowski based on Derivatives of Regular
-Expressions}
-
-Suppose (basic) regular expressions are given by the following grammar:
-\[ r ::= \ZERO \mid \ONE
- \mid c
- \mid r_1 \cdot r_2
- \mid r_1 + r_2
- \mid r^*
-\]
-
-\noindent
-The intended meaning of the constructors is as follows: $\ZERO$
-cannot match any string, $\ONE$ can match the empty string, the
-character regular expression $c$ can match the character $c$, and so
-on.
-
-The ingenious contribution by Brzozowski is the notion of
-\emph{derivatives} of regular expressions. The idea behind this
-notion is as follows: suppose a regular expression $r$ can match a
-string of the form $c\!::\! s$ (that is a list of characters starting
-with $c$), what does the regular expression look like that can match
-just $s$? Brzozowski gave a neat answer to this question. He started
-with the definition of $nullable$:
-\begin{center}
- \begin{tabular}{lcl}
- $\nullable(\ZERO)$ & $\dn$ & $\mathit{false}$ \\
- $\nullable(\ONE)$ & $\dn$ & $\mathit{true}$ \\
- $\nullable(c)$ & $\dn$ & $\mathit{false}$ \\
- $\nullable(r_1 + r_2)$ & $\dn$ & $\nullable(r_1) \vee \nullable(r_2)$ \\
- $\nullable(r_1\cdot r_2)$ & $\dn$ & $\nullable(r_1) \wedge \nullable(r_2)$ \\
- $\nullable(r^*)$ & $\dn$ & $\mathit{true}$ \\
- \end{tabular}
- \end{center}
-This function simply tests whether the empty string is in $L(r)$.
-He then defined
-the following operation on regular expressions, written
-$r\backslash c$ (the derivative of $r$ w.r.t.~the character $c$):
-
-\begin{center}
-\begin{tabular}{lcl}
- $\ZERO \backslash c$ & $\dn$ & $\ZERO$\\
- $\ONE \backslash c$ & $\dn$ & $\ZERO$\\
- $d \backslash c$ & $\dn$ &
- $\mathit{if} \;c = d\;\mathit{then}\;\ONE\;\mathit{else}\;\ZERO$\\
-$(r_1 + r_2)\backslash c$ & $\dn$ & $r_1 \backslash c \,+\, r_2 \backslash c$\\
-$(r_1 \cdot r_2)\backslash c$ & $\dn$ & $\mathit{if} \, nullable(r_1)$\\
- & & $\mathit{then}\;(r_1\backslash c) \cdot r_2 \,+\, r_2\backslash c$\\
- & & $\mathit{else}\;(r_1\backslash c) \cdot r_2$\\
- $(r^*)\backslash c$ & $\dn$ & $(r\backslash c) \cdot r^*$\\
-\end{tabular}
-\end{center}
-
-%Assuming the classic notion of a
-%\emph{language} of a regular expression, written $L(\_)$, t
-
-\noindent
-The main property of the derivative operation is that
-
-\begin{center}
-$c\!::\!s \in L(r)$ holds
-if and only if $s \in L(r\backslash c)$.
-\end{center}
-
-\noindent
-For us the main advantage is that derivatives can be
-straightforwardly implemented in any functional programming language,
-and are easily definable and reasoned about in theorem provers---the
-definitions just consist of inductive datatypes and simple recursive
-functions. Moreover, the notion of derivatives can be easily
-generalised to cover extended regular expression constructors such as
-the not-regular expression, written $\neg\,r$, or bounded repetitions
-(for example $r^{\{n\}}$ and $r^{\{n..m\}}$), which cannot be so
-straightforwardly realised within the classic automata approach.
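-For instance, the clauses for these two constructors are one-liners
-(standard definitions, stated here only for illustration):
-\begin{center}
-\begin{tabular}{lcl}
-  $(\neg r)\backslash c$ & $\dn$ & $\neg\,(r\backslash c)$\\
-  $(r^{\{n\}})\backslash c$ & $\dn$ & $\mathit{if}\;n = 0\;\mathit{then}\;\ZERO\;\mathit{else}\;(r\backslash c)\cdot r^{\{n-1\}}$\\
-\end{tabular}
-\end{center}
-together with $\nullable(\neg r) \dn \neg\,\nullable(r)$ and
-$\nullable(r^{\{n\}}) \dn (n = 0 \vee \nullable(r))$.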
-For the moment however, we focus only on the usual basic regular expressions.
-
-
-Now if we want to find out whether a string $s$ matches with a regular
-expression $r$, we can build the derivatives of $r$ w.r.t.\ (in succession)
-all the characters of the string $s$. Finally, we test whether the
-resulting regular expression can match the empty string.  If yes, then
-$r$ matches $s$; otherwise it does not. To implement this idea
-we can generalise the derivative operation to strings like this:
-
-\begin{center}
-\begin{tabular}{lcl}
-$r \backslash (c\!::\!s) $ & $\dn$ & $(r \backslash c) \backslash s$ \\
-$r \backslash [\,] $ & $\dn$ & $r$
-\end{tabular}
-\end{center}
-
-\noindent
-and then define the regular-expression matching algorithm as:
-\[
-match\;s\;r \;\dn\; nullable(r\backslash s)
-\]
-
-\noindent
-This algorithm looks graphically as follows:
-\begin{equation}\label{graph:*}
-\begin{tikzcd}
-r_0 \arrow[r, "\backslash c_0"] & r_1 \arrow[r, "\backslash c_1"] & r_2 \arrow[r, dashed] & r_n \arrow[r,"\textit{nullable}?"] & \;\textrm{YES}/\textrm{NO}
-\end{tikzcd}
-\end{equation}
-
-\noindent
-where we start with a regular expression $r_0$, build successive
-derivatives until we exhaust the string and then use \textit{nullable}
-to test whether the result can match the empty string. It can be
-relatively easily shown that this matcher is correct (that is given
-an $s = c_0...c_{n-1}$ and an $r_0$, it generates YES if and only if $s \in L(r_0)$).
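-
-Putting the pieces together, the matcher is only a few lines of Scala
-(a sketch reusing \textit{Rexp}, \textit{nullable} and \textit{der}
-from the size-measuring sketch in an earlier section):
-\begin{verbatim}
-def ders(s: List[Char], r: Rexp): Rexp = s match {
-  case Nil     => r
-  case c :: cs => ders(cs, der(c, r))
-}
-
-def matcher(r: Rexp, s: String): Boolean = nullable(ders(s.toList, r))
-
-@main def demo(): Unit = {
-  val r = STAR(SEQ(CHAR('a'), CHAR('b')))  // (a.b)*
-  println(matcher(r, "abab"))  // true
-  println(matcher(r, "aba"))   // false
-}
-\end{verbatim}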
-
-
-\section{Values and the Algorithm by Sulzmann and Lu}
-
-One limitation of Brzozowski's algorithm is that it only produces a
-YES/NO answer for whether a string is being matched by a regular
-expression. Sulzmann and Lu~\cite{Sulzmann2014} extended this algorithm
-to allow generation of an actual matching, called a \emph{value} or
-sometimes also \emph{lexical value}. These values and regular
-expressions correspond to each other as illustrated in the following
-table:
-
-
-\begin{center}
- \begin{tabular}{c@{\hspace{20mm}}c}
- \begin{tabular}{@{}rrl@{}}
- \multicolumn{3}{@{}l}{\textbf{Regular Expressions}}\medskip\\
- $r$ & $::=$ & $\ZERO$\\
- & $\mid$ & $\ONE$ \\
- & $\mid$ & $c$ \\
- & $\mid$ & $r_1 \cdot r_2$\\
- & $\mid$ & $r_1 + r_2$ \\
- \\
- & $\mid$ & $r^*$ \\
- \end{tabular}
- &
- \begin{tabular}{@{\hspace{0mm}}rrl@{}}
- \multicolumn{3}{@{}l}{\textbf{Values}}\medskip\\
- $v$ & $::=$ & \\
- & & $\Empty$ \\
- & $\mid$ & $\Char(c)$ \\
- & $\mid$ & $\Seq\,v_1\, v_2$\\
- & $\mid$ & $\Left(v)$ \\
- & $\mid$ & $\Right(v)$ \\
- & $\mid$ & $\Stars\,[v_1,\ldots\,v_n]$ \\
- \end{tabular}
- \end{tabular}
-\end{center}
-
-\noindent
-No value corresponds to $\ZERO$; $\Empty$ corresponds to $\ONE$;
-$\Char$ to the character regular expression; $\Seq$ to the sequence
-regular expression and so on. The idea of values is to encode a kind of
-lexical value for how the sub-parts of a regular expression match the
-sub-parts of a string. To see this, suppose a \emph{flatten} operation,
-written $|v|$ for values. We can use this function to extract the
-underlying string of a value $v$. For example, $|\mathit{Seq} \,
-(\textit{Char x}) \, (\textit{Char y})|$ is the string $xy$. Using
-flatten, we can describe how values encode lexical values: $\Seq\,v_1\,
-v_2$ encodes a tree with two children nodes that tells how the string
-$|v_1| @ |v_2|$ matches the regex $r_1 \cdot r_2$ whereby $r_1$ matches
-the substring $|v_1|$ and, respectively, $r_2$ matches the substring
-$|v_2|$. Exactly how these two are matched is contained in the children
-nodes $v_1$ and $v_2$ of parent $\textit{Seq}$.
-
-To give a concrete example of how values work, consider the string $xy$
-and the regular expression $(x + (y + xy))^*$. We can view this regular
-expression as a tree and if the string $xy$ is matched by two Star
-``iterations'', then the $x$ is matched by the left-most alternative in
-this tree and the $y$ by the left alternative inside the right alternative.
-This suggests recording this matching as
-
-\begin{center}
-$\Stars\,[\Left\,(\Char\,x), \Right(\Left(\Char\,y))]$
-\end{center}
-
-\noindent
-where $\Stars \; [\ldots]$ records all the
-iterations; and $\Left$, respectively $\Right$, which
-alternative is used. The value for
-matching $xy$ in a single ``iteration'', i.e.~the POSIX value,
-would look as follows
-
-\begin{center}
-$\Stars\,[\Seq\,(\Char\,x)\,(\Char\,y)]$
-\end{center}
-
-\noindent
-where $\Stars$ has only a single-element list for the single iteration
-and $\Seq$ indicates that $xy$ is matched by a sequence regular
-expression.
-
-The contribution of Sulzmann and Lu is an extension of Brzozowski's
-algorithm by a second phase (the first phase being building successive
-derivatives---see \eqref{graph:*}). In this second phase, a POSIX value
-is generated in case the regular expression matches the string.
-Pictorially, the Sulzmann and Lu algorithm is as follows:
-
-\begin{ceqn}
-\begin{equation}\label{graph:2}
-\begin{tikzcd}
-r_0 \arrow[r, "\backslash c_0"] \arrow[d] & r_1 \arrow[r, "\backslash c_1"] \arrow[d] & r_2 \arrow[r, dashed] \arrow[d] & r_n \arrow[d, "mkeps" description] \\
-v_0 & v_1 \arrow[l,"inj_{r_0} c_0"] & v_2 \arrow[l, "inj_{r_1} c_1"] & v_n \arrow[l, dashed]
-\end{tikzcd}
-\end{equation}
-\end{ceqn}
-
-\noindent
-For convenience, we shall employ the following notations: the regular
-expression we start with is $r_0$, and the given string $s$ is composed
-of characters $c_0 c_1 \ldots c_{n-1}$. In the first phase from the
-left to right, we build the derivatives $r_1$, $r_2$, \ldots according
-to the characters $c_0$, $c_1$ until we exhaust the string and obtain
-the derivative $r_n$. We test whether this derivative is
-$\textit{nullable}$ or not. If not, we know the string does not match
-$r$ and no value needs to be generated. If yes, we start building the
-values incrementally by \emph{injecting} back the characters into the
-earlier values $v_n, \ldots, v_0$. This is the second phase of the
-algorithm from the right to left. For the first value $v_n$, we call the
-function $\textit{mkeps}$, which builds the lexical value
-for how the empty string has been matched by the (nullable) regular
-expression $r_n$. This function is defined as
-
- \begin{center}
- \begin{tabular}{lcl}
- $\mkeps(\ONE)$ & $\dn$ & $\Empty$ \\
- $\mkeps(r_{1}+r_{2})$ & $\dn$
- & \textit{if} $\nullable(r_{1})$\\
- & & \textit{then} $\Left(\mkeps(r_{1}))$\\
- & & \textit{else} $\Right(\mkeps(r_{2}))$\\
- $\mkeps(r_1\cdot r_2)$ & $\dn$ & $\Seq\,(\mkeps\,r_1)\,(\mkeps\,r_2)$\\
-    $\mkeps(r^*)$ & $\dn$ & $\Stars\,[]$
- \end{tabular}
- \end{center}
-
-
-\noindent There are no cases for $\ZERO$ and $c$, since
-these regular expressions cannot match the empty string. Note
-also that in case of alternatives we give preference to the
-regular expression on the left-hand side. This will become
-important later on for what value is calculated.
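-
-$\mkeps$ transliterates directly into Scala (a sketch reusing the
-\textit{Rexp} datatype and \textit{nullable} from the earlier sketches,
-and the \textit{Value} datatype from the retrieve sketch):
-\begin{verbatim}
-def mkeps(r: Rexp): Value = r match {
-  case ONE         => Empty
-  case ALT(r1, r2) =>
-    // preference for the left alternative, as in the definition above
-    if (nullable(r1)) Lft(mkeps(r1)) else Rgt(mkeps(r2))
-  case SEQ(r1, r2) => Sequ(mkeps(r1), mkeps(r2))
-  case STAR(_)     => Stars(Nil)
-  // no cases for ZERO and CHAR: they cannot match the empty string
-}
-\end{verbatim}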
-
-After the $\mkeps$-call, we inject back the characters one by one in order to build
-the lexical value $v_i$ for how the regex $r_i$ matches the string $s_i$
-($s_i = c_i \ldots c_{n-1}$ ) from the previous lexical value $v_{i+1}$.
-After injecting back $n$ characters, we get the lexical value for how $r_0$
-matches $s$. For this Sulzmann and Lu defined a function that reverses
-the ``chopping off'' of characters during the derivative phase. The
-corresponding function is called \emph{injection}, written
-$\textit{inj}$; it takes three arguments: the first one is a regular
-expression ${r_{i-1}}$, before the character is chopped off, the second
-is a character ${c_{i-1}}$, the character we want to inject and the
-third argument is the value ${v_i}$, into which one wants to inject the
-character (it corresponds to the regular expression after the character
-has been chopped off). The result of this function is a new value. The
-definition of $\textit{inj}$ is as follows:
-
-\begin{center}
-\begin{tabular}{l@{\hspace{1mm}}c@{\hspace{1mm}}l}
-  $\textit{inj}\,(c)\,c\,\Empty$            & $\dn$ & $\Char\,c$\\
-  $\textit{inj}\,(r_1 + r_2)\,c\,\Left(v)$ & $\dn$ & $\Left(\textit{inj}\,r_1\,c\,v)$\\
-  $\textit{inj}\,(r_1 + r_2)\,c\,\Right(v)$ & $\dn$ & $\Right(\textit{inj}\,r_2\,c\,v)$\\
-  $\textit{inj}\,(r_1 \cdot r_2)\,c\,\Seq(v_1,v_2)$ & $\dn$  & $\Seq(\textit{inj}\,r_1\,c\,v_1,v_2)$\\
-  $\textit{inj}\,(r_1 \cdot r_2)\,c\,\Left(\Seq(v_1,v_2))$ & $\dn$  & $\Seq(\textit{inj}\,r_1\,c\,v_1,v_2)$\\
-  $\textit{inj}\,(r_1 \cdot r_2)\,c\,\Right(v)$ & $\dn$  & $\Seq(\textit{mkeps}(r_1),\textit{inj}\,r_2\,c\,v)$\\
-  $\textit{inj}\,(r^*)\,c\,\Seq(v,\Stars\,vs)$         & $\dn$  & $\Stars((\textit{inj}\,r\,c\,v)\,::\,vs)$\\
-\end{tabular}
-\end{center}
-
-\noindent This definition is by recursion on the ``shape'' of regular
-expressions and values. To understand this definition better, consider
-the situation when we build the derivative of the regular expression $r_{i-1}$.
-For this we chop off a character from $r_{i-1}$ to form $r_i$. This leaves a
-``hole'' in $r_i$ and its corresponding value $v_i$.
-To calculate $v_{i-1}$, we need to
-locate where that hole is and fill it.
-We can find this location by
-comparing $r_{i-1}$ and $v_i$. For instance, if $r_{i-1}$ is of shape
-$r_a \cdot r_b$, and $v_i$ is of shape $\Left(Seq(v_1,v_2))$, we know immediately that
-%
-\[ (r_a \cdot r_b)\backslash c = (r_a\backslash c) \cdot r_b \,+\, r_b\backslash c,\]
-
-\noindent
-otherwise if $r_a$ is not nullable,
-\[ (r_a \cdot r_b)\backslash c = (r_a\backslash c) \cdot r_b,\]
-
-\noindent
-the value $v_i$ should be $\Seq(\ldots)$, contradicting the fact that
-$v_i$ is actually of shape $\Left(\ldots)$. Furthermore, since $v_i$ is of shape
-$\Left(\ldots)$ instead of $\Right(\ldots)$, we know that the left
-branch of \[ (r_a \cdot r_b)\backslash c =
-\bold{\underline{ (r_a\backslash c) \cdot r_b} }\,+\, r_b\backslash c,\](underlined)
- is taken instead of the right one. This means $c$ is chopped off
-from $r_a$ rather than $r_b$.
-We have therefore found out
-that the hole will be on $r_a$. So we recursively call $\inj\,
-r_a\,c\,v_a$ to fill that hole in $v_a$. After injection, the value
-$v_i$ for $r_i = r_a \cdot r_b$ should be $\Seq\,(\inj\,r_a\,c\,v_a)\,v_b$.
-Other clauses can be understood in a similar way.
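-
-The clauses of $\textit{inj}$ transliterate into Scala just as directly
-(a sketch using the same datatypes as the $\mkeps$ sketch above):
-\begin{verbatim}
-def inj(r: Rexp, c: Char, v: Value): Value = (r, v) match {
-  case (CHAR(d), Empty)                => Chr(d)
-  case (ALT(r1, _), Lft(v1))           => Lft(inj(r1, c, v1))
-  case (ALT(_, r2), Rgt(v2))           => Rgt(inj(r2, c, v2))
-  case (SEQ(r1, _), Sequ(v1, v2))      => Sequ(inj(r1, c, v1), v2)
-  case (SEQ(r1, _), Lft(Sequ(v1, v2))) => Sequ(inj(r1, c, v1), v2)
-  case (SEQ(r1, r2), Rgt(v2))          => Sequ(mkeps(r1), inj(r2, c, v2))
-  case (STAR(r1), Sequ(v1, Stars(vs))) => Stars(inj(r1, c, v1) :: vs)
-}
-\end{verbatim}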
-
-%\comment{Other word: insight?}
-The following example gives an insight into $\textit{inj}$'s effect and
-how Sulzmann and Lu's algorithm works as a whole. Suppose we have a
-regular expression $((((a+b)+ab)+c)+abc)^*$, and want to match it
-against the string $abc$ (when $abc$ is written as a regular expression,
-the standard way of expressing it is $a \cdot (b \cdot c)$, but we
-usually omit the parentheses and dots here for better readability). This
-algorithm returns a POSIX value, which means it will produce the longest
-matching. Consequently, it matches the string $abc$ in one star
-iteration, using the longest alternative $abc$ in the sub-expression (we shall use $r$ to denote this
-sub-expression for conciseness):
-
-\[((((a+b)+ab)+c)+\underbrace{abc}_r)\]
-
-\noindent
-Before $\textit{inj}$ is called, our lexer first builds derivatives using the
-string $abc$ (we simplified some regular expressions like $\ZERO \cdot
-b$ to $\ZERO$ for conciseness; we also omit parentheses if they are
-clear from the context):
-
-%Similarly, we allow
-%$\textit{ALT}$ to take a list of regular expressions as an argument
-%instead of just 2 operands to reduce the nested depth of
-%$\textit{ALT}$
-
-\begin{center}
-\begin{tabular}{lcl}
-$r^*$ & $\xrightarrow{\backslash a}$ & $r_1 = (\ONE+\ZERO+\ONE \cdot b + \ZERO + \ONE \cdot b \cdot c) \cdot r^*$\\
- & $\xrightarrow{\backslash b}$ & $r_2 = (\ZERO+\ZERO+\ONE \cdot \ONE + \ZERO + \ONE \cdot \ONE \cdot c) \cdot r^* +(\ZERO+\ONE+\ZERO + \ZERO + \ZERO) \cdot r^*$\\
- & $\xrightarrow{\backslash c}$ & $r_3 = ((\ZERO+\ZERO+\ZERO + \ZERO + \ONE \cdot \ONE \cdot \ONE) \cdot r^* + (\ZERO+\ZERO+\ZERO + \ONE + \ZERO) \cdot r^*) + $\\
- & & $\phantom{r_3 = (} ((\ZERO+\ONE+\ZERO + \ZERO + \ZERO) \cdot r^* + (\ZERO+\ZERO+\ZERO + \ONE + \ZERO) \cdot r^* )$
-\end{tabular}
-\end{center}
-
-\noindent
-As $r_3$ is nullable, we can call $\textit{mkeps}$
-to construct a lexical value for how $r_3$ matched the string $abc$.
-This function gives the following value $v_3$:
-
-
-\begin{center}
-$\Left(\Left(\Seq(\Right(\Seq(\Empty, \Seq(\Empty,\Empty))), \Stars [])))$
-\end{center}
-The outer $\Left(\Left(\ldots))$ tells us the leftmost nullable part of $r_3$ (underlined):
-
-\begin{center}
- \begin{tabular}{l@{\hspace{2mm}}l}
- & $\big(\underline{(\ZERO+\ZERO+\ZERO+ \ZERO+ \ONE \cdot \ONE \cdot \ONE) \cdot r^*}
- \;+\; (\ZERO+\ZERO+\ZERO + \ONE + \ZERO) \cdot r^*\big)$ \smallskip\\
- $+$ & $\big((\ZERO+\ONE+\ZERO + \ZERO + \ZERO) \cdot r^*
- \;+\; (\ZERO+\ZERO+\ZERO + \ONE + \ZERO) \cdot r^* \big)$
- \end{tabular}
- \end{center}
-
-\noindent
- Note that the leftmost location of term $(\ZERO+\ZERO+\ZERO + \ZERO + \ONE \cdot \ONE \cdot
- \ONE) \cdot r^*$ (which corresponds to the initial sub-match $abc$) allows
- $\textit{mkeps}$ to pick it up because $\textit{mkeps}$ is defined to always choose the
- left one when it is nullable. In the case of this example, $abc$ is
- preferred over $a$ or $ab$. This $\Left(\Left(\ldots))$ location is
- generated by two applications of the splitting clause
-
-\begin{center}
- $(r_1 \cdot r_2)\backslash c = (r_1\backslash c) \cdot r_2 \,+\, r_2\backslash c \qquad (\text{when $r_1$ is nullable}).$
-\end{center}
-
-\noindent
-By this clause, we put $r_1 \backslash c \cdot r_2 $ at the
-$\textit{front}$ and $r_2 \backslash c$ at the $\textit{back}$. This
-allows $\textit{mkeps}$ to always pick up among two matches the one with a longer
-initial sub-match. Removing the outside $\Left(\Left(...))$, the inside
-sub-value
-
-\begin{center}
- $\Seq(\Right(\Seq(\Empty, \Seq(\Empty, \Empty))), \Stars [])$
-\end{center}
-
-\noindent
-tells us how the empty string $[]$ is matched with $(\ZERO+\ZERO+\ZERO + \ZERO + \ONE \cdot
-\ONE \cdot \ONE) \cdot r^*$. We match $[]$ by a sequence of two nullable regular
-expressions. The first one is an alternative; we take the rightmost
-branch, whose language contains the empty string. The second
-nullable regular expression is a Kleene star. $\Stars$ tells us how it
-generates the nullable regular expression: by 0 iterations to form
-$\ONE$. Now $\textit{inj}$ injects characters back and incrementally
-builds a lexical value based on $v_3$. Using the value $v_3$, the character
-$c$, and the regular expression $r_2$, we can recover how $r_2$ matched
-the string $[c]$ : $\textit{inj} \; r_2 \; c \; v_3$ gives us
- \begin{center}
- $v_2 = \Left(\Seq(\Right(\Seq(\Empty, \Seq(\Empty, c))), \Stars [])),$
- \end{center}
-which tells us how $r_2$ matched $[c]$. After this we inject back the character $b$, and get
-\begin{center}
-$v_1 = \Seq(\Right(\Seq(\Empty, \Seq(b, c))), \Stars [])$
-\end{center}
- for how
- \begin{center}
- $r_1= (\ONE+\ZERO+\ONE \cdot b + \ZERO + \ONE \cdot b \cdot c) \cdot r^*$
- \end{center}
- matched the string $bc$ before it split into two substrings.
- Finally, after injecting character $a$ back to $v_1$,
- we get the lexical value tree
- \begin{center}
- $v_0= \Stars [\Right(\Seq(a, \Seq(b, c)))]$
- \end{center}
- for how $r$ matched $abc$. This completes the algorithm.
-
-%We omit the details of injection function, which is provided by Sulzmann and Lu's paper \cite{Sulzmann2014}.
-Readers might have noticed that the lexical value information is actually
-already available when doing derivatives. For example, immediately after
-the operation $\backslash a$ we know that if we want to match a string
-that starts with $a$, we can take the initial match to be
-
- \begin{center}
-\begin{enumerate}
- \item[1)] just $a$ or
- \item[2)] string $ab$ or
- \item[3)] string $abc$.
-\end{enumerate}
-\end{center}
-
-\noindent
-In order to differentiate between these choices, we just need to
-remember their positions---$a$ is on the left, $ab$ is in the middle,
-and $abc$ is on the right. Which of these alternatives is chosen
-later does not affect their relative position because the algorithm does
-not change this order. If this parsing information can be determined and
-does not change because of later derivatives, there is no point in
-traversing this information twice. This leads to an optimisation---if we
-store the information for lexical values inside the regular expression,
-update it when we do derivatives on them, and collect the information
-when finished with derivatives by calling $\textit{mkeps}$ to decide which
-branch is POSIX, then we can generate the lexical value in one pass, instead of
-performing the $n$ injections afterwards. This leads to Sulzmann and Lu's novel
-idea of using bitcodes in derivatives.
-
-In the next section, we shall focus on the bitcoded algorithm and the
-process of simplification of regular expressions. This is needed in
-order to obtain \emph{fast} versions of Brzozowski's, and Sulzmann
-and Lu's, algorithms. This is where the PhD-project aims to advance the
-state-of-the-art.
-
-
-\section{Simplification of Regular Expressions}
-
-Using bitcodes to guide parsing is not a novel idea. It was applied to
-context-free grammars and then adapted by Nielsen and Henglein for
-efficient regular expression lexing using DFAs~\cite{nielson11bcre}.
-Sulzmann and Lu took this idea of bitcodes a step further by integrating
-bitcodes into derivatives. The reason why we want to use bitcodes in
-this project is that we want to introduce more aggressive simplification
-rules in order to keep the size of derivatives small throughout. This is
-because the main drawback of building successive derivatives according
-to Brzozowski's definition is that they can grow very quickly in size.
-This is mainly due to the fact that the derivative operation often
-generates ``useless'' $\ZERO$s and $\ONE$s in derivatives. As a result, if
-implemented naively both algorithms by Brzozowski and by Sulzmann and Lu
-are excruciatingly slow. For example when starting with the regular
-expression $(a + aa)^*$ and building 12 successive derivatives
-w.r.t.~the character $a$, one obtains a derivative regular expression
-with more than 8000 nodes (when viewed as a tree). Operations like
-$\textit{der}$ and $\nullable$ need to traverse such trees and
-consequently the bigger the size of the derivative the slower the
-algorithm.
-
-Fortunately, one can simplify regular expressions after each derivative
-step. Various simplifications of regular expressions are possible, such
-as the simplification of $\ZERO + r$, $r + \ZERO$, $\ONE\cdot r$, $r
-\cdot \ONE$, and $r + r$ to just $r$. These simplifications do not
-affect the answer for whether a regular expression matches a string or
-not, but fortunately also do not affect the POSIX strategy of how
-regular expressions match strings---although the latter is much harder
-to establish. Some initial results in this regard have been
-obtained in \cite{AusafDyckhoffUrban2016}.
-
-Unfortunately, the simplification rules outlined above are not
-sufficient to prevent a size explosion in all cases. We
-believe a tighter bound can be achieved that prevents an explosion in
-\emph{all} cases. Such a tighter bound is suggested by work of Antimirov who
-proved that (partial) derivatives can be bounded by the number of
-characters contained in the initial regular expression
-\cite{Antimirov95}. He defined the \emph{partial derivatives} of regular
-expressions as follows:
-
-\begin{center}
-\begin{tabular}{lcl}
- $\textit{pder} \; c \; \ZERO$ & $\dn$ & $\emptyset$\\
- $\textit{pder} \; c \; \ONE$ & $\dn$ & $\emptyset$ \\
- $\textit{pder} \; c \; d$ & $\dn$ & $\textit{if} \; c \,=\, d \; \{ \ONE \} \; \textit{else} \; \emptyset$ \\
-  $\textit{pder} \; c \; r_1+r_2$ & $\dn$ & $\textit{pder} \; c \; r_1 \,\cup\, \textit{pder} \; c \; r_2$ \\
-  $\textit{pder} \; c \; r_1 \cdot r_2$ & $\dn$ & $\textit{if} \; \textit{nullable} \; r_1 $\\
-  & & $\textit{then} \; \{ r \cdot r_2 \mid r \in \textit{pder} \; c \; r_1 \} \cup \textit{pder} \; c \; r_2 \;$\\
-  & & $\textit{else} \; \{ r \cdot r_2 \mid r \in \textit{pder} \; c \; r_1 \} $ \\
-  $\textit{pder} \; c \; r^*$ & $\dn$ & $ \{ r' \cdot r^* \mid r' \in \textit{pder} \; c \; r \} $ \\
- \end{tabular}
- \end{center}
-
-\noindent
-A partial derivative of a regular expression $r$ is essentially a set of
-regular expressions that are either $r$'s children expressions or
-concatenations of them. Antimirov proved a tight bound on the sum of
-the sizes of \emph{all} partial derivatives, no matter what the string
-looks like: roughly speaking, this sum is at most cubic in the
-size of the regular expression.
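-
-\noindent
-To make this definition concrete, the following Scala sketch renders
-$\textit{pder}$ directly. The datatype \texttt{Rexp} is a hypothetical,
-minimal rendering of the basic regular expressions used in this report
-(the names are chosen here for illustration and are not part of any
-formalisation):
-
-\begin{verbatim}
-abstract class Rexp
-case object ZERO extends Rexp
-case object ONE extends Rexp
-case class CHAR(c: Char) extends Rexp
-case class ALT(r1: Rexp, r2: Rexp) extends Rexp
-case class SEQ(r1: Rexp, r2: Rexp) extends Rexp
-case class STAR(r: Rexp) extends Rexp
-
-def nullable(r: Rexp): Boolean = r match {
-  case ZERO => false
-  case ONE => true
-  case CHAR(_) => false
-  case ALT(r1, r2) => nullable(r1) || nullable(r2)
-  case SEQ(r1, r2) => nullable(r1) && nullable(r2)
-  case STAR(_) => true
-}
-
-// pder returns a set of regular expressions, not a single one
-def pder(c: Char, r: Rexp): Set[Rexp] = r match {
-  case ZERO => Set()
-  case ONE => Set()
-  case CHAR(d) => if (c == d) Set(ONE) else Set()
-  case ALT(r1, r2) => pder(c, r1) ++ pder(c, r2)
-  case SEQ(r1, r2) =>
-    val s = for (r1p <- pder(c, r1)) yield SEQ(r1p, r2)
-    if (nullable(r1)) s ++ pder(c, r2) else s
-  case STAR(r1) =>
-    for (r1p <- pder(c, r1)) yield SEQ(r1p, STAR(r1))
-}
-\end{verbatim}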
-
-If we want the size of derivatives in Sulzmann and Lu's algorithm to
-stay below this bound, we would need more aggressive simplifications.
-Essentially we need to delete useless $\ZERO$s and $\ONE$s, as well as
-delete duplicates whenever possible. For example, the parentheses in
-$(a+b) \cdot c + bc$ can be opened up to get $a\cdot c + b \cdot c + b
-\cdot c$, and then simplified to just $a \cdot c + b \cdot c$. Another
-example is simplifying $(a^*+a) + (a^*+ \ONE) + (a +\ONE)$ to just
-$a^*+a+\ONE$. Adding these more aggressive simplification rules helps us
-to achieve the same size bound as that of the partial derivatives.
-
-In order to implement the idea of ``spilling out alternatives'' and to
-make them compatible with the $\textit{inj}$-mechanism, we use
-\emph{bitcodes}. Bits and bitcodes (lists of bits) are just:
-
-%This allows us to prove a tight
-%bound on the size of regular expression during the running time of the
-%algorithm if we can establish the connection between our simplification
-%rules and partial derivatives.
-
- %We believe, and have generated test
-%data, that a similar bound can be obtained for the derivatives in
-%Sulzmann and Lu's algorithm. Let us give some details about this next.
-
-
-\begin{center}
- $b ::= S \mid Z \qquad
-bs ::= [] \mid b:bs
-$
-\end{center}
-
-\noindent
-The $S$ and $Z$ are arbitrary names for the bits in order to avoid
-confusion with the regular expressions $\ZERO$ and $\ONE$. Bitcodes (or
-bit-lists) can be used to encode values (or incomplete values) in a
-compact form. This can be straightforwardly seen in the following
-coding function from values to bitcodes:
-
-\begin{center}
-\begin{tabular}{lcl}
- $\textit{code}(\Empty)$ & $\dn$ & $[]$\\
- $\textit{code}(\Char\,c)$ & $\dn$ & $[]$\\
- $\textit{code}(\Left\,v)$ & $\dn$ & $\Z :: code(v)$\\
- $\textit{code}(\Right\,v)$ & $\dn$ & $\S :: code(v)$\\
- $\textit{code}(\Seq\,v_1\,v_2)$ & $\dn$ & $code(v_1) \,@\, code(v_2)$\\
-  $\textit{code}(\Stars\,[])$ & $\dn$ & $[\S]$\\
-  $\textit{code}(\Stars\,(v\!::\!vs))$ & $\dn$ & $\Z :: code(v) \;@\;
-                                                 code(\Stars\,vs)$
-\end{tabular}
-\end{center}
-
-\noindent
-Here $\textit{code}$ encodes a value into a bitcode by converting
-$\Left$ into $\Z$, $\Right$ into $\S$, the start point of a non-empty
-star iteration into $\Z$, and the border where a local star terminates
-into $\S$ (this agrees with the bits attached by the derivative and
-$\textit{bmkeps}$ operations shown later). This coding is lossy, as it
-throws away the information about characters, and also does not encode
-the ``boundary'' between two sequence values. Moreover, with only the
-bitcode we cannot even tell whether the $\S$s and $\Z$s are for
-$\Left/\Right$ or $\Stars$. The reason for choosing this compact way of
-storing information is that the relatively small size of bits can be
-easily manipulated and ``moved around'' in a regular expression. In
-order to recover values, we will need the corresponding regular
-expression as extra information. This means the decoding function is
-defined as:
-
-
-%\begin{definition}[Bitdecoding of Values]\mbox{}
-\begin{center}
-\begin{tabular}{@{}l@{\hspace{1mm}}c@{\hspace{1mm}}l@{}}
- $\textit{decode}'\,bs\,(\ONE)$ & $\dn$ & $(\Empty, bs)$\\
- $\textit{decode}'\,bs\,(c)$ & $\dn$ & $(\Char\,c, bs)$\\
- $\textit{decode}'\,(\Z\!::\!bs)\;(r_1 + r_2)$ & $\dn$ &
- $\textit{let}\,(v, bs_1) = \textit{decode}'\,bs\,r_1\;\textit{in}\;
- (\Left\,v, bs_1)$\\
- $\textit{decode}'\,(\S\!::\!bs)\;(r_1 + r_2)$ & $\dn$ &
- $\textit{let}\,(v, bs_1) = \textit{decode}'\,bs\,r_2\;\textit{in}\;
- (\Right\,v, bs_1)$\\
- $\textit{decode}'\,bs\;(r_1\cdot r_2)$ & $\dn$ &
- $\textit{let}\,(v_1, bs_1) = \textit{decode}'\,bs\,r_1\;\textit{in}$\\
- & & $\textit{let}\,(v_2, bs_2) = \textit{decode}'\,bs_1\,r_2$\\
- & & \hspace{35mm}$\textit{in}\;(\Seq\,v_1\,v_2, bs_2)$\\
-  $\textit{decode}'\,(\S\!::\!bs)\,(r^*)$ & $\dn$ & $(\Stars\,[], bs)$\\
-  $\textit{decode}'\,(\Z\!::\!bs)\,(r^*)$ & $\dn$ &
-         $\textit{let}\,(v, bs_1) = \textit{decode}'\,bs\,r\;\textit{in}$\\
-  & &   $\textit{let}\,(\Stars\,vs, bs_2) = \textit{decode}'\,bs_1\,r^*$\\
-  & &   \hspace{35mm}$\textit{in}\;(\Stars\,v\!::\!vs, bs_2)$\bigskip\\
-
- $\textit{decode}\,bs\,r$ & $\dn$ &
- $\textit{let}\,(v, bs') = \textit{decode}'\,bs\,r\;\textit{in}$\\
- & & $\textit{if}\;bs' = []\;\textit{then}\;\textit{Some}\,v\;
- \textit{else}\;\textit{None}$
-\end{tabular}
-\end{center}
-%\end{definition}
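-
-\noindent
-The following Scala sketch of $\textit{code}$ and $\textit{decode}$
-might make the interplay between bits and values more concrete. It
-reuses the hypothetical \texttt{Rexp} datatype from the sketch above;
-\texttt{decodeAux} plays the role of $\textit{decode}'$ and the value
-constructors are renamed to avoid clashing with Scala's built-ins:
-
-\begin{verbatim}
-abstract class Bit
-case object Z extends Bit
-case object S extends Bit
-
-abstract class Val
-case object Empty extends Val
-case class Chr(c: Char) extends Val
-case class Sequ(v1: Val, v2: Val) extends Val
-case class Lf(v: Val) extends Val   // Left, renamed
-case class Rg(v: Val) extends Val   // Right, renamed
-case class Stars(vs: List[Val]) extends Val
-
-def code(v: Val): List[Bit] = v match {
-  case Empty => Nil
-  case Chr(_) => Nil
-  case Lf(v1) => Z :: code(v1)
-  case Rg(v1) => S :: code(v1)
-  case Sequ(v1, v2) => code(v1) ::: code(v2)
-  case Stars(Nil) => List(S)          // S marks the end of a star
-  case Stars(v1 :: vs) => Z :: code(v1) ::: code(Stars(vs))
-}
-
-def decodeAux(bs: List[Bit], r: Rexp): (Val, List[Bit]) = (bs, r) match {
-  case (bs, ONE) => (Empty, bs)
-  case (bs, CHAR(c)) => (Chr(c), bs)
-  case (Z :: bs, ALT(r1, _)) =>
-    val (v, bs1) = decodeAux(bs, r1); (Lf(v), bs1)
-  case (S :: bs, ALT(_, r2)) =>
-    val (v, bs1) = decodeAux(bs, r2); (Rg(v), bs1)
-  case (bs, SEQ(r1, r2)) =>
-    val (v1, bs1) = decodeAux(bs, r1)
-    val (v2, bs2) = decodeAux(bs1, r2)
-    (Sequ(v1, v2), bs2)
-  case (Z :: bs, STAR(r1)) =>
-    val (v, bs1) = decodeAux(bs, r1)
-    val (Stars(vs), bs2) = decodeAux(bs1, STAR(r1))
-    (Stars(v :: vs), bs2)
-  case (S :: bs, STAR(_)) => (Stars(Nil), bs)
-}
-
-def decode(bs: List[Bit], r: Rexp): Option[Val] =
-  decodeAux(bs, r) match {
-    case (v, Nil) => Some(v)
-    case _        => None
-  }
-\end{verbatim}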
-
-Sulzmann and Lu integrated the bitcodes into regular expressions to
-create annotated regular expressions \cite{Sulzmann2014}.
-\emph{Annotated regular expressions} are defined by the following
-grammar:%\comment{ALTS should have an $as$ in the definitions, not just $a_1$ and $a_2$}
-
-\begin{center}
-\begin{tabular}{lcl}
- $\textit{a}$ & $::=$ & $\textit{ZERO}$\\
- & $\mid$ & $\textit{ONE}\;\;bs$\\
- & $\mid$ & $\textit{CHAR}\;\;bs\,c$\\
- & $\mid$ & $\textit{ALTS}\;\;bs\,as$\\
- & $\mid$ & $\textit{SEQ}\;\;bs\,a_1\,a_2$\\
- & $\mid$ & $\textit{STAR}\;\;bs\,a$
-\end{tabular}
-\end{center}
-%(in \textit{ALTS})
-
-\noindent
-where $bs$ stands for bitcodes, $a$ for \textbf{a}nnotated regular
-expressions and $as$ for a list of annotated regular expressions.
-The alternative constructor ($\textit{ALTS}$) has been generalised to
-accept a list of annotated regular expressions rather than just two.
-We will show that these bitcodes encode information about
-the (POSIX) value that should be generated by the Sulzmann and Lu
-algorithm.
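-
-\noindent
-As a Scala sketch (with \texttt{Bit} as in the earlier sketch), the
-annotated regular expressions can be rendered as:
-
-\begin{verbatim}
-abstract class ARexp
-case object AZERO extends ARexp
-case class AONE(bs: List[Bit]) extends ARexp
-case class ACHAR(bs: List[Bit], c: Char) extends ARexp
-case class AALTS(bs: List[Bit], as: List[ARexp]) extends ARexp
-case class ASEQ(bs: List[Bit], a1: ARexp, a2: ARexp) extends ARexp
-case class ASTAR(bs: List[Bit], a: ARexp) extends ARexp
-\end{verbatim}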
-
-
-To do lexing using annotated regular expressions, we shall first
-transform the usual (un-annotated) regular expressions into annotated
-regular expressions. This operation is called \emph{internalisation} and
-defined as follows:
-
-%\begin{definition}
-\begin{center}
-\begin{tabular}{lcl}
- $(\ZERO)^\uparrow$ & $\dn$ & $\textit{ZERO}$\\
- $(\ONE)^\uparrow$ & $\dn$ & $\textit{ONE}\,[]$\\
- $(c)^\uparrow$ & $\dn$ & $\textit{CHAR}\,[]\,c$\\
- $(r_1 + r_2)^\uparrow$ & $\dn$ &
- $\textit{ALTS}\;[]\,List((\textit{fuse}\,[\Z]\,r_1^\uparrow),\,
- (\textit{fuse}\,[\S]\,r_2^\uparrow))$\\
- $(r_1\cdot r_2)^\uparrow$ & $\dn$ &
- $\textit{SEQ}\;[]\,r_1^\uparrow\,r_2^\uparrow$\\
- $(r^*)^\uparrow$ & $\dn$ &
- $\textit{STAR}\;[]\,r^\uparrow$\\
-\end{tabular}
-\end{center}
-%\end{definition}
-
-\noindent
-We use up arrows here to indicate that the basic un-annotated regular
-expressions are ``lifted up'' into something slightly more complex. In the
-fourth clause, $\textit{fuse}$ is an auxiliary function that helps to
-attach bits to the front of an annotated regular expression. Its
-definition is as follows:
-
-\begin{center}
-\begin{tabular}{lcl}
- $\textit{fuse}\;bs\,(\textit{ZERO})$ & $\dn$ & $\textit{ZERO}$\\
- $\textit{fuse}\;bs\,(\textit{ONE}\,bs')$ & $\dn$ &
- $\textit{ONE}\,(bs\,@\,bs')$\\
- $\textit{fuse}\;bs\,(\textit{CHAR}\,bs'\,c)$ & $\dn$ &
- $\textit{CHAR}\,(bs\,@\,bs')\,c$\\
- $\textit{fuse}\;bs\,(\textit{ALTS}\,bs'\,as)$ & $\dn$ &
- $\textit{ALTS}\,(bs\,@\,bs')\,as$\\
- $\textit{fuse}\;bs\,(\textit{SEQ}\,bs'\,a_1\,a_2)$ & $\dn$ &
- $\textit{SEQ}\,(bs\,@\,bs')\,a_1\,a_2$\\
- $\textit{fuse}\;bs\,(\textit{STAR}\,bs'\,a)$ & $\dn$ &
- $\textit{STAR}\,(bs\,@\,bs')\,a$
-\end{tabular}
-\end{center}
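-
-\noindent
-In Scala, $\textit{fuse}$ and internalisation might be sketched as
-follows, where \texttt{internalise} is our name for $(\_)^\uparrow$:
-
-\begin{verbatim}
-def fuse(bs: List[Bit], a: ARexp): ARexp = a match {
-  case AZERO => AZERO
-  case AONE(bsq) => AONE(bs ::: bsq)
-  case ACHAR(bsq, c) => ACHAR(bs ::: bsq, c)
-  case AALTS(bsq, as) => AALTS(bs ::: bsq, as)
-  case ASEQ(bsq, a1, a2) => ASEQ(bs ::: bsq, a1, a2)
-  case ASTAR(bsq, a1) => ASTAR(bs ::: bsq, a1)
-}
-
-def internalise(r: Rexp): ARexp = r match {
-  case ZERO => AZERO
-  case ONE => AONE(Nil)
-  case CHAR(c) => ACHAR(Nil, c)
-  case ALT(r1, r2) =>
-    AALTS(Nil, List(fuse(List(Z), internalise(r1)),
-                    fuse(List(S), internalise(r2))))
-  case SEQ(r1, r2) => ASEQ(Nil, internalise(r1), internalise(r2))
-  case STAR(r1) => ASTAR(Nil, internalise(r1))
-}
-\end{verbatim}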
-
-\noindent
-After internalising the regular expression, we perform successive
-derivative operations on the annotated regular expressions. This
-derivative operation is the same as what we had previously for the
-basic regular expressions, except that we need to take care of
-the bitcodes:
-
- %\begin{definition}{bder}
-\begin{center}
- \begin{tabular}{@{}lcl@{}}
- $(\textit{ZERO})\,\backslash c$ & $\dn$ & $\textit{ZERO}$\\
- $(\textit{ONE}\;bs)\,\backslash c$ & $\dn$ & $\textit{ZERO}$\\
- $(\textit{CHAR}\;bs\,d)\,\backslash c$ & $\dn$ &
- $\textit{if}\;c=d\; \;\textit{then}\;
- \textit{ONE}\;bs\;\textit{else}\;\textit{ZERO}$\\
- $(\textit{ALTS}\;bs\,as)\,\backslash c$ & $\dn$ &
- $\textit{ALTS}\;bs\,(as.map(\backslash c))$\\
- $(\textit{SEQ}\;bs\,a_1\,a_2)\,\backslash c$ & $\dn$ &
- $\textit{if}\;\textit{bnullable}\,a_1$\\
- & &$\textit{then}\;\textit{ALTS}\,bs\,List((\textit{SEQ}\,[]\,(a_1\,\backslash c)\,a_2),$\\
- & &$\phantom{\textit{then}\;\textit{ALTS}\,bs\,}(\textit{fuse}\,(\textit{bmkeps}\,a_1)\,(a_2\,\backslash c)))$\\
- & &$\textit{else}\;\textit{SEQ}\,bs\,(a_1\,\backslash c)\,a_2$\\
-  $(\textit{STAR}\,bs\,a)\,\backslash c$ & $\dn$ &
-      $\textit{SEQ}\;bs\,(\textit{fuse}\, [\Z]\, (a\,\backslash c))\,
-       (\textit{STAR}\,[]\,a)$
-\end{tabular}
-\end{center}
-%\end{definition}
-
-\noindent
-For instance, when we unfold $\textit{STAR} \; bs \; a$ into a sequence,
-we need to attach an additional bit $Z$ to the front of $r \backslash c$
-to indicate that there is one more star iteration. Also the $SEQ$ clause
-is more subtle---when $a_1$ is $\textit{bnullable}$ (here
-\textit{bnullable} is exactly the same as $\textit{nullable}$, except
-that it is for annotated regular expressions, therefore we omit the
-definition). Assume that $bmkeps$ correctly extracts the bitcode for how
-$a_1$ matches the string prior to character $c$ (more on this later),
-then the right branch of $ALTS$, which is $fuse \; bmkeps \; a_1 (a_2
-\backslash c)$ will collapse the regular expression $a_1$(as it has
-already been fully matched) and store the parsing information at the
-head of the regular expression $a_2 \backslash c$ by fusing to it. The
-bitsequence $bs$, which was initially attached to the head of $SEQ$, has
-now been elevated to the top-level of $ALTS$, as this information will be
-needed whichever way the $SEQ$ is matched---no matter whether $c$ belongs
-to $a_1$ or $ a_2$. After building these derivatives and maintaining all
-the lexing information, we complete the lexing by collecting the
-bitcodes using a generalised version of the $\textit{mkeps}$ function
-for annotated regular expressions, called $\textit{bmkeps}$:
-
-
-%\begin{definition}[\textit{bmkeps}]\mbox{}
-\begin{center}
-\begin{tabular}{lcl}
- $\textit{bmkeps}\,(\textit{ONE}\;bs)$ & $\dn$ & $bs$\\
-  $\textit{bmkeps}\,(\textit{ALTS}\;bs\,a::as)$ & $\dn$ &
-     $\textit{if}\;\textit{bnullable}\,a$\\
-  & &$\textit{then}\;bs\,@\,\textit{bmkeps}\,a$\\
-  & &$\textit{else}\;\textit{bmkeps}\,(\textit{ALTS}\;bs\,as)$\\
- $\textit{bmkeps}\,(\textit{SEQ}\;bs\,a_1\,a_2)$ & $\dn$ &
- $bs \,@\,\textit{bmkeps}\,a_1\,@\, \textit{bmkeps}\,a_2$\\
- $\textit{bmkeps}\,(\textit{STAR}\;bs\,a)$ & $\dn$ &
- $bs \,@\, [\S]$
-\end{tabular}
-\end{center}
-%\end{definition}
-
-\noindent
-This function completes the value information by travelling along the
-path of the regular expression that corresponds to a POSIX value and
-collecting all the bitcodes, using $\S$ to indicate the end of star
-iterations. If we take the bitcodes produced by $\textit{bmkeps}$ and
-decode them, we get the value we expect. The corresponding lexing
-algorithm looks as follows:
-
-\begin{center}
-\begin{tabular}{lcl}
- $\textit{blexer}\;r\,s$ & $\dn$ &
- $\textit{let}\;a = (r^\uparrow)\backslash s\;\textit{in}$\\
- & & $\;\;\textit{if}\; \textit{bnullable}(a)$\\
- & & $\;\;\textit{then}\;\textit{decode}\,(\textit{bmkeps}\,a)\,r$\\
- & & $\;\;\textit{else}\;\textit{None}$
-\end{tabular}
-\end{center}
-
-\noindent
-In this definition $\_\backslash s$ is the generalisation of the derivative
-operation from characters to strings (just like the derivatives for un-annotated
-regular expressions).
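-
-The definitions above can be rendered in Scala as the following sketch,
-which puts $\textit{bnullable}$, $\textit{bmkeps}$, the bitcoded
-derivative and $\textit{blexer}$ together; \texttt{bders} is our name
-for the string derivative $\_\backslash s$, and the datatypes are the
-hypothetical ones from the earlier sketches:
-
-\begin{verbatim}
-def bnullable(a: ARexp): Boolean = a match {
-  case AZERO => false
-  case AONE(_) => true
-  case ACHAR(_, _) => false
-  case AALTS(_, as) => as.exists(bnullable)
-  case ASEQ(_, a1, a2) => bnullable(a1) && bnullable(a2)
-  case ASTAR(_, _) => true
-}
-
-def bmkeps(a: ARexp): List[Bit] = a match {
-  case AONE(bs) => bs
-  case AALTS(bs, a1 :: as) =>
-    if (bnullable(a1)) bs ::: bmkeps(a1)
-    else bmkeps(AALTS(bs, as))
-  case ASEQ(bs, a1, a2) => bs ::: bmkeps(a1) ::: bmkeps(a2)
-  case ASTAR(bs, _) => bs ::: List(S)
-}
-
-def bder(c: Char, a: ARexp): ARexp = a match {
-  case AZERO => AZERO
-  case AONE(_) => AZERO
-  case ACHAR(bs, d) => if (c == d) AONE(bs) else AZERO
-  case AALTS(bs, as) => AALTS(bs, as.map(bder(c, _)))
-  case ASEQ(bs, a1, a2) =>
-    if (bnullable(a1))
-      AALTS(bs, List(ASEQ(Nil, bder(c, a1), a2),
-                     fuse(bmkeps(a1), bder(c, a2))))
-    else ASEQ(bs, bder(c, a1), a2)
-  case ASTAR(bs, a1) =>
-    ASEQ(bs, fuse(List(Z), bder(c, a1)), ASTAR(Nil, a1))
-}
-
-def bders(a: ARexp, s: List[Char]): ARexp = s match {
-  case Nil => a
-  case c :: cs => bders(bder(c, a), cs)
-}
-
-def blexer(r: Rexp, s: String): Option[Val] = {
-  val a = bders(internalise(r), s.toList)
-  if (bnullable(a)) decode(bmkeps(a), r) else None
-}
-\end{verbatim}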
-
-The main point of the bitcodes and annotated regular expressions is that
-we can apply rather aggressive (in terms of size) simplification rules
-in order to keep derivatives small. We have developed such
-``aggressive'' simplification rules and generated test data that show
-that the expected bound can be achieved. Obviously we could only
-partially cover the search space as there are infinitely many regular
-expressions and strings.
-
-One modification we introduced is to allow a list of annotated regular
-expressions in the \textit{ALTS} constructor. This allows us to not just
-delete unnecessary $\ZERO$s and $\ONE$s from regular expressions, but
-also unnecessary ``copies'' of regular expressions (very similar to
-simplifying $r + r$ to just $r$, but in a more general setting). Another
-modification is that we use simplification rules inspired by Antimirov's
-work on partial derivatives. They maintain the idea that only the first
-``copy'' of a regular expression in an alternative contributes to the
-calculation of a POSIX value. All subsequent copies can be pruned away from
-the regular expression. A recursive definition of our simplification function
-that looks somewhat similar to our Scala code is given below:
-%\comment{Use $\ZERO$, $\ONE$ and so on.
-%Is it $ALTS$ or $ALTS$?}\\
-
-\begin{center}
- \begin{tabular}{@{}lcl@{}}
-
- $\textit{simp} \; (\textit{SEQ}\;bs\,a_1\,a_2)$ & $\dn$ & $ (\textit{simp} \; a_1, \textit{simp} \; a_2) \; \textit{match} $ \\
- &&$\quad\textit{case} \; (\ZERO, \_) \Rightarrow \ZERO$ \\
- &&$\quad\textit{case} \; (\_, \ZERO) \Rightarrow \ZERO$ \\
- &&$\quad\textit{case} \; (\ONE, a_2') \Rightarrow \textit{fuse} \; bs \; a_2'$ \\
- &&$\quad\textit{case} \; (a_1', \ONE) \Rightarrow \textit{fuse} \; bs \; a_1'$ \\
- &&$\quad\textit{case} \; (a_1', a_2') \Rightarrow \textit{SEQ} \; bs \; a_1' \; a_2'$ \\
-
-   $\textit{simp} \; (\textit{ALTS}\;bs\,as)$ & $\dn$ & $\textit{distinct}\,( \textit{flatten} \,( \textit{map}\;\textit{simp}\;as)) \; \textit{match} $ \\
-   &&$\quad\textit{case} \; [] \Rightarrow \ZERO$ \\
-   &&$\quad\textit{case} \; a :: [] \Rightarrow \textit{fuse}\;bs\;a$ \\
-   &&$\quad\textit{case} \; as' \Rightarrow \textit{ALTS}\;bs\;as'$\\
-
- $\textit{simp} \; a$ & $\dn$ & $\textit{a} \qquad \textit{otherwise}$
-\end{tabular}
-\end{center}
-
-\noindent
-The simplification does a pattern match on the regular expression.
-When it detects that the regular expression is an alternative or a
-sequence, it will try to simplify its children regular expressions
-recursively and then see if one of the children turns into $\ZERO$ or
-$\ONE$, which might trigger further simplification at the current level.
-The most involved part is the $\textit{ALTS}$ clause, where we use two
-auxiliary functions $\textit{flatten}$ and $\textit{distinct}$ to open up nested
-$\textit{ALTS}$ and reduce as many duplicates as possible. Function
-$\textit{distinct}$ keeps only the first occurring copy and removes all
-later duplicates. Function $\textit{flatten}$ opens up nested \textit{ALTS}.
-Its recursive definition is given below:
-
- \begin{center}
- \begin{tabular}{@{}lcl@{}}
-  $\textit{flatten} \; ((\textit{ALTS}\;bs\,as) :: as')$ & $\dn$ & $(\textit{map} \;
-     (\textit{fuse}\;bs)\; \textit{as}) \; @ \; \textit{flatten} \; as' $ \\
-  $\textit{flatten} \; (\textit{ZERO} :: as')$ & $\dn$ & $ \textit{flatten} \;  as' $ \\
-  $\textit{flatten} \; (a :: as')$ & $\dn$ & $a :: \textit{flatten} \; as'$ \quad(otherwise)\\
-  $\textit{flatten} \; []$ & $\dn$ & $[]$
-\end{tabular}
-\end{center}
-
-\noindent
-Here $\textit{flatten}$ behaves like the traditional functional programming flatten
-function, except that it also removes $\ZERO$s. Or in terms of regular expressions, it
-removes parentheses, for example changing $a+(b+c)$ into $a+b+c$.
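-
-A Scala sketch of this simplification phase is given below. One small
-deviation from the definition above: in the sequence case we keep the
-bits attached to a simplified $\ONE$ (by fusing them as well), and we
-drop the symmetric $r \cdot \ONE$ case, since $\textit{fuse}$ can only
-attach bits at the front. Scala's built-in \texttt{distinct} on lists
-keeps exactly the first occurrence of each duplicate:
-
-\begin{verbatim}
-// opens up nested AALTS and drops AZEROs
-def flats(as: List[ARexp]): List[ARexp] = as match {
-  case Nil => Nil
-  case AZERO :: as1 => flats(as1)
-  case AALTS(bs, as1) :: as2 => as1.map(fuse(bs, _)) ::: flats(as2)
-  case a :: as1 => a :: flats(as1)
-}
-
-def bsimp(a: ARexp): ARexp = a match {
-  case ASEQ(bs, a1, a2) => (bsimp(a1), bsimp(a2)) match {
-    case (AZERO, _) => AZERO
-    case (_, AZERO) => AZERO
-    case (AONE(bs1), a2s) => fuse(bs ::: bs1, a2s)
-    case (a1s, a2s) => ASEQ(bs, a1s, a2s)
-  }
-  case AALTS(bs, as) => flats(as.map(bsimp)).distinct match {
-    case Nil => AZERO
-    case a1 :: Nil => fuse(bs, a1)
-    case as1 => AALTS(bs, as1)
-  }
-  case a => a
-}
-\end{verbatim}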
-
-Suppose we apply simplification after each derivative step, and view
-these two operations as an atomic one: $a \backslash_{simp}\,c \dn
-\textit{simp}(a \backslash c)$. Then we can use the previous natural
-extension from derivative w.r.t.~character to derivative
-w.r.t.~string:%\comment{simp in the [] case?}
-
-\begin{center}
-\begin{tabular}{lcl}
-$r \backslash_{simp} (c\!::\!s) $ & $\dn$ & $(r \backslash_{simp}\, c) \backslash_{simp}\, s$ \\
-$r \backslash_{simp} [\,] $ & $\dn$ & $r$
-\end{tabular}
-\end{center}
-
-\noindent
-With this we obtain an optimised version of the algorithm:
-
- \begin{center}
-\begin{tabular}{lcl}
- $\textit{blexer\_simp}\;r\,s$ & $\dn$ &
- $\textit{let}\;a = (r^\uparrow)\backslash_{simp}\, s\;\textit{in}$\\
- & & $\;\;\textit{if}\; \textit{bnullable}(a)$\\
- & & $\;\;\textit{then}\;\textit{decode}\,(\textit{bmkeps}\,a)\,r$\\
- & & $\;\;\textit{else}\;\textit{None}$
-\end{tabular}
-\end{center}
-
-\noindent
-This algorithm keeps the size of the regular expressions small: for example,
-with this simplification the derivatives in our previous $(a + aa)^*$ example
-shrink from more than 8000 nodes to just 6, and the size stays constant,
-no matter how long the input string is.
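-
-In Scala, the optimised lexer is a one-line change, and a hypothetical
-size measure (counting the nodes of an annotated regular expression)
-lets one observe the effect of the simplification on examples like
-$(a+aa)^*$:
-
-\begin{verbatim}
-def bdersSimp(a: ARexp, s: List[Char]): ARexp = s match {
-  case Nil => a
-  case c :: cs => bdersSimp(bsimp(bder(c, a)), cs)
-}
-
-def blexerSimp(r: Rexp, s: String): Option[Val] = {
-  val a = bdersSimp(internalise(r), s.toList)
-  if (bnullable(a)) decode(bmkeps(a), r) else None
-}
-
-// number of nodes of an annotated regular expression
-def asize(a: ARexp): Int = a match {
-  case AZERO | AONE(_) | ACHAR(_, _) => 1
-  case AALTS(_, as) => 1 + as.map(asize).sum
-  case ASEQ(_, a1, a2) => 1 + asize(a1) + asize(a2)
-  case ASTAR(_, a1) => 1 + asize(a1)
-}
-
-// e.g. the derivatives of (a + aa)* stay small:
-// val r = STAR(ALT(CHAR('a'), SEQ(CHAR('a'), CHAR('a'))))
-// asize(bdersSimp(internalise(r), List.fill(12)('a')))
-\end{verbatim}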
-
-
-
-\section{Current Work}
-
-We are currently engaged in two tasks related to this algorithm. The
-first task is to prove that our simplification rules do not
-affect the POSIX value that should be generated by the algorithm
-according to the specification of a POSIX value, and furthermore to obtain a
-much tighter bound on the sizes of derivatives. The result would be that our
-algorithm is correct and fast on all inputs. The blow-up,
-as observed in JavaScript, Python and Java, would then be excluded
-from happening in our algorithm. For this proof we use the theorem prover
-Isabelle. Once completed, this result will advance the state-of-the-art:
-Sulzmann and Lu wrote in their paper~\cite{Sulzmann2014} about the
-bitcoded ``incremental parsing method'' (that is the lexing algorithm
-outlined in this section):
-
-\begin{quote}\it
- ``Correctness Claim: We further claim that the incremental parsing
- method in Figure~5 in combination with the simplification steps in
- Figure 6 yields POSIX parse tree [our lexical values]. We have tested this claim
- extensively by using the method in Figure~3 as a reference but yet
- have to work out all proof details.''
-\end{quote}
-
-\noindent
-We would like to settle this correctness claim. It is relatively
-straightforward to establish that after one simplification step, the part of a
-nullable derivative that corresponds to a POSIX value remains intact and can
-still be collected; in other words, we can show that
-%\comment{Double-check....I
-%think this is not the case}
-%\comment{If i remember correctly, you have proved this lemma.
-%I feel this is indeed not true because you might place arbitrary
-%bits on the regex r, however if this is the case, did i remember wrongly that
-%you proved something like simplification does not affect $\textit{bmkeps}$ results?
-%Anyway, i have amended this a little bit so it does not allow arbitrary bits attached
-%to a regex. Maybe it works now.}
-
-\begin{center}
-  $\textit{bmkeps} \; a = \textit{bmkeps} \; (\textit{bsimp} \; a) \qquad (\textit{provided $a$ is bnullable})$
-\end{center}
-
-\noindent
-as this basically comes down to proving that actions like removing the
-additional $r$ in $r+r$ do not delete important POSIX information from
-a regular expression. The hard part of this proof is to establish that
-
-\begin{center}
-  $\textit{blexer\_simp} \; r \; s = \textit{blexer} \; r \; s$
-\end{center}
-%comment{This is not true either...look at the definion blexer/blexer-simp}
-
-\noindent That is, if we take derivatives of the regular expression $r$ and then
-simplify, and repeat this process until we exhaust the string, we get a
-regular expression $r''$ (the $\textit{LHS}$) that provides the POSIX matching
-information, which is exactly the same as the result $r'$ (the $\textit{RHS}$) of the
-normal derivative algorithm that only takes derivatives repeatedly and does no
-simplification at all. This might seem at first glance very unintuitive, as
-$r'$ could be exponentially larger than $r''$, but it can be explained in the
-following way: we are pruning away the possible matches that are not POSIX.
-Since there can be exponentially many
-non-POSIX matchings but only one POSIX matching, it
-is understandable that our $r''$ can be a lot smaller, yet still provide
-the same POSIX value if there is one. This is not as straightforward as the
-previous proposition, as the two regular expressions $r'$ and $r''$ might have
-become very different. The crucial point is to find the
-$\textit{POSIX}$ information of a regular expression and how it is modified,
-augmented and propagated
-during simplification, in parallel with the regular expression that
-has not been simplified in the subsequent derivative operations. To aid this,
-we use the helper function $\textit{retrieve}$ described by Sulzmann and Lu:
-\begin{center}
-\begin{tabular}{@{}l@{\hspace{2mm}}c@{\hspace{2mm}}l@{}}
- $\textit{retrieve}\,(\textit{ONE}\,bs)\,\Empty$ & $\dn$ & $bs$\\
- $\textit{retrieve}\,(\textit{CHAR}\,bs\,c)\,(\Char\,d)$ & $\dn$ & $bs$\\
- $\textit{retrieve}\,(\textit{ALTS}\,bs\,a::as)\,(\Left\,v)$ & $\dn$ &
- $bs \,@\, \textit{retrieve}\,a\,v$\\
- $\textit{retrieve}\,(\textit{ALTS}\,bs\,a::as)\,(\Right\,v)$ & $\dn$ &
-     $bs \,@\, \textit{retrieve}\,(\textit{ALTS}\,[]\,as)\,v$\\
- $\textit{retrieve}\,(\textit{SEQ}\,bs\,a_1\,a_2)\,(\Seq\,v_1\,v_2)$ & $\dn$ &
- $bs \,@\,\textit{retrieve}\,a_1\,v_1\,@\, \textit{retrieve}\,a_2\,v_2$\\
- $\textit{retrieve}\,(\textit{STAR}\,bs\,a)\,(\Stars\,[])$ & $\dn$ &
- $bs \,@\, [\S]$\\
- $\textit{retrieve}\,(\textit{STAR}\,bs\,a)\,(\Stars\,(v\!::\!vs))$ & $\dn$ &\\
- \multicolumn{3}{l}{
- \hspace{3cm}$bs \,@\, [\Z] \,@\, \textit{retrieve}\,a\,v\,@\,
- \textit{retrieve}\,(\textit{STAR}\,[]\,a)\,(\Stars\,vs)$}\\
-\end{tabular}
-\end{center}
-%\comment{Did not read further}\\
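-
-For reference, $\textit{retrieve}$ can be sketched in Scala as follows,
-again building on the hypothetical datatypes from the earlier sketches:
-
-\begin{verbatim}
-def retrieve(a: ARexp, v: Val): List[Bit] = (a, v) match {
-  case (AONE(bs), Empty) => bs
-  case (ACHAR(bs, _), Chr(_)) => bs
-  case (AALTS(bs, a1 :: _), Lf(v1)) => bs ::: retrieve(a1, v1)
-  case (AALTS(bs, _ :: as), Rg(v1)) =>
-    bs ::: retrieve(AALTS(Nil, as), v1)
-  case (ASEQ(bs, a1, a2), Sequ(v1, v2)) =>
-    bs ::: retrieve(a1, v1) ::: retrieve(a2, v2)
-  case (ASTAR(bs, _), Stars(Nil)) => bs ::: List(S)
-  case (ASTAR(bs, a1), Stars(v1 :: vs)) =>
-    bs ::: List(Z) ::: retrieve(a1, v1) :::
-    retrieve(ASTAR(Nil, a1), Stars(vs))
-}
-\end{verbatim}
-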
-This function assembles the bitcode
-%that corresponds to a lexical value for how
-%the current derivative matches the suffix of the string(the characters that
-%have not yet appeared, but will appear as the successive derivatives go on.
-%How do we get this "future" information? By the value $v$, which is
-%computed by a pass of the algorithm that uses
-%$inj$ as described in the previous section).
-using information from both the derivative regular expression and the
-value. Sulzmann and Lu proposed this function, but did not prove
-anything about it. Ausaf and Urban used it to connect the bitcoded
-algorithm to the older algorithm by the following equation:
-
- \begin{center} $\textit{inj} \;r\; c \; v = \textit{decode} \; (\textit{retrieve}\;
-	((r^\uparrow)\backslash c)\;v)\;r$
- \end{center}
-
-\noindent
-whereby $r^\uparrow$ stands for the internalised version of $r$. Ausaf
-and Urban also used this fact to prove the correctness of the bitcoded
-algorithm without simplification. Our purpose in using it, however,
-is to establish
-
-\begin{center}
-$ \textit{retrieve} \;
-a \; v \;=\; \textit{retrieve} \; (\textit{simp}\,a) \; v'.$
-\end{center}
-The idea is that using $v'$, a simplified version of $v$ that has gone
-through the same simplification steps as $\textit{simp}(a)$, we are able
-to extract the bitcode that gives the same parsing information as the
-unsimplified one. However, we noticed that constructing such a $v'$
-from $v$ is not so straightforward. The point of this is that we might
-be able to finally bridge the gap by proving
-
-\begin{center}
-$\textit{retrieve} \; (r^\uparrow \backslash s) \; v = \;\textit{retrieve} \;
-(\textit{simp}(r^\uparrow) \backslash s) \; v'$
-\end{center}
-
-\noindent
-and subsequently
-
-\begin{center}
-$\textit{retrieve} \; (r^\uparrow \backslash s) \; v\; = \; \textit{retrieve} \;
-(r^\uparrow \backslash_{simp} \, s) \; v'$.
-\end{center}
-
-\noindent
-The $\textit{LHS}$ of the above equation is the bitcode we want. This
-would prove that our simplified version of the regular expression still
-contains all the bitcodes needed. The task here is to find a way to
-compute the correct $v'$.
-
-The second task is to speed up the more aggressive simplification. Currently
-it is slower than the original naive simplification by Ausaf and Urban (the
-naive version as implemented by Ausaf and Urban can of course ``explode'' in
-some cases). It is therefore not surprising that the speed is also much slower
-than regular expression engines in popular programming languages such as Java
-and Python on most inputs. For example, just rewriting the
-example regular expression from the beginning of this report, $(a^*)^*\,b$, into
-$a^*\,b$ eliminates the ambiguity in the matching and makes the matching
-time linear with respect to the input string size. This allows the
-DFA approach to become blindingly fast and dwarf the speed of our current
-implementation. For example, here is a comparison of the Java regex engine
-and our implementation on this example.
-
-\begin{center}
-\begin{tabular}{@{}c@{\hspace{0mm}}c@{\hspace{0mm}}c@{}}
-\begin{tikzpicture}
-\begin{axis}[
- xlabel={$n*1000$},
- x label style={at={(1.05,-0.05)}},
- ylabel={time in secs},
- enlargelimits=false,
- xtick={0,5,...,30},
- xmax=33,
- ymax=9,
- scaled ticks=true,
- axis lines=left,
- width=5cm,
- height=4cm,
- legend entries={Bitcoded Algorithm},
- legend pos=north west,
- legend cell align=left]
-\addplot[red,mark=*, mark options={fill=white}] table {bad-scala.data};
-\end{axis}
-\end{tikzpicture}
- &
-\begin{tikzpicture}
-\begin{axis}[
- xlabel={$n*1000$},
- x label style={at={(1.05,-0.05)}},
- %ylabel={time in secs},
- enlargelimits=false,
- xtick={0,5,...,30},
- xmax=33,
- ymax=9,
- scaled ticks=false,
- axis lines=left,
- width=5cm,
- height=4cm,
- legend entries={Java},
- legend pos=north west,
- legend cell align=left]
-\addplot[cyan,mark=*, mark options={fill=white}] table {good-java.data};
-\end{axis}
-\end{tikzpicture}\\
-\multicolumn{3}{c}{Graphs: Runtime for matching $a^*\,b$ with strings
- of the form $\underbrace{aa..a}_{n}$.}
-\end{tabular}
-\end{center}
-
-
-The Java regex engine can match strings of thousands of characters in a few
-milliseconds, whereas our current algorithm gets excruciatingly slow on inputs
-of this size. In theory the running time is linear, but this does not appear
-to be the case in the actual implementation. So it needs to be explored how to
-make our algorithm faster on all inputs. It could be the recursive calls that are
-needed to manipulate bits that are causing the slow-down. A possible solution
-is to rewrite the recursive functions in tail-recursive form.
-Another possibility would be to explore
-again the connection to DFAs to speed up the algorithm on
-subcalls that are small enough. This is very much work in progress.
-
-\section{Conclusion}
-
-In this PhD-project we are interested in fast algorithms for regular
-expression matching. While this seems to be a ``settled'' area, in
-fact interesting research questions are popping up as soon as one steps
-outside the classic automata theory (for example in terms of what kind
-of regular expressions are supported). The reason why it is
-interesting for us to look at the derivative approach introduced by
-Brzozowski for regular expression matching, and then much further
-developed by Sulzmann and Lu, is that derivatives can elegantly deal
-with some of the regular expressions that are of interest in ``real
-life''. This includes the not-regular expression, written $\neg\,r$
-(that is all strings that are not recognised by $r$), but also bounded
-regular expressions such as $r^{\{n\}}$ and $r^{\{n..m\}}$. There is
-also hope that the derivatives can provide another angle for how to
-deal more efficiently with back-references, which are one of the
-reasons why regular expression engines in JavaScript, Python and Java
-choose to not implement the classic automata approach of transforming
-regular expressions into NFAs and then DFAs---because we simply do not
-know how such back-references can be represented by DFAs.
-We also plan to implement the bitcoded algorithm
-in some imperative language like C to see if the inefficiency of the
-Scala implementation
-is language specific. To make this research more comprehensive we also plan
-to contrast our (faster) version of the bitcoded algorithm with the
-Symbolic Regex Matcher, RE2, the Rust Regex Engine, and the static
-analysis approach by implementing them in the same language and then
-comparing their performance.
-
-
-\bibliographystyle{plain}
-\bibliography{root,regex_time_complexity}
-
-
-
-\end{document}
--- a/ChengsongPhdThesis/root.bib Thu Mar 24 20:52:34 2022 +0000
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,351 +0,0 @@
-%% This BibTeX bibliography file was created using BibDesk.
-%% https://bibdesk.sourceforge.io/
-
-%% Created for CS TAN at 2022-02-23 13:45:03 +0000
-
-
-%% Saved with string encoding Unicode (UTF-8)
-
-
-
-@article{Thompson_1968,
- author = {Ken Thompson},
- doi = {10.1145/363347.363387},
- journal = {Communications of the {ACM}},
- month = {jun},
- number = {6},
- pages = {419--422},
- publisher = {Association for Computing Machinery ({ACM})},
- title = {Programming Techniques: Regular expression search algorithm},
- url = {https://doi.org/10.1145%2F363347.363387},
- volume = {11},
-	year = 1968}
-
-@article{17Bir,
- author = {Asiri Rathnayake and Hayo Thielecke},
- journal = {arXiv:1405.7058},
- title = {Static Analysis for Regular Expression Exponential Runtime via Substructural Logics},
- year = {2017}}
-
-@article{nielson11bcre,
-	author = {L.~Nielsen and F.~Henglein},
- journal = {LATA},
- title = {Bit-coded Regular Expression Parsing},
-	year = {2011}}
-
-@misc{SE16,
- author = {StackStatus},
- keywords = {ReDos Attack},
- month = {July},
- title = {Stack Overflow Outage Postmortem},
- url = {https://stackstatus.net/post/147710624694/outage-postmortem-july-20-2016},
-	year = {2016}}
-
-@article{HosoyaVouillonPierce2005,
- author = {H.~Hosoya and J.~Vouillon and B.~C.~Pierce},
- journal = {ACM Transactions on Programming Languages and Systems (TOPLAS)},
- number = 1,
- pages = {46--90},
- title = {{R}egular {E}xpression {T}ypes for {XML}},
- volume = 27,
- year = {2005}}
-
-@misc{POSIX,
- note = {\url{http://pubs.opengroup.org/onlinepubs/009695399/basedefs/xbd_chap09.html}},
- title = {{T}he {O}pen {G}roup {B}ase {S}pecification {I}ssue 6 {IEEE} {S}td 1003.1 2004 {E}dition},
- year = {2004}}
-
-@inproceedings{AusafDyckhoffUrban2016,
- author = {F.~Ausaf and R.~Dyckhoff and C.~Urban},
- booktitle = {Proc.~of the 7th International Conference on Interactive Theorem Proving (ITP)},
- pages = {69--86},
- series = {LNCS},
- title = {{POSIX} {L}exing with {D}erivatives of {R}egular {E}xpressions ({P}roof {P}earl)},
- volume = {9807},
- year = {2016}}
-
-@article{aduAFP16,
- author = {F.~Ausaf and R.~Dyckhoff and C.~Urban},
- issn = {2150-914x},
- journal = {Archive of Formal Proofs},
- note = {\url{http://www.isa-afp.org/entries/Posix-Lexing.shtml}, Formal proof development},
- title = {{POSIX} {L}exing with {D}erivatives of {R}egular {E}xpressions},
- year = 2016}
-
-@techreport{CrashCourse2014,
- annote = {draft report},
- author = {N.~B.~B.~Grathwohl and F.~Henglein and U.~T.~Rasmussen},
- institution = {University of Copenhagen},
- title = {{A} {C}rash-{C}ourse in {R}egular {E}xpression {P}arsing and {R}egular {E}xpressions as {T}ypes},
- year = {2014}}
-
-@inproceedings{Sulzmann2014,
- author = {M.~Sulzmann and K.~Lu},
- booktitle = {Proc.~of the 12th International Conference on Functional and Logic Programming (FLOPS)},
- pages = {203--220},
- series = {LNCS},
- title = {{POSIX} {R}egular {E}xpression {P}arsing with {D}erivatives},
- volume = {8475},
- year = {2014}}
-
-@inproceedings{Sulzmann2014b,
- author = {M.~Sulzmann and P.~van Steenhoven},
- booktitle = {Proc.~of the 23rd International Conference on Compiler Construction (CC)},
- pages = {174--191},
- series = {LNCS},
- title = {{A} {F}lexible and {E}fficient {ML} {L}exer {T}ool {B}ased on {E}xtended {R}egular {E}xpression {S}ubmatching},
- volume = {8409},
- year = {2014}}
-
-@book{Pierce2015,
- author = {B.~C.~Pierce and C.~Casinghino and M.~Gaboardi and M.~Greenberg and C.~Hri\c{t}cu and V.~Sj\"{o}berg and B.~Yorgey},
- note = {\url{http://www.cis.upenn.edu/~bcpierce/sf}},
- publisher = {Electronic textbook},
- title = {{S}oftware {F}oundations},
- year = {2015}}
-
-@misc{Kuklewicz,
- author = {C.~Kuklewicz},
- howpublished = {\url{https://wiki.haskell.org/Regex_Posix}},
- title = {{R}egex {P}osix}}
-
-@article{Vansummeren2006,
- author = {S.~Vansummeren},
- journal = {ACM Transactions on Programming Languages and Systems},
- number = {3},
- pages = {389--428},
- title = {{T}ype {I}nference for {U}nique {P}attern {M}atching},
- volume = {28},
- year = {2006}}
-
-@inproceedings{Asperti12,
- author = {A.~Asperti},
- booktitle = {Proc.~of the 3rd International Conference on Interactive Theorem Proving (ITP)},
- pages = {283--298},
- series = {LNCS},
- title = {{A} {C}ompact {P}roof of {D}ecidability for {R}egular {E}xpression {E}quivalence},
- volume = {7406},
- year = {2012}}
-
-@inproceedings{Frisch2004,
- author = {A.~Frisch and L.~Cardelli},
- booktitle = {Proc.~of the 31st International Conference on Automata, Languages and Programming (ICALP)},
- pages = {618--629},
- series = {LNCS},
- title = {{G}reedy {R}egular {E}xpression {M}atching},
- volume = {3142},
- year = {2004}}
-
-@article{Antimirov95,
- author = {V.~Antimirov},
- journal = {Theoretical Computer Science},
- pages = {291--319},
- title = {{P}artial {D}erivatives of {R}egular {E}xpressions and {F}inite {A}utomata {C}onstructions},
- volume = {155},
- year = {1995}}
-
-@inproceedings{Nipkow98,
- author = {T.~Nipkow},
- booktitle = {Proc.~of the 11th International Conference on Theorem Proving in Higher Order Logics (TPHOLs)},
- pages = {1--15},
- series = {LNCS},
- title = {{V}erified {L}exical {A}nalysis},
- volume = 1479,
- year = 1998}
-
-@article{Brzozowski1964,
- author = {J.~A.~Brzozowski},
- journal = {Journal of the {ACM}},
- number = {4},
- pages = {481--494},
- title = {{D}erivatives of {R}egular {E}xpressions},
- volume = {11},
- year = {1964}}
-
-@article{Leroy2009,
- author = {X.~Leroy},
- journal = {Communications of the ACM},
- number = 7,
- pages = {107--115},
- title = {{F}ormal {V}erification of a {R}ealistic {C}ompiler},
- volume = 52,
- year = 2009}
-
-@inproceedings{Paulson2015,
- author = {L.~C.~Paulson},
- booktitle = {Proc.~of the 25th International Conference on Automated Deduction (CADE)},
- pages = {231--245},
- series = {LNAI},
- title = {{A} {F}ormalisation of {F}inite {A}utomata {U}sing {H}ereditarily {F}inite {S}ets},
- volume = {9195},
- year = {2015}}
-
-@article{Wu2014,
- author = {C.~Wu and X.~Zhang and C.~Urban},
- journal = {Journal of Automatic Reasoning},
- number = {4},
- pages = {451--480},
- title = {{A} {F}ormalisation of the {M}yhill-{N}erode {T}heorem based on {R}egular {E}xpressions},
- volume = {52},
- year = {2014}}
-
-@inproceedings{Regehr2011,
- author = {X.~Yang and Y.~Chen and E.~Eide and J.~Regehr},
- booktitle = {Proc.~of the 32nd ACM SIGPLAN Conference on Programming Language Design and Implementation (PLDI)},
- pages = {283--294},
- title = {{F}inding and {U}nderstanding {B}ugs in {C} {C}ompilers},
- year = {2011}}
-
-@article{Norrish2014,
- author = {A.~Barthwal and M.~Norrish},
- journal = {Journal of Computer and System Sciences},
- number = {2},
- pages = {346--362},
- title = {{A} {M}echanisation of {S}ome {C}ontext-{F}ree {L}anguage {T}heory in {HOL4}},
- volume = {80},
- year = {2014}}
-
-@article{Thompson1968,
- author = {K.~Thompson},
- issue_date = {June 1968},
- journal = {Communications of the ACM},
- number = {6},
- pages = {419--422},
- title = {{P}rogramming {T}echniques: {R}egular {E}xpression {S}earch {A}lgorithm},
- volume = {11},
- year = {1968}}
-
-@article{Owens2009,
- author = {S.~Owens and J.~H.~Reppy and A.~Turon},
-	journal = {Journal of Functional Programming},
- number = {2},
- pages = {173--190},
- title = {{R}egular-{E}xpression {D}erivatives {R}e-{E}xamined},
- volume = {19},
- year = {2009}}
-
-@inproceedings{Sulzmann2015,
- author = {M.~Sulzmann and P.~Thiemann},
- booktitle = {Proc.~of the 9th International Conference on Language and Automata Theory and Applications (LATA)},
- pages = {275--286},
- series = {LNCS},
- title = {{D}erivatives for {R}egular {S}huffle {E}xpressions},
- volume = {8977},
- year = {2015}}
-
-@inproceedings{Chen2012,
- author = {H.~Chen and S.~Yu},
-	booktitle = {Proc.~of the International Workshop on Theoretical Computer Science (WTCS)},
- pages = {343--356},
- series = {LNCS},
- title = {{D}erivatives of {R}egular {E}xpressions and an {A}pplication},
- volume = {7160},
- year = {2012}}
-
-@article{Krauss2011,
- author = {A.~Krauss and T.~Nipkow},
- journal = {Journal of Automated Reasoning},
- pages = {95--106},
- title = {{P}roof {P}earl: {R}egular {E}xpression {E}quivalence and {R}elation {A}lgebra},
- volume = 49,
- year = 2012}
-
-@inproceedings{Traytel2015,
- author = {D.~Traytel},
- booktitle = {Proc.~of the 24th Annual Conference on Computer Science Logic (CSL)},
- pages = {487--503},
- series = {LIPIcs},
- title = {{A} {C}oalgebraic {D}ecision {P}rocedure for {WS1S}},
- volume = {41},
- year = {2015}}
-
-@inproceedings{Traytel2013,
- author = {D.~Traytel and T.~Nipkow},
- booktitle = {Proc.~of the 18th ACM SIGPLAN International Conference on Functional Programming (ICFP)},
- pages = {3-12},
- title = {{A} {V}erified {D}ecision {P}rocedure for {MSO} on {W}ords {B}ased on {D}erivatives of {R}egular {E}xpressions},
- year = 2013}
-
-@inproceedings{Coquand2012,
- author = {T.~Coquand and V.~Siles},
- booktitle = {Proc.~of the 1st International Conference on Certified Programs and Proofs (CPP)},
- pages = {119--134},
- series = {LNCS},
- title = {{A} {D}ecision {P}rocedure for {R}egular {E}xpression {E}quivalence in {T}ype {T}heory},
- volume = {7086},
- year = {2011}}
-
-@inproceedings{Almeidaetal10,
-	author = {J.~B.~Almeida and N.~Moreira and D.~Pereira and S.~M.~de Sousa},
- booktitle = {Proc.~of the 15th International Conference on Implementation and Application of Automata (CIAA)},
- pages = {59-68},
- series = {LNCS},
- title = {{P}artial {D}erivative {A}utomata {F}ormalized in {C}oq},
- volume = {6482},
- year = {2010}}
-
-@article{Owens2008,
- author = {S.~Owens and K.~Slind},
- journal = {Higher-Order and Symbolic Computation},
- number = {4},
- pages = {377--409},
- title = {{A}dapting {F}unctional {P}rograms to {H}igher {O}rder {L}ogic},
- volume = {21},
- year = {2008}}
-
-
-@misc{PCRE,
- title = {{PCRE - Perl Compatible Regular Expressions}},
-	url = {http://www.pcre.org}}
-
-@inproceedings{OkuiSuzuki2010,
- author = {S.~Okui and T.~Suzuki},
- booktitle = {Proc.~of the 15th International Conference on Implementation and Application of Automata (CIAA)},
- pages = {231--240},
- series = {LNCS},
- title = {{D}isambiguation in {R}egular {E}xpression {M}atching via {P}osition {A}utomata with {A}ugmented {T}ransitions},
- volume = {6482},
- year = {2010}}
-
-@techreport{OkuiSuzukiTech,
- author = {S.~Okui and T.~Suzuki},
- institution = {University of Aizu},
- title = {{D}isambiguation in {R}egular {E}xpression {M}atching via {P}osition {A}utomata with {A}ugmented {T}ransitions},
- year = {2013}}
-
-@inproceedings{Davis18,
-	author = {J.~C.~Davis and C.~A.~Coghlan and F.~Servant and D.~Lee},
- booktitle = {Proc.~of the 26th ACM Joint Meeting on European Software Engineering Conference and Symposium on the Foundations of Software Engineering (ESEC/FSE)},
- numpages = {11},
- pages = {246--256},
- title = {{T}he {I}mpact of {R}egular {E}xpression {D}enial of {S}ervice ({ReDoS}) in {P}ractice: {A}n {E}mpirical {S}tudy at the {E}cosystem {S}cale},
- year = {2018}}