more work on the simple inductive chapter
author Christian Urban <urbanc@in.tum.de>
Wed, 01 Apr 2009 12:26:56 +0100
changeset 219 98d43270024f
parent 218 7ff7325e3b4e
child 222 1dc03eaa7cb9
more work on the simple inductive chapter
ProgTutorial/Package/Ind_Code.thy
ProgTutorial/Package/Ind_General_Scheme.thy
ProgTutorial/Package/Ind_Interface.thy
ProgTutorial/Package/Ind_Intro.thy
ProgTutorial/Package/Ind_Prelims.thy
ProgTutorial/Parsing.thy
ProgTutorial/Tactical.thy
progtutorial.pdf
--- a/ProgTutorial/Package/Ind_Code.thy	Tue Mar 31 20:31:18 2009 +0100
+++ b/ProgTutorial/Package/Ind_Code.thy	Wed Apr 01 12:26:56 2009 +0100
@@ -1,5 +1,5 @@
 theory Ind_Code
-imports "../Base" "../FirstSteps" Ind_General_Scheme 
+imports "../Base" "../FirstSteps" Ind_Interface Ind_General_Scheme
 begin
 
 section {* The Gory Details\label{sec:code} *} 
@@ -798,7 +798,7 @@
 *}
 
 lemma accpartI:
-  shows "\<And>x. (\<And>y. R y x \<Longrightarrow> accpart R y) \<Longrightarrow> accpart R x"
+  shows "\<And>R x. (\<And>y. R y x \<Longrightarrow> accpart R y) \<Longrightarrow> accpart R x"
 apply(tactic {* expand_tac @{thms accpart_def} *})
 apply(tactic {* chop_test_tac [acc_pred] acc_rules @{context} *})
 apply(tactic {* apply_prem_tac 0 [acc_pred] acc_rules @{context} *})
@@ -879,7 +879,7 @@
 *}
 
 lemma accpartI:
-  shows "\<And>x. (\<And>y. R y x \<Longrightarrow> accpart R y) \<Longrightarrow> accpart R x"
+  shows "\<And>R x. (\<And>y. R y x \<Longrightarrow> accpart R y) \<Longrightarrow> accpart R x"
 apply(tactic {* expand_tac @{thms accpart_def} *})
 apply(tactic {* prove_intro_tac 0 [acc_pred] acc_rules @{context} 1 *})
 done
@@ -1058,7 +1058,7 @@
 
 ML{*val specification =
   spec_parser >>
-    (fn ((pred_specs), rule_specs) => add_inductive_cmd pred_specs rule_specs)*}
+    (fn (pred_specs, rule_specs) => add_inductive_cmd pred_specs rule_specs)*}
 
 ML{*val _ = OuterSyntax.local_theory "simple_inductive" 
               "define inductive predicates"
@@ -1083,6 +1083,7 @@
   
 *}
 
+(*
 simple_inductive
   Even and Odd
 where
@@ -1100,19 +1101,25 @@
 
 thm Even_def
 thm Odd_def
+*)
 
+(*
 text {* 
   Second, we want that the user can specify fixed parameters.
   Remember in the previous section we stated that the user can give the 
   specification for the transitive closure of a relation @{text R} as 
 *}
+*)
 
+(*
 simple_inductive
   trcl\<iota>\<iota> :: "('a \<Rightarrow> 'a \<Rightarrow> bool) \<Rightarrow> 'a \<Rightarrow> 'a \<Rightarrow> bool"
 where
   base: "trcl\<iota>\<iota> R x x"
 | step: "trcl\<iota>\<iota> R x y \<Longrightarrow> R y z \<Longrightarrow> trcl\<iota>\<iota> R x z"
+*)
 
+(*
 text {*
   Note that there is no locale given in this specification---the parameter
   @{text "R"} therefore needs to be included explicitly in @{term trcl\<iota>\<iota>}, but
@@ -1132,14 +1139,15 @@
   But this does not correspond to the induction principle we derived by hand, which
   was
   
-  \begin{center}\small
-  \mprset{flushleft}
-  \mbox{\inferrule{
-             @{thm_style prem1  trcl_induct[no_vars]}\\\\
-             @{thm_style prem2  trcl_induct[no_vars]}\\\\
-             @{thm_style prem3  trcl_induct[no_vars]}}
-            {@{thm_style concl  trcl_induct[no_vars]}}}  
-  \end{center}
+  
+  %\begin{center}\small
+  %\mprset{flushleft}
+  %\mbox{\inferrule{
+  %           @ { thm_style prem1  trcl_induct[no_vars]}\\\\
+  %           @ { thm_style prem2  trcl_induct[no_vars]}\\\\
+  %           @ { thm_style prem3  trcl_induct[no_vars]}}
+  %          {@ { thm_style concl  trcl_induct[no_vars]}}}  
+  %\end{center}
 
   The difference is that in the one derived by hand the relation @{term R} is not
   a parameter of the proposition @{term P} to be proved and it is also not universally
@@ -1151,7 +1159,7 @@
   to write 
 
 *}
-
+*)
 (*
 simple_inductive
   trcl'' for R :: "'a \<Rightarrow> 'a \<Rightarrow> bool"
@@ -1164,5 +1172,4 @@
 where
   accpartI: "(\<And>y. R y x \<Longrightarrow> accpart'' R y) \<Longrightarrow> accpart'' R x"
 *)
-
-end
+(*<*)end(*>*)
\ No newline at end of file
--- a/ProgTutorial/Package/Ind_General_Scheme.thy	Tue Mar 31 20:31:18 2009 +0100
+++ b/ProgTutorial/Package/Ind_General_Scheme.thy	Wed Apr 01 12:26:56 2009 +0100
@@ -1,23 +1,10 @@
 theory Ind_General_Scheme 
-imports Simple_Inductive_Package Ind_Prelims
+imports Ind_Interface
 begin
 
-(*<*)
-datatype trm =
-  Var "string"
-| App "trm" "trm"
-| Lam "string" "trm"
+section {* The Code in a Nutshell\label{sec:nutshell} *}
 
-simple_inductive 
-  fresh :: "string \<Rightarrow> trm \<Rightarrow> bool" 
-where
-  fresh_var: "a\<noteq>b \<Longrightarrow> fresh a (Var b)"
-| fresh_app: "\<lbrakk>fresh a t; fresh a s\<rbrakk> \<Longrightarrow> fresh a (App t s)"
-| fresh_lam1: "fresh a (Lam a t)"
-| fresh_lam2: "\<lbrakk>a\<noteq>b; fresh a t\<rbrakk> \<Longrightarrow> fresh a (Lam b t)"
-(*>*)
 
-section {* The Code in a Nutshell\label{sec:nutshell} *}
 
 text {*
   The inductive package will generate the reasoning infrastructure for
--- a/ProgTutorial/Package/Ind_Interface.thy	Tue Mar 31 20:31:18 2009 +0100
+++ b/ProgTutorial/Package/Ind_Interface.thy	Wed Apr 01 12:26:56 2009 +0100
@@ -1,15 +1,106 @@
 theory Ind_Interface
-imports "../Base" "../Parsing" Ind_Prelims Simple_Inductive_Package
+imports "../Base" "../Parsing" Simple_Inductive_Package
 begin
 
 section {* Parsing and Typing the Specification\label{sec:interface} *}
 
+text_raw {*
+\begin{figure}[p]
+\begin{boxedminipage}{\textwidth}
+\begin{isabelle}
+*}
+simple_inductive
+  trcl :: "('a \<Rightarrow> 'a \<Rightarrow> bool) \<Rightarrow> 'a \<Rightarrow> 'a \<Rightarrow> bool"
+where
+  base: "trcl R x x"
+| step: "trcl R x y \<Longrightarrow> R y z \<Longrightarrow> trcl R x z"
+
+simple_inductive
+  even and odd
+where
+  even0: "even 0"
+| evenS: "odd n \<Longrightarrow> even (Suc n)"
+| oddS: "even n \<Longrightarrow> odd (Suc n)"
+
+simple_inductive
+  accpart :: "('a \<Rightarrow> 'a \<Rightarrow> bool) \<Rightarrow> 'a \<Rightarrow> bool"
+where
+  accpartI: "(\<And>y. R y x \<Longrightarrow> accpart R y) \<Longrightarrow> accpart R x"
+
+(*<*)
+datatype trm =
+  Var "string"
+| App "trm" "trm"
+| Lam "string" "trm"
+(*>*)
+
+simple_inductive 
+  fresh :: "string \<Rightarrow> trm \<Rightarrow> bool" 
+where
+  fresh_var: "a\<noteq>b \<Longrightarrow> fresh a (Var b)"
+| fresh_app: "\<lbrakk>fresh a t; fresh a s\<rbrakk> \<Longrightarrow> fresh a (App t s)"
+| fresh_lam1: "fresh a (Lam a t)"
+| fresh_lam2: "\<lbrakk>a\<noteq>b; fresh a t\<rbrakk> \<Longrightarrow> fresh a (Lam b t)"
+text_raw {*
+\end{isabelle}
+\end{boxedminipage}
+\caption{Specifications given by the user for the inductive predicates
+@{term "trcl"}, @{term "even"} and @{term "odd"}, @{term "accpart"} and 
+@{text "fresh"}.\label{fig:specs}}
+\end{figure}  
+*}
+
+text {*
+  \begin{figure}[p]
+  \begin{boxedminipage}{\textwidth}
+  \begin{isabelle}
+  \railnontermfont{\rmfamily\itshape}
+  \railterm{simpleinductive,where,for}
+  \railalias{simpleinductive}{\simpleinductive{}}
+  \railalias{where}{\isacommand{where}}
+  \railalias{for}{\isacommand{for}}
+  \begin{rail}
+  simpleinductive target?\\ fixes
+  (where (thmdecl? prop + '|'))?
+  ;
+  \end{rail}
+  \end{isabelle}
+  \end{boxedminipage}
+  \caption{A railroad diagram describing the syntax of \simpleinductive{}. 
+  The \emph{target} indicates an optional locale; the \emph{fixes} are an 
+  \isacommand{and}-separated list of names for the inductive predicates (they
+  can also contain typing and syntax annotations); \emph{prop} stands for an 
+  introduction rule with an optional theorem declaration (\emph{thmdecl}).
+  \label{fig:railroad}}
+  \end{figure}
+*}
+
 text {* 
   To be able to write down the specification in Isabelle, we have to introduce
   a new command (see Section~\ref{sec:newcommand}).  As the keyword for the
-  new command we chose \simpleinductive{}. The ``infrastructure'' already 
-  provides an ``advanced'' feature for this command. For example we will 
-  be able to declare the locale
+  new command we chose \simpleinductive{}. Examples of specifications we expect
+  the user to give for the inductive predicates from the previous section are
+  shown in Figure~\ref{fig:specs}. The general syntax of \simpleinductive{} we 
+  will parse is specified in the railroad diagram shown in 
+  Figure~\ref{fig:railroad}. This diagram more or less translates directly into 
+  the parser:
+*}
+
+ML{*val spec_parser = 
+   OuterParse.fixes -- 
+   Scan.optional 
+     (OuterParse.$$$ "where" |--
+        OuterParse.!!! 
+          (OuterParse.enum1 "|" 
+             (SpecParse.opt_thm_name ":" -- OuterParse.prop))) []*}
+
+text {*
+  which we explained in Section~\ref{sec:parsingspecs}.
+  If you look closely, there is no code for parsing the optional
+  target shown in Figure~\ref{fig:railroad}. This is an ``advanced'' feature
+  which we will inherit for ``free'' from the infrastructure on which
+  we build the package. The target stands for a locale and allows us 
+  to specify
 *}
 
 locale rel =
@@ -32,44 +123,8 @@
   accpartI: "(\<And>y. R y x \<Longrightarrow> accpart' y) \<Longrightarrow> accpart' x"
 
 text {*
-  \begin{figure}[t]
-  \begin{isabelle}
-  \railnontermfont{\rmfamily\itshape}
-  \railterm{simpleinductive,where,for}
-  \railalias{simpleinductive}{\simpleinductive{}}
-  \railalias{where}{\isacommand{where}}
-  \railalias{for}{\isacommand{for}}
-  \begin{rail}
-  simpleinductive target?\\ fixes
-  (where (thmdecl? prop + '|'))?
-  ;
-  \end{rail}
-  \end{isabelle}
-  \caption{A railroad diagram describing the syntax of \simpleinductive{}. 
-  The \emph{target} indicates an optional locale; the \emph{fixes} are an 
-  \isacommand{and}-separated list of names for the inductive predicates (they
-  can also contain typing- and syntax anotations); \emph{prop} stands for a 
-  introduction rule with an optional theorem declaration (\emph{thmdecl}).
-  \label{fig:railroad}}
-  \end{figure}
-*}
-
-text {*
-  This leads directly to the railroad diagram shown in
-  Figure~\ref{fig:railroad} for the syntax of \simpleinductive{}. This diagram
-  more or less translates directly into the parser:
-*}
-
-ML %linenosgray{*val spec_parser = 
-     OuterParse.fixes -- 
-     Scan.optional 
-       (OuterParse.$$$ "where" |--
-          OuterParse.!!! 
-            (OuterParse.enum1 "|" 
-               (SpecParse.opt_thm_name ":" -- OuterParse.prop))) []*}
-
-text {*
-  which we described in Section~\ref{sec:parsingspecs}. If we feed into the 
+  Note that in these definitions the parameter @{text R} for the
+  relation is left implicit.  If we feed into the 
   parser the string (which corresponds to our definition of @{term even} and 
   @{term odd}):
 
@@ -88,22 +143,37 @@
      [((even0,\<dots>), \"\\^E\\^Ftoken\\^Eeven 0\\^E\\^F\\^E\"),
       ((evenS,\<dots>), \"\\^E\\^Ftoken\\^Eodd n \<Longrightarrow> even (Suc n)\\^E\\^F\\^E\"),
       ((oddS,\<dots>), \"\\^E\\^Ftoken\\^Eeven n \<Longrightarrow> odd (Suc n)\\^E\\^F\\^E\")]), [])"}
+
+  then we get back the predicates (with type
+  and syntax annotations) and the specifications of the introduction 
+  rules. This is all the information we
+  need for calling the package and setting up the keyword. The latter is
+  done by the call to @{ML OuterSyntax.local_theory} in the last code 
+  block below.
 *}
 
+(*<*)
+ML{* fun add_inductive pred_specs rule_specs lthy = lthy *}
+(*>*)
+
+ML{*fun add_inductive_cmd pred_specs rule_specs lthy =
+let
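+  (* read_spec turns the strings in the specification into typed terms *)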
+  val ((pred_specs', rule_specs'), _) = 
+         Specification.read_spec pred_specs rule_specs lthy
+in
+  add_inductive pred_specs' rule_specs' lthy
+end*} 
+
+
+ML{*val specification =
+  spec_parser >>
+    (fn (pred_specs, rule_specs) => add_inductive_cmd pred_specs rule_specs)*}
+
+ML{*val _ = OuterSyntax.local_theory "simple_inductive" 
+              "definition of simple inductive predicates"
+                 OuterKeyword.thy_decl specification*}
+
 
 text {*
-  then we get back a locale (in this case @{ML NONE}), the predicates (with type
-  and syntax annotations), the parameters (similar as the predicates) and
-  the specifications of the introduction rules. 
-
-
-
-  This is all the information we
-  need for calling the package and setting up the keyword. The latter is
-  done in Lines 6 and 7 in the code below.
-
-  @{ML_chunk [display,gray,linenos] syntax}
-  
   We call @{ML OuterSyntax.local_theory} with the kind-indicator @{ML
   OuterKeyword.thy_decl} since the package does not need to open up any goal
   state (see Section~\ref{sec:newcommand}). Note that the predicates and
@@ -399,8 +469,4 @@
   the third with the introduction rules. 
   
 *}
-
-
-(*<*)
-end
-(*>*)
+(*<*)end(*>*)
\ No newline at end of file
--- a/ProgTutorial/Package/Ind_Intro.thy	Tue Mar 31 20:31:18 2009 +0100
+++ b/ProgTutorial/Package/Ind_Intro.thy	Wed Apr 01 12:26:56 2009 +0100
@@ -17,9 +17,9 @@
   \medskip
   HOL is based on just a few primitive constants, like equality and
   implication, whose properties are described by axioms. All other concepts,
-  such as inductive predicates, datatypes, or recursive functions have to be defined
+  such as inductive predicates, datatypes or recursive functions, have to be defined
   in terms of those constants, and the desired properties, for example
-  induction theorems, or recursion equations have to be derived from the definitions
+  induction theorems or recursion equations, have to be derived from the definitions
   by a formal proof. Since it would be very tedious for a user to define
   complex inductive predicates or datatypes ``by hand'' just using the
   primitive operators of higher order logic, \emph{definitional packages} have
@@ -29,7 +29,7 @@
   definitions and proofs behind the scenes. In this chapter we explain how
   such a package can be implemented.
 
-  As a running example, we have chosen a rather simple package for defining
+  As the running example we have chosen a rather simple package for defining
   inductive predicates. To keep things really simple, we will not use the
   general Knaster-Tarski fixpoint theorem on complete lattices, which forms
   the basis of Isabelle's standard inductive definition package.  Instead, we
--- a/ProgTutorial/Package/Ind_Prelims.thy	Tue Mar 31 20:31:18 2009 +0100
+++ b/ProgTutorial/Package/Ind_Prelims.thy	Wed Apr 01 12:26:56 2009 +0100
@@ -1,5 +1,5 @@
 theory Ind_Prelims
-imports Main LaTeXsugar"../Base" Simple_Inductive_Package
+imports Main LaTeXsugar "../Base" 
 begin
 
 section{* Preliminaries *}
@@ -9,55 +9,25 @@
   expects from the package to produce a convenient reasoning
   infrastructure. This infrastructure needs to be derived from the definition
   that correspond to the specified predicate(s). Before we start with
-  explaining all parts of the package, let us first give four examples showing
-  how to define inductive predicates by hand and then also how to prove by
-  hand properties about them. See Figure \ref{fig:paperpreds} for their usual
-  ``pencil-and-paper'' definitions. From these examples, we will
-  figure out a general method for defining inductive predicates.  The aim in
-  this section is \emph{not} to write proofs that are as beautiful as
-  possible, but as close as possible to the ML-code we will develop in later
-  sections.
+  explaining all parts of the package, let us first give some examples 
+  showing how to define inductive predicates and then also how
+  to generate a reasoning infrastructure for them. From the examples 
+  we will figure out a general method for
+  defining inductive predicates.  The aim in this section is \emph{not} to
+  write proofs that are as beautiful as possible, but as close as possible to
+  the ML-code we will develop in later sections.
 
-  \begin{figure}[t]
-  \begin{boxedminipage}{\textwidth}
+
+
+  We first consider the transitive closure of a relation @{text R}. The 
+  ``pencil-and-paper'' specification for the transitive closure is:
+
   \begin{center}\small
   @{prop[mode=Axiom] "trcl R x x"} \hspace{5mm}
   @{prop[mode=Rule] "R x y \<Longrightarrow> trcl R y z \<Longrightarrow> trcl R x z"}
   \end{center}
-  \begin{center}\small
-  @{prop[mode=Axiom] "even (0::nat)"} \hspace{5mm}
-  @{prop[mode=Rule] "odd n \<Longrightarrow> even (Suc n)"} \hspace{5mm}
-  @{prop[mode=Rule] "even n \<Longrightarrow> odd (Suc n)"}
-  \end{center}
-  \begin{center}\small
-  \mbox{\inferrule{@{term "\<And>y. R y x \<Longrightarrow> accpart R y"}}{@{term "accpart R x"}}}
-  \end{center}
-  \begin{center}\small
-  @{prop[mode=Rule] "a\<noteq>b \<Longrightarrow> fresh a (Var b)"}\hspace{5mm}
-  @{prop[mode=Rule] "\<lbrakk>fresh a t; fresh a s\<rbrakk> \<Longrightarrow> fresh a (App t s)"}\\[2mm]
-  @{prop[mode=Axiom] "fresh a (Lam a t)"}\hspace{5mm}
-  @{prop[mode=Rule] "\<lbrakk>a\<noteq>b; fresh a t\<rbrakk> \<Longrightarrow> fresh a (Lam b t)"}
-  \end{center}
-  \end{boxedminipage}
-  \caption{Examples of four ``Pencil-and-paper'' definitions of inductively defined
-  predicates. In formal reasoning with Isabelle, the user just wants to give such 
-  definitions and expects that the reasoning structure is derived automatically. 
-  For this definitional packages need to be implemented.\label{fig:paperpreds}}
-  \end{figure}
 
-  We first consider the transitive closure of a relation @{text R}. The user will 
-  state for @{term trcl\<iota>} the specification:
-*}
-
-simple_inductive
-  trcl\<iota> :: "('a \<Rightarrow> 'a \<Rightarrow> bool) \<Rightarrow> 'a \<Rightarrow> 'a \<Rightarrow> bool"
-where
-  base: "trcl\<iota> R x x"
-| step: "trcl\<iota> R x y \<Longrightarrow> R y z \<Longrightarrow> trcl\<iota> R x z"
-
-text {*
-  The package has to make an appropriate definition and provide
-  lemmas to reason about the predicate @{term trcl\<iota>}. Since an inductively
+  The package has to make an appropriate definition. Since an inductively
   defined predicate is the least predicate closed under a collection of
   introduction rules, the predicate @{text "trcl R x y"} can be defined so
   that it holds if and only if @{text "P x y"} holds for every predicate
@@ -79,7 +49,7 @@
   With this definition, the proof of the induction principle for @{term trcl}
   is almost immediate. It suffices to convert all the meta-level
   connectives in the lemma to object-level connectives using the
-  proof method @{text atomize} (Line 4), expand the definition of @{term trcl}
+  proof method @{text atomize} (Line 4 below), expand the definition of @{term trcl}
   (Line 5 and 6), eliminate the universal quantifier contained in it (Line~7),
   and then solve the goal by @{text assumption} (Line 8).
 
@@ -124,7 +94,7 @@
   The two assumptions come from the definition of @{term trcl} and correspond
   to the introduction rules. Thus, all we have to do is to eliminate the
   universal quantifier in front of the first assumption (Line 5), and then
-  solve the goal by assumption (Line 6).
+  solve the goal by @{text assumption} (Line 6).
 *}
 
 text {*
@@ -160,8 +130,8 @@
 txt {*
   The assumptions @{text "p1"} and @{text "p2"} correspond to the premises of
   the second introduction rule (unfolded); the assumptions @{text "r1"} and @{text "r2"}
-  come from the definition of @{term trcl} . We apply @{text "r2"} to the goal
-  @{term "P x z"}. In order for the assumption to be applicable as a rule, we
+  come from the definition of @{term trcl}. We apply @{text "r2"} to the goal
+  @{term "P x z"}. In order for this assumption to be applicable as a rule, we
   have to eliminate the universal quantifier and turn the object-level
   implications into meta-level ones. This can be accomplished using the @{text
   rule_format} attribute. So we continue the proof with:
@@ -211,28 +181,24 @@
 
   The method of defining inductive predicates by impredicative quantification
   also generalises to mutually inductive predicates. The next example defines
-  the predicates @{text even} and @{text odd}. The user will state for this 
-  inductive definition the specification:
-*}
+  the predicates @{text even} and @{text odd} given by
 
-simple_inductive
-  even and odd
-where
-  even0: "even 0"
-| evenS: "odd n \<Longrightarrow> even (Suc n)"
-| oddS: "even n \<Longrightarrow> odd (Suc n)"
+  \begin{center}\small
+  @{prop[mode=Axiom] "even (0::nat)"} \hspace{5mm}
+  @{prop[mode=Rule] "odd n \<Longrightarrow> even (Suc n)"} \hspace{5mm}
+  @{prop[mode=Rule] "even n \<Longrightarrow> odd (Suc n)"}
+  \end{center}
 
-text {*
   Since the predicates @{term even} and @{term odd} are mutually inductive, each 
   corresponding definition must quantify over both predicates (we name them 
   below @{text "P"} and @{text "Q"}).
 *}
 
-definition "even\<iota> \<equiv> 
+definition "even \<equiv> 
   \<lambda>n. \<forall>P Q. P 0 \<longrightarrow> (\<forall>m. Q m \<longrightarrow> P (Suc m)) 
                  \<longrightarrow> (\<forall>m. P m \<longrightarrow> Q (Suc m)) \<longrightarrow> P n"
 
-definition "odd\<iota> \<equiv>
+definition "odd \<equiv>
   \<lambda>n. \<forall>P Q. P 0 \<longrightarrow> (\<forall>m. Q m \<longrightarrow> P (Suc m)) 
                  \<longrightarrow> (\<forall>m. P m \<longrightarrow> Q (Suc m)) \<longrightarrow> Q n"
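+
+text {*
+  As a quick sanity check (a sketch that only assumes the fact name 
+  @{text "even_def"} generated by the definition above), the first 
+  introduction rule @{text "even 0"} already follows from this 
+  definition by pure predicate logic:
+*}
+
+lemma even_zero_check:
+  shows "even 0"
+unfolding even_def by blast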
 
@@ -280,7 +246,7 @@
 qed
 
 text {*
-  The interesting lines are 7 to 15. The assumptions fall into to categories:
+  The interesting lines are 7 to 15. The assumptions fall into two categories:
   @{text p1} corresponds to the premise of the introduction rule; @{text "r1"}
   to @{text "r3"} come from the definition of @{text "even"}.
   In Line 13, we apply the assumption @{text "r2"} (since we prove the second
@@ -290,10 +256,14 @@
   need to be instantiated and then also the implications need to be resolved
   with the other rules.
 
-  As a final example, we define the accessible part of a relation @{text R}
-  (see Figure~\ref{fig:paperpreds}). There the premsise of the introduction 
-  rule involves a universal quantifier and an implication. The
-  definition of @{text accpart} is:
+  Next we define the accessible part of a relation @{text R} given by
+  the single rule:
+
+  \begin{center}\small
+  \mbox{\inferrule{@{term "\<And>y. R y x \<Longrightarrow> accpart R y"}}{@{term "accpart R x"}}}
+  \end{center}
+
+  The definition of @{text "accpart"} is:
 *}
 
 definition "accpart \<equiv> \<lambda>R x. \<forall>P. (\<forall>x. (\<forall>y. R y x \<longrightarrow> P y) \<longrightarrow> P x) \<longrightarrow> P x"
@@ -325,15 +295,32 @@
 qed
 
 text {*
-  There are now two subproofs. The assumptions fall again into two categories (Lines
-  7 to 9). In Line 11, applying the assumption @{text "r1"} generates a goal state 
-  with the new local assumption @{term "R y x"}, named @{text "r1_prem"} in the 
-  proof (Line 14). This local assumption is used to solve
-  the goal @{term "P y"} with the help of assumption @{text "p1"}.
+  As you can see, there are now two subproofs. The assumptions fall again into
+  two categories (Lines 7 to 9). In Line 11, applying the assumption @{text
+  "r1"} generates a goal state with the new local assumption @{term "R y x"},
+  named @{text "r1_prem"} in the second subproof (Line 14). This local assumption is
+  used to solve the goal @{term "P y"} with the help of assumption @{text
+  "p1"}.
+
 
-  The point of these examples is to get a feeling what the automatic proofs 
-  should do in order to solve all inductive definitions we throw at them.
-  This is usually the first step in writing a package. We next explain
+  \begin{exercise}
+  Give the definition for the freshness predicate for lambda-terms. The rules
+  for this predicate are:
+  
+  \begin{center}\small
+  @{prop[mode=Rule] "a\<noteq>b \<Longrightarrow> fresh a (Var b)"}\hspace{5mm}
+  @{prop[mode=Rule] "\<lbrakk>fresh a t; fresh a s\<rbrakk> \<Longrightarrow> fresh a (App t s)"}\\[2mm]
+  @{prop[mode=Axiom] "fresh a (Lam a t)"}\hspace{5mm}
+  @{prop[mode=Rule] "\<lbrakk>a\<noteq>b; fresh a t\<rbrakk> \<Longrightarrow> fresh a (Lam b t)"}
+  \end{center}
+
+  From the definition derive the induction principle and the introduction 
+  rules. 
+  \end{exercise}
+
+  The point of all these examples is to get a feeling for what the automatic
+  proofs should do in order to solve all inductive definitions we throw at
+  them.  This is usually the first step in writing a package. We next explain
   the parsing and typing part of the package.
 
 *}
--- a/ProgTutorial/Parsing.thy	Tue Mar 31 20:31:18 2009 +0100
+++ b/ProgTutorial/Parsing.thy	Wed Apr 01 12:26:56 2009 +0100
@@ -195,7 +195,7 @@
   @{ML_response_fake [display,gray] "Scan.error (!! (fn _ => \"foo\") ($$ \"h\"))"
                                "Exception Error \"foo\" raised"}
 
-  This ``prefixing'' is usually done by wrappers such as @{ML "OuterSyntax.command"} 
+  This ``prefixing'' is usually done by wrappers such as @{ML "OuterSyntax.local_theory"} 
   (see Section~\ref{sec:newcommand} which explains this function in more detail). 
   
   Let us now return to our example of parsing @{ML "(p -- q) || r" for p q
@@ -349,8 +349,6 @@
 section {* Parsing Theory Syntax *}
 
 text {*
-  (FIXME: context parser)
-
   Most of the time, however, Isabelle developers have to deal with parsing
   tokens, not strings.  These token parsers have the type:
 *}
@@ -619,12 +617,14 @@
   for inductive predicates of the form:
 *}
 
+(*
 simple_inductive
   even and odd
 where
   even0: "even 0"
 | evenS: "odd n \<Longrightarrow> even (Suc n)"
 | oddS: "even n \<Longrightarrow> odd (Suc n)"
+*)
 
 text {*
   For this we are going to use the parser:
@@ -679,7 +679,7 @@
   val input = filtered_input 
         \"foo::\\\"int \<Rightarrow> bool\\\" and bar::nat (\\\"BAR\\\" 100) and blonk\"
 in
-   parse OuterParse.fixes input
+  parse OuterParse.fixes input
 end"
 "([(foo, SOME \"\\^E\\^Ftoken\\^Eint \<Rightarrow> bool\\^E\\^F\\^E\", NoSyn), 
   (bar, SOME \"\\^E\\^Ftoken\\^Enat\\^E\\^F\\^E\", Mixfix (\"BAR\", [], 100)), 
@@ -699,10 +699,10 @@
   \end{readmore}
 
   Lines 3 to 7 in the function @{ML spec_parser} implement the parser for a
-  list of introduction rules, that is propositions with theorem
-  annotations. The introduction rules are propositions parsed by @{ML
-  OuterParse.prop}. However, they can include an optional theorem name plus
-  some attributes. For example
+  list of introduction rules, that is propositions with theorem annotations
+  such as rule names and attributes. The introduction rules are propositions
+  parsed by @{ML OuterParse.prop}. However, they can include an optional
+  theorem name plus some attributes. For example
 
 @{ML_response [display,gray] "let 
   val input = filtered_input \"foo_lemma[intro,dest!]:\"
@@ -752,8 +752,6 @@
 section {* New Commands and Keyword Files\label{sec:newcommand} *}
 
 text {*
-  (FIXME: update to the right command setup --- is this still needed?)
-
   Often new commands, for example for providing new definitional principles,
   need to be implemented. While this is not difficult on the ML-level,
   new commands, in order to be useful, need to be recognised by
@@ -766,16 +764,16 @@
 *}
 
 ML{*let
-  val do_nothing = Scan.succeed (Toplevel.theory I)
+  val do_nothing = Scan.succeed (LocalTheory.theory I)
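+  (* parse no argument and return the local theory unchanged *)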
   val kind = OuterKeyword.thy_decl
 in
-  OuterSyntax.command "foobar" "description of foobar" kind do_nothing
+  OuterSyntax.local_theory "foobar" "description of foobar" kind do_nothing
 end *}
 
 text {*
-  The crucial function @{ML OuterSyntax.command} expects a name for the command, a
-  short description, a kind indicator (which we will explain later on more thoroughly) and a
-  parser producing a top-level transition function (its purpose will also explained
+  The crucial function @{ML OuterSyntax.local_theory} expects a name for the command, a
+  short description, a kind indicator (which we will explain later more thoroughly) and a
+  parser producing a local theory transition (its purpose will also be explained
   later). 
 
   While this is everything you have to do on the ML-level, you need a keyword
@@ -803,15 +801,15 @@
   \isacommand{ML}~@{text "\<verbopen>"}\\
   @{ML
 "let
-  val do_nothing = Scan.succeed (Toplevel.theory I)
+  val do_nothing = Scan.succeed (LocalTheory.theory I)
   val kind = OuterKeyword.thy_decl
 in
-  OuterSyntax.command \"foobar\" \"description of foobar\" kind do_nothing
+  OuterSyntax.local_theory \"foobar\" \"description of foobar\" kind do_nothing
 end"}\\
   @{text "\<verbclose>"}\\
   \isacommand{end}
   \end{graybox}
-  \caption{\small The file @{text "Command.thy"} is necessary for generating a log 
+  \caption{The file @{text "Command.thy"} is necessary for generating a log 
   file. This log file enables Isabelle to generate a keyword file containing 
   the command \isacommand{foobar}.\label{fig:commandtheory}}
   \end{figure}
@@ -909,25 +907,24 @@
   The crucial part of a command is the function that determines the behaviour
   of the command. In the code above we used a ``do-nothing''-function, which
   because of @{ML Scan.succeed} does not parse any argument, but immediately
-  returns the simple toplevel function @{ML "Toplevel.theory I"}. We can
+  returns the simple function @{ML "LocalTheory.theory I"}. We can
   replace this code by a function that first parses a proposition (using the
   parser @{ML OuterParse.prop}), then prints out the tracing
-  information (using a new top-level function @{text trace_top_lvl}) and 
+  information (using a new function @{text trace_prop}) and 
   finally does nothing. For this you can write:
 *}
 
 ML{*let
-  fun trace_top_lvl str = 
+  fun trace_prop str = 
      LocalTheory.theory (fn lthy => (tracing str; lthy))
 
-  val trace_prop = OuterParse.prop >> trace_top_lvl
-
+  val trace_prop_parser = OuterParse.prop >> trace_prop
   val kind = OuterKeyword.thy_decl
 in
-  OuterSyntax.local_theory "foobar" "traces a proposition" kind trace_prop
+  OuterSyntax.local_theory "foobar" "traces a proposition" 
+    kind trace_prop_parser
 end *}
 
-
 text {*
   Now you can type
 
@@ -938,17 +935,19 @@
   
   and see the proposition in the tracing buffer.  
 
-  Note that so far we used @{ML thy_decl in OuterKeyword} as the kind indicator
-  for the command.  This means that the command finishes as soon as the
-  arguments are processed. Examples of this kind of commands are
-  \isacommand{definition} and \isacommand{declare}.  In other cases,
-  commands are expected to parse some arguments, for example a proposition,
-  and then ``open up'' a proof in order to prove the proposition (for example
+  Note that so far we used @{ML thy_decl in OuterKeyword} as the kind
+  indicator for the command.  This means that the command finishes as soon as
+  the arguments are processed. Examples of this kind of commands are
+  \isacommand{definition} and \isacommand{declare}.  In other cases, commands
+  are expected to parse some arguments, for example a proposition, and then
+  ``open up'' a proof in order to prove the proposition (for example
   \isacommand{lemma}) or prove some other properties (for example
-  \isacommand{function}). To achieve this kind of behaviour, you have to use the kind
-  indicator @{ML thy_goal in OuterKeyword}.  Note, however, once you change the 
-  ``kind'' of a command from @{ML thy_decl in OuterKeyword} to @{ML thy_goal in OuterKeyword} 
-  then the keyword file needs to be re-created!
+  \isacommand{function}). To achieve this kind of behaviour, you have to use
+  the kind indicator @{ML thy_goal in OuterKeyword} and the function @{ML
+  "local_theory_to_proof" in OuterSyntax} to set up the command.  Note,
+  however, that once you change the ``kind'' of a command from @{ML thy_decl in
+  OuterKeyword} to @{ML thy_goal in OuterKeyword} then the keyword file needs
+  to be re-created!
 
   Below we change \isacommand{foobar} so that it takes a proposition as
   argument and then starts a proof in order to prove it. Therefore in Line 13, 
@@ -956,34 +955,32 @@
 *}
 
 ML%linenosgray{*let
-  fun set_up_thm str ctxt =
+  fun prove_prop str ctxt =
     let
       val prop = Syntax.read_prop ctxt str
     in
       Proof.theorem_i NONE (K I) [[(prop,[])]] ctxt
     end;
   
-  val prove_prop = OuterParse.prop >>  
-      (fn str => Toplevel.print o 
-                    Toplevel.local_theory_to_proof NONE (set_up_thm str))
-  
+  val prove_prop_parser = OuterParse.prop >> prove_prop 
   val kind = OuterKeyword.thy_goal
 in
-  OuterSyntax.command "foobar" "proving a proposition" kind prove_prop
+  OuterSyntax.local_theory_to_proof "foobar" "proving a proposition" 
+    kind prove_prop_parser
 end *}
 
 text {*
-  The function @{text set_up_thm} in Lines 2 to 7 takes a string (the proposition to be
+  The function @{text prove_prop} in Lines 2 to 7 takes a string (the proposition to be
   proved) and a context as argument.  The context is necessary in order to be able to use
   @{ML Syntax.read_prop}, which converts a string into a proper proposition.
   In Line 6 the function @{ML Proof.theorem_i} starts the proof for the
   proposition. Its argument @{ML NONE} stands for a locale (which we chose to
   omit); the argument @{ML "(K I)"} stands for a function that determines what
   should be done with the theorem once it is proved (we chose to just forget
-  about it). Lines 9 to 11 contain the parser for the proposition.
+  about it). Line 9 contains the parser for the proposition.
 
   If you now type \isacommand{foobar}~@{text [quotes] "True \<and> True"}, you obtain the following 
-  proof state:
+  proof state
 
   \begin{isabelle}
   \isacommand{foobar}~@{text [quotes] "True \<and> True"}\\
@@ -991,7 +988,7 @@
   @{text "1. True \<and> True"}
   \end{isabelle}
 
-  and you can build the proof
+  and you can build the following proof
 
   \begin{isabelle}
   \isacommand{foobar}~@{text [quotes] "True \<and> True"}\\
@@ -1000,12 +997,6 @@
   \isacommand{done}
   \end{isabelle}
 
- 
-  
-  (FIXME What do @{ML "Toplevel.theory"} 
-  @{ML "Toplevel.print"} 
-  @{ML Toplevel.local_theory} do?)
-
   (FIXME read a name and show how to store theorems)
 *}
 
--- a/ProgTutorial/Tactical.thy	Tue Mar 31 20:31:18 2009 +0100
+++ b/ProgTutorial/Tactical.thy	Wed Apr 01 12:26:56 2009 +0100
@@ -1652,11 +1652,10 @@
   @{text num} (Line 3), and then generates the meta-equation @{text "t \<equiv> num"} 
   (Line 4), which it proves by the arithmetic tactic in Line 6. 
 
-  For our purpose at the moment, proving the meta-equation using 
-  @{ML Arith_Data.arith_tac} is
-  fine, but there is also an alternative employing the simplifier with a very
-  restricted simpset. For the kind of lemmas we want to prove, the simpset
-  @{text "num_ss"} in the code will suffice.
+  For our purpose at the moment, proving the meta-equation using @{ML
+  arith_tac in Arith_Data} is fine, but there is also an alternative employing
+  the simplifier with a special simpset. For the kind of lemmas we
+  want to prove here, the simpset @{text "num_ss"} should suffice.
 *}
 
 ML{*fun get_thm_alt ctxt (t, n) =
@@ -1672,7 +1671,7 @@
 text {*
   The advantage of @{ML get_thm_alt} is that it leaves very little room for 
   something to go wrong; in contrast it is much more difficult to predict 
-  what happens with @{ML Arith_Data.arith_tac}, especially in more complicated 
+  what happens with @{ML arith_tac in Arith_Data}, especially in more complicated 
   circumstances. The disadvantage of @{ML get_thm_alt} is to find a simpset
   that is sufficiently powerful to solve every instance of the lemmas
   we like to prove. This requires careful tuning, but is often necessary in 
Binary file progtutorial.pdf has changed