more work on simple inductive and marked all sections that are still seriously incomplete with TBD
authorChristian Urban <urbanc@in.tum.de>
Fri, 27 Mar 2009 12:49:28 +0000
changeset 211 d5accbc67e1b
parent 210 db8e302f44c8
child 212 ac01ddb285f6
more work on simple inductive and marked all sections that are still seriously incomplete with TBD
ProgTutorial/Package/Ind_Code.thy
ProgTutorial/Package/Ind_General_Scheme.thy
ProgTutorial/Parsing.thy
ProgTutorial/Recipes/ExternalSolver.thy
ProgTutorial/Recipes/Oracle.thy
ProgTutorial/Recipes/StoringData.thy
ProgTutorial/Recipes/USTypes.thy
ProgTutorial/document/root.tex
progtutorial.pdf
--- a/ProgTutorial/Package/Ind_Code.thy	Thu Mar 26 19:00:51 2009 +0000
+++ b/ProgTutorial/Package/Ind_Code.thy	Fri Mar 27 12:49:28 2009 +0000
@@ -2,7 +2,13 @@
 imports "../Base" "../FirstSteps" Ind_General_Scheme 
 begin
 
-section {* The Gory Details *} 
+section {* The Gory Details\label{sec:code} *} 
+
+text {*
+  As mentioned before, the code falls roughly into three parts: the definitions,
+  the induction principles and the introduction rules. In addition, there is an 
+  administrative function that strings everything together. 
+*}
 
 subsection {* Definitions *}
 
@@ -47,7 +53,7 @@
 text {*
   which introduces the definition @{prop "MyTrue \<equiv> True"} and then prints it out. 
  Since we are testing the function inside \isacommand{local\_setup}, i.e., making
-  changes to the ambient theory, we can query the definition with the usual
+  actual changes to the ambient theory, we can query the definition with the usual
   command \isacommand{thm}:
 
   \begin{isabelle}
@@ -65,10 +71,11 @@
   "preds"}.
 
 
-  The first function constructs the term for one particular predicate (the
-  argument @{text "pred"} in the code belo). The number of arguments of this predicate is
-  determined by the number of argument types given in @{text "arg_tys"}. 
-  The other arguments are all the @{text "preds"} and the @{text "orules"}.
+  The first function, named @{text defn_aux}, constructs the term for one
+  particular predicate (the argument @{text "pred"} in the code below). The
+  number of arguments of this predicate is determined by the number of
+  argument types given in @{text "arg_tys"}. The other arguments of the
+  function are the @{text orules} and all the @{text "preds"}.
 *}
 
 ML %linenosgray{*fun defn_aux lthy orules preds (pred, arg_tys) =
@@ -88,15 +95,15 @@
 end*}
 
 text {*
-  The function in Line 3 is just a helper function for constructing universal
-  quantifications. The code in Lines 5 to 9 produces the fresh @{text
+  The function @{text mk_all} in Line 3 is just a helper function for constructing 
+  universal quantifications. The code in Lines 5 to 9 produces the fresh @{text
   "zs"}. For this it pairs every argument type with the string
   @{text [quotes] "z"} (Line 7); then generates variants for all these strings
  so that they are unique w.r.t.~the predicates and @{text "orules"} (Line 8);
   in Line 9 it generates the corresponding variable terms for the unique
   strings.
 
-  The unique free variables are applied to the predicate (Line 11) using the
+  The unique variables are applied to the predicate in Line 11 using the
   function @{ML list_comb}; then the @{text orules} are prefixed (Line 12); in
  Line 13 we quantify over all predicates; and in Line 14 we just abstract
   over all the @{text "zs"}, i.e., the fresh arguments of the
@@ -105,22 +112,21 @@
 
 local_setup %gray{* fn lthy =>
 let
-  val pred = @{term "even::nat\<Rightarrow>bool"}
-  val arg_tys = [@{typ "nat"}]
-  val def = defn_aux lthy eo_orules eo_preds (pred, arg_tys)
+  val def = defn_aux lthy eo_orules eo_preds (e_pred, e_arg_tys)
 in
   warning (Syntax.string_of_term lthy def); lthy
 end *}
 
 text {*
-  The testcase  calls @{ML defn_aux} for the predicate @{text "even"} and prints
+  where we use the shorthands defined in Figure~\ref{fig:shorthands}.
+  The testcase calls @{ML defn_aux} for the predicate @{text "even"} and prints
   out the generated definition. So we obtain as printout 
 
   @{text [display] 
 "\<lambda>z. \<forall>even odd. (even 0) \<longrightarrow> (\<forall>n. odd n \<longrightarrow> even (Suc n)) 
                          \<longrightarrow> (\<forall>n. even n \<longrightarrow> odd (Suc n)) \<longrightarrow> even z"}
 
-  If we try out the function for the definition of freshness
+  If we try out the function with the rules for freshness
 *}
 
 local_setup %gray{* fn lthy =>
@@ -138,12 +144,12 @@
                 (\<forall>a b t. \<not> a = b \<longrightarrow> fresh a t \<longrightarrow> fresh a (Lam b t)) \<longrightarrow> fresh z za"}
 
 
-  The second function for the definitions has to just iterate the function
-  The second function, named @{text defns}, just has to iterate the function
   @{ML defn_aux} over all predicates. The argument @{text "preds"} is again
  the list of predicates as @{ML_type term}s; the argument @{text
-  "prednames"} is the list of names of the predicates; @{text syns} are the
-  syntax annotations for each predicate; @{text "arg_tyss"} is
-  the list of argument-type-lists for each predicate.
+  "prednames"} is the list of binding names of the predicates; @{text syns} 
+  is the list of syntax annotations for the predicates; @{text "arg_tyss"} is
+  the list of argument-type-lists.
 *}
 
 ML %linenosgray{*fun defns rules preds prednames syns arg_typss lthy =
@@ -177,7 +183,7 @@
 end *}
 
 text {*
-  where we feed into the functions all parameters corresponding to
+  where we feed into the function all parameters corresponding to
   the @{text even}-@{text odd} example. The definitions we obtain
   are:
 
@@ -230,15 +236,15 @@
 text {*
   This helper function instantiates the @{text "?x"} in the theorem 
   @{thm spec} with a given @{ML_type cterm}. We call this helper function
-  in the tactic:
+  in the next tactic, called @{text inst_spec_tac}.
 *}
 
 ML{*fun inst_spec_tac ctrms = 
   EVERY' (map (dtac o inst_spec) ctrms)*}
 
 text {*
-  This tactic allows us to instantiate in the following proof the 
-  three quantifiers in the assumption. 
+  This tactic expects a list of @{ML_type cterm}s. It allows us in the following 
+  proof to instantiate the three quantifiers in the assumption. 
 *}
 
 lemma 
@@ -255,7 +261,7 @@
 (*<*)oops(*>*)
 
 text {*
-  Now the complete tactic for proving the induction principles can 
+  The complete tactic for proving the induction principles can now
   be implemented as follows:
 *}
 
@@ -269,7 +275,7 @@
 text {*
   We have to give it as arguments the definitions, the premise (this premise
   is @{text "even n"} in lemma @{thm [source] manual_ind_prin_even}) and the
-  instantiations. Compare this tactic with the manual for the lemma @{thm
+  instantiations. Compare this tactic with the manual proof for the lemma @{thm
   [source] manual_ind_prin_even}: as you can see there is almost a one-to-one
   correspondence between the \isacommand{apply}-script and the @{ML
   ind_tac}. Two testcases for this tactic are:
@@ -288,11 +294,14 @@
         (\<And>a t. P a (Lam a t)) \<Longrightarrow> 
         (\<And>a b t. \<lbrakk>a \<noteq> b; P a t\<rbrakk> \<Longrightarrow> P a (Lam b t)) \<Longrightarrow> P z za"
 by (tactic {* ind_tac @{thms fresh_def} @{thms prem} 
-                    [@{cterm "P::string\<Rightarrow>trm\<Rightarrow>bool"}] *})
+                          [@{cterm "P::string\<Rightarrow>trm\<Rightarrow>bool"}] *})
 
 
 text {*
-  Let us have a closer look at the first proved theorem:
+  While the tactic for proving the induction principles is relatively simple,
+  it will be a bit of work to construct the goals from the introduction rules
+  the user provides.  Therefore let us have a closer look at the first 
+  proved theorem:
 
   \begin{isabelle}
   \isacommand{thm}~@{thm [source] automatic_ind_prin_even}\\
@@ -301,26 +310,23 @@
 
   The variables @{text "z"}, @{text "P"} and @{text "Q"} are schematic
   variables (since they are not quantified in the lemma). These schematic
-  variables are needed so that they can be instantiated by the user later
-  on. We have to take care to also generate these schematic variables when
-  generating the goals for the induction principles. While the tactic for
-  proving the induction principles is relatively simple, it will be a bit
-  of work to construct the goals from the introduction rules the user
-  provides. In general we have to construct for each predicate @{text "pred"}
-  a goal of the form
+  variables are needed so that they can be instantiated by the user. 
+  We have to take care to also generate these schematic variables when
+  generating the goals for the induction principles.  In general we have 
+  to construct for each predicate @{text "pred"} a goal of the form
 
   @{text [display] 
   "pred ?zs \<Longrightarrow> rules[preds := ?Ps] \<Longrightarrow> ?P ?zs"}
 
-  where the predicates @{text preds} are replaced in the introduction 
-  rules by new distinct variables @{text "?Ps"}.
-  We also need to generate fresh arguments @{text "?zs"} for the predicate 
-  @{text "pred"} and the @{text "?P"} in the conclusion. Note 
-  that the  @{text "?Ps"}  and @{text "?zs"} need to be
-  schematic variables that can be instantiated by the user.
+  where the predicates @{text preds} are replaced in @{text rules} by new 
+  distinct variables @{text "?Ps"}. We also need to generate fresh arguments 
+  @{text "?zs"} for the predicate  @{text "pred"} and the @{text "?P"} in 
+  the conclusion. The crucial point is that the  @{text "?Ps"} and 
+  @{text "?zs"} need to be schematic variables that can be instantiated 
+  by the user.
 
-  We generate these goals in two steps. The first function expects that the
-  introduction rules are already appropriately substituted. The argument
+  We generate these goals in two steps. The first function, named @{text prove_ind}, 
+  expects that the introduction rules are already appropriately substituted. The argument
   @{text "srules"} stands for these substituted rules; @{text cnewpreds} are
  the certified terms corresponding to the variables @{text "?Ps"}; @{text
   "pred"} is the predicate for which we prove the induction principle;
@@ -363,7 +369,7 @@
   which the introduction rules have been substituted. The code in these two
   lines will return a theorem. However, it is a theorem
   proved inside the local theory @{text "lthy'"}, where the variables @{text
-  "zs"} are fixed, but free (see Line 4). By exporting this theorem from @{text
+  "zs"} are free, but fixed (see Line 4). By exporting this theorem from @{text
   "lthy'"} (which contains the @{text "zs"} as free variables) to @{text
   "lthy"} (which does not), we obtain the desired schematic variables @{text "?zs"}.
   A testcase for this function is
@@ -373,10 +379,10 @@
 let
   val newpreds = [@{term "P::nat\<Rightarrow>bool"}, @{term "Q::nat\<Rightarrow>bool"}]
   val cnewpreds = [@{cterm "P::nat\<Rightarrow>bool"}, @{cterm "Q::nat\<Rightarrow>bool"}]
+  val newpred = @{term "P::nat\<Rightarrow>bool"}
   val srules =  map (subst_free (eo_preds ~~ newpreds)) eo_rules
-  val newpred = @{term "P::nat\<Rightarrow>bool"}
   val intro = 
-       prove_ind lthy eo_defs srules cnewpreds ((e_pred, newpred), e_arg_tys)
+      prove_ind lthy eo_defs srules cnewpreds ((e_pred, newpred), e_arg_tys)
 in
   warning (str_of_thm lthy intro); lthy
 end *}
@@ -394,7 +400,7 @@
 
   We still have to produce the new predicates with which the introduction
   rules are substituted and iterate @{ML prove_ind} over all
-  predicates. This is what the second function does: 
+  predicates. This is what the second function, named @{text inds}, does. 
 *}
 
 ML %linenosgray{*fun inds rules defs preds arg_tyss lthy  =
@@ -418,7 +424,7 @@
 text {*
   In Line 3, we generate a string @{text [quotes] "P"} for each predicate. 
   In Line 4, we use the same trick as in the previous function, that is making the 
-  @{text "Ps"} fresh and declaring them as fixed, but free, in
+  @{text "Ps"} fresh and declaring them as free, but fixed, in
   the new local theory @{text "lthy'"}. From the local theory we extract
   the ambient theory in Line 6. We need this theory in order to certify 
   the new predicates. In Line 8, we construct the types of these new predicates
@@ -519,7 +525,7 @@
   for freshness of applications. We set up the proof as follows:
 *}
 
-lemma fresh_App:
+lemma fresh_Lam:
   shows "\<And>a b t. \<lbrakk>a \<noteq> b; fresh a t\<rbrakk> \<Longrightarrow> fresh a (Lam b t)"
 (*<*)oops(*>*)
 
@@ -535,12 +541,12 @@
   THEN (REPEAT (resolve_tac [@{thm allI}, @{thm impI}] 1)) *}
 
 text {*
-  The first step of ``rulifying'' the lemma will turn out important
+  The first step of ``rulifying'' the lemma will turn out to be important
   later on. Applying this tactic 
 *}
 
 (*<*)
-lemma fresh_App:
+lemma fresh_Lam:
   shows "\<And>a b t. \<lbrakk>a \<noteq> b; fresh a t\<rbrakk> \<Longrightarrow> fresh a (Lam b t)"
 (*>*)
 apply(tactic {* expand_tac @{thms fresh_def} *})
@@ -566,9 +572,9 @@
   @{ML SUBPROOF}, since this tactic provides us
   with the parameters (as list of @{ML_type cterm}s) and the premises
   (as list of @{ML_type thm}s). The problem we have to overcome 
-  with @{ML SUBPROOF} is that this tactic always expects us to completely 
-  discharge with the goal (see Section ???). This is inconvenient for
-  our gradual explanation of the proof. To circumvent this inconvenience
+  with @{ML SUBPROOF} is, however, that this tactic always expects us to completely 
+  discharge the goal (see Section~\ref{sec:simpletacs}). This is inconvenient for
+  our gradual explanation of the proof here. To circumvent this inconvenience
   we use the following modified tactic: 
 *}
 (*<*)oops(*>*)
@@ -577,7 +583,7 @@
 text {*
   If the tactic inside @{ML SUBPROOF} fails, then the overall tactic will
   still succeed. With this testing tactic, we can gradually implement
-  all necessary proof steps.
+  all necessary proof steps inside a subproof.
 *}
 
 text_raw {*
@@ -599,7 +605,8 @@
 text_raw{*
 \end{isabelle}
 \end{minipage}
-\caption{FIXME\label{fig:chopprint}}
+\caption{A helper function that prints out the parameters and premises that
+  need to be treated differently.\label{fig:chopprint}}
 \end{figure}
 *}
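+
+text {*
+  The separation of parameters and premises will be done with the library
+  function @{ML chop}, which splits a list at a given index. As a purely
+  illustrative reminder, the following call evaluates to 
+  @{text "([1, 2], [3, 4, 5])"}.
+*}
+
+ML{*chop 2 [1, 2, 3, 4, 5]*}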
 
@@ -624,17 +631,17 @@
 text {* 
  For the separation we can rely on the fact that Isabelle deterministically 
  produces parameters and premises in a goal state. The last parameters
-  that were introduced, come from the quantifications in the definitions.
+  that were introduced come from the quantifications in the definitions.
   Therefore we only have to subtract the number of predicates (in this
-  case only @{text "1"} from the lenghts of all parameters. Similarly
-  with the @{text "prems"}. The last premises in the goal state must
-  come from unfolding of the conclusion. So we can just subtract
-  the number of rules from the number of all premises. Applying
-  this tactic in our example 
+  case only @{text "1"}) from the length of all parameters. Similarly
+  with the @{text "prems"}: the last premises in the goal state come from 
+  unfolding the definition of the predicate in the conclusion. So we can 
+  just subtract the number of rules from the number of all premises. 
+  Applying this tactic in our example 
 *}
 
 (*<*)
-lemma fresh_App:
+lemma fresh_Lam:
   shows "\<And>a b t. \<lbrakk>a \<noteq> b; fresh a t\<rbrakk> \<Longrightarrow> fresh a (Lam b t)"
 apply(tactic {* expand_tac @{thms fresh_def} *})
 (*>*)
@@ -671,9 +678,9 @@
   @{term [display] "\<forall>a t s. fresh a t \<longrightarrow> fresh a s \<longrightarrow> fresh a (App t s)"}
 
   To use this premise with @{ML rtac}, we need to instantiate its 
-  quantifiers (with @{text params1}) and transform it into a 
-  introduction rule (using @{ML "ObjectLogic.rulify"}. 
-  So we can modify the subproof as follows:
+  quantifiers (with @{text params1}) and transform it into rule 
+  format (using @{ML "ObjectLogic.rulify"}). So we can modify the 
+  subproof as follows:
 *}
 
 ML{*fun apply_prem_tac i preds rules =
@@ -687,10 +694,16 @@
     THEN no_tac
   end) *}
 
-text {* and apply it with *}
+text {* 
+  The argument @{text i} corresponds to the number of the 
+  introduction rule we want to analyse. We will later on let it range
+  from @{text 0} to the number of introduction rules.
+  Below we apply this function with @{text 3}, since 
+  we are proving the fourth introduction rule. 
+*}
 
 (*<*)
-lemma fresh_App:
+lemma fresh_Lam:
   shows "\<And>a b t. \<lbrakk>a \<noteq> b; fresh a t\<rbrakk> \<Longrightarrow> fresh a (Lam b t)"
 apply(tactic {* expand_tac @{thms fresh_def} *})
 (*>*)
@@ -698,19 +711,20 @@
 (*<*)oops(*>*)
 
 text {*
-  Since we print out the goal state after the @{ML rtac} we can see
-  the goal state has the two subgoals
+  Since we print out the goal state just after the application of 
+  @{ML rtac}, we can see that the goal state we obtain has, as expected, 
+  the two subgoals
 
   \begin{isabelle}
   @{text "1."}~@{prop "a \<noteq> b"}\\
   @{text "2."}~@{prop "fresh a t"}
   \end{isabelle}
 
-  where the first comes from a non-recursive precondition of the rule
-  and the second comes from a recursive precondition. The first kind
-  can be solved immediately by @{text "prems1"}. The second kind
-  needs more work. It can be solved with the other premise in @{text "prems1"},
-  namely
+  where the first comes from a non-recursive premise of the rule
+  and the second comes from a recursive premise. The first goal
+  can be solved immediately by @{text "prems1"}. The second
+  needs more work. It can be solved with the other premise 
+  in @{text "prems1"}, namely
 
   @{term [break,display]
   "\<forall>fresh.
@@ -722,9 +736,11 @@
   but we have to instantiate it appropriately. These instantiations
   come from @{text "params1"} and @{text "prems2"}. We can determine
   whether we are in the simple or complicated case by checking whether
-  the topmost connective is @{text "\<forall>"}. The premises in the simple
+  the topmost connective is an @{text "\<forall>"}. The premises in the simple
   case cannot have such a quantification, since in the first step 
-  of @{ML "expand_tac"} was the ``rulification'' of the lemma. So 
+  of @{ML "expand_tac"} the lemma was ``rulified''. 
+  The premise of the complicated case must have at least one  @{text "\<forall>"}
+  coming from the quantification over the @{text preds}. So 
   we can implement the following function
 *}
 
@@ -736,14 +752,14 @@
          | _ => prem) *}
 
 text {* 
-  which either applies the premise outright or if it had an
-  outermost universial quantification, instantiates it first with 
-  @{text "params1"} and then @{text "prems1"}. The following tactic 
-  will therefore prove the lemma.
+  which either applies the premise outright (the default case) or, if 
+  it has an outermost universal quantification, instantiates it first 
+  with  @{text "params1"} and then @{text "prems1"}. The following 
+  tactic will therefore prove the lemma completely.
 *}
 
 ML{*fun prove_intro_tac i preds rules =
-  SUBPROOF (fn {params, prems, context, ...} =>
+  SUBPROOF (fn {params, prems, ...} =>
   let
     val (params1, params2) = chop (length params - length preds) params
     val (prems1, prems2) = chop (length prems - length rules) prems
@@ -753,19 +769,21 @@
   end) *}
 
 text {*
-  We can complete the proof of the introduction rule now as follows:
+  The full proof of the introduction rule is now as follows:
 *}
 
-(*<*)
-lemma fresh_App:
+lemma fresh_Lam:
   shows "\<And>a b t. \<lbrakk>a \<noteq> b; fresh a t\<rbrakk> \<Longrightarrow> fresh a (Lam b t)"
 apply(tactic {* expand_tac @{thms fresh_def} *})
-(*>*)
 apply(tactic {* prove_intro_tac 3 [fresh_pred] fresh_rules @{context} 1 *})
 done
 
 text {* 
-  Unfortunately, not everything is done yet.
+  Unfortunately, not everything is done yet. If you look closely
+  at the general principle outlined in Section~\ref{sec:nutshell}, 
+  you will see that we have not yet dealt with the case where recursive premises 
+  in a rule have preconditions @{text Bs}. The introduction rule
+  of the accessible part is such a rule. 
 *}
 
 lemma accpartI:
@@ -775,8 +793,8 @@
 apply(tactic {* apply_prem_tac 0 [acc_pred] acc_rules @{context} *})
 
 txt {*
-  
-  @{subgoals [display]}
+  Here @{ML chop_test_tac} prints out the following
+  values for @{text "params1/2"} and @{text "prems1/2"}
 
   \begin{isabelle}
   @{text "Params1 from the rule:"}\\
@@ -789,11 +807,25 @@
   @{term "\<forall>x. (\<forall>y. R y x \<longrightarrow> P y) \<longrightarrow> P x"}\\
   \end{isabelle}
 
-*}
-(*<*)oops(*>*)
+  and after application of the introduction rule 
+  using @{ML apply_prem_tac}, we are in the goal state
+
+  \begin{isabelle}
+  @{text "1."}~@{term "\<And>y. R y x \<Longrightarrow> P y"}
+  \end{isabelle}
+  
+  
+*}(*<*)oops(*>*)
 
+text {*
+  In order to make progress as before, we have to use the precondition
+  @{text "R y x"} (in general there can be many of them). The best way
+  to get a handle on these preconditions is to open up another subproof,
+  since the preconditions will be bound to @{text prems}. Therefore we
+  modify the function @{ML prepare_prem} as follows:
+*}
 
-ML{*fun prepare_prem params2 prems2 ctxt prem =  
+ML %linenosgray{*fun prepare_prem params2 prems2 ctxt prem =  
   SUBPROOF (fn {prems, ...} =>
   let
     val prem' = prems MRS prem
@@ -805,7 +837,18 @@
          | _ => prem') 1
   end) ctxt *}
 
-ML{*fun prove_intro_tac i preds rules =
+text {*
+  In Line 4 we use the @{text prems} from the @{ML SUBPROOF} and resolve 
+  them with @{text prem}. In the simple case, that is where the @{text prem} 
+  comes from a non-recursive premise of the rule, @{text prems} will be 
+  just the empty list and @{ML MRS} does nothing. The same happens in the 
+  cases where the recursive premises of the rule do not have preconditions. 
+  
+  The function @{ML prove_intro_tac} only needs to give the context to
+  @{ML prepare_prem} (Line 8) and is as follows.
+*}
+
+ML %linenosgray{*fun prove_intro_tac i preds rules =
   SUBPROOF (fn {params, prems, context, ...} =>
   let
     val (params1, params2) = chop (length params - length preds) params
@@ -815,58 +858,69 @@
     THEN EVERY1 (map (prepare_prem params2 prems2 context) prems1)
   end) *}
 
+text {*
+  With these extended functions we can also prove the introduction
+  rule for the accessible part. 
+*}
+
 lemma accpartI:
   shows "\<And>x. (\<And>y. R y x \<Longrightarrow> accpart R y) \<Longrightarrow> accpart R x"
 apply(tactic {* expand_tac @{thms accpart_def} *})
 apply(tactic {* prove_intro_tac 0 [acc_pred] acc_rules @{context} 1 *})
 done
 
-
-
 text {*
-
+  Finally we need two functions that string everything together. The first
+  function is the tactic that performs the proofs.
 *}
 
-ML{*
-fun intros_tac defs rules preds i ctxt =
+ML %linenosgray{*fun intro_tac defs rules preds i ctxt =
   EVERY1 [ObjectLogic.rulify_tac,
           K (rewrite_goals_tac defs),
           REPEAT o (resolve_tac [@{thm allI}, @{thm impI}]),
           prove_intro_tac i preds rules ctxt]*}
 
 text {*
-  A test case
+  Lines 2 to 4 correspond to the function @{ML expand_tac}. Some testcases
+  for this tactic are:
 *}
 
-ML{*fun intros_tac_test ctxt i =
-  intros_tac eo_defs eo_rules eo_preds i ctxt *}
-
-lemma intro0:
+lemma even0_intro:
   shows "even 0"
-apply(tactic {* intros_tac_test @{context} 0 *})
-done
+by (tactic {* intro_tac eo_defs eo_rules eo_preds 0 @{context} *})
+
 
-lemma intro1:
+lemma evenS_intro:
   shows "\<And>m. odd m \<Longrightarrow> even (Suc m)"
-apply(tactic {* intros_tac_test @{context} 1 *})
-done
+by (tactic {* intro_tac eo_defs eo_rules eo_preds 1 @{context} *})
+
+lemma fresh_App:
+  shows "\<And>a t s. \<lbrakk>fresh a t; fresh a s\<rbrakk> \<Longrightarrow> fresh a (App t s)"
+by (tactic {* 
+  intro_tac @{thms fresh_def} fresh_rules [fresh_pred] 1 @{context} *})
 
-lemma intro2:
-  shows "\<And>m. even m \<Longrightarrow> odd (Suc m)"
-apply(tactic {* intros_tac_test @{context} 2 *})
-done
+text {*
+  The second function sets up the goals in Line 4 (in this case this is easy,
+  since they are exactly the introduction rules given by the user)
+  and iterates @{ML intro_tac} over all introduction rules.
+*}
 
-ML{*fun intros rules preds defs lthy = 
+ML %linenosgray{*fun intros rules preds defs lthy = 
 let
-  fun prove_intro (i, goal) =
+  fun intros_aux (i, goal) =
     Goal.prove lthy [] [] goal
-      (fn {context, ...} => intros_tac defs rules preds i context)
+      (fn {context, ...} => intro_tac defs rules preds i context)
 in
-  map_index prove_intro rules
+  map_index intros_aux rules
 end*}
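+
+text {*
+  In this code, @{ML "Goal.prove"} proves the goal term with the given
+  tactic (the two empty lists are for fixed variables and assumptions),
+  while @{ML map_index} behaves like @{ML map}, but additionally passes the
+  position of each list element (starting from @{text 0}) to the supplied
+  function; this position is the index @{text i} handed to @{ML intro_tac}.
+  Two small, purely illustrative evaluations, using the library identity
+  function @{ML I} and the theorem @{thm [source] TrueI}:
+*}
+
+ML{*Goal.prove @{context} [] [] @{prop "True"} (fn _ => rtac @{thm TrueI} 1)*}
+
+ML{*map_index I ["a", "b", "c"]*}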
 
+subsection {* Main Function *}
+
 text {* main internal function *}
 
+ML {* LocalTheory.notes *}
+
+
 ML %linenosgray{*fun add_inductive pred_specs rule_specs lthy =
 let
   val syns = map snd pred_specs
@@ -927,6 +981,7 @@
   Things to include at the end:
 
   \begin{itemize}
+  \item include the code for the parameters
   \item say something about add-inductive-i to return
   the rules
   \item say that the induction principle is weaker (weaker than
--- a/ProgTutorial/Package/Ind_General_Scheme.thy	Thu Mar 26 19:00:51 2009 +0000
+++ b/ProgTutorial/Package/Ind_General_Scheme.thy	Fri Mar 27 12:49:28 2009 +0000
@@ -17,7 +17,7 @@
 | fresh_lam2: "\<lbrakk>a\<noteq>b; fresh a t\<rbrakk> \<Longrightarrow> fresh a (Lam b t)"
 (*>*)
 
-section {* The Code in a Nutshell *}
+section {* The Code in a Nutshell\label{sec:nutshell} *}
 
 text {*
   (FIXME: perhaps move somewhere else)
@@ -34,14 +34,14 @@
   follows we will have the convention that various, possibly empty collections of 
   ``things'' are indicated either by adding an @{text "s"} or by adding 
   a superscript @{text [quotes] "\<^isup>*"}. The shorthand for the predicates 
-  will therefore be @{text "preds"} or @{text "pred\<^sup>*"}. In this case of the
-  predicates there must be at least a single one in order to obtain a meaningful
-  definition.
+  will therefore be @{text "preds"} or @{text "pred\<^sup>*"}. In the case of the
+  predicates there must be, of course, at least a single one in order to obtain a 
+  meaningful definition.
 
-  The input for the inductive predicate will be some @{text "preds"} with possible 
+  The input for the inductive package will be some @{text "preds"} with possible 
  typing and syntax annotations, and also some introduction rules. Below we refer to the 
  introduction rules simply as @{text "rules"}. Borrowing some idealised Isabelle 
-  notation, one such @{text "rule"} is of the form
+  notation, one such @{text "rule"} is assumed to be of the form
 
   \begin{isabelle}
   @{text "rule ::= 
@@ -50,17 +50,20 @@
   \<Longrightarrow> pred ts"}
   \end{isabelle}
   
-  We actually assume the user will always state rules of this form and we omit
-  any code that test this format. So things can go horribly wrong if the
-  @{text "rules"} are not of this form.\footnote{FIXME: Exercise to test this
-  format.} The @{text As} and @{text Bs} in a @{text "rule"} are formulae not
-  involving the inductive predicates @{text "preds"}; the instances @{text
-  "pred ss"} and @{text "pred ts"} can stand for different predicates.
-  Everything left of @{text [quotes] "\<Longrightarrow> pred ts"} are called the premises of
-  the rule. The variables @{text "xs"} are usually omitted in the user's
-  input. The variables @{text "ys"} are local with respect to on recursive
-  premise. Some examples of @{text "rule"}s the user might write are:
-
+  For the purposes here, we will assume the @{text rules} have
+  this format and omit any code that actually tests this. Therefore ``things'' 
+  can go horribly wrong if the @{text "rules"} are not of this
+  form.\footnote{FIXME: Exercise to test this format.} The @{text As} and
+  @{text Bs} in a @{text "rule"} are formulae not involving the inductive
+  predicates @{text "preds"}; the instances @{text "pred ss"} and @{text "pred
+  ts"} can stand for different predicates, like @{text "pred\<^isub>1 ss"} and 
+  @{text "pred\<^isub>2 ts"}; @{text ss} and @{text ts} are the
+  arguments of the predicates. Every formula left of @{text [quotes]
+  "\<Longrightarrow> pred ts"} is a premise of the rule. The outermost quantified 
+  variables @{text "xs"} are usually omitted in the user's input. The 
+  quantification for the variables @{text "ys"} is local with respect to 
+  one recursive premise and must be given. Some examples of @{text "rule"}s 
+  are
 
   @{thm [display] fresh_var[no_vars]}
 
@@ -72,15 +75,16 @@
 
   @{thm [display] accpartI[no_vars]}
 
-  has a recursive premise which has a precondition. In the examples, all 
+  has a recursive premise that has a precondition. As usual, all 
   rules are stated without the leading meta-quantification @{text "\<And>xs"}.
 
-  The code of the inductive package falls roughly in tree parts involving
-  the definitions, the induction principles and the introduction rules, 
-  respectively. For the definitions we need to have the @{text rules}
-  in a form where the meta-quantifiers and meta-implications are replaced
-  by their object logic equivalent. Therefore an @{text "orule"} is of the
-  form 
+  The code of the inductive package falls roughly into three parts: the first
+  deals with the definitions, the second with the induction principles and 
+  the third with the introduction rules. 
+  
+  For the definitions we need to have the @{text rules} in a form where 
+  the meta-quantifiers and meta-implications are replaced by their object 
+  logic equivalents. Therefore an @{text "orule"} is of the form 
 
   @{text [display] "orule ::= \<forall>xs. As \<longrightarrow> (\<forall>ys. Bs \<longrightarrow> pred ss)\<^isup>* \<longrightarrow> pred ts"}
 
@@ -88,17 +92,17 @@
 
   @{text [display] "def ::= pred \<equiv> \<lambda>zs. \<forall>preds. orules \<longrightarrow> pred zs"}
 
-  The induction principles for the predicate @{text "pred"} are of the
+  The induction principle for every predicate @{text "pred"} is of the
   form
 
   @{text [display] "ind ::= pred ?zs \<Longrightarrow> rules[preds := ?Ps] \<Longrightarrow> ?P ?zs"}
 
-  where in the @{text "rules"} every @{text pred} is replaced by a new
-  (meta)variable @{text "?P"}.
+  where in the @{text "rules"} every @{text pred} is replaced by a fresh
+  meta-variable @{text "?P"}.
 
-  In order to derive an induction principle for the predicate @{text "pred"}
-  we first transform it into the object logic and fix the meta-variables. Hence 
-  we have to prove a formula of the form
+  In order to derive an induction principle for the predicate @{text "pred"},
+  we first transform @{text ind} into the object logic and fix the meta-variables. 
+  Hence we have to prove a formula of the form
 
   @{text [display] "pred zs \<longrightarrow> orules[preds := Ps] \<longrightarrow> P zs"}
 
@@ -110,11 +114,13 @@
 
   @{text [display] "orules[preds := Ps] \<longrightarrow> P zs"}
 
-  This can be done by instantiating the @{text "\<forall>preds"} with the @{text "Ps"}. 
-  Then we are done since we are left with a simple identity.
+  This can be done by instantiating the @{text "\<forall>preds"}-quantification 
+  with the @{text "Ps"}. Then we are done since we are left with a simple 
+  identity.
   
-  The proofs for the introduction rules are more involved. Assuming we want to
-  prove the introduction rule 
+  Although the user declares introduction rules @{text rules}, they must 
+  be derived from the @{text defs}. These derivations are a bit involved. 
+  Assuming we want to prove the introduction rule 
 
   @{text [display] "\<And>xs. As \<Longrightarrow> (\<And>ys. Bs \<Longrightarrow> pred ss)\<^isup>* \<Longrightarrow> pred ts"}
 
@@ -143,8 +149,8 @@
 
   In the last step we removed some quantifiers and moved the precondition @{text "orules"}  
  into the assumption. The @{text "orules"} stand for all introduction rules that are given 
-  by the user. We apply the one which corresponds to introduction rule we are proving.
-  This introduction rule must be of the form 
+  by the user. We apply the @{text orule} that corresponds to the introduction rule we are 
+  proving. This introduction rule must necessarily be of the form 
 
   @{text [display] "As \<Longrightarrow> (\<And>ys. Bs \<Longrightarrow> pred ss)\<^isup>* \<Longrightarrow> pred ts"}
 
@@ -155,20 +161,22 @@
   (b)~@{text "(\<And>ys. Bs \<Longrightarrow> pred ss)\<^isup>*"}
   \end{isabelle}
 
-  The goals @{text "As"} we can immediately discharge with the assumption in @{text "(i)"}.
-  The goals in @{text "(b)"} we discharge as follows: we assume the @{text "(iv)"} 
-  @{text "Bs"} and prove @{text "(c)"} @{text "pred ss"}. We then resolve the 
-  @{text "Bs"}  with the assumptions in @{text "(ii)"}. This gives us
+  We can immediately discharge the goals @{text "As"} using the assumption in 
+  @{text "(i)"}. The goals in @{text "(b)"} can be discharged as follows: we 
+  assume the @{text "Bs"} and prove @{text "pred ss"}. For this we resolve the 
+  @{text "Bs"}  with the assumptions in @{text "(ii)"}. This gives us the 
+  assumptions
 
   @{text [display] "(\<forall>preds. orules \<longrightarrow> pred ss)\<^isup>*"}
 
   Instantiating the universal quantifiers and then resolving with the assumptions 
   in @{text "(iii)"} gives us @{text "pred ss"}, which is the goal we are after.
+  This completes the proof for introduction rules.
 
-  
-  What remains is to implement the reasoning outlined above. 
-  For building testcases, we use some shorthands for the definitions 
-  of @{text "even/odd"}, @{term "fresh"} and @{term "accpart"}. 
+  What remains is to implement the reasoning outlined above. We do this in
+  the next section. For building testcases, we use the shorthands for 
+  @{text "even/odd"}, @{term "fresh"} and @{term "accpart"}
+  given in Figure~\ref{fig:shorthands}.
 *}
 
 
@@ -219,14 +227,15 @@
 (* accessible-part example *)
 val acc_rules = 
      [@{prop "\<And>R x. (\<And>y. R y x \<Longrightarrow> accpart R y) \<Longrightarrow> accpart R x"}]
-val acc_pred = @{term "accpart:: ('a \<Rightarrow>'a\<Rightarrow>bool)\<Rightarrow>'a \<Rightarrow>bool"}*}
+val acc_pred = @{term "accpart::('a \<Rightarrow>'a\<Rightarrow>bool)\<Rightarrow>'a \<Rightarrow>bool"}*}
 text_raw{*
 \end{isabelle}
 \end{minipage}
-\caption{Shorthands for the inductive predicates of @{text "even"}-@{text "odd"}, 
-  @{text "fresh"} and @{text "accpart"}. The follow the convention @{text "rules"}, 
-  @{text "orules"}, @{text "preds"} and so on as used in Section ???. The purpose 
-  of these shorthands is to simplify the construction of testcases.}
+\caption{Shorthands for the inductive predicates @{text "even"}-@{text "odd"}, 
+  @{text "fresh"} and @{text "accpart"}. The names of these shorthands follow 
+  the convention @{text "rules"}, @{text "orules"}, @{text "preds"} and so on. 
+  The purpose of these shorthands is to simplify the construction of testcases
+  in Section~\ref{sec:code}.\label{fig:shorthands}}
 \end{figure}
 *}
 
--- a/ProgTutorial/Parsing.thy	Thu Mar 26 19:00:51 2009 +0000
+++ b/ProgTutorial/Parsing.thy	Fri Mar 27 12:49:28 2009 +0000
@@ -752,7 +752,7 @@
 section {* New Commands and Keyword Files\label{sec:newcommand} *}
 
 text {*
-  (FIXME: update to the right command setup --- is this still needed)
+  (FIXME: update to the right command setup --- is this still needed?)
 
   Often new commands, for example for providing new definitional principles,
   need to be implemented. While this is not difficult on the ML-level,
@@ -1008,7 +1008,7 @@
   (FIXME read a name and show how to store theorems)
 *}
 
-section {* Methods *}
+section {* Methods (TBD) *}
 
 text {*
   (FIXME: maybe move to after the tactic section)
--- a/ProgTutorial/Recipes/ExternalSolver.thy	Thu Mar 26 19:00:51 2009 +0000
+++ b/ProgTutorial/Recipes/ExternalSolver.thy	Fri Mar 27 12:49:28 2009 +0000
@@ -3,7 +3,7 @@
 begin
 
 
-section {* Executing an External Application \label{rec:external}*}
+section {* Executing an External Application (TBD) \label{rec:external}*}
 
 text {*
   {\bf Problem:}
--- a/ProgTutorial/Recipes/Oracle.thy	Thu Mar 26 19:00:51 2009 +0000
+++ b/ProgTutorial/Recipes/Oracle.thy	Fri Mar 27 12:49:28 2009 +0000
@@ -3,7 +3,7 @@
 uses ("external_solver.ML")
 begin
 
-section {* Writing an Oracle\label{rec:oracle} *} 
+section {* Writing an Oracle (TBD)\label{rec:oracle} *} 
 
 text {*
   {\bf Problem:}
--- a/ProgTutorial/Recipes/StoringData.thy	Thu Mar 26 19:00:51 2009 +0000
+++ b/ProgTutorial/Recipes/StoringData.thy	Fri Mar 27 12:49:28 2009 +0000
@@ -2,7 +2,7 @@
 imports "../Base"
 begin
 
-section {* Storing Data\label{rec:storingdata} *} 
+section {* Storing Data (TBD)\label{rec:storingdata} *} 
 
 
 text {*
--- a/ProgTutorial/Recipes/USTypes.thy	Thu Mar 26 19:00:51 2009 +0000
+++ b/ProgTutorial/Recipes/USTypes.thy	Fri Mar 27 12:49:28 2009 +0000
@@ -4,7 +4,7 @@
 begin
 
 
-section {* User Space Type-Systems *}
+section {* User Space Type-Systems (TBD) *}
 
 
 
--- a/ProgTutorial/document/root.tex	Thu Mar 26 19:00:51 2009 +0000
+++ b/ProgTutorial/document/root.tex	Fri Mar 27 12:49:28 2009 +0000
@@ -43,6 +43,7 @@
 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
 \hyphenation{Isabelle}
 \renewcommand{\isasymiota}{}
+\renewcommand{\isamarkupsubsection}[1]{\subsection*{#1}}
 
 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
 % to work around a problem with \isanewline
Binary file progtutorial.pdf has changed